qemu/target/arm/helper.c
/*
 * ARM generic helpers.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "trace.h"
#include "cpu.h"
#include "internals.h"
#include "exec/helper-proto.h"
#include "qemu/main-loop.h"
#include "qemu/timer.h"
#include "qemu/bitops.h"
#include "qemu/crc32c.h"
#include "qemu/qemu-print.h"
#include "exec/exec-all.h"
#include <zlib.h> /* For crc32 */
#include "hw/irq.h"
#include "sysemu/cpu-timers.h"
#include "sysemu/kvm.h"
#include "sysemu/tcg.h"
#include "qapi/error.h"
#include "qemu/guest-random.h"
#ifdef CONFIG_TCG
#include "semihosting/common-semi.h"
#endif
#include "cpregs.h"

#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */

static void switch_mode(CPUARMState *env, int mode);

static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        return CPREG_FIELD64(env, ri);
    } else {
        return CPREG_FIELD32(env, ri);
    }
}

void raw_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(env, ri) = value;
    } else {
        CPREG_FIELD32(env, ri) = value;
    }
}

static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return (char *)env + ri->fieldoffset;
}

uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Raw read of a coprocessor register (as needed for migration, etc). */
    if (ri->type & ARM_CP_CONST) {
        return ri->resetvalue;
    } else if (ri->raw_readfn) {
        return ri->raw_readfn(env, ri);
    } else if (ri->readfn) {
        return ri->readfn(env, ri);
    } else {
        return raw_read(env, ri);
    }
}

static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t v)
{
    /*
     * Raw write of a coprocessor register (as needed for migration, etc).
     * Note that constant registers are treated as write-ignored; the
     * caller should check for success by whether a readback gives the
     * value written.
     */
    if (ri->type & ARM_CP_CONST) {
        return;
    } else if (ri->raw_writefn) {
        ri->raw_writefn(env, ri, v);
    } else if (ri->writefn) {
        ri->writefn(env, ri, v);
    } else {
        raw_write(env, ri, v);
    }
}
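
/*
 * A minimal sketch of the readback convention described above
 * (hypothetical caller; write_list_to_cpustate() below is the real one):
 *
 *     write_raw_cp_reg(env, ri, v);
 *     if (read_raw_cp_reg(env, ri) != v) {
 *         ... the register rejected the value (e.g. ARM_CP_CONST)
 *     }
 */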

static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
{
   /*
    * Return true if the regdef would cause an assertion if you called
    * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
    * program bug for it not to have the NO_RAW flag).
    * NB that returning false here doesn't necessarily mean that calling
    * read/write_raw_cp_reg() is safe, because we can't distinguish "has
    * read/write access functions which are safe for raw use" from "has
    * read/write access functions which have side effects but has forgotten
    * to provide raw access functions".
    * The tests here line up with the conditions in read/write_raw_cp_reg()
    * and assertions in raw_read()/raw_write().
    */
    if ((ri->type & ARM_CP_CONST) ||
        ri->fieldoffset ||
        ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
        return false;
    }
    return true;
}

bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync)
{
    /* Write the coprocessor state from cpu->env to the (index,value) list. */
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        const ARMCPRegInfo *ri;
        uint64_t newval;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }

        newval = read_raw_cp_reg(&cpu->env, ri);
        if (kvm_sync) {
            /*
             * Only sync if the previous list->cpustate sync succeeded.
             * Rather than tracking the success/failure state for every
             * item in the list, we just recheck "does the raw write we must
             * have made in write_list_to_cpustate() read back OK" here.
             */
            uint64_t oldval = cpu->cpreg_values[i];

            if (oldval == newval) {
                continue;
            }

            write_raw_cp_reg(&cpu->env, ri, oldval);
            if (read_raw_cp_reg(&cpu->env, ri) != oldval) {
                continue;
            }

            write_raw_cp_reg(&cpu->env, ri, newval);
        }
        cpu->cpreg_values[i] = newval;
    }
    return ok;
}

bool write_list_to_cpustate(ARMCPU *cpu)
{
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        uint64_t v = cpu->cpreg_values[i];
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        /*
         * Write value and confirm it reads back as written
         * (to catch read-only registers and partially read-only
         * registers where the incoming migration value doesn't match)
         */
        write_raw_cp_reg(&cpu->env, ri, v);
        if (read_raw_cp_reg(&cpu->env, ri) != v) {
            ok = false;
        }
    }
    return ok;
}

static void add_cpreg_to_list(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint32_t regidx = (uintptr_t)key;
    const ARMCPRegInfo *ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) {
        cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
        /* The value array need not be initialized at this point */
        cpu->cpreg_array_len++;
    }
}

static void count_cpreg(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    const ARMCPRegInfo *ri;

    ri = g_hash_table_lookup(cpu->cp_regs, key);

    if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) {
        cpu->cpreg_array_len++;
    }
}

static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
{
    uint64_t aidx = cpreg_to_kvm_id((uintptr_t)a);
    uint64_t bidx = cpreg_to_kvm_id((uintptr_t)b);

    if (aidx > bidx) {
        return 1;
    }
    if (aidx < bidx) {
        return -1;
    }
    return 0;
}

void init_cpreg_list(ARMCPU *cpu)
{
    /*
     * Initialise the cpreg_tuples[] array based on the cp_regs hash.
     * Note that we require cpreg_tuples[] to be sorted by key ID.
     */
    GList *keys;
    int arraylen;

    keys = g_hash_table_get_keys(cpu->cp_regs);
    keys = g_list_sort(keys, cpreg_key_compare);

    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, count_cpreg, cpu);

    arraylen = cpu->cpreg_array_len;
    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, add_cpreg_to_list, cpu);

    assert(cpu->cpreg_array_len == arraylen);

    g_list_free(keys);
}
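
/*
 * For example (hypothetical numbers): with 400 raw-accessible registers,
 * the counting pass leaves cpreg_array_len == 400, the four arrays are
 * sized for 400 entries, and the second pass then fills cpreg_indexes[]
 * in sorted KVM-id order before the assertion re-checks the count.
 */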

/*
 * Some registers are not accessible from AArch32 EL3 if SCR.NS == 0.
 */
static CPAccessResult access_el3_aa32ns(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    if (!is_a64(env) && arm_current_el(env) == 3 &&
        arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}

/*
 * Some secure-only AArch32 registers trap to EL3 if used from
 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
 * We assume that the .access field is set to PL1_RW.
 */
static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
                                            const ARMCPRegInfo *ri,
                                            bool isread)
{
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        if (env->cp15.scr_el3 & SCR_EEL2) {
            return CP_ACCESS_TRAP_EL2;
        }
        return CP_ACCESS_TRAP_EL3;
    }
    /* This will be EL1 NS and EL2 NS, which just UNDEF */
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}

/*
 * Check for traps to performance monitor registers, which are controlled
 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
 */
static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);

    if (el < 2 && (mdcr_el2 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TVM and HCR_EL2.TRVM.  */
static CPAccessResult access_tvm_trvm(CPUARMState *env, const ARMCPRegInfo *ri,
                                      bool isread)
{
    if (arm_current_el(env) == 1) {
        uint64_t trap = isread ? HCR_TRVM : HCR_TVM;
        if (arm_hcr_el2_eff(env) & trap) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TSW.  */
static CPAccessResult access_tsw(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TSW)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TACR.  */
static CPAccessResult access_tacr(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TACR)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TTLB. */
static CPAccessResult access_ttlb(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TTLB)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TTLB or TTLBIS. */
static CPAccessResult access_ttlbis(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (arm_current_el(env) == 1 &&
        (arm_hcr_el2_eff(env) & (HCR_TTLB | HCR_TTLBIS))) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

#ifdef TARGET_AARCH64
/* Check for traps from EL1 due to HCR_EL2.TTLB or TTLBOS. */
static CPAccessResult access_ttlbos(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (arm_current_el(env) == 1 &&
        (arm_hcr_el2_eff(env) & (HCR_TTLB | HCR_TTLBOS))) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}
#endif

static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    raw_write(env, ri, value);
    tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
}

static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (raw_read(env, ri) != value) {
        /*
         * Unlike real hardware the qemu TLB uses virtual addresses,
         * not modified virtual addresses, so this causes a TLB flush.
         */
        tlb_flush(CPU(cpu));
        raw_write(env, ri, value);
    }
}

static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
        && !extended_addresses_enabled(env)) {
        /*
         * For VMSA (when not using the LPAE long descriptor page table
         * format) this register includes the ASID, so do a TLB flush.
         * For PMSA it is purely a process ID and no action is needed.
         */
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}

static int alle1_tlbmask(CPUARMState *env)
{
    /*
     * Note that the 'ALL' scope must invalidate both stage 1 and
     * stage 2 translations, whereas most other scopes only invalidate
     * stage 1 translations.
     */
    return (ARMMMUIdxBit_E10_1 |
            ARMMMUIdxBit_E10_1_PAN |
            ARMMMUIdxBit_E10_0 |
            ARMMMUIdxBit_Stage2 |
            ARMMMUIdxBit_Stage2_S);
}

/* IS variants of TLB operations must affect all cores */
static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

/*
 * Non-IS variants of TLB operations are upgraded to
 * IS versions if we are at EL1 and HCR_EL2.FB is effectively set to
 * force broadcast of these operations.
 */
static bool tlb_force_broadcast(CPUARMState *env)
{
    return arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_FB);
}

static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate all (TLBIALL) */
    CPUState *cs = env_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlb_flush_all_cpus_synced(cs);
    } else {
        tlb_flush(cs);
    }
}

static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
    CPUState *cs = env_cpu(env);

    value &= TARGET_PAGE_MASK;
    if (tlb_force_broadcast(env)) {
        tlb_flush_page_all_cpus_synced(cs, value);
    } else {
        tlb_flush_page(cs, value);
    }
}

static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate by ASID (TLBIASID) */
    CPUState *cs = env_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlb_flush_all_cpus_synced(cs);
    } else {
        tlb_flush(cs);
    }
}

static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
    CPUState *cs = env_cpu(env);

    value &= TARGET_PAGE_MASK;
    if (tlb_force_broadcast(env)) {
        tlb_flush_page_all_cpus_synced(cs, value);
    } else {
        tlb_flush_page(cs, value);
    }
}

static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx(cs, alle1_tlbmask(env));
}

static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, alle1_tlbmask(env));
}

static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E2);
}

static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E2);
}

static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E2);
}

static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_E2);
}

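/*
 * TLBIIPAS2 encodes bits [39:12] of the intermediate physical address
 * in register bits [27:0]: e.g. (hypothetical) a written value of
 * 0x00012345 selects IPA page 0x12345000.
 */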
static void tlbiipas2_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = (value & MAKE_64BIT_MASK(0, 28)) << 12;

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_Stage2);
}

static void tlbiipas2is_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = (value & MAKE_64BIT_MASK(0, 28)) << 12;

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, ARMMMUIdxBit_Stage2);
}

static const ARMCPRegInfo cp_reginfo[] = {
    /*
     * Define the secure and non-secure FCSE identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3).  This allows
     * the secure register to be properly reset and migrated. There is also no
     * v8 EL1 version of the register so the non-secure instance stands alone.
     */
    { .name = "FCSEIDR",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    { .name = "FCSEIDR_S",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    /*
     * Define the secure and non-secure context identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3).  This allows
     * the secure register to be properly reset and migrated.  In the
     * non-secure case, the 32-bit register will have reset and migration
     * disabled during registration as it is handled by the 64-bit instance.
     */
    { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .fgt = FGT_CONTEXTIDR_EL1,
      .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
};

static const ARMCPRegInfo not_v8_cp_reginfo[] = {
    /*
     * NB: Some of these registers exist in v8 but with more precise
     * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
     */
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR",
      .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    /*
     * ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
     * For v6 and v5, these mappings are overly broad.
     */
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    /* Cache maintenance ops; some of this space may be overridden later. */
    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
};

static const ARMCPRegInfo not_v6_cp_reginfo[] = {
    /*
     * Not all pre-v6 cores implemented this WFI, so this is slightly
     * over-broad.
     */
    { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_WFI },
};

static const ARMCPRegInfo not_v7_cp_reginfo[] = {
    /*
     * Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
     * is UNPREDICTABLE; we choose to NOP as most implementations do).
     */
    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_WFI },
    /*
     * L1 cache lockdown. Not architectural in v6 and earlier but in practice
     * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
     * OMAPCP will override this space.
     */
    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
      .resetvalue = 0 },
    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
      .resetvalue = 0 },
    /* v6 doesn't have the cache ID registers but Linux reads them anyway */
    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /*
     * We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
     * implementing it as RAZ means the "debug architecture version" bits
     * will read as a reserved value, which should cause Linux to not try
     * to use the debug hardware.
     */
    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /*
     * MMU TLB control. Note that the wildcarding means we cover not just
     * the unified TLB ops but also the dside/iside/inner-shareable variants.
     */
    { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
      .type = ARM_CP_NO_RAW },
    { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
};

static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    uint32_t mask = 0;

    /* In ARMv8 most bits of CPACR_EL1 are RES0. */
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /*
         * ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
         * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
         * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
         */
        if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))) {
            /* VFP coprocessor: cp10 & cp11 [23:20] */
            mask |= R_CPACR_ASEDIS_MASK |
                    R_CPACR_D32DIS_MASK |
                    R_CPACR_CP11_MASK |
                    R_CPACR_CP10_MASK;

            if (!arm_feature(env, ARM_FEATURE_NEON)) {
                /* ASEDIS [31] bit is RAO/WI */
                value |= R_CPACR_ASEDIS_MASK;
            }

            /*
             * VFPv3 and upwards with NEON implement 32 double precision
             * registers (D0-D31).
             */
            if (!cpu_isar_feature(aa32_simd_r32, env_archcpu(env))) {
                /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
                value |= R_CPACR_D32DIS_MASK;
            }
        }
        value &= mask;
    }

    /*
     * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
     * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
     */
    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        mask = R_CPACR_CP11_MASK | R_CPACR_CP10_MASK;
        value = (value & ~mask) | (env->cp15.cpacr_el1 & mask);
    }

    env->cp15.cpacr_el1 = value;
}

static uint64_t cpacr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
     * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
     */
    uint64_t value = env->cp15.cpacr_el1;

    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value &= ~(R_CPACR_CP11_MASK | R_CPACR_CP10_MASK);
    }
    return value;
}

static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * Call cpacr_write() so that we reset with the correct RAO bits set
     * for our CPU features.
     */
    cpacr_write(env, ri, 0);
}

static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* Check if CPACR accesses are to be trapped to EL2 */
        if (arm_current_el(env) == 1 && arm_is_el2_enabled(env) &&
            FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TCPAC)) {
            return CP_ACCESS_TRAP_EL2;
        /* Check if CPACR accesses are to be trapped to EL3 */
        } else if (arm_current_el(env) < 3 &&
                   FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TCPAC)) {
            return CP_ACCESS_TRAP_EL3;
        }
    }

    return CP_ACCESS_OK;
}

static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    /* Check if CPTR accesses are set to trap to EL3 */
    if (arm_current_el(env) == 2 &&
        FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TCPAC)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

static const ARMCPRegInfo v6_cp_reginfo[] = {
    /* prefetch by MVA in v6, NOP in v7 */
    { .name = "MVA_prefetch",
      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    /*
     * We need to break the TB after ISB to execute self-modifying code
     * correctly and also to take any pending interrupts immediately.
     * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag.
     */
    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
                             offsetof(CPUARMState, cp15.ifar_ns) },
      .resetvalue = 0, },
    /*
     * Watchpoint Fault Address Register : should actually only be present
     * for 1136, 1176, 11MPCore.
     */
    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
      .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
      .fgt = FGT_CPACR_EL1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
      .resetfn = cpacr_reset, .writefn = cpacr_write, .readfn = cpacr_read },
};

typedef struct pm_event {
    uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */
    /* If the event is supported on this CPU (used to generate PMCEID[01]) */
    bool (*supported)(CPUARMState *);
    /*
     * Retrieve the current count of the underlying event. The programmed
     * counters hold a difference from the return value from this function
     */
    uint64_t (*get_count)(CPUARMState *);
    /*
     * Return how many nanoseconds it will take (at a minimum) for count events
     * to occur. A negative value indicates the counter will never overflow, or
     * that the counter has otherwise arranged for the overflow bit to be set
     * and the PMU interrupt to be raised on overflow.
     */
    int64_t (*ns_per_count)(uint64_t);
} pm_event;
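
/*
 * Illustrative relationship (hypothetical numbers; see pmevcntr_op_start()
 * and pmevcntr_op_finish() below): if a counter is programmed to 100 while
 * get_count() returns 1000, the stored delta becomes 900, so a later
 * get_count() of 1500 makes the guest-visible counter read 600.
 */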

static bool event_always_supported(CPUARMState *env)
{
    return true;
}

static uint64_t swinc_get_count(CPUARMState *env)
{
    /*
     * SW_INCR events are written directly to the pmevcntr's by writes to
     * PMSWINC, so there is no underlying count maintained by the PMU itself
     */
    return 0;
}

static int64_t swinc_ns_per(uint64_t ignored)
{
    return -1;
}

/*
 * Return the underlying cycle count for the PMU cycle counters. In
 * system emulation this is derived from the virtual clock; in user-mode
 * emulation we fall back to the host's tick counter.
 */
static uint64_t cycles_get_count(CPUARMState *env)
{
#ifndef CONFIG_USER_ONLY
    return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                   ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
#else
    return cpu_get_host_ticks();
#endif
}

#ifndef CONFIG_USER_ONLY
static int64_t cycles_ns_per(uint64_t cycles)
{
    return (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) * cycles;
}

static bool instructions_supported(CPUARMState *env)
{
    return icount_enabled() == 1; /* Precise instruction counting */
}

static uint64_t instructions_get_count(CPUARMState *env)
{
    return (uint64_t)icount_get_raw();
}

static int64_t instructions_ns_per(uint64_t icount)
{
    return icount_to_ns((int64_t)icount);
}
#endif

static bool pmuv3p1_events_supported(CPUARMState *env)
{
    /* For events which are supported in any v8.1 PMU */
    return cpu_isar_feature(any_pmuv3p1, env_archcpu(env));
}

static bool pmuv3p4_events_supported(CPUARMState *env)
{
    /* For events which are supported in any v8.4 PMU */
    return cpu_isar_feature(any_pmuv3p4, env_archcpu(env));
}

static uint64_t zero_event_get_count(CPUARMState *env)
{
    /* For events which on QEMU never fire, so their count is always zero */
    return 0;
}

static int64_t zero_event_ns_per(uint64_t cycles)
{
    /* An event which never fires can never overflow */
    return -1;
}

static const pm_event pm_events[] = {
    { .number = 0x000, /* SW_INCR */
      .supported = event_always_supported,
      .get_count = swinc_get_count,
      .ns_per_count = swinc_ns_per,
    },
#ifndef CONFIG_USER_ONLY
    { .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */
      .supported = instructions_supported,
      .get_count = instructions_get_count,
      .ns_per_count = instructions_ns_per,
    },
    { .number = 0x011, /* CPU_CYCLES, Cycle */
      .supported = event_always_supported,
      .get_count = cycles_get_count,
      .ns_per_count = cycles_ns_per,
    },
#endif
    { .number = 0x023, /* STALL_FRONTEND */
      .supported = pmuv3p1_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
    { .number = 0x024, /* STALL_BACKEND */
      .supported = pmuv3p1_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
    { .number = 0x03c, /* STALL */
      .supported = pmuv3p4_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
};

/*
 * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of
 * events (i.e. the statistical profiling extension), this implementation
 * should first be updated to something sparse instead of the current
 * supported_event_map[] array.
 */
#define MAX_EVENT_ID 0x3c
#define UNSUPPORTED_EVENT UINT16_MAX
static uint16_t supported_event_map[MAX_EVENT_ID + 1];

/*
 * Called upon CPU initialization to initialize PMCEID[01]_EL0 and build a map
 * of ARM event numbers to indices in our pm_events array.
 *
 * Note: Events in the 0x40XX range are not currently supported.
 */
void pmu_init(ARMCPU *cpu)
{
    unsigned int i;

    /*
     * Empty supported_event_map and cpu->pmceid[01] before adding supported
     * events to them
     */
    for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) {
        supported_event_map[i] = UNSUPPORTED_EVENT;
    }
    cpu->pmceid0 = 0;
    cpu->pmceid1 = 0;

    for (i = 0; i < ARRAY_SIZE(pm_events); i++) {
        const pm_event *cnt = &pm_events[i];
        assert(cnt->number <= MAX_EVENT_ID);
        /* We do not currently support events in the 0x40xx range */
        assert(cnt->number <= 0x3f);

        if (cnt->supported(&cpu->env)) {
            supported_event_map[cnt->number] = i;
            uint64_t event_mask = 1ULL << (cnt->number & 0x1f);
            if (cnt->number & 0x20) {
                cpu->pmceid1 |= event_mask;
            } else {
                cpu->pmceid0 |= event_mask;
            }
        }
    }
}
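
/*
 * Worked example of the mapping above (hypothetical CPU): event 0x011
 * (CPU_CYCLES) has bit 0x20 clear, so it sets bit 17 (0x11 & 0x1f) of
 * PMCEID0; event 0x023 (STALL_FRONTEND) has bit 0x20 set, so it sets
 * bit 3 (0x23 & 0x1f) of PMCEID1.
 */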

/*
 * Check at runtime whether a PMU event is supported for the current machine
 */
static bool event_supported(uint16_t number)
{
    if (number > MAX_EVENT_ID) {
        return false;
    }
    return supported_event_map[number] != UNSUPPORTED_EVENT;
}

static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /*
     * Performance monitor registers user accessibility is controlled
     * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
     * trapping to EL2 or EL3 for other accesses.
     */
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);

    if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
        return CP_ACCESS_TRAP;
    }
    if (el < 2 && (mdcr_el2 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
                                           const ARMCPRegInfo *ri,
                                           bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_swinc(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* SW: software increment write trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
        && !isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_selr(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* CR: cycle counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

/*
 * Bits in MDCR_EL2 and MDCR_EL3 which pmu_counter_enabled() looks at.
 * We use these to decide whether we need to wrap a write to MDCR_EL2
 * or MDCR_EL3 in pmu_op_start()/pmu_op_finish() calls.
 */
#define MDCR_EL2_PMU_ENABLE_BITS \
    (MDCR_HPME | MDCR_HPMD | MDCR_HPMN | MDCR_HCCD | MDCR_HLP)
#define MDCR_EL3_PMU_ENABLE_BITS (MDCR_SPME | MDCR_SCCD)

/*
 * Returns true if the counter (pass 31 for PMCCNTR) should count events using
 * the current EL, security state, and register configuration.
 */
static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
{
    uint64_t filter;
    bool e, p, u, nsk, nsu, nsh, m;
    bool enabled, prohibited = false, filtered;
    bool secure = arm_is_secure(env);
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
    uint8_t hpmn = mdcr_el2 & MDCR_HPMN;

    if (!arm_feature(env, ARM_FEATURE_PMU)) {
        return false;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2) ||
            (counter < hpmn || counter == 31)) {
        e = env->cp15.c9_pmcr & PMCRE;
    } else {
        e = mdcr_el2 & MDCR_HPME;
    }
    enabled = e && (env->cp15.c9_pmcnten & (1 << counter));

    /* Is event counting prohibited? */
    if (el == 2 && (counter < hpmn || counter == 31)) {
        prohibited = mdcr_el2 & MDCR_HPMD;
    }
    if (secure) {
        prohibited = prohibited || !(env->cp15.mdcr_el3 & MDCR_SPME);
    }

    if (counter == 31) {
        /*
         * The cycle counter defaults to running. PMCR.DP says "disable
         * the cycle counter when event counting is prohibited".
         * Some MDCR bits disable the cycle counter specifically.
         */
        prohibited = prohibited && env->cp15.c9_pmcr & PMCRDP;
        if (cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
            if (secure) {
                prohibited = prohibited || (env->cp15.mdcr_el3 & MDCR_SCCD);
            }
            if (el == 2) {
                prohibited = prohibited || (mdcr_el2 & MDCR_HCCD);
            }
        }
    }

    if (counter == 31) {
        filter = env->cp15.pmccfiltr_el0;
    } else {
        filter = env->cp15.c14_pmevtyper[counter];
    }

    p   = filter & PMXEVTYPER_P;
    u   = filter & PMXEVTYPER_U;
    nsk = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSK);
    nsu = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSU);
    nsh = arm_feature(env, ARM_FEATURE_EL2) && (filter & PMXEVTYPER_NSH);
    m   = arm_el_is_aa64(env, 1) &&
              arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_M);

    if (el == 0) {
        filtered = secure ? u : u != nsu;
    } else if (el == 1) {
        filtered = secure ? p : p != nsk;
    } else if (el == 2) {
        filtered = !nsh;
    } else { /* EL3 */
        filtered = m != p;
    }

    if (counter != 31) {
        /*
         * If not checking PMCCNTR, ensure the counter is setup to an event we
         * support
         */
        uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
        if (!event_supported(event)) {
            return false;
        }
    }

    return enabled && !prohibited && !filtered;
}
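
/*
 * Filtering example (hypothetical): at EL0 in Non-secure state with
 * PMEVTYPER.U = 1 and NSU = 0, "filtered" above is true (u != nsu),
 * so the counter does not count even if enabled and not prohibited.
 */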

static void pmu_update_irq(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
            (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
}

static bool pmccntr_clockdiv_enabled(CPUARMState *env)
{
    /*
     * Return true if the clock divider is enabled and the cycle counter
     * is supposed to tick only once every 64 clock cycles. This is
     * controlled by PMCR.D, but if PMCR.LC is set to enable the long
     * (64-bit) cycle counter PMCR.D has no effect.
     */
    return (env->cp15.c9_pmcr & (PMCRD | PMCRLC)) == PMCRD;
}

static bool pmevcntr_is_64_bit(CPUARMState *env, int counter)
{
    /* Return true if the specified event counter is configured to be 64 bit */

    /* This isn't intended to be used with the cycle counter */
    assert(counter < 31);

    if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
        return false;
    }

    if (arm_feature(env, ARM_FEATURE_EL2)) {
        /*
         * MDCR_EL2.HLP still applies even when EL2 is disabled in the
         * current security state, so we don't use arm_mdcr_el2_eff() here.
         */
        bool hlp = env->cp15.mdcr_el2 & MDCR_HLP;
        int hpmn = env->cp15.mdcr_el2 & MDCR_HPMN;

        if (hpmn != 0 && counter >= hpmn) {
            return hlp;
        }
    }
    return env->cp15.c9_pmcr & PMCRLP;
}
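
/*
 * For example (hypothetical): with FEAT_PMUv3p5 and MDCR_EL2.HPMN = 4,
 * counters 4 and up take their width from MDCR_EL2.HLP, while counters
 * 0..3 follow PMCR.LP.
 */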

/*
 * Ensure c15_ccnt is the guest-visible count so that operations such as
 * enabling/disabling the counter or filtering, modifying the count itself,
 * etc. can be done logically. This is essentially a no-op if the counter is
 * not enabled at the time of the call.
 */
static void pmccntr_op_start(CPUARMState *env)
{
    uint64_t cycles = cycles_get_count(env);

    if (pmu_counter_enabled(env, 31)) {
        uint64_t eff_cycles = cycles;
        if (pmccntr_clockdiv_enabled(env)) {
            eff_cycles /= 64;
        }

        uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta;

        uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ?
                                 1ull << 63 : 1ull << 31;
        if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) {
            env->cp15.c9_pmovsr |= (1ULL << 31);
            pmu_update_irq(env);
        }

        env->cp15.c15_ccnt = new_pmccntr;
    }
    env->cp15.c15_ccnt_delta = cycles;
}
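
/*
 * Worked example (hypothetical numbers): if eff_cycles is 5000 and
 * c15_ccnt_delta is 1000, the guest-visible PMCCNTR becomes 4000. The
 * overflow test fires only when the selected top bit goes from 1 to 0.
 */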

/*
 * If PMCCNTR is enabled, recalculate the delta between the clock and the
 * guest-visible count. A call to pmccntr_op_finish should follow every call to
 * pmccntr_op_start.
 */
static void pmccntr_op_finish(CPUARMState *env)
{
    if (pmu_counter_enabled(env, 31)) {
#ifndef CONFIG_USER_ONLY
        /* Calculate when the counter will next overflow */
        uint64_t remaining_cycles = -env->cp15.c15_ccnt;
        if (!(env->cp15.c9_pmcr & PMCRLC)) {
            remaining_cycles = (uint32_t)remaining_cycles;
        }
        int64_t overflow_in = cycles_ns_per(remaining_cycles);

        if (overflow_in > 0) {
            int64_t overflow_at;

            if (!sadd64_overflow(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                                 overflow_in, &overflow_at)) {
                ARMCPU *cpu = env_archcpu(env);
                timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
            }
        }
#endif

        uint64_t prev_cycles = env->cp15.c15_ccnt_delta;
        if (pmccntr_clockdiv_enabled(env)) {
            prev_cycles /= 64;
        }
        env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt;
    }
}

static void pmevcntr_op_start(CPUARMState *env, uint8_t counter)
{
    uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
    uint64_t count = 0;
    if (event_supported(event)) {
        uint16_t event_idx = supported_event_map[event];
        count = pm_events[event_idx].get_count(env);
    }

    if (pmu_counter_enabled(env, counter)) {
        uint64_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter];
        uint64_t overflow_mask = pmevcntr_is_64_bit(env, counter) ?
            1ULL << 63 : 1ULL << 31;

        if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & overflow_mask) {
            env->cp15.c9_pmovsr |= (1 << counter);
            pmu_update_irq(env);
        }
        env->cp15.c14_pmevcntr[counter] = new_pmevcntr;
    }
    env->cp15.c14_pmevcntr_delta[counter] = count;
}

static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter)
{
    if (pmu_counter_enabled(env, counter)) {
#ifndef CONFIG_USER_ONLY
        uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
        uint16_t event_idx = supported_event_map[event];
        uint64_t delta = -(env->cp15.c14_pmevcntr[counter] + 1);
        int64_t overflow_in;

        if (!pmevcntr_is_64_bit(env, counter)) {
            delta = (uint32_t)delta;
        }
        overflow_in = pm_events[event_idx].ns_per_count(delta);

        if (overflow_in > 0) {
            int64_t overflow_at;

            if (!sadd64_overflow(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                                 overflow_in, &overflow_at)) {
                ARMCPU *cpu = env_archcpu(env);
                timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
            }
        }
#endif

        env->cp15.c14_pmevcntr_delta[counter] -=
            env->cp15.c14_pmevcntr[counter];
    }
}

void pmu_op_start(CPUARMState *env)
{
    unsigned int i;
    pmccntr_op_start(env);
    for (i = 0; i < pmu_num_counters(env); i++) {
        pmevcntr_op_start(env, i);
    }
}

void pmu_op_finish(CPUARMState *env)
{
    unsigned int i;
    pmccntr_op_finish(env);
    for (i = 0; i < pmu_num_counters(env); i++) {
        pmevcntr_op_finish(env, i);
    }
}

void pmu_pre_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_start(&cpu->env);
}

void pmu_post_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_finish(&cpu->env);
}

void arm_pmu_timer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    /*
     * Update all the counter values based on the current underlying counts,
     * triggering interrupts to be raised, if necessary. pmu_op_finish() also
     * has the effect of setting the cpu->pmu_timer to the next earliest time a
     * counter may expire.
     */
    pmu_op_start(&cpu->env);
    pmu_op_finish(&cpu->env);
}

static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    pmu_op_start(env);

    if (value & PMCRC) {
        /* The counter has been reset */
        env->cp15.c15_ccnt = 0;
    }

    if (value & PMCRP) {
        unsigned int i;
        for (i = 0; i < pmu_num_counters(env); i++) {
            env->cp15.c14_pmevcntr[i] = 0;
        }
    }

    env->cp15.c9_pmcr &= ~PMCR_WRITABLE_MASK;
    env->cp15.c9_pmcr |= (value & PMCR_WRITABLE_MASK);

    pmu_op_finish(env);
}

static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    unsigned int i;
    uint64_t overflow_mask, new_pmswinc;

    for (i = 0; i < pmu_num_counters(env); i++) {
        /* Increment a counter's count iff: */
        if ((value & (1 << i)) && /* counter's bit is set */
                /* counter is enabled and not filtered */
                pmu_counter_enabled(env, i) &&
                /* counter is SW_INCR */
                (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
            pmevcntr_op_start(env, i);

            /*
             * Detect if this write causes an overflow since we can't predict
             * PMSWINC overflows like we can for other events
             */
            new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;

            overflow_mask = pmevcntr_is_64_bit(env, i) ?
                1ULL << 63 : 1ULL << 31;

            if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & overflow_mask) {
                env->cp15.c9_pmovsr |= (1 << i);
                pmu_update_irq(env);
            }

            env->cp15.c14_pmevcntr[i] = new_pmswinc;

            pmevcntr_op_finish(env, i);
        }
    }
}
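
/*
 * Usage sketch (hypothetical): a guest write of 0x1 to PMSWINC increments
 * event counter 0 only if that counter is enabled, unfiltered, and its
 * PMEVTYPER event number is 0x000 (SW_INCR).
 */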

static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint64_t ret;
    pmccntr_op_start(env);
    ret = env->cp15.c15_ccnt;
    pmccntr_op_finish(env);
    return ret;
}

static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /*
     * The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
     * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; in the
     * meanwhile, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are
     * accessed.
     */
    env->cp15.c9_pmselr = value & 0x1f;
}

static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    pmccntr_op_start(env);
    env->cp15.c15_ccnt = value;
    pmccntr_op_finish(env);
}

static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    uint64_t cur_val = pmccntr_read(env, NULL);

    pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
}
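
/*
 * For example (hypothetical): with PMCCNTR at 0x0000000500000000, an
 * AArch32 write of 0x10 through this accessor yields 0x0000000500000010,
 * since deposit64() replaces only bits [31:0].
 */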
1549
1550static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1551                            uint64_t value)
1552{
1553    pmccntr_op_start(env);
1554    env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0;
1555    pmccntr_op_finish(env);
1556}
1557
1558static void pmccfiltr_write_a32(CPUARMState *env, const ARMCPRegInfo *ri,
1559                            uint64_t value)
1560{
1561    pmccntr_op_start(env);
1562    /* M is not accessible from AArch32 */
1563    env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) |
1564        (value & PMCCFILTR);
1565    pmccntr_op_finish(env);
1566}
1567
1568static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri)
1569{
1570    /* M is not visible in AArch32 */
1571    return env->cp15.pmccfiltr_el0 & PMCCFILTR;
1572}
1573
1574static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1575                            uint64_t value)
1576{
1577    pmu_op_start(env);
1578    value &= pmu_counter_mask(env);
1579    env->cp15.c9_pmcnten |= value;
1580    pmu_op_finish(env);
1581}
1582
1583static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1584                             uint64_t value)
1585{
1586    pmu_op_start(env);
1587    value &= pmu_counter_mask(env);
1588    env->cp15.c9_pmcnten &= ~value;
1589    pmu_op_finish(env);
1590}
1591
1592static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1593                         uint64_t value)
1594{
1595    value &= pmu_counter_mask(env);
1596    env->cp15.c9_pmovsr &= ~value;
1597    pmu_update_irq(env);
1598}
1599
1600static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1601                         uint64_t value)
1602{
1603    value &= pmu_counter_mask(env);
1604    env->cp15.c9_pmovsr |= value;
1605    pmu_update_irq(env);
1606}
1607
1608static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
1609                             uint64_t value, const uint8_t counter)
1610{
1611    if (counter == 31) {
1612        pmccfiltr_write(env, ri, value);
1613    } else if (counter < pmu_num_counters(env)) {
1614        pmevcntr_op_start(env, counter);
1615
1616        /*
1617         * If this counter's event type is changing, store the current
1618         * underlying count for the new type in c14_pmevcntr_delta[counter] so
1619         * pmevcntr_op_finish has the correct baseline when it converts back to
1620         * a delta.
1621         */
1622        uint16_t old_event = env->cp15.c14_pmevtyper[counter] &
1623            PMXEVTYPER_EVTCOUNT;
1624        uint16_t new_event = value & PMXEVTYPER_EVTCOUNT;
1625        if (old_event != new_event) {
1626            uint64_t count = 0;
1627            if (event_supported(new_event)) {
1628                uint16_t event_idx = supported_event_map[new_event];
1629                count = pm_events[event_idx].get_count(env);
1630            }
1631            env->cp15.c14_pmevcntr_delta[counter] = count;
1632        }
1633
1634        env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK;
1635        pmevcntr_op_finish(env, counter);
1636    }
1637    /*
1638     * Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
1639     * PMSELR value is equal to or greater than the number of implemented
1640     * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
1641     */
1642}
1643
1644static uint64_t pmevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri,
1645                               const uint8_t counter)
1646{
1647    if (counter == 31) {
1648        return env->cp15.pmccfiltr_el0;
1649    } else if (counter < pmu_num_counters(env)) {
1650        return env->cp15.c14_pmevtyper[counter];
1651    } else {
1652        /*
1653         * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
1654         * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write().
1655         */
1656        return 0;
1657    }
1658}
1659
1660static void pmevtyper_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
1661                              uint64_t value)
1662{
1663    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1664    pmevtyper_write(env, ri, value, counter);
1665}
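
/*
 * Sketch of the counter-index decode used above and in the other
 * per-event accessors: the architecture spreads PMEVTYPER<n> and
 * PMEVCNTR<n> across CRm and opc2 so that n = CRm[1:0]:opc2[2:0].
 * The register picked below is just an illustrative example.
 */
G_GNUC_UNUSED static uint8_t pmu_event_index_example(void)
{
    uint8_t crm = 13, opc2 = 5;             /* e.g. PMEVTYPER13_EL0 */
    return ((crm & 3) << 3) | (opc2 & 7);   /* (1 << 3) | 5 == 13 */
}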
1666
1667static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
1668                               uint64_t value)
1669{
1670    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1671    env->cp15.c14_pmevtyper[counter] = value;
1672
1673    /*
1674     * pmevtyper_rawwrite is called between a pair of pmu_op_start and
1675     * pmu_op_finish calls when loading saved state for a migration. Because
1676     * we're potentially updating the type of event here, the value written to
1677     * c14_pmevcntr_delta by the preceding pmu_op_start call may be for a
1678     * different counter type. Therefore, we need to set this value to the
1679     * current count for the counter type we're writing so that pmu_op_finish
1680     * has the correct count for its calculation.
1681     */
1682    uint16_t event = value & PMXEVTYPER_EVTCOUNT;
1683    if (event_supported(event)) {
1684        uint16_t event_idx = supported_event_map[event];
1685        env->cp15.c14_pmevcntr_delta[counter] =
1686            pm_events[event_idx].get_count(env);
1687    }
1688}
1689
1690static uint64_t pmevtyper_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
1691{
1692    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1693    return pmevtyper_read(env, ri, counter);
1694}
1695
1696static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
1697                             uint64_t value)
1698{
1699    pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31);
1700}
1701
1702static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
1703{
1704    return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31);
1705}
1706
1707static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1708                             uint64_t value, uint8_t counter)
1709{
1710    if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
1711        /* Before FEAT_PMUv3p5, top 32 bits of event counters are RES0 */
1712        value &= MAKE_64BIT_MASK(0, 32);
1713    }
1714    if (counter < pmu_num_counters(env)) {
1715        pmevcntr_op_start(env, counter);
1716        env->cp15.c14_pmevcntr[counter] = value;
1717        pmevcntr_op_finish(env, counter);
1718    }
1719    /*
1720     * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
1721     * are CONSTRAINED UNPREDICTABLE.
1722     */
1723}
1724
1725static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri,
1726                              uint8_t counter)
1727{
1728    if (counter < pmu_num_counters(env)) {
1729        uint64_t ret;
1730        pmevcntr_op_start(env, counter);
1731        ret = env->cp15.c14_pmevcntr[counter];
1732        pmevcntr_op_finish(env, counter);
1733        if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
1734            /* Before FEAT_PMUv3p5, top 32 bits of event counters are RES0 */
1735            ret &= MAKE_64BIT_MASK(0, 32);
1736        }
1737        return ret;
1738    } else {
1739        /*
1740         * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
1741         * are CONSTRAINED UNPREDICTABLE.
1742         */
1743        return 0;
1744    }
1745}
1746
1747static void pmevcntr_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
1748                             uint64_t value)
1749{
1750    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1751    pmevcntr_write(env, ri, value, counter);
1752}
1753
1754static uint64_t pmevcntr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
1755{
1756    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1757    return pmevcntr_read(env, ri, counter);
1758}
1759
1760static void pmevcntr_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
1761                             uint64_t value)
1762{
1763    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1764    assert(counter < pmu_num_counters(env));
1765    env->cp15.c14_pmevcntr[counter] = value;
1766    pmevcntr_write(env, ri, value, counter);
1767}
1768
1769static uint64_t pmevcntr_rawread(CPUARMState *env, const ARMCPRegInfo *ri)
1770{
1771    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1772    assert(counter < pmu_num_counters(env));
1773    return env->cp15.c14_pmevcntr[counter];
1774}
1775
1776static void pmxevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1777                             uint64_t value)
1778{
1779    pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31);
1780}
1781
1782static uint64_t pmxevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1783{
1784    return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31);
1785}
1786
1787static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1788                            uint64_t value)
1789{
1790    if (arm_feature(env, ARM_FEATURE_V8)) {
1791        env->cp15.c9_pmuserenr = value & 0xf;
1792    } else {
1793        env->cp15.c9_pmuserenr = value & 1;
1794    }
1795}
1796
1797static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1798                             uint64_t value)
1799{
1800    /* Mask writes to the implemented counter bits plus the C (cycle) bit */
1801    value &= pmu_counter_mask(env);
1802    env->cp15.c9_pminten |= value;
1803    pmu_update_irq(env);
1804}
1805
1806static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1807                             uint64_t value)
1808{
1809    value &= pmu_counter_mask(env);
1810    env->cp15.c9_pminten &= ~value;
1811    pmu_update_irq(env);
1812}
1813
1814static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
1815                       uint64_t value)
1816{
1817    /*
1818     * Note that even though the AArch64 view of this register has bits
1819     * [10:0] all RES0 we can only mask the bottom 5, to comply with the
1820     * architectural requirements for bits which are RES0 only in some
1821     * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
1822     * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
1823     */
1824    raw_write(env, ri, value & ~0x1FULL);
1825}
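
/*
 * Sketch of the masking above (example value assumed): clearing bits
 * [4:0] forces 32-byte alignment of the vector base, so a write of
 * 0x4000001f is stored, and reads back, as 0x40000000.
 */
G_GNUC_UNUSED static uint64_t vbar_align_example(void)
{
    return 0x4000001fULL & ~0x1FULL;    /* == 0x40000000 */
}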
1826
1827static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
1828{
1829    /* Begin with base v8.0 state.  */
1830    uint64_t valid_mask = 0x3fff;
1831    ARMCPU *cpu = env_archcpu(env);
1832    uint64_t changed;
1833
1834    /*
1835     * Because SCR_EL3 is the "real" cpreg and SCR is the alias, reset always
1836     * passes the reginfo for SCR_EL3, which has type ARM_CP_STATE_AA64.
1837     * Instead, choose the format based on the mode of EL3.
1838     */
1839    if (arm_el_is_aa64(env, 3)) {
1840        value |= SCR_FW | SCR_AW;      /* RES1 */
1841        valid_mask &= ~SCR_NET;        /* RES0 */
1842
1843        if (!cpu_isar_feature(aa64_aa32_el1, cpu) &&
1844            !cpu_isar_feature(aa64_aa32_el2, cpu)) {
1845            value |= SCR_RW;           /* RAO/WI */
1846        }
1847        if (cpu_isar_feature(aa64_ras, cpu)) {
1848            valid_mask |= SCR_TERR;
1849        }
1850        if (cpu_isar_feature(aa64_lor, cpu)) {
1851            valid_mask |= SCR_TLOR;
1852        }
1853        if (cpu_isar_feature(aa64_pauth, cpu)) {
1854            valid_mask |= SCR_API | SCR_APK;
1855        }
1856        if (cpu_isar_feature(aa64_sel2, cpu)) {
1857            valid_mask |= SCR_EEL2;
1858        } else if (cpu_isar_feature(aa64_rme, cpu)) {
1859            /* With RME and without SEL2, NS is RES1 (R_GSWWH, I_DJJQJ). */
1860            value |= SCR_NS;
1861        }
1862        if (cpu_isar_feature(aa64_mte, cpu)) {
1863            valid_mask |= SCR_ATA;
1864        }
1865        if (cpu_isar_feature(aa64_scxtnum, cpu)) {
1866            valid_mask |= SCR_ENSCXT;
1867        }
1868        if (cpu_isar_feature(aa64_doublefault, cpu)) {
1869            valid_mask |= SCR_EASE | SCR_NMEA;
1870        }
1871        if (cpu_isar_feature(aa64_sme, cpu)) {
1872            valid_mask |= SCR_ENTP2;
1873        }
1874        if (cpu_isar_feature(aa64_hcx, cpu)) {
1875            valid_mask |= SCR_HXEN;
1876        }
1877        if (cpu_isar_feature(aa64_fgt, cpu)) {
1878            valid_mask |= SCR_FGTEN;
1879        }
1880        if (cpu_isar_feature(aa64_rme, cpu)) {
1881            valid_mask |= SCR_NSE | SCR_GPF;
1882        }
1883    } else {
1884        valid_mask &= ~(SCR_RW | SCR_ST);
1885        if (cpu_isar_feature(aa32_ras, cpu)) {
1886            valid_mask |= SCR_TERR;
1887        }
1888    }
1889
1890    if (!arm_feature(env, ARM_FEATURE_EL2)) {
1891        valid_mask &= ~SCR_HCE;
1892
1893        /*
1894         * On ARMv7, SMD (or SCD as it is called in v7) is only
1895         * supported if EL2 exists; the bit is UNK/SBZP when EL2
1896         * is unavailable, so QEMU's ARMv7 emulation forces it to
1897         * zero in that case.
1898         * On ARMv8, this bit is always available.
1899         */
1900        if (arm_feature(env, ARM_FEATURE_V7) &&
1901            !arm_feature(env, ARM_FEATURE_V8)) {
1902            valid_mask &= ~SCR_SMD;
1903        }
1904    }
1905
1906    /* Clear all-context RES0 bits.  */
1907    value &= valid_mask;
1908    changed = env->cp15.scr_el3 ^ value;
1909    env->cp15.scr_el3 = value;
1910
1911    /*
1912     * If SCR_EL3.{NS,NSE} changes, i.e. change of security state,
1913     * we must invalidate all TLBs below EL3.
1914     */
1915    if (changed & (SCR_NS | SCR_NSE)) {
1916        tlb_flush_by_mmuidx(env_cpu(env), (ARMMMUIdxBit_E10_0 |
1917                                           ARMMMUIdxBit_E20_0 |
1918                                           ARMMMUIdxBit_E10_1 |
1919                                           ARMMMUIdxBit_E20_2 |
1920                                           ARMMMUIdxBit_E10_1_PAN |
1921                                           ARMMMUIdxBit_E20_2_PAN |
1922                                           ARMMMUIdxBit_E2));
1923    }
1924}
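
/*
 * Worked sketch of the valid_mask gating above (feature set assumed):
 * on a CPU with none of the optional features, valid_mask keeps its
 * base v8.0 value, so a guest write of all-ones stores only the
 * architected low bits and every RES0 bit reads back as zero.
 */
G_GNUC_UNUSED static uint64_t scr_valid_mask_example(void)
{
    uint64_t valid_mask = 0x3fff;   /* base v8.0 state, no extensions */
    uint64_t value = ~0ULL;         /* guest writes all ones */
    return value & valid_mask;      /* 0x3fff is what lands in SCR_EL3 */
}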
1925
1926static void scr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1927{
1928    /*
1929     * scr_write will set the RES1 bits on an AArch64-only CPU.
1930     * The reset value will be 0x30 on an AArch64-only CPU and 0 otherwise.
1931     */
1932    scr_write(env, ri, 0);
1933}
1934
1935static CPAccessResult access_tid4(CPUARMState *env,
1936                                  const ARMCPRegInfo *ri,
1937                                  bool isread)
1938{
1939    if (arm_current_el(env) == 1 &&
1940        (arm_hcr_el2_eff(env) & (HCR_TID2 | HCR_TID4))) {
1941        return CP_ACCESS_TRAP_EL2;
1942    }
1943
1944    return CP_ACCESS_OK;
1945}
1946
1947static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1948{
1949    ARMCPU *cpu = env_archcpu(env);
1950
1951    /*
1952     * Read the CSSELR index from the register bank (Secure or
1953     * Non-secure) that corresponds to this CCSIDR access
1954     */
1955    uint32_t index = A32_BANKED_REG_GET(env, csselr,
1956                                        ri->secure & ARM_CP_SECSTATE_S);
1957
1958    return cpu->ccsidr[index];
1959}
1960
1961static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1962                         uint64_t value)
1963{
1964    raw_write(env, ri, value & 0xf);
1965}
1966
1967static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1968{
1969    CPUState *cs = env_cpu(env);
1970    bool el1 = arm_current_el(env) == 1;
1971    uint64_t hcr_el2 = el1 ? arm_hcr_el2_eff(env) : 0;
1972    uint64_t ret = 0;
1973
1974    if (hcr_el2 & HCR_IMO) {
1975        if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
1976            ret |= CPSR_I;
1977        }
1978    } else {
1979        if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
1980            ret |= CPSR_I;
1981        }
1982    }
1983
1984    if (hcr_el2 & HCR_FMO) {
1985        if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) {
1986            ret |= CPSR_F;
1987        }
1988    } else {
1989        if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
1990            ret |= CPSR_F;
1991        }
1992    }
1993
1994    if (hcr_el2 & HCR_AMO) {
1995        if (cs->interrupt_request & CPU_INTERRUPT_VSERR) {
1996            ret |= CPSR_A;
1997        }
1998    }
1999
2000    return ret;
2001}
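
/*
 * Sketch of the routing decision above (boolean inputs assumed): when
 * HCR_EL2.IMO is set, ISR_EL1.I as seen from EL1 reflects the pending
 * state of the virtual IRQ line rather than the physical one; FMO
 * plays the same role for the F bit.
 */
G_GNUC_UNUSED static bool isr_i_bit_example(bool imo, bool virq_pending,
                                            bool irq_pending)
{
    return imo ? virq_pending : irq_pending;
}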
2002
2003static CPAccessResult access_aa64_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
2004                                       bool isread)
2005{
2006    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID1)) {
2007        return CP_ACCESS_TRAP_EL2;
2008    }
2009
2010    return CP_ACCESS_OK;
2011}
2012
2013static CPAccessResult access_aa32_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
2014                                       bool isread)
2015{
2016    if (arm_feature(env, ARM_FEATURE_V8)) {
2017        return access_aa64_tid1(env, ri, isread);
2018    }
2019
2020    return CP_ACCESS_OK;
2021}
2022
2023static const ARMCPRegInfo v7_cp_reginfo[] = {
2024    /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
2025    { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
2026      .access = PL1_W, .type = ARM_CP_NOP },
2027    /*
2028     * Performance monitors are implementation defined in v7,
2029     * but with an ARM recommended set of registers, which we
2030     * follow.
2031     *
2032     * Performance registers fall into three categories:
2033     *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
2034     *  (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
2035     *  (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
2036     * For the cases controlled by PMUSERENR we must set .access to PL0_RW
2037     * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
2038     */
2039    { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
2040      .access = PL0_RW, .type = ARM_CP_ALIAS | ARM_CP_IO,
2041      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
2042      .writefn = pmcntenset_write,
2043      .accessfn = pmreg_access,
2044      .fgt = FGT_PMCNTEN,
2045      .raw_writefn = raw_write },
2046    { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64, .type = ARM_CP_IO,
2047      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
2048      .access = PL0_RW, .accessfn = pmreg_access,
2049      .fgt = FGT_PMCNTEN,
2050      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
2051      .writefn = pmcntenset_write, .raw_writefn = raw_write },
2052    { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
2053      .access = PL0_RW,
2054      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
2055      .accessfn = pmreg_access,
2056      .fgt = FGT_PMCNTEN,
2057      .writefn = pmcntenclr_write,
2058      .type = ARM_CP_ALIAS | ARM_CP_IO },
2059    { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
2060      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
2061      .access = PL0_RW, .accessfn = pmreg_access,
2062      .fgt = FGT_PMCNTEN,
2063      .type = ARM_CP_ALIAS | ARM_CP_IO,
2064      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
2065      .writefn = pmcntenclr_write },
2066    { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
2067      .access = PL0_RW, .type = ARM_CP_IO,
2068      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
2069      .accessfn = pmreg_access,
2070      .fgt = FGT_PMOVS,
2071      .writefn = pmovsr_write,
2072      .raw_writefn = raw_write },
2073    { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
2074      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
2075      .access = PL0_RW, .accessfn = pmreg_access,
2076      .fgt = FGT_PMOVS,
2077      .type = ARM_CP_ALIAS | ARM_CP_IO,
2078      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
2079      .writefn = pmovsr_write,
2080      .raw_writefn = raw_write },
2081    { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
2082      .access = PL0_W, .accessfn = pmreg_access_swinc,
2083      .fgt = FGT_PMSWINC_EL0,
2084      .type = ARM_CP_NO_RAW | ARM_CP_IO,
2085      .writefn = pmswinc_write },
2086    { .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64,
2087      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 4,
2088      .access = PL0_W, .accessfn = pmreg_access_swinc,
2089      .fgt = FGT_PMSWINC_EL0,
2090      .type = ARM_CP_NO_RAW | ARM_CP_IO,
2091      .writefn = pmswinc_write },
2092    { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
2093      .access = PL0_RW, .type = ARM_CP_ALIAS,
2094      .fgt = FGT_PMSELR_EL0,
2095      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
2096      .accessfn = pmreg_access_selr, .writefn = pmselr_write,
2097      .raw_writefn = raw_write },
2098    { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
2099      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
2100      .access = PL0_RW, .accessfn = pmreg_access_selr,
2101      .fgt = FGT_PMSELR_EL0,
2102      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
2103      .writefn = pmselr_write, .raw_writefn = raw_write, },
2104    { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
2105      .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO,
2106      .fgt = FGT_PMCCNTR_EL0,
2107      .readfn = pmccntr_read, .writefn = pmccntr_write32,
2108      .accessfn = pmreg_access_ccntr },
2109    { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
2110      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
2111      .access = PL0_RW, .accessfn = pmreg_access_ccntr,
2112      .fgt = FGT_PMCCNTR_EL0,
2113      .type = ARM_CP_IO,
2114      .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt),
2115      .readfn = pmccntr_read, .writefn = pmccntr_write,
2116      .raw_readfn = raw_read, .raw_writefn = raw_write, },
2117    { .name = "PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15, .opc2 = 7,
2118      .writefn = pmccfiltr_write_a32, .readfn = pmccfiltr_read_a32,
2119      .access = PL0_RW, .accessfn = pmreg_access,
2120      .fgt = FGT_PMCCFILTR_EL0,
2121      .type = ARM_CP_ALIAS | ARM_CP_IO,
2122      .resetvalue = 0, },
2123    { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
2124      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
2125      .writefn = pmccfiltr_write, .raw_writefn = raw_write,
2126      .access = PL0_RW, .accessfn = pmreg_access,
2127      .fgt = FGT_PMCCFILTR_EL0,
2128      .type = ARM_CP_IO,
2129      .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
2130      .resetvalue = 0, },
2131    { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
2132      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2133      .accessfn = pmreg_access,
2134      .fgt = FGT_PMEVTYPERN_EL0,
2135      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
2136    { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
2137      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
2138      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2139      .accessfn = pmreg_access,
2140      .fgt = FGT_PMEVTYPERN_EL0,
2141      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
2142    { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
2143      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2144      .accessfn = pmreg_access_xevcntr,
2145      .fgt = FGT_PMEVCNTRN_EL0,
2146      .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
2147    { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64,
2148      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 2,
2149      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2150      .accessfn = pmreg_access_xevcntr,
2151      .fgt = FGT_PMEVCNTRN_EL0,
2152      .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
2153    { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
2154      .access = PL0_R | PL1_RW, .accessfn = access_tpm,
2155      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr),
2156      .resetvalue = 0,
2157      .writefn = pmuserenr_write, .raw_writefn = raw_write },
2158    { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
2159      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
2160      .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
2161      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
2162      .resetvalue = 0,
2163      .writefn = pmuserenr_write, .raw_writefn = raw_write },
2164    { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
2165      .access = PL1_RW, .accessfn = access_tpm,
2166      .fgt = FGT_PMINTEN,
2167      .type = ARM_CP_ALIAS | ARM_CP_IO,
2168      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
2169      .resetvalue = 0,
2170      .writefn = pmintenset_write, .raw_writefn = raw_write },
2171    { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
2172      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
2173      .access = PL1_RW, .accessfn = access_tpm,
2174      .fgt = FGT_PMINTEN,
2175      .type = ARM_CP_IO,
2176      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
2177      .writefn = pmintenset_write, .raw_writefn = raw_write,
2178      .resetvalue = 0x0 },
2179    { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
2180      .access = PL1_RW, .accessfn = access_tpm,
2181      .fgt = FGT_PMINTEN,
2182      .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW,
2183      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
2184      .writefn = pmintenclr_write, },
2185    { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
2186      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
2187      .access = PL1_RW, .accessfn = access_tpm,
2188      .fgt = FGT_PMINTEN,
2189      .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW,
2190      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
2191      .writefn = pmintenclr_write },
2192    { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
2193      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
2194      .access = PL1_R,
2195      .accessfn = access_tid4,
2196      .fgt = FGT_CCSIDR_EL1,
2197      .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
2198    { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
2199      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
2200      .access = PL1_RW,
2201      .accessfn = access_tid4,
2202      .fgt = FGT_CSSELR_EL1,
2203      .writefn = csselr_write, .resetvalue = 0,
2204      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
2205                             offsetof(CPUARMState, cp15.csselr_ns) } },
2206    /*
2207     * Auxiliary ID register: this actually has an IMPDEF value, but
2208     * for now we just RAZ for all cores.
2209     */
2210    { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
2211      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
2212      .access = PL1_R, .type = ARM_CP_CONST,
2213      .accessfn = access_aa64_tid1,
2214      .fgt = FGT_AIDR_EL1,
2215      .resetvalue = 0 },
2216    /*
2217     * Auxiliary fault status registers: these also are IMPDEF, and we
2218     * choose to RAZ/WI for all cores.
2219     */
2220    { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
2221      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
2222      .access = PL1_RW, .accessfn = access_tvm_trvm,
2223      .fgt = FGT_AFSR0_EL1,
2224      .type = ARM_CP_CONST, .resetvalue = 0 },
2225    { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
2226      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
2227      .access = PL1_RW, .accessfn = access_tvm_trvm,
2228      .fgt = FGT_AFSR1_EL1,
2229      .type = ARM_CP_CONST, .resetvalue = 0 },
2230    /*
2231     * MAIR can just read-as-written because we don't implement caches
2232     * and so don't need to care about memory attributes.
2233     */
2234    { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
2235      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
2236      .access = PL1_RW, .accessfn = access_tvm_trvm,
2237      .fgt = FGT_MAIR_EL1,
2238      .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
2239      .resetvalue = 0 },
2240    { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
2241      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
2242      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
2243      .resetvalue = 0 },
2244    /*
2245     * For non-long-descriptor page tables these are PRRR and NMRR;
2246     * regardless they still act as reads-as-written for QEMU.
2247     */
2248    /*
2249     * MAIR0/1 are defined separately from their 64-bit counterpart so
2250     * that the correct fieldoffset can be assigned for each bank, with
2251     * endianness handled in the field definitions.
2252     */
2253    { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
2254      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
2255      .access = PL1_RW, .accessfn = access_tvm_trvm,
2256      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
2257                             offsetof(CPUARMState, cp15.mair0_ns) },
2258      .resetfn = arm_cp_reset_ignore },
2259    { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
2260      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1,
2261      .access = PL1_RW, .accessfn = access_tvm_trvm,
2262      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
2263                             offsetof(CPUARMState, cp15.mair1_ns) },
2264      .resetfn = arm_cp_reset_ignore },
2265    { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
2266      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
2267      .fgt = FGT_ISR_EL1,
2268      .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
2269    /* 32 bit ITLB invalidates */
2270    { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
2271      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2272      .writefn = tlbiall_write },
2273    { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
2274      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2275      .writefn = tlbimva_write },
2276    { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
2277      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2278      .writefn = tlbiasid_write },
2279    /* 32 bit DTLB invalidates */
2280    { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
2281      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2282      .writefn = tlbiall_write },
2283    { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
2284      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2285      .writefn = tlbimva_write },
2286    { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
2287      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2288      .writefn = tlbiasid_write },
2289    /* 32 bit TLB invalidates */
2290    { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
2291      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2292      .writefn = tlbiall_write },
2293    { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
2294      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2295      .writefn = tlbimva_write },
2296    { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
2297      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2298      .writefn = tlbiasid_write },
2299    { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
2300      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2301      .writefn = tlbimvaa_write },
2302};
2303
2304static const ARMCPRegInfo v7mp_cp_reginfo[] = {
2305    /* 32 bit TLB invalidates, Inner Shareable */
2306    { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
2307      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis,
2308      .writefn = tlbiall_is_write },
2309    { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
2310      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis,
2311      .writefn = tlbimva_is_write },
2312    { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
2313      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis,
2314      .writefn = tlbiasid_is_write },
2315    { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
2316      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis,
2317      .writefn = tlbimvaa_is_write },
2318};
2319
2320static const ARMCPRegInfo pmovsset_cp_reginfo[] = {
2321    /* PMOVSSET is not implemented in v7 before v7ve */
2322    { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3,
2323      .access = PL0_RW, .accessfn = pmreg_access,
2324      .fgt = FGT_PMOVS,
2325      .type = ARM_CP_ALIAS | ARM_CP_IO,
2326      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
2327      .writefn = pmovsset_write,
2328      .raw_writefn = raw_write },
2329    { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64,
2330      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3,
2331      .access = PL0_RW, .accessfn = pmreg_access,
2332      .fgt = FGT_PMOVS,
2333      .type = ARM_CP_ALIAS | ARM_CP_IO,
2334      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
2335      .writefn = pmovsset_write,
2336      .raw_writefn = raw_write },
2337};
2338
2339static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2340                        uint64_t value)
2341{
2342    value &= 1;
2343    env->teecr = value;
2344}
2345
2346static CPAccessResult teecr_access(CPUARMState *env, const ARMCPRegInfo *ri,
2347                                   bool isread)
2348{
2349    /*
2350     * HSTR.TTEE only exists in v7A, not v8A; but v8A doesn't have T2EE
2351     * at all, so we never need to check whether we're on v8A here.
2352     */
2353    if (arm_current_el(env) < 2 && !arm_is_secure_below_el3(env) &&
2354        (env->cp15.hstr_el2 & HSTR_TTEE)) {
2355        return CP_ACCESS_TRAP_EL2;
2356    }
2357    return CP_ACCESS_OK;
2358}
2359
2360static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
2361                                    bool isread)
2362{
2363    if (arm_current_el(env) == 0 && (env->teecr & 1)) {
2364        return CP_ACCESS_TRAP;
2365    }
2366    return teecr_access(env, ri, isread);
2367}
2368
2369static const ARMCPRegInfo t2ee_cp_reginfo[] = {
2370    { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
2371      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
2372      .resetvalue = 0,
2373      .writefn = teecr_write, .accessfn = teecr_access },
2374    { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
2375      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
2376      .accessfn = teehbr_access, .resetvalue = 0 },
2377};
2378
2379static const ARMCPRegInfo v6k_cp_reginfo[] = {
2380    { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
2381      .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
2382      .access = PL0_RW,
2383      .fgt = FGT_TPIDR_EL0,
2384      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
2385    { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
2386      .access = PL0_RW,
2387      .fgt = FGT_TPIDR_EL0,
2388      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
2389                             offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
2390      .resetfn = arm_cp_reset_ignore },
2391    { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
2392      .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
2393      .access = PL0_R | PL1_W,
2394      .fgt = FGT_TPIDRRO_EL0,
2395      .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
2396      .resetvalue = 0},
2397    { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
2398      .access = PL0_R | PL1_W,
2399      .fgt = FGT_TPIDRRO_EL0,
2400      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
2401                             offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
2402      .resetfn = arm_cp_reset_ignore },
2403    { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
2404      .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
2405      .access = PL1_RW,
2406      .fgt = FGT_TPIDR_EL1,
2407      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
2408    { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
2409      .access = PL1_RW,
2410      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
2411                             offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
2412      .resetvalue = 0 },
2413};
2414
2415#ifndef CONFIG_USER_ONLY
2416
2417static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
2418                                       bool isread)
2419{
2420    /*
2421     * CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
2422     * Writable only at the highest implemented exception level.
2423     */
2424    int el = arm_current_el(env);
2425    uint64_t hcr;
2426    uint32_t cntkctl;
2427
2428    switch (el) {
2429    case 0:
2430        hcr = arm_hcr_el2_eff(env);
2431        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
2432            cntkctl = env->cp15.cnthctl_el2;
2433        } else {
2434            cntkctl = env->cp15.c14_cntkctl;
2435        }
2436        if (!extract32(cntkctl, 0, 2)) {
2437            return CP_ACCESS_TRAP;
2438        }
2439        break;
2440    case 1:
2441        if (!isread && ri->state == ARM_CP_STATE_AA32 &&
2442            arm_is_secure_below_el3(env)) {
2443            /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
2444            return CP_ACCESS_TRAP_UNCATEGORIZED;
2445        }
2446        break;
2447    case 2:
2448    case 3:
2449        break;
2450    }
2451
2452    if (!isread && el < arm_highest_el(env)) {
2453        return CP_ACCESS_TRAP_UNCATEGORIZED;
2454    }
2455
2456    return CP_ACCESS_OK;
2457}
2458
2459static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
2460                                        bool isread)
2461{
2462    unsigned int cur_el = arm_current_el(env);
2463    bool has_el2 = arm_is_el2_enabled(env);
2464    uint64_t hcr = arm_hcr_el2_eff(env);
2465
2466    switch (cur_el) {
2467    case 0:
2468        /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]CTEN. */
2469        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
2470            return (extract32(env->cp15.cnthctl_el2, timeridx, 1)
2471                    ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
2472        }
2473
2474        /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
2475        if (!extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
2476            return CP_ACCESS_TRAP;
2477        }
2478
2479        /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PCTEN. */
2480        if (hcr & HCR_E2H) {
2481            if (timeridx == GTIMER_PHYS &&
2482                !extract32(env->cp15.cnthctl_el2, 10, 1)) {
2483                return CP_ACCESS_TRAP_EL2;
2484            }
2485        } else {
2486            /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */
2487            if (has_el2 && timeridx == GTIMER_PHYS &&
2488                !extract32(env->cp15.cnthctl_el2, 1, 1)) {
2489                return CP_ACCESS_TRAP_EL2;
2490            }
2491        }
2492        break;
2493
2494    case 1:
2495        /* Check CNTHCTL_EL2.EL1PCTEN, which changes location based on E2H. */
2496        if (has_el2 && timeridx == GTIMER_PHYS &&
2497            (hcr & HCR_E2H
2498             ? !extract32(env->cp15.cnthctl_el2, 10, 1)
2499             : !extract32(env->cp15.cnthctl_el2, 0, 1))) {
2500            return CP_ACCESS_TRAP_EL2;
2501        }
2502        break;
2503    }
2504    return CP_ACCESS_OK;
2505}
2506
2507static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
2508                                      bool isread)
2509{
2510    unsigned int cur_el = arm_current_el(env);
2511    bool has_el2 = arm_is_el2_enabled(env);
2512    uint64_t hcr = arm_hcr_el2_eff(env);
2513
2514    switch (cur_el) {
2515    case 0:
2516        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
2517            /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]TEN. */
2518            return (extract32(env->cp15.cnthctl_el2, 9 - timeridx, 1)
2519                    ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
2520        }
2521
2522        /*
2523         * CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from
2524         * EL0 if EL0[PV]TEN is zero.
2525         */
2526        if (!extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
2527            return CP_ACCESS_TRAP;
2528        }
2529        /* fall through */
2530
2531    case 1:
2532        if (has_el2 && timeridx == GTIMER_PHYS) {
2533            if (hcr & HCR_E2H) {
2534                /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PTEN. */
2535                if (!extract32(env->cp15.cnthctl_el2, 11, 1)) {
2536                    return CP_ACCESS_TRAP_EL2;
2537                }
2538            } else {
2539                /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */
2540                if (!extract32(env->cp15.cnthctl_el2, 1, 1)) {
2541                    return CP_ACCESS_TRAP_EL2;
2542                }
2543            }
2544        }
2545        break;
2546    }
2547    return CP_ACCESS_OK;
2548}
2549
2550static CPAccessResult gt_pct_access(CPUARMState *env,
2551                                    const ARMCPRegInfo *ri,
2552                                    bool isread)
2553{
2554    return gt_counter_access(env, GTIMER_PHYS, isread);
2555}
2556
2557static CPAccessResult gt_vct_access(CPUARMState *env,
2558                                    const ARMCPRegInfo *ri,
2559                                    bool isread)
2560{
2561    return gt_counter_access(env, GTIMER_VIRT, isread);
2562}
2563
2564static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
2565                                       bool isread)
2566{
2567    return gt_timer_access(env, GTIMER_PHYS, isread);
2568}
2569
2570static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
2571                                       bool isread)
2572{
2573    return gt_timer_access(env, GTIMER_VIRT, isread);
2574}
2575
2576static CPAccessResult gt_stimer_access(CPUARMState *env,
2577                                       const ARMCPRegInfo *ri,
2578                                       bool isread)
2579{
2580    /*
2581     * The AArch64 register view of the secure physical timer is
2582     * always accessible from EL3, and configurably accessible from
2583     * Secure EL1.
2584     */
2585    switch (arm_current_el(env)) {
2586    case 1:
2587        if (!arm_is_secure(env)) {
2588            return CP_ACCESS_TRAP;
2589        }
2590        if (!(env->cp15.scr_el3 & SCR_ST)) {
2591            return CP_ACCESS_TRAP_EL3;
2592        }
2593        return CP_ACCESS_OK;
2594    case 0:
2595    case 2:
2596        return CP_ACCESS_TRAP;
2597    case 3:
2598        return CP_ACCESS_OK;
2599    default:
2600        g_assert_not_reached();
2601    }
2602}
2603
2604static uint64_t gt_get_countervalue(CPUARMState *env)
2605{
2606    ARMCPU *cpu = env_archcpu(env);
2607
2608    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / gt_cntfrq_period_ns(cpu);
2609}
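
/*
 * Sketch of the conversion above, assuming the default 1 GHz counter
 * frequency: the tick period is then 1 ns, so the counter value is
 * simply the virtual-clock time in nanoseconds.
 */
G_GNUC_UNUSED static uint64_t gt_countervalue_example(uint64_t now_ns)
{
    uint64_t period_ns = 1;         /* 1 GHz -> 1 ns per tick */
    return now_ns / period_ns;      /* 2500000 ns -> 2500000 ticks */
}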
2610
2611static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
2612{
2613    ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];
2614
2615    if (gt->ctl & 1) {
2616        /*
2617         * Timer enabled: calculate the current ISTATUS and irq line
2618         * state, and re-arm the timer for when ISTATUS next changes
2619         */
2620        uint64_t offset = timeridx == GTIMER_VIRT ?
2621                                      cpu->env.cp15.cntvoff_el2 : 0;
2622        uint64_t count = gt_get_countervalue(&cpu->env);
2623        /* Note that this must be unsigned 64-bit arithmetic: */
2624        int istatus = count - offset >= gt->cval;
2625        uint64_t nexttick;
2626        int irqstate;
2627
2628        gt->ctl = deposit32(gt->ctl, 2, 1, istatus);
2629
2630        irqstate = (istatus && !(gt->ctl & 2));
2631        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
2632
2633        if (istatus) {
2634            /* Next transition is when count rolls back over to zero */
2635            nexttick = UINT64_MAX;
2636        } else {
2637            /* Next transition is when we hit cval */
2638            nexttick = gt->cval + offset;
2639        }
2640        /*
2641         * Note that the desired next expiry time might be beyond the
2642         * signed-64-bit range of a QEMUTimer -- in this case we just
2643         * set the timer for as far in the future as possible. When the
2644         * timer expires we will reset the timer for any remaining period.
2645         */
2646        if (nexttick > INT64_MAX / gt_cntfrq_period_ns(cpu)) {
2647            timer_mod_ns(cpu->gt_timer[timeridx], INT64_MAX);
2648        } else {
2649            timer_mod(cpu->gt_timer[timeridx], nexttick);
2650        }
2651        trace_arm_gt_recalc(timeridx, irqstate, nexttick);
2652    } else {
2653        /* Timer disabled: ISTATUS and timer output always clear */
2654        gt->ctl &= ~4;
2655        qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
2656        timer_del(cpu->gt_timer[timeridx]);
2657        trace_arm_gt_recalc_disabled(timeridx);
2658    }
2659}
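
/*
 * Worked sketch of the ISTATUS condition above (values assumed): the
 * subtraction is unsigned 64-bit, so an offset larger than the current
 * count wraps around rather than going negative, matching the
 * architected behaviour of (count - offset) >= cval.
 */
G_GNUC_UNUSED static int gt_istatus_example(void)
{
    uint64_t count = 5, offset = 10, cval = 100;
    /* 5 - 10 wraps to 0xfffffffffffffffb, which is >= 100: ISTATUS set */
    return count - offset >= cval;
}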
2660
2661static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
2662                           int timeridx)
2663{
2664    ARMCPU *cpu = env_archcpu(env);
2665
2666    timer_del(cpu->gt_timer[timeridx]);
2667}
2668
2669static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
2670{
2671    return gt_get_countervalue(env);
2672}
2673
2674static uint64_t gt_virt_cnt_offset(CPUARMState *env)
2675{
2676    uint64_t hcr;
2677
2678    switch (arm_current_el(env)) {
2679    case 2:
2680        hcr = arm_hcr_el2_eff(env);
2681        if (hcr & HCR_E2H) {
2682            return 0;
2683        }
2684        break;
2685    case 0:
2686        hcr = arm_hcr_el2_eff(env);
2687        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
2688            return 0;
2689        }
2690        break;
2691    }
2692
2693    return env->cp15.cntvoff_el2;
2694}
2695
2696static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
2697{
2698    return gt_get_countervalue(env) - gt_virt_cnt_offset(env);
2699}
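
/*
 * Sketch of the relationship implemented above: when the offset
 * applies, the virtual count is the physical count minus CNTVOFF_EL2.
 */
G_GNUC_UNUSED static uint64_t gt_cntvct_example(uint64_t cntpct,
                                                uint64_t cntvoff)
{
    return cntpct - cntvoff;
}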
2700
2701static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2702                          int timeridx,
2703                          uint64_t value)
2704{
2705    trace_arm_gt_cval_write(timeridx, value);
2706    env->cp15.c14_timer[timeridx].cval = value;
2707    gt_recalc_timer(env_archcpu(env), timeridx);
2708}
2709
2710static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
2711                             int timeridx)
2712{
2713    uint64_t offset = 0;
2714
2715    switch (timeridx) {
2716    case GTIMER_VIRT:
2717    case GTIMER_HYPVIRT:
2718        offset = gt_virt_cnt_offset(env);
2719        break;
2720    }
2721
2722    return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
2723                      (gt_get_countervalue(env) - offset));
2724}
2725
2726static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2727                          int timeridx,
2728                          uint64_t value)
2729{
2730    uint64_t offset = 0;
2731
2732    switch (timeridx) {
2733    case GTIMER_VIRT:
2734    case GTIMER_HYPVIRT:
2735        offset = gt_virt_cnt_offset(env);
2736        break;
2737    }
2738
2739    trace_arm_gt_tval_write(timeridx, value);
2740    env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
2741                                         sextract64(value, 0, 32);
2742    gt_recalc_timer(env_archcpu(env), timeridx);
2743}
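
/*
 * Sketch of the TVAL semantics implemented above (parameters assumed):
 * TVAL is a signed 32-bit downcounter, so writing it programs
 * CVAL = (count - offset) + SignExtend32(value); a negative TVAL
 * therefore produces a deadline that has already passed.
 */
G_GNUC_UNUSED static uint64_t gt_tval_to_cval_example(uint64_t count,
                                                      uint64_t offset,
                                                      uint32_t tval)
{
    /* The (int32_t) cast mirrors sextract64(value, 0, 32) above */
    return count - offset + (int32_t)tval;
}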
2744
2745static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2746                         int timeridx,
2747                         uint64_t value)
2748{
2749    ARMCPU *cpu = env_archcpu(env);
2750    uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;
2751
2752    trace_arm_gt_ctl_write(timeridx, value);
2753    env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
2754    if ((oldval ^ value) & 1) {
2755        /* Enable toggled */
2756        gt_recalc_timer(cpu, timeridx);
2757    } else if ((oldval ^ value) & 2) {
2758        /*
2759         * IMASK toggled: don't need to recalculate,
2760         * just set the interrupt line based on ISTATUS
2761         */
2762        int irqstate = (oldval & 4) && !(value & 2);
2763
2764        trace_arm_gt_imask_toggle(timeridx, irqstate);
2765        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
2766    }
2767}
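
/*
 * Sketch of the interrupt-line rule used above: the line is driven
 * high iff ISTATUS (bit 2) is set and IMASK (bit 1) is clear, which
 * is why an IMASK-only toggle never needs a deadline recalculation.
 */
G_GNUC_UNUSED static int gt_irq_line_example(uint32_t ctl)
{
    return (ctl & 4) && !(ctl & 2);
}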
2768
2769static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2770{
2771    gt_timer_reset(env, ri, GTIMER_PHYS);
2772}
2773
2774static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2775                               uint64_t value)
2776{
2777    gt_cval_write(env, ri, GTIMER_PHYS, value);
2778}
2779
2780static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2781{
2782    return gt_tval_read(env, ri, GTIMER_PHYS);
2783}
2784
2785static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2786                               uint64_t value)
2787{
2788    gt_tval_write(env, ri, GTIMER_PHYS, value);
2789}
2790
2791static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2792                              uint64_t value)
2793{
2794    gt_ctl_write(env, ri, GTIMER_PHYS, value);
2795}
2796
2797static int gt_phys_redir_timeridx(CPUARMState *env)
2798{
2799    switch (arm_mmu_idx(env)) {
2800    case ARMMMUIdx_E20_0:
2801    case ARMMMUIdx_E20_2:
2802    case ARMMMUIdx_E20_2_PAN:
2803        return GTIMER_HYP;
2804    default:
2805        return GTIMER_PHYS;
2806    }
2807}
2808
2809static int gt_virt_redir_timeridx(CPUARMState *env)
2810{
2811    switch (arm_mmu_idx(env)) {
2812    case ARMMMUIdx_E20_0:
2813    case ARMMMUIdx_E20_2:
2814    case ARMMMUIdx_E20_2_PAN:
2815        return GTIMER_HYPVIRT;
2816    default:
2817        return GTIMER_VIRT;
2818    }
2819}
2820
2821static uint64_t gt_phys_redir_cval_read(CPUARMState *env,
2822                                        const ARMCPRegInfo *ri)
2823{
2824    int timeridx = gt_phys_redir_timeridx(env);
2825    return env->cp15.c14_timer[timeridx].cval;
2826}
2827
2828static void gt_phys_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2829                                     uint64_t value)
2830{
2831    int timeridx = gt_phys_redir_timeridx(env);
2832    gt_cval_write(env, ri, timeridx, value);
2833}
2834
2835static uint64_t gt_phys_redir_tval_read(CPUARMState *env,
2836                                        const ARMCPRegInfo *ri)
2837{
2838    int timeridx = gt_phys_redir_timeridx(env);
2839    return gt_tval_read(env, ri, timeridx);
2840}
2841
2842static void gt_phys_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2843                                     uint64_t value)
2844{
2845    int timeridx = gt_phys_redir_timeridx(env);
2846    gt_tval_write(env, ri, timeridx, value);
2847}
2848
2849static uint64_t gt_phys_redir_ctl_read(CPUARMState *env,
2850                                       const ARMCPRegInfo *ri)
2851{
2852    int timeridx = gt_phys_redir_timeridx(env);
2853    return env->cp15.c14_timer[timeridx].ctl;
2854}
2855
2856static void gt_phys_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2857                                    uint64_t value)
2858{
2859    int timeridx = gt_phys_redir_timeridx(env);
2860    gt_ctl_write(env, ri, timeridx, value);
2861}
2862
2863static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2864{
2865    gt_timer_reset(env, ri, GTIMER_VIRT);
2866}
2867
2868static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2869                               uint64_t value)
2870{
2871    gt_cval_write(env, ri, GTIMER_VIRT, value);
2872}
2873
2874static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2875{
2876    return gt_tval_read(env, ri, GTIMER_VIRT);
2877}
2878
2879static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2880                               uint64_t value)
2881{
2882    gt_tval_write(env, ri, GTIMER_VIRT, value);
2883}
2884
2885static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2886                              uint64_t value)
2887{
2888    gt_ctl_write(env, ri, GTIMER_VIRT, value);
2889}
2890
2891static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
2892                              uint64_t value)
2893{
2894    ARMCPU *cpu = env_archcpu(env);
2895
2896    trace_arm_gt_cntvoff_write(value);
2897    raw_write(env, ri, value);
2898    gt_recalc_timer(cpu, GTIMER_VIRT);
2899}
2900
2901static uint64_t gt_virt_redir_cval_read(CPUARMState *env,
2902                                        const ARMCPRegInfo *ri)
2903{
2904    int timeridx = gt_virt_redir_timeridx(env);
2905    return env->cp15.c14_timer[timeridx].cval;
2906}
2907
2908static void gt_virt_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2909                                     uint64_t value)
2910{
2911    int timeridx = gt_virt_redir_timeridx(env);
2912    gt_cval_write(env, ri, timeridx, value);
2913}
2914
2915static uint64_t gt_virt_redir_tval_read(CPUARMState *env,
2916                                        const ARMCPRegInfo *ri)
2917{
2918    int timeridx = gt_virt_redir_timeridx(env);
2919    return gt_tval_read(env, ri, timeridx);
2920}
2921
2922static void gt_virt_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2923                                     uint64_t value)
2924{
2925    int timeridx = gt_virt_redir_timeridx(env);
2926    gt_tval_write(env, ri, timeridx, value);
2927}
2928
2929static uint64_t gt_virt_redir_ctl_read(CPUARMState *env,
2930                                       const ARMCPRegInfo *ri)
2931{
2932    int timeridx = gt_virt_redir_timeridx(env);
2933    return env->cp15.c14_timer[timeridx].ctl;
2934}
2935
2936static void gt_virt_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2937                                    uint64_t value)
2938{
2939    int timeridx = gt_virt_redir_timeridx(env);
2940    gt_ctl_write(env, ri, timeridx, value);
2941}
2942
2943static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2944{
2945    gt_timer_reset(env, ri, GTIMER_HYP);
2946}
2947
2948static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2949                              uint64_t value)
2950{
2951    gt_cval_write(env, ri, GTIMER_HYP, value);
2952}
2953
2954static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2955{
2956    return gt_tval_read(env, ri, GTIMER_HYP);
2957}
2958
2959static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2960                              uint64_t value)
2961{
2962    gt_tval_write(env, ri, GTIMER_HYP, value);
2963}
2964
2965static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2966                              uint64_t value)
2967{
2968    gt_ctl_write(env, ri, GTIMER_HYP, value);
2969}
2970
2971static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2972{
2973    gt_timer_reset(env, ri, GTIMER_SEC);
2974}
2975
2976static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2977                              uint64_t value)
2978{
2979    gt_cval_write(env, ri, GTIMER_SEC, value);
2980}
2981
2982static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2983{
2984    return gt_tval_read(env, ri, GTIMER_SEC);
2985}
2986
2987static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2988                              uint64_t value)
2989{
2990    gt_tval_write(env, ri, GTIMER_SEC, value);
2991}
2992
2993static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2994                              uint64_t value)
2995{
2996    gt_ctl_write(env, ri, GTIMER_SEC, value);
2997}
2998
2999static void gt_hv_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
3000{
3001    gt_timer_reset(env, ri, GTIMER_HYPVIRT);
3002}
3003
3004static void gt_hv_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
3005                             uint64_t value)
3006{
3007    gt_cval_write(env, ri, GTIMER_HYPVIRT, value);
3008}
3009
3010static uint64_t gt_hv_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
3011{
3012    return gt_tval_read(env, ri, GTIMER_HYPVIRT);
3013}
3014
3015static void gt_hv_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
3016                             uint64_t value)
3017{
3018    gt_tval_write(env, ri, GTIMER_HYPVIRT, value);
3019}
3020
3021static void gt_hv_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
3022                            uint64_t value)
3023{
3024    gt_ctl_write(env, ri, GTIMER_HYPVIRT, value);
3025}
3026
3027void arm_gt_ptimer_cb(void *opaque)
3028{
3029    ARMCPU *cpu = opaque;
3030
3031    gt_recalc_timer(cpu, GTIMER_PHYS);
3032}
3033
3034void arm_gt_vtimer_cb(void *opaque)
3035{
3036    ARMCPU *cpu = opaque;
3037
3038    gt_recalc_timer(cpu, GTIMER_VIRT);
3039}
3040
3041void arm_gt_htimer_cb(void *opaque)
3042{
3043    ARMCPU *cpu = opaque;
3044
3045    gt_recalc_timer(cpu, GTIMER_HYP);
3046}
3047
3048void arm_gt_stimer_cb(void *opaque)
3049{
3050    ARMCPU *cpu = opaque;
3051
3052    gt_recalc_timer(cpu, GTIMER_SEC);
3053}
3054
3055void arm_gt_hvtimer_cb(void *opaque)
3056{
3057    ARMCPU *cpu = opaque;
3058
3059    gt_recalc_timer(cpu, GTIMER_HYPVIRT);
3060}
3061
3062static void arm_gt_cntfrq_reset(CPUARMState *env, const ARMCPRegInfo *ri)
3063{
3064    ARMCPU *cpu = env_archcpu(env);
3065
3066    cpu->env.cp15.c14_cntfrq = cpu->gt_cntfrq_hz;
3067}
3068
3069static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
3070    /*
3071     * Note that CNTFRQ is purely reads-as-written for the benefit
3072     * of software; writing it doesn't actually change the timer frequency.
3073     * Our reset value matches the fixed frequency we implement the timer at.
3074     */
3075    { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
3076      .type = ARM_CP_ALIAS,
3077      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
3078      .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
3079    },
3080    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
3081      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
3082      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
3083      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
3084      .resetfn = arm_gt_cntfrq_reset,
3085    },
3086    /* overall control: mostly access permissions */
3087    { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
3088      .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
3089      .access = PL1_RW,
3090      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
3091      .resetvalue = 0,
3092    },
3093    /* per-timer control */
3094    { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
3095      .secure = ARM_CP_SECSTATE_NS,
3096      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
3097      .accessfn = gt_ptimer_access,
3098      .fieldoffset = offsetoflow32(CPUARMState,
3099                                   cp15.c14_timer[GTIMER_PHYS].ctl),
3100      .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read,
3101      .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write,
3102    },
3103    { .name = "CNTP_CTL_S",
3104      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
3105      .secure = ARM_CP_SECSTATE_S,
3106      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
3107      .accessfn = gt_ptimer_access,
3108      .fieldoffset = offsetoflow32(CPUARMState,
3109                                   cp15.c14_timer[GTIMER_SEC].ctl),
3110      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
3111    },
3112    { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
3113      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
3114      .type = ARM_CP_IO, .access = PL0_RW,
3115      .accessfn = gt_ptimer_access,
3116      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
3117      .resetvalue = 0,
3118      .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read,
3119      .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write,
3120    },
3121    { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
3122      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
3123      .accessfn = gt_vtimer_access,
3124      .fieldoffset = offsetoflow32(CPUARMState,
3125                                   cp15.c14_timer[GTIMER_VIRT].ctl),
3126      .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read,
3127      .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write,
3128    },
3129    { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
3130      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
3131      .type = ARM_CP_IO, .access = PL0_RW,
3132      .accessfn = gt_vtimer_access,
3133      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
3134      .resetvalue = 0,
3135      .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read,
3136      .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write,
3137    },
3138    /* TimerValue views: a 32-bit downcounting view of the underlying state */
3139    { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
3140      .secure = ARM_CP_SECSTATE_NS,
3141      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
3142      .accessfn = gt_ptimer_access,
3143      .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write,
3144    },
3145    { .name = "CNTP_TVAL_S",
3146      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
3147      .secure = ARM_CP_SECSTATE_S,
3148      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
3149      .accessfn = gt_ptimer_access,
3150      .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write,
3151    },
3152    { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
3153      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
3154      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
3155      .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset,
3156      .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write,
3157    },
3158    { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
3159      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
3160      .accessfn = gt_vtimer_access,
3161      .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write,
3162    },
3163    { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
3164      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
3165      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
3166      .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset,
3167      .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write,
3168    },
3169    /* The counter itself */
3170    { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
3171      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
3172      .accessfn = gt_pct_access,
3173      .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
3174    },
3175    { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
3176      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
3177      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
3178      .accessfn = gt_pct_access, .readfn = gt_cnt_read,
3179    },
3180    { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
3181      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
3182      .accessfn = gt_vct_access,
3183      .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
3184    },
3185    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
3186      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
3187      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
3188      .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
3189    },
3190    /* Comparison value, indicating when the timer goes off */
3191    { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
3192      .secure = ARM_CP_SECSTATE_NS,
3193      .access = PL0_RW,
3194      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
3195      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
3196      .accessfn = gt_ptimer_access,
3197      .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read,
3198      .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write,
3199    },
3200    { .name = "CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2,
3201      .secure = ARM_CP_SECSTATE_S,
3202      .access = PL0_RW,
3203      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
3204      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
3205      .accessfn = gt_ptimer_access,
3206      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
3207    },
3208    { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
3209      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
3210      .access = PL0_RW,
3211      .type = ARM_CP_IO,
3212      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
3213      .resetvalue = 0, .accessfn = gt_ptimer_access,
3214      .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read,
3215      .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write,
3216    },
3217    { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
3218      .access = PL0_RW,
3219      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
3220      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
3221      .accessfn = gt_vtimer_access,
3222      .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read,
3223      .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write,
3224    },
3225    { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
3226      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
3227      .access = PL0_RW,
3228      .type = ARM_CP_IO,
3229      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
3230      .resetvalue = 0, .accessfn = gt_vtimer_access,
3231      .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read,
3232      .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write,
3233    },
3234    /*
3235     * Secure timer -- this is actually restricted to EL3 and,
3236     * configurably, to Secure EL1 via the accessfn.
3237     */
3238    { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64,
3239      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0,
3240      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW,
3241      .accessfn = gt_stimer_access,
3242      .readfn = gt_sec_tval_read,
3243      .writefn = gt_sec_tval_write,
3244      .resetfn = gt_sec_timer_reset,
3245    },
3246    { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64,
3247      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1,
3248      .type = ARM_CP_IO, .access = PL1_RW,
3249      .accessfn = gt_stimer_access,
3250      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl),
3251      .resetvalue = 0,
3252      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
3253    },
3254    { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64,
3255      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2,
3256      .type = ARM_CP_IO, .access = PL1_RW,
3257      .accessfn = gt_stimer_access,
3258      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
3259      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
3260    },
3261};
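/*
 * Editor's sketch (not part of the original source): the TVAL views in
 * the table above are derived state. Architecturally a TVAL read returns
 * CompareValue minus the current counter as a signed 32-bit value, and a
 * TVAL write sets CompareValue to the counter plus the sign-extended
 * written value; this is roughly what the gt_tval_read()/gt_tval_write()
 * helpers earlier in this file implement:
 *
 *   tval = (uint32_t)(cval - counter);              // read
 *   cval = counter + sextract64(value, 0, 32);      // write
 */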
3262
3263static CPAccessResult e2h_access(CPUARMState *env, const ARMCPRegInfo *ri,
3264                                 bool isread)
3265{
3266    if (!(arm_hcr_el2_eff(env) & HCR_E2H)) {
3267        return CP_ACCESS_TRAP;
3268    }
3269    return CP_ACCESS_OK;
3270}
3271
3272#else
3273
3274/*
3275 * In user-mode most of the generic timer registers are inaccessible;
3276 * however, modern kernels (4.12+) allow access to cntvct_el0.
3277 */
3278
3279static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
3280{
3281    ARMCPU *cpu = env_archcpu(env);
3282
3283    /*
3284     * Currently we have no support for QEMUTimer in linux-user, so we
3285     * can't call gt_get_countervalue(env); instead we call the
3286     * lower-level functions directly.
3287     */
3288    return cpu_get_clock() / gt_cntfrq_period_ns(cpu);
3289}
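/*
 * Editor's sketch (not part of the original source): assuming the default
 * GTIMER_SCALE of 16 ns per tick (a 62.5 MHz counter), the conversion
 * above is a plain division of the monotonic clock by the tick period:
 *
 *   uint64_t ns = cpu_get_clock();        // e.g. 1600 ns
 *   uint64_t period = 16;                 // gt_cntfrq_period_ns(cpu)
 *   uint64_t ticks = ns / period;         // 1600 / 16 == 100
 */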
3290
3291static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
3292    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
3293      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
3294      .type = ARM_CP_CONST, .access = PL0_R /* no PL1_RW in linux-user */,
3295      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
3296      .resetvalue = NANOSECONDS_PER_SECOND / GTIMER_SCALE,
3297    },
3298    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
3299      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
3300      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
3301      .readfn = gt_virt_cnt_read,
3302    },
3303};
3304
3305#endif
3306
3307static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
3308{
3309    if (arm_feature(env, ARM_FEATURE_LPAE)) {
3310        raw_write(env, ri, value);
3311    } else if (arm_feature(env, ARM_FEATURE_V7)) {
3312        raw_write(env, ri, value & 0xfffff6ff);
3313    } else {
3314        raw_write(env, ri, value & 0xfffff1ff);
3315    }
3316}
3317
3318#ifndef CONFIG_USER_ONLY
3319/* get_phys_addr() isn't present for user-mode-only targets */
3320
3321static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
3322                                 bool isread)
3323{
3324    if (ri->opc2 & 4) {
3325        /*
3326         * The ATS12NSO* operations must trap to EL3 or EL2 if executed in
3327         * Secure EL1 (which can only happen if EL3 is AArch64).
3328         * They are simply UNDEF if executed from NS EL1.
3329         * They function normally from EL2 or EL3.
3330         */
3331        if (arm_current_el(env) == 1) {
3332            if (arm_is_secure_below_el3(env)) {
3333                if (env->cp15.scr_el3 & SCR_EEL2) {
3334                    return CP_ACCESS_TRAP_EL2;
3335                }
3336                return CP_ACCESS_TRAP_EL3;
3337            }
3338            return CP_ACCESS_TRAP_UNCATEGORIZED;
3339        }
3340    }
3341    return CP_ACCESS_OK;
3342}
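/*
 * Editor's note (not part of the original source): the "opc2 & 4" test
 * above relies on the cp15 encoding of the address translation ops: the
 * current-state ATS1C* operations use opc2 values 0..3, while the
 * ATS12NSO* operations use opc2 values 4..7, so bit 2 of opc2 is enough
 * to distinguish the two groups (compare the opc2-based switch in
 * ats_write() below).
 */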
3343
3344#ifdef CONFIG_TCG
3345static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
3346                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
3347                             bool is_secure)
3348{
3349    bool ret;
3350    uint64_t par64;
3351    bool format64 = false;
3352    ARMMMUFaultInfo fi = {};
3353    GetPhysAddrResult res = {};
3354
3355    ret = get_phys_addr_with_secure(env, value, access_type, mmu_idx,
3356                                    is_secure, &res, &fi);
3357
3358    /*
3359     * ATS operations only do S1 or S1+S2 translations, so we never
3360     * have to deal with the ARMCacheAttrs format for S2 only.
3361     */
3362    assert(!res.cacheattrs.is_s2_format);
3363
3364    if (ret) {
3365        /*
3366         * Some kinds of translation fault must cause exceptions rather
3367         * than being reported in the PAR.
3368         */
3369        int current_el = arm_current_el(env);
3370        int target_el;
3371        uint32_t syn, fsr, fsc;
3372        bool take_exc = false;
3373
3374        if (fi.s1ptw && current_el == 1
3375            && arm_mmu_idx_is_stage1_of_2(mmu_idx)) {
3376            /*
3377             * Synchronous stage 2 fault on an access made as part of the
3378             * translation table walk for AT S1E0* or AT S1E1* insn
3379             * executed from NS EL1. If this is a synchronous external abort
3380             * and SCR_EL3.EA == 1, then we take a synchronous external abort
3381             * to EL3. Otherwise the fault is taken as an exception to EL2,
3382             * and HPFAR_EL2 holds the faulting IPA.
3383             */
3384            if (fi.type == ARMFault_SyncExternalOnWalk &&
3385                (env->cp15.scr_el3 & SCR_EA)) {
3386                target_el = 3;
3387            } else {
3388                env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4;
3389                if (arm_is_secure_below_el3(env) && fi.s1ns) {
3390                    env->cp15.hpfar_el2 |= HPFAR_NS;
3391                }
3392                target_el = 2;
3393            }
3394            take_exc = true;
3395        } else if (fi.type == ARMFault_SyncExternalOnWalk) {
3396            /*
3397             * Synchronous external aborts during a translation table walk
3398             * are taken as Data Abort exceptions.
3399             */
3400            if (fi.stage2) {
3401                if (current_el == 3) {
3402                    target_el = 3;
3403                } else {
3404                    target_el = 2;
3405                }
3406            } else {
3407                target_el = exception_target_el(env);
3408            }
3409            take_exc = true;
3410        }
3411
3412        if (take_exc) {
3413            /* Construct FSR and FSC using same logic as arm_deliver_fault() */
3414            if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
3415                arm_s1_regime_using_lpae_format(env, mmu_idx)) {
3416                fsr = arm_fi_to_lfsc(&fi);
3417                fsc = extract32(fsr, 0, 6);
3418            } else {
3419                fsr = arm_fi_to_sfsc(&fi);
3420                fsc = 0x3f;
3421            }
3422            /*
3423             * Report exception with ESR indicating a fault due to a
3424             * translation table walk for a cache maintenance instruction.
3425             */
3426            syn = syn_data_abort_no_iss(current_el == target_el, 0,
3427                                        fi.ea, 1, fi.s1ptw, 1, fsc);
3428            env->exception.vaddress = value;
3429            env->exception.fsr = fsr;
3430            raise_exception(env, EXCP_DATA_ABORT, syn, target_el);
3431        }
3432    }
3433
3434    if (is_a64(env)) {
3435        format64 = true;
3436    } else if (arm_feature(env, ARM_FEATURE_LPAE)) {
3437        /*
3438         * ATS1Cxx:
3439         * * TTBCR.EAE determines whether the result is returned using the
3440         *   32-bit or the 64-bit PAR format
3441         * * Instructions executed in Hyp mode always use the 64-bit format
3442         *
3443         * ATS1S2NSOxx uses the 64-bit format if any of the following is true:
3444         * * The Non-secure TTBCR.EAE bit is set to 1
3445         * * The implementation includes EL2, and the value of HCR.VM is 1
3446         *
3447         * (Note that HCR.DC makes HCR.VM behave as if it is 1.)
3448         *
3449         * ATS1Hx always uses the 64-bit format.
3450         */
3451        format64 = arm_s1_regime_using_lpae_format(env, mmu_idx);
3452
3453        if (arm_feature(env, ARM_FEATURE_EL2)) {
3454            if (mmu_idx == ARMMMUIdx_E10_0 ||
3455                mmu_idx == ARMMMUIdx_E10_1 ||
3456                mmu_idx == ARMMMUIdx_E10_1_PAN) {
3457                format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC);
3458            } else {
3459                format64 |= arm_current_el(env) == 2;
3460            }
3461        }
3462    }
3463
3464    if (format64) {
3465        /* Create a 64-bit PAR */
3466        par64 = (1 << 11); /* LPAE bit always set */
3467        if (!ret) {
3468            par64 |= res.f.phys_addr & ~0xfffULL;
3469            if (!res.f.attrs.secure) {
3470                par64 |= (1 << 9); /* NS */
3471            }
3472            par64 |= (uint64_t)res.cacheattrs.attrs << 56; /* ATTR */
3473            par64 |= res.cacheattrs.shareability << 7; /* SH */
3474        } else {
3475            uint32_t fsr = arm_fi_to_lfsc(&fi);
3476
3477            par64 |= 1; /* F */
3478            par64 |= (fsr & 0x3f) << 1; /* FS */
3479            if (fi.stage2) {
3480                par64 |= (1 << 9); /* S */
3481            }
3482            if (fi.s1ptw) {
3483                par64 |= (1 << 8); /* PTW */
3484            }
3485        }
3486    } else {
3487        /*
3488         * fsr is a DFSR/IFSR value for the short descriptor
3489         * translation table format (with WnR always clear).
3490         * Convert it to a 32-bit PAR.
3491         */
3492        if (!ret) {
3493            /* We do not set any attribute bits in the PAR */
3494            if (res.f.lg_page_size == 24
3495                && arm_feature(env, ARM_FEATURE_V7)) {
3496                par64 = (res.f.phys_addr & 0xff000000) | (1 << 1);
3497            } else {
3498                par64 = res.f.phys_addr & 0xfffff000;
3499            }
3500            if (!res.f.attrs.secure) {
3501                par64 |= (1 << 9); /* NS */
3502            }
3503        } else {
3504            uint32_t fsr = arm_fi_to_sfsc(&fi);
3505
3506            par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
3507                    ((fsr & 0xf) << 1) | 1;
3508        }
3509    }
3510    return par64;
3511}
3512#endif /* CONFIG_TCG */
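/*
 * Editor's sketch (not part of the original source): decoding a
 * successful 64-bit PAR value as constructed by do_ats_write() above.
 * The field positions follow the code rather than an independent reading
 * of the ARM ARM:
 *
 *   bool fault   = par64 & 1;                      // F, clear on success
 *   uint64_t pa  = par64 & 0x0000fffffffff000ULL;  // PA[47:12]
 *   bool ns      = par64 & (1 << 9);               // Non-secure
 *   int sh       = (par64 >> 7) & 3;               // shareability
 *   int attrs    = par64 >> 56;                    // MAIR-style attrs
 */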
3513
3514static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
3515{
3516#ifdef CONFIG_TCG
3517    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
3518    uint64_t par64;
3519    ARMMMUIdx mmu_idx;
3520    int el = arm_current_el(env);
3521    bool secure = arm_is_secure_below_el3(env);
3522
3523    switch (ri->opc2 & 6) {
3524    case 0:
3525        /* stage 1 current state PL1: ATS1CPR, ATS1CPW, ATS1CPRP, ATS1CPWP */
3526        switch (el) {
3527        case 3:
3528            mmu_idx = ARMMMUIdx_E3;
3529            secure = true;
3530            break;
3531        case 2:
3532            g_assert(!secure);  /* ARMv8.4-SecEL2 is 64-bit only */
3533            /* fall through */
3534        case 1:
3535            if (ri->crm == 9 && (env->uncached_cpsr & CPSR_PAN)) {
3536                mmu_idx = ARMMMUIdx_Stage1_E1_PAN;
3537            } else {
3538                mmu_idx = ARMMMUIdx_Stage1_E1;
3539            }
3540            break;
3541        default:
3542            g_assert_not_reached();
3543        }
3544        break;
3545    case 2:
3546        /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
3547        switch (el) {
3548        case 3:
3549            mmu_idx = ARMMMUIdx_E10_0;
3550            secure = true;
3551            break;
3552        case 2:
3553            g_assert(!secure);  /* ARMv8.4-SecEL2 is 64-bit only */
3554            mmu_idx = ARMMMUIdx_Stage1_E0;
3555            break;
3556        case 1:
3557            mmu_idx = ARMMMUIdx_Stage1_E0;
3558            break;
3559        default:
3560            g_assert_not_reached();
3561        }
3562        break;
3563    case 4:
3564        /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
3565        mmu_idx = ARMMMUIdx_E10_1;
3566        secure = false;
3567        break;
3568    case 6:
3569        /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
3570        mmu_idx = ARMMMUIdx_E10_0;
3571        secure = false;
3572        break;
3573    default:
3574        g_assert_not_reached();
3575    }
3576
3577    par64 = do_ats_write(env, value, access_type, mmu_idx, secure);
3578
3579    A32_BANKED_CURRENT_REG_SET(env, par, par64);
3580#else
3581    /* Handled by hardware accelerator. */
3582    g_assert_not_reached();
3583#endif /* CONFIG_TCG */
3584}
3585
3586static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
3587                        uint64_t value)
3588{
3589#ifdef CONFIG_TCG
3590    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
3591    uint64_t par64;
3592
3593    /* There is no SecureEL2 for AArch32. */
3594    par64 = do_ats_write(env, value, access_type, ARMMMUIdx_E2, false);
3595
3596    A32_BANKED_CURRENT_REG_SET(env, par, par64);
3597#else
3598    /* Handled by hardware accelerator. */
3599    g_assert_not_reached();
3600#endif /* CONFIG_TCG */
3601}
3602
3603static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
3604                                     bool isread)
3605{
3606    if (arm_current_el(env) == 3 &&
3607        !(env->cp15.scr_el3 & (SCR_NS | SCR_EEL2))) {
3608        return CP_ACCESS_TRAP;
3609    }
3610    return CP_ACCESS_OK;
3611}
3612
3613static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
3614                        uint64_t value)
3615{
3616#ifdef CONFIG_TCG
3617    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
3618    ARMMMUIdx mmu_idx;
3619    int secure = arm_is_secure_below_el3(env);
3620    uint64_t hcr_el2 = arm_hcr_el2_eff(env);
3621    bool regime_e20 = (hcr_el2 & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE);
3622
3623    switch (ri->opc2 & 6) {
3624    case 0:
3625        switch (ri->opc1) {
3626        case 0: /* AT S1E1R, AT S1E1W, AT S1E1RP, AT S1E1WP */
3627            if (ri->crm == 9 && (env->pstate & PSTATE_PAN)) {
3628                mmu_idx = regime_e20 ?
3629                          ARMMMUIdx_E20_2_PAN : ARMMMUIdx_Stage1_E1_PAN;
3630            } else {
3631                mmu_idx = regime_e20 ? ARMMMUIdx_E20_2 : ARMMMUIdx_Stage1_E1;
3632            }
3633            break;
3634        case 4: /* AT S1E2R, AT S1E2W */
3635            mmu_idx = hcr_el2 & HCR_E2H ? ARMMMUIdx_E20_2 : ARMMMUIdx_E2;
3636            break;
3637        case 6: /* AT S1E3R, AT S1E3W */
3638            mmu_idx = ARMMMUIdx_E3;
3639            secure = true;
3640            break;
3641        default:
3642            g_assert_not_reached();
3643        }
3644        break;
3645    case 2: /* AT S1E0R, AT S1E0W */
3646        mmu_idx = regime_e20 ? ARMMMUIdx_E20_0 : ARMMMUIdx_Stage1_E0;
3647        break;
3648    case 4: /* AT S12E1R, AT S12E1W */
3649        mmu_idx = regime_e20 ? ARMMMUIdx_E20_2 : ARMMMUIdx_E10_1;
3650        break;
3651    case 6: /* AT S12E0R, AT S12E0W */
3652        mmu_idx = regime_e20 ? ARMMMUIdx_E20_0 : ARMMMUIdx_E10_0;
3653        break;
3654    default:
3655        g_assert_not_reached();
3656    }
3657
3658    env->cp15.par_el[1] = do_ats_write(env, value, access_type,
3659                                       mmu_idx, secure);
3660#else
3661    /* Handled by hardware accelerator. */
3662    g_assert_not_reached();
3663#endif /* CONFIG_TCG */
3664}
3665#endif
3666
3667static const ARMCPRegInfo vapa_cp_reginfo[] = {
3668    { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
3669      .access = PL1_RW, .resetvalue = 0,
3670      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
3671                             offsetoflow32(CPUARMState, cp15.par_ns) },
3672      .writefn = par_write },
3673#ifndef CONFIG_USER_ONLY
3674    /* This underdecoding is safe because the reginfo is NO_RAW. */
3675    { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
3676      .access = PL1_W, .accessfn = ats_access,
3677      .writefn = ats_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
3678#endif
3679};
3680
3681/* Return basic MPU access permission bits.  */
3682static uint32_t simple_mpu_ap_bits(uint32_t val)
3683{
3684    uint32_t ret;
3685    uint32_t mask;
3686    int i;
3687    ret = 0;
3688    mask = 3;
3689    for (i = 0; i < 16; i += 2) {
3690        ret |= (val >> i) & mask;
3691        mask <<= 2;
3692    }
3693    return ret;
3694}
3695
3696/* Pad basic MPU access permission bits to extended format.  */
3697static uint32_t extended_mpu_ap_bits(uint32_t val)
3698{
3699    uint32_t ret;
3700    uint32_t mask;
3701    int i;
3702    ret = 0;
3703    mask = 3;
3704    for (i = 0; i < 16; i += 2) {
3705        ret |= (val & mask) << i;
3706        mask <<= 2;
3707    }
3708    return ret;
3709}
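/*
 * Editor's sketch (not part of the original source): the two helpers
 * above are inverses for the bits they preserve. The 2-bit AP field for
 * region n sits at bits [2n+1:2n] in the simple format and at bits
 * [4n+1:4n] in the extended format. For example, with region 0 AP = 0b10
 * and region 1 AP = 0b01:
 *
 *   extended_mpu_ap_bits(0x06) == 0x12
 *   simple_mpu_ap_bits(0x12)   == 0x06
 */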
3710
3711static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
3712                                 uint64_t value)
3713{
3714    env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
3715}
3716
3717static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
3718{
3719    return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
3720}
3721
3722static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
3723                                 uint64_t value)
3724{
3725    env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
3726}
3727
3728static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
3729{
3730    return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
3731}
3732
3733static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri)
3734{
3735    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
3736
3737    if (!u32p) {
3738        return 0;
3739    }
3740
3741    u32p += env->pmsav7.rnr[M_REG_NS];
3742    return *u32p;
3743}
3744
3745static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
3746                         uint64_t value)
3747{
3748    ARMCPU *cpu = env_archcpu(env);
3749    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
3750
3751    if (!u32p) {
3752        return;
3753    }
3754
3755    u32p += env->pmsav7.rnr[M_REG_NS];
3756    tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
3757    *u32p = value;
3758}
3759
3760static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3761                              uint64_t value)
3762{
3763    ARMCPU *cpu = env_archcpu(env);
3764    uint32_t nrgs = cpu->pmsav7_dregion;
3765
3766    if (value >= nrgs) {
3767        qemu_log_mask(LOG_GUEST_ERROR,
3768                      "PMSAv7 RGNR write >= # supported regions, %" PRIu32
3769                      " >= %" PRIu32 "\n", (uint32_t)value, nrgs);
3770        return;
3771    }
3772
3773    raw_write(env, ri, value);
3774}
3775
3776static void prbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
3777                          uint64_t value)
3778{
3779    ARMCPU *cpu = env_archcpu(env);
3780
3781    tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
3782    env->pmsav8.rbar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]] = value;
3783}
3784
3785static uint64_t prbar_read(CPUARMState *env, const ARMCPRegInfo *ri)
3786{
3787    return env->pmsav8.rbar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]];
3788}
3789
3790static void prlar_write(CPUARMState *env, const ARMCPRegInfo *ri,
3791                          uint64_t value)
3792{
3793    ARMCPU *cpu = env_archcpu(env);
3794
3795    tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
3796    env->pmsav8.rlar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]] = value;
3797}
3798
3799static uint64_t prlar_read(CPUARMState *env, const ARMCPRegInfo *ri)
3800{
3801    return env->pmsav8.rlar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]];
3802}
3803
3804static void prselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3805                           uint64_t value)
3806{
3807    ARMCPU *cpu = env_archcpu(env);
3808
3809    /*
3810     * Ignore writes that would select an unimplemented region.
3811     * Doing so is architecturally UNPREDICTABLE.
3812     */
3813    if (value >= cpu->pmsav7_dregion) {
3814        return;
3815    }
3816
3817    env->pmsav7.rnr[M_REG_NS] = value;
3818}
3819
3820static void hprbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
3821                          uint64_t value)
3822{
3823    ARMCPU *cpu = env_archcpu(env);
3824
3825    tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
3826    env->pmsav8.hprbar[env->pmsav8.hprselr] = value;
3827}
3828
3829static uint64_t hprbar_read(CPUARMState *env, const ARMCPRegInfo *ri)
3830{
3831    return env->pmsav8.hprbar[env->pmsav8.hprselr];
3832}
3833
3834static void hprlar_write(CPUARMState *env, const ARMCPRegInfo *ri,
3835                          uint64_t value)
3836{
3837    ARMCPU *cpu = env_archcpu(env);
3838
3839    tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
3840    env->pmsav8.hprlar[env->pmsav8.hprselr] = value;
3841}
3842
3843static uint64_t hprlar_read(CPUARMState *env, const ARMCPRegInfo *ri)
3844{
3845    return env->pmsav8.hprlar[env->pmsav8.hprselr];
3846}
3847
3848static void hprenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3849                          uint64_t value)
3850{
3851    uint32_t n;
3852    uint32_t bit;
3853    ARMCPU *cpu = env_archcpu(env);
3854
3855    /* Ignore writes to unimplemented regions */
3856    int rmax = MIN(cpu->pmsav8r_hdregion, 32);
3857    value &= MAKE_64BIT_MASK(0, rmax);
3858
3859    tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
3860
3861    /* The register alias is only valid for the first 32 indexes */
3862    for (n = 0; n < rmax; ++n) {
3863        bit = extract32(value, n, 1);
3864        env->pmsav8.hprlar[n] = deposit32(
3865                    env->pmsav8.hprlar[n], 0, 1, bit);
3866    }
3867}
3868
3869static uint64_t hprenr_read(CPUARMState *env, const ARMCPRegInfo *ri)
3870{
3871    uint32_t n;
3872    uint32_t result = 0x0;
3873    ARMCPU *cpu = env_archcpu(env);
3874
3875    /* The register alias is only valid for the first 32 indexes */
3876    for (n = 0; n < MIN(cpu->pmsav8r_hdregion, 32); ++n) {
3877        if (env->pmsav8.hprlar[n] & 0x1) {
3878            result |= (0x1 << n);
3879        }
3880    }
3881    return result;
3882}
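/*
 * Editor's sketch (not part of the original source): HPRENR is a packed
 * alias of the enable bits (bit 0) of the first 32 HPRLAR registers.
 * With, say, pmsav8r_hdregion == 4, writing 0b0101 enables regions 0
 * and 2 and disables regions 1 and 3; a subsequent hprenr_read() then
 * returns 0b0101.
 */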
3883
3884static void hprselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3885                           uint64_t value)
3886{
3887    ARMCPU *cpu = env_archcpu(env);
3888
3889    /*
3890     * Ignore writes that would select an unimplemented region.
3891     * Doing so is architecturally UNPREDICTABLE.
3892     */
3893    if (value >= cpu->pmsav8r_hdregion) {
3894        return;
3895    }
3896
3897    env->pmsav8.hprselr = value;
3898}
3899
3900static void pmsav8r_regn_write(CPUARMState *env, const ARMCPRegInfo *ri,
3901                          uint64_t value)
3902{
3903    ARMCPU *cpu = env_archcpu(env);
3904    uint8_t index = (extract32(ri->opc0, 0, 1) << 4) |
3905                    (extract32(ri->crm, 0, 3) << 1) | extract32(ri->opc2, 2, 1);
3906
3907    tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
3908
3909    if (ri->opc1 & 4) {
3910        if (index >= cpu->pmsav8r_hdregion) {
3911            return;
3912        }
3913        if (ri->opc2 & 0x1) {
3914            env->pmsav8.hprlar[index] = value;
3915        } else {
3916            env->pmsav8.hprbar[index] = value;
3917        }
3918    } else {
3919        if (index >= cpu->pmsav7_dregion) {
3920            return;
3921        }
3922        if (ri->opc2 & 0x1) {
3923            env->pmsav8.rlar[M_REG_NS][index] = value;
3924        } else {
3925            env->pmsav8.rbar[M_REG_NS][index] = value;
3926        }
3927    }
3928}
3929
3930static uint64_t pmsav8r_regn_read(CPUARMState *env, const ARMCPRegInfo *ri)
3931{
3932    ARMCPU *cpu = env_archcpu(env);
3933    uint8_t index = (extract32(ri->opc0, 0, 1) << 4) |
3934                    (extract32(ri->crm, 0, 3) << 1) | extract32(ri->opc2, 2, 1);
3935
3936    if (ri->opc1 & 4) {
3937        if (index >= cpu->pmsav8r_hdregion) {
3938            return 0x0;
3939        }
3940        if (ri->opc2 & 0x1) {
3941            return env->pmsav8.hprlar[index];
3942        } else {
3943            return env->pmsav8.hprbar[index];
3944        }
3945    } else {
3946        if (index >= cpu->pmsav7_dregion) {
3947            return 0x0;
3948        }
3949        if (ri->opc2 & 0x1) {
3950            return env->pmsav8.rlar[M_REG_NS][index];
3951        } else {
3952            return env->pmsav8.rbar[M_REG_NS][index];
3953        }
3954    }
3955}
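/*
 * Editor's sketch (not part of the original source): the accessors above
 * recover the region index from the register encoding as
 *
 *   index = ((opc0 & 1) << 4) | ((crm & 7) << 1) | ((opc2 >> 2) & 1);
 *
 * so, for example, an index of 5 (0b00101) corresponds to crm = 0b010
 * with bit 2 of opc2 set.
 */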
3956
3957static const ARMCPRegInfo pmsav8r_cp_reginfo[] = {
3958    { .name = "PRBAR",
3959      .cp = 15, .opc1 = 0, .crn = 6, .crm = 3, .opc2 = 0,
3960      .access = PL1_RW, .type = ARM_CP_NO_RAW,
3961      .accessfn = access_tvm_trvm,
3962      .readfn = prbar_read, .writefn = prbar_write },
3963    { .name = "PRLAR",
3964      .cp = 15, .opc1 = 0, .crn = 6, .crm = 3, .opc2 = 1,
3965      .access = PL1_RW, .type = ARM_CP_NO_RAW,
3966      .accessfn = access_tvm_trvm,
3967      .readfn = prlar_read, .writefn = prlar_write },
3968    { .name = "PRSELR", .resetvalue = 0,
3969      .cp = 15, .opc1 = 0, .crn = 6, .crm = 2, .opc2 = 1,
3970      .access = PL1_RW, .accessfn = access_tvm_trvm,
3971      .writefn = prselr_write,
3972      .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]) },
3973    { .name = "HPRBAR", .resetvalue = 0,
3974      .cp = 15, .opc1 = 4, .crn = 6, .crm = 3, .opc2 = 0,
3975      .access = PL2_RW, .type = ARM_CP_NO_RAW,
3976      .readfn = hprbar_read, .writefn = hprbar_write },
3977    { .name = "HPRLAR",
3978      .cp = 15, .opc1 = 4, .crn = 6, .crm = 3, .opc2 = 1,
3979      .access = PL2_RW, .type = ARM_CP_NO_RAW,
3980      .readfn = hprlar_read, .writefn = hprlar_write },
3981    { .name = "HPRSELR", .resetvalue = 0,
3982      .cp = 15, .opc1 = 4, .crn = 6, .crm = 2, .opc2 = 1,
3983      .access = PL2_RW,
3984      .writefn = hprselr_write,
3985      .fieldoffset = offsetof(CPUARMState, pmsav8.hprselr) },
3986    { .name = "HPRENR",
3987      .cp = 15, .opc1 = 4, .crn = 6, .crm = 1, .opc2 = 1,
3988      .access = PL2_RW, .type = ARM_CP_NO_RAW,
3989      .readfn = hprenr_read, .writefn = hprenr_write },
3990};
3991
3992static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
3993    /*
3994     * Reset for all these registers is handled in arm_cpu_reset(),
3995     * because the PMSAv7 is also used by M-profile CPUs, which do
3996     * not register cpregs but still need the state to be reset.
3997     */
3998    { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0,
3999      .access = PL1_RW, .type = ARM_CP_NO_RAW,
4000      .fieldoffset = offsetof(CPUARMState, pmsav7.drbar),
4001      .readfn = pmsav7_read, .writefn = pmsav7_write,
4002      .resetfn = arm_cp_reset_ignore },
4003    { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2,
4004      .access = PL1_RW, .type = ARM_CP_NO_RAW,
4005      .fieldoffset = offsetof(CPUARMState, pmsav7.drsr),
4006      .readfn = pmsav7_read, .writefn = pmsav7_write,
4007      .resetfn = arm_cp_reset_ignore },
4008    { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4,
4009      .access = PL1_RW, .type = ARM_CP_NO_RAW,
4010      .fieldoffset = offsetof(CPUARMState, pmsav7.dracr),
4011      .readfn = pmsav7_read, .writefn = pmsav7_write,
4012      .resetfn = arm_cp_reset_ignore },
4013    { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0,
4014      .access = PL1_RW,
4015      .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]),
4016      .writefn = pmsav7_rgnr_write,
4017      .resetfn = arm_cp_reset_ignore },
4018};
4019
4020static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
4021    { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
4022      .access = PL1_RW, .type = ARM_CP_ALIAS,
4023      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
4024      .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
4025    { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
4026      .access = PL1_RW, .type = ARM_CP_ALIAS,
4027      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
4028      .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
4029    { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
4030      .access = PL1_RW,
4031      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
4032      .resetvalue = 0, },
4033    { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
4034      .access = PL1_RW,
4035      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
4036      .resetvalue = 0, },
4037    { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
4038      .access = PL1_RW,
4039      .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
4040    { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
4041      .access = PL1_RW,
4042      .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
4043    /* Protection region base and size registers */
4044    { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
4045      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
4046      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
4047    { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
4048      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
4049      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
4050    { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
4051      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
4052      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
4053    { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
4054      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
4055      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
4056    { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
4057      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
4058      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
4059    { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
4060      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
4061      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
4062    { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
4063      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
4064      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
4065    { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
4066      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
4067      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
4068};
4069
4070static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4071                             uint64_t value)
4072{
4073    ARMCPU *cpu = env_archcpu(env);
4074
4075    if (!arm_feature(env, ARM_FEATURE_V8)) {
4076        if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
4077            /*
4078             * Pre-ARMv8, bits [21:19], [15:14] and [6:3] are UNK/SBZP when
4079             * using the Long-descriptor translation table format.
4080             */
4081            value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
4082        } else if (arm_feature(env, ARM_FEATURE_EL3)) {
4083            /*
4084             * In an implementation that includes the Security Extensions
4085             * TTBCR has additional fields PD0 [4] and PD1 [5] for
4086             * Short-descriptor translation table format.
4087             */
4088            value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
4089        } else {
4090            value &= TTBCR_N;
4091        }
4092    }
4093
4094    if (arm_feature(env, ARM_FEATURE_LPAE)) {
4095        /*
4096         * With LPAE the TTBCR could result in a change of ASID
4097         * via the TTBCR.A1 bit, so do a TLB flush.
4098         */
4099        tlb_flush(CPU(cpu));
4100    }
4101    raw_write(env, ri, value);
4102}
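/*
 * Editor's sketch (not part of the original source): for a pre-v8 CPU
 * with LPAE, a TTBCR write with EAE set has its UNK/SBZP fields masked
 * out by the code above:
 *
 *   value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
 *   // e.g. 0xffffffff & ~0x0038c078 == 0xffc73f87
 *
 * so bits [21:19], [15:14] and [6:3] always read back as zero.
 */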
4103
4104static void vmsa_tcr_el12_write(CPUARMState *env, const ARMCPRegInfo *ri,
4105                               uint64_t value)
4106{
4107    ARMCPU *cpu = env_archcpu(env);
4108
4109    /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
4110    tlb_flush(CPU(cpu));
4111    raw_write(env, ri, value);
4112}
4113
4114static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4115                            uint64_t value)
4116{
4117    /* If the ASID changes (with a 64-bit write), we must flush the TLB.  */
4118    if (cpreg_field_is_64bit(ri) &&
4119        extract64(raw_read(env, ri) ^ value, 48, 16) != 0) {
4120        ARMCPU *cpu = env_archcpu(env);
4121        tlb_flush(CPU(cpu));
4122    }
4123    raw_write(env, ri, value);
4124}
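/*
 * Editor's note (not part of the original source): the ASID lives in
 * TTBRn[63:48], so extract64(old ^ new, 48, 16) is non-zero exactly when
 * the ASID field changed, which is the only case that needs a TLB flush
 * here. vttbr_write() below uses the same XOR-and-extract pattern to
 * detect a VMID change in VTTBR[63:48].
 */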
4125
4126static void vmsa_tcr_ttbr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
4127                                    uint64_t value)
4128{
4129    /*
4130     * If we are running with the E2&0 regime, then an ASID is active.
4131     * Flush if that might be changing.  Note we're not checking
4132     * TCR_EL2.A1 to know whether this is really the TTBRx_EL2 that
4133     * holds the active ASID; we only check the field that might.
4134     */
4135    if (extract64(raw_read(env, ri) ^ value, 48, 16) &&
4136        (arm_hcr_el2_eff(env) & HCR_E2H)) {
4137        uint16_t mask = ARMMMUIdxBit_E20_2 |
4138                        ARMMMUIdxBit_E20_2_PAN |
4139                        ARMMMUIdxBit_E20_0;
4140        tlb_flush_by_mmuidx(env_cpu(env), mask);
4141    }
4142    raw_write(env, ri, value);
4143}
4144
4145static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4146                        uint64_t value)
4147{
4148    ARMCPU *cpu = env_archcpu(env);
4149    CPUState *cs = CPU(cpu);
4150
4151    /*
4152     * A change in the VMID for the stage 2 page tables invalidates
4153     * the stage 2 and combined stage 1&2 TLBs (EL10_1 and EL10_0).
4154     */
4155    if (extract64(raw_read(env, ri) ^ value, 48, 16) != 0) {
4156        tlb_flush_by_mmuidx(cs, alle1_tlbmask(env));
4157    }
4158    raw_write(env, ri, value);
4159}
4160
4161static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
4162    { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
4163      .access = PL1_RW, .accessfn = access_tvm_trvm, .type = ARM_CP_ALIAS,
4164      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s),
4165                             offsetoflow32(CPUARMState, cp15.dfsr_ns) }, },
4166    { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
4167      .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
4168      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s),
4169                             offsetoflow32(CPUARMState, cp15.ifsr_ns) } },
4170    { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
4171      .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
4172      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s),
4173                             offsetof(CPUARMState, cp15.dfar_ns) } },
4174    { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
4175      .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
4176      .access = PL1_RW, .accessfn = access_tvm_trvm,
4177      .fgt = FGT_FAR_EL1,
4178      .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
4179      .resetvalue = 0, },
4180};
4181
4182static const ARMCPRegInfo vmsa_cp_reginfo[] = {
4183    { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
4184      .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
4185      .access = PL1_RW, .accessfn = access_tvm_trvm,
4186      .fgt = FGT_ESR_EL1,
4187      .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
4188    { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
4189      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
4190      .access = PL1_RW, .accessfn = access_tvm_trvm,
4191      .fgt = FGT_TTBR0_EL1,
4192      .writefn = vmsa_ttbr_write, .resetvalue = 0, .raw_writefn = raw_write,
4193      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
4194                             offsetof(CPUARMState, cp15.ttbr0_ns) } },
4195    { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
4196      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1,
4197      .access = PL1_RW, .accessfn = access_tvm_trvm,
4198      .fgt = FGT_TTBR1_EL1,
4199      .writefn = vmsa_ttbr_write, .resetvalue = 0, .raw_writefn = raw_write,
4200      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
4201                             offsetof(CPUARMState, cp15.ttbr1_ns) } },
4202    { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
4203      .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
4204      .access = PL1_RW, .accessfn = access_tvm_trvm,
4205      .fgt = FGT_TCR_EL1,
4206      .writefn = vmsa_tcr_el12_write,
4207      .raw_writefn = raw_write,
4208      .resetvalue = 0,
4209      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
4210    { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
4211      .access = PL1_RW, .accessfn = access_tvm_trvm,
4212      .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
4213      .raw_writefn = raw_write,
4214      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]),
4215                             offsetoflow32(CPUARMState, cp15.tcr_el[1])} },
4216};
4217
4218/*
4219 * Note that unlike TTBCR, writing to TTBCR2 does not require flushing
4220 * qemu tlbs nor adjusting cached masks.
4221 */
4222static const ARMCPRegInfo ttbcr2_reginfo = {
4223    .name = "TTBCR2", .cp = 15, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 3,
4224    .access = PL1_RW, .accessfn = access_tvm_trvm,
4225    .type = ARM_CP_ALIAS,
4226    .bank_fieldoffsets = {
4227        offsetofhigh32(CPUARMState, cp15.tcr_el[3]),
4228        offsetofhigh32(CPUARMState, cp15.tcr_el[1]),
4229    },
4230};
4231
4232static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
4233                                uint64_t value)
4234{
4235    env->cp15.c15_ticonfig = value & 0xe7;
4236    /* The OS_TYPE bit in this register changes the reported CPUID! */
4237    env->cp15.c0_cpuid = (value & (1 << 5)) ?
4238        ARM_CPUID_TI915T : ARM_CPUID_TI925T;
4239}
4240
4241static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
4242                                uint64_t value)
4243{
4244    env->cp15.c15_threadid = value & 0xffff;
4245}
4246
4247static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
4248                           uint64_t value)
4249{
4250    /* Wait-for-interrupt (deprecated) */
4251    cpu_interrupt(env_cpu(env), CPU_INTERRUPT_HALT);
4252}
4253
4254static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
4255                                  uint64_t value)
4256{
4257    /*
4258     * On OMAP there are registers indicating the max/min index of dcache lines
4259     * containing a dirty line; cache flush operations have to reset these.
4260     */
4261    env->cp15.c15_i_max = 0x000;
4262    env->cp15.c15_i_min = 0xff0;
4263}
4264
4265static const ARMCPRegInfo omap_cp_reginfo[] = {
4266    { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
4267      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
4268      .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
4269      .resetvalue = 0, },
4270    { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
4271      .access = PL1_RW, .type = ARM_CP_NOP },
4272    { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
4273      .access = PL1_RW,
4274      .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
4275      .writefn = omap_ticonfig_write },
4276    { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
4277      .access = PL1_RW,
4278      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
4279    { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
4280      .access = PL1_RW, .resetvalue = 0xff0,
4281      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
4282    { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
4283      .access = PL1_RW,
4284      .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
4285      .writefn = omap_threadid_write },
4286    { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
4287      .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
4288      .type = ARM_CP_NO_RAW,
4289      .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
4290    /*
4291     * TODO: Peripheral port remap register:
4292     * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
4293     * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
4294     * when MMU is off.
4295     */
4296    { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
4297      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
4298      .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW,
4299      .writefn = omap_cachemaint_write },
4300    { .name = "C9", .cp = 15, .crn = 9,
4301      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
4302      .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
4303};
4304
4305static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
4306                              uint64_t value)
4307{
4308    env->cp15.c15_cpar = value & 0x3fff;
4309}
4310
4311static const ARMCPRegInfo xscale_cp_reginfo[] = {
4312    { .name = "XSCALE_CPAR",
4313      .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
4314      .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
4315      .writefn = xscale_cpar_write, },
4316    { .name = "XSCALE_AUXCR",
4317      .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
4318      .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
4319      .resetvalue = 0, },
4320    /*
4321     * XScale-specific cache lockdown: since we have no cache we NOP these
4322     * and hope the guest does not really rely on cache behaviour.
4323     */
4324    { .name = "XSCALE_LOCK_ICACHE_LINE",
4325      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
4326      .access = PL1_W, .type = ARM_CP_NOP },
4327    { .name = "XSCALE_UNLOCK_ICACHE",
4328      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
4329      .access = PL1_W, .type = ARM_CP_NOP },
4330    { .name = "XSCALE_DCACHE_LOCK",
4331      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0,
4332      .access = PL1_RW, .type = ARM_CP_NOP },
4333    { .name = "XSCALE_UNLOCK_DCACHE",
4334      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1,
4335      .access = PL1_W, .type = ARM_CP_NOP },
4336};
4337
4338static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
4339    /*
4340     * RAZ/WI the whole crn=15 space when we don't have a more specific
4341     * implementation of this implementation-defined space.
4342     * Ideally this should eventually disappear in favour of actually
4343     * implementing the correct behaviour for all cores.
4344     */
4345    { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
4346      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
4347      .access = PL1_RW,
4348      .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
4349      .resetvalue = 0 },
4350};
4351
4352static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
4353    /* Cache status: RAZ because we have no cache so it's always clean */
4354    { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
4355      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
4356      .resetvalue = 0 },
4357};
4358
4359static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
4360    /* We never have a block transfer operation in progress */
4361    { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
4362      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
4363      .resetvalue = 0 },
4364    /* The cache ops themselves: these all NOP for QEMU */
4365    { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
4366      .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
4367    { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
4368      .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
4369    { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
4370      .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
4371    { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
4372      .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
4373    { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
4374      .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
4375    { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
4376      .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
4377};
4378
4379static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
4380    /*
4381     * The cache test-and-clean instructions always return (1 << 30)
4382     * to indicate that there are no dirty cache lines.
4383     */
4384    { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
4385      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
4386      .resetvalue = (1 << 30) },
4387    { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
4388      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
4389      .resetvalue = (1 << 30) },
4390};
4391
4392static const ARMCPRegInfo strongarm_cp_reginfo[] = {
4393    /* Ignore ReadBuffer accesses */
4394    { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
4395      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
4396      .access = PL1_RW, .resetvalue = 0,
4397      .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
4398};
4399
4400static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
4401{
4402    unsigned int cur_el = arm_current_el(env);
4403
4404    if (arm_is_el2_enabled(env) && cur_el == 1) {
4405        return env->cp15.vpidr_el2;
4406    }
4407    return raw_read(env, ri);
4408}
4409
4410static uint64_t mpidr_read_val(CPUARMState *env)
4411{
4412    ARMCPU *cpu = env_archcpu(env);
4413    uint64_t mpidr = cpu->mp_affinity;
4414
4415    if (arm_feature(env, ARM_FEATURE_V7MP)) {
4416        mpidr |= (1U << 31);
4417        /*
4418         * Cores which are uniprocessor (non-coherent) but still
4419         * implement the MP extensions set bit 30 (for instance,
4420         * the Cortex-R5).
4421         */
4422        if (cpu->mp_is_up) {
4423            mpidr |= (1u << 30);
4424        }
4425    }
4426    return mpidr;
4427}
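/*
 * Editor's sketch (not part of the original source): for a uniprocessor
 * core that implements the MP extensions (e.g. a Cortex-R5-like
 * configuration with mp_affinity == 0), the function above returns
 *
 *   (1U << 31) | (1U << 30) | 0 == 0xc0000000
 *
 * i.e. the MP-extensions format bit plus the U (uniprocessor) bit.
 */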
4428
4429static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
4430{
4431    unsigned int cur_el = arm_current_el(env);
4432
4433    if (arm_is_el2_enabled(env) && cur_el == 1) {
4434        return env->cp15.vmpidr_el2;
4435    }
4436    return mpidr_read_val(env);
4437}
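
/*
 * Illustrative sketch (disabled, not a QEMU API): how an MPIDR value built
 * above decomposes into the architected affinity fields. The helper name
 * and the qemu_printf() formatting are assumptions for illustration only.
 */
#if 0
static void mpidr_decode_example(uint64_t mpidr)
{
    unsigned aff0 = extract64(mpidr, 0, 8);   /* lowest affinity level */
    unsigned aff1 = extract64(mpidr, 8, 8);
    unsigned aff2 = extract64(mpidr, 16, 8);
    unsigned aff3 = extract64(mpidr, 32, 8);  /* AArch64 state only */
    bool mp = extract64(mpidr, 31, 1);        /* MP-extensions format bit */
    bool up = extract64(mpidr, 30, 1);        /* uniprocessor, e.g. Cortex-R5 */

    qemu_printf("Aff3.Aff2.Aff1.Aff0 = %u.%u.%u.%u mp=%d up=%d\n",
                aff3, aff2, aff1, aff0, mp, up);
}
#endif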
4438
4439static const ARMCPRegInfo lpae_cp_reginfo[] = {
4440    /* NOP AMAIR0/1 */
4441    { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
4442      .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
4443      .access = PL1_RW, .accessfn = access_tvm_trvm,
4444      .fgt = FGT_AMAIR_EL1,
4445      .type = ARM_CP_CONST, .resetvalue = 0 },
4446    /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
4447    { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
4448      .access = PL1_RW, .accessfn = access_tvm_trvm,
4449      .type = ARM_CP_CONST, .resetvalue = 0 },
4450    { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
4451      .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0,
4452      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s),
4453                             offsetof(CPUARMState, cp15.par_ns)} },
4454    { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
4455      .access = PL1_RW, .accessfn = access_tvm_trvm,
4456      .type = ARM_CP_64BIT | ARM_CP_ALIAS,
4457      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
4458                             offsetof(CPUARMState, cp15.ttbr0_ns) },
4459      .writefn = vmsa_ttbr_write, .raw_writefn = raw_write },
4460    { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
4461      .access = PL1_RW, .accessfn = access_tvm_trvm,
4462      .type = ARM_CP_64BIT | ARM_CP_ALIAS,
4463      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
4464                             offsetof(CPUARMState, cp15.ttbr1_ns) },
4465      .writefn = vmsa_ttbr_write, .raw_writefn = raw_write },
4466};
4467
4468static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
4469{
4470    return vfp_get_fpcr(env);
4471}
4472
4473static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4474                            uint64_t value)
4475{
4476    vfp_set_fpcr(env, value);
4477}
4478
4479static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
4480{
4481    return vfp_get_fpsr(env);
4482}
4483
4484static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4485                            uint64_t value)
4486{
4487    vfp_set_fpsr(env, value);
4488}
4489
4490static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri,
4491                                       bool isread)
4492{
4493    if (arm_current_el(env) == 0 && !(arm_sctlr(env, 0) & SCTLR_UMA)) {
4494        return CP_ACCESS_TRAP;
4495    }
4496    return CP_ACCESS_OK;
4497}
4498
4499static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
4500                            uint64_t value)
4501{
4502    env->daif = value & PSTATE_DAIF;
4503}
4504
4505static uint64_t aa64_pan_read(CPUARMState *env, const ARMCPRegInfo *ri)
4506{
4507    return env->pstate & PSTATE_PAN;
4508}
4509
4510static void aa64_pan_write(CPUARMState *env, const ARMCPRegInfo *ri,
4511                           uint64_t value)
4512{
4513    env->pstate = (env->pstate & ~PSTATE_PAN) | (value & PSTATE_PAN);
4514}
4515
4516static const ARMCPRegInfo pan_reginfo = {
4517    .name = "PAN", .state = ARM_CP_STATE_AA64,
4518    .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 3,
4519    .type = ARM_CP_NO_RAW, .access = PL1_RW,
4520    .readfn = aa64_pan_read, .writefn = aa64_pan_write
4521};
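
/*
 * Illustrative sketch (disabled): the PSTATE.PAN/UAO/DIT/SSBS accessors in
 * this file all share the same read-modify-write shape, which could be
 * expressed with a hypothetical helper like this:
 */
#if 0
static void pstate_bit_write(CPUARMState *env, uint64_t bit, uint64_t value)
{
    /* Clear the target bit, then copy in only that bit of the new value. */
    env->pstate = (env->pstate & ~bit) | (value & bit);
}
#endif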
4522
4523static uint64_t aa64_uao_read(CPUARMState *env, const ARMCPRegInfo *ri)
4524{
4525    return env->pstate & PSTATE_UAO;
4526}
4527
4528static void aa64_uao_write(CPUARMState *env, const ARMCPRegInfo *ri,
4529                           uint64_t value)
4530{
4531    env->pstate = (env->pstate & ~PSTATE_UAO) | (value & PSTATE_UAO);
4532}
4533
4534static const ARMCPRegInfo uao_reginfo = {
4535    .name = "UAO", .state = ARM_CP_STATE_AA64,
4536    .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 4,
4537    .type = ARM_CP_NO_RAW, .access = PL1_RW,
4538    .readfn = aa64_uao_read, .writefn = aa64_uao_write
4539};
4540
4541static uint64_t aa64_dit_read(CPUARMState *env, const ARMCPRegInfo *ri)
4542{
4543    return env->pstate & PSTATE_DIT;
4544}
4545
4546static void aa64_dit_write(CPUARMState *env, const ARMCPRegInfo *ri,
4547                           uint64_t value)
4548{
4549    env->pstate = (env->pstate & ~PSTATE_DIT) | (value & PSTATE_DIT);
4550}
4551
4552static const ARMCPRegInfo dit_reginfo = {
4553    .name = "DIT", .state = ARM_CP_STATE_AA64,
4554    .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 5,
4555    .type = ARM_CP_NO_RAW, .access = PL0_RW,
4556    .readfn = aa64_dit_read, .writefn = aa64_dit_write
4557};
4558
4559static uint64_t aa64_ssbs_read(CPUARMState *env, const ARMCPRegInfo *ri)
4560{
4561    return env->pstate & PSTATE_SSBS;
4562}
4563
4564static void aa64_ssbs_write(CPUARMState *env, const ARMCPRegInfo *ri,
4565                           uint64_t value)
4566{
4567    env->pstate = (env->pstate & ~PSTATE_SSBS) | (value & PSTATE_SSBS);
4568}
4569
4570static const ARMCPRegInfo ssbs_reginfo = {
4571    .name = "SSBS", .state = ARM_CP_STATE_AA64,
4572    .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 6,
4573    .type = ARM_CP_NO_RAW, .access = PL0_RW,
4574    .readfn = aa64_ssbs_read, .writefn = aa64_ssbs_write
4575};
4576
4577static CPAccessResult aa64_cacheop_poc_access(CPUARMState *env,
4578                                              const ARMCPRegInfo *ri,
4579                                              bool isread)
4580{
4581    /* Cache invalidate/clean to Point of Coherency or Persistence...  */
4582    switch (arm_current_el(env)) {
4583    case 0:
4584        /* ... EL0 must UNDEF unless SCTLR_EL1.UCI is set.  */
4585        if (!(arm_sctlr(env, 0) & SCTLR_UCI)) {
4586            return CP_ACCESS_TRAP;
4587        }
4588        /* fall through */
4589    case 1:
4590        /* ... EL1 must trap to EL2 if HCR_EL2.TPCP is set.  */
4591        if (arm_hcr_el2_eff(env) & HCR_TPCP) {
4592            return CP_ACCESS_TRAP_EL2;
4593        }
4594        break;
4595    }
4596    return CP_ACCESS_OK;
4597}
4598
4599static CPAccessResult do_cacheop_pou_access(CPUARMState *env, uint64_t hcrflags)
4600{
4601    /* Cache invalidate/clean to Point of Unification... */
4602    switch (arm_current_el(env)) {
4603    case 0:
4604        /* ... EL0 must UNDEF unless SCTLR_EL1.UCI is set.  */
4605        if (!(arm_sctlr(env, 0) & SCTLR_UCI)) {
4606            return CP_ACCESS_TRAP;
4607        }
4608        /* fall through */
4609    case 1:
4610        /* ... EL1 must trap to EL2 if relevant HCR_EL2 flags are set.  */
4611        if (arm_hcr_el2_eff(env) & hcrflags) {
4612            return CP_ACCESS_TRAP_EL2;
4613        }
4614        break;
4615    }
4616    return CP_ACCESS_OK;
4617}
4618
4619static CPAccessResult access_ticab(CPUARMState *env, const ARMCPRegInfo *ri,
4620                                   bool isread)
4621{
4622    return do_cacheop_pou_access(env, HCR_TICAB | HCR_TPU);
4623}
4624
4625static CPAccessResult access_tocu(CPUARMState *env, const ARMCPRegInfo *ri,
4626                                  bool isread)
4627{
4628    return do_cacheop_pou_access(env, HCR_TOCU | HCR_TPU);
4629}
4630
4631/*
4632 * See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions
4633 * Page D4-1736 (DDI0487A.b)
4634 */
4635
4636static int vae1_tlbmask(CPUARMState *env)
4637{
4638    uint64_t hcr = arm_hcr_el2_eff(env);
4639    uint16_t mask;
4640
4641    if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
4642        mask = ARMMMUIdxBit_E20_2 |
4643               ARMMMUIdxBit_E20_2_PAN |
4644               ARMMMUIdxBit_E20_0;
4645    } else {
4646        mask = ARMMMUIdxBit_E10_1 |
4647               ARMMMUIdxBit_E10_1_PAN |
4648               ARMMMUIdxBit_E10_0;
4649    }
4650    return mask;
4651}
4652
4653/* Return 56 if TBI is enabled, 64 otherwise. */
4654static int tlbbits_for_regime(CPUARMState *env, ARMMMUIdx mmu_idx,
4655                              uint64_t addr)
4656{
4657    uint64_t tcr = regime_tcr(env, mmu_idx);
4658    int tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
4659    int select = extract64(addr, 55, 1);
4660
4661    return (tbi >> select) & 1 ? 56 : 64;
4662}
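
/*
 * Worked example, assuming TCR_EL1.{TBI0,TBI1} = {1,0}: for an address with
 * bit 55 clear, select = 0 and (tbi >> 0) & 1 = 1, so only the low 56 bits
 * are significant to the TLB; for an address with bit 55 set, select = 1
 * and (tbi >> 1) & 1 = 0, so all 64 bits are significant.
 */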
4663
4664static int vae1_tlbbits(CPUARMState *env, uint64_t addr)
4665{
4666    uint64_t hcr = arm_hcr_el2_eff(env);
4667    ARMMMUIdx mmu_idx;
4668
4669    /* Only the regime of the mmu_idx below is significant. */
4670    if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
4671        mmu_idx = ARMMMUIdx_E20_0;
4672    } else {
4673        mmu_idx = ARMMMUIdx_E10_0;
4674    }
4675
4676    return tlbbits_for_regime(env, mmu_idx, addr);
4677}
4678
4679static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4680                                      uint64_t value)
4681{
4682    CPUState *cs = env_cpu(env);
4683    int mask = vae1_tlbmask(env);
4684
4685    tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
4686}
4687
4688static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
4689                                    uint64_t value)
4690{
4691    CPUState *cs = env_cpu(env);
4692    int mask = vae1_tlbmask(env);
4693
4694    if (tlb_force_broadcast(env)) {
4695        tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
4696    } else {
4697        tlb_flush_by_mmuidx(cs, mask);
4698    }
4699}
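
/*
 * Note on the pattern above: tlb_force_broadcast() (a helper earlier in
 * this file) reflects HCR_EL2.FB, which upgrades TLB maintenance issued
 * from EL1 from local to broadcast; that is why the non-IS write functions
 * still take the all_cpus_synced path in that case.
 */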
4700
4701static int e2_tlbmask(CPUARMState *env)
4702{
4703    return (ARMMMUIdxBit_E20_0 |
4704            ARMMMUIdxBit_E20_2 |
4705            ARMMMUIdxBit_E20_2_PAN |
4706            ARMMMUIdxBit_E2);
4707}
4708
4709static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
4710                                  uint64_t value)
4711{
4712    CPUState *cs = env_cpu(env);
4713    int mask = alle1_tlbmask(env);
4714
4715    tlb_flush_by_mmuidx(cs, mask);
4716}
4717
4718static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
4719                                  uint64_t value)
4720{
4721    CPUState *cs = env_cpu(env);
4722    int mask = e2_tlbmask(env);
4723
4724    tlb_flush_by_mmuidx(cs, mask);
4725}
4726
4727static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
4728                                  uint64_t value)
4729{
4730    ARMCPU *cpu = env_archcpu(env);
4731    CPUState *cs = CPU(cpu);
4732
4733    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E3);
4734}
4735
4736static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4737                                    uint64_t value)
4738{
4739    CPUState *cs = env_cpu(env);
4740    int mask = alle1_tlbmask(env);
4741
4742    tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
4743}
4744
4745static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4746                                    uint64_t value)
4747{
4748    CPUState *cs = env_cpu(env);
4749    int mask = e2_tlbmask(env);
4750
4751    tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
4752}
4753
4754static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4755                                    uint64_t value)
4756{
4757    CPUState *cs = env_cpu(env);
4758
4759    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E3);
4760}
4761
4762static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
4763                                 uint64_t value)
4764{
4765    /*
4766     * Invalidate by VA, EL2
4767     * Currently handles both VAE2 and VALE2, since we don't support
4768     * flush-last-level-only.
4769     */
4770    CPUState *cs = env_cpu(env);
4771    int mask = e2_tlbmask(env);
4772    uint64_t pageaddr = sextract64(value << 12, 0, 56);
4773
4774    tlb_flush_page_by_mmuidx(cs, pageaddr, mask);
4775}
4776
4777static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
4778                                 uint64_t value)
4779{
4780    /*
4781     * Invalidate by VA, EL3
4782     * Currently handles both VAE3 and VALE3, since we don't support
4783     * flush-last-level-only.
4784     */
4785    ARMCPU *cpu = env_archcpu(env);
4786    CPUState *cs = CPU(cpu);
4787    uint64_t pageaddr = sextract64(value << 12, 0, 56);
4788
4789    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E3);
4790}
4791
4792static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4793                                   uint64_t value)
4794{
4795    CPUState *cs = env_cpu(env);
4796    int mask = vae1_tlbmask(env);
4797    uint64_t pageaddr = sextract64(value << 12, 0, 56);
4798    int bits = vae1_tlbbits(env, pageaddr);
4799
4800    tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits);
4801}
4802
4803static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
4804                                 uint64_t value)
4805{
4806    /*
4807     * Invalidate by VA, EL1&0 (AArch64 version).
4808     * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1,
4809     * since we don't support flush-for-specific-ASID-only or
4810     * flush-last-level-only.
4811     */
4812    CPUState *cs = env_cpu(env);
4813    int mask = vae1_tlbmask(env);
4814    uint64_t pageaddr = sextract64(value << 12, 0, 56);
4815    int bits = vae1_tlbbits(env, pageaddr);
4816
4817    if (tlb_force_broadcast(env)) {
4818        tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits);
4819    } else {
4820        tlb_flush_page_bits_by_mmuidx(cs, pageaddr, mask, bits);
4821    }
4822}
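
/*
 * Worked example for the pageaddr computation above: the TLBI payload
 * carries VA[55:12] in value[43:0], so "value << 12" re-aligns it to bits
 * [55:12] and sextract64(..., 0, 56) sign-extends from bit 55 to give the
 * canonical 64-bit form. E.g. value[43:0] all ones yields
 * pageaddr = 0xfffffffffffff000.
 */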
4823
4824static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4825                                   uint64_t value)
4826{
4827    CPUState *cs = env_cpu(env);
4828    uint64_t pageaddr = sextract64(value << 12, 0, 56);
4829    int bits = tlbbits_for_regime(env, ARMMMUIdx_E2, pageaddr);
4830
4831    tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr,
4832                                                  ARMMMUIdxBit_E2, bits);
4833}
4834
4835static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4836                                   uint64_t value)
4837{
4838    CPUState *cs = env_cpu(env);
4839    uint64_t pageaddr = sextract64(value << 12, 0, 56);
4840    int bits = tlbbits_for_regime(env, ARMMMUIdx_E3, pageaddr);
4841
4842    tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr,
4843                                                  ARMMMUIdxBit_E3, bits);
4844}
4845
4846static int ipas2e1_tlbmask(CPUARMState *env, int64_t value)
4847{
4848    /*
4849     * The MSB of value is the NS field, which only applies if SEL2
4850     * is implemented and SCR_EL3.NS is not set (i.e. in secure mode).
4851     */
4852    return (value >= 0
4853            && cpu_isar_feature(aa64_sel2, env_archcpu(env))
4854            && arm_is_secure_below_el3(env)
4855            ? ARMMMUIdxBit_Stage2_S
4856            : ARMMMUIdxBit_Stage2);
4857}
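
/*
 * Example: a TLBI IPAS2E1 issued from secure state with bit 63 of the
 * written value clear (NS = 0) on a CPU with FEAT_SEL2 targets the secure
 * stage-2 index (ARMMMUIdxBit_Stage2_S); in every other case the
 * non-secure stage-2 index is invalidated.
 */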
4858
4859static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
4860                                    uint64_t value)
4861{
4862    CPUState *cs = env_cpu(env);
4863    int mask = ipas2e1_tlbmask(env, value);
4864    uint64_t pageaddr = sextract64(value << 12, 0, 56);
4865
4866    if (tlb_force_broadcast(env)) {
4867        tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, mask);
4868    } else {
4869        tlb_flush_page_by_mmuidx(cs, pageaddr, mask);
4870    }
4871}
4872
4873static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4874                                      uint64_t value)
4875{
4876    CPUState *cs = env_cpu(env);
4877    int mask = ipas2e1_tlbmask(env, value);
4878    uint64_t pageaddr = sextract64(value << 12, 0, 56);
4879
4880    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, mask);
4881}
4882
4883#ifdef TARGET_AARCH64
4884typedef struct {
4885    uint64_t base;
4886    uint64_t length;
4887} TLBIRange;
4888
4889static ARMGranuleSize tlbi_range_tg_to_gran_size(int tg)
4890{
4891    /*
4892     * Note that the TLBI range TG field encoding differs from both
4893     * TG0 and TG1 encodings.
4894     */
4895    switch (tg) {
4896    case 1:
4897        return Gran4K;
4898    case 2:
4899        return Gran16K;
4900    case 3:
4901        return Gran64K;
4902    default:
4903        return GranInvalid;
4904    }
4905}
4906
4907static TLBIRange tlbi_aa64_get_range(CPUARMState *env, ARMMMUIdx mmuidx,
4908                                     uint64_t value)
4909{
4910    unsigned int page_size_granule, page_shift, num, scale, exponent;
4911    /* Extract one bit to represent the va selector in use. */
4912    uint64_t select = sextract64(value, 36, 1);
4913    ARMVAParameters param = aa64_va_parameters(env, select, mmuidx, true, false);
4914    TLBIRange ret = { };
4915    ARMGranuleSize gran;
4916
4917    page_size_granule = extract64(value, 46, 2);
4918    gran = tlbi_range_tg_to_gran_size(page_size_granule);
4919
4920    /* The granule encoded in value must match the granule in use. */
4921    if (gran != param.gran) {
4922        qemu_log_mask(LOG_GUEST_ERROR, "Invalid tlbi page size granule %d\n",
4923                      page_size_granule);
4924        return ret;
4925    }
4926
4927    page_shift = arm_granule_bits(gran);
4928    num = extract64(value, 39, 5);
4929    scale = extract64(value, 44, 2);
4930    exponent = (5 * scale) + 1;
4931
4932    ret.length = (num + 1) << (exponent + page_shift);
4933
4934    if (param.select) {
4935        ret.base = sextract64(value, 0, 37);
4936    } else {
4937        ret.base = extract64(value, 0, 37);
4938    }
4939    if (param.ds) {
4940        /*
4941         * With DS=1, BaseADDR is always shifted left by 16 so that it
4942         * is able to address all 52 VA bits.  The input address is perforce
4943         * aligned on a 64k boundary regardless of translation granule.
4944         */
4945        page_shift = 16;
4946    }
4947    ret.base <<= page_shift;
4948
4949    return ret;
4950}
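
/*
 * Worked example for the length computation above, assuming a 4K granule
 * (page_shift = 12): SCALE = 1 and NUM = 7 give exponent = 5 * 1 + 1 = 6
 * and length = (7 + 1) << (6 + 12) = 2MiB, i.e. 512 4K pages, matching the
 * architected (NUM + 1) * 2^(5*SCALE + 1) * Translation_Granule_Size.
 */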
4951
4952static void do_rvae_write(CPUARMState *env, uint64_t value,
4953                          int idxmap, bool synced)
4954{
4955    ARMMMUIdx one_idx = ARM_MMU_IDX_A | ctz32(idxmap);
4956    TLBIRange range;
4957    int bits;
4958
4959    range = tlbi_aa64_get_range(env, one_idx, value);
4960    bits = tlbbits_for_regime(env, one_idx, range.base);
4961
4962    if (synced) {
4963        tlb_flush_range_by_mmuidx_all_cpus_synced(env_cpu(env),
4964                                                  range.base,
4965                                                  range.length,
4966                                                  idxmap,
4967                                                  bits);
4968    } else {
4969        tlb_flush_range_by_mmuidx(env_cpu(env), range.base,
4970                                  range.length, idxmap, bits);
4971    }
4972}
4973
4974static void tlbi_aa64_rvae1_write(CPUARMState *env,
4975                                  const ARMCPRegInfo *ri,
4976                                  uint64_t value)
4977{
4978    /*
4979     * Invalidate by VA range, EL1&0.
4980     * Currently handles all of RVAE1, RVAAE1, RVAALE1 and RVALE1,
4981     * since we don't support flush-for-specific-ASID-only or
4982     * flush-last-level-only.
4983     */
4984
4985    do_rvae_write(env, value, vae1_tlbmask(env),
4986                  tlb_force_broadcast(env));
4987}
4988
4989static void tlbi_aa64_rvae1is_write(CPUARMState *env,
4990                                    const ARMCPRegInfo *ri,
4991                                    uint64_t value)
4992{
4993    /*
4994     * Invalidate by VA range, Inner/Outer Shareable EL1&0.
4995     * Currently handles all of RVAE1IS, RVAE1OS, RVAAE1IS, RVAAE1OS,
4996     * RVAALE1IS, RVAALE1OS, RVALE1IS and RVALE1OS, since we don't support
4997     * flush-for-specific-ASID-only, flush-last-level-only or inner/outer
4998     * shareable specific flushes.
4999     */
5000
5001    do_rvae_write(env, value, vae1_tlbmask(env), true);
5002}
5003
5004static int vae2_tlbmask(CPUARMState *env)
5005{
5006    return ARMMMUIdxBit_E2;
5007}
5008
5009static void tlbi_aa64_rvae2_write(CPUARMState *env,
5010                                  const ARMCPRegInfo *ri,
5011                                  uint64_t value)
5012{
5013    /*
5014     * Invalidate by VA range, EL2.
5015     * Currently handles all of RVAE2 and RVALE2,
5016     * since we don't support flush-for-specific-ASID-only or
5017     * flush-last-level-only.
5018     */
5019
5020    do_rvae_write(env, value, vae2_tlbmask(env),
5021                  tlb_force_broadcast(env));
5024}
5025
5026static void tlbi_aa64_rvae2is_write(CPUARMState *env,
5027                                    const ARMCPRegInfo *ri,
5028                                    uint64_t value)
5029{
5030    /*
5031     * Invalidate by VA range, Inner/Outer Shareable, EL2.
5032     * Currently handles all of RVAE2IS, RVAE2OS, RVALE2IS and RVALE2OS,
5033     * since we don't support flush-for-specific-ASID-only,
5034     * flush-last-level-only or inner/outer shareable specific flushes.
5035     */
5036
5037    do_rvae_write(env, value, vae2_tlbmask(env), true);
5039}
5040
5041static void tlbi_aa64_rvae3_write(CPUARMState *env,
5042                                  const ARMCPRegInfo *ri,
5043                                  uint64_t value)
5044{
5045    /*
5046     * Invalidate by VA range, EL3.
5047     * Currently handles all of RVAE3 and RVALE3,
5048     * since we don't support flush-for-specific-ASID-only or
5049     * flush-last-level-only.
5050     */
5051
5052    do_rvae_write(env, value, ARMMMUIdxBit_E3, tlb_force_broadcast(env));
5053}
5054
5055static void tlbi_aa64_rvae3is_write(CPUARMState *env,
5056                                    const ARMCPRegInfo *ri,
5057                                    uint64_t value)
5058{
5059    /*
5060     * Invalidate by VA range, EL3, Inner/Outer Shareable.
5061     * Currently handles all of RVAE3IS, RVAE3OS, RVALE3IS and RVALE3OS,
5062     * since we don't support flush-for-specific-ASID-only,
5063     * flush-last-level-only or inner/outer specific flushes.
5064     */
5065
5066    do_rvae_write(env, value, ARMMMUIdxBit_E3, true);
5067}
5068
5069static void tlbi_aa64_ripas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
5070                                     uint64_t value)
5071{
5072    do_rvae_write(env, value, ipas2e1_tlbmask(env, value),
5073                  tlb_force_broadcast(env));
5074}
5075
5076static void tlbi_aa64_ripas2e1is_write(CPUARMState *env,
5077                                       const ARMCPRegInfo *ri,
5078                                       uint64_t value)
5079{
5080    do_rvae_write(env, value, ipas2e1_tlbmask(env, value), true);
5081}
5082#endif
5083
5084static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
5085                                      bool isread)
5086{
5087    int cur_el = arm_current_el(env);
5088
5089    if (cur_el < 2) {
5090        uint64_t hcr = arm_hcr_el2_eff(env);
5091
5092        if (cur_el == 0) {
5093            if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
5094                if (!(env->cp15.sctlr_el[2] & SCTLR_DZE)) {
5095                    return CP_ACCESS_TRAP_EL2;
5096                }
5097            } else {
5098                if (!(env->cp15.sctlr_el[1] & SCTLR_DZE)) {
5099                    return CP_ACCESS_TRAP;
5100                }
5101                if (hcr & HCR_TDZ) {
5102                    return CP_ACCESS_TRAP_EL2;
5103                }
5104            }
5105        } else if (hcr & HCR_TDZ) {
5106            return CP_ACCESS_TRAP_EL2;
5107        }
5108    }
5109    return CP_ACCESS_OK;
5110}
5111
5112static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
5113{
5114    ARMCPU *cpu = env_archcpu(env);
5115    int dzp_bit = 1 << 4;
5116
5117    /* DZP set means DC ZVA is prohibited; clear it when access is allowed */
5118    if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) {
5119        dzp_bit = 0;
5120    }
5121    return cpu->dcz_blocksize | dzp_bit;
5122}
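
/*
 * Example encoding: DCZID_EL0.BS (bits [3:0]) is log2 of the block size in
 * words, so the dcz_blocksize of 4 used by QEMU's AArch64 CPU models
 * advertises 2^4 * 4 = 64-byte DC ZVA blocks; bit 4 (DZP) is set above
 * whenever a ZVA access would trap.
 */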
5123
5124static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
5125                                    bool isread)
5126{
5127    if (!(env->pstate & PSTATE_SP)) {
5128        /*
5129         * Access to SP_EL0 is undefined if it's being used as
5130         * the stack pointer.
5131         */
5132        return CP_ACCESS_TRAP_UNCATEGORIZED;
5133    }
5134    return CP_ACCESS_OK;
5135}
5136
5137static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri)
5138{
5139    return env->pstate & PSTATE_SP;
5140}
5141
5142static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
5143{
5144    update_spsel(env, val);
5145}
5146
5147static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
5148                        uint64_t value)
5149{
5150    ARMCPU *cpu = env_archcpu(env);
5151
5152    if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) {
5153        /* M bit is RAZ/WI for PMSA with no MPU implemented */
5154        value &= ~SCTLR_M;
5155    }
5156
5157    /* ??? Lots of these bits are not implemented.  */
5158
5159    if (ri->state == ARM_CP_STATE_AA64 && !cpu_isar_feature(aa64_mte, cpu)) {
5160        if (ri->opc1 == 6) { /* SCTLR_EL3 */
5161            value &= ~(SCTLR_ITFSB | SCTLR_TCF | SCTLR_ATA);
5162        } else {
5163            value &= ~(SCTLR_ITFSB | SCTLR_TCF0 | SCTLR_TCF |
5164                       SCTLR_ATA0 | SCTLR_ATA);
5165        }
5166    }
5167
5168    if (raw_read(env, ri) == value) {
5169        /*
5170         * Skip the TLB flush if nothing actually changed; Linux likes
5171         * to do a lot of pointless SCTLR writes.
5172         */
5173        return;
5174    }
5175
5176    raw_write(env, ri, value);
5177
5178    /* This may enable/disable the MMU, so do a TLB flush.  */
5179    tlb_flush(CPU(cpu));
5180
5181    if (tcg_enabled() && ri->type & ARM_CP_SUPPRESS_TB_END) {
5182        /*
5183         * Normally we would always end the TB on an SCTLR write; see the
5184         * comment in ARMCPRegInfo sctlr initialization below for why Xscale
5185         * is special.  Setting ARM_CP_SUPPRESS_TB_END also stops the rebuild
5186         * of hflags from the translator, so do it here.
5187         */
5188        arm_rebuild_hflags(env);
5189    }
5190}
5191
5192static void mdcr_el3_write(CPUARMState *env, const ARMCPRegInfo *ri,
5193                           uint64_t value)
5194{
5195    /*
5196     * Some MDCR_EL3 bits affect whether PMU counters are running:
5197     * if we are trying to change any of those then we must
5198     * bracket this update with PMU start/finish calls.
5199     */
5200    bool pmu_op = (env->cp15.mdcr_el3 ^ value) & MDCR_EL3_PMU_ENABLE_BITS;
5201
5202    if (pmu_op) {
5203        pmu_op_start(env);
5204    }
5205    env->cp15.mdcr_el3 = value;
5206    if (pmu_op) {
5207        pmu_op_finish(env);
5208    }
5209}
5210
5211static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
5212                       uint64_t value)
5213{
5214    /* Not all bits defined for MDCR_EL3 exist in the AArch32 SDCR */
5215    mdcr_el3_write(env, ri, value & SDCR_VALID_MASK);
5216}
5217
5218static void mdcr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
5219                           uint64_t value)
5220{
5221    /*
5222     * Some MDCR_EL2 bits affect whether PMU counters are running:
5223     * if we are trying to change any of those then we must
5224     * bracket this update with PMU start/finish calls.
5225     */
5226    bool pmu_op = (env->cp15.mdcr_el2 ^ value) & MDCR_EL2_PMU_ENABLE_BITS;
5227
5228    if (pmu_op) {
5229        pmu_op_start(env);
5230    }
5231    env->cp15.mdcr_el2 = value;
5232    if (pmu_op) {
5233        pmu_op_finish(env);
5234    }
5235}
5236
5237#ifdef CONFIG_USER_ONLY
5238/*
5239 * `IC IVAU` is handled to improve compatibility with JITs that dual-map their
5240 * code to get around W^X restrictions, where one region is writable and the
5241 * other is executable.
5242 *
5243 * Since the executable region is never written to, we cannot detect code
5244 * changes when running in user mode, and rely on the emulated JIT telling us
5245 * that the code has changed by executing this instruction.
5246 */
5247static void ic_ivau_write(CPUARMState *env, const ARMCPRegInfo *ri,
5248                          uint64_t value)
5249{
5250    uint64_t icache_line_mask, start_address, end_address;
5251    const ARMCPU *cpu;
5252
5253    cpu = env_archcpu(env);
5254
5255    icache_line_mask = (4 << extract32(cpu->ctr, 0, 4)) - 1;
5256    start_address = value & ~icache_line_mask;
5257    end_address = value | icache_line_mask;
5258
5259    mmap_lock();
5260
5261    tb_invalidate_phys_range(start_address, end_address);
5262
5263    mmap_unlock();
5264}
5265#endif
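
/*
 * Example for the line-size math in ic_ivau_write() above: CTR_EL0.IminLine
 * (bits [3:0]) is log2 of the i-cache line size in words, so IminLine = 4
 * gives 4 << 4 = 64-byte lines and icache_line_mask = 63; the invalidated
 * range is the written VA rounded out to those 64-byte boundaries.
 */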
5266
5267static const ARMCPRegInfo v8_cp_reginfo[] = {
5268    /*
5269     * Minimal set of EL0-visible registers. This will need to be expanded
5270     * significantly for system emulation of AArch64 CPUs.
5271     */
5272    { .name = "NZCV", .state = ARM_CP_STATE_AA64,
5273      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2,
5274      .access = PL0_RW, .type = ARM_CP_NZCV },
5275    { .name = "DAIF", .state = ARM_CP_STATE_AA64,
5276      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2,
5277      .type = ARM_CP_NO_RAW,
5278      .access = PL0_RW, .accessfn = aa64_daif_access,
5279      .fieldoffset = offsetof(CPUARMState, daif),
5280      .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore },
5281    { .name = "FPCR", .state = ARM_CP_STATE_AA64,
5282      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4,
5283      .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
5284      .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write },
5285    { .name = "FPSR", .state = ARM_CP_STATE_AA64,
5286      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4,
5287      .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
5288      .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write },
5289    { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
5290      .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0,
5291      .access = PL0_R, .type = ARM_CP_NO_RAW,
5292      .fgt = FGT_DCZID_EL0,
5293      .readfn = aa64_dczid_read },
5294    { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64,
5295      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1,
5296      .access = PL0_W, .type = ARM_CP_DC_ZVA,
5297#ifndef CONFIG_USER_ONLY
5298      /* Avoid overhead of an access check that always passes in user-mode */
5299      .accessfn = aa64_zva_access,
5300      .fgt = FGT_DCZVA,
5301#endif
5302    },
5303    { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64,
5304      .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2,
5305      .access = PL1_R, .type = ARM_CP_CURRENTEL },
5306    /*
5307     * Instruction cache ops. All of these except `IC IVAU` are NOPs
5308     * because we don't emulate caches.
5309     */
5310    { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64,
5311      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
5312      .access = PL1_W, .type = ARM_CP_NOP,
5313      .fgt = FGT_ICIALLUIS,
5314      .accessfn = access_ticab },
5315    { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64,
5316      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
5317      .access = PL1_W, .type = ARM_CP_NOP,
5318      .fgt = FGT_ICIALLU,
5319      .accessfn = access_tocu },
5320    { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64,
5321      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1,
5322      .access = PL0_W,
5323      .fgt = FGT_ICIVAU,
5324      .accessfn = access_tocu,
5325#ifdef CONFIG_USER_ONLY
5326      .type = ARM_CP_NO_RAW,
5327      .writefn = ic_ivau_write
5328#else
5329      .type = ARM_CP_NOP
5330#endif
5331    },
5332    /* Cache ops: all NOPs since we don't emulate caches */
5333    { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64,
5334      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
5335      .access = PL1_W, .accessfn = aa64_cacheop_poc_access,
5336      .fgt = FGT_DCIVAC,
5337      .type = ARM_CP_NOP },
5338    { .name = "DC_ISW", .state = ARM_CP_STATE_AA64,
5339      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
5340      .fgt = FGT_DCISW,
5341      .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
5342    { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64,
5343      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1,
5344      .access = PL0_W, .type = ARM_CP_NOP,
5345      .fgt = FGT_DCCVAC,
5346      .accessfn = aa64_cacheop_poc_access },
5347    { .name = "DC_CSW", .state = ARM_CP_STATE_AA64,
5348      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
5349      .fgt = FGT_DCCSW,
5350      .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
5351    { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64,
5352      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1,
5353      .access = PL0_W, .type = ARM_CP_NOP,
5354      .fgt = FGT_DCCVAU,
5355      .accessfn = access_tocu },
5356    { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64,
5357      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1,
5358      .access = PL0_W, .type = ARM_CP_NOP,
5359      .fgt = FGT_DCCIVAC,
5360      .accessfn = aa64_cacheop_poc_access },
5361    { .name = "DC_CISW", .state = ARM_CP_STATE_AA64,
5362      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
5363      .fgt = FGT_DCCISW,
5364      .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
5365    /* TLBI operations */
5366    { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64,
5367      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
5368      .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW,
5369      .fgt = FGT_TLBIVMALLE1IS,
5370      .writefn = tlbi_aa64_vmalle1is_write },
5371    { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64,
5372      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
5373      .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW,
5374      .fgt = FGT_TLBIVAE1IS,
5375      .writefn = tlbi_aa64_vae1is_write },
5376    { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64,
5377      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
5378      .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW,
5379      .fgt = FGT_TLBIASIDE1IS,
5380      .writefn = tlbi_aa64_vmalle1is_write },
5381    { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64,
5382      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
5383      .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW,
5384      .fgt = FGT_TLBIVAAE1IS,
5385      .writefn = tlbi_aa64_vae1is_write },
5386    { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64,
5387      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
5388      .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW,
5389      .fgt = FGT_TLBIVALE1IS,
5390      .writefn = tlbi_aa64_vae1is_write },
5391    { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64,
5392      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
5393      .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW,
5394      .fgt = FGT_TLBIVAALE1IS,
5395      .writefn = tlbi_aa64_vae1is_write },
5396    { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64,
5397      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
5398      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
5399      .fgt = FGT_TLBIVMALLE1,
5400      .writefn = tlbi_aa64_vmalle1_write },
5401    { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64,
5402      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
5403      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
5404      .fgt = FGT_TLBIVAE1,
5405      .writefn = tlbi_aa64_vae1_write },
5406    { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64,
5407      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
5408      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
5409      .fgt = FGT_TLBIASIDE1,
5410      .writefn = tlbi_aa64_vmalle1_write },
5411    { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64,
5412      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
5413      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
5414      .fgt = FGT_TLBIVAAE1,
5415      .writefn = tlbi_aa64_vae1_write },
5416    { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64,
5417      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
5418      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
5419      .fgt = FGT_TLBIVALE1,
5420      .writefn = tlbi_aa64_vae1_write },
5421    { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64,
5422      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
5423      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
5424      .fgt = FGT_TLBIVAALE1,
5425      .writefn = tlbi_aa64_vae1_write },
5426    { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64,
5427      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
5428      .access = PL2_W, .type = ARM_CP_NO_RAW,
5429      .writefn = tlbi_aa64_ipas2e1is_write },
5430    { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64,
5431      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
5432      .access = PL2_W, .type = ARM_CP_NO_RAW,
5433      .writefn = tlbi_aa64_ipas2e1is_write },
5434    { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64,
5435      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
5436      .access = PL2_W, .type = ARM_CP_NO_RAW,
5437      .writefn = tlbi_aa64_alle1is_write },
5438    { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64,
5439      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6,
5440      .access = PL2_W, .type = ARM_CP_NO_RAW,
5441      .writefn = tlbi_aa64_alle1is_write },
5442    { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64,
5443      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
5444      .access = PL2_W, .type = ARM_CP_NO_RAW,
5445      .writefn = tlbi_aa64_ipas2e1_write },
5446    { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64,
5447      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
5448      .access = PL2_W, .type = ARM_CP_NO_RAW,
5449      .writefn = tlbi_aa64_ipas2e1_write },
5450    { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64,
5451      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
5452      .access = PL2_W, .type = ARM_CP_NO_RAW,
5453      .writefn = tlbi_aa64_alle1_write },
5454    { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64,
5455      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6,
5456      .access = PL2_W, .type = ARM_CP_NO_RAW,
5457      .writefn = tlbi_aa64_alle1is_write },
5458#ifndef CONFIG_USER_ONLY
5459    /* 64 bit address translation operations */
5460    { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
5461      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
5462      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
5463      .fgt = FGT_ATS1E1R,
5464      .writefn = ats_write64 },
5465    { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
5466      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
5467      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
5468      .fgt = FGT_ATS1E1W,
5469      .writefn = ats_write64 },
5470    { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
5471      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
5472      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
5473      .fgt = FGT_ATS1E0R,
5474      .writefn = ats_write64 },
5475    { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
5476      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
5477      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
5478      .fgt = FGT_ATS1E0W,
5479      .writefn = ats_write64 },
5480    { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64,
5481      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4,
5482      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
5483      .writefn = ats_write64 },
5484    { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64,
5485      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5,
5486      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
5487      .writefn = ats_write64 },
5488    { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64,
5489      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6,
5490      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
5491      .writefn = ats_write64 },
5492    { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64,
5493      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7,
5494      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
5495      .writefn = ats_write64 },
5496    /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */
5497    { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64,
5498      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0,
5499      .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
5500      .writefn = ats_write64 },
5501    { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64,
5502      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1,
5503      .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
5504      .writefn = ats_write64 },
5505    { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64,
5506      .type = ARM_CP_ALIAS,
5507      .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0,
5508      .access = PL1_RW, .resetvalue = 0,
5509      .fgt = FGT_PAR_EL1,
5510      .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]),
5511      .writefn = par_write },
5512#endif
5513    /* TLB invalidate last level of translation table walk */
5514    { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
5515      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis,
5516      .writefn = tlbimva_is_write },
5517    { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
5518      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis,
5519      .writefn = tlbimvaa_is_write },
5520    { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
5521      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
5522      .writefn = tlbimva_write },
5523    { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
5524      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
5525      .writefn = tlbimvaa_write },
5526    { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
5527      .type = ARM_CP_NO_RAW, .access = PL2_W,
5528      .writefn = tlbimva_hyp_write },
5529    { .name = "TLBIMVALHIS",
5530      .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
5531      .type = ARM_CP_NO_RAW, .access = PL2_W,
5532      .writefn = tlbimva_hyp_is_write },
5533    { .name = "TLBIIPAS2",
5534      .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
5535      .type = ARM_CP_NO_RAW, .access = PL2_W,
5536      .writefn = tlbiipas2_hyp_write },
5537    { .name = "TLBIIPAS2IS",
5538      .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
5539      .type = ARM_CP_NO_RAW, .access = PL2_W,
5540      .writefn = tlbiipas2is_hyp_write },
5541    { .name = "TLBIIPAS2L",
5542      .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
5543      .type = ARM_CP_NO_RAW, .access = PL2_W,
5544      .writefn = tlbiipas2_hyp_write },
5545    { .name = "TLBIIPAS2LIS",
5546      .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
5547      .type = ARM_CP_NO_RAW, .access = PL2_W,
5548      .writefn = tlbiipas2is_hyp_write },
5549    /* 32 bit cache operations */
5550    { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
5551      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_ticab },
5552    { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6,
5553      .type = ARM_CP_NOP, .access = PL1_W },
5554    { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
5555      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tocu },
5556    { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1,
5557      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tocu },
5558    { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6,
5559      .type = ARM_CP_NOP, .access = PL1_W },
5560    { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7,
5561      .type = ARM_CP_NOP, .access = PL1_W },
5562    { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
5563      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
5564    { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
5565      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
5566    { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1,
5567      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
5568    { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
5569      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
5570    { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1,
5571      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tocu },
5572    { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1,
5573      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
5574    { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
5575      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
5576    /* MMU Domain access control / MPU write buffer control */
5577    { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
5578      .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
5579      .writefn = dacr_write, .raw_writefn = raw_write,
5580      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
5581                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
5582    { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
5583      .type = ARM_CP_ALIAS,
5584      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
5585      .access = PL1_RW,
5586      .fieldoffset = offsetof(CPUARMState, elr_el[1]) },
5587    { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
5588      .type = ARM_CP_ALIAS,
5589      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
5590      .access = PL1_RW,
5591      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) },
5592    /*
5593     * We rely on the access checks not allowing the guest to write to the
5594     * state field when SPSel indicates that it's being used as the stack
5595     * pointer.
5596     */
5597    { .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
5598      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0,
5599      .access = PL1_RW, .accessfn = sp_el0_access,
5600      .type = ARM_CP_ALIAS,
5601      .fieldoffset = offsetof(CPUARMState, sp_el[0]) },
5602    { .name = "SP_EL1", .state = ARM_CP_STATE_AA64,
5603      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0,
5604      .access = PL2_RW, .type = ARM_CP_ALIAS | ARM_CP_EL3_NO_EL2_KEEP,
5605      .fieldoffset = offsetof(CPUARMState, sp_el[1]) },
5606    { .name = "SPSel", .state = ARM_CP_STATE_AA64,
5607      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
5608      .type = ARM_CP_NO_RAW,
5609      .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
5610    { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64,
5611      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0,
5612      .access = PL2_RW,
5613      .type = ARM_CP_ALIAS | ARM_CP_FPU | ARM_CP_EL3_NO_EL2_KEEP,
5614      .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]) },
5615    { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
5616      .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
5617      .access = PL2_RW, .resetvalue = 0, .type = ARM_CP_EL3_NO_EL2_KEEP,
5618      .writefn = dacr_write, .raw_writefn = raw_write,
5619      .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
5620    { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
5621      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
5622      .access = PL2_RW, .resetvalue = 0, .type = ARM_CP_EL3_NO_EL2_KEEP,
5623      .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
5624    { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64,
5625      .type = ARM_CP_ALIAS,
5626      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0,
5627      .access = PL2_RW,
5628      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) },
5629    { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64,
5630      .type = ARM_CP_ALIAS,
5631      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1,
5632      .access = PL2_RW,
5633      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) },
5634    { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64,
5635      .type = ARM_CP_ALIAS,
5636      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2,
5637      .access = PL2_RW,
5638      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) },
5639    { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64,
5640      .type = ARM_CP_ALIAS,
5641      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3,
5642      .access = PL2_RW,
5643      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) },
5644    { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64,
5645      .type = ARM_CP_IO,
5646      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1,
5647      .resetvalue = 0,
5648      .access = PL3_RW,
5649      .writefn = mdcr_el3_write,
5650      .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) },
5651    { .name = "SDCR", .type = ARM_CP_ALIAS | ARM_CP_IO,
5652      .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1,
5653      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
5654      .writefn = sdcr_write,
5655      .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) },
5656};
5657
5658static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask)
5659{
5660    ARMCPU *cpu = env_archcpu(env);
5661
5662    if (arm_feature(env, ARM_FEATURE_V8)) {
5663        valid_mask |= MAKE_64BIT_MASK(0, 34);  /* ARMv8.0 */
5664    } else {
5665        valid_mask |= MAKE_64BIT_MASK(0, 28);  /* ARMv7VE */
5666    }
5667
5668    if (arm_feature(env, ARM_FEATURE_EL3)) {
5669        valid_mask &= ~HCR_HCD;
5670    } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
5671        /*
5672         * Architecturally HCR.TSC is RES0 if EL3 is not implemented.
5673         * However, if we're using the SMC PSCI conduit then QEMU is
5674         * effectively acting like EL3 firmware and so the guest at
5675         * EL2 should retain the ability to prevent EL1 from being
5676         * able to make SMC calls into the ersatz firmware, so in
5677         * that case HCR.TSC should be read/write.
5678         */
5679        valid_mask &= ~HCR_TSC;
5680    }
5681
5682    if (arm_feature(env, ARM_FEATURE_AARCH64)) {
5683        if (cpu_isar_feature(aa64_vh, cpu)) {
5684            valid_mask |= HCR_E2H;
5685        }
5686        if (cpu_isar_feature(aa64_ras, cpu)) {
5687            valid_mask |= HCR_TERR | HCR_TEA;
5688        }
5689        if (cpu_isar_feature(aa64_lor, cpu)) {
5690            valid_mask |= HCR_TLOR;
5691        }
5692        if (cpu_isar_feature(aa64_pauth, cpu)) {
5693            valid_mask |= HCR_API | HCR_APK;
5694        }
5695        if (cpu_isar_feature(aa64_mte, cpu)) {
5696            valid_mask |= HCR_ATA | HCR_DCT | HCR_TID5;
5697        }
5698        if (cpu_isar_feature(aa64_scxtnum, cpu)) {
5699            valid_mask |= HCR_ENSCXT;
5700        }
5701        if (cpu_isar_feature(aa64_fwb, cpu)) {
5702            valid_mask |= HCR_FWB;
5703        }
5704        if (cpu_isar_feature(aa64_rme, cpu)) {
5705            valid_mask |= HCR_GPF;
5706        }
5707    }
5708
5709    if (cpu_isar_feature(any_evt, cpu)) {
5710        valid_mask |= HCR_TTLBIS | HCR_TTLBOS | HCR_TICAB | HCR_TOCU | HCR_TID4;
5711    } else if (cpu_isar_feature(any_half_evt, cpu)) {
5712        valid_mask |= HCR_TICAB | HCR_TOCU | HCR_TID4;
5713    }
5714
5715    /* Clear RES0 bits.  */
5716    value &= valid_mask;
5717
5718    /*
5719     * These bits change the MMU setup:
5720     * HCR_VM enables stage 2 translation
5721     * HCR_PTW forbids certain page-table setups
5722     * HCR_DC disables stage1 and enables stage2 translation
5723     * HCR_DCT enables tagging on (disabled) stage1 translation
5724     * HCR_FWB changes the interpretation of stage2 descriptor bits
5725     */
5726    if ((env->cp15.hcr_el2 ^ value) &
5727        (HCR_VM | HCR_PTW | HCR_DC | HCR_DCT | HCR_FWB)) {
5728        tlb_flush(CPU(cpu));
5729    }
5730    env->cp15.hcr_el2 = value;
5731
5732    /*
5733     * Updates to VI and VF require us to update the status of
5734     * virtual interrupts, which are the logical OR of these bits
5735     * and the state of the input lines from the GIC. (This requires
5736     * that we have the iothread lock, which is done by marking the
5737     * reginfo structs as ARM_CP_IO.)
5738     * Note that if a write to HCR pends a VIRQ or VFIQ it is never
5739     * possible for it to be taken immediately, because VIRQ and
5740     * VFIQ are masked unless running at EL0 or EL1, and HCR
5741     * can only be written at EL2.
5742     */
5743    g_assert(qemu_mutex_iothread_locked());
5744    arm_cpu_update_virq(cpu);
5745    arm_cpu_update_vfiq(cpu);
5746    arm_cpu_update_vserr(cpu);
5747}
5748
5749static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
5750{
5751    do_hcr_write(env, value, 0);
5752}
5753
5754static void hcr_writehigh(CPUARMState *env, const ARMCPRegInfo *ri,
5755                          uint64_t value)
5756{
5757    /* Handle HCR2 write, i.e. write to high half of HCR_EL2 */
5758    value = deposit64(env->cp15.hcr_el2, 32, 32, value);
5759    do_hcr_write(env, value, MAKE_64BIT_MASK(0, 32));
5760}
5761
5762static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri,
5763                         uint64_t value)
5764{
5765    /* Handle HCR write, i.e. write to low half of HCR_EL2 */
5766    value = deposit64(env->cp15.hcr_el2, 0, 32, value);
5767    do_hcr_write(env, value, MAKE_64BIT_MASK(32, 32));
5768}
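
/*
 * Worked example of the split-write plumbing above: deposit64(old, 32, 32, v)
 * replaces bits [63:32] of old, so with hcr_el2 = 0x1 an AArch32 write of
 * 0x2 to HCR2 produces 0x0000000200000001; the untouched half is then
 * passed to do_hcr_write() as already-valid via the valid_mask argument.
 */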
5769
5770/*
5771 * Return the effective value of HCR_EL2 for the given security state.
5772 * Bits that are not included here:
5773 * RW       (read from SCR_EL3.RW as needed)
5774 */
5775uint64_t arm_hcr_el2_eff_secstate(CPUARMState *env, bool secure)
5776{
5777    uint64_t ret = env->cp15.hcr_el2;
5778
5779    if (!arm_is_el2_enabled_secstate(env, secure)) {
5780        /*
5781         * "This register has no effect if EL2 is not enabled in the
5782         * current Security state".  This is ARMv8.4-SecEL2 speak for
5783         * !(SCR_EL3.NS==1 || SCR_EL3.EEL2==1).
5784         *
5785         * Prior to that, the language was "In an implementation that
5786         * includes EL3, when the value of SCR_EL3.NS is 0 the PE behaves
5787         * as if this field is 0 for all purposes other than a direct
5788         * read or write access of HCR_EL2".  With lots of enumeration
5789         * on a per-field basis.  In current QEMU, this condition is
5790         * arm_is_secure_below_el3.
5791         *
5792         * Since the v8.4 language applies to the entire register, and
5793         * appears to be backward compatible, use that.
5794         */
5795        return 0;
5796    }
5797
5798    /*
5799     * For a cpu that supports both aarch64 and aarch32, we can set bits
5800     * in HCR_EL2 (e.g. via EL3) that are RES0 when we enter EL2 as aa32.
5801     * Ignore all of the bits in HCR+HCR2 that are not valid for aarch32.
5802     */
5803    if (!arm_el_is_aa64(env, 2)) {
5804        uint64_t aa32_valid;
5805
5806        /*
5807         * These bits are up-to-date as of ARMv8.6.
5808         * For HCR, it's easiest to list just the 2 bits that are invalid.
5809         * For HCR2, list those that are valid.
5810         */
5811        aa32_valid = MAKE_64BIT_MASK(0, 32) & ~(HCR_RW | HCR_TDZ);
5812        aa32_valid |= (HCR_CD | HCR_ID | HCR_TERR | HCR_TEA | HCR_MIOCNCE |
5813                       HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_TTLBIS);
5814        ret &= aa32_valid;
5815    }
5816
5817    if (ret & HCR_TGE) {
5818        /* These bits are up-to-date as of ARMv8.6.  */
5819        if (ret & HCR_E2H) {
5820            ret &= ~(HCR_VM | HCR_FMO | HCR_IMO | HCR_AMO |
5821                     HCR_BSU_MASK | HCR_DC | HCR_TWI | HCR_TWE |
5822                     HCR_TID0 | HCR_TID2 | HCR_TPCP | HCR_TPU |
5823                     HCR_TDZ | HCR_CD | HCR_ID | HCR_MIOCNCE |
5824                     HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_ENSCXT |
5825                     HCR_TTLBIS | HCR_TTLBOS | HCR_TID5);
5826        } else {
5827            ret |= HCR_FMO | HCR_IMO | HCR_AMO;
5828        }
5829        ret &= ~(HCR_SWIO | HCR_PTW | HCR_VF | HCR_VI | HCR_VSE |
5830                 HCR_FB | HCR_TID1 | HCR_TID3 | HCR_TSC | HCR_TACR |
5831                 HCR_TSW | HCR_TTLB | HCR_TVM | HCR_HCD | HCR_TRVM |
5832                 HCR_TLOR);
5833    }
5834
5835    return ret;
5836}
5837
5838uint64_t arm_hcr_el2_eff(CPUARMState *env)
5839{
5840    if (arm_feature(env, ARM_FEATURE_M)) {
5841        return 0;
5842    }
5843    return arm_hcr_el2_eff_secstate(env, arm_is_secure_below_el3(env));
5844}
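
/*
 * Usage sketch, illustrative only: trap checks test the *effective*
 * value rather than the raw register, so the TGE/E2H overrides above
 * are honoured automatically:
 *
 *     if (arm_current_el(env) < 2 &&
 *         (arm_hcr_el2_eff(env) & HCR_TWI)) {
 *         // WFI traps to EL2; with HCR_EL2.{E2H,TGE} both set this
 *         // never fires, because HCR_TWI is masked out above even
 *         // if the stored bit is 1.
 *     }
 */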
5845
5846/*
5847 * Corresponds to ARM pseudocode function ELIsInHost().
5848 */
5849bool el_is_in_host(CPUARMState *env, int el)
5850{
5851    uint64_t mask;
5852
5853    /*
5854     * Since we only care about E2H and TGE, we can skip arm_hcr_el2_eff().
5855     * Perform the simplest bit tests first, and validate EL2 afterward.
5856     */
5857    if (el & 1) {
5858        return false; /* EL1 or EL3 */
5859    }
5860
5861    /*
5862     * Note that hcr_write() checks isar_feature_aa64_vh(),
5863     * aka HaveVirtHostExt(), in allowing HCR_E2H to be set.
5864     */
5865    mask = el ? HCR_E2H : HCR_E2H | HCR_TGE;
5866    if ((env->cp15.hcr_el2 & mask) != mask) {
5867        return false;
5868    }
5869
5870    /* TGE and/or E2H set: double check those bits are currently legal. */
5871    return arm_is_el2_enabled(env) && arm_el_is_aa64(env, 2);
5872}
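
/*
 * Worked example, illustrative only: for EL0 both E2H and TGE must be
 * set for ELIsInHost to hold, while for EL2 only E2H matters:
 *
 *     el == 0: mask == HCR_E2H | HCR_TGE   -> "host" EL0
 *     el == 2: mask == HCR_E2H             -> "host" EL2
 *
 * and in either case EL2 must be enabled and running AArch64.
 */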
5873
5874static void hcrx_write(CPUARMState *env, const ARMCPRegInfo *ri,
5875                       uint64_t value)
5876{
5877    uint64_t valid_mask = 0;
5878
5879    /* No features adding bits to HCRX are implemented. */
5880
5881    /* Clear RES0 bits.  */
5882    env->cp15.hcrx_el2 = value & valid_mask;
5883}
5884
5885static CPAccessResult access_hxen(CPUARMState *env, const ARMCPRegInfo *ri,
5886                                  bool isread)
5887{
5888    if (arm_current_el(env) < 3
5889        && arm_feature(env, ARM_FEATURE_EL3)
5890        && !(env->cp15.scr_el3 & SCR_HXEN)) {
5891        return CP_ACCESS_TRAP_EL3;
5892    }
5893    return CP_ACCESS_OK;
5894}
5895
5896static const ARMCPRegInfo hcrx_el2_reginfo = {
5897    .name = "HCRX_EL2", .state = ARM_CP_STATE_AA64,
5898    .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 2,
5899    .access = PL2_RW, .writefn = hcrx_write, .accessfn = access_hxen,
5900    .fieldoffset = offsetof(CPUARMState, cp15.hcrx_el2),
5901};
5902
5903/* Return the effective value of HCRX_EL2.  */
5904uint64_t arm_hcrx_el2_eff(CPUARMState *env)
5905{
5906    /*
5907     * The bits in this register behave as 0 for all purposes other than
5908     * direct reads of the register if:
5909     *   - EL2 is not enabled in the current security state,
5910     *   - SCR_EL3.HXEn is 0.
5911     */
5912    if (!arm_is_el2_enabled(env)
5913        || (arm_feature(env, ARM_FEATURE_EL3)
5914            && !(env->cp15.scr_el3 & SCR_HXEN))) {
5915        return 0;
5916    }
5917    return env->cp15.hcrx_el2;
5918}
5919
5920static void cptr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
5921                           uint64_t value)
5922{
5923    /*
5924     * For A-profile AArch32 EL3, if NSACR.CP10
5925     * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
5926     */
5927    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
5928        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
5929        uint64_t mask = R_HCPTR_TCP11_MASK | R_HCPTR_TCP10_MASK;
5930        value = (value & ~mask) | (env->cp15.cptr_el[2] & mask);
5931    }
5932    env->cp15.cptr_el[2] = value;
5933}
5934
5935static uint64_t cptr_el2_read(CPUARMState *env, const ARMCPRegInfo *ri)
5936{
5937    /*
5938     * For A-profile AArch32 EL3, if NSACR.CP10
5939     * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
5940     */
5941    uint64_t value = env->cp15.cptr_el[2];
5942
5943    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
5944        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
5945        value |= R_HCPTR_TCP11_MASK | R_HCPTR_TCP10_MASK;
5946    }
5947    return value;
5948}
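
/*
 * Worked example, illustrative only: with an AArch32 EL3 and
 * NSACR.CP10 == 0, the non-secure view of HCPTR.{TCP11,TCP10} is
 * RAO/WI, which the pair of accessors above implements:
 *
 *     env->cp15.nsacr = 0;           // CP10/CP11 are secure-only
 *     cptr_el2_write(env, ri, 0);    // attempt to clear the traps
 *     cptr_el2_read(env, ri);        // still reports TCP11|TCP10
 */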
5949
5950static const ARMCPRegInfo el2_cp_reginfo[] = {
5951    { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
5952      .type = ARM_CP_IO,
5953      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
5954      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
5955      .writefn = hcr_write, .raw_writefn = raw_write },
5956    { .name = "HCR", .state = ARM_CP_STATE_AA32,
5957      .type = ARM_CP_ALIAS | ARM_CP_IO,
5958      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
5959      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
5960      .writefn = hcr_writelow },
5961    { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH,
5962      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7,
5963      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5964    { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
5965      .type = ARM_CP_ALIAS,
5966      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
5967      .access = PL2_RW,
5968      .fieldoffset = offsetof(CPUARMState, elr_el[2]) },
5969    { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
5970      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
5971      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) },
5972    { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
5973      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
5974      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) },
5975    { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
5976      .type = ARM_CP_ALIAS,
5977      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
5978      .access = PL2_RW,
5979      .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[2]) },
5980    { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
5981      .type = ARM_CP_ALIAS,
5982      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
5983      .access = PL2_RW,
5984      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) },
5985    { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
5986      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
5987      .access = PL2_RW, .writefn = vbar_write,
5988      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]),
5989      .resetvalue = 0 },
5990    { .name = "SP_EL2", .state = ARM_CP_STATE_AA64,
5991      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0,
5992      .access = PL3_RW, .type = ARM_CP_ALIAS,
5993      .fieldoffset = offsetof(CPUARMState, sp_el[2]) },
5994    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
5995      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
5996      .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0,
5997      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]),
5998      .readfn = cptr_el2_read, .writefn = cptr_el2_write },
5999    { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
6000      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
6001      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]),
6002      .resetvalue = 0 },
6003    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
6004      .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
6005      .access = PL2_RW, .type = ARM_CP_ALIAS,
6006      .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) },
6007    { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
6008      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
6009      .access = PL2_RW, .type = ARM_CP_CONST,
6010      .resetvalue = 0 },
6011    /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */
6012    { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
6013      .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
6014      .access = PL2_RW, .type = ARM_CP_CONST,
6015      .resetvalue = 0 },
6016    { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
6017      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
6018      .access = PL2_RW, .type = ARM_CP_CONST,
6019      .resetvalue = 0 },
6020    { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
6021      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
6022      .access = PL2_RW, .type = ARM_CP_CONST,
6023      .resetvalue = 0 },
6024    { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
6025      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
6026      .access = PL2_RW, .writefn = vmsa_tcr_el12_write,
6027      .raw_writefn = raw_write,
6028      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
6029    { .name = "VTCR", .state = ARM_CP_STATE_AA32,
6030      .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
6031      .type = ARM_CP_ALIAS,
6032      .access = PL2_RW, .accessfn = access_el3_aa32ns,
6033      .fieldoffset = offsetoflow32(CPUARMState, cp15.vtcr_el2) },
6034    { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64,
6035      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
6036      .access = PL2_RW,
6037      /* no .writefn needed as this can't cause an ASID change */
6038      .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
6039    { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
6040      .cp = 15, .opc1 = 6, .crm = 2,
6041      .type = ARM_CP_64BIT | ARM_CP_ALIAS,
6042      .access = PL2_RW, .accessfn = access_el3_aa32ns,
6043      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2),
6044      .writefn = vttbr_write, .raw_writefn = raw_write },
6045    { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
6046      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
6047      .access = PL2_RW, .writefn = vttbr_write, .raw_writefn = raw_write,
6048      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) },
6049    { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
6050      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
6051      .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write,
6052      .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) },
6053    { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
6054      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
6055      .access = PL2_RW, .resetvalue = 0,
6056      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) },
6057    { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
6058      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
6059      .access = PL2_RW, .resetvalue = 0,
6060      .writefn = vmsa_tcr_ttbr_el2_write, .raw_writefn = raw_write,
6061      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
6062    { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
6063      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
6064      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
6065    { .name = "TLBIALLNSNH",
6066      .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
6067      .type = ARM_CP_NO_RAW, .access = PL2_W,
6068      .writefn = tlbiall_nsnh_write },
6069    { .name = "TLBIALLNSNHIS",
6070      .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
6071      .type = ARM_CP_NO_RAW, .access = PL2_W,
6072      .writefn = tlbiall_nsnh_is_write },
6073    { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
6074      .type = ARM_CP_NO_RAW, .access = PL2_W,
6075      .writefn = tlbiall_hyp_write },
6076    { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
6077      .type = ARM_CP_NO_RAW, .access = PL2_W,
6078      .writefn = tlbiall_hyp_is_write },
6079    { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
6080      .type = ARM_CP_NO_RAW, .access = PL2_W,
6081      .writefn = tlbimva_hyp_write },
6082    { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
6083      .type = ARM_CP_NO_RAW, .access = PL2_W,
6084      .writefn = tlbimva_hyp_is_write },
6085    { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64,
6086      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
6087      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
6088      .writefn = tlbi_aa64_alle2_write },
6089    { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64,
6090      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
6091      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
6092      .writefn = tlbi_aa64_vae2_write },
6093    { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64,
6094      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
6095      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
6096      .writefn = tlbi_aa64_vae2_write },
6097    { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64,
6098      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
6099      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
6100      .writefn = tlbi_aa64_alle2is_write },
6101    { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64,
6102      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
6103      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
6104      .writefn = tlbi_aa64_vae2is_write },
6105    { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64,
6106      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
6107      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
6108      .writefn = tlbi_aa64_vae2is_write },
6109#ifndef CONFIG_USER_ONLY
6110    /*
6111     * Unlike the other EL2-related AT operations, these must
6112     * UNDEF from EL3 if EL2 is not implemented, which is why we
6113     * define them here rather than with the rest of the AT ops.
6114     */
6115    { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64,
6116      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
6117      .access = PL2_W, .accessfn = at_s1e2_access,
6118      .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC | ARM_CP_EL3_NO_EL2_UNDEF,
6119      .writefn = ats_write64 },
6120    { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64,
6121      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
6122      .access = PL2_W, .accessfn = at_s1e2_access,
6123      .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC | ARM_CP_EL3_NO_EL2_UNDEF,
6124      .writefn = ats_write64 },
6125    /*
6126     * The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE
6127     * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3
6128     * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose
6129     * to behave as if SCR.NS was 1.
6130     */
6131    { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
6132      .access = PL2_W,
6133      .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
6134    { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
6135      .access = PL2_W,
6136      .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
6137    { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
6138      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
6139      /*
6140       * ARMv7 requires bits 0 and 1 to reset to 1. ARMv8 defines the
6141       * reset values as IMPDEF. We choose to reset to 3 to comply with
6142       * both ARMv7 and ARMv8.
6143       */
6144      .access = PL2_RW, .resetvalue = 3,
6145      .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) },
6146    { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
6147      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
6148      .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
6149      .writefn = gt_cntvoff_write,
6150      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
6151    { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
6152      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO,
6153      .writefn = gt_cntvoff_write,
6154      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
6155    { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
6156      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
6157      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
6158      .type = ARM_CP_IO, .access = PL2_RW,
6159      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
6160    { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
6161      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
6162      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO,
6163      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
6164    { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
6165      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
6166      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
6167      .resetfn = gt_hyp_timer_reset,
6168      .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write },
6169    { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
6170      .type = ARM_CP_IO,
6171      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
6172      .access = PL2_RW,
6173      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl),
6174      .resetvalue = 0,
6175      .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write },
6176#endif
6177    { .name = "HPFAR", .state = ARM_CP_STATE_AA32,
6178      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
6179      .access = PL2_RW, .accessfn = access_el3_aa32ns,
6180      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
6181    { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64,
6182      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
6183      .access = PL2_RW,
6184      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
6185    { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
6186      .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
6187      .access = PL2_RW,
6188      .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) },
6189};
6190
6191static const ARMCPRegInfo el2_v8_cp_reginfo[] = {
6192    { .name = "HCR2", .state = ARM_CP_STATE_AA32,
6193      .type = ARM_CP_ALIAS | ARM_CP_IO,
6194      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
6195      .access = PL2_RW,
6196      .fieldoffset = offsetofhigh32(CPUARMState, cp15.hcr_el2),
6197      .writefn = hcr_writehigh },
6198};
6199
6200static CPAccessResult sel2_access(CPUARMState *env, const ARMCPRegInfo *ri,
6201                                  bool isread)
6202{
6203    if (arm_current_el(env) == 3 || arm_is_secure_below_el3(env)) {
6204        return CP_ACCESS_OK;
6205    }
6206    return CP_ACCESS_TRAP_UNCATEGORIZED;
6207}
6208
6209static const ARMCPRegInfo el2_sec_cp_reginfo[] = {
6210    { .name = "VSTTBR_EL2", .state = ARM_CP_STATE_AA64,
6211      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 6, .opc2 = 0,
6212      .access = PL2_RW, .accessfn = sel2_access,
6213      .fieldoffset = offsetof(CPUARMState, cp15.vsttbr_el2) },
6214    { .name = "VSTCR_EL2", .state = ARM_CP_STATE_AA64,
6215      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 6, .opc2 = 2,
6216      .access = PL2_RW, .accessfn = sel2_access,
6217      .fieldoffset = offsetof(CPUARMState, cp15.vstcr_el2) },
6218};
6219
6220static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
6221                                   bool isread)
6222{
6223    /*
6224     * The NSACR is RW at EL3, and RO for NS EL1 and NS EL2.
6225     * At Secure EL1 it traps to EL3 or EL2.
6226     */
6227    if (arm_current_el(env) == 3) {
6228        return CP_ACCESS_OK;
6229    }
6230    if (arm_is_secure_below_el3(env)) {
6231        if (env->cp15.scr_el3 & SCR_EEL2) {
6232            return CP_ACCESS_TRAP_EL2;
6233        }
6234        return CP_ACCESS_TRAP_EL3;
6235    }
6236    /* From NS EL1 and NS EL2, writes are UNDEF but reads are allowed. */
6237    if (isread) {
6238        return CP_ACCESS_OK;
6239    }
6240    return CP_ACCESS_TRAP_UNCATEGORIZED;
6241}
6242
6243static const ARMCPRegInfo el3_cp_reginfo[] = {
6244    { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
6245      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
6246      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
6247      .resetfn = scr_reset, .writefn = scr_write, .raw_writefn = raw_write },
6248    { .name = "SCR",  .type = ARM_CP_ALIAS | ARM_CP_NEWEL,
6249      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
6250      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
6251      .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
6252      .writefn = scr_write, .raw_writefn = raw_write },
6253    { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64,
6254      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1,
6255      .access = PL3_RW, .resetvalue = 0,
6256      .fieldoffset = offsetof(CPUARMState, cp15.sder) },
6257    { .name = "SDER",
6258      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1,
6259      .access = PL3_RW, .resetvalue = 0,
6260      .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) },
6261    { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
6262      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
6263      .writefn = vbar_write, .resetvalue = 0,
6264      .fieldoffset = offsetof(CPUARMState, cp15.mvbar) },
6265    { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64,
6266      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0,
6267      .access = PL3_RW, .resetvalue = 0,
6268      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) },
6269    { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
6270      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
6271      .access = PL3_RW,
6272      /* no .writefn needed as this can't cause an ASID change */
6273      .resetvalue = 0,
6274      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) },
6275    { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
6276      .type = ARM_CP_ALIAS,
6277      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
6278      .access = PL3_RW,
6279      .fieldoffset = offsetof(CPUARMState, elr_el[3]) },
6280    { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
6281      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
6282      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) },
6283    { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64,
6284      .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0,
6285      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) },
6286    { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
6287      .type = ARM_CP_ALIAS,
6288      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
6289      .access = PL3_RW,
6290      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) },
6291    { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
6292      .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
6293      .access = PL3_RW, .writefn = vbar_write,
6294      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]),
6295      .resetvalue = 0 },
6296    { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64,
6297      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2,
6298      .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0,
6299      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) },
6300    { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64,
6301      .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2,
6302      .access = PL3_RW, .resetvalue = 0,
6303      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) },
6304    { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64,
6305      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0,
6306      .access = PL3_RW, .type = ARM_CP_CONST,
6307      .resetvalue = 0 },
6308    { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH,
6309      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0,
6310      .access = PL3_RW, .type = ARM_CP_CONST,
6311      .resetvalue = 0 },
6312    { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH,
6313      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1,
6314      .access = PL3_RW, .type = ARM_CP_CONST,
6315      .resetvalue = 0 },
6316    { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64,
6317      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0,
6318      .access = PL3_W, .type = ARM_CP_NO_RAW,
6319      .writefn = tlbi_aa64_alle3is_write },
6320    { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64,
6321      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1,
6322      .access = PL3_W, .type = ARM_CP_NO_RAW,
6323      .writefn = tlbi_aa64_vae3is_write },
6324    { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64,
6325      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5,
6326      .access = PL3_W, .type = ARM_CP_NO_RAW,
6327      .writefn = tlbi_aa64_vae3is_write },
6328    { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64,
6329      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0,
6330      .access = PL3_W, .type = ARM_CP_NO_RAW,
6331      .writefn = tlbi_aa64_alle3_write },
6332    { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64,
6333      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1,
6334      .access = PL3_W, .type = ARM_CP_NO_RAW,
6335      .writefn = tlbi_aa64_vae3_write },
6336    { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64,
6337      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5,
6338      .access = PL3_W, .type = ARM_CP_NO_RAW,
6339      .writefn = tlbi_aa64_vae3_write },
6340};
6341
6342#ifndef CONFIG_USER_ONLY
6343/* Test if system register redirection is to occur in the current state.  */
6344static bool redirect_for_e2h(CPUARMState *env)
6345{
6346    return arm_current_el(env) == 2 && (arm_hcr_el2_eff(env) & HCR_E2H);
6347}
6348
6349static uint64_t el2_e2h_read(CPUARMState *env, const ARMCPRegInfo *ri)
6350{
6351    CPReadFn *readfn;
6352
6353    if (redirect_for_e2h(env)) {
6354        /* Switch to the saved EL2 version of the register.  */
6355        ri = ri->opaque;
6356        readfn = ri->readfn;
6357    } else {
6358        readfn = ri->orig_readfn;
6359    }
6360    if (readfn == NULL) {
6361        readfn = raw_read;
6362    }
6363    return readfn(env, ri);
6364}
6365
6366static void el2_e2h_write(CPUARMState *env, const ARMCPRegInfo *ri,
6367                          uint64_t value)
6368{
6369    CPWriteFn *writefn;
6370
6371    if (redirect_for_e2h(env)) {
6372        /* Switch to the saved EL2 version of the register.  */
6373        ri = ri->opaque;
6374        writefn = ri->writefn;
6375    } else {
6376        writefn = ri->orig_writefn;
6377    }
6378    if (writefn == NULL) {
6379        writefn = raw_write;
6380    }
6381    writefn(env, ri, value);
6382}
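
/*
 * Illustrative sketch, not part of the original code: once
 * define_arm_vh_e2h_redirects_aliases() below has rewired an EL1
 * regdef, an access at EL2 with HCR_EL2.E2H == 1 reaches the EL2
 * register's backing state instead:
 *
 *     // MRS x0, SCTLR_EL1 at EL2, E2H set:
 *     //   el2_e2h_read() -> ri->opaque (SCTLR_EL2) -> sctlr_el[2]
 *     // Same encoding at EL1, or with E2H clear:
 *     //   el2_e2h_read() -> ri->orig_readfn -> sctlr_el[1]
 */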
6383
6384static void define_arm_vh_e2h_redirects_aliases(ARMCPU *cpu)
6385{
6386    struct E2HAlias {
6387        uint32_t src_key, dst_key, new_key;
6388        const char *src_name, *dst_name, *new_name;
6389        bool (*feature)(const ARMISARegisters *id);
6390    };
6391
6392#define K(op0, op1, crn, crm, op2) \
6393    ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, crn, crm, op0, op1, op2)
6394
6395    static const struct E2HAlias aliases[] = {
6396        { K(3, 0,  1, 0, 0), K(3, 4,  1, 0, 0), K(3, 5, 1, 0, 0),
6397          "SCTLR", "SCTLR_EL2", "SCTLR_EL12" },
6398        { K(3, 0,  1, 0, 2), K(3, 4,  1, 1, 2), K(3, 5, 1, 0, 2),
6399          "CPACR", "CPTR_EL2", "CPACR_EL12" },
6400        { K(3, 0,  2, 0, 0), K(3, 4,  2, 0, 0), K(3, 5, 2, 0, 0),
6401          "TTBR0_EL1", "TTBR0_EL2", "TTBR0_EL12" },
6402        { K(3, 0,  2, 0, 1), K(3, 4,  2, 0, 1), K(3, 5, 2, 0, 1),
6403          "TTBR1_EL1", "TTBR1_EL2", "TTBR1_EL12" },
6404        { K(3, 0,  2, 0, 2), K(3, 4,  2, 0, 2), K(3, 5, 2, 0, 2),
6405          "TCR_EL1", "TCR_EL2", "TCR_EL12" },
6406        { K(3, 0,  4, 0, 0), K(3, 4,  4, 0, 0), K(3, 5, 4, 0, 0),
6407          "SPSR_EL1", "SPSR_EL2", "SPSR_EL12" },
6408        { K(3, 0,  4, 0, 1), K(3, 4,  4, 0, 1), K(3, 5, 4, 0, 1),
6409          "ELR_EL1", "ELR_EL2", "ELR_EL12" },
6410        { K(3, 0,  5, 1, 0), K(3, 4,  5, 1, 0), K(3, 5, 5, 1, 0),
6411          "AFSR0_EL1", "AFSR0_EL2", "AFSR0_EL12" },
6412        { K(3, 0,  5, 1, 1), K(3, 4,  5, 1, 1), K(3, 5, 5, 1, 1),
6413          "AFSR1_EL1", "AFSR1_EL2", "AFSR1_EL12" },
6414        { K(3, 0,  5, 2, 0), K(3, 4,  5, 2, 0), K(3, 5, 5, 2, 0),
6415          "ESR_EL1", "ESR_EL2", "ESR_EL12" },
6416        { K(3, 0,  6, 0, 0), K(3, 4,  6, 0, 0), K(3, 5, 6, 0, 0),
6417          "FAR_EL1", "FAR_EL2", "FAR_EL12" },
6418        { K(3, 0, 10, 2, 0), K(3, 4, 10, 2, 0), K(3, 5, 10, 2, 0),
6419          "MAIR_EL1", "MAIR_EL2", "MAIR_EL12" },
6420        { K(3, 0, 10, 3, 0), K(3, 4, 10, 3, 0), K(3, 5, 10, 3, 0),
6421          "AMAIR0", "AMAIR_EL2", "AMAIR_EL12" },
6422        { K(3, 0, 12, 0, 0), K(3, 4, 12, 0, 0), K(3, 5, 12, 0, 0),
6423          "VBAR", "VBAR_EL2", "VBAR_EL12" },
6424        { K(3, 0, 13, 0, 1), K(3, 4, 13, 0, 1), K(3, 5, 13, 0, 1),
6425          "CONTEXTIDR_EL1", "CONTEXTIDR_EL2", "CONTEXTIDR_EL12" },
6426        { K(3, 0, 14, 1, 0), K(3, 4, 14, 1, 0), K(3, 5, 14, 1, 0),
6427          "CNTKCTL", "CNTHCTL_EL2", "CNTKCTL_EL12" },
6428
6429        /*
6430         * Note that redirection of ZCR is mentioned in the description
6431         * of ZCR_EL2, and aliasing in the description of ZCR_EL1, but
6432         * not in the summary table.
6433         */
6434        { K(3, 0,  1, 2, 0), K(3, 4,  1, 2, 0), K(3, 5, 1, 2, 0),
6435          "ZCR_EL1", "ZCR_EL2", "ZCR_EL12", isar_feature_aa64_sve },
6436        { K(3, 0,  1, 2, 6), K(3, 4,  1, 2, 6), K(3, 5, 1, 2, 6),
6437          "SMCR_EL1", "SMCR_EL2", "SMCR_EL12", isar_feature_aa64_sme },
6438
6439        { K(3, 0,  5, 6, 0), K(3, 4,  5, 6, 0), K(3, 5, 5, 6, 0),
6440          "TFSR_EL1", "TFSR_EL2", "TFSR_EL12", isar_feature_aa64_mte },
6441
6442        { K(3, 0, 13, 0, 7), K(3, 4, 13, 0, 7), K(3, 5, 13, 0, 7),
6443          "SCXTNUM_EL1", "SCXTNUM_EL2", "SCXTNUM_EL12",
6444          isar_feature_aa64_scxtnum },
6445
6446        /* TODO: ARMv8.2-SPE -- PMSCR_EL2 */
6447        /* TODO: ARMv8.4-Trace -- TRFCR_EL2 */
6448    };
6449#undef K
6450
6451    size_t i;
6452
6453    for (i = 0; i < ARRAY_SIZE(aliases); i++) {
6454        const struct E2HAlias *a = &aliases[i];
6455        ARMCPRegInfo *src_reg, *dst_reg, *new_reg;
6456        bool ok;
6457
6458        if (a->feature && !a->feature(&cpu->isar)) {
6459            continue;
6460        }
6461
6462        src_reg = g_hash_table_lookup(cpu->cp_regs,
6463                                      (gpointer)(uintptr_t)a->src_key);
6464        dst_reg = g_hash_table_lookup(cpu->cp_regs,
6465                                      (gpointer)(uintptr_t)a->dst_key);
6466        g_assert(src_reg != NULL);
6467        g_assert(dst_reg != NULL);
6468
6469        /* Cross-compare names to detect typos in the keys.  */
6470        g_assert(strcmp(src_reg->name, a->src_name) == 0);
6471        g_assert(strcmp(dst_reg->name, a->dst_name) == 0);
6472
6473        /* None of the core system registers use opaque; we will.  */
6474        g_assert(src_reg->opaque == NULL);
6475
6476        /* Create alias before redirection so we dup the right data. */
6477        new_reg = g_memdup(src_reg, sizeof(ARMCPRegInfo));
6478
6479        new_reg->name = a->new_name;
6480        new_reg->type |= ARM_CP_ALIAS;
6481        /* Remove PL1/PL0 access, leaving PL2/PL3 R/W in place.  */
6482        new_reg->access &= PL2_RW | PL3_RW;
6483
6484        ok = g_hash_table_insert(cpu->cp_regs,
6485                                 (gpointer)(uintptr_t)a->new_key, new_reg);
6486        g_assert(ok);
6487
6488        src_reg->opaque = dst_reg;
6489        src_reg->orig_readfn = src_reg->readfn ?: raw_read;
6490        src_reg->orig_writefn = src_reg->writefn ?: raw_write;
6491        if (!src_reg->raw_readfn) {
6492            src_reg->raw_readfn = raw_read;
6493        }
6494        if (!src_reg->raw_writefn) {
6495            src_reg->raw_writefn = raw_write;
6496        }
6497        src_reg->readfn = el2_e2h_read;
6498        src_reg->writefn = el2_e2h_write;
6499    }
6500}
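
/*
 * Usage note, illustrative only: the *_EL12 aliases created above let
 * host EL2 reach the real EL1 register once redirection is active:
 *
 *     // At EL2 with HCR_EL2.E2H == 1:
 *     //   MRS x0, SCTLR_EL1   -> sctlr_el[2]   (redirected)
 *     //   MRS x0, SCTLR_EL12  -> sctlr_el[1]   (the dup'ed regdef,
 *     //                          ARM_CP_ALIAS, PL2/PL3 access only)
 */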
6501#endif
6502
6503static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
6504                                     bool isread)
6505{
6506    int cur_el = arm_current_el(env);
6507
6508    if (cur_el < 2) {
6509        uint64_t hcr = arm_hcr_el2_eff(env);
6510
6511        if (cur_el == 0) {
6512            if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
6513                if (!(env->cp15.sctlr_el[2] & SCTLR_UCT)) {
6514                    return CP_ACCESS_TRAP_EL2;
6515                }
6516            } else {
6517                if (!(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
6518                    return CP_ACCESS_TRAP;
6519                }
6520                if (hcr & HCR_TID2) {
6521                    return CP_ACCESS_TRAP_EL2;
6522                }
6523            }
6524        } else if (hcr & HCR_TID2) {
6525            return CP_ACCESS_TRAP_EL2;
6526        }
6527    }
6528
6533    return CP_ACCESS_OK;
6534}
6535
6536/*
6537 * Check for traps to RAS registers, which are controlled
6538 * by HCR_EL2.TERR and SCR_EL3.TERR.
6539 */
6540static CPAccessResult access_terr(CPUARMState *env, const ARMCPRegInfo *ri,
6541                                  bool isread)
6542{
6543    int el = arm_current_el(env);
6544
6545    if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TERR)) {
6546        return CP_ACCESS_TRAP_EL2;
6547    }
6548    if (el < 3 && (env->cp15.scr_el3 & SCR_TERR)) {
6549        return CP_ACCESS_TRAP_EL3;
6550    }
6551    return CP_ACCESS_OK;
6552}
6553
6554static uint64_t disr_read(CPUARMState *env, const ARMCPRegInfo *ri)
6555{
6556    int el = arm_current_el(env);
6557
6558    if (el < 2 && (arm_hcr_el2_eff(env) & HCR_AMO)) {
6559        return env->cp15.vdisr_el2;
6560    }
6561    if (el < 3 && (env->cp15.scr_el3 & SCR_EA)) {
6562        return 0; /* RAZ/WI */
6563    }
6564    return env->cp15.disr_el1;
6565}
6566
6567static void disr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
6568{
6569    int el = arm_current_el(env);
6570
6571    if (el < 2 && (arm_hcr_el2_eff(env) & HCR_AMO)) {
6572        env->cp15.vdisr_el2 = val;
6573        return;
6574    }
6575    if (el < 3 && (env->cp15.scr_el3 & SCR_EA)) {
6576        return; /* RAZ/WI */
6577    }
6578    env->cp15.disr_el1 = val;
6579}
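
/*
 * Illustrative summary, not part of the original code: the one
 * DISR_EL1 encoding reaches different state depending on routing,
 * matching the read path above:
 *
 *     // EL1 with HCR_EL2.AMO == 1:  virtual SErrors -> vdisr_el2
 *     // below EL3 with SCR_EL3.EA:  EL3 owns SErrors -> RAZ/WI
 *     // otherwise:                  the real disr_el1
 */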
6580
6581/*
6582 * Minimal RAS implementation with no Error Records.
6583 * Which means that all of the Error Record registers:
6584 *   ERXADDR_EL1
6585 *   ERXCTLR_EL1
6586 *   ERXFR_EL1
6587 *   ERXMISC0_EL1
6588 *   ERXMISC1_EL1
6589 *   ERXMISC2_EL1
6590 *   ERXMISC3_EL1
6591 *   ERXPFGCDN_EL1  (RASv1p1)
6592 *   ERXPFGCTL_EL1  (RASv1p1)
6593 *   ERXPFGF_EL1    (RASv1p1)
6594 *   ERXSTATUS_EL1
6595 * and
6596 *   ERRSELR_EL1
6597 * may generate UNDEFINED, which is the effect we get by not
6598 * listing them at all.
6599 *
6600 * These registers have fine-grained trap bits, but UNDEF-to-EL1
6601 * is higher priority than FGT-to-EL2 so we do not need to list them
6602 * in order to check for an FGT.
6603 */
6604static const ARMCPRegInfo minimal_ras_reginfo[] = {
6605    { .name = "DISR_EL1", .state = ARM_CP_STATE_BOTH,
6606      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 1,
6607      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.disr_el1),
6608      .readfn = disr_read, .writefn = disr_write, .raw_writefn = raw_write },
6609    { .name = "ERRIDR_EL1", .state = ARM_CP_STATE_BOTH,
6610      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 3, .opc2 = 0,
6611      .access = PL1_R, .accessfn = access_terr,
6612      .fgt = FGT_ERRIDR_EL1,
6613      .type = ARM_CP_CONST, .resetvalue = 0 },
6614    { .name = "VDISR_EL2", .state = ARM_CP_STATE_BOTH,
6615      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 1, .opc2 = 1,
6616      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.vdisr_el2) },
6617    { .name = "VSESR_EL2", .state = ARM_CP_STATE_BOTH,
6618      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 3,
6619      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.vsesr_el2) },
6620};
6621
6622/*
6623 * Return the exception level to which exceptions should be taken
6624 * via SVEAccessTrap.  This excludes the check for whether the exception
6625 * should be routed through AArch64.AdvSIMDFPAccessTrap.  That can easily
6626 * be found by testing 0 < fp_exception_el < sve_exception_el.
6627 *
6628 * C.f. the ARM pseudocode function CheckSVEEnabled.  Note that the
6629 * pseudocode does *not* separate out the FP trap checks, but has them
6630 * all in one function.
6631 */
6632int sve_exception_el(CPUARMState *env, int el)
6633{
6634#ifndef CONFIG_USER_ONLY
6635    if (el <= 1 && !el_is_in_host(env, el)) {
6636        switch (FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, ZEN)) {
6637        case 1:
6638            if (el != 0) {
6639                break;
6640            }
6641            /* fall through */
6642        case 0:
6643        case 2:
6644            return 1;
6645        }
6646    }
6647
6648    if (el <= 2 && arm_is_el2_enabled(env)) {
6649        /* CPTR_EL2 changes format with HCR_EL2.E2H (regardless of TGE). */
6650        if (env->cp15.hcr_el2 & HCR_E2H) {
6651            switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, ZEN)) {
6652            case 1:
6653                if (el != 0 || !(env->cp15.hcr_el2 & HCR_TGE)) {
6654                    break;
6655                }
6656                /* fall through */
6657            case 0:
6658            case 2:
6659                return 2;
6660            }
6661        } else {
6662            if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TZ)) {
6663                return 2;
6664            }
6665        }
6666    }
6667
6668    /* CPTR_EL3.  Since EZ is negative we must check for EL3.  */
6669    if (arm_feature(env, ARM_FEATURE_EL3)
6670        && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, EZ)) {
6671        return 3;
6672    }
6673#endif
6674    return 0;
6675}
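
/*
 * Worked example, illustrative only, of the 2-bit CPACR_EL1.ZEN
 * decode used above:
 *
 *     ZEN == 0 or 2: EL0 and EL1 accesses trap      -> return 1
 *     ZEN == 1:      only EL0 accesses trap         -> return 1 at EL0
 *     ZEN == 3:      no CPACR trap; fall through to CPTR_EL2/CPTR_EL3
 */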
6676
6677/*
6678 * Return the exception level to which exceptions should be taken for SME.
6679 * C.f. the ARM pseudocode function CheckSMEAccess.
6680 */
6681int sme_exception_el(CPUARMState *env, int el)
6682{
6683#ifndef CONFIG_USER_ONLY
6684    if (el <= 1 && !el_is_in_host(env, el)) {
6685        switch (FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, SMEN)) {
6686        case 1:
6687            if (el != 0) {
6688                break;
6689            }
6690            /* fall through */
6691        case 0:
6692        case 2:
6693            return 1;
6694        }
6695    }
6696
6697    if (el <= 2 && arm_is_el2_enabled(env)) {
6698        /* CPTR_EL2 changes format with HCR_EL2.E2H (regardless of TGE). */
6699        if (env->cp15.hcr_el2 & HCR_E2H) {
6700            switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, SMEN)) {
6701            case 1:
6702                if (el != 0 || !(env->cp15.hcr_el2 & HCR_TGE)) {
6703                    break;
6704                }
6705                /* fall through */
6706            case 0:
6707            case 2:
6708                return 2;
6709            }
6710        } else {
6711            if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TSM)) {
6712                return 2;
6713            }
6714        }
6715    }
6716
6717    /* CPTR_EL3.  Since ESM is negative we must check for EL3.  */
6718    if (arm_feature(env, ARM_FEATURE_EL3)
6719        && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, ESM)) {
6720        return 3;
6721    }
6722#endif
6723    return 0;
6724}
6725
6726/*
6727 * Given that SVE is enabled, return the vector length for EL.
6728 */
6729uint32_t sve_vqm1_for_el_sm(CPUARMState *env, int el, bool sm)
6730{
6731    ARMCPU *cpu = env_archcpu(env);
6732    uint64_t *cr = env->vfp.zcr_el;
6733    uint32_t map = cpu->sve_vq.map;
6734    uint32_t len = ARM_MAX_VQ - 1;
6735
6736    if (sm) {
6737        cr = env->vfp.smcr_el;
6738        map = cpu->sme_vq.map;
6739    }
6740
6741    if (el <= 1 && !el_is_in_host(env, el)) {
6742        len = MIN(len, 0xf & (uint32_t)cr[1]);
6743    }
6744    if (el <= 2 && arm_feature(env, ARM_FEATURE_EL2)) {
6745        len = MIN(len, 0xf & (uint32_t)cr[2]);
6746    }
6747    if (arm_feature(env, ARM_FEATURE_EL3)) {
6748        len = MIN(len, 0xf & (uint32_t)cr[3]);
6749    }
6750
6751    map &= MAKE_64BIT_MASK(0, len + 1);
6752    if (map != 0) {
6753        return 31 - clz32(map);
6754    }
6755
6756    /* Bit 0 is always set for Normal SVE -- not so for Streaming SVE. */
6757    assert(sm);
6758    return ctz32(cpu->sme_vq.map);
6759}
6760
6761uint32_t sve_vqm1_for_el(CPUARMState *env, int el)
6762{
6763    return sve_vqm1_for_el_sm(env, el, FIELD_EX64(env->svcr, SVCR, SM));
6764}
6765
6766static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
6767                      uint64_t value)
6768{
6769    int cur_el = arm_current_el(env);
6770    int old_len = sve_vqm1_for_el(env, cur_el);
6771    int new_len;
6772
6773    /* Bits other than [3:0] are RAZ/WI.  */
6774    QEMU_BUILD_BUG_ON(ARM_MAX_VQ > 16);
6775    raw_write(env, ri, value & 0xf);
6776
6777    /*
6778     * Because we arrived here, we know both FP and SVE are enabled;
6779     * otherwise we would have trapped access to the ZCR_ELn register.
6780     */
6781    new_len = sve_vqm1_for_el(env, cur_el);
6782    if (new_len < old_len) {
6783        aarch64_sve_narrow_vq(env, new_len + 1);
6784    }
6785}
6786
6787static const ARMCPRegInfo zcr_reginfo[] = {
6788    { .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64,
6789      .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0,
6790      .access = PL1_RW, .type = ARM_CP_SVE,
6791      .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]),
6792      .writefn = zcr_write, .raw_writefn = raw_write },
6793    { .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
6794      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
6795      .access = PL2_RW, .type = ARM_CP_SVE,
6796      .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]),
6797      .writefn = zcr_write, .raw_writefn = raw_write },
6798    { .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64,
6799      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0,
6800      .access = PL3_RW, .type = ARM_CP_SVE,
6801      .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]),
6802      .writefn = zcr_write, .raw_writefn = raw_write },
6803};
6804
6805#ifdef TARGET_AARCH64
6806static CPAccessResult access_tpidr2(CPUARMState *env, const ARMCPRegInfo *ri,
6807                                    bool isread)
6808{
6809    int el = arm_current_el(env);
6810
6811    if (el == 0) {
6812        uint64_t sctlr = arm_sctlr(env, el);
6813        if (!(sctlr & SCTLR_EnTP2)) {
6814            return CP_ACCESS_TRAP;
6815        }
6816    }
6817    /* TODO: FEAT_FGT */
6818    if (el < 3
6819        && arm_feature(env, ARM_FEATURE_EL3)
6820        && !(env->cp15.scr_el3 & SCR_ENTP2)) {
6821        return CP_ACCESS_TRAP_EL3;
6822    }
6823    return CP_ACCESS_OK;
6824}
6825
6826static CPAccessResult access_esm(CPUARMState *env, const ARMCPRegInfo *ri,
6827                                 bool isread)
6828{
6829    /* TODO: FEAT_FGT for SMPRI_EL1 but not SMPRIMAP_EL2 */
6830    if (arm_current_el(env) < 3
6831        && arm_feature(env, ARM_FEATURE_EL3)
6832        && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, ESM)) {
6833        return CP_ACCESS_TRAP_EL3;
6834    }
6835    return CP_ACCESS_OK;
6836}
6837
6838/* ResetSVEState */
6839static void arm_reset_sve_state(CPUARMState *env)
6840{
6841    memset(env->vfp.zregs, 0, sizeof(env->vfp.zregs));
6842    /* Recall that FFR is stored as pregs[16]. */
6843    memset(env->vfp.pregs, 0, sizeof(env->vfp.pregs));
6844    vfp_set_fpsr(env, 0x0800009f); /* FPSR value per ResetSVEState */
6845}
6846
6847void aarch64_set_svcr(CPUARMState *env, uint64_t new, uint64_t mask)
6848{
6849    uint64_t change = (env->svcr ^ new) & mask;
6850
6851    if (change == 0) {
6852        return;
6853    }
6854    env->svcr ^= change;
6855
6856    if (change & R_SVCR_SM_MASK) {
6857        arm_reset_sve_state(env);
6858    }
6859
6860    /*
6861     * ResetSMEState.
6862     *
6863     * SetPSTATE_ZA zeros on enable and disable.  We can zero this only
6864     * on enable: while disabled, the storage is inaccessible and the
6865     * value does not matter.  We're not saving the storage in vmstate
6866     * when disabled either.
6867     */
6868    if (change & new & R_SVCR_ZA_MASK) {
6869        memset(env->zarray, 0, sizeof(env->zarray));
6870    }
6871
6872    if (tcg_enabled()) {
6873        arm_rebuild_hflags(env);
6874    }
6875}
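
/*
 * Usage example, illustrative only: MSR SVCR (and the SMSTART/SMSTOP
 * aliases) funnel through here, e.g.:
 *
 *     aarch64_set_svcr(env, R_SVCR_SM_MASK, R_SVCR_SM_MASK);
 *     // SM 0 -> 1: Z/P regs and FPSR are reset (arm_reset_sve_state)
 *     aarch64_set_svcr(env, R_SVCR_ZA_MASK, R_SVCR_ZA_MASK);
 *     // ZA 0 -> 1: zarray is zeroed; disabling ZA zeroes nothing
 */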
6876
6877static void svcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
6878                       uint64_t value)
6879{
6880    aarch64_set_svcr(env, value, -1);
6881}
6882
6883static void smcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
6884                       uint64_t value)
6885{
6886    int cur_el = arm_current_el(env);
6887    int old_len = sve_vqm1_for_el(env, cur_el);
6888    int new_len;
6889
6890    QEMU_BUILD_BUG_ON(ARM_MAX_VQ > R_SMCR_LEN_MASK + 1);
6891    value &= R_SMCR_LEN_MASK | R_SMCR_FA64_MASK;
6892    raw_write(env, ri, value);
6893
6894    /*
6895     * Note that it is CONSTRAINED UNPREDICTABLE what happens to ZA storage
6896     * when SVL is widened (old values kept, or zeros).  Choose to keep the
6897     * current values for simplicity.  But for QEMU internals, we must still
6898     * apply the narrower SVL to the Zregs and Pregs -- see the comment
6899     * above aarch64_sve_narrow_vq.
6900     */
6901    new_len = sve_vqm1_for_el(env, cur_el);
6902    if (new_len < old_len) {
6903        aarch64_sve_narrow_vq(env, new_len + 1);
6904    }
6905}
6906
6907static const ARMCPRegInfo sme_reginfo[] = {
6908    { .name = "TPIDR2_EL0", .state = ARM_CP_STATE_AA64,
6909      .opc0 = 3, .opc1 = 3, .crn = 13, .crm = 0, .opc2 = 5,
6910      .access = PL0_RW, .accessfn = access_tpidr2,
6911      .fgt = FGT_NTPIDR2_EL0,
6912      .fieldoffset = offsetof(CPUARMState, cp15.tpidr2_el0) },
6913    { .name = "SVCR", .state = ARM_CP_STATE_AA64,
6914      .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 2,
6915      .access = PL0_RW, .type = ARM_CP_SME,
6916      .fieldoffset = offsetof(CPUARMState, svcr),
6917      .writefn = svcr_write, .raw_writefn = raw_write },
6918    { .name = "SMCR_EL1", .state = ARM_CP_STATE_AA64,
6919      .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 6,
6920      .access = PL1_RW, .type = ARM_CP_SME,
6921      .fieldoffset = offsetof(CPUARMState, vfp.smcr_el[1]),
6922      .writefn = smcr_write, .raw_writefn = raw_write },
6923    { .name = "SMCR_EL2", .state = ARM_CP_STATE_AA64,
6924      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 6,
6925      .access = PL2_RW, .type = ARM_CP_SME,
6926      .fieldoffset = offsetof(CPUARMState, vfp.smcr_el[2]),
6927      .writefn = smcr_write, .raw_writefn = raw_write },
6928    { .name = "SMCR_EL3", .state = ARM_CP_STATE_AA64,
6929      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 6,
6930      .access = PL3_RW, .type = ARM_CP_SME,
6931      .fieldoffset = offsetof(CPUARMState, vfp.smcr_el[3]),
6932      .writefn = smcr_write, .raw_writefn = raw_write },
6933    { .name = "SMIDR_EL1", .state = ARM_CP_STATE_AA64,
6934      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 6,
6935      .access = PL1_R, .accessfn = access_aa64_tid1,
6936      /*
6937       * IMPLEMENTOR = 0 (software)
6938       * REVISION    = 0 (implementation defined)
6939       * SMPS        = 0 (no streaming execution priority in QEMU)
6940       * AFFINITY    = 0 (streaming sve mode not shared with other PEs)
6941       */
6942      .type = ARM_CP_CONST, .resetvalue = 0, },
6943    /*
6944     * Because SMIDR_EL1.SMPS is 0, SMPRI_EL1 and SMPRIMAP_EL2 are RES0.
6945     */
6946    { .name = "SMPRI_EL1", .state = ARM_CP_STATE_AA64,
6947      .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 4,
6948      .access = PL1_RW, .accessfn = access_esm,
6949      .fgt = FGT_NSMPRI_EL1,
6950      .type = ARM_CP_CONST, .resetvalue = 0 },
6951    { .name = "SMPRIMAP_EL2", .state = ARM_CP_STATE_AA64,
6952      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 5,
6953      .access = PL2_RW, .accessfn = access_esm,
6954      .type = ARM_CP_CONST, .resetvalue = 0 },
6955};
6956
6957static void tlbi_aa64_paall_write(CPUARMState *env, const ARMCPRegInfo *ri,
6958                                  uint64_t value)
6959{
6960    CPUState *cs = env_cpu(env);
6961
6962    tlb_flush(cs);
6963}
6964
6965static void gpccr_write(CPUARMState *env, const ARMCPRegInfo *ri,
6966                        uint64_t value)
6967{
6968    /* L0GPTSZ is RO; other bits not mentioned are RES0. */
6969    uint64_t rw_mask = R_GPCCR_PPS_MASK | R_GPCCR_IRGN_MASK |
6970        R_GPCCR_ORGN_MASK | R_GPCCR_SH_MASK | R_GPCCR_PGS_MASK |
6971        R_GPCCR_GPC_MASK | R_GPCCR_GPCP_MASK;
6972
6973    env->cp15.gpccr_el3 = (value & rw_mask) | (env->cp15.gpccr_el3 & ~rw_mask);
6974}
6975
6976static void gpccr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
6977{
6978    env->cp15.gpccr_el3 = FIELD_DP64(0, GPCCR, L0GPTSZ,
6979                                     env_archcpu(env)->reset_l0gptsz);
6980}
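
/*
 * Illustrative example, not part of the original code: L0GPTSZ is
 * read-only, so it survives any guest write while the RW fields take
 * the new value:
 *
 *     gpccr_reset(env, ri);          // seeds L0GPTSZ from the CPU
 *     gpccr_write(env, ri, -1ull);   // set every writable bit
 *     // FIELD_EX64(env->cp15.gpccr_el3, GPCCR, L0GPTSZ) is unchanged
 */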
6981
6982static void tlbi_aa64_paallos_write(CPUARMState *env, const ARMCPRegInfo *ri,
6983                                    uint64_t value)
6984{
6985    CPUState *cs = env_cpu(env);
6986
6987    tlb_flush_all_cpus_synced(cs);
6988}
6989
6990static const ARMCPRegInfo rme_reginfo[] = {
6991    { .name = "GPCCR_EL3", .state = ARM_CP_STATE_AA64,
6992      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 1, .opc2 = 6,
6993      .access = PL3_RW, .writefn = gpccr_write, .resetfn = gpccr_reset,
6994      .fieldoffset = offsetof(CPUARMState, cp15.gpccr_el3) },
6995    { .name = "GPTBR_EL3", .state = ARM_CP_STATE_AA64,
6996      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 1, .opc2 = 4,
6997      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.gptbr_el3) },
6998    { .name = "MFAR_EL3", .state = ARM_CP_STATE_AA64,
6999      .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 5,
7000      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mfar_el3) },
7001    { .name = "TLBI_PAALL", .state = ARM_CP_STATE_AA64,
7002      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 4,
7003      .access = PL3_W, .type = ARM_CP_NO_RAW,
7004      .writefn = tlbi_aa64_paall_write },
7005    { .name = "TLBI_PAALLOS", .state = ARM_CP_STATE_AA64,
7006      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 4,
7007      .access = PL3_W, .type = ARM_CP_NO_RAW,
7008      .writefn = tlbi_aa64_paallos_write },
7009    /*
7010     * QEMU does not have a way to invalidate by physical address, thus
7011     * invalidating a range of physical addresses is accomplished by
7012     * flushing all tlb entries in the outer shareable domain,
7013     * just like PAALLOS.
7014     */
7015    { .name = "TLBI_RPALOS", .state = ARM_CP_STATE_AA64,
7016      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 4, .opc2 = 7,
7017      .access = PL3_W, .type = ARM_CP_NO_RAW,
7018      .writefn = tlbi_aa64_paallos_write },
7019    { .name = "TLBI_RPAOS", .state = ARM_CP_STATE_AA64,
7020      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 4, .opc2 = 3,
7021      .access = PL3_W, .type = ARM_CP_NO_RAW,
7022      .writefn = tlbi_aa64_paallos_write },
7023    { .name = "DC_CIPAPA", .state = ARM_CP_STATE_AA64,
7024      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 14, .opc2 = 1,
7025      .access = PL3_W, .type = ARM_CP_NOP },
7026};
7027
7028static const ARMCPRegInfo rme_mte_reginfo[] = {
7029    { .name = "DC_CIGDPAPA", .state = ARM_CP_STATE_AA64,
7030      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 14, .opc2 = 5,
7031      .access = PL3_W, .type = ARM_CP_NOP },
7032};
7033#endif /* TARGET_AARCH64 */
7034
7035static void define_pmu_regs(ARMCPU *cpu)
7036{
7037    /*
7038     * v7 performance monitor control register: same implementor
7039     * field as main ID register, and we implement pmu_num_counters()
7040     * event counters in addition to the cycle count register.
7041     */
7042    unsigned int i, pmcrn = pmu_num_counters(&cpu->env);
7043    ARMCPRegInfo pmcr = {
7044        .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
7045        .access = PL0_RW,
7046        .fgt = FGT_PMCR_EL0,
7047        .type = ARM_CP_IO | ARM_CP_ALIAS,
7048        .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
7049        .accessfn = pmreg_access, .writefn = pmcr_write,
7050        .raw_writefn = raw_write,
7051    };
7052    ARMCPRegInfo pmcr64 = {
7053        .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64,
7054        .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0,
7055        .access = PL0_RW, .accessfn = pmreg_access,
7056        .fgt = FGT_PMCR_EL0,
7057        .type = ARM_CP_IO,
7058        .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
7059        .resetvalue = cpu->isar.reset_pmcr_el0,
7060        .writefn = pmcr_write, .raw_writefn = raw_write,
7061    };
7062
7063    define_one_arm_cp_reg(cpu, &pmcr);
7064    define_one_arm_cp_reg(cpu, &pmcr64);
7065    for (i = 0; i < pmcrn; i++) {
7066        char *pmevcntr_name = g_strdup_printf("PMEVCNTR%d", i);
7067        char *pmevcntr_el0_name = g_strdup_printf("PMEVCNTR%d_EL0", i);
7068        char *pmevtyper_name = g_strdup_printf("PMEVTYPER%d", i);
7069        char *pmevtyper_el0_name = g_strdup_printf("PMEVTYPER%d_EL0", i);
7070        ARMCPRegInfo pmev_regs[] = {
7071            { .name = pmevcntr_name, .cp = 15, .crn = 14,
7072              .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
7073              .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
7074              .fgt = FGT_PMEVCNTRN_EL0,
7075              .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
7076              .accessfn = pmreg_access_xevcntr },
7077            { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64,
7078              .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 8 | (3 & (i >> 3)),
7079              .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access_xevcntr,
7080              .type = ARM_CP_IO,
7081              .fgt = FGT_PMEVCNTRN_EL0,
7082              .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
7083              .raw_readfn = pmevcntr_rawread,
7084              .raw_writefn = pmevcntr_rawwrite },
7085            { .name = pmevtyper_name, .cp = 15, .crn = 14,
7086              .crm = 12 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
7087              .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
7088              .fgt = FGT_PMEVTYPERN_EL0,
7089              .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
7090              .accessfn = pmreg_access },
7091            { .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64,
7092              .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 12 | (3 & (i >> 3)),
7093              .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
7094              .fgt = FGT_PMEVTYPERN_EL0,
7095              .type = ARM_CP_IO,
7096              .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
7097              .raw_writefn = pmevtyper_rawwrite },
7098        };
7099        define_arm_cp_regs(cpu, pmev_regs);
7100        g_free(pmevcntr_name);
7101        g_free(pmevcntr_el0_name);
7102        g_free(pmevtyper_name);
7103        g_free(pmevtyper_el0_name);
7104    }
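    /*
     * Worked example, illustrative only, of the encoding built in the
     * loop above: event counter n sits at crn == 14 with
     * crm == 8 + n[4:3] (12 + n[4:3] for the TYPER) and opc2 == n[2:0],
     * so for i == 11:
     *
     *     crm  = 8 | (3 & (11 >> 3)) == 9
     *     opc2 = 11 & 7              == 3    -> PMEVCNTR11
     */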
7105    if (cpu_isar_feature(aa32_pmuv3p1, cpu)) {
7106        ARMCPRegInfo v81_pmu_regs[] = {
7107            { .name = "PMCEID2", .state = ARM_CP_STATE_AA32,
7108              .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4,
7109              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
7110              .fgt = FGT_PMCEIDN_EL0,
7111              .resetvalue = extract64(cpu->pmceid0, 32, 32) },
7112            { .name = "PMCEID3", .state = ARM_CP_STATE_AA32,
7113              .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5,
7114              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
7115              .fgt = FGT_PMCEIDN_EL0,
7116              .resetvalue = extract64(cpu->pmceid1, 32, 32) },
7117        };
7118        define_arm_cp_regs(cpu, v81_pmu_regs);
7119    }
7120    if (cpu_isar_feature(any_pmuv3p4, cpu)) {
7121        static const ARMCPRegInfo v84_pmmir = {
7122            .name = "PMMIR_EL1", .state = ARM_CP_STATE_BOTH,
7123            .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 6,
7124            .access = PL1_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
7125            .fgt = FGT_PMMIR_EL1,
7126            .resetvalue = 0
7127        };
7128        define_one_arm_cp_reg(cpu, &v84_pmmir);
7129    }
7130}
7131
7132#ifndef CONFIG_USER_ONLY
7133/*
7134 * We don't know until after realize whether there's a GICv3
7135 * attached, and that is what registers the gicv3 sysregs.
7136 * So we have to fill in the GIC fields in ID_PFR1/ID_PFR1_EL1 and
7137 * ID_AA64PFR0_EL1 at runtime.
7138 */
static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);
    uint64_t pfr1 = cpu->isar.id_pfr1;

    if (env->gicv3state) {
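        /* Advertise the GICv3 system-register interface:
         * ID_PFR1.GIC is bits [31:28], so this sets the field to 1. */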
        pfr1 |= 1 << 28;
    }
    return pfr1;
}

static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);
    uint64_t pfr0 = cpu->isar.id_aa64pfr0;

    if (env->gicv3state) {
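        /* Same for AArch64: ID_AA64PFR0_EL1.GIC is bits [27:24]. */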
        pfr0 |= 1 << 24;
    }
    return pfr0;
}
#endif

/*
 * Shared logic between LORID and the rest of the LOR* registers.
 * Secure state exclusion has already been dealt with.
 */
static CPAccessResult access_lor_ns(CPUARMState *env,
                                    const ARMCPRegInfo *ri, bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TLOR)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.scr_el3 & SCR_TLOR)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult access_lor_other(CPUARMState *env,
                                       const ARMCPRegInfo *ri, bool isread)
{
    if (arm_is_secure_below_el3(env)) {
        /* Access denied in secure mode.  */
        return CP_ACCESS_TRAP;
    }
    return access_lor_ns(env, ri, isread);
}

/*
 * A trivial implementation of ARMv8.1-LOR leaves all of these
 * registers fixed at 0, which indicates that there are zero
 * supported Limited Ordering regions.
 */
static const ARMCPRegInfo lor_reginfo[] = {
    { .name = "LORSA_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_lor_other,
      .fgt = FGT_LORSA_EL1,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "LOREA_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_lor_other,
      .fgt = FGT_LOREA_EL1,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "LORN_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_lor_other,
      .fgt = FGT_LORN_EL1,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "LORC_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 3,
      .access = PL1_RW, .accessfn = access_lor_other,
      .fgt = FGT_LORC_EL1,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "LORID_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 7,
      .access = PL1_R, .accessfn = access_lor_ns,
      .fgt = FGT_LORID_EL1,
      .type = ARM_CP_CONST, .resetvalue = 0 },
};
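
/*
 * Informative note: a guest discovers this configuration by reading
 * LORID_EL1, whose LR (number of LORegions) and LD (number of LORegion
 * descriptor) fields both read as zero here, which is why the remaining
 * LOR* registers can validly be RAZ/WI constants.
 */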

#ifdef TARGET_AARCH64
static CPAccessResult access_pauth(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 &&
        arm_is_el2_enabled(env) &&
        !(arm_hcr_el2_eff(env) & HCR_APK)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 &&
        arm_feature(env, ARM_FEATURE_EL3) &&
        !(env->cp15.scr_el3 & SCR_APK)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static const ARMCPRegInfo pauth_reginfo[] = {
    { .name = "APDAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_pauth,
      .fgt = FGT_APDAKEY,
      .fieldoffset = offsetof(CPUARMState, keys.apda.lo) },
    { .name = "APDAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_pauth,
      .fgt = FGT_APDAKEY,
      .fieldoffset = offsetof(CPUARMState, keys.apda.hi) },
    { .name = "APDBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_pauth,
      .fgt = FGT_APDBKEY,
      .fieldoffset = offsetof(CPUARMState, keys.apdb.lo) },
    { .name = "APDBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 3,
      .access = PL1_RW, .accessfn = access_pauth,
      .fgt = FGT_APDBKEY,
      .fieldoffset = offsetof(CPUARMState, keys.apdb.hi) },
    { .name = "APGAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_pauth,
      .fgt = FGT_APGAKEY,
      .fieldoffset = offsetof(CPUARMState, keys.apga.lo) },
    { .name = "APGAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_pauth,
      .fgt = FGT_APGAKEY,
      .fieldoffset = offsetof(CPUARMState, keys.apga.hi) },
    { .name = "APIAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_pauth,
      .fgt = FGT_APIAKEY,
      .fieldoffset = offsetof(CPUARMState, keys.apia.lo) },
    { .name = "APIAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_pauth,
      .fgt = FGT_APIAKEY,
      .fieldoffset = offsetof(CPUARMState, keys.apia.hi) },
    { .name = "APIBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_pauth,
      .fgt = FGT_APIBKEY,
      .fieldoffset = offsetof(CPUARMState, keys.apib.lo) },
    { .name = "APIBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 3,
      .access = PL1_RW, .accessfn = access_pauth,
      .fgt = FGT_APIBKEY,
      .fieldoffset = offsetof(CPUARMState, keys.apib.hi) },
};
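
/*
 * Illustrative sketch (not used by QEMU itself): each pointer-auth key
 * is a 128-bit value that software sees as a LO/HI pair of 64-bit
 * system registers; the accessors below simply mirror the fieldoffsets
 * in pauth_reginfo above.
 */
static inline void example_read_apia_key(CPUARMState *env,
                                         uint64_t *lo, uint64_t *hi)
{
    *lo = env->keys.apia.lo;    /* APIAKEYLO_EL1 */
    *hi = env->keys.apia.hi;    /* APIAKEYHI_EL1 */
}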

static const ARMCPRegInfo tlbirange_reginfo[] = {
    { .name = "TLBI_RVAE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 1,
      .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW,
      .fgt = FGT_TLBIRVAE1IS,
      .writefn = tlbi_aa64_rvae1is_write },
    { .name = "TLBI_RVAAE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 3,
      .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW,
      .fgt = FGT_TLBIRVAAE1IS,
      .writefn = tlbi_aa64_rvae1is_write },
    { .name = "TLBI_RVALE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 5,
      .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW,
      .fgt = FGT_TLBIRVALE1IS,
      .writefn = tlbi_aa64_rvae1is_write },
    { .name = "TLBI_RVAALE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 7,
      .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW,
      .fgt = FGT_TLBIRVAALE1IS,
      .writefn = tlbi_aa64_rvae1is_write },
    { .name = "TLBI_RVAE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
      .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW,
      .fgt = FGT_TLBIRVAE1OS,
      .writefn = tlbi_aa64_rvae1is_write },
    { .name = "TLBI_RVAAE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 3,
      .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW,
      .fgt = FGT_TLBIRVAAE1OS,
      .writefn = tlbi_aa64_rvae1is_write },
    { .name = "TLBI_RVALE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 5,
      .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW,
      .fgt = FGT_TLBIRVALE1OS,
      .writefn = tlbi_aa64_rvae1is_write },
    { .name = "TLBI_RVAALE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 7,
      .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW,
      .fgt = FGT_TLBIRVAALE1OS,
      .writefn = tlbi_aa64_rvae1is_write },
    { .name = "TLBI_RVAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .fgt = FGT_TLBIRVAE1,
      .writefn = tlbi_aa64_rvae1_write },
    { .name = "TLBI_RVAAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 3,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .fgt = FGT_TLBIRVAAE1,
      .writefn = tlbi_aa64_rvae1_write },
    { .name = "TLBI_RVALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 5,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .fgt = FGT_TLBIRVALE1,
      .writefn = tlbi_aa64_rvae1_write },
    { .name = "TLBI_RVAALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 7,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .fgt = FGT_TLBIRVAALE1,
      .writefn = tlbi_aa64_rvae1_write },
    { .name = "TLBI_RIPAS2E1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 2,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ripas2e1is_write },
    { .name = "TLBI_RIPAS2LE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ripas2e1is_write },
    { .name = "TLBI_RVAE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = tlbi_aa64_rvae2is_write },
    { .name = "TLBI_RVALE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = tlbi_aa64_rvae2is_write },
    { .name = "TLBI_RIPAS2E1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 2,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ripas2e1_write },
    { .name = "TLBI_RIPAS2LE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ripas2e1_write },
    { .name = "TLBI_RVAE2OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 5, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = tlbi_aa64_rvae2is_write },
    { .name = "TLBI_RVALE2OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 5, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = tlbi_aa64_rvae2is_write },
    { .name = "TLBI_RVAE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = tlbi_aa64_rvae2_write },
    { .name = "TLBI_RVALE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = tlbi_aa64_rvae2_write },
    { .name = "TLBI_RVAE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 2, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae3is_write },
    { .name = "TLBI_RVALE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 2, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae3is_write },
    { .name = "TLBI_RVAE3OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 5, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae3is_write },
    { .name = "TLBI_RVALE3OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 5, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae3is_write },
    { .name = "TLBI_RVAE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 6, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae3_write },
    { .name = "TLBI_RVALE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 6, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae3_write },
};
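
/*
 * Illustrative sketch (informative, not code QEMU calls): a FEAT_TLBIRANGE
 * operand packs NUM (bits [43:39]) and SCALE (bits [45:44]) alongside the
 * base address, TTL and TG fields; the span the write functions above
 * invalidate is (NUM + 1) << (5 * SCALE + 1) translation granules.
 */
static inline uint64_t example_tlbi_range_granules(uint64_t value)
{
    uint64_t num = extract64(value, 39, 5);
    uint64_t scale = extract64(value, 44, 2);

    return (num + 1) << (5 * scale + 1);
}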

static const ARMCPRegInfo tlbios_reginfo[] = {
    { .name = "TLBI_VMALLE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 0,
      .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW,
      .fgt = FGT_TLBIVMALLE1OS,
      .writefn = tlbi_aa64_vmalle1is_write },
    { .name = "TLBI_VAE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 1,
      .fgt = FGT_TLBIVAE1OS,
      .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_ASIDE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 2,
      .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW,
      .fgt = FGT_TLBIASIDE1OS,
      .writefn = tlbi_aa64_vmalle1is_write },
    { .name = "TLBI_VAAE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 3,
      .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW,
      .fgt = FGT_TLBIVAAE1OS,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VALE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 5,
      .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW,
      .fgt = FGT_TLBIVALE1OS,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VAALE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 7,
      .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW,
      .fgt = FGT_TLBIVAALE1OS,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_ALLE2OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 0,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = tlbi_aa64_alle2is_write },
    { .name = "TLBI_VAE2OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = tlbi_aa64_vae2is_write },
    { .name = "TLBI_ALLE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
    { .name = "TLBI_VALE2OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = tlbi_aa64_vae2is_write },
    { .name = "TLBI_VMALLS12E1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
    { .name = "TLBI_IPAS2E1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 0,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_RIPAS2E1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 3,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_IPAS2LE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_RIPAS2LE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 7,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_ALLE3OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle3is_write },
    { .name = "TLBI_VAE3OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3is_write },
    { .name = "TLBI_VALE3OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3is_write },
};

static uint64_t rndr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    Error *err = NULL;
    uint64_t ret;

    /* Success sets NZCV = 0000.  */
    env->NF = env->CF = env->VF = 0, env->ZF = 1;
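    /* (QEMU stores Z inverted: the Z flag is set iff env->ZF == 0,
     * so ZF = 1 here means Z clear.) */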

    if (qemu_guest_getrandom(&ret, sizeof(ret), &err) < 0) {
        /*
         * ??? Failed, for unknown reasons in the crypto subsystem.
         * The best we can do is log the reason and return the
         * timed-out indication to the guest.  There is no reason
         * we know to expect this failure to be transitory, so the
         * guest may well hang retrying the operation.
         */
        qemu_log_mask(LOG_UNIMP, "%s: Crypto failure: %s",
                      ri->name, error_get_pretty(err));
        error_free(err);
        env->ZF = 0; /* NZCV = 0100 */
        return 0;
    }
    return ret;
}

/* We do not support re-seeding, so the two registers operate the same.  */
static const ARMCPRegInfo rndr_reginfo[] = {
    { .name = "RNDR", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
      .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 0,
      .access = PL0_R, .readfn = rndr_readfn },
    { .name = "RNDRRS", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
      .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 1,
      .access = PL0_R, .readfn = rndr_readfn },
};

static void dccvap_writefn(CPUARMState *env, const ARMCPRegInfo *opaque,
                           uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    /* CTR_EL0 System register -> DminLine, bits [19:16] */
    uint64_t dline_size = 4 << ((cpu->ctr >> 16) & 0xF);
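    /* DminLine is log2 of the line size in 4-byte words, so e.g.
     * DminLine == 4 gives 4 << 4 == 64-byte cache lines. */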
    uint64_t vaddr_in = (uint64_t) value;
    uint64_t vaddr = vaddr_in & ~(dline_size - 1);
    void *haddr;
    int mem_idx = cpu_mmu_index(env, false);

    /* This won't be crossing page boundaries */
    haddr = probe_read(env, vaddr, dline_size, mem_idx, GETPC());
    if (haddr) {
#ifndef CONFIG_USER_ONLY
        ram_addr_t offset;
        MemoryRegion *mr;

        /* RCU lock is already being held */
        mr = memory_region_from_host(haddr, &offset);

        if (mr) {
            memory_region_writeback(mr, offset, dline_size);
        }
#endif /* CONFIG_USER_ONLY */
    }
}

static const ARMCPRegInfo dcpop_reg[] = {
    { .name = "DC_CVAP", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
      .fgt = FGT_DCCVAP,
      .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn },
};

static const ARMCPRegInfo dcpodp_reg[] = {
    { .name = "DC_CVADP", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
      .fgt = FGT_DCCVADP,
      .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn },
};

static CPAccessResult access_aa64_tid5(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID5)) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult access_mte(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && arm_is_el2_enabled(env)) {
        uint64_t hcr = arm_hcr_el2_eff(env);
        if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    if (el < 3 &&
        arm_feature(env, ARM_FEATURE_EL3) &&
        !(env->cp15.scr_el3 & SCR_ATA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

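/*
 * PSTATE.TCO (Tag Check Override) is presented to software as an
 * MSR/MRS-accessible register; it lives directly in env->pstate, so
 * the accessors below just mask the relevant bit in and out.
 */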
static uint64_t tco_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_TCO;
}

static void tco_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
{
    env->pstate = (env->pstate & ~PSTATE_TCO) | (val & PSTATE_TCO);
}

static const ARMCPRegInfo mte_reginfo[] = {
    { .name = "TFSRE0_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 6, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_mte,
      .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[0]) },
    { .name = "TFSR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 6, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_mte,
      .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[1]) },
    { .name = "TFSR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 6, .opc2 = 0,
      .access = PL2_RW, .accessfn = access_mte,
      .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[2]) },
    { .name = "TFSR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 6, .opc2 = 0,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[3]) },
    { .name = "RGSR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 5,
      .access = PL1_RW, .accessfn = access_mte,
      .fieldoffset = offsetof(CPUARMState, cp15.rgsr_el1) },
    { .name = "GCR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 6,
      .access = PL1_RW, .accessfn = access_mte,
      .fieldoffset = offsetof(CPUARMState, cp15.gcr_el1) },
    { .name = "GMID_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 4,
      .access = PL1_R, .accessfn = access_aa64_tid5,
      .type = ARM_CP_CONST, .resetvalue = GMID_EL1_BS },
    { .name = "TCO", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 7,
      .type = ARM_CP_NO_RAW,
      .access = PL0_RW, .readfn = tco_read, .writefn = tco_write },
    { .name = "DC_IGVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 3,
      .type = ARM_CP_NOP, .access = PL1_W,
      .fgt = FGT_DCIVAC,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_IGSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 4,
      .fgt = FGT_DCISW,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    { .name = "DC_IGDVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL1_W,
      .fgt = FGT_DCIVAC,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_IGDSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 6,
      .fgt = FGT_DCISW,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    { .name = "DC_CGSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 4,
      .fgt = FGT_DCCSW,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    { .name = "DC_CGDSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 6,
      .fgt = FGT_DCCSW,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    { .name = "DC_CIGSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 4,
      .fgt = FGT_DCCISW,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    { .name = "DC_CIGDSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 6,
      .fgt = FGT_DCCISW,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
};

static const ARMCPRegInfo mte_tco_ro_reginfo[] = {
    { .name = "TCO", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 7,
      .type = ARM_CP_CONST, .access = PL0_RW, },
};

static const ARMCPRegInfo mte_el0_cacheop_reginfo[] = {
    { .name = "DC_CGVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 3,
      .type = ARM_CP_NOP, .access = PL0_W,
      .fgt = FGT_DCCVAC,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CGDVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL0_W,
      .fgt = FGT_DCCVAC,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CGVAP", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 3,
      .type = ARM_CP_NOP, .access = PL0_W,
      .fgt = FGT_DCCVAP,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CGDVAP", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL0_W,
      .fgt = FGT_DCCVAP,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CGVADP", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 3,
      .type = ARM_CP_NOP, .access = PL0_W,
      .fgt = FGT_DCCVADP,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CGDVADP", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL0_W,
      .fgt = FGT_DCCVADP,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CIGVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 3,
      .type = ARM_CP_NOP, .access = PL0_W,
      .fgt = FGT_DCCIVAC,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CIGDVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL0_W,
      .fgt = FGT_DCCIVAC,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_GVA", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 3,
      .access = PL0_W, .type = ARM_CP_DC_GVA,
#ifndef CONFIG_USER_ONLY
      /* Avoid overhead of an access check that always passes in user-mode */
      .accessfn = aa64_zva_access,
      .fgt = FGT_DCZVA,
#endif
    },
    { .name = "DC_GZVA", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_DC_GZVA,
#ifndef CONFIG_USER_ONLY
      /* Avoid overhead of an access check that always passes in user-mode */
      .accessfn = aa64_zva_access,
      .fgt = FGT_DCZVA,
#endif
    },
};

static CPAccessResult access_scxtnum(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    uint64_t hcr = arm_hcr_el2_eff(env);
    int el = arm_current_el(env);

    if (el == 0 && !((hcr & HCR_E2H) && (hcr & HCR_TGE))) {
        if (env->cp15.sctlr_el[1] & SCTLR_TSCXT) {
            if (hcr & HCR_TGE) {
                return CP_ACCESS_TRAP_EL2;
            }
            return CP_ACCESS_TRAP;
        }
    } else if (el < 2 && (env->cp15.sctlr_el[2] & SCTLR_TSCXT)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 2 && arm_is_el2_enabled(env) && !(hcr & HCR_ENSCXT)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3
        && arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.scr_el3 & SCR_ENSCXT)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static const ARMCPRegInfo scxtnum_reginfo[] = {
    { .name = "SCXTNUM_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 13, .crm = 0, .opc2 = 7,
      .access = PL0_RW, .accessfn = access_scxtnum,
      .fgt = FGT_SCXTNUM_EL0,
      .fieldoffset = offsetof(CPUARMState, scxtnum_el[0]) },
    { .name = "SCXTNUM_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 7,
      .access = PL1_RW, .accessfn = access_scxtnum,
      .fgt = FGT_SCXTNUM_EL1,
      .fieldoffset = offsetof(CPUARMState, scxtnum_el[1]) },
    { .name = "SCXTNUM_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 7,
      .access = PL2_RW, .accessfn = access_scxtnum,
      .fieldoffset = offsetof(CPUARMState, scxtnum_el[2]) },
    { .name = "SCXTNUM_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 7,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, scxtnum_el[3]) },
};

static CPAccessResult access_fgt(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    if (arm_current_el(env) == 2 &&
        arm_feature(env, ARM_FEATURE_EL3) && !(env->cp15.scr_el3 & SCR_FGTEN)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static const ARMCPRegInfo fgt_reginfo[] = {
    { .name = "HFGRTR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL2_RW, .accessfn = access_fgt,
      .fieldoffset = offsetof(CPUARMState, cp15.fgt_read[FGTREG_HFGRTR]) },
    { .name = "HFGWTR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 5,
      .access = PL2_RW, .accessfn = access_fgt,
      .fieldoffset = offsetof(CPUARMState, cp15.fgt_write[FGTREG_HFGWTR]) },
    { .name = "HDFGRTR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 1, .opc2 = 4,
      .access = PL2_RW, .accessfn = access_fgt,
      .fieldoffset = offsetof(CPUARMState, cp15.fgt_read[FGTREG_HDFGRTR]) },
    { .name = "HDFGWTR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 1, .opc2 = 5,
      .access = PL2_RW, .accessfn = access_fgt,
      .fieldoffset = offsetof(CPUARMState, cp15.fgt_write[FGTREG_HDFGWTR]) },
    { .name = "HFGITR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 6,
      .access = PL2_RW, .accessfn = access_fgt,
      .fieldoffset = offsetof(CPUARMState, cp15.fgt_exec[FGTREG_HFGITR]) },
};
#endif /* TARGET_AARCH64 */

static CPAccessResult access_predinv(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    int el = arm_current_el(env);

    if (el == 0) {
        uint64_t sctlr = arm_sctlr(env, el);
        if (!(sctlr & SCTLR_EnRCTX)) {
            return CP_ACCESS_TRAP;
        }
    } else if (el == 1) {
        uint64_t hcr = arm_hcr_el2_eff(env);
        if (hcr & HCR_NV) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    return CP_ACCESS_OK;
}

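/*
 * QEMU does not model branch-prediction state, so the CFP/DVP/CPP
 * restriction-by-context ops below are registered as ARM_CP_NOP;
 * only the access checks above have architectural effect.
 */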
static const ARMCPRegInfo predinv_reginfo[] = {
    { .name = "CFP_RCTX", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 4,
      .fgt = FGT_CFPRCTX,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    { .name = "DVP_RCTX", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 5,
      .fgt = FGT_DVPRCTX,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    { .name = "CPP_RCTX", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 7,
      .fgt = FGT_CPPRCTX,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    /* Note the AArch32 opcodes have a different OPC1. */
    { .name = "CFPRCTX", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 4,
      .fgt = FGT_CFPRCTX,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    { .name = "DVPRCTX", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 5,
      .fgt = FGT_DVPRCTX,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    { .name = "CPPRCTX", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 7,
      .fgt = FGT_CPPRCTX,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
};

static uint64_t ccsidr2_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Read the high 32 bits of the current CCSIDR */
    return extract64(ccsidr_read(env, ri), 32, 32);
}

static const ARMCPRegInfo ccsidr2_reginfo[] = {
    { .name = "CCSIDR2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 2,
      .access = PL1_R,
      .accessfn = access_tid4,
      .readfn = ccsidr2_read, .type = ARM_CP_NO_RAW },
};

static CPAccessResult access_aa64_tid3(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID3)) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult access_aa32_tid3(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        return access_aa64_tid3(env, ri, isread);
    }

    return CP_ACCESS_OK;
}

static CPAccessResult access_jazelle(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID0)) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult access_joscr_jmcr(CPUARMState *env,
                                        const ARMCPRegInfo *ri, bool isread)
{
    /*
     * HSTR.TJDBX traps JOSCR and JMCR accesses, but it exists only
     * in v7A, not in v8A.
     */
    if (!arm_feature(env, ARM_FEATURE_V8) &&
        arm_current_el(env) < 2 && !arm_is_secure_below_el3(env) &&
        (env->cp15.hstr_el2 & HSTR_TJDBX)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

static const ARMCPRegInfo jazelle_regs[] = {
    { .name = "JIDR",
      .cp = 14, .crn = 0, .crm = 0, .opc1 = 7, .opc2 = 0,
      .access = PL1_R, .accessfn = access_jazelle,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "JOSCR",
      .cp = 14, .crn = 1, .crm = 0, .opc1 = 7, .opc2 = 0,
      .accessfn = access_joscr_jmcr,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "JMCR",
      .cp = 14, .crn = 2, .crm = 0, .opc1 = 7, .opc2 = 0,
      .accessfn = access_joscr_jmcr,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
};

static const ARMCPRegInfo contextidr_el2 = {
    .name = "CONTEXTIDR_EL2", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 1,
    .access = PL2_RW,
    .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[2])
};

static const ARMCPRegInfo vhe_reginfo[] = {
    { .name = "TTBR1_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 1,
      .access = PL2_RW, .writefn = vmsa_tcr_ttbr_el2_write,
      .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr1_el[2]) },
#ifndef CONFIG_USER_ONLY
    { .name = "CNTHV_CVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 2,
      .fieldoffset =
        offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].cval),
      .type = ARM_CP_IO, .access = PL2_RW,
      .writefn = gt_hv_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHV_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
      .resetfn = gt_hv_timer_reset,
      .readfn = gt_hv_tval_read, .writefn = gt_hv_tval_write },
    { .name = "CNTHV_CTL_EL2", .state = ARM_CP_STATE_BOTH,
      .type = ARM_CP_IO,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].ctl),
      .writefn = gt_hv_ctl_write, .raw_writefn = raw_write },
    { .name = "CNTP_CTL_EL02", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = e2h_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
      .writefn = gt_phys_ctl_write, .raw_writefn = raw_write },
    { .name = "CNTV_CTL_EL02", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = e2h_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
      .writefn = gt_virt_ctl_write, .raw_writefn = raw_write },
    { .name = "CNTP_TVAL_EL02", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = e2h_access,
      .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write },
    { .name = "CNTV_TVAL_EL02", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = e2h_access,
      .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write },
    { .name = "CNTP_CVAL_EL02", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 2,
      .type = ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .access = PL2_RW, .accessfn = e2h_access,
      .writefn = gt_phys_cval_write, .raw_writefn = raw_write },
    { .name = "CNTV_CVAL_EL02", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 2,
      .type = ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .access = PL2_RW, .accessfn = e2h_access,
      .writefn = gt_virt_cval_write, .raw_writefn = raw_write },
#endif
};
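
/*
 * Informative note: the CNT*_EL02 entries above are ARM_CP_ALIAS views
 * onto the same c14_timer state as the ordinary EL0 registers, which is
 * why they share fieldoffsets and write functions; e2h_access restricts
 * them to configurations where HCR_EL2.E2H makes the _EL02 names valid.
 */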

#ifndef CONFIG_USER_ONLY
static const ARMCPRegInfo ats1e1_reginfo[] = {
    { .name = "AT_S1E1RP", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .fgt = FGT_ATS1E1RP,
      .writefn = ats_write64 },
    { .name = "AT_S1E1WP", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .fgt = FGT_ATS1E1WP,
      .writefn = ats_write64 },
};

static const ARMCPRegInfo ats1cp_reginfo[] = {
    { .name = "ATS1CPRP",
      .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write },
    { .name = "ATS1CPWP",
      .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write },
};
#endif

/*
 * ACTLR2 and HACTLR2 map to ACTLR_EL1[63:32] and
 * ACTLR_EL2[63:32]. They exist only if the ID_MMFR4.AC2 field
 * is non-zero, which is never the case for ARMv7, optional in
 * ARMv8 and mandatory for ARMv8.2 and up.
 * ACTLR2 is banked for S and NS if EL3 is AArch32. Since QEMU's
 * implementation is RAZ/WI we can ignore this detail, as we
 * do for ACTLR.
 */
static const ARMCPRegInfo actlr2_hactlr2_reginfo[] = {
    { .name = "ACTLR2", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 3,
      .access = PL1_RW, .accessfn = access_tacr,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HACTLR2", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
};

void register_cp_regs_for_features(ARMCPU *cpu)
{
    /* Register all the coprocessor registers based on feature bits */
    CPUARMState *env = &cpu->env;
    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile has no coprocessor registers */
        return;
    }

    define_arm_cp_regs(cpu, cp_reginfo);
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /*
         * Must go early as it is full of wildcards that may be
         * overridden by later definitions.
         */
        define_arm_cp_regs(cpu, not_v8_cp_reginfo);
    }

    if (arm_feature(env, ARM_FEATURE_V6)) {
        /* The ID registers all have impdef reset values */
        ARMCPRegInfo v6_idregs[] = {
            { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_pfr0 },
            /*
             * ID_PFR1 is not a plain ARM_CP_CONST because we don't know
             * the value of the GIC field until after we define these regs.
             */
            { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1,
              .access = PL1_R,
#ifdef CONFIG_USER_ONLY
              .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_pfr1,
#else
              .type = ARM_CP_NO_RAW,
              .accessfn = access_aa32_tid3,
              .readfn = id_pfr1_read,
              .writefn = arm_cp_write_ignore
#endif
            },
            { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_dfr0 },
            { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->id_afr0 },
            { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_mmfr0 },
            { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_mmfr1 },
            { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_mmfr2 },
            { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_mmfr3 },
            { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar0 },
            { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar1 },
            { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar2 },
            { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar3 },
            { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar4 },
            { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar5 },
            { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_mmfr4 },
            { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar6 },
        };
        define_arm_cp_regs(cpu, v6_idregs);
        define_arm_cp_regs(cpu, v6_cp_reginfo);
    } else {
        define_arm_cp_regs(cpu, not_v6_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V6K)) {
        define_arm_cp_regs(cpu, v6k_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V7MP) &&
        !arm_feature(env, ARM_FEATURE_PMSA)) {
        define_arm_cp_regs(cpu, v7mp_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V7VE)) {
        define_arm_cp_regs(cpu, pmovsset_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V7)) {
        ARMCPRegInfo clidr = {
            .name = "CLIDR", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
            .access = PL1_R, .type = ARM_CP_CONST,
            .accessfn = access_tid4,
            .fgt = FGT_CLIDR_EL1,
            .resetvalue = cpu->clidr
        };
        define_one_arm_cp_reg(cpu, &clidr);
        define_arm_cp_regs(cpu, v7_cp_reginfo);
        define_debug_regs(cpu);
        define_pmu_regs(cpu);
    } else {
        define_arm_cp_regs(cpu, not_v7_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /*
         * v8 ID registers, which all have impdef reset values.
         * Note that within the ID register ranges the unused slots
         * must all be RAZ, not UNDEF; future architecture versions may
         * define new registers here.
         * ID registers which are AArch64 views of the AArch32 ID
         * registers which already existed in v6 and v7 are handled
         * elsewhere, in v6_idregs[].
         */
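        /*
         * For example, the "_RESERVED" entries below register unallocated
         * slots such as 3,0,0,4,2 (ID_AA64PFR2_EL1) as ARM_CP_CONST zero,
         * so guest reads of those encodings see RAZ rather than UNDEF.
         */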
        int i;
        ARMCPRegInfo v8_idregs[] = {
            /*
             * ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST in system
             * emulation because we don't know the right value for the
             * GIC field until after we define these regs.
             */
            { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
              .access = PL1_R,
#ifdef CONFIG_USER_ONLY
              .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_aa64pfr0
#else
              .type = ARM_CP_NO_RAW,
              .accessfn = access_aa64_tid3,
              .readfn = id_aa64pfr0_read,
              .writefn = arm_cp_write_ignore
#endif
            },
            { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64pfr1 },
            { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ZFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64zfr0 },
            { .name = "ID_AA64SMFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64smfr0 },
            { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64dfr0 },
            { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64dfr1 },
            { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->id_aa64afr0 },
            { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->id_aa64afr1 },
            { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64isar0 },
            { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64isar1 },
            { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64mmfr0 },
            { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64mmfr1 },
            { .name = "ID_AA64MMFR2_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64mmfr2 },
            { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
8395              .accessfn = access_aa64_tid3,
8396              .resetvalue = 0 },
8397            { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
8398              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6,
8399              .access = PL1_R, .type = ARM_CP_CONST,
8400              .accessfn = access_aa64_tid3,
8401              .resetvalue = 0 },
8402            { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
8403              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7,
8404              .access = PL1_R, .type = ARM_CP_CONST,
8405              .accessfn = access_aa64_tid3,
8406              .resetvalue = 0 },
8407            { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64,
8408              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
8409              .access = PL1_R, .type = ARM_CP_CONST,
8410              .accessfn = access_aa64_tid3,
8411              .resetvalue = cpu->isar.mvfr0 },
8412            { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64,
8413              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
8414              .access = PL1_R, .type = ARM_CP_CONST,
8415              .accessfn = access_aa64_tid3,
8416              .resetvalue = cpu->isar.mvfr1 },
8417            { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64,
8418              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
8419              .access = PL1_R, .type = ARM_CP_CONST,
8420              .accessfn = access_aa64_tid3,
8421              .resetvalue = cpu->isar.mvfr2 },
8422            /*
8423             * "0, c0, c3, {0,1,2}" are the encodings corresponding to
8424             * AArch64 MVFR[012]_EL1. Define the STATE_AA32 encoding
8425             * as RAZ, since it is in the "reserved for future ID
8426             * registers, RAZ" part of the AArch32 encoding space.
8427             */
8428            { .name = "RES_0_C0_C3_0", .state = ARM_CP_STATE_AA32,
8429              .cp = 15, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
8430              .access = PL1_R, .type = ARM_CP_CONST,
8431              .accessfn = access_aa64_tid3,
8432              .resetvalue = 0 },
8433            { .name = "RES_0_C0_C3_1", .state = ARM_CP_STATE_AA32,
8434              .cp = 15, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
8435              .access = PL1_R, .type = ARM_CP_CONST,
8436              .accessfn = access_aa64_tid3,
8437              .resetvalue = 0 },
8438            { .name = "RES_0_C0_C3_2", .state = ARM_CP_STATE_AA32,
8439              .cp = 15, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
8440              .access = PL1_R, .type = ARM_CP_CONST,
8441              .accessfn = access_aa64_tid3,
8442              .resetvalue = 0 },
            /*
             * Other encodings in "0, c0, c3, ..." are STATE_BOTH because
             * they're also RAZ for AArch64, and in v8 this part of the
             * space is gradually being filled with the AArch64 view of
             * new AArch32 ID registers.
             */
            { .name = "RES_0_C0_C3_3", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_PFR2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_pfr2 },
            { .name = "ID_DFR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_dfr1 },
            { .name = "ID_MMFR5", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_mmfr5 },
            { .name = "RES_0_C0_C3_7", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "PMCEID0", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .fgt = FGT_PMCEIDN_EL0,
              .resetvalue = extract64(cpu->pmceid0, 0, 32) },
            { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .fgt = FGT_PMCEIDN_EL0,
              .resetvalue = cpu->pmceid0 },
            { .name = "PMCEID1", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .fgt = FGT_PMCEIDN_EL0,
              .resetvalue = extract64(cpu->pmceid1, 0, 32) },
            { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .fgt = FGT_PMCEIDN_EL0,
              .resetvalue = cpu->pmceid1 },
        };
#ifdef CONFIG_USER_ONLY
        static const ARMCPRegUserSpaceInfo v8_user_idregs[] = {
            { .name = "ID_AA64PFR0_EL1",
              .exported_bits = R_ID_AA64PFR0_FP_MASK |
                               R_ID_AA64PFR0_ADVSIMD_MASK |
                               R_ID_AA64PFR0_SVE_MASK |
                               R_ID_AA64PFR0_DIT_MASK,
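              /* EL0/EL1 = 1 below advertises those ELs as AArch64-only. */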
              .fixed_bits = (0x1u << R_ID_AA64PFR0_EL0_SHIFT) |
                            (0x1u << R_ID_AA64PFR0_EL1_SHIFT) },
            { .name = "ID_AA64PFR1_EL1",
              .exported_bits = R_ID_AA64PFR1_BT_MASK |
                               R_ID_AA64PFR1_SSBS_MASK |
                               R_ID_AA64PFR1_MTE_MASK |
                               R_ID_AA64PFR1_SME_MASK },
            { .name = "ID_AA64PFR*_EL1_RESERVED",
              .is_glob = true },
            { .name = "ID_AA64ZFR0_EL1",
              .exported_bits = R_ID_AA64ZFR0_SVEVER_MASK |
                               R_ID_AA64ZFR0_AES_MASK |
                               R_ID_AA64ZFR0_BITPERM_MASK |
                               R_ID_AA64ZFR0_BFLOAT16_MASK |
                               R_ID_AA64ZFR0_SHA3_MASK |
                               R_ID_AA64ZFR0_SM4_MASK |
                               R_ID_AA64ZFR0_I8MM_MASK |
                               R_ID_AA64ZFR0_F32MM_MASK |
                               R_ID_AA64ZFR0_F64MM_MASK },
            { .name = "ID_AA64SMFR0_EL1",
              .exported_bits = R_ID_AA64SMFR0_F32F32_MASK |
                               R_ID_AA64SMFR0_B16F32_MASK |
                               R_ID_AA64SMFR0_F16F32_MASK |
                               R_ID_AA64SMFR0_I8I32_MASK |
                               R_ID_AA64SMFR0_F64F64_MASK |
                               R_ID_AA64SMFR0_I16I64_MASK |
                               R_ID_AA64SMFR0_FA64_MASK },
            { .name = "ID_AA64MMFR0_EL1",
              .exported_bits = R_ID_AA64MMFR0_ECV_MASK,
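              /*
               * 0xf below is the architected "not implemented" value for
               * the TGRAN4/TGRAN64 granule fields.
               */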
              .fixed_bits = (0xfu << R_ID_AA64MMFR0_TGRAN64_SHIFT) |
                            (0xfu << R_ID_AA64MMFR0_TGRAN4_SHIFT) },
            { .name = "ID_AA64MMFR1_EL1",
              .exported_bits = R_ID_AA64MMFR1_AFP_MASK },
            { .name = "ID_AA64MMFR2_EL1",
              .exported_bits = R_ID_AA64MMFR2_AT_MASK },
            { .name = "ID_AA64MMFR*_EL1_RESERVED",
              .is_glob = true },
            { .name = "ID_AA64DFR0_EL1",
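              /* DEBUGVER = 6 below is the baseline Armv8 debug architecture. */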
              .fixed_bits = (0x6u << R_ID_AA64DFR0_DEBUGVER_SHIFT) },
            { .name = "ID_AA64DFR1_EL1" },
            { .name = "ID_AA64DFR*_EL1_RESERVED",
              .is_glob = true },
            { .name = "ID_AA64AFR*",
              .is_glob = true },
            { .name = "ID_AA64ISAR0_EL1",
              .exported_bits = R_ID_AA64ISAR0_AES_MASK |
                               R_ID_AA64ISAR0_SHA1_MASK |
                               R_ID_AA64ISAR0_SHA2_MASK |
                               R_ID_AA64ISAR0_CRC32_MASK |
                               R_ID_AA64ISAR0_ATOMIC_MASK |
                               R_ID_AA64ISAR0_RDM_MASK |
                               R_ID_AA64ISAR0_SHA3_MASK |
                               R_ID_AA64ISAR0_SM3_MASK |
                               R_ID_AA64ISAR0_SM4_MASK |
                               R_ID_AA64ISAR0_DP_MASK |
                               R_ID_AA64ISAR0_FHM_MASK |
                               R_ID_AA64ISAR0_TS_MASK |
                               R_ID_AA64ISAR0_RNDR_MASK },
            { .name = "ID_AA64ISAR1_EL1",
              .exported_bits = R_ID_AA64ISAR1_DPB_MASK |
                               R_ID_AA64ISAR1_APA_MASK |
                               R_ID_AA64ISAR1_API_MASK |
                               R_ID_AA64ISAR1_JSCVT_MASK |
                               R_ID_AA64ISAR1_FCMA_MASK |
                               R_ID_AA64ISAR1_LRCPC_MASK |
                               R_ID_AA64ISAR1_GPA_MASK |
                               R_ID_AA64ISAR1_GPI_MASK |
                               R_ID_AA64ISAR1_FRINTTS_MASK |
                               R_ID_AA64ISAR1_SB_MASK |
                               R_ID_AA64ISAR1_BF16_MASK |
                               R_ID_AA64ISAR1_DGH_MASK |
                               R_ID_AA64ISAR1_I8MM_MASK },
            { .name = "ID_AA64ISAR2_EL1",
              .exported_bits = R_ID_AA64ISAR2_WFXT_MASK |
                               R_ID_AA64ISAR2_RPRES_MASK |
                               R_ID_AA64ISAR2_GPA3_MASK |
                               R_ID_AA64ISAR2_APA3_MASK },
            { .name = "ID_AA64ISAR*_EL1_RESERVED",
              .is_glob = true },
        };
        modify_arm_cp_regs(v8_idregs, v8_user_idregs);
#endif
        /* RVBAR_EL1 is only implemented if EL1 is the highest EL */
        if (!arm_feature(env, ARM_FEATURE_EL3) &&
            !arm_feature(env, ARM_FEATURE_EL2)) {
            ARMCPRegInfo rvbar = {
                .name = "RVBAR_EL1", .state = ARM_CP_STATE_BOTH,
                .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
                .access = PL1_R,
                .fieldoffset = offsetof(CPUARMState, cp15.rvbar),
            };
            define_one_arm_cp_reg(cpu, &rvbar);
        }
        define_arm_cp_regs(cpu, v8_idregs);
        define_arm_cp_regs(cpu, v8_cp_reginfo);

        for (i = 4; i < 16; i++) {
            /*
             * Encodings in "0, c0, {c4-c7}, {0-7}" are RAZ for AArch32.
             * For pre-v8 cores there are RAZ patterns for these in
             * id_pre_v8_midr_cp_reginfo[]; for v8 we do that here.
             * v8 extends the "must RAZ" part of the ID register space
             * to also cover c0, 0, c{8-15}, {0-7}.
             * These are STATE_AA32 because in the AArch64 sysreg space
             * c4-c7 is where the AArch64 ID registers live (and we've
             * already defined those in v8_idregs[]), and c8-c15 are not
             * "must RAZ" for AArch64.
             */
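            /* e.g. i = 4 yields "RES_0_C0_C4_X", covering c0, c4, opc2 {0-7}. */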
            g_autofree char *name = g_strdup_printf("RES_0_C0_C%d_X", i);
            ARMCPRegInfo v8_aa32_raz_idregs = {
                .name = name,
                .state = ARM_CP_STATE_AA32,
                .cp = 15, .opc1 = 0, .crn = 0, .crm = i, .opc2 = CP_ANY,
                .access = PL1_R, .type = ARM_CP_CONST,
                .accessfn = access_aa64_tid3,
                .resetvalue = 0 };
            define_one_arm_cp_reg(cpu, &v8_aa32_raz_idregs);
        }
    }

    /*
     * Register the base EL2 cpregs.
     * Pre-v8, these registers are implemented only as part of the
     * Virtualization Extensions (EL2 present).  Beginning with v8,
     * if EL2 is missing but EL3 is enabled, most of these become
     * RES0 from EL3, with some specific exceptions.
     */
    if (arm_feature(env, ARM_FEATURE_EL2)
        || (arm_feature(env, ARM_FEATURE_EL3)
            && arm_feature(env, ARM_FEATURE_V8))) {
        uint64_t vmpidr_def = mpidr_read_val(env);
        ARMCPRegInfo vpidr_regs[] = {
            { .name = "VPIDR", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL2_RW, .accessfn = access_el3_aa32ns,
              .resetvalue = cpu->midr,
              .type = ARM_CP_ALIAS | ARM_CP_EL3_NO_EL2_C_NZ,
              .fieldoffset = offsetoflow32(CPUARMState, cp15.vpidr_el2) },
            { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL2_RW, .resetvalue = cpu->midr,
              .type = ARM_CP_EL3_NO_EL2_C_NZ,
              .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
            { .name = "VMPIDR", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
              .access = PL2_RW, .accessfn = access_el3_aa32ns,
              .resetvalue = vmpidr_def,
              .type = ARM_CP_ALIAS | ARM_CP_EL3_NO_EL2_C_NZ,
              .fieldoffset = offsetoflow32(CPUARMState, cp15.vmpidr_el2) },
            { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
              .access = PL2_RW, .resetvalue = vmpidr_def,
              .type = ARM_CP_EL3_NO_EL2_C_NZ,
              .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
        };
        /*
         * The only field of MDCR_EL2 that has a defined architectural reset
         * value is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N.
         */
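        /* HPMN is MDCR_EL2[4:0], so the PMU counter count can seed it directly. */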
        ARMCPRegInfo mdcr_el2 = {
            .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH, .type = ARM_CP_IO,
            .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
            .writefn = mdcr_el2_write,
            .access = PL2_RW, .resetvalue = pmu_num_counters(env),
            .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2),
        };
        define_one_arm_cp_reg(cpu, &mdcr_el2);
        define_arm_cp_regs(cpu, vpidr_regs);
        define_arm_cp_regs(cpu, el2_cp_reginfo);
        if (arm_feature(env, ARM_FEATURE_V8)) {
            define_arm_cp_regs(cpu, el2_v8_cp_reginfo);
        }
        if (cpu_isar_feature(aa64_sel2, cpu)) {
            define_arm_cp_regs(cpu, el2_sec_cp_reginfo);
        }
        /* RVBAR_EL2 is only implemented if EL2 is the highest EL */
        if (!arm_feature(env, ARM_FEATURE_EL3)) {
            ARMCPRegInfo rvbar[] = {
                {
                    .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64,
                    .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1,
                    .access = PL2_R,
                    .fieldoffset = offsetof(CPUARMState, cp15.rvbar),
                },
                {   .name = "RVBAR", .type = ARM_CP_ALIAS,
                    .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
                    .access = PL2_R,
                    .fieldoffset = offsetof(CPUARMState, cp15.rvbar),
                },
            };
            define_arm_cp_regs(cpu, rvbar);
        }
    }

    /* Register the base EL3 cpregs. */
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        define_arm_cp_regs(cpu, el3_cp_reginfo);
        ARMCPRegInfo el3_regs[] = {
            { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1,
              .access = PL3_R,
              .fieldoffset = offsetof(CPUARMState, cp15.rvbar),
            },
            { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0,
              .access = PL3_RW,
              .raw_writefn = raw_write, .writefn = sctlr_write,
              .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]),
              .resetvalue = cpu->reset_sctlr },
        };

        define_arm_cp_regs(cpu, el3_regs);
    }
    /*
     * The behaviour of NSACR is sufficiently various that we don't
     * try to describe it in a single reginfo:
     *  if EL3 is 64 bit, then trap to EL3 from S EL1,
     *     reads as constant 0xc00 from NS EL1 and NS EL2
     *  if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2
     *  if v7 without EL3, register doesn't exist
     *  if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2
     */
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
            static const ARMCPRegInfo nsacr = {
                .name = "NSACR", .type = ARM_CP_CONST,
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL1_RW, .accessfn = nsacr_access,
                .resetvalue = 0xc00
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        } else {
            static const ARMCPRegInfo nsacr = {
                .name = "NSACR",
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL3_RW | PL1_R,
                .resetvalue = 0,
                .fieldoffset = offsetof(CPUARMState, cp15.nsacr)
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        }
    } else {
        if (arm_feature(env, ARM_FEATURE_V8)) {
            static const ARMCPRegInfo nsacr = {
                .name = "NSACR", .type = ARM_CP_CONST,
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL1_R,
                .resetvalue = 0xc00
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        }
    }

    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        if (arm_feature(env, ARM_FEATURE_V6)) {
            /* PMSAv6 not implemented */
            assert(arm_feature(env, ARM_FEATURE_V7));
            define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
            define_arm_cp_regs(cpu, pmsav7_cp_reginfo);
        } else {
            define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
        }
    } else {
        define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
        define_arm_cp_regs(cpu, vmsa_cp_reginfo);
        /* TTBCR2 is introduced with ARMv8.2-AA32HPD.  */
        if (cpu_isar_feature(aa32_hpd, cpu)) {
            define_one_arm_cp_reg(cpu, &ttbcr2_reginfo);
        }
    }
    if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
        define_arm_cp_regs(cpu, t2ee_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
        define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_VAPA)) {
        define_arm_cp_regs(cpu, vapa_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
        define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
        define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
        define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
        define_arm_cp_regs(cpu, omap_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
        define_arm_cp_regs(cpu, strongarm_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_XSCALE)) {
        define_arm_cp_regs(cpu, xscale_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
        define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        define_arm_cp_regs(cpu, lpae_cp_reginfo);
    }
    if (cpu_isar_feature(aa32_jazelle, cpu)) {
        define_arm_cp_regs(cpu, jazelle_regs);
    }
    /*
     * Slightly awkwardly, the OMAP and StrongARM cores need all of
     * cp15 crn=0 to be writes-ignored, whereas for other cores they should
     * be read-only (i.e. a write causes an UNDEF exception).
     */
    {
        ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = {
            /*
             * Pre-v8 MIDR space.
             * Note that the MIDR isn't a simple constant register because
             * of the TI925 behaviour where writes to another register can
             * cause the MIDR value to change.
             *
             * Unimplemented registers in the c15 0 0 0 space default to
             * MIDR. Define MIDR first as this entire space, then CTR, TCMTR
             * and friends override accordingly.
             */
            { .name = "MIDR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .resetvalue = cpu->midr,
              .writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
              .readfn = midr_read,
              .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
              .type = ARM_CP_OVERRIDE },
            /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
        };
        ARMCPRegInfo id_v8_midr_cp_reginfo[] = {
            { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr,
              .fgt = FGT_MIDR_EL1,
              .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
              .readfn = midr_read },
            /* crn = 0 op1 = 0 crm = 0 op2 = 7 : AArch32 aliases of MIDR */
            { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7,
              .access = PL1_R, .resetvalue = cpu->midr },
            { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6,
              .access = PL1_R,
              .accessfn = access_aa64_tid1,
              .fgt = FGT_REVIDR_EL1,
              .type = ARM_CP_CONST, .resetvalue = cpu->revidr },
        };
        ARMCPRegInfo id_v8_midr_alias_cp_reginfo = {
            .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
            .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
            .access = PL1_R, .resetvalue = cpu->midr
        };
        ARMCPRegInfo id_cp_reginfo[] = {
            /* These are common to v8 and pre-v8 */
            { .name = "CTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
              .access = PL1_R, .accessfn = ctr_el0_access,
              .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
            { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
              .access = PL0_R, .accessfn = ctr_el0_access,
              .fgt = FGT_CTR_EL0,
              .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
            /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */
            { .name = "TCMTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
              .access = PL1_R,
              .accessfn = access_aa32_tid1,
              .type = ARM_CP_CONST, .resetvalue = 0 },
        };
        /* TLBTR is specific to VMSA */
        ARMCPRegInfo id_tlbtr_reginfo = {
              .name = "TLBTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
              .access = PL1_R,
              .accessfn = access_aa32_tid1,
              .type = ARM_CP_CONST, .resetvalue = 0,
        };
        /* MPUIR is specific to PMSA V6+ */
        ARMCPRegInfo id_mpuir_reginfo = {
              .name = "MPUIR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->pmsav7_dregion << 8
        };
        /* HMPUIR is specific to PMSA V8 */
        ARMCPRegInfo id_hmpuir_reginfo = {
            .name = "HMPUIR",
            .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 4,
            .access = PL2_R, .type = ARM_CP_CONST,
            .resetvalue = cpu->pmsav8r_hdregion
        };
        static const ARMCPRegInfo crn0_wi_reginfo = {
            .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
            .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
            .type = ARM_CP_NOP | ARM_CP_OVERRIDE
        };
#ifdef CONFIG_USER_ONLY
        static const ARMCPRegUserSpaceInfo id_v8_user_midr_cp_reginfo[] = {
            { .name = "MIDR_EL1",
              .exported_bits = R_MIDR_EL1_REVISION_MASK |
                               R_MIDR_EL1_PARTNUM_MASK |
                               R_MIDR_EL1_ARCHITECTURE_MASK |
                               R_MIDR_EL1_VARIANT_MASK |
                               R_MIDR_EL1_IMPLEMENTER_MASK },
            { .name = "REVIDR_EL1" },
        };
        modify_arm_cp_regs(id_v8_midr_cp_reginfo, id_v8_user_midr_cp_reginfo);
#endif
        if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
            arm_feature(env, ARM_FEATURE_STRONGARM)) {
            size_t i;
            /*
             * Register the blanket "writes ignored" value first to cover the
             * whole space. Then update the specific ID registers to allow write
             * access, so that they ignore writes rather than causing them to
             * UNDEF.
             */
            define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
            for (i = 0; i < ARRAY_SIZE(id_pre_v8_midr_cp_reginfo); ++i) {
                id_pre_v8_midr_cp_reginfo[i].access = PL1_RW;
            }
            for (i = 0; i < ARRAY_SIZE(id_cp_reginfo); ++i) {
                id_cp_reginfo[i].access = PL1_RW;
            }
            id_mpuir_reginfo.access = PL1_RW;
            id_tlbtr_reginfo.access = PL1_RW;
        }
        if (arm_feature(env, ARM_FEATURE_V8)) {
            define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo);
            if (!arm_feature(env, ARM_FEATURE_PMSA)) {
                define_one_arm_cp_reg(cpu, &id_v8_midr_alias_cp_reginfo);
            }
        } else {
            define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo);
        }
        define_arm_cp_regs(cpu, id_cp_reginfo);
        if (!arm_feature(env, ARM_FEATURE_PMSA)) {
            define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo);
        } else if (arm_feature(env, ARM_FEATURE_PMSA) &&
                   arm_feature(env, ARM_FEATURE_V8)) {
            uint32_t i = 0;
            char *tmp_string;

            define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
            define_one_arm_cp_reg(cpu, &id_hmpuir_reginfo);
            define_arm_cp_regs(cpu, pmsav8r_cp_reginfo);

            /* Register alias is only valid for first 32 indexes */
            for (i = 0; i < MIN(cpu->pmsav7_dregion, 32); ++i) {
                uint8_t crm = 0b1000 | extract32(i, 1, 3);
                uint8_t opc1 = extract32(i, 4, 1);
                uint8_t opc2 = extract32(i, 0, 1) << 2;
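                /*
                 * Illustrative worked example: for i = 5 (0b00101) the
                 * extractions above give crm = 0b1000 | 0b010 = 10,
                 * opc1 = 0 and opc2 = 0b100, i.e. PRBAR5 is encoded as
                 * p15, 0, <Rt>, c6, c10, 4 (and PRLAR5 below as opc2 = 5).
                 */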

                tmp_string = g_strdup_printf("PRBAR%u", i);
                ARMCPRegInfo tmp_prbarn_reginfo = {
                    .name = tmp_string, .type = ARM_CP_ALIAS | ARM_CP_NO_RAW,
                    .cp = 15, .opc1 = opc1, .crn = 6, .crm = crm, .opc2 = opc2,
                    .access = PL1_RW, .resetvalue = 0,
                    .accessfn = access_tvm_trvm,
                    .writefn = pmsav8r_regn_write, .readfn = pmsav8r_regn_read
                };
                define_one_arm_cp_reg(cpu, &tmp_prbarn_reginfo);
                g_free(tmp_string);

                opc2 = extract32(i, 0, 1) << 2 | 0x1;
                tmp_string = g_strdup_printf("PRLAR%u", i);
                ARMCPRegInfo tmp_prlarn_reginfo = {
                    .name = tmp_string, .type = ARM_CP_ALIAS | ARM_CP_NO_RAW,
                    .cp = 15, .opc1 = opc1, .crn = 6, .crm = crm, .opc2 = opc2,
                    .access = PL1_RW, .resetvalue = 0,
                    .accessfn = access_tvm_trvm,
                    .writefn = pmsav8r_regn_write, .readfn = pmsav8r_regn_read
                };
                define_one_arm_cp_reg(cpu, &tmp_prlarn_reginfo);
                g_free(tmp_string);
            }

            /* Register alias is only valid for first 32 indexes */
            for (i = 0; i < MIN(cpu->pmsav8r_hdregion, 32); ++i) {
                uint8_t crm = 0b1000 | extract32(i, 1, 3);
                uint8_t opc1 = 0b100 | extract32(i, 4, 1);
                uint8_t opc2 = extract32(i, 0, 1) << 2;
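                /*
                 * Same index-to-encoding scheme as PRBAR/PRLAR above,
                 * but moved into the EL2 space by setting opc1 bit 2.
                 */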

                tmp_string = g_strdup_printf("HPRBAR%u", i);
                ARMCPRegInfo tmp_hprbarn_reginfo = {
                    .name = tmp_string,
                    .type = ARM_CP_NO_RAW,
                    .cp = 15, .opc1 = opc1, .crn = 6, .crm = crm, .opc2 = opc2,
                    .access = PL2_RW, .resetvalue = 0,
                    .writefn = pmsav8r_regn_write, .readfn = pmsav8r_regn_read
                };
                define_one_arm_cp_reg(cpu, &tmp_hprbarn_reginfo);
                g_free(tmp_string);

                opc2 = extract32(i, 0, 1) << 2 | 0x1;
                tmp_string = g_strdup_printf("HPRLAR%u", i);
                ARMCPRegInfo tmp_hprlarn_reginfo = {
                    .name = tmp_string,
                    .type = ARM_CP_NO_RAW,
                    .cp = 15, .opc1 = opc1, .crn = 6, .crm = crm, .opc2 = opc2,
                    .access = PL2_RW, .resetvalue = 0,
                    .writefn = pmsav8r_regn_write, .readfn = pmsav8r_regn_read
                };
                define_one_arm_cp_reg(cpu, &tmp_hprlarn_reginfo);
                g_free(tmp_string);
            }
        } else if (arm_feature(env, ARM_FEATURE_V7)) {
            define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
        }
    }

    if (arm_feature(env, ARM_FEATURE_MPIDR)) {
        ARMCPRegInfo mpidr_cp_reginfo[] = {
            { .name = "MPIDR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
              .fgt = FGT_MPIDR_EL1,
              .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
        };
#ifdef CONFIG_USER_ONLY
        static const ARMCPRegUserSpaceInfo mpidr_user_cp_reginfo[] = {
            { .name = "MPIDR_EL1",
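              /* Bit 31 of MPIDR_EL1 is RES1. */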
              .fixed_bits = 0x0000000080000000 },
        };
        modify_arm_cp_regs(mpidr_cp_reginfo, mpidr_user_cp_reginfo);
#endif
        define_arm_cp_regs(cpu, mpidr_cp_reginfo);
    }

    if (arm_feature(env, ARM_FEATURE_AUXCR)) {
        ARMCPRegInfo auxcr_reginfo[] = {
            { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL1_RW, .accessfn = access_tacr,
              .type = ARM_CP_CONST, .resetvalue = cpu->reset_auxcr },
            { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL2_RW, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL3_RW, .type = ARM_CP_CONST,
              .resetvalue = 0 },
        };
        define_arm_cp_regs(cpu, auxcr_reginfo);
        if (cpu_isar_feature(aa32_ac2, cpu)) {
            define_arm_cp_regs(cpu, actlr2_hactlr2_reginfo);
        }
    }

    if (arm_feature(env, ARM_FEATURE_CBAR)) {
        /*
         * CBAR is IMPDEF, but common on Arm Cortex-A implementations.
         * There are two flavours:
         *  (1) older 32-bit only cores have a simple 32-bit CBAR
         *  (2) 64-bit cores have a 64-bit CBAR visible to AArch64, plus a
         *      32-bit register visible to AArch32 at a different encoding
         *      to the "flavour 1" register and with the bits rearranged to
         *      be able to squash a 64-bit address into the 32-bit view.
         * We distinguish the two via the ARM_FEATURE_AARCH64 flag, but
         * in future if we support AArch32-only configs of some of the
         * AArch64 cores we might need to add a specific feature flag
         * to indicate cores with "flavour 2" CBAR.
         */
        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
            /* 32 bit view is [31:18] 0...0 [43:32]. */
            uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18)
                | extract64(cpu->reset_cbar, 32, 12);
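            /*
             * Illustrative example: reset_cbar = 0x4_8000_0000 (bits 34
             * and 31 set) gives cbar32 = 0x80000004: bit 31 stays put
             * and bit 34 lands at bit 2 of the 32-bit view.
             */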
            ARMCPRegInfo cbar_reginfo[] = {
                { .name = "CBAR",
                  .type = ARM_CP_CONST,
                  .cp = 15, .crn = 15, .crm = 3, .opc1 = 1, .opc2 = 0,
                  .access = PL1_R, .resetvalue = cbar32 },
                { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64,
                  .type = ARM_CP_CONST,
                  .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0,
                  .access = PL1_R, .resetvalue = cpu->reset_cbar },
            };
            /* We don't currently implement a read/write 64-bit CBAR */
            assert(arm_feature(env, ARM_FEATURE_CBAR_RO));
            define_arm_cp_regs(cpu, cbar_reginfo);
        } else {
            ARMCPRegInfo cbar = {
                .name = "CBAR",
                .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
                .access = PL1_R | PL3_W, .resetvalue = cpu->reset_cbar,
                .fieldoffset = offsetof(CPUARMState,
                                        cp15.c15_config_base_address)
            };
            if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
                cbar.access = PL1_R;
                cbar.fieldoffset = 0;
                cbar.type = ARM_CP_CONST;
            }
            define_one_arm_cp_reg(cpu, &cbar);
        }
    }

    if (arm_feature(env, ARM_FEATURE_VBAR)) {
        static const ARMCPRegInfo vbar_cp_reginfo[] = {
            { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
              .access = PL1_RW, .writefn = vbar_write,
              .fgt = FGT_VBAR_EL1,
              .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s),
                                     offsetof(CPUARMState, cp15.vbar_ns) },
              .resetvalue = 0 },
        };
        define_arm_cp_regs(cpu, vbar_cp_reginfo);
    }

    /* Generic registers whose values depend on the implementation */
    {
        ARMCPRegInfo sctlr = {
            .name = "SCTLR", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
            .access = PL1_RW, .accessfn = access_tvm_trvm,
            .fgt = FGT_SCTLR_EL1,
            .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s),
                                   offsetof(CPUARMState, cp15.sctlr_ns) },
            .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
            .raw_writefn = raw_write,
        };
        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
            /*
             * Normally we would always end the TB on an SCTLR write, but Linux
             * arch/arm/mach-pxa/sleep.S expects two instructions following
             * an MMU enable to execute from cache.  Imitate this behaviour.
             */
            sctlr.type |= ARM_CP_SUPPRESS_TB_END;
        }
        define_one_arm_cp_reg(cpu, &sctlr);

        if (arm_feature(env, ARM_FEATURE_PMSA) &&
            arm_feature(env, ARM_FEATURE_V8)) {
            ARMCPRegInfo vsctlr = {
                .name = "VSCTLR", .state = ARM_CP_STATE_AA32,
                .cp = 15, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
                .access = PL2_RW, .resetvalue = 0x0,
                .fieldoffset = offsetoflow32(CPUARMState, cp15.vsctlr),
            };
            define_one_arm_cp_reg(cpu, &vsctlr);
        }
    }

    if (cpu_isar_feature(aa64_lor, cpu)) {
        define_arm_cp_regs(cpu, lor_reginfo);
    }
    if (cpu_isar_feature(aa64_pan, cpu)) {
        define_one_arm_cp_reg(cpu, &pan_reginfo);
    }
#ifndef CONFIG_USER_ONLY
    if (cpu_isar_feature(aa64_ats1e1, cpu)) {
        define_arm_cp_regs(cpu, ats1e1_reginfo);
    }
    if (cpu_isar_feature(aa32_ats1e1, cpu)) {
        define_arm_cp_regs(cpu, ats1cp_reginfo);
    }
#endif
    if (cpu_isar_feature(aa64_uao, cpu)) {
        define_one_arm_cp_reg(cpu, &uao_reginfo);
    }

    if (cpu_isar_feature(aa64_dit, cpu)) {
        define_one_arm_cp_reg(cpu, &dit_reginfo);
    }
    if (cpu_isar_feature(aa64_ssbs, cpu)) {
        define_one_arm_cp_reg(cpu, &ssbs_reginfo);
    }
    if (cpu_isar_feature(any_ras, cpu)) {
        define_arm_cp_regs(cpu, minimal_ras_reginfo);
    }

    if (cpu_isar_feature(aa64_vh, cpu) ||
        cpu_isar_feature(aa64_debugv8p2, cpu)) {
        define_one_arm_cp_reg(cpu, &contextidr_el2);
    }
    if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) {
        define_arm_cp_regs(cpu, vhe_reginfo);
    }

    if (cpu_isar_feature(aa64_sve, cpu)) {
        define_arm_cp_regs(cpu, zcr_reginfo);
    }

    if (cpu_isar_feature(aa64_hcx, cpu)) {
        define_one_arm_cp_reg(cpu, &hcrx_el2_reginfo);
    }

#ifdef TARGET_AARCH64
    if (cpu_isar_feature(aa64_sme, cpu)) {
        define_arm_cp_regs(cpu, sme_reginfo);
    }
    if (cpu_isar_feature(aa64_pauth, cpu)) {
        define_arm_cp_regs(cpu, pauth_reginfo);
    }
    if (cpu_isar_feature(aa64_rndr, cpu)) {
        define_arm_cp_regs(cpu, rndr_reginfo);
    }
    if (cpu_isar_feature(aa64_tlbirange, cpu)) {
        define_arm_cp_regs(cpu, tlbirange_reginfo);
    }
    if (cpu_isar_feature(aa64_tlbios, cpu)) {
        define_arm_cp_regs(cpu, tlbios_reginfo);
    }
    /* Data Cache clean instructions up to PoP */
    if (cpu_isar_feature(aa64_dcpop, cpu)) {
        define_one_arm_cp_reg(cpu, dcpop_reg);

        if (cpu_isar_feature(aa64_dcpodp, cpu)) {
            define_one_arm_cp_reg(cpu, dcpodp_reg);
        }
    }

    /*
     * If full MTE is enabled, add all of the system registers.
     * If only "instructions available at EL0" are enabled,
     * then define only a RAZ/WI version of PSTATE.TCO.
     */
    if (cpu_isar_feature(aa64_mte, cpu)) {
        define_arm_cp_regs(cpu, mte_reginfo);
        define_arm_cp_regs(cpu, mte_el0_cacheop_reginfo);
    } else if (cpu_isar_feature(aa64_mte_insn_reg, cpu)) {
        define_arm_cp_regs(cpu, mte_tco_ro_reginfo);
        define_arm_cp_regs(cpu, mte_el0_cacheop_reginfo);
    }

    if (cpu_isar_feature(aa64_scxtnum, cpu)) {
        define_arm_cp_regs(cpu, scxtnum_reginfo);
    }

    if (cpu_isar_feature(aa64_fgt, cpu)) {
        define_arm_cp_regs(cpu, fgt_reginfo);
    }

    if (cpu_isar_feature(aa64_rme, cpu)) {
        define_arm_cp_regs(cpu, rme_reginfo);
        if (cpu_isar_feature(aa64_mte, cpu)) {
            define_arm_cp_regs(cpu, rme_mte_reginfo);
        }
    }
#endif

    if (cpu_isar_feature(any_predinv, cpu)) {
        define_arm_cp_regs(cpu, predinv_reginfo);
    }

    if (cpu_isar_feature(any_ccidx, cpu)) {
        define_arm_cp_regs(cpu, ccsidr2_reginfo);
    }

#ifndef CONFIG_USER_ONLY
    /*
     * Register redirections and aliases must be done last,
     * after the registers from the other extensions have been defined.
     */
    if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) {
        define_arm_vh_e2h_redirects_aliases(cpu);
    }
#endif
}

/* Sort alphabetically by type name, except for "any". */
static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b)
{
    ObjectClass *class_a = (ObjectClass *)a;
    ObjectClass *class_b = (ObjectClass *)b;
    const char *name_a, *name_b;

    name_a = object_class_get_name(class_a);
    name_b = object_class_get_name(class_b);
    if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) {
        return 1;
    } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) {
        return -1;
    } else {
        return strcmp(name_a, name_b);
    }
}

static void arm_cpu_list_entry(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    CPUClass *cc = CPU_CLASS(oc);
    const char *typename;
    char *name;

    typename = object_class_get_name(oc);
    name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU));
    if (cc->deprecation_note) {
        qemu_printf("  %s (deprecated)\n", name);
    } else {
        qemu_printf("  %s\n", name);
    }
    g_free(name);
}

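/* Print the list of CPU types we can emulate, e.g. for "-cpu help". */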
void arm_cpu_list(void)
{
    GSList *list;

    list = object_class_get_list(TYPE_ARM_CPU, false);
    list = g_slist_sort(list, arm_cpu_list_compare);
    qemu_printf("Available CPUs:\n");
    g_slist_foreach(list, arm_cpu_list_entry, NULL);
    g_slist_free(list);
}

/*
 * Private utility function for define_one_arm_cp_reg_with_opaque():
 * add a single reginfo struct to the hash table.
 */
static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
                                   void *opaque, CPState state,
                                   CPSecureState secstate,
                                   int crm, int opc1, int opc2,
                                   const char *name)
{
    CPUARMState *env = &cpu->env;
    uint32_t key;
    ARMCPRegInfo *r2;
    bool is64 = r->type & ARM_CP_64BIT;
    bool ns = secstate & ARM_CP_SECSTATE_NS;
    int cp = r->cp;
    size_t name_len;
    bool make_const;

    switch (state) {
    case ARM_CP_STATE_AA32:
        /* We assume it is a cp15 register if the .cp field is left unset. */
        if (cp == 0 && r->state == ARM_CP_STATE_BOTH) {
            cp = 15;
        }
        key = ENCODE_CP_REG(cp, is64, ns, r->crn, crm, opc1, opc2);
        break;
    case ARM_CP_STATE_AA64:
        /*
         * To allow abbreviation of ARMCPRegInfo definitions, we treat
         * cp == 0 as equivalent to the value for "standard guest-visible
         * sysreg".  STATE_BOTH definitions are also always "standard sysreg"
         * in their AArch64 view (the .cp value may be non-zero for the
         * benefit of the AArch32 view).
         */
        if (cp == 0 || r->state == ARM_CP_STATE_BOTH) {
            cp = CP_REG_ARM64_SYSREG_CP;
        }
        key = ENCODE_AA64_CP_REG(cp, r->crn, crm, r->opc0, opc1, opc2);
        break;
    default:
        g_assert_not_reached();
    }

    /* Overriding of an existing definition must be explicitly requested. */
    if (!(r->type & ARM_CP_OVERRIDE)) {
        const ARMCPRegInfo *oldreg = get_arm_cp_reginfo(cpu->cp_regs, key);
        if (oldreg) {
            assert(oldreg->type & ARM_CP_OVERRIDE);
        }
    }

    /*
     * Eliminate registers that are not present because the EL is missing.
     * Doing this here makes it easier to put all registers for a given
     * feature into the same ARMCPRegInfo array and define them all at once.
     */
    make_const = false;
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        /*
         * An EL2 register without EL2 but with EL3 is (usually) RES0.
         * See rule RJFFP in section D1.1.3 of DDI0487H.a.
         */
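        /*
         * r->access encodes rights as two bits (W, R) per EL from EL0
         * upwards, and each PLx constant implies the same right at all
         * higher ELs, so ctz32()/2 is the lowest EL with any access.
         */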
        int min_el = ctz32(r->access) / 2;
        if (min_el == 2 && !arm_feature(env, ARM_FEATURE_EL2)) {
            if (r->type & ARM_CP_EL3_NO_EL2_UNDEF) {
                return;
            }
            make_const = !(r->type & ARM_CP_EL3_NO_EL2_KEEP);
        }
    } else {
        CPAccessRights max_el = (arm_feature(env, ARM_FEATURE_EL2)
                                 ? PL2_RW : PL1_RW);
        if ((r->access & max_el) == 0) {
            return;
        }
    }

    /* Combine cpreg and name into one allocation. */
    name_len = strlen(name) + 1;
    r2 = g_malloc(sizeof(*r2) + name_len);
    *r2 = *r;
    r2->name = memcpy(r2 + 1, name, name_len);

    /*
     * Update fields to match the instantiation, overwriting wildcards
     * such as CP_ANY, ARM_CP_STATE_BOTH, or ARM_CP_SECSTATE_BOTH.
     */
    r2->cp = cp;
    r2->crm = crm;
    r2->opc1 = opc1;
    r2->opc2 = opc2;
    r2->state = state;
    r2->secure = secstate;
    if (opaque) {
        r2->opaque = opaque;
    }

    if (make_const) {
        /* This should not have been a very special register to begin with. */
        int old_special = r2->type & ARM_CP_SPECIAL_MASK;
        assert(old_special == 0 || old_special == ARM_CP_NOP);
        /*
         * Set the special function to CONST, retaining the other flags.
         * This is important for e.g. ARM_CP_SVE so that we still
         * take the SVE trap if CPTR_EL3.EZ == 0.
         */
        r2->type = (r2->type & ~ARM_CP_SPECIAL_MASK) | ARM_CP_CONST;
        /*
         * Usually, these registers become RES0, but there are a few
         * special cases like VPIDR_EL2 which have a constant non-zero
         * value with writes ignored.
         */
        if (!(r->type & ARM_CP_EL3_NO_EL2_C_NZ)) {
            r2->resetvalue = 0;
        }
        /*
         * ARM_CP_CONST has precedence, so removing the callbacks and
         * offsets is not strictly necessary, but it is potentially
         * less confusing to debug later.
         */
        r2->readfn = NULL;
        r2->writefn = NULL;
        r2->raw_readfn = NULL;
        r2->raw_writefn = NULL;
        r2->resetfn = NULL;
        r2->fieldoffset = 0;
        r2->bank_fieldoffsets[0] = 0;
        r2->bank_fieldoffsets[1] = 0;
    } else {
        bool isbanked = r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1];

        if (isbanked) {
            /*
             * The register is banked (both entries of the array are used).
             * Overwrite fieldoffset: the array is only needed to define
             * banked registers, and from here on only fieldoffset is used.
             */
            r2->fieldoffset = r->bank_fieldoffsets[ns];
        }
        if (state == ARM_CP_STATE_AA32) {
            if (isbanked) {
                /*
                 * If the register is banked then we don't need to migrate or
                 * reset the 32-bit instance in certain cases:
                 *
                 * 1) If the register has both 32-bit and 64-bit instances
                 *    then we can count on the 64-bit instance taking care
                 *    of the non-secure bank.
                 * 2) If ARMv8 is enabled then we can count on a 64-bit
                 *    version taking care of the secure bank.  This requires
                 *    that separate 32 and 64-bit definitions are provided.
                 */
                if ((r->state == ARM_CP_STATE_BOTH && ns) ||
                    (arm_feature(env, ARM_FEATURE_V8) && !ns)) {
                    r2->type |= ARM_CP_ALIAS;
                }
            } else if ((secstate != r->secure) && !ns) {
                /*
                 * The register is not banked so we only want to allow
                 * migration of the non-secure instance.
                 */
                r2->type |= ARM_CP_ALIAS;
            }

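            /*
             * A 64-bit backing field viewed as 32 bits from AArch32:
             * on a big-endian host the low half lives at the higher
             * address, so nudge the offset one word along.
             */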
            if (HOST_BIG_ENDIAN &&
                r->state == ARM_CP_STATE_BOTH && r2->fieldoffset) {
                r2->fieldoffset += sizeof(uint32_t);
            }
        }
    }

    /*
     * By convention, for wildcarded registers only the first
     * entry is used for migration; the others are marked as
     * ALIAS so we don't try to transfer the register
     * multiple times. Special registers (e.g. NOP/WFI) are
     * never migratable and not even raw-accessible.
     */
    if (r2->type & ARM_CP_SPECIAL_MASK) {
        r2->type |= ARM_CP_NO_RAW;
    }
    if (((r->crm == CP_ANY) && crm != 0) ||
        ((r->opc1 == CP_ANY) && opc1 != 0) ||
        ((r->opc2 == CP_ANY) && opc2 != 0)) {
        r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB;
    }

    /*
     * Check that raw accesses are either forbidden or handled. Note that
     * we can't assert this earlier because the setup of fieldoffset for
     * banked registers has to be done first.
     */
    if (!(r2->type & ARM_CP_NO_RAW)) {
        assert(!raw_accessors_invalid(r2));
    }

    g_hash_table_insert(cpu->cp_regs, (gpointer)(uintptr_t)key, r2);
}


void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
                                       const ARMCPRegInfo *r, void *opaque)
{
    /*
     * Define implementations of coprocessor registers.
     * We store these in a hashtable because typically
9535     * there are fewer than 150 registers in a space which
9536     * is 16*16*16*8*8 = 262144 in size.
9537     * Wildcarding is supported for the crm, opc1 and opc2 fields.
9538     * If a register is defined twice then the second definition is
9539     * used, so this can be used to define some generic registers and
9540     * then override them with implementation specific variations.
9541     * At least one of the original and the second definition should
9542     * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
9543     * against accidental use.
9544     *
9545     * The state field defines whether the register is to be
9546     * visible in the AArch32 or AArch64 execution state. If the
9547     * state is set to ARM_CP_STATE_BOTH then we synthesise a
9548     * reginfo structure for the AArch32 view, which sees the lower
9549     * 32 bits of the 64 bit register.
9550     *
9551     * Only registers visible in AArch64 may set r->opc0; opc0 cannot
9552     * be wildcarded. AArch64 registers are always considered to be 64
9553     * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
9554     * the register, if any.
9555     */
9556    int crm, opc1, opc2;
9557    int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
9558    int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
9559    int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
9560    int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
9561    int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
9562    int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
9563    CPState state;
9564
9565    /* 64 bit registers have only CRm and Opc1 fields */
9566    assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
9567    /* op0 only exists in the AArch64 encodings */
9568    assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0));
9569    /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */
9570    assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT));
9571    /*
9572     * This API is only for Arm's system coprocessors (14 and 15) or
9573     * (M-profile or v7A-and-earlier only) for implementation defined
9574     * coprocessors in the range 0..7.  Our decode assumes this, since
9575     * 8..13 can be used for other insns including VFP and Neon. See
9576     * valid_cp() in translate.c.  Assert here that we haven't tried
9577     * to use an invalid coprocessor number.
9578     */
9579    switch (r->state) {
9580    case ARM_CP_STATE_BOTH:
9581        /* 0 has a special meaning, but otherwise the same rules as AA32. */
9582        if (r->cp == 0) {
9583            break;
9584        }
9585        /* fall through */
9586    case ARM_CP_STATE_AA32:
9587        if (arm_feature(&cpu->env, ARM_FEATURE_V8) &&
9588            !arm_feature(&cpu->env, ARM_FEATURE_M)) {
9589            assert(r->cp >= 14 && r->cp <= 15);
9590        } else {
9591            assert(r->cp < 8 || (r->cp >= 14 && r->cp <= 15));
9592        }
9593        break;
9594    case ARM_CP_STATE_AA64:
9595        assert(r->cp == 0 || r->cp == CP_REG_ARM64_SYSREG_CP);
9596        break;
9597    default:
9598        g_assert_not_reached();
9599    }
9600    /*
9601     * The AArch64 pseudocode CheckSystemAccess() specifies that op1
9602     * encodes a minimum access level for the register. We roll this
9603     * runtime check into our general permission check code, so check
9604     * here that the reginfo's specified permissions are strict enough
9605     * to encompass the generic architectural permission check.
9606     */
9607    if (r->state != ARM_CP_STATE_AA32) {
9608        CPAccessRights mask;
9609        switch (r->opc1) {
9610        case 0:
9611            /* min_EL EL1, but some accessible to EL0 via kernel ABI */
9612            mask = PL0U_R | PL1_RW;
9613            break;
9614        case 1: case 2:
9615            /* min_EL EL1 */
9616            mask = PL1_RW;
9617            break;
9618        case 3:
9619            /* min_EL EL0 */
9620            mask = PL0_RW;
9621            break;
9622        case 4:
9623        case 5:
9624            /* min_EL EL2 */
9625            mask = PL2_RW;
9626            break;
9627        case 6:
9628            /* min_EL EL3 */
9629            mask = PL3_RW;
9630            break;
9631        case 7:
9632            /* min_EL EL1, secure mode only (we don't check the latter) */
9633            mask = PL1_RW;
9634            break;
9635        default:
9636            /* broken reginfo with out-of-range opc1 */
9637            g_assert_not_reached();
9638        }
9639        /* assert our permissions are not too lax (stricter is fine) */
9640        assert((r->access & ~mask) == 0);
9641    }
9642
9643    /*
9644     * Check that the register definition has enough info to handle
9645     * reads and writes if they are permitted.
9646     */
9647    if (!(r->type & (ARM_CP_SPECIAL_MASK | ARM_CP_CONST))) {
9648        if (r->access & PL3_R) {
9649            assert((r->fieldoffset ||
9650                   (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
9651                   r->readfn);
9652        }
9653        if (r->access & PL3_W) {
9654            assert((r->fieldoffset ||
9655                   (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
9656                   r->writefn);
9657        }
9658    }
9659
9660    for (crm = crmmin; crm <= crmmax; crm++) {
9661        for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
9662            for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
9663                for (state = ARM_CP_STATE_AA32;
9664                     state <= ARM_CP_STATE_AA64; state++) {
9665                    if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
9666                        continue;
9667                    }
9668                    if (state == ARM_CP_STATE_AA32) {
9669                        /*
9670                         * Under AArch32 CP registers can be common
9671                         * (same for secure and non-secure world) or banked.
9672                         */
9673                        char *name;
9674
9675                        switch (r->secure) {
9676                        case ARM_CP_SECSTATE_S:
9677                        case ARM_CP_SECSTATE_NS:
9678                            add_cpreg_to_hashtable(cpu, r, opaque, state,
9679                                                   r->secure, crm, opc1, opc2,
9680                                                   r->name);
9681                            break;
9682                        case ARM_CP_SECSTATE_BOTH:
9683                            name = g_strdup_printf("%s_S", r->name);
9684                            add_cpreg_to_hashtable(cpu, r, opaque, state,
9685                                                   ARM_CP_SECSTATE_S,
9686                                                   crm, opc1, opc2, name);
9687                            g_free(name);
9688                            add_cpreg_to_hashtable(cpu, r, opaque, state,
9689                                                   ARM_CP_SECSTATE_NS,
9690                                                   crm, opc1, opc2, r->name);
9691                            break;
9692                        default:
9693                            g_assert_not_reached();
9694                        }
9695                    } else {
9696                        /*
9697                         * AArch64 registers get mapped to non-secure instance
9698                         * of AArch32
9699                         */
9700                        add_cpreg_to_hashtable(cpu, r, opaque, state,
9701                                               ARM_CP_SECSTATE_NS,
9702                                               crm, opc1, opc2, r->name);
9703                    }
9704                }
9705            }
9706        }
9707    }
9708}
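
    /*
     * Illustrative sketch (editor's addition, not part of the build):
     * a hypothetical wildcarded definition in the style of the IMPDEF
     * dummies elsewhere in this file.  The CP_ANY fields expand to
     * 16 * 8 * 8 encodings sharing one reginfo; the expansions with
     * nonzero crm/opc1/opc2 are marked ARM_CP_ALIAS | ARM_CP_NO_GDB
     * as described above.
     *
     *     static const ARMCPRegInfo example_impdef_reginfo = {
     *         .name = "IMPDEF_EXAMPLE", .cp = 15, .crn = 15,
     *         .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
     *         .access = PL1_RW,
     *         .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
     *         .resetvalue = 0,
     *     };
     *     define_one_arm_cp_reg(cpu, &example_impdef_reginfo);
     */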
9709
9710/* Define a whole list of registers */
9711void define_arm_cp_regs_with_opaque_len(ARMCPU *cpu, const ARMCPRegInfo *regs,
9712                                        void *opaque, size_t len)
9713{
9714    size_t i;
9715    for (i = 0; i < len; ++i) {
9716        define_one_arm_cp_reg_with_opaque(cpu, regs + i, opaque);
9717    }
9718}
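
    /*
     * Usage sketch (editor's addition): callers normally go through the
     * define_arm_cp_regs() convenience macro, which passes a NULL opaque
     * and derives the length with ARRAY_SIZE():
     *
     *     static const ARMCPRegInfo example_cp_reginfo[] = {
     *         { ... }, { ... },
     *     };
     *     define_arm_cp_regs(cpu, example_cp_reginfo);
     */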
9719
9720/*
9721 * Modify ARMCPRegInfo for access from userspace.
9722 *
9723 * This is a data driven modification directed by
9724 * ARMCPRegUserSpaceInfo. All registers become ARM_CP_CONST as
9725 * user-space cannot alter any values and dynamic values pertaining to
9726 * execution state are hidden from user space view anyway.
9727 */
9728void modify_arm_cp_regs_with_len(ARMCPRegInfo *regs, size_t regs_len,
9729                                 const ARMCPRegUserSpaceInfo *mods,
9730                                 size_t mods_len)
9731{
9732    for (size_t mi = 0; mi < mods_len; ++mi) {
9733        const ARMCPRegUserSpaceInfo *m = mods + mi;
9734        GPatternSpec *pat = NULL;
9735
9736        if (m->is_glob) {
9737            pat = g_pattern_spec_new(m->name);
9738        }
9739        for (size_t ri = 0; ri < regs_len; ++ri) {
9740            ARMCPRegInfo *r = regs + ri;
9741
9742            if (pat && g_pattern_match_string(pat, r->name)) {
9743                r->type = ARM_CP_CONST;
9744                r->access = PL0U_R;
9745                r->resetvalue = 0;
9746                /* continue */
9747            } else if (strcmp(r->name, m->name) == 0) {
9748                r->type = ARM_CP_CONST;
9749                r->access = PL0U_R;
9750                r->resetvalue &= m->exported_bits;
9751                r->resetvalue |= m->fixed_bits;
9752                break;
9753            }
9754        }
9755        if (pat) {
9756            g_pattern_spec_free(pat);
9757        }
9758    }
9759}
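
    /*
     * Example sketch (editor's addition, hypothetical names): a glob
     * entry turns every matching register into a constant RAZ view,
     * while an exact-match entry can expose or force selected bits:
     *
     *     static const ARMCPRegUserSpaceInfo example_user_mods[] = {
     *         { .name = "ID_EXAMPLE*_EL1", .is_glob = true },
     *         { .name = "CTR_EL0", .exported_bits = 0x00ffffffULL },
     *     };
     *     modify_arm_cp_regs(example_reginfo, example_user_mods);
     */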
9760
9761const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
9762{
9763    return g_hash_table_lookup(cpregs, (gpointer)(uintptr_t)encoded_cp);
9764}
9765
9766void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
9767                         uint64_t value)
9768{
9769    /* Helper coprocessor write function for write-ignore registers */
9770}
9771
9772uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
9773{
9774    /* Helper coprocessor read function for read-as-zero registers */
9775    return 0;
9776}
9777
9778void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
9779{
9780    /* Helper coprocessor reset function for do-nothing-on-reset registers */
9781}
9782
9783static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
9784{
9785    /*
9786     * Return true if it is not valid for us to switch to
9787     * this CPU mode (ie all the UNPREDICTABLE cases in
9788     * the ARM ARM CPSRWriteByInstr pseudocode).
9789     */
9790
9791    /* Changes to or from Hyp via MSR and CPS are illegal. */
9792    if (write_type == CPSRWriteByInstr &&
9793        ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP ||
9794         mode == ARM_CPU_MODE_HYP)) {
9795        return 1;
9796    }
9797
9798    switch (mode) {
9799    case ARM_CPU_MODE_USR:
9800        return 0;
9801    case ARM_CPU_MODE_SYS:
9802    case ARM_CPU_MODE_SVC:
9803    case ARM_CPU_MODE_ABT:
9804    case ARM_CPU_MODE_UND:
9805    case ARM_CPU_MODE_IRQ:
9806    case ARM_CPU_MODE_FIQ:
9807        /*
9808         * Note that we don't implement the IMPDEF NSACR.RFR which in v7
9809         * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.)
9810         */
9811        /*
9812         * If HCR.TGE is set then changes from Monitor to NS PL1 via MSR
9813         * and CPS are treated as illegal mode changes.
9814         */
9815        if (write_type == CPSRWriteByInstr &&
9816            (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON &&
9817            (arm_hcr_el2_eff(env) & HCR_TGE)) {
9818            return 1;
9819        }
9820        return 0;
9821    case ARM_CPU_MODE_HYP:
9822        return !arm_is_el2_enabled(env) || arm_current_el(env) < 2;
9823    case ARM_CPU_MODE_MON:
9824        return arm_current_el(env) < 3;
9825    default:
9826        return 1;
9827    }
9828}
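
    /*
     * Worked example (editor's note): a guest at EL1 executing an MSR
     * that targets Monitor mode hits the ARM_CPU_MODE_MON case above;
     * with arm_current_el(env) < 3 the switch is rejected, and
     * cpsr_write() below then applies the v8 illegal-change behaviour
     * (CPSR.M left untouched, PSTATE.IL set).
     */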
9829
9830uint32_t cpsr_read(CPUARMState *env)
9831{
9832    int ZF;
9833    ZF = (env->ZF == 0);
9834    return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
9835        (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
9836        | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
9837        | ((env->condexec_bits & 0xfc) << 8)
9838        | (env->GE << 16) | (env->daif & CPSR_AIF);
9839}
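
    /*
     * Worked example (editor's note): the NZCV flags are cached with
     * shifted/inverted conventions.  With env->NF bit 31 set,
     * env->ZF == 0, env->CF == 1 and env->VF bit 31 clear, the
     * expression above yields CPSR[31:28] = 0b1110 (N=1, Z=1, C=1,
     * V=0); Z reads as 1 exactly when env->ZF is zero.
     */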
9840
9841void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
9842                CPSRWriteType write_type)
9843{
9844    uint32_t changed_daif;
9845    bool rebuild_hflags = (write_type != CPSRWriteRaw) &&
9846        (mask & (CPSR_M | CPSR_E | CPSR_IL));
9847
9848    if (mask & CPSR_NZCV) {
9849        env->ZF = (~val) & CPSR_Z;
9850        env->NF = val;
9851        env->CF = (val >> 29) & 1;
9852        env->VF = (val << 3) & 0x80000000;
9853    }
9854    if (mask & CPSR_Q) {
9855        env->QF = ((val & CPSR_Q) != 0);
9856    }
9857    if (mask & CPSR_T) {
9858        env->thumb = ((val & CPSR_T) != 0);
9859    }
9860    if (mask & CPSR_IT_0_1) {
9861        env->condexec_bits &= ~3;
9862        env->condexec_bits |= (val >> 25) & 3;
9863    }
9864    if (mask & CPSR_IT_2_7) {
9865        env->condexec_bits &= 3;
9866        env->condexec_bits |= (val >> 8) & 0xfc;
9867    }
9868    if (mask & CPSR_GE) {
9869        env->GE = (val >> 16) & 0xf;
9870    }
9871
9872    /*
9873     * In a V7 implementation that includes the security extensions but does
9874     * not include Virtualization Extensions the SCR.FW and SCR.AW bits control
9875     * whether non-secure software is allowed to change the CPSR_F and CPSR_A
9876     * bits respectively.
9877     *
9878     * In a V8 implementation, it is permitted for privileged software to
9879     * change the CPSR A/F bits regardless of the SCR.AW/FW bits.
9880     */
9881    if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) &&
9882        arm_feature(env, ARM_FEATURE_EL3) &&
9883        !arm_feature(env, ARM_FEATURE_EL2) &&
9884        !arm_is_secure(env)) {
9885
9886        changed_daif = (env->daif ^ val) & mask;
9887
9888        if (changed_daif & CPSR_A) {
9889            /*
9890             * Check to see if we are allowed to change the masking of async
9891             * abort exceptions from a non-secure state.
9892             */
9893            if (!(env->cp15.scr_el3 & SCR_AW)) {
9894                qemu_log_mask(LOG_GUEST_ERROR,
9895                              "Ignoring attempt to switch CPSR_A flag from "
9896                              "non-secure world with SCR.AW bit clear\n");
9897                mask &= ~CPSR_A;
9898            }
9899        }
9900
9901        if (changed_daif & CPSR_F) {
9902            /*
9903             * Check to see if we are allowed to change the masking of FIQ
9904             * exceptions from a non-secure state.
9905             */
9906            if (!(env->cp15.scr_el3 & SCR_FW)) {
9907                qemu_log_mask(LOG_GUEST_ERROR,
9908                              "Ignoring attempt to switch CPSR_F flag from "
9909                              "non-secure world with SCR.FW bit clear\n");
9910                mask &= ~CPSR_F;
9911            }
9912
9913            /*
9914             * Check whether non-maskable FIQ (NMFI) support is enabled.
9915             * If this bit is set software is not allowed to mask
9916             * FIQs, but is allowed to set CPSR_F to 0.
9917             */
9918            if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) &&
9919                (val & CPSR_F)) {
9920                qemu_log_mask(LOG_GUEST_ERROR,
9921                              "Ignoring attempt to enable CPSR_F flag "
9922                              "(non-maskable FIQ [NMFI] support enabled)\n");
9923                mask &= ~CPSR_F;
9924            }
9925        }
9926    }
9927
9928    env->daif &= ~(CPSR_AIF & mask);
9929    env->daif |= val & CPSR_AIF & mask;
9930
9931    if (write_type != CPSRWriteRaw &&
9932        ((env->uncached_cpsr ^ val) & mask & CPSR_M)) {
9933        if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) {
9934            /*
9935             * Note that we can only get here in USR mode if this is a
9936             * gdb stub write; for this case we follow the architectural
9937             * behaviour for guest writes in USR mode of ignoring an attempt
9938             * to switch mode. (Those are caught by translate.c for writes
9939             * triggered by guest instructions.)
9940             */
9941            mask &= ~CPSR_M;
9942        } else if (bad_mode_switch(env, val & CPSR_M, write_type)) {
9943            /*
9944             * Attempt to switch to an invalid mode: this is UNPREDICTABLE in
9945             * v7, and has defined behaviour in v8:
9946             *  + leave CPSR.M untouched
9947             *  + allow changes to the other CPSR fields
9948             *  + set PSTATE.IL
9949             * For user changes via the GDB stub, we don't set PSTATE.IL,
9950             * as this would be unnecessarily harsh for a user error.
9951             */
9952            mask &= ~CPSR_M;
9953            if (write_type != CPSRWriteByGDBStub &&
9954                arm_feature(env, ARM_FEATURE_V8)) {
9955                mask |= CPSR_IL;
9956                val |= CPSR_IL;
9957            }
9958            qemu_log_mask(LOG_GUEST_ERROR,
9959                          "Illegal AArch32 mode switch attempt from %s to %s\n",
9960                          aarch32_mode_name(env->uncached_cpsr),
9961                          aarch32_mode_name(val));
9962        } else {
9963            qemu_log_mask(CPU_LOG_INT, "%s %s to %s PC 0x%" PRIx32 "\n",
9964                          write_type == CPSRWriteExceptionReturn ?
9965                          "Exception return from AArch32" :
9966                          "AArch32 mode switch from",
9967                          aarch32_mode_name(env->uncached_cpsr),
9968                          aarch32_mode_name(val), env->regs[15]);
9969            switch_mode(env, val & CPSR_M);
9970        }
9971    }
9972    mask &= ~CACHED_CPSR_BITS;
9973    env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
9974    if (tcg_enabled() && rebuild_hflags) {
9975        arm_rebuild_hflags(env);
9976    }
9977}
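
    /*
     * Usage sketch (editor's note): the gdb stub rewrites the whole
     * register with
     *     cpsr_write(env, value, 0xffffffff, CPSRWriteByGDBStub);
     * which takes the USR-mode and bad_mode_switch() paths above
     * without setting PSTATE.IL, while guest MSR/CPS instructions use
     * CPSRWriteByInstr and exception returns use
     * CPSRWriteExceptionReturn.
     */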
9978
9979/* Sign/zero extend */
9980uint32_t HELPER(sxtb16)(uint32_t x)
9981{
9982    uint32_t res;
9983    res = (uint16_t)(int8_t)x;
9984    res |= (uint32_t)(int8_t)(x >> 16) << 16;
9985    return res;
9986}
9987
9988static void handle_possible_div0_trap(CPUARMState *env, uintptr_t ra)
9989{
9990    /*
9991     * Take a division-by-zero exception if necessary; otherwise return
9992     * to get the usual non-trapping division behaviour (result of 0)
9993     */
9994    if (arm_feature(env, ARM_FEATURE_M)
9995        && (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_DIV_0_TRP_MASK)) {
9996        raise_exception_ra(env, EXCP_DIVBYZERO, 0, 1, ra);
9997    }
9998}
9999
10000uint32_t HELPER(uxtb16)(uint32_t x)
10001{
10002    uint32_t res;
10003    res = (uint16_t)(uint8_t)x;
10004    res |= (uint32_t)(uint8_t)(x >> 16) << 16;
10005    return res;
10006}
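
     /*
      * Worked examples (editor's note): both helpers extract bytes 0
      * and 2 and widen them into halfwords 0 and 1:
      *     sxtb16(0x00f00080) == 0xfff0ff80  (0x80, 0xf0 sign-extended)
      *     uxtb16(0x12345678) == 0x00340078  (0x78, 0x34 zero-extended)
      */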
10007
10008int32_t HELPER(sdiv)(CPUARMState *env, int32_t num, int32_t den)
10009{
10010    if (den == 0) {
10011        handle_possible_div0_trap(env, GETPC());
10012        return 0;
10013    }
10014    if (num == INT_MIN && den == -1) {
10015        return INT_MIN;
10016    }
10017    return num / den;
10018}
10019
10020uint32_t HELPER(udiv)(CPUARMState *env, uint32_t num, uint32_t den)
10021{
10022    if (den == 0) {
10023        handle_possible_div0_trap(env, GETPC());
10024        return 0;
10025    }
10026    return num / den;
10027}
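
     /*
      * Edge cases (editor's note), matching the architected DIV
      * behaviour:
      *     sdiv(INT_MIN, -1) == INT_MIN  (wraps, no trap)
      *     sdiv(x, 0) == 0 and udiv(x, 0) == 0, unless an M-profile
      *     CPU has CCR.DIV_0_TRP set, in which case a DIVBYZERO
      *     UsageFault is raised instead.
      */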
10028
10029uint32_t HELPER(rbit)(uint32_t x)
10030{
10031    return revbit32(x);
10032}
10033
10034#ifdef CONFIG_USER_ONLY
10035
10036static void switch_mode(CPUARMState *env, int mode)
10037{
10038    ARMCPU *cpu = env_archcpu(env);
10039
10040    if (mode != ARM_CPU_MODE_USR) {
10041        cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
10042    }
10043}
10044
10045uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
10046                                 uint32_t cur_el, bool secure)
10047{
10048    return 1;
10049}
10050
10051void aarch64_sync_64_to_32(CPUARMState *env)
10052{
10053    g_assert_not_reached();
10054}
10055
10056#else
10057
10058static void switch_mode(CPUARMState *env, int mode)
10059{
10060    int old_mode;
10061    int i;
10062
10063    old_mode = env->uncached_cpsr & CPSR_M;
10064    if (mode == old_mode) {
10065        return;
10066    }
10067
10068    if (old_mode == ARM_CPU_MODE_FIQ) {
10069        memcpy(env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
10070        memcpy(env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
10071    } else if (mode == ARM_CPU_MODE_FIQ) {
10072        memcpy(env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
10073        memcpy(env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
10074    }
10075
10076    i = bank_number(old_mode);
10077    env->banked_r13[i] = env->regs[13];
10078    env->banked_spsr[i] = env->spsr;
10079
10080    i = bank_number(mode);
10081    env->regs[13] = env->banked_r13[i];
10082    env->spsr = env->banked_spsr[i];
10083
10084    env->banked_r14[r14_bank_number(old_mode)] = env->regs[14];
10085    env->regs[14] = env->banked_r14[r14_bank_number(mode)];
10086}
10087
10088/*
10089 * Physical Interrupt Target EL Lookup Table
10090 *
10091 * [ From ARM ARM section G1.13.4 (Table G1-15) ]
10092 *
10093 * The below multi-dimensional table is used for looking up the target
10094 * exception level given several criteria.  Specifically, the
10095 * target EL is based on SCR and HCR routing controls as well as the
10096 * currently executing EL and secure state.
10097 *
10098 *    Dimensions:
10099 *    target_el_table[2][2][2][2][2][4]
10100 *                    |  |  |  |  |  +--- Current EL
10101 *                    |  |  |  |  +------ Non-secure(0)/Secure(1)
10102 *                    |  |  |  +--------- HCR mask override
10103 *                    |  |  +------------ SCR exec state control
10104 *                    |  +--------------- SCR mask override
10105 *                    +------------------ 32-bit(0)/64-bit(1) EL3
10106 *
10107 *    The table values are as such:
10108 *    0-3 = EL0-EL3
10109 *     -1 = Cannot occur
10110 *
10111 * The ARM ARM target EL table includes entries indicating that an "exception
10112 * is not taken".  The two cases where this is applicable are:
10113 *    1) An exception is taken from EL3 but the SCR does not have the exception
10114 *    routed to EL3.
10115 *    2) An exception is taken from EL2 but the HCR does not have the exception
10116 *    routed to EL2.
10117 * In these two cases, the below table contains a target of EL1.  This value is
10118 * returned as it is expected that the consumer of the table data will check
10119 * for "target EL >= current EL" to ensure the exception is not taken.
10120 *
10121 *            SCR     HCR
10122 *         64  EA     AMO                 From
10123 *        BIT IRQ     IMO      Non-secure         Secure
10124 *        EL3 FIQ  RW FMO   EL0 EL1 EL2 EL3   EL0 EL1 EL2 EL3
10125 */
10126static const int8_t target_el_table[2][2][2][2][2][4] = {
10127    {{{{/* 0   0   0   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
10128       {/* 0   0   0   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},
10129      {{/* 0   0   1   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
10130       {/* 0   0   1   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},},
10131     {{{/* 0   1   0   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
10132       {/* 0   1   0   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},
10133      {{/* 0   1   1   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
10134       {/* 0   1   1   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},},},
10135    {{{{/* 1   0   0   0 */{ 1,  1,  2, -1 },{ 1,  1, -1,  1 },},
10136       {/* 1   0   0   1 */{ 2,  2,  2, -1 },{ 2,  2, -1,  1 },},},
10137      {{/* 1   0   1   0 */{ 1,  1,  1, -1 },{ 1,  1,  1,  1 },},
10138       {/* 1   0   1   1 */{ 2,  2,  2, -1 },{ 2,  2,  2,  1 },},},},
10139     {{{/* 1   1   0   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
10140       {/* 1   1   0   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},
10141      {{/* 1   1   1   0 */{ 3,  3,  3, -1 },{ 3,  3,  3,  3 },},
10142       {/* 1   1   1   1 */{ 3,  3,  3, -1 },{ 3,  3,  3,  3 },},},},},
10143};
10144
10145/*
10146 * Determine the target EL for physical exceptions
10147 */
10148uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
10149                                 uint32_t cur_el, bool secure)
10150{
10151    CPUARMState *env = cs->env_ptr;
10152    bool rw;
10153    bool scr;
10154    bool hcr;
10155    int target_el;
10156    /* Is the highest EL AArch64? */
10157    bool is64 = arm_feature(env, ARM_FEATURE_AARCH64);
10158    uint64_t hcr_el2;
10159
10160    if (arm_feature(env, ARM_FEATURE_EL3)) {
10161        rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW);
10162    } else {
10163        /*
10164         * Either EL2 is the highest EL (and so the EL2 register width
10165         * is given by is64); or there is no EL2 or EL3, in which case
10166         * the value of 'rw' does not affect the table lookup anyway.
10167         */
10168        rw = is64;
10169    }
10170
10171    hcr_el2 = arm_hcr_el2_eff(env);
10172    switch (excp_idx) {
10173    case EXCP_IRQ:
10174        scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ);
10175        hcr = hcr_el2 & HCR_IMO;
10176        break;
10177    case EXCP_FIQ:
10178        scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ);
10179        hcr = hcr_el2 & HCR_FMO;
10180        break;
10181    default:
10182        scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA);
10183        hcr = hcr_el2 & HCR_AMO;
10184        break;
10185    }
10186
10187    /*
10188     * For these purposes, TGE and AMO/IMO/FMO both force the
10189     * interrupt to EL2.  Fold TGE into the bit extracted above.
10190     */
10191    hcr |= (hcr_el2 & HCR_TGE) != 0;
10192
10193    /* Perform a table-lookup for the target EL given the current state */
10194    target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el];
10195
10196    assert(target_el > 0);
10197
10198    return target_el;
10199}
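
     /*
      * Worked example (editor's note): an IRQ at EL0, non-secure, with
      * a 64-bit EL3 (is64 = 1), SCR_EL3.IRQ = 0, SCR_EL3.RW = 1 and
      * HCR_EL2.IMO = 1 reads target_el_table[1][0][1][1][0][0] == 2,
      * i.e. the interrupt is routed to EL2 as IMO demands.
      */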
10200
10201void arm_log_exception(CPUState *cs)
10202{
10203    int idx = cs->exception_index;
10204
10205    if (qemu_loglevel_mask(CPU_LOG_INT)) {
10206        const char *exc = NULL;
10207        static const char * const excnames[] = {
10208            [EXCP_UDEF] = "Undefined Instruction",
10209            [EXCP_SWI] = "SVC",
10210            [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
10211            [EXCP_DATA_ABORT] = "Data Abort",
10212            [EXCP_IRQ] = "IRQ",
10213            [EXCP_FIQ] = "FIQ",
10214            [EXCP_BKPT] = "Breakpoint",
10215            [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
10216            [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
10217            [EXCP_HVC] = "Hypervisor Call",
10218            [EXCP_HYP_TRAP] = "Hypervisor Trap",
10219            [EXCP_SMC] = "Secure Monitor Call",
10220            [EXCP_VIRQ] = "Virtual IRQ",
10221            [EXCP_VFIQ] = "Virtual FIQ",
10222            [EXCP_SEMIHOST] = "Semihosting call",
10223            [EXCP_NOCP] = "v7M NOCP UsageFault",
10224            [EXCP_INVSTATE] = "v7M INVSTATE UsageFault",
10225            [EXCP_STKOF] = "v8M STKOF UsageFault",
10226            [EXCP_LAZYFP] = "v7M exception during lazy FP stacking",
10227            [EXCP_LSERR] = "v8M LSERR UsageFault",
10228            [EXCP_UNALIGNED] = "v7M UNALIGNED UsageFault",
10229            [EXCP_DIVBYZERO] = "v7M DIVBYZERO UsageFault",
10230            [EXCP_VSERR] = "Virtual SERR",
10231            [EXCP_GPC] = "Granule Protection Check",
10232        };
10233
10234        if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
10235            exc = excnames[idx];
10236        }
10237        if (!exc) {
10238            exc = "unknown";
10239        }
10240        qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s] on CPU %d\n",
10241                      idx, exc, cs->cpu_index);
10242    }
10243}
10244
10245/*
10246 * Function used to synchronize QEMU's AArch64 register set with AArch32
10247 * register set.  This is necessary when switching between AArch32 and AArch64
10248 * execution state.
10249 */
10250void aarch64_sync_32_to_64(CPUARMState *env)
10251{
10252    int i;
10253    uint32_t mode = env->uncached_cpsr & CPSR_M;
10254
10255    /* We can blanket copy R[0:7] to X[0:7] */
10256    for (i = 0; i < 8; i++) {
10257        env->xregs[i] = env->regs[i];
10258    }
10259
10260    /*
10261     * Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12.
10262     * Otherwise, they come from the banked user regs.
10263     */
10264    if (mode == ARM_CPU_MODE_FIQ) {
10265        for (i = 8; i < 13; i++) {
10266            env->xregs[i] = env->usr_regs[i - 8];
10267        }
10268    } else {
10269        for (i = 8; i < 13; i++) {
10270            env->xregs[i] = env->regs[i];
10271        }
10272    }
10273
10274    /*
10275     * Registers x13-x23 are the various mode SP and FP registers. Registers
10276     * r13 and r14 are only copied if we are in that mode, otherwise we copy
10277     * from the mode banked register.
10278     */
10279    if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
10280        env->xregs[13] = env->regs[13];
10281        env->xregs[14] = env->regs[14];
10282    } else {
10283        env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)];
10284        /* HYP is an exception in that it is copied from r14 */
10285        if (mode == ARM_CPU_MODE_HYP) {
10286            env->xregs[14] = env->regs[14];
10287        } else {
10288            env->xregs[14] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)];
10289        }
10290    }
10291
10292    if (mode == ARM_CPU_MODE_HYP) {
10293        env->xregs[15] = env->regs[13];
10294    } else {
10295        env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)];
10296    }
10297
10298    if (mode == ARM_CPU_MODE_IRQ) {
10299        env->xregs[16] = env->regs[14];
10300        env->xregs[17] = env->regs[13];
10301    } else {
10302        env->xregs[16] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)];
10303        env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)];
10304    }
10305
10306    if (mode == ARM_CPU_MODE_SVC) {
10307        env->xregs[18] = env->regs[14];
10308        env->xregs[19] = env->regs[13];
10309    } else {
10310        env->xregs[18] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)];
10311        env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)];
10312    }
10313
10314    if (mode == ARM_CPU_MODE_ABT) {
10315        env->xregs[20] = env->regs[14];
10316        env->xregs[21] = env->regs[13];
10317    } else {
10318        env->xregs[20] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)];
10319        env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)];
10320    }
10321
10322    if (mode == ARM_CPU_MODE_UND) {
10323        env->xregs[22] = env->regs[14];
10324        env->xregs[23] = env->regs[13];
10325    } else {
10326        env->xregs[22] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)];
10327        env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)];
10328    }
10329
10330    /*
10331     * Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
10332     * mode, then we can copy from r8-r14.  Otherwise, we copy from the
10333     * FIQ bank for r8-r14.
10334     */
10335    if (mode == ARM_CPU_MODE_FIQ) {
10336        for (i = 24; i < 31; i++) {
10337            env->xregs[i] = env->regs[i - 16];   /* X[24:30] <- R[8:14] */
10338        }
10339    } else {
10340        for (i = 24; i < 29; i++) {
10341            env->xregs[i] = env->fiq_regs[i - 24];
10342        }
10343        env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)];
10344        env->xregs[30] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)];
10345    }
10346
10347    env->pc = env->regs[15];
10348}
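
     /*
      * Mapping summary (editor's note) for the function above and its
      * inverse aarch64_sync_64_to_32() below:
      *     x0-x7   <-> r0-r7             x16/x17 <-> LR_irq/SP_irq
      *     x8-x12  <-> r8-r12 (usr)      x18/x19 <-> LR_svc/SP_svc
      *     x13/x14 <-> SP_usr/LR_usr     x20/x21 <-> LR_abt/SP_abt
      *     x15     <-> SP_hyp            x22/x23 <-> LR_und/SP_und
      *     x24-x28 <-> r8_fiq-r12_fiq    x29/x30 <-> SP_fiq/LR_fiq
      */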
10349
10350/*
10351 * Function used to synchronize QEMU's AArch32 register set with AArch64
10352 * register set.  This is necessary when switching between AArch32 and AArch64
10353 * execution state.
10354 */
10355void aarch64_sync_64_to_32(CPUARMState *env)
10356{
10357    int i;
10358    uint32_t mode = env->uncached_cpsr & CPSR_M;
10359
10360    /* We can blanket copy X[0:7] to R[0:7] */
10361    for (i = 0; i < 8; i++) {
10362        env->regs[i] = env->xregs[i];
10363    }
10364
10365    /*
10366     * Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12.
10367     * Otherwise, we copy x8-x12 into the banked user regs.
10368     */
10369    if (mode == ARM_CPU_MODE_FIQ) {
10370        for (i = 8; i < 13; i++) {
10371            env->usr_regs[i - 8] = env->xregs[i];
10372        }
10373    } else {
10374        for (i = 8; i < 13; i++) {
10375            env->regs[i] = env->xregs[i];
10376        }
10377    }
10378
10379    /*
10380     * Registers r13 & r14 depend on the current mode.
10381     * If we are in a given mode, we copy the corresponding x registers to r13
10382     * and r14.  Otherwise, we copy the x register to the banked r13 and r14
10383     * for the mode.
10384     */
10385    if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
10386        env->regs[13] = env->xregs[13];
10387        env->regs[14] = env->xregs[14];
10388    } else {
10389        env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13];
10390
10391        /*
10392         * HYP is an exception in that it does not have its own banked r14 but
10393         * shares the USR r14
10394         */
10395        if (mode == ARM_CPU_MODE_HYP) {
10396            env->regs[14] = env->xregs[14];
10397        } else {
10398            env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)] = env->xregs[14];
10399        }
10400    }
10401
10402    if (mode == ARM_CPU_MODE_HYP) {
10403        env->regs[13] = env->xregs[15];
10404    } else {
10405        env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15];
10406    }
10407
10408    if (mode == ARM_CPU_MODE_IRQ) {
10409        env->regs[14] = env->xregs[16];
10410        env->regs[13] = env->xregs[17];
10411    } else {
10412        env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16];
10413        env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17];
10414    }
10415
10416    if (mode == ARM_CPU_MODE_SVC) {
10417        env->regs[14] = env->xregs[18];
10418        env->regs[13] = env->xregs[19];
10419    } else {
10420        env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18];
10421        env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19];
10422    }
10423
10424    if (mode == ARM_CPU_MODE_ABT) {
10425        env->regs[14] = env->xregs[20];
10426        env->regs[13] = env->xregs[21];
10427    } else {
10428        env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20];
10429        env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21];
10430    }
10431
10432    if (mode == ARM_CPU_MODE_UND) {
10433        env->regs[14] = env->xregs[22];
10434        env->regs[13] = env->xregs[23];
10435    } else {
10436        env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)] = env->xregs[22];
10437        env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23];
10438    }
10439
10440    /*
10441     * Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
10442     * mode, then we can copy to r8-r14.  Otherwise, we copy to the
10443     * FIQ bank for r8-r14.
10444     */
10445    if (mode == ARM_CPU_MODE_FIQ) {
10446        for (i = 24; i < 31; i++) {
10447            env->regs[i - 16] = env->xregs[i];   /* X[24:30] -> R[8:14] */
10448        }
10449    } else {
10450        for (i = 24; i < 29; i++) {
10451            env->fiq_regs[i - 24] = env->xregs[i];
10452        }
10453        env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29];
10454        env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30];
10455    }
10456
10457    env->regs[15] = env->pc;
10458}
10459
10460static void take_aarch32_exception(CPUARMState *env, int new_mode,
10461                                   uint32_t mask, uint32_t offset,
10462                                   uint32_t newpc)
10463{
10464    int new_el;
10465
10466    /* Change the CPU state so as to actually take the exception. */
10467    switch_mode(env, new_mode);
10468
10469    /*
10470     * For exceptions taken to AArch32 we must clear the SS bit in both
10471     * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
10472     */
10473    env->pstate &= ~PSTATE_SS;
10474    env->spsr = cpsr_read(env);
10475    /* Clear IT bits.  */
10476    env->condexec_bits = 0;
10477    /* Switch to the new mode, and to the correct instruction set.  */
10478    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
10479
10480    /* This must be after mode switching. */
10481    new_el = arm_current_el(env);
10482
10483    /* Set new mode endianness */
10484    env->uncached_cpsr &= ~CPSR_E;
10485    if (env->cp15.sctlr_el[new_el] & SCTLR_EE) {
10486        env->uncached_cpsr |= CPSR_E;
10487    }
10488    /* J and IL must always be cleared for exception entry */
10489    env->uncached_cpsr &= ~(CPSR_IL | CPSR_J);
10490    env->daif |= mask;
10491
10492    if (cpu_isar_feature(aa32_ssbs, env_archcpu(env))) {
10493        if (env->cp15.sctlr_el[new_el] & SCTLR_DSSBS_32) {
10494            env->uncached_cpsr |= CPSR_SSBS;
10495        } else {
10496            env->uncached_cpsr &= ~CPSR_SSBS;
10497        }
10498    }
10499
10500    if (new_mode == ARM_CPU_MODE_HYP) {
10501        env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0;
10502        env->elr_el[2] = env->regs[15];
10503    } else {
10504        /* CPSR.PAN is normally preserved unless...  */
10505        if (cpu_isar_feature(aa32_pan, env_archcpu(env))) {
10506            switch (new_el) {
10507            case 3:
10508                if (!arm_is_secure_below_el3(env)) {
10509                    /* ... the target is EL3, from non-secure state.  */
10510                    env->uncached_cpsr &= ~CPSR_PAN;
10511                    break;
10512                }
10513                /* ... the target is EL3, from secure state ... */
10514                /* fall through */
10515            case 1:
10516                /* ... the target is EL1 and SCTLR.SPAN is 0.  */
10517                if (!(env->cp15.sctlr_el[new_el] & SCTLR_SPAN)) {
10518                    env->uncached_cpsr |= CPSR_PAN;
10519                }
10520                break;
10521            }
10522        }
10523        /*
10524         * this is a lie, as there was no c1_sys on V4T/V5, but who cares
10525         * and we should just guard the thumb mode on V4
10526         */
10527        if (arm_feature(env, ARM_FEATURE_V4T)) {
10528            env->thumb =
10529                (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
10530        }
10531        env->regs[14] = env->regs[15] + offset;
10532    }
10533    env->regs[15] = newpc;
10534
10535    if (tcg_enabled()) {
10536        arm_rebuild_hflags(env);
10537    }
10538}
10539
10540static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs)
10541{
10542    /*
10543     * Handle exception entry to Hyp mode; this is sufficiently
10544     * different to entry to other AArch32 modes that we handle it
10545     * separately here.
10546     *
10547     * The vector table entry used is always the 0x14 Hyp mode entry point,
10548     * unless this is an UNDEF/SVC/HVC/abort taken from Hyp to Hyp.
10549     * The offset applied to the preferred return address is always zero
10550     * (see DDI0487C.a section G1.12.3).
10551     * PSTATE A/I/F masks are set based only on the SCR.EA/IRQ/FIQ values.
10552     */
10553    uint32_t addr, mask;
10554    ARMCPU *cpu = ARM_CPU(cs);
10555    CPUARMState *env = &cpu->env;
10556
10557    switch (cs->exception_index) {
10558    case EXCP_UDEF:
10559        addr = 0x04;
10560        break;
10561    case EXCP_SWI:
10562        addr = 0x08;
10563        break;
10564    case EXCP_BKPT:
10565        /* Fall through to prefetch abort.  */
10566    case EXCP_PREFETCH_ABORT:
10567        env->cp15.ifar_s = env->exception.vaddress;
10568        qemu_log_mask(CPU_LOG_INT, "...with HIFAR 0x%x\n",
10569                      (uint32_t)env->exception.vaddress);
10570        addr = 0x0c;
10571        break;
10572    case EXCP_DATA_ABORT:
10573        env->cp15.dfar_s = env->exception.vaddress;
10574        qemu_log_mask(CPU_LOG_INT, "...with HDFAR 0x%x\n",
10575                      (uint32_t)env->exception.vaddress);
10576        addr = 0x10;
10577        break;
10578    case EXCP_IRQ:
10579        addr = 0x18;
10580        break;
10581    case EXCP_FIQ:
10582        addr = 0x1c;
10583        break;
10584    case EXCP_HVC:
10585        addr = 0x08;
10586        break;
10587    case EXCP_HYP_TRAP:
10588        addr = 0x14;
10589        break;
10590    default:
10591        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
10592    }
10593
10594    if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) {
10595        if (!arm_feature(env, ARM_FEATURE_V8)) {
10596            /*
10597             * QEMU syndrome values are v8-style. v7 has the IL bit
10598             * UNK/SBZP for "field not valid" cases, where v8 uses RES1.
10599             * If this is a v7 CPU, squash the IL bit in those cases.
10600             */
10601            if (cs->exception_index == EXCP_PREFETCH_ABORT ||
10602                (cs->exception_index == EXCP_DATA_ABORT &&
10603                 !(env->exception.syndrome & ARM_EL_ISV)) ||
10604                syn_get_ec(env->exception.syndrome) == EC_UNCATEGORIZED) {
10605                env->exception.syndrome &= ~ARM_EL_IL;
10606            }
10607        }
10608        env->cp15.esr_el[2] = env->exception.syndrome;
10609    }
10610
10611    if (arm_current_el(env) != 2 && addr < 0x14) {
10612        addr = 0x14;
10613    }
10614
10615    mask = 0;
10616    if (!(env->cp15.scr_el3 & SCR_EA)) {
10617        mask |= CPSR_A;
10618    }
10619    if (!(env->cp15.scr_el3 & SCR_IRQ)) {
10620        mask |= CPSR_I;
10621    }
10622    if (!(env->cp15.scr_el3 & SCR_FIQ)) {
10623        mask |= CPSR_F;
10624    }
10625
10626    addr += env->cp15.hvbar;
10627
10628    take_aarch32_exception(env, ARM_CPU_MODE_HYP, mask, 0, addr);
10629}
10630
10631static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
10632{
10633    ARMCPU *cpu = ARM_CPU(cs);
10634    CPUARMState *env = &cpu->env;
10635    uint32_t addr;
10636    uint32_t mask;
10637    int new_mode;
10638    uint32_t offset;
10639    uint32_t moe;
10640
10641    /* If this is a debug exception we must update the DBGDSCR.MOE bits */
10642    switch (syn_get_ec(env->exception.syndrome)) {
10643    case EC_BREAKPOINT:
10644    case EC_BREAKPOINT_SAME_EL:
10645        moe = 1;
10646        break;
10647    case EC_WATCHPOINT:
10648    case EC_WATCHPOINT_SAME_EL:
10649        moe = 10;
10650        break;
10651    case EC_AA32_BKPT:
10652        moe = 3;
10653        break;
10654    case EC_VECTORCATCH:
10655        moe = 5;
10656        break;
10657    default:
10658        moe = 0;
10659        break;
10660    }
10661
10662    if (moe) {
10663        env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe);
10664    }
10665
10666    if (env->exception.target_el == 2) {
10667        arm_cpu_do_interrupt_aarch32_hyp(cs);
10668        return;
10669    }
10670
10671    switch (cs->exception_index) {
10672    case EXCP_UDEF:
10673        new_mode = ARM_CPU_MODE_UND;
10674        addr = 0x04;
10675        mask = CPSR_I;
10676        if (env->thumb) {
10677            offset = 2;
10678        } else {
10679            offset = 4;
10680        }
10681        break;
10682    case EXCP_SWI:
10683        new_mode = ARM_CPU_MODE_SVC;
10684        addr = 0x08;
10685        mask = CPSR_I;
10686        /* The PC already points to the next instruction.  */
10687        offset = 0;
10688        break;
10689    case EXCP_BKPT:
10690        /* Fall through to prefetch abort.  */
10691    case EXCP_PREFETCH_ABORT:
10692        A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr);
10693        A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress);
10694        qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
10695                      env->exception.fsr, (uint32_t)env->exception.vaddress);
10696        new_mode = ARM_CPU_MODE_ABT;
10697        addr = 0x0c;
10698        mask = CPSR_A | CPSR_I;
10699        offset = 4;
10700        break;
10701    case EXCP_DATA_ABORT:
10702        A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
10703        A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress);
10704        qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
10705                      env->exception.fsr,
10706                      (uint32_t)env->exception.vaddress);
10707        new_mode = ARM_CPU_MODE_ABT;
10708        addr = 0x10;
10709        mask = CPSR_A | CPSR_I;
10710        offset = 8;
10711        break;
10712    case EXCP_IRQ:
10713        new_mode = ARM_CPU_MODE_IRQ;
10714        addr = 0x18;
10715        /* Disable IRQ and imprecise data aborts.  */
10716        mask = CPSR_A | CPSR_I;
10717        offset = 4;
10718        if (env->cp15.scr_el3 & SCR_IRQ) {
10719            /* IRQ routed to monitor mode */
10720            new_mode = ARM_CPU_MODE_MON;
10721            mask |= CPSR_F;
10722        }
10723        break;
10724    case EXCP_FIQ:
10725        new_mode = ARM_CPU_MODE_FIQ;
10726        addr = 0x1c;
10727        /* Disable FIQ, IRQ and imprecise data aborts.  */
10728        mask = CPSR_A | CPSR_I | CPSR_F;
10729        if (env->cp15.scr_el3 & SCR_FIQ) {
10730            /* FIQ routed to monitor mode */
10731            new_mode = ARM_CPU_MODE_MON;
10732        }
10733        offset = 4;
10734        break;
10735    case EXCP_VIRQ:
10736        new_mode = ARM_CPU_MODE_IRQ;
10737        addr = 0x18;
10738        /* Disable IRQ and imprecise data aborts.  */
10739        mask = CPSR_A | CPSR_I;
10740        offset = 4;
10741        break;
10742    case EXCP_VFIQ:
10743        new_mode = ARM_CPU_MODE_FIQ;
10744        addr = 0x1c;
10745        /* Disable FIQ, IRQ and imprecise data aborts.  */
10746        mask = CPSR_A | CPSR_I | CPSR_F;
10747        offset = 4;
10748        break;
10749    case EXCP_VSERR:
10750        {
10751            /*
10752             * Note that this is reported as a data abort, but the DFAR
10753             * has an UNKNOWN value.  Construct the SError syndrome from
10754             * AET and ExT fields.
10755             */
10756            ARMMMUFaultInfo fi = { .type = ARMFault_AsyncExternal, };
10757
10758            if (extended_addresses_enabled(env)) {
10759                env->exception.fsr = arm_fi_to_lfsc(&fi);
10760            } else {
10761                env->exception.fsr = arm_fi_to_sfsc(&fi);
10762            }
10763            env->exception.fsr |= env->cp15.vsesr_el2 & 0xd000;
10764            A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
10765            qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x\n",
10766                          env->exception.fsr);
10767
10768            new_mode = ARM_CPU_MODE_ABT;
10769            addr = 0x10;
10770            mask = CPSR_A | CPSR_I;
10771            offset = 8;
10772        }
10773        break;
10774    case EXCP_SMC:
10775        new_mode = ARM_CPU_MODE_MON;
10776        addr = 0x08;
10777        mask = CPSR_A | CPSR_I | CPSR_F;
10778        offset = 0;
10779        break;
10780    default:
10781        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
10782        return; /* Never happens.  Keep compiler happy.  */
10783    }
10784
10785    if (new_mode == ARM_CPU_MODE_MON) {
10786        addr += env->cp15.mvbar;
10787    } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
10788        /* High vectors. When enabled, base address cannot be remapped. */
10789        addr += 0xffff0000;
10790    } else {
10791        /*
10792         * ARM v7 architectures provide a vector base address register to remap
10793         * the interrupt vector table.
10794 * This register is only honoured in non-monitor mode, and is banked.
10795         * Note: only bits 31:5 are valid.
10796         */
10797        addr += A32_BANKED_CURRENT_REG_GET(env, vbar);
10798    }
10799
10800    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
10801        env->cp15.scr_el3 &= ~SCR_NS;
10802    }
10803
10804    take_aarch32_exception(env, new_mode, mask, offset, addr);
10805}
10806
10807static int aarch64_regnum(CPUARMState *env, int aarch32_reg)
10808{
10809    /*
10810     * Return the register number of the AArch64 view of the AArch32
10811     * register @aarch32_reg. The CPUARMState CPSR is assumed to still
10812     * be that of the AArch32 mode the exception came from.
10813     */
10814    int mode = env->uncached_cpsr & CPSR_M;
10815
10816    switch (aarch32_reg) {
10817    case 0 ... 7:
10818        return aarch32_reg;
10819    case 8 ... 12:
10820        return mode == ARM_CPU_MODE_FIQ ? aarch32_reg + 16 : aarch32_reg;
10821    case 13:
10822        switch (mode) {
10823        case ARM_CPU_MODE_USR:
10824        case ARM_CPU_MODE_SYS:
10825            return 13;
10826        case ARM_CPU_MODE_HYP:
10827            return 15;
10828        case ARM_CPU_MODE_IRQ:
10829            return 17;
10830        case ARM_CPU_MODE_SVC:
10831            return 19;
10832        case ARM_CPU_MODE_ABT:
10833            return 21;
10834        case ARM_CPU_MODE_UND:
10835            return 23;
10836        case ARM_CPU_MODE_FIQ:
10837            return 29;
10838        default:
10839            g_assert_not_reached();
10840        }
10841    case 14:
10842        switch (mode) {
10843        case ARM_CPU_MODE_USR:
10844        case ARM_CPU_MODE_SYS:
10845        case ARM_CPU_MODE_HYP:
10846            return 14;
10847        case ARM_CPU_MODE_IRQ:
10848            return 16;
10849        case ARM_CPU_MODE_SVC:
10850            return 18;
10851        case ARM_CPU_MODE_ABT:
10852            return 20;
10853        case ARM_CPU_MODE_UND:
10854            return 22;
10855        case ARM_CPU_MODE_FIQ:
10856            return 30;
10857        default:
10858            g_assert_not_reached();
10859        }
10860    case 15:
10861        return 31;
10862    default:
10863        g_assert_not_reached();
10864    }
10865}
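
     /*
      * Worked examples (editor's note): from FIQ mode, r10 maps to x26
      * (r8-r12 shift up by 16 in FIQ), r13 to x29 and r14 to x30; from
      * SVC mode, r13 maps to x19 and r14 to x18; r15 always maps to 31.
      */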
10866
10867static uint32_t cpsr_read_for_spsr_elx(CPUARMState *env)
10868{
10869    uint32_t ret = cpsr_read(env);
10870
10871    /* Move DIT to the correct location for SPSR_ELx */
10872    if (ret & CPSR_DIT) {
10873        ret &= ~CPSR_DIT;
10874        ret |= PSTATE_DIT;
10875    }
10876    /* Merge PSTATE.SS into SPSR_ELx */
10877    ret |= env->pstate & PSTATE_SS;
10878
10879    return ret;
10880}
10881
10882static bool syndrome_is_sync_extabt(uint32_t syndrome)
10883{
10884    /* Return true if this syndrome value is a synchronous external abort */
10885    switch (syn_get_ec(syndrome)) {
10886    case EC_INSNABORT:
10887    case EC_INSNABORT_SAME_EL:
10888    case EC_DATAABORT:
10889    case EC_DATAABORT_SAME_EL:
10890        /* Look at fault status code for all the synchronous ext abort cases */
10891        switch (syndrome & 0x3f) {
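             /*
              * Editor's note: 0x10 is a synchronous external abort not
              * on a translation table walk; 0x13-0x17 are synchronous
              * external aborts on a translation table walk, levels -1
              * to 3.
              */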
10892        case 0x10:
10893        case 0x13:
10894        case 0x14:
10895        case 0x15:
10896        case 0x16:
10897        case 0x17:
10898            return true;
10899        default:
10900            return false;
10901        }
10902    default:
10903        return false;
10904    }
10905}
10906
10907/* Handle exception entry to a target EL which is using AArch64 */
10908static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
10909{
10910    ARMCPU *cpu = ARM_CPU(cs);
10911    CPUARMState *env = &cpu->env;
10912    unsigned int new_el = env->exception.target_el;
10913    target_ulong addr = env->cp15.vbar_el[new_el];
10914    unsigned int new_mode = aarch64_pstate_mode(new_el, true);
10915    unsigned int old_mode;
10916    unsigned int cur_el = arm_current_el(env);
10917    int rt;
10918
10919    if (tcg_enabled()) {
10920        /*
10921         * Note that new_el can never be 0.  If cur_el is 0, then
10922         * el0_a64 is is_a64(), else el0_a64 is ignored.
10923         */
10924        aarch64_sve_change_el(env, cur_el, new_el, is_a64(env));
10925    }
10926
10927    if (cur_el < new_el) {
10928        /*
10929         * Entry vector offset depends on whether the implemented EL
10930         * immediately lower than the target level is using AArch32 or AArch64
10931         */
10932        bool is_aa64;
10933        uint64_t hcr;
10934
10935        switch (new_el) {
10936        case 3:
10937            is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0;
10938            break;
10939        case 2:
10940            hcr = arm_hcr_el2_eff(env);
10941            if ((hcr & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
10942                is_aa64 = (hcr & HCR_RW) != 0;
10943                break;
10944            }
10945            /* fall through */
10946        case 1:
10947            is_aa64 = is_a64(env);
10948            break;
10949        default:
10950            g_assert_not_reached();
10951        }
10952
10953        if (is_aa64) {
10954            addr += 0x400;
10955        } else {
10956            addr += 0x600;
10957        }
10958    } else if (pstate_read(env) & PSTATE_SP) {
10959        addr += 0x200;
10960    }
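
         /*
          * At this point (editor's note) addr is the vector entry base:
          * VBAR_ELx + 0x000 for an exception from the current EL using
          * SP_EL0, +0x200 for the current EL using SP_ELx, +0x400 for a
          * lower EL using AArch64 and +0x600 for a lower EL using
          * AArch32; the switch below then adds 0x80 for IRQ, 0x100 for
          * FIQ and 0x180 for SError.
          */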
10961
10962    switch (cs->exception_index) {
10963    case EXCP_GPC:
10964        qemu_log_mask(CPU_LOG_INT, "...with MFAR 0x%" PRIx64 "\n",
10965                      env->cp15.mfar_el3);
10966        /* fall through */
10967    case EXCP_PREFETCH_ABORT:
10968    case EXCP_DATA_ABORT:
10969        /*
10970         * FEAT_DoubleFault allows synchronous external aborts taken to EL3
10971         * to be taken to the SError vector entrypoint.
10972         */
10973        if (new_el == 3 && (env->cp15.scr_el3 & SCR_EASE) &&
10974            syndrome_is_sync_extabt(env->exception.syndrome)) {
10975            addr += 0x180;
10976        }
10977        env->cp15.far_el[new_el] = env->exception.vaddress;
10978        qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
10979                      env->cp15.far_el[new_el]);
10980        /* fall through */
10981    case EXCP_BKPT:
10982    case EXCP_UDEF:
10983    case EXCP_SWI:
10984    case EXCP_HVC:
10985    case EXCP_HYP_TRAP:
10986    case EXCP_SMC:
10987        switch (syn_get_ec(env->exception.syndrome)) {
10988        case EC_ADVSIMDFPACCESSTRAP:
10989            /*
10990             * QEMU internal FP/SIMD syndromes from AArch32 include the
10991             * TA and coproc fields which are only exposed if the exception
10992             * is taken to AArch32 Hyp mode. Mask them out to get a valid
10993             * AArch64 format syndrome.
10994             */
10995            env->exception.syndrome &= ~MAKE_64BIT_MASK(0, 20);
10996            break;
10997        case EC_CP14RTTRAP:
10998        case EC_CP15RTTRAP:
10999        case EC_CP14DTTRAP:
11000            /*
11001             * For a trap on AArch32 MRC/MCR/LDC/STC the Rt field is currently
11002             * the raw register field from the insn; when taking this to
11003             * AArch64 we must convert it to the AArch64 view of the register
11004             * number. Notice that we read a 4-bit AArch32 register number and
11005             * write back a 5-bit AArch64 one.
11006             */
11007            rt = extract32(env->exception.syndrome, 5, 4);
11008            rt = aarch64_regnum(env, rt);
11009            env->exception.syndrome = deposit32(env->exception.syndrome,
11010                                                5, 5, rt);
11011            break;
11012        case EC_CP15RRTTRAP:
11013        case EC_CP14RRTTRAP:
11014            /* Similarly for MRRC/MCRR traps for Rt and Rt2 fields */
11015            rt = extract32(env->exception.syndrome, 5, 4);
11016            rt = aarch64_regnum(env, rt);
11017            env->exception.syndrome = deposit32(env->exception.syndrome,
11018                                                5, 5, rt);
11019            rt = extract32(env->exception.syndrome, 10, 4);
11020            rt = aarch64_regnum(env, rt);
11021            env->exception.syndrome = deposit32(env->exception.syndrome,
11022                                                10, 5, rt);
11023            break;
11024        }
11025        env->cp15.esr_el[new_el] = env->exception.syndrome;
11026        break;
11027    case EXCP_IRQ:
11028    case EXCP_VIRQ:
11029        addr += 0x80;
11030        break;
11031    case EXCP_FIQ:
11032    case EXCP_VFIQ:
11033        addr += 0x100;
11034        break;
11035    case EXCP_VSERR:
11036        addr += 0x180;
11037        /* Construct the SError syndrome from IDS and ISS fields. */
11038        env->exception.syndrome = syn_serror(env->cp15.vsesr_el2 & 0x1ffffff);
11039        env->cp15.esr_el[new_el] = env->exception.syndrome;
11040        break;
11041    default:
11042        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
11043    }
11044
11045    if (is_a64(env)) {
11046        old_mode = pstate_read(env);
11047        aarch64_save_sp(env, arm_current_el(env));
11048        env->elr_el[new_el] = env->pc;
11049    } else {
11050        old_mode = cpsr_read_for_spsr_elx(env);
11051        env->elr_el[new_el] = env->regs[15];
11052
11053        aarch64_sync_32_to_64(env);
11054
11055        env->condexec_bits = 0;
11056    }
11057    env->banked_spsr[aarch64_banked_spsr_index(new_el)] = old_mode;
11058
11059    qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
11060                  env->elr_el[new_el]);
11061
11062    if (cpu_isar_feature(aa64_pan, cpu)) {
11063        /* The value of PSTATE.PAN is normally preserved, except when ... */
11064        new_mode |= old_mode & PSTATE_PAN;
11065        switch (new_el) {
11066        case 2:
11067            /* ... the target is EL2 with HCR_EL2.{E2H,TGE} == '11' ...  */
11068            if ((arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE))
11069                != (HCR_E2H | HCR_TGE)) {
11070                break;
11071            }
11072            /* fall through */
11073        case 1:
11074            /* ... the target is EL1 ... */
11075            /* ... and SCTLR_ELx.SPAN == 0, then set to 1.  */
11076            if ((env->cp15.sctlr_el[new_el] & SCTLR_SPAN) == 0) {
11077                new_mode |= PSTATE_PAN;
11078            }
11079            break;
11080        }
11081    }
11082    if (cpu_isar_feature(aa64_mte, cpu)) {
11083        new_mode |= PSTATE_TCO;
11084    }
11085
11086    if (cpu_isar_feature(aa64_ssbs, cpu)) {
11087        if (env->cp15.sctlr_el[new_el] & SCTLR_DSSBS_64) {
11088            new_mode |= PSTATE_SSBS;
11089        } else {
11090            new_mode &= ~PSTATE_SSBS;
11091        }
11092    }
11093
11094    pstate_write(env, PSTATE_DAIF | new_mode);
11095    env->aarch64 = true;
11096    aarch64_restore_sp(env, new_el);
11097
11098    if (tcg_enabled()) {
11099        helper_rebuild_hflags_a64(env, new_el);
11100    }
11101
11102    env->pc = addr;
11103
11104    qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n",
11105                  new_el, env->pc, pstate_read(env));
11106}
11107
11108/*
11109 * Do semihosting call and set the appropriate return value. All the
11110 * permission and validity checks have been done at translate time.
11111 *
 * We only see semihosting exceptions in TCG, as they are not
 * trapped to the hypervisor in KVM.
11114 */
11115#ifdef CONFIG_TCG
11116static void tcg_handle_semihosting(CPUState *cs)
11117{
11118    ARMCPU *cpu = ARM_CPU(cs);
11119    CPUARMState *env = &cpu->env;
11120
11121    if (is_a64(env)) {
11122        qemu_log_mask(CPU_LOG_INT,
11123                      "...handling as semihosting call 0x%" PRIx64 "\n",
11124                      env->xregs[0]);
11125        do_common_semihosting(cs);
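        /* Step past the 4-byte A64 insn that raised the semihosting call. */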
11126        env->pc += 4;
11127    } else {
11128        qemu_log_mask(CPU_LOG_INT,
11129                      "...handling as semihosting call 0x%x\n",
11130                      env->regs[0]);
11131        do_common_semihosting(cs);
11132        env->regs[15] += env->thumb ? 2 : 4;
11133    }
11134}
11135#endif
11136
11137/*
11138 * Handle a CPU exception for A and R profile CPUs.
11139 * Do any appropriate logging, handle PSCI calls, and then hand off
11140 * to the AArch64-entry or AArch32-entry function depending on the
11141 * target exception level's register width.
11142 *
 * Note: this is used both by TCG (as the do_interrupt tcg op) and by
 *       KVM, which uses it to re-inject guest debug exceptions and to
 *       inject a Synchronous External Abort.
11146 */
11147void arm_cpu_do_interrupt(CPUState *cs)
11148{
11149    ARMCPU *cpu = ARM_CPU(cs);
11150    CPUARMState *env = &cpu->env;
11151    unsigned int new_el = env->exception.target_el;
11152
11153    assert(!arm_feature(env, ARM_FEATURE_M));
11154
11155    arm_log_exception(cs);
11156    qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env),
11157                  new_el);
11158    if (qemu_loglevel_mask(CPU_LOG_INT)
11159        && !excp_is_internal(cs->exception_index)) {
11160        qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n",
11161                      syn_get_ec(env->exception.syndrome),
11162                      env->exception.syndrome);
11163    }
11164
11165    if (tcg_enabled() && arm_is_psci_call(cpu, cs->exception_index)) {
11166        arm_handle_psci_call(cpu);
11167        qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
11168        return;
11169    }
11170
11171    /*
11172     * Semihosting semantics depend on the register width of the code
11173     * that caused the exception, not the target exception level, so
11174     * must be handled here.
11175     */
11176#ifdef CONFIG_TCG
11177    if (cs->exception_index == EXCP_SEMIHOST) {
11178        tcg_handle_semihosting(cs);
11179        return;
11180    }
11181#endif
11182
    /*
     * Hooks may change global state, so the BQL must be held; it is
     * likewise required for any modification of cs->interrupt_request.
     */
11188    g_assert(qemu_mutex_iothread_locked());
11189
11190    arm_call_pre_el_change_hook(cpu);
11191
11192    assert(!excp_is_internal(cs->exception_index));
11193    if (arm_el_is_aa64(env, new_el)) {
11194        arm_cpu_do_interrupt_aarch64(cs);
11195    } else {
11196        arm_cpu_do_interrupt_aarch32(cs);
11197    }
11198
11199    arm_call_el_change_hook(cpu);
11200
11201    if (!kvm_enabled()) {
11202        cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
11203    }
11204}
11205#endif /* !CONFIG_USER_ONLY */
11206
11207uint64_t arm_sctlr(CPUARMState *env, int el)
11208{
11209    /* Only EL0 needs to be adjusted for EL1&0 or EL2&0. */
11210    if (el == 0) {
11211        ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0);
11212        el = mmu_idx == ARMMMUIdx_E20_0 ? 2 : 1;
11213    }
11214    return env->cp15.sctlr_el[el];
11215}
11216
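/*
 * Return the TBI controls as two bits: bit 0 for the low (TTBR0) VA
 * range and bit 1 for the high (TTBR1) range. Regimes with a single
 * range have their one TBI bit replicated into both; stage 2 has no
 * TBI and yields 0.
 */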
11217int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx)
11218{
11219    if (regime_has_2_ranges(mmu_idx)) {
11220        return extract64(tcr, 37, 2);
11221    } else if (regime_is_stage2(mmu_idx)) {
11222        return 0; /* VTCR_EL2 */
11223    } else {
11224        /* Replicate the single TBI bit so we always have 2 bits.  */
11225        return extract32(tcr, 20, 1) * 3;
11226    }
11227}
11228
11229int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx)
11230{
11231    if (regime_has_2_ranges(mmu_idx)) {
11232        return extract64(tcr, 51, 2);
11233    } else if (regime_is_stage2(mmu_idx)) {
11234        return 0; /* VTCR_EL2 */
11235    } else {
11236        /* Replicate the single TBID bit so we always have 2 bits.  */
11237        return extract32(tcr, 29, 1) * 3;
11238    }
11239}
11240
11241int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx)
11242{
11243    if (regime_has_2_ranges(mmu_idx)) {
11244        return extract64(tcr, 57, 2);
11245    } else {
11246        /* Replicate the single TCMA bit so we always have 2 bits.  */
11247        return extract32(tcr, 30, 1) * 3;
11248    }
11249}
11250
11251static ARMGranuleSize tg0_to_gran_size(int tg)
11252{
11253    switch (tg) {
11254    case 0:
11255        return Gran4K;
11256    case 1:
11257        return Gran64K;
11258    case 2:
11259        return Gran16K;
11260    default:
11261        return GranInvalid;
11262    }
11263}
11264
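/* Note that TCR_ELx.TG1 uses a different granule encoding from TG0. */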
11265static ARMGranuleSize tg1_to_gran_size(int tg)
11266{
11267    switch (tg) {
11268    case 1:
11269        return Gran16K;
11270    case 2:
11271        return Gran4K;
11272    case 3:
11273        return Gran64K;
11274    default:
11275        return GranInvalid;
11276    }
11277}
11278
11279static inline bool have4k(ARMCPU *cpu, bool stage2)
11280{
11281    return stage2 ? cpu_isar_feature(aa64_tgran4_2, cpu)
11282        : cpu_isar_feature(aa64_tgran4, cpu);
11283}
11284
11285static inline bool have16k(ARMCPU *cpu, bool stage2)
11286{
11287    return stage2 ? cpu_isar_feature(aa64_tgran16_2, cpu)
11288        : cpu_isar_feature(aa64_tgran16, cpu);
11289}
11290
11291static inline bool have64k(ARMCPU *cpu, bool stage2)
11292{
11293    return stage2 ? cpu_isar_feature(aa64_tgran64_2, cpu)
11294        : cpu_isar_feature(aa64_tgran64, cpu);
11295}
11296
11297static ARMGranuleSize sanitize_gran_size(ARMCPU *cpu, ARMGranuleSize gran,
11298                                         bool stage2)
11299{
11300    switch (gran) {
11301    case Gran4K:
11302        if (have4k(cpu, stage2)) {
11303            return gran;
11304        }
11305        break;
11306    case Gran16K:
11307        if (have16k(cpu, stage2)) {
11308            return gran;
11309        }
11310        break;
11311    case Gran64K:
11312        if (have64k(cpu, stage2)) {
11313            return gran;
11314        }
11315        break;
11316    case GranInvalid:
11317        break;
11318    }
11319    /*
11320     * If the guest selects a granule size that isn't implemented,
11321     * the architecture requires that we behave as if it selected one
11322     * that is (with an IMPDEF choice of which one to pick). We choose
11323     * to implement the smallest supported granule size.
11324     */
11325    if (have4k(cpu, stage2)) {
11326        return Gran4K;
11327    }
11328    if (have16k(cpu, stage2)) {
11329        return Gran16K;
11330    }
11331    assert(have64k(cpu, stage2));
11332    return Gran64K;
11333}
11334
11335ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
11336                                   ARMMMUIdx mmu_idx, bool data,
11337                                   bool el1_is_aa32)
11338{
11339    uint64_t tcr = regime_tcr(env, mmu_idx);
11340    bool epd, hpd, tsz_oob, ds, ha, hd;
11341    int select, tsz, tbi, max_tsz, min_tsz, ps, sh;
11342    ARMGranuleSize gran;
11343    ARMCPU *cpu = env_archcpu(env);
11344    bool stage2 = regime_is_stage2(mmu_idx);
11345
11346    if (!regime_has_2_ranges(mmu_idx)) {
11347        select = 0;
11348        tsz = extract32(tcr, 0, 6);
11349        gran = tg0_to_gran_size(extract32(tcr, 14, 2));
11350        if (stage2) {
11351            /* VTCR_EL2 */
11352            hpd = false;
11353        } else {
11354            hpd = extract32(tcr, 24, 1);
11355        }
11356        epd = false;
11357        sh = extract32(tcr, 12, 2);
11358        ps = extract32(tcr, 16, 3);
11359        ha = extract32(tcr, 21, 1) && cpu_isar_feature(aa64_hafs, cpu);
11360        hd = extract32(tcr, 22, 1) && cpu_isar_feature(aa64_hdbs, cpu);
11361        ds = extract64(tcr, 32, 1);
11362    } else {
11363        bool e0pd;
11364
11365        /*
11366         * Bit 55 is always between the two regions, and is canonical for
11367         * determining if address tagging is enabled.
11368         */
11369        select = extract64(va, 55, 1);
11370        if (!select) {
11371            tsz = extract32(tcr, 0, 6);
11372            gran = tg0_to_gran_size(extract32(tcr, 14, 2));
11373            epd = extract32(tcr, 7, 1);
11374            sh = extract32(tcr, 12, 2);
11375            hpd = extract64(tcr, 41, 1);
11376            e0pd = extract64(tcr, 55, 1);
11377        } else {
11378            tsz = extract32(tcr, 16, 6);
11379            gran = tg1_to_gran_size(extract32(tcr, 30, 2));
11380            epd = extract32(tcr, 23, 1);
11381            sh = extract32(tcr, 28, 2);
11382            hpd = extract64(tcr, 42, 1);
11383            e0pd = extract64(tcr, 56, 1);
11384        }
11385        ps = extract64(tcr, 32, 3);
11386        ha = extract64(tcr, 39, 1) && cpu_isar_feature(aa64_hafs, cpu);
11387        hd = extract64(tcr, 40, 1) && cpu_isar_feature(aa64_hdbs, cpu);
11388        ds = extract64(tcr, 59, 1);
11389
11390        if (e0pd && cpu_isar_feature(aa64_e0pd, cpu) &&
11391            regime_is_user(env, mmu_idx)) {
11392            epd = true;
11393        }
11394    }
11395
11396    gran = sanitize_gran_size(cpu, gran, stage2);
11397
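    /*
     * FEAT_TTST ("small translation tables") raises the maximum TxSZ
     * from 39 to 48 (47 for the 64K granule), allowing VA spaces as
     * small as 64KiB to be configured.
     */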
11398    if (cpu_isar_feature(aa64_st, cpu)) {
11399        max_tsz = 48 - (gran == Gran64K);
11400    } else {
11401        max_tsz = 39;
11402    }
11403
11404    /*
11405     * DS is RES0 unless FEAT_LPA2 is supported for the given page size;
11406     * adjust the effective value of DS, as documented.
11407     */
11408    min_tsz = 16;
11409    if (gran == Gran64K) {
11410        if (cpu_isar_feature(aa64_lva, cpu)) {
11411            min_tsz = 12;
11412        }
11413        ds = false;
11414    } else if (ds) {
11415        if (regime_is_stage2(mmu_idx)) {
11416            if (gran == Gran16K) {
11417                ds = cpu_isar_feature(aa64_tgran16_2_lpa2, cpu);
11418            } else {
11419                ds = cpu_isar_feature(aa64_tgran4_2_lpa2, cpu);
11420            }
11421        } else {
11422            if (gran == Gran16K) {
11423                ds = cpu_isar_feature(aa64_tgran16_lpa2, cpu);
11424            } else {
11425                ds = cpu_isar_feature(aa64_tgran4_lpa2, cpu);
11426            }
11427        }
11428        if (ds) {
11429            min_tsz = 12;
11430        }
11431    }
11432
11433    if (stage2 && el1_is_aa32) {
11434        /*
11435         * For AArch32 EL1 the min txsz (and thus max IPA size) requirements
11436         * are loosened: a configured IPA of 40 bits is permitted even if
11437         * the implemented PA is less than that (and so a 40 bit IPA would
11438         * fault for an AArch64 EL1). See R_DTLMN.
11439         */
11440        min_tsz = MIN(min_tsz, 24);
11441    }
11442
11443    if (tsz > max_tsz) {
11444        tsz = max_tsz;
11445        tsz_oob = true;
11446    } else if (tsz < min_tsz) {
11447        tsz = min_tsz;
11448        tsz_oob = true;
11449    } else {
11450        tsz_oob = false;
11451    }
11452
11453    /* Present TBI as a composite with TBID.  */
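    /*
     * TBID splits the TBI control by access type: when a TBID bit is
     * set, TBI applies to data accesses only, so for instruction
     * fetches we clear the corresponding TBI bits.
     */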
11454    tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
11455    if (!data) {
11456        tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
11457    }
11458    tbi = (tbi >> select) & 1;
11459
11460    return (ARMVAParameters) {
11461        .tsz = tsz,
11462        .ps = ps,
11463        .sh = sh,
11464        .select = select,
11465        .tbi = tbi,
11466        .epd = epd,
11467        .hpd = hpd,
11468        .tsz_oob = tsz_oob,
11469        .ds = ds,
11470        .ha = ha,
11471        .hd = ha && hd,
11472        .gran = gran,
11473    };
11474}
11475
11476/*
11477 * Note that signed overflow is undefined in C.  The following routines are
11478 * careful to use unsigned types where modulo arithmetic is required.
11479 * Failure to do so _will_ break on newer gcc.
11480 */
11481
11482/* Signed saturating arithmetic.  */
11483
11484/* Perform 16-bit signed saturating addition.  */
11485static inline uint16_t add16_sat(uint16_t a, uint16_t b)
11486{
11487    uint16_t res;
11488
11489    res = a + b;
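    /*
     * Signed overflow occurred iff the operands agree in sign but the
     * result does not; saturate towards the sign of the operands
     * (e.g. 0x7000 + 0x7000 yields 0x7fff).
     */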
11490    if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
11491        if (a & 0x8000) {
11492            res = 0x8000;
11493        } else {
11494            res = 0x7fff;
11495        }
11496    }
11497    return res;
11498}
11499
11500/* Perform 8-bit signed saturating addition.  */
11501static inline uint8_t add8_sat(uint8_t a, uint8_t b)
11502{
11503    uint8_t res;
11504
11505    res = a + b;
11506    if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
11507        if (a & 0x80) {
11508            res = 0x80;
11509        } else {
11510            res = 0x7f;
11511        }
11512    }
11513    return res;
11514}
11515
11516/* Perform 16-bit signed saturating subtraction.  */
11517static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
11518{
11519    uint16_t res;
11520
11521    res = a - b;
11522    if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
11523        if (a & 0x8000) {
11524            res = 0x8000;
11525        } else {
11526            res = 0x7fff;
11527        }
11528    }
11529    return res;
11530}
11531
11532/* Perform 8-bit signed saturating subtraction.  */
11533static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
11534{
11535    uint8_t res;
11536
11537    res = a - b;
11538    if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
11539        if (a & 0x80) {
11540            res = 0x80;
11541        } else {
11542            res = 0x7f;
11543        }
11544    }
11545    return res;
11546}
11547
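/*
 * Each block of ADD16/SUB16/ADD8/SUB8/PFX definitions below, followed
 * by an inclusion of op_addsub.h, stamps out one family of parallel
 * add/subtract helpers, with PFX glued into the helper names (so this
 * first block generates the signed-saturating "q"-prefixed helpers).
 * The header #undefs the macros again, ready for the next variant.
 */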
11548#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
11549#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
11550#define ADD8(a, b, n)  RESULT(add8_sat(a, b), n, 8);
11551#define SUB8(a, b, n)  RESULT(sub8_sat(a, b), n, 8);
11552#define PFX q
11553
11554#include "op_addsub.h"
11555
11556/* Unsigned saturating arithmetic.  */
11557static inline uint16_t add16_usat(uint16_t a, uint16_t b)
11558{
11559    uint16_t res;
11560    res = a + b;
11561    if (res < a) {
11562        res = 0xffff;
11563    }
11564    return res;
11565}
11566
11567static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
11568{
11569    if (a > b) {
11570        return a - b;
11571    } else {
11572        return 0;
11573    }
11574}
11575
11576static inline uint8_t add8_usat(uint8_t a, uint8_t b)
11577{
11578    uint8_t res;
11579    res = a + b;
11580    if (res < a) {
11581        res = 0xff;
11582    }
11583    return res;
11584}
11585
11586static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
11587{
11588    if (a > b) {
11589        return a - b;
11590    } else {
11591        return 0;
11592    }
11593}
11594
11595#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
11596#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
11597#define ADD8(a, b, n)  RESULT(add8_usat(a, b), n, 8);
11598#define SUB8(a, b, n)  RESULT(sub8_usat(a, b), n, 8);
11599#define PFX uq
11600
11601#include "op_addsub.h"
11602
11603/* Signed modulo arithmetic.  */
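/*
 * The signed non-saturating forms also record per-lane status in the
 * CPSR.GE bits (two bits per 16-bit lane, one per 8-bit lane): a bit
 * is set iff the lane result is >= 0. SEL consumes these bits below.
 */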
11604#define SARITH16(a, b, n, op) do { \
11605    int32_t sum; \
11606    sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
11607    RESULT(sum, n, 16); \
11608    if (sum >= 0) \
11609        ge |= 3 << (n * 2); \
11610    } while (0)
11611
11612#define SARITH8(a, b, n, op) do { \
11613    int32_t sum; \
11614    sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
11615    RESULT(sum, n, 8); \
11616    if (sum >= 0) \
11617        ge |= 1 << n; \
11618    } while (0)
11619
11620
11621#define ADD16(a, b, n) SARITH16(a, b, n, +)
11622#define SUB16(a, b, n) SARITH16(a, b, n, -)
11623#define ADD8(a, b, n)  SARITH8(a, b, n, +)
11624#define SUB8(a, b, n)  SARITH8(a, b, n, -)
11625#define PFX s
11626#define ARITH_GE
11627
11628#include "op_addsub.h"
11629
11630/* Unsigned modulo arithmetic.  */
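/*
 * For the unsigned forms the GE bits instead record carry-out on
 * addition and the absence of a borrow on subtraction.
 */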
11631#define ADD16(a, b, n) do { \
11632    uint32_t sum; \
11633    sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
11634    RESULT(sum, n, 16); \
11635    if ((sum >> 16) == 1) \
11636        ge |= 3 << (n * 2); \
11637    } while (0)
11638
11639#define ADD8(a, b, n) do { \
11640    uint32_t sum; \
11641    sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
11642    RESULT(sum, n, 8); \
11643    if ((sum >> 8) == 1) \
11644        ge |= 1 << n; \
11645    } while (0)
11646
11647#define SUB16(a, b, n) do { \
11648    uint32_t sum; \
11649    sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
11650    RESULT(sum, n, 16); \
11651    if ((sum >> 16) == 0) \
11652        ge |= 3 << (n * 2); \
11653    } while (0)
11654
11655#define SUB8(a, b, n) do { \
11656    uint32_t sum; \
11657    sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
11658    RESULT(sum, n, 8); \
11659    if ((sum >> 8) == 0) \
11660        ge |= 1 << n; \
11661    } while (0)
11662
11663#define PFX u
11664#define ARITH_GE
11665
11666#include "op_addsub.h"
11667
11668/* Halved signed arithmetic.  */
11669#define ADD16(a, b, n) \
11670  RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
11671#define SUB16(a, b, n) \
11672  RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
11673#define ADD8(a, b, n) \
11674  RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
11675#define SUB8(a, b, n) \
11676  RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
11677#define PFX sh
11678
11679#include "op_addsub.h"
11680
11681/* Halved unsigned arithmetic.  */
11682#define ADD16(a, b, n) \
11683  RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
11684#define SUB16(a, b, n) \
11685  RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
11686#define ADD8(a, b, n) \
11687  RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
11688#define SUB8(a, b, n) \
11689  RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
11690#define PFX uh
11691
11692#include "op_addsub.h"
11693
11694static inline uint8_t do_usad(uint8_t a, uint8_t b)
11695{
11696    if (a > b) {
11697        return a - b;
11698    } else {
11699        return b - a;
11700    }
11701}
11702
11703/* Unsigned sum of absolute byte differences.  */
11704uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
11705{
11706    uint32_t sum;
11707    sum = do_usad(a, b);
11708    sum += do_usad(a >> 8, b >> 8);
11709    sum += do_usad(a >> 16, b >> 16);
11710    sum += do_usad(a >> 24, b >> 24);
11711    return sum;
11712}
11713
11714/* For ARMv6 SEL instruction.  */
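/*
 * 'flags' is the GE field: each set bit selects the corresponding
 * byte of the result from a, each clear bit selects it from b.
 */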
11715uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
11716{
11717    uint32_t mask;
11718
11719    mask = 0;
11720    if (flags & 1) {
11721        mask |= 0xff;
11722    }
11723    if (flags & 2) {
11724        mask |= 0xff00;
11725    }
11726    if (flags & 4) {
11727        mask |= 0xff0000;
11728    }
11729    if (flags & 8) {
11730        mask |= 0xff000000;
11731    }
11732    return (a & mask) | (b & ~mask);
11733}
11734
11735/*
11736 * CRC helpers.
11737 * The upper bytes of val (above the number specified by 'bytes') must have
11738 * been zeroed out by the caller.
11739 */
11740uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
11741{
11742    uint8_t buf[4];
11743
11744    stl_le_p(buf, val);
11745
    /*
     * zlib's crc32() converts both the accumulator on input and the
     * result on output to one's complement; undo both inversions to
     * match the ARM CRC32 instruction, which specifies none.
     */
11747    return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
11748}
11749
11750uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
11751{
11752    uint8_t buf[4];
11753
11754    stl_le_p(buf, val);
11755
    /* Linux crc32c converts the output to one's complement; undo that. */
11757    return crc32c(acc, buf, bytes) ^ 0xffffffff;
11758}
11759
11760/*
11761 * Return the exception level to which FP-disabled exceptions should
11762 * be taken, or 0 if FP is enabled.
11763 */
11764int fp_exception_el(CPUARMState *env, int cur_el)
11765{
11766#ifndef CONFIG_USER_ONLY
11767    uint64_t hcr_el2;
11768
11769    /*
11770     * CPACR and the CPTR registers don't exist before v6, so FP is
11771     * always accessible
11772     */
11773    if (!arm_feature(env, ARM_FEATURE_V6)) {
11774        return 0;
11775    }
11776
11777    if (arm_feature(env, ARM_FEATURE_M)) {
11778        /* CPACR can cause a NOCP UsageFault taken to current security state */
11779        if (!v7m_cpacr_pass(env, env->v7m.secure, cur_el != 0)) {
11780            return 1;
11781        }
11782
11783        if (arm_feature(env, ARM_FEATURE_M_SECURITY) && !env->v7m.secure) {
11784            if (!extract32(env->v7m.nsacr, 10, 1)) {
11785                /* FP insns cause a NOCP UsageFault taken to Secure */
11786                return 3;
11787            }
11788        }
11789
11790        return 0;
11791    }
11792
11793    hcr_el2 = arm_hcr_el2_eff(env);
11794
11795    /*
11796     * The CPACR controls traps to EL1, or PL1 if we're 32 bit:
11797     * 0, 2 : trap EL0 and EL1/PL1 accesses
11798     * 1    : trap only EL0 accesses
11799     * 3    : trap no accesses
11800     * This register is ignored if E2H+TGE are both set.
11801     */
11802    if ((hcr_el2 & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
11803        int fpen = FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, FPEN);
11804
11805        switch (fpen) {
11806        case 1:
11807            if (cur_el != 0) {
11808                break;
11809            }
11810            /* fall through */
11811        case 0:
11812        case 2:
11813            /* Trap from Secure PL0 or PL1 to Secure PL1. */
11814            if (!arm_el_is_aa64(env, 3)
11815                && (cur_el == 3 || arm_is_secure_below_el3(env))) {
11816                return 3;
11817            }
11818            if (cur_el <= 1) {
11819                return 1;
11820            }
11821            break;
11822        }
11823    }
11824
11825    /*
11826     * The NSACR allows A-profile AArch32 EL3 and M-profile secure mode
11827     * to control non-secure access to the FPU. It doesn't have any
11828     * effect if EL3 is AArch64 or if EL3 doesn't exist at all.
11829     */
11830    if ((arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
11831         cur_el <= 2 && !arm_is_secure_below_el3(env))) {
11832        if (!extract32(env->cp15.nsacr, 10, 1)) {
11833            /* FP insns act as UNDEF */
11834            return cur_el == 2 ? 2 : 1;
11835        }
11836    }
11837
11838    /*
11839     * CPTR_EL2 is present in v7VE or v8, and changes format
11840     * with HCR_EL2.E2H (regardless of TGE).
11841     */
11842    if (cur_el <= 2) {
11843        if (hcr_el2 & HCR_E2H) {
11844            switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, FPEN)) {
11845            case 1:
11846                if (cur_el != 0 || !(hcr_el2 & HCR_TGE)) {
11847                    break;
11848                }
11849                /* fall through */
11850            case 0:
11851            case 2:
11852                return 2;
11853            }
11854        } else if (arm_is_el2_enabled(env)) {
11855            if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TFP)) {
11856                return 2;
11857            }
11858        }
11859    }
11860
11861    /* CPTR_EL3 : present in v8 */
11862    if (FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TFP)) {
11863        /* Trap all FP ops to EL3 */
11864        return 3;
11865    }
11866#endif
11867    return 0;
11868}
11869
/*
 * Return the exception level we're running at, assuming that mmu_idx
 * is the current MMU index.
 */
11871int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
11872{
11873    if (mmu_idx & ARM_MMU_IDX_M) {
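        /* M-profile MMU indexes encode the privilege level in the low bit. */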
11874        return mmu_idx & ARM_MMU_IDX_M_PRIV;
11875    }
11876
11877    switch (mmu_idx) {
11878    case ARMMMUIdx_E10_0:
11879    case ARMMMUIdx_E20_0:
11880        return 0;
11881    case ARMMMUIdx_E10_1:
11882    case ARMMMUIdx_E10_1_PAN:
11883        return 1;
11884    case ARMMMUIdx_E2:
11885    case ARMMMUIdx_E20_2:
11886    case ARMMMUIdx_E20_2_PAN:
11887        return 2;
11888    case ARMMMUIdx_E3:
11889        return 3;
11890    default:
11891        g_assert_not_reached();
11892    }
11893}
11894
11895#ifndef CONFIG_TCG
11896ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
11897{
11898    g_assert_not_reached();
11899}
11900#endif
11901
11902static bool arm_pan_enabled(CPUARMState *env)
11903{
11904    if (is_a64(env)) {
11905        return env->pstate & PSTATE_PAN;
11906    } else {
11907        return env->uncached_cpsr & CPSR_PAN;
11908    }
11909}
11910
11911ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el)
11912{
11913    ARMMMUIdx idx;
11914    uint64_t hcr;
11915
11916    if (arm_feature(env, ARM_FEATURE_M)) {
11917        return arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure);
11918    }
11919
11920    /* See ARM pseudo-function ELIsInHost.  */
11921    switch (el) {
11922    case 0:
11923        hcr = arm_hcr_el2_eff(env);
11924        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
11925            idx = ARMMMUIdx_E20_0;
11926        } else {
11927            idx = ARMMMUIdx_E10_0;
11928        }
11929        break;
11930    case 1:
11931        if (arm_pan_enabled(env)) {
11932            idx = ARMMMUIdx_E10_1_PAN;
11933        } else {
11934            idx = ARMMMUIdx_E10_1;
11935        }
11936        break;
11937    case 2:
11938        /* Note that TGE does not apply at EL2.  */
11939        if (arm_hcr_el2_eff(env) & HCR_E2H) {
11940            if (arm_pan_enabled(env)) {
11941                idx = ARMMMUIdx_E20_2_PAN;
11942            } else {
11943                idx = ARMMMUIdx_E20_2;
11944            }
11945        } else {
11946            idx = ARMMMUIdx_E2;
11947        }
11948        break;
11949    case 3:
11950        return ARMMMUIdx_E3;
11951    default:
11952        g_assert_not_reached();
11953    }
11954
11955    return idx;
11956}
11957
11958ARMMMUIdx arm_mmu_idx(CPUARMState *env)
11959{
11960    return arm_mmu_idx_el(env, arm_current_el(env));
11961}
11962
11963static bool mve_no_pred(CPUARMState *env)
11964{
11965    /*
11966     * Return true if there is definitely no predication of MVE
11967     * instructions by VPR or LTPSIZE. (Returning false even if there
11968     * isn't any predication is OK; generated code will just be
11969     * a little worse.)
11970     * If the CPU does not implement MVE then this TB flag is always 0.
11971     *
11972     * NOTE: if you change this logic, the "recalculate s->mve_no_pred"
11973     * logic in gen_update_fp_context() needs to be updated to match.
11974     *
11975     * We do not include the effect of the ECI bits here -- they are
11976     * tracked in other TB flags. This simplifies the logic for
11977     * "when did we emit code that changes the MVE_NO_PRED TB flag
11978     * and thus need to end the TB?".
11979     */
    if (!cpu_isar_feature(aa32_mve, env_archcpu(env))) {
11981        return false;
11982    }
11983    if (env->v7m.vpr) {
11984        return false;
11985    }
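    /* An LTPSIZE of 4 means no loop tail predication is active. */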
11986    if (env->v7m.ltpsize < 4) {
11987        return false;
11988    }
11989    return true;
11990}
11991
11992void cpu_get_tb_cpu_state(CPUARMState *env, vaddr *pc,
11993                          uint64_t *cs_base, uint32_t *pflags)
11994{
11995    CPUARMTBFlags flags;
11996
11997    assert_hflags_rebuild_correctly(env);
11998    flags = env->hflags;
11999
12000    if (EX_TBFLAG_ANY(flags, AARCH64_STATE)) {
12001        *pc = env->pc;
12002        if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
12003            DP_TBFLAG_A64(flags, BTYPE, env->btype);
12004        }
12005    } else {
12006        *pc = env->regs[15];
12007
12008        if (arm_feature(env, ARM_FEATURE_M)) {
12009            if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
12010                FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S)
12011                != env->v7m.secure) {
12012                DP_TBFLAG_M32(flags, FPCCR_S_WRONG, 1);
12013            }
12014
12015            if ((env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) &&
12016                (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) ||
12017                 (env->v7m.secure &&
12018                  !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)))) {
12019                /*
12020                 * ASPEN is set, but FPCA/SFPA indicate that there is no
12021                 * active FP context; we must create a new FP context before
12022                 * executing any FP insn.
12023                 */
12024                DP_TBFLAG_M32(flags, NEW_FP_CTXT_NEEDED, 1);
12025            }
12026
12027            bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
12028            if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) {
12029                DP_TBFLAG_M32(flags, LSPACT, 1);
12030            }
12031
12032            if (mve_no_pred(env)) {
12033                DP_TBFLAG_M32(flags, MVE_NO_PRED, 1);
12034            }
12035        } else {
12036            /*
12037             * Note that XSCALE_CPAR shares bits with VECSTRIDE.
12038             * Note that VECLEN+VECSTRIDE are RES0 for M-profile.
12039             */
12040            if (arm_feature(env, ARM_FEATURE_XSCALE)) {
12041                DP_TBFLAG_A32(flags, XSCALE_CPAR, env->cp15.c15_cpar);
12042            } else {
12043                DP_TBFLAG_A32(flags, VECLEN, env->vfp.vec_len);
12044                DP_TBFLAG_A32(flags, VECSTRIDE, env->vfp.vec_stride);
12045            }
12046            if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) {
12047                DP_TBFLAG_A32(flags, VFPEN, 1);
12048            }
12049        }
12050
12051        DP_TBFLAG_AM32(flags, THUMB, env->thumb);
12052        DP_TBFLAG_AM32(flags, CONDEXEC, env->condexec_bits);
12053    }
12054
12055    /*
12056     * The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
12057     * states defined in the ARM ARM for software singlestep:
12058     *  SS_ACTIVE   PSTATE.SS   State
12059     *     0            x       Inactive (the TB flag for SS is always 0)
12060     *     1            0       Active-pending
12061     *     1            1       Active-not-pending
12062     * SS_ACTIVE is set in hflags; PSTATE__SS is computed every TB.
12063     */
12064    if (EX_TBFLAG_ANY(flags, SS_ACTIVE) && (env->pstate & PSTATE_SS)) {
12065        DP_TBFLAG_ANY(flags, PSTATE__SS, 1);
12066    }
12067
12068    *pflags = flags.flags;
12069    *cs_base = flags.flags2;
12070}
12071
12072#ifdef TARGET_AARCH64
12073/*
12074 * The manual says that when SVE is enabled and VQ is widened the
12075 * implementation is allowed to zero the previously inaccessible
12076 * portion of the registers.  The corollary to that is that when
12077 * SVE is enabled and VQ is narrowed we are also allowed to zero
12078 * the now inaccessible portion of the registers.
12079 *
12080 * The intent of this is that no predicate bit beyond VQ is ever set.
12081 * Which means that some operations on predicate registers themselves
12082 * may operate on full uint64_t or even unrolled across the maximum
 * uint64_t[4].  Performing the full 4 words of host arithmetic
 * unconditionally
12084 * may well be cheaper than conditionals to restrict the operation
12085 * to the relevant portion of a uint16_t[16].
12086 */
12087void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq)
12088{
12089    int i, j;
12090    uint64_t pmask;
12091
12092    assert(vq >= 1 && vq <= ARM_MAX_VQ);
12093    assert(vq <= env_archcpu(env)->sve_max_vq);
12094
12095    /* Zap the high bits of the zregs.  */
12096    for (i = 0; i < 32; i++) {
12097        memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq));
12098    }
12099
12100    /* Zap the high bits of the pregs and ffr.  */
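    /*
     * Predicate registers hold one bit per vector byte, i.e. vq 16-bit
     * chunks, four of which fit in each uint64_t of p[]. pmask keeps
     * the still-valid low chunks of the first partially-used word
     * (e.g. vq == 5 keeps the low 16 bits of p[1]); later words are
     * cleared entirely.
     */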
12101    pmask = 0;
12102    if (vq & 3) {
12103        pmask = ~(-1ULL << (16 * (vq & 3)));
12104    }
12105    for (j = vq / 4; j < ARM_MAX_VQ / 4; j++) {
12106        for (i = 0; i < 17; ++i) {
12107            env->vfp.pregs[i].p[j] &= pmask;
12108        }
12109        pmask = 0;
12110    }
12111}
12112
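/*
 * Return the effective vector length for @el, expressed as VQ - 1,
 * or 0 if SVE/SME insns are trapped ("disabled") at that EL.
 */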
12113static uint32_t sve_vqm1_for_el_sm_ena(CPUARMState *env, int el, bool sm)
12114{
12115    int exc_el;
12116
12117    if (sm) {
12118        exc_el = sme_exception_el(env, el);
12119    } else {
12120        exc_el = sve_exception_el(env, el);
12121    }
12122    if (exc_el) {
12123        return 0; /* disabled */
12124    }
12125    return sve_vqm1_for_el_sm(env, el, sm);
12126}
12127
12128/*
12129 * Notice a change in SVE vector size when changing EL.
12130 */
12131void aarch64_sve_change_el(CPUARMState *env, int old_el,
12132                           int new_el, bool el0_a64)
12133{
12134    ARMCPU *cpu = env_archcpu(env);
12135    int old_len, new_len;
12136    bool old_a64, new_a64, sm;
12137
12138    /* Nothing to do if no SVE.  */
12139    if (!cpu_isar_feature(aa64_sve, cpu)) {
12140        return;
12141    }
12142
12143    /* Nothing to do if FP is disabled in either EL.  */
12144    if (fp_exception_el(env, old_el) || fp_exception_el(env, new_el)) {
12145        return;
12146    }
12147
12148    old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64;
12149    new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64;
12150
12151    /*
12152     * Both AArch64.TakeException and AArch64.ExceptionReturn
12153     * invoke ResetSVEState when taking an exception from, or
12154     * returning to, AArch32 state when PSTATE.SM is enabled.
12155     */
12156    sm = FIELD_EX64(env->svcr, SVCR, SM);
12157    if (old_a64 != new_a64 && sm) {
12158        arm_reset_sve_state(env);
12159        return;
12160    }
12161
12162    /*
12163     * DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped
12164     * at ELx, or not available because the EL is in AArch32 state, then
12165     * for all purposes other than a direct read, the ZCR_ELx.LEN field
12166     * has an effective value of 0".
12167     *
12168     * Consider EL2 (aa64, vq=4) -> EL0 (aa32) -> EL1 (aa64, vq=0).
12169     * If we ignore aa32 state, we would fail to see the vq4->vq0 transition
12170     * from EL2->EL1.  Thus we go ahead and narrow when entering aa32 so that
12171     * we already have the correct register contents when encountering the
12172     * vq0->vq0 transition between EL0->EL1.
12173     */
12174    old_len = new_len = 0;
12175    if (old_a64) {
12176        old_len = sve_vqm1_for_el_sm_ena(env, old_el, sm);
12177    }
12178    if (new_a64) {
12179        new_len = sve_vqm1_for_el_sm_ena(env, new_el, sm);
12180    }
12181
12182    /* When changing vector length, clear inaccessible state.  */
12183    if (new_len < old_len) {
12184        aarch64_sve_narrow_vq(env, new_len + 1);
12185    }
12186}
12187#endif
12188
12189#ifndef CONFIG_USER_ONLY
12190ARMSecuritySpace arm_security_space(CPUARMState *env)
12191{
12192    if (arm_feature(env, ARM_FEATURE_M)) {
12193        return arm_secure_to_space(env->v7m.secure);
12194    }
12195
12196    /*
12197     * If EL3 is not supported then the secure state is implementation
12198     * defined, in which case QEMU defaults to non-secure.
12199     */
12200    if (!arm_feature(env, ARM_FEATURE_EL3)) {
12201        return ARMSS_NonSecure;
12202    }
12203
12204    /* Check for AArch64 EL3 or AArch32 Mon. */
12205    if (is_a64(env)) {
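        /* PSTATE.EL occupies bits [3:2]; the value 3 means EL3. */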
12206        if (extract32(env->pstate, 2, 2) == 3) {
12207            if (cpu_isar_feature(aa64_rme, env_archcpu(env))) {
12208                return ARMSS_Root;
12209            } else {
12210                return ARMSS_Secure;
12211            }
12212        }
12213    } else {
12214        if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
12215            return ARMSS_Secure;
12216        }
12217    }
12218
12219    return arm_security_space_below_el3(env);
12220}
12221
12222ARMSecuritySpace arm_security_space_below_el3(CPUARMState *env)
12223{
12224    assert(!arm_feature(env, ARM_FEATURE_M));
12225
12226    /*
12227     * If EL3 is not supported then the secure state is implementation
12228     * defined, in which case QEMU defaults to non-secure.
12229     */
12230    if (!arm_feature(env, ARM_FEATURE_EL3)) {
12231        return ARMSS_NonSecure;
12232    }
12233
12234    /*
12235     * Note NSE cannot be set without RME, and NSE & !NS is Reserved.
12236     * Ignoring NSE when !NS retains consistency without having to
12237     * modify other predicates.
12238     */
12239    if (!(env->cp15.scr_el3 & SCR_NS)) {
12240        return ARMSS_Secure;
12241    } else if (env->cp15.scr_el3 & SCR_NSE) {
12242        return ARMSS_Realm;
12243    } else {
12244        return ARMSS_NonSecure;
12245    }
12246}
12247#endif /* !CONFIG_USER_ONLY */
12248