/* qemu/target-arm/helper.c */
#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "exec/gdbstub.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "sysemu/arch_init.h"
#include "sysemu/sysemu.h"
#include "qemu/bitops.h"
#include "qemu/crc32c.h"
#include "exec/cpu_ldst.h"
#include "arm_ldst.h"
#include <zlib.h> /* For crc32 */
#include "exec/semihost.h"
#include "sysemu/kvm.h"

#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */

#ifndef CONFIG_USER_ONLY
static bool get_phys_addr(CPUARMState *env, target_ulong address,
                          int access_type, ARMMMUIdx mmu_idx,
                          hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                          target_ulong *page_size, uint32_t *fsr,
                          ARMMMUFaultInfo *fi);

static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
                               int access_type, ARMMMUIdx mmu_idx,
                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                               target_ulong *page_size_ptr, uint32_t *fsr,
                               ARMMMUFaultInfo *fi);

/* Definitions for the PMCCNTR and PMCR registers */
#define PMCRD   0x8
#define PMCRC   0x4
#define PMCRE   0x1
#endif

static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    /* VFP data registers are always little-endian.  */
    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        stfq_le_p(buf, env->vfp.regs[reg]);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        /* Aliases for Q regs.  */
        nregs += 16;
        if (reg < nregs) {
            stfq_le_p(buf, env->vfp.regs[(reg - 32) * 2]);
            stfq_le_p(buf + 8, env->vfp.regs[(reg - 32) * 2 + 1]);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
    case 1: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSCR]); return 4;
    case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
    }
    return 0;
}

static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        env->vfp.regs[reg] = ldfq_le_p(buf);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        nregs += 16;
        if (reg < nregs) {
            env->vfp.regs[(reg - 32) * 2] = ldfq_le_p(buf);
            env->vfp.regs[(reg - 32) * 2 + 1] = ldfq_le_p(buf + 8);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
    case 1: env->vfp.xregs[ARM_VFP_FPSCR] = ldl_p(buf); return 4;
    case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
    }
    return 0;
}
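
/* These get/set pairs follow the gdbstub coprocessor callback convention:
 * the return value is the number of bytes read or written for register
 * 'reg', and 0 means the register is not handled here.  As a rough sketch
 * (the register count and XML name are illustrative, not taken from this
 * section), they are hooked up elsewhere with something like:
 *
 *   gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
 *                            19, "arm-vfp.xml", 0);
 */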

static int arm_sys_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0:
        /* TTBCR Secure */
        stl_p(buf, env->cp15.tcr_el[3].raw_tcr);
        return 4;
    case 1:
        /* TTBR0 Secure */
        stl_p(buf, env->cp15.ttbr0_s);
        return 4;
    case 2:
        /* TTBR1 Secure */
        stl_p(buf, env->cp15.ttbr1_s);
        return 4;
    default:
        return 0;
    }
}

static int arm_sys_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0:
        /* TTBCR Secure */
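        /* Read-only through the gdb interface: the write is ignored and
         * the register reported as unhandled.
         */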
        return 0;
    case 1:
        /* TTBR0 Secure */
        env->cp15.ttbr0_s = ldl_p(buf);
        return 4;
    case 2:
        /* TTBR1 Secure */
        env->cp15.ttbr1_s = ldl_p(buf);
        return 4;
    default:
        return 0;
    }
}

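/* AArch64 advanced SIMD state is stored as pairs of 64-bit halves in
 * vfp.regs[], so the 128-bit register Vn lives in regs[2n] (low half)
 * and regs[2n + 1] (high half), both little-endian on the gdb wire.
 */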
static int aarch64_fpu_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
        /* 128 bit FP register */
        stfq_le_p(buf, env->vfp.regs[reg * 2]);
        stfq_le_p(buf + 8, env->vfp.regs[reg * 2 + 1]);
        return 16;
    case 32:
        /* FPSR */
        stl_p(buf, vfp_get_fpsr(env));
        return 4;
    case 33:
        /* FPCR */
        stl_p(buf, vfp_get_fpcr(env));
        return 4;
    default:
        return 0;
    }
}

static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
        /* 128 bit FP register */
        env->vfp.regs[reg * 2] = ldfq_le_p(buf);
        env->vfp.regs[reg * 2 + 1] = ldfq_le_p(buf + 8);
        return 16;
    case 32:
        /* FPSR */
        vfp_set_fpsr(env, ldl_p(buf));
        return 4;
    case 33:
        /* FPCR */
        vfp_set_fpcr(env, ldl_p(buf));
        return 4;
    default:
        return 0;
    }
}

static int aarch64_elx_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg, int el)
{
    switch (reg) {
    case 0:
        stfq_le_p(buf, env->elr_el[el]);
        return 8;
    case 1:
        stfq_le_p(buf, env->cp15.esr_el[el]);
        return 8;
    case 2:
        stfq_le_p(buf, env->banked_spsr[aarch64_banked_spsr_index(el)]);
        return 8;
    case 3:
        stfq_le_p(buf, env->cp15.ttbr0_el[el]);
        return 8;
    case 4:
        if (el == 1) {
            stfq_le_p(buf, env->cp15.ttbr1_el[el]);
            return 8;
        }
        /* Fallthrough */
    default:
        return 0;
    }
}

static int aarch64_elx_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg, int el)
{
    switch (reg) {
    case 0:
        env->elr_el[el] = ldfq_le_p(buf);
        return 8;
    case 1:
        env->cp15.esr_el[el] = ldfq_le_p(buf);
        return 8;
    case 2:
        env->banked_spsr[aarch64_banked_spsr_index(el)] = ldfq_le_p(buf);
        return 8;
    case 3:
        env->cp15.ttbr0_el[el] = ldfq_le_p(buf);
        return 8;
    case 4:
        if (el == 1) {
            env->cp15.ttbr1_el[el] = ldfq_le_p(buf);
            return 8;
        }
        /* Fallthrough */
    default:
        return 0;
    }
}

static int aarch64_el1_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    return aarch64_elx_gdb_get_reg(env, buf, reg, 1);
}

static int aarch64_el1_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    return aarch64_elx_gdb_set_reg(env, buf, reg, 1);
}

static int aarch64_el2_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    return aarch64_elx_gdb_get_reg(env, buf, reg, 2);
}

static int aarch64_el2_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    return aarch64_elx_gdb_set_reg(env, buf, reg, 2);
}

static int aarch64_el3_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    return aarch64_elx_gdb_get_reg(env, buf, reg, 3);
}

static int aarch64_el3_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    return aarch64_elx_gdb_set_reg(env, buf, reg, 3);
}

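/* Raw accessors for coprocessor state: these bypass any readfn/writefn
 * side effects and touch the CPUARMState field named by ri->fieldoffset
 * directly.  They are only valid for regdefs that have a fieldoffset.
 */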
static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        return CPREG_FIELD64(env, ri);
    } else {
        return CPREG_FIELD32(env, ri);
    }
}

static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(env, ri) = value;
    } else {
        CPREG_FIELD32(env, ri) = value;
    }
}

static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return (char *)env + ri->fieldoffset;
}

uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Raw read of a coprocessor register (as needed for migration, etc). */
    if (ri->type & ARM_CP_CONST) {
        return ri->resetvalue;
    } else if (ri->raw_readfn) {
        return ri->raw_readfn(env, ri);
    } else if (ri->readfn) {
        return ri->readfn(env, ri);
    } else {
        return raw_read(env, ri);
    }
}

static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t v)
{
    /* Raw write of a coprocessor register (as needed for migration, etc).
     * Note that constant registers are treated as write-ignored; the
     * caller should check for success by whether a readback gives the
     * value written.
     */
    if (ri->type & ARM_CP_CONST) {
        return;
    } else if (ri->raw_writefn) {
        ri->raw_writefn(env, ri, v);
    } else if (ri->writefn) {
        ri->writefn(env, ri, v);
    } else {
        raw_write(env, ri, v);
    }
}

static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
{
   /* Return true if the regdef would cause an assertion if you called
    * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
    * program bug for it not to have the NO_RAW flag).
    * NB that returning false here doesn't necessarily mean that calling
    * read/write_raw_cp_reg() is safe, because we can't distinguish "has
    * read/write access functions which are safe for raw use" from "has
    * read/write access functions which have side effects but has forgotten
    * to provide raw access functions".
    * The tests here line up with the conditions in read/write_raw_cp_reg()
    * and assertions in raw_read()/raw_write().
    */
    if ((ri->type & ARM_CP_CONST) ||
        ri->fieldoffset ||
        ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
        return false;
    }
    return true;
}

bool write_cpustate_to_list(ARMCPU *cpu)
{
    /* Write the coprocessor state from cpu->env to the (index,value) list. */
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        cpu->cpreg_values[i] = read_raw_cp_reg(&cpu->env, ri);
    }
    return ok;
}

bool write_list_to_cpustate(ARMCPU *cpu)
{
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        uint64_t v = cpu->cpreg_values[i];
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        /* Write value and confirm it reads back as written
         * (to catch read-only registers and partially read-only
         * registers where the incoming migration value doesn't match)
         */
        write_raw_cp_reg(&cpu->env, ri, v);
        if (read_raw_cp_reg(&cpu->env, ri) != v) {
            ok = false;
        }
    }
    return ok;
}
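
/* These two helpers are the save and load halves of the cpreg list sync:
 * write_cpustate_to_list() fills cpreg_values[] from the live CPU state,
 * and write_list_to_cpustate() pushes values back, using the readback
 * check above to report registers that refuse the incoming value.  A
 * minimal sketch of a caller (hypothetical, not code from this section):
 *
 *   if (!write_cpustate_to_list(cpu)) {
 *       // some registers were missing or could not be read raw
 *   }
 */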

static void add_cpreg_to_list(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
        cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
        /* The value array need not be initialized at this point */
        cpu->cpreg_array_len++;
    }
}

static void count_cpreg(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
        cpu->cpreg_array_len++;
    }
}

static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
{
    uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
    uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);

    if (aidx > bidx) {
        return 1;
    }
    if (aidx < bidx) {
        return -1;
    }
    return 0;
}

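/* init_cpreg_list() uses the usual two-pass pattern over the hash table:
 * count_cpreg() sizes the arrays, then add_cpreg_to_list() fills the index
 * array in sorted key order; the assert checks that both passes agreed.
 */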
void init_cpreg_list(ARMCPU *cpu)
{
    /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
     * Note that we require cpreg_tuples[] to be sorted by key ID.
     */
    GList *keys;
    int arraylen;

    keys = g_hash_table_get_keys(cpu->cp_regs);
    keys = g_list_sort(keys, cpreg_key_compare);

    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, count_cpreg, cpu);

    arraylen = cpu->cpreg_array_len;
    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, add_cpreg_to_list, cpu);

    assert(cpu->cpreg_array_len == arraylen);

    g_list_free(keys);
}

/*
 * Some registers are not accessible if EL3.NS=0 and EL3 is using AArch32 but
 * they are accessible when EL3 is using AArch64 regardless of EL3.NS.
 *
 * access_el3_aa32ns: Used to check AArch32 register views.
 * access_el3_aa32ns_aa64any: Used to check both AArch32/64 register views.
 */
static CPAccessResult access_el3_aa32ns(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    bool secure = arm_is_secure_below_el3(env);

    assert(!arm_el_is_aa64(env, 3));
    if (secure) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult access_el3_aa32ns_aa64any(CPUARMState *env,
                                                const ARMCPRegInfo *ri,
                                                bool isread)
{
    if (!arm_el_is_aa64(env, 3)) {
        return access_el3_aa32ns(env, ri, isread);
    }
    return CP_ACCESS_OK;
}

/* Some secure-only AArch32 registers trap to EL3 if used from
 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
 * We assume that the .access field is set to PL1_RW.
 */
static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
                                            const ARMCPRegInfo *ri,
                                            bool isread)
{
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL3;
    }
    /* This will be EL1 NS and EL2 NS, which just UNDEF */
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}

/* Check for traps to "powerdown debug" registers, which are controlled
 * by MDCR.TDOSA
 */
static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDOSA)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to "debug ROM" registers, which are controlled
 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDRA)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to general debug registers, which are controlled
 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDA)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to performance monitor registers, which are controlled
 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
 */
static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    raw_write(env, ri, value);
    tlb_flush(CPU(cpu), 1); /* Flush TLB as domain not tracked in TLB */
}

static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (raw_read(env, ri) != value) {
        /* Unlike real hardware the qemu TLB uses virtual addresses,
         * not modified virtual addresses, so this causes a TLB flush.
         */
        tlb_flush(CPU(cpu), 1);
        raw_write(env, ri, value);
    }
}

static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_MPU)
        && !extended_addresses_enabled(env)) {
        /* For VMSA (when not using the LPAE long descriptor page table
         * format) this register includes the ASID, so do a TLB flush.
         * For PMSA it is purely a process ID and no action is needed.
         */
        tlb_flush(CPU(cpu), 1);
    }
    raw_write(env, ri, value);
}

static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate all (TLBIALL) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    tlb_flush(CPU(cpu), 1);
}

static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
}

static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate by ASID (TLBIASID) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    tlb_flush(CPU(cpu), value == 0);
}

static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
}

/* IS variants of TLB operations must affect all cores */
static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *other_cs;

    CPU_FOREACH(other_cs) {
        tlb_flush(other_cs, 1);
    }
}

static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *other_cs;

    CPU_FOREACH(other_cs) {
        tlb_flush(other_cs, value == 0);
    }
}

static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *other_cs;

    CPU_FOREACH(other_cs) {
        tlb_flush_page(other_cs, value & TARGET_PAGE_MASK);
    }
}

static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *other_cs;

    CPU_FOREACH(other_cs) {
        tlb_flush_page(other_cs, value & TARGET_PAGE_MASK);
    }
}

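/* In the reginfo tables below, the .cp/.opc1/.crn/.crm/.opc2 fields mirror
 * the AArch32 MRC/MCR (or AArch64 MRS/MSR) encoding of the register; for
 * example, the FCSEIDR entries correspond to MRC p15, 0, <Rt>, c13, c0, 0.
 */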
static const ARMCPRegInfo cp_reginfo[] = {
    /* Define the secure and non-secure FCSE identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3).  This allows
     * the secure register to be properly reset and migrated. There is also no
     * v8 EL1 version of the register so the non-secure instance stands alone.
     */
    { .name = "FCSEIDR(NS)",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    { .name = "FCSEIDR(S)",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    /* Define the secure and non-secure context identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3).  This allows
     * the secure register to be properly reset and migrated.  In the
     * non-secure case, the 32-bit register will have reset and migration
     * disabled during registration as it is handled by the 64-bit instance.
     */
    { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    { .name = "CONTEXTIDR(S)", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo not_v8_cp_reginfo[] = {
    /* NB: Some of these registers exist in v8 but with more precise
     * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
     */
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR",
      .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
     * For v6 and v5, these mappings are overly broad.
     */
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    /* Cache maintenance ops; some of this space may be overridden later. */
    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo not_v6_cp_reginfo[] = {
    /* Not all pre-v6 cores implemented this WFI, so this is slightly
     * over-broad.
     */
    { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_WFI },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo not_v7_cp_reginfo[] = {
    /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
     * is UNPREDICTABLE; we choose to NOP as most implementations do).
     */
    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_WFI },
    /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
     * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
     * OMAPCP will override this space.
     */
    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
      .resetvalue = 0 },
    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
      .resetvalue = 0 },
    /* v6 doesn't have the cache ID registers but Linux reads them anyway */
    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
     * implementing it as RAZ means the "debug architecture version" bits
     * will read as a reserved value, which should cause Linux to not try
     * to use the debug hardware.
     */
    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MMU TLB control. Note that the wildcarding means we cover not just
     * the unified TLB ops but also the dside/iside/inner-shareable variants.
     */
    { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
      .type = ARM_CP_NO_RAW },
    { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};

static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    uint32_t mask = 0;

    /* In ARMv8 most bits of CPACR_EL1 are RES0. */
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
         * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
         * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
         */
        if (arm_feature(env, ARM_FEATURE_VFP)) {
            /* VFP coprocessor: cp10 & cp11 [23:20] */
            mask |= (1 << 31) | (1 << 30) | (0xf << 20);

            if (!arm_feature(env, ARM_FEATURE_NEON)) {
                /* ASEDIS [31] bit is RAO/WI */
                value |= (1 << 31);
            }

            /* VFPv3 and upwards with NEON implement 32 double precision
             * registers (D0-D31).
             */
            if (!arm_feature(env, ARM_FEATURE_NEON) ||
                    !arm_feature(env, ARM_FEATURE_VFP3)) {
                /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
                value |= (1 << 30);
            }
        }
        value &= mask;
    }
    env->cp15.cpacr_el1 = value;
}
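
/* Worked example of the masking above, assuming a hypothetical ARMv7 core
 * with VFPv3 but no NEON: a guest write of 0x00F00000 (cp10/cp11 fully
 * enabled) is stored as 0xC0F00000, because ASEDIS [31] and D32DIS [30]
 * both read-as-one on such a core, and all bits outside the mask are
 * RAZ/WI.
 */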

static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* Check if CPACR accesses are to be trapped to EL2 */
        if (arm_current_el(env) == 1 &&
            (env->cp15.cptr_el[2] & CPTR_TCPAC) && !arm_is_secure(env)) {
            return CP_ACCESS_TRAP_EL2;
        /* Check if CPACR accesses are to be trapped to EL3 */
        } else if (arm_current_el(env) < 3 &&
                   (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
            return CP_ACCESS_TRAP_EL3;
        }
    }

    return CP_ACCESS_OK;
}

static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    /* Check if CPTR accesses are set to trap to EL3 */
    if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

static const ARMCPRegInfo v6_cp_reginfo[] = {
    /* prefetch by MVA in v6, NOP in v7 */
    { .name = "MVA_prefetch",
      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* We need to break the TB after ISB to execute self-modifying code
     * correctly and also to take any pending interrupts immediately.
     * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag.
     */
    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
                             offsetof(CPUARMState, cp15.ifar_ns) },
      .resetvalue = 0, },
    /* Watchpoint Fault Address Register : should actually only be present
     * for 1136, 1176, 11MPCore.
     */
    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
      .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
      .resetvalue = 0, .writefn = cpacr_write },
    REGINFO_SENTINEL
};

static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /* Performance monitor registers user accessibility is controlled
     * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
     * trapping to EL2 or EL3 for other accesses.
     */
    int el = arm_current_el(env);

    if (el == 0 && !env->cp15.c9_pmuserenr) {
        return CP_ACCESS_TRAP;
    }
    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

#ifndef CONFIG_USER_ONLY

static inline bool arm_ccnt_enabled(CPUARMState *env)
{
    /* This does not take the PMCCFILTR_EL0 filter settings into account */

    if (!(env->cp15.c9_pmcr & PMCRE)) {
        return false;
    }

    return true;
}

void pmccntr_sync(CPUARMState *env)
{
    uint64_t temp_ticks;

    temp_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                          ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);

    if (env->cp15.c9_pmcr & PMCRD) {
        /* Increment once every 64 processor clock cycles */
        temp_ticks /= 64;
    }

    if (arm_ccnt_enabled(env)) {
        env->cp15.c15_ccnt = temp_ticks - env->cp15.c15_ccnt;
    }
}
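
/* pmccntr_sync() is self-inverse while the counter is enabled: c15_ccnt
 * normally holds "ticks_now - guest_counter" (an offset), and one call
 * converts that into the live counter value; a second call converts it
 * back into an offset.  Callers therefore bracket changes to the counting
 * state with a sync/modify/sync pair.  For example, with ticks_now = 1000
 * and c15_ccnt = 400, PMCCNTR reads as 600; the first sync stores 600 (the
 * value) and the second sync restores the 400 offset.
 */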

static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    pmccntr_sync(env);

    if (value & PMCRC) {
        /* The counter has been reset */
        env->cp15.c15_ccnt = 0;
    }

    /* only the DP, X, D and E bits are writable */
    env->cp15.c9_pmcr &= ~0x39;
    env->cp15.c9_pmcr |= (value & 0x39);

    pmccntr_sync(env);
}

static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint64_t total_ticks;

    if (!arm_ccnt_enabled(env)) {
        /* Counter is disabled, do not change value */
        return env->cp15.c15_ccnt;
    }

    total_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                           ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);

    if (env->cp15.c9_pmcr & PMCRD) {
        /* Increment once every 64 processor clock cycles */
        total_ticks /= 64;
    }
    return total_ticks - env->cp15.c15_ccnt;
}

static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    uint64_t total_ticks;

    if (!arm_ccnt_enabled(env)) {
        /* Counter is disabled, set the absolute value */
        env->cp15.c15_ccnt = value;
        return;
    }

    total_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                           ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);

    if (env->cp15.c9_pmcr & PMCRD) {
        /* Increment once every 64 processor clock cycles */
        total_ticks /= 64;
    }
    env->cp15.c15_ccnt = total_ticks - value;
}

static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    uint64_t cur_val = pmccntr_read(env, NULL);

    pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
}

#else /* CONFIG_USER_ONLY */

void pmccntr_sync(CPUARMState *env)
{
}

#endif

static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmccntr_sync(env);
    env->cp15.pmccfiltr_el0 = value & 0x7E000000;
    pmccntr_sync(env);
}

static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    value &= (1 << 31);
    env->cp15.c9_pmcnten |= value;
}

static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= (1 << 31);
    env->cp15.c9_pmcnten &= ~value;
}

static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    env->cp15.c9_pmovsr &= ~value;
}

static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    env->cp15.c9_pmxevtyper = value & 0xff;
}

static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    env->cp15.c9_pmuserenr = value & 1;
}

static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    /* We have no event counters so only the C bit can be changed */
    value &= (1 << 31);
    env->cp15.c9_pminten |= value;
}

static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= (1 << 31);
    env->cp15.c9_pminten &= ~value;
}

static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    /* Note that even though the AArch64 view of this register has bits
     * [10:0] all RES0 we can only mask the bottom 5, to comply with the
     * architectural requirements for bits which are RES0 only in some
     * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
     * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
     */
    raw_write(env, ri, value & ~0x1FULL);
}

static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    /* We only mask off bits that are RES0 both for AArch64 and AArch32.
     * For bits that vary between AArch32/64, code needs to check the
     * current execution mode before directly using the feature bit.
     */
    uint32_t valid_mask = SCR_AARCH64_MASK | SCR_AARCH32_MASK;

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        valid_mask &= ~SCR_HCE;

        /* On ARMv7, SMD (or SCD as it is called in v7) is only
         * supported if EL2 exists. The bit is UNK/SBZP when
         * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
         * when EL2 is unavailable.
         * On ARMv8, this bit is always available.
         */
        if (arm_feature(env, ARM_FEATURE_V7) &&
            !arm_feature(env, ARM_FEATURE_V8)) {
            valid_mask &= ~SCR_SMD;
        }
    }

    /* Clear all-context RES0 bits.  */
    value &= valid_mask;
    raw_write(env, ri, value);
}

static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    /* Acquire the CSSELR index from the bank corresponding to the CCSIDR
     * access.
     */
    uint32_t index = A32_BANKED_REG_GET(env, csselr,
                                        ri->secure & ARM_CP_SECSTATE_S);

    return cpu->ccsidr[index];
}

static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    raw_write(env, ri, value & 0xf);
}

static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t ret = 0;

    if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
        ret |= CPSR_I;
    }
    if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
        ret |= CPSR_F;
    }
    /* External aborts are not possible in QEMU so A bit is always clear */
    return ret;
}

static const ARMCPRegInfo v7_cp_reginfo[] = {
    /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
    { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* Performance monitors are implementation defined in v7,
     * but with an ARM recommended set of registers, which we
     * follow (although we don't actually implement any counters)
     *
     * Performance registers fall into three categories:
     *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
     *  (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
     *  (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
     * For the cases controlled by PMUSERENR we must set .access to PL0_RW
     * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
     */
    { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenset_write,
      .accessfn = pmreg_access,
      .raw_writefn = raw_write },
    { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
      .access = PL0_RW, .accessfn = pmreg_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
      .writefn = pmcntenset_write, .raw_writefn = raw_write },
    { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .accessfn = pmreg_access,
      .writefn = pmcntenclr_write,
      .type = ARM_CP_ALIAS },
    { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenclr_write },
    { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .accessfn = pmreg_access,
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    /* Unimplemented so WI. */
    { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .accessfn = pmreg_access, .type = ARM_CP_NOP },
    /* Since we don't implement any events, writing to PMSELR is UNPREDICTABLE.
     * We choose to RAZ/WI.
     */
    { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
      .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0,
      .accessfn = pmreg_access },
#ifndef CONFIG_USER_ONLY
    { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
      .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_IO,
      .readfn = pmccntr_read, .writefn = pmccntr_write32,
      .accessfn = pmreg_access },
    { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_IO,
      .readfn = pmccntr_read, .writefn = pmccntr_write, },
#endif
    { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
      .writefn = pmccfiltr_write,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
      .resetvalue = 0, },
    { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmxevtyper),
      .accessfn = pmreg_access, .writefn = pmxevtyper_write,
      .raw_writefn = raw_write },
    /* Unimplemented, RAZ/WI. */
    { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0,
      .accessfn = pmreg_access },
    { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .resetvalue = 0,
      .writefn = pmintenset_write, .raw_writefn = raw_write },
    { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write, },
    { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write },
    { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .writefn = vbar_write,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s),
                             offsetof(CPUARMState, cp15.vbar_ns) },
      .resetvalue = 0 },
    { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
      .access = PL1_R, .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
    { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
      .access = PL1_RW, .writefn = csselr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
                             offsetof(CPUARMState, cp15.csselr_ns) } },
    /* Auxiliary ID register: this actually has an IMPDEF value but for now
     * just RAZ for all cores:
     */
    { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
      .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* Auxiliary fault status registers: these also are IMPDEF, and we
     * choose to RAZ/WI for all cores.
     */
    { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MAIR can just read-as-written because we don't implement caches
     * and so don't need to care about memory attributes.
     */
    { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
      .resetvalue = 0 },
    { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
      .resetvalue = 0 },
    /* For non-long-descriptor page tables these are PRRR and NMRR;
     * regardless they still act as reads-as-written for QEMU.
     */
     /* MAIR0/1 are defined separately from their 64-bit counterpart which
      * allows them to assign the correct fieldoffset based on the endianness
      * handled in the field definitions.
      */
    { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
                             offsetof(CPUARMState, cp15.mair0_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
                             offsetof(CPUARMState, cp15.mair1_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
    /* 32 bit ITLB invalidates */
    { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    /* 32 bit DTLB invalidates */
    { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    /* 32 bit TLB invalidates */
    { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo v7mp_cp_reginfo[] = {
    /* 32 bit TLB invalidates, Inner Shareable */
    { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_is_write },
    { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
    { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbiasid_is_write },
    { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbimvaa_is_write },
    REGINFO_SENTINEL
};

static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    value &= 1;
    env->teecr = value;
}

static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (arm_current_el(env) == 0 && (env->teecr & 1)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static const ARMCPRegInfo t2ee_cp_reginfo[] = {
    { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
      .resetvalue = 0,
      .writefn = teecr_write },
    { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
      .accessfn = teehbr_access, .resetvalue = 0 },
    REGINFO_SENTINEL
};

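/* Thread-ID registers: 64-bit TPIDR_ELx views alongside their banked
 * 32-bit counterparts.  Where an AArch32 view uses
 * resetfn = arm_cp_reset_ignore, reset of the shared state is left to
 * the corresponding 64-bit definition (the same pattern as the
 * CONTEXTIDR definitions above).
 */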
static const ARMCPRegInfo v6k_cp_reginfo[] = {
    { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
      .access = PL0_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
    { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
      .access = PL0_R|PL1_W,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
      .resetvalue = 0},
    { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL0_R|PL1_W,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
                             offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
    { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
      .resetvalue = 0 },
    REGINFO_SENTINEL
};

#ifndef CONFIG_USER_ONLY

static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
     * Writable only at the highest implemented exception level.
     */
    int el = arm_current_el(env);

    switch (el) {
    case 0:
        if (!extract32(env->cp15.c14_cntkctl, 0, 2)) {
            return CP_ACCESS_TRAP;
        }
        break;
    case 1:
        if (!isread && ri->state == ARM_CP_STATE_AA32 &&
            arm_is_secure_below_el3(env)) {
            /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
            return CP_ACCESS_TRAP_UNCATEGORIZED;
        }
        break;
    case 2:
    case 3:
        break;
    }

    if (!isread && el < arm_highest_el(env)) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
                                        bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

1479    /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
1480    if (cur_el == 0 &&
1481        !extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
1482        return CP_ACCESS_TRAP;
1483    }
1484
1485    if (arm_feature(env, ARM_FEATURE_EL2) &&
1486        timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
1487        !extract32(env->cp15.cnthctl_el2, 0, 1)) {
1488        return CP_ACCESS_TRAP_EL2;
1489    }
1490    return CP_ACCESS_OK;
1491}
1492
1493static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
1494                                      bool isread)
1495{
1496    unsigned int cur_el = arm_current_el(env);
1497    bool secure = arm_is_secure(env);
1498
1499    /* CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from PL0 if
1500     * EL0[PV]TEN is zero.
1501     */
1502    if (cur_el == 0 &&
1503        !extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
1504        return CP_ACCESS_TRAP;
1505    }
1506
1507    if (arm_feature(env, ARM_FEATURE_EL2) &&
1508        timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
1509        !extract32(env->cp15.cnthctl_el2, 1, 1)) {
1510        return CP_ACCESS_TRAP_EL2;
1511    }
1512    return CP_ACCESS_OK;
1513}
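
/* A note on the bit arithmetic above: in CNTKCTL, EL0PCTEN and EL0VCTEN
 * are bits 0 and 1 while EL0PTEN and EL0VTEN are bits 9 and 8.  With the
 * usual GTIMER_PHYS == 0 / GTIMER_VIRT == 1 definitions,
 * gt_counter_access() can index the counter-enable bits directly with
 * timeridx, whereas gt_timer_access() needs (9 - timeridx) to reach the
 * timer-enable bits.
 */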
1514
1515static CPAccessResult gt_pct_access(CPUARMState *env,
1516                                    const ARMCPRegInfo *ri,
1517                                    bool isread)
1518{
1519    return gt_counter_access(env, GTIMER_PHYS, isread);
1520}
1521
1522static CPAccessResult gt_vct_access(CPUARMState *env,
1523                                    const ARMCPRegInfo *ri,
1524                                    bool isread)
1525{
1526    return gt_counter_access(env, GTIMER_VIRT, isread);
1527}
1528
1529static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
1530                                       bool isread)
1531{
1532    return gt_timer_access(env, GTIMER_PHYS, isread);
1533}
1534
1535static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
1536                                       bool isread)
1537{
1538    return gt_timer_access(env, GTIMER_VIRT, isread);
1539}
1540
1541static CPAccessResult gt_stimer_access(CPUARMState *env,
1542                                       const ARMCPRegInfo *ri,
1543                                       bool isread)
1544{
1545    /* The AArch64 register view of the secure physical timer is
1546     * always accessible from EL3, and configurably accessible from
1547     * Secure EL1.
1548     */
1549    switch (arm_current_el(env)) {
1550    case 1:
1551        if (!arm_is_secure(env)) {
1552            return CP_ACCESS_TRAP;
1553        }
1554        if (!(env->cp15.scr_el3 & SCR_ST)) {
1555            return CP_ACCESS_TRAP_EL3;
1556        }
1557        return CP_ACCESS_OK;
1558    case 0:
1559    case 2:
1560        return CP_ACCESS_TRAP;
1561    case 3:
1562        return CP_ACCESS_OK;
1563    default:
1564        g_assert_not_reached();
1565    }
1566}
1567
1568static uint64_t gt_get_countervalue(CPUARMState *env)
1569{
1570    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / GTIMER_SCALE;
1571}
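
/* QEMU_CLOCK_VIRTUAL advances in nanoseconds, so dividing by
 * GTIMER_SCALE (nanoseconds per tick) converts it to counter ticks.
 * With GTIMER_SCALE at its usual value of 16 this models a 62.5MHz
 * counter, matching the CNTFRQ reset value of
 * (1000 * 1000 * 1000) / GTIMER_SCALE advertised below.
 */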
1572
1573static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
1574{
1575    ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];
1576
1577    if (gt->ctl & 1) {
1578        /* Timer enabled: calculate and set current ISTATUS, irq, and
1579         * reset timer to when ISTATUS next has to change
1580         */
1581        uint64_t offset = timeridx == GTIMER_VIRT ?
1582                                      cpu->env.cp15.cntvoff_el2 : 0;
1583        uint64_t count = gt_get_countervalue(&cpu->env);
1584        /* Note that this must be unsigned 64 bit arithmetic: */
1585        int istatus = count - offset >= gt->cval;
1586        uint64_t nexttick;
1587
1588        gt->ctl = deposit32(gt->ctl, 2, 1, istatus);
1589        qemu_set_irq(cpu->gt_timer_outputs[timeridx],
1590                     (istatus && !(gt->ctl & 2)));
1591        if (istatus) {
1592            /* Next transition is when count rolls back over to zero */
1593            nexttick = UINT64_MAX;
1594        } else {
1595            /* Next transition is when we hit cval */
1596            nexttick = gt->cval + offset;
1597        }
1598        /* Note that the desired next expiry time might be beyond the
1599         * signed-64-bit range of a QEMUTimer -- in this case we just
1600         * set the timer for as far in the future as possible. When the
1601         * timer expires we will reset the timer for any remaining period.
1602         */
1603        if (nexttick > INT64_MAX / GTIMER_SCALE) {
1604            nexttick = INT64_MAX / GTIMER_SCALE;
1605        }
1606        timer_mod(cpu->gt_timer[timeridx], nexttick * GTIMER_SCALE);
1607    } else {
1608        /* Timer disabled: ISTATUS and timer output always clear */
1609        gt->ctl &= ~4;
1610        qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
1611        timer_del(cpu->gt_timer[timeridx]);
1612    }
1613}
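
/* A worked example of the recalculation above, assuming GTIMER_SCALE is
 * 16ns per tick: if the virtual timer has cval == 1000 with
 * cntvoff_el2 == 0 and the counter currently reads 600, then
 * istatus == (600 >= 1000) == 0, the output line stays low, and
 * nexttick == 1000, so timer_mod() is handed 1000 * 16 == 16000ns --
 * the virtual-clock instant at which ISTATUS must flip to 1.  The
 * unsigned compare also keeps (count - offset) well-defined when the
 * offset exceeds the count.
 */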
1614
1615static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
1616                           int timeridx)
1617{
1618    ARMCPU *cpu = arm_env_get_cpu(env);
1619
1620    timer_del(cpu->gt_timer[timeridx]);
1621}
1622
1623static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
1624{
1625    return gt_get_countervalue(env);
1626}
1627
1628static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
1629{
1630    return gt_get_countervalue(env) - env->cp15.cntvoff_el2;
1631}
1632
1633static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1634                          int timeridx,
1635                          uint64_t value)
1636{
1637    env->cp15.c14_timer[timeridx].cval = value;
1638    gt_recalc_timer(arm_env_get_cpu(env), timeridx);
1639}
1640
1641static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
1642                             int timeridx)
1643{
1644    uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;
1645
1646    return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
1647                      (gt_get_countervalue(env) - offset));
1648}
1649
1650static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1651                          int timeridx,
1652                          uint64_t value)
1653{
1654    uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;
1655
1656    env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
1657                                         sextract64(value, 0, 32);
1658    gt_recalc_timer(arm_env_get_cpu(env), timeridx);
1659}
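
/* TVAL is a 32-bit signed window onto the 64-bit compare value: reads
 * return CVAL - (count - offset) truncated to 32 bits, and writes
 * invert that as CVAL = (count - offset) + sext32(TVAL).  For example
 * (hypothetical values), with the counter at 500 and a zero offset,
 * writing TVAL = 100 sets CVAL to 600 so the timer fires 100 ticks
 * from now, and an immediate read-back returns 100.
 */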
1660
1661static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
1662                         int timeridx,
1663                         uint64_t value)
1664{
1665    ARMCPU *cpu = arm_env_get_cpu(env);
1666    uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;
1667
1668    env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
1669    if ((oldval ^ value) & 1) {
1670        /* Enable toggled */
1671        gt_recalc_timer(cpu, timeridx);
1672    } else if ((oldval ^ value) & 2) {
1673        /* IMASK toggled: don't need to recalculate,
1674         * just set the interrupt line based on ISTATUS
1675         */
1676        qemu_set_irq(cpu->gt_timer_outputs[timeridx],
1677                     (oldval & 4) && !(value & 2));
1678    }
1679}
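
/* For reference, the CNT*_CTL layout handled above is the architected
 * one: bit 0 is ENABLE, bit 1 is IMASK and bit 2 is the read-only
 * ISTATUS.  deposit64(oldval, 0, 2, value) lets the guest update
 * ENABLE and IMASK while preserving ISTATUS, and the interrupt line is
 * simply ISTATUS && !IMASK.
 */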
1680
1681static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1682{
1683    gt_timer_reset(env, ri, GTIMER_PHYS);
1684}
1685
1686static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1687                               uint64_t value)
1688{
1689    gt_cval_write(env, ri, GTIMER_PHYS, value);
1690}
1691
1692static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
1693{
1694    return gt_tval_read(env, ri, GTIMER_PHYS);
1695}
1696
1697static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1698                               uint64_t value)
1699{
1700    gt_tval_write(env, ri, GTIMER_PHYS, value);
1701}
1702
1703static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
1704                              uint64_t value)
1705{
1706    gt_ctl_write(env, ri, GTIMER_PHYS, value);
1707}
1708
1709static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1710{
1711    gt_timer_reset(env, ri, GTIMER_VIRT);
1712}
1713
1714static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1715                               uint64_t value)
1716{
1717    gt_cval_write(env, ri, GTIMER_VIRT, value);
1718}
1719
1720static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
1721{
1722    return gt_tval_read(env, ri, GTIMER_VIRT);
1723}
1724
1725static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1726                               uint64_t value)
1727{
1728    gt_tval_write(env, ri, GTIMER_VIRT, value);
1729}
1730
1731static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
1732                              uint64_t value)
1733{
1734    gt_ctl_write(env, ri, GTIMER_VIRT, value);
1735}
1736
1737static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
1738                              uint64_t value)
1739{
1740    ARMCPU *cpu = arm_env_get_cpu(env);
1741
1742    raw_write(env, ri, value);
1743    gt_recalc_timer(cpu, GTIMER_VIRT);
1744}
1745
1746static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1747{
1748    gt_timer_reset(env, ri, GTIMER_HYP);
1749}
1750
1751static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1752                              uint64_t value)
1753{
1754    gt_cval_write(env, ri, GTIMER_HYP, value);
1755}
1756
1757static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
1758{
1759    return gt_tval_read(env, ri, GTIMER_HYP);
1760}
1761
1762static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1763                              uint64_t value)
1764{
1765    gt_tval_write(env, ri, GTIMER_HYP, value);
1766}
1767
1768static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
1769                              uint64_t value)
1770{
1771    gt_ctl_write(env, ri, GTIMER_HYP, value);
1772}
1773
1774static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1775{
1776    gt_timer_reset(env, ri, GTIMER_SEC);
1777}
1778
1779static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1780                              uint64_t value)
1781{
1782    gt_cval_write(env, ri, GTIMER_SEC, value);
1783}
1784
1785static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
1786{
1787    return gt_tval_read(env, ri, GTIMER_SEC);
1788}
1789
1790static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1791                              uint64_t value)
1792{
1793    gt_tval_write(env, ri, GTIMER_SEC, value);
1794}
1795
1796static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
1797                              uint64_t value)
1798{
1799    gt_ctl_write(env, ri, GTIMER_SEC, value);
1800}
1801
1802void arm_gt_ptimer_cb(void *opaque)
1803{
1804    ARMCPU *cpu = opaque;
1805
1806    gt_recalc_timer(cpu, GTIMER_PHYS);
1807}
1808
1809void arm_gt_vtimer_cb(void *opaque)
1810{
1811    ARMCPU *cpu = opaque;
1812
1813    gt_recalc_timer(cpu, GTIMER_VIRT);
1814}
1815
1816void arm_gt_htimer_cb(void *opaque)
1817{
1818    ARMCPU *cpu = opaque;
1819
1820    gt_recalc_timer(cpu, GTIMER_HYP);
1821}
1822
1823void arm_gt_stimer_cb(void *opaque)
1824{
1825    ARMCPU *cpu = opaque;
1826
1827    gt_recalc_timer(cpu, GTIMER_SEC);
1828}
1829
1830static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
1831    /* Note that CNTFRQ is purely reads-as-written for the benefit
1832     * of software; writing it doesn't actually change the timer frequency.
1833     * Our reset value matches the fixed frequency we implement the timer at.
1834     */
1835    { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
1836      .type = ARM_CP_ALIAS,
1837      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
1838      .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
1839    },
1840    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
1841      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
1842      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
1843      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
1844      .resetvalue = (1000 * 1000 * 1000) / GTIMER_SCALE,
1845    },
1846    /* overall control: mostly access permissions */
1847    { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
1848      .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
1849      .access = PL1_RW,
1850      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
1851      .resetvalue = 0,
1852    },
1853    /* per-timer control */
1854    { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
1855      .secure = ARM_CP_SECSTATE_NS,
1856      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
1857      .accessfn = gt_ptimer_access,
1858      .fieldoffset = offsetoflow32(CPUARMState,
1859                                   cp15.c14_timer[GTIMER_PHYS].ctl),
1860      .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
1861    },
1862    { .name = "CNTP_CTL(S)",
1863      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
1864      .secure = ARM_CP_SECSTATE_S,
1865      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
1866      .accessfn = gt_ptimer_access,
1867      .fieldoffset = offsetoflow32(CPUARMState,
1868                                   cp15.c14_timer[GTIMER_SEC].ctl),
1869      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
1870    },
1871    { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
1872      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
1873      .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
1874      .accessfn = gt_ptimer_access,
1875      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
1876      .resetvalue = 0,
1877      .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
1878    },
1879    { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
1880      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
1881      .accessfn = gt_vtimer_access,
1882      .fieldoffset = offsetoflow32(CPUARMState,
1883                                   cp15.c14_timer[GTIMER_VIRT].ctl),
1884      .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
1885    },
1886    { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
1887      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
1888      .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
1889      .accessfn = gt_vtimer_access,
1890      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
1891      .resetvalue = 0,
1892      .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
1893    },
1894    /* TimerValue views: a 32 bit downcounting view of the underlying state */
1895    { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
1896      .secure = ARM_CP_SECSTATE_NS,
1897      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
1898      .accessfn = gt_ptimer_access,
1899      .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
1900    },
1901    { .name = "CNTP_TVAL(S)",
1902      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
1903      .secure = ARM_CP_SECSTATE_S,
1904      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
1905      .accessfn = gt_ptimer_access,
1906      .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write,
1907    },
1908    { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
1909      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
1910      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
1911      .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset,
1912      .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
1913    },
1914    { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
1915      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
1916      .accessfn = gt_vtimer_access,
1917      .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
1918    },
1919    { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
1920      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
1921      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
1922      .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset,
1923      .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
1924    },
1925    /* The counter itself */
1926    { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
1927      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
1928      .accessfn = gt_pct_access,
1929      .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
1930    },
1931    { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
1932      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
1933      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
1934      .accessfn = gt_pct_access, .readfn = gt_cnt_read,
1935    },
1936    { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
1937      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
1938      .accessfn = gt_vct_access,
1939      .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
1940    },
1941    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
1942      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
1943      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
1944      .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
1945    },
1946    /* Comparison value, indicating when the timer goes off */
1947    { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
1948      .secure = ARM_CP_SECSTATE_NS,
1949      .access = PL1_RW | PL0_R,
1950      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
1951      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
1952      .accessfn = gt_ptimer_access,
1953      .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
1954    },
1955    { .name = "CNTP_CVAL(S)", .cp = 15, .crm = 14, .opc1 = 2,
1956      .secure = ARM_CP_SECSTATE_S,
1957      .access = PL1_RW | PL0_R,
1958      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
1959      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
1960      .accessfn = gt_ptimer_access,
1961      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
1962    },
1963    { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
1964      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
1965      .access = PL1_RW | PL0_R,
1966      .type = ARM_CP_IO,
1967      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
1968      .resetvalue = 0, .accessfn = gt_ptimer_access,
1969      .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
1970    },
1971    { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
1972      .access = PL1_RW | PL0_R,
1973      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
1974      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
1975      .accessfn = gt_vtimer_access,
1976      .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
1977    },
1978    { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
1979      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
1980      .access = PL1_RW | PL0_R,
1981      .type = ARM_CP_IO,
1982      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
1983      .resetvalue = 0, .accessfn = gt_vtimer_access,
1984      .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
1985    },
1986    /* Secure timer -- this is actually restricted to only EL3
1987     * and configurably Secure-EL1 via the accessfn.
1988     */
1989    { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64,
1990      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0,
1991      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW,
1992      .accessfn = gt_stimer_access,
1993      .readfn = gt_sec_tval_read,
1994      .writefn = gt_sec_tval_write,
1995      .resetfn = gt_sec_timer_reset,
1996    },
1997    { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64,
1998      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1,
1999      .type = ARM_CP_IO, .access = PL1_RW,
2000      .accessfn = gt_stimer_access,
2001      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl),
2002      .resetvalue = 0,
2003      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
2004    },
2005    { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64,
2006      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2,
2007      .type = ARM_CP_IO, .access = PL1_RW,
2008      .accessfn = gt_stimer_access,
2009      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
2010      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
2011    },
2012    REGINFO_SENTINEL
2013};
2014
2015#else
2016/* In user-mode none of the generic timer registers are accessible,
2017 * and their implementation depends on QEMU_CLOCK_VIRTUAL and qdev gpio outputs,
2018 * so instead just don't register any of them.
2019 */
2020static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
2021    REGINFO_SENTINEL
2022};
2023
2024#endif
2025
2026static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
2027{
2028    if (arm_feature(env, ARM_FEATURE_LPAE)) {
2029        raw_write(env, ri, value);
2030    } else if (arm_feature(env, ARM_FEATURE_V7)) {
2031        raw_write(env, ri, value & 0xfffff6ff);
2032    } else {
2033        raw_write(env, ri, value & 0xfffff1ff);
2034    }
2035}
2036
2037#ifndef CONFIG_USER_ONLY
2038/* get_phys_addr() isn't present for user-mode-only targets */
2039
2040static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
2041                                 bool isread)
2042{
2043    if (ri->opc2 & 4) {
2044        /* The ATS12NSO* operations must trap to EL3 if executed in
2045         * Secure EL1 (which can only happen if EL3 is AArch64).
2046         * They are simply UNDEF if executed from NS EL1.
2047         * They function normally from EL2 or EL3.
2048         */
2049        if (arm_current_el(env) == 1) {
2050            if (arm_is_secure_below_el3(env)) {
2051                return CP_ACCESS_TRAP_UNCATEGORIZED_EL3;
2052            }
2053            return CP_ACCESS_TRAP_UNCATEGORIZED;
2054        }
2055    }
2056    return CP_ACCESS_OK;
2057}
2058
2059static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
2060                             int access_type, ARMMMUIdx mmu_idx)
2061{
2062    hwaddr phys_addr;
2063    target_ulong page_size;
2064    int prot;
2065    uint32_t fsr;
2066    bool ret;
2067    uint64_t par64;
2068    MemTxAttrs attrs = {};
2069    ARMMMUFaultInfo fi = {};
2070
2071    ret = get_phys_addr(env, value, access_type, mmu_idx,
2072                        &phys_addr, &attrs, &prot, &page_size, &fsr, &fi);
2073    if (extended_addresses_enabled(env)) {
2074        /* fsr is a DFSR/IFSR value for the long descriptor
2075         * translation table format, but with WnR always clear.
2076         * Convert it to a 64-bit PAR.
2077         */
2078        par64 = (1 << 11); /* LPAE bit always set */
2079        if (!ret) {
2080            par64 |= phys_addr & ~0xfffULL;
2081            if (!attrs.secure) {
2082                par64 |= (1 << 9); /* NS */
2083            }
2084            /* We don't set the ATTR or SH fields in the PAR. */
2085        } else {
2086            par64 |= 1; /* F */
2087            par64 |= (fsr & 0x3f) << 1; /* FS */
2088            /* Note that S2WLK and FSTAGE are always zero, because we don't
2089             * implement virtualization and therefore there can't be a stage 2
2090             * fault.
2091             */
2092        }
2093    } else {
2094        /* fsr is a DFSR/IFSR value for the short descriptor
2095         * translation table format (with WnR always clear).
2096         * Convert it to a 32-bit PAR.
2097         */
2098        if (!ret) {
2099            /* We do not set any attribute bits in the PAR */
2100            if (page_size == (1 << 24)
2101                && arm_feature(env, ARM_FEATURE_V7)) {
2102                par64 = (phys_addr & 0xff000000) | (1 << 1);
2103            } else {
2104                par64 = phys_addr & 0xfffff000;
2105            }
2106            if (!attrs.secure) {
2107                par64 |= (1 << 9); /* NS */
2108            }
2109        } else {
2110            par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
2111                    ((fsr & 0xf) << 1) | 1;
2112        }
2113    }
2114    return par64;
2115}
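
/* A sketch of the 64-bit PAR encoding produced above (not an
 * exhaustive field list): a successful non-secure translation of a
 * page at physical address 0x80004000 returns
 *     0x80004000 | (1 << 11) | (1 << 9)
 * i.e. the physical address bits above bit 11 plus the LPAE-format and
 * NS bits, with F (bit 0) clear; on a fault only F, FS[5:0] and the
 * LPAE bit are reported.
 */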
2116
2117static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
2118{
2119    int access_type = ri->opc2 & 1;
2120    uint64_t par64;
2121    ARMMMUIdx mmu_idx;
2122    int el = arm_current_el(env);
2123    bool secure = arm_is_secure_below_el3(env);
2124
2125    switch (ri->opc2 & 6) {
2126    case 0:
2127        /* stage 1 current state PL1: ATS1CPR, ATS1CPW */
2128        switch (el) {
2129        case 3:
2130            mmu_idx = ARMMMUIdx_S1E3;
2131            break;
2132        case 2:
2133            mmu_idx = ARMMMUIdx_S1NSE1;
2134            break;
2135        case 1:
2136            mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
2137            break;
2138        default:
2139            g_assert_not_reached();
2140        }
2141        break;
2142    case 2:
2143        /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
2144        switch (el) {
2145        case 3:
2146            mmu_idx = ARMMMUIdx_S1SE0;
2147            break;
2148        case 2:
2149            mmu_idx = ARMMMUIdx_S1NSE0;
2150            break;
2151        case 1:
2152            mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
2153            break;
2154        default:
2155            g_assert_not_reached();
2156        }
2157        break;
2158    case 4:
2159        /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
2160        mmu_idx = ARMMMUIdx_S12NSE1;
2161        break;
2162    case 6:
2163        /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
2164        mmu_idx = ARMMMUIdx_S12NSE0;
2165        break;
2166    default:
2167        g_assert_not_reached();
2168    }
2169
2170    par64 = do_ats_write(env, value, access_type, mmu_idx);
2171
2172    A32_BANKED_CURRENT_REG_SET(env, par, par64);
2173}
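
/* Decode sketch for the AArch32 AT operations handled above: the
 * single "ATS" reginfo (declared below with opc2 = CP_ANY) underdecodes
 * crn=7, crm=8, so ri->opc2 identifies the operation -- bit 0 selects
 * the access type being checked (0 read, 1 write) and bits 2:1 the
 * translation regime, e.g.:
 *     MCR p15, 0, <Rt>, c7, c8, 0    ; ATS1CPR (stage 1, PL1, read)
 *     MCR p15, 0, <Rt>, c7, c8, 3    ; ATS1CUW (stage 1, PL0, write)
 *     MCR p15, 0, <Rt>, c7, c8, 6    ; ATS12NSOUR (stage 1+2, NS PL0)
 */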
2174
2175static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
2176                        uint64_t value)
2177{
2178    int access_type = ri->opc2 & 1;
2179    uint64_t par64;
2180
2181    par64 = do_ats_write(env, value, access_type, ARMMMUIdx_S2NS);
2182
2183    A32_BANKED_CURRENT_REG_SET(env, par, par64);
2184}
2185
2186static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
2187                                     bool isread)
2188{
2189    if (arm_current_el(env) == 3 && !(env->cp15.scr_el3 & SCR_NS)) {
2190        return CP_ACCESS_TRAP;
2191    }
2192    return CP_ACCESS_OK;
2193}
2194
2195static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
2196                        uint64_t value)
2197{
2198    int access_type = ri->opc2 & 1;
2199    ARMMMUIdx mmu_idx;
2200    int secure = arm_is_secure_below_el3(env);
2201
2202    switch (ri->opc2 & 6) {
2203    case 0:
2204        switch (ri->opc1) {
2205        case 0: /* AT S1E1R, AT S1E1W */
2206            mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
2207            break;
2208        case 4: /* AT S1E2R, AT S1E2W */
2209            mmu_idx = ARMMMUIdx_S1E2;
2210            break;
2211        case 6: /* AT S1E3R, AT S1E3W */
2212            mmu_idx = ARMMMUIdx_S1E3;
2213            break;
2214        default:
2215            g_assert_not_reached();
2216        }
2217        break;
2218    case 2: /* AT S1E0R, AT S1E0W */
2219        mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
2220        break;
2221    case 4: /* AT S12E1R, AT S12E1W */
2222        mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S12NSE1;
2223        break;
2224    case 6: /* AT S12E0R, AT S12E0W */
2225        mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S12NSE0;
2226        break;
2227    default:
2228        g_assert_not_reached();
2229    }
2230
2231    env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx);
2232}
2233#endif
2234
2235static const ARMCPRegInfo vapa_cp_reginfo[] = {
2236    { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
2237      .access = PL1_RW, .resetvalue = 0,
2238      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
2239                             offsetoflow32(CPUARMState, cp15.par_ns) },
2240      .writefn = par_write },
2241#ifndef CONFIG_USER_ONLY
2242    /* This underdecoding is safe because the reginfo is NO_RAW. */
2243    { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
2244      .access = PL1_W, .accessfn = ats_access,
2245      .writefn = ats_write, .type = ARM_CP_NO_RAW },
2246#endif
2247    REGINFO_SENTINEL
2248};
2249
2250/* Return basic MPU access permission bits.  */
2251static uint32_t simple_mpu_ap_bits(uint32_t val)
2252{
2253    uint32_t ret;
2254    uint32_t mask;
2255    int i;
2256    ret = 0;
2257    mask = 3;
2258    for (i = 0; i < 16; i += 2) {
2259        ret |= (val >> i) & mask;
2260        mask <<= 2;
2261    }
2262    return ret;
2263}
2264
2265/* Pad basic MPU access permission bits to extended format.  */
2266static uint32_t extended_mpu_ap_bits(uint32_t val)
2267{
2268    uint32_t ret;
2269    uint32_t mask;
2270    int i;
2271    ret = 0;
2272    mask = 3;
2273    for (i = 0; i < 16; i += 2) {
2274        ret |= (val & mask) << i;
2275        mask <<= 2;
2276    }
2277    return ret;
2278}
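
/* These two helpers are inverses: region n's two AP bits sit at bit
 * position 2n in the "simple" format and at 4n in the extended format,
 * with the extra two bits per region reading as zero.  For example,
 * extended_mpu_ap_bits(0xD) == 0x31 and simple_mpu_ap_bits(0x31) == 0xD
 * (regions 0 and 1 holding AP values 1 and 3).
 */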
2279
2280static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
2281                                 uint64_t value)
2282{
2283    env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
2284}
2285
2286static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
2287{
2288    return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
2289}
2290
2291static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
2292                                 uint64_t value)
2293{
2294    env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
2295}
2296
2297static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
2298{
2299    return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
2300}
2301
2302static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri)
2303{
2304    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
2305
2306    if (!u32p) {
2307        return 0;
2308    }
2309
2310    u32p += env->cp15.c6_rgnr;
2311    return *u32p;
2312}
2313
2314static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
2315                         uint64_t value)
2316{
2317    ARMCPU *cpu = arm_env_get_cpu(env);
2318    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
2319
2320    if (!u32p) {
2321        return;
2322    }
2323
2324    u32p += env->cp15.c6_rgnr;
2325    tlb_flush(CPU(cpu), 1); /* Mappings may have changed - purge! */
2326    *u32p = value;
2327}
2328
2329static void pmsav7_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2330{
2331    ARMCPU *cpu = arm_env_get_cpu(env);
2332    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
2333
2334    if (!u32p) {
2335        return;
2336    }
2337
2338    memset(u32p, 0, sizeof(*u32p) * cpu->pmsav7_dregion);
2339}
2340
2341static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2342                              uint64_t value)
2343{
2344    ARMCPU *cpu = arm_env_get_cpu(env);
2345    uint32_t nrgs = cpu->pmsav7_dregion;
2346
2347    if (value >= nrgs) {
2348        qemu_log_mask(LOG_GUEST_ERROR,
2349                      "PMSAv7 RGNR write >= # supported regions, %" PRIu32
2350                      " > %" PRIu32 "\n", (uint32_t)value, nrgs);
2351        return;
2352    }
2353
2354    raw_write(env, ri, value);
2355}
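
/* The DRBAR/DRSR/DRACR reginfos below keep a pointer in their
 * fieldoffset rather than the value itself: raw_ptr() yields the
 * address of e.g. env->pmsav7.drbar, which points at a separately
 * allocated array of pmsav7_dregion words, and the RGNR value
 * (bounds-checked above) selects the element a given access touches.
 */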
2356
2357static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
2358    { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0,
2359      .access = PL1_RW, .type = ARM_CP_NO_RAW,
2360      .fieldoffset = offsetof(CPUARMState, pmsav7.drbar),
2361      .readfn = pmsav7_read, .writefn = pmsav7_write, .resetfn = pmsav7_reset },
2362    { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2,
2363      .access = PL1_RW, .type = ARM_CP_NO_RAW,
2364      .fieldoffset = offsetof(CPUARMState, pmsav7.drsr),
2365      .readfn = pmsav7_read, .writefn = pmsav7_write, .resetfn = pmsav7_reset },
2366    { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4,
2367      .access = PL1_RW, .type = ARM_CP_NO_RAW,
2368      .fieldoffset = offsetof(CPUARMState, pmsav7.dracr),
2369      .readfn = pmsav7_read, .writefn = pmsav7_write, .resetfn = pmsav7_reset },
2370    { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0,
2371      .access = PL1_RW,
2372      .fieldoffset = offsetof(CPUARMState, cp15.c6_rgnr),
2373      .writefn = pmsav7_rgnr_write },
2374    REGINFO_SENTINEL
2375};
2376
2377static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
2378    { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
2379      .access = PL1_RW, .type = ARM_CP_ALIAS,
2380      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
2381      .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
2382    { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
2383      .access = PL1_RW, .type = ARM_CP_ALIAS,
2384      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
2385      .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
2386    { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
2387      .access = PL1_RW,
2388      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
2389      .resetvalue = 0, },
2390    { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
2391      .access = PL1_RW,
2392      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
2393      .resetvalue = 0, },
2394    { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
2395      .access = PL1_RW,
2396      .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
2397    { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
2398      .access = PL1_RW,
2399      .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
2400    /* Protection region base and size registers */
2401    { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
2402      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2403      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
2404    { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
2405      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2406      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
2407    { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
2408      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2409      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
2410    { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
2411      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2412      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
2413    { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
2414      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2415      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
2416    { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
2417      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2418      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
2419    { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
2420      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2421      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
2422    { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
2423      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2424      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
2425    REGINFO_SENTINEL
2426};
2427
2428static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
2429                                 uint64_t value)
2430{
2431    TCR *tcr = raw_ptr(env, ri);
2432    int maskshift = extract32(value, 0, 3);
2433
2434    if (!arm_feature(env, ARM_FEATURE_V8)) {
2435        if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
2436            /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when
2437             * using Long-descriptor translation table format */
2438            value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
2439        } else if (arm_feature(env, ARM_FEATURE_EL3)) {
2440            /* In an implementation that includes the Security Extensions
2441             * TTBCR has additional fields PD0 [4] and PD1 [5] for
2442             * Short-descriptor translation table format.
2443             */
2444            value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
2445        } else {
2446            value &= TTBCR_N;
2447        }
2448    }
2449
2450    /* Update the masks corresponding to the TCR bank being written
2451     * Note that we always calculate mask and base_mask, but
2452     * they are only used for short-descriptor tables (ie if EAE is 0);
2453     * for long-descriptor tables the TCR fields are used differently
2454     * and the mask and base_mask values are meaningless.
2455     */
2456    tcr->raw_tcr = value;
2457    tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift);
2458    tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift);
2459}
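
/* A worked example of the short-descriptor masks above, taking
 * TTBCR.N == 2: mask = ~(0xffffffff >> 2) = 0xc0000000, so any virtual
 * address with either of its top two bits set translates via TTBR1;
 * base_mask = ~(0x3fff >> 2) = 0xfffff000, reflecting that the TTBR0
 * table shrinks from 16KB to 4KB and its base must be aligned to
 * match.
 */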
2460
2461static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2462                             uint64_t value)
2463{
2464    ARMCPU *cpu = arm_env_get_cpu(env);
2465
2466    if (arm_feature(env, ARM_FEATURE_LPAE)) {
2467        /* With LPAE the TTBCR could result in a change of ASID
2468         * via the TTBCR.A1 bit, so do a TLB flush.
2469         */
2470        tlb_flush(CPU(cpu), 1);
2471    }
2472    vmsa_ttbcr_raw_write(env, ri, value);
2473}
2474
2475static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2476{
2477    TCR *tcr = raw_ptr(env, ri);
2478
2479    /* Reset both the TCR as well as the masks corresponding to the bank of
2480     * the TCR being reset.
2481     */
2482    tcr->raw_tcr = 0;
2483    tcr->mask = 0;
2484    tcr->base_mask = 0xffffc000u;
2485}
2486
2487static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
2488                               uint64_t value)
2489{
2490    ARMCPU *cpu = arm_env_get_cpu(env);
2491    TCR *tcr = raw_ptr(env, ri);
2492
2493    /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
2494    tlb_flush(CPU(cpu), 1);
2495    tcr->raw_tcr = value;
2496}
2497
2498static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2499                            uint64_t value)
2500{
2501    /* 64 bit accesses to the TTBRs can change the ASID and so we
2502     * must flush the TLB.
2503     */
2504    if (cpreg_field_is_64bit(ri)) {
2505        ARMCPU *cpu = arm_env_get_cpu(env);
2506
2507        tlb_flush(CPU(cpu), 1);
2508    }
2509    raw_write(env, ri, value);
2510}
2511
2512static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2513                        uint64_t value)
2514{
2515    ARMCPU *cpu = arm_env_get_cpu(env);
2516    CPUState *cs = CPU(cpu);
2517
2518    /* Accesses to VTTBR may change the VMID so we must flush the TLB.  */
2519    if (raw_read(env, ri) != value) {
2520        tlb_flush_by_mmuidx(cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0,
2521                            ARMMMUIdx_S2NS, -1);
2522        raw_write(env, ri, value);
2523    }
2524}
2525
2526static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
2527    { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
2528      .access = PL1_RW, .type = ARM_CP_ALIAS,
2529      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s),
2530                             offsetoflow32(CPUARMState, cp15.dfsr_ns) }, },
2531    { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
2532      .access = PL1_RW, .resetvalue = 0,
2533      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s),
2534                             offsetoflow32(CPUARMState, cp15.ifsr_ns) } },
2535    { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
2536      .access = PL1_RW, .resetvalue = 0,
2537      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s),
2538                             offsetof(CPUARMState, cp15.dfar_ns) } },
2539    { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
2540      .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
2541      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
2542      .resetvalue = 0, },
2543    REGINFO_SENTINEL
2544};
2545
2546static const ARMCPRegInfo vmsa_cp_reginfo[] = {
2547    { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
2548      .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
2549      .access = PL1_RW,
2550      .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
2551    { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
2552      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
2553      .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
2554      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
2555                             offsetof(CPUARMState, cp15.ttbr0_ns) } },
2556    { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
2557      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1,
2558      .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
2559      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
2560                             offsetof(CPUARMState, cp15.ttbr1_ns) } },
2561    { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
2562      .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
2563      .access = PL1_RW, .writefn = vmsa_tcr_el1_write,
2564      .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
2565      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
2566    { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
2567      .access = PL1_RW, .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
2568      .raw_writefn = vmsa_ttbcr_raw_write,
2569      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]),
2570                             offsetoflow32(CPUARMState, cp15.tcr_el[1])} },
2571    REGINFO_SENTINEL
2572};
2573
2574static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
2575                                uint64_t value)
2576{
2577    env->cp15.c15_ticonfig = value & 0xe7;
2578    /* The OS_TYPE bit in this register changes the reported CPUID! */
2579    env->cp15.c0_cpuid = (value & (1 << 5)) ?
2580        ARM_CPUID_TI915T : ARM_CPUID_TI925T;
2581}
2582
2583static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
2584                                uint64_t value)
2585{
2586    env->cp15.c15_threadid = value & 0xffff;
2587}
2588
2589static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
2590                           uint64_t value)
2591{
2592    /* Wait-for-interrupt (deprecated) */
2593    cpu_interrupt(CPU(arm_env_get_cpu(env)), CPU_INTERRUPT_HALT);
2594}
2595
2596static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
2597                                  uint64_t value)
2598{
2599    /* On OMAP there are registers indicating the max/min index of dcache lines
2600     * containing a dirty line; cache flush operations have to reset these.
2601     */
2602    env->cp15.c15_i_max = 0x000;
2603    env->cp15.c15_i_min = 0xff0;
2604}
2605
2606static const ARMCPRegInfo omap_cp_reginfo[] = {
2607    { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
2608      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
2609      .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
2610      .resetvalue = 0, },
2611    { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
2612      .access = PL1_RW, .type = ARM_CP_NOP },
2613    { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
2614      .access = PL1_RW,
2615      .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
2616      .writefn = omap_ticonfig_write },
2617    { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
2618      .access = PL1_RW,
2619      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
2620    { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
2621      .access = PL1_RW, .resetvalue = 0xff0,
2622      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
2623    { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
2624      .access = PL1_RW,
2625      .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
2626      .writefn = omap_threadid_write },
2627    { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
2628      .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
2629      .type = ARM_CP_NO_RAW,
2630      .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
2631    /* TODO: Peripheral port remap register:
2632     * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
2633     * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
2634     * when MMU is off.
2635     */
2636    { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
2637      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
2638      .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW,
2639      .writefn = omap_cachemaint_write },
2640    { .name = "C9", .cp = 15, .crn = 9,
2641      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
2642      .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
2643    REGINFO_SENTINEL
2644};
2645
2646static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
2647                              uint64_t value)
2648{
2649    env->cp15.c15_cpar = value & 0x3fff;
2650}
2651
2652static const ARMCPRegInfo xscale_cp_reginfo[] = {
2653    { .name = "XSCALE_CPAR",
2654      .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
2655      .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
2656      .writefn = xscale_cpar_write, },
2657    { .name = "XSCALE_AUXCR",
2658      .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
2659      .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
2660      .resetvalue = 0, },
2661    /* XScale specific cache-lockdown: since we have no cache we NOP these
2662     * and hope the guest does not really rely on cache behaviour.
2663     */
2664    { .name = "XSCALE_LOCK_ICACHE_LINE",
2665      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
2666      .access = PL1_W, .type = ARM_CP_NOP },
2667    { .name = "XSCALE_UNLOCK_ICACHE",
2668      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
2669      .access = PL1_W, .type = ARM_CP_NOP },
2670    { .name = "XSCALE_DCACHE_LOCK",
2671      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0,
2672      .access = PL1_RW, .type = ARM_CP_NOP },
2673    { .name = "XSCALE_UNLOCK_DCACHE",
2674      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1,
2675      .access = PL1_W, .type = ARM_CP_NOP },
2676    REGINFO_SENTINEL
2677};
2678
2679static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
2680    /* RAZ/WI the whole crn=15 space, when we don't have a more specific
2681     * implementation of this implementation-defined space.
2682     * Ideally this should eventually disappear in favour of actually
2683     * implementing the correct behaviour for all cores.
2684     */
2685    { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
2686      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
2687      .access = PL1_RW,
2688      .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
2689      .resetvalue = 0 },
2690    REGINFO_SENTINEL
2691};
2692
2693static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
2694    /* Cache status: RAZ because we have no cache so it's always clean */
2695    { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
2696      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
2697      .resetvalue = 0 },
2698    REGINFO_SENTINEL
2699};
2700
2701static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
2702    /* We never have a block transfer operation in progress */
2703    { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
2704      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
2705      .resetvalue = 0 },
2706    /* The cache ops themselves: these all NOP for QEMU */
2707    { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
2708      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
2709    { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
2710      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
2711    { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
2712      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
2713    { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
2714      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
2715    { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
2716      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
2717    { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
2718      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
2719    REGINFO_SENTINEL
2720};
2721
2722static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
2723    /* The cache test-and-clean instructions always return (1 << 30)
2724     * to indicate that there are no dirty cache lines.
2725     */
2726    { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
2727      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
2728      .resetvalue = (1 << 30) },
2729    { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
2730      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
2731      .resetvalue = (1 << 30) },
2732    REGINFO_SENTINEL
2733};
2734
2735static const ARMCPRegInfo strongarm_cp_reginfo[] = {
2736    /* Ignore ReadBuffer accesses */
2737    { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
2738      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
2739      .access = PL1_RW, .resetvalue = 0,
2740      .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
2741    REGINFO_SENTINEL
2742};
2743
2744static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2745{
2746    ARMCPU *cpu = arm_env_get_cpu(env);
2747    unsigned int cur_el = arm_current_el(env);
2748    bool secure = arm_is_secure(env);
2749
2750    if (arm_feature(&cpu->env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
2751        return env->cp15.vpidr_el2;
2752    }
2753    return raw_read(env, ri);
2754}
2755
2756uint64_t mpidr_read_val(CPUARMState *env)
2757{
2758    ARMCPU *cpu = ARM_CPU(arm_env_get_cpu(env));
2759    uint64_t mpidr = cpu->mp_affinity;
2766
2767    if (arm_feature(env, ARM_FEATURE_V7MP)) {
2768        mpidr |= (1U << 31);
2769        /* Cores which are uniprocessor (non-coherent)
2770         * but still implement the MP extensions set
2771         * bit 30. (For instance, Cortex-R5).
2772         */
2773        if (cpu->mp_is_up) {
2774            mpidr |= (1u << 30);
2775        }
2776    }
2777    return mpidr;
2778}
2779
2780static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2781{
2782    unsigned int cur_el = arm_current_el(env);
2783    bool secure = arm_is_secure(env);
2784
2785    if (arm_feature(env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
2786        return env->cp15.vmpidr_el2;
2787    }
2788    return mpidr_read_val(env);
2789}
2790
2791static const ARMCPRegInfo mpidr_cp_reginfo[] = {
2792    { .name = "MPIDR", .state = ARM_CP_STATE_BOTH,
2793      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
2794      .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
2795    REGINFO_SENTINEL
2796};
2797
2798static const ARMCPRegInfo lpae_cp_reginfo[] = {
2799    /* NOP AMAIR0/1 */
2800    { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
2801      .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
2802      .access = PL1_RW, .type = ARM_CP_CONST,
2803      .resetvalue = 0 },
2804    /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
2805    { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
2806      .access = PL1_RW, .type = ARM_CP_CONST,
2807      .resetvalue = 0 },
2808    { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
2809      .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0,
2810      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s),
2811                             offsetof(CPUARMState, cp15.par_ns)} },
2812    { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
2813      .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
2814      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
2815                             offsetof(CPUARMState, cp15.ttbr0_ns) },
2816      .writefn = vmsa_ttbr_write, },
2817    { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
2818      .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
2819      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
2820                             offsetof(CPUARMState, cp15.ttbr1_ns) },
2821      .writefn = vmsa_ttbr_write, },
2822    REGINFO_SENTINEL
2823};
2824
2825static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2826{
2827    return vfp_get_fpcr(env);
2828}
2829
2830static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2831                            uint64_t value)
2832{
2833    vfp_set_fpcr(env, value);
2834}
2835
2836static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2837{
2838    return vfp_get_fpsr(env);
2839}
2840
2841static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2842                            uint64_t value)
2843{
2844    vfp_set_fpsr(env, value);
2845}
2846
2847static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri,
2848                                       bool isread)
2849{
2850    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
2851        return CP_ACCESS_TRAP;
2852    }
2853    return CP_ACCESS_OK;
2854}
2855
2856static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
2857                            uint64_t value)
2858{
2859    env->daif = value & PSTATE_DAIF;
2860}
2861
2862static CPAccessResult aa64_cacheop_access(CPUARMState *env,
2863                                          const ARMCPRegInfo *ri,
2864                                          bool isread)
2865{
2866    /* Cache invalidate/clean: NOP, but EL0 must UNDEF unless
2867     * SCTLR_EL1.UCI is set.
2868     */
2869    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCI)) {
2870        return CP_ACCESS_TRAP;
2871    }
2872    return CP_ACCESS_OK;
2873}
2874
2875/* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions
2876 * Page D4-1736 (DDI0487A.b)
2877 */
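/* A rough map from the ARMMMUIdx values used by the flush functions
 * below to the translation regimes they cache (internals.h has the
 * authoritative definitions):
 *   ARMMMUIdx_S12NSE0/1 : NS EL0/EL1, combined stage 1+2
 *   ARMMMUIdx_S1E2      : NS EL2 stage 1
 *   ARMMMUIdx_S1E3      : EL3 stage 1
 *   ARMMMUIdx_S1SE0/1   : Secure EL0/EL1 stage 1
 *   ARMMMUIdx_S2NS      : NS stage 2 only
 */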
2878
2879static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
2880                                    uint64_t value)
2881{
2882    ARMCPU *cpu = arm_env_get_cpu(env);
2883    CPUState *cs = CPU(cpu);
2884
2885    if (arm_is_secure_below_el3(env)) {
2886        tlb_flush_by_mmuidx(cs, ARMMMUIdx_S1SE1, ARMMMUIdx_S1SE0, -1);
2887    } else {
2888        tlb_flush_by_mmuidx(cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0, -1);
2889    }
2890}
2891
2892static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
2893                                      uint64_t value)
2894{
2895    bool sec = arm_is_secure_below_el3(env);
2896    CPUState *other_cs;
2897
2898    CPU_FOREACH(other_cs) {
2899        if (sec) {
2900            tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S1SE1, ARMMMUIdx_S1SE0, -1);
2901        } else {
2902            tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S12NSE1,
2903                                ARMMMUIdx_S12NSE0, -1);
2904        }
2905    }
2906}
2907
2908static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
2909                                  uint64_t value)
2910{
2911    /* Note that the 'ALL' scope must invalidate both stage 1 and
2912     * stage 2 translations, whereas most other scopes only invalidate
2913     * stage 1 translations.
2914     */
2915    ARMCPU *cpu = arm_env_get_cpu(env);
2916    CPUState *cs = CPU(cpu);
2917
2918    if (arm_is_secure_below_el3(env)) {
2919        tlb_flush_by_mmuidx(cs, ARMMMUIdx_S1SE1, ARMMMUIdx_S1SE0, -1);
2920    } else {
2921        if (arm_feature(env, ARM_FEATURE_EL2)) {
2922            tlb_flush_by_mmuidx(cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0,
2923                                ARMMMUIdx_S2NS, -1);
2924        } else {
2925            tlb_flush_by_mmuidx(cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0, -1);
2926        }
2927    }
2928}
2929
2930static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
2931                                  uint64_t value)
2932{
2933    ARMCPU *cpu = arm_env_get_cpu(env);
2934    CPUState *cs = CPU(cpu);
2935
2936    tlb_flush_by_mmuidx(cs, ARMMMUIdx_S1E2, -1);
2937}
2938
2939static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
2940                                  uint64_t value)
2941{
2942    ARMCPU *cpu = arm_env_get_cpu(env);
2943    CPUState *cs = CPU(cpu);
2944
2945    tlb_flush_by_mmuidx(cs, ARMMMUIdx_S1E3, -1);
2946}
2947
2948static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
2949                                    uint64_t value)
2950{
2951    /* Note that the 'ALL' scope must invalidate both stage 1 and
2952     * stage 2 translations, whereas most other scopes only invalidate
2953     * stage 1 translations.
2954     */
2955    bool sec = arm_is_secure_below_el3(env);
2956    bool has_el2 = arm_feature(env, ARM_FEATURE_EL2);
2957    CPUState *other_cs;
2958
2959    CPU_FOREACH(other_cs) {
2960        if (sec) {
2961            tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S1SE1, ARMMMUIdx_S1SE0, -1);
2962        } else if (has_el2) {
2963            tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S12NSE1,
2964                                ARMMMUIdx_S12NSE0, ARMMMUIdx_S2NS, -1);
2965        } else {
2966            tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S12NSE1,
2967                                ARMMMUIdx_S12NSE0, -1);
2968        }
2969    }
2970}
2971
2972static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
2973                                    uint64_t value)
2974{
2975    CPUState *other_cs;
2976
2977    CPU_FOREACH(other_cs) {
2978        tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S1E2, -1);
2979    }
2980}
2981
2982static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
2983                                    uint64_t value)
2984{
2985    CPUState *other_cs;
2986
2987    CPU_FOREACH(other_cs) {
2988        tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S1E3, -1);
2989    }
2990}
2991
2992static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
2993                                 uint64_t value)
2994{
2995    /* Invalidate by VA, EL1&0 (AArch64 version).
2996     * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1,
2997     * since we don't support flush-for-specific-ASID-only or
2998     * flush-last-level-only.
2999     */
3000    ARMCPU *cpu = arm_env_get_cpu(env);
3001    CPUState *cs = CPU(cpu);
3002    uint64_t pageaddr = sextract64(value << 12, 0, 56);
3003
3004    if (arm_is_secure_below_el3(env)) {
3005        tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S1SE1,
3006                                 ARMMMUIdx_S1SE0, -1);
3007    } else {
3008        tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S12NSE1,
3009                                 ARMMMUIdx_S12NSE0, -1);
3010    }
3011}
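/* A worked example of the extraction above (illustrative only): the
 * TLBI argument carries VA[55:12] in bits [43:0], so with all of
 * those bits set:
 *
 *     uint64_t value = 0x00000fffffffffffULL;
 *     uint64_t pageaddr = sextract64(value << 12, 0, 56);
 *     assert(pageaddr == 0xfffffffffffff000ULL);
 *
 * Bit 55 of the shifted value is sign-extended into bits [63:56],
 * yielding a canonical kernel-space page address.
 */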
3012
3013static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
3014                                 uint64_t value)
3015{
3016    /* Invalidate by VA, EL2
3017     * Currently handles both VAE2 and VALE2, since we don't support
3018     * flush-last-level-only.
3019     */
3020    ARMCPU *cpu = arm_env_get_cpu(env);
3021    CPUState *cs = CPU(cpu);
3022    uint64_t pageaddr = sextract64(value << 12, 0, 56);
3023
3024    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S1E2, -1);
3025}
3026
3027static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
3028                                 uint64_t value)
3029{
3030    /* Invalidate by VA, EL3
3031     * Currently handles both VAE3 and VALE3, since we don't support
3032     * flush-last-level-only.
3033     */
3034    ARMCPU *cpu = arm_env_get_cpu(env);
3035    CPUState *cs = CPU(cpu);
3036    uint64_t pageaddr = sextract64(value << 12, 0, 56);
3037
3038    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S1E3, -1);
3039}
3040
3041static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3042                                   uint64_t value)
3043{
3044    bool sec = arm_is_secure_below_el3(env);
3045    CPUState *other_cs;
3046    uint64_t pageaddr = sextract64(value << 12, 0, 56);
3047
3048    CPU_FOREACH(other_cs) {
3049        if (sec) {
3050            tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S1SE1,
3051                                     ARMMMUIdx_S1SE0, -1);
3052        } else {
3053            tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S12NSE1,
3054                                     ARMMMUIdx_S12NSE0, -1);
3055        }
3056    }
3057}
3058
3059static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3060                                   uint64_t value)
3061{
3062    CPUState *other_cs;
3063    uint64_t pageaddr = sextract64(value << 12, 0, 56);
3064
3065    CPU_FOREACH(other_cs) {
3066        tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S1E2, -1);
3067    }
3068}
3069
3070static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3071                                   uint64_t value)
3072{
3073    CPUState *other_cs;
3074    uint64_t pageaddr = sextract64(value << 12, 0, 56);
3075
3076    CPU_FOREACH(other_cs) {
3077        tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S1E3, -1);
3078    }
3079}
3080
3081static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
3082                                    uint64_t value)
3083{
3084    /* Invalidate by IPA. This has to invalidate any structures that
3085     * contain only stage 2 translation information, but does not need
3086     * to apply to structures that contain combined stage 1 and stage 2
3087     * translation information.
3088     * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
3089     */
3090    ARMCPU *cpu = arm_env_get_cpu(env);
3091    CPUState *cs = CPU(cpu);
3092    uint64_t pageaddr;
3093
3094    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
3095        return;
3096    }
3097
3098    pageaddr = sextract64(value << 12, 0, 48);
3099
3100    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S2NS, -1);
3101}
3102
3103static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3104                                      uint64_t value)
3105{
3106    CPUState *other_cs;
3107    uint64_t pageaddr;
3108
3109    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
3110        return;
3111    }
3112
3113    pageaddr = sextract64(value << 12, 0, 48);
3114
3115    CPU_FOREACH(other_cs) {
3116        tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S2NS, -1);
3117    }
3118}
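/* Contrast with the VA flushes above (illustrative only): IPAS2 carries
 * IPA[47:12] in bits [35:0], so only 48 bits are extracted here. For
 * example:
 *
 *     value = 0x0000000fffffffffULL  (IPA[47:12] all ones)
 *     sextract64(value << 12, 0, 48) == 0xfffffffffffff000ULL
 *
 * i.e. IPA bit 47 is sign-extended through bits [63:48].
 */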
3119
3120static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
3121                                      bool isread)
3122{
3123    /* We don't implement the EL2 trap on DC ZVA (HCR_EL2.TDZ), so the
3124     * only control is the SCTLR_EL1 bit which can prohibit access for EL0.
3125     */
3126    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_DZE)) {
3127        return CP_ACCESS_TRAP;
3128    }
3129    return CP_ACCESS_OK;
3130}
3131
3132static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
3133{
3134    ARMCPU *cpu = arm_env_get_cpu(env);
3135    int dzp_bit = 1 << 4;
3136
3137    /* DZP indicates whether DC ZVA access is allowed */
3138    if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) {
3139        dzp_bit = 0;
3140    }
3141    return cpu->dcz_blocksize | dzp_bit;
3142}
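/* A worked example of the encoding above (illustrative only): DCZID_EL0
 * holds log2(block size in words) in BS, bits [3:0], and the prohibit
 * flag in DZP, bit [4]. On a CPU with dcz_blocksize == 4 (a 64-byte
 * block, i.e. 2^4 four-byte words):
 *
 *     DC ZVA permitted  -> aa64_dczid_read() == 0x04
 *     DC ZVA prohibited -> aa64_dczid_read() == 0x14
 */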
3143
3144static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
3145                                    bool isread)
3146{
3147    if (!(env->pstate & PSTATE_SP)) {
3148        /* Access to SP_EL0 is undefined if it's being used as
3149         * the stack pointer.
3150         */
3151        return CP_ACCESS_TRAP_UNCATEGORIZED;
3152    }
3153    return CP_ACCESS_OK;
3154}
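/* For example (illustrative only): at EL1 with SPSel == 1, an
 * "MRS x0, SP_EL0" reads the banked EL0 stack pointer and is allowed;
 * with SPSel == 0 the same access is UNDEFINED, since SP_EL0 is then
 * the stack pointer currently in use.
 */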
3155
3156static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri)
3157{
3158    return env->pstate & PSTATE_SP;
3159}
3160
3161static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
3162{
3163    update_spsel(env, val);
3164}
3165
3166static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3167                        uint64_t value)
3168{
3169    ARMCPU *cpu = arm_env_get_cpu(env);
3170
3171    if (raw_read(env, ri) == value) {
3172        /* Skip the TLB flush if nothing actually changed; Linux likes
3173         * to do a lot of pointless SCTLR writes.
3174         */
3175        return;
3176    }
3177
3178    raw_write(env, ri, value);
3179    /* ??? Lots of these bits are not implemented.  */
3180    /* This may enable/disable the MMU, so do a TLB flush.  */
3181    tlb_flush(CPU(cpu), 1);
3182}
3183
3184static CPAccessResult fpexc32_access(CPUARMState *env, const ARMCPRegInfo *ri,
3185                                     bool isread)
3186{
3187    if ((env->cp15.cptr_el[2] & CPTR_TFP) && arm_current_el(env) == 2) {
3188        return CP_ACCESS_TRAP_FP_EL2;
3189    }
3190    if (env->cp15.cptr_el[3] & CPTR_TFP) {
3191        return CP_ACCESS_TRAP_FP_EL3;
3192    }
3193    return CP_ACCESS_OK;
3194}
3195
3196static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3197                       uint64_t value)
3198{
3199    env->cp15.mdcr_el3 = value & SDCR_VALID_MASK;
3200}
3201
3202static const ARMCPRegInfo v8_cp_reginfo[] = {
3203    /* Minimal set of EL0-visible registers. This will need to be expanded
3204     * significantly for system emulation of AArch64 CPUs.
3205     */
3206    { .name = "NZCV", .state = ARM_CP_STATE_AA64,
3207      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2,
3208      .access = PL0_RW, .type = ARM_CP_NZCV },
3209    { .name = "DAIF", .state = ARM_CP_STATE_AA64,
3210      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2,
3211      .type = ARM_CP_NO_RAW,
3212      .access = PL0_RW, .accessfn = aa64_daif_access,
3213      .fieldoffset = offsetof(CPUARMState, daif),
3214      .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore },
3215    { .name = "FPCR", .state = ARM_CP_STATE_AA64,
3216      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4,
3217      .access = PL0_RW, .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write },
3218    { .name = "FPSR", .state = ARM_CP_STATE_AA64,
3219      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4,
3220      .access = PL0_RW, .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write },
3221    { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
3222      .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0,
3223      .access = PL0_R, .type = ARM_CP_NO_RAW,
3224      .readfn = aa64_dczid_read },
3225    { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64,
3226      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1,
3227      .access = PL0_W, .type = ARM_CP_DC_ZVA,
3228#ifndef CONFIG_USER_ONLY
3229      /* Avoid overhead of an access check that always passes in user-mode */
3230      .accessfn = aa64_zva_access,
3231#endif
3232    },
3233    { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64,
3234      .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2,
3235      .access = PL1_R, .type = ARM_CP_CURRENTEL },
3236    /* Cache ops: all NOPs since we don't emulate caches */
3237    { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64,
3238      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
3239      .access = PL1_W, .type = ARM_CP_NOP },
3240    { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64,
3241      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
3242      .access = PL1_W, .type = ARM_CP_NOP },
3243    { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64,
3244      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1,
3245      .access = PL0_W, .type = ARM_CP_NOP,
3246      .accessfn = aa64_cacheop_access },
3247    { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64,
3248      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
3249      .access = PL1_W, .type = ARM_CP_NOP },
3250    { .name = "DC_ISW", .state = ARM_CP_STATE_AA64,
3251      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
3252      .access = PL1_W, .type = ARM_CP_NOP },
3253    { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64,
3254      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1,
3255      .access = PL0_W, .type = ARM_CP_NOP,
3256      .accessfn = aa64_cacheop_access },
3257    { .name = "DC_CSW", .state = ARM_CP_STATE_AA64,
3258      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
3259      .access = PL1_W, .type = ARM_CP_NOP },
3260    { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64,
3261      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1,
3262      .access = PL0_W, .type = ARM_CP_NOP,
3263      .accessfn = aa64_cacheop_access },
3264    { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64,
3265      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1,
3266      .access = PL0_W, .type = ARM_CP_NOP,
3267      .accessfn = aa64_cacheop_access },
3268    { .name = "DC_CISW", .state = ARM_CP_STATE_AA64,
3269      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
3270      .access = PL1_W, .type = ARM_CP_NOP },
3271    /* TLBI operations */
3272    { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64,
3273      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
3274      .access = PL1_W, .type = ARM_CP_NO_RAW,
3275      .writefn = tlbi_aa64_vmalle1is_write },
3276    { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64,
3277      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
3278      .access = PL1_W, .type = ARM_CP_NO_RAW,
3279      .writefn = tlbi_aa64_vae1is_write },
3280    { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64,
3281      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
3282      .access = PL1_W, .type = ARM_CP_NO_RAW,
3283      .writefn = tlbi_aa64_vmalle1is_write },
3284    { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64,
3285      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
3286      .access = PL1_W, .type = ARM_CP_NO_RAW,
3287      .writefn = tlbi_aa64_vae1is_write },
3288    { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64,
3289      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
3290      .access = PL1_W, .type = ARM_CP_NO_RAW,
3291      .writefn = tlbi_aa64_vae1is_write },
3292    { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64,
3293      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
3294      .access = PL1_W, .type = ARM_CP_NO_RAW,
3295      .writefn = tlbi_aa64_vae1is_write },
3296    { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64,
3297      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
3298      .access = PL1_W, .type = ARM_CP_NO_RAW,
3299      .writefn = tlbi_aa64_vmalle1_write },
3300    { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64,
3301      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
3302      .access = PL1_W, .type = ARM_CP_NO_RAW,
3303      .writefn = tlbi_aa64_vae1_write },
3304    { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64,
3305      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
3306      .access = PL1_W, .type = ARM_CP_NO_RAW,
3307      .writefn = tlbi_aa64_vmalle1_write },
3308    { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64,
3309      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
3310      .access = PL1_W, .type = ARM_CP_NO_RAW,
3311      .writefn = tlbi_aa64_vae1_write },
3312    { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64,
3313      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
3314      .access = PL1_W, .type = ARM_CP_NO_RAW,
3315      .writefn = tlbi_aa64_vae1_write },
3316    { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64,
3317      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
3318      .access = PL1_W, .type = ARM_CP_NO_RAW,
3319      .writefn = tlbi_aa64_vae1_write },
3320    { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64,
3321      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
3322      .access = PL2_W, .type = ARM_CP_NO_RAW,
3323      .writefn = tlbi_aa64_ipas2e1is_write },
3324    { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64,
3325      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
3326      .access = PL2_W, .type = ARM_CP_NO_RAW,
3327      .writefn = tlbi_aa64_ipas2e1is_write },
3328    { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64,
3329      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
3330      .access = PL2_W, .type = ARM_CP_NO_RAW,
3331      .writefn = tlbi_aa64_alle1is_write },
3332    { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64,
3333      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6,
3334      .access = PL2_W, .type = ARM_CP_NO_RAW,
3335      .writefn = tlbi_aa64_alle1is_write },
3336    { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64,
3337      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
3338      .access = PL2_W, .type = ARM_CP_NO_RAW,
3339      .writefn = tlbi_aa64_ipas2e1_write },
3340    { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64,
3341      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
3342      .access = PL2_W, .type = ARM_CP_NO_RAW,
3343      .writefn = tlbi_aa64_ipas2e1_write },
3344    { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64,
3345      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
3346      .access = PL2_W, .type = ARM_CP_NO_RAW,
3347      .writefn = tlbi_aa64_alle1_write },
3348    { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64,
3349      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6,
3350      .access = PL2_W, .type = ARM_CP_NO_RAW,
3351      .writefn = tlbi_aa64_alle1_write },
3352#ifndef CONFIG_USER_ONLY
3353    /* 64 bit address translation operations */
3354    { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
3355      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
3356      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3357    { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
3358      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
3359      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3360    { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
3361      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
3362      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3363    { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
3364      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
3365      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3366    { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64,
3367      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4,
3368      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3369    { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64,
3370      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5,
3371      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3372    { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64,
3373      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6,
3374      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3375    { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64,
3376      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7,
3377      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3378    /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */
3379    { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64,
3380      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0,
3381      .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3382    { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64,
3383      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1,
3384      .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3385    { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64,
3386      .type = ARM_CP_ALIAS,
3387      .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0,
3388      .access = PL1_RW, .resetvalue = 0,
3389      .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]),
3390      .writefn = par_write },
3391#endif
3392    /* TLB invalidate last level of translation table walk */
3393    { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
3394      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
3395    { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
3396      .type = ARM_CP_NO_RAW, .access = PL1_W,
3397      .writefn = tlbimvaa_is_write },
3398    { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
3399      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
3400    { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
3401      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
3402    /* 32 bit cache operations */
3403    { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
3404      .type = ARM_CP_NOP, .access = PL1_W },
3405    { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6,
3406      .type = ARM_CP_NOP, .access = PL1_W },
3407    { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
3408      .type = ARM_CP_NOP, .access = PL1_W },
3409    { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1,
3410      .type = ARM_CP_NOP, .access = PL1_W },
3411    { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6,
3412      .type = ARM_CP_NOP, .access = PL1_W },
3413    { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7,
3414      .type = ARM_CP_NOP, .access = PL1_W },
3415    { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
3416      .type = ARM_CP_NOP, .access = PL1_W },
3417    { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
3418      .type = ARM_CP_NOP, .access = PL1_W },
3419    { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1,
3420      .type = ARM_CP_NOP, .access = PL1_W },
3421    { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
3422      .type = ARM_CP_NOP, .access = PL1_W },
3423    { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1,
3424      .type = ARM_CP_NOP, .access = PL1_W },
3425    { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1,
3426      .type = ARM_CP_NOP, .access = PL1_W },
3427    { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
3428      .type = ARM_CP_NOP, .access = PL1_W },
3429    /* MMU Domain access control / MPU write buffer control */
3430    { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
3431      .access = PL1_RW, .resetvalue = 0,
3432      .writefn = dacr_write, .raw_writefn = raw_write,
3433      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
3434                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
3435    { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
3436      .type = ARM_CP_ALIAS,
3437      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
3438      .access = PL1_RW,
3439      .fieldoffset = offsetof(CPUARMState, elr_el[1]) },
3440    { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
3441      .type = ARM_CP_ALIAS,
3442      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
3443      .access = PL1_RW,
3444      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) },
3445    /* We rely on the access checks not allowing the guest to write to the
3446     * state field when SPSel indicates that it's being used as the stack
3447     * pointer.
3448     */
3449    { .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
3450      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0,
3451      .access = PL1_RW, .accessfn = sp_el0_access,
3452      .type = ARM_CP_ALIAS,
3453      .fieldoffset = offsetof(CPUARMState, sp_el[0]) },
3454    { .name = "SP_EL1", .state = ARM_CP_STATE_AA64,
3455      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0,
3456      .access = PL2_RW, .type = ARM_CP_ALIAS,
3457      .fieldoffset = offsetof(CPUARMState, sp_el[1]) },
3458    { .name = "SPSel", .state = ARM_CP_STATE_AA64,
3459      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
3460      .type = ARM_CP_NO_RAW,
3461      .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
3462    { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64,
3463      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0,
3464      .type = ARM_CP_ALIAS,
3465      .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]),
3466      .access = PL2_RW, .accessfn = fpexc32_access },
3467    { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
3468      .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
3469      .access = PL2_RW, .resetvalue = 0,
3470      .writefn = dacr_write, .raw_writefn = raw_write,
3471      .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
3472    { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
3473      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
3474      .access = PL2_RW, .resetvalue = 0,
3475      .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
3476    { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64,
3477      .type = ARM_CP_ALIAS,
3478      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0,
3479      .access = PL2_RW,
3480      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) },
3481    { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64,
3482      .type = ARM_CP_ALIAS,
3483      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1,
3484      .access = PL2_RW,
3485      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) },
3486    { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64,
3487      .type = ARM_CP_ALIAS,
3488      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2,
3489      .access = PL2_RW,
3490      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) },
3491    { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64,
3492      .type = ARM_CP_ALIAS,
3493      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3,
3494      .access = PL2_RW,
3495      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) },
3496    { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64,
3497      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1,
3498      .resetvalue = 0,
3499      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) },
3500    { .name = "SDCR", .type = ARM_CP_ALIAS,
3501      .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1,
3502      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
3503      .writefn = sdcr_write,
3504      .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) },
3505    REGINFO_SENTINEL
3506};
3507
3508/* Used to describe the behaviour of EL2 regs when EL2 does not exist.  */
3509static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = {
3510    { .name = "VBAR_EL2", .state = ARM_CP_STATE_AA64,
3511      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
3512      .access = PL2_RW,
3513      .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
3514    { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
3515      .type = ARM_CP_NO_RAW,
3516      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
3517      .access = PL2_RW,
3518      .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
3519    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
3520      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
3521      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3522    { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
3523      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
3524      .access = PL2_RW, .type = ARM_CP_CONST,
3525      .resetvalue = 0 },
3526    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
3527      .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
3528      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3529    { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
3530      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
3531      .access = PL2_RW, .type = ARM_CP_CONST,
3532      .resetvalue = 0 },
3533    { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
3534      .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
3535      .access = PL2_RW, .type = ARM_CP_CONST,
3536      .resetvalue = 0 },
3537    { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
3538      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
3539      .access = PL2_RW, .type = ARM_CP_CONST,
3540      .resetvalue = 0 },
3541    { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
3542      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
3543      .access = PL2_RW, .type = ARM_CP_CONST,
3544      .resetvalue = 0 },
3545    { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
3546      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
3547      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3548    { .name = "VTCR_EL2", .state = ARM_CP_STATE_BOTH,
3549      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
3550      .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
3551      .type = ARM_CP_CONST, .resetvalue = 0 },
3552    { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
3553      .cp = 15, .opc1 = 6, .crm = 2,
3554      .access = PL2_RW, .accessfn = access_el3_aa32ns,
3555      .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
3556    { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
3557      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
3558      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3559    { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
3560      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
3561      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3562    { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
3563      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
3564      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3565    { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
3566      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
3567      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3568    { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
3569      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
3570      .resetvalue = 0 },
3571    { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
3572      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
3573      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3574    { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
3575      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
3576      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3577    { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
3578      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
3579      .resetvalue = 0 },
3580    { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
3581      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
3582      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3583    { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
3584      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
3585      .resetvalue = 0 },
3586    { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
3587      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
3588      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3589    { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
3590      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
3591      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3592    { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
3593      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
3594      .access = PL2_RW, .accessfn = access_tda,
3595      .type = ARM_CP_CONST, .resetvalue = 0 },
3596    { .name = "HPFAR_EL2", .state = ARM_CP_STATE_BOTH,
3597      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
3598      .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
3599      .type = ARM_CP_CONST, .resetvalue = 0 },
3600    { .name = "HSTR_EL2", .state = ARM_CP_STATE_AA64,
3601      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
3602      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3603    REGINFO_SENTINEL
3604};
3605
3606static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
3607{
3608    ARMCPU *cpu = arm_env_get_cpu(env);
3609    uint64_t valid_mask = HCR_MASK;
3610
3611    if (arm_feature(env, ARM_FEATURE_EL3)) {
3612        valid_mask &= ~HCR_HCD;
3613    } else {
3614        valid_mask &= ~HCR_TSC;
3615    }
3616
3617    /* Clear RES0 bits.  */
3618    value &= valid_mask;
3619
3620    /* These bits change the MMU setup:
3621     * HCR_VM enables stage 2 translation
3622     * HCR_PTW forbids certain page-table setups
3623     * HCR_DC Disables stage1 and enables stage2 translation
3624     */
3625    if ((raw_read(env, ri) ^ value) & (HCR_VM | HCR_PTW | HCR_DC)) {
3626        tlb_flush(CPU(cpu), 1);
3627    }
3628    raw_write(env, ri, value);
3629}
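/* For example (illustrative only, assuming HCR_EL2 was previously
 * zero): on a CPU with EL3, HCR_EL2.HCD is treated as RES0, so
 *
 *     hcr_write(env, ri, HCR_VM | HCR_HCD);
 *
 * stores only HCR_VM, and flushes the TLB because HCR_VM changed.
 */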
3630
3631static const ARMCPRegInfo el2_cp_reginfo[] = {
3632    { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
3633      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
3634      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
3635      .writefn = hcr_write },
3636    { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
3637      .type = ARM_CP_ALIAS,
3638      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
3639      .access = PL2_RW,
3640      .fieldoffset = offsetof(CPUARMState, elr_el[2]) },
3641    { .name = "ESR_EL2", .state = ARM_CP_STATE_AA64,
3642      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
3643      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) },
3644    { .name = "FAR_EL2", .state = ARM_CP_STATE_AA64,
3645      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
3646      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) },
3647    { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
3648      .type = ARM_CP_ALIAS,
3649      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
3650      .access = PL2_RW,
3651      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) },
3652    { .name = "VBAR_EL2", .state = ARM_CP_STATE_AA64,
3653      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
3654      .access = PL2_RW, .writefn = vbar_write,
3655      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]),
3656      .resetvalue = 0 },
3657    { .name = "SP_EL2", .state = ARM_CP_STATE_AA64,
3658      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0,
3659      .access = PL3_RW, .type = ARM_CP_ALIAS,
3660      .fieldoffset = offsetof(CPUARMState, sp_el[2]) },
3661    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
3662      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
3663      .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0,
3664      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]) },
3665    { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
3666      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
3667      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]),
3668      .resetvalue = 0 },
3669    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
3670      .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
3671      .access = PL2_RW, .type = ARM_CP_ALIAS,
3672      .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) },
3673    { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
3674      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
3675      .access = PL2_RW, .type = ARM_CP_CONST,
3676      .resetvalue = 0 },
3677    /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */
3678    { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
3679      .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
3680      .access = PL2_RW, .type = ARM_CP_CONST,
3681      .resetvalue = 0 },
3682    { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
3683      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
3684      .access = PL2_RW, .type = ARM_CP_CONST,
3685      .resetvalue = 0 },
3686    { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
3687      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
3688      .access = PL2_RW, .type = ARM_CP_CONST,
3689      .resetvalue = 0 },
3690    { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
3691      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
3692      .access = PL2_RW, .writefn = vmsa_tcr_el1_write,
3693      .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
3694      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
3695    { .name = "VTCR", .state = ARM_CP_STATE_AA32,
3696      .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
3697      .type = ARM_CP_ALIAS,
3698      .access = PL2_RW, .accessfn = access_el3_aa32ns,
3699      .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
3700    { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64,
3701      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
3702      .access = PL2_RW,
3703      /* no .writefn needed as this can't cause an ASID change;
3704       * no .raw_writefn or .resetfn needed as we never use mask/base_mask
3705       */
3706      .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
3707    { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
3708      .cp = 15, .opc1 = 6, .crm = 2,
3709      .type = ARM_CP_64BIT | ARM_CP_ALIAS,
3710      .access = PL2_RW, .accessfn = access_el3_aa32ns,
3711      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2),
3712      .writefn = vttbr_write },
3713    { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
3714      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
3715      .access = PL2_RW, .writefn = vttbr_write,
3716      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) },
3717    { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
3718      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
3719      .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write,
3720      .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) },
3721    { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
3722      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
3723      .access = PL2_RW, .resetvalue = 0,
3724      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) },
3725    { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
3726      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
3727      .access = PL2_RW, .resetvalue = 0,
3728      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
3729    { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
3730      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
3731      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
3732    { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64,
3733      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
3734      .type = ARM_CP_NO_RAW, .access = PL2_W,
3735      .writefn = tlbi_aa64_alle2_write },
3736    { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64,
3737      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
3738      .type = ARM_CP_NO_RAW, .access = PL2_W,
3739      .writefn = tlbi_aa64_vae2_write },
3740    { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64,
3741      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
3742      .access = PL2_W, .type = ARM_CP_NO_RAW,
3743      .writefn = tlbi_aa64_vae2_write },
3744    { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64,
3745      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
3746      .access = PL2_W, .type = ARM_CP_NO_RAW,
3747      .writefn = tlbi_aa64_alle2is_write },
3748    { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64,
3749      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
3750      .type = ARM_CP_NO_RAW, .access = PL2_W,
3751      .writefn = tlbi_aa64_vae2is_write },
3752    { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64,
3753      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
3754      .access = PL2_W, .type = ARM_CP_NO_RAW,
3755      .writefn = tlbi_aa64_vae2is_write },
3756#ifndef CONFIG_USER_ONLY
3757    /* Unlike the other EL2-related AT operations, these must
3758     * UNDEF from EL3 if EL2 is not implemented, which is why we
3759     * define them here rather than with the rest of the AT ops.
3760     */
3761    { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64,
3762      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
3763      .access = PL2_W, .accessfn = at_s1e2_access,
3764      .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3765    { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64,
3766      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
3767      .access = PL2_W, .accessfn = at_s1e2_access,
3768      .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3769    /* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE
3770     * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3
3771     * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose
3772     * to behave as if SCR.NS was 1.
3773     */
3774    { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
3775      .access = PL2_W,
3776      .writefn = ats1h_write, .type = ARM_CP_NO_RAW },
3777    { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
3778      .access = PL2_W,
3779      .writefn = ats1h_write, .type = ARM_CP_NO_RAW },
3780    { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
3781      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
3782      /* ARMv7 requires bits 0 and 1 to reset to 1. ARMv8 defines the
3783       * reset values as IMPDEF. We choose to reset to 3 to comply with
3784       * both ARMv7 and ARMv8.
3785       */
3786      .access = PL2_RW, .resetvalue = 3,
3787      .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) },
3788    { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
3789      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
3790      .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
3791      .writefn = gt_cntvoff_write,
3792      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
3793    { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
3794      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO,
3795      .writefn = gt_cntvoff_write,
3796      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
3797    { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
3798      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
3799      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
3800      .type = ARM_CP_IO, .access = PL2_RW,
3801      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
3802    { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
3803      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
3804      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO,
3805      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
3806    { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
3807      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
3808      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
3809      .resetfn = gt_hyp_timer_reset,
3810      .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write },
3811    { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
3812      .type = ARM_CP_IO,
3813      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
3814      .access = PL2_RW,
3815      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl),
3816      .resetvalue = 0,
3817      .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write },
3818#endif
3819    /* The only field of MDCR_EL2 that has a defined architectural reset value
3820     * is MDCR_EL2.HPMN, which should reset to the value of PMCR_EL0.N; but we
3821     * don't implement any PMU event counters, so using zero as a reset
3822     * value for MDCR_EL2 is okay.
3823     */
3824    { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
3825      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
3826      .access = PL2_RW, .resetvalue = 0,
3827      .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2), },
3828    { .name = "HPFAR", .state = ARM_CP_STATE_AA32,
3829      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
3830      .access = PL2_RW, .accessfn = access_el3_aa32ns,
3831      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
3832    { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64,
3833      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
3834      .access = PL2_RW,
3835      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
3836    { .name = "HSTR_EL2", .state = ARM_CP_STATE_AA64,
3837      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
3838      .access = PL2_RW,
3839      .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore, },
3840    REGINFO_SENTINEL
3841};
3842
3843static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
3844                                   bool isread)
3845{
3846    /* The NSACR is RW at EL3, and RO for NS EL1 and NS EL2.
3847     * At Secure EL1 it traps to EL3.
3848     */
3849    if (arm_current_el(env) == 3) {
3850        return CP_ACCESS_OK;
3851    }
3852    if (arm_is_secure_below_el3(env)) {
3853        return CP_ACCESS_TRAP_EL3;
3854    }
3855    /* Accesses from EL1 NS and EL2 NS are UNDEF for write but allow reads. */
3856    if (isread) {
3857        return CP_ACCESS_OK;
3858    }
3859    return CP_ACCESS_TRAP_UNCATEGORIZED;
3860}
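/* In tabular form (summary of the checks above):
 *
 *     EL3             : read OK, write OK
 *     Secure EL1      : trap to EL3
 *     NS EL1 / NS EL2 : read OK, write UNDEFs
 */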
3861
3862static const ARMCPRegInfo el3_cp_reginfo[] = {
3863    { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
3864      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
3865      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
3866      .resetvalue = 0, .writefn = scr_write },
3867    { .name = "SCR",  .type = ARM_CP_ALIAS,
3868      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
3869      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
3870      .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
3871      .writefn = scr_write },
3872    { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64,
3873      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1,
3874      .access = PL3_RW, .resetvalue = 0,
3875      .fieldoffset = offsetof(CPUARMState, cp15.sder) },
3876    { .name = "SDER",
3877      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1,
3878      .access = PL3_RW, .resetvalue = 0,
3879      .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) },
3880    { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
3881      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
3882      .writefn = vbar_write, .resetvalue = 0,
3883      .fieldoffset = offsetof(CPUARMState, cp15.mvbar) },
3884    { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64,
3885      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0,
3886      .access = PL3_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
3887      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) },
3888    { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
3889      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
3890      .access = PL3_RW, .writefn = vmsa_tcr_el1_write,
3891      .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
3892      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) },
3893    { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
3894      .type = ARM_CP_ALIAS,
3895      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
3896      .access = PL3_RW,
3897      .fieldoffset = offsetof(CPUARMState, elr_el[3]) },
3898    { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
3899      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
3900      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) },
3901    { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64,
3902      .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0,
3903      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) },
3904    { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
3905      .type = ARM_CP_ALIAS,
3906      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
3907      .access = PL3_RW,
3908      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) },
3909    { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
3910      .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
3911      .access = PL3_RW, .writefn = vbar_write,
3912      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]),
3913      .resetvalue = 0 },
3914    { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64,
3915      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2,
3916      .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0,
3917      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) },
3918    { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64,
3919      .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2,
3920      .access = PL3_RW, .resetvalue = 0,
3921      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) },
3922    { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64,
3923      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0,
3924      .access = PL3_RW, .type = ARM_CP_CONST,
3925      .resetvalue = 0 },
3926    { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH,
3927      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0,
3928      .access = PL3_RW, .type = ARM_CP_CONST,
3929      .resetvalue = 0 },
3930    { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH,
3931      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1,
3932      .access = PL3_RW, .type = ARM_CP_CONST,
3933      .resetvalue = 0 },
3934    { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64,
3935      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0,
3936      .access = PL3_W, .type = ARM_CP_NO_RAW,
3937      .writefn = tlbi_aa64_alle3is_write },
3938    { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64,
3939      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1,
3940      .access = PL3_W, .type = ARM_CP_NO_RAW,
3941      .writefn = tlbi_aa64_vae3is_write },
3942    { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64,
3943      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5,
3944      .access = PL3_W, .type = ARM_CP_NO_RAW,
3945      .writefn = tlbi_aa64_vae3is_write },
3946    { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64,
3947      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0,
3948      .access = PL3_W, .type = ARM_CP_NO_RAW,
3949      .writefn = tlbi_aa64_alle3_write },
3950    { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64,
3951      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1,
3952      .access = PL3_W, .type = ARM_CP_NO_RAW,
3953      .writefn = tlbi_aa64_vae3_write },
3954    { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64,
3955      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5,
3956      .access = PL3_W, .type = ARM_CP_NO_RAW,
3957      .writefn = tlbi_aa64_vae3_write },
3958    REGINFO_SENTINEL
3959};
3960
3961static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
3962                                     bool isread)
3963{
3964    /* Accessible from EL0 only if SCTLR.UCT is set. (This applies only to
3965     * the AArch64 CTR_EL0; the AArch32 CTR has its own reginfo struct.)
3966     */
3967    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
3968        return CP_ACCESS_TRAP;
3969    }
3970    return CP_ACCESS_OK;
3971}
3972
3973static void dcc_write(CPUARMState *env, const ARMCPRegInfo *ri,
3974                        uint64_t value)
3975{
3976    putchar(value);
3977}
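/* This backs the DBGDTRTX_EL0 entry below: a guest
 * "MSR DBGDTRTX_EL0, x0" with 0x41 in x0 emits 'A' on QEMU's stdout.
 * A minimal one-way channel only; there is no flow control or RX side.
 */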
3978
3979static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri,
3980                        uint64_t value)
3981{
3982    /* Writes to OSLAR_EL1 may update the OS lock status, which can be
3983     * read via a bit in OSLSR_EL1.
3984     */
3985    int oslock;
3986
3987    if (ri->state == ARM_CP_STATE_AA32) {
3988        oslock = (value == 0xC5ACCE55);
3989    } else {
3990        oslock = value & 1;
3991    }
3992
3993    env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock);
3994}
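/* A worked example (illustrative only), starting from the OSLSR_EL1
 * reset value of 10 (0b1010: OSLK and OSLM[1] set):
 *
 *     AArch32: writing the key 0xC5ACCE55 keeps OSLK set; writing any
 *              other value clears it, leaving OSLSR_EL1 == 0b1000.
 *     AArch64: bit 0 of the written value becomes OSLK directly.
 */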
3995
3996static const ARMCPRegInfo debug_cp_reginfo[] = {
3997    /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
3998     * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1;
3999     * unlike DBGDRAR it is never accessible from EL0.
4000     * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64
4001     * accessor.
4002     */
4003    { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
4004      .access = PL0_R, .accessfn = access_tdra,
4005      .type = ARM_CP_CONST, .resetvalue = 0 },
4006    { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64,
4007      .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
4008      .access = PL1_R, .accessfn = access_tdra,
4009      .type = ARM_CP_CONST, .resetvalue = 0 },
4010    { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
4011      .access = PL0_R, .accessfn = access_tdra,
4012      .type = ARM_CP_CONST, .resetvalue = 0 },
4013    /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */
4014    { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH,
4015      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
4016      .access = PL1_RW, .accessfn = access_tda,
4017      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1),
4018      .resetvalue = 0 },
4019    /* MDCCSR_EL0, aka DBGDSCRint. This is a read-only mirror of MDSCR_EL1.
4020     * We don't implement the configurable EL0 access.
4021     */
4022    { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_BOTH,
4023      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
4024      .type = ARM_CP_ALIAS,
4025      .access = PL1_R, .accessfn = access_tda,
4026      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), },
4027    { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH,
4028      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4,
4029      .access = PL1_W, .type = ARM_CP_NO_RAW,
4030      .accessfn = access_tdosa,
4031      .writefn = oslar_write },
4032    { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH,
4033      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4,
4034      .access = PL1_R, .resetvalue = 10,
4035      .accessfn = access_tdosa,
4036      .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) },
4037    /* Dummy OSDLR_EL1: 32-bit Linux will read this */
4038    { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH,
4039      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4,
4040      .access = PL1_RW, .accessfn = access_tdosa,
4041      .type = ARM_CP_NOP },
4042    /* Dummy DBGVCR: Linux wants to clear this on startup, but we don't
4043     * implement vector catch debug events yet.
4044     */
4045    { .name = "DBGVCR",
4046      .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
4047      .access = PL1_RW, .accessfn = access_tda,
4048      .type = ARM_CP_NOP },
4049    { .name = "DBGDTRTX_EL0", .state = ARM_CP_STATE_AA64,
4050      .opc0 = 2, .opc1 = 3, .crn = 0, .crm = 5, .opc2 = 0,
4051      .access = PL0_W, .writefn = dcc_write, .type = ARM_CP_NO_RAW },
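    /* Reading as constant 0 leaves the TXfull and RXfull flags clear, so
     * the DCC above always appears ready for another character.
     */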
4052    { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_AA64,
4053      .opc0 = 2, .opc1 = 3, .crn = 0, .crm = 1, .opc2 = 0,
4054      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
4055    REGINFO_SENTINEL
4056};
4057
4058static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
4059    /* 64 bit access versions of the (dummy) debug registers */
4060    { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0,
4061      .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
4062    { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0,
4063      .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
4064    REGINFO_SENTINEL
4065};
4066
4067void hw_watchpoint_update(ARMCPU *cpu, int n)
4068{
4069    CPUARMState *env = &cpu->env;
4070    vaddr len = 0;
4071    vaddr wvr = env->cp15.dbgwvr[n];
4072    uint64_t wcr = env->cp15.dbgwcr[n];
4073    int mask;
4074    int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;
4075
4076    if (env->cpu_watchpoint[n]) {
4077        cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]);
4078        env->cpu_watchpoint[n] = NULL;
4079    }
4080
4081    if (!extract64(wcr, 0, 1)) {
4082        /* E bit clear : watchpoint disabled */
4083        return;
4084    }
4085
4086    switch (extract64(wcr, 3, 2)) {
4087    case 0:
4088        /* LSC 00 is reserved and must behave as if the wp is disabled */
4089        return;
4090    case 1:
4091        flags |= BP_MEM_READ;
4092        break;
4093    case 2:
4094        flags |= BP_MEM_WRITE;
4095        break;
4096    case 3:
4097        flags |= BP_MEM_ACCESS;
4098        break;
4099    }
4100
4101    /* Attempts to use both MASK and BAS fields simultaneously are
4102     * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case,
4103     * thus generating a watchpoint for every byte in the masked region.
4104     */
4105    mask = extract64(wcr, 24, 5);
4106    if (mask == 1 || mask == 2) {
4107        /* Reserved values of MASK; we must act as if the mask value was
4108         * some non-reserved value, or as if the watchpoint were disabled.
4109         * We choose the latter.
4110         */
4111        return;
4112    } else if (mask) {
4113        /* Watchpoint covers an aligned area up to 2GB in size */
4114        len = 1ULL << mask;
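        /* e.g. a (non-reserved) mask of 3 watches the naturally aligned
         * 8 byte region containing wvr.
         */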
4115        /* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE
4116         * whether the watchpoint fires when the unmasked bits match; we opt
4117         * to generate the exceptions.
4118         */
4119        wvr &= ~(len - 1);
4120    } else {
4121        /* Watchpoint covers bytes defined by the byte address select bits */
4122        int bas = extract64(wcr, 5, 8);
4123        int basstart;
4124
4125        if (bas == 0) {
4126            /* This must act as if the watchpoint is disabled */
4127            return;
4128        }
4129
4130        if (extract64(wvr, 2, 1)) {
4131            /* Deprecated case of an only 4-aligned address. BAS[7:4] are
4132             * ignored, and BAS[3:0] define which bytes to watch.
4133             */
4134            bas &= 0xf;
4135        }
4136        /* The BAS bits are supposed to be programmed to indicate a contiguous
4137         * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether
4138         * we fire for each byte in the word/doubleword addressed by the WVR.
4139         * We choose to ignore any non-zero bits after the first range of 1s.
4140         */
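        /* For example, a BAS of 0b00111100 gives basstart 2 and len 4,
         * so we watch the four bytes starting at wvr + 2.
         */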
4141        basstart = ctz32(bas);
4142        len = cto32(bas >> basstart);
4143        wvr += basstart;
4144    }
4145
4146    cpu_watchpoint_insert(CPU(cpu), wvr, len, flags,
4147                          &env->cpu_watchpoint[n]);
4148}
4149
4150void hw_watchpoint_update_all(ARMCPU *cpu)
4151{
4152    int i;
4153    CPUARMState *env = &cpu->env;
4154
4155    /* Completely clear out existing QEMU watchpoints and our array, to
4156     * avoid possible stale entries following migration load.
4157     */
4158    cpu_watchpoint_remove_all(CPU(cpu), BP_CPU);
4159    memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint));
4160
4161    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) {
4162        hw_watchpoint_update(cpu, i);
4163    }
4164}
4165
4166static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4167                         uint64_t value)
4168{
4169    ARMCPU *cpu = arm_env_get_cpu(env);
4170    int i = ri->crm;
4171
4172    /* Bits [63:49] are hardwired to the value of bit [48]; that is, the
4173     * register behaves as if any value written were sign-extended from
4174     * bit [48]. Bits [1:0] are RES0.
4175     */
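    /* For example, writing 0x0001000000000003 stores 0xffff000000000000. */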
4176    value = sextract64(value, 0, 49) & ~3ULL;
4177
4178    raw_write(env, ri, value);
4179    hw_watchpoint_update(cpu, i);
4180}
4181
4182static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4183                         uint64_t value)
4184{
4185    ARMCPU *cpu = arm_env_get_cpu(env);
4186    int i = ri->crm;
4187
4188    raw_write(env, ri, value);
4189    hw_watchpoint_update(cpu, i);
4190}
4191
4192void hw_breakpoint_update(ARMCPU *cpu, int n)
4193{
4194    CPUARMState *env = &cpu->env;
4195    uint64_t bvr = env->cp15.dbgbvr[n];
4196    uint64_t bcr = env->cp15.dbgbcr[n];
4197    vaddr addr;
4198    int bt;
4199    int flags = BP_CPU;
4200
4201    if (env->cpu_breakpoint[n]) {
4202        cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]);
4203        env->cpu_breakpoint[n] = NULL;
4204    }
4205
4206    if (!extract64(bcr, 0, 1)) {
4207        /* E bit clear : breakpoint disabled */
4208        return;
4209    }
4210
4211    bt = extract64(bcr, 20, 4);
4212
4213    switch (bt) {
4214    case 4: /* unlinked address mismatch (reserved if AArch64) */
4215    case 5: /* linked address mismatch (reserved if AArch64) */
4216        qemu_log_mask(LOG_UNIMP,
4217                      "arm: address mismatch breakpoint types not implemented\n");
4218        return;
4219    case 0: /* unlinked address match */
4220    case 1: /* linked address match */
4221    {
4222        /* Bits [63:49] are hardwired to the value of bit [48]; that is,
4223         * we behave as if the register was sign extended. Bits [1:0] are
4224         * RES0. The BAS field is used to allow setting breakpoints on 16
4225         * bit wide instructions; it is CONSTRAINED UNPREDICTABLE whether
4226         * a bp will fire if the addresses covered by the bp and the addresses
4227         * covered by the insn overlap but the insn doesn't start at the
4228         * start of the bp address range. We choose to require the insn and
4229         * the bp to have the same address. The constraints on writing to
4230         * BAS enforced in dbgbcr_write mean we have only four cases:
4231         *  0b0000  => no breakpoint
4232         *  0b0011  => breakpoint on addr
4233         *  0b1100  => breakpoint on addr + 2
4234         *  0b1111  => breakpoint on addr
4235         * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c).
4236         */
4237        int bas = extract64(bcr, 5, 4);
4238        addr = sextract64(bvr, 0, 49) & ~3ULL;
4239        if (bas == 0) {
4240            return;
4241        }
4242        if (bas == 0xc) {
4243            addr += 2;
4244        }
4245        break;
4246    }
4247    case 2: /* unlinked context ID match */
4248    case 8: /* unlinked VMID match (reserved if no EL2) */
4249    case 10: /* unlinked context ID and VMID match (reserved if no EL2) */
4250        qemu_log_mask(LOG_UNIMP,
4251                      "arm: unlinked context breakpoint types not implemented\n");
4252        return;
4253    case 9: /* linked VMID match (reserved if no EL2) */
4254    case 11: /* linked context ID and VMID match (reserved if no EL2) */
4255    case 3: /* linked context ID match */
4256    default:
4257        /* We must generate no events for Linked context matches (unless
4258         * they are linked to by some other bp/wp, which is handled in
4259         * updates for the linking bp/wp). We choose to also generate no events
4260         * for reserved values.
4261         */
4262        return;
4263    }
4264
4265    cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]);
4266}
4267
4268void hw_breakpoint_update_all(ARMCPU *cpu)
4269{
4270    int i;
4271    CPUARMState *env = &cpu->env;
4272
4273    /* Completely clear out existing QEMU breakpoints and our array, to
4274     * avoid possible stale entries following migration load.
4275     */
4276    cpu_breakpoint_remove_all(CPU(cpu), BP_CPU);
4277    memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint));
4278
4279    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) {
4280        hw_breakpoint_update(cpu, i);
4281    }
4282}
4283
4284static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4285                         uint64_t value)
4286{
4287    ARMCPU *cpu = arm_env_get_cpu(env);
4288    int i = ri->crm;
4289
4290    raw_write(env, ri, value);
4291    hw_breakpoint_update(cpu, i);
4292}
4293
4294static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4295                         uint64_t value)
4296{
4297    ARMCPU *cpu = arm_env_get_cpu(env);
4298    int i = ri->crm;
4299
4300    /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only
4301     * copy of BAS[0].
4302     */
4303    value = deposit64(value, 6, 1, extract64(value, 5, 1));
4304    value = deposit64(value, 8, 1, extract64(value, 7, 1));
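    /* For example, an attempt to write BAS = 0b0101 is stored as 0b1111. */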
4305
4306    raw_write(env, ri, value);
4307    hw_breakpoint_update(cpu, i);
4308}
4309
4310static void define_debug_regs(ARMCPU *cpu)
4311{
4312    /* Define v7 and v8 architectural debug registers.
4313     * These are just dummy implementations for now.
4314     */
4315    int i;
4316    int wrps, brps, ctx_cmps;
4317    ARMCPRegInfo dbgdidr = {
4318        .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
4319        .access = PL0_R, .accessfn = access_tda,
4320        .type = ARM_CP_CONST, .resetvalue = cpu->dbgdidr,
4321    };
4322
4323    /* Note that all these register fields hold "number of Xs minus 1". */
4324    brps = extract32(cpu->dbgdidr, 24, 4);
4325    wrps = extract32(cpu->dbgdidr, 28, 4);
4326    ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
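    /* e.g. a BRPs field of 5 means six breakpoints, hence the 'brps + 1'
     * loop bounds below.
     */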
4327
4328    assert(ctx_cmps <= brps);
4329
4330    /* The DBGDIDR and ID_AA64DFR0_EL1 define various properties
4331     * of the debug registers such as number of breakpoints;
4332     * check that if they both exist then they agree.
4333     */
4334    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
4335        assert(extract32(cpu->id_aa64dfr0, 12, 4) == brps);
4336        assert(extract32(cpu->id_aa64dfr0, 20, 4) == wrps);
4337        assert(extract32(cpu->id_aa64dfr0, 28, 4) == ctx_cmps);
4338    }
4339
4340    define_one_arm_cp_reg(cpu, &dbgdidr);
4341    define_arm_cp_regs(cpu, debug_cp_reginfo);
4342
4343    if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) {
4344        define_arm_cp_regs(cpu, debug_lpae_cp_reginfo);
4345    }
4346
4347    for (i = 0; i < brps + 1; i++) {
4348        ARMCPRegInfo dbgregs[] = {
4349            { .name = "DBGBVR", .state = ARM_CP_STATE_BOTH,
4350              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4,
4351              .access = PL1_RW, .accessfn = access_tda,
4352              .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]),
4353              .writefn = dbgbvr_write, .raw_writefn = raw_write
4354            },
4355            { .name = "DBGBCR", .state = ARM_CP_STATE_BOTH,
4356              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5,
4357              .access = PL1_RW, .accessfn = access_tda,
4358              .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]),
4359              .writefn = dbgbcr_write, .raw_writefn = raw_write
4360            },
4361            REGINFO_SENTINEL
4362        };
4363        define_arm_cp_regs(cpu, dbgregs);
4364    }
4365
4366    for (i = 0; i < wrps + 1; i++) {
4367        ARMCPRegInfo dbgregs[] = {
4368            { .name = "DBGWVR", .state = ARM_CP_STATE_BOTH,
4369              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6,
4370              .access = PL1_RW, .accessfn = access_tda,
4371              .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]),
4372              .writefn = dbgwvr_write, .raw_writefn = raw_write
4373            },
4374            { .name = "DBGWCR", .state = ARM_CP_STATE_BOTH,
4375              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7,
4376              .access = PL1_RW, .accessfn = access_tda,
4377              .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]),
4378              .writefn = dbgwcr_write, .raw_writefn = raw_write
4379            },
4380            REGINFO_SENTINEL
4381        };
4382        define_arm_cp_regs(cpu, dbgregs);
4383    }
4384}
4385
4386void register_cp_regs_for_features(ARMCPU *cpu)
4387{
4388    /* Register all the coprocessor registers based on feature bits */
4389    CPUARMState *env = &cpu->env;
4390    if (arm_feature(env, ARM_FEATURE_M)) {
4391        /* M profile has no coprocessor registers */
4392        return;
4393    }
4394
4395    define_arm_cp_regs(cpu, cp_reginfo);
4396    if (!arm_feature(env, ARM_FEATURE_V8)) {
4397        /* Must go early as it is full of wildcards that may be
4398         * overridden by later definitions.
4399         */
4400        define_arm_cp_regs(cpu, not_v8_cp_reginfo);
4401    }
4402
4403    if (arm_feature(env, ARM_FEATURE_V6)) {
4404        /* The ID registers all have impdef reset values */
4405        ARMCPRegInfo v6_idregs[] = {
4406            { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH,
4407              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
4408              .access = PL1_R, .type = ARM_CP_CONST,
4409              .resetvalue = cpu->id_pfr0 },
4410            { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH,
4411              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1,
4412              .access = PL1_R, .type = ARM_CP_CONST,
4413              .resetvalue = cpu->id_pfr1 },
4414            { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH,
4415              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2,
4416              .access = PL1_R, .type = ARM_CP_CONST,
4417              .resetvalue = cpu->id_dfr0 },
4418            { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH,
4419              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3,
4420              .access = PL1_R, .type = ARM_CP_CONST,
4421              .resetvalue = cpu->id_afr0 },
4422            { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH,
4423              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4,
4424              .access = PL1_R, .type = ARM_CP_CONST,
4425              .resetvalue = cpu->id_mmfr0 },
4426            { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH,
4427              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5,
4428              .access = PL1_R, .type = ARM_CP_CONST,
4429              .resetvalue = cpu->id_mmfr1 },
4430            { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH,
4431              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6,
4432              .access = PL1_R, .type = ARM_CP_CONST,
4433              .resetvalue = cpu->id_mmfr2 },
4434            { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH,
4435              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7,
4436              .access = PL1_R, .type = ARM_CP_CONST,
4437              .resetvalue = cpu->id_mmfr3 },
4438            { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
4439              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
4440              .access = PL1_R, .type = ARM_CP_CONST,
4441              .resetvalue = cpu->id_isar0 },
4442            { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH,
4443              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1,
4444              .access = PL1_R, .type = ARM_CP_CONST,
4445              .resetvalue = cpu->id_isar1 },
4446            { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH,
4447              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
4448              .access = PL1_R, .type = ARM_CP_CONST,
4449              .resetvalue = cpu->id_isar2 },
4450            { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH,
4451              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3,
4452              .access = PL1_R, .type = ARM_CP_CONST,
4453              .resetvalue = cpu->id_isar3 },
4454            { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH,
4455              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4,
4456              .access = PL1_R, .type = ARM_CP_CONST,
4457              .resetvalue = cpu->id_isar4 },
4458            { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH,
4459              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
4460              .access = PL1_R, .type = ARM_CP_CONST,
4461              .resetvalue = cpu->id_isar5 },
4462            { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH,
4463              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6,
4464              .access = PL1_R, .type = ARM_CP_CONST,
4465              .resetvalue = cpu->id_mmfr4 },
4466            /* 7 is as yet unallocated and must RAZ */
4467            { .name = "ID_ISAR7_RESERVED", .state = ARM_CP_STATE_BOTH,
4468              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7,
4469              .access = PL1_R, .type = ARM_CP_CONST,
4470              .resetvalue = 0 },
4471            REGINFO_SENTINEL
4472        };
4473        define_arm_cp_regs(cpu, v6_idregs);
4474        define_arm_cp_regs(cpu, v6_cp_reginfo);
4475    } else {
4476        define_arm_cp_regs(cpu, not_v6_cp_reginfo);
4477    }
4478    if (arm_feature(env, ARM_FEATURE_V6K)) {
4479        define_arm_cp_regs(cpu, v6k_cp_reginfo);
4480    }
4481    if (arm_feature(env, ARM_FEATURE_V7MP) &&
4482        !arm_feature(env, ARM_FEATURE_MPU)) {
4483        define_arm_cp_regs(cpu, v7mp_cp_reginfo);
4484    }
4485    if (arm_feature(env, ARM_FEATURE_V7)) {
4486        /* v7 performance monitor control register: same implementor
4487         * field as main ID register, and we implement only the cycle
4488         * count register.
4489         */
4490#ifndef CONFIG_USER_ONLY
4491        ARMCPRegInfo pmcr = {
4492            .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
4493            .access = PL0_RW,
4494            .type = ARM_CP_IO | ARM_CP_ALIAS,
4495            .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
4496            .accessfn = pmreg_access, .writefn = pmcr_write,
4497            .raw_writefn = raw_write,
4498        };
4499        ARMCPRegInfo pmcr64 = {
4500            .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64,
4501            .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0,
4502            .access = PL0_RW, .accessfn = pmreg_access,
4503            .type = ARM_CP_IO,
4504            .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
4505            .resetvalue = cpu->midr & 0xff000000,
4506            .writefn = pmcr_write, .raw_writefn = raw_write,
4507        };
4508        define_one_arm_cp_reg(cpu, &pmcr);
4509        define_one_arm_cp_reg(cpu, &pmcr64);
4510#endif
4511        ARMCPRegInfo clidr = {
4512            .name = "CLIDR", .state = ARM_CP_STATE_BOTH,
4513            .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
4514            .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->clidr
4515        };
4516        define_one_arm_cp_reg(cpu, &clidr);
4517        define_arm_cp_regs(cpu, v7_cp_reginfo);
4518        define_debug_regs(cpu);
4519    } else {
4520        define_arm_cp_regs(cpu, not_v7_cp_reginfo);
4521    }
4522    if (arm_feature(env, ARM_FEATURE_V8)) {
4523        /* AArch64 ID registers, which all have impdef reset values.
4524         * Note that within the ID register ranges the unused slots
4525         * must all RAZ, not UNDEF; future architecture versions may
4526         * define new registers here.
4527         */
4528        ARMCPRegInfo v8_idregs[] = {
4529            { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
4530              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
4531              .access = PL1_R, .type = ARM_CP_CONST,
4532              .resetvalue = cpu->id_aa64pfr0 },
4533            { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
4534              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
4535              .access = PL1_R, .type = ARM_CP_CONST,
4536              .resetvalue = cpu->id_aa64pfr1},
4537            { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4538              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2,
4539              .access = PL1_R, .type = ARM_CP_CONST,
4540              .resetvalue = 0 },
4541            { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4542              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3,
4543              .access = PL1_R, .type = ARM_CP_CONST,
4544              .resetvalue = 0 },
4545            { .name = "ID_AA64PFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4546              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4,
4547              .access = PL1_R, .type = ARM_CP_CONST,
4548              .resetvalue = 0 },
4549            { .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4550              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5,
4551              .access = PL1_R, .type = ARM_CP_CONST,
4552              .resetvalue = 0 },
4553            { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4554              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6,
4555              .access = PL1_R, .type = ARM_CP_CONST,
4556              .resetvalue = 0 },
4557            { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4558              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7,
4559              .access = PL1_R, .type = ARM_CP_CONST,
4560              .resetvalue = 0 },
4561            { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64,
4562              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
4563              .access = PL1_R, .type = ARM_CP_CONST,
4564              /* We mask out the PMUVer field, because we don't currently
4565               * implement the PMU. Not advertising it prevents the guest
4566               * from trying to use it and getting UNDEFs on registers we
4567               * don't implement.
4568               */
4569              .resetvalue = cpu->id_aa64dfr0 & ~0xf00 },
4570            { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
4571              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
4572              .access = PL1_R, .type = ARM_CP_CONST,
4573              .resetvalue = cpu->id_aa64dfr1 },
4574            { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4575              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2,
4576              .access = PL1_R, .type = ARM_CP_CONST,
4577              .resetvalue = 0 },
4578            { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4579              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3,
4580              .access = PL1_R, .type = ARM_CP_CONST,
4581              .resetvalue = 0 },
4582            { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64,
4583              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
4584              .access = PL1_R, .type = ARM_CP_CONST,
4585              .resetvalue = cpu->id_aa64afr0 },
4586            { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
4587              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
4588              .access = PL1_R, .type = ARM_CP_CONST,
4589              .resetvalue = cpu->id_aa64afr1 },
4590            { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4591              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6,
4592              .access = PL1_R, .type = ARM_CP_CONST,
4593              .resetvalue = 0 },
4594            { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4595              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7,
4596              .access = PL1_R, .type = ARM_CP_CONST,
4597              .resetvalue = 0 },
4598            { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
4599              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
4600              .access = PL1_R, .type = ARM_CP_CONST,
4601              .resetvalue = cpu->id_aa64isar0 },
4602            { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
4603              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
4604              .access = PL1_R, .type = ARM_CP_CONST,
4605              .resetvalue = cpu->id_aa64isar1 },
4606            { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4607              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
4608              .access = PL1_R, .type = ARM_CP_CONST,
4609              .resetvalue = 0 },
4610            { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4611              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3,
4612              .access = PL1_R, .type = ARM_CP_CONST,
4613              .resetvalue = 0 },
4614            { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4615              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4,
4616              .access = PL1_R, .type = ARM_CP_CONST,
4617              .resetvalue = 0 },
4618            { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4619              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5,
4620              .access = PL1_R, .type = ARM_CP_CONST,
4621              .resetvalue = 0 },
4622            { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4623              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6,
4624              .access = PL1_R, .type = ARM_CP_CONST,
4625              .resetvalue = 0 },
4626            { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4627              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7,
4628              .access = PL1_R, .type = ARM_CP_CONST,
4629              .resetvalue = 0 },
4630            { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
4631              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
4632              .access = PL1_R, .type = ARM_CP_CONST,
4633              .resetvalue = cpu->id_aa64mmfr0 },
4634            { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
4635              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
4636              .access = PL1_R, .type = ARM_CP_CONST,
4637              .resetvalue = cpu->id_aa64mmfr1 },
4638            { .name = "ID_AA64MMFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4639              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2,
4640              .access = PL1_R, .type = ARM_CP_CONST,
4641              .resetvalue = 0 },
4642            { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4643              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3,
4644              .access = PL1_R, .type = ARM_CP_CONST,
4645              .resetvalue = 0 },
4646            { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4647              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4,
4648              .access = PL1_R, .type = ARM_CP_CONST,
4649              .resetvalue = 0 },
4650            { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4651              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5,
4652              .access = PL1_R, .type = ARM_CP_CONST,
4653              .resetvalue = 0 },
4654            { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4655              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6,
4656              .access = PL1_R, .type = ARM_CP_CONST,
4657              .resetvalue = 0 },
4658            { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4659              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7,
4660              .access = PL1_R, .type = ARM_CP_CONST,
4661              .resetvalue = 0 },
4662            { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64,
4663              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
4664              .access = PL1_R, .type = ARM_CP_CONST,
4665              .resetvalue = cpu->mvfr0 },
4666            { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64,
4667              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
4668              .access = PL1_R, .type = ARM_CP_CONST,
4669              .resetvalue = cpu->mvfr1 },
4670            { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64,
4671              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
4672              .access = PL1_R, .type = ARM_CP_CONST,
4673              .resetvalue = cpu->mvfr2 },
4674            { .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4675              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3,
4676              .access = PL1_R, .type = ARM_CP_CONST,
4677              .resetvalue = 0 },
4678            { .name = "MVFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4679              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4,
4680              .access = PL1_R, .type = ARM_CP_CONST,
4681              .resetvalue = 0 },
4682            { .name = "MVFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4683              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5,
4684              .access = PL1_R, .type = ARM_CP_CONST,
4685              .resetvalue = 0 },
4686            { .name = "MVFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4687              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6,
4688              .access = PL1_R, .type = ARM_CP_CONST,
4689              .resetvalue = 0 },
4690            { .name = "MVFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4691              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7,
4692              .access = PL1_R, .type = ARM_CP_CONST,
4693              .resetvalue = 0 },
4694            { .name = "PMCEID0", .state = ARM_CP_STATE_AA32,
4695              .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6,
4696              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
4697              .resetvalue = cpu->pmceid0 },
4698            { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64,
4699              .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6,
4700              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
4701              .resetvalue = cpu->pmceid0 },
4702            { .name = "PMCEID1", .state = ARM_CP_STATE_AA32,
4703              .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7,
4704              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
4705              .resetvalue = cpu->pmceid1 },
4706            { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64,
4707              .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7,
4708              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
4709              .resetvalue = cpu->pmceid1 },
4710            REGINFO_SENTINEL
4711        };
4712        /* RVBAR_EL1 is only implemented if EL1 is the highest EL */
4713        if (!arm_feature(env, ARM_FEATURE_EL3) &&
4714            !arm_feature(env, ARM_FEATURE_EL2)) {
4715            ARMCPRegInfo rvbar = {
4716                .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64,
4717                .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
4718                .type = ARM_CP_CONST, .access = PL1_R, .resetvalue = cpu->rvbar
4719            };
4720            define_one_arm_cp_reg(cpu, &rvbar);
4721        }
4722        define_arm_cp_regs(cpu, v8_idregs);
4723        define_arm_cp_regs(cpu, v8_cp_reginfo);
4724    }
4725    if (arm_feature(env, ARM_FEATURE_EL2)) {
4726        uint64_t vmpidr_def = mpidr_read_val(env);
4727        ARMCPRegInfo vpidr_regs[] = {
4728            { .name = "VPIDR", .state = ARM_CP_STATE_AA32,
4729              .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
4730              .access = PL2_RW, .accessfn = access_el3_aa32ns,
4731              .resetvalue = cpu->midr,
4732              .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
4733            { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64,
4734              .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
4735              .access = PL2_RW, .resetvalue = cpu->midr,
4736              .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
4737            { .name = "VMPIDR", .state = ARM_CP_STATE_AA32,
4738              .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
4739              .access = PL2_RW, .accessfn = access_el3_aa32ns,
4740              .resetvalue = vmpidr_def,
4741              .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
4742            { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64,
4743              .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
4744              .access = PL2_RW,
4745              .resetvalue = vmpidr_def,
4746              .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
4747            REGINFO_SENTINEL
4748        };
4749        define_arm_cp_regs(cpu, vpidr_regs);
4750        define_arm_cp_regs(cpu, el2_cp_reginfo);
4751        /* RVBAR_EL2 is only implemented if EL2 is the highest EL */
4752        if (!arm_feature(env, ARM_FEATURE_EL3)) {
4753            ARMCPRegInfo rvbar = {
4754                .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64,
4755                .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1,
4756                .type = ARM_CP_CONST, .access = PL2_R, .resetvalue = cpu->rvbar
4757            };
4758            define_one_arm_cp_reg(cpu, &rvbar);
4759        }
4760    } else {
4761        /* If EL2 is missing but higher ELs are enabled, we need to
4762         * register the no_el2 reginfos.
4763         */
4764        if (arm_feature(env, ARM_FEATURE_EL3)) {
4765            /* When EL3 exists but not EL2, VPIDR and VMPIDR take the value
4766             * of MIDR_EL1 and MPIDR_EL1.
4767             */
4768            ARMCPRegInfo vpidr_regs[] = {
4769                { .name = "VPIDR_EL2", .state = ARM_CP_STATE_BOTH,
4770                  .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
4771                  .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
4772                  .type = ARM_CP_CONST, .resetvalue = cpu->midr,
4773                  .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
4774                { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_BOTH,
4775                  .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
4776                  .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
4777                  .type = ARM_CP_NO_RAW,
4778                  .writefn = arm_cp_write_ignore, .readfn = mpidr_read },
4779                REGINFO_SENTINEL
4780            };
4781            define_arm_cp_regs(cpu, vpidr_regs);
4782            define_arm_cp_regs(cpu, el3_no_el2_cp_reginfo);
4783        }
4784    }
4785    if (arm_feature(env, ARM_FEATURE_EL3)) {
4786        define_arm_cp_regs(cpu, el3_cp_reginfo);
4787        ARMCPRegInfo el3_regs[] = {
4788            { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64,
4789              .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1,
4790              .type = ARM_CP_CONST, .access = PL3_R, .resetvalue = cpu->rvbar },
4791            { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64,
4792              .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0,
4793              .access = PL3_RW,
4794              .raw_writefn = raw_write, .writefn = sctlr_write,
4795              .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]),
4796              .resetvalue = cpu->reset_sctlr },
4797            REGINFO_SENTINEL
4798        };
4799
4800        define_arm_cp_regs(cpu, el3_regs);
4801    }
4802    /* The behaviour of NSACR is sufficiently various that we don't
4803     * try to describe it in a single reginfo:
4804     *  if EL3 is 64 bit, then trap to EL3 from S EL1,
4805     *     reads as constant 0xc00 from NS EL1 and NS EL2
4806     *  if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2
4807     *  if v7 without EL3, register doesn't exist
4808     *  if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2
4809     */
4810    if (arm_feature(env, ARM_FEATURE_EL3)) {
4811        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
4812            ARMCPRegInfo nsacr = {
4813                .name = "NSACR", .type = ARM_CP_CONST,
4814                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
4815                .access = PL1_RW, .accessfn = nsacr_access,
4816                .resetvalue = 0xc00
4817            };
4818            define_one_arm_cp_reg(cpu, &nsacr);
4819        } else {
4820            ARMCPRegInfo nsacr = {
4821                .name = "NSACR",
4822                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
4823                .access = PL3_RW | PL1_R,
4824                .resetvalue = 0,
4825                .fieldoffset = offsetof(CPUARMState, cp15.nsacr)
4826            };
4827            define_one_arm_cp_reg(cpu, &nsacr);
4828        }
4829    } else {
4830        if (arm_feature(env, ARM_FEATURE_V8)) {
4831            ARMCPRegInfo nsacr = {
4832                .name = "NSACR", .type = ARM_CP_CONST,
4833                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
4834                .access = PL1_R,
4835                .resetvalue = 0xc00
4836            };
4837            define_one_arm_cp_reg(cpu, &nsacr);
4838        }
4839    }
4840
4841    if (arm_feature(env, ARM_FEATURE_MPU)) {
4842        if (arm_feature(env, ARM_FEATURE_V6)) {
4843            /* PMSAv6 not implemented */
4844            assert(arm_feature(env, ARM_FEATURE_V7));
4845            define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
4846            define_arm_cp_regs(cpu, pmsav7_cp_reginfo);
4847        } else {
4848            define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
4849        }
4850    } else {
4851        define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
4852        define_arm_cp_regs(cpu, vmsa_cp_reginfo);
4853    }
4854    if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
4855        define_arm_cp_regs(cpu, t2ee_cp_reginfo);
4856    }
4857    if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
4858        define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
4859    }
4860    if (arm_feature(env, ARM_FEATURE_VAPA)) {
4861        define_arm_cp_regs(cpu, vapa_cp_reginfo);
4862    }
4863    if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
4864        define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
4865    }
4866    if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
4867        define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
4868    }
4869    if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
4870        define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
4871    }
4872    if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
4873        define_arm_cp_regs(cpu, omap_cp_reginfo);
4874    }
4875    if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
4876        define_arm_cp_regs(cpu, strongarm_cp_reginfo);
4877    }
4878    if (arm_feature(env, ARM_FEATURE_XSCALE)) {
4879        define_arm_cp_regs(cpu, xscale_cp_reginfo);
4880    }
4881    if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
4882        define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
4883    }
4884    if (arm_feature(env, ARM_FEATURE_LPAE)) {
4885        define_arm_cp_regs(cpu, lpae_cp_reginfo);
4886    }
4887    /* Slightly awkwardly, the OMAP and StrongARM cores need all of
4888     * cp15 crn=0 to be writes-ignored, whereas for other cores they should
4889     * be read-only (ie write causes UNDEF exception).
4890     */
4891    {
4892        ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = {
4893            /* Pre-v8 MIDR space.
4894             * Note that the MIDR isn't a simple constant register because
4895             * of the TI925 behaviour where writes to another register can
4896             * cause the MIDR value to change.
4897             *
4898             * Unimplemented registers in the c15 0 0 0 space default to
4899             * MIDR. Define MIDR first as this entire space, then CTR, TCMTR
4900             * and friends override accordingly.
4901             */
4902            { .name = "MIDR",
4903              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY,
4904              .access = PL1_R, .resetvalue = cpu->midr,
4905              .writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
4906              .readfn = midr_read,
4907              .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
4908              .type = ARM_CP_OVERRIDE },
4909            /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
4910            { .name = "DUMMY",
4911              .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
4912              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
4913            { .name = "DUMMY",
4914              .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY,
4915              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
4916            { .name = "DUMMY",
4917              .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY,
4918              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
4919            { .name = "DUMMY",
4920              .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY,
4921              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
4922            { .name = "DUMMY",
4923              .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY,
4924              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
4925            REGINFO_SENTINEL
4926        };
4927        ARMCPRegInfo id_v8_midr_cp_reginfo[] = {
4928            { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH,
4929              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0,
4930              .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr,
4931              .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
4932              .readfn = midr_read },
4933            /* crn = 0 op1 = 0 crm = 0 op2 = 4,7 : AArch32 aliases of MIDR */
4934            { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
4935              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
4936              .access = PL1_R, .resetvalue = cpu->midr },
4937            { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
4938              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7,
4939              .access = PL1_R, .resetvalue = cpu->midr },
4940            { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH,
4941              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6,
4942              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->revidr },
4943            REGINFO_SENTINEL
4944        };
4945        ARMCPRegInfo id_cp_reginfo[] = {
4946            /* These are common to v8 and pre-v8 */
4947            { .name = "CTR",
4948              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
4949              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
4950            { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
4951              .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
4952              .access = PL0_R, .accessfn = ctr_el0_access,
4953              .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
4954            /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */
4955            { .name = "TCMTR",
4956              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
4957              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
4958            REGINFO_SENTINEL
4959        };
4960        /* TLBTR is specific to VMSA */
4961        ARMCPRegInfo id_tlbtr_reginfo = {
4962              .name = "TLBTR",
4963              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
4964              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0,
4965        };
4966        /* MPUIR is specific to PMSA V6+ */
4967        ARMCPRegInfo id_mpuir_reginfo = {
4968              .name = "MPUIR",
4969              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
4970              .access = PL1_R, .type = ARM_CP_CONST,
4971              .resetvalue = cpu->pmsav7_dregion << 8
4972        };
4973        ARMCPRegInfo crn0_wi_reginfo = {
4974            .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
4975            .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
4976            .type = ARM_CP_NOP | ARM_CP_OVERRIDE
4977        };
4978        if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
4979            arm_feature(env, ARM_FEATURE_STRONGARM)) {
4980            ARMCPRegInfo *r;
4981            /* Register the blanket "writes ignored" value first to cover the
4982             * whole space. Then update the specific ID registers to allow write
4983             * access, so that they ignore writes rather than causing them to
4984             * UNDEF.
4985             */
4986            define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
4987            for (r = id_pre_v8_midr_cp_reginfo;
4988                 r->type != ARM_CP_SENTINEL; r++) {
4989                r->access = PL1_RW;
4990            }
4991            for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) {
4992                r->access = PL1_RW;
4993            }
4994            id_tlbtr_reginfo.access = PL1_RW;
4995            id_mpuir_reginfo.access = PL1_RW;
4996        }
4997        if (arm_feature(env, ARM_FEATURE_V8)) {
4998            define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo);
4999        } else {
5000            define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo);
5001        }
5002        define_arm_cp_regs(cpu, id_cp_reginfo);
5003        if (!arm_feature(env, ARM_FEATURE_MPU)) {
5004            define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo);
5005        } else if (arm_feature(env, ARM_FEATURE_V7)) {
5006            define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
5007        }
5008    }
5009
5010    if (arm_feature(env, ARM_FEATURE_MPIDR)) {
5011        define_arm_cp_regs(cpu, mpidr_cp_reginfo);
5012    }
5013
5014    if (arm_feature(env, ARM_FEATURE_AUXCR)) {
5015        ARMCPRegInfo auxcr_reginfo[] = {
5016            { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH,
5017              .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1,
5018              .access = PL1_RW, .type = ARM_CP_CONST,
5019              .resetvalue = cpu->reset_auxcr },
5020            { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH,
5021              .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1,
5022              .access = PL2_RW, .type = ARM_CP_CONST,
5023              .resetvalue = 0 },
5024            { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64,
5025              .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1,
5026              .access = PL3_RW, .type = ARM_CP_CONST,
5027              .resetvalue = 0 },
5028            REGINFO_SENTINEL
5029        };
5030        define_arm_cp_regs(cpu, auxcr_reginfo);
5031    }
5032
5033    if (arm_feature(env, ARM_FEATURE_CBAR)) {
5034        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
5035            /* 32 bit view is [31:18] 0...0 [43:32]. */
5036            uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18)
5037                | extract64(cpu->reset_cbar, 32, 12);
5038            ARMCPRegInfo cbar_reginfo[] = {
5039                { .name = "CBAR",
5040                  .type = ARM_CP_CONST,
5041                  .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
5042                  .access = PL1_R, .resetvalue = cpu->reset_cbar },
5043                { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64,
5044                  .type = ARM_CP_CONST,
5045                  .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0,
5046                  .access = PL1_R, .resetvalue = cbar32 },
5047                REGINFO_SENTINEL
5048            };
5049            /* We don't implement a r/w 64 bit CBAR currently */
5050            assert(arm_feature(env, ARM_FEATURE_CBAR_RO));
5051            define_arm_cp_regs(cpu, cbar_reginfo);
5052        } else {
5053            ARMCPRegInfo cbar = {
5054                .name = "CBAR",
5055                .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
5056                .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar,
5057                .fieldoffset = offsetof(CPUARMState,
5058                                        cp15.c15_config_base_address)
5059            };
5060            if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
5061                cbar.access = PL1_R;
5062                cbar.fieldoffset = 0;
5063                cbar.type = ARM_CP_CONST;
5064            }
5065            define_one_arm_cp_reg(cpu, &cbar);
5066        }
5067    }
5068
5069    /* Generic registers whose values depend on the implementation */
5070    {
5071        ARMCPRegInfo sctlr = {
5072            .name = "SCTLR", .state = ARM_CP_STATE_BOTH,
5073            .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
5074            .access = PL1_RW,
5075            .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s),
5076                                   offsetof(CPUARMState, cp15.sctlr_ns) },
5077            .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
5078            .raw_writefn = raw_write,
5079        };
5080        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
5081            /* Normally we would always end the TB on an SCTLR write, but Linux
5082             * arch/arm/mach-pxa/sleep.S expects two instructions following
5083             * an MMU enable to execute from cache.  Imitate this behaviour.
5084             */
5085            sctlr.type |= ARM_CP_SUPPRESS_TB_END;
5086        }
5087        define_one_arm_cp_reg(cpu, &sctlr);
5088    }
5089}
5090
5091ARMCPU *cpu_arm_init(const char *cpu_model)
5092{
5093    return ARM_CPU(cpu_generic_init(TYPE_ARM_CPU, cpu_model));
5094}
5095
5096void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
5097{
5098    CPUState *cs = CPU(cpu);
5099    CPUARMState *env = &cpu->env;
5100
5101    if (arm_feature(env, ARM_FEATURE_AARCH64)) {
5102        gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg,
5103                                 aarch64_fpu_gdb_set_reg,
5104                                 34, "aarch64-fpu.xml", 0);
5105    } else if (arm_feature(env, ARM_FEATURE_NEON)) {
5106        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
5107                                 51, "arm-neon.xml", 0);
5108    } else if (arm_feature(env, ARM_FEATURE_VFP3)) {
5109        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
5110                                 35, "arm-vfp3.xml", 0);
5111    } else if (arm_feature(env, ARM_FEATURE_VFP)) {
5112        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
5113                                 19, "arm-vfp.xml", 0);
5114    }
5115
5116    if (arm_feature(env, ARM_FEATURE_AARCH64)) {
5117        gdb_register_coprocessor(cs, aarch64_el1_gdb_get_reg,
5118                                 aarch64_el1_gdb_set_reg,
5119                                 5, "aarch64-el1.xml", 0);
5120        if (arm_feature(env, ARM_FEATURE_EL2)) {
5121            gdb_register_coprocessor(cs, aarch64_el2_gdb_get_reg,
5122                                     aarch64_el2_gdb_set_reg,
5123                                     4, "aarch64-el2.xml", 0);
5124        }
5125
5126        if (arm_feature(env, ARM_FEATURE_EL3)) {
5127            gdb_register_coprocessor(cs, aarch64_el3_gdb_get_reg,
5128                                     aarch64_el3_gdb_set_reg,
5129                                     4, "aarch64-el3.xml", 0);
5130        }
5131    } else if (arm_feature(env, ARM_FEATURE_EL3)) {
5132        gdb_register_coprocessor(cs, arm_sys_gdb_get_reg,
5133                                 arm_sys_gdb_set_reg,
5134                                 3, "arm-sys.xml", 0);
5135    }
5136}
5137
5138/* Sort alphabetically by type name, except for "any". */
5139static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b)
5140{
5141    ObjectClass *class_a = (ObjectClass *)a;
5142    ObjectClass *class_b = (ObjectClass *)b;
5143    const char *name_a, *name_b;
5144
5145    name_a = object_class_get_name(class_a);
5146    name_b = object_class_get_name(class_b);
5147    if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) {
5148        return 1;
5149    } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) {
5150        return -1;
5151    } else {
5152        return strcmp(name_a, name_b);
5153    }
5154}
5155
5156static void arm_cpu_list_entry(gpointer data, gpointer user_data)
5157{
5158    ObjectClass *oc = data;
5159    CPUListState *s = user_data;
5160    const char *typename;
5161    char *name;
5162
5163    typename = object_class_get_name(oc);
5164    name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU));
5165    (*s->cpu_fprintf)(s->file, "  %s\n",
5166                      name);
5167    g_free(name);
5168}
5169
5170void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf)
5171{
5172    CPUListState s = {
5173        .file = f,
5174        .cpu_fprintf = cpu_fprintf,
5175    };
5176    GSList *list;
5177
5178    list = object_class_get_list(TYPE_ARM_CPU, false);
5179    list = g_slist_sort(list, arm_cpu_list_compare);
5180    (*cpu_fprintf)(f, "Available CPUs:\n");
5181    g_slist_foreach(list, arm_cpu_list_entry, &s);
5182    g_slist_free(list);
5183#ifdef CONFIG_KVM
5184    /* The 'host' CPU type is dynamically registered only if KVM is
5185     * enabled, so we have to special-case it here:
5186     */
5187    (*cpu_fprintf)(f, "  host (only available in KVM mode)\n");
5188#endif
5189}
5190
5191static void arm_cpu_add_definition(gpointer data, gpointer user_data)
5192{
5193    ObjectClass *oc = data;
5194    CpuDefinitionInfoList **cpu_list = user_data;
5195    CpuDefinitionInfoList *entry;
5196    CpuDefinitionInfo *info;
5197    const char *typename;
5198
5199    typename = object_class_get_name(oc);
5200    info = g_malloc0(sizeof(*info));
5201    info->name = g_strndup(typename,
5202                           strlen(typename) - strlen("-" TYPE_ARM_CPU));
5203
5204    entry = g_malloc0(sizeof(*entry));
5205    entry->value = info;
5206    entry->next = *cpu_list;
5207    *cpu_list = entry;
5208}
5209
5210CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
5211{
5212    CpuDefinitionInfoList *cpu_list = NULL;
5213    GSList *list;
5214
5215    list = object_class_get_list(TYPE_ARM_CPU, false);
5216    g_slist_foreach(list, arm_cpu_add_definition, &cpu_list);
5217    g_slist_free(list);
5218
5219    return cpu_list;
5220}
5221
5222static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
5223                                   void *opaque, int state, int secstate,
5224                                   int crm, int opc1, int opc2)
5225{
5226    /* Private utility function for define_one_arm_cp_reg_with_opaque():
5227     * add a single reginfo struct to the hash table.
5228     */
5229    uint32_t *key = g_new(uint32_t, 1);
5230    ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo));
5231    int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0;
5232    int ns = (secstate & ARM_CP_SECSTATE_NS) ? 1 : 0;
5233
5234    /* Reset the secure state to the specific incoming state.  This is
5235     * necessary as the register may have been defined with both states.
5236     */
5237    r2->secure = secstate;
5238
5239    if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
5240        /* Register is banked (using both entries in array).
5241         * Overwriting fieldoffset as the array is only used to define
5242         * banked registers but later only fieldoffset is used.
5243         */
5244        r2->fieldoffset = r->bank_fieldoffsets[ns];
5245    }
5246
5247    if (state == ARM_CP_STATE_AA32) {
5248        if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
5249            /* If the register is banked then we don't need to migrate or
5250             * reset the 32-bit instance in certain cases:
5251             *
5252             * 1) If the register has both 32-bit and 64-bit instances then we
5253             *    can count on the 64-bit instance taking care of the
5254             *    non-secure bank.
5255             * 2) If ARMv8 is enabled then we can count on a 64-bit version
5256             *    taking care of the secure bank.  This requires that separate
5257             *    32 and 64-bit definitions are provided.
5258             */
5259            if ((r->state == ARM_CP_STATE_BOTH && ns) ||
5260                (arm_feature(&cpu->env, ARM_FEATURE_V8) && !ns)) {
5261                r2->type |= ARM_CP_ALIAS;
5262            }
5263        } else if ((secstate != r->secure) && !ns) {
5264            /* The register is not banked so we only want to allow migration of
5265             * the non-secure instance.
5266             */
5267            r2->type |= ARM_CP_ALIAS;
5268        }
5269
5270        if (r->state == ARM_CP_STATE_BOTH) {
5271            /* We assume it is a cp15 register if the .cp field is left unset.
5272             */
5273            if (r2->cp == 0) {
5274                r2->cp = 15;
5275            }
5276
5277#ifdef HOST_WORDS_BIGENDIAN
5278            if (r2->fieldoffset) {
5279                r2->fieldoffset += sizeof(uint32_t);
5280            }
5281#endif
5282        }
5283    }
5284    if (state == ARM_CP_STATE_AA64) {
5285        /* To allow abbreviation of ARMCPRegInfo
5286         * definitions, we treat cp == 0 as equivalent to
5287         * the value for "standard guest-visible sysreg".
5288         * STATE_BOTH definitions are also always "standard
5289         * sysreg" in their AArch64 view (the .cp value may
5290         * be non-zero for the benefit of the AArch32 view).
5291         */
5292        if (r->cp == 0 || r->state == ARM_CP_STATE_BOTH) {
5293            r2->cp = CP_REG_ARM64_SYSREG_CP;
5294        }
5295        *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm,
5296                                  r2->opc0, opc1, opc2);
5297    } else {
5298        *key = ENCODE_CP_REG(r2->cp, is64, ns, r2->crn, crm, opc1, opc2);
5299    }
5300    if (opaque) {
5301        r2->opaque = opaque;
5302    }
5303    /* reginfo passed to helpers is correct for the actual access,
5304     * and is never ARM_CP_STATE_BOTH:
5305     */
5306    r2->state = state;
5307    /* Make sure reginfo passed to helpers for wildcarded regs
5308     * has the correct crm/opc1/opc2 for this reg, not CP_ANY:
5309     */
5310    r2->crm = crm;
5311    r2->opc1 = opc1;
5312    r2->opc2 = opc2;
5313    /* By convention, for wildcarded registers only the first
5314     * entry is used for migration; the others are marked as
5315     * ALIAS so we don't try to transfer the register
5316     * multiple times. Special registers (ie NOP/WFI) are
5317     * never migratable and not even raw-accessible.
5318     */
5319    if ((r->type & ARM_CP_SPECIAL)) {
5320        r2->type |= ARM_CP_NO_RAW;
5321    }
5322    if (((r->crm == CP_ANY) && crm != 0) ||
5323        ((r->opc1 == CP_ANY) && opc1 != 0) ||
5324        ((r->opc2 == CP_ANY) && opc2 != 0)) {
5325        r2->type |= ARM_CP_ALIAS;
5326    }
5327
5328    /* Check that raw accesses are either forbidden or handled. Note that
5329     * we can't assert this earlier because the setup of fieldoffset for
5330     * banked registers has to be done first.
5331     */
5332    if (!(r2->type & ARM_CP_NO_RAW)) {
5333        assert(!raw_accessors_invalid(r2));
5334    }
5335
5336    /* Overriding of an existing definition must be explicitly
5337     * requested.
5338     */
5339    if (!(r->type & ARM_CP_OVERRIDE)) {
5340        ARMCPRegInfo *oldreg;
5341        oldreg = g_hash_table_lookup(cpu->cp_regs, key);
5342        if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) {
5343            fprintf(stderr, "Register redefined: cp=%d %d bit "
5344                    "crn=%d crm=%d opc1=%d opc2=%d, "
5345                    "was %s, now %s\n", r2->cp, 32 + 32 * is64,
5346                    r2->crn, r2->crm, r2->opc1, r2->opc2,
5347                    oldreg->name, r2->name);
5348            g_assert_not_reached();
5349        }
5350    }
5351    g_hash_table_insert(cpu->cp_regs, key, r2);
5352}
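
/* Illustrative sketch (annotation, not part of the original file): a
 * hypothetical banked cp15 register definition that exercises the
 * secstate handling above.  Index 0 of .bank_fieldoffsets is the secure
 * bank and index 1 the non-secure bank, matching the
 * bank_fieldoffsets[ns] selection in add_cpreg_to_hashtable().  The
 * encoding and the example_s / example_ns state fields are made up:
 *
 *   static const ARMCPRegInfo example_banked = {
 *       .name = "EXAMPLE_BANKED", .cp = 15, .crn = 9, .crm = 0,
 *       .opc1 = 0, .opc2 = 0, .access = PL1_RW,
 *       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.example_s),
 *                              offsetof(CPUARMState, cp15.example_ns) },
 *   };
 *
 * Leaving .secure unset means define_one_arm_cp_reg_with_opaque() below
 * takes the default branch of its secstate switch and registers both a
 * secure and a non-secure instance, each with fieldoffset rewritten to
 * the matching bank entry.
 */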
5353
5354
5355void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
5356                                       const ARMCPRegInfo *r, void *opaque)
5357{
5358    /* Define implementations of coprocessor registers.
5359     * We store these in a hashtable because typically
5360     * there are fewer than 150 registers in a space which
5361     * is 16*16*16*8*8 = 262144 in size.
5362     * Wildcarding is supported for the crm, opc1 and opc2 fields.
5363     * If a register is defined twice then the second definition is
5364     * used, so this can be used to define some generic registers and
5365     * then override them with implementation specific variations.
5366     * At least one of the original and the second definition should
5367     * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
5368     * against accidental use.
5369     *
5370     * The state field defines whether the register is to be
5371     * visible in the AArch32 or AArch64 execution state. If the
5372     * state is set to ARM_CP_STATE_BOTH then we synthesise a
5373     * reginfo structure for the AArch32 view, which sees the lower
5374     * 32 bits of the 64 bit register.
5375     *
5376     * Only registers visible in AArch64 may set r->opc0; opc0 cannot
5377     * be wildcarded. AArch64 registers are always considered to be 64
5378     * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
5379     * the register, if any.
5380     */
5381    int crm, opc1, opc2, state;
5382    int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
5383    int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
5384    int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
5385    int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
5386    int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
5387    int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
5388    /* 64 bit registers have only CRm and Opc1 fields */
5389    assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
5390    /* op0 only exists in the AArch64 encodings */
5391    assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0));
5392    /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */
5393    assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT));
5394    /* The AArch64 pseudocode CheckSystemAccess() specifies that op1
5395     * encodes a minimum access level for the register. We roll this
5396     * runtime check into our general permission check code, so check
5397     * here that the reginfo's specified permissions are strict enough
5398     * to encompass the generic architectural permission check.
5399     */
5400    if (r->state != ARM_CP_STATE_AA32) {
5401        int mask = 0;
5402        switch (r->opc1) {
5403        case 0: case 1: case 2:
5404            /* min_EL EL1 */
5405            mask = PL1_RW;
5406            break;
5407        case 3:
5408            /* min_EL EL0 */
5409            mask = PL0_RW;
5410            break;
5411        case 4:
5412            /* min_EL EL2 */
5413            mask = PL2_RW;
5414            break;
5415        case 5:
5416            /* unallocated encoding, so not possible */
5417            assert(false);
5418            break;
5419        case 6:
5420            /* min_EL EL3 */
5421            mask = PL3_RW;
5422            break;
5423        case 7:
5424            /* min_EL EL1, secure mode only (we don't check the latter) */
5425            mask = PL1_RW;
5426            break;
5427        default:
5428            /* broken reginfo with out-of-range opc1 */
5429            assert(false);
5430            break;
5431        }
5432        /* assert our permissions are not too lax (stricter is fine) */
5433        assert((r->access & ~mask) == 0);
5434    }
5435
5436    /* Check that the register definition has enough info to handle
5437     * reads and writes if they are permitted.
5438     */
5439    if (!(r->type & (ARM_CP_SPECIAL|ARM_CP_CONST))) {
5440        if (r->access & PL3_R) {
5441            assert((r->fieldoffset ||
5442                   (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
5443                   r->readfn);
5444        }
5445        if (r->access & PL3_W) {
5446            assert((r->fieldoffset ||
5447                   (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
5448                   r->writefn);
5449        }
5450    }
5451    /* Bad type field probably means missing sentinel at end of reg list */
5452    assert(cptype_valid(r->type));
5453    for (crm = crmmin; crm <= crmmax; crm++) {
5454        for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
5455            for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
5456                for (state = ARM_CP_STATE_AA32;
5457                     state <= ARM_CP_STATE_AA64; state++) {
5458                    if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
5459                        continue;
5460                    }
5461                    if (state == ARM_CP_STATE_AA32) {
5462                        /* Under AArch32 CP registers can be common
5463                         * (same for secure and non-secure world) or banked.
5464                         */
5465                        switch (r->secure) {
5466                        case ARM_CP_SECSTATE_S:
5467                        case ARM_CP_SECSTATE_NS:
5468                            add_cpreg_to_hashtable(cpu, r, opaque, state,
5469                                                   r->secure, crm, opc1, opc2);
5470                            break;
5471                        default:
5472                            add_cpreg_to_hashtable(cpu, r, opaque, state,
5473                                                   ARM_CP_SECSTATE_S,
5474                                                   crm, opc1, opc2);
5475                            add_cpreg_to_hashtable(cpu, r, opaque, state,
5476                                                   ARM_CP_SECSTATE_NS,
5477                                                   crm, opc1, opc2);
5478                            break;
5479                        }
5480                    } else {
5481                        /* AArch64 registers get mapped to non-secure instance
5482                         * of AArch32 */
5483                        add_cpreg_to_hashtable(cpu, r, opaque, state,
5484                                               ARM_CP_SECSTATE_NS,
5485                                               crm, opc1, opc2);
5486                    }
5487                }
5488            }
5489        }
5490    }
5491}
5492
5493void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
5494                                    const ARMCPRegInfo *regs, void *opaque)
5495{
5496    /* Define a whole list of registers */
5497    const ARMCPRegInfo *r;
5498    for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
5499        define_one_arm_cp_reg_with_opaque(cpu, r, opaque);
5500    }
5501}
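
/* Illustrative sketch (annotation, not part of the original file):
 * defining a small hypothetical register list.  The terminating entry
 * carries ARM_CP_SENTINEL, which is exactly what the loop above stops
 * on.  The encoding and .resetvalue are made up; .resetvalue is assumed
 * to be the value an ARM_CP_CONST register reads back as:
 *
 *   static const ARMCPRegInfo example_cp_reginfo[] = {
 *       // opc2 is wildcarded, so this single entry matches all eight
 *       // opc2 encodings of the (crn, crm, opc1) tuple.
 *       { .name = "EXAMPLE_ID", .cp = 15, .crn = 9, .crm = 0,
 *         .opc1 = 0, .opc2 = CP_ANY,
 *         .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0x42 },
 *       { .type = ARM_CP_SENTINEL }
 *   };
 *
 *   define_arm_cp_regs_with_opaque(cpu, example_cp_reginfo, NULL);
 */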
5502
5503const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
5504{
5505    return g_hash_table_lookup(cpregs, &encoded_cp);
5506}
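
/* Illustrative sketch (annotation, not part of the original file):
 * looking up the reginfo for a 32-bit non-secure cp15 register.  The
 * key is built with the same ENCODE_CP_REG() argument order used when
 * the register was inserted in add_cpreg_to_hashtable(); the encoding
 * values themselves are hypothetical:
 *
 *   uint32_t key = ENCODE_CP_REG(15, 0, 1, 9, 0, 0, 0);
 *                             //  cp is64 ns crn crm opc1 opc2
 *   const ARMCPRegInfo *ri = get_arm_cp_reginfo(cpu->cp_regs, key);
 *   if (ri && !(ri->type & ARM_CP_NO_RAW)) {
 *       // raw access via fieldoffset or the read/write hooks is valid
 *   }
 */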
5507
5508void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
5509                         uint64_t value)
5510{
5511    /* Helper coprocessor write function for write-ignore registers */
5512}
5513
5514uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
5515{
5516    /* Helper coprocessor read function for read-as-zero registers */
5517    return 0;
5518}
5519
5520void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
5521{
5522    /* Helper coprocessor reset function for do-nothing-on-reset registers */
5523}
5524
5525static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
5526{
5527    /* Return true if it is not valid for us to switch to
5528     * this CPU mode (ie all the UNPREDICTABLE cases in
5529     * the ARM ARM CPSRWriteByInstr pseudocode).
5530     */
5531
5532    /* Changes to or from Hyp via MSR and CPS are illegal. */
5533    if (write_type == CPSRWriteByInstr &&
5534        ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP ||
5535         mode == ARM_CPU_MODE_HYP)) {
5536        return 1;
5537    }
5538
5539    switch (mode) {
5540    case ARM_CPU_MODE_USR:
5541        return 0;
5542    case ARM_CPU_MODE_SYS:
5543    case ARM_CPU_MODE_SVC:
5544    case ARM_CPU_MODE_ABT:
5545    case ARM_CPU_MODE_UND:
5546    case ARM_CPU_MODE_IRQ:
5547    case ARM_CPU_MODE_FIQ:
5548        /* Note that we don't implement the IMPDEF NSACR.RFR which in v7
5549         * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.)
5550         */
5551        /* If HCR.TGE is set then changes from Monitor to NS PL1 via MSR
5552         * and CPS are treated as illegal mode changes.
5553         */
5554        if (write_type == CPSRWriteByInstr &&
5555            (env->cp15.hcr_el2 & HCR_TGE) &&
5556            (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON &&
5557            !arm_is_secure_below_el3(env)) {
5558            return 1;
5559        }
5560        return 0;
5561    case ARM_CPU_MODE_HYP:
5562        return !arm_feature(env, ARM_FEATURE_EL2)
5563            || arm_current_el(env) < 2 || arm_is_secure(env);
5564    case ARM_CPU_MODE_MON:
5565        return arm_current_el(env) < 3;
5566    default:
5567        return 1;
5568    }
5569}
5570
5571uint32_t cpsr_read(CPUARMState *env)
5572{
5573    int ZF;
5574    ZF = (env->ZF == 0);
5575    return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
5576        (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
5577        | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
5578        | ((env->condexec_bits & 0xfc) << 8)
5579        | (env->GE << 16) | (env->daif & CPSR_AIF);
5580}
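
/* Worked example (annotation, not part of the original file): with the
 * cached flags NF = 0x80000000 (N set), ZF != 0 (Z clear), CF = 1 and
 * VF = 0, the expression above assembles bits [31:28] = 0b1010, i.e. a
 * CPSR value with N and C set and Z and V clear.
 */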
5581
5582void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
5583                CPSRWriteType write_type)
5584{
5585    uint32_t changed_daif;
5586
5587    if (mask & CPSR_NZCV) {
5588        env->ZF = (~val) & CPSR_Z;
5589        env->NF = val;
5590        env->CF = (val >> 29) & 1;
5591        env->VF = (val << 3) & 0x80000000;
5592    }
5593    if (mask & CPSR_Q)
5594        env->QF = ((val & CPSR_Q) != 0);
5595    if (mask & CPSR_T)
5596        env->thumb = ((val & CPSR_T) != 0);
5597    if (mask & CPSR_IT_0_1) {
5598        env->condexec_bits &= ~3;
5599        env->condexec_bits |= (val >> 25) & 3;
5600    }
5601    if (mask & CPSR_IT_2_7) {
5602        env->condexec_bits &= 3;
5603        env->condexec_bits |= (val >> 8) & 0xfc;
5604    }
5605    if (mask & CPSR_GE) {
5606        env->GE = (val >> 16) & 0xf;
5607    }
5608
5609    /* In a V7 implementation that includes the security extensions but does
5610     * not include Virtualization Extensions the SCR.FW and SCR.AW bits control
5611     * whether non-secure software is allowed to change the CPSR_F and CPSR_A
5612     * bits respectively.
5613     *
5614     * In a V8 implementation, it is permitted for privileged software to
5615     * change the CPSR A/F bits regardless of the SCR.AW/FW bits.
5616     */
5617    if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) &&
5618        arm_feature(env, ARM_FEATURE_EL3) &&
5619        !arm_feature(env, ARM_FEATURE_EL2) &&
5620        !arm_is_secure(env)) {
5621
5622        changed_daif = (env->daif ^ val) & mask;
5623
5624        if (changed_daif & CPSR_A) {
5625            /* Check to see if we are allowed to change the masking of async
5626             * abort exceptions from a non-secure state.
5627             */
5628            if (!(env->cp15.scr_el3 & SCR_AW)) {
5629                qemu_log_mask(LOG_GUEST_ERROR,
5630                              "Ignoring attempt to switch CPSR_A flag from "
5631                              "non-secure world with SCR.AW bit clear\n");
5632                mask &= ~CPSR_A;
5633            }
5634        }
5635
5636        if (changed_daif & CPSR_F) {
5637            /* Check to see if we are allowed to change the masking of FIQ
5638             * exceptions from a non-secure state.
5639             */
5640            if (!(env->cp15.scr_el3 & SCR_FW)) {
5641                qemu_log_mask(LOG_GUEST_ERROR,
5642                              "Ignoring attempt to switch CPSR_F flag from "
5643                              "non-secure world with SCR.FW bit clear\n");
5644                mask &= ~CPSR_F;
5645            }
5646
5647            /* Check whether non-maskable FIQ (NMFI) support is enabled.
5648             * If this bit is set software is not allowed to mask
5649             * FIQs, but is allowed to set CPSR_F to 0.
5650             */
5651            if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) &&
5652                (val & CPSR_F)) {
5653                qemu_log_mask(LOG_GUEST_ERROR,
5654                              "Ignoring attempt to enable CPSR_F flag "
5655                              "(non-maskable FIQ [NMFI] support enabled)\n");
5656                mask &= ~CPSR_F;
5657            }
5658        }
5659    }
5660
5661    env->daif &= ~(CPSR_AIF & mask);
5662    env->daif |= val & CPSR_AIF & mask;
5663
5664    if (write_type != CPSRWriteRaw &&
5665        ((env->uncached_cpsr ^ val) & mask & CPSR_M)) {
5666        if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) {
5667            /* Note that we can only get here in USR mode if this is a
5668             * gdb stub write; for this case we follow the architectural
5669             * behaviour for guest writes in USR mode of ignoring an attempt
5670             * to switch mode. (Those are caught by translate.c for writes
5671             * triggered by guest instructions.)
5672             */
5673            mask &= ~CPSR_M;
5674        } else if (bad_mode_switch(env, val & CPSR_M, write_type)) {
5675            /* Attempt to switch to an invalid mode: this is UNPREDICTABLE in
5676             * v7, and has defined behaviour in v8:
5677             *  + leave CPSR.M untouched
5678             *  + allow changes to the other CPSR fields
5679             *  + set PSTATE.IL
5680             * For user changes via the GDB stub, we don't set PSTATE.IL,
5681             * as this would be unnecessarily harsh for a user error.
5682             */
5683            mask &= ~CPSR_M;
5684            if (write_type != CPSRWriteByGDBStub &&
5685                arm_feature(env, ARM_FEATURE_V8)) {
5686                mask |= CPSR_IL;
5687                val |= CPSR_IL;
5688            }
5689        } else {
5690            switch_mode(env, val & CPSR_M);
5691        }
5692    }
5693    mask &= ~CACHED_CPSR_BITS;
5694    env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
5695}
5696
5697/* Sign/zero extend */
5698uint32_t HELPER(sxtb16)(uint32_t x)
5699{
5700    uint32_t res;
5701    res = (uint16_t)(int8_t)x;
5702    res |= (uint32_t)(int8_t)(x >> 16) << 16;
5703    return res;
5704}
5705
5706uint32_t HELPER(uxtb16)(uint32_t x)
5707{
5708    uint32_t res;
5709    res = (uint16_t)(uint8_t)x;
5710    res |= (uint32_t)(uint8_t)(x >> 16) << 16;
5711    return res;
5712}
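
/* Worked example (annotation, not part of the original file): both
 * helpers extract bytes 0 and 2 of the input and extend them to
 * halfwords.  For x = 0x00800080:
 *   sxtb16(x) sign-extends each 0x80 byte to 0xff80, giving 0xff80ff80
 *   uxtb16(x) zero-extends each 0x80 byte to 0x0080, giving 0x00800080
 */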
5713
5714uint32_t HELPER(clz)(uint32_t x)
5715{
5716    return clz32(x);
5717}
5718
5719int32_t HELPER(sdiv)(int32_t num, int32_t den)
5720{
5721    if (den == 0)
5722      return 0;
5723    if (num == INT_MIN && den == -1)
5724      return INT_MIN;
5725    return num / den;
5726}
5727
5728uint32_t HELPER(udiv)(uint32_t num, uint32_t den)
5729{
5730    if (den == 0)
5731      return 0;
5732    return num / den;
5733}
5734
5735uint32_t HELPER(rbit)(uint32_t x)
5736{
5737    return revbit32(x);
5738}
5739
5740#if defined(CONFIG_USER_ONLY)
5741
5742/* These should probably raise undefined insn exceptions.  */
5743void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
5744{
5745    ARMCPU *cpu = arm_env_get_cpu(env);
5746
5747    cpu_abort(CPU(cpu), "v7m_msr %d\n", reg);
5748}
5749
5750uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
5751{
5752    ARMCPU *cpu = arm_env_get_cpu(env);
5753
5754    cpu_abort(CPU(cpu), "v7m_mrs %d\n", reg);
5755    return 0;
5756}
5757
5758void switch_mode(CPUARMState *env, int mode)
5759{
5760    ARMCPU *cpu = arm_env_get_cpu(env);
5761
5762    if (mode != ARM_CPU_MODE_USR) {
5763        cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
5764    }
5765}
5766
5767uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
5768                                 uint32_t cur_el, bool secure)
5769{
5770    return 1;
5771}
5772
5773void aarch64_sync_64_to_32(CPUARMState *env)
5774{
5775    g_assert_not_reached();
5776}
5777
5778#else
5779
5780void switch_mode(CPUARMState *env, int mode)
5781{
5782    int old_mode;
5783    int i;
5784
5785    old_mode = env->uncached_cpsr & CPSR_M;
5786    if (mode == old_mode)
5787        return;
5788
5789    if (old_mode == ARM_CPU_MODE_FIQ) {
5790        memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
5791        memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
5792    } else if (mode == ARM_CPU_MODE_FIQ) {
5793        memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
5794        memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
5795    }
5796
5797    i = bank_number(old_mode);
5798    env->banked_r13[i] = env->regs[13];
5799    env->banked_r14[i] = env->regs[14];
5800    env->banked_spsr[i] = env->spsr;
5801
5802    i = bank_number(mode);
5803    env->regs[13] = env->banked_r13[i];
5804    env->regs[14] = env->banked_r14[i];
5805    env->spsr = env->banked_spsr[i];
5806}
5807
5808/* Return 0 if WFI can go ahead, otherwise the target EL to trap into.  */
5809unsigned int arm_wfi_needs_trap(CPUARMState *env)
5810{
5811    unsigned int cur_el = arm_current_el(env);
5812    unsigned int target_el = 0;
5813
5814    switch (cur_el) {
5815    case 0:
5816        if (!(env->cp15.sctlr_el[1] & SCTLR_nTWI)) {
5817            target_el = 1;
5818        } /* fall through */
5819    case 1:
5820        if (arm_feature(env, ARM_FEATURE_EL2)
5821            && (env->cp15.hcr_el2 & HCR_TWI)) {
5822            target_el = 2;
5823        } /* fall through */
5824    case 2:
5825        if (arm_feature(env, ARM_FEATURE_EL3)
5826            && (env->cp15.scr_el3 & SCR_TWI)) {
5827            target_el = 3;
5828        }
5829        break;
5830    }
5831    return target_el;
5832}
5833
5834unsigned int arm_wfe_needs_trap(CPUARMState *env)
5835{
5836    unsigned int cur_el = arm_current_el(env);
5837    unsigned int target_el = 0;
5838
5839    switch (cur_el) {
5840    case 0:
5841        if (!(env->cp15.sctlr_el[1] & SCTLR_nTWE)) {
5842            target_el = 1;
5843        } /* fall through */
5844    case 1:
5845        if (arm_feature(env, ARM_FEATURE_EL2)
5846            && (env->cp15.hcr_el2 & HCR_TWE)) {
5847            target_el = 2;
5848        } /* fall through */
5849    case 2:
5850        if (arm_feature(env, ARM_FEATURE_EL3)
5851            && (env->cp15.scr_el3 & SCR_TWE)) {
5852            target_el = 3;
5853        }
5854        break;
5855    }
5856    return target_el;
5857}
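
/* Example (annotation, not part of the original file): for a WFI
 * executed at EL0 with SCTLR_EL1.nTWI == 0, HCR_EL2.TWI == 1 and
 * SCR_EL3.TWI == 0, the deliberate case fall-through above first
 * selects EL1 and then overwrites it with EL2, so the trap is taken to
 * the highest exception level that requested it.
 */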
5858
5859/* Physical Interrupt Target EL Lookup Table
5860 *
5861 * [ From ARM ARM section G1.13.4 (Table G1-15) ]
5862 *
5863 * The below multi-dimensional table is used for looking up the target
5864 * exception level given numerous condition criteria.  Specifically, the
5865 * target EL is based on SCR and HCR routing controls as well as the
5866 * currently executing EL and secure state.
5867 *
5868 *    Dimensions:
5869 *    target_el_table[2][2][2][2][2][4]
5870 *                    |  |  |  |  |  +--- Current EL
5871 *                    |  |  |  |  +------ Non-secure(0)/Secure(1)
5872 *                    |  |  |  +--------- HCR mask override
5873 *                    |  |  +------------ SCR exec state control
5874 *                    |  +--------------- SCR mask override
5875 *                    +------------------ 32-bit(0)/64-bit(1) EL3
5876 *
5877 *    The table values are as such:
5878 *    0-3 = EL0-EL3
5879 *     -1 = Cannot occur
5880 *
5881 * The ARM ARM target EL table includes entries indicating that an "exception
5882 * is not taken".  The two cases where this is applicable are:
5883 *    1) An exception is taken from EL3 but the SCR does not have the exception
5884 *    routed to EL3.
5885 *    2) An exception is taken from EL2 but the HCR does not have the exception
5886 *    routed to EL2.
5887 * In these two cases, the table below contains a target of EL1.  This value is
5888 * returned as it is expected that the consumer of the table data will check
5889 * for "target EL >= current EL" to ensure the exception is not taken.
5890 *
5891 *            SCR     HCR
5892 *         64  EA     AMO                 From
5893 *        BIT IRQ     IMO      Non-secure         Secure
5894 *        EL3 FIQ  RW FMO   EL0 EL1 EL2 EL3   EL0 EL1 EL2 EL3
5895 */
5896static const int8_t target_el_table[2][2][2][2][2][4] = {
5897    {{{{/* 0   0   0   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
5898       {/* 0   0   0   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},
5899      {{/* 0   0   1   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
5900       {/* 0   0   1   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},},
5901     {{{/* 0   1   0   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
5902       {/* 0   1   0   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},
5903      {{/* 0   1   1   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
5904       {/* 0   1   1   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},},},
5905    {{{{/* 1   0   0   0 */{ 1,  1,  2, -1 },{ 1,  1, -1,  1 },},
5906       {/* 1   0   0   1 */{ 2,  2,  2, -1 },{ 1,  1, -1,  1 },},},
5907      {{/* 1   0   1   0 */{ 1,  1,  1, -1 },{ 1,  1, -1,  1 },},
5908       {/* 1   0   1   1 */{ 2,  2,  2, -1 },{ 1,  1, -1,  1 },},},},
5909     {{{/* 1   1   0   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
5910       {/* 1   1   0   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},
5911      {{/* 1   1   1   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
5912       {/* 1   1   1   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},},},
5913};
5914
5915/*
5916 * Determine the target EL for physical exceptions
5917 */
5918uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
5919                                 uint32_t cur_el, bool secure)
5920{
5921    CPUARMState *env = cs->env_ptr;
5922    int rw;
5923    int scr;
5924    int hcr;
5925    int target_el;
5926    /* Is the highest EL AArch64? */
5927    int is64 = arm_feature(env, ARM_FEATURE_AARCH64);
5928
5929    if (arm_feature(env, ARM_FEATURE_EL3)) {
5930        rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW);
5931    } else {
5932        /* Either EL2 is the highest EL (and so the EL2 register width
5933         * is given by is64); or there is no EL2 or EL3, in which case
5934         * the value of 'rw' does not affect the table lookup anyway.
5935         */
5936        rw = is64;
5937    }
5938
5939    switch (excp_idx) {
5940    case EXCP_IRQ:
5941        scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ);
5942        hcr = ((env->cp15.hcr_el2 & HCR_IMO) == HCR_IMO);
5943        break;
5944    case EXCP_FIQ:
5945        scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ);
5946        hcr = ((env->cp15.hcr_el2 & HCR_FMO) == HCR_FMO);
5947        break;
5948    default:
5949        scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA);
5950        hcr = ((env->cp15.hcr_el2 & HCR_AMO) == HCR_AMO);
5951        break;
5952    }
5953
5954    /* If HCR.TGE is set then HCR is treated as being 1 */
5955    hcr |= ((env->cp15.hcr_el2 & HCR_TGE) == HCR_TGE);
5956
5957    /* Perform a table-lookup for the target EL given the current state */
5958    target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el];
5959
5960    assert(target_el > 0);
5961
5962    return target_el;
5963}
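
/* Worked example (annotation, not part of the original file): a
 * physical IRQ taken from non-secure EL0 on a CPU whose EL3 is AArch64
 * (is64 = 1, rw = 1), with SCR_EL3.IRQ == 0 and HCR_EL2.IMO == 0,
 * indexes target_el_table[1][0][1][0][0][0] == 1, so the interrupt is
 * taken to EL1.
 */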
5964
5965static void v7m_push(CPUARMState *env, uint32_t val)
5966{
5967    CPUState *cs = CPU(arm_env_get_cpu(env));
5968
5969    env->regs[13] -= 4;
5970    stl_phys(cs->as, env->regs[13], val);
5971}
5972
5973static uint32_t v7m_pop(CPUARMState *env)
5974{
5975    CPUState *cs = CPU(arm_env_get_cpu(env));
5976    uint32_t val;
5977
5978    val = ldl_phys(cs->as, env->regs[13]);
5979    env->regs[13] += 4;
5980    return val;
5981}
5982
5983/* Switch to V7M main or process stack pointer.  */
5984static void switch_v7m_sp(CPUARMState *env, int process)
5985{
5986    uint32_t tmp;
5987    if (env->v7m.current_sp != process) {
5988        tmp = env->v7m.other_sp;
5989        env->v7m.other_sp = env->regs[13];
5990        env->regs[13] = tmp;
5991        env->v7m.current_sp = process;
5992    }
5993}
5994
5995static void do_v7m_exception_exit(CPUARMState *env)
5996{
5997    uint32_t type;
5998    uint32_t xpsr;
5999
6000    type = env->regs[15];
6001    if (env->v7m.exception != 0)
6002        armv7m_nvic_complete_irq(env->nvic, env->v7m.exception);
6003
6004    /* Switch to the target stack.  */
6005    switch_v7m_sp(env, (type & 4) != 0);
6006    /* Pop registers.  */
6007    env->regs[0] = v7m_pop(env);
6008    env->regs[1] = v7m_pop(env);
6009    env->regs[2] = v7m_pop(env);
6010    env->regs[3] = v7m_pop(env);
6011    env->regs[12] = v7m_pop(env);
6012    env->regs[14] = v7m_pop(env);
6013    env->regs[15] = v7m_pop(env);
6014    if (env->regs[15] & 1) {
6015        qemu_log_mask(LOG_GUEST_ERROR,
6016                      "M profile return from interrupt with misaligned "
6017                      "PC is UNPREDICTABLE\n");
6018        /* Actual hardware seems to ignore the lsbit, and there are several
6019         * RTOSes out there which incorrectly assume the r15 in the stack
6020         * frame should be a Thumb-style "lsbit indicates ARM/Thumb" value.
6021         */
6022        env->regs[15] &= ~1U;
6023    }
6024    xpsr = v7m_pop(env);
6025    xpsr_write(env, xpsr, 0xfffffdff);
6026    /* Undo stack alignment.  */
6027    if (xpsr & 0x200)
6028        env->regs[13] |= 4;
6029    /* ??? The exception return type specifies Thread/Handler mode.  However
6030       this is also implied by the xPSR value. Not sure what to do
6031       if there is a mismatch.  */
6032    /* ??? Likewise for mismatches between the CONTROL register and the stack
6033       pointer.  */
6034}
6035
6036void arm_v7m_cpu_do_interrupt(CPUState *cs)
6037{
6038    ARMCPU *cpu = ARM_CPU(cs);
6039    CPUARMState *env = &cpu->env;
6040    uint32_t xpsr = xpsr_read(env);
6041    uint32_t lr;
6042    uint32_t addr;
6043
6044    arm_log_exception(cs->exception_index);
6045
6046    lr = 0xfffffff1;
6047    if (env->v7m.current_sp)
6048        lr |= 4;
6049    if (env->v7m.exception == 0)
6050        lr |= 8;
6051
6052    /* For exceptions we just mark as pending on the NVIC, and let that
6053       handle it.  */
6054    /* TODO: Need to escalate if the current priority is higher than the
6055       one we're raising.  */
6056    switch (cs->exception_index) {
6057    case EXCP_UDEF:
6058        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
6059        return;
6060    case EXCP_SWI:
6061        /* The PC already points to the next instruction.  */
6062        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC);
6063        return;
6064    case EXCP_PREFETCH_ABORT:
6065    case EXCP_DATA_ABORT:
6066        /* TODO: if we implemented the MPU registers, this is where we
6067         * should set the MMFAR, etc from exception.fsr and exception.vaddress.
6068         */
6069        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM);
6070        return;
6071    case EXCP_BKPT:
6072        if (semihosting_enabled()) {
6073            int nr;
6074            nr = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env)) & 0xff;
6075            if (nr == 0xab) {
6076                env->regs[15] += 2;
6077                qemu_log_mask(CPU_LOG_INT,
6078                              "...handling as semihosting call 0x%x\n",
6079                              env->regs[0]);
6080                env->regs[0] = do_arm_semihosting(env);
6081                return;
6082            }
6083        }
6084        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG);
6085        return;
6086    case EXCP_IRQ:
6087        env->v7m.exception = armv7m_nvic_acknowledge_irq(env->nvic);
6088        break;
6089    case EXCP_EXCEPTION_EXIT:
6090        do_v7m_exception_exit(env);
6091        return;
6092    default:
6093        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
6094        return; /* Never happens.  Keep compiler happy.  */
6095    }
6096
6097    /* Align stack pointer.  */
6098    /* ??? Should only do this if Configuration Control Register
6099       STACKALIGN bit is set.  */
6100    if (env->regs[13] & 4) {
6101        env->regs[13] -= 4;
6102        xpsr |= 0x200;
6103    }
6104    /* Switch to the handler mode.  */
6105    v7m_push(env, xpsr);
6106    v7m_push(env, env->regs[15]);
6107    v7m_push(env, env->regs[14]);
6108    v7m_push(env, env->regs[12]);
6109    v7m_push(env, env->regs[3]);
6110    v7m_push(env, env->regs[2]);
6111    v7m_push(env, env->regs[1]);
6112    v7m_push(env, env->regs[0]);
6113    switch_v7m_sp(env, 0);
6114    /* Clear IT bits */
6115    env->condexec_bits = 0;
6116    env->regs[14] = lr;
6117    addr = ldl_phys(cs->as, env->v7m.vecbase + env->v7m.exception * 4);
6118    env->regs[15] = addr & 0xfffffffe;
6119    env->thumb = addr & 1;
6120}
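
/* Annotation (not part of the original file): the EXC_RETURN magic
 * values the code above normally leaves in lr are
 *   0xfffffff1  return to handler mode (another exception is active)
 *   0xfffffff9  return to thread mode using the main stack
 *   0xfffffffd  return to thread mode using the process stack
 * On exception return, do_v7m_exception_exit() tests bit 2 of this
 * value to pick the stack pointer to unstack from.
 */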
6121
6122/* Function used to synchronize QEMU's AArch64 register set with AArch32
6123 * register set.  This is necessary when switching between AArch32 and AArch64
6124 * execution state.
6125 */
6126void aarch64_sync_32_to_64(CPUARMState *env)
6127{
6128    int i;
6129    uint32_t mode = env->uncached_cpsr & CPSR_M;
6130
6131    /* We can blanket copy R[0:7] to X[0:7] */
6132    for (i = 0; i < 8; i++) {
6133        env->xregs[i] = env->regs[i];
6134    }
6135
6136    /* Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12.
6137     * Otherwise, they come from the banked user regs.
6138     */
6139    if (mode == ARM_CPU_MODE_FIQ) {
6140        for (i = 8; i < 13; i++) {
6141            env->xregs[i] = env->usr_regs[i - 8];
6142        }
6143    } else {
6144        for (i = 8; i < 13; i++) {
6145            env->xregs[i] = env->regs[i];
6146        }
6147    }
6148
6149    /* Registers x13-x23 are the various mode SP and FP registers. Registers
6150     * r13 and r14 are only copied if we are in that mode, otherwise we copy
6151     * from the mode banked register.
6152     */
6153    if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
6154        env->xregs[13] = env->regs[13];
6155        env->xregs[14] = env->regs[14];
6156    } else {
6157        env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)];
6158        /* HYP is an exception in that it is copied from r14 */
6159        if (mode == ARM_CPU_MODE_HYP) {
6160            env->xregs[14] = env->regs[14];
6161        } else {
6162            env->xregs[14] = env->banked_r14[bank_number(ARM_CPU_MODE_USR)];
6163        }
6164    }
6165
6166    if (mode == ARM_CPU_MODE_HYP) {
6167        env->xregs[15] = env->regs[13];
6168    } else {
6169        env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)];
6170    }
6171
6172    if (mode == ARM_CPU_MODE_IRQ) {
6173        env->xregs[16] = env->regs[14];
6174        env->xregs[17] = env->regs[13];
6175    } else {
6176        env->xregs[16] = env->banked_r14[bank_number(ARM_CPU_MODE_IRQ)];
6177        env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)];
6178    }
6179
6180    if (mode == ARM_CPU_MODE_SVC) {
6181        env->xregs[18] = env->regs[14];
6182        env->xregs[19] = env->regs[13];
6183    } else {
6184        env->xregs[18] = env->banked_r14[bank_number(ARM_CPU_MODE_SVC)];
6185        env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)];
6186    }
6187
6188    if (mode == ARM_CPU_MODE_ABT) {
6189        env->xregs[20] = env->regs[14];
6190        env->xregs[21] = env->regs[13];
6191    } else {
6192        env->xregs[20] = env->banked_r14[bank_number(ARM_CPU_MODE_ABT)];
6193        env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)];
6194    }
6195
6196    if (mode == ARM_CPU_MODE_UND) {
6197        env->xregs[22] = env->regs[14];
6198        env->xregs[23] = env->regs[13];
6199    } else {
6200        env->xregs[22] = env->banked_r14[bank_number(ARM_CPU_MODE_UND)];
6201        env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)];
6202    }
6203
6204    /* Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
6205     * mode, then we can copy from r8-r14.  Otherwise, we copy from the
6206     * FIQ bank for r8-r14.
6207     */
6208    if (mode == ARM_CPU_MODE_FIQ) {
6209        for (i = 24; i < 31; i++) {
6210            env->xregs[i] = env->regs[i - 16];   /* X[24:30] <- R[8:14] */
6211        }
6212    } else {
6213        for (i = 24; i < 29; i++) {
6214            env->xregs[i] = env->fiq_regs[i - 24];
6215        }
6216        env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)];
6217        env->xregs[30] = env->banked_r14[bank_number(ARM_CPU_MODE_FIQ)];
6218    }
6219
6220    env->pc = env->regs[15];
6221}
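
/* Annotation (not part of the original file): summary of the fixed
 * AArch32 -> AArch64 register mapping implemented above:
 *   x0-x7    r0-r7             x16/x17  LR_irq / SP_irq
 *   x8-x12   user r8-r12       x18/x19  LR_svc / SP_svc
 *   x13/x14  SP_usr / LR_usr   x20/x21  LR_abt / SP_abt
 *   x15      SP_hyp            x22/x23  LR_und / SP_und
 *   x24-x30  FIQ r8-r12, SP_fiq, LR_fiq
 * aarch64_sync_64_to_32() below applies the same mapping in reverse.
 */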
6222
6223/* Function used to synchronize QEMU's AArch32 register set with AArch64
6224 * register set.  This is necessary when switching between AArch32 and AArch64
6225 * execution state.
6226 */
6227void aarch64_sync_64_to_32(CPUARMState *env)
6228{
6229    int i;
6230    uint32_t mode = env->uncached_cpsr & CPSR_M;
6231
6232    /* We can blanket copy X[0:7] to R[0:7] */
6233    for (i = 0; i < 8; i++) {
6234        env->regs[i] = env->xregs[i];
6235    }
6236
6237    /* Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12.
6238     * Otherwise, we copy x8-x12 into the banked user regs.
6239     */
6240    if (mode == ARM_CPU_MODE_FIQ) {
6241        for (i = 8; i < 13; i++) {
6242            env->usr_regs[i - 8] = env->xregs[i];
6243        }
6244    } else {
6245        for (i = 8; i < 13; i++) {
6246            env->regs[i] = env->xregs[i];
6247        }
6248    }
6249
6250    /* Registers r13 & r14 depend on the current mode.
6251     * If we are in a given mode, we copy the corresponding x registers to r13
6252     * and r14.  Otherwise, we copy the x register to the banked r13 and r14
6253     * for the mode.
6254     */
6255    if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
6256        env->regs[13] = env->xregs[13];
6257        env->regs[14] = env->xregs[14];
6258    } else {
6259        env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13];
6260
6261        /* HYP is an exception in that it does not have its own banked r14 but
6262         * shares the USR r14
6263         */
6264        if (mode == ARM_CPU_MODE_HYP) {
6265            env->regs[14] = env->xregs[14];
6266        } else {
6267            env->banked_r14[bank_number(ARM_CPU_MODE_USR)] = env->xregs[14];
6268        }
6269    }
6270
6271    if (mode == ARM_CPU_MODE_HYP) {
6272        env->regs[13] = env->xregs[15];
6273    } else {
6274        env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15];
6275    }
6276
6277    if (mode == ARM_CPU_MODE_IRQ) {
6278        env->regs[14] = env->xregs[16];
6279        env->regs[13] = env->xregs[17];
6280    } else {
6281        env->banked_r14[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16];
6282        env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17];
6283    }
6284
6285    if (mode == ARM_CPU_MODE_SVC) {
6286        env->regs[14] = env->xregs[18];
6287        env->regs[13] = env->xregs[19];
6288    } else {
6289        env->banked_r14[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18];
6290        env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19];
6291    }
6292
6293    if (mode == ARM_CPU_MODE_ABT) {
6294        env->regs[14] = env->xregs[20];
6295        env->regs[13] = env->xregs[21];
6296    } else {
6297        env->banked_r14[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20];
6298        env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21];
6299    }
6300
6301    if (mode == ARM_CPU_MODE_UND) {
6302        env->regs[14] = env->xregs[22];
6303        env->regs[13] = env->xregs[23];
6304    } else {
6305        env->banked_r14[bank_number(ARM_CPU_MODE_UND)] = env->xregs[22];
6306        env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23];
6307    }
6308
6309    /* Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
6310     * mode, then we can copy to r8-r14.  Otherwise, we copy to the
6311     * FIQ bank for r8-r14.
6312     */
6313    if (mode == ARM_CPU_MODE_FIQ) {
6314        for (i = 24; i < 31; i++) {
6315            env->regs[i - 16] = env->xregs[i];   /* X[24:30] -> R[8:14] */
6316        }
6317    } else {
6318        for (i = 24; i < 29; i++) {
6319            env->fiq_regs[i - 24] = env->xregs[i];
6320        }
6321        env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29];
6322        env->banked_r14[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30];
6323    }
6324
6325    env->regs[15] = env->pc;
6326}
6327
6328static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
6329{
6330    ARMCPU *cpu = ARM_CPU(cs);
6331    CPUARMState *env = &cpu->env;
6332    uint32_t addr;
6333    uint32_t mask;
6334    int new_mode;
6335    uint32_t offset;
6336    uint32_t moe;
6337
6338    /* If this is a debug exception we must update the DBGDSCR.MOE bits */
6339    switch (env->exception.syndrome >> ARM_EL_EC_SHIFT) {
6340    case EC_BREAKPOINT:
6341    case EC_BREAKPOINT_SAME_EL:
6342        moe = 1;
6343        break;
6344    case EC_WATCHPOINT:
6345    case EC_WATCHPOINT_SAME_EL:
6346        moe = 10;
6347        break;
6348    case EC_AA32_BKPT:
6349        moe = 3;
6350        break;
6351    case EC_VECTORCATCH:
6352        moe = 5;
6353        break;
6354    default:
6355        moe = 0;
6356        break;
6357    }
6358
6359    if (moe) {
6360        env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe);
6361    }
6362
6363    /* TODO: Vectored interrupt controller.  */
6364    switch (cs->exception_index) {
6365    case EXCP_UDEF:
6366        new_mode = ARM_CPU_MODE_UND;
6367        addr = 0x04;
6368        mask = CPSR_I;
6369        if (env->thumb)
6370            offset = 2;
6371        else
6372            offset = 4;
6373        break;
6374    case EXCP_SWI:
6375        new_mode = ARM_CPU_MODE_SVC;
6376        addr = 0x08;
6377        mask = CPSR_I;
6378        /* The PC already points to the next instruction.  */
6379        offset = 0;
6380        break;
6381    case EXCP_BKPT:
6382        env->exception.fsr = 2;
6383        /* Fall through to prefetch abort.  */
6384    case EXCP_PREFETCH_ABORT:
6385        A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr);
6386        A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress);
6387        qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
6388                      env->exception.fsr, (uint32_t)env->exception.vaddress);
6389        new_mode = ARM_CPU_MODE_ABT;
6390        addr = 0x0c;
6391        mask = CPSR_A | CPSR_I;
6392        offset = 4;
6393        break;
6394    case EXCP_DATA_ABORT:
6395        A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
6396        A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress);
6397        qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
6398                      env->exception.fsr,
6399                      (uint32_t)env->exception.vaddress);
6400        new_mode = ARM_CPU_MODE_ABT;
6401        addr = 0x10;
6402        mask = CPSR_A | CPSR_I;
6403        offset = 8;
6404        break;
6405    case EXCP_IRQ:
6406        new_mode = ARM_CPU_MODE_IRQ;
6407        addr = 0x18;
6408        /* Disable IRQ and imprecise data aborts.  */
6409        mask = CPSR_A | CPSR_I;
6410        offset = 4;
6411        if (env->cp15.scr_el3 & SCR_IRQ) {
6412            /* IRQ routed to monitor mode */
6413            new_mode = ARM_CPU_MODE_MON;
6414            mask |= CPSR_F;
6415        }
6416        break;
6417    case EXCP_FIQ:
6418        new_mode = ARM_CPU_MODE_FIQ;
6419        addr = 0x1c;
6420        /* Disable FIQ, IRQ and imprecise data aborts.  */
6421        mask = CPSR_A | CPSR_I | CPSR_F;
6422        if (env->cp15.scr_el3 & SCR_FIQ) {
6423            /* FIQ routed to monitor mode */
6424            new_mode = ARM_CPU_MODE_MON;
6425        }
6426        offset = 4;
6427        break;
6428    case EXCP_SMC:
6429        new_mode = ARM_CPU_MODE_MON;
6430        addr = 0x08;
6431        mask = CPSR_A | CPSR_I | CPSR_F;
6432        offset = 0;
6433        break;
6434    default:
6435        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
6436        return; /* Never happens.  Keep compiler happy.  */
6437    }
6438
6439    if (new_mode == ARM_CPU_MODE_MON) {
6440        addr += env->cp15.mvbar;
6441    } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
6442        /* High vectors. When enabled, base address cannot be remapped. */
6443        addr += 0xffff0000;
6444    } else {
6445        /* ARM v7 architectures provide a vector base address register to remap
6446         * the interrupt vector table.
6447         * This register is only followed in non-monitor mode, and is banked.
6448         * Note: only bits 31:5 are valid.
6449         */
6450        addr += A32_BANKED_CURRENT_REG_GET(env, vbar);
6451    }
6452
6453    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
6454        env->cp15.scr_el3 &= ~SCR_NS;
6455    }
6456
6457    switch_mode (env, new_mode);
6458    /* For exceptions taken to AArch32 we must clear the SS bit in both
6459     * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
6460     */
6461    env->uncached_cpsr &= ~PSTATE_SS;
6462    env->spsr = cpsr_read(env);
6463    /* Clear IT bits.  */
6464    env->condexec_bits = 0;
6465    /* Switch to the new mode, and to the correct instruction set.  */
6466    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
6467    /* Set new mode endianness */
6468    env->uncached_cpsr &= ~CPSR_E;
6469    if (env->cp15.sctlr_el[arm_current_el(env)] & SCTLR_EE) {
6470        env->uncached_cpsr |= CPSR_E;
6471    }
6472    env->daif |= mask;
6473    /* this is a lie, as there was no c1_sys on V4T/V5, but who cares
6474     * and we should just guard the thumb mode on V4 */
6475    if (arm_feature(env, ARM_FEATURE_V4T)) {
6476        env->thumb = (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
6477    }
6478    env->regs[14] = env->regs[15] + offset;
6479    env->regs[15] = addr;
6480}
6481
6482/* Handle exception entry to a target EL which is using AArch64 */
6483static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
6484{
6485    ARMCPU *cpu = ARM_CPU(cs);
6486    CPUARMState *env = &cpu->env;
6487    unsigned int new_el = env->exception.target_el;
6488    target_ulong addr = env->cp15.vbar_el[new_el];
6489    unsigned int new_mode = aarch64_pstate_mode(new_el, true);
6490
6491    if (arm_current_el(env) < new_el) {
6492        /* Entry vector offset depends on whether the implemented EL
6493         * immediately lower than the target level is using AArch32 or AArch64
6494         */
6495        bool is_aa64;
6496
6497        switch (new_el) {
6498        case 3:
6499            is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0;
6500            break;
6501        case 2:
6502            is_aa64 = (env->cp15.hcr_el2 & HCR_RW) != 0;
6503            break;
6504        case 1:
6505            is_aa64 = is_a64(env);
6506            break;
6507        default:
6508            g_assert_not_reached();
6509        }
6510
6511        if (is_aa64) {
6512            addr += 0x400;
6513        } else {
6514            addr += 0x600;
6515        }
6516    } else if (pstate_read(env) & PSTATE_SP) {
6517        addr += 0x200;
6518    }
6519
6520    switch (cs->exception_index) {
6521    case EXCP_PREFETCH_ABORT:
6522    case EXCP_DATA_ABORT:
6523        env->cp15.far_el[new_el] = env->exception.vaddress;
6524        qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
6525                      env->cp15.far_el[new_el]);
6526        /* fall through */
6527    case EXCP_BKPT:
6528    case EXCP_UDEF:
6529    case EXCP_SWI:
6530    case EXCP_HVC:
6531    case EXCP_HYP_TRAP:
6532    case EXCP_SMC:
6533        env->cp15.esr_el[new_el] = env->exception.syndrome;
6534        break;
6535    case EXCP_IRQ:
6536    case EXCP_VIRQ:
6537        addr += 0x80;
6538        break;
6539    case EXCP_FIQ:
6540    case EXCP_VFIQ:
6541        addr += 0x100;
6542        break;
6543    case EXCP_SEMIHOST:
6544        qemu_log_mask(CPU_LOG_INT,
6545                      "...handling as semihosting call 0x%" PRIx64 "\n",
6546                      env->xregs[0]);
6547        env->xregs[0] = do_arm_semihosting(env);
6548        return;
6549    default:
6550        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
6551    }
6552
6553    if (is_a64(env)) {
6554        env->banked_spsr[aarch64_banked_spsr_index(new_el)] = pstate_read(env);
6555        aarch64_save_sp(env, arm_current_el(env));
6556        env->elr_el[new_el] = env->pc;
6557    } else {
6558        env->banked_spsr[aarch64_banked_spsr_index(new_el)] = cpsr_read(env);
6559        if (!env->thumb) {
6560            env->cp15.esr_el[new_el] |= 1 << 25;
6561        }
6562        env->elr_el[new_el] = env->regs[15];
6563
6564        aarch64_sync_32_to_64(env);
6565
6566        env->condexec_bits = 0;
6567    }
6568    qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
6569                  env->elr_el[new_el]);
6570
6571    pstate_write(env, PSTATE_DAIF | new_mode);
6572    env->aarch64 = 1;
6573    aarch64_restore_sp(env, new_el);
6574
6575    env->pc = addr;
6576
6577    qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n",
6578                  new_el, env->pc, pstate_read(env));
6579}
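
/* Annotation (not part of the original file): vector offsets applied
 * above, relative to VBAR_ELx of the target EL:
 *   +0x000  from the current EL, SP_EL0   +0x400  from a lower EL, AArch64
 *   +0x200  from the current EL, SP_ELx   +0x600  from a lower EL, AArch32
 * with a further +0x80 for IRQ/vIRQ and +0x100 for FIQ/vFIQ inside each
 * group; synchronous exceptions use the group base itself.
 */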
6580
6581static inline bool check_for_semihosting(CPUState *cs)
6582{
6583    /* Check whether this exception is a semihosting call; if so
6584     * then handle it and return true; otherwise return false.
6585     */
6586    ARMCPU *cpu = ARM_CPU(cs);
6587    CPUARMState *env = &cpu->env;
6588
6589    if (is_a64(env)) {
6590        if (cs->exception_index == EXCP_SEMIHOST) {
6591            /* This is always the 64-bit semihosting exception.
6592             * The "is this usermode" and "is semihosting enabled"
6593             * checks have been done at translate time.
6594             */
6595            qemu_log_mask(CPU_LOG_INT,
6596                          "...handling as semihosting call 0x%" PRIx64 "\n",
6597                          env->xregs[0]);
6598            env->xregs[0] = do_arm_semihosting(env);
6599            return true;
6600        }
6601        return false;
6602    } else {
6603        uint32_t imm;
6604
6605        /* Only intercept calls from privileged modes, to provide some
6606         * semblance of security.
6607         */
6608        if (!semihosting_enabled() ||
6609            ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR)) {
6610            return false;
6611        }
6612
6613        switch (cs->exception_index) {
6614        case EXCP_SWI:
6615            /* Check for semihosting interrupt.  */
6616            if (env->thumb) {
6617                imm = arm_lduw_code(env, env->regs[15] - 2, arm_sctlr_b(env))
6618                    & 0xff;
6619                if (imm == 0xab) {
6620                    break;
6621                }
6622            } else {
6623                imm = arm_ldl_code(env, env->regs[15] - 4, arm_sctlr_b(env))
6624                    & 0xffffff;
6625                if (imm == 0x123456) {
6626                    break;
6627                }
6628            }
6629            return false;
6630        case EXCP_BKPT:
6631            /* See if this is a semihosting syscall.  */
6632            if (env->thumb) {
6633                imm = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env))
6634                    & 0xff;
6635                if (imm == 0xab) {
6636                    env->regs[15] += 2;
6637                    break;
6638                }
6639            }
6640            return false;
6641        default:
6642            return false;
6643        }
6644
6645        qemu_log_mask(CPU_LOG_INT,
6646                      "...handling as semihosting call 0x%x\n",
6647                      env->regs[0]);
6648        env->regs[0] = do_arm_semihosting(env);
6649        return true;
6650    }
6651}
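
/* Annotation (not part of the original file): the immediates matched
 * above are the standard ARM semihosting trap values: SVC 0x123456 from
 * A32 code, SVC 0xab from T32 code, and BKPT 0xab from T32 code.  The
 * operation number is passed in r0 and the result is returned in r0.
 */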
6652
6653/* Handle a CPU exception for A and R profile CPUs.
6654 * Do any appropriate logging, handle PSCI calls, and then hand off
6655 * to the AArch64-entry or AArch32-entry function depending on the
6656 * target exception level's register width.
6657 */
6658void arm_cpu_do_interrupt(CPUState *cs)
6659{
6660    ARMCPU *cpu = ARM_CPU(cs);
6661    CPUARMState *env = &cpu->env;
6662    unsigned int new_el = env->exception.target_el;
6663
6664    assert(!IS_M(env));
6665
6666    arm_log_exception(cs->exception_index);
6667    qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env),
6668                  new_el);
6669    if (qemu_loglevel_mask(CPU_LOG_INT)
6670        && !excp_is_internal(cs->exception_index)) {
6671        qemu_log_mask(CPU_LOG_INT, "...with ESR %x/0x%" PRIx32 "\n",
6672                      env->exception.syndrome >> ARM_EL_EC_SHIFT,
6673                      env->exception.syndrome);
6674    }
6675
6676    if (arm_is_psci_call(cpu, cs->exception_index)) {
6677        arm_handle_psci_call(cpu);
6678        qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
6679        return;
6680    }
6681
6682    /* Semihosting semantics depend on the register width of the
6683     * code that caused the exception, not the target exception level,
6684     * so must be handled here.
6685     */
6686    if (check_for_semihosting(cs)) {
6687        return;
6688    }
6689
6690    assert(!excp_is_internal(cs->exception_index));
6691    if (arm_el_is_aa64(env, new_el)) {
6692        arm_cpu_do_interrupt_aarch64(cs);
6693    } else {
6694        arm_cpu_do_interrupt_aarch32(cs);
6695    }
6696
6697    if (!kvm_enabled()) {
6698        cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
6699    }
6700}
6701
6702/* Return the exception level which controls this address translation regime */
6703static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
6704{
6705    switch (mmu_idx) {
6706    case ARMMMUIdx_S2NS:
6707    case ARMMMUIdx_S1E2:
6708        return 2;
6709    case ARMMMUIdx_S1E3:
6710        return 3;
6711    case ARMMMUIdx_S1SE0:
6712        return arm_el_is_aa64(env, 3) ? 1 : 3;
6713    case ARMMMUIdx_S1SE1:
6714    case ARMMMUIdx_S1NSE0:
6715    case ARMMMUIdx_S1NSE1:
6716        return 1;
6717    default:
6718        g_assert_not_reached();
6719    }
6720}
6721
6722/* Return true if this address translation regime is secure */
6723static inline bool regime_is_secure(CPUARMState *env, ARMMMUIdx mmu_idx)
6724{
6725    switch (mmu_idx) {
6726    case ARMMMUIdx_S12NSE0:
6727    case ARMMMUIdx_S12NSE1:
6728    case ARMMMUIdx_S1NSE0:
6729    case ARMMMUIdx_S1NSE1:
6730    case ARMMMUIdx_S1E2:
6731    case ARMMMUIdx_S2NS:
6732        return false;
6733    case ARMMMUIdx_S1E3:
6734    case ARMMMUIdx_S1SE0:
6735    case ARMMMUIdx_S1SE1:
6736        return true;
6737    default:
6738        g_assert_not_reached();
6739    }
6740}
6741
6742/* Return the SCTLR value which controls this address translation regime */
6743static inline uint32_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
6744{
6745    return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
6746}
6747
6748/* Return true if the specified stage of address translation is disabled */
6749static inline bool regime_translation_disabled(CPUARMState *env,
6750                                               ARMMMUIdx mmu_idx)
6751{
6752    if (mmu_idx == ARMMMUIdx_S2NS) {
        return (env->cp15.hcr_el2 & HCR_VM) == 0;
    }
    return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
}

static inline bool regime_translation_big_endian(CPUARMState *env,
                                                 ARMMMUIdx mmu_idx)
{
    return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
}

/* Return the TCR controlling this translation regime */
static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_S2NS) {
        return &env->cp15.vtcr_el2;
    }
    return &env->cp15.tcr_el[regime_el(env, mmu_idx)];
}

/* Return the TTBR associated with this translation regime */
static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx,
                                   int ttbrn)
{
    if (mmu_idx == ARMMMUIdx_S2NS) {
        return env->cp15.vttbr_el2;
    }
    if (ttbrn == 0) {
        return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
    } else {
        return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
    }
}

/* Return true if the translation regime is using LPAE format page tables */
static inline bool regime_using_lpae_format(CPUARMState *env,
                                            ARMMMUIdx mmu_idx)
{
    int el = regime_el(env, mmu_idx);
    if (el == 2 || arm_el_is_aa64(env, el)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)
        && (regime_tcr(env, mmu_idx)->raw_tcr & TTBCR_EAE)) {
        return true;
    }
    return false;
}

/* Returns true if the stage 1 translation regime is using LPAE format page
 * tables. Used when raising alignment exceptions, whose FSR changes depending
 * on whether the long or short descriptor format is in use.
 */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
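        /* Convert to the equivalent stage-1-only MMU index */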
        mmu_idx += ARMMMUIdx_S1NSE0;
    }

    return regime_using_lpae_format(env, mmu_idx);
}

static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_S1SE0:
    case ARMMMUIdx_S1NSE0:
        return true;
    default:
        return false;
    case ARMMMUIdx_S12NSE0:
    case ARMMMUIdx_S12NSE1:
        g_assert_not_reached();
    }
}

/* Translate section/page access permissions to page
 * R/W protection flags
 *
 * @env:         CPUARMState
 * @mmu_idx:     MMU index indicating required translation regime
 * @ap:          The 3-bit access permissions (AP[2:0])
 * @domain_prot: The 2-bit domain access permissions
 */
static inline int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
                                int ap, int domain_prot)
{
    bool is_user = regime_is_user(env, mmu_idx);

    if (domain_prot == 3) {
        return PAGE_READ | PAGE_WRITE;
    }

    switch (ap) {
    case 0:
        if (arm_feature(env, ARM_FEATURE_V7)) {
            return 0;
        }
        switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
        case SCTLR_S:
            return is_user ? 0 : PAGE_READ;
        case SCTLR_R:
            return PAGE_READ;
        default:
            return 0;
        }
    case 1:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 2:
        if (is_user) {
            return PAGE_READ;
        } else {
            return PAGE_READ | PAGE_WRITE;
        }
    case 3:
        return PAGE_READ | PAGE_WRITE;
    case 4: /* Reserved.  */
        return 0;
    case 5:
        return is_user ? 0 : PAGE_READ;
    case 6:
        return PAGE_READ;
    case 7:
        if (!arm_feature(env, ARM_FEATURE_V6K)) {
            return 0;
        }
        return PAGE_READ;
    default:
        g_assert_not_reached();
    }
}

/* Translate section/page access permissions to page
 * R/W protection flags.
 *
 * @ap:      The 2-bit simple AP (AP[2:1])
 * @is_user: TRUE if accessing from PL0
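 *
 * In the simple AP format, AP[2] selects read-only and AP[1] selects
 * whether unprivileged (PL0) accesses are permitted.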
 */
static inline int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
{
    switch (ap) {
    case 0:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 1:
        return PAGE_READ | PAGE_WRITE;
    case 2:
        return is_user ? 0 : PAGE_READ;
    case 3:
        return PAGE_READ;
    default:
        g_assert_not_reached();
    }
}

static inline int
simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
{
    return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
}

/* Translate S2 section/page access permissions to protection flags
 *
 * @env:     CPUARMState
 * @s2ap:    The 2-bit stage2 access permissions (S2AP)
 * @xn:      XN (execute-never) bit
 */
static int get_S2prot(CPUARMState *env, int s2ap, int xn)
{
    int prot = 0;

    if (s2ap & 1) {
        prot |= PAGE_READ;
    }
    if (s2ap & 2) {
        prot |= PAGE_WRITE;
    }
    if (!xn) {
        prot |= PAGE_EXEC;
    }
    return prot;
}

/* Translate section/page access permissions to protection flags
 *
 * @env:     CPUARMState
 * @mmu_idx: MMU index indicating required translation regime
 * @is_aa64: TRUE if AArch64
 * @ap:      The 2-bit simple AP (AP[2:1])
 * @ns:      NS (non-secure) bit
 * @xn:      XN (execute-never) bit
 * @pxn:     PXN (privileged execute-never) bit
 */
static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
                      int ap, int ns, int xn, int pxn)
{
    bool is_user = regime_is_user(env, mmu_idx);
    int prot_rw, user_rw;
    bool have_wxn;
    int wxn = 0;

    assert(mmu_idx != ARMMMUIdx_S2NS);

    user_rw = simple_ap_to_rw_prot_is_user(ap, true);
    if (is_user) {
        prot_rw = user_rw;
    } else {
        prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
    }

    if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) {
        return prot_rw;
    }

    /* TODO have_wxn should be replaced with
     *   ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2)
     * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE
     * compatible processors have EL2, which is required for [U]WXN.
     */
    have_wxn = arm_feature(env, ARM_FEATURE_LPAE);

    if (have_wxn) {
        wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN;
    }

    if (is_aa64) {
        switch (regime_el(env, mmu_idx)) {
        case 1:
            if (!is_user) {
                xn = pxn || (user_rw & PAGE_WRITE);
            }
            break;
        case 2:
        case 3:
            break;
        }
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        switch (regime_el(env, mmu_idx)) {
        case 1:
        case 3:
            if (is_user) {
                xn = xn || !(user_rw & PAGE_READ);
            } else {
                int uwxn = 0;
                if (have_wxn) {
                    uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN;
                }
                xn = xn || !(prot_rw & PAGE_READ) || pxn ||
                     (uwxn && (user_rw & PAGE_WRITE));
            }
            break;
        case 2:
            break;
        }
    } else {
        xn = wxn = 0;
    }

    if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
        return prot_rw;
    }
    return prot_rw | PAGE_EXEC;
}

static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
                                     uint32_t *table, uint32_t address)
{
    /* Note that we can only get here for an AArch32 PL0/PL1 lookup */
    TCR *tcr = regime_tcr(env, mmu_idx);

    if (address & tcr->mask) {
        if (tcr->raw_tcr & TTBCR_PD1) {
            /* Translation table walk disabled for TTBR1 */
            return false;
        }
        *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
    } else {
        if (tcr->raw_tcr & TTBCR_PD0) {
            /* Translation table walk disabled for TTBR0 */
            return false;
        }
        *table = regime_ttbr(env, mmu_idx, 0) & tcr->base_mask;
    }
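    /* Index into the L1 table using VA[31:20]; each descriptor is 4
     * bytes, so the offset is VA[31:20] << 2, i.e. (address >> 18) & 0x3ffc.
     */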
    *table |= (address >> 18) & 0x3ffc;
    return true;
}

/* Translate an S1 pagetable walk through S2 if needed.  */
static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
                               hwaddr addr, MemTxAttrs txattrs,
                               uint32_t *fsr,
                               ARMMMUFaultInfo *fi)
{
    if ((mmu_idx == ARMMMUIdx_S1NSE0 || mmu_idx == ARMMMUIdx_S1NSE1) &&
        !regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
        target_ulong s2size;
        hwaddr s2pa;
        int s2prot;
        int ret;

        ret = get_phys_addr_lpae(env, addr, 0, ARMMMUIdx_S2NS, &s2pa,
                                 &txattrs, &s2prot, &s2size, fsr, fi);
        if (ret) {
            fi->s2addr = addr;
            fi->stage2 = true;
            fi->s1ptw = true;
            return ~0;
        }
        addr = s2pa;
    }
    return addr;
}

/* All loads done in the course of a page table walk go through here.
 * TODO: rather than ignoring errors from physical memory reads (which
 * are external aborts in ARM terminology) we should propagate this
 * error out so that we can turn it into a Data Abort if this walk
 * was being done for a CPU load/store or an address translation instruction
 * (but not if it was for a debug access).
 */
static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure,
                            ARMMMUIdx mmu_idx, uint32_t *fsr,
                            ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    AddressSpace *as;

    attrs.secure = is_secure;
    as = arm_addressspace(cs, attrs);
    addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fsr, fi);
    if (fi->s1ptw) {
        return 0;
    }
    if (regime_translation_big_endian(env, mmu_idx)) {
        return address_space_ldl_be(as, addr, attrs, NULL);
    } else {
        return address_space_ldl_le(as, addr, attrs, NULL);
    }
}

static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure,
                            ARMMMUIdx mmu_idx, uint32_t *fsr,
                            ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    AddressSpace *as;

    attrs.secure = is_secure;
    as = arm_addressspace(cs, attrs);
    addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fsr, fi);
    if (fi->s1ptw) {
        return 0;
    }
    if (regime_translation_big_endian(env, mmu_idx)) {
        return address_space_ldq_be(as, addr, attrs, NULL);
    } else {
        return address_space_ldq_le(as, addr, attrs, NULL);
    }
}

static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
                             int access_type, ARMMMUIdx mmu_idx,
                             hwaddr *phys_ptr, int *prot,
                             target_ulong *page_size, uint32_t *fsr,
                             ARMMMUFaultInfo *fi)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    int code;
    uint32_t table;
    uint32_t desc;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        code = 5;
        goto do_fault;
    }
    desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                       mmu_idx, fsr, fi);
    type = (desc & 3);
    domain = (desc >> 5) & 0x0f;
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (type == 0) {
        /* Section translation fault.  */
        code = 5;
        goto do_fault;
    }
    if (domain_prot == 0 || domain_prot == 2) {
        if (type == 2) {
            code = 9; /* Section domain fault.  */
        } else {
            code = 11; /* Page domain fault.  */
        }
        goto do_fault;
    }
    if (type == 2) {
        /* 1Mb section.  */
        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
        ap = (desc >> 10) & 3;
        code = 13;
        *page_size = 1024 * 1024;
    } else {
        /* Lookup l2 entry.  */
        if (type == 1) {
            /* Coarse pagetable.  */
            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        } else {
            /* Fine pagetable.  */
            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
        }
        desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                           mmu_idx, fsr, fi);
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            code = 7;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
            *page_size = 0x10000;
            break;
        case 2: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
            *page_size = 0x1000;
            break;
        case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
            if (type == 1) {
                /* ARMv6/XScale extended small page format */
                if (arm_feature(env, ARM_FEATURE_XSCALE)
                    || arm_feature(env, ARM_FEATURE_V6)) {
                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
                    *page_size = 0x1000;
                } else {
                    /* UNPREDICTABLE in ARMv5; we choose to take a
                     * page translation fault.
                     */
                    code = 7;
                    goto do_fault;
                }
            } else {
                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
                *page_size = 0x400;
            }
            ap = (desc >> 4) & 3;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            abort();
        }
        code = 15;
    }
    *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
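    /* The v5 short-descriptor format has no XN bit: any accessible page
     * is also executable.
     */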
    *prot |= *prot ? PAGE_EXEC : 0;
    if (!(*prot & (1 << access_type))) {
        /* Access permission fault.  */
        goto do_fault;
    }
    *phys_ptr = phys_addr;
    return false;
do_fault:
    *fsr = code | (domain << 4);
    return true;
}

static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
                             int access_type, ARMMMUIdx mmu_idx,
                             hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                             target_ulong *page_size, uint32_t *fsr,
                             ARMMMUFaultInfo *fi)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    int code;
    uint32_t table;
    uint32_t desc;
    uint32_t xn;
    uint32_t pxn = 0;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;
    bool ns;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        code = 5;
        goto do_fault;
    }
    desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                       mmu_idx, fsr, fi);
    type = (desc & 3);
    if (type == 0 || (type == 3 && !arm_feature(env, ARM_FEATURE_PXN))) {
        /* Section translation fault, or attempt to use the encoding
         * which is Reserved on implementations without PXN.
         */
        code = 5;
        goto do_fault;
    }
    if ((type == 1) || !(desc & (1 << 18))) {
        /* Page or Section.  */
        domain = (desc >> 5) & 0x0f;
    }
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (domain_prot == 0 || domain_prot == 2) {
        if (type != 1) {
            code = 9; /* Section domain fault.  */
        } else {
            code = 11; /* Page domain fault.  */
        }
        goto do_fault;
    }
    if (type != 1) {
        if (desc & (1 << 18)) {
            /* Supersection.  */
            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
            phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
            phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
            *page_size = 0x1000000;
        } else {
            /* Section.  */
            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
            *page_size = 0x100000;
        }
        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
        xn = desc & (1 << 4);
        pxn = desc & 1;
        code = 13;
        ns = extract32(desc, 19, 1);
    } else {
        if (arm_feature(env, ARM_FEATURE_PXN)) {
            pxn = (desc >> 2) & 1;
        }
        ns = extract32(desc, 3, 1);
        /* Lookup l2 entry.  */
        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                           mmu_idx, fsr, fi);
        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            code = 7;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            xn = desc & (1 << 15);
            *page_size = 0x10000;
            break;
        case 2: case 3: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            xn = desc & 1;
            *page_size = 0x1000;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            abort();
        }
        code = 15;
    }
    if (domain_prot == 3) {
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    } else {
        if (pxn && !regime_is_user(env, mmu_idx)) {
            xn = 1;
        }
        if (xn && access_type == 2) {
            goto do_fault;
        }

        if (arm_feature(env, ARM_FEATURE_V6K) &&
                (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
            /* The simplified model uses AP[0] as an access control bit.  */
            if ((ap & 1) == 0) {
                /* Access flag fault.  */
                code = (code == 15) ? 6 : 3;
                goto do_fault;
            }
            *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
        } else {
            *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
        }
        if (*prot && !xn) {
            *prot |= PAGE_EXEC;
        }
        if (!(*prot & (1 << access_type))) {
            /* Access permission fault.  */
            goto do_fault;
        }
    }
    if (ns) {
        /* The NS bit will (as required by the architecture) have no effect if
         * the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        attrs->secure = false;
    }
    *phys_ptr = phys_addr;
    return false;
do_fault:
    *fsr = code | (domain << 4);
    return true;
}

/* Fault type for long-descriptor MMU fault reporting; this corresponds
 * to bits [5..2] in the STATUS field in long-format DFSR/IFSR.
 */
typedef enum {
    translation_fault = 1,
    access_fault = 2,
    permission_fault = 3,
} MMUFaultType;

/*
 * check_s2_mmu_setup
 * @cpu:        ARMCPU
 * @is_aa64:    True if the translation regime is in AArch64 state
 * @startlevel: Suggested starting level
 * @inputsize:  Bitsize of IPAs
 * @stride:     Page-table stride (See the ARM ARM)
 *
 * Returns true if the suggested S2 translation parameters are OK and
 * false otherwise.
 */
static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
                               int inputsize, int stride)
{
    const int grainsize = stride + 3;
    int startsizecheck;

    /* Negative levels are never allowed.  */
    if (level < 0) {
        return false;
    }

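    /* The start level must leave between 1 and stride + 4 bits of the
     * input address to be resolved by the top-level table (the extra 4
     * bits allow for up to 16 concatenated stage 2 tables).
     */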
    startsizecheck = inputsize - ((3 - level) * stride + grainsize);
    if (startsizecheck < 1 || startsizecheck > stride + 4) {
        return false;
    }

    if (is_aa64) {
        CPUARMState *env = &cpu->env;
        unsigned int pamax = arm_pamax(cpu);

        switch (stride) {
        case 13: /* 64KB Pages.  */
            if (level == 0 || (level == 1 && pamax <= 42)) {
                return false;
            }
            break;
        case 11: /* 16KB Pages.  */
            if (level == 0 || (level == 1 && pamax <= 40)) {
                return false;
            }
            break;
        case 9: /* 4KB Pages.  */
            if (level == 0 && pamax <= 42) {
                return false;
            }
            break;
        default:
            g_assert_not_reached();
        }

        /* Inputsize checks.  */
        if (inputsize > pamax &&
            (arm_el_is_aa64(env, 1) || inputsize > 40)) {
            /* This is CONSTRAINED UNPREDICTABLE and we choose to fault.  */
            return false;
        }
    } else {
        /* AArch32 only supports 4KB pages. Assert on that.  */
        assert(stride == 9);

        if (level == 0) {
            return false;
        }
    }
    return true;
}

static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
                               int access_type, ARMMMUIdx mmu_idx,
                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                               target_ulong *page_size_ptr, uint32_t *fsr,
                               ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    /* Read an LPAE long-descriptor translation table. */
    MMUFaultType fault_type = translation_fault;
    uint32_t level;
    uint32_t epd = 0;
    int32_t t0sz, t1sz;
    uint32_t tg;
    uint64_t ttbr;
    int ttbr_select;
    hwaddr descaddr, descmask;
    uint32_t tableattrs;
    target_ulong page_size;
    uint32_t attrs;
    int32_t stride = 9;
    int32_t va_size;
    int inputsize;
    int32_t tbi = 0;
    TCR *tcr = regime_tcr(env, mmu_idx);
    int ap, ns, xn, pxn;
    uint32_t el = regime_el(env, mmu_idx);
    bool ttbr1_valid = true;
    uint64_t descaddrmask;

    /* TODO:
     * This code does not handle the different format TCR for VTCR_EL2.
     * This code also does not support shareability levels.
     * Attribute and permission bit handling should also be checked when adding
     * support for those page table walks.
     */
    if (arm_el_is_aa64(env, el)) {
        level = 0;
        va_size = 64;
        if (el > 1) {
            if (mmu_idx != ARMMMUIdx_S2NS) {
                tbi = extract64(tcr->raw_tcr, 20, 1);
            }
        } else {
            if (extract64(address, 55, 1)) {
                tbi = extract64(tcr->raw_tcr, 38, 1);
            } else {
                tbi = extract64(tcr->raw_tcr, 37, 1);
            }
        }
        tbi *= 8;
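        /* tbi is now the number of top address bits ignored by the
         * translation: 8 if top-byte-ignore is enabled, else 0.
         */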

        /* If we are in 64-bit EL2 or EL3 then there is no TTBR1, so mark it
         * invalid.
         */
        if (el > 1) {
            ttbr1_valid = false;
        }
    } else {
        level = 1;
        va_size = 32;
        /* There is no TTBR1 for EL2 */
        if (el == 2) {
            ttbr1_valid = false;
        }
    }

    /* Determine whether this address is in the region controlled by
     * TTBR0 or TTBR1 (or if it is in neither region and should fault).
     * This is a Non-secure PL0/1 stage 1 translation, so controlled by
     * TTBCR/TTBR0/TTBR1 in accordance with ARM ARM DDI0406C table B-32:
     */
    if (va_size == 64) {
        /* AArch64 translation.  */
        t0sz = extract32(tcr->raw_tcr, 0, 6);
        t0sz = MIN(t0sz, 39);
        t0sz = MAX(t0sz, 16);
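        /* Clamping T0SZ to [16, 39] restricts the TTBR0 region size to
         * between 2^25 and 2^48 bytes.
         */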
    } else if (mmu_idx != ARMMMUIdx_S2NS) {
        /* AArch32 stage 1 translation.  */
        t0sz = extract32(tcr->raw_tcr, 0, 3);
    } else {
        /* AArch32 stage 2 translation.  */
        bool sext = extract32(tcr->raw_tcr, 4, 1);
        bool sign = extract32(tcr->raw_tcr, 3, 1);
        t0sz = sextract32(tcr->raw_tcr, 0, 4);

        /* If the sign-extend bit is not the same as t0sz[3], the result
         * is unpredictable. Flag this as a guest error.  */
        if (sign != sext) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n");
        }
    }
    t1sz = extract32(tcr->raw_tcr, 16, 6);
    if (va_size == 64) {
        t1sz = MIN(t1sz, 39);
        t1sz = MAX(t1sz, 16);
    }
    if (t0sz && !extract64(address, va_size - t0sz, t0sz - tbi)) {
        /* there is a ttbr0 region and we are in it (high bits all zero) */
        ttbr_select = 0;
    } else if (ttbr1_valid && t1sz &&
               !extract64(~address, va_size - t1sz, t1sz - tbi)) {
        /* there is a ttbr1 region and we are in it (high bits all one) */
        ttbr_select = 1;
    } else if (!t0sz) {
        /* ttbr0 region is "everything not in the ttbr1 region" */
        ttbr_select = 0;
    } else if (!t1sz && ttbr1_valid) {
        /* ttbr1 region is "everything not in the ttbr0 region" */
        ttbr_select = 1;
    } else {
        /* in the gap between the two regions, this is a Translation fault */
        fault_type = translation_fault;
        goto do_fault;
    }

    /* Note that QEMU ignores shareability and cacheability attributes,
     * so we don't need to do anything with the SH, ORGN, IRGN fields
     * in the TTBCR.  Similarly, TTBCR:A1 selects whether we get the
     * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
     * implement any ASID-like capability so we can ignore it (instead
     * we will always flush the TLB any time the ASID is changed).
     */
    if (ttbr_select == 0) {
        ttbr = regime_ttbr(env, mmu_idx, 0);
        if (el < 2) {
            epd = extract32(tcr->raw_tcr, 7, 1);
        }
        inputsize = va_size - t0sz;

        tg = extract32(tcr->raw_tcr, 14, 2);
        if (tg == 1) { /* 64KB pages */
            stride = 13;
        }
        if (tg == 2) { /* 16KB pages */
            stride = 11;
        }
    } else {
        /* We should only be here if TTBR1 is valid */
        assert(ttbr1_valid);

        ttbr = regime_ttbr(env, mmu_idx, 1);
        epd = extract32(tcr->raw_tcr, 23, 1);
        inputsize = va_size - t1sz;

        tg = extract32(tcr->raw_tcr, 30, 2);
        if (tg == 3) { /* 64KB pages */
            stride = 13;
        }
        if (tg == 1) { /* 16KB pages */
            stride = 11;
        }
    }

    /* Here we should have set up all the parameters for the translation:
     * va_size, inputsize, ttbr, epd, stride, tbi
     */

    if (epd) {
        /* Translation table walk disabled => Translation fault on TLB miss
         * Note: This is always 0 on 64-bit EL2 and EL3.
         */
        goto do_fault;
    }

    if (mmu_idx != ARMMMUIdx_S2NS) {
        /* The starting level depends on the virtual address size (which can
         * be up to 48 bits) and the translation granule size. It indicates
         * the number of strides (stride bits at a time) needed to
         * consume the bits of the input address. In the pseudocode this is:
         *  level = 4 - RoundUp((inputsize - grainsize) / stride)
         * where their 'inputsize' is our 'inputsize', 'grainsize' is
         * our 'stride + 3' and 'stride' is our 'stride'.
         * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
         * = 4 - (inputsize - stride - 3 + stride - 1) / stride
         * = 4 - (inputsize - 4) / stride;
         */
        level = 4 - (inputsize - 4) / stride;
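        /* For example, a 40-bit inputsize with 4KB pages (stride 9)
         * gives 4 - 36 / 9 = 0, i.e. start at level 0, while a 39-bit
         * inputsize gives 4 - 35 / 9 = 1.
         */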
    } else {
        /* For stage 2 translations the starting level is specified by the
         * VTCR_EL2.SL0 field (whose interpretation depends on the page size)
         */
        uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2);
        uint32_t startlevel;
        bool ok;

        if (va_size == 32 || stride == 9) {
            /* AArch32 or 4KB pages */
            startlevel = 2 - sl0;
        } else {
            /* 16KB or 64KB pages */
            startlevel = 3 - sl0;
        }

        /* Check that the starting level is valid. */
        ok = check_s2_mmu_setup(cpu, va_size == 64, startlevel,
                                inputsize, stride);
        if (!ok) {
            fault_type = translation_fault;
            goto do_fault;
        }
        level = startlevel;
    }

    /* Clear the vaddr bits which aren't part of the within-region address,
     * so that we don't have to special case things when calculating the
     * first descriptor address.
     */
    if (va_size != inputsize) {
        address &= (1ULL << inputsize) - 1;
    }

    descmask = (1ULL << (stride + 3)) - 1;

    /* Now we can extract the actual base address from the TTBR */
    descaddr = extract64(ttbr, 0, 48);
    descaddr &= ~((1ULL << (inputsize - (stride * (4 - level)))) - 1);
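    /* The masking above aligns the base to the size of the start-level
     * table: its index (times the 8-byte entry size) occupies the low
     * inputsize - stride * (4 - level) bits of the descriptor address.
     */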

    /* The address field in the descriptor goes up to bit 39 for ARMv7
     * but up to bit 47 for ARMv8.
     */
    if (arm_feature(env, ARM_FEATURE_V8)) {
        descaddrmask = 0xfffffffff000ULL;
    } else {
        descaddrmask = 0xfffffff000ULL;
    }

    /* Secure accesses start with the page table in secure memory and
     * can be downgraded to non-secure at any step. Non-secure accesses
     * remain non-secure. We implement this by just ORing in the NSTable/NS
     * bits at each step.
     */
    tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4);
    for (;;) {
        uint64_t descriptor;
        bool nstable;

        descaddr |= (address >> (stride * (4 - level))) & descmask;
        descaddr &= ~7ULL;
        nstable = extract32(tableattrs, 4, 1);
        descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fsr, fi);
        if (fi->s1ptw) {
            goto do_fault;
        }

        if (!(descriptor & 1) ||
            (!(descriptor & 2) && (level == 3))) {
            /* Invalid, or the Reserved level 3 encoding */
            goto do_fault;
        }
        descaddr = descriptor & descaddrmask;

        if ((descriptor & 2) && (level < 3)) {
            /* Table entry. The top five bits are attributes which may
             * propagate down through lower levels of the table (and
             * which are all arranged so that 0 means "no effect", so
             * we can gather them up by ORing in the bits at each level).
             */
            tableattrs |= extract64(descriptor, 59, 5);
            level++;
            continue;
        }
        /* Block entry at level 1 or 2, or page entry at level 3.
         * These are basically the same thing, although the number
         * of bits we pull in from the vaddr varies.
         */
        page_size = (1ULL << ((stride * (4 - level)) + 3));
        descaddr |= (address & (page_size - 1));
        /* Extract attributes from the descriptor */
        attrs = extract64(descriptor, 2, 10)
            | (extract64(descriptor, 52, 12) << 10);

        if (mmu_idx == ARMMMUIdx_S2NS) {
            /* Stage 2 table descriptors do not include any attribute fields */
            break;
        }
        /* Merge in attributes from table descriptors */
        attrs |= extract32(tableattrs, 0, 2) << 11; /* XN, PXN */
        attrs |= extract32(tableattrs, 3, 1) << 5; /* APTable[1] => AP[2] */
        /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
         * means "force PL1 access only", which means forcing AP[1] to 0.
         */
        if (extract32(tableattrs, 2, 1)) {
            attrs &= ~(1 << 4);
        }
        attrs |= nstable << 3; /* NS */
        break;
    }
    /* Here descaddr is the final physical address, and attributes
     * are all in attrs.
     */
    fault_type = access_fault;
    if ((attrs & (1 << 8)) == 0) {
        /* Access flag */
        goto do_fault;
    }

    ap = extract32(attrs, 4, 2);
    xn = extract32(attrs, 12, 1);

    if (mmu_idx == ARMMMUIdx_S2NS) {
        ns = true;
        *prot = get_S2prot(env, ap, xn);
    } else {
        ns = extract32(attrs, 3, 1);
        pxn = extract32(attrs, 11, 1);
        *prot = get_S1prot(env, mmu_idx, va_size == 64, ap, ns, xn, pxn);
    }

    fault_type = permission_fault;
    if (!(*prot & (1 << access_type))) {
        goto do_fault;
    }

    if (ns) {
        /* The NS bit will (as required by the architecture) have no effect if
         * the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        txattrs->secure = false;
    }
    *phys_ptr = descaddr;
    *page_size_ptr = page_size;
    return false;

do_fault:
    /* Long-descriptor format IFSR/DFSR value */
    *fsr = (1 << 9) | (fault_type << 2) | level;
    /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2.  */
    fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_S2NS);
    return true;
}

static inline void get_phys_addr_pmsav7_default(CPUARMState *env,
                                                ARMMMUIdx mmu_idx,
                                                int32_t address, int *prot)
{
    *prot = PAGE_READ | PAGE_WRITE;
    switch (address) {
    case 0xF0000000 ... 0xFFFFFFFF:
        if (regime_sctlr(env, mmu_idx) & SCTLR_V) { /* hivecs execing is ok */
            *prot |= PAGE_EXEC;
        }
        break;
    case 0x00000000 ... 0x7FFFFFFF:
        *prot |= PAGE_EXEC;
        break;
    }
}

static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
                                 int access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, int *prot, uint32_t *fsr)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int n;
    bool is_user = regime_is_user(env, mmu_idx);

    *phys_ptr = address;
    *prot = 0;

    if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
    } else { /* MPU enabled */
        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
            /* region search */
            uint32_t base = env->pmsav7.drbar[n];
            uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5);
            uint32_t rmask;
            bool srdis = false;

            if (!(env->pmsav7.drsr[n] & 0x1)) {
                continue;
            }

            if (!rsize) {
                qemu_log_mask(LOG_GUEST_ERROR, "DRSR.Rsize field cannot be 0\n");
                continue;
            }
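            /* DRSR.Rsize encodes the region size as 2^(Rsize + 1) bytes */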
            rsize++;
            rmask = (1ull << rsize) - 1;

            if (base & rmask) {
                qemu_log_mask(LOG_GUEST_ERROR, "DRBAR %" PRIx32 " misaligned "
                              "to DRSR region size, mask = %" PRIx32 "\n",
                              base, rmask);
                continue;
            }

            if (address < base || address > base + rmask) {
                continue;
            }

            /* Region matched */

            if (rsize >= 8) { /* no subregions for regions < 256 bytes */
                int i, snd;
                uint32_t srdis_mask;

                rsize -= 3; /* sub region size (power of 2) */
                snd = ((address - base) >> rsize) & 0x7;
                srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1);

                srdis_mask = srdis ? 0x3 : 0x0;
                for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) {
                    /* This will check in groups of 2, 4 and then 8, whether
                     * the subregion bits are consistent. rsize is incremented
                     * back up to give the region size, considering consistent
                     * adjacent subregions as one region. Stop testing if rsize
                     * is already big enough for an entire QEMU page.
                     */
                    int snd_rounded = snd & ~(i - 1);
                    uint32_t srdis_multi = extract32(env->pmsav7.drsr[n],
                                                     snd_rounded + 8, i);
                    if (srdis_mask ^ srdis_multi) {
                        break;
                    }
                    srdis_mask = (srdis_mask << i) | srdis_mask;
                    rsize++;
                }
            }
            if (rsize < TARGET_PAGE_BITS) {
                qemu_log_mask(LOG_UNIMP, "No support for MPU (sub)region "
                              "alignment of %" PRIu32 " bits. Minimum is %d\n",
                              rsize, TARGET_PAGE_BITS);
                continue;
            }
            if (srdis) {
                continue;
            }
            break;
        }

        if (n == -1) { /* no hits */
            if (cpu->pmsav7_dregion &&
                (is_user || !(regime_sctlr(env, mmu_idx) & SCTLR_BR))) {
                /* background fault */
                *fsr = 0;
                return true;
            }
            get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
        } else { /* an MPU hit! */
            uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);

            if (is_user) { /* User mode AP bit decoding */
                switch (ap) {
                case 0:
                case 1:
                case 5:
                    break; /* no access */
                case 3:
                    *prot |= PAGE_WRITE;
                    /* fall through */
                case 2:
                case 6:
                    *prot |= PAGE_READ | PAGE_EXEC;
                    break;
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "Bad value for AP bits in DRACR %"
                                  PRIx32 "\n", ap);
                }
            } else { /* Priv. mode AP bits decoding */
                switch (ap) {
                case 0:
                    break; /* no access */
                case 1:
                case 2:
                case 3:
                    *prot |= PAGE_WRITE;
                    /* fall through */
                case 5:
                case 6:
                    *prot |= PAGE_READ | PAGE_EXEC;
                    break;
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "Bad value for AP bits in DRACR %"
                                  PRIx32 "\n", ap);
                }
            }

            /* execute never */
            if (env->pmsav7.dracr[n] & (1 << 12)) {
                *prot &= ~PAGE_EXEC;
            }
        }
    }

    *fsr = 0x00d; /* Permission fault */
    return !(*prot & (1 << access_type));
}

static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
                                 int access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, int *prot, uint32_t *fsr)
{
    int n;
    uint32_t mask;
    uint32_t base;
    bool is_user = regime_is_user(env, mmu_idx);

    *phys_ptr = address;
    for (n = 7; n >= 0; n--) {
        base = env->cp15.c6_region[n];
        if ((base & 1) == 0) {
            continue;
        }
        mask = 1 << ((base >> 1) & 0x1f);
        /* Keep this shift separate from the above to avoid an
           (undefined) << 32.  */
        mask = (mask << 1) - 1;
        if (((base ^ address) & ~mask) == 0) {
            break;
        }
    }
    if (n < 0) {
        *fsr = 2;
        return true;
    }

    if (access_type == 2) {
        mask = env->cp15.pmsav5_insn_ap;
    } else {
        mask = env->cp15.pmsav5_data_ap;
    }
    mask = (mask >> (n * 4)) & 0xf;
    switch (mask) {
    case 0:
        *fsr = 1;
        return true;
    case 1:
        if (is_user) {
            *fsr = 1;
            return true;
        }
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 2:
        *prot = PAGE_READ;
        if (!is_user) {
            *prot |= PAGE_WRITE;
        }
        break;
    case 3:
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 5:
        if (is_user) {
            *fsr = 1;
            return true;
        }
        *prot = PAGE_READ;
        break;
    case 6:
        *prot = PAGE_READ;
        break;
    default:
        /* Bad permission.  */
        *fsr = 1;
        return true;
    }
    *prot |= PAGE_EXEC;
    return false;
}

/* get_phys_addr - get the physical address for this virtual address
 *
 * Find the physical address corresponding to the given virtual address,
 * by doing a translation table walk on MMU based systems or using the
 * MPU state on MPU based systems.
 *
 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
 * prot and page_size may not be filled in, and the populated fsr value provides
 * information on why the translation aborted, in the format of a
 * DFSR/IFSR fault register, with the following caveats:
 *  * we honour the short vs long DFSR format differences.
 *  * the WnR bit is never set (the caller must do this).
 *  * for PMSAv5 based systems we don't bother to return a full FSR format
 *    value.
 *
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @mmu_idx: MMU index indicating required translation regime
 * @phys_ptr: set to the physical address corresponding to the virtual address
 * @attrs: set to the memory transaction attributes to use
 * @prot: set to the permissions for the page containing phys_ptr
 * @page_size: set to the size of the page containing phys_ptr
 * @fsr: set to the DFSR/IFSR value on failure
 */
static bool get_phys_addr(CPUARMState *env, target_ulong address,
                          int access_type, ARMMMUIdx mmu_idx,
                          hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                          target_ulong *page_size, uint32_t *fsr,
                          ARMMMUFaultInfo *fi)
{
    if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
        /* Call ourselves recursively to do the stage 1 and then stage 2
         * translations.
         */
        if (arm_feature(env, ARM_FEATURE_EL2)) {
            hwaddr ipa;
            int s2_prot;
            int ret;

            ret = get_phys_addr(env, address, access_type,
                                mmu_idx + ARMMMUIdx_S1NSE0, &ipa, attrs,
                                prot, page_size, fsr, fi);

            /* If S1 fails or S2 is disabled, return early.  */
            if (ret || regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
                *phys_ptr = ipa;
                return ret;
            }

            /* S1 is done. Now do S2 translation.  */
            ret = get_phys_addr_lpae(env, ipa, access_type, ARMMMUIdx_S2NS,
                                     phys_ptr, attrs, &s2_prot,
                                     page_size, fsr, fi);
            fi->s2addr = ipa;
            /* Combine the S1 and S2 perms.  */
            *prot &= s2_prot;
            return ret;
        } else {
            /*
             * For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
             */
            mmu_idx += ARMMMUIdx_S1NSE0;
        }
    }

    /* The page table entries may downgrade secure to non-secure, but
     * cannot upgrade a non-secure translation regime's attributes
     * to secure.
     */
    attrs->secure = regime_is_secure(env, mmu_idx);
    attrs->user = regime_is_user(env, mmu_idx);

    /* Fast Context Switch Extension. This doesn't exist at all in v8.
     * In v7 and earlier it affects all stage 1 translations.
     */
    if (address < 0x02000000 && mmu_idx != ARMMMUIdx_S2NS
        && !arm_feature(env, ARM_FEATURE_V8)) {
        if (regime_el(env, mmu_idx) == 3) {
            address += env->cp15.fcseidr_s;
        } else {
            address += env->cp15.fcseidr_ns;
        }
    }

    /* pmsav7 has special handling for when MPU is disabled so call it before
     * the common MMU/MPU disabled check below.
     */
    if (arm_feature(env, ARM_FEATURE_MPU) &&
        arm_feature(env, ARM_FEATURE_V7)) {
        *page_size = TARGET_PAGE_SIZE;
        return get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
                                    phys_ptr, prot, fsr);
    }

    if (regime_translation_disabled(env, mmu_idx)) {
        /* MMU/MPU disabled.  */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        *page_size = TARGET_PAGE_SIZE;
        return false;
    }

    if (arm_feature(env, ARM_FEATURE_MPU)) {
        /* Pre-v7 MPU */
        *page_size = TARGET_PAGE_SIZE;
        return get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
                                    phys_ptr, prot, fsr);
    }

    if (regime_using_lpae_format(env, mmu_idx)) {
        return get_phys_addr_lpae(env, address, access_type, mmu_idx, phys_ptr,
                                  attrs, prot, page_size, fsr, fi);
    } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
        return get_phys_addr_v6(env, address, access_type, mmu_idx, phys_ptr,
                                attrs, prot, page_size, fsr, fi);
    } else {
        return get_phys_addr_v5(env, address, access_type, mmu_idx, phys_ptr,
                                prot, page_size, fsr, fi);
    }
}

/* Walk the page table and (if the mapping exists) add the page
 * to the TLB. Return false on success, or true on failure. Populate
 * fsr with ARM DFSR/IFSR fault register format value on failure.
 */
bool arm_tlb_fill(CPUState *cs, vaddr address,
                  int access_type, int mmu_idx, uint32_t *fsr,
                  ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    int ret;
    MemTxAttrs attrs = {};

    ret = get_phys_addr(env, address, access_type, mmu_idx, &phys_addr,
                        &attrs, &prot, &page_size, fsr, fi);
    if (!ret) {
        /* Map a single [sub]page.  */
        phys_addr &= TARGET_PAGE_MASK;
        address &= TARGET_PAGE_MASK;
        tlb_set_page_with_attrs(cs, address, phys_addr, attrs,
                                prot, mmu_idx, page_size);
        return false;
    }

    return ret;
}

hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
                                         MemTxAttrs *attrs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    bool ret;
    uint32_t fsr;
    ARMMMUFaultInfo fi = {};

    *attrs = (MemTxAttrs) {};

    ret = get_phys_addr(env, addr, 0, cpu_mmu_index(env, false), &phys_addr,
                        attrs, &prot, &page_size, &fsr, &fi);

    if (ret) {
        return -1;
    }
    return phys_addr;
}

uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    switch (reg) {
    case 0: /* APSR */
        return xpsr_read(env) & 0xf8000000;
    case 1: /* IAPSR */
        return xpsr_read(env) & 0xf80001ff;
    case 2: /* EAPSR */
        return xpsr_read(env) & 0xff00fc00;
    case 3: /* xPSR */
        return xpsr_read(env) & 0xff00fdff;
    case 5: /* IPSR */
        return xpsr_read(env) & 0x000001ff;
    case 6: /* EPSR */
        return xpsr_read(env) & 0x0700fc00;
    case 7: /* IEPSR */
        return xpsr_read(env) & 0x0700edff;
    case 8: /* MSP */
        return env->v7m.current_sp ? env->v7m.other_sp : env->regs[13];
    case 9: /* PSP */
        return env->v7m.current_sp ? env->regs[13] : env->v7m.other_sp;
    case 16: /* PRIMASK */
        return (env->daif & PSTATE_I) != 0;
    case 17: /* BASEPRI */
    case 18: /* BASEPRI_MAX */
        return env->v7m.basepri;
    case 19: /* FAULTMASK */
        return (env->daif & PSTATE_F) != 0;
    case 20: /* CONTROL */
        return env->v7m.control;
    default:
        /* ??? For debugging only.  */
        cpu_abort(CPU(cpu), "Unimplemented system register read (%d)\n", reg);
        return 0;
    }
}

void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    switch (reg) {
    case 0: /* APSR */
        xpsr_write(env, val, 0xf8000000);
        break;
    case 1: /* IAPSR */
        xpsr_write(env, val, 0xf8000000);
        break;
    case 2: /* EAPSR */
        xpsr_write(env, val, 0xfe00fc00);
        break;
    case 3: /* xPSR */
        xpsr_write(env, val, 0xfe00fc00);
        break;
    case 5: /* IPSR */
        /* IPSR bits are readonly.  */
        break;
    case 6: /* EPSR */
        xpsr_write(env, val, 0x0600fc00);
        break;
    case 7: /* IEPSR */
        xpsr_write(env, val, 0x0600fc00);
        break;
    case 8: /* MSP */
        if (env->v7m.current_sp) {
            env->v7m.other_sp = val;
        } else {
            env->regs[13] = val;
        }
        break;
    case 9: /* PSP */
        if (env->v7m.current_sp) {
            env->regs[13] = val;
        } else {
            env->v7m.other_sp = val;
        }
        break;
    case 16: /* PRIMASK */
        if (val & 1) {
            env->daif |= PSTATE_I;
        } else {
            env->daif &= ~PSTATE_I;
        }
        break;
    case 17: /* BASEPRI */
        env->v7m.basepri = val & 0xff;
        break;
    case 18: /* BASEPRI_MAX */
        val &= 0xff;
        if (val != 0 && (val < env->v7m.basepri || env->v7m.basepri == 0)) {
            env->v7m.basepri = val;
        }
8287        break;
8288    case 19: /* FAULTMASK */
8289        if (val & 1) {
8290            env->daif |= PSTATE_F;
8291        } else {
8292            env->daif &= ~PSTATE_F;
8293        }
8294        break;
8295    case 20: /* CONTROL */
8296        env->v7m.control = val & 3;
8297        switch_v7m_sp(env, (val & 2) != 0);
8298        break;
8299    default:
8300        /* ??? For debugging only.  */
8301        cpu_abort(CPU(cpu), "Unimplemented system register write (%d)\n", reg);
8302        return;
8303    }
8304}
8305
8306#endif
8307
8308void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
8309{
8310    /* Implement DC ZVA, which zeroes a fixed-length block of memory.
8311     * Note that we do not implement the (architecturally mandated)
8312     * alignment fault for attempts to use this on Device memory
8313     * (which matches the usual QEMU behaviour of not implementing either
8314     * alignment faults or any memory attribute handling).
8315     */
8316
8317    ARMCPU *cpu = arm_env_get_cpu(env);
8318    uint64_t blocklen = 4 << cpu->dcz_blocksize;
8319    uint64_t vaddr = vaddr_in & ~(blocklen - 1);
8320
8321#ifndef CONFIG_USER_ONLY
8322    {
8323        /* Slightly awkwardly, QEMU's TARGET_PAGE_SIZE may be less than
8324         * the block size so we might have to do more than one TLB lookup.
8325         * We know that in fact for any v8 CPU the page size is at least 4K
8326         * and the block size must be 2K or less, but TARGET_PAGE_SIZE is only
8327         * 1K as an artefact of legacy v5 subpage support being present in the
8328         * same QEMU executable.
8329         */
8330        int maxidx = DIV_ROUND_UP(blocklen, TARGET_PAGE_SIZE);
8331        void *hostaddr[maxidx];
8332        int try, i;
8333        unsigned mmu_idx = cpu_mmu_index(env, false);
8334        TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
8335
8336        for (try = 0; try < 2; try++) {
8337
8338            for (i = 0; i < maxidx; i++) {
8339                hostaddr[i] = tlb_vaddr_to_host(env,
8340                                                vaddr + TARGET_PAGE_SIZE * i,
8341                                                1, mmu_idx);
8342                if (!hostaddr[i]) {
8343                    break;
8344                }
8345            }
8346            if (i == maxidx) {
8347                /* If it's all in the TLB it's fair game for just writing to;
8348                 * we know we don't need to update dirty status, etc.
8349                 */
8350                for (i = 0; i < maxidx - 1; i++) {
8351                    memset(hostaddr[i], 0, TARGET_PAGE_SIZE);
8352                }
8353                memset(hostaddr[i], 0, blocklen - (i * TARGET_PAGE_SIZE));
8354                return;
8355            }
8356            /* OK, try a store and see if we can populate the tlb. This
8357             * might cause an exception if the memory isn't writable,
8358             * in which case we will longjmp out of here. We must for
8359             * this purpose use the actual register value passed to us
8360             * so that we get the fault address right.
8361             */
8362            helper_ret_stb_mmu(env, vaddr_in, 0, oi, GETRA());
8363            /* Now we can populate the other TLB entries, if any */
8364            for (i = 0; i < maxidx; i++) {
8365                uint64_t va = vaddr + TARGET_PAGE_SIZE * i;
8366                if (va != (vaddr_in & TARGET_PAGE_MASK)) {
8367                    helper_ret_stb_mmu(env, va, 0, oi, GETRA());
8368                }
8369            }
8370        }
8371
8372        /* Slow path (probably attempt to do this to an I/O device or
8373         * similar, or clearing of a block of code we have translations
8374         * cached for). Just do a series of byte writes as the architecture
8375         * demands. It's not worth trying to use a cpu_physical_memory_map(),
8376         * memset(), unmap() sequence here because:
8377         *  + we'd need to account for the blocksize being larger than a page
8378         *  + the direct-RAM access case is almost always going to be dealt
8379         *    with in the fastpath code above, so there's no speed benefit
8380         *  + we would have to deal with the map returning NULL because the
8381         *    bounce buffer was in use
8382         */
8383        for (i = 0; i < blocklen; i++) {
8384            helper_ret_stb_mmu(env, vaddr + i, 0, oi, GETRA());
8385        }
8386    }
8387#else
8388    memset(g2h(vaddr), 0, blocklen);
8389#endif
8390}
8391
8392/* Note that signed overflow is undefined in C.  The following routines are
8393   careful to use unsigned types where modulo arithmetic is required.
8394   Failure to do so _will_ break on newer gcc.  */
8395
8396/* Signed saturating arithmetic.  */
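/* Overflow detection idiom used below: signed addition overflows only
 * when both operands have the same sign and the result's sign differs
 * from it; signed subtraction overflows only when the operands' signs
 * differ and the result's sign differs from the first operand's.
 */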
8397
8398/* Perform 16-bit signed saturating addition.  */
8399static inline uint16_t add16_sat(uint16_t a, uint16_t b)
8400{
8401    uint16_t res;
8402
8403    res = a + b;
8404    if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
8405        if (a & 0x8000)
8406            res = 0x8000;
8407        else
8408            res = 0x7fff;
8409    }
8410    return res;
8411}
8412
8413/* Perform 8-bit signed saturating addition.  */
8414static inline uint8_t add8_sat(uint8_t a, uint8_t b)
8415{
8416    uint8_t res;
8417
8418    res = a + b;
8419    if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
8420        if (a & 0x80)
8421            res = 0x80;
8422        else
8423            res = 0x7f;
8424    }
8425    return res;
8426}
8427
8428/* Perform 16-bit signed saturating subtraction.  */
8429static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
8430{
8431    uint16_t res;
8432
8433    res = a - b;
8434    if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
8435        if (a & 0x8000)
8436            res = 0x8000;
8437        else
8438            res = 0x7fff;
8439    }
8440    return res;
8441}
8442
8443/* Perform 8-bit signed saturating subtraction.  */
8444static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
8445{
8446    uint8_t res;
8447
8448    res = a - b;
8449    if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
8450        if (a & 0x80)
8451            res = 0x80;
8452        else
8453            res = 0x7f;
8454    }
8455    return res;
8456}
8457
8458#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
8459#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
8460#define ADD8(a, b, n)  RESULT(add8_sat(a, b), n, 8);
8461#define SUB8(a, b, n)  RESULT(sub8_sat(a, b), n, 8);
8462#define PFX q
8463
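/* op_addsub.h is a template: each inclusion expands the ADD16, SUB16,
 * ADD8 and SUB8 macros together with PFX into one family of parallel
 * add/subtract helpers (here the "q"-prefixed signed saturating ones)
 * and then undefines the macros ready for the next instantiation.
 */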
8464#include "op_addsub.h"
8465
8466/* Unsigned saturating arithmetic.  */
8467static inline uint16_t add16_usat(uint16_t a, uint16_t b)
8468{
8469    uint16_t res;
8470    res = a + b;
8471    if (res < a)
8472        res = 0xffff;
8473    return res;
8474}
8475
8476static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
8477{
8478    if (a > b)
8479        return a - b;
8480    else
8481        return 0;
8482}
8483
8484static inline uint8_t add8_usat(uint8_t a, uint8_t b)
8485{
8486    uint8_t res;
8487    res = a + b;
8488    if (res < a)
8489        res = 0xff;
8490    return res;
8491}
8492
8493static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
8494{
8495    if (a > b)
8496        return a - b;
8497    else
8498        return 0;
8499}
8500
8501#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
8502#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
8503#define ADD8(a, b, n)  RESULT(add8_usat(a, b), n, 8);
8504#define SUB8(a, b, n)  RESULT(sub8_usat(a, b), n, 8);
8505#define PFX uq
8506
8507#include "op_addsub.h"
8508
8509/* Signed modulo arithmetic.  */
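/* The "ge" variable referenced by these macros accumulates the CPSR.GE
 * flag bits: a signed lane sets its GE bits when the full-width result
 * is >= 0, while the unsigned lanes below set them from the carry out
 * (addition) or from the absence of a borrow (subtraction).
 */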
8510#define SARITH16(a, b, n, op) do { \
8511    int32_t sum; \
8512    sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
8513    RESULT(sum, n, 16); \
8514    if (sum >= 0) \
8515        ge |= 3 << (n * 2); \
8516    } while(0)
8517
8518#define SARITH8(a, b, n, op) do { \
8519    int32_t sum; \
8520    sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
8521    RESULT(sum, n, 8); \
8522    if (sum >= 0) \
8523        ge |= 1 << n; \
8524    } while(0)
8525
8527#define ADD16(a, b, n) SARITH16(a, b, n, +)
8528#define SUB16(a, b, n) SARITH16(a, b, n, -)
8529#define ADD8(a, b, n)  SARITH8(a, b, n, +)
8530#define SUB8(a, b, n)  SARITH8(a, b, n, -)
8531#define PFX s
8532#define ARITH_GE
8533
8534#include "op_addsub.h"
8535
8536/* Unsigned modulo arithmetic.  */
8537#define ADD16(a, b, n) do { \
8538    uint32_t sum; \
8539    sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
8540    RESULT(sum, n, 16); \
8541    if ((sum >> 16) == 1) \
8542        ge |= 3 << (n * 2); \
8543    } while(0)
8544
8545#define ADD8(a, b, n) do { \
8546    uint32_t sum; \
8547    sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
8548    RESULT(sum, n, 8); \
8549    if ((sum >> 8) == 1) \
8550        ge |= 1 << n; \
8551    } while(0)
8552
8553#define SUB16(a, b, n) do { \
8554    uint32_t sum; \
8555    sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
8556    RESULT(sum, n, 16); \
8557    if ((sum >> 16) == 0) \
8558        ge |= 3 << (n * 2); \
8559    } while(0)
8560
8561#define SUB8(a, b, n) do { \
8562    uint32_t sum; \
8563    sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
8564    RESULT(sum, n, 8); \
8565    if ((sum >> 8) == 0) \
8566        ge |= 1 << n; \
8567    } while(0)
8568
8569#define PFX u
8570#define ARITH_GE
8571
8572#include "op_addsub.h"
8573
8574/* Halved signed arithmetic.  */
8575#define ADD16(a, b, n) \
8576  RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
8577#define SUB16(a, b, n) \
8578  RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
8579#define ADD8(a, b, n) \
8580  RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
8581#define SUB8(a, b, n) \
8582  RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
8583#define PFX sh
8584
8585#include "op_addsub.h"
8586
8587/* Halved unsigned arithmetic.  */
8588#define ADD16(a, b, n) \
8589  RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
8590#define SUB16(a, b, n) \
8591  RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
8592#define ADD8(a, b, n) \
8593  RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
8594#define SUB8(a, b, n) \
8595  RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
8596#define PFX uh
8597
8598#include "op_addsub.h"
8599
8600static inline uint8_t do_usad(uint8_t a, uint8_t b)
8601{
8602    if (a > b)
8603        return a - b;
8604    else
8605        return b - a;
8606}
8607
8608/* Unsigned sum of absolute byte differences.  */
8609uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
8610{
8611    uint32_t sum;
8612    sum = do_usad(a, b);
8613    sum += do_usad(a >> 8, b >> 8);
8614    sum += do_usad(a >> 16, b >> 16);
8615    sum += do_usad(a >> 24, b >> 24);
8616    return sum;
8617}
8618
8619/* For ARMv6 SEL instruction.  */
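/* "flags" holds the four CPSR.GE bits; each set bit selects the
 * corresponding byte of the result from a, otherwise from b.
 */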
8620uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
8621{
8622    uint32_t mask;
8623
8624    mask = 0;
8625    if (flags & 1)
8626        mask |= 0xff;
8627    if (flags & 2)
8628        mask |= 0xff00;
8629    if (flags & 4)
8630        mask |= 0xff0000;
8631    if (flags & 8)
8632        mask |= 0xff000000;
8633    return (a & mask) | (b & ~mask);
8634}
8635
8636/* VFP support.  We follow the convention used for VFP instructions:
8637   Single precision routines have an "s" suffix, double precision a
8638   "d" suffix.  */
8639
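/* The target bits used below are the FPSCR cumulative exception flags:
 * IOC (bit 0), DZC (1), OFC (2), UFC (3), IXC (4) and IDC (7).
 */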
8640/* Convert host exception flags to vfp form.  */
8641static inline int vfp_exceptbits_from_host(int host_bits)
8642{
8643    int target_bits = 0;
8644
8645    if (host_bits & float_flag_invalid)
8646        target_bits |= 1;
8647    if (host_bits & float_flag_divbyzero)
8648        target_bits |= 2;
8649    if (host_bits & float_flag_overflow)
8650        target_bits |= 4;
8651    if (host_bits & (float_flag_underflow | float_flag_output_denormal))
8652        target_bits |= 8;
8653    if (host_bits & float_flag_inexact)
8654        target_bits |= 0x10;
8655    if (host_bits & float_flag_input_denormal)
8656        target_bits |= 0x80;
8657    return target_bits;
8658}
8659
8660uint32_t HELPER(vfp_get_fpscr)(CPUARMState *env)
8661{
8662    int i;
8663    uint32_t fpscr;
8664
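    /* Len (FPSCR bits [18:16]) and Stride (bits [21:20]) are kept in
     * vec_len and vec_stride rather than in the stored FPSCR word;
     * the 0xffc8ffff mask clears exactly those fields before they are
     * merged back in.
     */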
8665    fpscr = (env->vfp.xregs[ARM_VFP_FPSCR] & 0xffc8ffff)
8666            | (env->vfp.vec_len << 16)
8667            | (env->vfp.vec_stride << 20);
8668    i = get_float_exception_flags(&env->vfp.fp_status);
8669    i |= get_float_exception_flags(&env->vfp.standard_fp_status);
8670    fpscr |= vfp_exceptbits_from_host(i);
8671    return fpscr;
8672}
8673
8674uint32_t vfp_get_fpscr(CPUARMState *env)
8675{
8676    return HELPER(vfp_get_fpscr)(env);
8677}
8678
8679/* Convert vfp exception flags to target form.  */
8680static inline int vfp_exceptbits_to_host(int target_bits)
8681{
8682    int host_bits = 0;
8683
8684    if (target_bits & 1)
8685        host_bits |= float_flag_invalid;
8686    if (target_bits & 2)
8687        host_bits |= float_flag_divbyzero;
8688    if (target_bits & 4)
8689        host_bits |= float_flag_overflow;
8690    if (target_bits & 8)
8691        host_bits |= float_flag_underflow;
8692    if (target_bits & 0x10)
8693        host_bits |= float_flag_inexact;
8694    if (target_bits & 0x80)
8695        host_bits |= float_flag_input_denormal;
8696    return host_bits;
8697}
8698
8699void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
8700{
8701    int i;
8702    uint32_t changed;
8703
8704    changed = env->vfp.xregs[ARM_VFP_FPSCR];
8705    env->vfp.xregs[ARM_VFP_FPSCR] = (val & 0xffc8ffff);
8706    env->vfp.vec_len = (val >> 16) & 7;
8707    env->vfp.vec_stride = (val >> 20) & 3;
8708
8709    changed ^= val;
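    /* FPSCR.RMode lives in bits [23:22]; translate the architectural
     * encoding to a softfloat rounding mode only when it changed.
     */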
8710    if (changed & (3 << 22)) {
8711        i = (val >> 22) & 3;
8712        switch (i) {
8713        case FPROUNDING_TIEEVEN:
8714            i = float_round_nearest_even;
8715            break;
8716        case FPROUNDING_POSINF:
8717            i = float_round_up;
8718            break;
8719        case FPROUNDING_NEGINF:
8720            i = float_round_down;
8721            break;
8722        case FPROUNDING_ZERO:
8723            i = float_round_to_zero;
8724            break;
8725        }
8726        set_float_rounding_mode(i, &env->vfp.fp_status);
8727    }
8728    if (changed & (1 << 24)) {
8729        set_flush_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
8730        set_flush_inputs_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
8731    }
8732    if (changed & (1 << 25))
8733        set_default_nan_mode((val & (1 << 25)) != 0, &env->vfp.fp_status);
8734
8735    i = vfp_exceptbits_to_host(val);
8736    set_float_exception_flags(i, &env->vfp.fp_status);
8737    set_float_exception_flags(0, &env->vfp.standard_fp_status);
8738}
8739
8740void vfp_set_fpscr(CPUARMState *env, uint32_t val)
8741{
8742    HELPER(vfp_set_fpscr)(env, val);
8743}
8744
8745#define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p))
8746
8747#define VFP_BINOP(name) \
8748float32 VFP_HELPER(name, s)(float32 a, float32 b, void *fpstp) \
8749{ \
8750    float_status *fpst = fpstp; \
8751    return float32_ ## name(a, b, fpst); \
8752} \
8753float64 VFP_HELPER(name, d)(float64 a, float64 b, void *fpstp) \
8754{ \
8755    float_status *fpst = fpstp; \
8756    return float64_ ## name(a, b, fpst); \
8757}
8758VFP_BINOP(add)
8759VFP_BINOP(sub)
8760VFP_BINOP(mul)
8761VFP_BINOP(div)
8762VFP_BINOP(min)
8763VFP_BINOP(max)
8764VFP_BINOP(minnum)
8765VFP_BINOP(maxnum)
8766#undef VFP_BINOP
8767
8768float32 VFP_HELPER(neg, s)(float32 a)
8769{
8770    return float32_chs(a);
8771}
8772
8773float64 VFP_HELPER(neg, d)(float64 a)
8774{
8775    return float64_chs(a);
8776}
8777
8778float32 VFP_HELPER(abs, s)(float32 a)
8779{
8780    return float32_abs(a);
8781}
8782
8783float64 VFP_HELPER(abs, d)(float64 a)
8784{
8785    return float64_abs(a);
8786}
8787
8788float32 VFP_HELPER(sqrt, s)(float32 a, CPUARMState *env)
8789{
8790    return float32_sqrt(a, &env->vfp.fp_status);
8791}
8792
8793float64 VFP_HELPER(sqrt, d)(float64 a, CPUARMState *env)
8794{
8795    return float64_sqrt(a, &env->vfp.fp_status);
8796}
8797
8798/* XXX: check quiet/signaling case */
8799#define DO_VFP_cmp(p, type) \
8800void VFP_HELPER(cmp, p)(type a, type b, CPUARMState *env)  \
8801{ \
8802    uint32_t flags; \
8803    switch(type ## _compare_quiet(a, b, &env->vfp.fp_status)) { \
8804    case 0: flags = 0x6; break; \
8805    case -1: flags = 0x8; break; \
8806    case 1: flags = 0x2; break; \
8807    default: case 2: flags = 0x3; break; \
8808    } \
8809    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
8810        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
8811} \
8812void VFP_HELPER(cmpe, p)(type a, type b, CPUARMState *env) \
8813{ \
8814    uint32_t flags; \
8815    switch(type ## _compare(a, b, &env->vfp.fp_status)) { \
8816    case 0: flags = 0x6; break; \
8817    case -1: flags = 0x8; break; \
8818    case 1: flags = 0x2; break; \
8819    default: case 2: flags = 0x3; break; \
8820    } \
8821    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
8822        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
8823}
8824DO_VFP_cmp(s, float32)
8825DO_VFP_cmp(d, float64)
8826#undef DO_VFP_cmp
8827
8828/* Integer to float and float to integer conversions */
8829
8830#define CONV_ITOF(name, fsz, sign) \
8831    float##fsz HELPER(name)(uint32_t x, void *fpstp) \
8832{ \
8833    float_status *fpst = fpstp; \
8834    return sign##int32_to_##float##fsz((sign##int32_t)x, fpst); \
8835}
8836
8837#define CONV_FTOI(name, fsz, sign, round) \
8838uint32_t HELPER(name)(float##fsz x, void *fpstp) \
8839{ \
8840    float_status *fpst = fpstp; \
8841    if (float##fsz##_is_any_nan(x)) { \
8842        float_raise(float_flag_invalid, fpst); \
8843        return 0; \
8844    } \
8845    return float##fsz##_to_##sign##int32##round(x, fpst); \
8846}
8847
8848#define FLOAT_CONVS(name, p, fsz, sign) \
8849CONV_ITOF(vfp_##name##to##p, fsz, sign) \
8850CONV_FTOI(vfp_to##name##p, fsz, sign, ) \
8851CONV_FTOI(vfp_to##name##z##p, fsz, sign, _round_to_zero)
8852
8853FLOAT_CONVS(si, s, 32, )
8854FLOAT_CONVS(si, d, 64, )
8855FLOAT_CONVS(ui, s, 32, u)
8856FLOAT_CONVS(ui, d, 64, u)
8857
8858#undef CONV_ITOF
8859#undef CONV_FTOI
8860#undef FLOAT_CONVS
8861
8862/* floating point conversion */
8863float64 VFP_HELPER(fcvtd, s)(float32 x, CPUARMState *env)
8864{
8865    float64 r = float32_to_float64(x, &env->vfp.fp_status);
8866    /* ARM requires that S<->D conversion of any kind of NaN generates
8867     * a quiet NaN by forcing the most significant frac bit to 1.
8868     */
8869    return float64_maybe_silence_nan(r);
8870}
8871
8872float32 VFP_HELPER(fcvts, d)(float64 x, CPUARMState *env)
8873{
8874    float32 r = float64_to_float32(x, &env->vfp.fp_status);
8875    /* ARM requires that S<->D conversion of any kind of NaN generates
8876     * a quiet NaN by forcing the most significant frac bit to 1.
8877     */
8878    return float32_maybe_silence_nan(r);
8879}
8880
8881/* VFP3 fixed point conversion.  */
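/* Fixed-point conversion is implemented as an integer conversion
 * combined with scalbn: int-to-float scales the converted value down
 * by 2^shift, while float-to-int scales the input up by 2^shift
 * before the final integer conversion.
 */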
8882#define VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
8883float##fsz HELPER(vfp_##name##to##p)(uint##isz##_t  x, uint32_t shift, \
8884                                     void *fpstp) \
8885{ \
8886    float_status *fpst = fpstp; \
8887    float##fsz tmp; \
8888    tmp = itype##_to_##float##fsz(x, fpst); \
8889    return float##fsz##_scalbn(tmp, -(int)shift, fpst); \
8890}
8891
8892/* Notice that we want only input-denormal exception flags from the
8893 * scalbn operation: the other possible flags (overflow+inexact if
8894 * we overflow to infinity, output-denormal) aren't correct for the
8895 * complete scale-and-convert operation.
8896 */
8897#define VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, round) \
8898uint##isz##_t HELPER(vfp_to##name##p##round)(float##fsz x, \
8899                                             uint32_t shift, \
8900                                             void *fpstp) \
8901{ \
8902    float_status *fpst = fpstp; \
8903    int old_exc_flags = get_float_exception_flags(fpst); \
8904    float##fsz tmp; \
8905    if (float##fsz##_is_any_nan(x)) { \
8906        float_raise(float_flag_invalid, fpst); \
8907        return 0; \
8908    } \
8909    tmp = float##fsz##_scalbn(x, shift, fpst); \
8910    old_exc_flags |= get_float_exception_flags(fpst) \
8911        & float_flag_input_denormal; \
8912    set_float_exception_flags(old_exc_flags, fpst); \
8913    return float##fsz##_to_##itype##round(tmp, fpst); \
8914}
8915
8916#define VFP_CONV_FIX(name, p, fsz, isz, itype)                   \
8917VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype)                     \
8918VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, _round_to_zero) \
8919VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, )
8920
8921#define VFP_CONV_FIX_A64(name, p, fsz, isz, itype)               \
8922VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype)                     \
8923VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, )
8924
8925VFP_CONV_FIX(sh, d, 64, 64, int16)
8926VFP_CONV_FIX(sl, d, 64, 64, int32)
8927VFP_CONV_FIX_A64(sq, d, 64, 64, int64)
8928VFP_CONV_FIX(uh, d, 64, 64, uint16)
8929VFP_CONV_FIX(ul, d, 64, 64, uint32)
8930VFP_CONV_FIX_A64(uq, d, 64, 64, uint64)
8931VFP_CONV_FIX(sh, s, 32, 32, int16)
8932VFP_CONV_FIX(sl, s, 32, 32, int32)
8933VFP_CONV_FIX_A64(sq, s, 32, 64, int64)
8934VFP_CONV_FIX(uh, s, 32, 32, uint16)
8935VFP_CONV_FIX(ul, s, 32, 32, uint32)
8936VFP_CONV_FIX_A64(uq, s, 32, 64, uint64)
8937#undef VFP_CONV_FIX
8938#undef VFP_CONV_FIX_FLOAT
8939#undef VFP_CONV_FLOAT_FIX_ROUND
8940
8941/* Set the current fp rounding mode and return the old one.
8942 * The argument is a softfloat float_round_ value.
8943 */
8944uint32_t HELPER(set_rmode)(uint32_t rmode, CPUARMState *env)
8945{
8946    float_status *fp_status = &env->vfp.fp_status;
8947
8948    uint32_t prev_rmode = get_float_rounding_mode(fp_status);
8949    set_float_rounding_mode(rmode, fp_status);
8950
8951    return prev_rmode;
8952}
8953
8954/* Set the current fp rounding mode in the standard fp status and return
8955 * the old one. This is for NEON instructions that need to change the
8956 * rounding mode but wish to use the standard FPSCR values for everything
8957 * else. Always set the rounding mode back to the correct value after
8958 * modifying it.
8959 * The argument is a softfloat float_round_ value.
8960 */
8961uint32_t HELPER(set_neon_rmode)(uint32_t rmode, CPUARMState *env)
8962{
8963    float_status *fp_status = &env->vfp.standard_fp_status;
8964
8965    uint32_t prev_rmode = get_float_rounding_mode(fp_status);
8966    set_float_rounding_mode(rmode, fp_status);
8967
8968    return prev_rmode;
8969}
8970
8971/* Half precision conversions.  */
8972static float32 do_fcvt_f16_to_f32(uint32_t a, CPUARMState *env, float_status *s)
8973{
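    /* FPSCR.AHP (bit 26) selects the Arm alternative half-precision
     * format; when it is clear we follow IEEE semantics, which for the
     * conversions here includes quieting any NaN result.
     */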
8974    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
8975    float32 r = float16_to_float32(make_float16(a), ieee, s);
8976    if (ieee) {
8977        return float32_maybe_silence_nan(r);
8978    }
8979    return r;
8980}
8981
8982static uint32_t do_fcvt_f32_to_f16(float32 a, CPUARMState *env, float_status *s)
8983{
8984    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
8985    float16 r = float32_to_float16(a, ieee, s);
8986    if (ieee) {
8987        r = float16_maybe_silence_nan(r);
8988    }
8989    return float16_val(r);
8990}
8991
8992float32 HELPER(neon_fcvt_f16_to_f32)(uint32_t a, CPUARMState *env)
8993{
8994    return do_fcvt_f16_to_f32(a, env, &env->vfp.standard_fp_status);
8995}
8996
8997uint32_t HELPER(neon_fcvt_f32_to_f16)(float32 a, CPUARMState *env)
8998{
8999    return do_fcvt_f32_to_f16(a, env, &env->vfp.standard_fp_status);
9000}
9001
9002float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, CPUARMState *env)
9003{
9004    return do_fcvt_f16_to_f32(a, env, &env->vfp.fp_status);
9005}
9006
9007uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, CPUARMState *env)
9008{
9009    return do_fcvt_f32_to_f16(a, env, &env->vfp.fp_status);
9010}
9011
9012float64 HELPER(vfp_fcvt_f16_to_f64)(uint32_t a, CPUARMState *env)
9013{
9014    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
9015    float64 r = float16_to_float64(make_float16(a), ieee, &env->vfp.fp_status);
9016    if (ieee) {
9017        return float64_maybe_silence_nan(r);
9018    }
9019    return r;
9020}
9021
9022uint32_t HELPER(vfp_fcvt_f64_to_f16)(float64 a, CPUARMState *env)
9023{
9024    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
9025    float16 r = float64_to_float16(a, ieee, &env->vfp.fp_status);
9026    if (ieee) {
9027        r = float16_maybe_silence_nan(r);
9028    }
9029    return float16_val(r);
9030}
9031
9032#define float32_two make_float32(0x40000000)
9033#define float32_three make_float32(0x40400000)
9034#define float32_one_point_five make_float32(0x3fc00000)
9035
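/* VRECPS returns 2 - a * b and VRSQRTS (3 - a * b) / 2: the correction
 * terms of a Newton-Raphson step. Since infinity * 0 would produce a
 * default NaN, the architecture defines these operand combinations to
 * return the exact constant instead.
 */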
9036float32 HELPER(recps_f32)(float32 a, float32 b, CPUARMState *env)
9037{
9038    float_status *s = &env->vfp.standard_fp_status;
9039    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
9040        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
9041        if (!(float32_is_zero(a) || float32_is_zero(b))) {
9042            float_raise(float_flag_input_denormal, s);
9043        }
9044        return float32_two;
9045    }
9046    return float32_sub(float32_two, float32_mul(a, b, s), s);
9047}
9048
9049float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUARMState *env)
9050{
9051    float_status *s = &env->vfp.standard_fp_status;
9052    float32 product;
9053    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
9054        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
9055        if (!(float32_is_zero(a) || float32_is_zero(b))) {
9056            float_raise(float_flag_input_denormal, s);
9057        }
9058        return float32_one_point_five;
9059    }
9060    product = float32_mul(a, b, s);
9061    return float32_div(float32_sub(float32_three, product, s), float32_two, s);
9062}
9063
9064/* NEON helpers.  */
9065
9066/* Constants 256 and 512 are used in some helpers; we avoid relying on
9067 * int->float conversions at run-time.  */
9068#define float64_256 make_float64(0x4070000000000000LL)
9069#define float64_512 make_float64(0x4080000000000000LL)
9070#define float32_maxnorm make_float32(0x7f7fffff)
9071#define float64_maxnorm make_float64(0x7fefffffffffffffLL)
9072
9073/* Reciprocal functions
9074 *
9075 * The algorithm that must be used to calculate the estimate
9076 * is specified by the ARM ARM, see FPRecipEstimate()
9077 */
9078
9079static float64 recip_estimate(float64 a, float_status *real_fp_status)
9080{
9081    /* These calculations mustn't set any fp exception flags,
9082     * so we use a local copy of the fp_status.
9083     */
9084    float_status dummy_status = *real_fp_status;
9085    float_status *s = &dummy_status;
9086    /* q = (int)(a * 512.0) */
9087    float64 q = float64_mul(float64_512, a, s);
9088    int64_t q_int = float64_to_int64_round_to_zero(q, s);
9089
9090    /* r = 1.0 / (((double)q + 0.5) / 512.0) */
9091    q = int64_to_float64(q_int, s);
9092    q = float64_add(q, float64_half, s);
9093    q = float64_div(q, float64_512, s);
9094    q = float64_div(float64_one, q, s);
9095
9096    /* s = (int)(256.0 * r + 0.5) */
9097    q = float64_mul(q, float64_256, s);
9098    q = float64_add(q, float64_half, s);
9099    q_int = float64_to_int64_round_to_zero(q, s);
9100
9101    /* return (double)s / 256.0 */
9102    return float64_div(int64_to_float64(q_int, s), float64_256, s);
9103}
9104
9105/* Common wrapper to call recip_estimate */
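/* "off" is 2 * exponent_bias - 1 for the destination format (253 for
 * single precision, 2045 for double): the result exponent is formed
 * as off - input_exponent, i.e. the negated exponent rebiased.
 */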
9106static float64 call_recip_estimate(float64 num, int off, float_status *fpst)
9107{
9108    uint64_t val64 = float64_val(num);
9109    uint64_t frac = extract64(val64, 0, 52);
9110    int64_t exp = extract64(val64, 52, 11);
9111    uint64_t sbit;
9112    float64 scaled, estimate;
9113
9114    /* Generate the scaled number for the estimate function */
9115    if (exp == 0) {
9116        if (extract64(frac, 51, 1) == 0) {
9117            exp = -1;
9118            frac = extract64(frac, 0, 50) << 2;
9119        } else {
9120            frac = extract64(frac, 0, 51) << 1;
9121        }
9122    }
9123
9124    /* scaled = '0' : '01111111110' : fraction<51:44> : Zeros(44); */
9125    scaled = make_float64((0x3feULL << 52)
9126                          | extract64(frac, 44, 8) << 44);
9127
9128    estimate = recip_estimate(scaled, fpst);
9129
9130    /* Build new result */
9131    val64 = float64_val(estimate);
9132    sbit = 0x8000000000000000ULL & val64;
9133    exp = off - exp;
9134    frac = extract64(val64, 0, 52);
9135
9136    if (exp == 0) {
9137        frac = 1ULL << 51 | extract64(frac, 1, 51);
9138    } else if (exp == -1) {
9139        frac = 1ULL << 50 | extract64(frac, 2, 50);
9140        exp = 0;
9141    }
9142
9143    return make_float64(sbit | (exp << 52) | frac);
9144}
9145
9146static bool round_to_inf(float_status *fpst, bool sign_bit)
9147{
9148    switch (fpst->float_rounding_mode) {
9149    case float_round_nearest_even: /* Round to Nearest */
9150        return true;
9151    case float_round_up: /* Round to +Inf */
9152        return !sign_bit;
9153    case float_round_down: /* Round to -Inf */
9154        return sign_bit;
9155    case float_round_to_zero: /* Round to Zero */
9156        return false;
9157    }
9158
9159    g_assert_not_reached();
9160}
9161
9162float32 HELPER(recpe_f32)(float32 input, void *fpstp)
9163{
9164    float_status *fpst = fpstp;
9165    float32 f32 = float32_squash_input_denormal(input, fpst);
9166    uint32_t f32_val = float32_val(f32);
9167    uint32_t f32_sbit = 0x80000000ULL & f32_val;
9168    int32_t f32_exp = extract32(f32_val, 23, 8);
9169    uint32_t f32_frac = extract32(f32_val, 0, 23);
9170    float64 f64, r64;
9171    uint64_t r64_val;
9172    int64_t r64_exp;
9173    uint64_t r64_frac;
9174
9175    if (float32_is_any_nan(f32)) {
9176        float32 nan = f32;
9177        if (float32_is_signaling_nan(f32)) {
9178            float_raise(float_flag_invalid, fpst);
9179            nan = float32_maybe_silence_nan(f32);
9180        }
9181        if (fpst->default_nan_mode) {
9182            nan = float32_default_nan;
9183        }
9184        return nan;
9185    } else if (float32_is_infinity(f32)) {
9186        return float32_set_sign(float32_zero, float32_is_neg(f32));
9187    } else if (float32_is_zero(f32)) {
9188        float_raise(float_flag_divbyzero, fpst);
9189        return float32_set_sign(float32_infinity, float32_is_neg(f32));
9190    } else if ((f32_val & ~(1ULL << 31)) < (1ULL << 21)) {
9191        /* Abs(value) < 2.0^-128 */
9192        float_raise(float_flag_overflow | float_flag_inexact, fpst);
9193        if (round_to_inf(fpst, f32_sbit)) {
9194            return float32_set_sign(float32_infinity, float32_is_neg(f32));
9195        } else {
9196            return float32_set_sign(float32_maxnorm, float32_is_neg(f32));
9197        }
9198    } else if (f32_exp >= 253 && fpst->flush_to_zero) {
9199        float_raise(float_flag_underflow, fpst);
9200        return float32_set_sign(float32_zero, float32_is_neg(f32));
9201    }
9202
9204    f64 = make_float64(((int64_t)(f32_exp) << 52) | (int64_t)(f32_frac) << 29);
9205    r64 = call_recip_estimate(f64, 253, fpst);
9206    r64_val = float64_val(r64);
9207    r64_exp = extract64(r64_val, 52, 11);
9208    r64_frac = extract64(r64_val, 0, 52);
9209
9210    /* result = sign : result_exp<7:0> : fraction<51:29>; */
9211    return make_float32(f32_sbit |
9212                        (r64_exp & 0xff) << 23 |
9213                        extract64(r64_frac, 29, 24));
9214}
9215
9216float64 HELPER(recpe_f64)(float64 input, void *fpstp)
9217{
9218    float_status *fpst = fpstp;
9219    float64 f64 = float64_squash_input_denormal(input, fpst);
9220    uint64_t f64_val = float64_val(f64);
9221    uint64_t f64_sbit = 0x8000000000000000ULL & f64_val;
9222    int64_t f64_exp = extract64(f64_val, 52, 11);
9223    float64 r64;
9224    uint64_t r64_val;
9225    int64_t r64_exp;
9226    uint64_t r64_frac;
9227
9228    /* Deal with any special cases */
9229    if (float64_is_any_nan(f64)) {
9230        float64 nan = f64;
9231        if (float64_is_signaling_nan(f64)) {
9232            float_raise(float_flag_invalid, fpst);
9233            nan = float64_maybe_silence_nan(f64);
9234        }
9235        if (fpst->default_nan_mode) {
9236            nan = float64_default_nan;
9237        }
9238        return nan;
9239    } else if (float64_is_infinity(f64)) {
9240        return float64_set_sign(float64_zero, float64_is_neg(f64));
9241    } else if (float64_is_zero(f64)) {
9242        float_raise(float_flag_divbyzero, fpst);
9243        return float64_set_sign(float64_infinity, float64_is_neg(f64));
9244    } else if ((f64_val & ~(1ULL << 63)) < (1ULL << 50)) {
9245        /* Abs(value) < 2.0^-1024 */
9246        float_raise(float_flag_overflow | float_flag_inexact, fpst);
9247        if (round_to_inf(fpst, f64_sbit)) {
9248            return float64_set_sign(float64_infinity, float64_is_neg(f64));
9249        } else {
9250            return float64_set_sign(float64_maxnorm, float64_is_neg(f64));
9251        }
9252    } else if (f64_exp >= 2045 && fpst->flush_to_zero) {
9253        float_raise(float_flag_underflow, fpst);
9254        return float64_set_sign(float64_zero, float64_is_neg(f64));
9255    }
9256
9257    r64 = call_recip_estimate(f64, 2045, fpst);
9258    r64_val = float64_val(r64);
9259    r64_exp = extract64(r64_val, 52, 11);
9260    r64_frac = extract64(r64_val, 0, 52);
9261
9262    /* result = sign : result_exp<10:0> : fraction<51:0> */
9263    return make_float64(f64_sbit |
9264                        ((r64_exp & 0x7ff) << 52) |
9265                        r64_frac);
9266}
9267
9268/* The algorithm that must be used to calculate the estimate
9269 * is specified by the ARM ARM.
9270 */
9271static float64 recip_sqrt_estimate(float64 a, float_status *real_fp_status)
9272{
9273    /* These calculations mustn't set any fp exception flags,
9274     * so we use a local copy of the fp_status.
9275     */
9276    float_status dummy_status = *real_fp_status;
9277    float_status *s = &dummy_status;
9278    float64 q;
9279    int64_t q_int;
9280
9281    if (float64_lt(a, float64_half, s)) {
9282        /* range 0.25 <= a < 0.5 */
9283
9284        /* a in units of 1/512 rounded down */
9285        /* q0 = (int)(a * 512.0);  */
9286        q = float64_mul(float64_512, a, s);
9287        q_int = float64_to_int64_round_to_zero(q, s);
9288
9289        /* reciprocal root r */
9290        /* r = 1.0 / sqrt(((double)q0 + 0.5) / 512.0);  */
9291        q = int64_to_float64(q_int, s);
9292        q = float64_add(q, float64_half, s);
9293        q = float64_div(q, float64_512, s);
9294        q = float64_sqrt(q, s);
9295        q = float64_div(float64_one, q, s);
9296    } else {
9297        /* range 0.5 <= a < 1.0 */
9298
9299        /* a in units of 1/256 rounded down */
9300        /* q1 = (int)(a * 256.0); */
9301        q = float64_mul(float64_256, a, s);
9302        q_int = float64_to_int64_round_to_zero(q, s);
9303
9304        /* reciprocal root r */
9305        /* r = 1.0 /sqrt(((double)q1 + 0.5) / 256); */
9306        q = int64_to_float64(q_int, s);
9307        q = float64_add(q, float64_half, s);
9308        q = float64_div(q, float64_256, s);
9309        q = float64_sqrt(q, s);
9310        q = float64_div(float64_one, q, s);
9311    }
9312    /* r in units of 1/256 rounded to nearest */
9313    /* s = (int)(256.0 * r + 0.5); */
9314
9315    q = float64_mul(q, float64_256, s);
9316    q = float64_add(q, float64_half, s);
9317    q_int = float64_to_int64_round_to_zero(q, s);
9318
9319    /* return (double)s / 256.0;*/
9320    return float64_div(int64_to_float64(q_int, s), float64_256, s);
9321}
9322
9323float32 HELPER(rsqrte_f32)(float32 input, void *fpstp)
9324{
9325    float_status *s = fpstp;
9326    float32 f32 = float32_squash_input_denormal(input, s);
9327    uint32_t val = float32_val(f32);
9328    uint32_t f32_sbit = 0x80000000 & val;
9329    int32_t f32_exp = extract32(val, 23, 8);
9330    uint32_t f32_frac = extract32(val, 0, 23);
9331    uint64_t f64_frac;
9332    uint64_t val64;
9333    int result_exp;
9334    float64 f64;
9335
9336    if (float32_is_any_nan(f32)) {
9337        float32 nan = f32;
9338        if (float32_is_signaling_nan(f32)) {
9339            float_raise(float_flag_invalid, s);
9340            nan = float32_maybe_silence_nan(f32);
9341        }
9342        if (s->default_nan_mode) {
9343            nan = float32_default_nan;
9344        }
9345        return nan;
9346    } else if (float32_is_zero(f32)) {
9347        float_raise(float_flag_divbyzero, s);
9348        return float32_set_sign(float32_infinity, float32_is_neg(f32));
9349    } else if (float32_is_neg(f32)) {
9350        float_raise(float_flag_invalid, s);
9351        return float32_default_nan;
9352    } else if (float32_is_infinity(f32)) {
9353        return float32_zero;
9354    }
9355
9356    /* Scale and normalize to a double-precision value between 0.25 and 1.0,
9357     * preserving the parity of the exponent.  */
9358
9359    f64_frac = ((uint64_t) f32_frac) << 29;
9360    if (f32_exp == 0) {
9361        while (extract64(f64_frac, 51, 1) == 0) {
9362            f64_frac = f64_frac << 1;
9363            f32_exp = f32_exp - 1;
9364        }
9365        f64_frac = extract64(f64_frac, 0, 51) << 1;
9366    }
9367
9368    if (extract64(f32_exp, 0, 1) == 0) {
9369        f64 = make_float64(((uint64_t) f32_sbit) << 32
9370                           | (0x3feULL << 52)
9371                           | f64_frac);
9372    } else {
9373        f64 = make_float64(((uint64_t) f32_sbit) << 32
9374                           | (0x3fdULL << 52)
9375                           | f64_frac);
9376    }
9377
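    /* The result exponent is (3 * exponent_bias - 1 - exp) / 2, which
     * halves and negates the unbiased exponent: 380 == 3 * 127 - 1.
     */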
9378    result_exp = (380 - f32_exp) / 2;
9379
9380    f64 = recip_sqrt_estimate(f64, s);
9381
9382    val64 = float64_val(f64);
9383
9384    val = ((result_exp & 0xff) << 23)
9385        | ((val64 >> 29)  & 0x7fffff);
9386    return make_float32(val);
9387}
9388
9389float64 HELPER(rsqrte_f64)(float64 input, void *fpstp)
9390{
9391    float_status *s = fpstp;
9392    float64 f64 = float64_squash_input_denormal(input, s);
9393    uint64_t val = float64_val(f64);
9394    uint64_t f64_sbit = 0x8000000000000000ULL & val;
9395    int64_t f64_exp = extract64(val, 52, 11);
9396    uint64_t f64_frac = extract64(val, 0, 52);
9397    int64_t result_exp;
9398    uint64_t result_frac;
9399
9400    if (float64_is_any_nan(f64)) {
9401        float64 nan = f64;
9402        if (float64_is_signaling_nan(f64)) {
9403            float_raise(float_flag_invalid, s);
9404            nan = float64_maybe_silence_nan(f64);
9405        }
9406        if (s->default_nan_mode) {
9407            nan = float64_default_nan;
9408        }
9409        return nan;
9410    } else if (float64_is_zero(f64)) {
9411        float_raise(float_flag_divbyzero, s);
9412        return float64_set_sign(float64_infinity, float64_is_neg(f64));
9413    } else if (float64_is_neg(f64)) {
9414        float_raise(float_flag_invalid, s);
9415        return float64_default_nan;
9416    } else if (float64_is_infinity(f64)) {
9417        return float64_zero;
9418    }
9419
9420    /* Scale and normalize to a double-precision value between 0.25 and 1.0,
9421     * preserving the parity of the exponent.  */
9422
9423    if (f64_exp == 0) {
9424        while (extract64(f64_frac, 51, 1) == 0) {
9425            f64_frac = f64_frac << 1;
9426            f64_exp = f64_exp - 1;
9427        }
9428        f64_frac = extract64(f64_frac, 0, 51) << 1;
9429    }
9430
9431    if (extract64(f64_exp, 0, 1) == 0) {
9432        f64 = make_float64(f64_sbit
9433                           | (0x3feULL << 52)
9434                           | f64_frac);
9435    } else {
9436        f64 = make_float64(f64_sbit
9437                           | (0x3fdULL << 52)
9438                           | f64_frac);
9439    }
9440
9441    result_exp = (3068 - f64_exp) / 2;
9442
9443    f64 = recip_sqrt_estimate(f64, s);
9444
9445    result_frac = extract64(float64_val(f64), 0, 52);
9446
9447    return make_float64(f64_sbit |
9448                        ((result_exp & 0x7ff) << 52) |
9449                        result_frac);
9450}
9451
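/* URECPE and URSQRTE treat the 32-bit input as an unsigned fraction
 * with the binary point at the left; inputs too small to yield a
 * representable estimate (top bit clear for URECPE, top two bits clear
 * for URSQRTE) saturate to 0xffffffff.
 */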
9452uint32_t HELPER(recpe_u32)(uint32_t a, void *fpstp)
9453{
9454    float_status *s = fpstp;
9455    float64 f64;
9456
9457    if ((a & 0x80000000) == 0) {
9458        return 0xffffffff;
9459    }
9460
9461    f64 = make_float64((0x3feULL << 52)
9462                       | ((int64_t)(a & 0x7fffffff) << 21));
9463
9464    f64 = recip_estimate(f64, s);
9465
9466    return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
9467}
9468
9469uint32_t HELPER(rsqrte_u32)(uint32_t a, void *fpstp)
9470{
9471    float_status *fpst = fpstp;
9472    float64 f64;
9473
9474    if ((a & 0xc0000000) == 0) {
9475        return 0xffffffff;
9476    }
9477
9478    if (a & 0x80000000) {
9479        f64 = make_float64((0x3feULL << 52)
9480                           | ((uint64_t)(a & 0x7fffffff) << 21));
9481    } else { /* bits 31-30 == '01' */
9482        f64 = make_float64((0x3fdULL << 52)
9483                           | ((uint64_t)(a & 0x3fffffff) << 22));
9484    }
9485
9486    f64 = recip_sqrt_estimate(f64, fpst);
9487
9488    return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
9489}
9490
9491/* VFPv4 fused multiply-accumulate */
9492float32 VFP_HELPER(muladd, s)(float32 a, float32 b, float32 c, void *fpstp)
9493{
9494    float_status *fpst = fpstp;
9495    return float32_muladd(a, b, c, 0, fpst);
9496}
9497
9498float64 VFP_HELPER(muladd, d)(float64 a, float64 b, float64 c, void *fpstp)
9499{
9500    float_status *fpst = fpstp;
9501    return float64_muladd(a, b, c, 0, fpst);
9502}
9503
9504/* ARMv8 round to integral */
9505float32 HELPER(rints_exact)(float32 x, void *fp_status)
9506{
9507    return float32_round_to_int(x, fp_status);
9508}
9509
9510float64 HELPER(rintd_exact)(float64 x, void *fp_status)
9511{
9512    return float64_round_to_int(x, fp_status);
9513}
9514
9515float32 HELPER(rints)(float32 x, void *fp_status)
9516{
9517    int old_flags = get_float_exception_flags(fp_status), new_flags;
9518    float32 ret;
9519
9520    ret = float32_round_to_int(x, fp_status);
9521
9522    /* Suppress any inexact exceptions the conversion produced */
9523    if (!(old_flags & float_flag_inexact)) {
9524        new_flags = get_float_exception_flags(fp_status);
9525        set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
9526    }
9527
9528    return ret;
9529}
9530
9531float64 HELPER(rintd)(float64 x, void *fp_status)
9532{
9533    int old_flags = get_float_exception_flags(fp_status), new_flags;
9534    float64 ret;
9535
9536    ret = float64_round_to_int(x, fp_status);
9537
9540    /* Suppress any inexact exceptions the conversion produced */
9541    if (!(old_flags & float_flag_inexact)) {
9542        new_flags = get_float_exception_flags(fp_status);
9543        set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
9544    }
9545
9546    return ret;
9547}
9548
9549/* Convert ARM rounding mode to softfloat */
9550int arm_rmode_to_sf(int rmode)
9551{
9552    switch (rmode) {
9553    case FPROUNDING_TIEAWAY:
9554        rmode = float_round_ties_away;
9555        break;
9556    case FPROUNDING_ODD:
9557        /* FIXME: add support for ODD */
9558        qemu_log_mask(LOG_UNIMP, "arm: unimplemented rounding mode: %d\n",
9559                      rmode);
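        /* fall through */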
9560    case FPROUNDING_TIEEVEN:
9561    default:
9562        rmode = float_round_nearest_even;
9563        break;
9564    case FPROUNDING_POSINF:
9565        rmode = float_round_up;
9566        break;
9567    case FPROUNDING_NEGINF:
9568        rmode = float_round_down;
9569        break;
9570    case FPROUNDING_ZERO:
9571        rmode = float_round_to_zero;
9572        break;
9573    }
9574    return rmode;
9575}
9576
9577/* CRC helpers.
9578 * The upper bytes of val (above the number specified by 'bytes') must have
9579 * been zeroed out by the caller.
9580 */
9581uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
9582{
9583    uint8_t buf[4];
9584
9585    stl_le_p(buf, val);
9586
9587    /* zlib crc32 converts the accumulator and output to one's complement.  */
9588    return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
9589}
9590
9591uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
9592{
9593    uint8_t buf[4];
9594
9595    stl_le_p(buf, val);
9596
9597    /* Linux crc32c converts the output to one's complement.  */
9598    return crc32c(acc, buf, bytes) ^ 0xffffffff;
9599}
9600