qemu/target/arm/cpu.h
/*
 * ARM virtual CPU header
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef ARM_CPU_H
#define ARM_CPU_H

#include "kvm-consts.h"
#include "hw/registerfields.h"
#include "cpu-qom.h"
#include "exec/cpu-defs.h"
#include "qapi/qapi-types-common.h"

/* ARM processors have a weak memory model */
#define TCG_GUEST_DEFAULT_MO      (0)

#ifdef TARGET_AARCH64
#define KVM_HAVE_MCE_INJECTION 1
#endif

#define EXCP_UDEF            1   /* undefined instruction */
#define EXCP_SWI             2   /* software interrupt */
#define EXCP_PREFETCH_ABORT  3
#define EXCP_DATA_ABORT      4
#define EXCP_IRQ             5
#define EXCP_FIQ             6
#define EXCP_BKPT            7
#define EXCP_EXCEPTION_EXIT  8   /* Return from v7M exception.  */
#define EXCP_KERNEL_TRAP     9   /* Jumped to kernel code page.  */
#define EXCP_HVC            11   /* HyperVisor Call */
#define EXCP_HYP_TRAP       12
#define EXCP_SMC            13   /* Secure Monitor Call */
#define EXCP_VIRQ           14
#define EXCP_VFIQ           15
#define EXCP_SEMIHOST       16   /* semihosting call */
#define EXCP_NOCP           17   /* v7M NOCP UsageFault */
#define EXCP_INVSTATE       18   /* v7M INVSTATE UsageFault */
#define EXCP_STKOF          19   /* v8M STKOF UsageFault */
#define EXCP_LAZYFP         20   /* v7M fault during lazy FP stacking */
#define EXCP_LSERR          21   /* v8M LSERR SecureFault */
#define EXCP_UNALIGNED      22   /* v7M UNALIGNED UsageFault */
/* NB: add new EXCP_ defines to the array in arm_log_exception() too */

#define ARMV7M_EXCP_RESET   1
#define ARMV7M_EXCP_NMI     2
#define ARMV7M_EXCP_HARD    3
#define ARMV7M_EXCP_MEM     4
#define ARMV7M_EXCP_BUS     5
#define ARMV7M_EXCP_USAGE   6
#define ARMV7M_EXCP_SECURE  7
#define ARMV7M_EXCP_SVC     11
#define ARMV7M_EXCP_DEBUG   12
#define ARMV7M_EXCP_PENDSV  14
#define ARMV7M_EXCP_SYSTICK 15

/* For M profile, some registers are banked secure vs non-secure;
 * these are represented as a 2-element array where the first element
 * is the non-secure copy and the second is the secure copy.
 * When the CPU does not implement the security extension then
 * only the first element is used.
 * This means that the copy for the current security state can be
 * accessed via env->registerfield[env->v7m.secure] (whether the security
 * extension is implemented or not).
 */
enum {
    M_REG_NS = 0,
    M_REG_S = 1,
    M_REG_NUM_BANKS = 2,
};
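
/*
 * Illustrative sketch (not part of the real header): with the banking
 * scheme above, the copy for a given security state is selected simply
 * by indexing with 0 (M_REG_NS) or 1 (M_REG_S), mirroring how the real
 * code indexes with env->v7m.secure.
 */
static inline uint32_t m_banked_reg_demo(const uint32_t bank[M_REG_NUM_BANKS],
                                         int secure)
{
    return bank[secure ? M_REG_S : M_REG_NS];
}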

/* ARM-specific interrupt pending bits.  */
#define CPU_INTERRUPT_FIQ   CPU_INTERRUPT_TGT_EXT_1
#define CPU_INTERRUPT_VIRQ  CPU_INTERRUPT_TGT_EXT_2
#define CPU_INTERRUPT_VFIQ  CPU_INTERRUPT_TGT_EXT_3

/* The usual mapping for an AArch64 system register to its AArch32
 * counterpart is for the 32 bit world to have access to the lower
 * half only (with writes leaving the upper half untouched). It's
 * therefore useful to be able to pass TCG the offset of the least
 * significant half of a uint64_t struct member.
 */
#ifdef HOST_WORDS_BIGENDIAN
#define offsetoflow32(S, M) (offsetof(S, M) + sizeof(uint32_t))
#define offsetofhigh32(S, M) offsetof(S, M)
#else
#define offsetoflow32(S, M) offsetof(S, M)
#define offsetofhigh32(S, M) (offsetof(S, M) + sizeof(uint32_t))
#endif
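
/*
 * Illustrative sketch (hypothetical struct, not upstream code): for a
 * 64-bit backing field, these macros yield the offset of the half an
 * AArch32 access sees, independent of host endianness.
 */
typedef struct {
    uint64_t ttbr;  /* 64-bit backing store for a banked register */
} DemoRegState;

static inline size_t demo_aa32_view_offset(void)
{
    return offsetoflow32(DemoRegState, ttbr);  /* low 32 bits of 'ttbr' */
}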
 104
 105/* Meanings of the ARMCPU object's four inbound GPIO lines */
 106#define ARM_CPU_IRQ 0
 107#define ARM_CPU_FIQ 1
 108#define ARM_CPU_VIRQ 2
 109#define ARM_CPU_VFIQ 3
 110
 111#undef NB_MEM_ATTR
 112#define NB_MEM_ATTR 2
 113#define MEM_ATTR_NS 0
 114#define MEM_ATTR_SEC 1
 115
 116/* ARM-specific extra insn start words:
 117 * 1: Conditional execution bits
 118 * 2: Partial exception syndrome for data aborts
 119 */
 120#define TARGET_INSN_START_EXTRA_WORDS 2
 121
 122/* The 2nd extra word holding syndrome info for data aborts does not use
 123 * the upper 6 bits nor the lower 14 bits. We mask and shift it down to
 124 * help the sleb128 encoder do a better job.
 125 * When restoring the CPU state, we shift it back up.
 126 */
 127#define ARM_INSN_START_WORD2_MASK ((1 << 26) - 1)
 128#define ARM_INSN_START_WORD2_SHIFT 14
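
/*
 * Illustrative sketch (not upstream code): packing drops the unused upper
 * 6 and lower 14 bits as described above; unpacking shifts the value back
 * into place when restoring CPU state.
 */
static inline uint32_t demo_pack_insn_start_word2(uint32_t syn)
{
    return (syn & ARM_INSN_START_WORD2_MASK) >> ARM_INSN_START_WORD2_SHIFT;
}

static inline uint32_t demo_unpack_insn_start_word2(uint32_t packed)
{
    return packed << ARM_INSN_START_WORD2_SHIFT;
}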

/* We currently assume float and double are IEEE single and double
   precision respectively.
   Doing runtime conversions is tricky because VFP registers may contain
   integer values (eg. as the result of a FTOSI instruction).
   s<2n> maps to the least significant half of d<n>
   s<2n+1> maps to the most significant half of d<n>
 */

/**
 * DynamicGDBXMLInfo:
 * @desc: Contains the XML descriptions.
 * @num: Number of the registers in this XML seen by GDB.
 * @data: A union with data specific to the set of registers
 *    @cpregs_keys: Array that contains the corresponding Key of
 *                  a given cpreg with the same order of the cpreg
 *                  in the XML description.
 */
typedef struct DynamicGDBXMLInfo {
    char *desc;
    int num;
    union {
        struct {
            uint32_t *keys;
        } cpregs;
    } data;
} DynamicGDBXMLInfo;

/* CPU state for each instance of a generic timer (in cp15 c14) */
typedef struct ARMGenericTimer {
    uint64_t cval; /* Timer CompareValue register */
    uint64_t ctl; /* Timer Control register */
} ARMGenericTimer;

#define GTIMER_PHYS     0
#define GTIMER_VIRT     1
#define GTIMER_HYP      2
#define GTIMER_SEC      3
#define GTIMER_HYPVIRT  4
#define NUM_GTIMERS     5

typedef struct {
    uint64_t raw_tcr;
    uint32_t mask;
    uint32_t base_mask;
} TCR;

#define VTCR_NSW (1u << 29)
#define VTCR_NSA (1u << 30)
#define VSTCR_SW VTCR_NSW
#define VSTCR_SA VTCR_NSA

/* Define a maximum sized vector register.
 * For 32-bit, this is a 128-bit NEON/AdvSIMD register.
 * For 64-bit, this is a 2048-bit SVE register.
 *
 * Note that the mapping between S, D, and Q views of the register bank
 * differs between AArch64 and AArch32.
 * In AArch32:
 *  Qn = regs[n].d[1]:regs[n].d[0]
 *  Dn = regs[n / 2].d[n & 1]
 *  Sn = regs[n / 4].d[n % 4 / 2],
 *       bits 31..0 for even n, and bits 63..32 for odd n
 *       (and regs[16] to regs[31] are inaccessible)
 * In AArch64:
 *  Zn = regs[n].d[*]
 *  Qn = regs[n].d[1]:regs[n].d[0]
 *  Dn = regs[n].d[0]
 *  Sn = regs[n].d[0] bits 31..0
 *  Hn = regs[n].d[0] bits 15..0
 *
 * This corresponds to the architecturally defined mapping between
 * the two execution states, and means we do not need to explicitly
 * map these registers when changing states.
 *
 * Align the data for use with TCG host vector operations.
 */

#ifdef TARGET_AARCH64
# define ARM_MAX_VQ    16
void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp);
#else
# define ARM_MAX_VQ    1
static inline void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp) { }
static inline void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp) { }
#endif

typedef struct ARMVectorReg {
    uint64_t d[2 * ARM_MAX_VQ] QEMU_ALIGNED(16);
} ARMVectorReg;

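/*
 * Illustrative sketch (not the real accessors): the AArch32 D-register
 * view described above indexes the ARMVectorReg array like this. The S
 * view additionally needs offsetoflow32()-style host-endian care.
 */
static inline uint64_t *demo_aa32_dreg(ARMVectorReg *regs, unsigned n)
{
    return &regs[n / 2].d[n & 1];   /* Dn = regs[n / 2].d[n & 1] */
}
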
#ifdef TARGET_AARCH64
/* In AArch32 mode, predicate registers do not exist at all.  */
typedef struct ARMPredicateReg {
    uint64_t p[DIV_ROUND_UP(2 * ARM_MAX_VQ, 8)] QEMU_ALIGNED(16);
} ARMPredicateReg;

/* In AArch32 mode, PAC keys do not exist at all.  */
typedef struct ARMPACKey {
    uint64_t lo, hi;
} ARMPACKey;
#endif

/* See the commentary above the TBFLAG field definitions.  */
typedef struct CPUARMTBFlags {
    uint32_t flags;
    target_ulong flags2;
} CPUARMTBFlags;

typedef struct CPUARMState {
    /* Regs for current mode.  */
    uint32_t regs[16];

    /* 32/64 switch only happens when taking and returning from
     * exceptions so the overlap semantics are taken care of then
     * instead of having a complicated union.
     */
    /* Regs for A64 mode.  */
    uint64_t xregs[32];
    uint64_t pc;
    /* PSTATE isn't an architectural register for ARMv8. However, it is
     * convenient for us to assemble the underlying state into a 32 bit format
     * identical to the architectural format used for the SPSR. (This is also
     * what the Linux kernel's 'pstate' field in signal handlers and KVM's
     * 'pstate' register are.) Of the PSTATE bits:
     *  NZCV are kept in the split out env->CF/VF/NF/ZF, (which have the same
     *    semantics as for AArch32, as described in the comments on each field)
     *  nRW (also known as M[4]) is kept, inverted, in env->aarch64
     *  DAIF (exception masks) are kept in env->daif
     *  BTYPE is kept in env->btype
     *  all other bits are stored in their correct places in env->pstate
     */
    uint32_t pstate;
    uint32_t aarch64; /* 1 if CPU is in aarch64 state; inverse of PSTATE.nRW */

    /* Cached TBFLAGS state.  See below for which bits are included.  */
    CPUARMTBFlags hflags;

    /* Frequently accessed CPSR bits are stored separately for efficiency.
       This contains all the other bits.  Use cpsr_{read,write} to access
       the whole CPSR.  */
    uint32_t uncached_cpsr;
    uint32_t spsr;

    /* Banked registers.  */
    uint64_t banked_spsr[8];
    uint32_t banked_r13[8];
    uint32_t banked_r14[8];

    /* These hold r8-r12.  */
    uint32_t usr_regs[5];
    uint32_t fiq_regs[5];

    /* cpsr flag cache for faster execution */
    uint32_t CF; /* 0 or 1 */
    uint32_t VF; /* V is the bit 31. All other bits are undefined */
    uint32_t NF; /* N is bit 31. All other bits are undefined.  */
    uint32_t ZF; /* Z set if zero.  */
    uint32_t QF; /* 0 or 1 */
    uint32_t GE; /* cpsr[19:16] */
    uint32_t thumb; /* cpsr[5]. 0 = arm mode, 1 = thumb mode. */
    uint32_t condexec_bits; /* IT bits.  cpsr[15:10,26:25].  */
    uint32_t btype;  /* BTI branch type.  spsr[11:10].  */
    uint64_t daif; /* exception masks, in the bits they are in PSTATE */

    uint64_t elr_el[4]; /* AArch64 exception link regs  */
    uint64_t sp_el[4]; /* AArch64 banked stack pointers */

    /* System control coprocessor (cp15) */
    struct {
        uint32_t c0_cpuid;
        union { /* Cache size selection */
            struct {
                uint64_t _unused_csselr0;
                uint64_t csselr_ns;
                uint64_t _unused_csselr1;
                uint64_t csselr_s;
            };
            uint64_t csselr_el[4];
        };
        union { /* System control register. */
            struct {
                uint64_t _unused_sctlr;
                uint64_t sctlr_ns;
                uint64_t hsctlr;
                uint64_t sctlr_s;
            };
            uint64_t sctlr_el[4];
        };
        uint64_t cpacr_el1; /* Architectural feature access control register */
        uint64_t cptr_el[4];  /* ARMv8 feature trap registers */
        uint32_t c1_xscaleauxcr; /* XScale auxiliary control register.  */
        uint64_t sder; /* Secure debug enable register. */
        uint32_t nsacr; /* Non-secure access control register. */
        union { /* MMU translation table base 0. */
            struct {
                uint64_t _unused_ttbr0_0;
                uint64_t ttbr0_ns;
                uint64_t _unused_ttbr0_1;
                uint64_t ttbr0_s;
            };
            uint64_t ttbr0_el[4];
        };
        union { /* MMU translation table base 1. */
            struct {
                uint64_t _unused_ttbr1_0;
                uint64_t ttbr1_ns;
                uint64_t _unused_ttbr1_1;
                uint64_t ttbr1_s;
            };
            uint64_t ttbr1_el[4];
        };
        uint64_t vttbr_el2; /* Virtualization Translation Table Base.  */
        uint64_t vsttbr_el2; /* Secure Virtualization Translation Table. */
        /* MMU translation table base control. */
        TCR tcr_el[4];
        TCR vtcr_el2; /* Virtualization Translation Control.  */
        TCR vstcr_el2; /* Secure Virtualization Translation Control. */
        uint32_t c2_data; /* MPU data cacheable bits.  */
        uint32_t c2_insn; /* MPU instruction cacheable bits.  */
        union { /* MMU domain access control register
                 * MPU write buffer control.
                 */
            struct {
                uint64_t dacr_ns;
                uint64_t dacr_s;
            };
            struct {
                uint64_t dacr32_el2;
            };
        };
        uint32_t pmsav5_data_ap; /* PMSAv5 MPU data access permissions */
        uint32_t pmsav5_insn_ap; /* PMSAv5 MPU insn access permissions */
        uint64_t hcr_el2; /* Hypervisor configuration register */
        uint64_t scr_el3; /* Secure configuration register.  */
        union { /* Fault status registers.  */
            struct {
                uint64_t ifsr_ns;
                uint64_t ifsr_s;
            };
            struct {
                uint64_t ifsr32_el2;
            };
        };
        union {
            struct {
                uint64_t _unused_dfsr;
                uint64_t dfsr_ns;
                uint64_t hsr;
                uint64_t dfsr_s;
            };
            uint64_t esr_el[4];
        };
        uint32_t c6_region[8]; /* MPU base/size registers.  */
        union { /* Fault address registers. */
            struct {
                uint64_t _unused_far0;
#ifdef HOST_WORDS_BIGENDIAN
                uint32_t ifar_ns;
                uint32_t dfar_ns;
                uint32_t ifar_s;
                uint32_t dfar_s;
#else
                uint32_t dfar_ns;
                uint32_t ifar_ns;
                uint32_t dfar_s;
                uint32_t ifar_s;
#endif
                uint64_t _unused_far3;
            };
            uint64_t far_el[4];
        };
        uint64_t hpfar_el2;
        uint64_t hstr_el2;
        union { /* Translation result. */
            struct {
                uint64_t _unused_par_0;
                uint64_t par_ns;
                uint64_t _unused_par_1;
                uint64_t par_s;
            };
            uint64_t par_el[4];
        };

        uint32_t c9_insn; /* Cache lockdown registers.  */
        uint32_t c9_data;
        uint64_t c9_pmcr; /* performance monitor control register */
        uint64_t c9_pmcnten; /* perf monitor counter enables */
        uint64_t c9_pmovsr; /* perf monitor overflow status */
        uint64_t c9_pmuserenr; /* perf monitor user enable */
        uint64_t c9_pmselr; /* perf monitor counter selection register */
        uint64_t c9_pminten; /* perf monitor interrupt enables */
        uint32_t c9_pmxevtyper; /* perf monitor event type */
        uint32_t c9_imp_bpctlr; /* branch predictor control register */
        uint32_t c9_imp_memprotctlr; /* memory protection control register */
        union { /* Memory attribute redirection */
            struct {
#ifdef HOST_WORDS_BIGENDIAN
                uint64_t _unused_mair_0;
                uint32_t mair1_ns;
                uint32_t mair0_ns;
                uint64_t _unused_mair_1;
                uint32_t mair1_s;
                uint32_t mair0_s;
#else
                uint64_t _unused_mair_0;
                uint32_t mair0_ns;
                uint32_t mair1_ns;
                uint64_t _unused_mair_1;
                uint32_t mair0_s;
                uint32_t mair1_s;
#endif
            };
            uint64_t mair_el[4];
        };
        union { /* vector base address register */
            struct {
                uint64_t _unused_vbar;
                uint64_t vbar_ns;
                uint64_t hvbar;
                uint64_t vbar_s;
            };
            uint64_t vbar_el[4];
        };
        uint32_t mvbar; /* (monitor) vector base address register */
        struct { /* FCSE PID. */
            uint32_t fcseidr_ns;
            uint32_t fcseidr_s;
        };
        union { /* Context ID. */
            struct {
                uint64_t _unused_contextidr_0;
                uint64_t contextidr_ns;
                uint64_t _unused_contextidr_1;
                uint64_t contextidr_s;
            };
            uint64_t contextidr_el[4];
        };
        union { /* User RW Thread register. */
            struct {
                uint64_t tpidrurw_ns;
                uint64_t tpidrprw_ns;
                uint64_t htpidr;
                uint64_t _tpidr_el3;
            };
            uint64_t tpidr_el[4];
        };
        /* The secure banks of these registers don't map anywhere */
        uint64_t tpidrurw_s;
        uint64_t tpidrprw_s;
        uint64_t tpidruro_s;

        union { /* User RO Thread register. */
            uint64_t tpidruro_ns;
            uint64_t tpidrro_el[1];
        };
        uint64_t c14_cntfrq; /* Counter Frequency register */
        uint64_t c14_cntkctl; /* Timer Control register */
        uint32_t cnthctl_el2; /* Counter/Timer Hyp Control register */
        uint64_t cntvoff_el2; /* Counter Virtual Offset register */
        ARMGenericTimer c14_timer[NUM_GTIMERS];
        uint32_t c15_cpar; /* XScale Coprocessor Access Register */
        uint32_t c15_ticonfig; /* TI925T configuration byte.  */
        uint32_t c15_i_max; /* Maximum D-cache dirty line index.  */
        uint32_t c15_i_min; /* Minimum D-cache dirty line index.  */
        uint32_t c15_threadid; /* TI debugger thread-ID.  */
        uint32_t c15_config_base_address; /* SCU base address.  */
        uint32_t c15_diagnostic; /* diagnostic register */
        uint32_t c15_power_diagnostic;
        uint32_t c15_power_control; /* power control */
        uint64_t dbgbvr[16]; /* breakpoint value registers */
        uint64_t dbgbcr[16]; /* breakpoint control registers */
        uint64_t dbgwvr[16]; /* watchpoint value registers */
        uint64_t dbgwcr[16]; /* watchpoint control registers */
        uint64_t mdscr_el1;
        uint64_t oslsr_el1; /* OS Lock Status */
        uint64_t mdcr_el2;
        uint64_t mdcr_el3;
        /* Stores the architectural value of the counter *the last time it was
         * updated* by pmccntr_op_start. Accesses should always be surrounded
         * by pmccntr_op_start/pmccntr_op_finish to guarantee the latest
         * architecturally-correct value is being read/set.
         */
        uint64_t c15_ccnt;
        /* Stores the delta between the architectural value and the underlying
         * cycle count during normal operation. It is used to update c15_ccnt
         * to be the correct architectural value before accesses. During
         * accesses, c15_ccnt_delta contains the underlying count being used
         * for the access, after which it reverts to the delta value in
         * pmccntr_op_finish.
         */
        uint64_t c15_ccnt_delta;
        uint64_t c14_pmevcntr[31];
        uint64_t c14_pmevcntr_delta[31];
        uint64_t c14_pmevtyper[31];
        uint64_t pmccfiltr_el0; /* Performance Monitor Filter Register */
        uint64_t vpidr_el2; /* Virtualization Processor ID Register */
        uint64_t vmpidr_el2; /* Virtualization Multiprocessor ID Register */
        uint64_t tfsr_el[4]; /* tfsre0_el1 is index 0.  */
        uint64_t gcr_el1;
        uint64_t rgsr_el1;

        /* DynamIQ Shared Unit (DSU) Registers.  */
        struct {
            uint64_t clusterectrl;
            uint64_t clusterpwrctrl;
            uint64_t clusterpwrdn;
            uint64_t clusterpartcr;
            uint64_t clusterbusqos;
            uint64_t clusterthreadsidovr;
        } dsu;
    } cp15;

    struct {
        /* M profile has up to 4 stack pointers:
         * a Main Stack Pointer and a Process Stack Pointer for each
         * of the Secure and Non-Secure states. (If the CPU doesn't support
         * the security extension then it has only two SPs.)
         * In QEMU we always store the currently active SP in regs[13],
         * and the non-active SP for the current security state in
         * v7m.other_sp. The stack pointers for the inactive security state
         * are stored in other_ss_msp and other_ss_psp.
         * switch_v7m_security_state() is responsible for rearranging them
         * when we change security state.
         */
        uint32_t other_sp;
        uint32_t other_ss_msp;
        uint32_t other_ss_psp;
        uint32_t vecbase[M_REG_NUM_BANKS];
        uint32_t basepri[M_REG_NUM_BANKS];
        uint32_t control[M_REG_NUM_BANKS];
        uint32_t ccr[M_REG_NUM_BANKS]; /* Configuration and Control */
        uint32_t cfsr[M_REG_NUM_BANKS]; /* Configurable Fault Status */
        uint32_t hfsr; /* HardFault Status */
        uint32_t dfsr; /* Debug Fault Status Register */
        uint32_t sfsr; /* Secure Fault Status Register */
        uint32_t mmfar[M_REG_NUM_BANKS]; /* MemManage Fault Address */
        uint32_t bfar; /* BusFault Address */
        uint32_t sfar; /* Secure Fault Address Register */
        unsigned mpu_ctrl[M_REG_NUM_BANKS]; /* MPU_CTRL */
        int exception;
        uint32_t primask[M_REG_NUM_BANKS];
        uint32_t faultmask[M_REG_NUM_BANKS];
        uint32_t aircr; /* only holds r/w state if security extn implemented */
        uint32_t secure; /* Is CPU in Secure state? (not guest visible) */
        uint32_t csselr[M_REG_NUM_BANKS];
        uint32_t scr[M_REG_NUM_BANKS];
        uint32_t msplim[M_REG_NUM_BANKS];
        uint32_t psplim[M_REG_NUM_BANKS];
        uint32_t fpcar[M_REG_NUM_BANKS];
        uint32_t fpccr[M_REG_NUM_BANKS];
        uint32_t fpdscr[M_REG_NUM_BANKS];
        uint32_t cpacr[M_REG_NUM_BANKS];
        uint32_t nsacr;
        uint32_t ltpsize;
        uint32_t vpr;
    } v7m;

    /* Information associated with an exception about to be taken:
     * code which raises an exception must set cs->exception_index and
     * the relevant parts of this structure; the cpu_do_interrupt function
     * will then set the guest-visible registers as part of the exception
     * entry process.
     */
    struct {
        uint32_t syndrome; /* AArch64 format syndrome register */
        uint32_t fsr; /* AArch32 format fault status register info */
        uint64_t vaddress; /* virtual addr associated with exception, if any */
        uint32_t target_el; /* EL the exception should be targeted for */
        /* If we implement EL2 we will also need to store information
         * about the intermediate physical address for stage 2 faults.
         */
    } exception;

    /* Information associated with an SError */
    struct {
        uint8_t pending;
        uint8_t has_esr;
        uint64_t esr;
    } serror;

    uint8_t ext_dabt_raised; /* Tracking/verifying injection of ext DABT */

    /* State of our input IRQ/FIQ/VIRQ/VFIQ lines */
    uint32_t irq_line_state;

    /* Thumb-2 EE state.  */
    uint32_t teecr;
    uint32_t teehbr;

    /* VFP coprocessor state.  */
    struct {
        ARMVectorReg zregs[32];

#ifdef TARGET_AARCH64
        /* Store FFR as pregs[16] to make it easier to treat as any other.  */
#define FFR_PRED_NUM 16
        ARMPredicateReg pregs[17];
        /* Scratch space for aa64 sve predicate temporary.  */
        ARMPredicateReg preg_tmp;
#endif

        /* We store these fpcsr fields separately for convenience.  */
        uint32_t qc[4] QEMU_ALIGNED(16);
        int vec_len;
        int vec_stride;

        uint32_t xregs[16];

        /* Scratch space for aa32 neon expansion.  */
        uint32_t scratch[8];

        /* There are a number of distinct float control structures:
         *
         *  fp_status: is the "normal" fp status.
         *  fp_status_f16: used for half-precision calculations
         *  standard_fp_status : the ARM "Standard FPSCR Value"
         *  standard_fp_status_f16 : used for half-precision
         *       calculations with the ARM "Standard FPSCR Value"
         *
         * Half-precision operations are governed by a separate
         * flush-to-zero control bit in FPSCR:FZ16. We pass a separate
         * status structure to control this.
         *
         * The "Standard FPSCR", ie default-NaN, flush-to-zero,
         * round-to-nearest, is used by any operations (generally
         * Neon) which the architecture defines as controlled by the
         * standard FPSCR value rather than the FPSCR.
         *
         * The "standard FPSCR but for fp16 ops" is needed because
         * the "standard FPSCR" tracks the FPSCR.FZ16 bit rather than
         * using a fixed value for it.
         *
         * To avoid having to transfer exception bits around, we simply
         * say that the FPSCR cumulative exception flags are the logical
         * OR of the flags in the four fp statuses. This relies on the
         * only thing which needs to read the exception flags being
         * an explicit FPSCR read.
         */
        float_status fp_status;
        float_status fp_status_f16;
        float_status standard_fp_status;
        float_status standard_fp_status_f16;

        /* ZCR_EL[1-3] */
        uint64_t zcr_el[4];
    } vfp;
    uint64_t exclusive_addr;
    uint64_t exclusive_val;
    uint64_t exclusive_high;

    uint32_t debug_ctx;

    /* iwMMXt coprocessor state.  */
    struct {
        uint64_t regs[16];
        uint64_t val;

        uint32_t cregs[16];
    } iwmmxt;

#ifdef TARGET_AARCH64
    struct {
        ARMPACKey apia;
        ARMPACKey apib;
        ARMPACKey apda;
        ARMPACKey apdb;
        ARMPACKey apga;
    } keys;
#endif

#if defined(CONFIG_USER_ONLY)
    /* For usermode syscall translation.  */
    int eabi;
#endif

    struct CPUBreakpoint *cpu_breakpoint[16];
    struct CPUWatchpoint *cpu_watchpoint[16];

    /* Fields up to this point are cleared by a CPU reset */
    struct {} end_reset_fields;

    /* Fields after this point are preserved across CPU reset. */
    bool irq_wires[4];

    /* Internal CPU feature flags.  */
    uint64_t features;

    /* Controls the reset value of SCTLR_V.  */
    bool vinithi;

    /* PMSAv7 MPU */
    struct {
        uint32_t *drbar;
        uint32_t *drsr;
        uint32_t *dracr;
        uint32_t rnr[M_REG_NUM_BANKS];
    } pmsav7;

    MemTxAttrs *memattr_ns;
    MemTxAttrs *memattr_s;

    /* PMSAv8 MPU */
    struct {
        /* The PMSAv8 implementation also shares some PMSAv7 config
         * and state:
         *  pmsav7.rnr (region number register)
         *  pmsav7_dregion (number of configured regions)
         */
        uint32_t *rbar[M_REG_NUM_BANKS];
        uint32_t *rlar[M_REG_NUM_BANKS];
        uint32_t mair0[M_REG_NUM_BANKS];
        uint32_t mair1[M_REG_NUM_BANKS];
    } pmsav8;

    /* v8M SAU */
    struct {
        uint32_t *rbar;
        uint32_t *rlar;
        uint32_t rnr;
        uint32_t ctrl;
    } sau;

    struct {
        /* TCM region registers A, B & C */
        uint32_t a;
        uint32_t b;
        uint32_t c;
    } tcmregion;

    void *nvic;
    const struct arm_boot_info *boot_info;
    /* Store GICv3CPUState to access from this struct */
    void *gicv3state;

#ifdef TARGET_TAGGED_ADDRESSES
    /* Linux syscall tagged address support */
    bool tagged_addr_enable;
#endif
} CPUARMState;

enum {
    DEBUG_CURRENT_EL = 0,
    DEBUG_EL0 = 1,
    DEBUG_EL1 = 2,
    DEBUG_EL2 = 3,
    DEBUG_EL3 = 4,
    DEBUG_PHYS = 5,
};

static inline void set_feature(CPUARMState *env, int feature)
{
    env->features |= 1ULL << feature;
}

static inline void unset_feature(CPUARMState *env, int feature)
{
    env->features &= ~(1ULL << feature);
}
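
/*
 * Illustrative sketch (not part of this excerpt; in upstream QEMU the
 * header's arm_feature() implements the same check): testing a feature
 * bit previously set with set_feature().
 */
static inline bool demo_has_feature(CPUARMState *env, int feature)
{
    return (env->features & (1ULL << feature)) != 0;
}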

/**
 * ARMELChangeHookFn:
 * type of a function which can be registered via arm_register_el_change_hook()
 * to get callbacks when the CPU changes its exception level or mode.
 */
typedef void ARMELChangeHookFn(ARMCPU *cpu, void *opaque);
typedef struct ARMELChangeHook ARMELChangeHook;
struct ARMELChangeHook {
    ARMELChangeHookFn *hook;
    void *opaque;
    QLIST_ENTRY(ARMELChangeHook) node;
};
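
/*
 * Illustrative sketch (not part of the real header): the shape of a hook
 * conforming to ARMELChangeHookFn. In upstream QEMU it would be passed to
 * arm_register_el_change_hook(), which is declared elsewhere.
 */
static inline void demo_el_change_hook(ARMCPU *cpu, void *opaque)
{
    /* React to the EL/mode change; 'opaque' is whatever was registered. */
    (void)cpu;
    (void)opaque;
}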

/* These values map onto the return values for
 * QEMU_PSCI_0_2_FN_AFFINITY_INFO */
typedef enum ARMPSCIState {
    PSCI_ON = 0,
    PSCI_OFF = 1,
    PSCI_ON_PENDING = 2
} ARMPSCIState;

typedef struct ARMISARegisters ARMISARegisters;

/**
 * ARMCPU:
 * @env: #CPUARMState
 *
 * An ARM CPU core.
 */
struct ARMCPU {
    /*< private >*/
    CPUState parent_obj;
    /*< public >*/

    CPUNegativeOffsetState neg;
    CPUARMState env;

    bool is_in_wfi;

    /* Coprocessor information */
    GHashTable *cp_regs;
    /* For marshalling (mostly coprocessor) register state between the
     * kernel and QEMU (for KVM) and between two QEMUs (for migration),
     * we use these arrays.
     */
    /* List of register indexes managed via these arrays; (full KVM style
     * 64 bit indexes, not CPRegInfo 32 bit indexes)
     */
    uint64_t *cpreg_indexes;
    /* Values of the registers (cpreg_indexes[i]'s value is cpreg_values[i]) */
    uint64_t *cpreg_values;
    /* Length of the indexes, values, reset_values arrays */
    int32_t cpreg_array_len;
    /* These are used only for migration: incoming data arrives in
     * these fields and is sanity checked in post_load before copying
     * to the working data structures above.
     */
    uint64_t *cpreg_vmstate_indexes;
    uint64_t *cpreg_vmstate_values;
    int32_t cpreg_vmstate_array_len;

    DynamicGDBXMLInfo dyn_sysreg_xml;
    DynamicGDBXMLInfo dyn_svereg_xml;

    /* Timers used by the generic (architected) timer */
    QEMUTimer *gt_timer[NUM_GTIMERS];
    /*
     * Timer used by the PMU. Its state is restored after migration by
     * pmu_op_finish() - it does not need other handling during migration
     */
    QEMUTimer *pmu_timer;
    /* GPIO outputs for generic timer */
    qemu_irq gt_timer_outputs[NUM_GTIMERS];
    /* GPIO output for GICv3 maintenance interrupt signal */
    qemu_irq gicv3_maintenance_interrupt;
    /* GPIO output for the PMU interrupt */
    qemu_irq pmu_interrupt;

    /* WFI notification */
    qemu_irq wfi;

    /* MemoryRegion to use for secure physical accesses */
    MemoryRegion *secure_memory;

    /* MemoryRegion to use for allocation tag accesses */
    MemoryRegion *tag_memory;
    MemoryRegion *secure_tag_memory;

    /* For v8M, pointer to the IDAU interface provided by board/SoC */
    Object *idau;

    /* 'compatible' string for this CPU for Linux device trees */
    const char *dtb_compatible;

    /* PSCI version for this CPU
     * Bits[31:16] = Major Version
     * Bits[15:0] = Minor Version
     */
    uint32_t psci_version;

    /* Current power state, access guarded by BQL */
    ARMPSCIState power_state;

    /* CPU has virtualization extension */
    bool has_el2;
    /* CPU has security extension */
    bool has_el3;
    /* CPU has PMU (Performance Monitor Unit) */
    bool has_pmu;
    /* CPU has VFP */
    bool has_vfp;
    /* CPU has Neon */
    bool has_neon;
    /* CPU has M-profile DSP extension */
    bool has_dsp;

    /* CPU has memory protection unit */
    bool has_mpu;
    /* PMSAv7 MPU number of supported regions */
    uint32_t pmsav7_dregion;
    /* v8M SAU number of supported regions */
    uint32_t sau_sregion;

    /* PSCI conduit used to invoke PSCI methods
     * 0 - disabled, 1 - smc, 2 - hvc
     */
    uint32_t psci_conduit;

    /* For v8M, initial value of the Secure VTOR */
    uint32_t init_svtor;
    /* For v8M, initial value of the Non-secure VTOR */
    uint32_t init_nsvtor;

    /* [QEMU_]KVM_ARM_TARGET_* constant for this CPU, or
     * QEMU_KVM_ARM_TARGET_NONE if the kernel doesn't support this CPU type.
     */
    uint32_t kvm_target;

    /* KVM init features for this CPU */
    uint32_t kvm_init_features[7];

    /* KVM CPU state */

    /* KVM virtual time adjustment */
    bool kvm_adjvtime;
    bool kvm_vtime_dirty;
    uint64_t kvm_vtime;

    /* KVM steal time */
    OnOffAuto kvm_steal_time;

    /* Uniprocessor system with MP extensions */
    bool mp_is_up;

    /* True if we tried kvm_arm_host_cpu_features() during CPU instance_init
     * and the probe failed (so we need to report the error in realize)
     */
    bool host_cpu_probe_failed;

    /* Specify the number of cores in this CPU cluster. Used for the L2CTLR
     * register.
     */
    int32_t core_count;

    /* The instance init functions for implementation-specific subclasses
     * set these fields to specify the implementation-dependent values of
     * various constant registers and reset values of non-constant
     * registers.
     * Some of these might become QOM properties eventually.
     * Field names match the official register names as defined in the
     * ARMv7AR ARM Architecture Reference Manual. A reset_ prefix
     * is used for reset values of non-constant registers; no reset_
     * prefix means a constant register.
     * Some of these registers are split out into a substructure that
     * is shared with the translators to control the ISA.
     *
     * Note that if you add an ID register to the ARMISARegisters struct
     * you need to also update the 32-bit and 64-bit versions of the
     * kvm_arm_get_host_cpu_features() function to correctly populate the
     * field by reading the value from the KVM vCPU.
     */
    struct ARMISARegisters {
        uint32_t id_isar0;
        uint32_t id_isar1;
        uint32_t id_isar2;
        uint32_t id_isar3;
        uint32_t id_isar4;
        uint32_t id_isar5;
        uint32_t id_isar6;
        uint32_t id_mmfr0;
        uint32_t id_mmfr1;
        uint32_t id_mmfr2;
        uint32_t id_mmfr3;
        uint32_t id_mmfr4;
        uint32_t id_pfr0;
        uint32_t id_pfr1;
        uint32_t id_pfr2;
        uint32_t mvfr0;
        uint32_t mvfr1;
        uint32_t mvfr2;
        uint32_t id_dfr0;
        uint32_t dbgdidr;
        uint64_t id_aa64isar0;
        uint64_t id_aa64isar1;
        uint64_t id_aa64pfr0;
        uint64_t id_aa64pfr1;
        uint64_t id_aa64mmfr0;
        uint64_t id_aa64mmfr1;
        uint64_t id_aa64mmfr2;
        uint64_t id_aa64dfr0;
        uint64_t id_aa64dfr1;
        uint64_t id_aa64zfr0;
    } isar;
    uint64_t midr;
    uint32_t revidr;
    uint32_t reset_fpsid;
    uint64_t ctr;
    uint32_t reset_sctlr;
    uint64_t pmceid0;
    uint64_t pmceid1;
    uint32_t id_afr0;
    uint64_t id_aa64afr0;
    uint64_t id_aa64afr1;
    uint64_t clidr;
    uint64_t mp_affinity; /* MP ID without feature bits */
    /* The elements of this array are the CCSIDR values for each cache,
     * in the order L1DCache, L1ICache, L2DCache, L2ICache, etc.
     */
    uint64_t ccsidr[16];
    uint64_t reset_cbar;
    uint32_t reset_auxcr;
    bool reset_hivecs;

    /*
     * Intermediate values used during property parsing.
     * Once finalized, the values should be read from ID_AA64ISAR1.
     */
    bool prop_pauth;
    bool prop_pauth_impdef;

    /* DCZ blocksize, in log_2(words), ie low 4 bits of DCZID_EL0 */
    uint32_t dcz_blocksize;
    uint64_t rvbar;
    int pe;

    uint32_t tcmtr;

    /* Configurable aspects of GIC cpu interface (which is part of the CPU) */
    int gic_num_lrs; /* number of list registers */
    int gic_vpribits; /* number of virtual priority bits */
    int gic_vprebits; /* number of virtual preemption bits */

    /* Whether the cfgend input is high (i.e. this CPU should reset into
     * big-endian mode).  This setting isn't used directly: instead it modifies
     * the reset_sctlr value to have SCTLR_B or SCTLR_EE set, depending on the
     * architecture version.
     */
    bool cfgend;

    MemoryRegion *mr_secure;

    QLIST_HEAD(, ARMELChangeHook) pre_el_change_hooks;
    QLIST_HEAD(, ARMELChangeHook) el_change_hooks;

    int32_t node_id; /* NUMA node this CPU belongs to */

    /* Used to synchronize KVM and QEMU in-kernel device levels */
    uint8_t device_irq_level;

    /* Used to set the maximum vector length the cpu will support.  */
    uint32_t sve_max_vq;

#ifdef CONFIG_USER_ONLY
    /* Used to set the default vector length at process start. */
    uint32_t sve_default_vq;
#endif

    /*
     * In sve_vq_map each set bit is a supported vector length of
     * (bit-number + 1) * 16 bytes, i.e. each bit number + 1 is the vector
     * length in quadwords. (See the sketch after this struct.)
     *
     * While processing properties during initialization, corresponding
     * sve_vq_init bits are set for bits in sve_vq_map that have been
     * set by properties.
     */
    DECLARE_BITMAP(sve_vq_map, ARM_MAX_VQ);
    DECLARE_BITMAP(sve_vq_init, ARM_MAX_VQ);

    /* Generic timer counter frequency, in Hz */
    uint64_t gt_cntfrq_hz;
};
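
/*
 * Illustrative sketch (not upstream code) of the sve_vq_map encoding
 * described above: bit N set means a supported vector length of
 * (N + 1) * 16 bytes, i.e. N + 1 quadwords.
 */
static inline unsigned demo_sve_vq_bit_to_bytes(unsigned bit)
{
    return (bit + 1) * 16;   /* bit 0 -> 16 bytes (VQ 1), bit 15 -> 256 */
}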

unsigned int gt_cntfrq_period_ns(ARMCPU *cpu);

void arm_cpu_post_init(Object *obj);

uint64_t arm_cpu_mp_affinity(int idx, uint8_t clustersz);

#ifndef CONFIG_USER_ONLY
extern const VMStateDescription vmstate_arm_cpu;
#endif

void arm_cpu_do_interrupt(CPUState *cpu);
void arm_v7m_cpu_do_interrupt(CPUState *cpu);
bool arm_cpu_exec_interrupt(CPUState *cpu, int int_req);

hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
                                         MemTxAttrs *attrs);

int arm_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int arm_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);

/*
 * Helpers to dynamically generate XML descriptions of the sysregs
 * and SVE registers. Returns the number of registers in each set.
 */
int arm_gen_dynamic_sysreg_xml(CPUState *cpu, int base_reg);
int arm_gen_dynamic_svereg_xml(CPUState *cpu, int base_reg);

/* Returns the dynamically generated XML for the gdb stub.
 * Returns a pointer to the XML contents for the specified XML file or NULL
 * if the XML name doesn't match the predefined one.
 */
const char *arm_gdb_get_dynamic_xml(CPUState *cpu, const char *xmlname);

int arm_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
                             int cpuid, void *opaque);
int arm_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
                             int cpuid, void *opaque);

#ifdef TARGET_AARCH64
int aarch64_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int aarch64_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq);
void aarch64_sve_change_el(CPUARMState *env, int old_el,
                           int new_el, bool el0_a64);
void aarch64_add_sve_properties(Object *obj);

/*
 * SVE registers are encoded in KVM's memory in an endianness-invariant format.
 * The byte at offset i from the start of the in-memory representation contains
 * the bits [(7 + 8 * i) : (8 * i)] of the register value. As this means the
 * lowest offsets are stored in the lowest memory addresses, then that nearly
 * matches QEMU's representation, which is to use an array of host-endian
 * uint64_t's, where the lower offsets are at the lower indices. To complete
 * the translation we just need to byte swap the uint64_t's on big-endian hosts.
 */
static inline uint64_t *sve_bswap64(uint64_t *dst, uint64_t *src, int nr)
{
#ifdef HOST_WORDS_BIGENDIAN
    int i;

    for (i = 0; i < nr; ++i) {
        dst[i] = bswap64(src[i]);
    }

    return dst;
#else
    return src;
#endif
}
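
/*
 * Illustrative usage sketch (not upstream code): copying SVE data from a
 * KVM-format buffer into QEMU's host-endian representation. Assumes
 * nr_u64 <= 2 * ARM_MAX_VQ and that memcpy() is available via the usual
 * QEMU includes.
 */
static inline void demo_load_zreg(ARMVectorReg *zreg, uint64_t *kvm_buf,
                                  int nr_u64)
{
    uint64_t scratch[2 * ARM_MAX_VQ];
    /* Byte-swaps into 'scratch' on big-endian hosts, else returns kvm_buf. */
    uint64_t *p = sve_bswap64(scratch, kvm_buf, nr_u64);

    memcpy(zreg->d, p, nr_u64 * sizeof(uint64_t));
}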

#else
static inline void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq) { }
static inline void aarch64_sve_change_el(CPUARMState *env, int o,
                                         int n, bool a)
{ }
static inline void aarch64_add_sve_properties(Object *obj) { }
#endif

void aarch64_sync_32_to_64(CPUARMState *env);
void aarch64_sync_64_to_32(CPUARMState *env);

int fp_exception_el(CPUARMState *env, int cur_el);
int sve_exception_el(CPUARMState *env, int cur_el);
uint32_t sve_zcr_len_for_el(CPUARMState *env, int el);

static inline bool is_a64(CPUARMState *env)
{
    return env->aarch64;
}

/* You can call this signal handler from your SIGBUS and SIGSEGV
   signal handlers to inform the virtual CPU of exceptions. Non-zero
   is returned if the signal was handled by the virtual CPU.  */
int cpu_arm_signal_handler(int host_signum, void *pinfo,
                           void *puc);

/**
 * pmu_op_start/finish
 * @env: CPUARMState
 *
 * Convert all PMU counters between their delta form (the typical mode when
 * they are enabled) and the guest-visible values. These two calls must
 * surround any action which might affect the counters.
 */
void pmu_op_start(CPUARMState *env);
void pmu_op_finish(CPUARMState *env);
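
/*
 * Illustrative sketch (not upstream code): any code that changes PMU
 * configuration brackets the change with pmu_op_start()/pmu_op_finish(),
 * per the contract documented above.
 */
static inline void demo_pmu_reconfigure(CPUARMState *env)
{
    pmu_op_start(env);
    /* ... modify counter configuration, e.g. PMCR or PMCNTEN, here ... */
    pmu_op_finish(env);
}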

/*
 * Called when a PMU counter is due to overflow
 */
void arm_pmu_timer_cb(void *opaque);

/**
 * Functions to register as EL change hooks for PMU mode filtering
 */
void pmu_pre_el_change(ARMCPU *cpu, void *ignored);
void pmu_post_el_change(ARMCPU *cpu, void *ignored);

/*
 * pmu_init
 * @cpu: ARMCPU
 *
 * Initialize the CPU's PMCEID[01]_EL0 registers and associated internal state
 * for the current configuration
 */
void pmu_init(ARMCPU *cpu);

/* SCTLR bit meanings. Several bits have been reused in newer
 * versions of the architecture; in that case we define constants
 * for both old and new bit meanings. Code which tests against those
 * bits should probably check or otherwise arrange that the CPU
 * is the architectural version it expects.
 */
#define SCTLR_M       (1U << 0)
#define SCTLR_A       (1U << 1)
#define SCTLR_C       (1U << 2)
#define SCTLR_W       (1U << 3) /* up to v6; RAO in v7 */
#define SCTLR_nTLSMD_32 (1U << 3) /* v8.2-LSMAOC, AArch32 only */
#define SCTLR_SA      (1U << 3) /* AArch64 only */
#define SCTLR_P       (1U << 4) /* up to v5; RAO in v6 and v7 */
#define SCTLR_LSMAOE_32 (1U << 4) /* v8.2-LSMAOC, AArch32 only */
#define SCTLR_SA0     (1U << 4) /* v8 onward, AArch64 only */
#define SCTLR_D       (1U << 5) /* up to v5; RAO in v6 */
#define SCTLR_CP15BEN (1U << 5) /* v7 onward */
#define SCTLR_L       (1U << 6) /* up to v5; RAO in v6 and v7; RAZ in v8 */
#define SCTLR_nAA     (1U << 6) /* when v8.4-LSE is implemented */
#define SCTLR_B       (1U << 7) /* up to v6; RAZ in v7 */
#define SCTLR_ITD     (1U << 7) /* v8 onward */
#define SCTLR_S       (1U << 8) /* up to v6; RAZ in v7 */
#define SCTLR_SED     (1U << 8) /* v8 onward */
#define SCTLR_R       (1U << 9) /* up to v6; RAZ in v7 */
#define SCTLR_UMA     (1U << 9) /* v8 onward, AArch64 only */
#define SCTLR_F       (1U << 10) /* up to v6 */
#define SCTLR_SW      (1U << 10) /* v7 */
#define SCTLR_EnRCTX  (1U << 10) /* in v8.0-PredInv */
#define SCTLR_Z       (1U << 11) /* in v7, RES1 in v8 */
#define SCTLR_EOS     (1U << 11) /* v8.5-ExS */
#define SCTLR_I       (1U << 12)
#define SCTLR_V       (1U << 13) /* AArch32 only */
#define SCTLR_EnDB    (1U << 13) /* v8.3, AArch64 only */
#define SCTLR_RR      (1U << 14) /* up to v7 */
#define SCTLR_DZE     (1U << 14) /* v8 onward, AArch64 only */
#define SCTLR_L4      (1U << 15) /* up to v6; RAZ in v7 */
#define SCTLR_UCT     (1U << 15) /* v8 onward, AArch64 only */
#define SCTLR_DT      (1U << 16) /* up to ??, RAO in v6 and v7 */
#define SCTLR_nTWI    (1U << 16) /* v8 onward */
#define SCTLR_HA      (1U << 17) /* up to v7, RES0 in v8 */
#define SCTLR_BR      (1U << 17) /* PMSA only */
#define SCTLR_IT      (1U << 18) /* up to ??, RAO in v6 and v7 */
#define SCTLR_nTWE    (1U << 18) /* v8 onward */
#define SCTLR_WXN     (1U << 19)
#define SCTLR_ST      (1U << 20) /* up to ??, RAZ in v6 */
#define SCTLR_UWXN    (1U << 20) /* v7 onward, AArch32 only */
#define SCTLR_FI      (1U << 21) /* up to v7, v8 RES0 */
#define SCTLR_IESB    (1U << 21) /* v8.2-IESB, AArch64 only */
#define SCTLR_U       (1U << 22) /* up to v6, RAO in v7 */
#define SCTLR_EIS     (1U << 22) /* v8.5-ExS */
#define SCTLR_XP      (1U << 23) /* up to v6; v7 onward RAO */
#define SCTLR_SPAN    (1U << 23) /* v8.1-PAN */
#define SCTLR_VE      (1U << 24) /* up to v7 */
#define SCTLR_E0E     (1U << 24) /* v8 onward, AArch64 only */
#define SCTLR_EE      (1U << 25)
#define SCTLR_L2      (1U << 26) /* up to v6, RAZ in v7 */
#define SCTLR_UCI     (1U << 26) /* v8 onward, AArch64 only */
#define SCTLR_NMFI    (1U << 27) /* up to v7, RAZ in v7VE and v8 */
#define SCTLR_EnDA    (1U << 27) /* v8.3, AArch64 only */
#define SCTLR_TRE     (1U << 28) /* AArch32 only */
#define SCTLR_nTLSMD_64 (1U << 28) /* v8.2-LSMAOC, AArch64 only */
#define SCTLR_AFE     (1U << 29) /* AArch32 only */
#define SCTLR_LSMAOE_64 (1U << 29) /* v8.2-LSMAOC, AArch64 only */
#define SCTLR_TE      (1U << 30) /* AArch32 only */
#define SCTLR_EnIB    (1U << 30) /* v8.3, AArch64 only */
#define SCTLR_EnIA    (1U << 31) /* v8.3, AArch64 only */
#define SCTLR_DSSBS_32 (1U << 31) /* v8.5, AArch32 only */
#define SCTLR_BT0     (1ULL << 35) /* v8.5-BTI */
#define SCTLR_BT1     (1ULL << 36) /* v8.5-BTI */
#define SCTLR_ITFSB   (1ULL << 37) /* v8.5-MemTag */
#define SCTLR_TCF0    (3ULL << 38) /* v8.5-MemTag */
#define SCTLR_TCF     (3ULL << 40) /* v8.5-MemTag */
#define SCTLR_ATA0    (1ULL << 42) /* v8.5-MemTag */
#define SCTLR_ATA     (1ULL << 43) /* v8.5-MemTag */
#define SCTLR_DSSBS_64 (1ULL << 44) /* v8.5, AArch64 only */

#define CPTR_TCPAC    (1U << 31)
#define CPTR_TTA      (1U << 20)
#define CPTR_TFP      (1U << 10)
#define CPTR_TZ       (1U << 8)   /* CPTR_EL2 */
#define CPTR_EZ       (1U << 8)   /* CPTR_EL3 */

#define MDCR_EPMAD    (1U << 21)
#define MDCR_EDAD     (1U << 20)
#define MDCR_SPME     (1U << 17)  /* MDCR_EL3 */
#define MDCR_HPMD     (1U << 17)  /* MDCR_EL2 */
#define MDCR_SDD      (1U << 16)
#define MDCR_SPD      (3U << 14)
#define MDCR_TDRA     (1U << 11)
#define MDCR_TDOSA    (1U << 10)
#define MDCR_TDA      (1U << 9)
#define MDCR_TDE      (1U << 8)
#define MDCR_HPME     (1U << 7)
#define MDCR_TPM      (1U << 6)
#define MDCR_TPMCR    (1U << 5)
#define MDCR_HPMN     (0x1fU)

/* Not all of the MDCR_EL3 bits are present in the 32-bit SDCR */
#define SDCR_VALID_MASK (MDCR_EPMAD | MDCR_EDAD | MDCR_SPME | MDCR_SPD)

#define CPSR_M (0x1fU)
#define CPSR_T (1U << 5)
#define CPSR_F (1U << 6)
#define CPSR_I (1U << 7)
#define CPSR_A (1U << 8)
#define CPSR_E (1U << 9)
#define CPSR_IT_2_7 (0xfc00U)
#define CPSR_GE (0xfU << 16)
#define CPSR_IL (1U << 20)
#define CPSR_DIT (1U << 21)
#define CPSR_PAN (1U << 22)
#define CPSR_SSBS (1U << 23)
#define CPSR_J (1U << 24)
#define CPSR_IT_0_1 (3U << 25)
#define CPSR_Q (1U << 27)
#define CPSR_V (1U << 28)
#define CPSR_C (1U << 29)
#define CPSR_Z (1U << 30)
#define CPSR_N (1U << 31)
#define CPSR_NZCV (CPSR_N | CPSR_Z | CPSR_C | CPSR_V)
#define CPSR_AIF (CPSR_A | CPSR_I | CPSR_F)

#define CPSR_IT (CPSR_IT_0_1 | CPSR_IT_2_7)
#define CACHED_CPSR_BITS (CPSR_T | CPSR_AIF | CPSR_GE | CPSR_IT | CPSR_Q \
    | CPSR_NZCV)
/* Bits writable in user mode.  */
#define CPSR_USER (CPSR_NZCV | CPSR_Q | CPSR_GE | CPSR_E)
/* Execution state bits.  MRS read as zero, MSR writes ignored.  */
#define CPSR_EXEC (CPSR_T | CPSR_IT | CPSR_J | CPSR_IL)

/* Bit definitions for M profile XPSR. Most are the same as CPSR. */
#define XPSR_EXCP 0x1ffU
#define XPSR_SPREALIGN (1U << 9) /* Only set in exception stack frames */
#define XPSR_IT_2_7 CPSR_IT_2_7
#define XPSR_GE CPSR_GE
#define XPSR_SFPA (1U << 20) /* Only set in exception stack frames */
#define XPSR_T (1U << 24) /* Not the same as CPSR_T ! */
#define XPSR_IT_0_1 CPSR_IT_0_1
#define XPSR_Q CPSR_Q
#define XPSR_V CPSR_V
#define XPSR_C CPSR_C
#define XPSR_Z CPSR_Z
#define XPSR_N CPSR_N
#define XPSR_NZCV CPSR_NZCV
#define XPSR_IT CPSR_IT

#define TTBCR_N      (7U << 0) /* TTBCR.EAE==0 */
#define TTBCR_T0SZ   (7U << 0) /* TTBCR.EAE==1 */
#define TTBCR_PD0    (1U << 4)
#define TTBCR_PD1    (1U << 5)
#define TTBCR_EPD0   (1U << 7)
#define TTBCR_IRGN0  (3U << 8)
#define TTBCR_ORGN0  (3U << 10)
#define TTBCR_SH0    (3U << 12)
#define TTBCR_T1SZ   (3U << 16)
#define TTBCR_A1     (1U << 22)
#define TTBCR_EPD1   (1U << 23)
#define TTBCR_IRGN1  (3U << 24)
#define TTBCR_ORGN1  (3U << 26)
#define TTBCR_SH1    (1U << 28)
#define TTBCR_EAE    (1U << 31)

/* Bit definitions for ARMv8 SPSR (PSTATE) format.
 * Only these are valid when in AArch64 mode; in
 * AArch32 mode SPSRs are basically CPSR-format.
 */
#define PSTATE_SP (1U)
#define PSTATE_M (0xFU)
#define PSTATE_nRW (1U << 4)
#define PSTATE_F (1U << 6)
#define PSTATE_I (1U << 7)
#define PSTATE_A (1U << 8)
#define PSTATE_D (1U << 9)
#define PSTATE_BTYPE (3U << 10)
#define PSTATE_SSBS (1U << 12)
#define PSTATE_IL (1U << 20)
#define PSTATE_SS (1U << 21)
#define PSTATE_PAN (1U << 22)
#define PSTATE_UAO (1U << 23)
#define PSTATE_DIT (1U << 24)
#define PSTATE_TCO (1U << 25)
#define PSTATE_V (1U << 28)
#define PSTATE_C (1U << 29)
#define PSTATE_Z (1U << 30)
#define PSTATE_N (1U << 31)
#define PSTATE_NZCV (PSTATE_N | PSTATE_Z | PSTATE_C | PSTATE_V)
#define PSTATE_DAIF (PSTATE_D | PSTATE_A | PSTATE_I | PSTATE_F)
#define CACHED_PSTATE_BITS (PSTATE_NZCV | PSTATE_DAIF | PSTATE_BTYPE)
/* Mode values for AArch64 */
#define PSTATE_MODE_EL3h 13
#define PSTATE_MODE_EL3t 12
#define PSTATE_MODE_EL2h 9
#define PSTATE_MODE_EL2t 8
#define PSTATE_MODE_EL1h 5
#define PSTATE_MODE_EL1t 4
#define PSTATE_MODE_EL0t 0

/* Write a new value to v7m.exception, thus transitioning into or out
 * of Handler mode; this may result in a change of active stack pointer.
 */
void write_v7m_exception(CPUARMState *env, uint32_t new_exc);

/* Map EL and handler into a PSTATE_MODE.  */
static inline unsigned int aarch64_pstate_mode(unsigned int el, bool handler)
{
    return (el << 2) | handler;
}
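
/* Worked examples (illustrative) of the encoding above:
 *   aarch64_pstate_mode(1, true)  == (1 << 2) | 1 == PSTATE_MODE_EL1h
 *   aarch64_pstate_mode(3, false) == (3 << 2) | 0 == PSTATE_MODE_EL3t
 */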

/* Return the current PSTATE value. For the moment we don't support 32<->64 bit
 * interprocessing, so we don't attempt to sync with the cpsr state used by
 * the 32 bit decoder.
 */
static inline uint32_t pstate_read(CPUARMState *env)
{
    int ZF;

    ZF = (env->ZF == 0);
    return (env->NF & 0x80000000) | (ZF << 30)
        | (env->CF << 29) | ((env->VF & 0x80000000) >> 3)
        | env->pstate | env->daif | (env->btype << 10);
}

static inline void pstate_write(CPUARMState *env, uint32_t val)
{
    env->ZF = (~val) & PSTATE_Z;
    env->NF = val;
    env->CF = (val >> 29) & 1;
    env->VF = (val << 3) & 0x80000000;
    env->daif = val & PSTATE_DAIF;
    env->btype = (val >> 10) & 3;
    env->pstate = val & ~CACHED_PSTATE_BITS;
}

/* Return the current CPSR value.  */
uint32_t cpsr_read(CPUARMState *env);

typedef enum CPSRWriteType {
    CPSRWriteByInstr = 0,         /* from guest MSR or CPS */
    CPSRWriteExceptionReturn = 1, /* from guest exception return insn */
    CPSRWriteRaw = 2,             /* trust values, do not switch reg banks */
    CPSRWriteByGDBStub = 3,       /* from the GDB stub */
} CPSRWriteType;

/* Set the CPSR.  Note that some bits of mask must be all-set or all-clear.  */
void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
                CPSRWriteType write_type);
uint64_t mpidr_read_val(CPUARMState *env);

1458/* Return the current xPSR value.  */
1459static inline uint32_t xpsr_read(CPUARMState *env)
1460{
1461    int ZF;
1462    ZF = (env->ZF == 0);
1463    return (env->NF & 0x80000000) | (ZF << 30)
1464        | (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
1465        | (env->thumb << 24) | ((env->condexec_bits & 3) << 25)
1466        | ((env->condexec_bits & 0xfc) << 8)
1467        | (env->GE << 16)
1468        | env->v7m.exception;
1469}
1470
1471/* Set the xPSR.  Note that some bits of mask must be all-set or all-clear.  */
1472static inline void xpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
1473{
1474    if (mask & XPSR_NZCV) {
1475        env->ZF = (~val) & XPSR_Z;
1476        env->NF = val;
1477        env->CF = (val >> 29) & 1;
1478        env->VF = (val << 3) & 0x80000000;
1479    }
1480    if (mask & XPSR_Q) {
1481        env->QF = ((val & XPSR_Q) != 0);
1482    }
1483    if (mask & XPSR_GE) {
1484        env->GE = (val & XPSR_GE) >> 16;
1485    }
1486#ifndef CONFIG_USER_ONLY
1487    if (mask & XPSR_T) {
1488        env->thumb = ((val & XPSR_T) != 0);
1489    }
1490    if (mask & XPSR_IT_0_1) {
1491        env->condexec_bits &= ~3;
1492        env->condexec_bits |= (val >> 25) & 3;
1493    }
1494    if (mask & XPSR_IT_2_7) {
1495        env->condexec_bits &= 3;
1496        env->condexec_bits |= (val >> 8) & 0xfc;
1497    }
1498    if (mask & XPSR_EXCP) {
1499        /* Note that this only happens on exception exit */
1500        write_v7m_exception(env, val & XPSR_EXCP);
1501    }
1502#endif
1503}
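
/*
 * Usage sketch (illustrative): the mask selects which xPSR fields are
 * updated, so e.g. the IT[1:0] bits can be written without disturbing
 * NZCV or the exception number:
 *
 *     xpsr_write(env, 3 << 25, XPSR_IT_0_1);
 */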
1504
1505#define HCR_VM        (1ULL << 0)
1506#define HCR_SWIO      (1ULL << 1)
1507#define HCR_PTW       (1ULL << 2)
1508#define HCR_FMO       (1ULL << 3)
1509#define HCR_IMO       (1ULL << 4)
1510#define HCR_AMO       (1ULL << 5)
1511#define HCR_VF        (1ULL << 6)
1512#define HCR_VI        (1ULL << 7)
1513#define HCR_VSE       (1ULL << 8)
1514#define HCR_FB        (1ULL << 9)
1515#define HCR_BSU_MASK  (3ULL << 10)
1516#define HCR_DC        (1ULL << 12)
1517#define HCR_TWI       (1ULL << 13)
1518#define HCR_TWE       (1ULL << 14)
1519#define HCR_TID0      (1ULL << 15)
1520#define HCR_TID1      (1ULL << 16)
1521#define HCR_TID2      (1ULL << 17)
1522#define HCR_TID3      (1ULL << 18)
1523#define HCR_TSC       (1ULL << 19)
1524#define HCR_TIDCP     (1ULL << 20)
1525#define HCR_TACR      (1ULL << 21)
1526#define HCR_TSW       (1ULL << 22)
1527#define HCR_TPCP      (1ULL << 23)
1528#define HCR_TPU       (1ULL << 24)
1529#define HCR_TTLB      (1ULL << 25)
1530#define HCR_TVM       (1ULL << 26)
1531#define HCR_TGE       (1ULL << 27)
1532#define HCR_TDZ       (1ULL << 28)
1533#define HCR_HCD       (1ULL << 29)
1534#define HCR_TRVM      (1ULL << 30)
1535#define HCR_RW        (1ULL << 31)
1536#define HCR_CD        (1ULL << 32)
1537#define HCR_ID        (1ULL << 33)
1538#define HCR_E2H       (1ULL << 34)
1539#define HCR_TLOR      (1ULL << 35)
1540#define HCR_TERR      (1ULL << 36)
1541#define HCR_TEA       (1ULL << 37)
1542#define HCR_MIOCNCE   (1ULL << 38)
1543/* RES0 bit 39 */
1544#define HCR_APK       (1ULL << 40)
1545#define HCR_API       (1ULL << 41)
1546#define HCR_NV        (1ULL << 42)
1547#define HCR_NV1       (1ULL << 43)
1548#define HCR_AT        (1ULL << 44)
1549#define HCR_NV2       (1ULL << 45)
1550#define HCR_FWB       (1ULL << 46)
1551#define HCR_FIEN      (1ULL << 47)
1552/* RES0 bit 48 */
1553#define HCR_TID4      (1ULL << 49)
1554#define HCR_TICAB     (1ULL << 50)
1555#define HCR_AMVOFFEN  (1ULL << 51)
1556#define HCR_TOCU      (1ULL << 52)
1557#define HCR_ENSCXT    (1ULL << 53)
1558#define HCR_TTLBIS    (1ULL << 54)
1559#define HCR_TTLBOS    (1ULL << 55)
1560#define HCR_ATA       (1ULL << 56)
1561#define HCR_DCT       (1ULL << 57)
1562#define HCR_TID5      (1ULL << 58)
1563#define HCR_TWEDEN    (1ULL << 59)
1564#define HCR_TWEDEL    MAKE_64BIT_MASK(60, 4)
1565
1566#define HPFAR_NS      (1ULL << 63)
1567
1568#define SCR_NS                (1U << 0)
1569#define SCR_IRQ               (1U << 1)
1570#define SCR_FIQ               (1U << 2)
1571#define SCR_EA                (1U << 3)
1572#define SCR_FW                (1U << 4)
1573#define SCR_AW                (1U << 5)
1574#define SCR_NET               (1U << 6)
1575#define SCR_SMD               (1U << 7)
1576#define SCR_HCE               (1U << 8)
1577#define SCR_SIF               (1U << 9)
1578#define SCR_RW                (1U << 10)
1579#define SCR_ST                (1U << 11)
1580#define SCR_TWI               (1U << 12)
1581#define SCR_TWE               (1U << 13)
1582#define SCR_TLOR              (1U << 14)
1583#define SCR_TERR              (1U << 15)
1584#define SCR_APK               (1U << 16)
1585#define SCR_API               (1U << 17)
1586#define SCR_EEL2              (1U << 18)
1587#define SCR_EASE              (1U << 19)
1588#define SCR_NMEA              (1U << 20)
1589#define SCR_FIEN              (1U << 21)
1590#define SCR_ENSCXT            (1U << 25)
1591#define SCR_ATA               (1U << 26)
1592
1593/* Return the current FPSCR value.  */
1594uint32_t vfp_get_fpscr(CPUARMState *env);
1595void vfp_set_fpscr(CPUARMState *env, uint32_t val);
1596
1597/* FPCR, Floating Point Control Register
1598 * FPSR, Floating Point Status Register
1599 *
1600 * For A64 the FPSCR is split into two logically distinct registers,
1601 * FPCR and FPSR. However since they still use non-overlapping bits
1602 * we store the underlying state in fpscr and just mask on read/write.
1603 */
1604#define FPSR_MASK 0xf800009f
1605#define FPCR_MASK 0x07ff9f00
1606
1607#define FPCR_IOE    (1 << 8)    /* Invalid Operation exception trap enable */
1608#define FPCR_DZE    (1 << 9)    /* Divide by Zero exception trap enable */
1609#define FPCR_OFE    (1 << 10)   /* Overflow exception trap enable */
1610#define FPCR_UFE    (1 << 11)   /* Underflow exception trap enable */
1611#define FPCR_IXE    (1 << 12)   /* Inexact exception trap enable */
1612#define FPCR_IDE    (1 << 15)   /* Input Denormal exception trap enable */
1613#define FPCR_FZ16   (1 << 19)   /* ARMv8.2+, FP16 flush-to-zero */
1614#define FPCR_RMODE_MASK (3 << 22) /* Rounding mode */
1615#define FPCR_FZ     (1 << 24)   /* Flush-to-zero enable bit */
1616#define FPCR_DN     (1 << 25)   /* Default NaN enable bit */
1617#define FPCR_AHP    (1 << 26)   /* Alternative half-precision */
1618#define FPCR_QC     (1 << 27)   /* Cumulative saturation bit */
1619#define FPCR_V      (1 << 28)   /* FP overflow flag */
1620#define FPCR_C      (1 << 29)   /* FP carry flag */
1621#define FPCR_Z      (1 << 30)   /* FP zero flag */
1622#define FPCR_N      (1 << 31)   /* FP negative flag */
1623
1624#define FPCR_LTPSIZE_SHIFT 16   /* LTPSIZE, M-profile only */
1625#define FPCR_LTPSIZE_MASK (7 << FPCR_LTPSIZE_SHIFT)
1626#define FPCR_LTPSIZE_LENGTH 3
1627
1628#define FPCR_NZCV_MASK (FPCR_N | FPCR_Z | FPCR_C | FPCR_V)
1629#define FPCR_NZCVQC_MASK (FPCR_NZCV_MASK | FPCR_QC)
1630
1631static inline uint32_t vfp_get_fpsr(CPUARMState *env)
1632{
1633    return vfp_get_fpscr(env) & FPSR_MASK;
1634}
1635
1636static inline void vfp_set_fpsr(CPUARMState *env, uint32_t val)
1637{
1638    uint32_t new_fpscr = (vfp_get_fpscr(env) & ~FPSR_MASK) | (val & FPSR_MASK);
1639    vfp_set_fpscr(env, new_fpscr);
1640}
1641
1642static inline uint32_t vfp_get_fpcr(CPUARMState *env)
1643{
1644    return vfp_get_fpscr(env) & FPCR_MASK;
1645}
1646
1647static inline void vfp_set_fpcr(CPUARMState *env, uint32_t val)
1648{
1649    uint32_t new_fpscr = (vfp_get_fpscr(env) & ~FPCR_MASK) | (val & FPCR_MASK);
1650    vfp_set_fpscr(env, new_fpscr);
1651}
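
/*
 * Sketch (illustrative, assuming the underlying fpscr starts at 0): the
 * two masks partition fpscr, so the FPCR and FPSR views update disjoint
 * bits:
 *
 *     vfp_set_fpcr(env, FPCR_DN);     // control bit, FPCR view
 *     vfp_set_fpsr(env, 1);           // IOC cumulative flag, FPSR view
 *     assert(vfp_get_fpscr(env) == (FPCR_DN | 1));
 */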
1652
1653enum arm_cpu_mode {
1654  ARM_CPU_MODE_USR = 0x10,
1655  ARM_CPU_MODE_FIQ = 0x11,
1656  ARM_CPU_MODE_IRQ = 0x12,
1657  ARM_CPU_MODE_SVC = 0x13,
1658  ARM_CPU_MODE_MON = 0x16,
1659  ARM_CPU_MODE_ABT = 0x17,
1660  ARM_CPU_MODE_HYP = 0x1a,
1661  ARM_CPU_MODE_UND = 0x1b,
1662  ARM_CPU_MODE_SYS = 0x1f
1663};
1664
1665/* VFP system registers.  */
1666#define ARM_VFP_FPSID   0
1667#define ARM_VFP_FPSCR   1
1668#define ARM_VFP_MVFR2   5
1669#define ARM_VFP_MVFR1   6
1670#define ARM_VFP_MVFR0   7
1671#define ARM_VFP_FPEXC   8
1672#define ARM_VFP_FPINST  9
1673#define ARM_VFP_FPINST2 10
1674/* These ones are M-profile only */
1675#define ARM_VFP_FPSCR_NZCVQC 2
1676#define ARM_VFP_VPR 12
1677#define ARM_VFP_P0 13
1678#define ARM_VFP_FPCXT_NS 14
1679#define ARM_VFP_FPCXT_S 15
1680
1681/* QEMU-internal value meaning "FPSCR, but we care only about NZCV" */
1682#define QEMU_VFP_FPSCR_NZCV 0xffff
1683
1684/* iwMMXt coprocessor control registers.  */
1685#define ARM_IWMMXT_wCID  0
1686#define ARM_IWMMXT_wCon  1
1687#define ARM_IWMMXT_wCSSF 2
1688#define ARM_IWMMXT_wCASF 3
1689#define ARM_IWMMXT_wCGR0 8
1690#define ARM_IWMMXT_wCGR1 9
1691#define ARM_IWMMXT_wCGR2 10
1692#define ARM_IWMMXT_wCGR3 11
1693
1694/* V7M CCR bits */
1695FIELD(V7M_CCR, NONBASETHRDENA, 0, 1)
1696FIELD(V7M_CCR, USERSETMPEND, 1, 1)
1697FIELD(V7M_CCR, UNALIGN_TRP, 3, 1)
1698FIELD(V7M_CCR, DIV_0_TRP, 4, 1)
1699FIELD(V7M_CCR, BFHFNMIGN, 8, 1)
1700FIELD(V7M_CCR, STKALIGN, 9, 1)
1701FIELD(V7M_CCR, STKOFHFNMIGN, 10, 1)
1702FIELD(V7M_CCR, DC, 16, 1)
1703FIELD(V7M_CCR, IC, 17, 1)
1704FIELD(V7M_CCR, BP, 18, 1)
1705FIELD(V7M_CCR, LOB, 19, 1)
1706FIELD(V7M_CCR, TRD, 20, 1)
1707
1708/* V7M SCR bits */
1709FIELD(V7M_SCR, SLEEPONEXIT, 1, 1)
1710FIELD(V7M_SCR, SLEEPDEEP, 2, 1)
1711FIELD(V7M_SCR, SLEEPDEEPS, 3, 1)
1712FIELD(V7M_SCR, SEVONPEND, 4, 1)
1713
1714/* V7M AIRCR bits */
1715FIELD(V7M_AIRCR, VECTRESET, 0, 1)
1716FIELD(V7M_AIRCR, VECTCLRACTIVE, 1, 1)
1717FIELD(V7M_AIRCR, SYSRESETREQ, 2, 1)
1718FIELD(V7M_AIRCR, SYSRESETREQS, 3, 1)
1719FIELD(V7M_AIRCR, PRIGROUP, 8, 3)
1720FIELD(V7M_AIRCR, BFHFNMINS, 13, 1)
1721FIELD(V7M_AIRCR, PRIS, 14, 1)
1722FIELD(V7M_AIRCR, ENDIANNESS, 15, 1)
1723FIELD(V7M_AIRCR, VECTKEY, 16, 16)
1724
1725/* V7M CFSR bits for MMFSR */
1726FIELD(V7M_CFSR, IACCVIOL, 0, 1)
1727FIELD(V7M_CFSR, DACCVIOL, 1, 1)
1728FIELD(V7M_CFSR, MUNSTKERR, 3, 1)
1729FIELD(V7M_CFSR, MSTKERR, 4, 1)
1730FIELD(V7M_CFSR, MLSPERR, 5, 1)
1731FIELD(V7M_CFSR, MMARVALID, 7, 1)
1732
1733/* V7M CFSR bits for BFSR */
1734FIELD(V7M_CFSR, IBUSERR, 8 + 0, 1)
1735FIELD(V7M_CFSR, PRECISERR, 8 + 1, 1)
1736FIELD(V7M_CFSR, IMPRECISERR, 8 + 2, 1)
1737FIELD(V7M_CFSR, UNSTKERR, 8 + 3, 1)
1738FIELD(V7M_CFSR, STKERR, 8 + 4, 1)
1739FIELD(V7M_CFSR, LSPERR, 8 + 5, 1)
1740FIELD(V7M_CFSR, BFARVALID, 8 + 7, 1)
1741
1742/* V7M CFSR bits for UFSR */
1743FIELD(V7M_CFSR, UNDEFINSTR, 16 + 0, 1)
1744FIELD(V7M_CFSR, INVSTATE, 16 + 1, 1)
1745FIELD(V7M_CFSR, INVPC, 16 + 2, 1)
1746FIELD(V7M_CFSR, NOCP, 16 + 3, 1)
1747FIELD(V7M_CFSR, STKOF, 16 + 4, 1)
1748FIELD(V7M_CFSR, UNALIGNED, 16 + 8, 1)
1749FIELD(V7M_CFSR, DIVBYZERO, 16 + 9, 1)
1750
1751/* V7M CFSR bit masks covering all of the subregister bits */
1752FIELD(V7M_CFSR, MMFSR, 0, 8)
1753FIELD(V7M_CFSR, BFSR, 8, 8)
1754FIELD(V7M_CFSR, UFSR, 16, 16)
1755
1756/* V7M HFSR bits */
1757FIELD(V7M_HFSR, VECTTBL, 1, 1)
1758FIELD(V7M_HFSR, FORCED, 30, 1)
1759FIELD(V7M_HFSR, DEBUGEVT, 31, 1)
1760
1761/* V7M DFSR bits */
1762FIELD(V7M_DFSR, HALTED, 0, 1)
1763FIELD(V7M_DFSR, BKPT, 1, 1)
1764FIELD(V7M_DFSR, DWTTRAP, 2, 1)
1765FIELD(V7M_DFSR, VCATCH, 3, 1)
1766FIELD(V7M_DFSR, EXTERNAL, 4, 1)
1767
1768/* V7M SFSR bits */
1769FIELD(V7M_SFSR, INVEP, 0, 1)
1770FIELD(V7M_SFSR, INVIS, 1, 1)
1771FIELD(V7M_SFSR, INVER, 2, 1)
1772FIELD(V7M_SFSR, AUVIOL, 3, 1)
1773FIELD(V7M_SFSR, INVTRAN, 4, 1)
1774FIELD(V7M_SFSR, LSPERR, 5, 1)
1775FIELD(V7M_SFSR, SFARVALID, 6, 1)
1776FIELD(V7M_SFSR, LSERR, 7, 1)
1777
1778/* v7M MPU_CTRL bits */
1779FIELD(V7M_MPU_CTRL, ENABLE, 0, 1)
1780FIELD(V7M_MPU_CTRL, HFNMIENA, 1, 1)
1781FIELD(V7M_MPU_CTRL, PRIVDEFENA, 2, 1)
1782
1783/* v7M CLIDR bits */
1784FIELD(V7M_CLIDR, CTYPE_ALL, 0, 21)
1785FIELD(V7M_CLIDR, LOUIS, 21, 3)
1786FIELD(V7M_CLIDR, LOC, 24, 3)
1787FIELD(V7M_CLIDR, LOUU, 27, 3)
1788FIELD(V7M_CLIDR, ICB, 30, 2)
1789
1790FIELD(V7M_CSSELR, IND, 0, 1)
1791FIELD(V7M_CSSELR, LEVEL, 1, 3)
1792/* We use the combination of InD and Level to index into cpu->ccsidr[];
1793 * define a mask for this and check that it doesn't permit running off
1794 * the end of the array.
1795 */
1796FIELD(V7M_CSSELR, INDEX, 0, 4)
1797
1798/* v7M FPCCR bits */
1799FIELD(V7M_FPCCR, LSPACT, 0, 1)
1800FIELD(V7M_FPCCR, USER, 1, 1)
1801FIELD(V7M_FPCCR, S, 2, 1)
1802FIELD(V7M_FPCCR, THREAD, 3, 1)
1803FIELD(V7M_FPCCR, HFRDY, 4, 1)
1804FIELD(V7M_FPCCR, MMRDY, 5, 1)
1805FIELD(V7M_FPCCR, BFRDY, 6, 1)
1806FIELD(V7M_FPCCR, SFRDY, 7, 1)
1807FIELD(V7M_FPCCR, MONRDY, 8, 1)
1808FIELD(V7M_FPCCR, SPLIMVIOL, 9, 1)
1809FIELD(V7M_FPCCR, UFRDY, 10, 1)
1810FIELD(V7M_FPCCR, RES0, 11, 15)
1811FIELD(V7M_FPCCR, TS, 26, 1)
1812FIELD(V7M_FPCCR, CLRONRETS, 27, 1)
1813FIELD(V7M_FPCCR, CLRONRET, 28, 1)
1814FIELD(V7M_FPCCR, LSPENS, 29, 1)
1815FIELD(V7M_FPCCR, LSPEN, 30, 1)
1816FIELD(V7M_FPCCR, ASPEN, 31, 1)
1817/* These bits are banked. Others are non-banked and live in the M_REG_S bank */
1818#define R_V7M_FPCCR_BANKED_MASK                 \
1819    (R_V7M_FPCCR_LSPACT_MASK |                  \
1820     R_V7M_FPCCR_USER_MASK |                    \
1821     R_V7M_FPCCR_THREAD_MASK |                  \
1822     R_V7M_FPCCR_MMRDY_MASK |                   \
1823     R_V7M_FPCCR_SPLIMVIOL_MASK |               \
1824     R_V7M_FPCCR_UFRDY_MASK |                   \
1825     R_V7M_FPCCR_ASPEN_MASK)
1826
1827/* v7M VPR bits */
1828FIELD(V7M_VPR, P0, 0, 16)
1829FIELD(V7M_VPR, MASK01, 16, 4)
1830FIELD(V7M_VPR, MASK23, 20, 4)
1831
1832/*
1833 * System register ID fields.
1834 */
1835FIELD(CLIDR_EL1, CTYPE1, 0, 3)
1836FIELD(CLIDR_EL1, CTYPE2, 3, 3)
1837FIELD(CLIDR_EL1, CTYPE3, 6, 3)
1838FIELD(CLIDR_EL1, CTYPE4, 9, 3)
1839FIELD(CLIDR_EL1, CTYPE5, 12, 3)
1840FIELD(CLIDR_EL1, CTYPE6, 15, 3)
1841FIELD(CLIDR_EL1, CTYPE7, 18, 3)
1842FIELD(CLIDR_EL1, LOUIS, 21, 3)
1843FIELD(CLIDR_EL1, LOC, 24, 3)
1844FIELD(CLIDR_EL1, LOUU, 27, 3)
1845FIELD(CLIDR_EL1, ICB, 30, 3)
1846
1847/* When FEAT_CCIDX is implemented */
1848FIELD(CCSIDR_EL1, CCIDX_LINESIZE, 0, 3)
1849FIELD(CCSIDR_EL1, CCIDX_ASSOCIATIVITY, 3, 21)
1850FIELD(CCSIDR_EL1, CCIDX_NUMSETS, 32, 24)
1851
1852/* When FEAT_CCIDX is not implemented */
1853FIELD(CCSIDR_EL1, LINESIZE, 0, 3)
1854FIELD(CCSIDR_EL1, ASSOCIATIVITY, 3, 10)
1855FIELD(CCSIDR_EL1, NUMSETS, 13, 15)
1856
1857FIELD(CTR_EL0,  IMINLINE, 0, 4)
1858FIELD(CTR_EL0,  L1IP, 14, 2)
1859FIELD(CTR_EL0,  DMINLINE, 16, 4)
1860FIELD(CTR_EL0,  ERG, 20, 4)
1861FIELD(CTR_EL0,  CWG, 24, 4)
1862FIELD(CTR_EL0,  IDC, 28, 1)
1863FIELD(CTR_EL0,  DIC, 29, 1)
1864FIELD(CTR_EL0,  TMINLINE, 32, 6)
1865
1866FIELD(MIDR_EL1, REVISION, 0, 4)
1867FIELD(MIDR_EL1, PARTNUM, 4, 12)
1868FIELD(MIDR_EL1, ARCHITECTURE, 16, 4)
1869FIELD(MIDR_EL1, VARIANT, 20, 4)
1870FIELD(MIDR_EL1, IMPLEMENTER, 24, 8)
1871
1872FIELD(ID_ISAR0, SWAP, 0, 4)
1873FIELD(ID_ISAR0, BITCOUNT, 4, 4)
1874FIELD(ID_ISAR0, BITFIELD, 8, 4)
1875FIELD(ID_ISAR0, CMPBRANCH, 12, 4)
1876FIELD(ID_ISAR0, COPROC, 16, 4)
1877FIELD(ID_ISAR0, DEBUG, 20, 4)
1878FIELD(ID_ISAR0, DIVIDE, 24, 4)
1879
1880FIELD(ID_ISAR1, ENDIAN, 0, 4)
1881FIELD(ID_ISAR1, EXCEPT, 4, 4)
1882FIELD(ID_ISAR1, EXCEPT_AR, 8, 4)
1883FIELD(ID_ISAR1, EXTEND, 12, 4)
1884FIELD(ID_ISAR1, IFTHEN, 16, 4)
1885FIELD(ID_ISAR1, IMMEDIATE, 20, 4)
1886FIELD(ID_ISAR1, INTERWORK, 24, 4)
1887FIELD(ID_ISAR1, JAZELLE, 28, 4)
1888
1889FIELD(ID_ISAR2, LOADSTORE, 0, 4)
1890FIELD(ID_ISAR2, MEMHINT, 4, 4)
1891FIELD(ID_ISAR2, MULTIACCESSINT, 8, 4)
1892FIELD(ID_ISAR2, MULT, 12, 4)
1893FIELD(ID_ISAR2, MULTS, 16, 4)
1894FIELD(ID_ISAR2, MULTU, 20, 4)
1895FIELD(ID_ISAR2, PSR_AR, 24, 4)
1896FIELD(ID_ISAR2, REVERSAL, 28, 4)
1897
1898FIELD(ID_ISAR3, SATURATE, 0, 4)
1899FIELD(ID_ISAR3, SIMD, 4, 4)
1900FIELD(ID_ISAR3, SVC, 8, 4)
1901FIELD(ID_ISAR3, SYNCHPRIM, 12, 4)
1902FIELD(ID_ISAR3, TABBRANCH, 16, 4)
1903FIELD(ID_ISAR3, T32COPY, 20, 4)
1904FIELD(ID_ISAR3, TRUENOP, 24, 4)
1905FIELD(ID_ISAR3, T32EE, 28, 4)
1906
1907FIELD(ID_ISAR4, UNPRIV, 0, 4)
1908FIELD(ID_ISAR4, WITHSHIFTS, 4, 4)
1909FIELD(ID_ISAR4, WRITEBACK, 8, 4)
1910FIELD(ID_ISAR4, SMC, 12, 4)
1911FIELD(ID_ISAR4, BARRIER, 16, 4)
1912FIELD(ID_ISAR4, SYNCHPRIM_FRAC, 20, 4)
1913FIELD(ID_ISAR4, PSR_M, 24, 4)
1914FIELD(ID_ISAR4, SWP_FRAC, 28, 4)
1915
1916FIELD(ID_ISAR5, SEVL, 0, 4)
1917FIELD(ID_ISAR5, AES, 4, 4)
1918FIELD(ID_ISAR5, SHA1, 8, 4)
1919FIELD(ID_ISAR5, SHA2, 12, 4)
1920FIELD(ID_ISAR5, CRC32, 16, 4)
1921FIELD(ID_ISAR5, RDM, 24, 4)
1922FIELD(ID_ISAR5, VCMA, 28, 4)
1923
1924FIELD(ID_ISAR6, JSCVT, 0, 4)
1925FIELD(ID_ISAR6, DP, 4, 4)
1926FIELD(ID_ISAR6, FHM, 8, 4)
1927FIELD(ID_ISAR6, SB, 12, 4)
1928FIELD(ID_ISAR6, SPECRES, 16, 4)
1929FIELD(ID_ISAR6, BF16, 20, 4)
1930FIELD(ID_ISAR6, I8MM, 24, 4)
1931
1932FIELD(ID_MMFR0, VMSA, 0, 4)
1933FIELD(ID_MMFR0, PMSA, 4, 4)
1934FIELD(ID_MMFR0, OUTERSHR, 8, 4)
1935FIELD(ID_MMFR0, SHARELVL, 12, 4)
1936FIELD(ID_MMFR0, TCM, 16, 4)
1937FIELD(ID_MMFR0, AUXREG, 20, 4)
1938FIELD(ID_MMFR0, FCSE, 24, 4)
1939FIELD(ID_MMFR0, INNERSHR, 28, 4)
1940
1941FIELD(ID_MMFR1, L1HVDVA, 0, 4)
1942FIELD(ID_MMFR1, L1UNIVA, 4, 4)
1943FIELD(ID_MMFR1, L1HVDSW, 8, 4)
1944FIELD(ID_MMFR1, L1UNISW, 12, 4)
1945FIELD(ID_MMFR1, L1HVD, 16, 4)
1946FIELD(ID_MMFR1, L1UNI, 20, 4)
1947FIELD(ID_MMFR1, L1TSTCLN, 24, 4)
1948FIELD(ID_MMFR1, BPRED, 28, 4)
1949
1950FIELD(ID_MMFR2, L1HVDFG, 0, 4)
1951FIELD(ID_MMFR2, L1HVDBG, 4, 4)
1952FIELD(ID_MMFR2, L1HVDRNG, 8, 4)
1953FIELD(ID_MMFR2, HVDTLB, 12, 4)
1954FIELD(ID_MMFR2, UNITLB, 16, 4)
1955FIELD(ID_MMFR2, MEMBARR, 20, 4)
1956FIELD(ID_MMFR2, WFISTALL, 24, 4)
1957FIELD(ID_MMFR2, HWACCFLG, 28, 4)
1958
1959FIELD(ID_MMFR3, CMAINTVA, 0, 4)
1960FIELD(ID_MMFR3, CMAINTSW, 4, 4)
1961FIELD(ID_MMFR3, BPMAINT, 8, 4)
1962FIELD(ID_MMFR3, MAINTBCST, 12, 4)
1963FIELD(ID_MMFR3, PAN, 16, 4)
1964FIELD(ID_MMFR3, COHWALK, 20, 4)
1965FIELD(ID_MMFR3, CMEMSZ, 24, 4)
1966FIELD(ID_MMFR3, SUPERSEC, 28, 4)
1967
1968FIELD(ID_MMFR4, SPECSEI, 0, 4)
1969FIELD(ID_MMFR4, AC2, 4, 4)
1970FIELD(ID_MMFR4, XNX, 8, 4)
1971FIELD(ID_MMFR4, CNP, 12, 4)
1972FIELD(ID_MMFR4, HPDS, 16, 4)
1973FIELD(ID_MMFR4, LSM, 20, 4)
1974FIELD(ID_MMFR4, CCIDX, 24, 4)
1975FIELD(ID_MMFR4, EVT, 28, 4)
1976
1977FIELD(ID_MMFR5, ETS, 0, 4)
1978
1979FIELD(ID_PFR0, STATE0, 0, 4)
1980FIELD(ID_PFR0, STATE1, 4, 4)
1981FIELD(ID_PFR0, STATE2, 8, 4)
1982FIELD(ID_PFR0, STATE3, 12, 4)
1983FIELD(ID_PFR0, CSV2, 16, 4)
1984FIELD(ID_PFR0, AMU, 20, 4)
1985FIELD(ID_PFR0, DIT, 24, 4)
1986FIELD(ID_PFR0, RAS, 28, 4)
1987
1988FIELD(ID_PFR1, PROGMOD, 0, 4)
1989FIELD(ID_PFR1, SECURITY, 4, 4)
1990FIELD(ID_PFR1, MPROGMOD, 8, 4)
1991FIELD(ID_PFR1, VIRTUALIZATION, 12, 4)
1992FIELD(ID_PFR1, GENTIMER, 16, 4)
1993FIELD(ID_PFR1, SEC_FRAC, 20, 4)
1994FIELD(ID_PFR1, VIRT_FRAC, 24, 4)
1995FIELD(ID_PFR1, GIC, 28, 4)
1996
1997FIELD(ID_PFR2, CSV3, 0, 4)
1998FIELD(ID_PFR2, SSBS, 4, 4)
1999FIELD(ID_PFR2, RAS_FRAC, 8, 4)
2000
2001FIELD(ID_AA64ISAR0, AES, 4, 4)
2002FIELD(ID_AA64ISAR0, SHA1, 8, 4)
2003FIELD(ID_AA64ISAR0, SHA2, 12, 4)
2004FIELD(ID_AA64ISAR0, CRC32, 16, 4)
2005FIELD(ID_AA64ISAR0, ATOMIC, 20, 4)
2006FIELD(ID_AA64ISAR0, RDM, 28, 4)
2007FIELD(ID_AA64ISAR0, SHA3, 32, 4)
2008FIELD(ID_AA64ISAR0, SM3, 36, 4)
2009FIELD(ID_AA64ISAR0, SM4, 40, 4)
2010FIELD(ID_AA64ISAR0, DP, 44, 4)
2011FIELD(ID_AA64ISAR0, FHM, 48, 4)
2012FIELD(ID_AA64ISAR0, TS, 52, 4)
2013FIELD(ID_AA64ISAR0, TLB, 56, 4)
2014FIELD(ID_AA64ISAR0, RNDR, 60, 4)
2015
2016FIELD(ID_AA64ISAR1, DPB, 0, 4)
2017FIELD(ID_AA64ISAR1, APA, 4, 4)
2018FIELD(ID_AA64ISAR1, API, 8, 4)
2019FIELD(ID_AA64ISAR1, JSCVT, 12, 4)
2020FIELD(ID_AA64ISAR1, FCMA, 16, 4)
2021FIELD(ID_AA64ISAR1, LRCPC, 20, 4)
2022FIELD(ID_AA64ISAR1, GPA, 24, 4)
2023FIELD(ID_AA64ISAR1, GPI, 28, 4)
2024FIELD(ID_AA64ISAR1, FRINTTS, 32, 4)
2025FIELD(ID_AA64ISAR1, SB, 36, 4)
2026FIELD(ID_AA64ISAR1, SPECRES, 40, 4)
2027FIELD(ID_AA64ISAR1, BF16, 44, 4)
2028FIELD(ID_AA64ISAR1, DGH, 48, 4)
2029FIELD(ID_AA64ISAR1, I8MM, 52, 4)
2030
2031FIELD(ID_AA64PFR0, EL0, 0, 4)
2032FIELD(ID_AA64PFR0, EL1, 4, 4)
2033FIELD(ID_AA64PFR0, EL2, 8, 4)
2034FIELD(ID_AA64PFR0, EL3, 12, 4)
2035FIELD(ID_AA64PFR0, FP, 16, 4)
2036FIELD(ID_AA64PFR0, ADVSIMD, 20, 4)
2037FIELD(ID_AA64PFR0, GIC, 24, 4)
2038FIELD(ID_AA64PFR0, RAS, 28, 4)
2039FIELD(ID_AA64PFR0, SVE, 32, 4)
2040FIELD(ID_AA64PFR0, SEL2, 36, 4)
2041FIELD(ID_AA64PFR0, MPAM, 40, 4)
2042FIELD(ID_AA64PFR0, AMU, 44, 4)
2043FIELD(ID_AA64PFR0, DIT, 48, 4)
2044FIELD(ID_AA64PFR0, CSV2, 56, 4)
2045FIELD(ID_AA64PFR0, CSV3, 60, 4)
2046
2047FIELD(ID_AA64PFR1, BT, 0, 4)
2048FIELD(ID_AA64PFR1, SSBS, 4, 4)
2049FIELD(ID_AA64PFR1, MTE, 8, 4)
2050FIELD(ID_AA64PFR1, RAS_FRAC, 12, 4)
2051FIELD(ID_AA64PFR1, MPAM_FRAC, 16, 4)
2052
2053FIELD(ID_AA64MMFR0, PARANGE, 0, 4)
2054FIELD(ID_AA64MMFR0, ASIDBITS, 4, 4)
2055FIELD(ID_AA64MMFR0, BIGEND, 8, 4)
2056FIELD(ID_AA64MMFR0, SNSMEM, 12, 4)
2057FIELD(ID_AA64MMFR0, BIGENDEL0, 16, 4)
2058FIELD(ID_AA64MMFR0, TGRAN16, 20, 4)
2059FIELD(ID_AA64MMFR0, TGRAN64, 24, 4)
2060FIELD(ID_AA64MMFR0, TGRAN4, 28, 4)
2061FIELD(ID_AA64MMFR0, TGRAN16_2, 32, 4)
2062FIELD(ID_AA64MMFR0, TGRAN64_2, 36, 4)
2063FIELD(ID_AA64MMFR0, TGRAN4_2, 40, 4)
2064FIELD(ID_AA64MMFR0, EXS, 44, 4)
2065FIELD(ID_AA64MMFR0, FGT, 56, 4)
2066FIELD(ID_AA64MMFR0, ECV, 60, 4)
2067
2068FIELD(ID_AA64MMFR1, HAFDBS, 0, 4)
2069FIELD(ID_AA64MMFR1, VMIDBITS, 4, 4)
2070FIELD(ID_AA64MMFR1, VH, 8, 4)
2071FIELD(ID_AA64MMFR1, HPDS, 12, 4)
2072FIELD(ID_AA64MMFR1, LO, 16, 4)
2073FIELD(ID_AA64MMFR1, PAN, 20, 4)
2074FIELD(ID_AA64MMFR1, SPECSEI, 24, 4)
2075FIELD(ID_AA64MMFR1, XNX, 28, 4)
2076FIELD(ID_AA64MMFR1, TWED, 32, 4)
2077FIELD(ID_AA64MMFR1, ETS, 36, 4)
2078
2079FIELD(ID_AA64MMFR2, CNP, 0, 4)
2080FIELD(ID_AA64MMFR2, UAO, 4, 4)
2081FIELD(ID_AA64MMFR2, LSM, 8, 4)
2082FIELD(ID_AA64MMFR2, IESB, 12, 4)
2083FIELD(ID_AA64MMFR2, VARANGE, 16, 4)
2084FIELD(ID_AA64MMFR2, CCIDX, 20, 4)
2085FIELD(ID_AA64MMFR2, NV, 24, 4)
2086FIELD(ID_AA64MMFR2, ST, 28, 4)
2087FIELD(ID_AA64MMFR2, AT, 32, 4)
2088FIELD(ID_AA64MMFR2, IDS, 36, 4)
2089FIELD(ID_AA64MMFR2, FWB, 40, 4)
2090FIELD(ID_AA64MMFR2, TTL, 48, 4)
2091FIELD(ID_AA64MMFR2, BBM, 52, 4)
2092FIELD(ID_AA64MMFR2, EVT, 56, 4)
2093FIELD(ID_AA64MMFR2, E0PD, 60, 4)
2094
2095FIELD(ID_AA64DFR0, DEBUGVER, 0, 4)
2096FIELD(ID_AA64DFR0, TRACEVER, 4, 4)
2097FIELD(ID_AA64DFR0, PMUVER, 8, 4)
2098FIELD(ID_AA64DFR0, BRPS, 12, 4)
2099FIELD(ID_AA64DFR0, WRPS, 20, 4)
2100FIELD(ID_AA64DFR0, CTX_CMPS, 28, 4)
2101FIELD(ID_AA64DFR0, PMSVER, 32, 4)
2102FIELD(ID_AA64DFR0, DOUBLELOCK, 36, 4)
2103FIELD(ID_AA64DFR0, TRACEFILT, 40, 4)
2104FIELD(ID_AA64DFR0, MTPMU, 48, 4)
2105
2106FIELD(ID_AA64ZFR0, SVEVER, 0, 4)
2107FIELD(ID_AA64ZFR0, AES, 4, 4)
2108FIELD(ID_AA64ZFR0, BITPERM, 16, 4)
2109FIELD(ID_AA64ZFR0, BFLOAT16, 20, 4)
2110FIELD(ID_AA64ZFR0, SHA3, 32, 4)
2111FIELD(ID_AA64ZFR0, SM4, 40, 4)
2112FIELD(ID_AA64ZFR0, I8MM, 44, 4)
2113FIELD(ID_AA64ZFR0, F32MM, 52, 4)
2114FIELD(ID_AA64ZFR0, F64MM, 56, 4)
2115
2116FIELD(ID_DFR0, COPDBG, 0, 4)
2117FIELD(ID_DFR0, COPSDBG, 4, 4)
2118FIELD(ID_DFR0, MMAPDBG, 8, 4)
2119FIELD(ID_DFR0, COPTRC, 12, 4)
2120FIELD(ID_DFR0, MMAPTRC, 16, 4)
2121FIELD(ID_DFR0, MPROFDBG, 20, 4)
2122FIELD(ID_DFR0, PERFMON, 24, 4)
2123FIELD(ID_DFR0, TRACEFILT, 28, 4)
2124
2125FIELD(ID_DFR1, MTPMU, 0, 4)
2126
2127FIELD(DBGDIDR, SE_IMP, 12, 1)
2128FIELD(DBGDIDR, NSUHD_IMP, 14, 1)
2129FIELD(DBGDIDR, VERSION, 16, 4)
2130FIELD(DBGDIDR, CTX_CMPS, 20, 4)
2131FIELD(DBGDIDR, BRPS, 24, 4)
2132FIELD(DBGDIDR, WRPS, 28, 4)
2133
2134FIELD(MVFR0, SIMDREG, 0, 4)
2135FIELD(MVFR0, FPSP, 4, 4)
2136FIELD(MVFR0, FPDP, 8, 4)
2137FIELD(MVFR0, FPTRAP, 12, 4)
2138FIELD(MVFR0, FPDIVIDE, 16, 4)
2139FIELD(MVFR0, FPSQRT, 20, 4)
2140FIELD(MVFR0, FPSHVEC, 24, 4)
2141FIELD(MVFR0, FPROUND, 28, 4)
2142
2143FIELD(MVFR1, FPFTZ, 0, 4)
2144FIELD(MVFR1, FPDNAN, 4, 4)
2145FIELD(MVFR1, SIMDLS, 8, 4) /* A-profile only */
2146FIELD(MVFR1, SIMDINT, 12, 4) /* A-profile only */
2147FIELD(MVFR1, SIMDSP, 16, 4) /* A-profile only */
2148FIELD(MVFR1, SIMDHP, 20, 4) /* A-profile only */
2149FIELD(MVFR1, MVE, 8, 4) /* M-profile only */
2150FIELD(MVFR1, FP16, 20, 4) /* M-profile only */
2151FIELD(MVFR1, FPHP, 24, 4)
2152FIELD(MVFR1, SIMDFMAC, 28, 4)
2153
2154FIELD(MVFR2, SIMDMISC, 0, 4)
2155FIELD(MVFR2, FPMISC, 4, 4)
2156
2157QEMU_BUILD_BUG_ON(ARRAY_SIZE(((ARMCPU *)0)->ccsidr) <= R_V7M_CSSELR_INDEX_MASK);
2158
2159/* If adding a feature bit which corresponds to a Linux ELF
2160 * HWCAP bit, remember to update the feature-bit-to-hwcap
2161 * mapping in linux-user/elfload.c:get_elf_hwcap().
2162 */
2163enum arm_features {
2164    ARM_FEATURE_AUXCR,  /* ARM1026 Auxiliary control register.  */
2165    ARM_FEATURE_XSCALE, /* Intel XScale extensions.  */
2166    ARM_FEATURE_IWMMXT, /* Intel iwMMXt extension.  */
2167    ARM_FEATURE_V6,
2168    ARM_FEATURE_V6K,
2169    ARM_FEATURE_V7,
2170    ARM_FEATURE_THUMB2,
2171    ARM_FEATURE_PMSA,   /* no MMU; may have Memory Protection Unit */
2172    ARM_FEATURE_NEON,
2173    ARM_FEATURE_M, /* Microcontroller profile.  */
2174    ARM_FEATURE_OMAPCP, /* OMAP specific CP15 ops handling.  */
2175    ARM_FEATURE_THUMB2EE,
2176    ARM_FEATURE_V7MP,    /* v7 Multiprocessing Extensions */
2177    ARM_FEATURE_V7VE, /* v7 Virtualization Extensions (non-EL2 parts) */
2178    ARM_FEATURE_V4T,
2179    ARM_FEATURE_V5,
2180    ARM_FEATURE_STRONGARM,
2181    ARM_FEATURE_VAPA, /* cp15 VA to PA lookups */
2182    ARM_FEATURE_GENERIC_TIMER,
2183    ARM_FEATURE_MVFR, /* Media and VFP Feature Registers 0 and 1 */
2184    ARM_FEATURE_DUMMY_C15_REGS, /* RAZ/WI all of cp15 crn=15 */
2185    ARM_FEATURE_CACHE_TEST_CLEAN, /* 926/1026 style test-and-clean ops */
2186    ARM_FEATURE_CACHE_DIRTY_REG, /* 1136/1176 cache dirty status register */
2187    ARM_FEATURE_CACHE_BLOCK_OPS, /* v6 optional cache block operations */
2188    ARM_FEATURE_MPIDR, /* has cp15 MPIDR */
2189    ARM_FEATURE_LPAE, /* has Large Physical Address Extension */
2190    ARM_FEATURE_V8,
2191    ARM_FEATURE_AARCH64, /* supports 64 bit mode */
2192    ARM_FEATURE_CBAR, /* has cp15 CBAR */
2193    ARM_FEATURE_CBAR_RO, /* has cp15 CBAR and it is read-only */
2194    ARM_FEATURE_EL2, /* has EL2 Virtualization support */
2195    ARM_FEATURE_EL3, /* has EL3 Secure monitor support */
2196    ARM_FEATURE_THUMB_DSP, /* DSP insns supported in the Thumb encodings */
2197    ARM_FEATURE_PMU, /* has PMU support */
2198    ARM_FEATURE_VBAR, /* has cp15 VBAR */
2199    ARM_FEATURE_M_SECURITY, /* M profile Security Extension */
2200    ARM_FEATURE_M_MAIN, /* M profile Main Extension */
2201    ARM_FEATURE_V8_1M, /* M profile extras only in v8.1M and later */
2202};
2203
2204static inline int arm_feature(CPUARMState *env, int feature)
2205{
2206    return (env->features & (1ULL << feature)) != 0;
2207}
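
/*
 * Usage sketch (illustrative): feature bits gate optional behaviour
 * throughout target/arm, e.g.:
 *
 *     if (arm_feature(env, ARM_FEATURE_EL2)) {
 *         ... EL2-specific handling ...
 *     }
 */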
2208
2209void arm_cpu_finalize_features(ARMCPU *cpu, Error **errp);
2210
2211#if !defined(CONFIG_USER_ONLY)
2212/* Return true if exception levels below EL3 are in secure state,
2213 * or would be following an exception return to that level.
2214 * Unlike arm_is_secure() (which is always a question about the
2215 * _current_ state of the CPU) this doesn't care about the current
2216 * EL or mode.
2217 */
2218static inline bool arm_is_secure_below_el3(CPUARMState *env)
2219{
2220    if (arm_feature(env, ARM_FEATURE_EL3)) {
2221        return !(env->cp15.scr_el3 & SCR_NS);
2222    } else {
2223        /* If EL3 is not supported then the secure state is implementation
2224         * defined, in which case QEMU defaults to non-secure.
2225         */
2226        return false;
2227    }
2228}
2229
2230/* Return true if the CPU is AArch64 EL3 or AArch32 Mon */
2231static inline bool arm_is_el3_or_mon(CPUARMState *env)
2232{
2233    if (arm_feature(env, ARM_FEATURE_EL3)) {
2234        if (is_a64(env) && extract32(env->pstate, 2, 2) == 3) {
2235            /* CPU currently in AArch64 state and EL3 */
2236            return true;
2237        } else if (!is_a64(env) &&
2238                (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
2239            /* CPU currently in AArch32 state and monitor mode */
2240            return true;
2241        }
2242    }
2243    return false;
2244}
2245
2246/* Return true if the processor is in secure state */
2247static inline bool arm_is_secure(CPUARMState *env)
2248{
2249    if (arm_is_el3_or_mon(env)) {
2250        return true;
2251    }
2252    return arm_is_secure_below_el3(env);
2253}
2254
2255/*
2256 * Return true if the current security state has AArch64 EL2 or AArch32 Hyp.
2257 * This corresponds to the pseudocode EL2Enabled()
2258 */
2259static inline bool arm_is_el2_enabled(CPUARMState *env)
2260{
2261    if (arm_feature(env, ARM_FEATURE_EL2)) {
2262        if (arm_is_secure_below_el3(env)) {
2263            return (env->cp15.scr_el3 & SCR_EEL2) != 0;
2264        }
2265        return true;
2266    }
2267    return false;
2268}
2269
2270#else
2271static inline bool arm_is_secure_below_el3(CPUARMState *env)
2272{
2273    return false;
2274}
2275
2276static inline bool arm_is_secure(CPUARMState *env)
2277{
2278    return false;
2279}
2280
2281static inline bool arm_is_el2_enabled(CPUARMState *env)
2282{
2283    return false;
2284}
2285#endif
2286
2287/**
2288 * arm_hcr_el2_eff(): Return the effective value of HCR_EL2.
2289 * E.g. when in secure state, fields in HCR_EL2 are suppressed,
2290 * "for all purposes other than a direct read or write access of HCR_EL2."
2291 * Not included here is HCR_RW.
2292 */
2293uint64_t arm_hcr_el2_eff(CPUARMState *env);
2294
2295/* Return true if the specified exception level is running in AArch64 state. */
2296static inline bool arm_el_is_aa64(CPUARMState *env, int el)
2297{
2298    /* This isn't valid for EL0 (if we're in EL0, is_a64() is what you want,
2299     * and if we're not in EL0 then the state of EL0 isn't well defined.)
2300     */
2301    assert(el >= 1 && el <= 3);
2302    bool aa64 = arm_feature(env, ARM_FEATURE_AARCH64);
2303
2304    /* The highest exception level is always at the maximum supported
2305     * register width, and then lower levels have a register width controlled
2306     * by bits in the SCR or HCR registers.
2307     */
2308    if (el == 3) {
2309        return aa64;
2310    }
2311
2312    if (arm_feature(env, ARM_FEATURE_EL3) &&
2313        ((env->cp15.scr_el3 & SCR_NS) || !(env->cp15.scr_el3 & SCR_EEL2))) {
2314        aa64 = aa64 && (env->cp15.scr_el3 & SCR_RW);
2315    }
2316
2317    if (el == 2) {
2318        return aa64;
2319    }
2320
2321    if (arm_is_el2_enabled(env)) {
2322        aa64 = aa64 && (env->cp15.hcr_el2 & HCR_RW);
2323    }
2324
2325    return aa64;
2326}
2327
2328/* Function for determining whether guest cp register reads and writes should
2329 * access the secure or non-secure bank of a cp register.  When EL3 is
2330 * operating in AArch32 state, the NS-bit determines whether the secure
2331 * instance of a cp register should be used. When EL3 is AArch64 (or if
2332 * it doesn't exist at all) then there is no register banking, and all
2333 * accesses are to the non-secure version.
2334 */
2335static inline bool access_secure_reg(CPUARMState *env)
2336{
2337    bool ret = (arm_feature(env, ARM_FEATURE_EL3) &&
2338                !arm_el_is_aa64(env, 3) &&
2339                !(env->cp15.scr_el3 & SCR_NS));
2340
2341    return ret;
2342}
2343
2344/* Macros for accessing a specified CP register bank */
2345#define A32_BANKED_REG_GET(_env, _regname, _secure)    \
2346    ((_secure) ? (_env)->cp15._regname##_s : (_env)->cp15._regname##_ns)
2347
2348#define A32_BANKED_REG_SET(_env, _regname, _secure, _val)   \
2349    do {                                            \
2350        if (_secure) {                              \
2351            (_env)->cp15._regname##_s = (_val);     \
2352        } else {                                    \
2353            (_env)->cp15._regname##_ns = (_val);    \
2354        }                                           \
2355    } while (0)
2356
2357/* Macros for automatically accessing a specific CP register bank depending on
2358 * the current secure state of the system.  These macros are not intended for
2359 * supporting instruction translation reads/writes as these are dependent
2360 * solely on the SCR.NS bit and not the mode.
2361 */
2362#define A32_BANKED_CURRENT_REG_GET(_env, _regname)        \
2363    A32_BANKED_REG_GET((_env), _regname,                \
2364                       (arm_is_secure(_env) && !arm_el_is_aa64((_env), 3)))
2365
2366#define A32_BANKED_CURRENT_REG_SET(_env, _regname, _val)                       \
2367    A32_BANKED_REG_SET((_env), _regname,                                    \
2368                       (arm_is_secure(_env) && !arm_el_is_aa64((_env), 3)), \
2369                       (_val))
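
/*
 * Sketch (illustrative; assumes a banked cp15 field declared with _s/_ns
 * suffixes, e.g. ttbr0): a bank can be selected explicitly, or derived
 * from the current security state:
 *
 *     uint64_t v = A32_BANKED_REG_GET(env, ttbr0, true);  // secure bank
 *     A32_BANKED_CURRENT_REG_SET(env, ttbr0, v);          // bank from state
 */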
2370
2371void arm_cpu_list(void);
2372uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
2373                                 uint32_t cur_el, bool secure);
2374
2375/* Interface between CPU and Interrupt controller.  */
2376#ifndef CONFIG_USER_ONLY
2377bool armv7m_nvic_can_take_pending_exception(void *opaque);
2378#else
2379static inline bool armv7m_nvic_can_take_pending_exception(void *opaque)
2380{
2381    return true;
2382}
2383#endif
2384/**
2385 * armv7m_nvic_set_pending: mark the specified exception as pending
2386 * @opaque: the NVIC
2387 * @irq: the exception number to mark pending
2388 * @secure: false for non-banked exceptions or for the nonsecure
2389 * version of a banked exception, true for the secure version of a banked
2390 * exception.
2391 *
2392 * Marks the specified exception as pending. Note that we will assert()
2393 * if @secure is true and @irq does not specify one of the fixed set
2394 * of architecturally banked exceptions.
2395 */
2396void armv7m_nvic_set_pending(void *opaque, int irq, bool secure);
2397/**
2398 * armv7m_nvic_set_pending_derived: mark this derived exception as pending
2399 * @opaque: the NVIC
2400 * @irq: the exception number to mark pending
2401 * @secure: false for non-banked exceptions or for the nonsecure
2402 * version of a banked exception, true for the secure version of a banked
2403 * exception.
2404 *
2405 * Similar to armv7m_nvic_set_pending(), but specifically for derived
2406 * exceptions (exceptions generated in the course of trying to take
2407 * a different exception).
2408 */
2409void armv7m_nvic_set_pending_derived(void *opaque, int irq, bool secure);
2410/**
2411 * armv7m_nvic_set_pending_lazyfp: mark this lazy FP exception as pending
2412 * @opaque: the NVIC
2413 * @irq: the exception number to mark pending
2414 * @secure: false for non-banked exceptions or for the nonsecure
2415 * version of a banked exception, true for the secure version of a banked
2416 * exception.
2417 *
2418 * Similar to armv7m_nvic_set_pending(), but specifically for exceptions
2419 * generated in the course of lazy stacking of FP registers.
2420 */
2421void armv7m_nvic_set_pending_lazyfp(void *opaque, int irq, bool secure);
2422/**
2423 * armv7m_nvic_get_pending_irq_info: return highest priority pending
2424 *    exception, and whether it targets Secure state
2425 * @opaque: the NVIC
2426 * @pirq: set to pending exception number
2427 * @ptargets_secure: set to whether pending exception targets Secure
2428 *
2429 * This function writes the number of the highest priority pending
2430 * exception (the one which would be made active by
2431 * armv7m_nvic_acknowledge_irq()) to @pirq, and sets @ptargets_secure
2432 * to true if the current highest priority pending exception should
2433 * be taken to Secure state, false for NS.
2434 */
2435void armv7m_nvic_get_pending_irq_info(void *opaque, int *pirq,
2436                                      bool *ptargets_secure);
2437/**
2438 * armv7m_nvic_acknowledge_irq: make highest priority pending exception active
2439 * @opaque: the NVIC
2440 *
2441 * Move the current highest priority pending exception from the pending
2442 * state to the active state, and update v7m.exception to indicate that
2443 * it is the exception currently being handled.
2444 */
2445void armv7m_nvic_acknowledge_irq(void *opaque);
2446/**
2447 * armv7m_nvic_complete_irq: complete specified interrupt or exception
2448 * @opaque: the NVIC
2449 * @irq: the exception number to complete
2450 * @secure: true if this exception was secure
2451 *
2452 * Returns: -1 if the irq was not active
2453 *           1 if completing this irq brought us back to base (no active irqs)
2454 *           0 if there is still an irq active after this one was completed
2455 * (Ignoring -1, this is the same as the RETTOBASE value before completion.)
2456 */
2457int armv7m_nvic_complete_irq(void *opaque, int irq, bool secure);
2458/**
2459 * armv7m_nvic_get_ready_status(void *opaque, int irq, bool secure)
2460 * @opaque: the NVIC
2461 * @irq: the exception number to check
2462 * @secure: false for non-banked exceptions or for the nonsecure
2463 * version of a banked exception, true for the secure version of a banked
2464 * exception.
2465 *
2466 * Return whether an exception is "ready", i.e. whether the exception is
2467 * enabled and is configured at a priority which would allow it to
2468 * interrupt the current execution priority. This controls whether the
2469 * RDY bit for it in the FPCCR is set.
2470 */
2471bool armv7m_nvic_get_ready_status(void *opaque, int irq, bool secure);
2472/**
2473 * armv7m_nvic_raw_execution_priority: return the raw execution priority
2474 * @opaque: the NVIC
2475 *
2476 * Returns: the raw execution priority as defined by the v8M architecture.
2477 * This is the execution priority minus the effects of AIRCR.PRIS,
2478 * and minus any PRIMASK/FAULTMASK/BASEPRI priority boosting.
2479 * (v8M ARM ARM I_PKLD.)
2480 */
2481int armv7m_nvic_raw_execution_priority(void *opaque);
2482/**
2483 * armv7m_nvic_neg_prio_requested: return true if the requested execution
2484 * priority is negative for the specified security state.
2485 * @opaque: the NVIC
2486 * @secure: the security state to test
2487 * This corresponds to the pseudocode IsReqExecPriNeg().
2488 */
2489#ifndef CONFIG_USER_ONLY
2490bool armv7m_nvic_neg_prio_requested(void *opaque, bool secure);
2491#else
2492static inline bool armv7m_nvic_neg_prio_requested(void *opaque, bool secure)
2493{
2494    return false;
2495}
2496#endif
2497
2498/* Interface for defining coprocessor registers.
2499 * Registers are defined in tables of arm_cp_reginfo structs
2500 * which are passed to define_arm_cp_regs().
2501 */
2502
2503/* When looking up a coprocessor register we look for it
2504 * via an integer which encodes all of:
2505 *  coprocessor number
2506 *  Crn, Crm, opc1, opc2 fields
2507 *  32 or 64 bit register (ie is it accessed via MRC/MCR
2508 *    or via MRRC/MCRR?)
2509 *  non-secure/secure bank (AArch32 only)
2510 * We allow 4 bits for opc1 because MRRC/MCRR have a 4 bit field.
2511 * (In this case crn and opc2 should be zero.)
2512 * For AArch64, there is no 32/64 bit size distinction;
2513 * instead all registers have a 2 bit op0, 3 bit op1 and op2,
2514 * and 4 bit CRn and CRm. The encoding patterns are chosen
2515 * to be easy to convert to and from the KVM encodings, and also
2516 * so that the hashtable can contain both AArch32 and AArch64
2517 * registers (to allow for interprocessing where we might run
2518 * 32 bit code on a 64 bit core).
2519 */
2520/* This bit is private to our hashtable cpreg; in KVM register
2521 * IDs the AArch64/32 distinction is the KVM_REG_ARM/ARM64
2522 * in the upper bits of the 64 bit ID.
2523 */
2524#define CP_REG_AA64_SHIFT 28
2525#define CP_REG_AA64_MASK (1 << CP_REG_AA64_SHIFT)
2526
2527/* To enable banking of coprocessor registers depending on ns-bit we
2528 * add a bit to distinguish between secure and non-secure cpregs in the
2529 * hashtable.
2530 */
2531#define CP_REG_NS_SHIFT 29
2532#define CP_REG_NS_MASK (1 << CP_REG_NS_SHIFT)
2533
2534#define ENCODE_CP_REG(cp, is64, ns, crn, crm, opc1, opc2)   \
2535    ((ns) << CP_REG_NS_SHIFT | ((cp) << 16) | ((is64) << 15) |   \
2536     ((crn) << 11) | ((crm) << 7) | ((opc1) << 3) | (opc2))
2537
2538#define ENCODE_AA64_CP_REG(cp, crn, crm, op0, op1, op2) \
2539    (CP_REG_AA64_MASK |                                 \
2540     ((cp) << CP_REG_ARM_COPROC_SHIFT) |                \
2541     ((op0) << CP_REG_ARM64_SYSREG_OP0_SHIFT) |         \
2542     ((op1) << CP_REG_ARM64_SYSREG_OP1_SHIFT) |         \
2543     ((crn) << CP_REG_ARM64_SYSREG_CRN_SHIFT) |         \
2544     ((crm) << CP_REG_ARM64_SYSREG_CRM_SHIFT) |         \
2545     ((op2) << CP_REG_ARM64_SYSREG_OP2_SHIFT))
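
/*
 * Sketch (illustrative): hashtable keys for SCTLR as seen from AArch32
 * (MRC p15, 0, rt, c1, c0, 0; non-secure bank) and for SCTLR_EL1 from
 * AArch64 (op0=3, op1=0, crn=1, crm=0, op2=0), assuming the conventional
 * cp value for AArch64 sysregs:
 *
 *     uint32_t key32 = ENCODE_CP_REG(15, 0, 1, 1, 0, 0, 0);
 *     uint32_t key64 = ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP,
 *                                         1, 0, 3, 0, 0);
 */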
2546
2547/* Convert a full 64 bit KVM register ID to the truncated 32 bit
2548 * version used as a key for the coprocessor register hashtable
2549 */
2550static inline uint32_t kvm_to_cpreg_id(uint64_t kvmid)
2551{
2552    uint32_t cpregid = kvmid;
2553    if ((kvmid & CP_REG_ARCH_MASK) == CP_REG_ARM64) {
2554        cpregid |= CP_REG_AA64_MASK;
2555    } else {
2556        if ((kvmid & CP_REG_SIZE_MASK) == CP_REG_SIZE_U64) {
2557            cpregid |= (1 << 15);
2558        }
2559
2560        /* KVM is always non-secure so add the NS flag on AArch32 register
2561         * entries.
2562         */
2563        cpregid |= 1 << CP_REG_NS_SHIFT;
2564    }
2565    return cpregid;
2566}
2567
2568/* Convert a truncated 32 bit hashtable key into the full
2569 * 64 bit KVM register ID.
2570 */
2571static inline uint64_t cpreg_to_kvm_id(uint32_t cpregid)
2572{
2573    uint64_t kvmid;
2574
2575    if (cpregid & CP_REG_AA64_MASK) {
2576        kvmid = cpregid & ~CP_REG_AA64_MASK;
2577        kvmid |= CP_REG_SIZE_U64 | CP_REG_ARM64;
2578    } else {
2579        kvmid = cpregid & ~(1 << 15);
2580        if (cpregid & (1 << 15)) {
2581            kvmid |= CP_REG_SIZE_U64 | CP_REG_ARM;
2582        } else {
2583            kvmid |= CP_REG_SIZE_U32 | CP_REG_ARM;
2584        }
2585    }
2586    return kvmid;
2587}
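
/*
 * Round-trip sketch (illustrative, AArch64 case): truncation only drops
 * the arch/size fields, which the reverse mapping restores:
 *
 *     uint64_t kvmid = CP_REG_ARM64 | CP_REG_SIZE_U64 | 0x1234;
 *     assert(cpreg_to_kvm_id(kvm_to_cpreg_id(kvmid)) == kvmid);
 */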
2588
2589/* ARMCPRegInfo type field bits. If the SPECIAL bit is set this is a
2590 * special-behaviour cp reg and bits [11..8] indicate what behaviour
2591 * it has. Otherwise it is a simple cp reg, where CONST indicates that
2592 * TCG can assume the value to be constant (ie load at translate time)
2593 * and 64BIT indicates a 64 bit wide coprocessor register. SUPPRESS_TB_END
2594 * indicates that the TB should not be ended after a write to this register
2595 * (the default is that the TB ends after cp writes). OVERRIDE permits
2596 * a register definition to override a previous definition for the
2597 * same (cp, is64, crn, crm, opc1, opc2) tuple: either the new or the
2598 * old must have the OVERRIDE bit set.
2599 * ALIAS indicates that this register is an alias view of some underlying
2600 * state which is also visible via another register, and that the other
2601 * register is handling migration and reset; registers marked ALIAS will not be
2602 * migrated but may have their state set by syncing of register state from KVM.
2603 * NO_RAW indicates that this register has no underlying state and does not
2604 * support raw access for state saving/loading; it will not be used for either
2605 * migration or KVM state synchronization. (Typically this is for "registers"
2606 * which are actually used as instructions for cache maintenance and so on.)
2607 * IO indicates that this register does I/O and therefore its accesses
2608 * need to be marked with gen_io_start() and also end the TB. In particular,
2609 * registers which implement clocks or timers require this.
2610 * RAISES_EXC is for when the read or write hook might raise an exception;
2611 * the generated code will synchronize the CPU state before calling the hook
2612 * so that it is safe for the hook to call raise_exception().
2613 * NEWEL is for writes to registers that might change the exception
2614 * level - typically on older ARM chips. For those cases we need to
2615 * re-read the new el when recomputing the translation flags.
2616 */
2617#define ARM_CP_SPECIAL           0x0001
2618#define ARM_CP_CONST             0x0002
2619#define ARM_CP_64BIT             0x0004
2620#define ARM_CP_SUPPRESS_TB_END   0x0008
2621#define ARM_CP_OVERRIDE          0x0010
2622#define ARM_CP_ALIAS             0x0020
2623#define ARM_CP_IO                0x0040
2624#define ARM_CP_NO_RAW            0x0080
2625#define ARM_CP_NOP               (ARM_CP_SPECIAL | 0x0100)
2626#define ARM_CP_WFI               (ARM_CP_SPECIAL | 0x0200)
2627#define ARM_CP_NZCV              (ARM_CP_SPECIAL | 0x0300)
2628#define ARM_CP_CURRENTEL         (ARM_CP_SPECIAL | 0x0400)
2629#define ARM_CP_DC_ZVA            (ARM_CP_SPECIAL | 0x0500)
2630#define ARM_CP_DC_GVA            (ARM_CP_SPECIAL | 0x0600)
2631#define ARM_CP_DC_GZVA           (ARM_CP_SPECIAL | 0x0700)
2632#define ARM_LAST_SPECIAL         ARM_CP_DC_GZVA
2633#define ARM_CP_FPU               0x1000
2634#define ARM_CP_SVE               0x2000
2635#define ARM_CP_NO_GDB            0x4000
2636#define ARM_CP_RAISES_EXC        0x8000
2637#define ARM_CP_NEWEL             0x10000
2638/* Used only as a terminator for ARMCPRegInfo lists */
2639#define ARM_CP_SENTINEL          0xfffff
2640/* Mask of only the flag bits in a type field */
2641#define ARM_CP_FLAG_MASK         0x1f0ff
2642
2643/* Valid values for ARMCPRegInfo state field, indicating which of
2644 * the AArch32 and AArch64 execution states this register is visible in.
2645 * If the reginfo doesn't explicitly specify then it is AArch32 only.
2646 * If the reginfo is declared to be visible in both states then a second
2647 * reginfo is synthesised for the AArch32 view of the AArch64 register,
2648 * such that the AArch32 view is the lower 32 bits of the AArch64 one.
2649 * Note that we rely on the values of these enums as we iterate through
2650 * the various states in some places.
2651 */
2652enum {
2653    ARM_CP_STATE_AA32 = 0,
2654    ARM_CP_STATE_AA64 = 1,
2655    ARM_CP_STATE_BOTH = 2,
2656};
2657
2658/* ARM CP register secure state flags.  These flags identify security state
2659 * attributes for a given CP register entry.
2660 * The existence of both or neither secure and non-secure flags indicates that
2661 * the register has both a secure and non-secure hash entry.  A single one of
2662 * these flags causes the register to only be hashed for the specified
2663 * security state.
2664 * Although definitions may have any combination of the S/NS bits, each
2665 * registered entry will only have one to identify whether the entry is secure
2666 * or non-secure.
2667 */
2668enum {
2669    ARM_CP_SECSTATE_S =   (1 << 0), /* bit[0]: Secure state register */
2670    ARM_CP_SECSTATE_NS =  (1 << 1), /* bit[1]: Non-secure state register */
2671};
2672
2673/* Return true if cptype is a valid type field. This is used to try to
2674 * catch errors where the sentinel has been accidentally left off the end
2675 * of a list of registers.
2676 */
2677static inline bool cptype_valid(int cptype)
2678{
2679    return ((cptype & ~ARM_CP_FLAG_MASK) == 0)
2680        || ((cptype & ARM_CP_SPECIAL) &&
2681            ((cptype & ~ARM_CP_FLAG_MASK) <= ARM_LAST_SPECIAL));
2682}
2683
2684/* Access rights:
2685 * We define bits for Read and Write access for what rev C of the v7-AR ARM ARM
2686 * defines as PL0 (user), PL1 (fiq/irq/svc/abt/und/sys, ie privileged), and
2687 * PL2 (hyp). The other level which has Read and Write bits is Secure PL1
2688 * (ie any of the privileged modes in Secure state, or Monitor mode).
2689 * If a register is accessible in one privilege level it's always accessible
2690 * in higher privilege levels too. Since "Secure PL1" also follows this rule
2691 * (ie anything visible in PL2 is visible in S-PL1, some things are only
2692 * visible in S-PL1) but "Secure PL1" is a bit of a mouthful, we bend the
2693 * terminology a little and call this PL3.
2694 * In AArch64 things are somewhat simpler as the PLx bits line up exactly
2695 * with the ELx exception levels.
2696 *
2697 * If access permissions for a register are more complex than can be
2698 * described with these bits, then use a laxer set of restrictions, and
2699 * do the more restrictive/complex check inside a helper function.
2700 */
2701#define PL3_R 0x80
2702#define PL3_W 0x40
2703#define PL2_R (0x20 | PL3_R)
2704#define PL2_W (0x10 | PL3_W)
2705#define PL1_R (0x08 | PL2_R)
2706#define PL1_W (0x04 | PL2_W)
2707#define PL0_R (0x02 | PL1_R)
2708#define PL0_W (0x01 | PL1_W)
2709
2710/*
2711 * For user-mode some registers are accessible to EL0 via a kernel
2712 * trap-and-emulate ABI. In this case we define the read permissions
2713 * as actually being PL0_R. However some bits of any given register
2714 * may still be masked.
2715 */
2716#ifdef CONFIG_USER_ONLY
2717#define PL0U_R PL0_R
2718#else
2719#define PL0U_R PL1_R
2720#endif
2721
2722#define PL3_RW (PL3_R | PL3_W)
2723#define PL2_RW (PL2_R | PL2_W)
2724#define PL1_RW (PL1_R | PL1_W)
2725#define PL0_RW (PL0_R | PL0_W)
2726
2727/* Return the highest implemented Exception Level */
2728static inline int arm_highest_el(CPUARMState *env)
2729{
2730    if (arm_feature(env, ARM_FEATURE_EL3)) {
2731        return 3;
2732    }
2733    if (arm_feature(env, ARM_FEATURE_EL2)) {
2734        return 2;
2735    }
2736    return 1;
2737}
2738
2739/* Return true if a v7M CPU is in Handler mode */
2740static inline bool arm_v7m_is_handler_mode(CPUARMState *env)
2741{
2742    return env->v7m.exception != 0;
2743}
2744
2745/* Return the current Exception Level (as per ARMv8; note that this differs
2746 * from the ARMv7 Privilege Level).
2747 */
2748static inline int arm_current_el(CPUARMState *env)
2749{
2750    if (arm_feature(env, ARM_FEATURE_M)) {
2751        return arm_v7m_is_handler_mode(env) ||
2752            !(env->v7m.control[env->v7m.secure] & 1);
2753    }
2754
2755    if (is_a64(env)) {
2756        return extract32(env->pstate, 2, 2);
2757    }
2758
2759    switch (env->uncached_cpsr & 0x1f) {
2760    case ARM_CPU_MODE_USR:
2761        return 0;
2762    case ARM_CPU_MODE_HYP:
2763        return 2;
2764    case ARM_CPU_MODE_MON:
2765        return 3;
2766    default:
2767        if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
2768            /* If EL3 is 32-bit then all secure privileged modes run in
2769             * EL3
2770             */
2771            return 3;
2772        }
2773
2774        return 1;
2775    }
2776}
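
/*
 * Sketch (illustrative): for M profile the expression above reduces to:
 *     Handler mode, or Thread mode with CONTROL.nPRIV clear  ->  EL1
 *     Thread mode with CONTROL.nPRIV set                     ->  EL0
 */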
2777
2778typedef struct ARMCPRegInfo ARMCPRegInfo;
2779
2780typedef enum CPAccessResult {
2781    /* Access is permitted */
2782    CP_ACCESS_OK = 0,
2783    /* Access fails due to a configurable trap or enable which would
2784     * result in a categorized exception syndrome giving information about
2785     * the failing instruction (ie syndrome category 0x3, 0x4, 0x5, 0x6,
2786     * 0xc or 0x18). The exception is taken to the usual target EL (EL1 or
2787     * PL1 if in EL0, otherwise to the current EL).
2788     */
2789    CP_ACCESS_TRAP = 1,
2790    /* Access fails and results in an exception syndrome 0x0 ("uncategorized").
2791     * Note that this is not a catch-all case -- the set of cases which may
2792     * result in this failure is specifically defined by the architecture.
2793     */
2794    CP_ACCESS_TRAP_UNCATEGORIZED = 2,
2795    /* As CP_ACCESS_TRAP, but for traps directly to EL2 or EL3 */
2796    CP_ACCESS_TRAP_EL2 = 3,
2797    CP_ACCESS_TRAP_EL3 = 4,
2798    /* As CP_ACCESS_TRAP_UNCATEGORIZED, but for traps directly to EL2 or EL3 */
2799    CP_ACCESS_TRAP_UNCATEGORIZED_EL2 = 5,
2800    CP_ACCESS_TRAP_UNCATEGORIZED_EL3 = 6,
2801    /* Access fails and results in an exception syndrome for an FP access,
2802     * trapped directly to EL2 or EL3
2803     */
2804    CP_ACCESS_TRAP_FP_EL2 = 7,
2805    CP_ACCESS_TRAP_FP_EL3 = 8,
2806} CPAccessResult;
2807
2808/* Access functions for coprocessor registers. These cannot fail and
2809 * may not raise exceptions.
2810 */
2811typedef uint64_t CPReadFn(CPUARMState *env, const ARMCPRegInfo *opaque);
2812typedef void CPWriteFn(CPUARMState *env, const ARMCPRegInfo *opaque,
2813                       uint64_t value);
2814/* Access permission check functions for coprocessor registers. */
2815typedef CPAccessResult CPAccessFn(CPUARMState *env,
2816                                  const ARMCPRegInfo *opaque,
2817                                  bool isread);
2818/* Hook function for register reset */
2819typedef void CPResetFn(CPUARMState *env, const ARMCPRegInfo *opaque);
2820
2821#define CP_ANY 0xff
2822
2823/* Definition of an ARM coprocessor register */
2824struct ARMCPRegInfo {
2825    /* Name of register (useful mainly for debugging, need not be unique) */
2826    const char *name;
2827    /* Location of register: coprocessor number and (crn,crm,opc1,opc2)
2828     * tuple. Any of crm, opc1 and opc2 may be CP_ANY to indicate a
2829     * 'wildcard' field -- any value of that field in the MRC/MCR insn
2830     * will be decoded to this register. The register read and write
2831     * callbacks will be passed an ARMCPRegInfo with the crn/crm/opc1/opc2
2832     * used by the program, so it is possible to register a wildcard and
2833     * then behave differently on read/write if necessary.
2834     * For 64 bit registers, only crm and opc1 are relevant; crn and opc2
2835     * must both be zero.
2836     * For AArch64-visible registers, opc0 is also used.
2837     * Since there are no "coprocessors" in AArch64, cp is purely used as a
2838     * way to distinguish (for KVM's benefit) guest-visible system registers
2839     * from demuxed ones provided to preserve the "no side effects on
2840     * KVM register read/write from QEMU" semantics. cp==0x13 is guest
2841     * visible (to match KVM's encoding); cp==0 will be converted to
2842     * cp==0x13 when the ARMCPRegInfo is registered, for convenience.
2843     */
2844    uint8_t cp;
2845    uint8_t crn;
2846    uint8_t crm;
2847    uint8_t opc0;
2848    uint8_t opc1;
2849    uint8_t opc2;
2850    /* Execution state in which this register is visible: ARM_CP_STATE_* */
2851    int state;
2852    /* Register type: ARM_CP_* bits/values */
2853    int type;
2854    /* Access rights: PL*_[RW] */
2855    int access;
2856    /* Security state: ARM_CP_SECSTATE_* bits/values */
2857    int secure;
2858    /* The opaque pointer passed to define_arm_cp_regs_with_opaque() when
2859     * this register was defined: can be used to hand data through to the
2860     * register read/write functions, since they are passed the ARMCPRegInfo*.
2861     */
2862    void *opaque;
2863    /* Value of this register, if it is ARM_CP_CONST. Otherwise, if
2864     * fieldoffset is non-zero, the reset value of the register.
2865     */
2866    uint64_t resetvalue;
2867    /* Offset of the field in CPUARMState for this register.
2868     *
2869     * This is not needed if either:
2870     *  1. type is ARM_CP_CONST or one of the ARM_CP_SPECIALs
2871     *  2. both readfn and writefn are specified
2872     */
2873    ptrdiff_t fieldoffset; /* offsetof(CPUARMState, field) */
2874
2875    /* Offsets of the secure and non-secure fields in CPUARMState for the
2876     * register if it is banked.  These fields are only used during the static
2877     * registration of a register.  During hashing the bank associated
2878     * with a given security state is copied to fieldoffset which is used from
2879     * there on out.
2880     *
2881     * It is expected that register definitions use either fieldoffset or
2882     * bank_fieldoffsets in the definition but not both.  It is also expected
2883     * that both bank offsets are set when defining a banked register.  This
2884     * use indicates that a register is banked.
2885     */
2886    ptrdiff_t bank_fieldoffsets[2];
2887
2888    /* Function for making any access checks for this register in addition to
2889     * those specified by the 'access' permissions bits. If NULL, no extra
2890     * checks required. The access check is performed at runtime, not at
2891     * translate time.
2892     */
2893    CPAccessFn *accessfn;
2894    /* Function for handling reads of this register. If NULL, then reads
2895     * will be done by loading from the offset into CPUARMState specified
2896     * by fieldoffset.
2897     */
2898    CPReadFn *readfn;
2899    /* Function for handling writes of this register. If NULL, then writes
2900     * will be done by writing to the offset into CPUARMState specified
2901     * by fieldoffset.
2902     */
2903    CPWriteFn *writefn;
2904    /* Function for doing a "raw" read; used when we need to copy
2905     * coprocessor state to the kernel for KVM or out for
2906     * migration. This only needs to be provided if there is also a
2907     * readfn and it has side effects (for instance clear-on-read bits).
2908     */
2909    CPReadFn *raw_readfn;
2910    /* Function for doing a "raw" write; used when we need to copy KVM
2911     * kernel coprocessor state into userspace, or for inbound
2912     * migration. This only needs to be provided if there is also a
2913     * writefn and it masks out "unwritable" bits or has write-one-to-clear
2914     * or similar behaviour.
2915     */
2916    CPWriteFn *raw_writefn;
2917    /* Function for resetting the register. If NULL, then reset will be done
2918     * by writing resetvalue to the field specified in fieldoffset. If
2919     * fieldoffset is 0 then no reset will be done.
2920     */
2921    CPResetFn *resetfn;
2922
2923    /*
2924     * "Original" writefn and readfn.
2925     * For ARMv8.1-VHE register aliases, we overwrite the read/write
2926     * accessor functions of various EL1/EL0 registers to perform the
2927     * runtime check for which sysreg should actually be modified, and
2928     * then forward the operation.  Before overwriting the accessors,
2929     * the original function is copied here, so that accesses that
2930     * really do go to the EL1/EL0 version proceed normally.
2931     * (The corresponding EL2 register is linked via opaque.)
2932     */
2933    CPReadFn *orig_readfn;
2934    CPWriteFn *orig_writefn;
2935};
2936
2937/* Macros which are lvalues for the field in CPUARMState for the
2938 * ARMCPRegInfo *ri.
2939 */
2940#define CPREG_FIELD32(env, ri) \
2941    (*(uint32_t *)((char *)(env) + (ri)->fieldoffset))
2942#define CPREG_FIELD64(env, ri) \
2943    (*(uint64_t *)((char *)(env) + (ri)->fieldoffset))
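
/*
 * For instance (an illustrative sketch by the editor, not code from
 * QEMU), a writefn for a hypothetical register whose upper bits are
 * reserved could mask them before storing through fieldoffset:
 *
 *   static void example_write(CPUARMState *env, const ARMCPRegInfo *ri,
 *                             uint64_t value)
 *   {
 *       CPREG_FIELD32(env, ri) = value & 0xffff;
 *   }
 */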
2944
2945#define REGINFO_SENTINEL { .type = ARM_CP_SENTINEL }
2946
2947void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
2948                                    const ARMCPRegInfo *regs, void *opaque);
2949void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
2950                                       const ARMCPRegInfo *regs, void *opaque);
2951static inline void define_arm_cp_regs(ARMCPU *cpu, const ARMCPRegInfo *regs)
2952{
2953    define_arm_cp_regs_with_opaque(cpu, regs, NULL);
2954}
2955static inline void define_one_arm_cp_reg(ARMCPU *cpu, const ARMCPRegInfo *regs)
2956{
2957    define_one_arm_cp_reg_with_opaque(cpu, regs, NULL);
2958}
2959const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp);
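
/*
 * Example (an editor's sketch, not a register QEMU actually defines):
 * a hypothetical constant ID register, relying on the defaults
 * described above -- ARM_CP_CONST needs no fieldoffset, and with no
 * readfn supplied reads simply return resetvalue:
 *
 *   static const ARMCPRegInfo example_regs[] = {
 *       { .name = "EXAMPLE_ID", .cp = 15, .opc1 = 0, .crn = 0,
 *         .crm = 3, .opc2 = 7, .access = PL1_R,
 *         .type = ARM_CP_CONST, .resetvalue = 0x12345678 },
 *       REGINFO_SENTINEL
 *   };
 *
 *   define_arm_cp_regs(cpu, example_regs);
 */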
2960
2961/*
2962 * Definition of an ARM co-processor register as viewed from
2963 * userspace. This is used for presenting sanitised versions of
2964 * registers to userspace when emulating the Linux AArch64 CPU
2965 * ID/feature ABI (advertised as HWCAP_CPUID).
2966 */
2967typedef struct ARMCPRegUserSpaceInfo {
2968    /* Name of register */
2969    const char *name;
2970
2971    /* Is the name actually a glob pattern */
2972    bool is_glob;
2973
2974    /* Only some bits are exported to user space */
2975    uint64_t exported_bits;
2976
2977    /* Fixed bits are applied after the mask */
2978    uint64_t fixed_bits;
2979} ARMCPRegUserSpaceInfo;
2980
2981#define REGUSERINFO_SENTINEL { .name = NULL }
2982
2983void modify_arm_cp_regs(ARMCPRegInfo *regs, const ARMCPRegUserSpaceInfo *mods);
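
/*
 * Usage sketch (modelled on, but not copied from, the AArch64 ID
 * register handling; the names and masks here are illustrative only):
 * expose a subset of one register's bits and make a whole family
 * read-as-zero via a glob pattern:
 *
 *   static const ARMCPRegUserSpaceInfo example_userinfo[] = {
 *       { .name = "ID_AA64PFR0_EL1", .exported_bits = 0x000f000f00ff0000 },
 *       { .name = "ID_AA64*", .is_glob = true },
 *       REGUSERINFO_SENTINEL
 *   };
 *
 *   modify_arm_cp_regs(example_reginfo, example_userinfo);
 */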
2984
2985/* CPWriteFn that can be used to implement writes-ignored behaviour */
2986void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
2987                         uint64_t value);
2988/* CPReadFn that can be used for read-as-zero behaviour */
2989uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri);
2990
2991/* CPResetFn that does nothing, for use if no reset is required even
2992 * if fieldoffset is non zero.
2993 */
2994void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque);
2995
2996/* Return true if this reginfo struct's field in the cpu state struct
2997 * is 64 bits wide.
2998 */
2999static inline bool cpreg_field_is_64bit(const ARMCPRegInfo *ri)
3000{
3001    return (ri->state == ARM_CP_STATE_AA64) || (ri->type & ARM_CP_64BIT);
3002}
3003
3004static inline bool cp_access_ok(int current_el,
3005                                const ARMCPRegInfo *ri, int isread)
3006{
3007    return (ri->access >> ((current_el * 2) + isread)) & 1;
3008}
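
/*
 * Worked example (editor's note): the access bits are laid out two per
 * exception level, with the write permission in the even bit and the
 * read permission in the odd bit, so an access from EL1 with
 * isread == 1 tests bit (1 * 2) + 1 == 3 of ri->access.
 */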
3009
3010/* Raw read of a coprocessor register (as needed for migration, etc) */
3011uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri);
3012
3013/**
3014 * write_list_to_cpustate:
3015 * @cpu: ARMCPU
3016 *
3017 * For each register listed in the ARMCPU cpreg_indexes list, write
3018 * its value from the cpreg_values list into the CPUARMState structure.
3019 * This updates TCG's working data structures from KVM data or
3020 * from incoming migration state.
3021 *
3022 * Returns: true if all register values were updated correctly,
3023 * false if some register was unknown or could not be written.
3024 * Note that we do not stop early on failure -- we will attempt
3025 * writing all registers in the list.
3026 */
3027bool write_list_to_cpustate(ARMCPU *cpu);
3028
3029/**
3030 * write_cpustate_to_list:
3031 * @cpu: ARMCPU
3032 * @kvm_sync: true if this is for syncing back to KVM
3033 *
3034 * For each register listed in the ARMCPU cpreg_indexes list, write
3035 * its value from the CPUARMState structure into the cpreg_values list.
3036 * This is used to copy info from TCG's working data structures into
3037 * KVM or for outbound migration.
3038 *
3039 * @kvm_sync is true if we are doing this in order to sync the
3040 * register state back to KVM. In this case we will only update
3041 * values in the list if the previous list->cpustate sync actually
3042 * successfully wrote the CPU state. Otherwise we will keep the value
3043 * that is in the list.
3044 *
3045 * Returns: true if all register values were read correctly,
3046 * false if some register was unknown or could not be read.
3047 * Note that we do not stop early on failure -- we will attempt
3048 * reading all registers in the list.
3049 */
3050bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync);
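
/*
 * Together these give the usual sync round trip (an editor's sketch of
 * the flow; the real callers live in the KVM and migration code):
 *
 *   write_cpustate_to_list(cpu, false);  // CPUARMState -> cpreg_values
 *   ... transfer cpreg_indexes/cpreg_values ...
 *   write_list_to_cpustate(cpu);         // cpreg_values -> CPUARMState
 */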
3051
3052#define ARM_CPUID_TI915T      0x54029152
3053#define ARM_CPUID_TI925T      0x54029252
3054
3055#define ARM_CPU_TYPE_SUFFIX "-" TYPE_ARM_CPU
3056#define ARM_CPU_TYPE_NAME(name) (name ARM_CPU_TYPE_SUFFIX)
3057#define CPU_RESOLVING_TYPE TYPE_ARM_CPU
3058
3059#define cpu_signal_handler cpu_arm_signal_handler
3060#define cpu_list arm_cpu_list
3061
3062/* ARM has the following "translation regimes" (as the ARM ARM calls them):
3063 *
3064 * If EL3 is 64-bit:
3065 *  + NonSecure EL1 & 0 stage 1
3066 *  + NonSecure EL1 & 0 stage 2
3067 *  + NonSecure EL2
3068 *  + NonSecure EL2 & 0   (ARMv8.1-VHE)
3069 *  + Secure EL1 & 0
3070 *  + Secure EL3
3071 * If EL3 is 32-bit:
3072 *  + NonSecure PL1 & 0 stage 1
3073 *  + NonSecure PL1 & 0 stage 2
3074 *  + NonSecure PL2
3075 *  + Secure PL0
3076 *  + Secure PL1
3077 * (reminder: for 32 bit EL3, Secure PL1 is *EL3*, not EL1.)
3078 *
3079 * For QEMU, an mmu_idx is not quite the same as a translation regime because:
3080 *  1. we need to split the "EL1 & 0" and "EL2 & 0" regimes into two mmu_idxes,
3081 *     because they may differ in access permissions even if the VA->PA map is
3082 *     the same
3083 *  2. we want to cache in our TLB the full VA->IPA->PA lookup for a stage 1+2
3084 *     translation, which means that we have one mmu_idx that deals with two
3085 *     concatenated translation regimes [this sort of combined s1+2 TLB is
3086 *     architecturally permitted]
3087 *  3. we don't need to allocate an mmu_idx to translations that we won't be
3088 *     handling via the TLB. The only way to do a stage 1 translation without
3089 *     the immediate stage 2 translation is via the ATS or AT system insns,
3090 *     which can be slow-pathed and always do a page table walk.
3091 *     The only use of stage 2 translations is either as part of an s1+2
3092 *     lookup or when loading the descriptors during a stage 1 page table walk,
3093 *     and in both those cases we don't use the TLB.
3094 *  4. we can also safely fold together the "32 bit EL3" and "64 bit EL3"
3095 *     translation regimes, because they map reasonably well to each other
3096 *     and they can't both be active at the same time.
3097 *  5. we want to be able to use the TLB for accesses done as part of a
3098 *     stage1 page table walk, rather than having to walk the stage2 page
3099 *     table over and over.
3100 *  6. we need separate EL1/EL2 mmu_idx for handling the Privileged Access
3101 *     Never (PAN) bit within PSTATE.
3102 *
3103 * This gives us the following list of cases:
3104 *
3105 * NS EL0 EL1&0 stage 1+2 (aka NS PL0)
3106 * NS EL1 EL1&0 stage 1+2 (aka NS PL1)
3107 * NS EL1 EL1&0 stage 1+2 +PAN
3108 * NS EL0 EL2&0
3109 * NS EL2 EL2&0
3110 * NS EL2 EL2&0 +PAN
3111 * NS EL2 (aka NS PL2)
3112 * S EL0 EL1&0 (aka S PL0)
3113 * S EL1 EL1&0 (not used if EL3 is 32 bit)
3114 * S EL1 EL1&0 +PAN
     * S EL0 EL2&0 (ARMv8.4-SecEL2)
     * S EL2 EL2&0 (ARMv8.4-SecEL2)
     * S EL2 EL2&0 +PAN
     * S EL2 (ARMv8.4-SecEL2)
3115 * S EL3 (aka S PL1)
3116 *
3117 * for a total of 15 different mmu_idx, matching the TLB-backed
     * entries in the ARMMMUIdx enumeration below.
3118 *
3119 * R profile CPUs have an MPU, but can use the same set of MMU indexes
3120 * as A profile. They only need to distinguish NS EL0 and NS EL1 (and
3121 * NS EL2 if we ever model a Cortex-R52).
3122 *
3123 * M profile CPUs are rather different as they do not have a true MMU.
3124 * They have the following different MMU indexes:
3125 *  User
3126 *  Privileged
3127 *  User, execution priority negative (ie the MPU HFNMIENA bit may apply)
3128 *  Privileged, execution priority negative (ditto)
3129 * If the CPU supports the v8M Security Extension then there are also:
3130 *  Secure User
3131 *  Secure Privileged
3132 *  Secure User, execution priority negative
3133 *  Secure Privileged, execution priority negative
3134 *
3135 * The ARMMMUIdx and the mmu index value used by the core QEMU TLB code
3136 * are not quite the same -- different CPU types (most notably M profile
3137 * vs A/R profile) would like to use MMU indexes with different semantics,
3138 * but since we don't ever need to use all of those in a single CPU we
3139 * can avoid having to set NB_MMU_MODES to "total number of A profile MMU
3140 * modes + total number of M profile MMU modes". The lower bits of
3141 * ARMMMUIdx are the core TLB mmu index, and the higher bits are always
3142 * the same for any particular CPU.
3143 * Variables of type ARMMMUIdx are always full values, and the core
3144 * index values are in variables of type 'int'.
3145 *
3146 * Our enumeration also includes some entries (the ARM_MMU_IDX_NOTLB
3147 * ones) which are not "true" mmu_idx values in that they don't have
3148 * corresponding TLBs and are only valid for slow path page table walks.
3149 *
3150 * The constant names here are patterned after the general style of the names
3151 * of the AT/ATS operations.
3152 * The values used are carefully arranged to make mmu_idx => EL lookup easy.
3153 * For M profile we arrange them to have a bit for priv, a bit for negpri
3154 * and a bit for secure.
3155 */
3156#define ARM_MMU_IDX_A     0x10  /* A profile */
3157#define ARM_MMU_IDX_NOTLB 0x20  /* does not have a TLB */
3158#define ARM_MMU_IDX_M     0x40  /* M profile */
3159
3160/* Meanings of the bits for A profile mmu idx values */
3161#define ARM_MMU_IDX_A_NS     0x8
3162
3163/* Meanings of the bits for M profile mmu idx values */
3164#define ARM_MMU_IDX_M_PRIV   0x1
3165#define ARM_MMU_IDX_M_NEGPRI 0x2
3166#define ARM_MMU_IDX_M_S      0x4  /* Secure */
3167
3168#define ARM_MMU_IDX_TYPE_MASK \
3169    (ARM_MMU_IDX_A | ARM_MMU_IDX_M | ARM_MMU_IDX_NOTLB)
3170#define ARM_MMU_IDX_COREIDX_MASK 0xf
3171
3172typedef enum ARMMMUIdx {
3173    /*
3174     * A-profile.
3175     */
3176    ARMMMUIdx_SE10_0     =  0 | ARM_MMU_IDX_A,
3177    ARMMMUIdx_SE20_0     =  1 | ARM_MMU_IDX_A,
3178    ARMMMUIdx_SE10_1     =  2 | ARM_MMU_IDX_A,
3179    ARMMMUIdx_SE20_2     =  3 | ARM_MMU_IDX_A,
3180    ARMMMUIdx_SE10_1_PAN =  4 | ARM_MMU_IDX_A,
3181    ARMMMUIdx_SE20_2_PAN =  5 | ARM_MMU_IDX_A,
3182    ARMMMUIdx_SE2        =  6 | ARM_MMU_IDX_A,
3183    ARMMMUIdx_SE3        =  7 | ARM_MMU_IDX_A,
3184
3185    ARMMMUIdx_E10_0     = ARMMMUIdx_SE10_0 | ARM_MMU_IDX_A_NS,
3186    ARMMMUIdx_E20_0     = ARMMMUIdx_SE20_0 | ARM_MMU_IDX_A_NS,
3187    ARMMMUIdx_E10_1     = ARMMMUIdx_SE10_1 | ARM_MMU_IDX_A_NS,
3188    ARMMMUIdx_E20_2     = ARMMMUIdx_SE20_2 | ARM_MMU_IDX_A_NS,
3189    ARMMMUIdx_E10_1_PAN = ARMMMUIdx_SE10_1_PAN | ARM_MMU_IDX_A_NS,
3190    ARMMMUIdx_E20_2_PAN = ARMMMUIdx_SE20_2_PAN | ARM_MMU_IDX_A_NS,
3191    ARMMMUIdx_E2        = ARMMMUIdx_SE2 | ARM_MMU_IDX_A_NS,
3192
3193    /*
3194     * These are not allocated TLBs and are used only for AT system
3195     * instructions or for the first stage of an S12 page table walk.
3196     */
3197    ARMMMUIdx_Stage1_E0 = 0 | ARM_MMU_IDX_NOTLB,
3198    ARMMMUIdx_Stage1_E1 = 1 | ARM_MMU_IDX_NOTLB,
3199    ARMMMUIdx_Stage1_E1_PAN = 2 | ARM_MMU_IDX_NOTLB,
3200    ARMMMUIdx_Stage1_SE0 = 3 | ARM_MMU_IDX_NOTLB,
3201    ARMMMUIdx_Stage1_SE1 = 4 | ARM_MMU_IDX_NOTLB,
3202    ARMMMUIdx_Stage1_SE1_PAN = 5 | ARM_MMU_IDX_NOTLB,
3203    /*
3204     * Not allocated a TLB: used only for second stage of an S12 page
3205     * table walk, or for descriptor loads during first stage of an S1
3206     * page table walk. Note that if we ever want to have a TLB for this
3207     * then various TLB flush insns which currently are no-ops or flush
3208     * only stage 1 MMU indexes will need to change to flush stage 2.
3209     */
3210    ARMMMUIdx_Stage2     = 6 | ARM_MMU_IDX_NOTLB,
3211    ARMMMUIdx_Stage2_S   = 7 | ARM_MMU_IDX_NOTLB,
3212
3213    /*
3214     * M-profile.
3215     */
3216    ARMMMUIdx_MUser = ARM_MMU_IDX_M,
3217    ARMMMUIdx_MPriv = ARM_MMU_IDX_M | ARM_MMU_IDX_M_PRIV,
3218    ARMMMUIdx_MUserNegPri = ARMMMUIdx_MUser | ARM_MMU_IDX_M_NEGPRI,
3219    ARMMMUIdx_MPrivNegPri = ARMMMUIdx_MPriv | ARM_MMU_IDX_M_NEGPRI,
3220    ARMMMUIdx_MSUser = ARMMMUIdx_MUser | ARM_MMU_IDX_M_S,
3221    ARMMMUIdx_MSPriv = ARMMMUIdx_MPriv | ARM_MMU_IDX_M_S,
3222    ARMMMUIdx_MSUserNegPri = ARMMMUIdx_MUserNegPri | ARM_MMU_IDX_M_S,
3223    ARMMMUIdx_MSPrivNegPri = ARMMMUIdx_MPrivNegPri | ARM_MMU_IDX_M_S,
3224} ARMMMUIdx;
3225
3226/*
3227 * Bit macros for the core-mmu-index values for each index,
3228 * for use when calling tlb_flush_by_mmuidx() and friends.
3229 */
3230#define TO_CORE_BIT(NAME) \
3231    ARMMMUIdxBit_##NAME = 1 << (ARMMMUIdx_##NAME & ARM_MMU_IDX_COREIDX_MASK)
3232
3233typedef enum ARMMMUIdxBit {
3234    TO_CORE_BIT(E10_0),
3235    TO_CORE_BIT(E20_0),
3236    TO_CORE_BIT(E10_1),
3237    TO_CORE_BIT(E10_1_PAN),
3238    TO_CORE_BIT(E2),
3239    TO_CORE_BIT(E20_2),
3240    TO_CORE_BIT(E20_2_PAN),
3241    TO_CORE_BIT(SE10_0),
3242    TO_CORE_BIT(SE20_0),
3243    TO_CORE_BIT(SE10_1),
3244    TO_CORE_BIT(SE20_2),
3245    TO_CORE_BIT(SE10_1_PAN),
3246    TO_CORE_BIT(SE20_2_PAN),
3247    TO_CORE_BIT(SE2),
3248    TO_CORE_BIT(SE3),
3249
3250    TO_CORE_BIT(MUser),
3251    TO_CORE_BIT(MPriv),
3252    TO_CORE_BIT(MUserNegPri),
3253    TO_CORE_BIT(MPrivNegPri),
3254    TO_CORE_BIT(MSUser),
3255    TO_CORE_BIT(MSPriv),
3256    TO_CORE_BIT(MSUserNegPri),
3257    TO_CORE_BIT(MSPrivNegPri),
3258} ARMMMUIdxBit;
3259
3260#undef TO_CORE_BIT
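
/*
 * For example (a sketch only; the real flush logic lives in helper.c),
 * flushing the NS EL1&0 regime ORs together the relevant core-index
 * bits:
 *
 *   tlb_flush_by_mmuidx(env_cpu(env),
 *                       ARMMMUIdxBit_E10_1 |
 *                       ARMMMUIdxBit_E10_1_PAN |
 *                       ARMMMUIdxBit_E10_0);
 */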
3261
3262#define MMU_USER_IDX 0
3263
3264/* Indexes used when registering address spaces with cpu_address_space_init */
3265typedef enum ARMASIdx {
3266    ARMASIdx_NS = 0,
3267    ARMASIdx_S = 1,
3268    ARMASIdx_TagNS = 2,
3269    ARMASIdx_TagS = 3,
3270} ARMASIdx;
3271
3272/* Return the Exception Level targeted by debug exceptions. */
3273static inline int arm_debug_target_el(CPUARMState *env)
3274{
3275    bool secure = arm_is_secure(env);
3276    bool route_to_el2 = false;
3277
3278    if (arm_is_el2_enabled(env)) {
3279        route_to_el2 = env->cp15.hcr_el2 & HCR_TGE ||
3280                       env->cp15.mdcr_el2 & MDCR_TDE;
3281    }
3282
3283    if (route_to_el2) {
3284        return 2;
3285    } else if (arm_feature(env, ARM_FEATURE_EL3) &&
3286               !arm_el_is_aa64(env, 3) && secure) {
3287        return 3;
3288    } else {
3289        return 1;
3290    }
3291}
3292
3293static inline bool arm_v7m_csselr_razwi(ARMCPU *cpu)
3294{
3295    /* If all the CLIDR.Ctype<m> fields are 0 there are no caches, and
3296     * CSSELR is RAZ/WI; in that case this function returns false.
3297     */
3298    return (cpu->clidr & R_V7M_CLIDR_CTYPE_ALL_MASK) != 0;
3299}
3300
3301/* See AArch64.GenerateDebugExceptionsFrom() in ARM ARM pseudocode */
3302static inline bool aa64_generate_debug_exceptions(CPUARMState *env)
3303{
3304    int cur_el = arm_current_el(env);
3305    int debug_el;
3306
3307    if (cur_el == 3) {
3308        return false;
3309    }
3310
3311    /* MDCR_EL3.SDD disables debug events from Secure state */
3312    if (arm_is_secure_below_el3(env)
3313        && extract32(env->cp15.mdcr_el3, 16, 1)) {
3314        return false;
3315    }
3316
3317    /*
3318     * Same EL to same EL debug exceptions need MDSCR_KDE enabled
3319     * while not masking the (D)ebug bit in DAIF.
3320     */
3321    debug_el = arm_debug_target_el(env);
3322
3323    if (cur_el == debug_el) {
3324        return extract32(env->cp15.mdscr_el1, 13, 1)
3325            && !(env->daif & PSTATE_D);
3326    }
3327
3328    /* Otherwise the debug target needs to be a higher EL */
3329    return debug_el > cur_el;
3330}
3331
3332static inline bool aa32_generate_debug_exceptions(CPUARMState *env)
3333{
3334    int el = arm_current_el(env);
3335
3336    if (el == 0 && arm_el_is_aa64(env, 1)) {
3337        return aa64_generate_debug_exceptions(env);
3338    }
3339
3340    if (arm_is_secure(env)) {
3341        int spd;
3342
3343        if (el == 0 && (env->cp15.sder & 1)) {
3344            /* SDER.SUIDEN means debug exceptions from Secure EL0
3345             * are always enabled. Otherwise they are controlled by
3346             * SDCR.SPD like those from other Secure ELs.
3347             */
3348            return true;
3349        }
3350
3351        spd = extract32(env->cp15.mdcr_el3, 14, 2);
3352        switch (spd) {
3353        case 1:
3354            /* SPD == 0b01 is reserved, but behaves as 0b00. */
3355        case 0:
3356            /* For 0b00 we return true if external secure invasive debug
3357             * is enabled. On real hardware this is controlled by external
3358             * signals to the core. QEMU always permits debug, and behaves
3359             * as if DBGEN, SPIDEN, NIDEN and SPNIDEN are all tied high.
3360             */
3361            return true;
3362        case 2:
3363            return false;
3364        case 3:
3365            return true;
3366        }
3367    }
3368
3369    return el != 2;
3370}
3371
3372/* Return true if debugging exceptions are currently enabled.
3373 * This corresponds to what in ARM ARM pseudocode would be
3374 *    if UsingAArch32() then
3375 *        return AArch32.GenerateDebugExceptions()
3376 *    else
3377 *        return AArch64.GenerateDebugExceptions()
3378 * We choose to push the if() down into this function for clarity,
3379 * since the pseudocode has it at all callsites except for the one in
3380 * CheckSoftwareStep(), where it is elided because both branches would
3381 * always return the same value.
3382 */
3383static inline bool arm_generate_debug_exceptions(CPUARMState *env)
3384{
3385    if (env->aarch64) {
3386        return aa64_generate_debug_exceptions(env);
3387    } else {
3388        return aa32_generate_debug_exceptions(env);
3389    }
3390}
3391
3392/* Is single-stepping active? (Note that the "is EL_D AArch64?" check
3393 * implicitly means this always returns false in pre-v8 CPUs.)
3394 */
3395static inline bool arm_singlestep_active(CPUARMState *env)
3396{
3397    return extract32(env->cp15.mdscr_el1, 0, 1)
3398        && arm_el_is_aa64(env, arm_debug_target_el(env))
3399        && arm_generate_debug_exceptions(env);
3400}
3401
3402static inline bool arm_sctlr_b(CPUARMState *env)
3403{
3404    return
3405        /* We need not implement SCTLR.ITD in user-mode emulation, so
3406         * let linux-user ignore the fact that it conflicts with SCTLR_B.
3407         * This lets people run BE32 binaries with "-cpu any".
3408         */
3409#ifndef CONFIG_USER_ONLY
3410        !arm_feature(env, ARM_FEATURE_V7) &&
3411#endif
3412        (env->cp15.sctlr_el[1] & SCTLR_B) != 0;
3413}
3414
3415uint64_t arm_sctlr(CPUARMState *env, int el);
3416
3417static inline bool arm_cpu_data_is_big_endian_a32(CPUARMState *env,
3418                                                  bool sctlr_b)
3419{
3420#ifdef CONFIG_USER_ONLY
3421    /*
3422     * In system mode, BE32 is modelled in line with the
3423     * architecture (as word-invariant big-endianness), where loads
3424     * and stores are done little endian but from addresses which
3425     * are adjusted by XORing with the appropriate constant. So the
3426     * endianness to use for the raw data access is not affected by
3427     * SCTLR.B.
3428     * In user mode, however, we model BE32 as byte-invariant
3429     * big-endianness (because user-only code cannot tell the
3430     * difference), and so we need to use a data access endianness
3431     * that depends on SCTLR.B.
3432     */
3433    if (sctlr_b) {
3434        return true;
3435    }
3436#endif
3437    /* In 32-bit mode, endianness is determined by the CPSR E bit */
3438    return env->uncached_cpsr & CPSR_E;
3439}
3440
3441static inline bool arm_cpu_data_is_big_endian_a64(int el, uint64_t sctlr)
3442{
3443    return sctlr & (el ? SCTLR_EE : SCTLR_E0E);
3444}
3445
3446/* Return true if the processor is in big-endian mode. */
3447static inline bool arm_cpu_data_is_big_endian(CPUARMState *env)
3448{
3449    if (!is_a64(env)) {
3450        return arm_cpu_data_is_big_endian_a32(env, arm_sctlr_b(env));
3451    } else {
3452        int cur_el = arm_current_el(env);
3453        uint64_t sctlr = arm_sctlr(env, cur_el);
3454        return arm_cpu_data_is_big_endian_a64(cur_el, sctlr);
3455    }
3456}
3457
3458typedef CPUARMState CPUArchState;
3459typedef ARMCPU ArchCPU;
3460
3461#include "exec/cpu-all.h"
3462
3463/*
3464 * We have more than 32 bits' worth of state per TB, so we split the data
3465 * between tb->flags and tb->cs_base, which is otherwise unused for ARM.
3466 * We collect these two parts in CPUARMTBFlags where they are named
3467 * flags and flags2 respectively.
3468 *
3469 * The flags that are shared between all execution modes, TBFLAG_ANY,
3470 * are stored in flags.  The flags that are specific to a given mode
3471 * are stored in flags2.  Since cs_base is sized on the configured
3472 * address size, flags2 always has 64-bits for A64, and a minimum of
3473 * 32-bits for A32 and M32.
3474 *
3475 * The bits for 32-bit A-profile and M-profile partially overlap:
3476 *
3477 *  31         23         11 10             0
3478 * +-------------+----------+----------------+
3479 * |             |          |   TBFLAG_A32   |
3480 * | TBFLAG_AM32 |          +-----+----------+
3481 * |             |                |TBFLAG_M32|
3482 * +-------------+----------------+----------+
3483 *  31         23                5 4        0
3484 *
3485 * Unless otherwise noted, these bits are cached in env->hflags.
3486 */
3487FIELD(TBFLAG_ANY, AARCH64_STATE, 0, 1)
3488FIELD(TBFLAG_ANY, SS_ACTIVE, 1, 1)
3489FIELD(TBFLAG_ANY, PSTATE__SS, 2, 1)      /* Not cached. */
3490FIELD(TBFLAG_ANY, BE_DATA, 3, 1)
3491FIELD(TBFLAG_ANY, MMUIDX, 4, 4)
3492/* Target EL if we take a floating-point-disabled exception */
3493FIELD(TBFLAG_ANY, FPEXC_EL, 8, 2)
3494/* For A-profile only, target EL for debug exceptions.  */
3495FIELD(TBFLAG_ANY, DEBUG_TARGET_EL, 10, 2)
3496/* Memory operations require alignment: SCTLR_ELx.A or CCR.UNALIGN_TRP */
3497FIELD(TBFLAG_ANY, ALIGN_MEM, 12, 1)
3498
3499/*
3500 * Bit usage when in AArch32 state, both A- and M-profile.
3501 */
3502FIELD(TBFLAG_AM32, CONDEXEC, 24, 8)      /* Not cached. */
3503FIELD(TBFLAG_AM32, THUMB, 23, 1)         /* Not cached. */
3504
3505/*
3506 * Bit usage when in AArch32 state, for A-profile only.
3507 */
3508FIELD(TBFLAG_A32, VECLEN, 0, 3)         /* Not cached. */
3509FIELD(TBFLAG_A32, VECSTRIDE, 3, 2)     /* Not cached. */
3510/*
3511 * We store the bottom two bits of the CPAR as TB flags and handle
3512 * checks on the other bits at runtime. This shares the same bits as
3513 * VECSTRIDE, which is OK as no XScale CPU has VFP.
3514 * Not cached, because VECLEN+VECSTRIDE are not cached.
3515 */
3516FIELD(TBFLAG_A32, XSCALE_CPAR, 5, 2)
3517FIELD(TBFLAG_A32, VFPEN, 7, 1)         /* Partially cached, minus FPEXC. */
3518FIELD(TBFLAG_A32, SCTLR__B, 8, 1)      /* Cannot overlap with SCTLR_B */
3519FIELD(TBFLAG_A32, HSTR_ACTIVE, 9, 1)
3520/*
3521 * Indicates whether cp register reads and writes by guest code should access
3522 * the secure or nonsecure bank of banked registers; note that this is not
3523 * the same thing as the current security state of the processor!
3524 */
3525FIELD(TBFLAG_A32, NS, 10, 1)
3526
3527/*
3528 * Bit usage when in AArch32 state, for M-profile only.
3529 */
3530/* Handler (ie not Thread) mode */
3531FIELD(TBFLAG_M32, HANDLER, 0, 1)
3532/* Whether we should generate stack-limit checks */
3533FIELD(TBFLAG_M32, STACKCHECK, 1, 1)
3534/* Set if FPCCR.LSPACT is set */
3535FIELD(TBFLAG_M32, LSPACT, 2, 1)                 /* Not cached. */
3536/* Set if we must create a new FP context */
3537FIELD(TBFLAG_M32, NEW_FP_CTXT_NEEDED, 3, 1)     /* Not cached. */
3538/* Set if FPCCR.S does not match current security state */
3539FIELD(TBFLAG_M32, FPCCR_S_WRONG, 4, 1)          /* Not cached. */
3540
3541/*
3542 * Bit usage when in AArch64 state
3543 */
3544FIELD(TBFLAG_A64, TBII, 0, 2)
3545FIELD(TBFLAG_A64, SVEEXC_EL, 2, 2)
3546FIELD(TBFLAG_A64, ZCR_LEN, 4, 4)
3547FIELD(TBFLAG_A64, PAUTH_ACTIVE, 8, 1)
3548FIELD(TBFLAG_A64, BT, 9, 1)
3549FIELD(TBFLAG_A64, BTYPE, 10, 2)         /* Not cached. */
3550FIELD(TBFLAG_A64, TBID, 12, 2)
3551FIELD(TBFLAG_A64, UNPRIV, 14, 1)
3552FIELD(TBFLAG_A64, ATA, 15, 1)
3553FIELD(TBFLAG_A64, TCMA, 16, 2)
3554FIELD(TBFLAG_A64, MTE_ACTIVE, 18, 1)
3555FIELD(TBFLAG_A64, MTE0_ACTIVE, 19, 1)
3556
3557/*
3558 * Helpers for using the above.
3559 */
3560#define DP_TBFLAG_ANY(DST, WHICH, VAL) \
3561    (DST.flags = FIELD_DP32(DST.flags, TBFLAG_ANY, WHICH, VAL))
3562#define DP_TBFLAG_A64(DST, WHICH, VAL) \
3563    (DST.flags2 = FIELD_DP32(DST.flags2, TBFLAG_A64, WHICH, VAL))
3564#define DP_TBFLAG_A32(DST, WHICH, VAL) \
3565    (DST.flags2 = FIELD_DP32(DST.flags2, TBFLAG_A32, WHICH, VAL))
3566#define DP_TBFLAG_M32(DST, WHICH, VAL) \
3567    (DST.flags2 = FIELD_DP32(DST.flags2, TBFLAG_M32, WHICH, VAL))
3568#define DP_TBFLAG_AM32(DST, WHICH, VAL) \
3569    (DST.flags2 = FIELD_DP32(DST.flags2, TBFLAG_AM32, WHICH, VAL))
3570
3571#define EX_TBFLAG_ANY(IN, WHICH)   FIELD_EX32(IN.flags, TBFLAG_ANY, WHICH)
3572#define EX_TBFLAG_A64(IN, WHICH)   FIELD_EX32(IN.flags2, TBFLAG_A64, WHICH)
3573#define EX_TBFLAG_A32(IN, WHICH)   FIELD_EX32(IN.flags2, TBFLAG_A32, WHICH)
3574#define EX_TBFLAG_M32(IN, WHICH)   FIELD_EX32(IN.flags2, TBFLAG_M32, WHICH)
3575#define EX_TBFLAG_AM32(IN, WHICH)  FIELD_EX32(IN.flags2, TBFLAG_AM32, WHICH)
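
/*
 * Usage sketch (mirroring how the hflags code uses these; core_idx is
 * a hypothetical local): deposit a field with DP_*, read it back with
 * EX_*:
 *
 *   CPUARMTBFlags flags = env->hflags;
 *   DP_TBFLAG_ANY(flags, MMUIDX, core_idx);
 *   int idx = EX_TBFLAG_ANY(flags, MMUIDX);
 */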
3576
3577/**
3578 * cpu_mmu_index:
3579 * @env: The cpu environment
3580 * @ifetch: True for code access, false for data access.
3581 *
3582 * Return the core mmu index for the current translation regime.
3583 * This function is used by generic TCG code paths.
3584 */
3585static inline int cpu_mmu_index(CPUARMState *env, bool ifetch)
3586{
3587    return EX_TBFLAG_ANY(env->hflags, MMUIDX);
3588}
3589
3590static inline bool bswap_code(bool sctlr_b)
3591{
3592#ifdef CONFIG_USER_ONLY
3593    /* BE8 (SCTLR.B = 0, TARGET_WORDS_BIGENDIAN = 1) is mixed endian.
3594     * The invalid combination SCTLR.B=1/CPSR.E=1/TARGET_WORDS_BIGENDIAN=0
3595     * would also end up as a mixed-endian mode with BE code, LE data.
3596     */
3597    return
3598#ifdef TARGET_WORDS_BIGENDIAN
3599        1 ^
3600#endif
3601        sctlr_b;
3602#else
3603    /* All code accesses in ARM are little endian, and there are no loaders
3604     * doing swaps that need to be reversed.
3605     */
3606    return 0;
3607#endif
3608}
3609
3610#ifdef CONFIG_USER_ONLY
3611static inline bool arm_cpu_bswap_data(CPUARMState *env)
3612{
3613    return
3614#ifdef TARGET_WORDS_BIGENDIAN
3615       1 ^
3616#endif
3617       arm_cpu_data_is_big_endian(env);
3618}
3619#endif
3620
3621void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
3622                          target_ulong *cs_base, uint32_t *flags);
3623
3624enum {
3625    QEMU_PSCI_CONDUIT_DISABLED = 0,
3626    QEMU_PSCI_CONDUIT_SMC = 1,
3627    QEMU_PSCI_CONDUIT_HVC = 2,
3628};
3629
3630#ifndef CONFIG_USER_ONLY
3631/* Return the address space index to use for a memory access */
3632static inline int arm_asidx_from_attrs(CPUState *cs, MemTxAttrs attrs)
3633{
3634    return attrs.secure ? ARMASIdx_S : ARMASIdx_NS;
3635}
3636
3637/* Return the AddressSpace to use for a memory access
3638 * (which depends on whether the access is S or NS, and whether
3639 * the board gave us a separate AddressSpace for S accesses).
3640 */
3641static inline AddressSpace *arm_addressspace(CPUState *cs, MemTxAttrs attrs)
3642{
3643    return cpu_get_address_space(cs, arm_asidx_from_attrs(cs, attrs));
3644}
3645#endif
3646
3647/**
3648 * arm_register_pre_el_change_hook:
3649 * Register a hook function which will be called immediately before this
3650 * CPU changes exception level or mode. The hook function will be
3651 * passed a pointer to the ARMCPU and the opaque data pointer passed
3652 * to this function when the hook was registered.
3653 *
3654 * Note that if a pre-change hook is called, any registered post-change hooks
3655 * are guaranteed to subsequently be called.
3656 */
3657void arm_register_pre_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook,
3658                                 void *opaque);
3659/**
3660 * arm_register_el_change_hook:
3661 * Register a hook function which will be called immediately after this
3662 * CPU changes exception level or mode. The hook function will be
3663 * passed a pointer to the ARMCPU and the opaque data pointer passed
3664 * to this function when the hook was registered.
3665 *
3666 * Note that any registered hooks registered here are guaranteed to be called
3667 * if pre-change hooks have been.
3668 */
3669void arm_register_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook, void
3670        *opaque);
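
/*
 * Registration sketch (my_hook is a hypothetical client function with
 * the ARMELChangeHookFn signature described above):
 *
 *   static void my_hook(ARMCPU *cpu, void *opaque) { ... }
 *   ...
 *   arm_register_el_change_hook(cpu, my_hook, NULL);
 */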
3671
3672/**
3673 * arm_rebuild_hflags:
3674 * Rebuild the cached TBFLAGS for arbitrary changed processor state.
3675 */
3676void arm_rebuild_hflags(CPUARMState *env);
3677
3678/**
3679 * aa32_vfp_dreg:
3680 * Return a pointer to the Dn register within env in 32-bit mode.
3681 */
3682static inline uint64_t *aa32_vfp_dreg(CPUARMState *env, unsigned regno)
3683{
3684    return &env->vfp.zregs[regno >> 1].d[regno & 1];
3685}
3686
3687/**
3688 * aa32_vfp_qreg:
3689 * Return a pointer to the Qn register within env in 32-bit mode.
3690 */
3691static inline uint64_t *aa32_vfp_qreg(CPUARMState *env, unsigned regno)
3692{
3693    return &env->vfp.zregs[regno].d[0];
3694}
3695
3696/**
3697 * aa64_vfp_qreg:
3698 * Return a pointer to the Qn register within env in 64-bit mode.
3699 */
3700static inline uint64_t *aa64_vfp_qreg(CPUARMState *env, unsigned regno)
3701{
3702    return &env->vfp.zregs[regno].d[0];
3703}
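
/*
 * Layout example (editor's note): given the mappings above, in 32-bit
 * mode D5 is zregs[2].d[1] and Q2 covers zregs[2].d[0] and .d[1];
 * in 64-bit mode Q3 is simply zregs[3].d[0] and .d[1].
 */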
3704
3705/* Shared between translate-sve.c and sve_helper.c.  */
3706extern const uint64_t pred_esz_masks[4];
3707
3708/* Helper for the macros below, validating the argument type. */
3709static inline MemTxAttrs *typecheck_memtxattrs(MemTxAttrs *x)
3710{
3711    return x;
3712}
3713
3714/*
3715 * Lvalue macros for ARM TLB bits that we must cache in the TCG TLB.
3716 * Using these should be a bit more self-documenting than using the
3717 * generic target bits directly.
3718 */
3719#define arm_tlb_bti_gp(x) (typecheck_memtxattrs(x)->target_tlb_bit0)
3720#define arm_tlb_mte_tagged(x) (typecheck_memtxattrs(x)->target_tlb_bit1)
3721
3722/*
3723 * AArch64 usage of the PAGE_TARGET_* bits for linux-user.
3724 */
3725#define PAGE_BTI  PAGE_TARGET_1
3726#define PAGE_MTE  PAGE_TARGET_2
3727
3728#ifdef TARGET_TAGGED_ADDRESSES
3729/**
3730 * cpu_untagged_addr:
3731 * @cs: CPU context
3732 * @x: tagged address
3733 *
3734 * Remove any address tag from @x.  This is explicitly related to the
3735 * linux syscall TIF_TAGGED_ADDR setting, not TBI in general.
3736 *
3737 * There should be a better place to put this, but we need this in
3738 * include/exec/cpu_ldst.h, and not some place linux-user specific.
3739 */
3740static inline target_ulong cpu_untagged_addr(CPUState *cs, target_ulong x)
3741{
3742    ARMCPU *cpu = ARM_CPU(cs);
3743    if (cpu->env.tagged_addr_enable) {
3744        /*
3745         * TBI is enabled for userspace but not kernelspace addresses.
3746         * Only clear the tag if bit 55 is clear.
3747         */
3748        x &= sextract64(x, 0, 56);
3749    }
3750    return x;
3751}
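
/*
 * Worked example (editor's note): for a tagged user pointer such as
 * 0xaa00123456789abc, bit 55 is 0, so sextract64(x, 0, 56) yields
 * 0x0000123456789abc and the AND clears the tag byte; for an address
 * with bit 55 set, the sign extension fills bits 63:56 with ones and
 * x is returned unchanged.
 */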
3752#endif
3753
3754/*
3755 * Naming convention for isar_feature functions:
3756 * Functions which test 32-bit ID registers should have _aa32_ in
3757 * their name. Functions which test 64-bit ID registers should have
3758 * _aa64_ in their name. These must only be used in code where we
3759 * know for certain that the CPU has AArch32 or AArch64 respectively
3760 * or where the correct answer for a CPU which doesn't implement that
3761 * CPU state is "false" (eg when generating A32 or A64 code, if adding
3762 * system registers that are specific to that CPU state, for "should
3763 * we let this system register bit be set" tests where the 32-bit
3764 * flavour of the register doesn't have the bit, and so on).
3765 * Functions which simply ask "does this feature exist at all" have
3766 * _any_ in their name, and always return the logical OR of the _aa64_
3767 * and the _aa32_ function.
3768 */
3769
3770/*
3771 * 32-bit feature tests via id registers.
3772 */
3773static inline bool isar_feature_aa32_thumb_div(const ARMISARegisters *id)
3774{
3775    return FIELD_EX32(id->id_isar0, ID_ISAR0, DIVIDE) != 0;
3776}
3777
3778static inline bool isar_feature_aa32_arm_div(const ARMISARegisters *id)
3779{
3780    return FIELD_EX32(id->id_isar0, ID_ISAR0, DIVIDE) > 1;
3781}
3782
3783static inline bool isar_feature_aa32_lob(const ARMISARegisters *id)
3784{
3785    /* (M-profile) low-overhead loops and branch future */
3786    return FIELD_EX32(id->id_isar0, ID_ISAR0, CMPBRANCH) >= 3;
3787}
3788
3789static inline bool isar_feature_aa32_jazelle(const ARMISARegisters *id)
3790{
3791    return FIELD_EX32(id->id_isar1, ID_ISAR1, JAZELLE) != 0;
3792}
3793
3794static inline bool isar_feature_aa32_aes(const ARMISARegisters *id)
3795{
3796    return FIELD_EX32(id->id_isar5, ID_ISAR5, AES) != 0;
3797}
3798
3799static inline bool isar_feature_aa32_pmull(const ARMISARegisters *id)
3800{
3801    return FIELD_EX32(id->id_isar5, ID_ISAR5, AES) > 1;
3802}
3803
3804static inline bool isar_feature_aa32_sha1(const ARMISARegisters *id)
3805{
3806    return FIELD_EX32(id->id_isar5, ID_ISAR5, SHA1) != 0;
3807}
3808
3809static inline bool isar_feature_aa32_sha2(const ARMISARegisters *id)
3810{
3811    return FIELD_EX32(id->id_isar5, ID_ISAR5, SHA2) != 0;
3812}
3813
3814static inline bool isar_feature_aa32_crc32(const ARMISARegisters *id)
3815{
3816    return FIELD_EX32(id->id_isar5, ID_ISAR5, CRC32) != 0;
3817}
3818
3819static inline bool isar_feature_aa32_rdm(const ARMISARegisters *id)
3820{
3821    return FIELD_EX32(id->id_isar5, ID_ISAR5, RDM) != 0;
3822}
3823
3824static inline bool isar_feature_aa32_vcma(const ARMISARegisters *id)
3825{
3826    return FIELD_EX32(id->id_isar5, ID_ISAR5, VCMA) != 0;
3827}
3828
3829static inline bool isar_feature_aa32_jscvt(const ARMISARegisters *id)
3830{
3831    return FIELD_EX32(id->id_isar6, ID_ISAR6, JSCVT) != 0;
3832}
3833
3834static inline bool isar_feature_aa32_dp(const ARMISARegisters *id)
3835{
3836    return FIELD_EX32(id->id_isar6, ID_ISAR6, DP) != 0;
3837}
3838
3839static inline bool isar_feature_aa32_fhm(const ARMISARegisters *id)
3840{
3841    return FIELD_EX32(id->id_isar6, ID_ISAR6, FHM) != 0;
3842}
3843
3844static inline bool isar_feature_aa32_sb(const ARMISARegisters *id)
3845{
3846    return FIELD_EX32(id->id_isar6, ID_ISAR6, SB) != 0;
3847}
3848
3849static inline bool isar_feature_aa32_predinv(const ARMISARegisters *id)
3850{
3851    return FIELD_EX32(id->id_isar6, ID_ISAR6, SPECRES) != 0;
3852}
3853
3854static inline bool isar_feature_aa32_bf16(const ARMISARegisters *id)
3855{
3856    return FIELD_EX32(id->id_isar6, ID_ISAR6, BF16) != 0;
3857}
3858
3859static inline bool isar_feature_aa32_i8mm(const ARMISARegisters *id)
3860{
3861    return FIELD_EX32(id->id_isar6, ID_ISAR6, I8MM) != 0;
3862}
3863
3864static inline bool isar_feature_aa32_ras(const ARMISARegisters *id)
3865{
3866    return FIELD_EX32(id->id_pfr0, ID_PFR0, RAS) != 0;
3867}
3868
3869static inline bool isar_feature_aa32_mprofile(const ARMISARegisters *id)
3870{
3871    return FIELD_EX32(id->id_pfr1, ID_PFR1, MPROGMOD) != 0;
3872}
3873
3874static inline bool isar_feature_aa32_m_sec_state(const ARMISARegisters *id)
3875{
3876    /*
3877     * Return true if M-profile state handling insns
3878     * (VSCCLRM, CLRM, FPCTX access insns) are implemented
3879     */
3880    return FIELD_EX32(id->id_pfr1, ID_PFR1, SECURITY) >= 3;
3881}
3882
3883static inline bool isar_feature_aa32_fp16_arith(const ARMISARegisters *id)
3884{
3885    /* Sadly this is encoded differently for A-profile and M-profile */
3886    if (isar_feature_aa32_mprofile(id)) {
3887        return FIELD_EX32(id->mvfr1, MVFR1, FP16) > 0;
3888    } else {
3889        return FIELD_EX32(id->mvfr1, MVFR1, FPHP) >= 3;
3890    }
3891}
3892
3893static inline bool isar_feature_aa32_mve(const ARMISARegisters *id)
3894{
3895    /*
3896     * Return true if MVE is supported (either integer or floating point).
3897     * We must check for M-profile as the MVFR1 field means something
3898     * else for A-profile.
3899     */
3900    return isar_feature_aa32_mprofile(id) &&
3901        FIELD_EX32(id->mvfr1, MVFR1, MVE) > 0;
3902}
3903
3904static inline bool isar_feature_aa32_mve_fp(const ARMISARegisters *id)
3905{
3906    /*
3907     * Return true if MVE with floating point support is implemented.
3908     * We must check for M-profile as the MVFR1 field means something
3909     * else for A-profile.
3910     */
3911    return isar_feature_aa32_mprofile(id) &&
3912        FIELD_EX32(id->mvfr1, MVFR1, MVE) >= 2;
3913}
3914
3915static inline bool isar_feature_aa32_vfp_simd(const ARMISARegisters *id)
3916{
3917    /*
3918     * Return true if either VFP or SIMD is implemented.
3919     * If so, at least VFP with registers D0-D15 is implemented.
3920     */
3921    return FIELD_EX32(id->mvfr0, MVFR0, SIMDREG) > 0;
3922}
3923
3924static inline bool isar_feature_aa32_simd_r32(const ARMISARegisters *id)
3925{
3926    /* Return true if D16-D31 are implemented */
3927    return FIELD_EX32(id->mvfr0, MVFR0, SIMDREG) >= 2;
3928}
3929
3930static inline bool isar_feature_aa32_fpshvec(const ARMISARegisters *id)
3931{
3932    return FIELD_EX32(id->mvfr0, MVFR0, FPSHVEC) > 0;
3933}
3934
3935static inline bool isar_feature_aa32_fpsp_v2(const ARMISARegisters *id)
3936{
3937    /* Return true if CPU supports single precision floating point, VFPv2 */
3938    return FIELD_EX32(id->mvfr0, MVFR0, FPSP) > 0;
3939}
3940
3941static inline bool isar_feature_aa32_fpsp_v3(const ARMISARegisters *id)
3942{
3943    /* Return true if CPU supports single precision floating point, VFPv3 */
3944    return FIELD_EX32(id->mvfr0, MVFR0, FPSP) >= 2;
3945}
3946
3947static inline bool isar_feature_aa32_fpdp_v2(const ARMISARegisters *id)
3948{
3949    /* Return true if CPU supports double precision floating point, VFPv2 */
3950    return FIELD_EX32(id->mvfr0, MVFR0, FPDP) > 0;
3951}
3952
3953static inline bool isar_feature_aa32_fpdp_v3(const ARMISARegisters *id)
3954{
3955    /* Return true if CPU supports double precision floating point, VFPv3 */
3956    return FIELD_EX32(id->mvfr0, MVFR0, FPDP) >= 2;
3957}
3958
3959static inline bool isar_feature_aa32_vfp(const ARMISARegisters *id)
3960{
3961    return isar_feature_aa32_fpsp_v2(id) || isar_feature_aa32_fpdp_v2(id);
3962}
3963
3964/*
3965 * We always set the FP and SIMD FP16 fields to indicate identical
3966 * levels of support (assuming SIMD is implemented at all), so
3967 * we only need one set of accessors.
3968 */
3969static inline bool isar_feature_aa32_fp16_spconv(const ARMISARegisters *id)
3970{
3971    return FIELD_EX32(id->mvfr1, MVFR1, FPHP) > 0;
3972}
3973
3974static inline bool isar_feature_aa32_fp16_dpconv(const ARMISARegisters *id)
3975{
3976    return FIELD_EX32(id->mvfr1, MVFR1, FPHP) > 1;
3977}
3978
3979/*
3980 * Note that this ID register field covers both VFP and Neon FMAC,
3981 * so should usually be tested in combination with some other
3982 * check that confirms the presence of whichever of VFP or Neon is
3983 * relevant, to avoid accidentally enabling a Neon feature on
3984 * a VFP-no-Neon core or vice-versa.
3985 */
3986static inline bool isar_feature_aa32_simdfmac(const ARMISARegisters *id)
3987{
3988    return FIELD_EX32(id->mvfr1, MVFR1, SIMDFMAC) != 0;
3989}
3990
3991static inline bool isar_feature_aa32_vsel(const ARMISARegisters *id)
3992{
3993    return FIELD_EX32(id->mvfr2, MVFR2, FPMISC) >= 1;
3994}
3995
3996static inline bool isar_feature_aa32_vcvt_dr(const ARMISARegisters *id)
3997{
3998    return FIELD_EX32(id->mvfr2, MVFR2, FPMISC) >= 2;
3999}
4000
4001static inline bool isar_feature_aa32_vrint(const ARMISARegisters *id)
4002{
4003    return FIELD_EX32(id->mvfr2, MVFR2, FPMISC) >= 3;
4004}
4005
4006static inline bool isar_feature_aa32_vminmaxnm(const ARMISARegisters *id)
4007{
4008    return FIELD_EX32(id->mvfr2, MVFR2, FPMISC) >= 4;
4009}
4010
4011static inline bool isar_feature_aa32_pxn(const ARMISARegisters *id)
4012{
4013    return FIELD_EX32(id->id_mmfr0, ID_MMFR0, VMSA) >= 4;
4014}
4015
4016static inline bool isar_feature_aa32_pan(const ARMISARegisters *id)
4017{
4018    return FIELD_EX32(id->id_mmfr3, ID_MMFR3, PAN) != 0;
4019}
4020
4021static inline bool isar_feature_aa32_ats1e1(const ARMISARegisters *id)
4022{
4023    return FIELD_EX32(id->id_mmfr3, ID_MMFR3, PAN) >= 2;
4024}
4025
4026static inline bool isar_feature_aa32_pmu_8_1(const ARMISARegisters *id)
4027{
4028    /* 0xf means "non-standard IMPDEF PMU" */
4029    return FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) >= 4 &&
4030        FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) != 0xf;
4031}
4032
4033static inline bool isar_feature_aa32_pmu_8_4(const ARMISARegisters *id)
4034{
4035    /* 0xf means "non-standard IMPDEF PMU" */
4036    return FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) >= 5 &&
4037        FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) != 0xf;
4038}
4039
4040static inline bool isar_feature_aa32_hpd(const ARMISARegisters *id)
4041{
4042    return FIELD_EX32(id->id_mmfr4, ID_MMFR4, HPDS) != 0;
4043}
4044
4045static inline bool isar_feature_aa32_ac2(const ARMISARegisters *id)
4046{
4047    return FIELD_EX32(id->id_mmfr4, ID_MMFR4, AC2) != 0;
4048}
4049
4050static inline bool isar_feature_aa32_ccidx(const ARMISARegisters *id)
4051{
4052    return FIELD_EX32(id->id_mmfr4, ID_MMFR4, CCIDX) != 0;
4053}
4054
4055static inline bool isar_feature_aa32_tts2uxn(const ARMISARegisters *id)
4056{
4057    return FIELD_EX32(id->id_mmfr4, ID_MMFR4, XNX) != 0;
4058}
4059
4060static inline bool isar_feature_aa32_dit(const ARMISARegisters *id)
4061{
4062    return FIELD_EX32(id->id_pfr0, ID_PFR0, DIT) != 0;
4063}
4064
4065static inline bool isar_feature_aa32_ssbs(const ARMISARegisters *id)
4066{
4067    return FIELD_EX32(id->id_pfr2, ID_PFR2, SSBS) != 0;
4068}
4069
4070/*
4071 * 64-bit feature tests via id registers.
4072 */
4073static inline bool isar_feature_aa64_aes(const ARMISARegisters *id)
4074{
4075    return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, AES) != 0;
4076}
4077
4078static inline bool isar_feature_aa64_pmull(const ARMISARegisters *id)
4079{
4080    return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, AES) > 1;
4081}
4082
4083static inline bool isar_feature_aa64_sha1(const ARMISARegisters *id)
4084{
4085    return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA1) != 0;
4086}
4087
4088static inline bool isar_feature_aa64_sha256(const ARMISARegisters *id)
4089{
4090    return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA2) != 0;
4091}
4092
4093static inline bool isar_feature_aa64_sha512(const ARMISARegisters *id)
4094{
4095    return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA2) > 1;
4096}
4097
4098static inline bool isar_feature_aa64_crc32(const ARMISARegisters *id)
4099{
4100    return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, CRC32) != 0;
4101}
4102
4103static inline bool isar_feature_aa64_atomics(const ARMISARegisters *id)
4104{
4105    return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, ATOMIC) != 0;
4106}
4107
4108static inline bool isar_feature_aa64_rdm(const ARMISARegisters *id)
4109{
4110    return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, RDM) != 0;
4111}
4112
4113static inline bool isar_feature_aa64_sha3(const ARMISARegisters *id)
4114{
4115    return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA3) != 0;
4116}
4117
4118static inline bool isar_feature_aa64_sm3(const ARMISARegisters *id)
4119{
4120    return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SM3) != 0;
4121}
4122
4123static inline bool isar_feature_aa64_sm4(const ARMISARegisters *id)
4124{
4125    return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SM4) != 0;
4126}
4127
4128static inline bool isar_feature_aa64_dp(const ARMISARegisters *id)
4129{
4130    return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, DP) != 0;
4131}
4132
4133static inline bool isar_feature_aa64_fhm(const ARMISARegisters *id)
4134{
4135    return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, FHM) != 0;
4136}
4137
4138static inline bool isar_feature_aa64_condm_4(const ARMISARegisters *id)
4139{
4140    return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, TS) != 0;
4141}
4142
4143static inline bool isar_feature_aa64_condm_5(const ARMISARegisters *id)
4144{
4145    return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, TS) >= 2;
4146}
4147
4148static inline bool isar_feature_aa64_rndr(const ARMISARegisters *id)
4149{
4150    return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, RNDR) != 0;
4151}
4152
4153static inline bool isar_feature_aa64_jscvt(const ARMISARegisters *id)
4154{
4155    return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, JSCVT) != 0;
4156}
4157
4158static inline bool isar_feature_aa64_fcma(const ARMISARegisters *id)
4159{
4160    return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, FCMA) != 0;
4161}
4162
4163static inline bool isar_feature_aa64_pauth(const ARMISARegisters *id)
4164{
4165    /*
4166     * Return true if any form of pauth is enabled, as this
4167     * predicate controls migration of the 128-bit keys.
4168     */
4169    return (id->id_aa64isar1 &
4170            (FIELD_DP64(0, ID_AA64ISAR1, APA, 0xf) |
4171             FIELD_DP64(0, ID_AA64ISAR1, API, 0xf) |
4172             FIELD_DP64(0, ID_AA64ISAR1, GPA, 0xf) |
4173             FIELD_DP64(0, ID_AA64ISAR1, GPI, 0xf))) != 0;
4174}
4175
4176static inline bool isar_feature_aa64_pauth_arch(const ARMISARegisters *id)
4177{
4178    /*
4179     * Return true if pauth is enabled with the architected QARMA algorithm.
4180     * QEMU will always set APA+GPA to the same value.
4181     */
4182    return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, APA) != 0;
4183}
4184
4185static inline bool isar_feature_aa64_tlbirange(const ARMISARegisters *id)
4186{
4187    return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, TLB) == 2;
4188}
4189
4190static inline bool isar_feature_aa64_tlbios(const ARMISARegisters *id)
4191{
4192    return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, TLB) != 0;
4193}
4194
4195static inline bool isar_feature_aa64_sb(const ARMISARegisters *id)
4196{
4197    return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, SB) != 0;
4198}
4199
4200static inline bool isar_feature_aa64_predinv(const ARMISARegisters *id)
4201{
4202    return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, SPECRES) != 0;
4203}
4204
4205static inline bool isar_feature_aa64_frint(const ARMISARegisters *id)
4206{
4207    return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, FRINTTS) != 0;
4208}
4209
4210static inline bool isar_feature_aa64_dcpop(const ARMISARegisters *id)
4211{
4212    return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, DPB) != 0;
4213}
4214
4215static inline bool isar_feature_aa64_dcpodp(const ARMISARegisters *id)
4216{
4217    return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, DPB) >= 2;
4218}
4219
4220static inline bool isar_feature_aa64_bf16(const ARMISARegisters *id)
4221{
4222    return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, BF16) != 0;
4223}
4224
4225static inline bool isar_feature_aa64_fp_simd(const ARMISARegisters *id)
4226{
4227    /* We always set the AdvSIMD and FP fields identically.  */
4228    return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, FP) != 0xf;
4229}
4230
4231static inline bool isar_feature_aa64_fp16(const ARMISARegisters *id)
4232{
4233    /* We always set the AdvSIMD and FP fields identically wrt FP16.  */
4234    return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, FP) == 1;
4235}
4236
4237static inline bool isar_feature_aa64_aa32(const ARMISARegisters *id)
4238{
4239    return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, EL0) >= 2;
4240}
4241
4242static inline bool isar_feature_aa64_aa32_el1(const ARMISARegisters *id)
4243{
4244    return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, EL1) >= 2;
4245}
4246
4247static inline bool isar_feature_aa64_sve(const ARMISARegisters *id)
4248{
4249    return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, SVE) != 0;
4250}
4251
4252static inline bool isar_feature_aa64_sel2(const ARMISARegisters *id)
4253{
4254    return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, SEL2) != 0;
4255}
4256
4257static inline bool isar_feature_aa64_vh(const ARMISARegisters *id)
4258{
4259    return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, VH) != 0;
4260}
4261
4262static inline bool isar_feature_aa64_lor(const ARMISARegisters *id)
4263{
4264    return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, LO) != 0;
4265}
4266
4267static inline bool isar_feature_aa64_pan(const ARMISARegisters *id)
4268{
4269    return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, PAN) != 0;
4270}
4271
4272static inline bool isar_feature_aa64_ats1e1(const ARMISARegisters *id)
4273{
4274    return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, PAN) >= 2;
4275}
4276
4277static inline bool isar_feature_aa64_uao(const ARMISARegisters *id)
4278{
4279    return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, UAO) != 0;
4280}
4281
4282static inline bool isar_feature_aa64_st(const ARMISARegisters *id)
4283{
4284    return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, ST) != 0;
4285}
4286
4287static inline bool isar_feature_aa64_bti(const ARMISARegisters *id)
4288{
4289    return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, BT) != 0;
4290}
4291
4292static inline bool isar_feature_aa64_mte_insn_reg(const ARMISARegisters *id)
4293{
4294    return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, MTE) != 0;
4295}
4296
4297static inline bool isar_feature_aa64_mte(const ARMISARegisters *id)
4298{
4299    return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, MTE) >= 2;
4300}
4301
4302static inline bool isar_feature_aa64_pmu_8_1(const ARMISARegisters *id)
4303{
4304    return FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) >= 4 &&
4305        FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) != 0xf;
4306}
4307
4308static inline bool isar_feature_aa64_pmu_8_4(const ARMISARegisters *id)
4309{
4310    return FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) >= 5 &&
4311        FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) != 0xf;
4312}
4313
4314static inline bool isar_feature_aa64_rcpc_8_3(const ARMISARegisters *id)
4315{
4316    return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, LRCPC) != 0;
4317}
4318
4319static inline bool isar_feature_aa64_rcpc_8_4(const ARMISARegisters *id)
4320{
4321    return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, LRCPC) >= 2;
4322}
4323
4324static inline bool isar_feature_aa64_i8mm(const ARMISARegisters *id)
4325{
4326    return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, I8MM) != 0;
4327}
4328
4329static inline bool isar_feature_aa64_ccidx(const ARMISARegisters *id)
4330{
4331    return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, CCIDX) != 0;
4332}
4333
4334static inline bool isar_feature_aa64_tts2uxn(const ARMISARegisters *id)
4335{
4336    return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, XNX) != 0;
4337}
4338
4339static inline bool isar_feature_aa64_dit(const ARMISARegisters *id)
4340{
4341    return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, DIT) != 0;
4342}
4343
4344static inline bool isar_feature_aa64_ssbs(const ARMISARegisters *id)
4345{
4346    return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, SSBS) != 0;
4347}
4348
4349static inline bool isar_feature_aa64_sve2(const ARMISARegisters *id)
4350{
4351    return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, SVEVER) != 0;
4352}
4353
4354static inline bool isar_feature_aa64_sve2_aes(const ARMISARegisters *id)
4355{
4356    return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, AES) != 0;
4357}
4358
4359static inline bool isar_feature_aa64_sve2_pmull128(const ARMISARegisters *id)
4360{
4361    return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, AES) >= 2;
4362}
4363
4364static inline bool isar_feature_aa64_sve2_bitperm(const ARMISARegisters *id)
4365{
4366    return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, BITPERM) != 0;
4367}
4368
4369static inline bool isar_feature_aa64_sve_bf16(const ARMISARegisters *id)
4370{
4371    return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, BFLOAT16) != 0;
4372}
4373
4374static inline bool isar_feature_aa64_sve2_sha3(const ARMISARegisters *id)
4375{
4376    return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, SHA3) != 0;
4377}
4378
4379static inline bool isar_feature_aa64_sve2_sm4(const ARMISARegisters *id)
4380{
4381    return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, SM4) != 0;
4382}
4383
4384static inline bool isar_feature_aa64_sve_i8mm(const ARMISARegisters *id)
4385{
4386    return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, I8MM) != 0;
4387}
4388
4389static inline bool isar_feature_aa64_sve_f32mm(const ARMISARegisters *id)
4390{
4391    return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, F32MM) != 0;
4392}
4393
4394static inline bool isar_feature_aa64_sve_f64mm(const ARMISARegisters *id)
4395{
4396    return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, F64MM) != 0;
4397}
4398
4399/*
4400 * Feature tests for "does this exist in either 32-bit or 64-bit?"
4401 */
4402static inline bool isar_feature_any_fp16(const ARMISARegisters *id)
4403{
4404    return isar_feature_aa64_fp16(id) || isar_feature_aa32_fp16_arith(id);
4405}
4406
4407static inline bool isar_feature_any_predinv(const ARMISARegisters *id)
4408{
4409    return isar_feature_aa64_predinv(id) || isar_feature_aa32_predinv(id);
4410}
4411
4412static inline bool isar_feature_any_pmu_8_1(const ARMISARegisters *id)
4413{
4414    return isar_feature_aa64_pmu_8_1(id) || isar_feature_aa32_pmu_8_1(id);
4415}
4416
4417static inline bool isar_feature_any_pmu_8_4(const ARMISARegisters *id)
4418{
4419    return isar_feature_aa64_pmu_8_4(id) || isar_feature_aa32_pmu_8_4(id);
4420}
4421
4422static inline bool isar_feature_any_ccidx(const ARMISARegisters *id)
4423{
4424    return isar_feature_aa64_ccidx(id) || isar_feature_aa32_ccidx(id);
4425}
4426
4427static inline bool isar_feature_any_tts2uxn(const ARMISARegisters *id)
4428{
4429    return isar_feature_aa64_tts2uxn(id) || isar_feature_aa32_tts2uxn(id);
4430}
4431
4432/*
4433 * Forward to the above feature tests given an ARMCPU pointer.
4434 */
4435#define cpu_isar_feature(name, cpu) \
4436    ({ ARMCPU *cpu_ = (cpu); isar_feature_##name(&cpu_->isar); })
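
/*
 * Usage sketch: translate-time feature checks read, for example,
 *
 *   if (cpu_isar_feature(aa64_bti, cpu)) {
 *       ... emit BTI handling (illustrative; see the translator
 *       for the real logic) ...
 *   }
 */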
4437
4438#endif
4439