/*
 * QEMU ARM CPU -- internal functions and types
 *
 * Copyright (c) 2014 Linaro Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This header defines functions, types, etc which need to be shared
 * between different source files within target/arm/ but which are
 * private to it and not required by the rest of QEMU.
 */

#ifndef TARGET_ARM_INTERNALS_H
#define TARGET_ARM_INTERNALS_H

#include "hw/registerfields.h"
#include "tcg/tcg-gvec-desc.h"
#include "syndrome.h"

/* register banks for CPU modes */
#define BANK_USRSYS 0
#define BANK_SVC    1
#define BANK_ABT    2
#define BANK_UND    3
#define BANK_IRQ    4
#define BANK_FIQ    5
#define BANK_HYP    6
#define BANK_MON    7

static inline bool excp_is_internal(int excp)
{
    /* Return true if this exception number represents a QEMU-internal
     * exception that will not be passed to the guest.
     */
    return excp == EXCP_INTERRUPT
        || excp == EXCP_HLT
        || excp == EXCP_DEBUG
        || excp == EXCP_HALTED
        || excp == EXCP_EXCEPTION_EXIT
        || excp == EXCP_KERNEL_TRAP
        || excp == EXCP_SEMIHOST;
}

/* Scale factor for generic timers, ie number of ns per tick.
 * This gives a 62.5MHz timer.
 */
#define GTIMER_SCALE 16

/* Bit definitions for the v7M CONTROL register */
FIELD(V7M_CONTROL, NPRIV, 0, 1)
FIELD(V7M_CONTROL, SPSEL, 1, 1)
FIELD(V7M_CONTROL, FPCA, 2, 1)
FIELD(V7M_CONTROL, SFPA, 3, 1)

/* Bit definitions for v7M exception return payload */
FIELD(V7M_EXCRET, ES, 0, 1)
FIELD(V7M_EXCRET, RES0, 1, 1)
FIELD(V7M_EXCRET, SPSEL, 2, 1)
FIELD(V7M_EXCRET, MODE, 3, 1)
FIELD(V7M_EXCRET, FTYPE, 4, 1)
FIELD(V7M_EXCRET, DCRS, 5, 1)
FIELD(V7M_EXCRET, S, 6, 1)
FIELD(V7M_EXCRET, RES1, 7, 25) /* including the must-be-1 prefix */

/* Minimum value which is a magic number for exception return */
#define EXC_RETURN_MIN_MAGIC 0xff000000
/* Minimum number which is a magic number for function or exception return
 * when using v8M security extension
 */
#define FNC_RETURN_MIN_MAGIC 0xfefffffe

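/*
 * Usage sketch (illustrative only, not an API defined here): on v8M with
 * the security extension, a branch target at or above FNC_RETURN_MIN_MAGIC
 * is not an ordinary code address but a magic function or exception
 * return value, so a caller might test:
 *
 *     if (dest >= FNC_RETURN_MIN_MAGIC) {
 *         // 0xfefffffe..0xfeffffff is FNC_RETURN;
 *         // EXC_RETURN_MIN_MAGIC and up is exception return
 *         handle_magic_return(env, dest);  // hypothetical helper
 *     }
 */
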
/* Bit definitions for DBGWCRn and DBGWCRn_EL1 */
FIELD(DBGWCR, E, 0, 1)
FIELD(DBGWCR, PAC, 1, 2)
FIELD(DBGWCR, LSC, 3, 2)
FIELD(DBGWCR, BAS, 5, 8)
FIELD(DBGWCR, HMC, 13, 1)
FIELD(DBGWCR, SSC, 14, 2)
FIELD(DBGWCR, LBN, 16, 4)
FIELD(DBGWCR, WT, 20, 1)
FIELD(DBGWCR, MASK, 24, 5)
FIELD(DBGWCR, SSCE, 29, 1)

/* We use a few fake FSR values for internal purposes in M profile.
 * M profile cores don't have A/R format FSRs, but currently our
 * get_phys_addr() code assumes A/R profile and reports failures via
 * an A/R format FSR value. We then translate that into the proper
 * M profile exception and FSR status bit in arm_v7m_cpu_do_interrupt().
 * Mostly the FSR values we use for this are those defined for v7PMSA,
 * since we share some of that codepath. A few kinds of fault are
 * only for M profile and have no A/R equivalent, though, so we have
 * to pick a value from the reserved range (which we never otherwise
 * generate) to use for these.
 * These values will never be visible to the guest.
 */
#define M_FAKE_FSR_NSC_EXEC 0xf /* NS executing in S&NSC memory */
#define M_FAKE_FSR_SFAULT 0xe /* SecureFault INVTRAN, INVEP or AUVIOL */

/**
 * raise_exception: Raise the specified exception.
 * Raise a guest exception with the specified value, syndrome register
 * and target exception level. This should be called from helper functions,
 * and never returns because we will longjump back up to the CPU main loop.
 */
G_NORETURN void raise_exception(CPUARMState *env, uint32_t excp,
                                uint32_t syndrome, uint32_t target_el);

/*
 * Similarly, but also use unwinding to restore cpu state.
 */
G_NORETURN void raise_exception_ra(CPUARMState *env, uint32_t excp,
                                   uint32_t syndrome, uint32_t target_el,
                                   uintptr_t ra);

/*
 * For AArch64, map a given EL to an index in the banked_spsr array.
 * Note that this mapping and the AArch32 mapping defined in bank_number()
 * must agree such that the AArch64<->AArch32 SPSRs have the architecturally
 * mandated mapping between each other.
 */
static inline unsigned int aarch64_banked_spsr_index(unsigned int el)
{
    static const unsigned int map[4] = {
        [1] = BANK_SVC, /* EL1.  */
        [2] = BANK_HYP, /* EL2.  */
        [3] = BANK_MON, /* EL3.  */
    };
    assert(el >= 1 && el <= 3);
    return map[el];
}

/* Map CPU modes onto saved register banks.  */
static inline int bank_number(int mode)
{
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
        return BANK_USRSYS;
    case ARM_CPU_MODE_SVC:
        return BANK_SVC;
    case ARM_CPU_MODE_ABT:
        return BANK_ABT;
    case ARM_CPU_MODE_UND:
        return BANK_UND;
    case ARM_CPU_MODE_IRQ:
        return BANK_IRQ;
    case ARM_CPU_MODE_FIQ:
        return BANK_FIQ;
    case ARM_CPU_MODE_HYP:
        return BANK_HYP;
    case ARM_CPU_MODE_MON:
        return BANK_MON;
    }
    g_assert_not_reached();
}

/**
 * r14_bank_number: Map CPU mode onto register bank for r14
 *
 * Given an AArch32 CPU mode, return the index into the saved register
 * banks to use for the R14 (LR) in that mode. This is the same as
 * bank_number(), except for the special case of Hyp mode, where
 * R14 is shared with USR and SYS, unlike its R13 and SPSR.
 * This should be used as the index into env->banked_r14[], and
 * bank_number() used for the index into env->banked_r13[] and
 * env->banked_spsr[].
 */
static inline int r14_bank_number(int mode)
{
    return (mode == ARM_CPU_MODE_HYP) ? BANK_USRSYS : bank_number(mode);
}

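/*
 * Usage sketch: the two mappings above index different banked-register
 * arrays, so a (hypothetical) mode switch would save state as:
 *
 *     env->banked_r13[bank_number(mode)] = env->regs[13];
 *     env->banked_r14[r14_bank_number(mode)] = env->regs[14];
 *     env->banked_spsr[bank_number(mode)] = env->spsr;
 *
 * giving Hyp mode its own R13 and SPSR slots while sharing the
 * USR/SYS slot for R14.
 */
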
void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
void arm_translate_init(void);

void arm_restore_state_to_opc(CPUState *cs,
                              const TranslationBlock *tb,
                              const uint64_t *data);

#ifdef CONFIG_TCG
void arm_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);
#endif /* CONFIG_TCG */

typedef enum ARMFPRounding {
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
    FPROUNDING_ZERO,
    FPROUNDING_TIEAWAY,
    FPROUNDING_ODD
} ARMFPRounding;

extern const FloatRoundMode arm_rmode_to_sf_map[6];

static inline FloatRoundMode arm_rmode_to_sf(ARMFPRounding rmode)
{
    assert((unsigned)rmode < ARRAY_SIZE(arm_rmode_to_sf_map));
    return arm_rmode_to_sf_map[rmode];
}

static inline void aarch64_save_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->sp_el[el] = env->xregs[31];
    } else {
        env->sp_el[0] = env->xregs[31];
    }
}

static inline void aarch64_restore_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->xregs[31] = env->sp_el[el];
    } else {
        env->xregs[31] = env->sp_el[0];
    }
}

static inline void update_spsel(CPUARMState *env, uint32_t imm)
{
    unsigned int cur_el = arm_current_el(env);
    /* Update PSTATE SPSel bit; this requires us to update the
     * working stack pointer in xregs[31].
     */
    if (!((imm ^ env->pstate) & PSTATE_SP)) {
        return;
    }
    aarch64_save_sp(env, cur_el);
    env->pstate = deposit32(env->pstate, 0, 1, imm);

    /* We rely on illegal updates to SPsel from EL0 to get trapped
     * at translation time.
     */
    assert(cur_el >= 1 && cur_el <= 3);
    aarch64_restore_sp(env, cur_el);
}

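/*
 * Example: the AArch64 "MSR SPSel, #imm" instruction reduces to
 * update_spsel(env, imm). If the SPSel bit actually changes, the live
 * xregs[31] is first written back via aarch64_save_sp() and the newly
 * selected stack pointer loaded via aarch64_restore_sp(), so xregs[31]
 * always holds the currently active SP.
 */
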
/*
 * arm_pamax
 * @cpu: ARMCPU
 *
 * Returns the implementation defined bit-width of physical addresses.
 * The ARMv8 reference manuals refer to this as PAMax().
 */
unsigned int arm_pamax(ARMCPU *cpu);

/* Return true if extended addresses are enabled.
 * This is always the case if our translation regime is 64 bit,
 * but depends on TTBCR.EAE for 32 bit.
 */
static inline bool extended_addresses_enabled(CPUARMState *env)
{
    uint64_t tcr = env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1];
    if (arm_feature(env, ARM_FEATURE_PMSA) &&
        arm_feature(env, ARM_FEATURE_V8)) {
        return true;
    }
    return arm_el_is_aa64(env, 1) ||
           (arm_feature(env, ARM_FEATURE_LPAE) && (tcr & TTBCR_EAE));
}

/* Update a QEMU watchpoint based on the information the guest has set in the
 * DBGWCR<n>_EL1 and DBGWVR<n>_EL1 registers.
 */
void hw_watchpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU watchpoints for every guest watchpoint. This does a
 * complete delete-and-reinstate of the QEMU watchpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_watchpoint_update_all(ARMCPU *cpu);
/* Update a QEMU breakpoint based on the information the guest has set in the
 * DBGBCR<n>_EL1 and DBGBVR<n>_EL1 registers.
 */
void hw_breakpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU breakpoints for every guest breakpoint. This does a
 * complete delete-and-reinstate of the QEMU breakpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_breakpoint_update_all(ARMCPU *cpu);

/* Callback function for checking if a breakpoint should trigger. */
bool arm_debug_check_breakpoint(CPUState *cs);

/* Callback function for checking if a watchpoint should trigger. */
bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp);

/* Adjust addresses (in BE32 mode) before testing against watchpoint
 * addresses.
 */
vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len);

/* Callback function for when a watchpoint or breakpoint triggers. */
void arm_debug_excp_handler(CPUState *cs);

#if defined(CONFIG_USER_ONLY) || !defined(CONFIG_TCG)
static inline bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
{
    return false;
}
static inline void arm_handle_psci_call(ARMCPU *cpu)
{
    g_assert_not_reached();
}
#else
/* Return true if the r0/x0 value indicates that this SMC/HVC is a PSCI call. */
bool arm_is_psci_call(ARMCPU *cpu, int excp_type);
/* Actually handle a PSCI call */
void arm_handle_psci_call(ARMCPU *cpu);
#endif

/**
 * arm_clear_exclusive: clear the exclusive monitor
 * @env: CPU env
 * Clear the CPU's exclusive monitor, like the guest CLREX instruction.
 */
static inline void arm_clear_exclusive(CPUARMState *env)
{
    env->exclusive_addr = -1;
}

/**
 * ARMFaultType: type of an ARM MMU fault
 * This corresponds to the v8A pseudocode's Fault enumeration,
 * with extensions for QEMU internal conditions.
 */
typedef enum ARMFaultType {
    ARMFault_None,
    ARMFault_AccessFlag,
    ARMFault_Alignment,
    ARMFault_Background,
    ARMFault_Domain,
    ARMFault_Permission,
    ARMFault_Translation,
    ARMFault_AddressSize,
    ARMFault_SyncExternal,
    ARMFault_SyncExternalOnWalk,
    ARMFault_SyncParity,
    ARMFault_SyncParityOnWalk,
    ARMFault_AsyncParity,
    ARMFault_AsyncExternal,
    ARMFault_Debug,
    ARMFault_TLBConflict,
    ARMFault_UnsuppAtomicUpdate,
    ARMFault_Lockdown,
    ARMFault_Exclusive,
    ARMFault_ICacheMaint,
    ARMFault_QEMU_NSCExec, /* v8M: NS executing in S&NSC memory */
    ARMFault_QEMU_SFault, /* v8M: SecureFault INVTRAN, INVEP or AUVIOL */
    ARMFault_GPCFOnWalk,
    ARMFault_GPCFOnOutput,
} ARMFaultType;

typedef enum ARMGPCF {
    GPCF_None,
    GPCF_AddressSize,
    GPCF_Walk,
    GPCF_EABT,
    GPCF_Fail,
} ARMGPCF;

/**
 * ARMMMUFaultInfo: Information describing an ARM MMU Fault
 * @type: Type of fault
 * @gpcf: Subtype of ARMFault_GPCFOn{Walk,Output}.
 * @level: Table walk level (for translation, access flag and permission faults)
 * @domain: Domain of the fault address (for non-LPAE CPUs only)
 * @s2addr: Address that caused a fault at stage 2
 * @paddr: physical address that caused a fault for gpc
 * @paddr_space: physical address space that caused a fault for gpc
 * @stage2: True if we faulted at stage 2
 * @s1ptw: True if we faulted at stage 2 while doing a stage 1 page-table walk
 * @s1ns: True if we faulted on a non-secure IPA while in secure state
 * @ea: True if we should set the EA (external abort type) bit in syndrome
 */
typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
struct ARMMMUFaultInfo {
    ARMFaultType type;
    ARMGPCF gpcf;
    target_ulong s2addr;
    target_ulong paddr;
    ARMSecuritySpace paddr_space;
    int level;
    int domain;
    bool stage2;
    bool s1ptw;
    bool s1ns;
    bool ea;
};

/**
 * arm_fi_to_sfsc: Convert fault info struct to short-format FSC
 * Compare pseudocode EncodeSDFSC(), though unlike that function
 * we set up a whole FSR-format code including domain field and
 * putting the high bit of the FSC into bit 10.
 */
static inline uint32_t arm_fi_to_sfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AccessFlag:
        fsc = fi->level == 1 ? 0x3 : 0x6;
        break;
    case ARMFault_Alignment:
        fsc = 0x1;
        break;
    case ARMFault_Permission:
        fsc = fi->level == 1 ? 0xd : 0xf;
        break;
    case ARMFault_Domain:
        fsc = fi->level == 1 ? 0x9 : 0xb;
        break;
    case ARMFault_Translation:
        fsc = fi->level == 1 ? 0x5 : 0x7;
        break;
    case ARMFault_SyncExternal:
        fsc = 0x8 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        fsc = fi->level == 1 ? 0xc : 0xe;
        fsc |= (fi->ea << 12);
        break;
    case ARMFault_SyncParity:
        fsc = 0x409;
        break;
    case ARMFault_SyncParityOnWalk:
        fsc = fi->level == 1 ? 0x40c : 0x40e;
        break;
    case ARMFault_AsyncParity:
        fsc = 0x408;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x406 | (fi->ea << 12);
        break;
    case ARMFault_Debug:
        fsc = 0x2;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x400;
        break;
    case ARMFault_Lockdown:
        fsc = 0x404;
        break;
    case ARMFault_Exclusive:
        fsc = 0x405;
        break;
    case ARMFault_ICacheMaint:
        fsc = 0x4;
        break;
    case ARMFault_Background:
        fsc = 0x0;
        break;
    case ARMFault_QEMU_NSCExec:
        fsc = M_FAKE_FSR_NSC_EXEC;
        break;
    case ARMFault_QEMU_SFault:
        fsc = M_FAKE_FSR_SFAULT;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * short-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= (fi->domain << 4);
    return fsc;
}

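/*
 * Worked example of the encoding above: a level 2 translation fault in
 * domain 5 yields
 *
 *     ARMMMUFaultInfo fi = {
 *         .type = ARMFault_Translation, .level = 2, .domain = 5,
 *     };
 *     uint32_t fsc = arm_fi_to_sfsc(&fi);  // 0x7 | (5 << 4) == 0x57
 */
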
/**
 * arm_fi_to_lfsc: Convert fault info struct to long-format FSC
 * Compare pseudocode EncodeLDFSC(), though unlike that function
 * we fill in also the LPAE bit 9 of a DFSR format.
 */
static inline uint32_t arm_fi_to_lfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AddressSize:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b101001;
        } else {
            fsc = fi->level;
        }
        break;
    case ARMFault_AccessFlag:
        assert(fi->level >= 0 && fi->level <= 3);
        fsc = 0b001000 | fi->level;
        break;
    case ARMFault_Permission:
        assert(fi->level >= 0 && fi->level <= 3);
        fsc = 0b001100 | fi->level;
        break;
    case ARMFault_Translation:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b101011;
        } else {
            fsc = 0b000100 | fi->level;
        }
        break;
    case ARMFault_SyncExternal:
        fsc = 0x10 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b010011;
        } else {
            fsc = 0b010100 | fi->level;
        }
        fsc |= fi->ea << 12;
        break;
    case ARMFault_SyncParity:
        fsc = 0x18;
        break;
    case ARMFault_SyncParityOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b011011;
        } else {
            fsc = 0b011100 | fi->level;
        }
        break;
    case ARMFault_AsyncParity:
        fsc = 0x19;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x11 | (fi->ea << 12);
        break;
    case ARMFault_Alignment:
        fsc = 0x21;
        break;
    case ARMFault_Debug:
        fsc = 0x22;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x30;
        break;
    case ARMFault_UnsuppAtomicUpdate:
        fsc = 0x31;
        break;
    case ARMFault_Lockdown:
        fsc = 0x34;
        break;
    case ARMFault_Exclusive:
        fsc = 0x35;
        break;
    case ARMFault_GPCFOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b100011;
        } else {
            fsc = 0b100100 | fi->level;
        }
        break;
    case ARMFault_GPCFOnOutput:
        fsc = 0b101000;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * long-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= 1 << 9;
    return fsc;
}

static inline bool arm_extabort_type(MemTxResult result)
{
    /* The EA bit in syndromes and fault status registers is an
     * IMPDEF classification of external aborts. ARM implementations
     * usually use this to indicate AXI bus Decode error (0) or
     * Slave error (1); in QEMU we follow that.
     */
    return result != MEMTX_DECODE_ERROR;
}

#ifdef CONFIG_USER_ONLY
void arm_cpu_record_sigsegv(CPUState *cpu, vaddr addr,
                            MMUAccessType access_type,
                            bool maperr, uintptr_t ra);
void arm_cpu_record_sigbus(CPUState *cpu, vaddr addr,
                           MMUAccessType access_type, uintptr_t ra);
#else
bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr);
#endif

static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
{
    return mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
}

static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return mmu_idx | ARM_MMU_IDX_M;
    } else {
        return mmu_idx | ARM_MMU_IDX_A;
    }
}

static inline ARMMMUIdx core_to_aa64_mmu_idx(int mmu_idx)
{
    /* AArch64 is always a-profile. */
    return mmu_idx | ARM_MMU_IDX_A;
}

int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx);

/* Return the MMU index for a v7M CPU in the specified security state */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);

/*
 * Return true if the stage 1 translation regime is using LPAE
 * format page tables
 */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);

/* Raise a data fault alignment exception for the specified virtual address */
G_NORETURN void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                            MMUAccessType access_type,
                                            int mmu_idx, uintptr_t retaddr);

#ifndef CONFIG_USER_ONLY
/* arm_cpu_do_transaction_failed: handle a memory system error response
 * (eg "no device/memory present at address") by raising an external abort
 * exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr);
#endif

/* Call any registered EL change hooks */
static inline void arm_call_pre_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->pre_el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}
static inline void arm_call_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}

/* Return true if this address translation regime has two ranges.  */
static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
        return true;
    default:
        return false;
    }
}

static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_2_PAN:
        return true;
    default:
        return false;
    }
}

static inline bool regime_is_stage2(ARMMMUIdx mmu_idx)
{
    return mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S;
}

/* Return the exception level which controls this address translation regime */
static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_Stage2_S:
    case ARMMMUIdx_E2:
        return 2;
    case ARMMMUIdx_E3:
        return 3;
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_Stage1_E0:
        return arm_el_is_aa64(env, 3) || !arm_is_secure_below_el3(env) ? 1 : 3;
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return 1;
    default:
        g_assert_not_reached();
    }
}

static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MSUserNegPri:
        return true;
    default:
        return false;
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
        g_assert_not_reached();
    }
}

/* Return the SCTLR value which controls this address translation regime */
static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
}

/*
 * These are the fields in VTCR_EL2 which affect both the Secure stage 2
 * and the Non-Secure stage 2 translation regimes (and hence which are
 * not present in VSTCR_EL2).
 */
#define VTCR_SHARED_FIELD_MASK \
    (R_VTCR_IRGN0_MASK | R_VTCR_ORGN0_MASK | R_VTCR_SH0_MASK | \
     R_VTCR_PS_MASK | R_VTCR_VS_MASK | R_VTCR_HA_MASK | R_VTCR_HD_MASK | \
     R_VTCR_DS_MASK)

/* Return the value of the TCR controlling this translation regime */
static inline uint64_t regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_Stage2) {
        return env->cp15.vtcr_el2;
    }
    if (mmu_idx == ARMMMUIdx_Stage2_S) {
        /*
         * Secure stage 2 shares fields from VTCR_EL2. We merge those
         * in with the VSTCR_EL2 value to synthesize a single VTCR_EL2 format
         * value so the callers don't need to special case this.
         *
         * If a future architecture change defines bits in VSTCR_EL2 that
         * overlap with these VTCR_EL2 fields we may need to revisit this.
         */
        uint64_t v = env->cp15.vstcr_el2 & ~VTCR_SHARED_FIELD_MASK;
        v |= env->cp15.vtcr_el2 & VTCR_SHARED_FIELD_MASK;
        return v;
    }
    return env->cp15.tcr_el[regime_el(env, mmu_idx)];
}

/* Return true if the translation regime is using LPAE format page tables */
static inline bool regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    int el = regime_el(env, mmu_idx);
    if (el == 2 || arm_el_is_aa64(env, el)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_PMSA) &&
        arm_feature(env, ARM_FEATURE_V8)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)
        && (regime_tcr(env, mmu_idx) & TTBCR_EAE)) {
        return true;
    }
    return false;
}

/**
 * arm_num_brps: Return number of implemented breakpoints.
 * Note that the ID register BRPS field is "number of bps - 1",
 * and we return the actual number of breakpoints.
 */
static inline int arm_num_brps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, BRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, BRPS) + 1;
    }
}

/**
 * arm_num_wrps: Return number of implemented watchpoints.
 * Note that the ID register WRPS field is "number of wps - 1",
 * and we return the actual number of watchpoints.
 */
static inline int arm_num_wrps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, WRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, WRPS) + 1;
    }
}

/**
 * arm_num_ctx_cmps: Return number of implemented context comparators.
 * Note that the ID register CTX_CMPS field is "number of cmps - 1",
 * and we return the actual number of comparators.
 */
static inline int arm_num_ctx_cmps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, CTX_CMPS) + 1;
    }
}

/**
 * v7m_using_psp: Return true if using process stack pointer
 * Return true if the CPU is currently using the process stack
 * pointer, or false if it is using the main stack pointer.
 */
static inline bool v7m_using_psp(CPUARMState *env)
{
    /* Handler mode always uses the main stack; for thread mode
     * the CONTROL.SPSEL bit determines the answer.
     * Note that in v7M it is not possible to be in Handler mode with
     * CONTROL.SPSEL non-zero, but in v8M it is, so we must check both.
     */
    return !arm_v7m_is_handler_mode(env) &&
        env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK;
}

/**
 * v7m_sp_limit: Return SP limit for current CPU state
 * Return the SP limit value for the current CPU security state
 * and stack pointer.
 */
static inline uint32_t v7m_sp_limit(CPUARMState *env)
{
    if (v7m_using_psp(env)) {
        return env->v7m.psplim[env->v7m.secure];
    } else {
        return env->v7m.msplim[env->v7m.secure];
    }
}

/**
 * v7m_cpacr_pass:
 * Return true if the v7M CPACR permits access to the FPU for the specified
 * security state and privilege level.
 */
static inline bool v7m_cpacr_pass(CPUARMState *env,
                                  bool is_secure, bool is_priv)
{
    switch (extract32(env->v7m.cpacr[is_secure], 20, 2)) {
    case 0:
    case 2: /* UNPREDICTABLE: we treat like 0 */
        return false;
    case 1:
        return is_priv;
    case 3:
        return true;
    default:
        g_assert_not_reached();
    }
}

/**
 * aarch32_mode_name(): Return name of the AArch32 CPU mode
 * @psr: Program Status Register indicating CPU mode
 *
 * Returns, for debug logging purposes, a printable representation
 * of the AArch32 CPU mode ("svc", "usr", etc) as indicated by
 * the low bits of the specified PSR.
 */
static inline const char *aarch32_mode_name(uint32_t psr)
{
    static const char cpu_mode_names[16][4] = {
        "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
        "???", "???", "hyp", "und", "???", "???", "???", "sys"
    };

    return cpu_mode_names[psr & 0xf];
}

/**
 * arm_cpu_update_virq: Update CPU_INTERRUPT_VIRQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VIRQ bit in cs->interrupt_request, following
 * a change to either the input VIRQ line from the GIC or the HCR_EL2.VI bit.
 * Must be called with the iothread lock held.
 */
void arm_cpu_update_virq(ARMCPU *cpu);

/**
 * arm_cpu_update_vfiq: Update CPU_INTERRUPT_VFIQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VFIQ bit in cs->interrupt_request, following
 * a change to either the input VFIQ line from the GIC or the HCR_EL2.VF bit.
 * Must be called with the iothread lock held.
 */
void arm_cpu_update_vfiq(ARMCPU *cpu);

/**
 * arm_cpu_update_vserr: Update CPU_INTERRUPT_VSERR bit
 *
 * Update the CPU_INTERRUPT_VSERR bit in cs->interrupt_request,
 * following a change to the HCR_EL2.VSE bit.
 */
void arm_cpu_update_vserr(ARMCPU *cpu);

/**
 * arm_mmu_idx_el:
 * @env: The cpu environment
 * @el: The EL to use.
 *
 * Return the full ARMMMUIdx for the translation regime for EL.
 */
ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el);

/**
 * arm_mmu_idx:
 * @env: The cpu environment
 *
 * Return the full ARMMMUIdx for the current translation regime.
 */
ARMMMUIdx arm_mmu_idx(CPUARMState *env);

/**
 * arm_stage1_mmu_idx:
 * @env: The cpu environment
 *
 * Return the ARMMMUIdx for the stage1 traversal for the current regime.
 */
#ifdef CONFIG_USER_ONLY
static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
{
    return ARMMMUIdx_Stage1_E0;
}
static inline ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return ARMMMUIdx_Stage1_E0;
}
#else
ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx);
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env);
#endif

/**
 * arm_mmu_idx_is_stage1_of_2:
 * @mmu_idx: The ARMMMUIdx to test
 *
 * Return true if @mmu_idx is a NOTLB mmu_idx that is the
 * first stage of a two stage regime.
 */
static inline bool arm_mmu_idx_is_stage1_of_2(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
        return true;
    default:
        return false;
    }
}

static inline uint32_t aarch32_cpsr_valid_mask(uint64_t features,
                                               const ARMISARegisters *id)
{
    uint32_t valid = CPSR_M | CPSR_AIF | CPSR_IL | CPSR_NZCV;

    if ((features >> ARM_FEATURE_V4T) & 1) {
        valid |= CPSR_T;
    }
    if ((features >> ARM_FEATURE_V5) & 1) {
        valid |= CPSR_Q; /* V5TE in reality */
    }
    if ((features >> ARM_FEATURE_V6) & 1) {
        valid |= CPSR_E | CPSR_GE;
    }
    if ((features >> ARM_FEATURE_THUMB2) & 1) {
        valid |= CPSR_IT;
    }
    if (isar_feature_aa32_jazelle(id)) {
        valid |= CPSR_J;
    }
    if (isar_feature_aa32_pan(id)) {
        valid |= CPSR_PAN;
    }
    if (isar_feature_aa32_dit(id)) {
        valid |= CPSR_DIT;
    }
    if (isar_feature_aa32_ssbs(id)) {
        valid |= CPSR_SSBS;
    }

    return valid;
}

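/*
 * Sketch of the intended use (callers such as cpsr_write() apply this
 * mask): writes to CPSR are first masked so that bits the CPU does not
 * implement are ignored rather than set:
 *
 *     uint32_t mask = aarch32_cpsr_valid_mask(env->features,
 *                                             &env_archcpu(env)->isar);
 *     val &= mask;
 */
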
static inline uint32_t aarch64_pstate_valid_mask(const ARMISARegisters *id)
{
    uint32_t valid;

    valid = PSTATE_M | PSTATE_DAIF | PSTATE_IL | PSTATE_SS | PSTATE_NZCV;
    if (isar_feature_aa64_bti(id)) {
        valid |= PSTATE_BTYPE;
    }
    if (isar_feature_aa64_pan(id)) {
        valid |= PSTATE_PAN;
    }
    if (isar_feature_aa64_uao(id)) {
        valid |= PSTATE_UAO;
    }
    if (isar_feature_aa64_dit(id)) {
        valid |= PSTATE_DIT;
    }
    if (isar_feature_aa64_ssbs(id)) {
        valid |= PSTATE_SSBS;
    }
    if (isar_feature_aa64_mte(id)) {
        valid |= PSTATE_TCO;
    }

    return valid;
}

/* Granule size (i.e. page size) */
typedef enum ARMGranuleSize {
    /* Same order as TG0 encoding */
    Gran4K,
    Gran64K,
    Gran16K,
    GranInvalid,
} ARMGranuleSize;

/**
 * arm_granule_bits: Return address size of the granule in bits
 *
 * Return the address size of the granule in bits. This corresponds
 * to the pseudocode TGxGranuleBits().
 */
static inline int arm_granule_bits(ARMGranuleSize gran)
{
    switch (gran) {
    case Gran64K:
        return 16;
    case Gran16K:
        return 14;
    case Gran4K:
        return 12;
    default:
        g_assert_not_reached();
    }
}

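/*
 * For example, arm_granule_bits(Gran16K) == 14: a 16KB granule means
 * the low 14 address bits are the offset within a page.
 */
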
/*
 * Parameters of a given virtual address, as extracted from the
 * translation control register (TCR) for a given regime.
 */
typedef struct ARMVAParameters {
    unsigned tsz    : 8;
    unsigned ps     : 3;
    unsigned sh     : 2;
    unsigned select : 1;
    bool tbi        : 1;
    bool epd        : 1;
    bool hpd        : 1;
    bool tsz_oob    : 1;  /* tsz has been clamped to legal range */
    bool ds         : 1;
    bool ha         : 1;
    bool hd         : 1;
    ARMGranuleSize gran : 2;
} ARMVAParameters;

/**
 * aa64_va_parameters: Return parameters for an AArch64 virtual address
 * @env: CPU
 * @va: virtual address to look up
 * @mmu_idx: determines translation regime to use
 * @data: true if this is a data access
 * @el1_is_aa32: true if we are asking about stage 2 when EL1 is AArch32
 *  (ignored if @mmu_idx is for a stage 1 regime; only affects tsz/tsz_oob)
 */
ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                   ARMMMUIdx mmu_idx, bool data,
                                   bool el1_is_aa32);

int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx);
int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx);
int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx);

/* Determine if allocation tags are available.  */
static inline bool allocation_tag_access_enabled(CPUARMState *env, int el,
                                                 uint64_t sctlr)
{
    if (el < 3
        && arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.scr_el3 & SCR_ATA)) {
        return false;
    }
    if (el < 2 && arm_is_el2_enabled(env)) {
        uint64_t hcr = arm_hcr_el2_eff(env);
        if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
            return false;
        }
    }
    sctlr &= (el == 0 ? SCTLR_ATA0 : SCTLR_ATA);
    return sctlr != 0;
}

#ifndef CONFIG_USER_ONLY

/* Security attributes for an address, as returned by v8m_security_lookup. */
typedef struct V8M_SAttributes {
    bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
    bool ns;
    bool nsc;
    uint8_t sregion;
    bool srvalid;
    uint8_t iregion;
    bool irvalid;
} V8M_SAttributes;

void v8m_security_lookup(CPUARMState *env, uint32_t address,
                         MMUAccessType access_type, ARMMMUIdx mmu_idx,
                         bool secure, V8M_SAttributes *sattrs);

/* Cacheability and shareability attributes for a memory access */
typedef struct ARMCacheAttrs {
    /*
     * If is_s2_format is true, attrs is the S2 descriptor bits [5:2]
     * Otherwise, attrs is the same as the MAIR_EL1 8-bit format
     */
    unsigned int attrs:8;
    unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
    bool is_s2_format:1;
    bool guarded:1;              /* guarded bit of the v8-64 PTE */
} ARMCacheAttrs;

/* Fields that are valid upon success. */
typedef struct GetPhysAddrResult {
    CPUTLBEntryFull f;
    ARMCacheAttrs cacheattrs;
} GetPhysAddrResult;

/**
 * get_phys_addr_with_secure: get the physical address for a virtual address
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @mmu_idx: MMU index indicating required translation regime
 * @is_secure: security state for the access
 * @result: set on translation success.
 * @fi: set to fault info if the translation fails
 *
 * Find the physical address corresponding to the given virtual address,
 * by doing a translation table walk on MMU based systems or using the
 * MPU state on MPU based systems.
 *
 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
 * prot and page_size may not be filled in, and the populated fsr value provides
 * information on why the translation aborted, in the format of a
 * DFSR/IFSR fault register, with the following caveats:
 *  * we honour the short vs long DFSR format differences.
 *  * the WnR bit is never set (the caller must do this).
 *  * for PMSAv5 based systems we don't bother to return a full FSR format
 *    value.
 */
bool get_phys_addr_with_secure(CPUARMState *env, target_ulong address,
                               MMUAccessType access_type,
                               ARMMMUIdx mmu_idx, bool is_secure,
                               GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
    __attribute__((nonnull));

/**
 * get_phys_addr: get the physical address for a virtual address
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @mmu_idx: MMU index indicating required translation regime
 * @result: set on translation success.
 * @fi: set to fault info if the translation fails
 *
 * Similarly, but use the security regime of @mmu_idx.
 */
bool get_phys_addr(CPUARMState *env, target_ulong address,
                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
                   GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
    __attribute__((nonnull));

bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
                       bool is_secure, GetPhysAddrResult *result,
                       ARMMMUFaultInfo *fi, uint32_t *mregion);

void arm_log_exception(CPUState *cs);

#endif /* !CONFIG_USER_ONLY */

/*
 * The log2 of the words in the tag block, for GMID_EL1.BS.
 * This is the maximum, 256 bytes, which manipulates 64 bits of tags.
 */
#define GMID_EL1_BS  6

/*
 * SVE predicates are 1/8 the size of SVE vectors, and cannot use
 * the same simd_desc() encoding due to restrictions on size.
 * Use these instead.
 */
FIELD(PREDDESC, OPRSZ, 0, 6)
FIELD(PREDDESC, ESZ, 6, 2)
FIELD(PREDDESC, DATA, 8, 24)

/*
 * The SVE simd_data field, for memory ops, contains either
 * rd (5 bits) or a shift count (2 bits).
 */
#define SVE_MTEDESC_SHIFT 5

/* Bits within a descriptor passed to the helper_mte_check* functions. */
FIELD(MTEDESC, MIDX,  0, 4)
FIELD(MTEDESC, TBI,   4, 2)
FIELD(MTEDESC, TCMA,  6, 2)
FIELD(MTEDESC, WRITE, 8, 1)
FIELD(MTEDESC, ALIGN, 9, 3)
FIELD(MTEDESC, SIZEM1, 12, SIMD_DATA_BITS - 12)  /* size - 1 */

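/*
 * Sketch of how a caller might pack a descriptor (FIELD_DP32 comes
 * from hw/registerfields.h; the variable names here are hypothetical):
 *
 *     uint32_t desc = 0;
 *     desc = FIELD_DP32(desc, MTEDESC, MIDX, mmu_idx);
 *     desc = FIELD_DP32(desc, MTEDESC, TBI, tbi);
 *     desc = FIELD_DP32(desc, MTEDESC, TCMA, tcma);
 *     desc = FIELD_DP32(desc, MTEDESC, WRITE, 1);
 *     desc = FIELD_DP32(desc, MTEDESC, SIZEM1, size - 1);
 */
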
bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr);
uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra);

static inline int allocation_tag_from_addr(uint64_t ptr)
{
    return extract64(ptr, 56, 4);
}

static inline uint64_t address_with_allocation_tag(uint64_t ptr, int rtag)
{
    return deposit64(ptr, 56, 4, rtag);
}

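/*
 * These two helpers are inverses over the tag nibble (bits [59:56]):
 *
 *     uint64_t p = address_with_allocation_tag(ptr, 0xa);
 *     assert(allocation_tag_from_addr(p) == 0xa);
 */
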
/* Return true if tbi bits mean that the access is checked.  */
static inline bool tbi_check(uint32_t desc, int bit55)
{
    return (desc >> (R_MTEDESC_TBI_SHIFT + bit55)) & 1;
}

/* Return true if tcma bits mean that the access is unchecked.  */
static inline bool tcma_check(uint32_t desc, int bit55, int ptr_tag)
{
    /*
     * We had extracted bit55 and ptr_tag for other reasons, so fold
     * (ptr<59:55> == 00000 || ptr<59:55> == 11111) into a single test.
     */
    bool match = ((ptr_tag + bit55) & 0xf) == 0;
    bool tcma = (desc >> (R_MTEDESC_TCMA_SHIFT + bit55)) & 1;
    return tcma && match;
}

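/*
 * Worked example of the fold above: with bit55 == 1 a match requires
 * ptr_tag == 0xf, and indeed (0xf + 1) & 0xf == 0; with bit55 == 0 it
 * requires ptr_tag == 0x0, and (0x0 + 0) & 0xf == 0. Every other
 * combination leaves the low nibble nonzero.
 */
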
/*
 * For TBI, ideally, we would do nothing.  Proper behaviour on fault is
 * for the tag to be present in the FAR_ELx register.  But for user-only
 * mode, we do not have a TLB with which to implement this, so we must
 * remove the top byte.
 */
static inline uint64_t useronly_clean_ptr(uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    /* TBI0 is known to be enabled, while TBI1 is disabled. */
    ptr &= sextract64(ptr, 0, 56);
#endif
    return ptr;
}

static inline uint64_t useronly_maybe_clean_ptr(uint32_t desc, uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    int64_t clean_ptr = sextract64(ptr, 0, 56);
    if (tbi_check(desc, clean_ptr < 0)) {
        ptr = clean_ptr;
    }
#endif
    return ptr;
}

/* Values for M-profile PSR.ECI for MVE insns */
enum MVEECIState {
    ECI_NONE = 0, /* No completed beats */
    ECI_A0 = 1, /* Completed: A0 */
    ECI_A0A1 = 2, /* Completed: A0, A1 */
    /* 3 is reserved */
    ECI_A0A1A2 = 4, /* Completed: A0, A1, A2 */
    ECI_A0A1A2B0 = 5, /* Completed: A0, A1, A2, B0 */
    /* All other values reserved */
};

/* Definitions for the PMU registers */
#define PMCRN_MASK  0xf800
#define PMCRN_SHIFT 11
#define PMCRLP  0x80
#define PMCRLC  0x40
#define PMCRDP  0x20
#define PMCRX   0x10
#define PMCRD   0x8
#define PMCRC   0x4
#define PMCRP   0x2
#define PMCRE   0x1
/*
 * Mask of PMCR bits writable by guest (not including WO bits like C, P,
 * which can be written as 1 to trigger behaviour but which stay RAZ).
 */
#define PMCR_WRITABLE_MASK (PMCRLP | PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)

#define PMXEVTYPER_P          0x80000000
#define PMXEVTYPER_U          0x40000000
#define PMXEVTYPER_NSK        0x20000000
#define PMXEVTYPER_NSU        0x10000000
#define PMXEVTYPER_NSH        0x08000000
#define PMXEVTYPER_M          0x04000000
#define PMXEVTYPER_MT         0x02000000
#define PMXEVTYPER_EVTCOUNT   0x0000ffff
#define PMXEVTYPER_MASK       (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
                               PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
                               PMXEVTYPER_M | PMXEVTYPER_MT | \
                               PMXEVTYPER_EVTCOUNT)

#define PMCCFILTR             0xf8000000
#define PMCCFILTR_M           PMXEVTYPER_M
#define PMCCFILTR_EL0         (PMCCFILTR | PMCCFILTR_M)

static inline uint32_t pmu_num_counters(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);

    return (cpu->isar.reset_pmcr_el0 & PMCRN_MASK) >> PMCRN_SHIFT;
}

/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
static inline uint64_t pmu_counter_mask(CPUARMState *env)
{
    return (1ULL << 31) | ((1ULL << pmu_num_counters(env)) - 1);
}

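/*
 * E.g. with PMCR.N == 4, pmu_counter_mask() returns 0x8000000f:
 * bit 31 covers the cycle counter C and bits [3:0] the four event
 * counters.
 */
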
#ifdef TARGET_AARCH64
int arm_gen_dynamic_svereg_xml(CPUState *cpu, int base_reg);
int aarch64_gdb_get_sve_reg(CPUARMState *env, GByteArray *buf, int reg);
int aarch64_gdb_set_sve_reg(CPUARMState *env, uint8_t *buf, int reg);
int aarch64_gdb_get_fpu_reg(CPUARMState *env, GByteArray *buf, int reg);
int aarch64_gdb_set_fpu_reg(CPUARMState *env, uint8_t *buf, int reg);
int aarch64_gdb_get_pauth_reg(CPUARMState *env, GByteArray *buf, int reg);
int aarch64_gdb_set_pauth_reg(CPUARMState *env, uint8_t *buf, int reg);
void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp);
void aarch64_max_tcg_initfn(Object *obj);
void aarch64_add_pauth_properties(Object *obj);
void aarch64_add_sve_properties(Object *obj);
void aarch64_add_sme_properties(Object *obj);
#endif

/* Read the CONTROL register as the MRS instruction would. */
uint32_t arm_v7m_mrs_control(CPUARMState *env, uint32_t secure);

/*
 * Return a pointer to the location where we currently store the
 * stack pointer for the requested security state and thread mode.
 * This pointer will become invalid if the CPU state is updated
 * such that the stack pointers are switched around (eg changing
 * the SPSEL control bit).
 */
uint32_t *arm_v7m_get_sp_ptr(CPUARMState *env, bool secure,
                             bool threadmode, bool spsel);

bool el_is_in_host(CPUARMState *env, int el);

void aa32_max_features(ARMCPU *cpu);
int exception_target_el(CPUARMState *env);
bool arm_singlestep_active(CPUARMState *env);
bool arm_generate_debug_exceptions(CPUARMState *env);

/**
 * pauth_ptr_mask:
 * @param: parameters defining the MMU setup
 *
 * Return a mask of the address bits that contain the authentication code,
 * given the MMU config defined by @param.
 */
static inline uint64_t pauth_ptr_mask(ARMVAParameters param)
{
    int bot_pac_bit = 64 - param.tsz;
    int top_pac_bit = 64 - 8 * param.tbi;

    return MAKE_64BIT_MASK(bot_pac_bit, top_pac_bit - bot_pac_bit);
}

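/*
 * Example: with param.tsz == 16 and param.tbi true, bot_pac_bit is 48
 * and top_pac_bit is 56, so the PAC occupies bits [55:48] and the
 * returned mask is 0x00ff000000000000.
 */
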
/* Add the cpreg definitions for debug related system registers */
void define_debug_regs(ARMCPU *cpu);

/* Effective value of MDCR_EL2 */
static inline uint64_t arm_mdcr_el2_eff(CPUARMState *env)
{
    return arm_is_el2_enabled(env) ? env->cp15.mdcr_el2 : 0;
}

/* Powers of 2 for sve_vq_map et al. */
#define SVE_VQ_POW2_MAP                                 \
    ((1 << (1 - 1)) | (1 << (2 - 1)) |                  \
     (1 << (4 - 1)) | (1 << (8 - 1)) | (1 << (16 - 1)))

/*
 * Return true if it is possible to take a fine-grained-trap to EL2.
 */
static inline bool arm_fgt_active(CPUARMState *env, int el)
{
    /*
     * The Arm ARM only requires the "{E2H,TGE} != {1,1}" test for traps
     * that can affect EL0, but it is harmless to do the test also for
     * traps on registers that are only accessible at EL1 because if the test
     * returns true then we can't be executing at EL1 anyway.
     * FGT traps only happen when EL2 is enabled and EL1 is AArch64;
     * traps from AArch32 only happen in the case where EL0 is AArch32.
     */
    return cpu_isar_feature(aa64_fgt, env_archcpu(env)) &&
        el < 2 && arm_is_el2_enabled(env) &&
        arm_el_is_aa64(env, 1) &&
        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE) &&
        (!arm_feature(env, ARM_FEATURE_EL3) || (env->cp15.scr_el3 & SCR_FGTEN));
}

void assert_hflags_rebuild_correctly(CPUARMState *env);

/*
 * Although the ARM implementation of hardware assisted debugging
 * allows for different breakpoints per-core, the current GDB
 * interface treats them as a global pool of registers (which seems to
 * be the case for x86, ppc and s390). As a result we store one copy
 * of registers which is used for all active cores.
 *
 * Write access is serialised by virtue of the GDB protocol which
 * updates things. Read access (i.e. when the values are copied to the
 * vCPU) is also gated by GDB's run control.
 *
 * This is not unreasonable as most of the time debugging kernels you
 * never know which core will eventually execute your function.
 */

typedef struct {
    uint64_t bcr;
    uint64_t bvr;
} HWBreakpoint;

/*
 * The watchpoint registers can cover more area than the requested
 * watchpoint so we need to store the additional information
 * somewhere. We also need to supply a CPUWatchpoint to the GDB stub
 * when the watchpoint is hit.
 */
typedef struct {
    uint64_t wcr;
    uint64_t wvr;
    CPUWatchpoint details;
} HWWatchpoint;

/* Maximum and current break/watch point counts */
extern int max_hw_bps, max_hw_wps;
extern GArray *hw_breakpoints, *hw_watchpoints;

#define cur_hw_wps      (hw_watchpoints->len)
#define cur_hw_bps      (hw_breakpoints->len)
#define get_hw_bp(i)    (&g_array_index(hw_breakpoints, HWBreakpoint, i))
#define get_hw_wp(i)    (&g_array_index(hw_watchpoints, HWWatchpoint, i))

bool find_hw_breakpoint(CPUState *cpu, target_ulong pc);
int insert_hw_breakpoint(target_ulong pc);
int delete_hw_breakpoint(target_ulong pc);

bool check_watchpoint_in_range(int i, target_ulong addr);
CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, target_ulong addr);
int insert_hw_watchpoint(target_ulong addr, target_ulong len, int type);
int delete_hw_watchpoint(target_ulong addr, target_ulong len, int type);
#endif
