qemu/target/arm/m_helper.c
/*
 * ARM generic helpers.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "target/arm/idau.h"
#include "trace.h"
#include "cpu.h"
#include "internals.h"
#include "exec/gdbstub.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "qemu/main-loop.h"
#include "qemu/bitops.h"
#include "qemu/crc32c.h"
#include "qemu/qemu-print.h"
#include "exec/exec-all.h"
#include <zlib.h> /* For crc32 */
#include "semihosting/semihost.h"
#include "sysemu/cpus.h"
#include "sysemu/kvm.h"
#include "qemu/range.h"
#include "qapi/qapi-commands-machine-target.h"
#include "qapi/error.h"
#include "qemu/guest-random.h"
#ifdef CONFIG_TCG
#include "arm_ldst.h"
#include "exec/cpu_ldst.h"
#include "semihosting/common-semi.h"
#endif

static void v7m_msr_xpsr(CPUARMState *env, uint32_t mask,
                         uint32_t reg, uint32_t val)
{
    /* Only APSR is actually writable */
    if (!(reg & 4)) {
        uint32_t apsrmask = 0;

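        /*
         * Mask bit 3 enables the NZCVQ bits and mask bit 2 the GE
         * bits, corresponding to the MSR APSR_nzcvq and MSR APSR_g
         * forms of the instruction.
         */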
        if (mask & 8) {
            apsrmask |= XPSR_NZCV | XPSR_Q;
        }
        if ((mask & 4) && arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
            apsrmask |= XPSR_GE;
        }
        xpsr_write(env, val, apsrmask);
    }
}

static uint32_t v7m_mrs_xpsr(CPUARMState *env, uint32_t reg, unsigned el)
{
    uint32_t mask = 0;

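    /*
     * reg here is SYSm<2:0>: bit 0 adds IPSR, bit 1 adds EPSR (which
     * reads as zero anyway) and bit 2 drops APSR, giving APSR (0),
     * IAPSR (1), EAPSR (2), XPSR (3), IPSR (5), EPSR (6), IEPSR (7).
     */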
    if ((reg & 1) && el) {
        mask |= XPSR_EXCP; /* IPSR (unpriv. reads as zero) */
    }
    if (!(reg & 4)) {
        mask |= XPSR_NZCV | XPSR_Q; /* APSR */
        if (arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
            mask |= XPSR_GE;
        }
    }
    /* EPSR reads as zero */
    return xpsr_read(env) & mask;
}

static uint32_t v7m_mrs_control(CPUARMState *env, uint32_t secure)
{
    uint32_t value = env->v7m.control[secure];

    if (!secure) {
        /* SFPA is RAZ/WI from NS; FPCA is stored in the M_REG_S bank */
        value |= env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK;
    }
    return value;
}

#ifdef CONFIG_USER_ONLY

void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
{
    uint32_t mask = extract32(maskreg, 8, 4);
    uint32_t reg = extract32(maskreg, 0, 8);

    switch (reg) {
    case 0 ... 7: /* xPSR sub-fields */
        v7m_msr_xpsr(env, mask, reg, val);
        break;
    case 20: /* CONTROL */
        /* There are no sub-fields that are actually writable from EL0. */
        break;
    default:
        /* Unprivileged writes to other registers are ignored */
        break;
    }
}

uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    switch (reg) {
    case 0 ... 7: /* xPSR sub-fields */
        return v7m_mrs_xpsr(env, reg, 0);
    case 20: /* CONTROL */
        return v7m_mrs_control(env, 0);
    default:
        /* Unprivileged reads others as zero.  */
        return 0;
    }
}

void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
{
    /*
     * The TT instructions can be used by unprivileged code, but in
     * user-only emulation we don't have the MPU.
     * Luckily since we know we are NonSecure unprivileged (and that in
     * turn means that the A flag wasn't specified), all the bits in the
     * register must be zero:
     *  IREGION: 0 because IRVALID is 0
     *  IRVALID: 0 because NS
     *  S: 0 because NS
     *  NSRW: 0 because NS
     *  NSR: 0 because NS
     *  RW: 0 because unpriv and A flag not set
     *  R: 0 because unpriv and A flag not set
     *  SRVALID: 0 because NS
     *  MRVALID: 0 because unpriv and A flag not set
     *  SREGION: 0 because SRVALID is 0
     *  MREGION: 0 because MRVALID is 0
     */
    return 0;
}

#else

/*
 * What kind of stack write are we doing? This affects how exceptions
 * generated during the stacking are treated.
 */
typedef enum StackingMode {
    STACK_NORMAL,
    STACK_IGNFAULTS,
    STACK_LAZYFP,
} StackingMode;

static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value,
                            ARMMMUIdx mmu_idx, StackingMode mode)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult txres;
    target_ulong page_size;
    hwaddr physaddr;
    int prot;
    ARMMMUFaultInfo fi = {};
    ARMCacheAttrs cacheattrs = {};
    bool secure = mmu_idx & ARM_MMU_IDX_M_S;
    int exc;
    bool exc_secure;

    if (get_phys_addr(env, addr, MMU_DATA_STORE, mmu_idx, &physaddr,
                      &attrs, &prot, &page_size, &fi, &cacheattrs)) {
        /* MPU/SAU lookup failed */
        if (fi.type == ARMFault_QEMU_SFault) {
            if (mode == STACK_LAZYFP) {
                qemu_log_mask(CPU_LOG_INT,
                              "...SecureFault with SFSR.LSPERR "
                              "during lazy stacking\n");
                env->v7m.sfsr |= R_V7M_SFSR_LSPERR_MASK;
            } else {
                qemu_log_mask(CPU_LOG_INT,
                              "...SecureFault with SFSR.AUVIOL "
                              "during stacking\n");
                env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
            }
            env->v7m.sfsr |= R_V7M_SFSR_SFARVALID_MASK;
            env->v7m.sfar = addr;
            exc = ARMV7M_EXCP_SECURE;
            exc_secure = false;
        } else {
            if (mode == STACK_LAZYFP) {
                qemu_log_mask(CPU_LOG_INT,
                              "...MemManageFault with CFSR.MLSPERR\n");
                env->v7m.cfsr[secure] |= R_V7M_CFSR_MLSPERR_MASK;
            } else {
                qemu_log_mask(CPU_LOG_INT,
                              "...MemManageFault with CFSR.MSTKERR\n");
                env->v7m.cfsr[secure] |= R_V7M_CFSR_MSTKERR_MASK;
            }
            exc = ARMV7M_EXCP_MEM;
            exc_secure = secure;
        }
        goto pend_fault;
    }
    address_space_stl_le(arm_addressspace(cs, attrs), physaddr, value,
                         attrs, &txres);
    if (txres != MEMTX_OK) {
        /* BusFault trying to write the data */
        if (mode == STACK_LAZYFP) {
            qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.LSPERR\n");
            env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_LSPERR_MASK;
        } else {
            qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.STKERR\n");
            env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_STKERR_MASK;
        }
        exc = ARMV7M_EXCP_BUS;
        exc_secure = false;
        goto pend_fault;
    }
    return true;

pend_fault:
    /*
     * By pending the exception at this point we are making
     * the IMPDEF choice "overridden exceptions pended" (see the
     * MergeExcInfo() pseudocode). The other choice would be to not
     * pend them now and then make a choice about which to throw away
     * later if we have two derived exceptions.
     * The only case when we must not pend the exception but instead
     * throw it away is if we are doing the push of the callee registers
     * and we've already generated a derived exception (this is indicated
     * by the caller passing STACK_IGNFAULTS). Even in this case we will
     * still update the fault status registers.
     */
    switch (mode) {
    case STACK_NORMAL:
        armv7m_nvic_set_pending_derived(env->nvic, exc, exc_secure);
        break;
    case STACK_LAZYFP:
        armv7m_nvic_set_pending_lazyfp(env->nvic, exc, exc_secure);
        break;
    case STACK_IGNFAULTS:
        break;
    }
    return false;
}

static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr,
                           ARMMMUIdx mmu_idx)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult txres;
    target_ulong page_size;
    hwaddr physaddr;
    int prot;
    ARMMMUFaultInfo fi = {};
    ARMCacheAttrs cacheattrs = {};
    bool secure = mmu_idx & ARM_MMU_IDX_M_S;
    int exc;
    bool exc_secure;
    uint32_t value;

    if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &physaddr,
                      &attrs, &prot, &page_size, &fi, &cacheattrs)) {
        /* MPU/SAU lookup failed */
        if (fi.type == ARMFault_QEMU_SFault) {
            qemu_log_mask(CPU_LOG_INT,
                          "...SecureFault with SFSR.AUVIOL during unstack\n");
            env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
            env->v7m.sfar = addr;
            exc = ARMV7M_EXCP_SECURE;
            exc_secure = false;
        } else {
            qemu_log_mask(CPU_LOG_INT,
                          "...MemManageFault with CFSR.MUNSTKERR\n");
            env->v7m.cfsr[secure] |= R_V7M_CFSR_MUNSTKERR_MASK;
            exc = ARMV7M_EXCP_MEM;
            exc_secure = secure;
        }
        goto pend_fault;
    }

    value = address_space_ldl(arm_addressspace(cs, attrs), physaddr,
                              attrs, &txres);
    if (txres != MEMTX_OK) {
        /* BusFault trying to read the data */
        qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.UNSTKERR\n");
        env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_UNSTKERR_MASK;
        exc = ARMV7M_EXCP_BUS;
        exc_secure = false;
        goto pend_fault;
    }

    *dest = value;
    return true;

pend_fault:
    /*
     * By pending the exception at this point we are making
     * the IMPDEF choice "overridden exceptions pended" (see the
     * MergeExcInfo() pseudocode). The other choice would be to not
     * pend them now and then make a choice about which to throw away
     * later if we have two derived exceptions.
     */
    armv7m_nvic_set_pending(env->nvic, exc, exc_secure);
    return false;
}

void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
{
    /*
     * Preserve FP state (because LSPACT was set and we are about
     * to execute an FP instruction). This corresponds to the
     * PreserveFPState() pseudocode.
     * We may throw an exception if the stacking fails.
     */
    ARMCPU *cpu = env_archcpu(env);
    bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
    bool negpri = !(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_HFRDY_MASK);
    bool is_priv = !(env->v7m.fpccr[is_secure] & R_V7M_FPCCR_USER_MASK);
    bool splimviol = env->v7m.fpccr[is_secure] & R_V7M_FPCCR_SPLIMVIOL_MASK;
    uint32_t fpcar = env->v7m.fpcar[is_secure];
    bool stacked_ok = true;
    bool ts = is_secure && (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK);
    bool take_exception;

    /* Take the iothread lock as we are going to touch the NVIC */
    qemu_mutex_lock_iothread();

    /* Check the background context had access to the FPU */
    if (!v7m_cpacr_pass(env, is_secure, is_priv)) {
        armv7m_nvic_set_pending_lazyfp(env->nvic, ARMV7M_EXCP_USAGE, is_secure);
        env->v7m.cfsr[is_secure] |= R_V7M_CFSR_NOCP_MASK;
        stacked_ok = false;
    } else if (!is_secure && !extract32(env->v7m.nsacr, 10, 1)) {
        armv7m_nvic_set_pending_lazyfp(env->nvic, ARMV7M_EXCP_USAGE, M_REG_S);
        env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
        stacked_ok = false;
    }

    if (!splimviol && stacked_ok) {
        /* We only stack if the stack limit wasn't violated */
        int i;
        ARMMMUIdx mmu_idx;

        mmu_idx = arm_v7m_mmu_idx_all(env, is_secure, is_priv, negpri);
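        /*
         * Frame layout written below: s0..s15 at fpcar + 0x0..0x3c,
         * FPSCR at fpcar + 0x40, and (when TS is set) s16..s31 at
         * fpcar + 0x48..0x84; the word at fpcar + 0x44 is skipped.
         */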
        for (i = 0; i < (ts ? 32 : 16); i += 2) {
            uint64_t dn = *aa32_vfp_dreg(env, i / 2);
            uint32_t faddr = fpcar + 4 * i;
            uint32_t slo = extract64(dn, 0, 32);
            uint32_t shi = extract64(dn, 32, 32);

            if (i >= 16) {
                faddr += 8; /* skip the slot for the FPSCR */
            }
            stacked_ok = stacked_ok &&
                v7m_stack_write(cpu, faddr, slo, mmu_idx, STACK_LAZYFP) &&
                v7m_stack_write(cpu, faddr + 4, shi, mmu_idx, STACK_LAZYFP);
        }

        stacked_ok = stacked_ok &&
            v7m_stack_write(cpu, fpcar + 0x40,
                            vfp_get_fpscr(env), mmu_idx, STACK_LAZYFP);
    }

    /*
     * We definitely pended an exception, but it's possible that it
     * might not be able to be taken now. If its priority permits us
     * to take it now, then we must not update the LSPACT or FP regs,
     * but instead jump out to take the exception immediately.
     * If it's just pending and won't be taken until the current
     * handler exits, then we do update LSPACT and the FP regs.
     */
    take_exception = !stacked_ok &&
        armv7m_nvic_can_take_pending_exception(env->nvic);

    qemu_mutex_unlock_iothread();

    if (take_exception) {
        raise_exception_ra(env, EXCP_LAZYFP, 0, 1, GETPC());
    }

    env->v7m.fpccr[is_secure] &= ~R_V7M_FPCCR_LSPACT_MASK;

    if (ts) {
        /* Clear s0 to s31 and the FPSCR */
        int i;

        for (i = 0; i < 32; i += 2) {
            *aa32_vfp_dreg(env, i / 2) = 0;
        }
        vfp_set_fpscr(env, 0);
    }
    /*
     * Otherwise s0 to s15 and FPSCR are UNKNOWN; we choose to leave them
     * unchanged.
     */
}

/*
 * Write to v7M CONTROL.SPSEL bit for the specified security bank.
 * This may change the current stack pointer between Main and Process
 * stack pointers if it is done for the CONTROL register for the current
 * security state.
 */
static void write_v7m_control_spsel_for_secstate(CPUARMState *env,
                                                 bool new_spsel,
                                                 bool secstate)
{
    bool old_is_psp = v7m_using_psp(env);

    env->v7m.control[secstate] =
        deposit32(env->v7m.control[secstate],
                  R_V7M_CONTROL_SPSEL_SHIFT,
                  R_V7M_CONTROL_SPSEL_LENGTH, new_spsel);

    if (secstate == env->v7m.secure) {
        bool new_is_psp = v7m_using_psp(env);
        uint32_t tmp;

        if (old_is_psp != new_is_psp) {
            tmp = env->v7m.other_sp;
            env->v7m.other_sp = env->regs[13];
            env->regs[13] = tmp;
        }
    }
}

/*
 * Write to v7M CONTROL.SPSEL bit. This may change the current
 * stack pointer between Main and Process stack pointers.
 */
static void write_v7m_control_spsel(CPUARMState *env, bool new_spsel)
{
    write_v7m_control_spsel_for_secstate(env, new_spsel, env->v7m.secure);
}

void write_v7m_exception(CPUARMState *env, uint32_t new_exc)
{
    /*
     * Write a new value to v7m.exception, thus transitioning into or out
     * of Handler mode; this may result in a change of active stack pointer.
     */
    bool new_is_psp, old_is_psp = v7m_using_psp(env);
    uint32_t tmp;

    env->v7m.exception = new_exc;

    new_is_psp = v7m_using_psp(env);

    if (old_is_psp != new_is_psp) {
        tmp = env->v7m.other_sp;
        env->v7m.other_sp = env->regs[13];
        env->regs[13] = tmp;
    }
}

/* Switch M profile security state between NS and S */
static void switch_v7m_security_state(CPUARMState *env, bool new_secstate)
{
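    /*
     * The four banked SPs (MSP_S, PSP_S, MSP_NS, PSP_NS) are spread
     * across regs[13] (the active SP), v7m.other_sp (the inactive SP
     * of the current security state) and v7m.other_ss_{msp,psp} (the
     * two SPs of the inactive security state), so switching security
     * state means shuffling all four into their new slots.
     */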
    uint32_t new_ss_msp, new_ss_psp;

    if (env->v7m.secure == new_secstate) {
        return;
    }

    /*
     * All the banked state is accessed by looking at env->v7m.secure
     * except for the stack pointer; rearrange the SP appropriately.
     */
    new_ss_msp = env->v7m.other_ss_msp;
    new_ss_psp = env->v7m.other_ss_psp;

    if (v7m_using_psp(env)) {
        env->v7m.other_ss_psp = env->regs[13];
        env->v7m.other_ss_msp = env->v7m.other_sp;
    } else {
        env->v7m.other_ss_msp = env->regs[13];
        env->v7m.other_ss_psp = env->v7m.other_sp;
    }

    env->v7m.secure = new_secstate;

    if (v7m_using_psp(env)) {
        env->regs[13] = new_ss_psp;
        env->v7m.other_sp = new_ss_msp;
    } else {
        env->regs[13] = new_ss_msp;
        env->v7m.other_sp = new_ss_psp;
    }
}

void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
{
    /*
     * Handle v7M BXNS:
     *  - if the return value is a magic value, do exception return (like BX)
     *  - otherwise bit 0 of the return value is the target security state
     */
    uint32_t min_magic;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        /* Covers FNC_RETURN and EXC_RETURN magic */
        min_magic = FNC_RETURN_MIN_MAGIC;
    } else {
        /* EXC_RETURN magic only */
        min_magic = EXC_RETURN_MIN_MAGIC;
    }
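    /*
     * (With FNC_RETURN_MIN_MAGIC of 0xfefffffe and EXC_RETURN_MIN_MAGIC
     * of 0xff000000, this treats anything from 0xfefffffe upwards as
     * magic when the security extension is present.)
     */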

    if (dest >= min_magic) {
        /*
         * This is an exception return magic value; put it where
         * do_v7m_exception_exit() expects and raise EXCEPTION_EXIT.
         * Note that if we ever add gen_ss_advance() singlestep support to
         * M profile this should count as an "instruction execution complete"
         * event (compare gen_bx_excret_final_code()).
         */
        env->regs[15] = dest & ~1;
        env->thumb = dest & 1;
        HELPER(exception_internal)(env, EXCP_EXCEPTION_EXIT);
        /* notreached */
    }

    /* translate.c should have made BXNS UNDEF unless we're secure */
    assert(env->v7m.secure);

    if (!(dest & 1)) {
        env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
    }
    switch_v7m_security_state(env, dest & 1);
    env->thumb = 1;
    env->regs[15] = dest & ~1;
    arm_rebuild_hflags(env);
}

void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
{
    /*
     * Handle v7M BLXNS:
     *  - bit 0 of the destination address is the target security state
     */

    /* At this point regs[15] is the address just after the BLXNS */
    uint32_t nextinst = env->regs[15] | 1;
    uint32_t sp = env->regs[13] - 8;
    uint32_t saved_psr;

    /* translate.c will have made BLXNS UNDEF unless we're secure */
    assert(env->v7m.secure);

    if (dest & 1) {
        /*
         * Target is Secure, so this is just a normal BLX,
         * except that the low bit doesn't indicate Thumb/not.
         */
        env->regs[14] = nextinst;
        env->thumb = 1;
        env->regs[15] = dest & ~1;
        return;
    }

    /* Target is non-secure: first push a stack frame */
    if (!QEMU_IS_ALIGNED(sp, 8)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "BLXNS with misaligned SP is UNPREDICTABLE\n");
    }

    if (sp < v7m_sp_limit(env)) {
        raise_exception(env, EXCP_STKOF, 0, 1);
    }

    saved_psr = env->v7m.exception;
    if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK) {
        saved_psr |= XPSR_SFPA;
    }

    /* Note that these stores can throw exceptions on MPU faults */
    cpu_stl_data_ra(env, sp, nextinst, GETPC());
    cpu_stl_data_ra(env, sp + 4, saved_psr, GETPC());

    env->regs[13] = sp;
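    /* Set LR to the FNC_RETURN magic value (bit 0 is set) */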
    env->regs[14] = 0xfeffffff;
    if (arm_v7m_is_handler_mode(env)) {
        /*
         * Write a dummy value to IPSR, to avoid leaking the current secure
         * exception number to non-secure code. This is guaranteed not
         * to cause write_v7m_exception() to actually change stacks.
         */
        write_v7m_exception(env, 1);
    }
    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
    switch_v7m_security_state(env, 0);
    env->thumb = 1;
    env->regs[15] = dest;
    arm_rebuild_hflags(env);
}

static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode,
                                bool spsel)
{
    /*
     * Return a pointer to the location where we currently store the
     * stack pointer for the requested security state and thread mode.
     * This pointer will become invalid if the CPU state is updated
     * such that the stack pointers are switched around (eg changing
     * the SPSEL control bit).
     * Compare the v8M ARM ARM pseudocode LookUpSP_with_security_mode().
     * Unlike that pseudocode, we require the caller to pass us in the
     * SPSEL control bit value; this is because we also use this
     * function in handling of pushing of the callee-saves registers
     * part of the v8M stack frame (pseudocode PushCalleeStack()),
     * and in the tailchain codepath the SPSEL bit comes from the exception
     * return magic LR value from the previous exception. The pseudocode
     * opencodes the stack-selection in PushCalleeStack(), but we prefer
     * to make this utility function generic enough to do the job.
     */
    bool want_psp = threadmode && spsel;

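    /*
     * The selection below reduces to:
     *   requested state current,  want_psp == active SP -> regs[13]
     *   requested state current,  otherwise             -> other_sp
     *   requested state inactive, want_psp              -> other_ss_psp
     *   requested state inactive, !want_psp             -> other_ss_msp
     */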
    if (secure == env->v7m.secure) {
        if (want_psp == v7m_using_psp(env)) {
            return &env->regs[13];
        } else {
            return &env->v7m.other_sp;
        }
    } else {
        if (want_psp) {
            return &env->v7m.other_ss_psp;
        } else {
            return &env->v7m.other_ss_msp;
        }
    }
}

static bool arm_v7m_load_vector(ARMCPU *cpu, int exc, bool targets_secure,
                                uint32_t *pvec)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxResult result;
    uint32_t addr = env->v7m.vecbase[targets_secure] + exc * 4;
    uint32_t vector_entry;
    MemTxAttrs attrs = {};
    ARMMMUIdx mmu_idx;
    bool exc_secure;

    mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targets_secure, true);

    /*
     * We don't do a get_phys_addr() here because the rules for vector
     * loads are special: they always use the default memory map, and
     * the default memory map permits reads from all addresses.
     * Since there's no easy way to pass through to pmsav8_mpu_lookup()
     * that we want this special case which would always say "yes",
     * we just do the SAU lookup here followed by a direct physical load.
     */
    attrs.secure = targets_secure;
    attrs.user = false;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        V8M_SAttributes sattrs = {};

        v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
        if (sattrs.ns) {
            attrs.secure = false;
        } else if (!targets_secure) {
            /*
             * NS access to S memory: the underlying exception which we escalate
             * to HardFault is SecureFault, which always targets Secure.
             */
            exc_secure = true;
            goto load_fail;
        }
    }

    vector_entry = address_space_ldl(arm_addressspace(cs, attrs), addr,
                                     attrs, &result);
    if (result != MEMTX_OK) {
        /*
         * Underlying exception is BusFault: its target security state
         * depends on BFHFNMINS.
         */
        exc_secure = !(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK);
        goto load_fail;
    }
    *pvec = vector_entry;
    return true;

load_fail:
    /*
     * All vector table fetch fails are reported as HardFault, with
     * HFSR.VECTTBL and .FORCED set. (FORCED is set because
     * technically the underlying exception is a SecureFault or BusFault
     * that is escalated to HardFault.) This is a terminal exception,
     * so we will either take the HardFault immediately or else enter
     * lockup (the latter case is handled in armv7m_nvic_set_pending_derived()).
     * The HardFault is Secure if BFHFNMINS is 0 (meaning that all HFs are
     * secure); otherwise it targets the same security state as the
     * underlying exception.
     * In v8.1M HardFaults from vector table fetch fails don't set FORCED.
     */
    if (!(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
        exc_secure = true;
    }
    env->v7m.hfsr |= R_V7M_HFSR_VECTTBL_MASK;
    if (!arm_feature(env, ARM_FEATURE_V8_1M)) {
        env->v7m.hfsr |= R_V7M_HFSR_FORCED_MASK;
    }
    armv7m_nvic_set_pending_derived(env->nvic, ARMV7M_EXCP_HARD, exc_secure);
    return false;
}

static uint32_t v7m_integrity_sig(CPUARMState *env, uint32_t lr)
{
    /*
     * Return the integrity signature value for the callee-saves
     * stack frame section. @lr is the exception return payload/LR value
     * whose FType bit forms bit 0 of the signature if FP is present.
     */
    uint32_t sig = 0xfefa125a;

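    /*
     * That is, the signature is 0xfefa125a when the callee-saves frame
     * goes with FP state (FPU present and FType 0), 0xfefa125b otherwise.
     */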
    if (!cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))
        || (lr & R_V7M_EXCRET_FTYPE_MASK)) {
        sig |= 1;
    }
    return sig;
}

static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
                                  bool ignore_faults)
{
    /*
     * For v8M, push the callee-saves register part of the stack frame.
     * Compare the v8M pseudocode PushCalleeStack().
     * In the tailchaining case this may not be the current stack.
     */
    CPUARMState *env = &cpu->env;
    uint32_t *frame_sp_p;
    uint32_t frameptr;
    ARMMMUIdx mmu_idx;
    bool stacked_ok;
    uint32_t limit;
    bool want_psp;
    uint32_t sig;
    StackingMode smode = ignore_faults ? STACK_IGNFAULTS : STACK_NORMAL;

    if (dotailchain) {
        bool mode = lr & R_V7M_EXCRET_MODE_MASK;
        bool priv = !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_NPRIV_MASK) ||
            !mode;

        mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, M_REG_S, priv);
        frame_sp_p = get_v7m_sp_ptr(env, M_REG_S, mode,
                                    lr & R_V7M_EXCRET_SPSEL_MASK);
        want_psp = mode && (lr & R_V7M_EXCRET_SPSEL_MASK);
        if (want_psp) {
            limit = env->v7m.psplim[M_REG_S];
        } else {
            limit = env->v7m.msplim[M_REG_S];
        }
    } else {
        mmu_idx = arm_mmu_idx(env);
        frame_sp_p = &env->regs[13];
        limit = v7m_sp_limit(env);
    }

    frameptr = *frame_sp_p - 0x28;
    if (frameptr < limit) {
        /*
         * Stack limit failure: set SP to the limit value, and generate
         * STKOF UsageFault. Stack pushes below the limit must not be
         * performed. It is IMPDEF whether pushes above the limit are
         * performed; we choose not to.
         */
        qemu_log_mask(CPU_LOG_INT,
                      "...STKOF during callee-saves register stacking\n");
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                env->v7m.secure);
        *frame_sp_p = limit;
        return true;
    }

    /*
     * Write as much of the stack frame as we can. A write failure may
     * cause us to pend a derived exception.
     */
    sig = v7m_integrity_sig(env, lr);
    stacked_ok =
        v7m_stack_write(cpu, frameptr, sig, mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x8, env->regs[4], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0xc, env->regs[5], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x10, env->regs[6], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x14, env->regs[7], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x18, env->regs[8], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x1c, env->regs[9], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x20, env->regs[10], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x24, env->regs[11], mmu_idx, smode);
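    /*
     * The resulting 0x28-byte callee-saves frame, low to high:
     * [0x0] integrity signature, [0x4] reserved (not written),
     * [0x8..0x24] r4-r11.
     */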

    /* Update SP regardless of whether any of the stack accesses failed. */
    *frame_sp_p = frameptr;

    return !stacked_ok;
}

static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
                                bool ignore_stackfaults)
{
    /*
     * Do the "take the exception" parts of exception entry,
     * but not the pushing of state to the stack. This is
     * similar to the pseudocode ExceptionTaken() function.
     */
    CPUARMState *env = &cpu->env;
    uint32_t addr;
    bool targets_secure;
    int exc;
    bool push_failed = false;

    armv7m_nvic_get_pending_irq_info(env->nvic, &exc, &targets_secure);
    qemu_log_mask(CPU_LOG_INT, "...taking pending %s exception %d\n",
                  targets_secure ? "secure" : "nonsecure", exc);

    if (dotailchain) {
        /* Sanitize LR FType and PREFIX bits */
        if (!cpu_isar_feature(aa32_vfp_simd, cpu)) {
            lr |= R_V7M_EXCRET_FTYPE_MASK;
        }
        lr = deposit32(lr, 24, 8, 0xff);
    }

    if (arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
            (lr & R_V7M_EXCRET_S_MASK)) {
            /*
             * The background code (the owner of the registers in the
             * exception frame) is Secure. This means it may either already
             * have or now needs to push callee-saves registers.
             */
            if (targets_secure) {
                if (dotailchain && !(lr & R_V7M_EXCRET_ES_MASK)) {
                    /*
                     * We took an exception from Secure to NonSecure
                     * (which means the callee-saved registers got stacked)
                     * and are now tailchaining to a Secure exception.
                     * Clear DCRS so eventual return from this Secure
                     * exception unstacks the callee-saved registers.
                     */
                    lr &= ~R_V7M_EXCRET_DCRS_MASK;
                }
            } else {
                /*
                 * We're going to a non-secure exception; push the
                 * callee-saves registers to the stack now, if they're
                 * not already saved.
                 */
                if (lr & R_V7M_EXCRET_DCRS_MASK &&
                    !(dotailchain && !(lr & R_V7M_EXCRET_ES_MASK))) {
                    push_failed = v7m_push_callee_stack(cpu, lr, dotailchain,
                                                        ignore_stackfaults);
                }
                lr |= R_V7M_EXCRET_DCRS_MASK;
            }
        }

        lr &= ~R_V7M_EXCRET_ES_MASK;
        if (targets_secure || !arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            lr |= R_V7M_EXCRET_ES_MASK;
        }
        lr &= ~R_V7M_EXCRET_SPSEL_MASK;
        if (env->v7m.control[targets_secure] & R_V7M_CONTROL_SPSEL_MASK) {
            lr |= R_V7M_EXCRET_SPSEL_MASK;
        }

        /*
         * Clear registers if necessary to prevent non-secure exception
         * code being able to see register values from secure code.
         * Where register values become architecturally UNKNOWN we leave
         * them with their previous values. v8.1M is tighter than v8.0M
         * here and always zeroes the caller-saved registers regardless
         * of the security state the exception is targeting.
         */
        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            if (!targets_secure || arm_feature(env, ARM_FEATURE_V8_1M)) {
                /*
                 * Always clear the caller-saved registers (they have been
                 * pushed to the stack earlier in v7m_push_stack()).
                 * Clear callee-saved registers if the background code is
                 * Secure (in which case these regs were saved in
                 * v7m_push_callee_stack()).
                 */
                int i;
                /*
                 * r4..r11 are callee-saves, zero only if background
                 * state was Secure (EXCRET.S == 1) and exception
                 * targets Non-secure state
                 */
                bool zero_callee_saves = !targets_secure &&
                    (lr & R_V7M_EXCRET_S_MASK);

                for (i = 0; i < 13; i++) {
                    if (i < 4 || i > 11 || zero_callee_saves) {
                        env->regs[i] = 0;
                    }
                }
                /* Clear EAPSR */
                xpsr_write(env, 0, XPSR_NZCV | XPSR_Q | XPSR_GE | XPSR_IT);
            }
        }
    }

    if (push_failed && !ignore_stackfaults) {
        /*
         * Derived exception on callee-saves register stacking:
         * we might now want to take a different exception which
         * targets a different security state, so try again from the top.
         */
        qemu_log_mask(CPU_LOG_INT,
 945                      "...derived exception on callee-saves register stacking");
        v7m_exception_taken(cpu, lr, true, true);
        return;
    }

    if (!arm_v7m_load_vector(cpu, exc, targets_secure, &addr)) {
        /* Vector load failed: derived exception */
        qemu_log_mask(CPU_LOG_INT, "...derived exception on vector table load\n");
        v7m_exception_taken(cpu, lr, true, true);
        return;
    }

    /*
     * Now we've done everything that might cause a derived exception
     * we can go ahead and activate whichever exception we're going to
     * take (which might now be the derived exception).
     */
    armv7m_nvic_acknowledge_irq(env->nvic);

    /* Switch to target security state -- must do this before writing SPSEL */
    switch_v7m_security_state(env, targets_secure);
    write_v7m_control_spsel(env, 0);
    arm_clear_exclusive(env);
    /* Clear SFPA and FPCA (has no effect if no FPU) */
    env->v7m.control[M_REG_S] &=
        ~(R_V7M_CONTROL_FPCA_MASK | R_V7M_CONTROL_SFPA_MASK);
    /* Clear IT bits */
    env->condexec_bits = 0;
    env->regs[14] = lr;
    env->regs[15] = addr & 0xfffffffe;
    env->thumb = addr & 1;
    arm_rebuild_hflags(env);
}

static void v7m_update_fpccr(CPUARMState *env, uint32_t frameptr,
                             bool apply_splim)
{
    /*
     * Like the pseudocode UpdateFPCCR: save state in FPCAR and FPCCR
     * that we will need later in order to do lazy FP reg stacking.
     */
    bool is_secure = env->v7m.secure;
    void *nvic = env->nvic;
    /*
     * Some bits are unbanked and live always in fpccr[M_REG_S]; some bits
     * are banked and we want to update the bit in the bank for the
     * current security state; and in one case we want to specifically
     * update the NS banked version of a bit even if we are secure.
     */
    uint32_t *fpccr_s = &env->v7m.fpccr[M_REG_S];
    uint32_t *fpccr_ns = &env->v7m.fpccr[M_REG_NS];
    uint32_t *fpccr = &env->v7m.fpccr[is_secure];
    bool hfrdy, bfrdy, mmrdy, ns_ufrdy, s_ufrdy, sfrdy, monrdy;

    env->v7m.fpcar[is_secure] = frameptr & ~0x7;

    if (apply_splim && arm_feature(env, ARM_FEATURE_V8)) {
        bool splimviol;
        uint32_t splim = v7m_sp_limit(env);
        bool ign = armv7m_nvic_neg_prio_requested(nvic, is_secure) &&
            (env->v7m.ccr[is_secure] & R_V7M_CCR_STKOFHFNMIGN_MASK);

        splimviol = !ign && frameptr < splim;
        *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, SPLIMVIOL, splimviol);
    }

    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, LSPACT, 1);

    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, S, is_secure);

    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, USER, arm_current_el(env) == 0);

    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, THREAD,
                        !arm_v7m_is_handler_mode(env));

    hfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_HARD, false);
    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, HFRDY, hfrdy);

    bfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_BUS, false);
    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, BFRDY, bfrdy);

    mmrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_MEM, is_secure);
    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, MMRDY, mmrdy);

    ns_ufrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_USAGE, false);
    *fpccr_ns = FIELD_DP32(*fpccr_ns, V7M_FPCCR, UFRDY, ns_ufrdy);

    monrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_DEBUG, false);
    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, MONRDY, monrdy);

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        s_ufrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_USAGE, true);
        *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, UFRDY, s_ufrdy);

        sfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_SECURE, false);
        *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, SFRDY, sfrdy);
    }
}

void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
{
    /* fptr is the value of Rn, the frame pointer we store the FP regs to */
    bool s = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
    bool lspact = env->v7m.fpccr[s] & R_V7M_FPCCR_LSPACT_MASK;
    uintptr_t ra = GETPC();

    assert(env->v7m.secure);

    if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
        return;
    }

    /* Check access to the coprocessor is permitted */
    if (!v7m_cpacr_pass(env, true, arm_current_el(env) != 0)) {
        raise_exception_ra(env, EXCP_NOCP, 0, 1, GETPC());
    }

    if (lspact) {
        /* LSPACT should not be active when there is active FP state */
        raise_exception_ra(env, EXCP_LSERR, 0, 1, GETPC());
    }

    if (fptr & 7) {
        raise_exception_ra(env, EXCP_UNALIGNED, 0, 1, GETPC());
    }

    /*
     * Note that we do not use v7m_stack_write() here, because the
     * accesses should not set the FSR bits for stacking errors if they
     * fail. (In pseudocode terms, they are AccType_NORMAL, not AccType_STACK
     * or AccType_LAZYFP). Faults in cpu_stl_data_ra() will throw exceptions
     * and longjmp out.
     */
    if (!(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPEN_MASK)) {
        bool ts = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK;
        int i;

        for (i = 0; i < (ts ? 32 : 16); i += 2) {
            uint64_t dn = *aa32_vfp_dreg(env, i / 2);
            uint32_t faddr = fptr + 4 * i;
            uint32_t slo = extract64(dn, 0, 32);
            uint32_t shi = extract64(dn, 32, 32);

            if (i >= 16) {
                faddr += 8; /* skip the slot for the FPSCR */
            }
            cpu_stl_data_ra(env, faddr, slo, ra);
            cpu_stl_data_ra(env, faddr + 4, shi, ra);
        }
        cpu_stl_data_ra(env, fptr + 0x40, vfp_get_fpscr(env), ra);

        /*
         * If TS is 0 then s0 to s15 and FPSCR are UNKNOWN; we choose to
         * leave them unchanged, matching our choice in v7m_preserve_fp_state.
         */
        if (ts) {
            for (i = 0; i < 32; i += 2) {
                *aa32_vfp_dreg(env, i / 2) = 0;
            }
            vfp_set_fpscr(env, 0);
        }
    } else {
        v7m_update_fpccr(env, fptr, false);
    }

    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
}

void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
{
    uintptr_t ra = GETPC();

    /* fptr is the value of Rn, the frame pointer we load the FP regs from */
    assert(env->v7m.secure);

    if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
        return;
    }

    /* Check access to the coprocessor is permitted */
    if (!v7m_cpacr_pass(env, true, arm_current_el(env) != 0)) {
        raise_exception_ra(env, EXCP_NOCP, 0, 1, GETPC());
    }

    if (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK) {
        /* State in FP is still valid */
        env->v7m.fpccr[M_REG_S] &= ~R_V7M_FPCCR_LSPACT_MASK;
    } else {
        bool ts = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK;
        int i;
        uint32_t fpscr;

        if (fptr & 7) {
            raise_exception_ra(env, EXCP_UNALIGNED, 0, 1, GETPC());
        }

        for (i = 0; i < (ts ? 32 : 16); i += 2) {
            uint32_t slo, shi;
            uint64_t dn;
            uint32_t faddr = fptr + 4 * i;

            if (i >= 16) {
                faddr += 8; /* skip the slot for the FPSCR */
            }

            slo = cpu_ldl_data_ra(env, faddr, ra);
            shi = cpu_ldl_data_ra(env, faddr + 4, ra);

            dn = (uint64_t) shi << 32 | slo;
            *aa32_vfp_dreg(env, i / 2) = dn;
        }
        fpscr = cpu_ldl_data_ra(env, fptr + 0x40, ra);
        vfp_set_fpscr(env, fpscr);
    }

    env->v7m.control[M_REG_S] |= R_V7M_CONTROL_FPCA_MASK;
}

static bool v7m_push_stack(ARMCPU *cpu)
{
    /*
     * Do the "set up stack frame" part of exception entry,
     * similar to pseudocode PushStack().
     * Return true if we generate a derived exception (and so
     * should ignore further stack faults trying to process
     * that derived exception.)
     */
    bool stacked_ok = true, limitviol = false;
    CPUARMState *env = &cpu->env;
    uint32_t xpsr = xpsr_read(env);
    uint32_t frameptr = env->regs[13];
    ARMMMUIdx mmu_idx = arm_mmu_idx(env);
    uint32_t framesize;
    bool nsacr_cp10 = extract32(env->v7m.nsacr, 10, 1);

    if ((env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) &&
        (env->v7m.secure || nsacr_cp10)) {
        if (env->v7m.secure &&
            env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK) {
            framesize = 0xa8;
        } else {
            framesize = 0x68;
        }
    } else {
        framesize = 0x20;
    }
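    /*
     * That is, 0x20 for the basic eight-word frame, 0x68 when s0..s15,
     * FPSCR and a reserved word are added, and 0xa8 when s16..s31 must
     * also be stacked.
     */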

    /* Align stack pointer if the guest wants that */
    if ((frameptr & 4) &&
        (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKALIGN_MASK)) {
        frameptr -= 4;
        xpsr |= XPSR_SPREALIGN;
    }

    xpsr &= ~XPSR_SFPA;
    if (env->v7m.secure &&
        (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
        xpsr |= XPSR_SFPA;
    }

    frameptr -= framesize;

    if (arm_feature(env, ARM_FEATURE_V8)) {
        uint32_t limit = v7m_sp_limit(env);

        if (frameptr < limit) {
            /*
             * Stack limit failure: set SP to the limit value, and generate
             * STKOF UsageFault. Stack pushes below the limit must not be
             * performed. It is IMPDEF whether pushes above the limit are
             * performed; we choose not to.
             */
            qemu_log_mask(CPU_LOG_INT,
                          "...STKOF during stacking\n");
            env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                    env->v7m.secure);
            env->regs[13] = limit;
            /*
             * We won't try to perform any further memory accesses but
             * we must continue through the following code to check for
             * permission faults during FPU state preservation, and we
             * must update FPCCR if lazy stacking is enabled.
             */
            limitviol = true;
            stacked_ok = false;
        }
    }

    /*
     * Write as much of the stack frame as we can. If we fail a stack
     * write this will result in a derived exception being pended
     * (which may be taken in preference to the one we started with
     * if it has higher priority).
     */
    stacked_ok = stacked_ok &&
        v7m_stack_write(cpu, frameptr, env->regs[0], mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 4, env->regs[1],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 8, env->regs[2],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 12, env->regs[3],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 16, env->regs[12],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 20, env->regs[14],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 24, env->regs[15],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 28, xpsr, mmu_idx, STACK_NORMAL);
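    /*
     * The eight words just written form the basic frame: r0-r3, r12,
     * lr, return address and xPSR at frameptr + 0x0..0x1c.
     */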

    if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) {
        /* FPU is active, try to save its registers */
        bool fpccr_s = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
        bool lspact = env->v7m.fpccr[fpccr_s] & R_V7M_FPCCR_LSPACT_MASK;

        if (lspact && arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            qemu_log_mask(CPU_LOG_INT,
                          "...SecureFault because LSPACT and FPCA both set\n");
            env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
        } else if (!env->v7m.secure && !nsacr_cp10) {
            qemu_log_mask(CPU_LOG_INT,
                          "...Secure UsageFault with CFSR.NOCP because "
                          "NSACR.CP10 prevents stacking FP regs\n");
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, M_REG_S);
            env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
        } else {
            if (!(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPEN_MASK)) {
                /* Lazy stacking disabled, save registers now */
                int i;
                bool cpacr_pass = v7m_cpacr_pass(env, env->v7m.secure,
                                                 arm_current_el(env) != 0);

                if (stacked_ok && !cpacr_pass) {
                    /*
                     * Take UsageFault if CPACR forbids access. The pseudocode
                     * here does a full CheckCPEnabled() but we know the NSACR
                     * check can never fail as we have already handled that.
                     */
                    qemu_log_mask(CPU_LOG_INT,
                                  "...UsageFault with CFSR.NOCP because "
                                  "CPACR.CP10 prevents stacking FP regs\n");
                    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                            env->v7m.secure);
                    env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_NOCP_MASK;
                    stacked_ok = false;
                }

                for (i = 0; i < ((framesize == 0xa8) ? 32 : 16); i += 2) {
                    uint64_t dn = *aa32_vfp_dreg(env, i / 2);
                    uint32_t faddr = frameptr + 0x20 + 4 * i;
                    uint32_t slo = extract64(dn, 0, 32);
                    uint32_t shi = extract64(dn, 32, 32);

                    if (i >= 16) {
                        faddr += 8; /* skip the slot for the FPSCR */
                    }
                    stacked_ok = stacked_ok &&
                        v7m_stack_write(cpu, faddr, slo,
                                        mmu_idx, STACK_NORMAL) &&
                        v7m_stack_write(cpu, faddr + 4, shi,
                                        mmu_idx, STACK_NORMAL);
                }
                stacked_ok = stacked_ok &&
                    v7m_stack_write(cpu, frameptr + 0x60,
                                    vfp_get_fpscr(env), mmu_idx, STACK_NORMAL);
                if (cpacr_pass) {
                    for (i = 0; i < ((framesize == 0xa8) ? 32 : 16); i += 2) {
                        *aa32_vfp_dreg(env, i / 2) = 0;
                    }
                    vfp_set_fpscr(env, 0);
                }
            } else {
                /* Lazy stacking enabled, save necessary info to stack later */
                v7m_update_fpccr(env, frameptr + 0x20, true);
            }
        }
    }

    /*
     * If we broke a stack limit then SP was already updated earlier;
     * otherwise we update SP regardless of whether any of the stack
     * accesses failed or we took some other kind of fault.
     */
    if (!limitviol) {
        env->regs[13] = frameptr;
    }

    return !stacked_ok;
}

static void do_v7m_exception_exit(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    uint32_t excret;
    uint32_t xpsr, xpsr_mask;
    bool ufault = false;
    bool sfault = false;
    bool return_to_sp_process;
    bool return_to_handler;
    bool rettobase = false;
    bool exc_secure = false;
    bool return_to_secure;
    bool ftype;
    bool restore_s16_s31 = false;

    /*
     * If we're not in Handler mode then jumps to magic exception-exit
     * addresses don't have magic behaviour. However for the v8M
     * security extensions the magic secure-function-return has to
     * work in thread mode too, so to avoid doing an extra check in
     * the generated code we allow exception-exit magic to also cause the
     * internal exception and bring us here in thread mode. Correct code
     * will never try to do this (the following insn fetch will always
     * fault) so the overhead of having taken an unnecessary exception
     * doesn't matter.
     */
    if (!arm_v7m_is_handler_mode(env)) {
        return;
    }

    /*
     * In the spec pseudocode ExceptionReturn() is called directly
     * from BXWritePC() and gets the full target PC value including
     * bit zero. In QEMU's implementation we treat it as a normal
     * jump-to-register (which is then caught later on), and so split
     * the target value up between env->regs[15] and env->thumb in
     * gen_bx(). Reconstitute it.
     */
    excret = env->regs[15];
    if (env->thumb) {
        excret |= 1;
    }

    qemu_log_mask(CPU_LOG_INT, "Exception return: magic PC %" PRIx32
                  " previous exception %d\n",
                  excret, env->v7m.exception);

    if ((excret & R_V7M_EXCRET_RES1_MASK) != R_V7M_EXCRET_RES1_MASK) {
        qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero high bits in exception "
                      "exit PC value 0x%" PRIx32 " are UNPREDICTABLE\n",
                      excret);
    }

    ftype = excret & R_V7M_EXCRET_FTYPE_MASK;

    if (!ftype && !cpu_isar_feature(aa32_vfp_simd, cpu)) {
        qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero FTYPE in exception "
                      "exit PC value 0x%" PRIx32 " is UNPREDICTABLE "
                      "if FPU not present\n",
                      excret);
        ftype = true;
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        /*
         * EXC_RETURN.ES validation check (R_SMFL). We must do this before
         * we pick which FAULTMASK to clear.
         */
        if (!env->v7m.secure &&
            ((excret & R_V7M_EXCRET_ES_MASK) ||
             !(excret & R_V7M_EXCRET_DCRS_MASK))) {
            sfault = 1;
            /* For all other purposes, treat ES as 0 (R_HXSR) */
            excret &= ~R_V7M_EXCRET_ES_MASK;
        }
        exc_secure = excret & R_V7M_EXCRET_ES_MASK;
    }

    if (env->v7m.exception != ARMV7M_EXCP_NMI) {
        /*
         * Auto-clear FAULTMASK on return from other than NMI.
         * If the security extension is implemented then this only
         * happens if the raw execution priority is >= 0; the
         * value of the ES bit in the exception return value indicates
         * which security state's faultmask to clear. (v8M ARM ARM R_KBNF.)
         */
        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            if (armv7m_nvic_raw_execution_priority(env->nvic) >= 0) {
                env->v7m.faultmask[exc_secure] = 0;
            }
        } else {
            env->v7m.faultmask[M_REG_NS] = 0;
        }
    }

    switch (armv7m_nvic_complete_irq(env->nvic, env->v7m.exception,
                                     exc_secure)) {
    case -1:
        /* attempt to exit an exception that isn't active */
        ufault = true;
        break;
    case 0:
        /* still an irq active now */
        break;
    case 1:
        /*
         * We returned to base exception level, no nesting.
         * (In the pseudocode this is written using "NestedActivation != 1"
         * where we have 'rettobase == false'.)
         */
        rettobase = true;
        break;
    default:
        g_assert_not_reached();
    }

    return_to_handler = !(excret & R_V7M_EXCRET_MODE_MASK);
    return_to_sp_process = excret & R_V7M_EXCRET_SPSEL_MASK;
    return_to_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
        (excret & R_V7M_EXCRET_S_MASK);

    if (arm_feature(env, ARM_FEATURE_V8)) {
        if (!arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            /*
             * UNPREDICTABLE if S == 1 or DCRS == 0 or ES == 1 (R_XLCP);
             * we choose to take the UsageFault.
             */
            if ((excret & R_V7M_EXCRET_S_MASK) ||
                (excret & R_V7M_EXCRET_ES_MASK) ||
                !(excret & R_V7M_EXCRET_DCRS_MASK)) {
                ufault = true;
            }
        }
        if (excret & R_V7M_EXCRET_RES0_MASK) {
            ufault = true;
        }
    } else {
        /* For v7M we only recognize certain combinations of the low bits */
        switch (excret & 0xf) {
        case 1: /* Return to Handler */
            break;
        case 13: /* Return to Thread using Process stack */
        case 9: /* Return to Thread using Main stack */
            /*
             * We only need to check NONBASETHRDENA for v7M, because in
             * v8M this bit does not exist (it is RES1).
             */
            if (!rettobase &&
                !(env->v7m.ccr[env->v7m.secure] &
                  R_V7M_CCR_NONBASETHRDENA_MASK)) {
                ufault = true;
            }
            break;
        default:
            ufault = true;
        }
    }

    /*
     * Set CONTROL.SPSEL from excret.SPSEL. Since we're still in
     * Handler mode (and will be until we write the new XPSR.Interrupt
     * field) this does not switch around the current stack pointer.
     * We must do this before we do any kind of tailchaining, including
     * for the derived exceptions on integrity check failures, or we will
     * give the guest an incorrect EXCRET.SPSEL value on exception entry.
     */
    write_v7m_control_spsel_for_secstate(env, return_to_sp_process, exc_secure);

    /*
     * Clear scratch FP values left in caller saved registers; this
     * must happen before any kind of tail chaining.
     */
    if ((env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_CLRONRET_MASK) &&
        (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK)) {
        if (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK) {
            env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
            qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
                          "stackframe: error during lazy state deactivation\n");
            v7m_exception_taken(cpu, excret, true, false);
            return;
        } else {
            if (arm_feature(env, ARM_FEATURE_V8_1M)) {
                /* v8.1M adds this NOCP check */
                bool nsacr_pass = exc_secure ||
                    extract32(env->v7m.nsacr, 10, 1);
                bool cpacr_pass = v7m_cpacr_pass(env, exc_secure, true);
                if (!nsacr_pass) {
1525                    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, true);
1526                    env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
1527                    qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
1528                        "stackframe: NSACR prevents clearing FPU registers\n");
1529                    v7m_exception_taken(cpu, excret, true, false);
                        return;
1530                } else if (!cpacr_pass) {
1531                    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
1532                                            exc_secure);
1533                    env->v7m.cfsr[exc_secure] |= R_V7M_CFSR_NOCP_MASK;
1534                    qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
1535                        "stackframe: CPACR prevents clearing FPU registers\n");
1536                    v7m_exception_taken(cpu, excret, true, false);
                        return;
1537                }
1538            }
1539            /* Clear s0..s15 and FPSCR; TODO also VPR when MVE is implemented */
1540            int i;
1541
1542            for (i = 0; i < 16; i += 2) {
1543                *aa32_vfp_dreg(env, i / 2) = 0;
1544            }
1545            vfp_set_fpscr(env, 0);
1546        }
1547    }
1548
1549    if (sfault) {
1550        env->v7m.sfsr |= R_V7M_SFSR_INVER_MASK;
1551        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
1552        qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
1553                      "stackframe: failed EXC_RETURN.ES validity check\n");
1554        v7m_exception_taken(cpu, excret, true, false);
1555        return;
1556    }
1557
1558    if (ufault) {
1559        /*
1560         * Bad exception return: instead of popping the exception
1561         * stack, directly take a usage fault on the current stack.
1562         */
1563        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
1564        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
1565        qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
1566                      "stackframe: failed exception return integrity check\n");
1567        v7m_exception_taken(cpu, excret, true, false);
1568        return;
1569    }
1570
1571    /*
1572     * Tailchaining: if there is currently a pending exception that
1573     * is high enough priority to preempt execution at the level we're
1574     * about to return to, then just directly take that exception now,
1575     * avoiding an unstack-and-then-stack. Note that now we have
1576     * deactivated the previous exception by calling armv7m_nvic_complete_irq()
1577     * our current execution priority is already the execution priority we are
1578     * returning to -- none of the state we would unstack or set based on
1579     * the EXCRET value affects it.
1580     */
1581    if (armv7m_nvic_can_take_pending_exception(env->nvic)) {
1582        qemu_log_mask(CPU_LOG_INT, "...tailchaining to pending exception\n");
1583        v7m_exception_taken(cpu, excret, true, false);
1584        return;
1585    }
1586
1587    switch_v7m_security_state(env, return_to_secure);
1588
1589    {
1590        /*
1591         * The stack pointer we should be reading the exception frame from
1592         * depends on bits in the magic exception return type value (and
1593         * for v8M isn't necessarily the stack pointer we will eventually
1594         * end up resuming execution with). Get a pointer to the location
1595         * in the CPU state struct where the SP we need is currently being
1596         * stored; we will use and modify it in place.
1597         * We use this limited C variable scope so we don't accidentally
1598         * use 'frame_sp_p' after we do something that makes it invalid.
1599         */
1600        uint32_t *frame_sp_p = get_v7m_sp_ptr(env,
1601                                              return_to_secure,
1602                                              !return_to_handler,
1603                                              return_to_sp_process);
1604        uint32_t frameptr = *frame_sp_p;
1605        bool pop_ok = true;
1606        ARMMMUIdx mmu_idx;
1607        bool return_to_priv = return_to_handler ||
1608            !(env->v7m.control[return_to_secure] & R_V7M_CONTROL_NPRIV_MASK);
1609
1610        mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, return_to_secure,
1611                                                        return_to_priv);
1612
1613        if (!QEMU_IS_ALIGNED(frameptr, 8) &&
1614            arm_feature(env, ARM_FEATURE_V8)) {
1615            qemu_log_mask(LOG_GUEST_ERROR,
1616                          "M profile exception return with non-8-aligned SP "
1617                          "for destination state is UNPREDICTABLE\n");
1618        }
1619
1620        /* Do we need to pop callee-saved registers? */
1621        if (return_to_secure &&
1622            ((excret & R_V7M_EXCRET_ES_MASK) == 0 ||
1623             (excret & R_V7M_EXCRET_DCRS_MASK) == 0)) {
1624            uint32_t actual_sig;
1625
1626            pop_ok = v7m_stack_read(cpu, &actual_sig, frameptr, mmu_idx);
1627
1628            if (pop_ok && v7m_integrity_sig(env, excret) != actual_sig) {
1629                /* Take a SecureFault on the current stack */
1630                env->v7m.sfsr |= R_V7M_SFSR_INVIS_MASK;
1631                armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
1632                qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
1633                              "stackframe: failed exception return integrity "
1634                              "signature check\n");
1635                v7m_exception_taken(cpu, excret, true, false);
1636                return;
1637            }
1638
1639            pop_ok = pop_ok &&
1640                v7m_stack_read(cpu, &env->regs[4], frameptr + 0x8, mmu_idx) &&
1641                v7m_stack_read(cpu, &env->regs[5], frameptr + 0xc, mmu_idx) &&
1642                v7m_stack_read(cpu, &env->regs[6], frameptr + 0x10, mmu_idx) &&
1643                v7m_stack_read(cpu, &env->regs[7], frameptr + 0x14, mmu_idx) &&
1644                v7m_stack_read(cpu, &env->regs[8], frameptr + 0x18, mmu_idx) &&
1645                v7m_stack_read(cpu, &env->regs[9], frameptr + 0x1c, mmu_idx) &&
1646                v7m_stack_read(cpu, &env->regs[10], frameptr + 0x20, mmu_idx) &&
1647                v7m_stack_read(cpu, &env->regs[11], frameptr + 0x24, mmu_idx);
1648
1649            frameptr += 0x28;
1650        }
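
            /*
             * Sketch of the callee-saved extension frame handled above,
             * when it is present (the word at +0x04 is reserved):
             *   +0x00  integrity signature     +0x08..+0x14  R4-R7
             *   +0x04  reserved                +0x18..+0x24  R8-R11
             */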
1651
1652        /* Pop registers */
1653        pop_ok = pop_ok &&
1654            v7m_stack_read(cpu, &env->regs[0], frameptr, mmu_idx) &&
1655            v7m_stack_read(cpu, &env->regs[1], frameptr + 0x4, mmu_idx) &&
1656            v7m_stack_read(cpu, &env->regs[2], frameptr + 0x8, mmu_idx) &&
1657            v7m_stack_read(cpu, &env->regs[3], frameptr + 0xc, mmu_idx) &&
1658            v7m_stack_read(cpu, &env->regs[12], frameptr + 0x10, mmu_idx) &&
1659            v7m_stack_read(cpu, &env->regs[14], frameptr + 0x14, mmu_idx) &&
1660            v7m_stack_read(cpu, &env->regs[15], frameptr + 0x18, mmu_idx) &&
1661            v7m_stack_read(cpu, &xpsr, frameptr + 0x1c, mmu_idx);
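
        /*
         * The offsets above give the basic (caller-saved) frame layout:
         *   +0x00 R0   +0x08 R2   +0x10 R12  +0x18 ReturnAddress
         *   +0x04 R1   +0x0c R3   +0x14 LR   +0x1c xPSR
         */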
1662
1663        if (!pop_ok) {
1664            /*
1665             * v7m_stack_read() pended a fault, so take it (as a tail
1666             * chained exception on the same stack frame)
1667             */
1668            qemu_log_mask(CPU_LOG_INT, "...derived exception on unstacking\n");
1669            v7m_exception_taken(cpu, excret, true, false);
1670            return;
1671        }
1672
1673        /*
1674         * Returning from an exception with a PC with bit 0 set is defined
1675         * behaviour on v8M (bit 0 is ignored), but for v7M it was specified
1676         * to be UNPREDICTABLE. In practice actual v7M hardware seems to ignore
1677         * the lsbit, and there are several RTOSes out there which incorrectly
1678         * assume the r15 in the stack frame should be a Thumb-style "lsbit
1679         * indicates ARM/Thumb" value, so ignore the bit on v7M as well, but
1680         * complain about the badly behaved guest.
1681         */
1682        if (env->regs[15] & 1) {
1683            env->regs[15] &= ~1U;
1684            if (!arm_feature(env, ARM_FEATURE_V8)) {
1685                qemu_log_mask(LOG_GUEST_ERROR,
1686                              "M profile return from interrupt with misaligned "
1687                              "PC is UNPREDICTABLE on v7M\n");
1688            }
1689        }
1690
1691        if (arm_feature(env, ARM_FEATURE_V8)) {
1692            /*
1693             * For v8M we have to check whether the xPSR exception field
1694             * matches the EXCRET value for return to handler/thread
1695             * before we commit to changing the SP and xPSR.
1696             */
1697            bool will_be_handler = (xpsr & XPSR_EXCP) != 0;
1698            if (return_to_handler != will_be_handler) {
1699                /*
1700                 * Take an INVPC UsageFault on the current stack.
1701                 * By this point we will have switched to the security state
1702                 * for the background state, so this UsageFault will target
1703                 * that state.
1704                 */
1705                armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
1706                                        env->v7m.secure);
1707                env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
1708                qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
1709                              "stackframe: failed exception return integrity "
1710                              "check\n");
1711                v7m_exception_taken(cpu, excret, true, false);
1712                return;
1713            }
1714        }
1715
1716        if (!ftype) {
1717            /* FP present and we need to handle it */
1718            if (!return_to_secure &&
1719                (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK)) {
1720                armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
1721                env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
1722                qemu_log_mask(CPU_LOG_INT,
1723                              "...taking SecureFault on existing stackframe: "
1724                              "Secure LSPACT set but exception return is "
1725                              "not to secure state\n");
1726                v7m_exception_taken(cpu, excret, true, false);
1727                return;
1728            }
1729
1730            restore_s16_s31 = return_to_secure &&
1731                (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK);
1732
1733            if (env->v7m.fpccr[return_to_secure] & R_V7M_FPCCR_LSPACT_MASK) {
1734                /* State in FPU is still valid, just clear LSPACT */
1735                env->v7m.fpccr[return_to_secure] &= ~R_V7M_FPCCR_LSPACT_MASK;
1736            } else {
1737                int i;
1738                uint32_t fpscr;
1739                bool cpacr_pass, nsacr_pass;
1740
1741                cpacr_pass = v7m_cpacr_pass(env, return_to_secure,
1742                                            return_to_priv);
1743                nsacr_pass = return_to_secure ||
1744                    extract32(env->v7m.nsacr, 10, 1);
1745
1746                if (!cpacr_pass) {
1747                    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
1748                                            return_to_secure);
1749                    env->v7m.cfsr[return_to_secure] |= R_V7M_CFSR_NOCP_MASK;
1750                    qemu_log_mask(CPU_LOG_INT,
1751                                  "...taking UsageFault on existing "
1752                                  "stackframe: CPACR.CP10 prevents unstacking "
1753                                  "FP regs\n");
1754                    v7m_exception_taken(cpu, excret, true, false);
1755                    return;
1756                } else if (!nsacr_pass) {
1757                    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, true);
1758                    env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_INVPC_MASK;
1759                    qemu_log_mask(CPU_LOG_INT,
1760                                  "...taking Secure UsageFault on existing "
1761                                  "stackframe: NSACR.CP10 prevents unstacking "
1762                                  "FP regs\n");
1763                    v7m_exception_taken(cpu, excret, true, false);
1764                    return;
1765                }
1766
1767                for (i = 0; i < (restore_s16_s31 ? 32 : 16); i += 2) {
1768                    uint32_t slo, shi;
1769                    uint64_t dn;
1770                    uint32_t faddr = frameptr + 0x20 + 4 * i;
1771
1772                    if (i >= 16) {
1773                        faddr += 8; /* Skip the slot for the FPSCR */
1774                    }
1775
1776                    pop_ok = pop_ok &&
1777                        v7m_stack_read(cpu, &slo, faddr, mmu_idx) &&
1778                        v7m_stack_read(cpu, &shi, faddr + 4, mmu_idx);
1779
1780                    if (!pop_ok) {
1781                        break;
1782                    }
1783
1784                    dn = (uint64_t)shi << 32 | slo;
1785                    *aa32_vfp_dreg(env, i / 2) = dn;
1786                }
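                /*
                 * Note on the offsets computed above: s0-s15 live at
                 * +0x20..+0x5c in the frame and FPSCR at +0x60; when TS
                 * is set, s16-s31 follow at +0x68..+0xa4, skipping the
                 * FPSCR slot and its reserved companion word.
                 */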
1787                pop_ok = pop_ok &&
1788                    v7m_stack_read(cpu, &fpscr, frameptr + 0x60, mmu_idx);
1789                if (pop_ok) {
1790                    vfp_set_fpscr(env, fpscr);
1791                }
1792                if (!pop_ok) {
1793                    /*
1794                     * These regs are 0 if security extension present;
1795                     * otherwise merely UNKNOWN. We zero always.
1796                     */
1797                    for (i = 0; i < (restore_s16_s31 ? 32 : 16); i += 2) {
1798                        *aa32_vfp_dreg(env, i / 2) = 0;
1799                    }
1800                    vfp_set_fpscr(env, 0);
1801                }
1802            }
1803        }
1804        env->v7m.control[M_REG_S] = FIELD_DP32(env->v7m.control[M_REG_S],
1805                                               V7M_CONTROL, FPCA, !ftype);
1806
1807        /* Commit to consuming the stack frame */
1808        frameptr += 0x20;
1809        if (!ftype) {
1810            frameptr += 0x48;
1811            if (restore_s16_s31) {
1812                frameptr += 0x40;
1813            }
1814        }
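        /*
         * (Worked out: 0x20 bytes for the basic frame; 0x20 + 0x48 = 0x68
         * when the standard FP state s0-s15/FPSCR/reserved word is present;
         * 0x68 + 0x40 = 0xa8 when s16-s31 were stacked as well.)
         */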
1815        /*
1816         * Undo stack alignment: the SPREALIGN bit indicates that the original
1817         * pre-exception SP was not 8-aligned and we added a padding word to
1818         * align it, so we undo this by ORing in the bit that increases it
1819         * from the current 8-aligned value to the 8-unaligned value. (Adding 4
1820         * would work too, but a logical OR is how the pseudocode specifies it.)
1821         */
1822        if (xpsr & XPSR_SPREALIGN) {
1823            frameptr |= 4;
1824        }
1825        *frame_sp_p = frameptr;
1826    }
1827
1828    xpsr_mask = ~(XPSR_SPREALIGN | XPSR_SFPA);
1829    if (!arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
1830        xpsr_mask &= ~XPSR_GE;
1831    }
1832    /* This xpsr_write() will invalidate frame_sp_p as it may switch stack */
1833    xpsr_write(env, xpsr, xpsr_mask);
1834
1835    if (env->v7m.secure) {
1836        bool sfpa = xpsr & XPSR_SFPA;
1837
1838        env->v7m.control[M_REG_S] = FIELD_DP32(env->v7m.control[M_REG_S],
1839                                               V7M_CONTROL, SFPA, sfpa);
1840    }
1841
1842    /*
1843     * The restored xPSR exception field will be zero if we're
1844     * resuming in Thread mode. If that doesn't match what the
1845     * exception return excret specified then this is a UsageFault.
1846     * v7M requires we make this check here; v8M did it earlier.
1847     */
1848    if (return_to_handler != arm_v7m_is_handler_mode(env)) {
1849        /*
1850         * Take an INVPC UsageFault by pushing the stack again;
1851         * we know we're v7M so this is never a Secure UsageFault.
1852         */
1853        bool ignore_stackfaults;
1854
1855        assert(!arm_feature(env, ARM_FEATURE_V8));
1856        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, false);
1857        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
1858        ignore_stackfaults = v7m_push_stack(cpu);
1859        qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on new stackframe: "
1860                      "failed exception return integrity check\n");
1861        v7m_exception_taken(cpu, excret, false, ignore_stackfaults);
1862        return;
1863    }
1864
1865    /* Otherwise, we have a successful exception exit. */
1866    arm_clear_exclusive(env);
1867    arm_rebuild_hflags(env);
1868    qemu_log_mask(CPU_LOG_INT, "...successful exception return\n");
1869}
1870
1871static bool do_v7m_function_return(ARMCPU *cpu)
1872{
1873    /*
1874     * v8M security extensions magic function return.
1875     * We may either:
1876     *  (1) throw an exception (longjump)
1877     *  (2) return true if we successfully handled the function return
1878     *  (3) return false if we failed a consistency check and have
1879     *      pended a UsageFault that needs to be taken now
1880     *
1881     * At this point the magic return value is split between env->regs[15]
1882     * and env->thumb. We don't bother to reconstitute it because we don't
1883     * need it (all values are handled the same way).
1884     */
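    /*
     * (For orientation: function-return magic values are FNC_RETURN,
     * 0xfefffffe/0xfeffffff, as placed in LR by BLXNS, while exception
     * returns use the 0xffxxxxxx EXC_RETURN range; arm_v7m_cpu_do_interrupt()
     * below selects this function on that basis.)
     */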
1885    CPUARMState *env = &cpu->env;
1886    uint32_t newpc, newpsr, newpsr_exc;
1887
1888    qemu_log_mask(CPU_LOG_INT, "...really v7M secure function return\n");
1889
1890    {
1891        bool threadmode, spsel;
1892        TCGMemOpIdx oi;
1893        ARMMMUIdx mmu_idx;
1894        uint32_t *frame_sp_p;
1895        uint32_t frameptr;
1896
1897        /* Pull the return address and IPSR from the Secure stack */
1898        threadmode = !arm_v7m_is_handler_mode(env);
1899        spsel = env->v7m.control[M_REG_S] & R_V7M_CONTROL_SPSEL_MASK;
1900
1901        frame_sp_p = get_v7m_sp_ptr(env, true, threadmode, spsel);
1902        frameptr = *frame_sp_p;
1903
1904        /*
1905         * These loads may throw an exception (for MPU faults). We want to
1906         * do them as secure, so work out what MMU index that is.
1907         */
1908        mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
1909        oi = make_memop_idx(MO_LE, arm_to_core_mmu_idx(mmu_idx));
1910        newpc = helper_le_ldul_mmu(env, frameptr, oi, 0);
1911        newpsr = helper_le_ldul_mmu(env, frameptr + 4, oi, 0);
1912
1913        /* Consistency checks on new IPSR */
1914        newpsr_exc = newpsr & XPSR_EXCP;
1915        if (!((env->v7m.exception == 0 && newpsr_exc == 0) ||
1916              (env->v7m.exception == 1 && newpsr_exc != 0))) {
1917            /* Pend the fault and tell our caller to take it */
1918            env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
1919            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
1920                                    env->v7m.secure);
1921            qemu_log_mask(CPU_LOG_INT,
1922                          "...taking INVPC UsageFault: "
1923                          "IPSR consistency check failed\n");
1924            return false;
1925        }
1926
1927        *frame_sp_p = frameptr + 8;
1928    }
1929
1930    /* This invalidates frame_sp_p */
1931    switch_v7m_security_state(env, true);
1932    env->v7m.exception = newpsr_exc;
1933    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
1934    if (newpsr & XPSR_SFPA) {
1935        env->v7m.control[M_REG_S] |= R_V7M_CONTROL_SFPA_MASK;
1936    }
1937    xpsr_write(env, 0, XPSR_IT);
1938    env->thumb = newpc & 1;
1939    env->regs[15] = newpc & ~1;
1940    arm_rebuild_hflags(env);
1941
1942    qemu_log_mask(CPU_LOG_INT, "...function return successful\n");
1943    return true;
1944}
1945
1946static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
1947                               uint32_t addr, uint16_t *insn)
1948{
1949    /*
1950     * Load a 16-bit portion of a v7M instruction, returning true on success,
1951     * or false on failure (in which case we will have pended the appropriate
1952     * exception).
1953     * We need to do the instruction fetch's MPU and SAU checks
1954     * like this because there is no MMU index that would allow
1955     * doing the load with a single function call. Instead we must
1956     * first check that the security attributes permit the load
1957     * and that they don't mismatch on the two halves of the instruction,
1958     * and then we do the load as a secure load (ie using the security
1959     * attributes of the address, not the CPU, as architecturally required).
1960     */
1961    CPUState *cs = CPU(cpu);
1962    CPUARMState *env = &cpu->env;
1963    V8M_SAttributes sattrs = {};
1964    MemTxAttrs attrs = {};
1965    ARMMMUFaultInfo fi = {};
1966    ARMCacheAttrs cacheattrs = {};
1967    MemTxResult txres;
1968    target_ulong page_size;
1969    hwaddr physaddr;
1970    int prot;
1971
1972    v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, &sattrs);
1973    if (!sattrs.nsc || sattrs.ns) {
1974        /*
1975         * This must be the second half of the insn, and it straddles a
1976         * region boundary with the second half not being S&NSC.
1977         */
1978        env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
1979        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
1980        qemu_log_mask(CPU_LOG_INT,
1981                      "...really SecureFault with SFSR.INVEP\n");
1982        return false;
1983    }
1984    if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx, &physaddr,
1985                      &attrs, &prot, &page_size, &fi, &cacheattrs)) {
1986        /* the MPU lookup failed */
1987        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
1988        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure);
1989        qemu_log_mask(CPU_LOG_INT, "...really MemManage with CFSR.IACCVIOL\n");
1990        return false;
1991    }
1992    *insn = address_space_lduw_le(arm_addressspace(cs, attrs), physaddr,
1993                                 attrs, &txres);
1994    if (txres != MEMTX_OK) {
1995        env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
1996        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
1997        qemu_log_mask(CPU_LOG_INT, "...really BusFault with CFSR.IBUSERR\n");
1998        return false;
1999    }
2000    return true;
2001}
2002
2003static bool v7m_read_sg_stack_word(ARMCPU *cpu, ARMMMUIdx mmu_idx,
2004                                   uint32_t addr, uint32_t *spdata)
2005{
2006    /*
2007     * Read a word of data from the stack for the SG instruction,
2008     * writing the value into *spdata. If the load succeeds, return
2009     * true; otherwise pend an appropriate exception and return false.
2010     * (We can't use data load helpers here that throw an exception
2011     * because of the context we're called in, which is halfway through
2012     * arm_v7m_cpu_do_interrupt().)
2013     */
2014    CPUState *cs = CPU(cpu);
2015    CPUARMState *env = &cpu->env;
2016    MemTxAttrs attrs = {};
2017    MemTxResult txres;
2018    target_ulong page_size;
2019    hwaddr physaddr;
2020    int prot;
2021    ARMMMUFaultInfo fi = {};
2022    ARMCacheAttrs cacheattrs = {};
2023    uint32_t value;
2024
2025    if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &physaddr,
2026                      &attrs, &prot, &page_size, &fi, &cacheattrs)) {
2027        /* MPU/SAU lookup failed */
2028        if (fi.type == ARMFault_QEMU_SFault) {
2029            qemu_log_mask(CPU_LOG_INT,
2030                          "...SecureFault during stack word read\n");
2031            env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
2032            env->v7m.sfar = addr;
2033            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
2034        } else {
2035            qemu_log_mask(CPU_LOG_INT,
2036                          "...MemManageFault during stack word read\n");
2037            env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_DACCVIOL_MASK |
2038                R_V7M_CFSR_MMARVALID_MASK;
2039            env->v7m.mmfar[M_REG_S] = addr;
2040            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, false);
2041        }
2042        return false;
2043    }
2044    value = address_space_ldl(arm_addressspace(cs, attrs), physaddr,
2045                              attrs, &txres);
2046    if (txres != MEMTX_OK) {
2047        /* BusFault trying to read the data */
2048        qemu_log_mask(CPU_LOG_INT,
2049                      "...BusFault during stack word read\n");
2050        env->v7m.cfsr[M_REG_NS] |=
2051            (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
2052        env->v7m.bfar = addr;
2053        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
2054        return false;
2055    }
2056
2057    *spdata = value;
2058    return true;
2059}
2060
2061static bool v7m_handle_execute_nsc(ARMCPU *cpu)
2062{
2063    /*
2064     * Check whether this attempt to execute code in a Secure & NS-Callable
2065     * memory region is for an SG instruction; if so, then emulate the
2066     * effect of the SG instruction and return true. Otherwise pend
2067     * the correct kind of exception and return false.
2068     */
2069    CPUARMState *env = &cpu->env;
2070    ARMMMUIdx mmu_idx;
2071    uint16_t insn;
2072
2073    /*
2074     * We should never get here unless get_phys_addr_pmsav8() caused
2075     * an exception for NS executing in S&NSC memory.
2076     */
2077    assert(!env->v7m.secure);
2078    assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
2079
2080    /* We want to do the MPU lookup as secure; work out what mmu_idx that is */
2081    mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
2082
2083    if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15], &insn)) {
2084        return false;
2085    }
2086
2087    if (!env->thumb) {
2088        goto gen_invep;
2089    }
2090
2091    if (insn != 0xe97f) {
2092        /*
2093         * Not an SG instruction first half (we choose the IMPDEF
2094         * early-SG-check option).
2095         */
2096        goto gen_invep;
2097    }
2098
2099    if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15] + 2, &insn)) {
2100        return false;
2101    }
2102
2103    if (insn != 0xe97f) {
2104        /*
2105         * Not an SG instruction second half (yes, both halves of the SG
2106         * insn have the same hex value)
2107         */
2108        goto gen_invep;
2109    }
2110
2111    /*
2112     * OK, we have confirmed that we really have an SG instruction.
2113     * We know we're NS in S memory so don't need to repeat those checks.
2114     */
2115    qemu_log_mask(CPU_LOG_INT, "...really an SG instruction at 0x%08" PRIx32
2116                  ", executing it\n", env->regs[15]);
2117
2118    if (cpu_isar_feature(aa32_m_sec_state, cpu) &&
2119        !arm_v7m_is_handler_mode(env)) {
2120        /*
2121         * v8.1M exception stack frame integrity check. Note that we
2122         * must perform the memory access even if CCR_S.TRD is zero
2123         * and we aren't going to check what the data loaded is.
2124         */
2125        uint32_t spdata, sp;
2126
2127        /*
2128         * We know we are currently NS, so the S stack pointers must be
2129         * in other_ss_{psp,msp}, not in regs[13]/other_sp.
2130         */
2131        sp = v7m_using_psp(env) ? env->v7m.other_ss_psp : env->v7m.other_ss_msp;
2132        if (!v7m_read_sg_stack_word(cpu, mmu_idx, sp, &spdata)) {
2133            /* Stack access failed and an exception has been pended */
2134            return false;
2135        }
2136
2137        if (env->v7m.ccr[M_REG_S] & R_V7M_CCR_TRD_MASK) {
2138            if (((spdata & ~1) == 0xfefa125a) ||
2139                !(env->v7m.control[M_REG_S] & 1)) {
2140                goto gen_invep;
2141            }
2142        }
2143    }
2144
2145    env->regs[14] &= ~1;
2146    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
2147    switch_v7m_security_state(env, true);
2148    xpsr_write(env, 0, XPSR_IT);
2149    env->regs[15] += 4;
2150    arm_rebuild_hflags(env);
2151    return true;
2152
2153gen_invep:
2154    env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
2155    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
2156    qemu_log_mask(CPU_LOG_INT,
2157                  "...really SecureFault with SFSR.INVEP\n");
2158    return false;
2159}
2160
2161void arm_v7m_cpu_do_interrupt(CPUState *cs)
2162{
2163    ARMCPU *cpu = ARM_CPU(cs);
2164    CPUARMState *env = &cpu->env;
2165    uint32_t lr;
2166    bool ignore_stackfaults;
2167
2168    arm_log_exception(cs->exception_index);
2169
2170    /*
2171     * For exceptions we just mark as pending on the NVIC, and let that
2172     * handle it.
2173     */
2174    switch (cs->exception_index) {
2175    case EXCP_UDEF:
2176        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
2177        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNDEFINSTR_MASK;
2178        break;
2179    case EXCP_NOCP:
2180    {
2181        /*
2182         * NOCP might be directed to something other than the current
2183         * security state if this fault is because of NSACR; we indicate
2184         * the target security state using exception.target_el.
2185         */
2186        int target_secstate;
2187
2188        if (env->exception.target_el == 3) {
2189            target_secstate = M_REG_S;
2190        } else {
2191            target_secstate = env->v7m.secure;
2192        }
2193        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, target_secstate);
2194        env->v7m.cfsr[target_secstate] |= R_V7M_CFSR_NOCP_MASK;
2195        break;
2196    }
2197    case EXCP_INVSTATE:
2198        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
2199        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVSTATE_MASK;
2200        break;
2201    case EXCP_STKOF:
2202        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
2203        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
2204        break;
2205    case EXCP_LSERR:
2206        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
2207        env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
2208        break;
2209    case EXCP_UNALIGNED:
2210        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
2211        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNALIGNED_MASK;
2212        break;
2213    case EXCP_SWI:
2214        /* The PC already points to the next instruction.  */
2215        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, env->v7m.secure);
2216        break;
2217    case EXCP_PREFETCH_ABORT:
2218    case EXCP_DATA_ABORT:
2219        /*
2220         * Note that for M profile we don't have a guest facing FSR, but
2221         * the env->exception.fsr will be populated by the code that
2222         * raises the fault, in the A profile short-descriptor format.
2223         */
2224        switch (env->exception.fsr & 0xf) {
2225        case M_FAKE_FSR_NSC_EXEC:
2226            /*
2227             * Exception generated when we try to execute code at an address
2228             * which is marked as Secure & Non-Secure Callable and the CPU
2229             * is in the Non-Secure state. The only instruction which can
2230             * be executed like this is SG (and that only if both halves of
2231             * the SG instruction have the same security attributes.)
2232             * Everything else must generate an INVEP SecureFault, so we
2233             * emulate the SG instruction here.
2234             */
2235            if (v7m_handle_execute_nsc(cpu)) {
2236                return;
2237            }
2238            break;
2239        case M_FAKE_FSR_SFAULT:
2240            /*
2241             * Various flavours of SecureFault for attempts to execute or
2242             * access data in the wrong security state.
2243             */
2244            switch (cs->exception_index) {
2245            case EXCP_PREFETCH_ABORT:
2246                if (env->v7m.secure) {
2247                    env->v7m.sfsr |= R_V7M_SFSR_INVTRAN_MASK;
2248                    qemu_log_mask(CPU_LOG_INT,
2249                                  "...really SecureFault with SFSR.INVTRAN\n");
2250                } else {
2251                    env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
2252                    qemu_log_mask(CPU_LOG_INT,
2253                                  "...really SecureFault with SFSR.INVEP\n");
2254                }
2255                break;
2256            case EXCP_DATA_ABORT:
2257                /* This must be an NS access to S memory */
2258                env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
2259                qemu_log_mask(CPU_LOG_INT,
2260                              "...really SecureFault with SFSR.AUVIOL\n");
2261                break;
2262            }
2263            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
2264            break;
2265        case 0x8: /* External Abort */
2266            switch (cs->exception_index) {
2267            case EXCP_PREFETCH_ABORT:
2268                env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
2269                qemu_log_mask(CPU_LOG_INT, "...with CFSR.IBUSERR\n");
2270                break;
2271            case EXCP_DATA_ABORT:
2272                env->v7m.cfsr[M_REG_NS] |=
2273                    (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
2274                env->v7m.bfar = env->exception.vaddress;
2275                qemu_log_mask(CPU_LOG_INT,
2276                              "...with CFSR.PRECISERR and BFAR 0x%x\n",
2277                              env->v7m.bfar);
2278                break;
2279            }
2280            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
2281            break;
2282        default:
2283            /*
2284             * All other FSR values are either MPU faults or "can't happen
2285             * for M profile" cases.
2286             */
2287            switch (cs->exception_index) {
2288            case EXCP_PREFETCH_ABORT:
2289                env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
2290                qemu_log_mask(CPU_LOG_INT, "...with CFSR.IACCVIOL\n");
2291                break;
2292            case EXCP_DATA_ABORT:
2293                env->v7m.cfsr[env->v7m.secure] |=
2294                    (R_V7M_CFSR_DACCVIOL_MASK | R_V7M_CFSR_MMARVALID_MASK);
2295                env->v7m.mmfar[env->v7m.secure] = env->exception.vaddress;
2296                qemu_log_mask(CPU_LOG_INT,
2297                              "...with CFSR.DACCVIOL and MMFAR 0x%x\n",
2298                              env->v7m.mmfar[env->v7m.secure]);
2299                break;
2300            }
2301            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM,
2302                                    env->v7m.secure);
2303            break;
2304        }
2305        break;
2306    case EXCP_SEMIHOST:
2307        qemu_log_mask(CPU_LOG_INT,
2308                      "...handling as semihosting call 0x%x\n",
2309                      env->regs[0]);
2310#ifdef CONFIG_TCG
2311        env->regs[0] = do_common_semihosting(cs);
2312#else
2313        g_assert_not_reached();
2314#endif
2315        env->regs[15] += env->thumb ? 2 : 4;
2316        return;
2317    case EXCP_BKPT:
2318        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG, false);
2319        break;
2320    case EXCP_IRQ:
2321        break;
2322    case EXCP_EXCEPTION_EXIT:
2323        if (env->regs[15] < EXC_RETURN_MIN_MAGIC) {
2324            /* Must be v8M security extension function return */
2325            assert(env->regs[15] >= FNC_RETURN_MIN_MAGIC);
2326            assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
2327            if (do_v7m_function_return(cpu)) {
2328                return;
2329            }
2330        } else {
2331            do_v7m_exception_exit(cpu);
2332            return;
2333        }
2334        break;
2335    case EXCP_LAZYFP:
2336        /*
2337         * We already pended the specific exception in the NVIC in the
2338         * v7m_preserve_fp_state() helper function.
2339         */
2340        break;
2341    default:
2342        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
2343        return; /* Never happens.  Keep compiler happy.  */
2344    }
2345
2346    if (arm_feature(env, ARM_FEATURE_V8)) {
2347        lr = R_V7M_EXCRET_RES1_MASK |
2348            R_V7M_EXCRET_DCRS_MASK;
2349        /*
2350         * The S bit indicates whether we should return to Secure
2351         * or NonSecure (ie our current state).
2352         * The ES bit indicates whether we're taking this exception
2353         * to Secure or NonSecure (ie our target state). We set it
2354         * later, in v7m_exception_taken().
2355         * The SPSEL bit is also set in v7m_exception_taken() for v8M.
2356         * This corresponds to the ARM ARM pseudocode for v8M setting
2357         * some LR bits in PushStack() and some in ExceptionTaken();
2358         * the distinction matters for the tailchain cases where we
2359         * can take an exception without pushing the stack.
2360         */
2361        if (env->v7m.secure) {
2362            lr |= R_V7M_EXCRET_S_MASK;
2363        }
2364    } else {
2365        lr = R_V7M_EXCRET_RES1_MASK |
2366            R_V7M_EXCRET_S_MASK |
2367            R_V7M_EXCRET_DCRS_MASK |
2368            R_V7M_EXCRET_ES_MASK;
2369        if (env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK) {
2370            lr |= R_V7M_EXCRET_SPSEL_MASK;
2371        }
2372    }
2373    if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK)) {
2374        lr |= R_V7M_EXCRET_FTYPE_MASK;
2375    }
2376    if (!arm_v7m_is_handler_mode(env)) {
2377        lr |= R_V7M_EXCRET_MODE_MASK;
2378    }
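
    /*
     * As a concrete example of the composition above: a v7M core with no
     * FPU taking an exception from Thread mode on the process stack gets
     * RES1 | S | DCRS | ES | FTYPE | MODE | SPSEL, i.e. the familiar
     * LR value 0xfffffffd.
     */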
2379
2380    ignore_stackfaults = v7m_push_stack(cpu);
2381    v7m_exception_taken(cpu, lr, false, ignore_stackfaults);
2382}
2383
2384uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
2385{
2386    unsigned el = arm_current_el(env);
2387
2388    /* First handle registers which unprivileged code can read */
2389    switch (reg) {
2390    case 0 ... 7: /* xPSR sub-fields */
2391        return v7m_mrs_xpsr(env, reg, el);
2392    case 20: /* CONTROL */
2393        return v7m_mrs_control(env, env->v7m.secure);
2394    case 0x94: /* CONTROL_NS */
2395        /*
2396         * We have to handle this here because unprivileged Secure code
2397         * can read the NS CONTROL register.
2398         */
2399        if (!env->v7m.secure) {
2400            return 0;
2401        }
2402        return env->v7m.control[M_REG_NS] |
2403            (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK);
2404    }
2405
2406    if (el == 0) {
2407        return 0; /* unprivileged reads others as zero */
2408    }
2409
2410    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
2411        switch (reg) {
2412        case 0x88: /* MSP_NS */
2413            if (!env->v7m.secure) {
2414                return 0;
2415            }
2416            return env->v7m.other_ss_msp;
2417        case 0x89: /* PSP_NS */
2418            if (!env->v7m.secure) {
2419                return 0;
2420            }
2421            return env->v7m.other_ss_psp;
2422        case 0x8a: /* MSPLIM_NS */
2423            if (!env->v7m.secure) {
2424                return 0;
2425            }
2426            return env->v7m.msplim[M_REG_NS];
2427        case 0x8b: /* PSPLIM_NS */
2428            if (!env->v7m.secure) {
2429                return 0;
2430            }
2431            return env->v7m.psplim[M_REG_NS];
2432        case 0x90: /* PRIMASK_NS */
2433            if (!env->v7m.secure) {
2434                return 0;
2435            }
2436            return env->v7m.primask[M_REG_NS];
2437        case 0x91: /* BASEPRI_NS */
2438            if (!env->v7m.secure) {
2439                return 0;
2440            }
2441            return env->v7m.basepri[M_REG_NS];
2442        case 0x93: /* FAULTMASK_NS */
2443            if (!env->v7m.secure) {
2444                return 0;
2445            }
2446            return env->v7m.faultmask[M_REG_NS];
2447        case 0x98: /* SP_NS */
2448        {
2449            /*
2450             * This gives the non-secure SP selected based on whether we're
2451             * currently in handler mode or not, using the NS CONTROL.SPSEL.
2452             */
2453            bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
2454
2455            if (!env->v7m.secure) {
2456                return 0;
2457            }
2458            if (!arm_v7m_is_handler_mode(env) && spsel) {
2459                return env->v7m.other_ss_psp;
2460            } else {
2461                return env->v7m.other_ss_msp;
2462            }
2463        }
2464        default:
2465            break;
2466        }
2467    }
2468
2469    switch (reg) {
2470    case 8: /* MSP */
2471        return v7m_using_psp(env) ? env->v7m.other_sp : env->regs[13];
2472    case 9: /* PSP */
2473        return v7m_using_psp(env) ? env->regs[13] : env->v7m.other_sp;
2474    case 10: /* MSPLIM */
2475        if (!arm_feature(env, ARM_FEATURE_V8)) {
2476            goto bad_reg;
2477        }
2478        return env->v7m.msplim[env->v7m.secure];
2479    case 11: /* PSPLIM */
2480        if (!arm_feature(env, ARM_FEATURE_V8)) {
2481            goto bad_reg;
2482        }
2483        return env->v7m.psplim[env->v7m.secure];
2484    case 16: /* PRIMASK */
2485        return env->v7m.primask[env->v7m.secure];
2486    case 17: /* BASEPRI */
2487    case 18: /* BASEPRI_MAX */
2488        return env->v7m.basepri[env->v7m.secure];
2489    case 19: /* FAULTMASK */
2490        return env->v7m.faultmask[env->v7m.secure];
2491    default:
2492    bad_reg:
2493        qemu_log_mask(LOG_GUEST_ERROR, "Attempt to read unknown special"
2494                                       " register %d\n", reg);
2495        return 0;
2496    }
2497}
2498
2499void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
2500{
2501    /*
2502     * We're passed bits [11..0] of the instruction; extract
2503     * SYSm and the mask bits.
2504     * Invalid combinations of SYSm and mask are UNPREDICTABLE;
2505     * we choose to treat them as if the mask bits were valid.
2506     * NB that the pseudocode 'mask' variable is bits [11..10],
2507     * whereas ours is [11..8].
2508     */
2509    uint32_t mask = extract32(maskreg, 8, 4);
2510    uint32_t reg = extract32(maskreg, 0, 8);
2511    int cur_el = arm_current_el(env);
2512
2513    if (cur_el == 0 && reg > 7 && reg != 20) {
2514        /*
2515         * only xPSR sub-fields and CONTROL.SFPA may be written by
2516         * unprivileged code
2517         */
2518        return;
2519    }
2520
2521    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
2522        switch (reg) {
2523        case 0x88: /* MSP_NS */
2524            if (!env->v7m.secure) {
2525                return;
2526            }
2527            env->v7m.other_ss_msp = val;
2528            return;
2529        case 0x89: /* PSP_NS */
2530            if (!env->v7m.secure) {
2531                return;
2532            }
2533            env->v7m.other_ss_psp = val;
2534            return;
2535        case 0x8a: /* MSPLIM_NS */
2536            if (!env->v7m.secure) {
2537                return;
2538            }
2539            env->v7m.msplim[M_REG_NS] = val & ~7;
2540            return;
2541        case 0x8b: /* PSPLIM_NS */
2542            if (!env->v7m.secure) {
2543                return;
2544            }
2545            env->v7m.psplim[M_REG_NS] = val & ~7;
2546            return;
2547        case 0x90: /* PRIMASK_NS */
2548            if (!env->v7m.secure) {
2549                return;
2550            }
2551            env->v7m.primask[M_REG_NS] = val & 1;
2552            return;
2553        case 0x91: /* BASEPRI_NS */
2554            if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) {
2555                return;
2556            }
2557            env->v7m.basepri[M_REG_NS] = val & 0xff;
2558            return;
2559        case 0x93: /* FAULTMASK_NS */
2560            if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) {
2561                return;
2562            }
2563            env->v7m.faultmask[M_REG_NS] = val & 1;
2564            return;
2565        case 0x94: /* CONTROL_NS */
2566            if (!env->v7m.secure) {
2567                return;
2568            }
2569            write_v7m_control_spsel_for_secstate(env,
2570                                                 val & R_V7M_CONTROL_SPSEL_MASK,
2571                                                 M_REG_NS);
2572            if (arm_feature(env, ARM_FEATURE_M_MAIN)) {
2573                env->v7m.control[M_REG_NS] &= ~R_V7M_CONTROL_NPRIV_MASK;
2574                env->v7m.control[M_REG_NS] |= val & R_V7M_CONTROL_NPRIV_MASK;
2575            }
2576            /*
2577             * SFPA is RAZ/WI from NS. FPCA is RO if NSACR.CP10 == 0,
2578             * RES0 if the FPU is not present, and is stored in the S bank
2579             */
2580            if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env)) &&
2581                extract32(env->v7m.nsacr, 10, 1)) {
2582                env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
2583                env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_FPCA_MASK;
2584            }
2585            return;
2586        case 0x98: /* SP_NS */
2587        {
2588            /*
2589             * This gives the non-secure SP selected based on whether we're
2590             * currently in handler mode or not, using the NS CONTROL.SPSEL.
2591             */
2592            bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
2593            bool is_psp = !arm_v7m_is_handler_mode(env) && spsel;
2594            uint32_t limit;
2595
2596            if (!env->v7m.secure) {
2597                return;
2598            }
2599
2600            limit = is_psp ? env->v7m.psplim[false] : env->v7m.msplim[false];
2601
2602            if (val < limit) {
2603                CPUState *cs = env_cpu(env);
2604
2605                cpu_restore_state(cs, GETPC(), true);
2606                raise_exception(env, EXCP_STKOF, 0, 1);
2607            }
2608
2609            if (is_psp) {
2610                env->v7m.other_ss_psp = val;
2611            } else {
2612                env->v7m.other_ss_msp = val;
2613            }
2614            return;
2615        }
2616        default:
2617            break;
2618        }
2619    }
2620
2621    switch (reg) {
2622    case 0 ... 7: /* xPSR sub-fields */
2623        v7m_msr_xpsr(env, mask, reg, val);
2624        break;
2625    case 8: /* MSP */
2626        if (v7m_using_psp(env)) {
2627            env->v7m.other_sp = val;
2628        } else {
2629            env->regs[13] = val;
2630        }
2631        break;
2632    case 9: /* PSP */
2633        if (v7m_using_psp(env)) {
2634            env->regs[13] = val;
2635        } else {
2636            env->v7m.other_sp = val;
2637        }
2638        break;
2639    case 10: /* MSPLIM */
2640        if (!arm_feature(env, ARM_FEATURE_V8)) {
2641            goto bad_reg;
2642        }
2643        env->v7m.msplim[env->v7m.secure] = val & ~7;
2644        break;
2645    case 11: /* PSPLIM */
2646        if (!arm_feature(env, ARM_FEATURE_V8)) {
2647            goto bad_reg;
2648        }
2649        env->v7m.psplim[env->v7m.secure] = val & ~7;
2650        break;
2651    case 16: /* PRIMASK */
2652        env->v7m.primask[env->v7m.secure] = val & 1;
2653        break;
2654    case 17: /* BASEPRI */
2655        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2656            goto bad_reg;
2657        }
2658        env->v7m.basepri[env->v7m.secure] = val & 0xff;
2659        break;
2660    case 18: /* BASEPRI_MAX */
2661        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2662            goto bad_reg;
2663        }
2664        val &= 0xff;
2665        if (val != 0 && (val < env->v7m.basepri[env->v7m.secure]
2666                         || env->v7m.basepri[env->v7m.secure] == 0)) {
2667            env->v7m.basepri[env->v7m.secure] = val;
2668        }
2669        break;
2670    case 19: /* FAULTMASK */
2671        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2672            goto bad_reg;
2673        }
2674        env->v7m.faultmask[env->v7m.secure] = val & 1;
2675        break;
2676    case 20: /* CONTROL */
2677        /*
2678         * Writing to the SPSEL bit only has an effect if we are in
2679         * thread mode; other bits can be updated by any privileged code.
2680         * write_v7m_control_spsel() deals with updating the SPSEL bit in
2681         * env->v7m.control, so we only need update the others.
2682         * For v7M, we must just ignore explicit writes to SPSEL in handler
2683         * mode; for v8M the write is permitted but will have no effect.
2684         * All these bits are writes-ignored from non-privileged code,
2685         * except for SFPA.
2686         */
2687        if (cur_el > 0 && (arm_feature(env, ARM_FEATURE_V8) ||
2688                           !arm_v7m_is_handler_mode(env))) {
2689            write_v7m_control_spsel(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0);
2690        }
2691        if (cur_el > 0 && arm_feature(env, ARM_FEATURE_M_MAIN)) {
2692            env->v7m.control[env->v7m.secure] &= ~R_V7M_CONTROL_NPRIV_MASK;
2693            env->v7m.control[env->v7m.secure] |= val & R_V7M_CONTROL_NPRIV_MASK;
2694        }
2695        if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))) {
2696            /*
2697             * SFPA is RAZ/WI from NS or if no FPU.
2698             * FPCA is RO if NSACR.CP10 == 0, RES0 if the FPU is not present.
2699             * Both are stored in the S bank.
2700             */
2701            if (env->v7m.secure) {
2702                env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
2703                env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_SFPA_MASK;
2704            }
2705            if (cur_el > 0 &&
2706                (env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_SECURITY) ||
2707                 extract32(env->v7m.nsacr, 10, 1))) {
2708                env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
2709                env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_FPCA_MASK;
2710            }
2711        }
2712        break;
2713    default:
2714    bad_reg:
2715        qemu_log_mask(LOG_GUEST_ERROR, "Attempt to write unknown special"
2716                                       " register %d\n", reg);
2717        return;
2718    }
2719}
2720
2721uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
2722{
2723    /* Implement the TT instruction. op is bits [7:6] of the insn. */
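    /*
     * The four op encodings select the insn variant: 0 is TT, 1 is TTT
     * (force an unprivileged lookup), 2 is TTA (query the alternate
     * security state), 3 is TTAT (both).
     */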
2724    bool forceunpriv = op & 1;
2725    bool alt = op & 2;
2726    V8M_SAttributes sattrs = {};
2727    uint32_t tt_resp;
2728    bool r, rw, nsr, nsrw, mrvalid;
2729    int prot;
2730    ARMMMUFaultInfo fi = {};
2731    MemTxAttrs attrs = {};
2732    hwaddr phys_addr;
2733    ARMMMUIdx mmu_idx;
2734    uint32_t mregion;
2735    bool targetpriv;
2736    bool targetsec = env->v7m.secure;
2737    bool is_subpage;
2738
2739    /*
2740     * Work out which security state and privilege level we're
2741     * interested in...
2742     */
2743    if (alt) {
2744        targetsec = !targetsec;
2745    }
2746
2747    if (forceunpriv) {
2748        targetpriv = false;
2749    } else {
2750        targetpriv = arm_v7m_is_handler_mode(env) ||
2751            !(env->v7m.control[targetsec] & R_V7M_CONTROL_NPRIV_MASK);
2752    }
2753
2754    /* ...and then figure out which MMU index this is */
2755    mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targetsec, targetpriv);
2756
2757    /*
2758     * We know that the MPU and SAU don't care about the access type
2759     * for our purposes beyond that we don't want to claim to be
2760     * an insn fetch, so we arbitrarily call this a read.
2761     */
2762
2763    /*
2764     * MPU region info only available for privileged or if
2765     * inspecting the other MPU state.
2766     */
2767    if (arm_current_el(env) != 0 || alt) {
2768        /* We can ignore the return value as prot is always set */
2769        pmsav8_mpu_lookup(env, addr, MMU_DATA_LOAD, mmu_idx,
2770                          &phys_addr, &attrs, &prot, &is_subpage,
2771                          &fi, &mregion);
2772        if (mregion == -1) {
2773            mrvalid = false;
2774            mregion = 0;
2775        } else {
2776            mrvalid = true;
2777        }
2778        r = prot & PAGE_READ;
2779        rw = prot & PAGE_WRITE;
2780    } else {
2781        r = false;
2782        rw = false;
2783        mrvalid = false;
2784        mregion = 0;
2785    }
2786
2787    if (env->v7m.secure) {
2788        v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
2789        nsr = sattrs.ns && r;
2790        nsrw = sattrs.ns && rw;
2791    } else {
2792        sattrs.ns = true;
2793        nsr = false;
2794        nsrw = false;
2795    }
2796
2797    tt_resp = (sattrs.iregion << 24) |
2798        (sattrs.irvalid << 23) |
2799        ((!sattrs.ns) << 22) |
2800        (nsrw << 21) |
2801        (nsr << 20) |
2802        (rw << 19) |
2803        (r << 18) |
2804        (sattrs.srvalid << 17) |
2805        (mrvalid << 16) |
2806        (sattrs.sregion << 8) |
2807        mregion;
2808
2809    return tt_resp;
2810}
2811
2812#endif /* !CONFIG_USER_ONLY */
2813
2814ARMMMUIdx arm_v7m_mmu_idx_all(CPUARMState *env,
2815                              bool secstate, bool priv, bool negpri)
2816{
2817    ARMMMUIdx mmu_idx = ARM_MMU_IDX_M;
2818
2819    if (priv) {
2820        mmu_idx |= ARM_MMU_IDX_M_PRIV;
2821    }
2822
2823    if (negpri) {
2824        mmu_idx |= ARM_MMU_IDX_M_NEGPRI;
2825    }
2826
2827    if (secstate) {
2828        mmu_idx |= ARM_MMU_IDX_M_S;
2829    }
2830
2831    return mmu_idx;
2832}
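
/*
 * The eight combinations produced here correspond one-to-one to the
 * ARMMMUIdx_M* indexes (MUser, MPriv, MUserNegPri, MPrivNegPri, and the
 * MS-prefixed Secure versions): the three flags ORed in above are exactly
 * the bits that distinguish them.
 */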
2833
2834ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
2835                                                bool secstate, bool priv)
2836{
2837    bool negpri = armv7m_nvic_neg_prio_requested(env->nvic, secstate);
2838
2839    return arm_v7m_mmu_idx_all(env, secstate, priv, negpri);
2840}
2841
2842/* Return the MMU index for a v7M CPU in the specified security state */
2843ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
2844{
2845    bool priv = arm_v7m_is_handler_mode(env) ||
2846        !(env->v7m.control[secstate] & 1);
2847
2848    return arm_v7m_mmu_idx_for_secstate_and_priv(env, secstate, priv);
2849}
2850