qemu/target/arm/m_helper.c
/*
 * ARM generic helpers.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "target/arm/idau.h"
#include "trace.h"
#include "cpu.h"
#include "internals.h"
#include "exec/gdbstub.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "qemu/main-loop.h"
#include "qemu/bitops.h"
#include "qemu/crc32c.h"
#include "qemu/qemu-print.h"
#include "qemu/log.h"
#include "exec/exec-all.h"
#include <zlib.h> /* For crc32 */
#include "semihosting/semihost.h"
#include "sysemu/cpus.h"
#include "sysemu/kvm.h"
#include "qemu/range.h"
#include "qapi/qapi-commands-machine-target.h"
#include "qapi/error.h"
#include "qemu/guest-random.h"
#ifdef CONFIG_TCG
#include "arm_ldst.h"
#include "exec/cpu_ldst.h"
#include "semihosting/common-semi.h"
#endif

static void v7m_msr_xpsr(CPUARMState *env, uint32_t mask,
                         uint32_t reg, uint32_t val)
{
    /* Only APSR is actually writable */
    if (!(reg & 4)) {
        uint32_t apsrmask = 0;

        if (mask & 8) {
            apsrmask |= XPSR_NZCV | XPSR_Q;
        }
        if ((mask & 4) && arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
            apsrmask |= XPSR_GE;
        }
        xpsr_write(env, val, apsrmask);
    }
}

static uint32_t v7m_mrs_xpsr(CPUARMState *env, uint32_t reg, unsigned el)
{
    uint32_t mask = 0;

    if ((reg & 1) && el) {
        mask |= XPSR_EXCP; /* IPSR (unpriv. reads as zero) */
    }
    if (!(reg & 4)) {
        mask |= XPSR_NZCV | XPSR_Q; /* APSR */
        if (arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
            mask |= XPSR_GE;
        }
    }
    /* EPSR reads as zero */
    return xpsr_read(env) & mask;
}
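
/*
 * Illustrative summary of the SYSm encodings decoded by the two xPSR
 * helpers above (an editorial sketch derived from the reg bit tests;
 * the mnemonics are the architectural MRS/MSR special-register names):
 *
 *   reg 0 APSR  : !(reg & 4) -> NZCVQ fields (plus GE if THUMB_DSP)
 *   reg 1 IAPSR : APSR fields, plus IPSR when read privileged
 *   reg 2 EAPSR : APSR fields only (EPSR part reads as zero)
 *   reg 3 XPSR  : APSR and IPSR fields
 *   reg 5 IPSR  : reg & 4 -> no APSR fields; unprivileged reads zero
 *   reg 6 EPSR  : reads as zero
 *   reg 7 IEPSR : IPSR only (EPSR part reads as zero)
 */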

static uint32_t v7m_mrs_control(CPUARMState *env, uint32_t secure)
{
    uint32_t value = env->v7m.control[secure];

    if (!secure) {
        /* SFPA is RAZ/WI from NS; FPCA is stored in the M_REG_S bank */
        value |= env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK;
    }
    return value;
}

#ifdef CONFIG_USER_ONLY

void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
{
    uint32_t mask = extract32(maskreg, 8, 4);
    uint32_t reg = extract32(maskreg, 0, 8);

    switch (reg) {
    case 0 ... 7: /* xPSR sub-fields */
        v7m_msr_xpsr(env, mask, reg, val);
        break;
    case 20: /* CONTROL */
        /* There are no sub-fields that are actually writable from EL0. */
        break;
    default:
        /* Unprivileged writes to other registers are ignored */
        break;
    }
}

uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    switch (reg) {
    case 0 ... 7: /* xPSR sub-fields */
        return v7m_mrs_xpsr(env, reg, 0);
    case 20: /* CONTROL */
        return v7m_mrs_control(env, 0);
    default:
        /* Unprivileged reads of other registers return zero. */
        return 0;
    }
}

void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
{
    /*
     * The TT instructions can be used by unprivileged code, but in
     * user-only emulation we don't have the MPU.
     * Luckily since we know we are NonSecure unprivileged (and that in
     * turn means that the A flag wasn't specified), all the bits in the
     * register must be zero:
     *  IREGION: 0 because IRVALID is 0
     *  IRVALID: 0 because NS
     *  S: 0 because NS
     *  NSRW: 0 because NS
     *  NSR: 0 because NS
     *  RW: 0 because unpriv and A flag not set
     *  R: 0 because unpriv and A flag not set
     *  SRVALID: 0 because NS
     *  MRVALID: 0 because unpriv and A flag not set
     *  SREGION: 0 because SRVALID is 0
     *  MREGION: 0 because MRVALID is 0
     */
    return 0;
}

#else

/*
 * What kind of stack write are we doing? This affects how exceptions
 * generated during the stacking are treated.
 */
typedef enum StackingMode {
    STACK_NORMAL,
    STACK_IGNFAULTS,
    STACK_LAZYFP,
} StackingMode;

static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value,
                            ARMMMUIdx mmu_idx, StackingMode mode)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult txres;
    target_ulong page_size;
    hwaddr physaddr;
    int prot;
    ARMMMUFaultInfo fi = {};
    ARMCacheAttrs cacheattrs = {};
    bool secure = mmu_idx & ARM_MMU_IDX_M_S;
    int exc;
    bool exc_secure;

    if (get_phys_addr(env, addr, MMU_DATA_STORE, mmu_idx, &physaddr,
                      &attrs, &prot, &page_size, &fi, &cacheattrs)) {
        /* MPU/SAU lookup failed */
        if (fi.type == ARMFault_QEMU_SFault) {
            if (mode == STACK_LAZYFP) {
                qemu_log_mask(CPU_LOG_INT,
                              "...SecureFault with SFSR.LSPERR "
                              "during lazy stacking\n");
                env->v7m.sfsr |= R_V7M_SFSR_LSPERR_MASK;
            } else {
                qemu_log_mask(CPU_LOG_INT,
                              "...SecureFault with SFSR.AUVIOL "
                              "during stacking\n");
                env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
            }
            env->v7m.sfsr |= R_V7M_SFSR_SFARVALID_MASK;
            env->v7m.sfar = addr;
            exc = ARMV7M_EXCP_SECURE;
            exc_secure = false;
        } else {
            if (mode == STACK_LAZYFP) {
                qemu_log_mask(CPU_LOG_INT,
                              "...MemManageFault with CFSR.MLSPERR\n");
                env->v7m.cfsr[secure] |= R_V7M_CFSR_MLSPERR_MASK;
            } else {
                qemu_log_mask(CPU_LOG_INT,
                              "...MemManageFault with CFSR.MSTKERR\n");
                env->v7m.cfsr[secure] |= R_V7M_CFSR_MSTKERR_MASK;
            }
            exc = ARMV7M_EXCP_MEM;
            exc_secure = secure;
        }
        goto pend_fault;
    }
    address_space_stl_le(arm_addressspace(cs, attrs), physaddr, value,
                         attrs, &txres);
    if (txres != MEMTX_OK) {
        /* BusFault trying to write the data */
        if (mode == STACK_LAZYFP) {
            qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.LSPERR\n");
            env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_LSPERR_MASK;
        } else {
            qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.STKERR\n");
            env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_STKERR_MASK;
        }
        exc = ARMV7M_EXCP_BUS;
        exc_secure = false;
        goto pend_fault;
    }
    return true;

pend_fault:
    /*
     * By pending the exception at this point we are making
     * the IMPDEF choice "overridden exceptions pended" (see the
     * MergeExcInfo() pseudocode). The other choice would be to not
     * pend them now and then make a choice about which to throw away
     * later if we have two derived exceptions.
     * The only case when we must not pend the exception but instead
     * throw it away is if we are doing the push of the callee registers
     * and we've already generated a derived exception (this is indicated
     * by the caller passing STACK_IGNFAULTS). Even in this case we will
     * still update the fault status registers.
     */
    switch (mode) {
    case STACK_NORMAL:
        armv7m_nvic_set_pending_derived(env->nvic, exc, exc_secure);
        break;
    case STACK_LAZYFP:
        armv7m_nvic_set_pending_lazyfp(env->nvic, exc, exc_secure);
        break;
    case STACK_IGNFAULTS:
        break;
    }
    return false;
}
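
/*
 * Callers accumulate the result of several writes by chaining through a
 * stacked_ok flag, so the first failing write pends its fault and
 * short-circuits the remaining stores. An illustrative fragment
 * (mirroring the pattern used in v7m_push_stack() below):
 *
 *   stacked_ok = stacked_ok &&
 *       v7m_stack_write(cpu, frameptr, env->regs[0],
 *                       mmu_idx, STACK_NORMAL) &&
 *       v7m_stack_write(cpu, frameptr + 4, env->regs[1],
 *                       mmu_idx, STACK_NORMAL);
 */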

static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr,
                           ARMMMUIdx mmu_idx)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult txres;
    target_ulong page_size;
    hwaddr physaddr;
    int prot;
    ARMMMUFaultInfo fi = {};
    ARMCacheAttrs cacheattrs = {};
    bool secure = mmu_idx & ARM_MMU_IDX_M_S;
    int exc;
    bool exc_secure;
    uint32_t value;

    if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &physaddr,
                      &attrs, &prot, &page_size, &fi, &cacheattrs)) {
        /* MPU/SAU lookup failed */
        if (fi.type == ARMFault_QEMU_SFault) {
            qemu_log_mask(CPU_LOG_INT,
                          "...SecureFault with SFSR.AUVIOL during unstack\n");
            env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
            env->v7m.sfar = addr;
            exc = ARMV7M_EXCP_SECURE;
            exc_secure = false;
        } else {
            qemu_log_mask(CPU_LOG_INT,
                          "...MemManageFault with CFSR.MUNSTKERR\n");
            env->v7m.cfsr[secure] |= R_V7M_CFSR_MUNSTKERR_MASK;
            exc = ARMV7M_EXCP_MEM;
            exc_secure = secure;
        }
        goto pend_fault;
    }

    value = address_space_ldl(arm_addressspace(cs, attrs), physaddr,
                              attrs, &txres);
    if (txres != MEMTX_OK) {
        /* BusFault trying to read the data */
        qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.UNSTKERR\n");
        env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_UNSTKERR_MASK;
        exc = ARMV7M_EXCP_BUS;
        exc_secure = false;
        goto pend_fault;
    }

    *dest = value;
    return true;

pend_fault:
    /*
     * By pending the exception at this point we are making
     * the IMPDEF choice "overridden exceptions pended" (see the
     * MergeExcInfo() pseudocode). The other choice would be to not
     * pend them now and then make a choice about which to throw away
     * later if we have two derived exceptions.
     */
    armv7m_nvic_set_pending(env->nvic, exc, exc_secure);
    return false;
}

void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
{
    /*
     * Preserve FP state (because LSPACT was set and we are about
     * to execute an FP instruction). This corresponds to the
     * PreserveFPState() pseudocode.
     * We may throw an exception if the stacking fails.
     */
    ARMCPU *cpu = env_archcpu(env);
    bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
    bool negpri = !(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_HFRDY_MASK);
    bool is_priv = !(env->v7m.fpccr[is_secure] & R_V7M_FPCCR_USER_MASK);
    bool splimviol = env->v7m.fpccr[is_secure] & R_V7M_FPCCR_SPLIMVIOL_MASK;
    uint32_t fpcar = env->v7m.fpcar[is_secure];
    bool stacked_ok = true;
    bool ts = is_secure && (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK);
    bool take_exception;

    /* Take the iothread lock as we are going to touch the NVIC */
    qemu_mutex_lock_iothread();

    /* Check the background context had access to the FPU */
    if (!v7m_cpacr_pass(env, is_secure, is_priv)) {
        armv7m_nvic_set_pending_lazyfp(env->nvic, ARMV7M_EXCP_USAGE, is_secure);
        env->v7m.cfsr[is_secure] |= R_V7M_CFSR_NOCP_MASK;
        stacked_ok = false;
    } else if (!is_secure && !extract32(env->v7m.nsacr, 10, 1)) {
        armv7m_nvic_set_pending_lazyfp(env->nvic, ARMV7M_EXCP_USAGE, M_REG_S);
        env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
        stacked_ok = false;
    }

    if (!splimviol && stacked_ok) {
        /* We only stack if the stack limit wasn't violated */
        int i;
        ARMMMUIdx mmu_idx;

        mmu_idx = arm_v7m_mmu_idx_all(env, is_secure, is_priv, negpri);
        for (i = 0; i < (ts ? 32 : 16); i += 2) {
            uint64_t dn = *aa32_vfp_dreg(env, i / 2);
            uint32_t faddr = fpcar + 4 * i;
            uint32_t slo = extract64(dn, 0, 32);
            uint32_t shi = extract64(dn, 32, 32);

            if (i >= 16) {
                faddr += 8; /* skip the slot for the FPSCR/VPR */
            }
            stacked_ok = stacked_ok &&
                v7m_stack_write(cpu, faddr, slo, mmu_idx, STACK_LAZYFP) &&
                v7m_stack_write(cpu, faddr + 4, shi, mmu_idx, STACK_LAZYFP);
        }

        stacked_ok = stacked_ok &&
            v7m_stack_write(cpu, fpcar + 0x40,
                            vfp_get_fpscr(env), mmu_idx, STACK_LAZYFP);
        if (cpu_isar_feature(aa32_mve, cpu)) {
            stacked_ok = stacked_ok &&
                v7m_stack_write(cpu, fpcar + 0x44,
                                env->v7m.vpr, mmu_idx, STACK_LAZYFP);
        }
    }

    /*
     * We definitely pended an exception, but it's possible that it
     * might not be able to be taken now. If its priority permits us
     * to take it now, then we must not update the LSPACT or FP regs,
     * but instead jump out to take the exception immediately.
     * If it's just pending and won't be taken until the current
     * handler exits, then we do update LSPACT and the FP regs.
     */
    take_exception = !stacked_ok &&
        armv7m_nvic_can_take_pending_exception(env->nvic);

    qemu_mutex_unlock_iothread();

    if (take_exception) {
        raise_exception_ra(env, EXCP_LAZYFP, 0, 1, GETPC());
    }

    env->v7m.fpccr[is_secure] &= ~R_V7M_FPCCR_LSPACT_MASK;

    if (ts) {
        /* Clear s0 to s31 and the FPSCR and VPR */
        int i;

        for (i = 0; i < 32; i += 2) {
            *aa32_vfp_dreg(env, i / 2) = 0;
        }
        vfp_set_fpscr(env, 0);
        if (cpu_isar_feature(aa32_mve, cpu)) {
            env->v7m.vpr = 0;
        }
    }
    /*
     * Otherwise s0 to s15, FPSCR and VPR are UNKNOWN; we choose to leave them
     * unchanged.
     */
}
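
/*
 * Guest-visible trigger for the helper above (an illustrative sketch,
 * not emulation code): lazy state preservation is armed at exception
 * entry by v7m_update_fpccr() and resolved by the first FP instruction
 * the handler executes, e.g.:
 *
 *   ; handler entry left FPCCR.LSPACT = 1, FPCAR -> reserved frame space
 *   vadd.f32 s0, s1, s2   ; first FP insn -> v7m_preserve_fp_state()
 *                         ; writes s0..s15 (s16..s31 too if FPCCR.TS)
 *                         ; to the FPCAR frame, then clears LSPACT
 */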

/*
 * Write to v7M CONTROL.SPSEL bit for the specified security bank.
 * This may change the current stack pointer between Main and Process
 * stack pointers if it is done for the CONTROL register for the current
 * security state.
 */
static void write_v7m_control_spsel_for_secstate(CPUARMState *env,
                                                 bool new_spsel,
                                                 bool secstate)
{
    bool old_is_psp = v7m_using_psp(env);

    env->v7m.control[secstate] =
        deposit32(env->v7m.control[secstate],
                  R_V7M_CONTROL_SPSEL_SHIFT,
                  R_V7M_CONTROL_SPSEL_LENGTH, new_spsel);

    if (secstate == env->v7m.secure) {
        bool new_is_psp = v7m_using_psp(env);
        uint32_t tmp;

        if (old_is_psp != new_is_psp) {
            tmp = env->v7m.other_sp;
            env->v7m.other_sp = env->regs[13];
            env->regs[13] = tmp;
        }
    }
}

/*
 * Write to v7M CONTROL.SPSEL bit. This may change the current
 * stack pointer between Main and Process stack pointers.
 */
static void write_v7m_control_spsel(CPUARMState *env, bool new_spsel)
{
    write_v7m_control_spsel_for_secstate(env, new_spsel, env->v7m.secure);
}

void write_v7m_exception(CPUARMState *env, uint32_t new_exc)
{
    /*
     * Write a new value to v7m.exception, thus transitioning into or out
     * of Handler mode; this may result in a change of active stack pointer.
     */
    bool new_is_psp, old_is_psp = v7m_using_psp(env);
    uint32_t tmp;

    env->v7m.exception = new_exc;

    new_is_psp = v7m_using_psp(env);

    if (old_is_psp != new_is_psp) {
        tmp = env->v7m.other_sp;
        env->v7m.other_sp = env->regs[13];
        env->regs[13] = tmp;
    }
}

/* Switch M profile security state between NS and S */
static void switch_v7m_security_state(CPUARMState *env, bool new_secstate)
{
    uint32_t new_ss_msp, new_ss_psp;

    if (env->v7m.secure == new_secstate) {
        return;
    }

    /*
     * All the banked state is accessed by looking at env->v7m.secure
     * except for the stack pointer; rearrange the SP appropriately.
     */
    new_ss_msp = env->v7m.other_ss_msp;
    new_ss_psp = env->v7m.other_ss_psp;

    if (v7m_using_psp(env)) {
        env->v7m.other_ss_psp = env->regs[13];
        env->v7m.other_ss_msp = env->v7m.other_sp;
    } else {
        env->v7m.other_ss_msp = env->regs[13];
        env->v7m.other_ss_psp = env->v7m.other_sp;
    }

    env->v7m.secure = new_secstate;

    if (v7m_using_psp(env)) {
        env->regs[13] = new_ss_psp;
        env->v7m.other_sp = new_ss_msp;
    } else {
        env->regs[13] = new_ss_msp;
        env->v7m.other_sp = new_ss_psp;
    }
}
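
/*
 * Worked example for the SP shuffle above (illustrative): suppose we
 * are Secure on MSP_S and switch to NonSecure where Thread mode with
 * CONTROL_NS.SPSEL set selects PSP_NS. Then:
 *
 *   env->v7m.other_ss_msp <- env->regs[13]      (park outgoing MSP_S)
 *   env->v7m.other_ss_psp <- env->v7m.other_sp  (park outgoing PSP_S)
 *   env->regs[13]         <- new_ss_psp         (incoming PSP_NS current)
 *   env->v7m.other_sp     <- new_ss_msp         (incoming MSP_NS parked)
 */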

void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
{
    /*
     * Handle v7M BXNS:
     *  - if the return value is a magic value, do exception return (like BX)
     *  - otherwise bit 0 of the return value is the target security state
     */
    uint32_t min_magic;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        /* Covers FNC_RETURN and EXC_RETURN magic */
        min_magic = FNC_RETURN_MIN_MAGIC;
    } else {
        /* EXC_RETURN magic only */
        min_magic = EXC_RETURN_MIN_MAGIC;
    }

    if (dest >= min_magic) {
        /*
         * This is an exception return magic value; put it where
         * do_v7m_exception_exit() expects and raise EXCEPTION_EXIT.
         * Note that if we ever add gen_ss_advance() singlestep support to
         * M profile this should count as an "instruction execution complete"
         * event (compare gen_bx_excret_final_code()).
         */
        env->regs[15] = dest & ~1;
        env->thumb = dest & 1;
        HELPER(exception_internal)(env, EXCP_EXCEPTION_EXIT);
        /* notreached */
    }

    /* translate.c should have made BXNS UNDEF unless we're secure */
    assert(env->v7m.secure);

    if (!(dest & 1)) {
        env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
    }
    switch_v7m_security_state(env, dest & 1);
    env->thumb = true;
    env->regs[15] = dest & ~1;
    arm_rebuild_hflags(env);
}

void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
{
    /*
     * Handle v7M BLXNS:
     *  - bit 0 of the destination address is the target security state
     */

    /* At this point regs[15] is the address just after the BLXNS */
    uint32_t nextinst = env->regs[15] | 1;
    uint32_t sp = env->regs[13] - 8;
    uint32_t saved_psr;

    /* translate.c will have made BLXNS UNDEF unless we're secure */
    assert(env->v7m.secure);

    if (dest & 1) {
        /*
         * Target is Secure, so this is just a normal BLX,
         * except that the low bit doesn't indicate Thumb/not.
         */
        env->regs[14] = nextinst;
        env->thumb = true;
        env->regs[15] = dest & ~1;
        return;
    }

    /* Target is non-secure: first push a stack frame */
    if (!QEMU_IS_ALIGNED(sp, 8)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "BLXNS with misaligned SP is UNPREDICTABLE\n");
    }

    if (sp < v7m_sp_limit(env)) {
        raise_exception(env, EXCP_STKOF, 0, 1);
    }

    saved_psr = env->v7m.exception;
    if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK) {
        saved_psr |= XPSR_SFPA;
    }

    /* Note that these stores can throw exceptions on MPU faults */
    cpu_stl_data_ra(env, sp, nextinst, GETPC());
    cpu_stl_data_ra(env, sp + 4, saved_psr, GETPC());

    env->regs[13] = sp;
    env->regs[14] = 0xfeffffff;
    if (arm_v7m_is_handler_mode(env)) {
        /*
         * Write a dummy value to IPSR, to avoid leaking the current secure
         * exception number to non-secure code. This is guaranteed not
         * to cause write_v7m_exception() to actually change stacks.
         */
        write_v7m_exception(env, 1);
    }
    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
    switch_v7m_security_state(env, 0);
    env->thumb = true;
    env->regs[15] = dest;
    arm_rebuild_hflags(env);
}

static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode,
                                bool spsel)
{
    /*
     * Return a pointer to the location where we currently store the
     * stack pointer for the requested security state and thread mode.
     * This pointer will become invalid if the CPU state is updated
     * such that the stack pointers are switched around (eg changing
     * the SPSEL control bit).
     * Compare the v8M ARM ARM pseudocode LookUpSP_with_security_mode().
     * Unlike that pseudocode, we require the caller to pass us in the
     * SPSEL control bit value; this is because we also use this
     * function in handling of pushing of the callee-saves registers
     * part of the v8M stack frame (pseudocode PushCalleeStack()),
     * and in the tailchain codepath the SPSEL bit comes from the exception
     * return magic LR value from the previous exception. The pseudocode
     * opencodes the stack-selection in PushCalleeStack(), but we prefer
     * to make this utility function generic enough to do the job.
     */
    bool want_psp = threadmode && spsel;

    if (secure == env->v7m.secure) {
        if (want_psp == v7m_using_psp(env)) {
            return &env->regs[13];
        } else {
            return &env->v7m.other_sp;
        }
    } else {
        if (want_psp) {
            return &env->v7m.other_ss_psp;
        } else {
            return &env->v7m.other_ss_msp;
        }
    }
}
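
/*
 * Selection summary for get_v7m_sp_ptr() (illustrative):
 *
 *   requested secure == current?  want_psp?              returns
 *   yes                           == v7m_using_psp(env)  &env->regs[13]
 *   yes                           != v7m_using_psp(env)  &env->v7m.other_sp
 *   no                            true                   &env->v7m.other_ss_psp
 *   no                            false                  &env->v7m.other_ss_msp
 */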

static bool arm_v7m_load_vector(ARMCPU *cpu, int exc, bool targets_secure,
                                uint32_t *pvec)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxResult result;
    uint32_t addr = env->v7m.vecbase[targets_secure] + exc * 4;
    uint32_t vector_entry;
    MemTxAttrs attrs = {};
    ARMMMUIdx mmu_idx;
    bool exc_secure;

    qemu_log_mask(CPU_LOG_INT,
                  "...loading from element %d of %s vector table at 0x%x\n",
                  exc, targets_secure ? "secure" : "non-secure", addr);

    mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targets_secure, true);

    /*
     * We don't do a get_phys_addr() here because the rules for vector
     * loads are special: they always use the default memory map, and
     * the default memory map permits reads from all addresses.
     * Since there's no easy way to pass through to pmsav8_mpu_lookup()
     * that we want this special case which would always say "yes",
     * we just do the SAU lookup here followed by a direct physical load.
     */
    attrs.secure = targets_secure;
    attrs.user = false;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        V8M_SAttributes sattrs = {};

        v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
        if (sattrs.ns) {
            attrs.secure = false;
        } else if (!targets_secure) {
            /*
             * NS access to S memory: the underlying exception which we escalate
             * to HardFault is SecureFault, which always targets Secure.
             */
            exc_secure = true;
            goto load_fail;
        }
    }

    vector_entry = address_space_ldl(arm_addressspace(cs, attrs), addr,
                                     attrs, &result);
    if (result != MEMTX_OK) {
        /*
         * Underlying exception is BusFault: its target security state
         * depends on BFHFNMINS.
         */
        exc_secure = !(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK);
        goto load_fail;
    }
    *pvec = vector_entry;
    qemu_log_mask(CPU_LOG_INT, "...loaded new PC 0x%x\n", *pvec);
    return true;

load_fail:
    /*
     * All vector table fetch fails are reported as HardFault, with
     * HFSR.VECTTBL and .FORCED set. (FORCED is set because
     * technically the underlying exception is a SecureFault or BusFault
     * that is escalated to HardFault.) This is a terminal exception,
     * so we will either take the HardFault immediately or else enter
     * lockup (the latter case is handled in armv7m_nvic_set_pending_derived()).
     * The HardFault is Secure if BFHFNMINS is 0 (meaning that all HFs are
     * secure); otherwise it targets the same security state as the
     * underlying exception.
     * In v8.1M HardFaults from vector table fetch fails don't set FORCED.
     */
    if (!(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
        exc_secure = true;
    }
    env->v7m.hfsr |= R_V7M_HFSR_VECTTBL_MASK;
    if (!arm_feature(env, ARM_FEATURE_V8_1M)) {
        env->v7m.hfsr |= R_V7M_HFSR_FORCED_MASK;
    }
    armv7m_nvic_set_pending_derived(env->nvic, ARMV7M_EXCP_HARD, exc_secure);
    return false;
}

static uint32_t v7m_integrity_sig(CPUARMState *env, uint32_t lr)
{
    /*
     * Return the integrity signature value for the callee-saves
     * stack frame section. @lr is the exception return payload/LR value
     * whose FType bit forms bit 0 of the signature if FP is present.
     */
    uint32_t sig = 0xfefa125a;

    if (!cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))
        || (lr & R_V7M_EXCRET_FTYPE_MASK)) {
        sig |= 1;
    }
    return sig;
}
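
/*
 * Concretely (illustrative): v7m_integrity_sig() yields 0xfefa125b when
 * the frame has no FP state section (no FPU, or EXCRET.FType set) and
 * 0xfefa125a when FP state is present, matching the architected
 * integrity signature values checked on exception return.
 */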

static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
                                  bool ignore_faults)
{
    /*
     * For v8M, push the callee-saves register part of the stack frame.
     * Compare the v8M pseudocode PushCalleeStack().
     * In the tailchaining case this may not be the current stack.
     */
    CPUARMState *env = &cpu->env;
    uint32_t *frame_sp_p;
    uint32_t frameptr;
    ARMMMUIdx mmu_idx;
    bool stacked_ok;
    uint32_t limit;
    bool want_psp;
    uint32_t sig;
    StackingMode smode = ignore_faults ? STACK_IGNFAULTS : STACK_NORMAL;

    if (dotailchain) {
        bool mode = lr & R_V7M_EXCRET_MODE_MASK;
        bool priv = !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_NPRIV_MASK) ||
            !mode;

        mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, M_REG_S, priv);
        frame_sp_p = get_v7m_sp_ptr(env, M_REG_S, mode,
                                    lr & R_V7M_EXCRET_SPSEL_MASK);
        want_psp = mode && (lr & R_V7M_EXCRET_SPSEL_MASK);
        if (want_psp) {
            limit = env->v7m.psplim[M_REG_S];
        } else {
            limit = env->v7m.msplim[M_REG_S];
        }
    } else {
        mmu_idx = arm_mmu_idx(env);
        frame_sp_p = &env->regs[13];
        limit = v7m_sp_limit(env);
    }

    frameptr = *frame_sp_p - 0x28;
    if (frameptr < limit) {
        /*
         * Stack limit failure: set SP to the limit value, and generate
         * STKOF UsageFault. Stack pushes below the limit must not be
         * performed. It is IMPDEF whether pushes above the limit are
         * performed; we choose not to.
         */
        qemu_log_mask(CPU_LOG_INT,
                      "...STKOF during callee-saves register stacking\n");
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                env->v7m.secure);
        *frame_sp_p = limit;
        return true;
    }

    /*
     * Write as much of the stack frame as we can. A write failure may
     * cause us to pend a derived exception.
     */
    sig = v7m_integrity_sig(env, lr);
    stacked_ok =
        v7m_stack_write(cpu, frameptr, sig, mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x8, env->regs[4], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0xc, env->regs[5], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x10, env->regs[6], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x14, env->regs[7], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x18, env->regs[8], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x1c, env->regs[9], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x20, env->regs[10], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x24, env->regs[11], mmu_idx, smode);

    /* Update SP regardless of whether any of the stack accesses failed. */
    *frame_sp_p = frameptr;

    return !stacked_ok;
}
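
/*
 * Callee-saves frame written above (offsets from frameptr, 0x28 bytes;
 * an illustrative summary of the stores):
 *
 *   0x00 integrity sig        0x10 r6    0x20 r10
 *   0x04 (reserved, skipped)  0x14 r7    0x24 r11
 *   0x08 r4                   0x18 r8
 *   0x0c r5                   0x1c r9
 */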

static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
                                bool ignore_stackfaults)
{
    /*
     * Do the "take the exception" parts of exception entry,
     * but not the pushing of state to the stack. This is
     * similar to the pseudocode ExceptionTaken() function.
     */
    CPUARMState *env = &cpu->env;
    uint32_t addr;
    bool targets_secure;
    int exc;
    bool push_failed = false;

    armv7m_nvic_get_pending_irq_info(env->nvic, &exc, &targets_secure);
    qemu_log_mask(CPU_LOG_INT, "...taking pending %s exception %d\n",
                  targets_secure ? "secure" : "nonsecure", exc);

    if (dotailchain) {
        /* Sanitize LR FType and PREFIX bits */
        if (!cpu_isar_feature(aa32_vfp_simd, cpu)) {
            lr |= R_V7M_EXCRET_FTYPE_MASK;
        }
        lr = deposit32(lr, 24, 8, 0xff);
    }

    if (arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
            (lr & R_V7M_EXCRET_S_MASK)) {
            /*
             * The background code (the owner of the registers in the
             * exception frame) is Secure. This means it may either already
             * have or now needs to push callee-saves registers.
             */
            if (targets_secure) {
                if (dotailchain && !(lr & R_V7M_EXCRET_ES_MASK)) {
                    /*
                     * We took an exception from Secure to NonSecure
                     * (which means the callee-saved registers got stacked)
                     * and are now tailchaining to a Secure exception.
                     * Clear DCRS so eventual return from this Secure
                     * exception unstacks the callee-saved registers.
                     */
                    lr &= ~R_V7M_EXCRET_DCRS_MASK;
                }
            } else {
                /*
                 * We're going to a non-secure exception; push the
                 * callee-saves registers to the stack now, if they're
                 * not already saved.
                 */
                if (lr & R_V7M_EXCRET_DCRS_MASK &&
                    !(dotailchain && !(lr & R_V7M_EXCRET_ES_MASK))) {
                    push_failed = v7m_push_callee_stack(cpu, lr, dotailchain,
                                                        ignore_stackfaults);
                }
                lr |= R_V7M_EXCRET_DCRS_MASK;
            }
        }

        lr &= ~R_V7M_EXCRET_ES_MASK;
        if (targets_secure || !arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            lr |= R_V7M_EXCRET_ES_MASK;
        }
        lr &= ~R_V7M_EXCRET_SPSEL_MASK;
        if (env->v7m.control[targets_secure] & R_V7M_CONTROL_SPSEL_MASK) {
            lr |= R_V7M_EXCRET_SPSEL_MASK;
        }

        /*
         * Clear registers if necessary to prevent non-secure exception
         * code being able to see register values from secure code.
         * Where register values become architecturally UNKNOWN we leave
         * them with their previous values. v8.1M is tighter than v8.0M
         * here and always zeroes the caller-saved registers regardless
         * of the security state the exception is targeting.
         */
        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            if (!targets_secure || arm_feature(env, ARM_FEATURE_V8_1M)) {
                /*
                 * Always clear the caller-saved registers (they have been
                 * pushed to the stack earlier in v7m_push_stack()).
                 * Clear callee-saved registers if the background code is
                 * Secure (in which case these regs were saved in
                 * v7m_push_callee_stack()).
                 */
                int i;
                /*
                 * r4..r11 are callee-saves, zero only if background
                 * state was Secure (EXCRET.S == 1) and exception
                 * targets Non-secure state
                 */
                bool zero_callee_saves = !targets_secure &&
                    (lr & R_V7M_EXCRET_S_MASK);

                for (i = 0; i < 13; i++) {
                    if (i < 4 || i > 11 || zero_callee_saves) {
                        env->regs[i] = 0;
                    }
                }
                /* Clear EAPSR */
                xpsr_write(env, 0, XPSR_NZCV | XPSR_Q | XPSR_GE | XPSR_IT);
            }
        }
    }

    if (push_failed && !ignore_stackfaults) {
        /*
         * Derived exception on callee-saves register stacking:
         * we might now want to take a different exception which
         * targets a different security state, so try again from the top.
         */
        qemu_log_mask(CPU_LOG_INT,
                      "...derived exception on callee-saves register stacking\n");
        v7m_exception_taken(cpu, lr, true, true);
        return;
    }

    if (!arm_v7m_load_vector(cpu, exc, targets_secure, &addr)) {
        /* Vector load failed: derived exception */
        qemu_log_mask(CPU_LOG_INT,
                      "...derived exception on vector table load\n");
        v7m_exception_taken(cpu, lr, true, true);
        return;
    }

    /*
     * Now we've done everything that might cause a derived exception
     * we can go ahead and activate whichever exception we're going to
     * take (which might now be the derived exception).
     */
    armv7m_nvic_acknowledge_irq(env->nvic);

    /* Switch to target security state -- must do this before writing SPSEL */
    switch_v7m_security_state(env, targets_secure);
    write_v7m_control_spsel(env, 0);
    arm_clear_exclusive(env);
    /* Clear SFPA and FPCA (has no effect if no FPU) */
    env->v7m.control[M_REG_S] &=
        ~(R_V7M_CONTROL_FPCA_MASK | R_V7M_CONTROL_SFPA_MASK);
    /* Clear IT bits */
    env->condexec_bits = 0;
    env->regs[14] = lr;
    env->regs[15] = addr & 0xfffffffe;
    env->thumb = addr & 1;
    arm_rebuild_hflags(env);
}

static void v7m_update_fpccr(CPUARMState *env, uint32_t frameptr,
                             bool apply_splim)
{
    /*
     * Like the pseudocode UpdateFPCCR: save state in FPCAR and FPCCR
     * that we will need later in order to do lazy FP reg stacking.
     */
    bool is_secure = env->v7m.secure;
    void *nvic = env->nvic;
    /*
     * Some bits are unbanked and live always in fpccr[M_REG_S]; some bits
     * are banked and we want to update the bit in the bank for the
     * current security state; and in one case we want to specifically
     * update the NS banked version of a bit even if we are secure.
     */
    uint32_t *fpccr_s = &env->v7m.fpccr[M_REG_S];
    uint32_t *fpccr_ns = &env->v7m.fpccr[M_REG_NS];
    uint32_t *fpccr = &env->v7m.fpccr[is_secure];
    bool hfrdy, bfrdy, mmrdy, ns_ufrdy, s_ufrdy, sfrdy, monrdy;

    env->v7m.fpcar[is_secure] = frameptr & ~0x7;

    if (apply_splim && arm_feature(env, ARM_FEATURE_V8)) {
        bool splimviol;
        uint32_t splim = v7m_sp_limit(env);
        bool ign = armv7m_nvic_neg_prio_requested(nvic, is_secure) &&
            (env->v7m.ccr[is_secure] & R_V7M_CCR_STKOFHFNMIGN_MASK);

        splimviol = !ign && frameptr < splim;
        *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, SPLIMVIOL, splimviol);
    }

    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, LSPACT, 1);

    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, S, is_secure);

    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, USER, arm_current_el(env) == 0);

    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, THREAD,
                        !arm_v7m_is_handler_mode(env));

    hfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_HARD, false);
    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, HFRDY, hfrdy);

    bfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_BUS, false);
    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, BFRDY, bfrdy);

    mmrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_MEM, is_secure);
    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, MMRDY, mmrdy);

    ns_ufrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_USAGE, false);
    *fpccr_ns = FIELD_DP32(*fpccr_ns, V7M_FPCCR, UFRDY, ns_ufrdy);

    monrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_DEBUG, false);
    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, MONRDY, monrdy);

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        s_ufrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_USAGE, true);
        *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, UFRDY, s_ufrdy);

        sfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_SECURE, false);
        *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, SFRDY, sfrdy);
    }
}
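
/*
 * Banking summary for v7m_update_fpccr() (illustrative): S, HFRDY,
 * BFRDY, MONRDY, SFRDY and the Secure UFRDY are only ever updated in
 * fpccr[M_REG_S]; the NonSecure UFRDY is updated in fpccr[M_REG_NS];
 * LSPACT, USER, THREAD, MMRDY and SPLIMVIOL are updated in the bank
 * for the current security state.
 */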

void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
{
    /* fptr is the value of Rn, the frame pointer we store the FP regs to */
    ARMCPU *cpu = env_archcpu(env);
    bool s = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
    bool lspact = env->v7m.fpccr[s] & R_V7M_FPCCR_LSPACT_MASK;
    uintptr_t ra = GETPC();

    assert(env->v7m.secure);

    if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
        return;
    }

    /* Check access to the coprocessor is permitted */
    if (!v7m_cpacr_pass(env, true, arm_current_el(env) != 0)) {
        raise_exception_ra(env, EXCP_NOCP, 0, 1, GETPC());
    }

    if (lspact) {
        /* LSPACT should not be active when there is active FP state */
        raise_exception_ra(env, EXCP_LSERR, 0, 1, GETPC());
    }

    if (fptr & 7) {
        raise_exception_ra(env, EXCP_UNALIGNED, 0, 1, GETPC());
    }

    /*
     * Note that we do not use v7m_stack_write() here, because the
     * accesses should not set the FSR bits for stacking errors if they
     * fail. (In pseudocode terms, they are AccType_NORMAL, not AccType_STACK
     * or AccType_LAZYFP). Faults in cpu_stl_data_ra() will throw exceptions
     * and longjmp out.
     */
    if (!(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPEN_MASK)) {
        bool ts = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK;
        int i;

        for (i = 0; i < (ts ? 32 : 16); i += 2) {
            uint64_t dn = *aa32_vfp_dreg(env, i / 2);
            uint32_t faddr = fptr + 4 * i;
            uint32_t slo = extract64(dn, 0, 32);
            uint32_t shi = extract64(dn, 32, 32);

            if (i >= 16) {
                faddr += 8; /* skip the slot for the FPSCR and VPR */
            }
            cpu_stl_data_ra(env, faddr, slo, ra);
            cpu_stl_data_ra(env, faddr + 4, shi, ra);
        }
        cpu_stl_data_ra(env, fptr + 0x40, vfp_get_fpscr(env), ra);
        if (cpu_isar_feature(aa32_mve, cpu)) {
            cpu_stl_data_ra(env, fptr + 0x44, env->v7m.vpr, ra);
        }

        /*
         * If TS is 0 then s0 to s15, FPSCR and VPR are UNKNOWN; we choose to
         * leave them unchanged, matching our choice in v7m_preserve_fp_state.
         */
        if (ts) {
            for (i = 0; i < 32; i += 2) {
                *aa32_vfp_dreg(env, i / 2) = 0;
            }
            vfp_set_fpscr(env, 0);
            if (cpu_isar_feature(aa32_mve, cpu)) {
                env->v7m.vpr = 0;
            }
        }
    } else {
        v7m_update_fpccr(env, fptr, false);
    }

    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
}
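
/*
 * FP frame layout used by VLSTM above and VLLDM below (offsets from
 * fptr; an illustrative summary of the loops):
 *
 *   0x00..0x3c  s0..s15
 *   0x40        FPSCR
 *   0x44        VPR (MVE only, otherwise reserved)
 *   0x48..0x84  s16..s31 (only when FPCCR.TS is set)
 */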

void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
{
    ARMCPU *cpu = env_archcpu(env);
    uintptr_t ra = GETPC();

    /* fptr is the value of Rn, the frame pointer we load the FP regs from */
    assert(env->v7m.secure);

    if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
        return;
    }

    /* Check access to the coprocessor is permitted */
    if (!v7m_cpacr_pass(env, true, arm_current_el(env) != 0)) {
        raise_exception_ra(env, EXCP_NOCP, 0, 1, GETPC());
    }

    if (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK) {
        /* State in FP is still valid */
        env->v7m.fpccr[M_REG_S] &= ~R_V7M_FPCCR_LSPACT_MASK;
    } else {
        bool ts = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK;
        int i;
        uint32_t fpscr;

        if (fptr & 7) {
            raise_exception_ra(env, EXCP_UNALIGNED, 0, 1, GETPC());
        }

        for (i = 0; i < (ts ? 32 : 16); i += 2) {
            uint32_t slo, shi;
            uint64_t dn;
            uint32_t faddr = fptr + 4 * i;

            if (i >= 16) {
                faddr += 8; /* skip the slot for the FPSCR and VPR */
            }

            slo = cpu_ldl_data_ra(env, faddr, ra);
            shi = cpu_ldl_data_ra(env, faddr + 4, ra);

            dn = (uint64_t) shi << 32 | slo;
            *aa32_vfp_dreg(env, i / 2) = dn;
        }
        fpscr = cpu_ldl_data_ra(env, fptr + 0x40, ra);
        vfp_set_fpscr(env, fpscr);
        if (cpu_isar_feature(aa32_mve, cpu)) {
            env->v7m.vpr = cpu_ldl_data_ra(env, fptr + 0x44, ra);
        }
    }

    env->v7m.control[M_REG_S] |= R_V7M_CONTROL_FPCA_MASK;
}

static bool v7m_push_stack(ARMCPU *cpu)
{
    /*
     * Do the "set up stack frame" part of exception entry,
     * similar to pseudocode PushStack().
     * Return true if we generate a derived exception (and so
     * should ignore further stack faults trying to process
     * that derived exception.)
     */
    bool stacked_ok = true, limitviol = false;
    CPUARMState *env = &cpu->env;
    uint32_t xpsr = xpsr_read(env);
    uint32_t frameptr = env->regs[13];
    ARMMMUIdx mmu_idx = arm_mmu_idx(env);
    uint32_t framesize;
    bool nsacr_cp10 = extract32(env->v7m.nsacr, 10, 1);

    if ((env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) &&
        (env->v7m.secure || nsacr_cp10)) {
        if (env->v7m.secure &&
            env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK) {
            framesize = 0xa8;
        } else {
            framesize = 0x68;
        }
    } else {
        framesize = 0x20;
    }

    /* Align stack pointer if the guest wants that */
    if ((frameptr & 4) &&
        (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKALIGN_MASK)) {
        frameptr -= 4;
        xpsr |= XPSR_SPREALIGN;
    }

    xpsr &= ~XPSR_SFPA;
    if (env->v7m.secure &&
        (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
        xpsr |= XPSR_SFPA;
    }

    frameptr -= framesize;

    if (arm_feature(env, ARM_FEATURE_V8)) {
        uint32_t limit = v7m_sp_limit(env);

        if (frameptr < limit) {
            /*
             * Stack limit failure: set SP to the limit value, and generate
             * STKOF UsageFault. Stack pushes below the limit must not be
             * performed. It is IMPDEF whether pushes above the limit are
             * performed; we choose not to.
             */
            qemu_log_mask(CPU_LOG_INT,
                          "...STKOF during stacking\n");
            env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                    env->v7m.secure);
            env->regs[13] = limit;
            /*
             * We won't try to perform any further memory accesses but
             * we must continue through the following code to check for
             * permission faults during FPU state preservation, and we
             * must update FPCCR if lazy stacking is enabled.
             */
            limitviol = true;
            stacked_ok = false;
        }
    }

    /*
     * Write as much of the stack frame as we can. If we fail a stack
     * write this will result in a derived exception being pended
     * (which may be taken in preference to the one we started with
     * if it has higher priority).
     */
    stacked_ok = stacked_ok &&
        v7m_stack_write(cpu, frameptr, env->regs[0], mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 4, env->regs[1],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 8, env->regs[2],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 12, env->regs[3],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 16, env->regs[12],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 20, env->regs[14],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 24, env->regs[15],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 28, xpsr, mmu_idx, STACK_NORMAL);

    if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) {
        /* FPU is active, try to save its registers */
        bool fpccr_s = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
        bool lspact = env->v7m.fpccr[fpccr_s] & R_V7M_FPCCR_LSPACT_MASK;

        if (lspact && arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            qemu_log_mask(CPU_LOG_INT,
                          "...SecureFault because LSPACT and FPCA both set\n");
            env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
        } else if (!env->v7m.secure && !nsacr_cp10) {
            qemu_log_mask(CPU_LOG_INT,
                          "...Secure UsageFault with CFSR.NOCP because "
                          "NSACR.CP10 prevents stacking FP regs\n");
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, M_REG_S);
            env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
        } else {
            if (!(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPEN_MASK)) {
                /* Lazy stacking disabled, save registers now */
                int i;
                bool cpacr_pass = v7m_cpacr_pass(env, env->v7m.secure,
                                                 arm_current_el(env) != 0);

                if (stacked_ok && !cpacr_pass) {
                    /*
                     * Take UsageFault if CPACR forbids access. The pseudocode
                     * here does a full CheckCPEnabled() but we know the NSACR
                     * check can never fail as we have already handled that.
                     */
                    qemu_log_mask(CPU_LOG_INT,
                                  "...UsageFault with CFSR.NOCP because "
                                  "CPACR.CP10 prevents stacking FP regs\n");
                    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                            env->v7m.secure);
                    env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_NOCP_MASK;
                    stacked_ok = false;
                }

                for (i = 0; i < ((framesize == 0xa8) ? 32 : 16); i += 2) {
                    uint64_t dn = *aa32_vfp_dreg(env, i / 2);
                    uint32_t faddr = frameptr + 0x20 + 4 * i;
                    uint32_t slo = extract64(dn, 0, 32);
                    uint32_t shi = extract64(dn, 32, 32);

                    if (i >= 16) {
                        faddr += 8; /* skip the slot for the FPSCR and VPR */
                    }
                    stacked_ok = stacked_ok &&
                        v7m_stack_write(cpu, faddr, slo,
                                        mmu_idx, STACK_NORMAL) &&
                        v7m_stack_write(cpu, faddr + 4, shi,
                                        mmu_idx, STACK_NORMAL);
                }
                stacked_ok = stacked_ok &&
                    v7m_stack_write(cpu, frameptr + 0x60,
                                    vfp_get_fpscr(env), mmu_idx, STACK_NORMAL);
                if (cpu_isar_feature(aa32_mve, cpu)) {
                    stacked_ok = stacked_ok &&
                        v7m_stack_write(cpu, frameptr + 0x64,
                                        env->v7m.vpr, mmu_idx, STACK_NORMAL);
                }
                if (cpacr_pass) {
                    for (i = 0; i < ((framesize == 0xa8) ? 32 : 16); i += 2) {
                        *aa32_vfp_dreg(env, i / 2) = 0;
                    }
                    vfp_set_fpscr(env, 0);
                    if (cpu_isar_feature(aa32_mve, cpu)) {
                        env->v7m.vpr = 0;
                    }
                }
            } else {
                /* Lazy stacking enabled, save necessary info to stack later */
                v7m_update_fpccr(env, frameptr + 0x20, true);
            }
        }
    }

    /*
     * If we broke a stack limit then SP was already updated earlier;
     * otherwise we update SP regardless of whether any of the stack
     * accesses failed or we took some other kind of fault.
     */
    if (!limitviol) {
        env->regs[13] = frameptr;
    }

    return !stacked_ok;
}
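
/*
 * Frame sizes computed by v7m_push_stack() (offsets from frameptr;
 * an illustrative summary):
 *
 *   0x20 basic frame     r0-r3, r12, lr, pc, xPSR
 *   0x68 FP frame        basic frame, then s0-s15 at +0x20,
 *                        FPSCR at +0x60, VPR at +0x64 (MVE)
 *   0xa8 extended frame  FP frame plus s16-s31 at +0x68 (Secure,
 *                        FPCCR.TS set)
 */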

static void do_v7m_exception_exit(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    uint32_t excret;
    uint32_t xpsr, xpsr_mask;
    bool ufault = false;
    bool sfault = false;
    bool return_to_sp_process;
    bool return_to_handler;
    bool rettobase = false;
    bool exc_secure = false;
    bool return_to_secure;
    bool ftype;
    bool restore_s16_s31 = false;

    /*
     * If we're not in Handler mode then jumps to magic exception-exit
     * addresses don't have magic behaviour. However for the v8M
     * security extensions the magic secure-function-return has to
     * work in thread mode too, so to avoid doing an extra check in
     * the generated code we allow exception-exit magic to also cause the
     * internal exception and bring us here in thread mode. Correct code
     * will never try to do this (the following insn fetch will always
     * fault) so the overhead of having taken an unnecessary exception
     * doesn't matter.
     */
    if (!arm_v7m_is_handler_mode(env)) {
        return;
    }

    /*
     * In the spec pseudocode ExceptionReturn() is called directly
     * from BXWritePC() and gets the full target PC value including
     * bit zero. In QEMU's implementation we treat it as a normal
     * jump-to-register (which is then caught later on), and so split
     * the target value up between env->regs[15] and env->thumb in
     * gen_bx(). Reconstitute it.
     */
    excret = env->regs[15];
    if (env->thumb) {
        excret |= 1;
    }

    qemu_log_mask(CPU_LOG_INT, "Exception return: magic PC %" PRIx32
                  " previous exception %d\n",
                  excret, env->v7m.exception);

    if ((excret & R_V7M_EXCRET_RES1_MASK) != R_V7M_EXCRET_RES1_MASK) {
        qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero high bits in exception "
                      "exit PC value 0x%" PRIx32 " are UNPREDICTABLE\n",
                      excret);
    }

    ftype = excret & R_V7M_EXCRET_FTYPE_MASK;

    if (!ftype && !cpu_isar_feature(aa32_vfp_simd, cpu)) {
        qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero FTYPE in exception "
                      "exit PC value 0x%" PRIx32 " is UNPREDICTABLE "
                      "if FPU not present\n",
                      excret);
        ftype = true;
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        /*
         * EXC_RETURN.ES validation check (R_SMFL). We must do this before
         * we pick which FAULTMASK to clear.
         */
        if (!env->v7m.secure &&
            ((excret & R_V7M_EXCRET_ES_MASK) ||
             !(excret & R_V7M_EXCRET_DCRS_MASK))) {
1441            sfault = true;
1442            /* For all other purposes, treat ES as 0 (R_HXSR) */
1443            excret &= ~R_V7M_EXCRET_ES_MASK;
1444        }
1445        exc_secure = excret & R_V7M_EXCRET_ES_MASK;
1446    }
1447
1448    if (env->v7m.exception != ARMV7M_EXCP_NMI) {
1449        /*
1450         * Auto-clear FAULTMASK on return from other than NMI.
1451         * If the security extension is implemented then this only
1452         * happens if the raw execution priority is >= 0; the
1453         * value of the ES bit in the exception return value indicates
1454         * which security state's faultmask to clear. (v8M ARM ARM R_KBNF.)
1455         */
1456        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
1457            if (armv7m_nvic_raw_execution_priority(env->nvic) >= 0) {
1458                env->v7m.faultmask[exc_secure] = 0;
1459            }
1460        } else {
1461            env->v7m.faultmask[M_REG_NS] = 0;
1462        }
1463    }
1464
1465    switch (armv7m_nvic_complete_irq(env->nvic, env->v7m.exception,
1466                                     exc_secure)) {
1467    case -1:
1468        /* attempt to exit an exception that isn't active */
1469        ufault = true;
1470        break;
1471    case 0:
1472        /* still an irq active now */
1473        break;
1474    case 1:
1475        /*
1476         * We returned to base exception level, no nesting.
1477         * (In the pseudocode this is written using "NestedActivation != 1"
1478         * where we have 'rettobase == false'.)
1479         */
1480        rettobase = true;
1481        break;
1482    default:
1483        g_assert_not_reached();
1484    }
1485
1486    return_to_handler = !(excret & R_V7M_EXCRET_MODE_MASK);
1487    return_to_sp_process = excret & R_V7M_EXCRET_SPSEL_MASK;
1488    return_to_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
1489        (excret & R_V7M_EXCRET_S_MASK);
1490
1491    if (arm_feature(env, ARM_FEATURE_V8)) {
1492        if (!arm_feature(env, ARM_FEATURE_M_SECURITY)) {
1493            /*
1494             * UNPREDICTABLE if S == 1 or DCRS == 0 or ES == 1 (R_XLCP);
1495             * we choose to take the UsageFault.
1496             */
1497            if ((excret & R_V7M_EXCRET_S_MASK) ||
1498                (excret & R_V7M_EXCRET_ES_MASK) ||
1499                !(excret & R_V7M_EXCRET_DCRS_MASK)) {
1500                ufault = true;
1501            }
1502        }
1503        if (excret & R_V7M_EXCRET_RES0_MASK) {
1504            ufault = true;
1505        }
1506    } else {
1507        /* For v7M we only recognize certain combinations of the low bits */
1508        switch (excret & 0xf) {
1509        case 1: /* Return to Handler */
1510            break;
1511        case 13: /* Return to Thread using Process stack */
1512        case 9: /* Return to Thread using Main stack */
1513            /*
1514             * We only need to check NONBASETHRDENA for v7M, because in
1515             * v8M this bit does not exist (it is RES1).
1516             */
1517            if (!rettobase &&
1518                !(env->v7m.ccr[env->v7m.secure] &
1519                  R_V7M_CCR_NONBASETHRDENA_MASK)) {
1520                ufault = true;
1521            }
1522            break;
1523        default:
1524            ufault = true;
1525        }
1526    }
1527
1528    /*
1529     * Set CONTROL.SPSEL from excret.SPSEL. Since we're still in
1530     * Handler mode (and will be until we write the new XPSR.Interrupt
1531     * field) this does not switch around the current stack pointer.
1532     * We must do this before we do any kind of tailchaining, including
1533     * for the derived exceptions on integrity check failures, or we will
1534     * give the guest an incorrect EXCRET.SPSEL value on exception entry.
1535     */
1536    write_v7m_control_spsel_for_secstate(env, return_to_sp_process, exc_secure);
1537
1538    /*
1539     * Clear scratch FP values left in caller saved registers; this
1540     * must happen before any kind of tail chaining.
1541     */
1542    if ((env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_CLRONRET_MASK) &&
1543        (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK)) {
1544        if (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK) {
1545            env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
1546            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
1547            qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
1548                          "stackframe: error during lazy state deactivation\n");
1549            v7m_exception_taken(cpu, excret, true, false);
1550            return;
1551        } else {
1552            if (arm_feature(env, ARM_FEATURE_V8_1M)) {
1553                /* v8.1M adds this NOCP check */
1554                bool nsacr_pass = exc_secure ||
1555                    extract32(env->v7m.nsacr, 10, 1);
1556                bool cpacr_pass = v7m_cpacr_pass(env, exc_secure, true);
1557                if (!nsacr_pass) {
1558                    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, true);
1559                    env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
1560                    qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
1561                        "stackframe: NSACR prevents clearing FPU registers\n");
1562                    v7m_exception_taken(cpu, excret, true, false);
1563                    return;
1564                } else if (!cpacr_pass) {
1565                    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
1566                                            exc_secure);
1567                    env->v7m.cfsr[exc_secure] |= R_V7M_CFSR_NOCP_MASK;
1568                    qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
1569                        "stackframe: CPACR prevents clearing FPU registers\n");
1570                    v7m_exception_taken(cpu, excret, true, false);
1571                    return;
1572                }
1573            }
1574            /* Clear s0..s15, FPSCR and VPR */
1575            int i;
1576
1577            for (i = 0; i < 16; i += 2) {
1578                *aa32_vfp_dreg(env, i / 2) = 0;
1579            }
1580            vfp_set_fpscr(env, 0);
1581            if (cpu_isar_feature(aa32_mve, cpu)) {
1582                env->v7m.vpr = 0;
1583            }
1584        }
1585    }
1586
1587    if (sfault) {
1588        env->v7m.sfsr |= R_V7M_SFSR_INVER_MASK;
1589        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
1590        qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
1591                      "stackframe: failed EXC_RETURN.ES validity check\n");
1592        v7m_exception_taken(cpu, excret, true, false);
1593        return;
1594    }
1595
1596    if (ufault) {
1597        /*
1598         * Bad exception return: instead of popping the exception
1599         * stack, directly take a usage fault on the current stack.
1600         */
1601        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
1602        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
1603        qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
1604                      "stackframe: failed exception return integrity check\n");
1605        v7m_exception_taken(cpu, excret, true, false);
1606        return;
1607    }
1608
1609    /*
1610     * Tailchaining: if there is currently a pending exception that
1611     * is high enough priority to preempt execution at the level we're
1612     * about to return to, then just directly take that exception now,
1613     * avoiding an unstack-and-then-stack. Note that now we have
1614     * deactivated the previous exception by calling armv7m_nvic_complete_irq()
1615     * our current execution priority is already the execution priority we are
1616     * returning to -- none of the state we would unstack or set based on
1617     * the EXCRET value affects it.
1618     */
1619    if (armv7m_nvic_can_take_pending_exception(env->nvic)) {
1620        qemu_log_mask(CPU_LOG_INT, "...tailchaining to pending exception\n");
1621        v7m_exception_taken(cpu, excret, true, false);
1622        return;
1623    }
1624
1625    switch_v7m_security_state(env, return_to_secure);
1626
1627    {
1628        /*
1629         * The stack pointer we should be reading the exception frame from
1630         * depends on bits in the magic exception return type value (and
1631         * for v8M isn't necessarily the stack pointer we will eventually
1632         * end up resuming execution with). Get a pointer to the location
1633         * in the CPU state struct where the SP we need is currently being
1634         * stored; we will use and modify it in place.
1635         * We use this limited C variable scope so we don't accidentally
1636         * use 'frame_sp_p' after we do something that makes it invalid.
1637         */
1638        bool spsel = env->v7m.control[return_to_secure] & R_V7M_CONTROL_SPSEL_MASK;
1639        uint32_t *frame_sp_p = get_v7m_sp_ptr(env,
1640                                              return_to_secure,
1641                                              !return_to_handler,
1642                                              spsel);
1643        uint32_t frameptr = *frame_sp_p;
1644        bool pop_ok = true;
1645        ARMMMUIdx mmu_idx;
1646        bool return_to_priv = return_to_handler ||
1647            !(env->v7m.control[return_to_secure] & R_V7M_CONTROL_NPRIV_MASK);
1648
1649        mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, return_to_secure,
1650                                                        return_to_priv);
1651
1652        if (!QEMU_IS_ALIGNED(frameptr, 8) &&
1653            arm_feature(env, ARM_FEATURE_V8)) {
1654            qemu_log_mask(LOG_GUEST_ERROR,
1655                          "M profile exception return with non-8-aligned SP "
1656                          "for destination state is UNPREDICTABLE\n");
1657        }
1658
1659        /* Do we need to pop callee-saved registers? */
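            /*
             * v8M callee-saved extension to the frame, 0x28 bytes:
             *   [0x00] integrity signature   [0x04] reserved
             *   [0x08..0x24] r4-r11
             */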
1660        if (return_to_secure &&
1661            ((excret & R_V7M_EXCRET_ES_MASK) == 0 ||
1662             (excret & R_V7M_EXCRET_DCRS_MASK) == 0)) {
1663            uint32_t actual_sig;
1664
1665            pop_ok = v7m_stack_read(cpu, &actual_sig, frameptr, mmu_idx);
1666
1667            if (pop_ok && v7m_integrity_sig(env, excret) != actual_sig) {
1668                /* Take a SecureFault on the current stack */
1669                env->v7m.sfsr |= R_V7M_SFSR_INVIS_MASK;
1670                armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
1671                qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
1672                              "stackframe: failed exception return integrity "
1673                              "signature check\n");
1674                v7m_exception_taken(cpu, excret, true, false);
1675                return;
1676            }
1677
1678            pop_ok = pop_ok &&
1679                v7m_stack_read(cpu, &env->regs[4], frameptr + 0x8, mmu_idx) &&
1680                v7m_stack_read(cpu, &env->regs[5], frameptr + 0xc, mmu_idx) &&
1681                v7m_stack_read(cpu, &env->regs[6], frameptr + 0x10, mmu_idx) &&
1682                v7m_stack_read(cpu, &env->regs[7], frameptr + 0x14, mmu_idx) &&
1683                v7m_stack_read(cpu, &env->regs[8], frameptr + 0x18, mmu_idx) &&
1684                v7m_stack_read(cpu, &env->regs[9], frameptr + 0x1c, mmu_idx) &&
1685                v7m_stack_read(cpu, &env->regs[10], frameptr + 0x20, mmu_idx) &&
1686                v7m_stack_read(cpu, &env->regs[11], frameptr + 0x24, mmu_idx);
1687
1688            frameptr += 0x28;
1689        }
1690
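            /*
             * Basic 8-word hardware frame:
             *   [0x00] r0   [0x04] r1   [0x08] r2   [0x0c] r3
             *   [0x10] r12  [0x14] lr   [0x18] pc   [0x1c] xPSR
             */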
1691        /* Pop registers */
1692        pop_ok = pop_ok &&
1693            v7m_stack_read(cpu, &env->regs[0], frameptr, mmu_idx) &&
1694            v7m_stack_read(cpu, &env->regs[1], frameptr + 0x4, mmu_idx) &&
1695            v7m_stack_read(cpu, &env->regs[2], frameptr + 0x8, mmu_idx) &&
1696            v7m_stack_read(cpu, &env->regs[3], frameptr + 0xc, mmu_idx) &&
1697            v7m_stack_read(cpu, &env->regs[12], frameptr + 0x10, mmu_idx) &&
1698            v7m_stack_read(cpu, &env->regs[14], frameptr + 0x14, mmu_idx) &&
1699            v7m_stack_read(cpu, &env->regs[15], frameptr + 0x18, mmu_idx) &&
1700            v7m_stack_read(cpu, &xpsr, frameptr + 0x1c, mmu_idx);
1701
1702        if (!pop_ok) {
1703            /*
1704             * v7m_stack_read() pended a fault, so take it (as a tail
1705             * chained exception on the same stack frame)
1706             */
1707            qemu_log_mask(CPU_LOG_INT, "...derived exception on unstacking\n");
1708            v7m_exception_taken(cpu, excret, true, false);
1709            return;
1710        }
1711
1712        /*
1713         * Returning from an exception with a PC with bit 0 set is defined
1714         * behaviour on v8M (bit 0 is ignored), but for v7M it was specified
1715         * to be UNPREDICTABLE. In practice actual v7M hardware seems to ignore
1716         * the lsbit, and there are several RTOSes out there which incorrectly
1717         * assume the r15 in the stack frame should be a Thumb-style "lsbit
1718         * indicates ARM/Thumb" value, so ignore the bit on v7M as well, but
1719         * complain about the badly behaved guest.
1720         */
1721        if (env->regs[15] & 1) {
1722            env->regs[15] &= ~1U;
1723            if (!arm_feature(env, ARM_FEATURE_V8)) {
1724                qemu_log_mask(LOG_GUEST_ERROR,
1725                              "M profile return from interrupt with misaligned "
1726                              "PC is UNPREDICTABLE on v7M\n");
1727            }
1728        }
1729
1730        if (arm_feature(env, ARM_FEATURE_V8)) {
1731            /*
1732             * For v8M we have to check whether the xPSR exception field
1733             * matches the EXCRET value for return to handler/thread
1734             * before we commit to changing the SP and xPSR.
1735             */
1736            bool will_be_handler = (xpsr & XPSR_EXCP) != 0;
1737            if (return_to_handler != will_be_handler) {
1738                /*
1739                 * Take an INVPC UsageFault on the current stack.
1740                 * By this point we will have switched to the security state
1741                 * for the background state, so this UsageFault will target
1742                 * that state.
1743                 */
1744                armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
1745                                        env->v7m.secure);
1746                env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
1747                qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
1748                              "stackframe: failed exception return integrity "
1749                              "check\n");
1750                v7m_exception_taken(cpu, excret, true, false);
1751                return;
1752            }
1753        }
1754
1755        if (!ftype) {
1756            /* FP present and we need to handle it */
1757            if (!return_to_secure &&
1758                (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK)) {
1759                armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
1760                env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
1761                qemu_log_mask(CPU_LOG_INT,
1762                              "...taking SecureFault on existing stackframe: "
1763                              "Secure LSPACT set but exception return is "
1764                              "not to secure state\n");
1765                v7m_exception_taken(cpu, excret, true, false);
1766                return;
1767            }
1768
1769            restore_s16_s31 = return_to_secure &&
1770                (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK);
1771
1772            if (env->v7m.fpccr[return_to_secure] & R_V7M_FPCCR_LSPACT_MASK) {
1773                /* State in FPU is still valid, just clear LSPACT */
1774                env->v7m.fpccr[return_to_secure] &= ~R_V7M_FPCCR_LSPACT_MASK;
1775            } else {
1776                int i;
1777                uint32_t fpscr;
1778                bool cpacr_pass, nsacr_pass;
1779
1780                cpacr_pass = v7m_cpacr_pass(env, return_to_secure,
1781                                            return_to_priv);
1782                nsacr_pass = return_to_secure ||
1783                    extract32(env->v7m.nsacr, 10, 1);
1784
1785                if (!cpacr_pass) {
1786                    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
1787                                            return_to_secure);
1788                    env->v7m.cfsr[return_to_secure] |= R_V7M_CFSR_NOCP_MASK;
1789                    qemu_log_mask(CPU_LOG_INT,
1790                                  "...taking UsageFault on existing "
1791                                  "stackframe: CPACR.CP10 prevents unstacking "
1792                                  "FP regs\n");
1793                    v7m_exception_taken(cpu, excret, true, false);
1794                    return;
1795                } else if (!nsacr_pass) {
1796                    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, true);
1797                    env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_INVPC_MASK;
1798                    qemu_log_mask(CPU_LOG_INT,
1799                                  "...taking Secure UsageFault on existing "
1800                                  "stackframe: NSACR.CP10 prevents unstacking "
1801                                  "FP regs\n");
1802                    v7m_exception_taken(cpu, excret, true, false);
1803                    return;
1804                }
1805
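                    /*
                     * FP extension to the frame: s0-s15 at +0x20..+0x5c,
                     * FPSCR at +0x60, VPR (if MVE) at +0x64; when TS is
                     * set, s16-s31 follow at +0x68.
                     */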
1806                for (i = 0; i < (restore_s16_s31 ? 32 : 16); i += 2) {
1807                    uint32_t slo, shi;
1808                    uint64_t dn;
1809                    uint32_t faddr = frameptr + 0x20 + 4 * i;
1810
1811                    if (i >= 16) {
1812                        faddr += 8; /* Skip the slot for the FPSCR and VPR */
1813                    }
1814
1815                    pop_ok = pop_ok &&
1816                        v7m_stack_read(cpu, &slo, faddr, mmu_idx) &&
1817                        v7m_stack_read(cpu, &shi, faddr + 4, mmu_idx);
1818
1819                    if (!pop_ok) {
1820                        break;
1821                    }
1822
1823                    dn = (uint64_t)shi << 32 | slo;
1824                    *aa32_vfp_dreg(env, i / 2) = dn;
1825                }
1826                pop_ok = pop_ok &&
1827                    v7m_stack_read(cpu, &fpscr, frameptr + 0x60, mmu_idx);
1828                if (pop_ok) {
1829                    vfp_set_fpscr(env, fpscr);
1830                }
1831                if (cpu_isar_feature(aa32_mve, cpu)) {
1832                    pop_ok = pop_ok &&
1833                        v7m_stack_read(cpu, &env->v7m.vpr,
1834                                       frameptr + 0x64, mmu_idx);
1835                }
1836                if (!pop_ok) {
1837                    /*
1838                     * These regs are 0 if the security extension is present;
1839                     * otherwise they are merely UNKNOWN. We always zero them.
1840                     */
1841                    for (i = 0; i < (restore_s16_s31 ? 32 : 16); i += 2) {
1842                        *aa32_vfp_dreg(env, i / 2) = 0;
1843                    }
1844                    vfp_set_fpscr(env, 0);
1845                    if (cpu_isar_feature(aa32_mve, cpu)) {
1846                        env->v7m.vpr = 0;
1847                    }
1848                }
1849            }
1850        }
1851        env->v7m.control[M_REG_S] = FIELD_DP32(env->v7m.control[M_REG_S],
1852                                               V7M_CONTROL, FPCA, !ftype);
1853
1854        /* Commit to consuming the stack frame */
1855        frameptr += 0x20;
1856        if (!ftype) {
1857            frameptr += 0x48;
1858            if (restore_s16_s31) {
1859                frameptr += 0x40;
1860            }
1861        }
1862        /*
1863         * Undo stack alignment: the SPREALIGN bit indicates that the original
1864         * pre-exception SP was not 8-aligned and we added a padding word to
1865         * align it, so we undo this by ORing in the bit that takes the
1866         * current 8-aligned value back to the 8-unaligned value. (Adding 4
1867         * would work too, but a logical OR is how the pseudocode specifies it.)
1868         */
1869        if (xpsr & XPSR_SPREALIGN) {
1870            frameptr |= 4;
1871        }
1872        *frame_sp_p = frameptr;
1873    }
1874
1875    xpsr_mask = ~(XPSR_SPREALIGN | XPSR_SFPA);
1876    if (!arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
1877        xpsr_mask &= ~XPSR_GE;
1878    }
1879    /* This xpsr_write() will invalidate frame_sp_p as it may switch stacks */
1880    xpsr_write(env, xpsr, xpsr_mask);
1881
1882    if (env->v7m.secure) {
1883        bool sfpa = xpsr & XPSR_SFPA;
1884
1885        env->v7m.control[M_REG_S] = FIELD_DP32(env->v7m.control[M_REG_S],
1886                                               V7M_CONTROL, SFPA, sfpa);
1887    }
1888
1889    /*
1890     * The restored xPSR exception field will be zero if we're
1891     * resuming in Thread mode. If that doesn't match what the
1892     * exception return excret specified then this is a UsageFault.
1893     * v7M requires we make this check here; v8M did it earlier.
1894     */
1895    if (return_to_handler != arm_v7m_is_handler_mode(env)) {
1896        /*
1897         * Take an INVPC UsageFault by pushing the stack again;
1898         * we know we're v7M so this is never a Secure UsageFault.
1899         */
1900        bool ignore_stackfaults;
1901
1902        assert(!arm_feature(env, ARM_FEATURE_V8));
1903        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, false);
1904        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
1905        ignore_stackfaults = v7m_push_stack(cpu);
1906        qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on new stackframe: "
1907                      "failed exception return integrity check\n");
1908        v7m_exception_taken(cpu, excret, false, ignore_stackfaults);
1909        return;
1910    }
1911
1912    /* Otherwise, we have a successful exception exit. */
1913    arm_clear_exclusive(env);
1914    arm_rebuild_hflags(env);
1915    qemu_log_mask(CPU_LOG_INT, "...successful exception return\n");
1916}
1917
1918static bool do_v7m_function_return(ARMCPU *cpu)
1919{
1920    /*
1921     * v8M security extensions magic function return.
1922     * We may either:
1923     *  (1) throw an exception (longjump)
1924     *  (2) return true if we successfully handled the function return
1925     *  (3) return false if we failed a consistency check and have
1926     *      pended a UsageFault that needs to be taken now
1927     *
1928     * At this point the magic return value is split between env->regs[15]
1929     * and env->thumb. We don't bother to reconstitute it because we don't
1930     * need it (all values are handled the same way).
1931     */
1932    CPUARMState *env = &cpu->env;
1933    uint32_t newpc, newpsr, newpsr_exc;
1934
1935    qemu_log_mask(CPU_LOG_INT, "...really v7M secure function return\n");
1936
1937    {
1938        bool threadmode, spsel;
1939        MemOpIdx oi;
1940        ARMMMUIdx mmu_idx;
1941        uint32_t *frame_sp_p;
1942        uint32_t frameptr;
1943
1944        /* Pull the return address and IPSR from the Secure stack */
1945        threadmode = !arm_v7m_is_handler_mode(env);
1946        spsel = env->v7m.control[M_REG_S] & R_V7M_CONTROL_SPSEL_MASK;
1947
1948        frame_sp_p = get_v7m_sp_ptr(env, true, threadmode, spsel);
1949        frameptr = *frame_sp_p;
1950
1951        /*
1952         * These loads may throw an exception (for MPU faults). We want to
1953         * do them as secure, so work out what MMU index that is.
1954         */
1955        mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
1956        oi = make_memop_idx(MO_LEUL, arm_to_core_mmu_idx(mmu_idx));
1957        newpc = cpu_ldl_le_mmu(env, frameptr, oi, 0);
1958        newpsr = cpu_ldl_le_mmu(env, frameptr + 4, oi, 0);
1959
1960        /* Consistency checks on new IPSR */
1961        newpsr_exc = newpsr & XPSR_EXCP;
1962        if (!((env->v7m.exception == 0 && newpsr_exc == 0) ||
1963              (env->v7m.exception == 1 && newpsr_exc != 0))) {
1964            /* Pend the fault and tell our caller to take it */
1965            env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
1966            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
1967                                    env->v7m.secure);
1968            qemu_log_mask(CPU_LOG_INT,
1969                          "...taking INVPC UsageFault: "
1970                          "IPSR consistency check failed\n");
1971            return false;
1972        }
1973
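            /* The function-return frame is just these two words, hence the +8 */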
1974        *frame_sp_p = frameptr + 8;
1975    }
1976
1977    /* This invalidates frame_sp_p */
1978    switch_v7m_security_state(env, true);
1979    env->v7m.exception = newpsr_exc;
1980    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
1981    if (newpsr & XPSR_SFPA) {
1982        env->v7m.control[M_REG_S] |= R_V7M_CONTROL_SFPA_MASK;
1983    }
1984    xpsr_write(env, 0, XPSR_IT);
1985    env->thumb = newpc & 1;
1986    env->regs[15] = newpc & ~1;
1987    arm_rebuild_hflags(env);
1988
1989    qemu_log_mask(CPU_LOG_INT, "...function return successful\n");
1990    return true;
1991}
1992
1993static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
1994                               uint32_t addr, uint16_t *insn)
1995{
1996    /*
1997     * Load a 16-bit portion of a v7M instruction, returning true on success,
1998     * or false on failure (in which case we will have pended the appropriate
1999     * exception).
2000     * We need to do the instruction fetch's MPU and SAU checks
2001     * like this because there is no MMU index that would allow
2002     * doing the load with a single function call. Instead we must
2003     * first check that the security attributes permit the load
2004     * and that they don't mismatch on the two halves of the instruction,
2005     * and then we do the load as a secure load (ie using the security
2006     * attributes of the address, not the CPU, as architecturally required).
2007     */
2008    CPUState *cs = CPU(cpu);
2009    CPUARMState *env = &cpu->env;
2010    V8M_SAttributes sattrs = {};
2011    MemTxAttrs attrs = {};
2012    ARMMMUFaultInfo fi = {};
2013    ARMCacheAttrs cacheattrs = {};
2014    MemTxResult txres;
2015    target_ulong page_size;
2016    hwaddr physaddr;
2017    int prot;
2018
2019    v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, &sattrs);
2020    if (!sattrs.nsc || sattrs.ns) {
2021        /*
2022         * This must be the second half of the insn, and it straddles a
2023         * region boundary with the second half not being S&NSC.
2024         */
2025        env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
2026        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
2027        qemu_log_mask(CPU_LOG_INT,
2028                      "...really SecureFault with SFSR.INVEP\n");
2029        return false;
2030    }
2031    if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx, &physaddr,
2032                      &attrs, &prot, &page_size, &fi, &cacheattrs)) {
2033        /* the MPU lookup failed */
2034        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
2035        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure);
2036        qemu_log_mask(CPU_LOG_INT, "...really MemManage with CFSR.IACCVIOL\n");
2037        return false;
2038    }
2039    *insn = address_space_lduw_le(arm_addressspace(cs, attrs), physaddr,
2040                                  attrs, &txres);
2041    if (txres != MEMTX_OK) {
2042        env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
2043        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
2044        qemu_log_mask(CPU_LOG_INT, "...really BusFault with CFSR.IBUSERR\n");
2045        return false;
2046    }
2047    return true;
2048}
2049
2050static bool v7m_read_sg_stack_word(ARMCPU *cpu, ARMMMUIdx mmu_idx,
2051                                   uint32_t addr, uint32_t *spdata)
2052{
2053    /*
2054     * Read a word of data from the stack for the SG instruction,
2055     * writing the value into *spdata. If the load succeeds, return
2056     * true; otherwise pend an appropriate exception and return false.
2057     * (We can't use data load helpers here that throw an exception
2058     * because of the context we're called in, which is halfway through
2059     * arm_v7m_cpu_do_interrupt().)
2060     */
2061    CPUState *cs = CPU(cpu);
2062    CPUARMState *env = &cpu->env;
2063    MemTxAttrs attrs = {};
2064    MemTxResult txres;
2065    target_ulong page_size;
2066    hwaddr physaddr;
2067    int prot;
2068    ARMMMUFaultInfo fi = {};
2069    ARMCacheAttrs cacheattrs = {};
2070    uint32_t value;
2071
2072    if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &physaddr,
2073                      &attrs, &prot, &page_size, &fi, &cacheattrs)) {
2074        /* MPU/SAU lookup failed */
2075        if (fi.type == ARMFault_QEMU_SFault) {
2076            qemu_log_mask(CPU_LOG_INT,
2077                          "...SecureFault during stack word read\n");
2078            env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
2079            env->v7m.sfar = addr;
2080            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
2081        } else {
2082            qemu_log_mask(CPU_LOG_INT,
2083                          "...MemManageFault during stack word read\n");
2084            env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_DACCVIOL_MASK |
2085                R_V7M_CFSR_MMARVALID_MASK;
2086            env->v7m.mmfar[M_REG_S] = addr;
2087            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, false);
2088        }
2089        return false;
2090    }
2091    value = address_space_ldl(arm_addressspace(cs, attrs), physaddr,
2092                              attrs, &txres);
2093    if (txres != MEMTX_OK) {
2094        /* BusFault trying to read the data */
2095        qemu_log_mask(CPU_LOG_INT,
2096                      "...BusFault during stack word read\n");
2097        env->v7m.cfsr[M_REG_NS] |=
2098            (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
2099        env->v7m.bfar = addr;
2100        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
2101        return false;
2102    }
2103
2104    *spdata = value;
2105    return true;
2106}
2107
2108static bool v7m_handle_execute_nsc(ARMCPU *cpu)
2109{
2110    /*
2111     * Check whether this attempt to execute code in a Secure & NS-Callable
2112     * memory region is for an SG instruction; if so, then emulate the
2113     * effect of the SG instruction and return true. Otherwise pend
2114     * the correct kind of exception and return false.
2115     */
2116    CPUARMState *env = &cpu->env;
2117    ARMMMUIdx mmu_idx;
2118    uint16_t insn;
2119
2120    /*
2121     * We should never get here unless get_phys_addr_pmsav8() caused
2122     * an exception for NS executing in S&NSC memory.
2123     */
2124    assert(!env->v7m.secure);
2125    assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
2126
2127    /* We want to do the MPU lookup as secure; work out what mmu_idx that is */
2128    mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
2129
2130    if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15], &insn)) {
2131        return false;
2132    }
2133
2134    if (!env->thumb) {
2135        goto gen_invep;
2136    }
2137
2138    if (insn != 0xe97f) {
2139        /*
2140         * Not an SG instruction first half (we choose the IMPDEF
2141         * early-SG-check option).
2142         */
2143        goto gen_invep;
2144    }
2145
2146    if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15] + 2, &insn)) {
2147        return false;
2148    }
2149
2150    if (insn != 0xe97f) {
2151        /*
2152         * Not an SG instruction second half (yes, both halves of the SG
2153         * insn have the same hex value)
2154         */
2155        goto gen_invep;
2156    }
2157
2158    /*
2159     * OK, we have confirmed that we really have an SG instruction.
2160     * We know we're NS in S memory so don't need to repeat those checks.
2161     */
2162    qemu_log_mask(CPU_LOG_INT, "...really an SG instruction at 0x%08" PRIx32
2163                  ", executing it\n", env->regs[15]);
2164
2165    if (cpu_isar_feature(aa32_m_sec_state, cpu) &&
2166        !arm_v7m_is_handler_mode(env)) {
2167        /*
2168         * v8.1M exception stack frame integrity check. Note that we
2169         * must perform the memory access even if CCR_S.TRD is zero
2170         * and we aren't going to check what the data loaded is.
2171         */
2172        uint32_t spdata, sp;
2173
2174        /*
2175         * We know we are currently NS, so the S stack pointers must be
2176         * in other_ss_{psp,msp}, not in regs[13]/other_sp.
2177         */
2178        sp = v7m_using_psp(env) ? env->v7m.other_ss_psp : env->v7m.other_ss_msp;
2179        if (!v7m_read_sg_stack_word(cpu, mmu_idx, sp, &spdata)) {
2180            /* Stack access failed and an exception has been pended */
2181            return false;
2182        }
2183
2184        if (env->v7m.ccr[M_REG_S] & R_V7M_CCR_TRD_MASK) {
2185            if (((spdata & ~1) == 0xfefa125a) ||
2186                !(env->v7m.control[M_REG_S] & 1)) {
2187                goto gen_invep;
2188            }
2189        }
2190    }
2191
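        /*
         * Commit the architectural effects of SG executed from NS code:
         * clear LR bit 0 so a later FNC_RETURN knows to return to
         * Non-secure, clear SFPA, switch to Secure state, clear the IT
         * bits and step past the 4-byte SG instruction.
         */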
2192    env->regs[14] &= ~1;
2193    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
2194    switch_v7m_security_state(env, true);
2195    xpsr_write(env, 0, XPSR_IT);
2196    env->regs[15] += 4;
2197    arm_rebuild_hflags(env);
2198    return true;
2199
2200gen_invep:
2201    env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
2202    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
2203    qemu_log_mask(CPU_LOG_INT,
2204                  "...really SecureFault with SFSR.INVEP\n");
2205    return false;
2206}
2207
2208void arm_v7m_cpu_do_interrupt(CPUState *cs)
2209{
2210    ARMCPU *cpu = ARM_CPU(cs);
2211    CPUARMState *env = &cpu->env;
2212    uint32_t lr;
2213    bool ignore_stackfaults;
2214
2215    arm_log_exception(cs);
2216
2217    /*
2218     * For exceptions we just mark as pending on the NVIC, and let that
2219     * handle it.
2220     */
2221    switch (cs->exception_index) {
2222    case EXCP_UDEF:
2223        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
2224        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNDEFINSTR_MASK;
2225        break;
2226    case EXCP_NOCP:
2227    {
2228        /*
2229         * NOCP might be directed to something other than the current
2230         * security state if this fault is because of NSACR; we indicate
2231         * the target security state using exception.target_el.
2232         */
2233        int target_secstate;
2234
2235        if (env->exception.target_el == 3) {
2236            target_secstate = M_REG_S;
2237        } else {
2238            target_secstate = env->v7m.secure;
2239        }
2240        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, target_secstate);
2241        env->v7m.cfsr[target_secstate] |= R_V7M_CFSR_NOCP_MASK;
2242        break;
2243    }
2244    case EXCP_INVSTATE:
2245        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
2246        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVSTATE_MASK;
2247        break;
2248    case EXCP_STKOF:
2249        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
2250        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
2251        break;
2252    case EXCP_LSERR:
2253        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
2254        env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
2255        break;
2256    case EXCP_UNALIGNED:
2257        /* Unaligned faults reported by M-profile aware code */
2258        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
2259        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNALIGNED_MASK;
2260        break;
2261    case EXCP_DIVBYZERO:
2262        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
2263        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_DIVBYZERO_MASK;
2264        break;
2265    case EXCP_SWI:
2266        /* The PC already points to the next instruction.  */
2267        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, env->v7m.secure);
2268        break;
2269    case EXCP_PREFETCH_ABORT:
2270    case EXCP_DATA_ABORT:
2271        /*
2272         * Note that for M profile we don't have a guest-facing FSR, but
2273         * the env->exception.fsr will be populated by the code that
2274         * raises the fault, in the A profile short-descriptor format.
2275         *
2276         * Log the exception.vaddress now regardless of subtype, because
2277         * logging below only logs it when it goes into a guest visible
2278         * register.
2279         */
2280        qemu_log_mask(CPU_LOG_INT, "...at fault address 0x%x\n",
2281                      (uint32_t)env->exception.vaddress);
2282        switch (env->exception.fsr & 0xf) {
2283        case M_FAKE_FSR_NSC_EXEC:
2284            /*
2285             * Exception generated when we try to execute code at an address
2286             * which is marked as Secure & Non-Secure Callable and the CPU
2287             * is in the Non-Secure state. The only instruction which can
2288             * be executed like this is SG (and that only if both halves of
2289             * the SG instruction have the same security attributes.)
2290             * Everything else must generate an INVEP SecureFault, so we
2291             * emulate the SG instruction here.
2292             */
2293            if (v7m_handle_execute_nsc(cpu)) {
2294                return;
2295            }
2296            break;
2297        case M_FAKE_FSR_SFAULT:
2298            /*
2299             * Various flavours of SecureFault for attempts to execute or
2300             * access data in the wrong security state.
2301             */
2302            switch (cs->exception_index) {
2303            case EXCP_PREFETCH_ABORT:
2304                if (env->v7m.secure) {
2305                    env->v7m.sfsr |= R_V7M_SFSR_INVTRAN_MASK;
2306                    qemu_log_mask(CPU_LOG_INT,
2307                                  "...really SecureFault with SFSR.INVTRAN\n");
2308                } else {
2309                    env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
2310                    qemu_log_mask(CPU_LOG_INT,
2311                                  "...really SecureFault with SFSR.INVEP\n");
2312                }
2313                break;
2314            case EXCP_DATA_ABORT:
2315                /* This must be an NS access to S memory */
2316                env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
2317                qemu_log_mask(CPU_LOG_INT,
2318                              "...really SecureFault with SFSR.AUVIOL\n");
2319                break;
2320            }
2321            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
2322            break;
2323        case 0x8: /* External Abort */
2324            switch (cs->exception_index) {
2325            case EXCP_PREFETCH_ABORT:
2326                env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
2327                qemu_log_mask(CPU_LOG_INT, "...with CFSR.IBUSERR\n");
2328                break;
2329            case EXCP_DATA_ABORT:
2330                env->v7m.cfsr[M_REG_NS] |=
2331                    (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
2332                env->v7m.bfar = env->exception.vaddress;
2333                qemu_log_mask(CPU_LOG_INT,
2334                              "...with CFSR.PRECISERR and BFAR 0x%x\n",
2335                              env->v7m.bfar);
2336                break;
2337            }
2338            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
2339            break;
2340        case 0x1: /* Alignment fault reported by generic code */
2341            qemu_log_mask(CPU_LOG_INT,
2342                          "...really UsageFault with UFSR.UNALIGNED\n");
2343            env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNALIGNED_MASK;
2344            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
2345                                    env->v7m.secure);
2346            break;
2347        default:
2348            /*
2349             * All other FSR values are either MPU faults or "can't happen
2350             * for M profile" cases.
2351             */
2352            switch (cs->exception_index) {
2353            case EXCP_PREFETCH_ABORT:
2354                env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
2355                qemu_log_mask(CPU_LOG_INT, "...with CFSR.IACCVIOL\n");
2356                break;
2357            case EXCP_DATA_ABORT:
2358                env->v7m.cfsr[env->v7m.secure] |=
2359                    (R_V7M_CFSR_DACCVIOL_MASK | R_V7M_CFSR_MMARVALID_MASK);
2360                env->v7m.mmfar[env->v7m.secure] = env->exception.vaddress;
2361                qemu_log_mask(CPU_LOG_INT,
2362                              "...with CFSR.DACCVIOL and MMFAR 0x%x\n",
2363                              env->v7m.mmfar[env->v7m.secure]);
2364                break;
2365            }
2366            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM,
2367                                    env->v7m.secure);
2368            break;
2369        }
2370        break;
2371    case EXCP_SEMIHOST:
2372        qemu_log_mask(CPU_LOG_INT,
2373                      "...handling as semihosting call 0x%x\n",
2374                      env->regs[0]);
2375#ifdef CONFIG_TCG
2376        do_common_semihosting(cs);
2377#else
2378        g_assert_not_reached();
2379#endif
2380        env->regs[15] += env->thumb ? 2 : 4;
2381        return;
2382    case EXCP_BKPT:
2383        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG, false);
2384        break;
2385    case EXCP_IRQ:
2386        break;
2387    case EXCP_EXCEPTION_EXIT:
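            /*
             * FNC_RETURN magic values sort below EXC_RETURN_MIN_MAGIC,
             * so this comparison distinguishes v8M secure function
             * returns from ordinary exception returns.
             */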
2388        if (env->regs[15] < EXC_RETURN_MIN_MAGIC) {
2389            /* Must be v8M security extension function return */
2390            assert(env->regs[15] >= FNC_RETURN_MIN_MAGIC);
2391            assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
2392            if (do_v7m_function_return(cpu)) {
2393                return;
2394            }
2395        } else {
2396            do_v7m_exception_exit(cpu);
2397            return;
2398        }
2399        break;
2400    case EXCP_LAZYFP:
2401        /*
2402         * We already pended the specific exception in the NVIC in the
2403         * v7m_preserve_fp_state() helper function.
2404         */
2405        break;
2406    default:
2407        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
2408        return; /* Never happens.  Keep compiler happy.  */
2409    }
2410
2411    if (arm_feature(env, ARM_FEATURE_V8)) {
2412        lr = R_V7M_EXCRET_RES1_MASK |
2413            R_V7M_EXCRET_DCRS_MASK;
2414        /*
2415         * The S bit indicates whether we should return to Secure
2416         * or NonSecure (ie our current state).
2417         * The ES bit indicates whether we're taking this exception
2418         * to Secure or NonSecure (ie our target state). We set it
2419         * later, in v7m_exception_taken().
2420         * The SPSEL bit is also set in v7m_exception_taken() for v8M.
2421         * This corresponds to the ARM ARM pseudocode for v8M setting
2422         * some LR bits in PushStack() and some in ExceptionTaken();
2423         * the distinction matters for the tailchain cases where we
2424         * can take an exception without pushing the stack.
2425         */
2426        if (env->v7m.secure) {
2427            lr |= R_V7M_EXCRET_S_MASK;
2428        }
2429    } else {
2430        lr = R_V7M_EXCRET_RES1_MASK |
2431            R_V7M_EXCRET_S_MASK |
2432            R_V7M_EXCRET_DCRS_MASK |
2433            R_V7M_EXCRET_ES_MASK;
2434        if (env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK) {
2435            lr |= R_V7M_EXCRET_SPSEL_MASK;
2436        }
2437    }
2438    if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK)) {
2439        lr |= R_V7M_EXCRET_FTYPE_MASK;
2440    }
2441    if (!arm_v7m_is_handler_mode(env)) {
2442        lr |= R_V7M_EXCRET_MODE_MASK;
2443    }
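        /*
         * The familiar v7M EXC_RETURN values fall out of the above:
         * 0xfffffff1 (return to Handler), 0xfffffff9 (Thread, main stack)
         * and 0xfffffffd (Thread, process stack), each with FType (bit 4)
         * set because there is no FP state to restore.
         */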
2444
2445    ignore_stackfaults = v7m_push_stack(cpu);
2446    v7m_exception_taken(cpu, lr, false, ignore_stackfaults);
2447}
2448
2449uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
2450{
2451    unsigned el = arm_current_el(env);
2452
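        /*
         * SYSm encodings: 0-7 are the xPSR views, 8/9 MSP/PSP,
         * 10/11 MSPLIM/PSPLIM, 16-20 PRIMASK/BASEPRI/BASEPRI_MAX/
         * FAULTMASK/CONTROL; values with bit 7 set (e.g. 0x88 MSP_NS)
         * are the Non-secure aliases, accessible only from Secure state.
         */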
2453    /* First handle registers which unprivileged can read */
2454    switch (reg) {
2455    case 0 ... 7: /* xPSR sub-fields */
2456        return v7m_mrs_xpsr(env, reg, el);
2457    case 20: /* CONTROL */
2458        return v7m_mrs_control(env, env->v7m.secure);
2459    case 0x94: /* CONTROL_NS */
2460        /*
2461         * We have to handle this here because unprivileged Secure code
2462         * can read the NS CONTROL register.
2463         */
2464        if (!env->v7m.secure) {
2465            return 0;
2466        }
2467        return env->v7m.control[M_REG_NS] |
2468            (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK);
2469    }
2470
2471    if (el == 0) {
2472        return 0; /* unprivileged reads others as zero */
2473    }
2474
2475    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
2476        switch (reg) {
2477        case 0x88: /* MSP_NS */
2478            if (!env->v7m.secure) {
2479                return 0;
2480            }
2481            return env->v7m.other_ss_msp;
2482        case 0x89: /* PSP_NS */
2483            if (!env->v7m.secure) {
2484                return 0;
2485            }
2486            return env->v7m.other_ss_psp;
2487        case 0x8a: /* MSPLIM_NS */
2488            if (!env->v7m.secure) {
2489                return 0;
2490            }
2491            return env->v7m.msplim[M_REG_NS];
2492        case 0x8b: /* PSPLIM_NS */
2493            if (!env->v7m.secure) {
2494                return 0;
2495            }
2496            return env->v7m.psplim[M_REG_NS];
2497        case 0x90: /* PRIMASK_NS */
2498            if (!env->v7m.secure) {
2499                return 0;
2500            }
2501            return env->v7m.primask[M_REG_NS];
2502        case 0x91: /* BASEPRI_NS */
2503            if (!env->v7m.secure) {
2504                return 0;
2505            }
2506            return env->v7m.basepri[M_REG_NS];
2507        case 0x93: /* FAULTMASK_NS */
2508            if (!env->v7m.secure) {
2509                return 0;
2510            }
2511            return env->v7m.faultmask[M_REG_NS];
2512        case 0x98: /* SP_NS */
2513        {
2514            /*
2515             * This gives the non-secure SP selected based on whether we're
2516             * currently in handler mode or not, using the NS CONTROL.SPSEL.
2517             */
2518            bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
2519
2520            if (!env->v7m.secure) {
2521                return 0;
2522            }
2523            if (!arm_v7m_is_handler_mode(env) && spsel) {
2524                return env->v7m.other_ss_psp;
2525            } else {
2526                return env->v7m.other_ss_msp;
2527            }
2528        }
2529        default:
2530            break;
2531        }
2532    }
2533
2534    switch (reg) {
2535    case 8: /* MSP */
2536        return v7m_using_psp(env) ? env->v7m.other_sp : env->regs[13];
2537    case 9: /* PSP */
2538        return v7m_using_psp(env) ? env->regs[13] : env->v7m.other_sp;
2539    case 10: /* MSPLIM */
2540        if (!arm_feature(env, ARM_FEATURE_V8)) {
2541            goto bad_reg;
2542        }
2543        return env->v7m.msplim[env->v7m.secure];
2544    case 11: /* PSPLIM */
2545        if (!arm_feature(env, ARM_FEATURE_V8)) {
2546            goto bad_reg;
2547        }
2548        return env->v7m.psplim[env->v7m.secure];
2549    case 16: /* PRIMASK */
2550        return env->v7m.primask[env->v7m.secure];
2551    case 17: /* BASEPRI */
2552    case 18: /* BASEPRI_MAX */
2553        return env->v7m.basepri[env->v7m.secure];
2554    case 19: /* FAULTMASK */
2555        return env->v7m.faultmask[env->v7m.secure];
2556    default:
2557    bad_reg:
2558        qemu_log_mask(LOG_GUEST_ERROR, "Attempt to read unknown special"
2559                                       " register %d\n", reg);
2560        return 0;
2561    }
2562}
2563
2564void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
2565{
2566    /*
2567     * We're passed bits [11..0] of the instruction; extract
2568     * SYSm and the mask bits.
2569     * Invalid combinations of SYSm and mask are UNPREDICTABLE;
2570     * we choose to treat them as if the mask bits were valid.
2571     * NB that the pseudocode 'mask' variable is bits [11..10],
2572     * whereas ours is [11..8].
2573     */
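        /*
         * For example, MSR APSR_nzcvq sets only the pseudocode mask<1>
         * bit, which arrives here as bit 3 of 'mask' and selects the
         * NZCVQ bits in v7m_msr_xpsr().
         */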
2574    uint32_t mask = extract32(maskreg, 8, 4);
2575    uint32_t reg = extract32(maskreg, 0, 8);
2576    int cur_el = arm_current_el(env);
2577
2578    if (cur_el == 0 && reg > 7 && reg != 20) {
2579        /*
2580         * only xPSR sub-fields and CONTROL.SFPA may be written by
2581         * unprivileged code
2582         */
2583        return;
2584    }
2585
2586    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
2587        switch (reg) {
2588        case 0x88: /* MSP_NS */
2589            if (!env->v7m.secure) {
2590                return;
2591            }
2592            env->v7m.other_ss_msp = val & ~3;
2593            return;
2594        case 0x89: /* PSP_NS */
2595            if (!env->v7m.secure) {
2596                return;
2597            }
2598            env->v7m.other_ss_psp = val & ~3;
2599            return;
2600        case 0x8a: /* MSPLIM_NS */
2601            if (!env->v7m.secure) {
2602                return;
2603            }
2604            env->v7m.msplim[M_REG_NS] = val & ~7;
2605            return;
2606        case 0x8b: /* PSPLIM_NS */
2607            if (!env->v7m.secure) {
2608                return;
2609            }
2610            env->v7m.psplim[M_REG_NS] = val & ~7;
2611            return;
2612        case 0x90: /* PRIMASK_NS */
2613            if (!env->v7m.secure) {
2614                return;
2615            }
2616            env->v7m.primask[M_REG_NS] = val & 1;
2617            return;
2618        case 0x91: /* BASEPRI_NS */
2619            if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) {
2620                return;
2621            }
2622            env->v7m.basepri[M_REG_NS] = val & 0xff;
2623            return;
2624        case 0x93: /* FAULTMASK_NS */
2625            if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) {
2626                return;
2627            }
2628            env->v7m.faultmask[M_REG_NS] = val & 1;
2629            return;
2630        case 0x94: /* CONTROL_NS */
2631            if (!env->v7m.secure) {
2632                return;
2633            }
2634            write_v7m_control_spsel_for_secstate(env,
2635                                                 val & R_V7M_CONTROL_SPSEL_MASK,
2636                                                 M_REG_NS);
2637            if (arm_feature(env, ARM_FEATURE_M_MAIN)) {
2638                env->v7m.control[M_REG_NS] &= ~R_V7M_CONTROL_NPRIV_MASK;
2639                env->v7m.control[M_REG_NS] |= val & R_V7M_CONTROL_NPRIV_MASK;
2640            }
2641            /*
2642             * SFPA is RAZ/WI from NS. FPCA is RO if NSACR.CP10 == 0,
2643             * RES0 if the FPU is not present, and is stored in the S bank
2644             */
2645            if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env)) &&
2646                extract32(env->v7m.nsacr, 10, 1)) {
2647                env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
2648                env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_FPCA_MASK;
2649            }
2650            return;
2651        case 0x98: /* SP_NS */
2652        {
2653            /*
2654             * This gives the non-secure SP selected based on whether we're
2655             * currently in handler mode or not, using the NS CONTROL.SPSEL.
2656             */
2657            bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
2658            bool is_psp = !arm_v7m_is_handler_mode(env) && spsel;
2659            uint32_t limit;
2660
2661            if (!env->v7m.secure) {
2662                return;
2663            }
2664
2665            limit = is_psp ? env->v7m.psplim[M_REG_NS] : env->v7m.msplim[M_REG_NS];
2666
2667            val &= ~0x3;
2668
2669            if (val < limit) {
2670                raise_exception_ra(env, EXCP_STKOF, 0, 1, GETPC());
2671            }
2672
2673            if (is_psp) {
2674                env->v7m.other_ss_psp = val;
2675            } else {
2676                env->v7m.other_ss_msp = val;
2677            }
2678            return;
2679        }
2680        default:
2681            break;
2682        }
2683    }
2684
2685    switch (reg) {
2686    case 0 ... 7: /* xPSR sub-fields */
2687        v7m_msr_xpsr(env, mask, reg, val);
2688        break;
2689    case 8: /* MSP */
2690        if (v7m_using_psp(env)) {
2691            env->v7m.other_sp = val & ~3;
2692        } else {
2693            env->regs[13] = val & ~3;
2694        }
2695        break;
2696    case 9: /* PSP */
2697        if (v7m_using_psp(env)) {
2698            env->regs[13] = val & ~3;
2699        } else {
2700            env->v7m.other_sp = val & ~3;
2701        }
2702        break;
2703    case 10: /* MSPLIM */
2704        if (!arm_feature(env, ARM_FEATURE_V8)) {
2705            goto bad_reg;
2706        }
2707        env->v7m.msplim[env->v7m.secure] = val & ~7;
2708        break;
2709    case 11: /* PSPLIM */
2710        if (!arm_feature(env, ARM_FEATURE_V8)) {
2711            goto bad_reg;
2712        }
2713        env->v7m.psplim[env->v7m.secure] = val & ~7;
2714        break;
2715    case 16: /* PRIMASK */
2716        env->v7m.primask[env->v7m.secure] = val & 1;
2717        break;
2718    case 17: /* BASEPRI */
2719        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2720            goto bad_reg;
2721        }
2722        env->v7m.basepri[env->v7m.secure] = val & 0xff;
2723        break;
2724    case 18: /* BASEPRI_MAX */
2725        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2726            goto bad_reg;
2727        }
2728        val &= 0xff;
2729        if (val != 0 && (val < env->v7m.basepri[env->v7m.secure]
2730                         || env->v7m.basepri[env->v7m.secure] == 0)) {
2731            env->v7m.basepri[env->v7m.secure] = val;
2732        }
2733        break;
2734    case 19: /* FAULTMASK */
2735        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2736            goto bad_reg;
2737        }
2738        env->v7m.faultmask[env->v7m.secure] = val & 1;
2739        break;
2740    case 20: /* CONTROL */
2741        /*
2742         * Writing to the SPSEL bit only has an effect if we are in
2743         * thread mode; other bits can be updated by any privileged code.
2744         * write_v7m_control_spsel() deals with updating the SPSEL bit in
2745         * env->v7m.control, so we only need to update the others.
2746         * For v7M, we must just ignore explicit writes to SPSEL in handler
2747         * mode; for v8M the write is permitted but will have no effect.
2748         * All these bits are write-ignored from non-privileged code,
2749         * except for SFPA.
2750         */
        if (cur_el > 0 && (arm_feature(env, ARM_FEATURE_V8) ||
                           !arm_v7m_is_handler_mode(env))) {
            write_v7m_control_spsel(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0);
        }
        if (cur_el > 0 && arm_feature(env, ARM_FEATURE_M_MAIN)) {
            env->v7m.control[env->v7m.secure] &= ~R_V7M_CONTROL_NPRIV_MASK;
            env->v7m.control[env->v7m.secure] |= val & R_V7M_CONTROL_NPRIV_MASK;
        }
        if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))) {
            /*
             * SFPA is RAZ/WI from NS or if no FPU.
             * FPCA is RO if NSACR.CP10 == 0, RES0 if the FPU is not present.
             * Both are stored in the S bank.
             */
            if (env->v7m.secure) {
                env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
                env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_SFPA_MASK;
            }
            if (cur_el > 0 &&
                (env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_SECURITY) ||
                 extract32(env->v7m.nsacr, 10, 1))) {
                env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
                env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_FPCA_MASK;
            }
        }
        break;
    default:
    bad_reg:
        qemu_log_mask(LOG_GUEST_ERROR, "Attempt to write unknown special"
                                       " register %d\n", reg);
        return;
    }
}

uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
{
    /* Implement the TT instruction. op is bits [7:6] of the insn. */
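    /*
     * Bit 0 of op is set for the TTT/TTAT insn forms (do the lookup
     * as if unprivileged) and bit 1 for the TTA/TTAT forms (query
     * the other security state).
     */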
    bool forceunpriv = op & 1;
    bool alt = op & 2;
    V8M_SAttributes sattrs = {};
    uint32_t tt_resp;
    bool r, rw, nsr, nsrw, mrvalid;
    int prot;
    ARMMMUFaultInfo fi = {};
    MemTxAttrs attrs = {};
    hwaddr phys_addr;
    ARMMMUIdx mmu_idx;
    uint32_t mregion;
    bool targetpriv;
    bool targetsec = env->v7m.secure;
    bool is_subpage;

    /*
     * Work out which security state and privilege level we're
     * interested in...
     */
    if (alt) {
        targetsec = !targetsec;
    }

    if (forceunpriv) {
        targetpriv = false;
    } else {
        targetpriv = arm_v7m_is_handler_mode(env) ||
            !(env->v7m.control[targetsec] & R_V7M_CONTROL_NPRIV_MASK);
    }

    /* ...and then figure out which MMU index this is */
    mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targetsec, targetpriv);

    /*
     * For our purposes the MPU and SAU don't care about the access
     * type, beyond that we don't want to claim to be an insn fetch,
     * so we arbitrarily call this a read.
     */

    /*
     * MPU region info is only available to privileged code, or when
     * inspecting the other security state's MPU.
     */
    if (arm_current_el(env) != 0 || alt) {
        /* We can ignore the return value as prot is always set */
        pmsav8_mpu_lookup(env, addr, MMU_DATA_LOAD, mmu_idx,
                          &phys_addr, &attrs, &prot, &is_subpage,
                          &fi, &mregion);
        if (mregion == -1) {
            mrvalid = false;
            mregion = 0;
        } else {
            mrvalid = true;
        }
        r = prot & PAGE_READ;
        rw = prot & PAGE_WRITE;
    } else {
        r = false;
        rw = false;
        mrvalid = false;
        mregion = 0;
    }

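    /*
     * The Secure-only response fields (S, NSR, NSRW, SRVALID, IRVALID
     * and the SAU region number) are only populated when TT executes
     * from Secure state; from Non-secure state they read as zero.
     */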
    if (env->v7m.secure) {
        v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
        nsr = sattrs.ns && r;
        nsrw = sattrs.ns && rw;
    } else {
        sattrs.ns = true;
        nsr = false;
        nsrw = false;
    }

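    /*
     * Pack the TT response word: IREGION [31:24], IRVALID [23],
     * S [22], NSRW [21], NSR [20], RW [19], R [18], SRVALID [17],
     * MRVALID [16], SREGION [15:8], MREGION [7:0].
     */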
    tt_resp = (sattrs.iregion << 24) |
        (sattrs.irvalid << 23) |
        ((!sattrs.ns) << 22) |
        (nsrw << 21) |
        (nsr << 20) |
        (rw << 19) |
        (r << 18) |
        (sattrs.srvalid << 17) |
        (mrvalid << 16) |
        (sattrs.sregion << 8) |
        mregion;

    return tt_resp;
}

#endif /* !CONFIG_USER_ONLY */

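/*
 * Combine an M-profile security state, privilege level and
 * "executing at negative priority" flag (such as in HardFault or
 * NMI handlers) into one of the eight M-profile MMU indexes.
 */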
ARMMMUIdx arm_v7m_mmu_idx_all(CPUARMState *env,
                              bool secstate, bool priv, bool negpri)
{
    ARMMMUIdx mmu_idx = ARM_MMU_IDX_M;

    if (priv) {
        mmu_idx |= ARM_MMU_IDX_M_PRIV;
    }

    if (negpri) {
        mmu_idx |= ARM_MMU_IDX_M_NEGPRI;
    }

    if (secstate) {
        mmu_idx |= ARM_MMU_IDX_M_S;
    }

    return mmu_idx;
}

ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
                                                bool secstate, bool priv)
{
    bool negpri = armv7m_nvic_neg_prio_requested(env->nvic, secstate);

    return arm_v7m_mmu_idx_all(env, secstate, priv, negpri);
}

/* Return the MMU index for a v7M CPU in the specified security state */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
{
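    /*
     * Handler mode always executes privileged; in thread mode
     * privilege is governed by CONTROL.nPRIV (bit 0).
     */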
    bool priv = arm_v7m_is_handler_mode(env) ||
        !(env->v7m.control[secstate] & 1);

    return arm_v7m_mmu_idx_for_secstate_and_priv(env, secstate, priv);
}