qemu/target/arm/m_helper.c
/*
 * ARM generic helpers.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "target/arm/idau.h"
#include "trace.h"
#include "cpu.h"
#include "internals.h"
#include "exec/gdbstub.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "qemu/main-loop.h"
#include "qemu/bitops.h"
#include "qemu/crc32c.h"
#include "qemu/qemu-print.h"
#include "exec/exec-all.h"
#include <zlib.h> /* For crc32 */
#include "hw/semihosting/semihost.h"
#include "sysemu/cpus.h"
#include "sysemu/kvm.h"
#include "qemu/range.h"
#include "qapi/qapi-commands-machine-target.h"
#include "qapi/error.h"
#include "qemu/guest-random.h"
#ifdef CONFIG_TCG
#include "arm_ldst.h"
#include "exec/cpu_ldst.h"
#endif

static void v7m_msr_xpsr(CPUARMState *env, uint32_t mask,
                         uint32_t reg, uint32_t val)
{
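    /*
     * The mask follows the MSR APSR field encoding: bit 3 (_nzcvq)
     * selects the NZCV and Q flags, bit 2 (_g) the GE bits.
     */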
    /* Only APSR is actually writable */
    if (!(reg & 4)) {
        uint32_t apsrmask = 0;

        if (mask & 8) {
            apsrmask |= XPSR_NZCV | XPSR_Q;
        }
        if ((mask & 4) && arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
            apsrmask |= XPSR_GE;
        }
        xpsr_write(env, val, apsrmask);
    }
}

static uint32_t v7m_mrs_xpsr(CPUARMState *env, uint32_t reg, unsigned el)
{
    uint32_t mask = 0;

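    /* SYSm bit 0 requests the IPSR field; a clear bit 2 requests the APSR fields */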
    if ((reg & 1) && el) {
        mask |= XPSR_EXCP; /* IPSR (unpriv. reads as zero) */
    }
    if (!(reg & 4)) {
        mask |= XPSR_NZCV | XPSR_Q; /* APSR */
        if (arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
            mask |= XPSR_GE;
        }
    }
    /* EPSR reads as zero */
    return xpsr_read(env) & mask;
}

static uint32_t v7m_mrs_control(CPUARMState *env, uint32_t secure)
{
    uint32_t value = env->v7m.control[secure];

    if (!secure) {
        /* SFPA is RAZ/WI from NS; FPCA is stored in the M_REG_S bank */
        value |= env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK;
    }
    return value;
}

#ifdef CONFIG_USER_ONLY

void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
{
    uint32_t mask = extract32(maskreg, 8, 4);
    uint32_t reg = extract32(maskreg, 0, 8);

    switch (reg) {
    case 0 ... 7: /* xPSR sub-fields */
        v7m_msr_xpsr(env, mask, reg, val);
        break;
    case 20: /* CONTROL */
        /* There are no sub-fields that are actually writable from EL0. */
        break;
    default:
        /* Unprivileged writes to other registers are ignored */
        break;
    }
}

uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    switch (reg) {
    case 0 ... 7: /* xPSR sub-fields */
        return v7m_mrs_xpsr(env, reg, 0);
    case 20: /* CONTROL */
        return v7m_mrs_control(env, 0);
    default:
        /* Unprivileged reads of other registers return zero. */
        return 0;
    }
}

void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
{
    /*
     * The TT instructions can be used by unprivileged code, but in
     * user-only emulation we don't have the MPU.
     * Luckily, since we know we are NonSecure unprivileged (and that in
     * turn means that the A flag wasn't specified), all the bits in the
     * register must be zero:
     *  IREGION: 0 because IRVALID is 0
     *  IRVALID: 0 because NS
     *  S: 0 because NS
     *  NSRW: 0 because NS
     *  NSR: 0 because NS
     *  RW: 0 because unpriv and A flag not set
     *  R: 0 because unpriv and A flag not set
     *  SRVALID: 0 because NS
     *  MRVALID: 0 because unpriv and A flag not set
     *  SREGION: 0 because SRVALID is 0
     *  MREGION: 0 because MRVALID is 0
     */
    return 0;
}

#else

/*
 * What kind of stack write are we doing? This affects how exceptions
 * generated during the stacking are treated.
 */
typedef enum StackingMode {
    STACK_NORMAL,
    STACK_IGNFAULTS,
    STACK_LAZYFP,
} StackingMode;
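
/*
 * STACK_NORMAL pends write faults as derived exceptions and STACK_LAZYFP
 * pends them as lazy-FP faults; STACK_IGNFAULTS updates the fault status
 * registers without pending anything (see v7m_stack_write() below).
 */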

static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value,
                            ARMMMUIdx mmu_idx, StackingMode mode)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult txres;
    target_ulong page_size;
    hwaddr physaddr;
    int prot;
    ARMMMUFaultInfo fi = {};
    ARMCacheAttrs cacheattrs = {};
    bool secure = mmu_idx & ARM_MMU_IDX_M_S;
    int exc;
    bool exc_secure;

    if (get_phys_addr(env, addr, MMU_DATA_STORE, mmu_idx, &physaddr,
                      &attrs, &prot, &page_size, &fi, &cacheattrs)) {
        /* MPU/SAU lookup failed */
        if (fi.type == ARMFault_QEMU_SFault) {
            if (mode == STACK_LAZYFP) {
                qemu_log_mask(CPU_LOG_INT,
                              "...SecureFault with SFSR.LSPERR "
                              "during lazy stacking\n");
                env->v7m.sfsr |= R_V7M_SFSR_LSPERR_MASK;
            } else {
                qemu_log_mask(CPU_LOG_INT,
                              "...SecureFault with SFSR.AUVIOL "
                              "during stacking\n");
                env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
            }
            env->v7m.sfsr |= R_V7M_SFSR_SFARVALID_MASK;
            env->v7m.sfar = addr;
            exc = ARMV7M_EXCP_SECURE;
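            /* SecureFault is not banked, so it must pend with exc_secure false */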
            exc_secure = false;
        } else {
            if (mode == STACK_LAZYFP) {
                qemu_log_mask(CPU_LOG_INT,
                              "...MemManageFault with CFSR.MLSPERR\n");
                env->v7m.cfsr[secure] |= R_V7M_CFSR_MLSPERR_MASK;
            } else {
                qemu_log_mask(CPU_LOG_INT,
                              "...MemManageFault with CFSR.MSTKERR\n");
                env->v7m.cfsr[secure] |= R_V7M_CFSR_MSTKERR_MASK;
            }
            exc = ARMV7M_EXCP_MEM;
            exc_secure = secure;
        }
        goto pend_fault;
    }
    address_space_stl(arm_addressspace(cs, attrs), physaddr, value,
                      attrs, &txres);
    if (txres != MEMTX_OK) {
        /* BusFault trying to write the data */
        if (mode == STACK_LAZYFP) {
            qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.LSPERR\n");
            env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_LSPERR_MASK;
        } else {
            qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.STKERR\n");
            env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_STKERR_MASK;
        }
        exc = ARMV7M_EXCP_BUS;
        exc_secure = false;
        goto pend_fault;
    }
    return true;

pend_fault:
    /*
     * By pending the exception at this point we are making
     * the IMPDEF choice "overridden exceptions pended" (see the
     * MergeExcInfo() pseudocode). The other choice would be to not
     * pend them now and then make a choice about which to throw away
     * later if we have two derived exceptions.
     * The only case when we must not pend the exception but instead
     * throw it away is if we are doing the push of the callee registers
     * and we've already generated a derived exception (this is indicated
     * by the caller passing STACK_IGNFAULTS). Even in this case we will
     * still update the fault status registers.
     */
    switch (mode) {
    case STACK_NORMAL:
        armv7m_nvic_set_pending_derived(env->nvic, exc, exc_secure);
        break;
    case STACK_LAZYFP:
        armv7m_nvic_set_pending_lazyfp(env->nvic, exc, exc_secure);
        break;
    case STACK_IGNFAULTS:
        break;
    }
    return false;
}

static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr,
                           ARMMMUIdx mmu_idx)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult txres;
    target_ulong page_size;
    hwaddr physaddr;
    int prot;
    ARMMMUFaultInfo fi = {};
    ARMCacheAttrs cacheattrs = {};
    bool secure = mmu_idx & ARM_MMU_IDX_M_S;
    int exc;
    bool exc_secure;
    uint32_t value;

    if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &physaddr,
                      &attrs, &prot, &page_size, &fi, &cacheattrs)) {
        /* MPU/SAU lookup failed */
        if (fi.type == ARMFault_QEMU_SFault) {
            qemu_log_mask(CPU_LOG_INT,
                          "...SecureFault with SFSR.AUVIOL during unstack\n");
            env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
            env->v7m.sfar = addr;
            exc = ARMV7M_EXCP_SECURE;
            exc_secure = false;
        } else {
            qemu_log_mask(CPU_LOG_INT,
                          "...MemManageFault with CFSR.MUNSTKERR\n");
            env->v7m.cfsr[secure] |= R_V7M_CFSR_MUNSTKERR_MASK;
            exc = ARMV7M_EXCP_MEM;
            exc_secure = secure;
        }
        goto pend_fault;
    }

    value = address_space_ldl(arm_addressspace(cs, attrs), physaddr,
                              attrs, &txres);
    if (txres != MEMTX_OK) {
        /* BusFault trying to read the data */
        qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.UNSTKERR\n");
        env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_UNSTKERR_MASK;
        exc = ARMV7M_EXCP_BUS;
        exc_secure = false;
        goto pend_fault;
    }

    *dest = value;
    return true;

pend_fault:
    /*
     * By pending the exception at this point we are making
     * the IMPDEF choice "overridden exceptions pended" (see the
     * MergeExcInfo() pseudocode). The other choice would be to not
     * pend them now and then make a choice about which to throw away
     * later if we have two derived exceptions.
     */
    armv7m_nvic_set_pending(env->nvic, exc, exc_secure);
    return false;
}

void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
{
    /*
     * Preserve FP state (because LSPACT was set and we are about
     * to execute an FP instruction). This corresponds to the
     * PreserveFPState() pseudocode.
     * We may throw an exception if the stacking fails.
     */
    ARMCPU *cpu = env_archcpu(env);
    bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
    bool negpri = !(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_HFRDY_MASK);
    bool is_priv = !(env->v7m.fpccr[is_secure] & R_V7M_FPCCR_USER_MASK);
    bool splimviol = env->v7m.fpccr[is_secure] & R_V7M_FPCCR_SPLIMVIOL_MASK;
    uint32_t fpcar = env->v7m.fpcar[is_secure];
    bool stacked_ok = true;
    bool ts = is_secure && (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK);
    bool take_exception;

    /* Take the iothread lock as we are going to touch the NVIC */
    qemu_mutex_lock_iothread();

    /* Check the background context had access to the FPU */
    if (!v7m_cpacr_pass(env, is_secure, is_priv)) {
        armv7m_nvic_set_pending_lazyfp(env->nvic, ARMV7M_EXCP_USAGE, is_secure);
        env->v7m.cfsr[is_secure] |= R_V7M_CFSR_NOCP_MASK;
        stacked_ok = false;
    } else if (!is_secure && !extract32(env->v7m.nsacr, 10, 1)) {
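        /* NSACR.CP10 denies NS access to the FPU: the UsageFault targets Secure */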
        armv7m_nvic_set_pending_lazyfp(env->nvic, ARMV7M_EXCP_USAGE, M_REG_S);
        env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
        stacked_ok = false;
    }

    if (!splimviol && stacked_ok) {
        /* We only stack if the stack limit wasn't violated */
        int i;
        ARMMMUIdx mmu_idx;

        mmu_idx = arm_v7m_mmu_idx_all(env, is_secure, is_priv, negpri);
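        /* Stack s0..s15, or s0..s31 when FPCCR.TS is set, two singles per D reg */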
        for (i = 0; i < (ts ? 32 : 16); i += 2) {
            uint64_t dn = *aa32_vfp_dreg(env, i / 2);
            uint32_t faddr = fpcar + 4 * i;
            uint32_t slo = extract64(dn, 0, 32);
            uint32_t shi = extract64(dn, 32, 32);

            if (i >= 16) {
                faddr += 8; /* skip the slot for the FPSCR */
            }
            stacked_ok = stacked_ok &&
                v7m_stack_write(cpu, faddr, slo, mmu_idx, STACK_LAZYFP) &&
                v7m_stack_write(cpu, faddr + 4, shi, mmu_idx, STACK_LAZYFP);
        }

        stacked_ok = stacked_ok &&
            v7m_stack_write(cpu, fpcar + 0x40,
                            vfp_get_fpscr(env), mmu_idx, STACK_LAZYFP);
    }

    /*
     * We definitely pended an exception, but it's possible that it
     * might not be able to be taken now. If its priority permits us
     * to take it now, then we must not update the LSPACT or FP regs,
     * but instead jump out to take the exception immediately.
     * If it's just pending and won't be taken until the current
     * handler exits, then we do update LSPACT and the FP regs.
     */
    take_exception = !stacked_ok &&
        armv7m_nvic_can_take_pending_exception(env->nvic);

    qemu_mutex_unlock_iothread();

    if (take_exception) {
        raise_exception_ra(env, EXCP_LAZYFP, 0, 1, GETPC());
    }

    env->v7m.fpccr[is_secure] &= ~R_V7M_FPCCR_LSPACT_MASK;

    if (ts) {
        /* Clear s0 to s31 and the FPSCR */
        int i;

        for (i = 0; i < 32; i += 2) {
            *aa32_vfp_dreg(env, i / 2) = 0;
        }
        vfp_set_fpscr(env, 0);
    }
    /*
     * Otherwise s0 to s15 and FPSCR are UNKNOWN; we choose to leave them
     * unchanged.
     */
}

/*
 * Write to v7M CONTROL.SPSEL bit for the specified security bank.
 * This may change the current stack pointer between Main and Process
 * stack pointers if it is done for the CONTROL register for the current
 * security state.
 */
static void write_v7m_control_spsel_for_secstate(CPUARMState *env,
                                                 bool new_spsel,
                                                 bool secstate)
{
    bool old_is_psp = v7m_using_psp(env);

    env->v7m.control[secstate] =
        deposit32(env->v7m.control[secstate],
                  R_V7M_CONTROL_SPSEL_SHIFT,
                  R_V7M_CONTROL_SPSEL_LENGTH, new_spsel);

    if (secstate == env->v7m.secure) {
        bool new_is_psp = v7m_using_psp(env);
        uint32_t tmp;

        if (old_is_psp != new_is_psp) {
            tmp = env->v7m.other_sp;
            env->v7m.other_sp = env->regs[13];
            env->regs[13] = tmp;
        }
    }
}

/*
 * Write to v7M CONTROL.SPSEL bit. This may change the current
 * stack pointer between Main and Process stack pointers.
 */
static void write_v7m_control_spsel(CPUARMState *env, bool new_spsel)
{
    write_v7m_control_spsel_for_secstate(env, new_spsel, env->v7m.secure);
}

void write_v7m_exception(CPUARMState *env, uint32_t new_exc)
{
    /*
     * Write a new value to v7m.exception, thus transitioning into or out
     * of Handler mode; this may result in a change of active stack pointer.
     */
    bool new_is_psp, old_is_psp = v7m_using_psp(env);
    uint32_t tmp;

    env->v7m.exception = new_exc;

    new_is_psp = v7m_using_psp(env);

    if (old_is_psp != new_is_psp) {
        tmp = env->v7m.other_sp;
        env->v7m.other_sp = env->regs[13];
        env->regs[13] = tmp;
    }
}

/* Switch M profile security state between NS and S */
static void switch_v7m_security_state(CPUARMState *env, bool new_secstate)
{
    uint32_t new_ss_msp, new_ss_psp;

    if (env->v7m.secure == new_secstate) {
        return;
    }

    /*
     * All the banked state is accessed by looking at env->v7m.secure
     * except for the stack pointer; rearrange the SP appropriately.
     */
    new_ss_msp = env->v7m.other_ss_msp;
    new_ss_psp = env->v7m.other_ss_psp;

    if (v7m_using_psp(env)) {
        env->v7m.other_ss_psp = env->regs[13];
        env->v7m.other_ss_msp = env->v7m.other_sp;
    } else {
        env->v7m.other_ss_msp = env->regs[13];
        env->v7m.other_ss_psp = env->v7m.other_sp;
    }

    env->v7m.secure = new_secstate;

    if (v7m_using_psp(env)) {
        env->regs[13] = new_ss_psp;
        env->v7m.other_sp = new_ss_msp;
    } else {
        env->regs[13] = new_ss_msp;
        env->v7m.other_sp = new_ss_psp;
    }
}

void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
{
    /*
     * Handle v7M BXNS:
     *  - if the return value is a magic value, do exception return (like BX)
     *  - otherwise bit 0 of the return value is the target security state
     */
    uint32_t min_magic;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        /* Covers FNC_RETURN and EXC_RETURN magic */
        min_magic = FNC_RETURN_MIN_MAGIC;
    } else {
        /* EXC_RETURN magic only */
        min_magic = EXC_RETURN_MIN_MAGIC;
    }

    if (dest >= min_magic) {
        /*
         * This is an exception return magic value; put it where
         * do_v7m_exception_exit() expects and raise EXCEPTION_EXIT.
         * Note that if we ever add gen_ss_advance() singlestep support to
         * M profile this should count as an "instruction execution complete"
         * event (compare gen_bx_excret_final_code()).
         */
        env->regs[15] = dest & ~1;
        env->thumb = dest & 1;
        HELPER(exception_internal)(env, EXCP_EXCEPTION_EXIT);
        /* notreached */
    }

    /* translate.c should have made BXNS UNDEF unless we're secure */
    assert(env->v7m.secure);

    if (!(dest & 1)) {
        env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
    }
    switch_v7m_security_state(env, dest & 1);
    env->thumb = 1;
    env->regs[15] = dest & ~1;
    arm_rebuild_hflags(env);
}

void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
{
    /*
     * Handle v7M BLXNS:
     *  - bit 0 of the destination address is the target security state
     */

    /* At this point regs[15] is the address just after the BLXNS */
    uint32_t nextinst = env->regs[15] | 1;
    uint32_t sp = env->regs[13] - 8;
    uint32_t saved_psr;

    /* translate.c will have made BLXNS UNDEF unless we're secure */
    assert(env->v7m.secure);

    if (dest & 1) {
        /*
         * Target is Secure, so this is just a normal BLX,
         * except that the low bit doesn't indicate Thumb/not.
         */
        env->regs[14] = nextinst;
        env->thumb = 1;
        env->regs[15] = dest & ~1;
        return;
    }

    /* Target is non-secure: first push a stack frame */
    if (!QEMU_IS_ALIGNED(sp, 8)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "BLXNS with misaligned SP is UNPREDICTABLE\n");
    }

    if (sp < v7m_sp_limit(env)) {
        raise_exception(env, EXCP_STKOF, 0, 1);
    }

    saved_psr = env->v7m.exception;
    if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK) {
        saved_psr |= XPSR_SFPA;
    }

    /* Note that these stores can throw exceptions on MPU faults */
    cpu_stl_data_ra(env, sp, nextinst, GETPC());
    cpu_stl_data_ra(env, sp + 4, saved_psr, GETPC());

    env->regs[13] = sp;
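    /* Set LR to the FNC_RETURN function-return magic value */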
    env->regs[14] = 0xfeffffff;
    if (arm_v7m_is_handler_mode(env)) {
        /*
         * Write a dummy value to IPSR, to avoid leaking the current secure
         * exception number to non-secure code. This is guaranteed not
         * to cause write_v7m_exception() to actually change stacks.
         */
        write_v7m_exception(env, 1);
    }
    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
    switch_v7m_security_state(env, 0);
    env->thumb = 1;
    env->regs[15] = dest;
    arm_rebuild_hflags(env);
}

static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode,
                                bool spsel)
{
    /*
     * Return a pointer to the location where we currently store the
     * stack pointer for the requested security state and thread mode.
     * This pointer will become invalid if the CPU state is updated
     * such that the stack pointers are switched around (eg changing
     * the SPSEL control bit).
     * Compare the v8M ARM ARM pseudocode LookUpSP_with_security_mode().
     * Unlike that pseudocode, we require the caller to pass us in the
     * SPSEL control bit value; this is because we also use this
     * function in handling of pushing of the callee-saves registers
     * part of the v8M stack frame (pseudocode PushCalleeStack()),
     * and in the tailchain codepath the SPSEL bit comes from the exception
     * return magic LR value from the previous exception. The pseudocode
     * opencodes the stack-selection in PushCalleeStack(), but we prefer
     * to make this utility function generic enough to do the job.
     */
    bool want_psp = threadmode && spsel;

    if (secure == env->v7m.secure) {
        if (want_psp == v7m_using_psp(env)) {
            return &env->regs[13];
        } else {
            return &env->v7m.other_sp;
        }
    } else {
        if (want_psp) {
            return &env->v7m.other_ss_psp;
        } else {
            return &env->v7m.other_ss_msp;
        }
    }
}

static bool arm_v7m_load_vector(ARMCPU *cpu, int exc, bool targets_secure,
                                uint32_t *pvec)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxResult result;
    uint32_t addr = env->v7m.vecbase[targets_secure] + exc * 4;
    uint32_t vector_entry;
    MemTxAttrs attrs = {};
    ARMMMUIdx mmu_idx;
    bool exc_secure;

    mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targets_secure, true);

    /*
     * We don't do a get_phys_addr() here because the rules for vector
     * loads are special: they always use the default memory map, and
     * the default memory map permits reads from all addresses.
     * Since there's no easy way to pass through to pmsav8_mpu_lookup()
     * that we want this special case which would always say "yes",
     * we just do the SAU lookup here followed by a direct physical load.
     */
    attrs.secure = targets_secure;
    attrs.user = false;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        V8M_SAttributes sattrs = {};

        v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
        if (sattrs.ns) {
            attrs.secure = false;
        } else if (!targets_secure) {
            /*
             * NS access to S memory: the underlying exception which we escalate
             * to HardFault is SecureFault, which always targets Secure.
             */
            exc_secure = true;
            goto load_fail;
        }
    }

    vector_entry = address_space_ldl(arm_addressspace(cs, attrs), addr,
                                     attrs, &result);
    if (result != MEMTX_OK) {
        /*
         * Underlying exception is BusFault: its target security state
         * depends on BFHFNMINS.
         */
        exc_secure = !(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK);
        goto load_fail;
    }
    *pvec = vector_entry;
    return true;

load_fail:
    /*
     * All vector table fetch fails are reported as HardFault, with
     * HFSR.VECTTBL and .FORCED set. (FORCED is set because
     * technically the underlying exception is a SecureFault or BusFault
     * that is escalated to HardFault.) This is a terminal exception,
     * so we will either take the HardFault immediately or else enter
     * lockup (the latter case is handled in armv7m_nvic_set_pending_derived()).
     * The HardFault is Secure if BFHFNMINS is 0 (meaning that all HFs are
     * secure); otherwise it targets the same security state as the
     * underlying exception.
     */
    if (!(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
        exc_secure = true;
    }
    env->v7m.hfsr |= R_V7M_HFSR_VECTTBL_MASK | R_V7M_HFSR_FORCED_MASK;
    armv7m_nvic_set_pending_derived(env->nvic, ARMV7M_EXCP_HARD, exc_secure);
    return false;
}

static uint32_t v7m_integrity_sig(CPUARMState *env, uint32_t lr)
{
    /*
     * Return the integrity signature value for the callee-saves
     * stack frame section. @lr is the exception return payload/LR value
     * whose FType bit forms bit 0 of the signature if FP is present.
     */
    uint32_t sig = 0xfefa125a;

    if (!cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))
        || (lr & R_V7M_EXCRET_FTYPE_MASK)) {
        sig |= 1;
    }
    return sig;
}

static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
                                  bool ignore_faults)
{
    /*
     * For v8M, push the callee-saves register part of the stack frame.
     * Compare the v8M pseudocode PushCalleeStack().
     * In the tailchaining case this may not be the current stack.
     */
    CPUARMState *env = &cpu->env;
    uint32_t *frame_sp_p;
    uint32_t frameptr;
    ARMMMUIdx mmu_idx;
    bool stacked_ok;
    uint32_t limit;
    bool want_psp;
    uint32_t sig;
    StackingMode smode = ignore_faults ? STACK_IGNFAULTS : STACK_NORMAL;

    if (dotailchain) {
        bool mode = lr & R_V7M_EXCRET_MODE_MASK;
        bool priv = !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_NPRIV_MASK) ||
            !mode;

        mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, M_REG_S, priv);
        frame_sp_p = get_v7m_sp_ptr(env, M_REG_S, mode,
                                    lr & R_V7M_EXCRET_SPSEL_MASK);
        want_psp = mode && (lr & R_V7M_EXCRET_SPSEL_MASK);
        if (want_psp) {
            limit = env->v7m.psplim[M_REG_S];
        } else {
            limit = env->v7m.msplim[M_REG_S];
        }
    } else {
        mmu_idx = arm_mmu_idx(env);
        frame_sp_p = &env->regs[13];
        limit = v7m_sp_limit(env);
    }

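    /* The callee-saves frame is 10 words: signature, a reserved word, then R4-R11 */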
    frameptr = *frame_sp_p - 0x28;
    if (frameptr < limit) {
        /*
         * Stack limit failure: set SP to the limit value, and generate
         * STKOF UsageFault. Stack pushes below the limit must not be
         * performed. It is IMPDEF whether pushes above the limit are
         * performed; we choose not to.
         */
        qemu_log_mask(CPU_LOG_INT,
                      "...STKOF during callee-saves register stacking\n");
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                env->v7m.secure);
        *frame_sp_p = limit;
        return true;
    }

    /*
     * Write as much of the stack frame as we can. A write failure may
     * cause us to pend a derived exception.
     */
    sig = v7m_integrity_sig(env, lr);
    stacked_ok =
        v7m_stack_write(cpu, frameptr, sig, mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x8, env->regs[4], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0xc, env->regs[5], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x10, env->regs[6], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x14, env->regs[7], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x18, env->regs[8], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x1c, env->regs[9], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x20, env->regs[10], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x24, env->regs[11], mmu_idx, smode);

    /* Update SP regardless of whether any of the stack accesses failed. */
    *frame_sp_p = frameptr;

    return !stacked_ok;
}

static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
                                bool ignore_stackfaults)
{
    /*
     * Do the "take the exception" parts of exception entry,
     * but not the pushing of state to the stack. This is
     * similar to the pseudocode ExceptionTaken() function.
     */
    CPUARMState *env = &cpu->env;
    uint32_t addr;
    bool targets_secure;
    int exc;
    bool push_failed = false;

    armv7m_nvic_get_pending_irq_info(env->nvic, &exc, &targets_secure);
    qemu_log_mask(CPU_LOG_INT, "...taking pending %s exception %d\n",
                  targets_secure ? "secure" : "nonsecure", exc);

    if (dotailchain) {
        /* Sanitize LR FType and PREFIX bits */
        if (!cpu_isar_feature(aa32_vfp_simd, cpu)) {
            lr |= R_V7M_EXCRET_FTYPE_MASK;
        }
        lr = deposit32(lr, 24, 8, 0xff);
    }

    if (arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
            (lr & R_V7M_EXCRET_S_MASK)) {
            /*
             * The background code (the owner of the registers in the
             * exception frame) is Secure. This means it may either already
             * have or now needs to push callee-saves registers.
             */
            if (targets_secure) {
                if (dotailchain && !(lr & R_V7M_EXCRET_ES_MASK)) {
                    /*
                     * We took an exception from Secure to NonSecure
                     * (which means the callee-saved registers got stacked)
                     * and are now tailchaining to a Secure exception.
                     * Clear DCRS so eventual return from this Secure
                     * exception unstacks the callee-saved registers.
                     */
                    lr &= ~R_V7M_EXCRET_DCRS_MASK;
                }
            } else {
                /*
                 * We're going to a non-secure exception; push the
                 * callee-saves registers to the stack now, if they're
                 * not already saved.
                 */
                if (lr & R_V7M_EXCRET_DCRS_MASK &&
                    !(dotailchain && !(lr & R_V7M_EXCRET_ES_MASK))) {
                    push_failed = v7m_push_callee_stack(cpu, lr, dotailchain,
                                                        ignore_stackfaults);
                }
                lr |= R_V7M_EXCRET_DCRS_MASK;
            }
        }

        lr &= ~R_V7M_EXCRET_ES_MASK;
        if (targets_secure || !arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            lr |= R_V7M_EXCRET_ES_MASK;
        }
        lr &= ~R_V7M_EXCRET_SPSEL_MASK;
        if (env->v7m.control[targets_secure] & R_V7M_CONTROL_SPSEL_MASK) {
            lr |= R_V7M_EXCRET_SPSEL_MASK;
        }

        /*
         * Clear registers if necessary to prevent non-secure exception
         * code being able to see register values from secure code.
         * Where register values become architecturally UNKNOWN we leave
         * them with their previous values.
         */
        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            if (!targets_secure) {
                /*
                 * Always clear the caller-saved registers (they have been
                 * pushed to the stack earlier in v7m_push_stack()).
                 * Clear callee-saved registers if the background code is
                 * Secure (in which case these regs were saved in
                 * v7m_push_callee_stack()).
                 */
                int i;

                for (i = 0; i < 13; i++) {
                    /* r4..r11 are callee-saves, zero only if EXCRET.S == 1 */
                    if (i < 4 || i > 11 || (lr & R_V7M_EXCRET_S_MASK)) {
                        env->regs[i] = 0;
                    }
                }
                /* Clear EAPSR */
                xpsr_write(env, 0, XPSR_NZCV | XPSR_Q | XPSR_GE | XPSR_IT);
            }
        }
    }

    if (push_failed && !ignore_stackfaults) {
        /*
         * Derived exception on callee-saves register stacking:
         * we might now want to take a different exception which
         * targets a different security state, so try again from the top.
         */
        qemu_log_mask(CPU_LOG_INT,
                      "...derived exception on callee-saves register stacking\n");
        v7m_exception_taken(cpu, lr, true, true);
        return;
    }

    if (!arm_v7m_load_vector(cpu, exc, targets_secure, &addr)) {
        /* Vector load failed: derived exception */
        qemu_log_mask(CPU_LOG_INT,
                      "...derived exception on vector table load\n");
        v7m_exception_taken(cpu, lr, true, true);
        return;
    }

    /*
     * Now we've done everything that might cause a derived exception
     * we can go ahead and activate whichever exception we're going to
     * take (which might now be the derived exception).
     */
    armv7m_nvic_acknowledge_irq(env->nvic);

    /* Switch to target security state -- must do this before writing SPSEL */
    switch_v7m_security_state(env, targets_secure);
    write_v7m_control_spsel(env, 0);
    arm_clear_exclusive(env);
    /* Clear SFPA and FPCA (has no effect if no FPU) */
    env->v7m.control[M_REG_S] &=
        ~(R_V7M_CONTROL_FPCA_MASK | R_V7M_CONTROL_SFPA_MASK);
    /* Clear IT bits */
    env->condexec_bits = 0;
    env->regs[14] = lr;
    env->regs[15] = addr & 0xfffffffe;
    env->thumb = addr & 1;
    arm_rebuild_hflags(env);
}

static void v7m_update_fpccr(CPUARMState *env, uint32_t frameptr,
                             bool apply_splim)
{
    /*
     * Like the pseudocode UpdateFPCCR: save state in FPCAR and FPCCR
     * that we will need later in order to do lazy FP reg stacking.
     */
    bool is_secure = env->v7m.secure;
    void *nvic = env->nvic;
    /*
     * Some bits are unbanked and live always in fpccr[M_REG_S]; some bits
     * are banked and we want to update the bit in the bank for the
     * current security state; and in one case we want to specifically
     * update the NS banked version of a bit even if we are secure.
     */
    uint32_t *fpccr_s = &env->v7m.fpccr[M_REG_S];
    uint32_t *fpccr_ns = &env->v7m.fpccr[M_REG_NS];
    uint32_t *fpccr = &env->v7m.fpccr[is_secure];
    bool hfrdy, bfrdy, mmrdy, ns_ufrdy, s_ufrdy, sfrdy, monrdy;

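    /* FPCAR holds the 8-byte-aligned address of the FP area of the frame */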
    env->v7m.fpcar[is_secure] = frameptr & ~0x7;

    if (apply_splim && arm_feature(env, ARM_FEATURE_V8)) {
        bool splimviol;
        uint32_t splim = v7m_sp_limit(env);
        bool ign = armv7m_nvic_neg_prio_requested(nvic, is_secure) &&
            (env->v7m.ccr[is_secure] & R_V7M_CCR_STKOFHFNMIGN_MASK);

        splimviol = !ign && frameptr < splim;
        *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, SPLIMVIOL, splimviol);
    }

    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, LSPACT, 1);

    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, S, is_secure);

    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, USER, arm_current_el(env) == 0);

    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, THREAD,
                        !arm_v7m_is_handler_mode(env));

    hfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_HARD, false);
    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, HFRDY, hfrdy);

    bfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_BUS, false);
    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, BFRDY, bfrdy);

    mmrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_MEM, is_secure);
    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, MMRDY, mmrdy);

    ns_ufrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_USAGE, false);
    *fpccr_ns = FIELD_DP32(*fpccr_ns, V7M_FPCCR, UFRDY, ns_ufrdy);

    monrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_DEBUG, false);
    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, MONRDY, monrdy);

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        s_ufrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_USAGE, true);
        *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, UFRDY, s_ufrdy);

        sfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_SECURE, false);
        *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, SFRDY, sfrdy);
    }
}

void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
{
    /* fptr is the value of Rn, the frame pointer we store the FP regs to */
    bool s = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
    bool lspact = env->v7m.fpccr[s] & R_V7M_FPCCR_LSPACT_MASK;
    uintptr_t ra = GETPC();

    assert(env->v7m.secure);

    if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
        return;
    }

    /* Check access to the coprocessor is permitted */
    if (!v7m_cpacr_pass(env, true, arm_current_el(env) != 0)) {
        raise_exception_ra(env, EXCP_NOCP, 0, 1, GETPC());
    }

    if (lspact) {
        /* LSPACT should not be active when there is active FP state */
        raise_exception_ra(env, EXCP_LSERR, 0, 1, GETPC());
    }

    if (fptr & 7) {
        raise_exception_ra(env, EXCP_UNALIGNED, 0, 1, GETPC());
    }

    /*
     * Note that we do not use v7m_stack_write() here, because the
     * accesses should not set the FSR bits for stacking errors if they
     * fail. (In pseudocode terms, they are AccType_NORMAL, not AccType_STACK
     * or AccType_LAZYFP). Faults in cpu_stl_data_ra() will throw exceptions
     * and longjmp out.
     */
    if (!(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPEN_MASK)) {
        bool ts = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK;
        int i;

        for (i = 0; i < (ts ? 32 : 16); i += 2) {
            uint64_t dn = *aa32_vfp_dreg(env, i / 2);
            uint32_t faddr = fptr + 4 * i;
            uint32_t slo = extract64(dn, 0, 32);
            uint32_t shi = extract64(dn, 32, 32);

            if (i >= 16) {
                faddr += 8; /* skip the slot for the FPSCR */
            }
            cpu_stl_data_ra(env, faddr, slo, ra);
            cpu_stl_data_ra(env, faddr + 4, shi, ra);
        }
        cpu_stl_data_ra(env, fptr + 0x40, vfp_get_fpscr(env), ra);

        /*
         * If TS is 0 then s0 to s15 and FPSCR are UNKNOWN; we choose to
         * leave them unchanged, matching our choice in v7m_preserve_fp_state.
         */
        if (ts) {
            for (i = 0; i < 32; i += 2) {
                *aa32_vfp_dreg(env, i / 2) = 0;
            }
            vfp_set_fpscr(env, 0);
        }
    } else {
        v7m_update_fpccr(env, fptr, false);
    }

    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
}

void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
{
    uintptr_t ra = GETPC();

    /* fptr is the value of Rn, the frame pointer we load the FP regs from */
    assert(env->v7m.secure);

    if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
        return;
    }

    /* Check access to the coprocessor is permitted */
    if (!v7m_cpacr_pass(env, true, arm_current_el(env) != 0)) {
        raise_exception_ra(env, EXCP_NOCP, 0, 1, GETPC());
    }

    if (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK) {
        /* State in FP is still valid */
        env->v7m.fpccr[M_REG_S] &= ~R_V7M_FPCCR_LSPACT_MASK;
    } else {
        bool ts = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK;
        int i;
        uint32_t fpscr;

        if (fptr & 7) {
            raise_exception_ra(env, EXCP_UNALIGNED, 0, 1, GETPC());
        }

        for (i = 0; i < (ts ? 32 : 16); i += 2) {
            uint32_t slo, shi;
            uint64_t dn;
            uint32_t faddr = fptr + 4 * i;

            if (i >= 16) {
                faddr += 8; /* skip the slot for the FPSCR */
            }

            slo = cpu_ldl_data_ra(env, faddr, ra);
            shi = cpu_ldl_data_ra(env, faddr + 4, ra);

            dn = (uint64_t) shi << 32 | slo;
            *aa32_vfp_dreg(env, i / 2) = dn;
        }
        fpscr = cpu_ldl_data_ra(env, fptr + 0x40, ra);
        vfp_set_fpscr(env, fpscr);
    }

    env->v7m.control[M_REG_S] |= R_V7M_CONTROL_FPCA_MASK;
}

static bool v7m_push_stack(ARMCPU *cpu)
{
    /*
     * Do the "set up stack frame" part of exception entry,
     * similar to pseudocode PushStack().
     * Return true if we generate a derived exception (and so the caller
     * should ignore further stack faults when trying to process that
     * derived exception).
     */
    bool stacked_ok = true, limitviol = false;
    CPUARMState *env = &cpu->env;
    uint32_t xpsr = xpsr_read(env);
    uint32_t frameptr = env->regs[13];
    ARMMMUIdx mmu_idx = arm_mmu_idx(env);
    uint32_t framesize;
    bool nsacr_cp10 = extract32(env->v7m.nsacr, 10, 1);

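    /*
     * Frame size is 0x20 for the basic 8-word frame, 0x68 when s0-s15,
     * FPSCR and a reserved word are stacked too, and 0xa8 when s16-s31
     * are stacked as well.
     */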
    if ((env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) &&
        (env->v7m.secure || nsacr_cp10)) {
        if (env->v7m.secure &&
            env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK) {
            framesize = 0xa8;
        } else {
            framesize = 0x68;
        }
    } else {
        framesize = 0x20;
    }

    /* Align stack pointer if the guest wants that */
    if ((frameptr & 4) &&
        (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKALIGN_MASK)) {
        frameptr -= 4;
        xpsr |= XPSR_SPREALIGN;
    }

    xpsr &= ~XPSR_SFPA;
    if (env->v7m.secure &&
        (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
        xpsr |= XPSR_SFPA;
    }

    frameptr -= framesize;

    if (arm_feature(env, ARM_FEATURE_V8)) {
        uint32_t limit = v7m_sp_limit(env);

        if (frameptr < limit) {
            /*
             * Stack limit failure: set SP to the limit value, and generate
             * STKOF UsageFault. Stack pushes below the limit must not be
             * performed. It is IMPDEF whether pushes above the limit are
             * performed; we choose not to.
             */
            qemu_log_mask(CPU_LOG_INT,
                          "...STKOF during stacking\n");
            env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                    env->v7m.secure);
            env->regs[13] = limit;
            /*
             * We won't try to perform any further memory accesses but
             * we must continue through the following code to check for
             * permission faults during FPU state preservation, and we
             * must update FPCCR if lazy stacking is enabled.
             */
            limitviol = true;
            stacked_ok = false;
        }
    }

    /*
     * Write as much of the stack frame as we can. If we fail a stack
     * write this will result in a derived exception being pended
     * (which may be taken in preference to the one we started with
     * if it has higher priority).
     */
    stacked_ok = stacked_ok &&
        v7m_stack_write(cpu, frameptr, env->regs[0], mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 4, env->regs[1],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 8, env->regs[2],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 12, env->regs[3],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 16, env->regs[12],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 20, env->regs[14],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 24, env->regs[15],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 28, xpsr, mmu_idx, STACK_NORMAL);

    if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) {
        /* FPU is active, try to save its registers */
        bool fpccr_s = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
        bool lspact = env->v7m.fpccr[fpccr_s] & R_V7M_FPCCR_LSPACT_MASK;

        if (lspact && arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            qemu_log_mask(CPU_LOG_INT,
                          "...SecureFault because LSPACT and FPCA both set\n");
            env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
        } else if (!env->v7m.secure && !nsacr_cp10) {
            qemu_log_mask(CPU_LOG_INT,
                          "...Secure UsageFault with CFSR.NOCP because "
                          "NSACR.CP10 prevents stacking FP regs\n");
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, M_REG_S);
            env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
        } else {
            if (!(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPEN_MASK)) {
                /* Lazy stacking disabled, save registers now */
                int i;
                bool cpacr_pass = v7m_cpacr_pass(env, env->v7m.secure,
                                                 arm_current_el(env) != 0);

                if (stacked_ok && !cpacr_pass) {
                    /*
                     * Take UsageFault if CPACR forbids access. The pseudocode
                     * here does a full CheckCPEnabled() but we know the NSACR
                     * check can never fail as we have already handled that.
                     */
                    qemu_log_mask(CPU_LOG_INT,
                                  "...UsageFault with CFSR.NOCP because "
                                  "CPACR.CP10 prevents stacking FP regs\n");
                    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                            env->v7m.secure);
                    env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_NOCP_MASK;
                    stacked_ok = false;
                }

                for (i = 0; i < ((framesize == 0xa8) ? 32 : 16); i += 2) {
                    uint64_t dn = *aa32_vfp_dreg(env, i / 2);
                    uint32_t faddr = frameptr + 0x20 + 4 * i;
                    uint32_t slo = extract64(dn, 0, 32);
                    uint32_t shi = extract64(dn, 32, 32);

                    if (i >= 16) {
                        faddr += 8; /* skip the slot for the FPSCR */
                    }
                    stacked_ok = stacked_ok &&
                        v7m_stack_write(cpu, faddr, slo,
                                        mmu_idx, STACK_NORMAL) &&
                        v7m_stack_write(cpu, faddr + 4, shi,
                                        mmu_idx, STACK_NORMAL);
                }
                stacked_ok = stacked_ok &&
                    v7m_stack_write(cpu, frameptr + 0x60,
                                    vfp_get_fpscr(env), mmu_idx, STACK_NORMAL);
                if (cpacr_pass) {
                    for (i = 0; i < ((framesize == 0xa8) ? 32 : 16); i += 2) {
                        *aa32_vfp_dreg(env, i / 2) = 0;
                    }
                    vfp_set_fpscr(env, 0);
                }
            } else {
                /* Lazy stacking enabled, save necessary info to stack later */
                v7m_update_fpccr(env, frameptr + 0x20, true);
            }
        }
    }

    /*
     * If we broke a stack limit then SP was already updated earlier;
     * otherwise we update SP regardless of whether any of the stack
     * accesses failed or we took some other kind of fault.
     */
    if (!limitviol) {
        env->regs[13] = frameptr;
    }

    return !stacked_ok;
}

static void do_v7m_exception_exit(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    uint32_t excret;
    uint32_t xpsr, xpsr_mask;
    bool ufault = false;
    bool sfault = false;
    bool return_to_sp_process;
    bool return_to_handler;
    bool rettobase = false;
    bool exc_secure = false;
    bool return_to_secure;
    bool ftype;
    bool restore_s16_s31;

    /*
     * If we're not in Handler mode then jumps to magic exception-exit
     * addresses don't have magic behaviour. However for the v8M
     * security extensions the magic secure-function-return has to
     * work in thread mode too, so to avoid doing an extra check in
     * the generated code we allow exception-exit magic to also cause the
     * internal exception and bring us here in thread mode. Correct code
     * will never try to do this (the following insn fetch will always
     * fault) so the overhead of having taken an unnecessary exception
     * doesn't matter.
     */
1350    if (!arm_v7m_is_handler_mode(env)) {
1351        return;
1352    }
1353
1354    /*
1355     * In the spec pseudocode ExceptionReturn() is called directly
1356     * from BXWritePC() and gets the full target PC value including
1357     * bit zero. In QEMU's implementation we treat it as a normal
1358     * jump-to-register (which is then caught later on), and so split
1359     * the target value up between env->regs[15] and env->thumb in
1360     * gen_bx(). Reconstitute it.
1361     */
1362    excret = env->regs[15];
1363    if (env->thumb) {
1364        excret |= 1;
1365    }
1366
1367    qemu_log_mask(CPU_LOG_INT, "Exception return: magic PC %" PRIx32
1368                  " previous exception %d\n",
1369                  excret, env->v7m.exception);
1370
1371    if ((excret & R_V7M_EXCRET_RES1_MASK) != R_V7M_EXCRET_RES1_MASK) {
1372        qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero high bits in exception "
1373                      "exit PC value 0x%" PRIx32 " are UNPREDICTABLE\n",
1374                      excret);
1375    }
1376
1377    ftype = excret & R_V7M_EXCRET_FTYPE_MASK;
1378
1379    if (!ftype && !cpu_isar_feature(aa32_vfp_simd, cpu)) {
1380        qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero FTYPE in exception "
1381                      "exit PC value 0x%" PRIx32 " is UNPREDICTABLE "
1382                      "if FPU not present\n",
1383                      excret);
1384        ftype = true;
1385    }
1386
1387    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
1388        /*
1389         * EXC_RETURN.ES validation check (R_SMFL). We must do this before
1390         * we pick which FAULTMASK to clear.
1391         */
1392        if (!env->v7m.secure &&
1393            ((excret & R_V7M_EXCRET_ES_MASK) ||
1394             !(excret & R_V7M_EXCRET_DCRS_MASK))) {
1395            sfault = 1;
1396            /* For all other purposes, treat ES as 0 (R_HXSR) */
1397            excret &= ~R_V7M_EXCRET_ES_MASK;
1398        }
1399        exc_secure = excret & R_V7M_EXCRET_ES_MASK;
1400    }
1401
1402    if (env->v7m.exception != ARMV7M_EXCP_NMI) {
1403        /*
1404         * Auto-clear FAULTMASK on return from other than NMI.
1405         * If the security extension is implemented then this only
1406         * happens if the raw execution priority is >= 0; the
1407         * value of the ES bit in the exception return value indicates
1408         * which security state's faultmask to clear. (v8M ARM ARM R_KBNF.)
1409         */
1410        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
1411            if (armv7m_nvic_raw_execution_priority(env->nvic) >= 0) {
1412                env->v7m.faultmask[exc_secure] = 0;
1413            }
1414        } else {
1415            env->v7m.faultmask[M_REG_NS] = 0;
1416        }
1417    }
1418
1419    switch (armv7m_nvic_complete_irq(env->nvic, env->v7m.exception,
1420                                     exc_secure)) {
1421    case -1:
1422        /* attempt to exit an exception that isn't active */
1423        ufault = true;
1424        break;
1425    case 0:
1426        /* still an irq active now */
1427        break;
1428    case 1:
1429        /*
1430         * We returned to base exception level, no nesting.
1431         * (In the pseudocode this is written using "NestedActivation != 1"
1432         * where we have 'rettobase == false'.)
1433         */
1434        rettobase = true;
1435        break;
1436    default:
1437        g_assert_not_reached();
1438    }
1439
1440    return_to_handler = !(excret & R_V7M_EXCRET_MODE_MASK);
1441    return_to_sp_process = excret & R_V7M_EXCRET_SPSEL_MASK;
1442    return_to_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
1443        (excret & R_V7M_EXCRET_S_MASK);
1444
1445    if (arm_feature(env, ARM_FEATURE_V8)) {
1446        if (!arm_feature(env, ARM_FEATURE_M_SECURITY)) {
1447            /*
1448             * UNPREDICTABLE if S == 1 or DCRS == 0 or ES == 1 (R_XLCP);
1449             * we choose to take the UsageFault.
1450             */
1451            if ((excret & R_V7M_EXCRET_S_MASK) ||
1452                (excret & R_V7M_EXCRET_ES_MASK) ||
1453                !(excret & R_V7M_EXCRET_DCRS_MASK)) {
1454                ufault = true;
1455            }
1456        }
1457        if (excret & R_V7M_EXCRET_RES0_MASK) {
1458            ufault = true;
1459        }
1460    } else {
1461        /* For v7M we only recognize certain combinations of the low bits */
1462        switch (excret & 0xf) {
1463        case 1: /* Return to Handler */
1464            break;
1465        case 13: /* Return to Thread using Process stack */
1466        case 9: /* Return to Thread using Main stack */
1467            /*
1468             * We only need to check NONBASETHRDENA for v7M, because in
1469             * v8M this bit does not exist (it is RES1).
1470             */
1471            if (!rettobase &&
1472                !(env->v7m.ccr[env->v7m.secure] &
1473                  R_V7M_CCR_NONBASETHRDENA_MASK)) {
1474                ufault = true;
1475            }
1476            break;
1477        default:
1478            ufault = true;
1479        }
1480    }
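
        /*
         * For reference, the three recognized low-nibble patterns above are
         * the classic v7M EXC_RETURN values: 0xfffffff1 (return to Handler,
         * main stack), 0xfffffff9 (Thread, main stack) and 0xfffffffd
         * (Thread, process stack).
         */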
1481
1482    /*
1483     * Set CONTROL.SPSEL from excret.SPSEL. Since we're still in
1484     * Handler mode (and will be until we write the new XPSR.Interrupt
1485     * field) this does not switch around the current stack pointer.
1486     * We must do this before we do any kind of tailchaining, including
1487     * for the derived exceptions on integrity check failures, or we will
1488     * give the guest an incorrect EXCRET.SPSEL value on exception entry.
1489     */
1490    write_v7m_control_spsel_for_secstate(env, return_to_sp_process, exc_secure);
1491
1492    /*
1493     * Clear scratch FP values left in caller-saved registers; this
1494     * must happen before any kind of tail chaining.
1495     */
1496    if ((env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_CLRONRET_MASK) &&
1497        (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK)) {
1498        if (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK) {
1499            env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
1500            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
1501            qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
1502                          "stackframe: error during lazy state deactivation\n");
1503            v7m_exception_taken(cpu, excret, true, false);
1504            return;
1505        } else {
1506            /* Clear s0..s15 and FPSCR */
1507            int i;
1508
1509            for (i = 0; i < 16; i += 2) {
1510                *aa32_vfp_dreg(env, i / 2) = 0;
1511            }
1512            vfp_set_fpscr(env, 0);
1513        }
1514    }
1515
1516    if (sfault) {
1517        env->v7m.sfsr |= R_V7M_SFSR_INVER_MASK;
1518        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
1519        qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
1520                      "stackframe: failed EXC_RETURN.ES validity check\n");
1521        v7m_exception_taken(cpu, excret, true, false);
1522        return;
1523    }
1524
1525    if (ufault) {
1526        /*
1527         * Bad exception return: instead of popping the exception
1528         * stack, directly take a usage fault on the current stack.
1529         */
1530        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
1531        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
1532        qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
1533                      "stackframe: failed exception return integrity check\n");
1534        v7m_exception_taken(cpu, excret, true, false);
1535        return;
1536    }
1537
1538    /*
1539     * Tailchaining: if there is currently a pending exception that
1540     * is high enough priority to preempt execution at the level we're
1541     * about to return to, then just directly take that exception now,
1542     * avoiding an unstack-and-then-stack. Note that, now that we have
1543     * deactivated the previous exception by calling armv7m_nvic_complete_irq(),
1544     * our current execution priority is already the execution priority we are
1545     * returning to -- none of the state we would unstack or set based on
1546     * the EXCRET value affects it.
1547     */
1548    if (armv7m_nvic_can_take_pending_exception(env->nvic)) {
1549        qemu_log_mask(CPU_LOG_INT, "...tailchaining to pending exception\n");
1550        v7m_exception_taken(cpu, excret, true, false);
1551        return;
1552    }
1553
1554    switch_v7m_security_state(env, return_to_secure);
1555
1556    {
1557        /*
1558         * The stack pointer we should be reading the exception frame from
1559         * depends on bits in the magic exception return type value (and
1560         * for v8M isn't necessarily the stack pointer we will eventually
1561         * end up resuming execution with). Get a pointer to the location
1562         * in the CPU state struct where the SP we need is currently being
1563         * stored; we will use and modify it in place.
1564         * We use this limited C variable scope so we don't accidentally
1565         * use 'frame_sp_p' after we do something that makes it invalid.
1566         */
1567        uint32_t *frame_sp_p = get_v7m_sp_ptr(env,
1568                                              return_to_secure,
1569                                              !return_to_handler,
1570                                              return_to_sp_process);
1571        uint32_t frameptr = *frame_sp_p;
1572        bool pop_ok = true;
1573        ARMMMUIdx mmu_idx;
1574        bool return_to_priv = return_to_handler ||
1575            !(env->v7m.control[return_to_secure] & R_V7M_CONTROL_NPRIV_MASK);
1576
1577        mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, return_to_secure,
1578                                                        return_to_priv);
1579
1580        if (!QEMU_IS_ALIGNED(frameptr, 8) &&
1581            arm_feature(env, ARM_FEATURE_V8)) {
1582            qemu_log_mask(LOG_GUEST_ERROR,
1583                          "M profile exception return with non-8-aligned SP "
1584                          "for destination state is UNPREDICTABLE\n");
1585        }
1586
1587        /* Do we need to pop callee-saved registers? */
1588        if (return_to_secure &&
1589            ((excret & R_V7M_EXCRET_ES_MASK) == 0 ||
1590             (excret & R_V7M_EXCRET_DCRS_MASK) == 0)) {
1591            uint32_t actual_sig;
1592
1593            pop_ok = v7m_stack_read(cpu, &actual_sig, frameptr, mmu_idx);
1594
1595            if (pop_ok && v7m_integrity_sig(env, excret) != actual_sig) {
1596                /* Take a SecureFault on the current stack */
1597                env->v7m.sfsr |= R_V7M_SFSR_INVIS_MASK;
1598                armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
1599                qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
1600                              "stackframe: failed exception return integrity "
1601                              "signature check\n");
1602                v7m_exception_taken(cpu, excret, true, false);
1603                return;
1604            }
1605
1606            pop_ok = pop_ok &&
1607                v7m_stack_read(cpu, &env->regs[4], frameptr + 0x8, mmu_idx) &&
1608                v7m_stack_read(cpu, &env->regs[5], frameptr + 0xc, mmu_idx) &&
1609                v7m_stack_read(cpu, &env->regs[6], frameptr + 0x10, mmu_idx) &&
1610                v7m_stack_read(cpu, &env->regs[7], frameptr + 0x14, mmu_idx) &&
1611                v7m_stack_read(cpu, &env->regs[8], frameptr + 0x18, mmu_idx) &&
1612                v7m_stack_read(cpu, &env->regs[9], frameptr + 0x1c, mmu_idx) &&
1613                v7m_stack_read(cpu, &env->regs[10], frameptr + 0x20, mmu_idx) &&
1614                v7m_stack_read(cpu, &env->regs[11], frameptr + 0x24, mmu_idx);
1615
1616            frameptr += 0x28;
1617        }
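
            /*
             * Sketch of the extension frame consumed above, as offsets from
             * the original frameptr: 0x00 integrity signature, 0x04
             * reserved, 0x08..0x24 R4-R11 -- 0x28 bytes in total, after
             * which frameptr points at the basic frame.
             */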
1618
1619        /* Pop registers */
1620        pop_ok = pop_ok &&
1621            v7m_stack_read(cpu, &env->regs[0], frameptr, mmu_idx) &&
1622            v7m_stack_read(cpu, &env->regs[1], frameptr + 0x4, mmu_idx) &&
1623            v7m_stack_read(cpu, &env->regs[2], frameptr + 0x8, mmu_idx) &&
1624            v7m_stack_read(cpu, &env->regs[3], frameptr + 0xc, mmu_idx) &&
1625            v7m_stack_read(cpu, &env->regs[12], frameptr + 0x10, mmu_idx) &&
1626            v7m_stack_read(cpu, &env->regs[14], frameptr + 0x14, mmu_idx) &&
1627            v7m_stack_read(cpu, &env->regs[15], frameptr + 0x18, mmu_idx) &&
1628            v7m_stack_read(cpu, &xpsr, frameptr + 0x1c, mmu_idx);
1629
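            /*
             * For reference, the basic frame just consumed is laid out as:
             * 0x00 R0, 0x04 R1, 0x08 R2, 0x0c R3, 0x10 R12, 0x14 LR,
             * 0x18 return address and 0x1c xPSR.
             */
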
1630        if (!pop_ok) {
1631            /*
1632             * v7m_stack_read() pended a fault, so take it (as a tail
1633             * chained exception on the same stack frame)
1634             */
1635            qemu_log_mask(CPU_LOG_INT, "...derived exception on unstacking\n");
1636            v7m_exception_taken(cpu, excret, true, false);
1637            return;
1638        }
1639
1640        /*
1641         * Returning from an exception with a PC with bit 0 set is defined
1642         * behaviour on v8M (bit 0 is ignored), but for v7M it was specified
1643         * to be UNPREDICTABLE. In practice actual v7M hardware seems to ignore
1644         * the lsbit, and there are several RTOSes out there which incorrectly
1645         * assume the r15 in the stack frame should be a Thumb-style "lsbit
1646         * indicates ARM/Thumb" value, so ignore the bit on v7M as well, but
1647         * complain about the badly behaved guest.
1648         */
1649        if (env->regs[15] & 1) {
1650            env->regs[15] &= ~1U;
1651            if (!arm_feature(env, ARM_FEATURE_V8)) {
1652                qemu_log_mask(LOG_GUEST_ERROR,
1653                              "M profile return from interrupt with misaligned "
1654                              "PC is UNPREDICTABLE on v7M\n");
1655            }
1656        }
1657
1658        if (arm_feature(env, ARM_FEATURE_V8)) {
1659            /*
1660             * For v8M we have to check whether the xPSR exception field
1661             * matches the EXCRET value for return to handler/thread
1662             * before we commit to changing the SP and xPSR.
1663             */
1664            bool will_be_handler = (xpsr & XPSR_EXCP) != 0;
1665            if (return_to_handler != will_be_handler) {
1666                /*
1667                 * Take an INVPC UsageFault on the current stack.
1668                 * By this point we will have switched to the security state
1669                 * for the background state, so this UsageFault will target
1670                 * that state.
1671                 */
1672                armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
1673                                        env->v7m.secure);
1674                env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
1675                qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
1676                              "stackframe: failed exception return integrity "
1677                              "check\n");
1678                v7m_exception_taken(cpu, excret, true, false);
1679                return;
1680            }
1681        }
1682
1683        if (!ftype) {
1684            /* FP present and we need to handle it */
1685            if (!return_to_secure &&
1686                (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK)) {
1687                armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
1688                env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
1689                qemu_log_mask(CPU_LOG_INT,
1690                              "...taking SecureFault on existing stackframe: "
1691                              "Secure LSPACT set but exception return is "
1692                              "not to secure state\n");
1693                v7m_exception_taken(cpu, excret, true, false);
1694                return;
1695            }
1696
1697            restore_s16_s31 = return_to_secure &&
1698                (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK);
1699
1700            if (env->v7m.fpccr[return_to_secure] & R_V7M_FPCCR_LSPACT_MASK) {
1701                /* State in FPU is still valid, just clear LSPACT */
1702                env->v7m.fpccr[return_to_secure] &= ~R_V7M_FPCCR_LSPACT_MASK;
1703            } else {
1704                int i;
1705                uint32_t fpscr;
1706                bool cpacr_pass, nsacr_pass;
1707
1708                cpacr_pass = v7m_cpacr_pass(env, return_to_secure,
1709                                            return_to_priv);
1710                nsacr_pass = return_to_secure ||
1711                    extract32(env->v7m.nsacr, 10, 1);
1712
1713                if (!cpacr_pass) {
1714                    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
1715                                            return_to_secure);
1716                    env->v7m.cfsr[return_to_secure] |= R_V7M_CFSR_NOCP_MASK;
1717                    qemu_log_mask(CPU_LOG_INT,
1718                                  "...taking UsageFault on existing "
1719                                  "stackframe: CPACR.CP10 prevents unstacking "
1720                                  "FP regs\n");
1721                    v7m_exception_taken(cpu, excret, true, false);
1722                    return;
1723                } else if (!nsacr_pass) {
1724                    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, true);
1725                    env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_INVPC_MASK;
1726                    qemu_log_mask(CPU_LOG_INT,
1727                                  "...taking Secure UsageFault on existing "
1728                                  "stackframe: NSACR.CP10 prevents unstacking "
1729                                  "FP regs\n");
1730                    v7m_exception_taken(cpu, excret, true, false);
1731                    return;
1732                }
1733
1734                for (i = 0; i < (restore_s16_s31 ? 32 : 16); i += 2) {
1735                    uint32_t slo, shi;
1736                    uint64_t dn;
1737                    uint32_t faddr = frameptr + 0x20 + 4 * i;
1738
1739                    if (i >= 16) {
1740                        faddr += 8; /* Skip the slot for the FPSCR */
1741                    }
1742
1743                    pop_ok = pop_ok &&
1744                        v7m_stack_read(cpu, &slo, faddr, mmu_idx) &&
1745                        v7m_stack_read(cpu, &shi, faddr + 4, mmu_idx);
1746
1747                    if (!pop_ok) {
1748                        break;
1749                    }
1750
1751                    dn = (uint64_t)shi << 32 | slo;
1752                    *aa32_vfp_dreg(env, i / 2) = dn;
1753                }
1754                pop_ok = pop_ok &&
1755                    v7m_stack_read(cpu, &fpscr, frameptr + 0x60, mmu_idx);
1756                if (pop_ok) {
1757                    vfp_set_fpscr(env, fpscr);
1758                }
1759                if (!pop_ok) {
1760                    /*
1761                     * These regs are 0 if security extension present;
1762                     * otherwise merely UNKNOWN. We zero always.
1763                     */
1764                    for (i = 0; i < (restore_s16_s31 ? 32 : 16); i += 2) {
1765                        *aa32_vfp_dreg(env, i / 2) = 0;
1766                    }
1767                    vfp_set_fpscr(env, 0);
1768                }
1769            }
1770        }
1771        env->v7m.control[M_REG_S] = FIELD_DP32(env->v7m.control[M_REG_S],
1772                                               V7M_CONTROL, FPCA, !ftype);
1773
1774        /* Commit to consuming the stack frame */
1775        frameptr += 0x20;
1776        if (!ftype) {
1777            frameptr += 0x48;
1778            if (restore_s16_s31) {
1779                frameptr += 0x40;
1780            }
1781        }
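
            /*
             * Frame-size arithmetic, for reference: the basic frame is 8
             * words (0x20); the FP extension adds s0-s15, FPSCR and a
             * reserved word (18 words, 0x48); restoring s16-s31 adds 16
             * more words (0x40) -- at most 0xa8 bytes, on top of any
             * callee-saved extension consumed earlier.
             */
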
1782        /*
1783         * Undo stack alignment (the SPREALIGN bit indicates that the original
1784         * pre-exception SP was not 8-aligned and we added a padding word to
1785         * align it). We undo this by ORing in the bit that raises the SP
1786         * from the current 8-aligned value to the 8-unaligned value; adding 4
1787         * would work too, but a logical OR is how the pseudocode specifies it.
1788         */
1789        if (xpsr & XPSR_SPREALIGN) {
1790            frameptr |= 4;
1791        }
1792        *frame_sp_p = frameptr;
1793    }
1794
1795    xpsr_mask = ~(XPSR_SPREALIGN | XPSR_SFPA);
1796    if (!arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
1797        xpsr_mask &= ~XPSR_GE;
1798    }
1799    /* This xpsr_write() will invalidate frame_sp_p as it may switch stack */
1800    xpsr_write(env, xpsr, xpsr_mask);
1801
1802    if (env->v7m.secure) {
1803        bool sfpa = xpsr & XPSR_SFPA;
1804
1805        env->v7m.control[M_REG_S] = FIELD_DP32(env->v7m.control[M_REG_S],
1806                                               V7M_CONTROL, SFPA, sfpa);
1807    }
1808
1809    /*
1810     * The restored xPSR exception field will be zero if we're
1811     * resuming in Thread mode. If that doesn't match what the
1812     * exception return excret specified then this is a UsageFault.
1813     * v7M requires we make this check here; v8M did it earlier.
1814     */
1815    if (return_to_handler != arm_v7m_is_handler_mode(env)) {
1816        /*
1817         * Take an INVPC UsageFault by pushing the stack again;
1818         * we know we're v7M so this is never a Secure UsageFault.
1819         */
1820        bool ignore_stackfaults;
1821
1822        assert(!arm_feature(env, ARM_FEATURE_V8));
1823        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, false);
1824        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
1825        ignore_stackfaults = v7m_push_stack(cpu);
1826        qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on new stackframe: "
1827                      "failed exception return integrity check\n");
1828        v7m_exception_taken(cpu, excret, false, ignore_stackfaults);
1829        return;
1830    }
1831
1832    /* Otherwise, we have a successful exception exit. */
1833    arm_clear_exclusive(env);
1834    arm_rebuild_hflags(env);
1835    qemu_log_mask(CPU_LOG_INT, "...successful exception return\n");
1836}
1837
1838static bool do_v7m_function_return(ARMCPU *cpu)
1839{
1840    /*
1841     * v8M security extensions magic function return.
1842     * We may either:
1843     *  (1) throw an exception (longjump)
1844     *  (2) return true if we successfully handled the function return
1845     *  (3) return false if we failed a consistency check and have
1846     *      pended a UsageFault that needs to be taken now
1847     *
1848     * At this point the magic return value is split between env->regs[15]
1849     * and env->thumb. We don't bother to reconstitute it because we don't
1850     * need it (all values are handled the same way).
1851     */
1852    CPUARMState *env = &cpu->env;
1853    uint32_t newpc, newpsr, newpsr_exc;
1854
1855    qemu_log_mask(CPU_LOG_INT, "...really v7M secure function return\n");
1856
1857    {
1858        bool threadmode, spsel;
1859        TCGMemOpIdx oi;
1860        ARMMMUIdx mmu_idx;
1861        uint32_t *frame_sp_p;
1862        uint32_t frameptr;
1863
1864        /* Pull the return address and IPSR from the Secure stack */
1865        threadmode = !arm_v7m_is_handler_mode(env);
1866        spsel = env->v7m.control[M_REG_S] & R_V7M_CONTROL_SPSEL_MASK;
1867
1868        frame_sp_p = get_v7m_sp_ptr(env, true, threadmode, spsel);
1869        frameptr = *frame_sp_p;
1870
1871        /*
1872         * These loads may throw an exception (for MPU faults). We want to
1873         * do them as secure, so work out what MMU index that is.
1874         */
1875        mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
1876        oi = make_memop_idx(MO_LE, arm_to_core_mmu_idx(mmu_idx));
1877        newpc = helper_le_ldul_mmu(env, frameptr, oi, 0);
1878        newpsr = helper_le_ldul_mmu(env, frameptr + 4, oi, 0);
1879
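            /*
             * For reference (v8M BLXNS semantics): the two words popped
             * here were pushed by BLXNS -- [SP+0] holds the true return
             * address and [SP+4] a partial saved PSR carrying the caller's
             * IPSR.Exception and SFPA bits.
             */
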
1880        /* Consistency checks on new IPSR */
1881        newpsr_exc = newpsr & XPSR_EXCP;
1882        if (!((env->v7m.exception == 0 && newpsr_exc == 0) ||
1883              (env->v7m.exception == 1 && newpsr_exc != 0))) {
1884            /* Pend the fault and tell our caller to take it */
1885            env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
1886            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
1887                                    env->v7m.secure);
1888            qemu_log_mask(CPU_LOG_INT,
1889                          "...taking INVPC UsageFault: "
1890                          "IPSR consistency check failed\n");
1891            return false;
1892        }
1893
1894        *frame_sp_p = frameptr + 8;
1895    }
1896
1897    /* This invalidates frame_sp_p */
1898    switch_v7m_security_state(env, true);
1899    env->v7m.exception = newpsr_exc;
1900    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
1901    if (newpsr & XPSR_SFPA) {
1902        env->v7m.control[M_REG_S] |= R_V7M_CONTROL_SFPA_MASK;
1903    }
1904    xpsr_write(env, 0, XPSR_IT);
1905    env->thumb = newpc & 1;
1906    env->regs[15] = newpc & ~1;
1907    arm_rebuild_hflags(env);
1908
1909    qemu_log_mask(CPU_LOG_INT, "...function return successful\n");
1910    return true;
1911}
1912
1913static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
1914                               uint32_t addr, uint16_t *insn)
1915{
1916    /*
1917     * Load a 16-bit portion of a v7M instruction, returning true on success,
1918     * or false on failure (in which case we will have pended the appropriate
1919     * exception).
1920     * We need to do the instruction fetch's MPU and SAU checks
1921     * like this because there is no MMU index that would allow
1922     * doing the load with a single function call. Instead we must
1923     * first check that the security attributes permit the load
1924     * and that they don't mismatch on the two halves of the instruction,
1925     * and then we do the load as a secure load (ie using the security
1926     * attributes of the address, not the CPU, as architecturally required).
1927     */
1928    CPUState *cs = CPU(cpu);
1929    CPUARMState *env = &cpu->env;
1930    V8M_SAttributes sattrs = {};
1931    MemTxAttrs attrs = {};
1932    ARMMMUFaultInfo fi = {};
1933    ARMCacheAttrs cacheattrs = {};
1934    MemTxResult txres;
1935    target_ulong page_size;
1936    hwaddr physaddr;
1937    int prot;
1938
1939    v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, &sattrs);
1940    if (!sattrs.nsc || sattrs.ns) {
1941        /*
1942         * This must be the second half of the insn, and it straddles a
1943         * region boundary with the second half not being S&NSC.
1944         */
1945        env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
1946        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
1947        qemu_log_mask(CPU_LOG_INT,
1948                      "...really SecureFault with SFSR.INVEP\n");
1949        return false;
1950    }
1951    if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx, &physaddr,
1952                      &attrs, &prot, &page_size, &fi, &cacheattrs)) {
1953        /* the MPU lookup failed */
1954        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
1955        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure);
1956        qemu_log_mask(CPU_LOG_INT, "...really MemManage with CFSR.IACCVIOL\n");
1957        return false;
1958    }
1959    *insn = address_space_lduw_le(arm_addressspace(cs, attrs), physaddr,
1960                                 attrs, &txres);
1961    if (txres != MEMTX_OK) {
1962        env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
1963        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
1964        qemu_log_mask(CPU_LOG_INT, "...really BusFault with CFSR.IBUSERR\n");
1965        return false;
1966    }
1967    return true;
1968}
1969
1970static bool v7m_handle_execute_nsc(ARMCPU *cpu)
1971{
1972    /*
1973     * Check whether this attempt to execute code in a Secure & NS-Callable
1974     * memory region is for an SG instruction; if so, then emulate the
1975     * effect of the SG instruction and return true. Otherwise pend
1976     * the correct kind of exception and return false.
1977     */
1978    CPUARMState *env = &cpu->env;
1979    ARMMMUIdx mmu_idx;
1980    uint16_t insn;
1981
1982    /*
1983     * We should never get here unless get_phys_addr_pmsav8() caused
1984     * an exception for NS executing in S&NSC memory.
1985     */
1986    assert(!env->v7m.secure);
1987    assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
1988
1989    /* We want to do the MPU lookup as secure; work out what mmu_idx that is */
1990    mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
1991
1992    if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15], &insn)) {
1993        return false;
1994    }
1995
1996    if (!env->thumb) {
1997        goto gen_invep;
1998    }
1999
2000    if (insn != 0xe97f) {
2001        /*
2002         * Not an SG instruction first half (we choose the IMPDEF
2003         * early-SG-check option).
2004         */
2005        goto gen_invep;
2006    }
2007
2008    if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15] + 2, &insn)) {
2009        return false;
2010    }
2011
2012    if (insn != 0xe97f) {
2013        /*
2014         * Not an SG instruction second half (yes, both halves of the SG
2015         * insn have the same hex value)
2016         */
2017        goto gen_invep;
2018    }
2019
2020    /*
2021     * OK, we have confirmed that we really have an SG instruction.
2022     * We know we're NS in S memory so don't need to repeat those checks.
2023     */
2024    qemu_log_mask(CPU_LOG_INT, "...really an SG instruction at 0x%08" PRIx32
2025                  ", executing it\n", env->regs[15]);
2026    env->regs[14] &= ~1;
2027    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
2028    switch_v7m_security_state(env, true);
2029    xpsr_write(env, 0, XPSR_IT);
2030    env->regs[15] += 4;
2031    arm_rebuild_hflags(env);
2032    return true;
2033
2034gen_invep:
2035    env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
2036    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
2037    qemu_log_mask(CPU_LOG_INT,
2038                  "...really SecureFault with SFSR.INVEP\n");
2039    return false;
2040}
2041
2042void arm_v7m_cpu_do_interrupt(CPUState *cs)
2043{
2044    ARMCPU *cpu = ARM_CPU(cs);
2045    CPUARMState *env = &cpu->env;
2046    uint32_t lr;
2047    bool ignore_stackfaults;
2048
2049    arm_log_exception(cs->exception_index);
2050
2051    /*
2052     * For exceptions we just mark as pending on the NVIC, and let that
2053     * handle it.
2054     */
2055    switch (cs->exception_index) {
2056    case EXCP_UDEF:
2057        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
2058        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNDEFINSTR_MASK;
2059        break;
2060    case EXCP_NOCP:
2061    {
2062        /*
2063         * NOCP might be directed to something other than the current
2064         * security state if this fault is because of NSACR; we indicate
2065         * the target security state using exception.target_el.
2066         */
2067        int target_secstate;
2068
2069        if (env->exception.target_el == 3) {
2070            target_secstate = M_REG_S;
2071        } else {
2072            target_secstate = env->v7m.secure;
2073        }
2074        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, target_secstate);
2075        env->v7m.cfsr[target_secstate] |= R_V7M_CFSR_NOCP_MASK;
2076        break;
2077    }
2078    case EXCP_INVSTATE:
2079        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
2080        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVSTATE_MASK;
2081        break;
2082    case EXCP_STKOF:
2083        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
2084        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
2085        break;
2086    case EXCP_LSERR:
2087        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
2088        env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
2089        break;
2090    case EXCP_UNALIGNED:
2091        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
2092        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNALIGNED_MASK;
2093        break;
2094    case EXCP_SWI:
2095        /* The PC already points to the next instruction.  */
2096        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, env->v7m.secure);
2097        break;
2098    case EXCP_PREFETCH_ABORT:
2099    case EXCP_DATA_ABORT:
2100        /*
2101         * Note that for M profile we don't have a guest-facing FSR, but
2102         * the env->exception.fsr will be populated by the code that
2103         * raises the fault, in the A profile short-descriptor format.
2104         */
2105        switch (env->exception.fsr & 0xf) {
2106        case M_FAKE_FSR_NSC_EXEC:
2107            /*
2108             * Exception generated when we try to execute code at an address
2109             * which is marked as Secure & Non-Secure Callable and the CPU
2110             * is in the Non-Secure state. The only instruction which can
2111             * be executed like this is SG (and that only if both halves of
2112             * the SG instruction have the same security attributes.)
2113             * Everything else must generate an INVEP SecureFault, so we
2114             * emulate the SG instruction here.
2115             */
2116            if (v7m_handle_execute_nsc(cpu)) {
2117                return;
2118            }
2119            break;
2120        case M_FAKE_FSR_SFAULT:
2121            /*
2122             * Various flavours of SecureFault for attempts to execute or
2123             * access data in the wrong security state.
2124             */
2125            switch (cs->exception_index) {
2126            case EXCP_PREFETCH_ABORT:
2127                if (env->v7m.secure) {
2128                    env->v7m.sfsr |= R_V7M_SFSR_INVTRAN_MASK;
2129                    qemu_log_mask(CPU_LOG_INT,
2130                                  "...really SecureFault with SFSR.INVTRAN\n");
2131                } else {
2132                    env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
2133                    qemu_log_mask(CPU_LOG_INT,
2134                                  "...really SecureFault with SFSR.INVEP\n");
2135                }
2136                break;
2137            case EXCP_DATA_ABORT:
2138                /* This must be an NS access to S memory */
2139                env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
2140                qemu_log_mask(CPU_LOG_INT,
2141                              "...really SecureFault with SFSR.AUVIOL\n");
2142                break;
2143            }
2144            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
2145            break;
2146        case 0x8: /* External Abort */
2147            switch (cs->exception_index) {
2148            case EXCP_PREFETCH_ABORT:
2149                env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
2150                qemu_log_mask(CPU_LOG_INT, "...with CFSR.IBUSERR\n");
2151                break;
2152            case EXCP_DATA_ABORT:
2153                env->v7m.cfsr[M_REG_NS] |=
2154                    (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
2155                env->v7m.bfar = env->exception.vaddress;
2156                qemu_log_mask(CPU_LOG_INT,
2157                              "...with CFSR.PRECISERR and BFAR 0x%x\n",
2158                              env->v7m.bfar);
2159                break;
2160            }
2161            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
2162            break;
2163        default:
2164            /*
2165             * All other FSR values are either MPU faults or "can't happen
2166             * for M profile" cases.
2167             */
2168            switch (cs->exception_index) {
2169            case EXCP_PREFETCH_ABORT:
2170                env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
2171                qemu_log_mask(CPU_LOG_INT, "...with CFSR.IACCVIOL\n");
2172                break;
2173            case EXCP_DATA_ABORT:
2174                env->v7m.cfsr[env->v7m.secure] |=
2175                    (R_V7M_CFSR_DACCVIOL_MASK | R_V7M_CFSR_MMARVALID_MASK);
2176                env->v7m.mmfar[env->v7m.secure] = env->exception.vaddress;
2177                qemu_log_mask(CPU_LOG_INT,
2178                              "...with CFSR.DACCVIOL and MMFAR 0x%x\n",
2179                              env->v7m.mmfar[env->v7m.secure]);
2180                break;
2181            }
2182            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM,
2183                                    env->v7m.secure);
2184            break;
2185        }
2186        break;
2187    case EXCP_SEMIHOST:
2188        qemu_log_mask(CPU_LOG_INT,
2189                      "...handling as semihosting call 0x%x\n",
2190                      env->regs[0]);
2191        env->regs[0] = do_arm_semihosting(env);
2192        env->regs[15] += env->thumb ? 2 : 4;
2193        return;
2194    case EXCP_BKPT:
2195        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG, false);
2196        break;
2197    case EXCP_IRQ:
2198        break;
2199    case EXCP_EXCEPTION_EXIT:
2200        if (env->regs[15] < EXC_RETURN_MIN_MAGIC) {
2201            /* Must be v8M security extension function return */
2202            assert(env->regs[15] >= FNC_RETURN_MIN_MAGIC);
2203            assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
2204            if (do_v7m_function_return(cpu)) {
2205                return;
2206            }
2207        } else {
2208            do_v7m_exception_exit(cpu);
2209            return;
2210        }
2211        break;
2212    case EXCP_LAZYFP:
2213        /*
2214         * We already pended the specific exception in the NVIC in the
2215         * v7m_preserve_fp_state() helper function.
2216         */
2217        break;
2218    default:
2219        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
2220        return; /* Never happens.  Keep compiler happy.  */
2221    }
2222
2223    if (arm_feature(env, ARM_FEATURE_V8)) {
2224        lr = R_V7M_EXCRET_RES1_MASK |
2225            R_V7M_EXCRET_DCRS_MASK;
2226        /*
2227         * The S bit indicates whether we should return to Secure
2228         * or NonSecure (ie our current state).
2229         * The ES bit indicates whether we're taking this exception
2230         * to Secure or NonSecure (ie our target state). We set it
2231         * later, in v7m_exception_taken().
2232         * The SPSEL bit is also set in v7m_exception_taken() for v8M.
2233         * This corresponds to the ARM ARM pseudocode for v8M setting
2234         * some LR bits in PushStack() and some in ExceptionTaken();
2235         * the distinction matters for the tailchain cases where we
2236         * can take an exception without pushing the stack.
2237         */
2238        if (env->v7m.secure) {
2239            lr |= R_V7M_EXCRET_S_MASK;
2240        }
2241    } else {
2242        lr = R_V7M_EXCRET_RES1_MASK |
2243            R_V7M_EXCRET_S_MASK |
2244            R_V7M_EXCRET_DCRS_MASK |
2245            R_V7M_EXCRET_ES_MASK;
2246        if (env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK) {
2247            lr |= R_V7M_EXCRET_SPSEL_MASK;
2248        }
2249    }
2250    if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK)) {
2251        lr |= R_V7M_EXCRET_FTYPE_MASK;
2252    }
2253    if (!arm_v7m_is_handler_mode(env)) {
2254        lr |= R_V7M_EXCRET_MODE_MASK;
2255    }
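
        /*
         * Worked example: a v7M CPU taking this exception from Thread mode
         * with CONTROL.SPSEL set and no FP context active accumulates
         * lr = RES1 | S | DCRS | ES | SPSEL | FTYPE | MODE == 0xfffffffd,
         * the familiar "return to Thread mode, process stack" value.
         */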
2256
2257    ignore_stackfaults = v7m_push_stack(cpu);
2258    v7m_exception_taken(cpu, lr, false, ignore_stackfaults);
2259}
2260
2261uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
2262{
2263    unsigned el = arm_current_el(env);
2264
2265    /* First handle registers which unprivileged can read */
2266    switch (reg) {
2267    case 0 ... 7: /* xPSR sub-fields */
2268        return v7m_mrs_xpsr(env, reg, el);
2269    case 20: /* CONTROL */
2270        return v7m_mrs_control(env, env->v7m.secure);
2271    case 0x94: /* CONTROL_NS */
2272        /*
2273         * We have to handle this here because unprivileged Secure code
2274         * can read the NS CONTROL register.
2275         */
2276        if (!env->v7m.secure) {
2277            return 0;
2278        }
2279        return env->v7m.control[M_REG_NS] |
2280            (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK);
2281    }
2282
2283    if (el == 0) {
2284        return 0; /* unprivileged reads others as zero */
2285    }
2286
2287    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
2288        switch (reg) {
2289        case 0x88: /* MSP_NS */
2290            if (!env->v7m.secure) {
2291                return 0;
2292            }
2293            return env->v7m.other_ss_msp;
2294        case 0x89: /* PSP_NS */
2295            if (!env->v7m.secure) {
2296                return 0;
2297            }
2298            return env->v7m.other_ss_psp;
2299        case 0x8a: /* MSPLIM_NS */
2300            if (!env->v7m.secure) {
2301                return 0;
2302            }
2303            return env->v7m.msplim[M_REG_NS];
2304        case 0x8b: /* PSPLIM_NS */
2305            if (!env->v7m.secure) {
2306                return 0;
2307            }
2308            return env->v7m.psplim[M_REG_NS];
2309        case 0x90: /* PRIMASK_NS */
2310            if (!env->v7m.secure) {
2311                return 0;
2312            }
2313            return env->v7m.primask[M_REG_NS];
2314        case 0x91: /* BASEPRI_NS */
2315            if (!env->v7m.secure) {
2316                return 0;
2317            }
2318            return env->v7m.basepri[M_REG_NS];
2319        case 0x93: /* FAULTMASK_NS */
2320            if (!env->v7m.secure) {
2321                return 0;
2322            }
2323            return env->v7m.faultmask[M_REG_NS];
2324        case 0x98: /* SP_NS */
2325        {
2326            /*
2327             * This gives the non-secure SP selected based on whether we're
2328             * currently in handler mode or not, using the NS CONTROL.SPSEL.
2329             */
2330            bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
2331
2332            if (!env->v7m.secure) {
2333                return 0;
2334            }
2335            if (!arm_v7m_is_handler_mode(env) && spsel) {
2336                return env->v7m.other_ss_psp;
2337            } else {
2338                return env->v7m.other_ss_msp;
2339            }
2340        }
2341        default:
2342            break;
2343        }
2344    }
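
        /*
         * For reference: the 0x88..0x98 cases above are the v8M "NS alias"
         * encodings, i.e. SYSm with bit 7 set (0x88 == 0x80 | 8, the
         * Non-secure view of MSP); they are only accessible from Secure
         * state, hence the RAZ behaviour otherwise.
         */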
2345
2346    switch (reg) {
2347    case 8: /* MSP */
2348        return v7m_using_psp(env) ? env->v7m.other_sp : env->regs[13];
2349    case 9: /* PSP */
2350        return v7m_using_psp(env) ? env->regs[13] : env->v7m.other_sp;
2351    case 10: /* MSPLIM */
2352        if (!arm_feature(env, ARM_FEATURE_V8)) {
2353            goto bad_reg;
2354        }
2355        return env->v7m.msplim[env->v7m.secure];
2356    case 11: /* PSPLIM */
2357        if (!arm_feature(env, ARM_FEATURE_V8)) {
2358            goto bad_reg;
2359        }
2360        return env->v7m.psplim[env->v7m.secure];
2361    case 16: /* PRIMASK */
2362        return env->v7m.primask[env->v7m.secure];
2363    case 17: /* BASEPRI */
2364    case 18: /* BASEPRI_MAX */
2365        return env->v7m.basepri[env->v7m.secure];
2366    case 19: /* FAULTMASK */
2367        return env->v7m.faultmask[env->v7m.secure];
2368    default:
2369    bad_reg:
2370        qemu_log_mask(LOG_GUEST_ERROR, "Attempt to read unknown special"
2371                                       " register %d\n", reg);
2372        return 0;
2373    }
2374}
2375
2376void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
2377{
2378    /*
2379     * We're passed bits [11..0] of the instruction; extract
2380     * SYSm and the mask bits.
2381     * Invalid combinations of SYSm and mask are UNPREDICTABLE;
2382     * we choose to treat them as if the mask bits were valid.
2383     * NB that the pseudocode 'mask' variable is bits [11..10],
2384     * whereas ours is [11..8].
2385     */
2386    uint32_t mask = extract32(maskreg, 8, 4);
2387    uint32_t reg = extract32(maskreg, 0, 8);
2388    int cur_el = arm_current_el(env);
2389
2390    if (cur_el == 0 && reg > 7 && reg != 20) {
2391        /*
2392         * only xPSR sub-fields and CONTROL.SFPA may be written by
2393         * unprivileged code
2394         */
2395        return;
2396    }
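
        /*
         * Encoding example (illustrative): "MSR APSR_nzcvq, Rn" has insn
         * bits [11:10] == 0b10 and SYSm == 0, so it arrives here as
         * maskreg == 0x800, giving mask == 8 and reg == 0.
         */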
2397
2398    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
2399        switch (reg) {
2400        case 0x88: /* MSP_NS */
2401            if (!env->v7m.secure) {
2402                return;
2403            }
2404            env->v7m.other_ss_msp = val;
2405            return;
2406        case 0x89: /* PSP_NS */
2407            if (!env->v7m.secure) {
2408                return;
2409            }
2410            env->v7m.other_ss_psp = val;
2411            return;
2412        case 0x8a: /* MSPLIM_NS */
2413            if (!env->v7m.secure) {
2414                return;
2415            }
2416            env->v7m.msplim[M_REG_NS] = val & ~7;
2417            return;
2418        case 0x8b: /* PSPLIM_NS */
2419            if (!env->v7m.secure) {
2420                return;
2421            }
2422            env->v7m.psplim[M_REG_NS] = val & ~7;
2423            return;
2424        case 0x90: /* PRIMASK_NS */
2425            if (!env->v7m.secure) {
2426                return;
2427            }
2428            env->v7m.primask[M_REG_NS] = val & 1;
2429            return;
2430        case 0x91: /* BASEPRI_NS */
2431            if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) {
2432                return;
2433            }
2434            env->v7m.basepri[M_REG_NS] = val & 0xff;
2435            return;
2436        case 0x93: /* FAULTMASK_NS */
2437            if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) {
2438                return;
2439            }
2440            env->v7m.faultmask[M_REG_NS] = val & 1;
2441            return;
2442        case 0x94: /* CONTROL_NS */
2443            if (!env->v7m.secure) {
2444                return;
2445            }
2446            write_v7m_control_spsel_for_secstate(env,
2447                                                 val & R_V7M_CONTROL_SPSEL_MASK,
2448                                                 M_REG_NS);
2449            if (arm_feature(env, ARM_FEATURE_M_MAIN)) {
2450                env->v7m.control[M_REG_NS] &= ~R_V7M_CONTROL_NPRIV_MASK;
2451                env->v7m.control[M_REG_NS] |= val & R_V7M_CONTROL_NPRIV_MASK;
2452            }
2453            /*
2454             * SFPA is RAZ/WI from NS. FPCA is RO if NSACR.CP10 == 0,
2455             * RES0 if the FPU is not present, and is stored in the S bank
2456             */
2457            if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env)) &&
2458                extract32(env->v7m.nsacr, 10, 1)) {
2459                env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
2460                env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_FPCA_MASK;
2461            }
2462            return;
2463        case 0x98: /* SP_NS */
2464        {
2465            /*
2466             * This gives the non-secure SP selected based on whether we're
2467             * currently in handler mode or not, using the NS CONTROL.SPSEL.
2468             */
2469            bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
2470            bool is_psp = !arm_v7m_is_handler_mode(env) && spsel;
2471            uint32_t limit;
2472
2473            if (!env->v7m.secure) {
2474                return;
2475            }
2476
2477            limit = is_psp ? env->v7m.psplim[false] : env->v7m.msplim[false];
2478
2479            if (val < limit) {
2480                CPUState *cs = env_cpu(env);
2481
2482                cpu_restore_state(cs, GETPC(), true);
2483                raise_exception(env, EXCP_STKOF, 0, 1);
2484            }
2485
2486            if (is_psp) {
2487                env->v7m.other_ss_psp = val;
2488            } else {
2489                env->v7m.other_ss_msp = val;
2490            }
2491            return;
2492        }
2493        default:
2494            break;
2495        }
2496    }
2497
2498    switch (reg) {
2499    case 0 ... 7: /* xPSR sub-fields */
2500        v7m_msr_xpsr(env, mask, reg, val);
2501        break;
2502    case 8: /* MSP */
2503        if (v7m_using_psp(env)) {
2504            env->v7m.other_sp = val;
2505        } else {
2506            env->regs[13] = val;
2507        }
2508        break;
2509    case 9: /* PSP */
2510        if (v7m_using_psp(env)) {
2511            env->regs[13] = val;
2512        } else {
2513            env->v7m.other_sp = val;
2514        }
2515        break;
2516    case 10: /* MSPLIM */
2517        if (!arm_feature(env, ARM_FEATURE_V8)) {
2518            goto bad_reg;
2519        }
2520        env->v7m.msplim[env->v7m.secure] = val & ~7;
2521        break;
2522    case 11: /* PSPLIM */
2523        if (!arm_feature(env, ARM_FEATURE_V8)) {
2524            goto bad_reg;
2525        }
2526        env->v7m.psplim[env->v7m.secure] = val & ~7;
2527        break;
2528    case 16: /* PRIMASK */
2529        env->v7m.primask[env->v7m.secure] = val & 1;
2530        break;
2531    case 17: /* BASEPRI */
2532        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2533            goto bad_reg;
2534        }
2535        env->v7m.basepri[env->v7m.secure] = val & 0xff;
2536        break;
2537    case 18: /* BASEPRI_MAX */
2538        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2539            goto bad_reg;
2540        }
2541        val &= 0xff;
2542        if (val != 0 && (val < env->v7m.basepri[env->v7m.secure]
2543                         || env->v7m.basepri[env->v7m.secure] == 0)) {
2544            env->v7m.basepri[env->v7m.secure] = val;
2545        }
2546        break;
2547    case 19: /* FAULTMASK */
2548        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2549            goto bad_reg;
2550        }
2551        env->v7m.faultmask[env->v7m.secure] = val & 1;
2552        break;
2553    case 20: /* CONTROL */
2554        /*
2555         * Writing to the SPSEL bit only has an effect if we are in
2556         * thread mode; other bits can be updated by any privileged code.
2557         * write_v7m_control_spsel() deals with updating the SPSEL bit in
2558         * env->v7m.control, so we only need to update the others.
2559         * For v7M, we must just ignore explicit writes to SPSEL in handler
2560         * mode; for v8M the write is permitted but will have no effect.
2561         * All these bits are writes-ignored from non-privileged code,
2562         * except for SFPA.
2563         */
2564        if (cur_el > 0 && (arm_feature(env, ARM_FEATURE_V8) ||
2565                           !arm_v7m_is_handler_mode(env))) {
2566            write_v7m_control_spsel(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0);
2567        }
2568        if (cur_el > 0 && arm_feature(env, ARM_FEATURE_M_MAIN)) {
2569            env->v7m.control[env->v7m.secure] &= ~R_V7M_CONTROL_NPRIV_MASK;
2570            env->v7m.control[env->v7m.secure] |= val & R_V7M_CONTROL_NPRIV_MASK;
2571        }
2572        if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))) {
2573            /*
2574             * SFPA is RAZ/WI from NS or if no FPU.
2575             * FPCA is RO if NSACR.CP10 == 0, RES0 if the FPU is not present.
2576             * Both are stored in the S bank.
2577             */
2578            if (env->v7m.secure) {
2579                env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
2580                env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_SFPA_MASK;
2581            }
2582            if (cur_el > 0 &&
2583                (env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_SECURITY) ||
2584                 extract32(env->v7m.nsacr, 10, 1))) {
2585                env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
2586                env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_FPCA_MASK;
2587            }
2588        }
2589        break;
2590    default:
2591    bad_reg:
2592        qemu_log_mask(LOG_GUEST_ERROR, "Attempt to write unknown special"
2593                                       " register %d\n", reg);
2594        return;
2595    }
2596}
2597
2598uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
2599{
2600    /* Implement the TT instruction. op is bits [7:6] of the insn. */
2601    bool forceunpriv = op & 1;
2602    bool alt = op & 2;
2603    V8M_SAttributes sattrs = {};
2604    uint32_t tt_resp;
2605    bool r, rw, nsr, nsrw, mrvalid;
2606    int prot;
2607    ARMMMUFaultInfo fi = {};
2608    MemTxAttrs attrs = {};
2609    hwaddr phys_addr;
2610    ARMMMUIdx mmu_idx;
2611    uint32_t mregion;
2612    bool targetpriv;
2613    bool targetsec = env->v7m.secure;
2614    bool is_subpage;
2615
2616    /*
2617     * Work out which security state and privilege level we're
2618     * interested in...
2619     */
2620    if (alt) {
2621        targetsec = !targetsec;
2622    }
2623
2624    if (forceunpriv) {
2625        targetpriv = false;
2626    } else {
2627        targetpriv = arm_v7m_is_handler_mode(env) ||
2628            !(env->v7m.control[targetsec] & R_V7M_CONTROL_NPRIV_MASK);
2629    }
2630
2631    /* ...and then figure out which MMU index this is */
2632    mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targetsec, targetpriv);
2633
2634    /*
2635     * We know that the MPU and SAU don't care about the access type
2636     * for our purposes beyond that we don't want to claim to be
2637     * an insn fetch, so we arbitrarily call this a read.
2638     */
2639
2640    /*
2641     * MPU region info only available for privileged or if
2642     * inspecting the other MPU state.
2643     */
2644    if (arm_current_el(env) != 0 || alt) {
2645        /* We can ignore the return value as prot is always set */
2646        pmsav8_mpu_lookup(env, addr, MMU_DATA_LOAD, mmu_idx,
2647                          &phys_addr, &attrs, &prot, &is_subpage,
2648                          &fi, &mregion);
2649        if (mregion == -1) {
2650            mrvalid = false;
2651            mregion = 0;
2652        } else {
2653            mrvalid = true;
2654        }
2655        r = prot & PAGE_READ;
2656        rw = prot & PAGE_WRITE;
2657    } else {
2658        r = false;
2659        rw = false;
2660        mrvalid = false;
2661        mregion = 0;
2662    }
2663
2664    if (env->v7m.secure) {
2665        v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
2666        nsr = sattrs.ns && r;
2667        nsrw = sattrs.ns && rw;
2668    } else {
2669        sattrs.ns = true;
2670        nsr = false;
2671        nsrw = false;
2672    }
2673
2674    tt_resp = (sattrs.iregion << 24) |
2675        (sattrs.irvalid << 23) |
2676        ((!sattrs.ns) << 22) |
2677        (nsrw << 21) |
2678        (nsr << 20) |
2679        (rw << 19) |
2680        (r << 18) |
2681        (sattrs.srvalid << 17) |
2682        (mrvalid << 16) |
2683        (sattrs.sregion << 8) |
2684        mregion;
2685
2686    return tt_resp;
2687}
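
    /*
     * Worked example (illustrative): a privileged TT issued from the
     * Non-secure state on an address in MPU region 3 with read/write
     * permission sets MRVALID (bit 16), R (bit 18), RW (bit 19) and
     * MREGION = 3, i.e. tt_resp == 0x000d0003; all SAU-derived fields
     * stay zero.
     */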
2688
2689#endif /* !CONFIG_USER_ONLY */
2690
2691ARMMMUIdx arm_v7m_mmu_idx_all(CPUARMState *env,
2692                              bool secstate, bool priv, bool negpri)
2693{
2694    ARMMMUIdx mmu_idx = ARM_MMU_IDX_M;
2695
2696    if (priv) {
2697        mmu_idx |= ARM_MMU_IDX_M_PRIV;
2698    }
2699
2700    if (negpri) {
2701        mmu_idx |= ARM_MMU_IDX_M_NEGPRI;
2702    }
2703
2704    if (secstate) {
2705        mmu_idx |= ARM_MMU_IDX_M_S;
2706    }
2707
2708    return mmu_idx;
2709}
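
    /*
     * Composition example: secstate && priv && !negpri yields
     * ARM_MMU_IDX_M | ARM_MMU_IDX_M_PRIV | ARM_MMU_IDX_M_S -- the Secure
     * privileged M-profile index (ARMMMUIdx_MSPriv, assuming the usual
     * cpu.h enumerator names).
     */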
2710
2711ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
2712                                                bool secstate, bool priv)
2713{
2714    bool negpri = armv7m_nvic_neg_prio_requested(env->nvic, secstate);
2715
2716    return arm_v7m_mmu_idx_all(env, secstate, priv, negpri);
2717}
2718
2719/* Return the MMU index for a v7M CPU in the specified security state */
2720ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
2721{
2722    bool priv = arm_v7m_is_handler_mode(env) ||
2723        !(env->v7m.control[secstate] & 1);
2724
2725    return arm_v7m_mmu_idx_for_secstate_and_priv(env, secstate, priv);
2726}
2727