/* qemu/target/arm/translate-m-nocp.c */
/*
 *  ARM translation: M-profile NOCP special-case instructions
 *
 *  Copyright (c) 2020 Linaro, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

  20#include "qemu/osdep.h"
  21#include "tcg/tcg-op.h"
  22#include "tcg/tcg-op-gvec.h"
  23#include "translate.h"
  24#include "translate-a32.h"
  25
  26#include "decode-m-nocp.c.inc"
  27
/*
 * Decode VLLDM and VLSTM are nonstandard because:
 *  * if there is no FPU then these insns must NOP in
 *    Secure state and UNDEF in Nonsecure state
 *  * if there is an FPU then these insns do not have
 *    the usual behaviour that vfp_access_check() provides of
 *    being controlled by CPACR/NSACR enable bits or the
 *    lazy-stacking logic.
 */
static bool trans_VLLDM_VLSTM(DisasContext *s, arg_VLLDM_VLSTM *a)
{
    TCGv_i32 fptr;

    /* These insns exist only on v8 M-profile CPUs */
    if (!arm_dc_feature(s, ARM_FEATURE_M) ||
        !arm_dc_feature(s, ARM_FEATURE_V8)) {
        return false;
    }

    if (a->op) {
        /*
         * T2 encoding ({D0-D31} reglist): v8.1M and up. We choose not
         * to take the IMPDEF option to make memory accesses to the stack
         * slots that correspond to the D16-D31 registers (discarding
         * read data and writing UNKNOWN values), so for us the T2
         * encoding behaves identically to the T1 encoding.
         */
        if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
            return false;
        }
    } else {
        /*
         * T1 encoding ({D0-D15} reglist); undef if we have 32 Dregs.
         * This is currently architecturally impossible, but we add the
         * check to stay in line with the pseudocode. Note that we must
         * emit code for the UNDEF so it takes precedence over the NOCP.
         */
        if (dc_isar_feature(aa32_simd_r32, s)) {
            unallocated_encoding(s);
            return true;
        }
    }

    /*
     * If not secure, UNDEF. We must emit code for this
     * rather than returning false so that this takes
     * precedence over the m-nocp.decode NOCP fallback.
     */
    if (!s->v8m_secure) {
        unallocated_encoding(s);
        return true;
    }

    /* From here on the insn executes (possibly as a NOP): ECI applies */
    s->eci_handled = true;

    /* If no fpu, NOP. */
    if (!dc_isar_feature(aa32_vfp, s)) {
        clear_eci_state(s);
        return true;
    }

    /* The helper does the actual lazy state save (VLSTM) / restore (VLLDM) */
    fptr = load_reg(s, a->rn);
    if (a->l) {
        gen_helper_v7m_vlldm(cpu_env, fptr);
    } else {
        gen_helper_v7m_vlstm(cpu_env, fptr);
    }
    tcg_temp_free_i32(fptr);

    clear_eci_state(s);

    /* End the TB, because we have updated FP control bits */
    s->base.is_jmp = DISAS_UPDATE_EXIT;
    return true;
}
 102
static bool trans_VSCCLRM(DisasContext *s, arg_VSCCLRM *a)
{
    /*
     * VSCCLRM: zero the S/D registers in the specified reglist (plus
     * VPR when MVE is present), so that FP state does not leak across
     * a Secure -> Nonsecure transition.
     */
    int btmreg, topreg;
    TCGv_i64 zero;
    TCGv_i32 aspen, sfpa;

    if (!dc_isar_feature(aa32_m_sec_state, s)) {
        /* Before v8.1M, fall through in decode to NOCP check */
        return false;
    }

    /* Explicitly UNDEF because this takes precedence over NOCP */
    if (!arm_dc_feature(s, ARM_FEATURE_M_MAIN) || !s->v8m_secure) {
        unallocated_encoding(s);
        return true;
    }

    s->eci_handled = true;

    if (!dc_isar_feature(aa32_vfp_simd, s)) {
        /* NOP if we have neither FP nor MVE */
        clear_eci_state(s);
        return true;
    }

    /*
     * If FPCCR.ASPEN != 0 && CONTROL_S.SFPA == 0 then there is no
     * active floating point context so we must NOP (without doing
     * any lazy state preservation or the NOCP check).
     */
    aspen = load_cpu_field(v7m.fpccr[M_REG_S]);
    sfpa = load_cpu_field(v7m.control[M_REG_S]);
    tcg_gen_andi_i32(aspen, aspen, R_V7M_FPCCR_ASPEN_MASK);
    /* after the XOR, aspen is non-zero iff FPCCR.ASPEN == 0 */
    tcg_gen_xori_i32(aspen, aspen, R_V7M_FPCCR_ASPEN_MASK);
    tcg_gen_andi_i32(sfpa, sfpa, R_V7M_CONTROL_SFPA_MASK);
    tcg_gen_or_i32(sfpa, sfpa, aspen);
    arm_gen_condlabel(s);
    /* sfpa == 0 here means "no active FP context": skip to insn end */
    tcg_gen_brcondi_i32(TCG_COND_EQ, sfpa, 0, s->condlabel);

    /* FP access disabled: NOCP exception (only after the NOP case above) */
    if (s->fp_excp_el != 0) {
        gen_exception_insn(s, s->pc_curr, EXCP_NOCP,
                           syn_uncategorized(), s->fp_excp_el);
        return true;
    }

    topreg = a->vd + a->imm - 1;
    btmreg = a->vd;

    /* Convert to Sreg numbers if the insn specified in Dregs */
    if (a->size == 3) {
        topreg = topreg * 2 + 1;
        btmreg *= 2;
    }

    if (topreg > 63 || (topreg > 31 && !(topreg & 1))) {
        /* UNPREDICTABLE: we choose to undef */
        unallocated_encoding(s);
        return true;
    }

    /* Silently ignore requests to clear D16-D31 if they don't exist */
    if (topreg > 31 && !dc_isar_feature(aa32_simd_r32, s)) {
        topreg = 31;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    /* Zero the Sregs from btmreg to topreg inclusive. */
    zero = tcg_const_i64(0);
    if (btmreg & 1) {
        /* Odd start: clear the high Sreg half of its Dreg first */
        write_neon_element64(zero, btmreg >> 1, 1, MO_32);
        btmreg++;
    }
    for (; btmreg + 1 <= topreg; btmreg += 2) {
        /* Clear a whole Dreg (two Sregs) per iteration */
        write_neon_element64(zero, btmreg >> 1, 0, MO_64);
    }
    if (btmreg == topreg) {
        /* One trailing even Sreg left over */
        write_neon_element64(zero, btmreg >> 1, 0, MO_32);
        btmreg++;
    }
    assert(btmreg == topreg + 1);
    if (dc_isar_feature(aa32_mve, s)) {
        /* With MVE the insn also clears VPR */
        TCGv_i32 z32 = tcg_const_i32(0);
        store_cpu_field(z32, v7m.vpr);
    }

    clear_eci_state(s);
    return true;
}
 194
/*
 * M-profile provides two different sets of instructions that can
 * access floating point system registers: VMSR/VMRS (which move
 * to/from a general purpose register) and VLDR/VSTR sysreg (which
 * move directly to/from memory). In some cases there are also side
 * effects which must happen after any write to memory (which could
 * cause an exception). So we implement the common logic for the
 * sysreg access in gen_M_fp_sysreg_write() and gen_M_fp_sysreg_read(),
 * which take pointers to callback functions which will perform the
 * actual "read/write general purpose register" and "read/write
 * memory" operations.
 */

/*
 * Emit code to store the sysreg to its final destination; frees the
 * TCG temp 'value' it is passed. do_access is true to do the store,
 * and false to skip it and only perform side-effects like base
 * register writeback.
 */
typedef void fp_sysreg_storefn(DisasContext *s, void *opaque, TCGv_i32 value,
                               bool do_access);
/*
 * Emit code to load the value to be copied to the sysreg; returns
 * a new TCG temporary. do_access is true to do the load,
 * and false to skip it and only perform side-effects like base
 * register writeback (in which case NULL may be returned).
 */
typedef TCGv_i32 fp_sysreg_loadfn(DisasContext *s, void *opaque,
                                  bool do_access);

/* Common decode/access checks for fp sysreg read/write */
typedef enum FPSysRegCheckResult {
    FPSysRegCheckFailed, /* caller should return false */
    FPSysRegCheckDone, /* caller should return true */
    FPSysRegCheckContinue, /* caller should continue generating code */
} FPSysRegCheckResult;
 231
 232static FPSysRegCheckResult fp_sysreg_checks(DisasContext *s, int regno)
 233{
 234    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
 235        return FPSysRegCheckFailed;
 236    }
 237
 238    switch (regno) {
 239    case ARM_VFP_FPSCR:
 240    case QEMU_VFP_FPSCR_NZCV:
 241        break;
 242    case ARM_VFP_FPSCR_NZCVQC:
 243        if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
 244            return FPSysRegCheckFailed;
 245        }
 246        break;
 247    case ARM_VFP_FPCXT_S:
 248    case ARM_VFP_FPCXT_NS:
 249        if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
 250            return FPSysRegCheckFailed;
 251        }
 252        if (!s->v8m_secure) {
 253            return FPSysRegCheckFailed;
 254        }
 255        break;
 256    case ARM_VFP_VPR:
 257    case ARM_VFP_P0:
 258        if (!dc_isar_feature(aa32_mve, s)) {
 259            return FPSysRegCheckFailed;
 260        }
 261        break;
 262    default:
 263        return FPSysRegCheckFailed;
 264    }
 265
 266    /*
 267     * FPCXT_NS is a special case: it has specific handling for
 268     * "current FP state is inactive", and must do the PreserveFPState()
 269     * but not the usual full set of actions done by ExecuteFPCheck().
 270     * So we don't call vfp_access_check() and the callers must handle this.
 271     */
 272    if (regno != ARM_VFP_FPCXT_NS && !vfp_access_check(s)) {
 273        return FPSysRegCheckDone;
 274    }
 275    return FPSysRegCheckContinue;
 276}
 277
static void gen_branch_fpInactive(DisasContext *s, TCGCond cond,
                                  TCGLabel *label)
{
    /*
     * FPCXT_NS is a special case: it has specific handling for
     * "current FP state is inactive", and must do the PreserveFPState()
     * but not the usual full set of actions done by ExecuteFPCheck().
     * We don't have a TB flag that matches the fpInactive check, so we
     * do it at runtime as we don't expect FPCXT_NS accesses to be frequent.
     *
     * Emit code that checks fpInactive and does a conditional
     * branch to label based on it:
     *  if cond is TCG_COND_NE then branch if fpInactive != 0 (ie if inactive)
     *  if cond is TCG_COND_EQ then branch if fpInactive == 0 (ie if active)
     */
    assert(cond == TCG_COND_EQ || cond == TCG_COND_NE);

    /* fpInactive = FPCCR_NS.ASPEN == 1 && CONTROL.FPCA == 0 */
    TCGv_i32 aspen, fpca;
    aspen = load_cpu_field(v7m.fpccr[M_REG_NS]);
    fpca = load_cpu_field(v7m.control[M_REG_S]);
    tcg_gen_andi_i32(aspen, aspen, R_V7M_FPCCR_ASPEN_MASK);
    /* after the XOR, aspen is non-zero iff ASPEN == 0 */
    tcg_gen_xori_i32(aspen, aspen, R_V7M_FPCCR_ASPEN_MASK);
    tcg_gen_andi_i32(fpca, fpca, R_V7M_CONTROL_FPCA_MASK);
    /* fpca is now zero iff fpInactive, so invert cond for the compare */
    tcg_gen_or_i32(fpca, fpca, aspen);
    tcg_gen_brcondi_i32(tcg_invert_cond(cond), fpca, 0, label);
    tcg_temp_free_i32(aspen);
    tcg_temp_free_i32(fpca);
}
 307
static bool gen_M_fp_sysreg_write(DisasContext *s, int regno,
                                  fp_sysreg_loadfn *loadfn,
                                  void *opaque)
{
    /*
     * Do a write to an M-profile floating point system register.
     * The value written is produced by loadfn (a GP register read or a
     * memory load); loadfn is also invoked with do_access == false on
     * NOP paths so that side effects like base register writeback still
     * happen. Returns true if the insn was handled.
     */
    TCGv_i32 tmp;
    TCGLabel *lab_end = NULL;

    switch (fp_sysreg_checks(s, regno)) {
    case FPSysRegCheckFailed:
        return false;
    case FPSysRegCheckDone:
        return true;
    case FPSysRegCheckContinue:
        break;
    }

    switch (regno) {
    case ARM_VFP_FPSCR:
        tmp = loadfn(s, opaque, true);
        gen_helper_vfp_set_fpscr(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
        /* FPSCR changes can affect codegen decisions, so end the TB */
        gen_lookup_tb(s);
        break;
    case ARM_VFP_FPSCR_NZCVQC:
    {
        TCGv_i32 fpscr;
        tmp = loadfn(s, opaque, true);
        if (dc_isar_feature(aa32_mve, s)) {
            /* QC is only present for MVE; otherwise RES0 */
            TCGv_i32 qc = tcg_temp_new_i32();
            tcg_gen_andi_i32(qc, tmp, FPCR_QC);
            /*
             * The 4 vfp.qc[] fields need only be "zero" vs "non-zero";
             * here writing the same value into all elements is simplest.
             */
            tcg_gen_gvec_dup_i32(MO_32, offsetof(CPUARMState, vfp.qc),
                                 16, 16, qc);
        }
        /* Merge only the NZCV bits into the stored FPSCR */
        tcg_gen_andi_i32(tmp, tmp, FPCR_NZCV_MASK);
        fpscr = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
        tcg_gen_andi_i32(fpscr, fpscr, ~FPCR_NZCV_MASK);
        tcg_gen_or_i32(fpscr, fpscr, tmp);
        store_cpu_field(fpscr, vfp.xregs[ARM_VFP_FPSCR]);
        tcg_temp_free_i32(tmp);
        break;
    }
    case ARM_VFP_FPCXT_NS:
    {
        TCGLabel *lab_active = gen_new_label();

        lab_end = gen_new_label();
        gen_branch_fpInactive(s, TCG_COND_EQ, lab_active);
        /*
         * fpInactive case: write is a NOP, so only do side effects
         * like register writeback before we branch to end
         */
        loadfn(s, opaque, false);
        tcg_gen_br(lab_end);

        gen_set_label(lab_active);
        /*
         * !fpInactive: if FPU disabled, take NOCP exception;
         * otherwise PreserveFPState(), and then FPCXT_NS writes
         * behave the same as FPCXT_S writes.
         */
        if (!vfp_access_check_m(s, true)) {
            /*
             * This was only a conditional exception, so override
             * gen_exception_insn()'s default to DISAS_NORETURN
             */
            s->base.is_jmp = DISAS_NEXT;
            break;
        }
    }
    /* fall through */
    case ARM_VFP_FPCXT_S:
    {
        TCGv_i32 sfpa, control;
        /*
         * Set FPSCR and CONTROL.SFPA from value; the new FPSCR takes
         * bits [27:0] from value and zeroes bits [31:28].
         */
        tmp = loadfn(s, opaque, true);
        sfpa = tcg_temp_new_i32();
        /* CONTROL.SFPA comes from bit [31] of the written value */
        tcg_gen_shri_i32(sfpa, tmp, 31);
        control = load_cpu_field(v7m.control[M_REG_S]);
        tcg_gen_deposit_i32(control, control, sfpa,
                            R_V7M_CONTROL_SFPA_SHIFT, 1);
        store_cpu_field(control, v7m.control[M_REG_S]);
        tcg_gen_andi_i32(tmp, tmp, ~FPCR_NZCV_MASK);
        gen_helper_vfp_set_fpscr(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
        tcg_temp_free_i32(sfpa);
        break;
    }
    case ARM_VFP_VPR:
        /* Behaves as NOP if not privileged */
        if (IS_USER(s)) {
            loadfn(s, opaque, false);
            break;
        }
        tmp = loadfn(s, opaque, true);
        store_cpu_field(tmp, v7m.vpr);
        break;
    case ARM_VFP_P0:
    {
        TCGv_i32 vpr;
        tmp = loadfn(s, opaque, true);
        vpr = load_cpu_field(v7m.vpr);
        /* P0 is only one field of VPR; preserve the other bits */
        tcg_gen_deposit_i32(vpr, vpr, tmp,
                            R_V7M_VPR_P0_SHIFT, R_V7M_VPR_P0_LENGTH);
        store_cpu_field(vpr, v7m.vpr);
        tcg_temp_free_i32(tmp);
        break;
    }
    default:
        g_assert_not_reached();
    }
    if (lab_end) {
        gen_set_label(lab_end);
    }
    return true;
}
 432
static bool gen_M_fp_sysreg_read(DisasContext *s, int regno,
                                 fp_sysreg_storefn *storefn,
                                 void *opaque)
{
    /*
     * Do a read from an M-profile floating point system register.
     * The result is handed to storefn (a GP register write or a memory
     * store); storefn is also invoked with do_access == false on NOP
     * paths so that side effects like base register writeback still
     * happen. Returns true if the insn was handled.
     */
    TCGv_i32 tmp;
    TCGLabel *lab_end = NULL;
    bool lookup_tb = false;

    switch (fp_sysreg_checks(s, regno)) {
    case FPSysRegCheckFailed:
        return false;
    case FPSysRegCheckDone:
        return true;
    case FPSysRegCheckContinue:
        break;
    }

    if (regno == ARM_VFP_FPSCR_NZCVQC && !dc_isar_feature(aa32_mve, s)) {
        /* QC is RES0 without MVE, so NZCVQC simplifies to NZCV */
        regno = QEMU_VFP_FPSCR_NZCV;
    }

    switch (regno) {
    case ARM_VFP_FPSCR:
        tmp = tcg_temp_new_i32();
        gen_helper_vfp_get_fpscr(tmp, cpu_env);
        storefn(s, opaque, tmp, true);
        break;
    case ARM_VFP_FPSCR_NZCVQC:
        tmp = tcg_temp_new_i32();
        gen_helper_vfp_get_fpscr(tmp, cpu_env);
        tcg_gen_andi_i32(tmp, tmp, FPCR_NZCVQC_MASK);
        storefn(s, opaque, tmp, true);
        break;
    case QEMU_VFP_FPSCR_NZCV:
        /*
         * Read just NZCV; this is a special case to avoid the
         * helper call for the "VMRS to CPSR.NZCV" insn.
         */
        tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
        tcg_gen_andi_i32(tmp, tmp, FPCR_NZCV_MASK);
        storefn(s, opaque, tmp, true);
        break;
    case ARM_VFP_FPCXT_S:
    {
        TCGv_i32 control, sfpa, fpscr;
        /* Bits [27:0] from FPSCR, bit [31] from CONTROL.SFPA */
        tmp = tcg_temp_new_i32();
        sfpa = tcg_temp_new_i32();
        gen_helper_vfp_get_fpscr(tmp, cpu_env);
        tcg_gen_andi_i32(tmp, tmp, ~FPCR_NZCV_MASK);
        control = load_cpu_field(v7m.control[M_REG_S]);
        tcg_gen_andi_i32(sfpa, control, R_V7M_CONTROL_SFPA_MASK);
        tcg_gen_shli_i32(sfpa, sfpa, 31 - R_V7M_CONTROL_SFPA_SHIFT);
        tcg_gen_or_i32(tmp, tmp, sfpa);
        tcg_temp_free_i32(sfpa);
        /*
         * Store result before updating FPSCR etc, in case
         * it is a memory write which causes an exception.
         */
        storefn(s, opaque, tmp, true);
        /*
         * Now we must reset FPSCR from FPDSCR_NS, and clear
         * CONTROL.SFPA; so we'll end the TB here.
         */
        tcg_gen_andi_i32(control, control, ~R_V7M_CONTROL_SFPA_MASK);
        store_cpu_field(control, v7m.control[M_REG_S]);
        fpscr = load_cpu_field(v7m.fpdscr[M_REG_NS]);
        gen_helper_vfp_set_fpscr(cpu_env, fpscr);
        tcg_temp_free_i32(fpscr);
        lookup_tb = true;
        break;
    }
    case ARM_VFP_FPCXT_NS:
    {
        TCGv_i32 control, sfpa, fpscr, fpdscr, zero;
        TCGLabel *lab_active = gen_new_label();

        lookup_tb = true;

        gen_branch_fpInactive(s, TCG_COND_EQ, lab_active);
        /* fpInactive case: reads as FPDSCR_NS */
        TCGv_i32 tmp = load_cpu_field(v7m.fpdscr[M_REG_NS]);
        storefn(s, opaque, tmp, true);
        lab_end = gen_new_label();
        tcg_gen_br(lab_end);

        gen_set_label(lab_active);
        /*
         * !fpInactive: if FPU disabled, take NOCP exception;
         * otherwise PreserveFPState(), and then FPCXT_NS
         * reads the same as FPCXT_S.
         */
        if (!vfp_access_check_m(s, true)) {
            /*
             * This was only a conditional exception, so override
             * gen_exception_insn()'s default to DISAS_NORETURN
             */
            s->base.is_jmp = DISAS_NEXT;
            break;
        }
        tmp = tcg_temp_new_i32();
        sfpa = tcg_temp_new_i32();
        fpscr = tcg_temp_new_i32();
        gen_helper_vfp_get_fpscr(fpscr, cpu_env);
        tcg_gen_andi_i32(tmp, fpscr, ~FPCR_NZCV_MASK);
        control = load_cpu_field(v7m.control[M_REG_S]);
        tcg_gen_andi_i32(sfpa, control, R_V7M_CONTROL_SFPA_MASK);
        tcg_gen_shli_i32(sfpa, sfpa, 31 - R_V7M_CONTROL_SFPA_SHIFT);
        tcg_gen_or_i32(tmp, tmp, sfpa);
        tcg_temp_free_i32(control);
        /* Store result before updating FPSCR, in case it faults */
        storefn(s, opaque, tmp, true);
        /* If SFPA is zero then set FPSCR from FPDSCR_NS */
        fpdscr = load_cpu_field(v7m.fpdscr[M_REG_NS]);
        zero = tcg_const_i32(0);
        tcg_gen_movcond_i32(TCG_COND_EQ, fpscr, sfpa, zero, fpdscr, fpscr);
        gen_helper_vfp_set_fpscr(cpu_env, fpscr);
        tcg_temp_free_i32(zero);
        tcg_temp_free_i32(sfpa);
        tcg_temp_free_i32(fpdscr);
        tcg_temp_free_i32(fpscr);
        break;
    }
    case ARM_VFP_VPR:
        /* Behaves as NOP if not privileged */
        if (IS_USER(s)) {
            storefn(s, opaque, NULL, false);
            break;
        }
        tmp = load_cpu_field(v7m.vpr);
        storefn(s, opaque, tmp, true);
        break;
    case ARM_VFP_P0:
        /* P0 reads as the P0 field of VPR, zero-extended */
        tmp = load_cpu_field(v7m.vpr);
        tcg_gen_extract_i32(tmp, tmp, R_V7M_VPR_P0_SHIFT, R_V7M_VPR_P0_LENGTH);
        storefn(s, opaque, tmp, true);
        break;
    default:
        g_assert_not_reached();
    }

    if (lab_end) {
        gen_set_label(lab_end);
    }
    if (lookup_tb) {
        /* Reads with FP-control side effects must end the TB */
        gen_lookup_tb(s);
    }
    return true;
}
 584
 585static void fp_sysreg_to_gpr(DisasContext *s, void *opaque, TCGv_i32 value,
 586                             bool do_access)
 587{
 588    arg_VMSR_VMRS *a = opaque;
 589
 590    if (!do_access) {
 591        return;
 592    }
 593
 594    if (a->rt == 15) {
 595        /* Set the 4 flag bits in the CPSR */
 596        gen_set_nzcv(value);
 597        tcg_temp_free_i32(value);
 598    } else {
 599        store_reg(s, a->rt, value);
 600    }
 601}
 602
 603static TCGv_i32 gpr_to_fp_sysreg(DisasContext *s, void *opaque, bool do_access)
 604{
 605    arg_VMSR_VMRS *a = opaque;
 606
 607    if (!do_access) {
 608        return NULL;
 609    }
 610    return load_reg(s, a->rt);
 611}
 612
 613static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
 614{
 615    /*
 616     * Accesses to R15 are UNPREDICTABLE; we choose to undef.
 617     * FPSCR -> r15 is a special case which writes to the PSR flags;
 618     * set a->reg to a special value to tell gen_M_fp_sysreg_read()
 619     * we only care about the top 4 bits of FPSCR there.
 620     */
 621    if (a->rt == 15) {
 622        if (a->l && a->reg == ARM_VFP_FPSCR) {
 623            a->reg = QEMU_VFP_FPSCR_NZCV;
 624        } else {
 625            return false;
 626        }
 627    }
 628
 629    if (a->l) {
 630        /* VMRS, move FP system register to gp register */
 631        return gen_M_fp_sysreg_read(s, a->reg, fp_sysreg_to_gpr, a);
 632    } else {
 633        /* VMSR, move gp register to FP system register */
 634        return gen_M_fp_sysreg_write(s, a->reg, gpr_to_fp_sysreg, a);
 635    }
 636}
 637
static void fp_sysreg_to_memory(DisasContext *s, void *opaque, TCGv_i32 value,
                                bool do_access)
{
    /*
     * Store callback for VSTR sysreg: write 'value' to memory using the
     * addressing mode described by the arg_vldr_sysreg, and do base
     * register writeback if requested. When do_access is false only the
     * writeback side effect is performed. Frees 'value' when storing.
     */
    arg_vldr_sysreg *a = opaque;
    uint32_t offset = a->imm;
    TCGv_i32 addr;

    if (!a->a) {
        /* 'a' bit clear means subtract the immediate offset */
        offset = -offset;
    }

    if (!do_access && !a->w) {
        /* Neither the store nor writeback wanted: nothing to do */
        return;
    }

    addr = load_reg(s, a->rn);
    if (a->p) {
        /* Pre-indexed: apply the offset before the access */
        tcg_gen_addi_i32(addr, addr, offset);
    }

    if (s->v8m_stackcheck && a->rn == 13 && a->w) {
        gen_helper_v8m_stackcheck(cpu_env, addr);
    }

    if (do_access) {
        gen_aa32_st_i32(s, value, addr, get_mem_index(s),
                        MO_UL | MO_ALIGN | s->be_data);
        tcg_temp_free_i32(value);
    }

    if (a->w) {
        /* writeback */
        if (!a->p) {
            /* Post-indexed: apply the offset after the access */
            tcg_gen_addi_i32(addr, addr, offset);
        }
        store_reg(s, a->rn, addr);
    } else {
        tcg_temp_free_i32(addr);
    }
}
 678
static TCGv_i32 memory_to_fp_sysreg(DisasContext *s, void *opaque,
                                    bool do_access)
{
    /*
     * Load callback for VLDR sysreg: read the value from memory using
     * the addressing mode described by the arg_vldr_sysreg, and do base
     * register writeback if requested. When do_access is false only the
     * writeback side effect is performed and NULL is returned.
     */
    arg_vldr_sysreg *a = opaque;
    uint32_t offset = a->imm;
    TCGv_i32 addr;
    TCGv_i32 value = NULL;

    if (!a->a) {
        /* 'a' bit clear means subtract the immediate offset */
        offset = -offset;
    }

    if (!do_access && !a->w) {
        /* Neither the load nor writeback wanted: nothing to do */
        return NULL;
    }

    addr = load_reg(s, a->rn);
    if (a->p) {
        /* Pre-indexed: apply the offset before the access */
        tcg_gen_addi_i32(addr, addr, offset);
    }

    if (s->v8m_stackcheck && a->rn == 13 && a->w) {
        gen_helper_v8m_stackcheck(cpu_env, addr);
    }

    if (do_access) {
        value = tcg_temp_new_i32();
        gen_aa32_ld_i32(s, value, addr, get_mem_index(s),
                        MO_UL | MO_ALIGN | s->be_data);
    }

    if (a->w) {
        /* writeback */
        if (!a->p) {
            /* Post-indexed: apply the offset after the access */
            tcg_gen_addi_i32(addr, addr, offset);
        }
        store_reg(s, a->rn, addr);
    } else {
        tcg_temp_free_i32(addr);
    }
    return value;
}
 721
 722static bool trans_VLDR_sysreg(DisasContext *s, arg_vldr_sysreg *a)
 723{
 724    if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
 725        return false;
 726    }
 727    if (a->rn == 15) {
 728        return false;
 729    }
 730    return gen_M_fp_sysreg_write(s, a->reg, memory_to_fp_sysreg, a);
 731}
 732
 733static bool trans_VSTR_sysreg(DisasContext *s, arg_vldr_sysreg *a)
 734{
 735    if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
 736        return false;
 737    }
 738    if (a->rn == 15) {
 739        return false;
 740    }
 741    return gen_M_fp_sysreg_read(s, a->reg, fp_sysreg_to_memory, a);
 742}
 743
 744static bool trans_NOCP(DisasContext *s, arg_nocp *a)
 745{
 746    /*
 747     * Handle M-profile early check for disabled coprocessor:
 748     * all we need to do here is emit the NOCP exception if
 749     * the coprocessor is disabled. Otherwise we return false
 750     * and the real VFP/etc decode will handle the insn.
 751     */
 752    assert(arm_dc_feature(s, ARM_FEATURE_M));
 753
 754    if (a->cp == 11) {
 755        a->cp = 10;
 756    }
 757    if (arm_dc_feature(s, ARM_FEATURE_V8_1M) &&
 758        (a->cp == 8 || a->cp == 9 || a->cp == 14 || a->cp == 15)) {
 759        /* in v8.1M cp 8, 9, 14, 15 also are governed by the cp10 enable */
 760        a->cp = 10;
 761    }
 762
 763    if (a->cp != 10) {
 764        gen_exception_insn(s, s->pc_curr, EXCP_NOCP,
 765                           syn_uncategorized(), default_exception_el(s));
 766        return true;
 767    }
 768
 769    if (s->fp_excp_el != 0) {
 770        gen_exception_insn(s, s->pc_curr, EXCP_NOCP,
 771                           syn_uncategorized(), s->fp_excp_el);
 772        return true;
 773    }
 774
 775    return false;
 776}
 777
 778static bool trans_NOCP_8_1(DisasContext *s, arg_nocp *a)
 779{
 780    /* This range needs a coprocessor check for v8.1M and later only */
 781    if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
 782        return false;
 783    }
 784    return trans_NOCP(s, a);
 785}
 786