/* qemu/target/arm/translate-m-nocp.c */
/*
 *  ARM translation: M-profile NOCP special-case instructions
 *
 *  Copyright (c) 2020 Linaro, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "translate.h"
#include "translate-a32.h"

#include "decode-m-nocp.c.inc"
/*
 * Decode VLLDM and VLSTM are nonstandard because:
 *  * if there is no FPU then these insns must NOP in
 *    Secure state and UNDEF in Nonsecure state
 *  * if there is an FPU then these insns do not have
 *    the usual behaviour that vfp_access_check() provides of
 *    being controlled by CPACR/NSACR enable bits or the
 *    lazy-stacking logic.
 *
 * Returns true if the insn was handled (including by emitting an
 * UNDEF), false to let the decoder fall through to other patterns.
 */
static bool trans_VLLDM_VLSTM(DisasContext *s, arg_VLLDM_VLSTM *a)
{
    TCGv_i32 fptr;

    /* These encodings only exist for v8.0M and later M-profile cores */
    if (!arm_dc_feature(s, ARM_FEATURE_M) ||
        !arm_dc_feature(s, ARM_FEATURE_V8)) {
        return false;
    }

    if (a->op) {
        /*
         * T2 encoding ({D0-D31} reglist): v8.1M and up. We choose not
         * to take the IMPDEF option to make memory accesses to the stack
         * slots that correspond to the D16-D31 registers (discarding
         * read data and writing UNKNOWN values), so for us the T2
         * encoding behaves identically to the T1 encoding.
         */
        if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
            return false;
        }
    } else {
        /*
         * T1 encoding ({D0-D15} reglist); undef if we have 32 Dregs.
         * This is currently architecturally impossible, but we add the
         * check to stay in line with the pseudocode. Note that we must
         * emit code for the UNDEF so it takes precedence over the NOCP.
         */
        if (dc_isar_feature(aa32_simd_r32, s)) {
            unallocated_encoding(s);
            return true;
        }
    }

    /*
     * If not secure, UNDEF. We must emit code for this
     * rather than returning false so that this takes
     * precedence over the m-nocp.decode NOCP fallback.
     */
    if (!s->v8m_secure) {
        unallocated_encoding(s);
        return true;
    }

    s->eci_handled = true;

    /* If no fpu, NOP. */
    if (!dc_isar_feature(aa32_vfp, s)) {
        clear_eci_state(s);
        return true;
    }

    /* Pass the base address to the helper that does all the real work */
    fptr = load_reg(s, a->rn);
    if (a->l) {
        gen_helper_v7m_vlldm(cpu_env, fptr);
    } else {
        gen_helper_v7m_vlstm(cpu_env, fptr);
    }
    tcg_temp_free_i32(fptr);

    clear_eci_state(s);

    /*
     * End the TB, because we have updated FP control bits,
     * and possibly VPR or LTPSIZE.
     */
    s->base.is_jmp = DISAS_UPDATE_EXIT;
    return true;
}
 105
 106static bool trans_VSCCLRM(DisasContext *s, arg_VSCCLRM *a)
 107{
 108    int btmreg, topreg;
 109    TCGv_i64 zero;
 110    TCGv_i32 aspen, sfpa;
 111
 112    if (!dc_isar_feature(aa32_m_sec_state, s)) {
 113        /* Before v8.1M, fall through in decode to NOCP check */
 114        return false;
 115    }
 116
 117    /* Explicitly UNDEF because this takes precedence over NOCP */
 118    if (!arm_dc_feature(s, ARM_FEATURE_M_MAIN) || !s->v8m_secure) {
 119        unallocated_encoding(s);
 120        return true;
 121    }
 122
 123    s->eci_handled = true;
 124
 125    if (!dc_isar_feature(aa32_vfp_simd, s)) {
 126        /* NOP if we have neither FP nor MVE */
 127        clear_eci_state(s);
 128        return true;
 129    }
 130
 131    /*
 132     * If FPCCR.ASPEN != 0 && CONTROL_S.SFPA == 0 then there is no
 133     * active floating point context so we must NOP (without doing
 134     * any lazy state preservation or the NOCP check).
 135     */
 136    aspen = load_cpu_field(v7m.fpccr[M_REG_S]);
 137    sfpa = load_cpu_field(v7m.control[M_REG_S]);
 138    tcg_gen_andi_i32(aspen, aspen, R_V7M_FPCCR_ASPEN_MASK);
 139    tcg_gen_xori_i32(aspen, aspen, R_V7M_FPCCR_ASPEN_MASK);
 140    tcg_gen_andi_i32(sfpa, sfpa, R_V7M_CONTROL_SFPA_MASK);
 141    tcg_gen_or_i32(sfpa, sfpa, aspen);
 142    arm_gen_condlabel(s);
 143    tcg_gen_brcondi_i32(TCG_COND_EQ, sfpa, 0, s->condlabel);
 144
 145    if (s->fp_excp_el != 0) {
 146        gen_exception_insn(s, s->pc_curr, EXCP_NOCP,
 147                           syn_uncategorized(), s->fp_excp_el);
 148        return true;
 149    }
 150
 151    topreg = a->vd + a->imm - 1;
 152    btmreg = a->vd;
 153
 154    /* Convert to Sreg numbers if the insn specified in Dregs */
 155    if (a->size == 3) {
 156        topreg = topreg * 2 + 1;
 157        btmreg *= 2;
 158    }
 159
 160    if (topreg > 63 || (topreg > 31 && !(topreg & 1))) {
 161        /* UNPREDICTABLE: we choose to undef */
 162        unallocated_encoding(s);
 163        return true;
 164    }
 165
 166    /* Silently ignore requests to clear D16-D31 if they don't exist */
 167    if (topreg > 31 && !dc_isar_feature(aa32_simd_r32, s)) {
 168        topreg = 31;
 169    }
 170
 171    if (!vfp_access_check(s)) {
 172        return true;
 173    }
 174
 175    /* Zero the Sregs from btmreg to topreg inclusive. */
 176    zero = tcg_const_i64(0);
 177    if (btmreg & 1) {
 178        write_neon_element64(zero, btmreg >> 1, 1, MO_32);
 179        btmreg++;
 180    }
 181    for (; btmreg + 1 <= topreg; btmreg += 2) {
 182        write_neon_element64(zero, btmreg >> 1, 0, MO_64);
 183    }
 184    if (btmreg == topreg) {
 185        write_neon_element64(zero, btmreg >> 1, 0, MO_32);
 186        btmreg++;
 187    }
 188    assert(btmreg == topreg + 1);
 189    if (dc_isar_feature(aa32_mve, s)) {
 190        TCGv_i32 z32 = tcg_const_i32(0);
 191        store_cpu_field(z32, v7m.vpr);
 192    }
 193
 194    clear_eci_state(s);
 195    return true;
 196}
 197
/*
 * M-profile provides two different sets of instructions that can
 * access floating point system registers: VMSR/VMRS (which move
 * to/from a general purpose register) and VLDR/VSTR sysreg (which
 * move directly to/from memory). In some cases there are also side
 * effects which must happen after any write to memory (which could
 * cause an exception). So we implement the common logic for the
 * sysreg access in gen_M_fp_sysreg_write() and gen_M_fp_sysreg_read(),
 * which take pointers to callback functions which will perform the
 * actual "read/write general purpose register" and "read/write
 * memory" operations.
 */
 210
/*
 * Emit code to store the sysreg to its final destination; frees the
 * TCG temp 'value' it is passed. do_access is true to do the store,
 * and false to skip it and only perform side-effects like base
 * register writeback.
 */
typedef void fp_sysreg_storefn(DisasContext *s, void *opaque, TCGv_i32 value,
                               bool do_access);
/*
 * Emit code to load the value to be copied to the sysreg; returns
 * a new TCG temporary. do_access is true to do the load,
 * and false to skip it and only perform side-effects like base
 * register writeback.
 */
typedef TCGv_i32 fp_sysreg_loadfn(DisasContext *s, void *opaque,
                                  bool do_access);

/* Common decode/access checks for fp sysreg read/write */
typedef enum FPSysRegCheckResult {
    FPSysRegCheckFailed, /* caller should return false */
    FPSysRegCheckDone, /* caller should return true */
    FPSysRegCheckContinue, /* caller should continue generating code */
} FPSysRegCheckResult;
 234
 235static FPSysRegCheckResult fp_sysreg_checks(DisasContext *s, int regno)
 236{
 237    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
 238        return FPSysRegCheckFailed;
 239    }
 240
 241    switch (regno) {
 242    case ARM_VFP_FPSCR:
 243    case QEMU_VFP_FPSCR_NZCV:
 244        break;
 245    case ARM_VFP_FPSCR_NZCVQC:
 246        if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
 247            return FPSysRegCheckFailed;
 248        }
 249        break;
 250    case ARM_VFP_FPCXT_S:
 251    case ARM_VFP_FPCXT_NS:
 252        if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
 253            return FPSysRegCheckFailed;
 254        }
 255        if (!s->v8m_secure) {
 256            return FPSysRegCheckFailed;
 257        }
 258        break;
 259    case ARM_VFP_VPR:
 260    case ARM_VFP_P0:
 261        if (!dc_isar_feature(aa32_mve, s)) {
 262            return FPSysRegCheckFailed;
 263        }
 264        break;
 265    default:
 266        return FPSysRegCheckFailed;
 267    }
 268
 269    /*
 270     * FPCXT_NS is a special case: it has specific handling for
 271     * "current FP state is inactive", and must do the PreserveFPState()
 272     * but not the usual full set of actions done by ExecuteFPCheck().
 273     * So we don't call vfp_access_check() and the callers must handle this.
 274     */
 275    if (regno != ARM_VFP_FPCXT_NS && !vfp_access_check(s)) {
 276        return FPSysRegCheckDone;
 277    }
 278    return FPSysRegCheckContinue;
 279}
 280
 281static void gen_branch_fpInactive(DisasContext *s, TCGCond cond,
 282                                  TCGLabel *label)
 283{
 284    /*
 285     * FPCXT_NS is a special case: it has specific handling for
 286     * "current FP state is inactive", and must do the PreserveFPState()
 287     * but not the usual full set of actions done by ExecuteFPCheck().
 288     * We don't have a TB flag that matches the fpInactive check, so we
 289     * do it at runtime as we don't expect FPCXT_NS accesses to be frequent.
 290     *
 291     * Emit code that checks fpInactive and does a conditional
 292     * branch to label based on it:
 293     *  if cond is TCG_COND_NE then branch if fpInactive != 0 (ie if inactive)
 294     *  if cond is TCG_COND_EQ then branch if fpInactive == 0 (ie if active)
 295     */
 296    assert(cond == TCG_COND_EQ || cond == TCG_COND_NE);
 297
 298    /* fpInactive = FPCCR_NS.ASPEN == 1 && CONTROL.FPCA == 0 */
 299    TCGv_i32 aspen, fpca;
 300    aspen = load_cpu_field(v7m.fpccr[M_REG_NS]);
 301    fpca = load_cpu_field(v7m.control[M_REG_S]);
 302    tcg_gen_andi_i32(aspen, aspen, R_V7M_FPCCR_ASPEN_MASK);
 303    tcg_gen_xori_i32(aspen, aspen, R_V7M_FPCCR_ASPEN_MASK);
 304    tcg_gen_andi_i32(fpca, fpca, R_V7M_CONTROL_FPCA_MASK);
 305    tcg_gen_or_i32(fpca, fpca, aspen);
 306    tcg_gen_brcondi_i32(tcg_invert_cond(cond), fpca, 0, label);
 307    tcg_temp_free_i32(aspen);
 308    tcg_temp_free_i32(fpca);
 309}
 310
/*
 * Do a write to an M-profile floating point system register 'regno',
 * obtaining the value to write via 'loadfn' (called with 'opaque').
 * Returns false if decode should fall through (bad sysreg / no FP),
 * true if the access was fully handled.
 */
static bool gen_M_fp_sysreg_write(DisasContext *s, int regno,
                                  fp_sysreg_loadfn *loadfn,
                                  void *opaque)
{
    /* Do a write to an M-profile floating point system register */
    TCGv_i32 tmp;
    TCGLabel *lab_end = NULL;

    switch (fp_sysreg_checks(s, regno)) {
    case FPSysRegCheckFailed:
        return false;
    case FPSysRegCheckDone:
        return true;
    case FPSysRegCheckContinue:
        break;
    }

    switch (regno) {
    case ARM_VFP_FPSCR:
        tmp = loadfn(s, opaque, true);
        gen_helper_vfp_set_fpscr(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
        /* FPSCR affects cached TB state, so re-look-up the next TB */
        gen_lookup_tb(s);
        break;
    case ARM_VFP_FPSCR_NZCVQC:
    {
        TCGv_i32 fpscr;
        tmp = loadfn(s, opaque, true);
        if (dc_isar_feature(aa32_mve, s)) {
            /* QC is only present for MVE; otherwise RES0 */
            TCGv_i32 qc = tcg_temp_new_i32();
            tcg_gen_andi_i32(qc, tmp, FPCR_QC);
            /*
             * The 4 vfp.qc[] fields need only be "zero" vs "non-zero";
             * here writing the same value into all elements is simplest.
             */
            tcg_gen_gvec_dup_i32(MO_32, offsetof(CPUARMState, vfp.qc),
                                 16, 16, qc);
        }
        /* Merge just the NZCV bits into the stored FPSCR */
        tcg_gen_andi_i32(tmp, tmp, FPCR_NZCV_MASK);
        fpscr = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
        tcg_gen_andi_i32(fpscr, fpscr, ~FPCR_NZCV_MASK);
        tcg_gen_or_i32(fpscr, fpscr, tmp);
        store_cpu_field(fpscr, vfp.xregs[ARM_VFP_FPSCR]);
        tcg_temp_free_i32(tmp);
        break;
    }
    case ARM_VFP_FPCXT_NS:
    {
        TCGLabel *lab_active = gen_new_label();

        lab_end = gen_new_label();
        gen_branch_fpInactive(s, TCG_COND_EQ, lab_active);
        /*
         * fpInactive case: write is a NOP, so only do side effects
         * like register writeback before we branch to end
         */
        loadfn(s, opaque, false);
        tcg_gen_br(lab_end);

        gen_set_label(lab_active);
        /*
         * !fpInactive: if FPU disabled, take NOCP exception;
         * otherwise PreserveFPState(), and then FPCXT_NS writes
         * behave the same as FPCXT_S writes.
         */
        if (!vfp_access_check_m(s, true)) {
            /*
             * This was only a conditional exception, so override
             * gen_exception_insn()'s default to DISAS_NORETURN
             */
            s->base.is_jmp = DISAS_NEXT;
            break;
        }
    }
    /* fall through */
    case ARM_VFP_FPCXT_S:
    {
        TCGv_i32 sfpa, control;
        /*
         * Set FPSCR and CONTROL.SFPA from value; the new FPSCR takes
         * bits [27:0] from value and zeroes bits [31:28].
         */
        tmp = loadfn(s, opaque, true);
        sfpa = tcg_temp_new_i32();
        /* Bit [31] of the value becomes CONTROL.SFPA */
        tcg_gen_shri_i32(sfpa, tmp, 31);
        control = load_cpu_field(v7m.control[M_REG_S]);
        tcg_gen_deposit_i32(control, control, sfpa,
                            R_V7M_CONTROL_SFPA_SHIFT, 1);
        store_cpu_field(control, v7m.control[M_REG_S]);
        tcg_gen_andi_i32(tmp, tmp, ~FPCR_NZCV_MASK);
        gen_helper_vfp_set_fpscr(cpu_env, tmp);
        s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
        tcg_temp_free_i32(tmp);
        tcg_temp_free_i32(sfpa);
        break;
    }
    case ARM_VFP_VPR:
        /* Behaves as NOP if not privileged */
        if (IS_USER(s)) {
            /* Still perform side effects like base register writeback */
            loadfn(s, opaque, false);
            break;
        }
        tmp = loadfn(s, opaque, true);
        store_cpu_field(tmp, v7m.vpr);
        s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
        break;
    case ARM_VFP_P0:
        /* P0 is only part of VPR; deposit it without touching other bits */
    {
        TCGv_i32 vpr;
        tmp = loadfn(s, opaque, true);
        vpr = load_cpu_field(v7m.vpr);
        tcg_gen_deposit_i32(vpr, vpr, tmp,
                            R_V7M_VPR_P0_SHIFT, R_V7M_VPR_P0_LENGTH);
        store_cpu_field(vpr, v7m.vpr);
        s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
        tcg_temp_free_i32(tmp);
        break;
    }
    default:
        g_assert_not_reached();
    }
    if (lab_end) {
        gen_set_label(lab_end);
    }
    return true;
}
 438
/*
 * Do a read from an M-profile floating point system register 'regno',
 * delivering the result via 'storefn' (called with 'opaque').
 * Returns false if decode should fall through (bad sysreg / no FP),
 * true if the access was fully handled.
 */
static bool gen_M_fp_sysreg_read(DisasContext *s, int regno,
                                 fp_sysreg_storefn *storefn,
                                 void *opaque)
{
    /* Do a read from an M-profile floating point system register */
    TCGv_i32 tmp;
    TCGLabel *lab_end = NULL;
    bool lookup_tb = false;

    switch (fp_sysreg_checks(s, regno)) {
    case FPSysRegCheckFailed:
        return false;
    case FPSysRegCheckDone:
        return true;
    case FPSysRegCheckContinue:
        break;
    }

    if (regno == ARM_VFP_FPSCR_NZCVQC && !dc_isar_feature(aa32_mve, s)) {
        /* QC is RES0 without MVE, so NZCVQC simplifies to NZCV */
        regno = QEMU_VFP_FPSCR_NZCV;
    }

    switch (regno) {
    case ARM_VFP_FPSCR:
        tmp = tcg_temp_new_i32();
        gen_helper_vfp_get_fpscr(tmp, cpu_env);
        storefn(s, opaque, tmp, true);
        break;
    case ARM_VFP_FPSCR_NZCVQC:
        tmp = tcg_temp_new_i32();
        gen_helper_vfp_get_fpscr(tmp, cpu_env);
        tcg_gen_andi_i32(tmp, tmp, FPCR_NZCVQC_MASK);
        storefn(s, opaque, tmp, true);
        break;
    case QEMU_VFP_FPSCR_NZCV:
        /*
         * Read just NZCV; this is a special case to avoid the
         * helper call for the "VMRS to CPSR.NZCV" insn.
         */
        tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
        tcg_gen_andi_i32(tmp, tmp, FPCR_NZCV_MASK);
        storefn(s, opaque, tmp, true);
        break;
    case ARM_VFP_FPCXT_S:
    {
        TCGv_i32 control, sfpa, fpscr;
        /* Bits [27:0] from FPSCR, bit [31] from CONTROL.SFPA */
        tmp = tcg_temp_new_i32();
        sfpa = tcg_temp_new_i32();
        gen_helper_vfp_get_fpscr(tmp, cpu_env);
        tcg_gen_andi_i32(tmp, tmp, ~FPCR_NZCV_MASK);
        control = load_cpu_field(v7m.control[M_REG_S]);
        tcg_gen_andi_i32(sfpa, control, R_V7M_CONTROL_SFPA_MASK);
        tcg_gen_shli_i32(sfpa, sfpa, 31 - R_V7M_CONTROL_SFPA_SHIFT);
        tcg_gen_or_i32(tmp, tmp, sfpa);
        tcg_temp_free_i32(sfpa);
        /*
         * Store result before updating FPSCR etc, in case
         * it is a memory write which causes an exception.
         */
        storefn(s, opaque, tmp, true);
        /*
         * Now we must reset FPSCR from FPDSCR_NS, and clear
         * CONTROL.SFPA; so we'll end the TB here.
         */
        tcg_gen_andi_i32(control, control, ~R_V7M_CONTROL_SFPA_MASK);
        store_cpu_field(control, v7m.control[M_REG_S]);
        fpscr = load_cpu_field(v7m.fpdscr[M_REG_NS]);
        gen_helper_vfp_set_fpscr(cpu_env, fpscr);
        tcg_temp_free_i32(fpscr);
        lookup_tb = true;
        break;
    }
    case ARM_VFP_FPCXT_NS:
    {
        TCGv_i32 control, sfpa, fpscr, fpdscr, zero;
        TCGLabel *lab_active = gen_new_label();

        lookup_tb = true;

        gen_branch_fpInactive(s, TCG_COND_EQ, lab_active);
        /* fpInactive case: reads as FPDSCR_NS */
        /* NOTE: this 'tmp' deliberately shadows the outer declaration */
        TCGv_i32 tmp = load_cpu_field(v7m.fpdscr[M_REG_NS]);
        storefn(s, opaque, tmp, true);
        lab_end = gen_new_label();
        tcg_gen_br(lab_end);

        gen_set_label(lab_active);
        /*
         * !fpInactive: if FPU disabled, take NOCP exception;
         * otherwise PreserveFPState(), and then FPCXT_NS
         * reads the same as FPCXT_S.
         */
        if (!vfp_access_check_m(s, true)) {
            /*
             * This was only a conditional exception, so override
             * gen_exception_insn()'s default to DISAS_NORETURN
             */
            s->base.is_jmp = DISAS_NEXT;
            break;
        }
        tmp = tcg_temp_new_i32();
        sfpa = tcg_temp_new_i32();
        fpscr = tcg_temp_new_i32();
        gen_helper_vfp_get_fpscr(fpscr, cpu_env);
        tcg_gen_andi_i32(tmp, fpscr, ~FPCR_NZCV_MASK);
        control = load_cpu_field(v7m.control[M_REG_S]);
        tcg_gen_andi_i32(sfpa, control, R_V7M_CONTROL_SFPA_MASK);
        tcg_gen_shli_i32(sfpa, sfpa, 31 - R_V7M_CONTROL_SFPA_SHIFT);
        tcg_gen_or_i32(tmp, tmp, sfpa);
        tcg_temp_free_i32(control);
        /* Store result before updating FPSCR, in case it faults */
        storefn(s, opaque, tmp, true);
        /* If SFPA is zero then set FPSCR from FPDSCR_NS */
        fpdscr = load_cpu_field(v7m.fpdscr[M_REG_NS]);
        zero = tcg_const_i32(0);
        tcg_gen_movcond_i32(TCG_COND_EQ, fpscr, sfpa, zero, fpdscr, fpscr);
        gen_helper_vfp_set_fpscr(cpu_env, fpscr);
        tcg_temp_free_i32(zero);
        tcg_temp_free_i32(sfpa);
        tcg_temp_free_i32(fpdscr);
        tcg_temp_free_i32(fpscr);
        break;
    }
    case ARM_VFP_VPR:
        /* Behaves as NOP if not privileged */
        if (IS_USER(s)) {
            /* Still perform side effects like base register writeback */
            storefn(s, opaque, NULL, false);
            break;
        }
        tmp = load_cpu_field(v7m.vpr);
        storefn(s, opaque, tmp, true);
        break;
    case ARM_VFP_P0:
        /* Extract just the P0 field from VPR */
        tmp = load_cpu_field(v7m.vpr);
        tcg_gen_extract_i32(tmp, tmp, R_V7M_VPR_P0_SHIFT, R_V7M_VPR_P0_LENGTH);
        storefn(s, opaque, tmp, true);
        break;
    default:
        g_assert_not_reached();
    }

    if (lab_end) {
        gen_set_label(lab_end);
    }
    if (lookup_tb) {
        gen_lookup_tb(s);
    }
    return true;
}
 590
 591static void fp_sysreg_to_gpr(DisasContext *s, void *opaque, TCGv_i32 value,
 592                             bool do_access)
 593{
 594    arg_VMSR_VMRS *a = opaque;
 595
 596    if (!do_access) {
 597        return;
 598    }
 599
 600    if (a->rt == 15) {
 601        /* Set the 4 flag bits in the CPSR */
 602        gen_set_nzcv(value);
 603        tcg_temp_free_i32(value);
 604    } else {
 605        store_reg(s, a->rt, value);
 606    }
 607}
 608
 609static TCGv_i32 gpr_to_fp_sysreg(DisasContext *s, void *opaque, bool do_access)
 610{
 611    arg_VMSR_VMRS *a = opaque;
 612
 613    if (!do_access) {
 614        return NULL;
 615    }
 616    return load_reg(s, a->rt);
 617}
 618
 619static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
 620{
 621    /*
 622     * Accesses to R15 are UNPREDICTABLE; we choose to undef.
 623     * FPSCR -> r15 is a special case which writes to the PSR flags;
 624     * set a->reg to a special value to tell gen_M_fp_sysreg_read()
 625     * we only care about the top 4 bits of FPSCR there.
 626     */
 627    if (a->rt == 15) {
 628        if (a->l && a->reg == ARM_VFP_FPSCR) {
 629            a->reg = QEMU_VFP_FPSCR_NZCV;
 630        } else {
 631            return false;
 632        }
 633    }
 634
 635    if (a->l) {
 636        /* VMRS, move FP system register to gp register */
 637        return gen_M_fp_sysreg_read(s, a->reg, fp_sysreg_to_gpr, a);
 638    } else {
 639        /* VMSR, move gp register to FP system register */
 640        return gen_M_fp_sysreg_write(s, a->reg, gpr_to_fp_sysreg, a);
 641    }
 642}
 643
/*
 * Store callback for VSTR sysreg: write 'value' (a temp which we free)
 * to memory at [rn +/- imm], using the pre/post-index and writeback
 * choices from the arg_vldr_sysreg in 'opaque'. When do_access is
 * false only the base register writeback side effect is performed.
 */
static void fp_sysreg_to_memory(DisasContext *s, void *opaque, TCGv_i32 value,
                                bool do_access)
{
    arg_vldr_sysreg *a = opaque;
    uint32_t offset = a->imm;
    TCGv_i32 addr;

    if (!a->a) {
        /* 'add' bit clear: subtract the immediate from the base */
        offset = -offset;
    }

    /* No store and no writeback: nothing to do at all */
    if (!do_access && !a->w) {
        return;
    }

    addr = load_reg(s, a->rn);
    if (a->p) {
        /* Pre-indexed: apply the offset before the access */
        tcg_gen_addi_i32(addr, addr, offset);
    }

    /* v8M stack limit check applies only when writing back to SP */
    if (s->v8m_stackcheck && a->rn == 13 && a->w) {
        gen_helper_v8m_stackcheck(cpu_env, addr);
    }

    if (do_access) {
        gen_aa32_st_i32(s, value, addr, get_mem_index(s),
                        MO_UL | MO_ALIGN | s->be_data);
        tcg_temp_free_i32(value);
    }

    if (a->w) {
        /* writeback */
        if (!a->p) {
            /* Post-indexed: apply the offset after the access */
            tcg_gen_addi_i32(addr, addr, offset);
        }
        store_reg(s, a->rn, addr);
    } else {
        tcg_temp_free_i32(addr);
    }
}
 684
/*
 * Load callback for VLDR sysreg: read the new sysreg value from memory
 * at [rn +/- imm], using the pre/post-index and writeback choices from
 * the arg_vldr_sysreg in 'opaque'. Returns a new temp with the loaded
 * value, or NULL when do_access is false (in which case only the base
 * register writeback side effect is performed).
 */
static TCGv_i32 memory_to_fp_sysreg(DisasContext *s, void *opaque,
                                    bool do_access)
{
    arg_vldr_sysreg *a = opaque;
    uint32_t offset = a->imm;
    TCGv_i32 addr;
    TCGv_i32 value = NULL;

    if (!a->a) {
        /* 'add' bit clear: subtract the immediate from the base */
        offset = -offset;
    }

    /* No load and no writeback: nothing to do at all */
    if (!do_access && !a->w) {
        return NULL;
    }

    addr = load_reg(s, a->rn);
    if (a->p) {
        /* Pre-indexed: apply the offset before the access */
        tcg_gen_addi_i32(addr, addr, offset);
    }

    /* v8M stack limit check applies only when writing back to SP */
    if (s->v8m_stackcheck && a->rn == 13 && a->w) {
        gen_helper_v8m_stackcheck(cpu_env, addr);
    }

    if (do_access) {
        value = tcg_temp_new_i32();
        gen_aa32_ld_i32(s, value, addr, get_mem_index(s),
                        MO_UL | MO_ALIGN | s->be_data);
    }

    if (a->w) {
        /* writeback */
        if (!a->p) {
            /* Post-indexed: apply the offset after the access */
            tcg_gen_addi_i32(addr, addr, offset);
        }
        store_reg(s, a->rn, addr);
    } else {
        tcg_temp_free_i32(addr);
    }
    return value;
}
 727
 728static bool trans_VLDR_sysreg(DisasContext *s, arg_vldr_sysreg *a)
 729{
 730    if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
 731        return false;
 732    }
 733    if (a->rn == 15) {
 734        return false;
 735    }
 736    return gen_M_fp_sysreg_write(s, a->reg, memory_to_fp_sysreg, a);
 737}
 738
 739static bool trans_VSTR_sysreg(DisasContext *s, arg_vldr_sysreg *a)
 740{
 741    if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
 742        return false;
 743    }
 744    if (a->rn == 15) {
 745        return false;
 746    }
 747    return gen_M_fp_sysreg_read(s, a->reg, fp_sysreg_to_memory, a);
 748}
 749
 750static bool trans_NOCP(DisasContext *s, arg_nocp *a)
 751{
 752    /*
 753     * Handle M-profile early check for disabled coprocessor:
 754     * all we need to do here is emit the NOCP exception if
 755     * the coprocessor is disabled. Otherwise we return false
 756     * and the real VFP/etc decode will handle the insn.
 757     */
 758    assert(arm_dc_feature(s, ARM_FEATURE_M));
 759
 760    if (a->cp == 11) {
 761        a->cp = 10;
 762    }
 763    if (arm_dc_feature(s, ARM_FEATURE_V8_1M) &&
 764        (a->cp == 8 || a->cp == 9 || a->cp == 14 || a->cp == 15)) {
 765        /* in v8.1M cp 8, 9, 14, 15 also are governed by the cp10 enable */
 766        a->cp = 10;
 767    }
 768
 769    if (a->cp != 10) {
 770        gen_exception_insn(s, s->pc_curr, EXCP_NOCP,
 771                           syn_uncategorized(), default_exception_el(s));
 772        return true;
 773    }
 774
 775    if (s->fp_excp_el != 0) {
 776        gen_exception_insn(s, s->pc_curr, EXCP_NOCP,
 777                           syn_uncategorized(), s->fp_excp_el);
 778        return true;
 779    }
 780
 781    return false;
 782}
 783
 784static bool trans_NOCP_8_1(DisasContext *s, arg_nocp *a)
 785{
 786    /* This range needs a coprocessor check for v8.1M and later only */
 787    if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
 788        return false;
 789    }
 790    return trans_NOCP(s, a);
 791}
 792