qemu/target/arm/translate-vfp.c
/*
 *  ARM translation: AArch32 VFP instructions
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2005-2007 CodeSourcery
 *  Copyright (c) 2007 OpenedHand, Ltd.
 *  Copyright (c) 2019 Linaro, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/exec-all.h"
#include "exec/gen-icount.h"
#include "translate.h"
#include "translate-a32.h"

/* Include the generated VFP decoder */
#include "decode-vfp.c.inc"
#include "decode-vfp-uncond.c.inc"

static inline void vfp_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(true, reg));
}

static inline void vfp_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(true, reg));
}

static inline void vfp_load_reg32(TCGv_i32 var, int reg)
{
    tcg_gen_ld_i32(var, cpu_env, vfp_reg_offset(false, reg));
}

static inline void vfp_store_reg32(TCGv_i32 var, int reg)
{
    tcg_gen_st_i32(var, cpu_env, vfp_reg_offset(false, reg));
}

/*
 * The imm8 encodes the sign bit, enough bits to represent an exponent in
 * the range 01....1xx to 10....0xx, and the most significant 4 bits of
 * the mantissa; see VFPExpandImm() in the v8 ARM ARM.
 */
uint64_t vfp_expand_imm(int size, uint8_t imm8)
{
    uint64_t imm;

    switch (size) {
    case MO_64:
        imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
            (extract32(imm8, 6, 1) ? 0x3fc0 : 0x4000) |
            extract32(imm8, 0, 6);
        imm <<= 48;
        break;
    case MO_32:
        imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
            (extract32(imm8, 6, 1) ? 0x3e00 : 0x4000) |
            (extract32(imm8, 0, 6) << 3);
        imm <<= 16;
        break;
    case MO_16:
        imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
            (extract32(imm8, 6, 1) ? 0x3000 : 0x4000) |
            (extract32(imm8, 0, 6) << 6);
        break;
    default:
        g_assert_not_reached();
    }
    return imm;
}
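
/*
 * Worked example: imm8 == 0x70 (sign 0, bit 6 set, low bits 0x30)
 * expands to the encoding of 1.0 at every size: 0x3c00 for MO_16,
 * 0x3f800000 for MO_32 and 0x3ff0000000000000 for MO_64.
 */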

/*
 * Return the offset of a 16-bit half of the specified VFP single-precision
 * register. If top is true, returns the top 16 bits; otherwise the bottom
 * 16 bits.
 */
static inline long vfp_f16_offset(unsigned reg, bool top)
{
    long offs = vfp_reg_offset(false, reg);
#if HOST_BIG_ENDIAN
    if (!top) {
        offs += 2;
    }
#else
    if (top) {
        offs += 2;
    }
#endif
    return offs;
}
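
/*
 * For example, vfp_f16_offset(1, true) addresses the top half of s1:
 * on a little-endian host that is vfp_reg_offset(false, 1) + 2, while
 * on a big-endian host the top half sits at the start of the word.
 */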

/*
 * Generate code for M-profile lazy FP state preservation if needed;
 * this corresponds to the pseudocode PreserveFPState() function.
 */
static void gen_preserve_fp_state(DisasContext *s, bool skip_context_update)
{
    if (s->v7m_lspact) {
        /*
         * Lazy state saving affects external memory and also the NVIC,
         * so we must mark it as an IO operation for icount (and cause
         * this to be the last insn in the TB).
         */
        if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
            s->base.is_jmp = DISAS_UPDATE_EXIT;
            gen_io_start();
        }
        gen_helper_v7m_preserve_fp_state(cpu_env);
        /*
         * If the preserve_fp_state helper doesn't throw an exception
         * then it will clear LSPACT; we don't need to repeat this for
         * any further FP insns in this TB.
         */
        s->v7m_lspact = false;
        /*
         * The helper might have zeroed VPR, so we do not know the
         * correct value for the MVE_NO_PRED TB flag any more.
         * If we're about to create a new fp context then that
         * will precisely determine the MVE_NO_PRED value (see
         * gen_update_fp_context()). Otherwise, we must:
         *  - set s->mve_no_pred to false, so this instruction
         *    is generated to use helper functions
         *  - end the TB now, without chaining to the next TB
         */
        if (skip_context_update || !s->v7m_new_fp_ctxt_needed) {
            s->mve_no_pred = false;
            s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
        }
    }
}

/*
 * Generate code for M-profile FP context handling: update the
 * ownership of the FP context, and create a new context if
 * necessary. This corresponds to the parts of the pseudocode
 * ExecuteFPCheck() after the initial PreserveFPState() call.
 */
static void gen_update_fp_context(DisasContext *s)
{
    /* Update ownership of FP context: set FPCCR.S to match current state */
    if (s->v8m_fpccr_s_wrong) {
        TCGv_i32 tmp;

        tmp = load_cpu_field(v7m.fpccr[M_REG_S]);
        if (s->v8m_secure) {
            tcg_gen_ori_i32(tmp, tmp, R_V7M_FPCCR_S_MASK);
        } else {
            tcg_gen_andi_i32(tmp, tmp, ~R_V7M_FPCCR_S_MASK);
        }
        store_cpu_field(tmp, v7m.fpccr[M_REG_S]);
        /* Don't need to do this for any further FP insns in this TB */
        s->v8m_fpccr_s_wrong = false;
    }

    if (s->v7m_new_fp_ctxt_needed) {
        /*
         * Create new FP context by updating CONTROL.FPCA, CONTROL.SFPA,
         * the FPSCR, and VPR.
         */
        TCGv_i32 control, fpscr;
        uint32_t bits = R_V7M_CONTROL_FPCA_MASK;

        fpscr = load_cpu_field(v7m.fpdscr[s->v8m_secure]);
        gen_helper_vfp_set_fpscr(cpu_env, fpscr);
        tcg_temp_free_i32(fpscr);
        if (dc_isar_feature(aa32_mve, s)) {
            store_cpu_field(tcg_constant_i32(0), v7m.vpr);
        }
        /*
         * We just updated the FPSCR and VPR. Some of this state is cached
         * in the MVE_NO_PRED TB flag. We want to avoid having to end the
         * TB here, which means we need the new value of the MVE_NO_PRED
         * flag to be exactly known here and the same for all executions.
         * Luckily FPDSCR.LTPSIZE is always constant 4 and the VPR is
         * always set to 0, so the new MVE_NO_PRED flag is always 1
         * if and only if we have MVE.
         *
         * (The other FPSCR state cached in TB flags is VECLEN and VECSTRIDE,
         * but those do not exist for M-profile, so are not relevant here.)
         */
        s->mve_no_pred = dc_isar_feature(aa32_mve, s);

        if (s->v8m_secure) {
            bits |= R_V7M_CONTROL_SFPA_MASK;
        }
        control = load_cpu_field(v7m.control[M_REG_S]);
        tcg_gen_ori_i32(control, control, bits);
        store_cpu_field(control, v7m.control[M_REG_S]);
        /* Don't need to do this for any further FP insns in this TB */
        s->v7m_new_fp_ctxt_needed = false;
    }
}

/*
 * Check that VFP access is enabled, A-profile specific version.
 *
 * If VFP is enabled, return true. If not, emit code to generate an
 * appropriate exception and return false.
 * The ignore_vfp_enabled argument specifies that we should ignore
 * whether VFP is enabled via FPEXC.EN: this should be true for FMXR/FMRX
 * accesses to FPSID, FPEXC, MVFR0, MVFR1, MVFR2, and false for all other insns.
 */
static bool vfp_access_check_a(DisasContext *s, bool ignore_vfp_enabled)
{
    if (s->fp_excp_el) {
        /*
         * The full syndrome is only used for HSR when HCPTR traps:
         * For v8, when TA==0, coproc is RES0.
         * For v7, any use of a Floating-point instruction or access
         * to a Floating-point Extension register that is trapped to
         * Hyp mode because of a trap configured in the HCPTR sets
         * this field to 0xA.
         */
        int coproc = arm_dc_feature(s, ARM_FEATURE_V8) ? 0 : 0xa;
        uint32_t syn = syn_fp_access_trap(1, 0xe, false, coproc);

        gen_exception_insn_el(s, 0, EXCP_UDEF, syn, s->fp_excp_el);
        return false;
    }

    /*
     * Note that rebuild_hflags_a32 has already accounted for being in EL0
     * and the higher EL in A64 mode, etc.  Unlike A64 mode, there do not
     * appear to be any insns which touch VFP which are allowed.
     */
    if (s->sme_trap_nonstreaming) {
        gen_exception_insn(s, 0, EXCP_UDEF,
                           syn_smetrap(SME_ET_Streaming,
                                       curr_insn_len(s) == 2));
        return false;
    }

    if (!s->vfp_enabled && !ignore_vfp_enabled) {
        assert(!arm_dc_feature(s, ARM_FEATURE_M));
        unallocated_encoding(s);
        return false;
    }
    return true;
}

/*
 * Check that VFP access is enabled, M-profile specific version.
 *
 * If VFP is enabled, do the necessary M-profile lazy-FP handling and then
 * return true. If not, emit code to generate an appropriate exception and
 * return false.
 * skip_context_update is true to skip the "update FP context" part of this.
 */
bool vfp_access_check_m(DisasContext *s, bool skip_context_update)
{
    if (s->fp_excp_el) {
        /*
         * M-profile mostly catches the "FPU disabled" case early, in
         * disas_m_nocp(), but a few insns (eg LCTP, WLSTP, DLSTP)
         * which do coprocessor-checks are outside the large ranges of
         * the encoding space handled by the patterns in m-nocp.decode,
         * and for them we may need to raise NOCP here.
         */
        gen_exception_insn_el(s, 0, EXCP_NOCP,
                              syn_uncategorized(), s->fp_excp_el);
        return false;
    }

    /* Handle M-profile lazy FP state mechanics */

    /* Trigger lazy-state preservation if necessary */
    gen_preserve_fp_state(s, skip_context_update);

    if (!skip_context_update) {
        /* Update ownership of FP context and create new FP context if needed */
        gen_update_fp_context(s);
    }

    return true;
}

/*
 * The most usual kind of VFP access check, for everything except
 * FMXR/FMRX to the always-available special registers.
 */
bool vfp_access_check(DisasContext *s)
{
    if (arm_dc_feature(s, ARM_FEATURE_M)) {
        return vfp_access_check_m(s, false);
    } else {
        return vfp_access_check_a(s, false);
    }
}
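
/*
 * Note the calling convention used by the trans_* functions below: when
 * vfp_access_check() returns false it has already emitted the exception
 * code, so the caller returns true ("instruction handled") without
 * generating any code for the insn itself.
 */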

static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
{
    uint32_t rd, rn, rm;
    int sz = a->sz;

    if (!dc_isar_feature(aa32_vsel, s)) {
        return false;
    }

    if (sz == 3 && !dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    if (sz == 1 && !dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (sz == 3 && !dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vm | a->vn | a->vd) & 0x10)) {
        return false;
    }

    rd = a->vd;
    rn = a->vn;
    rm = a->vm;

    if (!vfp_access_check(s)) {
        return true;
    }

    if (sz == 3) {
        TCGv_i64 frn, frm, dest;
        TCGv_i64 tmp, zero, zf, nf, vf;

        zero = tcg_constant_i64(0);

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        zf = tcg_temp_new_i64();
        nf = tcg_temp_new_i64();
        vf = tcg_temp_new_i64();

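        /*
         * ZF can be zero-extended because the conditions below only
         * compare it for equality with zero; NF and VF are tested via
         * their sign bits (LT/GE), so they must be sign-extended.
         */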
        tcg_gen_extu_i32_i64(zf, cpu_ZF);
        tcg_gen_ext_i32_i64(nf, cpu_NF);
        tcg_gen_ext_i32_i64(vf, cpu_VF);

        vfp_load_reg64(frn, rn);
        vfp_load_reg64(frm, rm);
        switch (a->cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero, frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero, frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero, frn, frm);
            tcg_temp_free_i64(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero, frn, frm);
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero, dest, frm);
            tcg_temp_free_i64(tmp);
            break;
        }
        vfp_store_reg64(dest, rd);
        tcg_temp_free_i64(frn);
        tcg_temp_free_i64(frm);
        tcg_temp_free_i64(dest);

        tcg_temp_free_i64(zf);
        tcg_temp_free_i64(nf);
        tcg_temp_free_i64(vf);
    } else {
        TCGv_i32 frn, frm, dest;
        TCGv_i32 tmp, zero;

        zero = tcg_constant_i32(0);

        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();
        dest = tcg_temp_new_i32();
        vfp_load_reg32(frn, rn);
        vfp_load_reg32(frm, rm);
        switch (a->cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero, frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero, frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero, frn, frm);
            tcg_temp_free_i32(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero, frn, frm);
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero, dest, frm);
            tcg_temp_free_i32(tmp);
            break;
        }
        /* For fp16 the top half is always zeroes */
        if (sz == 1) {
            tcg_gen_andi_i32(dest, dest, 0xffff);
        }
        vfp_store_reg32(dest, rd);
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);
    }

    return true;
}

/*
 * Table for converting the most common AArch32 encoding of
 * rounding mode to arm_fprounding order (which matches the
 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
 */
static const uint8_t fp_decode_rm[] = {
    FPROUNDING_TIEAWAY,
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
};
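
/*
 * For instance, an insn-encoded rounding-mode field of 0b00 (as used by
 * VRINTA) maps to FPROUNDING_TIEAWAY, while 0b01 (VRINTN) selects
 * round-to-nearest-even, independent of the mode held in the FPSCR.
 */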

static bool trans_VRINT(DisasContext *s, arg_VRINT *a)
{
    uint32_t rd, rm;
    int sz = a->sz;
    TCGv_ptr fpst;
    TCGv_i32 tcg_rmode;
    int rounding = fp_decode_rm[a->rm];

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    if (sz == 3 && !dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    if (sz == 1 && !dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (sz == 3 && !dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vm | a->vd) & 0x10)) {
        return false;
    }

    rd = a->vd;
    rm = a->vm;

    if (!vfp_access_check(s)) {
        return true;
    }

    if (sz == 1) {
        fpst = fpstatus_ptr(FPST_FPCR_F16);
    } else {
        fpst = fpstatus_ptr(FPST_FPCR);
    }

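    /*
     * gen_helper_set_rmode() installs the new rounding mode in the fp
     * status and hands back the old mode in its first argument, so
     * calling it a second time with the same temporary restores the
     * original FPSCR rounding mode after the operation.
     */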
    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);

    if (sz == 3) {
        TCGv_i64 tcg_op;
        TCGv_i64 tcg_res;
        tcg_op = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        vfp_load_reg64(tcg_op, rm);
        gen_helper_rintd(tcg_res, tcg_op, fpst);
        vfp_store_reg64(tcg_res, rd);
        tcg_temp_free_i64(tcg_op);
        tcg_temp_free_i64(tcg_res);
    } else {
        TCGv_i32 tcg_op;
        TCGv_i32 tcg_res;
        tcg_op = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        vfp_load_reg32(tcg_op, rm);
        if (sz == 1) {
            gen_helper_rinth(tcg_res, tcg_op, fpst);
        } else {
            gen_helper_rints(tcg_res, tcg_op, fpst);
        }
        vfp_store_reg32(tcg_res, rd);
        tcg_temp_free_i32(tcg_op);
        tcg_temp_free_i32(tcg_res);
    }

    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_ptr(fpst);
    return true;
}

static bool trans_VCVT(DisasContext *s, arg_VCVT *a)
{
    uint32_t rd, rm;
    int sz = a->sz;
    TCGv_ptr fpst;
    TCGv_i32 tcg_rmode, tcg_shift;
    int rounding = fp_decode_rm[a->rm];
    bool is_signed = a->op;

    if (!dc_isar_feature(aa32_vcvt_dr, s)) {
        return false;
    }

    if (sz == 3 && !dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    if (sz == 1 && !dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (sz == 3 && !dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
        return false;
    }

    rd = a->vd;
    rm = a->vm;

    if (!vfp_access_check(s)) {
        return true;
    }

    if (sz == 1) {
        fpst = fpstatus_ptr(FPST_FPCR_F16);
    } else {
        fpst = fpstatus_ptr(FPST_FPCR);
    }

    tcg_shift = tcg_constant_i32(0);

    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);

    if (sz == 3) {
        TCGv_i64 tcg_double, tcg_res;
        TCGv_i32 tcg_tmp;
        tcg_double = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        tcg_tmp = tcg_temp_new_i32();
        vfp_load_reg64(tcg_double, rm);
        if (is_signed) {
            gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
        } else {
            gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
        }
        tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
        vfp_store_reg32(tcg_tmp, rd);
        tcg_temp_free_i32(tcg_tmp);
        tcg_temp_free_i64(tcg_res);
        tcg_temp_free_i64(tcg_double);
    } else {
        TCGv_i32 tcg_single, tcg_res;
        tcg_single = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        vfp_load_reg32(tcg_single, rm);
        if (sz == 1) {
            if (is_signed) {
                gen_helper_vfp_toslh(tcg_res, tcg_single, tcg_shift, fpst);
            } else {
                gen_helper_vfp_toulh(tcg_res, tcg_single, tcg_shift, fpst);
            }
        } else {
            if (is_signed) {
                gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
            } else {
                gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
            }
        }
        vfp_store_reg32(tcg_res, rd);
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_single);
    }

    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_ptr(fpst);

    return true;
}
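
/*
 * Note that in the sz == 3 case above only Vm is a double-precision
 * register: the result is a 32-bit integer, so Vd is always a
 * single-precision register and needs no D16-D31 check.
 */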

bool mve_skip_vmov(DisasContext *s, int vn, int index, int size)
{
    /*
     * In a CPU with MVE, the VMOV (vector lane to general-purpose register)
     * and VMOV (general-purpose register to vector lane) insns are not
     * predicated, but they are subject to beatwise execution if they are
     * not in an IT block.
     *
     * Since our implementation always executes all 4 beats in one tick,
     * this means only that if PSR.ECI says we should not be executing
     * the beat corresponding to the lane of the vector register being
     * accessed then we should skip performing the move, and that we need
     * to do the usual check for bad ECI state and advance of ECI state.
     *
     * Note that if PSR.ECI is non-zero then we cannot be in an IT block.
     *
     * Return true if this VMOV scalar <-> gpreg should be skipped because
     * the MVE PSR.ECI state says we skip the beat where the store happens.
     */

    /* Calculate the byte offset into Qn which we're going to access */
    int ofs = (index << size) + ((vn & 1) * 8);

    if (!dc_isar_feature(aa32_mve, s)) {
        return false;
    }

    switch (s->eci) {
    case ECI_NONE:
        return false;
    case ECI_A0:
        return ofs < 4;
    case ECI_A0A1:
        return ofs < 8;
    case ECI_A0A1A2:
    case ECI_A0A1A2B0:
        return ofs < 12;
    default:
        g_assert_not_reached();
    }
}
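
/*
 * Example (hypothetical values): a 16-bit lane at index 1 of an
 * even-numbered D register (the low half of its Q register) gives
 * ofs == 2, so with ECI_A0 the lane falls in already-completed beat 0
 * and the move is skipped.
 */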

static bool trans_VMOV_to_gp(DisasContext *s, arg_VMOV_to_gp *a)
{
    /* VMOV scalar to general purpose register */
    TCGv_i32 tmp;

    /*
     * SIZE == MO_32 is a VFP instruction; otherwise NEON. MVE has
     * all sizes, whether the CPU has fp or not.
     */
    if (!dc_isar_feature(aa32_mve, s)) {
        if (a->size == MO_32
            ? !dc_isar_feature(aa32_fpsp_v2, s)
            : !arm_dc_feature(s, ARM_FEATURE_NEON)) {
            return false;
        }
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vn & 0x10)) {
        return false;
    }

    if (dc_isar_feature(aa32_mve, s)) {
        if (!mve_eci_check(s)) {
            return true;
        }
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (!mve_skip_vmov(s, a->vn, a->index, a->size)) {
        tmp = tcg_temp_new_i32();
        read_neon_element32(tmp, a->vn, a->index,
                            a->size | (a->u ? 0 : MO_SIGN));
        store_reg(s, a->rt, tmp);
    }

    if (dc_isar_feature(aa32_mve, s)) {
        mve_update_and_store_eci(s);
    }
    return true;
}

static bool trans_VMOV_from_gp(DisasContext *s, arg_VMOV_from_gp *a)
{
    /* VMOV general purpose register to scalar */
    TCGv_i32 tmp;

    /*
     * SIZE == MO_32 is a VFP instruction; otherwise NEON. MVE has
     * all sizes, whether the CPU has fp or not.
     */
    if (!dc_isar_feature(aa32_mve, s)) {
        if (a->size == MO_32
            ? !dc_isar_feature(aa32_fpsp_v2, s)
            : !arm_dc_feature(s, ARM_FEATURE_NEON)) {
            return false;
        }
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vn & 0x10)) {
        return false;
    }

    if (dc_isar_feature(aa32_mve, s)) {
        if (!mve_eci_check(s)) {
            return true;
        }
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (!mve_skip_vmov(s, a->vn, a->index, a->size)) {
        tmp = load_reg(s, a->rt);
        write_neon_element32(tmp, a->vn, a->index, a->size);
        tcg_temp_free_i32(tmp);
    }

    if (dc_isar_feature(aa32_mve, s)) {
        mve_update_and_store_eci(s);
    }
    return true;
}

static bool trans_VDUP(DisasContext *s, arg_VDUP *a)
{
    /* VDUP (general purpose register) */
    TCGv_i32 tmp;
    int size, vec_size;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vn & 0x10)) {
        return false;
    }

    if (a->b && a->e) {
        return false;
    }

    if (a->q && (a->vn & 1)) {
        return false;
    }

    vec_size = a->q ? 16 : 8;
    if (a->b) {
        size = 0;
    } else if (a->e) {
        size = 1;
    } else {
        size = 2;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = load_reg(s, a->rt);
    tcg_gen_gvec_dup_i32(size, neon_full_reg_offset(a->vn),
                         vec_size, vec_size, tmp);
    tcg_temp_free_i32(tmp);

    return true;
}
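
/*
 * tcg_gen_gvec_dup_i32() above broadcasts the low 2^size bytes of tmp
 * across all vec_size bytes of the destination, i.e. one D register
 * or a full Q register depending on the Q bit.
 */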

static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
{
    TCGv_i32 tmp;
    bool ignore_vfp_enabled = false;

    if (arm_dc_feature(s, ARM_FEATURE_M)) {
        /* M profile version was already handled in m-nocp.decode */
        return false;
    }

    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
        return false;
    }

    switch (a->reg) {
    case ARM_VFP_FPSID:
        /*
         * VFPv2 allows access to FPSID from userspace; VFPv3 restricts
         * all ID registers to privileged access only.
         */
        if (IS_USER(s) && dc_isar_feature(aa32_fpsp_v3, s)) {
            return false;
        }
        ignore_vfp_enabled = true;
        break;
    case ARM_VFP_MVFR0:
    case ARM_VFP_MVFR1:
        if (IS_USER(s) || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
            return false;
        }
        ignore_vfp_enabled = true;
        break;
    case ARM_VFP_MVFR2:
        if (IS_USER(s) || !arm_dc_feature(s, ARM_FEATURE_V8)) {
            return false;
        }
        ignore_vfp_enabled = true;
        break;
    case ARM_VFP_FPSCR:
        break;
    case ARM_VFP_FPEXC:
        if (IS_USER(s)) {
            return false;
        }
        ignore_vfp_enabled = true;
        break;
    case ARM_VFP_FPINST:
    case ARM_VFP_FPINST2:
        /* Not present in VFPv3 */
        if (IS_USER(s) || dc_isar_feature(aa32_fpsp_v3, s)) {
            return false;
        }
        break;
    default:
        return false;
    }

    /*
     * Call vfp_access_check_a() directly, because we need to tell
     * it to ignore FPEXC.EN for some register accesses.
     */
    if (!vfp_access_check_a(s, ignore_vfp_enabled)) {
        return true;
    }

    if (a->l) {
        /* VMRS, move VFP special register to gp register */
        switch (a->reg) {
        case ARM_VFP_MVFR0:
        case ARM_VFP_MVFR1:
        case ARM_VFP_MVFR2:
        case ARM_VFP_FPSID:
            if (s->current_el == 1) {
                gen_set_condexec(s);
                gen_update_pc(s, 0);
                gen_helper_check_hcr_el2_trap(cpu_env,
                                              tcg_constant_i32(a->rt),
                                              tcg_constant_i32(a->reg));
            }
            /* fall through */
        case ARM_VFP_FPEXC:
        case ARM_VFP_FPINST:
        case ARM_VFP_FPINST2:
            tmp = load_cpu_field(vfp.xregs[a->reg]);
            break;
        case ARM_VFP_FPSCR:
            if (a->rt == 15) {
                tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
                tcg_gen_andi_i32(tmp, tmp, FPCR_NZCV_MASK);
            } else {
                tmp = tcg_temp_new_i32();
                gen_helper_vfp_get_fpscr(tmp, cpu_env);
            }
            break;
        default:
            g_assert_not_reached();
        }

        if (a->rt == 15) {
            /* Set the 4 flag bits in the CPSR.  */
            gen_set_nzcv(tmp);
            tcg_temp_free_i32(tmp);
        } else {
            store_reg(s, a->rt, tmp);
        }
    } else {
        /* VMSR, move gp register to VFP special register */
        switch (a->reg) {
        case ARM_VFP_FPSID:
        case ARM_VFP_MVFR0:
        case ARM_VFP_MVFR1:
        case ARM_VFP_MVFR2:
            /* Writes are ignored.  */
            break;
        case ARM_VFP_FPSCR:
            tmp = load_reg(s, a->rt);
            gen_helper_vfp_set_fpscr(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            gen_lookup_tb(s);
            break;
        case ARM_VFP_FPEXC:
            /*
             * TODO: VFP subarchitecture support.
             * For now, keep the EN bit only
             */
            tmp = load_reg(s, a->rt);
            tcg_gen_andi_i32(tmp, tmp, 1 << 30);
            store_cpu_field(tmp, vfp.xregs[a->reg]);
            gen_lookup_tb(s);
            break;
        case ARM_VFP_FPINST:
        case ARM_VFP_FPINST2:
            tmp = load_reg(s, a->rt);
            store_cpu_field(tmp, vfp.xregs[a->reg]);
            break;
        default:
            g_assert_not_reached();
        }
    }

    return true;
}

static bool trans_VMOV_half(DisasContext *s, arg_VMOV_single *a)
{
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (a->rt == 15) {
        /* UNPREDICTABLE; we choose to UNDEF */
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (a->l) {
        /* VFP to general purpose register */
        tmp = tcg_temp_new_i32();
        vfp_load_reg32(tmp, a->vn);
        tcg_gen_andi_i32(tmp, tmp, 0xffff);
        store_reg(s, a->rt, tmp);
    } else {
        /* general purpose register to VFP */
        tmp = load_reg(s, a->rt);
        tcg_gen_andi_i32(tmp, tmp, 0xffff);
        vfp_store_reg32(tmp, a->vn);
        tcg_temp_free_i32(tmp);
    }

    return true;
}

static bool trans_VMOV_single(DisasContext *s, arg_VMOV_single *a)
{
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (a->l) {
        /* VFP to general purpose register */
        tmp = tcg_temp_new_i32();
        vfp_load_reg32(tmp, a->vn);
        if (a->rt == 15) {
            /* Set the 4 flag bits in the CPSR.  */
            gen_set_nzcv(tmp);
            tcg_temp_free_i32(tmp);
        } else {
            store_reg(s, a->rt, tmp);
        }
    } else {
        /* general purpose register to VFP */
        tmp = load_reg(s, a->rt);
        vfp_store_reg32(tmp, a->vn);
        tcg_temp_free_i32(tmp);
    }

    return true;
}

static bool trans_VMOV_64_sp(DisasContext *s, arg_VMOV_64_sp *a)
{
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
        return false;
    }

    /*
     * VMOV between two general-purpose registers and two single precision
     * floating point registers
     */
    if (!vfp_access_check(s)) {
        return true;
    }

    if (a->op) {
        /* fpreg to gpreg */
        tmp = tcg_temp_new_i32();
        vfp_load_reg32(tmp, a->vm);
        store_reg(s, a->rt, tmp);
        tmp = tcg_temp_new_i32();
        vfp_load_reg32(tmp, a->vm + 1);
        store_reg(s, a->rt2, tmp);
    } else {
        /* gpreg to fpreg */
        tmp = load_reg(s, a->rt);
        vfp_store_reg32(tmp, a->vm);
        tcg_temp_free_i32(tmp);
        tmp = load_reg(s, a->rt2);
        vfp_store_reg32(tmp, a->vm + 1);
        tcg_temp_free_i32(tmp);
    }

    return true;
}

static bool trans_VMOV_64_dp(DisasContext *s, arg_VMOV_64_dp *a)
{
    TCGv_i32 tmp;

    /*
     * VMOV between two general-purpose registers and one double precision
     * floating point register.  Note that this does not require support
     * for double precision arithmetic.
     */
    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (a->op) {
        /* fpreg to gpreg */
        tmp = tcg_temp_new_i32();
        vfp_load_reg32(tmp, a->vm * 2);
        store_reg(s, a->rt, tmp);
        tmp = tcg_temp_new_i32();
        vfp_load_reg32(tmp, a->vm * 2 + 1);
        store_reg(s, a->rt2, tmp);
    } else {
        /* gpreg to fpreg */
        tmp = load_reg(s, a->rt);
        vfp_store_reg32(tmp, a->vm * 2);
        tcg_temp_free_i32(tmp);
        tmp = load_reg(s, a->rt2);
        vfp_store_reg32(tmp, a->vm * 2 + 1);
        tcg_temp_free_i32(tmp);
    }

    return true;
}

static bool trans_VLDR_VSTR_hp(DisasContext *s, arg_VLDR_VSTR_sp *a)
{
    uint32_t offset;
    TCGv_i32 addr, tmp;

    if (!dc_isar_feature(aa32_fp16_arith, s) && !dc_isar_feature(aa32_mve, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    /* imm8 field is offset/2 for fp16, unlike fp32 and fp64 */
    offset = a->imm << 1;
    if (!a->u) {
        offset = -offset;
    }

    /* For thumb, use of PC is UNPREDICTABLE.  */
    addr = add_reg_for_lit(s, a->rn, offset);
    tmp = tcg_temp_new_i32();
    if (a->l) {
        gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), MO_UW | MO_ALIGN);
        vfp_store_reg32(tmp, a->vd);
    } else {
        vfp_load_reg32(tmp, a->vd);
        gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), MO_UW | MO_ALIGN);
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(addr);

    return true;
}

static bool trans_VLDR_VSTR_sp(DisasContext *s, arg_VLDR_VSTR_sp *a)
{
    uint32_t offset;
    TCGv_i32 addr, tmp;

    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    offset = a->imm << 2;
    if (!a->u) {
        offset = -offset;
    }

    /* For thumb, use of PC is UNPREDICTABLE.  */
    addr = add_reg_for_lit(s, a->rn, offset);
    tmp = tcg_temp_new_i32();
    if (a->l) {
        gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN);
        vfp_store_reg32(tmp, a->vd);
    } else {
        vfp_load_reg32(tmp, a->vd);
        gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN);
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(addr);

    return true;
}

static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_dp *a)
{
    uint32_t offset;
    TCGv_i32 addr;
    TCGv_i64 tmp;

    /* Note that this does not require support for double arithmetic.  */
    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    offset = a->imm << 2;
    if (!a->u) {
        offset = -offset;
    }

    /* For thumb, use of PC is UNPREDICTABLE.  */
    addr = add_reg_for_lit(s, a->rn, offset);
    tmp = tcg_temp_new_i64();
    if (a->l) {
        gen_aa32_ld_i64(s, tmp, addr, get_mem_index(s), MO_UQ | MO_ALIGN_4);
        vfp_store_reg64(tmp, a->vd);
    } else {
        vfp_load_reg64(tmp, a->vd);
        gen_aa32_st_i64(s, tmp, addr, get_mem_index(s), MO_UQ | MO_ALIGN_4);
    }
    tcg_temp_free_i64(tmp);
    tcg_temp_free_i32(addr);

    return true;
}

static bool trans_VLDM_VSTM_sp(DisasContext *s, arg_VLDM_VSTM_sp *a)
{
    uint32_t offset;
    TCGv_i32 addr, tmp;
    int i, n;

    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
        return false;
    }

    n = a->imm;

    if (n == 0 || (a->vd + n) > 32) {
        /*
         * UNPREDICTABLE cases for bad immediates: we choose to
         * UNDEF to avoid generating huge numbers of TCG ops
         */
        return false;
    }
    if (a->rn == 15 && a->w) {
        /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
        return false;
    }

    s->eci_handled = true;

    if (!vfp_access_check(s)) {
        return true;
    }

    /* For thumb, use of PC is UNPREDICTABLE.  */
    addr = add_reg_for_lit(s, a->rn, 0);
    if (a->p) {
        /* pre-decrement */
        tcg_gen_addi_i32(addr, addr, -(a->imm << 2));
    }

    if (s->v8m_stackcheck && a->rn == 13 && a->w) {
        /*
         * Here 'addr' is the lowest address we will store to,
         * and is either the old SP (if post-increment) or
         * the new SP (if pre-decrement). For post-increment
         * where the old value is below the limit and the new
         * value is above, it is UNKNOWN whether the limit check
         * triggers; we choose to trigger.
         */
        gen_helper_v8m_stackcheck(cpu_env, addr);
    }

    offset = 4;
    tmp = tcg_temp_new_i32();
    for (i = 0; i < n; i++) {
        if (a->l) {
            /* load */
            gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN);
            vfp_store_reg32(tmp, a->vd + i);
        } else {
            /* store */
            vfp_load_reg32(tmp, a->vd + i);
            gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN);
        }
        tcg_gen_addi_i32(addr, addr, offset);
    }
    tcg_temp_free_i32(tmp);
    if (a->w) {
        /* writeback */
        if (a->p) {
            offset = -offset * n;
            tcg_gen_addi_i32(addr, addr, offset);
        }
        store_reg(s, a->rn, addr);
    } else {
        tcg_temp_free_i32(addr);
    }

    clear_eci_state(s);
    return true;
}

static bool trans_VLDM_VSTM_dp(DisasContext *s, arg_VLDM_VSTM_dp *a)
{
    uint32_t offset;
    TCGv_i32 addr;
    TCGv_i64 tmp;
    int i, n;

    /* Note that this does not require support for double arithmetic.  */
    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
        return false;
    }

    n = a->imm >> 1;

    if (n == 0 || (a->vd + n) > 32 || n > 16) {
        /*
         * UNPREDICTABLE cases for bad immediates: we choose to
         * UNDEF to avoid generating huge numbers of TCG ops
         */
        return false;
    }
    if (a->rn == 15 && a->w) {
        /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd + n) > 16) {
        return false;
    }

    s->eci_handled = true;

    if (!vfp_access_check(s)) {
        return true;
    }

    /* For thumb, use of PC is UNPREDICTABLE.  */
    addr = add_reg_for_lit(s, a->rn, 0);
    if (a->p) {
        /* pre-decrement */
        tcg_gen_addi_i32(addr, addr, -(a->imm << 2));
    }

    if (s->v8m_stackcheck && a->rn == 13 && a->w) {
        /*
         * Here 'addr' is the lowest address we will store to,
         * and is either the old SP (if post-increment) or
         * the new SP (if pre-decrement). For post-increment
         * where the old value is below the limit and the new
         * value is above, it is UNKNOWN whether the limit check
         * triggers; we choose to trigger.
         */
        gen_helper_v8m_stackcheck(cpu_env, addr);
    }

    offset = 8;
    tmp = tcg_temp_new_i64();
    for (i = 0; i < n; i++) {
        if (a->l) {
            /* load */
            gen_aa32_ld_i64(s, tmp, addr, get_mem_index(s), MO_UQ | MO_ALIGN_4);
            vfp_store_reg64(tmp, a->vd + i);
        } else {
            /* store */
            vfp_load_reg64(tmp, a->vd + i);
            gen_aa32_st_i64(s, tmp, addr, get_mem_index(s), MO_UQ | MO_ALIGN_4);
        }
        tcg_gen_addi_i32(addr, addr, offset);
    }
    tcg_temp_free_i64(tmp);
    if (a->w) {
        /* writeback */
        if (a->p) {
            offset = -offset * n;
        } else if (a->imm & 1) {
            /* odd imm: FLDMX/FSTMX form, writeback covers an extra word */
            offset = 4;
        } else {
            offset = 0;
        }

        if (offset != 0) {
            tcg_gen_addi_i32(addr, addr, offset);
        }
        store_reg(s, a->rn, addr);
    } else {
        tcg_temp_free_i32(addr);
    }

    clear_eci_state(s);
    return true;
}

/*
 * Types for callbacks for do_vfp_3op_sp() and do_vfp_3op_dp().
 * The callback should emit code to write a value to vd. If
 * do_vfp_3op_{sp,dp}() was passed reads_vd then the TCGv vd
 * will contain the old value of the relevant VFP register;
 * otherwise it must be written to only.
 */
typedef void VFPGen3OpSPFn(TCGv_i32 vd,
                           TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst);
typedef void VFPGen3OpDPFn(TCGv_i64 vd,
                           TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst);

/*
 * Types for callbacks for do_vfp_2op_sp() and do_vfp_2op_dp().
 * The callback should emit code to write a value to vd (which
 * should be written to only).
 */
typedef void VFPGen2OpSPFn(TCGv_i32 vd, TCGv_i32 vm);
typedef void VFPGen2OpDPFn(TCGv_i64 vd, TCGv_i64 vm);

/*
 * Return true if the specified S reg is in a scalar bank
 * (ie if it is s0..s7)
 */
static inline bool vfp_sreg_is_scalar(int reg)
{
    return (reg & 0x18) == 0;
}

/*
 * Return true if the specified D reg is in a scalar bank
 * (ie if it is d0..d3 or d16..d19)
 */
static inline bool vfp_dreg_is_scalar(int reg)
{
    return (reg & 0xc) == 0;
}

/*
 * Advance the S reg number forwards by delta within its bank
 * (ie increment the low 3 bits but leave the rest the same)
 */
static inline int vfp_advance_sreg(int reg, int delta)
{
    return ((reg + delta) & 0x7) | (reg & ~0x7);
}

/*
 * Advance the D reg number forwards by delta within its bank
 * (ie increment the low 2 bits but leave the rest the same)
 */
static inline int vfp_advance_dreg(int reg, int delta)
{
    return ((reg + delta) & 0x3) | (reg & ~0x3);
}
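
/*
 * For example, vfp_advance_sreg(6, 3) yields s1 rather than s9: only
 * the low three bits advance, wrapping within the bank s0..s7, while
 * the bank-select bits are left unchanged.
 */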

/*
 * Perform a 3-operand VFP data processing instruction. fn is the
 * callback to do the actual operation; this function deals with the
 * code to handle looping around for VFP vector processing.
 */
static bool do_vfp_3op_sp(DisasContext *s, VFPGen3OpSPFn *fn,
                          int vd, int vn, int vm, bool reads_vd)
{
    uint32_t delta_m = 0;
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i32 f0, f1, fd;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        /* Figure out what type of vector operation this is.  */
        if (vfp_sreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = s->vec_stride + 1;

            if (vfp_sreg_is_scalar(vm)) {
                /* mixed scalar/vector */
                delta_m = 0;
            } else {
                /* vector */
                delta_m = delta_d;
            }
        }
    }

    f0 = tcg_temp_new_i32();
    f1 = tcg_temp_new_i32();
    fd = tcg_temp_new_i32();
    fpst = fpstatus_ptr(FPST_FPCR);

    vfp_load_reg32(f0, vn);
    vfp_load_reg32(f1, vm);

    for (;;) {
        if (reads_vd) {
            vfp_load_reg32(fd, vd);
        }
        fn(fd, f0, f1, fpst);
        vfp_store_reg32(fd, vd);

        if (veclen == 0) {
            break;
        }

        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_sreg(vd, delta_d);
        vn = vfp_advance_sreg(vn, delta_d);
        vfp_load_reg32(f0, vn);
        if (delta_m) {
            vm = vfp_advance_sreg(vm, delta_m);
            vfp_load_reg32(f1, vm);
        }
    }

    tcg_temp_free_i32(f0);
    tcg_temp_free_i32(f1);
    tcg_temp_free_i32(fd);
    tcg_temp_free_ptr(fpst);

    return true;
}

static bool do_vfp_3op_hp(DisasContext *s, VFPGen3OpSPFn *fn,
                          int vd, int vn, int vm, bool reads_vd)
{
    /*
     * Do a half-precision operation. Functionally this is
     * the same as do_vfp_3op_sp(), except:
     *  - it uses the FPST_FPCR_F16
     *  - it doesn't need the VFP vector handling (fp16 is a
     *    v8 feature, and in v8 VFP vectors don't exist)
     *  - it does the aa32_fp16_arith feature test
     */
    TCGv_i32 f0, f1, fd;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (s->vec_len != 0 || s->vec_stride != 0) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    f0 = tcg_temp_new_i32();
    f1 = tcg_temp_new_i32();
    fd = tcg_temp_new_i32();
    fpst = fpstatus_ptr(FPST_FPCR_F16);

    vfp_load_reg32(f0, vn);
    vfp_load_reg32(f1, vm);

    if (reads_vd) {
        vfp_load_reg32(fd, vd);
    }
    fn(fd, f0, f1, fpst);
    vfp_store_reg32(fd, vd);

    tcg_temp_free_i32(f0);
    tcg_temp_free_i32(f1);
    tcg_temp_free_i32(fd);
    tcg_temp_free_ptr(fpst);

    return true;
}

static bool do_vfp_3op_dp(DisasContext *s, VFPGen3OpDPFn *fn,
                          int vd, int vn, int vm, bool reads_vd)
{
    uint32_t delta_m = 0;
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i64 f0, f1, fd;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_simd_r32, s) && ((vd | vn | vm) & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        /* Figure out what type of vector operation this is.  */
        if (vfp_dreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = (s->vec_stride >> 1) + 1;

            if (vfp_dreg_is_scalar(vm)) {
                /* mixed scalar/vector */
                delta_m = 0;
            } else {
                /* vector */
                delta_m = delta_d;
            }
        }
    }

    f0 = tcg_temp_new_i64();
    f1 = tcg_temp_new_i64();
    fd = tcg_temp_new_i64();
    fpst = fpstatus_ptr(FPST_FPCR);

    vfp_load_reg64(f0, vn);
    vfp_load_reg64(f1, vm);

    for (;;) {
        if (reads_vd) {
            vfp_load_reg64(fd, vd);
        }
        fn(fd, f0, f1, fpst);
        vfp_store_reg64(fd, vd);

        if (veclen == 0) {
            break;
        }
        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_dreg(vd, delta_d);
        vn = vfp_advance_dreg(vn, delta_d);
        vfp_load_reg64(f0, vn);
        if (delta_m) {
            vm = vfp_advance_dreg(vm, delta_m);
            vfp_load_reg64(f1, vm);
        }
    }

    tcg_temp_free_i64(f0);
    tcg_temp_free_i64(f1);
    tcg_temp_free_i64(fd);
    tcg_temp_free_ptr(fpst);

    return true;
}

static bool do_vfp_2op_sp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm)
{
    uint32_t delta_m = 0;
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i32 f0, fd;

    /* Note that the caller must check the aa32_fpsp_v2 feature. */

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        /* Figure out what type of vector operation this is.  */
        if (vfp_sreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = s->vec_stride + 1;

            if (vfp_sreg_is_scalar(vm)) {
                /* mixed scalar/vector */
                delta_m = 0;
            } else {
                /* vector */
                delta_m = delta_d;
            }
        }
    }

    f0 = tcg_temp_new_i32();
    fd = tcg_temp_new_i32();

    vfp_load_reg32(f0, vm);

    for (;;) {
        fn(fd, f0);
        vfp_store_reg32(fd, vd);

        if (veclen == 0) {
            break;
        }

        if (delta_m == 0) {
            /* single source one-many */
            while (veclen--) {
                vd = vfp_advance_sreg(vd, delta_d);
                vfp_store_reg32(fd, vd);
            }
            break;
        }

        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_sreg(vd, delta_d);
        vm = vfp_advance_sreg(vm, delta_m);
        vfp_load_reg32(f0, vm);
    }

    tcg_temp_free_i32(f0);
    tcg_temp_free_i32(fd);

    return true;
}

static bool do_vfp_2op_hp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm)
{
    /*
     * Do a half-precision operation. Functionally this is
     * the same as do_vfp_2op_sp(), except:
     *  - it doesn't need the VFP vector handling (fp16 is a
     *    v8 feature, and in v8 VFP vectors don't exist)
     *  - it does the aa32_fp16_arith feature test
     */
    TCGv_i32 f0;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (s->vec_len != 0 || s->vec_stride != 0) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    f0 = tcg_temp_new_i32();
    vfp_load_reg32(f0, vm);
    fn(f0, f0);
    vfp_store_reg32(f0, vd);
    tcg_temp_free_i32(f0);

    return true;
}

static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm)
{
    uint32_t delta_m = 0;
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i64 f0, fd;

    /* Note that the caller must check the aa32_fpdp_v2 feature. */

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_simd_r32, s) && ((vd | vm) & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        /* Figure out what type of vector operation this is.  */
        if (vfp_dreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = (s->vec_stride >> 1) + 1;

            if (vfp_dreg_is_scalar(vm)) {
                /* mixed scalar/vector */
                delta_m = 0;
            } else {
                /* vector */
                delta_m = delta_d;
            }
        }
    }

    f0 = tcg_temp_new_i64();
    fd = tcg_temp_new_i64();

    vfp_load_reg64(f0, vm);

    for (;;) {
        fn(fd, f0);
        vfp_store_reg64(fd, vd);

        if (veclen == 0) {
            break;
        }

        if (delta_m == 0) {
            /* single source one-many */
            while (veclen--) {
                vd = vfp_advance_dreg(vd, delta_d);
                vfp_store_reg64(fd, vd);
            }
            break;
        }

        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_dreg(vd, delta_d);
        vm = vfp_advance_dreg(vm, delta_m);
1799        vfp_load_reg64(f0, vm);
1800    }
1801
1802    tcg_temp_free_i64(f0);
1803    tcg_temp_free_i64(fd);
1804
1805    return true;
1806}
1807
1808static void gen_VMLA_hp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
1809{
1810    /* Note that order of inputs to the add matters for NaNs */
1811    TCGv_i32 tmp = tcg_temp_new_i32();
1812
1813    gen_helper_vfp_mulh(tmp, vn, vm, fpst);
1814    gen_helper_vfp_addh(vd, vd, tmp, fpst);
1815    tcg_temp_free_i32(tmp);
1816}
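
/*
 * A worked sketch of the NaN-ordering note above: FPProcessNaNs() picks
 * the first signalling NaN, else the first quiet NaN, in operand order,
 * and the architected operation is FPAdd(fd, fn * fm).  So when both fd
 * and the product are (different) quiet NaNs, the add must see fd as
 * operand one, i.e. addh(vd, vd, tmp); swapping the add's operands
 * would return the product's NaN payload instead.
 */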
1817
1818static bool trans_VMLA_hp(DisasContext *s, arg_VMLA_sp *a)
1819{
1820    return do_vfp_3op_hp(s, gen_VMLA_hp, a->vd, a->vn, a->vm, true);
1821}
1822
1823static void gen_VMLA_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
1824{
1825    /* Note that order of inputs to the add matters for NaNs */
1826    TCGv_i32 tmp = tcg_temp_new_i32();
1827
1828    gen_helper_vfp_muls(tmp, vn, vm, fpst);
1829    gen_helper_vfp_adds(vd, vd, tmp, fpst);
1830    tcg_temp_free_i32(tmp);
1831}
1832
1833static bool trans_VMLA_sp(DisasContext *s, arg_VMLA_sp *a)
1834{
1835    return do_vfp_3op_sp(s, gen_VMLA_sp, a->vd, a->vn, a->vm, true);
1836}
1837
1838static void gen_VMLA_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
1839{
1840    /* Note that order of inputs to the add matters for NaNs */
1841    TCGv_i64 tmp = tcg_temp_new_i64();
1842
1843    gen_helper_vfp_muld(tmp, vn, vm, fpst);
1844    gen_helper_vfp_addd(vd, vd, tmp, fpst);
1845    tcg_temp_free_i64(tmp);
1846}
1847
1848static bool trans_VMLA_dp(DisasContext *s, arg_VMLA_dp *a)
1849{
1850    return do_vfp_3op_dp(s, gen_VMLA_dp, a->vd, a->vn, a->vm, true);
1851}
1852
1853static void gen_VMLS_hp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
1854{
1855    /*
1856     * VMLS: vd = vd + -(vn * vm)
1857     * Note that order of inputs to the add matters for NaNs.
1858     */
1859    TCGv_i32 tmp = tcg_temp_new_i32();
1860
1861    gen_helper_vfp_mulh(tmp, vn, vm, fpst);
1862    gen_helper_vfp_negh(tmp, tmp);
1863    gen_helper_vfp_addh(vd, vd, tmp, fpst);
1864    tcg_temp_free_i32(tmp);
1865}
1866
1867static bool trans_VMLS_hp(DisasContext *s, arg_VMLS_sp *a)
1868{
1869    return do_vfp_3op_hp(s, gen_VMLS_hp, a->vd, a->vn, a->vm, true);
1870}
1871
1872static void gen_VMLS_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
1873{
1874    /*
1875     * VMLS: vd = vd + -(vn * vm)
1876     * Note that order of inputs to the add matters for NaNs.
1877     */
1878    TCGv_i32 tmp = tcg_temp_new_i32();
1879
1880    gen_helper_vfp_muls(tmp, vn, vm, fpst);
1881    gen_helper_vfp_negs(tmp, tmp);
1882    gen_helper_vfp_adds(vd, vd, tmp, fpst);
1883    tcg_temp_free_i32(tmp);
1884}
1885
1886static bool trans_VMLS_sp(DisasContext *s, arg_VMLS_sp *a)
1887{
1888    return do_vfp_3op_sp(s, gen_VMLS_sp, a->vd, a->vn, a->vm, true);
1889}
1890
1891static void gen_VMLS_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
1892{
1893    /*
1894     * VMLS: vd = vd + -(vn * vm)
1895     * Note that order of inputs to the add matters for NaNs.
1896     */
1897    TCGv_i64 tmp = tcg_temp_new_i64();
1898
1899    gen_helper_vfp_muld(tmp, vn, vm, fpst);
1900    gen_helper_vfp_negd(tmp, tmp);
1901    gen_helper_vfp_addd(vd, vd, tmp, fpst);
1902    tcg_temp_free_i64(tmp);
1903}
1904
1905static bool trans_VMLS_dp(DisasContext *s, arg_VMLS_dp *a)
1906{
1907    return do_vfp_3op_dp(s, gen_VMLS_dp, a->vd, a->vn, a->vm, true);
1908}
1909
1910static void gen_VNMLS_hp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
1911{
1912    /*
1913     * VNMLS: -fd + (fn * fm)
1914     * Note that it isn't valid to replace (-A + B) with (B - A) or similar
1915     * plausible-looking simplifications because this will give wrong results
1916     * for NaNs.
1917     */
1918    TCGv_i32 tmp = tcg_temp_new_i32();
1919
1920    gen_helper_vfp_mulh(tmp, vn, vm, fpst);
1921    gen_helper_vfp_negh(vd, vd);
1922    gen_helper_vfp_addh(vd, vd, tmp, fpst);
1923    tcg_temp_free_i32(tmp);
1924}
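
/*
 * Sketch of why the "plausible" rewrite breaks: FPNeg() is a pure
 * sign-bit flip and is applied even to NaN inputs, so for fd = QNaN the
 * architected VNMLS result is that NaN with its sign inverted.
 * Computing (fn * fm) - fd instead would hand back the NaN with its
 * original sign, and it also swaps which operand the NaN-propagation
 * rule considers first.
 */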
1925
1926static bool trans_VNMLS_hp(DisasContext *s, arg_VNMLS_sp *a)
1927{
1928    return do_vfp_3op_hp(s, gen_VNMLS_hp, a->vd, a->vn, a->vm, true);
1929}
1930
1931static void gen_VNMLS_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
1932{
1933    /*
1934     * VNMLS: -fd + (fn * fm)
1935     * Note that it isn't valid to replace (-A + B) with (B - A) or similar
1936     * plausible-looking simplifications because this will give wrong results
1937     * for NaNs.
1938     */
1939    TCGv_i32 tmp = tcg_temp_new_i32();
1940
1941    gen_helper_vfp_muls(tmp, vn, vm, fpst);
1942    gen_helper_vfp_negs(vd, vd);
1943    gen_helper_vfp_adds(vd, vd, tmp, fpst);
1944    tcg_temp_free_i32(tmp);
1945}
1946
1947static bool trans_VNMLS_sp(DisasContext *s, arg_VNMLS_sp *a)
1948{
1949    return do_vfp_3op_sp(s, gen_VNMLS_sp, a->vd, a->vn, a->vm, true);
1950}
1951
1952static void gen_VNMLS_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
1953{
1954    /*
1955     * VNMLS: -fd + (fn * fm)
1956     * Note that it isn't valid to replace (-A + B) with (B - A) or similar
1957     * plausible-looking simplifications because this will give wrong results
1958     * for NaNs.
1959     */
1960    TCGv_i64 tmp = tcg_temp_new_i64();
1961
1962    gen_helper_vfp_muld(tmp, vn, vm, fpst);
1963    gen_helper_vfp_negd(vd, vd);
1964    gen_helper_vfp_addd(vd, vd, tmp, fpst);
1965    tcg_temp_free_i64(tmp);
1966}
1967
1968static bool trans_VNMLS_dp(DisasContext *s, arg_VNMLS_dp *a)
1969{
1970    return do_vfp_3op_dp(s, gen_VNMLS_dp, a->vd, a->vn, a->vm, true);
1971}
1972
1973static void gen_VNMLA_hp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
1974{
1975    /* VNMLA: -fd + -(fn * fm) */
1976    TCGv_i32 tmp = tcg_temp_new_i32();
1977
1978    gen_helper_vfp_mulh(tmp, vn, vm, fpst);
1979    gen_helper_vfp_negh(tmp, tmp);
1980    gen_helper_vfp_negh(vd, vd);
1981    gen_helper_vfp_addh(vd, vd, tmp, fpst);
1982    tcg_temp_free_i32(tmp);
1983}
1984
1985static bool trans_VNMLA_hp(DisasContext *s, arg_VNMLA_sp *a)
1986{
1987    return do_vfp_3op_hp(s, gen_VNMLA_hp, a->vd, a->vn, a->vm, true);
1988}
1989
1990static void gen_VNMLA_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
1991{
1992    /* VNMLA: -fd + -(fn * fm) */
1993    TCGv_i32 tmp = tcg_temp_new_i32();
1994
1995    gen_helper_vfp_muls(tmp, vn, vm, fpst);
1996    gen_helper_vfp_negs(tmp, tmp);
1997    gen_helper_vfp_negs(vd, vd);
1998    gen_helper_vfp_adds(vd, vd, tmp, fpst);
1999    tcg_temp_free_i32(tmp);
2000}
2001
2002static bool trans_VNMLA_sp(DisasContext *s, arg_VNMLA_sp *a)
2003{
2004    return do_vfp_3op_sp(s, gen_VNMLA_sp, a->vd, a->vn, a->vm, true);
2005}
2006
2007static void gen_VNMLA_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
2008{
2009    /* VNMLA: -fd + -(fn * fm) */
2010    TCGv_i64 tmp = tcg_temp_new_i64();
2011
2012    gen_helper_vfp_muld(tmp, vn, vm, fpst);
2013    gen_helper_vfp_negd(tmp, tmp);
2014    gen_helper_vfp_negd(vd, vd);
2015    gen_helper_vfp_addd(vd, vd, tmp, fpst);
2016    tcg_temp_free_i64(tmp);
2017}
2018
2019static bool trans_VNMLA_dp(DisasContext *s, arg_VNMLA_dp *a)
2020{
2021    return do_vfp_3op_dp(s, gen_VNMLA_dp, a->vd, a->vn, a->vm, true);
2022}
2023
2024static bool trans_VMUL_hp(DisasContext *s, arg_VMUL_sp *a)
2025{
2026    return do_vfp_3op_hp(s, gen_helper_vfp_mulh, a->vd, a->vn, a->vm, false);
2027}
2028
2029static bool trans_VMUL_sp(DisasContext *s, arg_VMUL_sp *a)
2030{
2031    return do_vfp_3op_sp(s, gen_helper_vfp_muls, a->vd, a->vn, a->vm, false);
2032}
2033
2034static bool trans_VMUL_dp(DisasContext *s, arg_VMUL_dp *a)
2035{
2036    return do_vfp_3op_dp(s, gen_helper_vfp_muld, a->vd, a->vn, a->vm, false);
2037}
2038
2039static void gen_VNMUL_hp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
2040{
2041    /* VNMUL: -(fn * fm) */
2042    gen_helper_vfp_mulh(vd, vn, vm, fpst);
2043    gen_helper_vfp_negh(vd, vd);
2044}
2045
2046static bool trans_VNMUL_hp(DisasContext *s, arg_VNMUL_sp *a)
2047{
2048    return do_vfp_3op_hp(s, gen_VNMUL_hp, a->vd, a->vn, a->vm, false);
2049}
2050
2051static void gen_VNMUL_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
2052{
2053    /* VNMUL: -(fn * fm) */
2054    gen_helper_vfp_muls(vd, vn, vm, fpst);
2055    gen_helper_vfp_negs(vd, vd);
2056}
2057
2058static bool trans_VNMUL_sp(DisasContext *s, arg_VNMUL_sp *a)
2059{
2060    return do_vfp_3op_sp(s, gen_VNMUL_sp, a->vd, a->vn, a->vm, false);
2061}
2062
2063static void gen_VNMUL_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
2064{
2065    /* VNMUL: -(fn * fm) */
2066    gen_helper_vfp_muld(vd, vn, vm, fpst);
2067    gen_helper_vfp_negd(vd, vd);
2068}
2069
2070static bool trans_VNMUL_dp(DisasContext *s, arg_VNMUL_dp *a)
2071{
2072    return do_vfp_3op_dp(s, gen_VNMUL_dp, a->vd, a->vn, a->vm, false);
2073}
2074
2075static bool trans_VADD_hp(DisasContext *s, arg_VADD_sp *a)
2076{
2077    return do_vfp_3op_hp(s, gen_helper_vfp_addh, a->vd, a->vn, a->vm, false);
2078}
2079
2080static bool trans_VADD_sp(DisasContext *s, arg_VADD_sp *a)
2081{
2082    return do_vfp_3op_sp(s, gen_helper_vfp_adds, a->vd, a->vn, a->vm, false);
2083}
2084
2085static bool trans_VADD_dp(DisasContext *s, arg_VADD_dp *a)
2086{
2087    return do_vfp_3op_dp(s, gen_helper_vfp_addd, a->vd, a->vn, a->vm, false);
2088}
2089
2090static bool trans_VSUB_hp(DisasContext *s, arg_VSUB_sp *a)
2091{
2092    return do_vfp_3op_hp(s, gen_helper_vfp_subh, a->vd, a->vn, a->vm, false);
2093}
2094
2095static bool trans_VSUB_sp(DisasContext *s, arg_VSUB_sp *a)
2096{
2097    return do_vfp_3op_sp(s, gen_helper_vfp_subs, a->vd, a->vn, a->vm, false);
2098}
2099
2100static bool trans_VSUB_dp(DisasContext *s, arg_VSUB_dp *a)
2101{
2102    return do_vfp_3op_dp(s, gen_helper_vfp_subd, a->vd, a->vn, a->vm, false);
2103}
2104
2105static bool trans_VDIV_hp(DisasContext *s, arg_VDIV_sp *a)
2106{
2107    return do_vfp_3op_hp(s, gen_helper_vfp_divh, a->vd, a->vn, a->vm, false);
2108}
2109
2110static bool trans_VDIV_sp(DisasContext *s, arg_VDIV_sp *a)
2111{
2112    return do_vfp_3op_sp(s, gen_helper_vfp_divs, a->vd, a->vn, a->vm, false);
2113}
2114
2115static bool trans_VDIV_dp(DisasContext *s, arg_VDIV_dp *a)
2116{
2117    return do_vfp_3op_dp(s, gen_helper_vfp_divd, a->vd, a->vn, a->vm, false);
2118}
2119
2120static bool trans_VMINNM_hp(DisasContext *s, arg_VMINNM_sp *a)
2121{
2122    if (!dc_isar_feature(aa32_vminmaxnm, s)) {
2123        return false;
2124    }
2125    return do_vfp_3op_hp(s, gen_helper_vfp_minnumh,
2126                         a->vd, a->vn, a->vm, false);
2127}
2128
2129static bool trans_VMAXNM_hp(DisasContext *s, arg_VMAXNM_sp *a)
2130{
2131    if (!dc_isar_feature(aa32_vminmaxnm, s)) {
2132        return false;
2133    }
2134    return do_vfp_3op_hp(s, gen_helper_vfp_maxnumh,
2135                         a->vd, a->vn, a->vm, false);
2136}
2137
2138static bool trans_VMINNM_sp(DisasContext *s, arg_VMINNM_sp *a)
2139{
2140    if (!dc_isar_feature(aa32_vminmaxnm, s)) {
2141        return false;
2142    }
2143    return do_vfp_3op_sp(s, gen_helper_vfp_minnums,
2144                         a->vd, a->vn, a->vm, false);
2145}
2146
2147static bool trans_VMAXNM_sp(DisasContext *s, arg_VMAXNM_sp *a)
2148{
2149    if (!dc_isar_feature(aa32_vminmaxnm, s)) {
2150        return false;
2151    }
2152    return do_vfp_3op_sp(s, gen_helper_vfp_maxnums,
2153                         a->vd, a->vn, a->vm, false);
2154}
2155
2156static bool trans_VMINNM_dp(DisasContext *s, arg_VMINNM_dp *a)
2157{
2158    if (!dc_isar_feature(aa32_vminmaxnm, s)) {
2159        return false;
2160    }
2161    return do_vfp_3op_dp(s, gen_helper_vfp_minnumd,
2162                         a->vd, a->vn, a->vm, false);
2163}
2164
2165static bool trans_VMAXNM_dp(DisasContext *s, arg_VMAXNM_dp *a)
2166{
2167    if (!dc_isar_feature(aa32_vminmaxnm, s)) {
2168        return false;
2169    }
2170    return do_vfp_3op_dp(s, gen_helper_vfp_maxnumd,
2171                         a->vd, a->vn, a->vm, false);
2172}
2173
2174static bool do_vfm_hp(DisasContext *s, arg_VFMA_sp *a, bool neg_n, bool neg_d)
2175{
2176    /*
2177     * VFNMA : fd = muladd(-fd,  fn, fm)
2178     * VFNMS : fd = muladd(-fd, -fn, fm)
2179     * VFMA  : fd = muladd( fd,  fn, fm)
2180     * VFMS  : fd = muladd( fd, -fn, fm)
2181     *
2182     * These are fused multiply-add, and must be done as one floating
2183     * point operation with no rounding between the multiplication and
2184     * addition steps.  NB that doing the negations here as separate
2185     * steps is correct: an input NaN should come out with its sign
2186     * bit flipped if it is a negated input.
2187     */
2188    TCGv_ptr fpst;
2189    TCGv_i32 vn, vm, vd;
2190
2191    /*
2192     * Present in VFPv4 only, and only with the FP16 extension.
2193     * Note that we can't rely on the SIMDFMAC check alone, because
2194     * in a Neon-no-VFP core that ID register field will be non-zero.
2195     */
2196    if (!dc_isar_feature(aa32_fp16_arith, s) ||
2197        !dc_isar_feature(aa32_simdfmac, s) ||
2198        !dc_isar_feature(aa32_fpsp_v2, s)) {
2199        return false;
2200    }
2201
2202    if (s->vec_len != 0 || s->vec_stride != 0) {
2203        return false;
2204    }
2205
2206    if (!vfp_access_check(s)) {
2207        return true;
2208    }
2209
2210    vn = tcg_temp_new_i32();
2211    vm = tcg_temp_new_i32();
2212    vd = tcg_temp_new_i32();
2213
2214    vfp_load_reg32(vn, a->vn);
2215    vfp_load_reg32(vm, a->vm);
2216    if (neg_n) {
2217        /* VFNMS, VFMS */
2218        gen_helper_vfp_negh(vn, vn);
2219    }
2220    vfp_load_reg32(vd, a->vd);
2221    if (neg_d) {
2222        /* VFNMA, VFNMS */
2223        gen_helper_vfp_negh(vd, vd);
2224    }
2225    fpst = fpstatus_ptr(FPST_FPCR_F16);
2226    gen_helper_vfp_muladdh(vd, vn, vm, vd, fpst);
2227    vfp_store_reg32(vd, a->vd);
2228
2229    tcg_temp_free_ptr(fpst);
2230    tcg_temp_free_i32(vn);
2231    tcg_temp_free_i32(vm);
2232    tcg_temp_free_i32(vd);
2233
2234    return true;
2235}
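
/*
 * Worked example of the single-rounding requirement (binary32
 * arithmetic): let a = 1 + 2^-23.  The exact square a * a is
 * 1 + 2^-22 + 2^-46, which a separate VMUL rounds to 1 + 2^-22, so
 * VMUL followed by VADD of -(1 + 2^-22) returns 0.  The fused muladd
 * keeps the unrounded product and returns 2^-46.  This is why the
 * helpers must not round between the multiply and the add.
 */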
2236
2237static bool do_vfm_sp(DisasContext *s, arg_VFMA_sp *a, bool neg_n, bool neg_d)
2238{
2239    /*
2240     * VFNMA : fd = muladd(-fd,  fn, fm)
2241     * VFNMS : fd = muladd(-fd, -fn, fm)
2242     * VFMA  : fd = muladd( fd,  fn, fm)
2243     * VFMS  : fd = muladd( fd, -fn, fm)
2244     *
2245     * These are fused multiply-add, and must be done as one floating
2246     * point operation with no rounding between the multiplication and
2247     * addition steps.  NB that doing the negations here as separate
2248     * steps is correct: an input NaN should come out with its sign
2249     * bit flipped if it is a negated input.
2250     */
2251    TCGv_ptr fpst;
2252    TCGv_i32 vn, vm, vd;
2253
2254    /*
2255     * Present in VFPv4 only.
2256     * Note that we can't rely on the SIMDFMAC check alone, because
2257     * in a Neon-no-VFP core that ID register field will be non-zero.
2258     */
2259    if (!dc_isar_feature(aa32_simdfmac, s) ||
2260        !dc_isar_feature(aa32_fpsp_v2, s)) {
2261        return false;
2262    }
2263    /*
2264     * In v7A, UNPREDICTABLE with non-zero vector length/stride; from
2265     * v8A, must UNDEF. We choose to UNDEF for both v7A and v8A.
2266     */
2267    if (s->vec_len != 0 || s->vec_stride != 0) {
2268        return false;
2269    }
2270
2271    if (!vfp_access_check(s)) {
2272        return true;
2273    }
2274
2275    vn = tcg_temp_new_i32();
2276    vm = tcg_temp_new_i32();
2277    vd = tcg_temp_new_i32();
2278
2279    vfp_load_reg32(vn, a->vn);
2280    vfp_load_reg32(vm, a->vm);
2281    if (neg_n) {
2282        /* VFNMS, VFMS */
2283        gen_helper_vfp_negs(vn, vn);
2284    }
2285    vfp_load_reg32(vd, a->vd);
2286    if (neg_d) {
2287        /* VFNMA, VFNMS */
2288        gen_helper_vfp_negs(vd, vd);
2289    }
2290    fpst = fpstatus_ptr(FPST_FPCR);
2291    gen_helper_vfp_muladds(vd, vn, vm, vd, fpst);
2292    vfp_store_reg32(vd, a->vd);
2293
2294    tcg_temp_free_ptr(fpst);
2295    tcg_temp_free_i32(vn);
2296    tcg_temp_free_i32(vm);
2297    tcg_temp_free_i32(vd);
2298
2299    return true;
2300}
2301
2302static bool do_vfm_dp(DisasContext *s, arg_VFMA_dp *a, bool neg_n, bool neg_d)
2303{
2304    /*
2305     * VFNMA : fd = muladd(-fd,  fn, fm)
2306     * VFNMS : fd = muladd(-fd, -fn, fm)
2307     * VFMA  : fd = muladd( fd,  fn, fm)
2308     * VFMS  : fd = muladd( fd, -fn, fm)
2309     *
2310     * These are fused multiply-add, and must be done as one floating
2311     * point operation with no rounding between the multiplication and
2312     * addition steps.  NB that doing the negations here as separate
2313     * steps is correct: an input NaN should come out with its sign
2314     * bit flipped if it is a negated input.
2315     */
2316    TCGv_ptr fpst;
2317    TCGv_i64 vn, vm, vd;
2318
2319    /*
2320     * Present in VFPv4 only.
2321     * Note that we can't rely on the SIMDFMAC check alone, because
2322     * in a Neon-no-VFP core that ID register field will be non-zero.
2323     */
2324    if (!dc_isar_feature(aa32_simdfmac, s) ||
2325        !dc_isar_feature(aa32_fpdp_v2, s)) {
2326        return false;
2327    }
2328    /*
2329     * In v7A, UNPREDICTABLE with non-zero vector length/stride; from
2330     * v8A, must UNDEF. We choose to UNDEF for both v7A and v8A.
2331     */
2332    if (s->vec_len != 0 || s->vec_stride != 0) {
2333        return false;
2334    }
2335
2336    /* UNDEF accesses to D16-D31 if they don't exist. */
2337    if (!dc_isar_feature(aa32_simd_r32, s) &&
2338        ((a->vd | a->vn | a->vm) & 0x10)) {
2339        return false;
2340    }
2341
2342    if (!vfp_access_check(s)) {
2343        return true;
2344    }
2345
2346    vn = tcg_temp_new_i64();
2347    vm = tcg_temp_new_i64();
2348    vd = tcg_temp_new_i64();
2349
2350    vfp_load_reg64(vn, a->vn);
2351    vfp_load_reg64(vm, a->vm);
2352    if (neg_n) {
2353        /* VFNMS, VFMS */
2354        gen_helper_vfp_negd(vn, vn);
2355    }
2356    vfp_load_reg64(vd, a->vd);
2357    if (neg_d) {
2358        /* VFNMA, VFNMS */
2359        gen_helper_vfp_negd(vd, vd);
2360    }
2361    fpst = fpstatus_ptr(FPST_FPCR);
2362    gen_helper_vfp_muladdd(vd, vn, vm, vd, fpst);
2363    vfp_store_reg64(vd, a->vd);
2364
2365    tcg_temp_free_ptr(fpst);
2366    tcg_temp_free_i64(vn);
2367    tcg_temp_free_i64(vm);
2368    tcg_temp_free_i64(vd);
2369
2370    return true;
2371}
2372
2373#define MAKE_ONE_VFM_TRANS_FN(INSN, PREC, NEGN, NEGD)                   \
2374    static bool trans_##INSN##_##PREC(DisasContext *s,                  \
2375                                      arg_##INSN##_##PREC *a)           \
2376    {                                                                   \
2377        return do_vfm_##PREC(s, a, NEGN, NEGD);                         \
2378    }
2379
2380#define MAKE_VFM_TRANS_FNS(PREC) \
2381    MAKE_ONE_VFM_TRANS_FN(VFMA, PREC, false, false) \
2382    MAKE_ONE_VFM_TRANS_FN(VFMS, PREC, true, false) \
2383    MAKE_ONE_VFM_TRANS_FN(VFNMA, PREC, false, true) \
2384    MAKE_ONE_VFM_TRANS_FN(VFNMS, PREC, true, true)
2385
2386MAKE_VFM_TRANS_FNS(hp)
2387MAKE_VFM_TRANS_FNS(sp)
2388MAKE_VFM_TRANS_FNS(dp)
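
/*
 * For reference, MAKE_ONE_VFM_TRANS_FN(VFMA, hp, false, false)
 * expands to:
 *
 *     static bool trans_VFMA_hp(DisasContext *s, arg_VFMA_hp *a)
 *     {
 *         return do_vfm_hp(s, a, false, false);
 *     }
 */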
2389
2390static bool trans_VMOV_imm_hp(DisasContext *s, arg_VMOV_imm_sp *a)
2391{
2392    if (!dc_isar_feature(aa32_fp16_arith, s)) {
2393        return false;
2394    }
2395
2396    if (s->vec_len != 0 || s->vec_stride != 0) {
2397        return false;
2398    }
2399
2400    if (!vfp_access_check(s)) {
2401        return true;
2402    }
2403
2404    vfp_store_reg32(tcg_constant_i32(vfp_expand_imm(MO_16, a->imm)), a->vd);
2405    return true;
2406}
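
/*
 * Worked example: imm8 = 0x70 has sign 0, bit 6 set and mantissa bits
 * 0b110000, so vfp_expand_imm(MO_16, 0x70) yields
 * 0x3000 | (0x30 << 6) = 0x3c00, the binary16 encoding of 1.0.
 */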
2407
2408static bool trans_VMOV_imm_sp(DisasContext *s, arg_VMOV_imm_sp *a)
2409{
2410    uint32_t delta_d = 0;
2411    int veclen = s->vec_len;
2412    TCGv_i32 fd;
2413    uint32_t vd;
2414
2415    vd = a->vd;
2416
2417    if (!dc_isar_feature(aa32_fpsp_v3, s)) {
2418        return false;
2419    }
2420
2421    if (!dc_isar_feature(aa32_fpshvec, s) &&
2422        (veclen != 0 || s->vec_stride != 0)) {
2423        return false;
2424    }
2425
2426    if (!vfp_access_check(s)) {
2427        return true;
2428    }
2429
2430    if (veclen > 0) {
2431        /* Figure out what type of vector operation this is.  */
2432        if (vfp_sreg_is_scalar(vd)) {
2433            /* scalar */
2434            veclen = 0;
2435        } else {
2436            delta_d = s->vec_stride + 1;
2437        }
2438    }
2439
2440    fd = tcg_constant_i32(vfp_expand_imm(MO_32, a->imm));
2441
2442    for (;;) {
2443        vfp_store_reg32(fd, vd);
2444
2445        if (veclen == 0) {
2446            break;
2447        }
2448
2449        /* Set up the operands for the next iteration */
2450        veclen--;
2451        vd = vfp_advance_sreg(vd, delta_d);
2452    }
2453
2454    return true;
2455}
2456
2457static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a)
2458{
2459    uint32_t delta_d = 0;
2460    int veclen = s->vec_len;
2461    TCGv_i64 fd;
2462    uint32_t vd;
2463
2464    vd = a->vd;
2465
2466    if (!dc_isar_feature(aa32_fpdp_v3, s)) {
2467        return false;
2468    }
2469
2470    /* UNDEF accesses to D16-D31 if they don't exist. */
2471    if (!dc_isar_feature(aa32_simd_r32, s) && (vd & 0x10)) {
2472        return false;
2473    }
2474
2475    if (!dc_isar_feature(aa32_fpshvec, s) &&
2476        (veclen != 0 || s->vec_stride != 0)) {
2477        return false;
2478    }
2479
2480    if (!vfp_access_check(s)) {
2481        return true;
2482    }
2483
2484    if (veclen > 0) {
2485        /* Figure out what type of vector operation this is.  */
2486        if (vfp_dreg_is_scalar(vd)) {
2487            /* scalar */
2488            veclen = 0;
2489        } else {
2490            delta_d = (s->vec_stride >> 1) + 1;
2491        }
2492    }
2493
2494    fd = tcg_constant_i64(vfp_expand_imm(MO_64, a->imm));
2495
2496    for (;;) {
2497        vfp_store_reg64(fd, vd);
2498
2499        if (veclen == 0) {
2500            break;
2501        }
2502
2503        /* Set up the operands for the next iteration */
2504        veclen--;
2505        vd = vfp_advance_dreg(vd, delta_d);
2506    }
2507
2508    return true;
2509}
2510
2511#define DO_VFP_2OP(INSN, PREC, FN, CHECK)                       \
2512    static bool trans_##INSN##_##PREC(DisasContext *s,          \
2513                                      arg_##INSN##_##PREC *a)   \
2514    {                                                           \
2515        if (!dc_isar_feature(CHECK, s)) {                       \
2516            return false;                                       \
2517        }                                                       \
2518        return do_vfp_2op_##PREC(s, FN, a->vd, a->vm);          \
2519    }
2520
2521#define DO_VFP_VMOV(INSN, PREC, FN)                             \
2522    static bool trans_##INSN##_##PREC(DisasContext *s,          \
2523                                      arg_##INSN##_##PREC *a)   \
2524    {                                                           \
2525        if (!dc_isar_feature(aa32_fp##PREC##_v2, s) &&          \
2526            !dc_isar_feature(aa32_mve, s)) {                    \
2527            return false;                                       \
2528        }                                                       \
2529        return do_vfp_2op_##PREC(s, FN, a->vd, a->vm);          \
2530    }
2531
2532DO_VFP_VMOV(VMOV_reg, sp, tcg_gen_mov_i32)
2533DO_VFP_VMOV(VMOV_reg, dp, tcg_gen_mov_i64)
2534
2535DO_VFP_2OP(VABS, hp, gen_helper_vfp_absh, aa32_fp16_arith)
2536DO_VFP_2OP(VABS, sp, gen_helper_vfp_abss, aa32_fpsp_v2)
2537DO_VFP_2OP(VABS, dp, gen_helper_vfp_absd, aa32_fpdp_v2)
2538
2539DO_VFP_2OP(VNEG, hp, gen_helper_vfp_negh, aa32_fp16_arith)
2540DO_VFP_2OP(VNEG, sp, gen_helper_vfp_negs, aa32_fpsp_v2)
2541DO_VFP_2OP(VNEG, dp, gen_helper_vfp_negd, aa32_fpdp_v2)
2542
2543static void gen_VSQRT_hp(TCGv_i32 vd, TCGv_i32 vm)
2544{
2545    gen_helper_vfp_sqrth(vd, vm, cpu_env);
2546}
2547
2548static void gen_VSQRT_sp(TCGv_i32 vd, TCGv_i32 vm)
2549{
2550    gen_helper_vfp_sqrts(vd, vm, cpu_env);
2551}
2552
2553static void gen_VSQRT_dp(TCGv_i64 vd, TCGv_i64 vm)
2554{
2555    gen_helper_vfp_sqrtd(vd, vm, cpu_env);
2556}
2557
2558DO_VFP_2OP(VSQRT, hp, gen_VSQRT_hp, aa32_fp16_arith)
2559DO_VFP_2OP(VSQRT, sp, gen_VSQRT_sp, aa32_fpsp_v2)
2560DO_VFP_2OP(VSQRT, dp, gen_VSQRT_dp, aa32_fpdp_v2)
2561
2562static bool trans_VCMP_hp(DisasContext *s, arg_VCMP_sp *a)
2563{
2564    TCGv_i32 vd, vm;
2565
2566    if (!dc_isar_feature(aa32_fp16_arith, s)) {
2567        return false;
2568    }
2569
2570    /* Vm/M bits must be zero for the Z variant */
2571    if (a->z && a->vm != 0) {
2572        return false;
2573    }
2574
2575    if (!vfp_access_check(s)) {
2576        return true;
2577    }
2578
2579    vd = tcg_temp_new_i32();
2580    vm = tcg_temp_new_i32();
2581
2582    vfp_load_reg32(vd, a->vd);
2583    if (a->z) {
2584        tcg_gen_movi_i32(vm, 0);
2585    } else {
2586        vfp_load_reg32(vm, a->vm);
2587    }
2588
2589    if (a->e) {
2590        gen_helper_vfp_cmpeh(vd, vm, cpu_env);
2591    } else {
2592        gen_helper_vfp_cmph(vd, vm, cpu_env);
2593    }
2594
2595    tcg_temp_free_i32(vd);
2596    tcg_temp_free_i32(vm);
2597
2598    return true;
2599}
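
/*
 * The 'e' bit selects the VCMPE form, which signals Invalid Operation
 * for any NaN input; plain VCMP raises it only for signalling NaNs.
 * Either way the helper writes the comparison result into the FPSCR
 * NZCV flags rather than into a core register.
 */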
2600
2601static bool trans_VCMP_sp(DisasContext *s, arg_VCMP_sp *a)
2602{
2603    TCGv_i32 vd, vm;
2604
2605    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
2606        return false;
2607    }
2608
2609    /* Vm/M bits must be zero for the Z variant */
2610    if (a->z && a->vm != 0) {
2611        return false;
2612    }
2613
2614    if (!vfp_access_check(s)) {
2615        return true;
2616    }
2617
2618    vd = tcg_temp_new_i32();
2619    vm = tcg_temp_new_i32();
2620
2621    vfp_load_reg32(vd, a->vd);
2622    if (a->z) {
2623        tcg_gen_movi_i32(vm, 0);
2624    } else {
2625        vfp_load_reg32(vm, a->vm);
2626    }
2627
2628    if (a->e) {
2629        gen_helper_vfp_cmpes(vd, vm, cpu_env);
2630    } else {
2631        gen_helper_vfp_cmps(vd, vm, cpu_env);
2632    }
2633
2634    tcg_temp_free_i32(vd);
2635    tcg_temp_free_i32(vm);
2636
2637    return true;
2638}
2639
2640static bool trans_VCMP_dp(DisasContext *s, arg_VCMP_dp *a)
2641{
2642    TCGv_i64 vd, vm;
2643
2644    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
2645        return false;
2646    }
2647
2648    /* Vm/M bits must be zero for the Z variant */
2649    if (a->z && a->vm != 0) {
2650        return false;
2651    }
2652
2653    /* UNDEF accesses to D16-D31 if they don't exist. */
2654    if (!dc_isar_feature(aa32_simd_r32, s) && ((a->vd | a->vm) & 0x10)) {
2655        return false;
2656    }
2657
2658    if (!vfp_access_check(s)) {
2659        return true;
2660    }
2661
2662    vd = tcg_temp_new_i64();
2663    vm = tcg_temp_new_i64();
2664
2665    vfp_load_reg64(vd, a->vd);
2666    if (a->z) {
2667        tcg_gen_movi_i64(vm, 0);
2668    } else {
2669        vfp_load_reg64(vm, a->vm);
2670    }
2671
2672    if (a->e) {
2673        gen_helper_vfp_cmped(vd, vm, cpu_env);
2674    } else {
2675        gen_helper_vfp_cmpd(vd, vm, cpu_env);
2676    }
2677
2678    tcg_temp_free_i64(vd);
2679    tcg_temp_free_i64(vm);
2680
2681    return true;
2682}
2683
2684static bool trans_VCVT_f32_f16(DisasContext *s, arg_VCVT_f32_f16 *a)
2685{
2686    TCGv_ptr fpst;
2687    TCGv_i32 ahp_mode;
2688    TCGv_i32 tmp;
2689
2690    if (!dc_isar_feature(aa32_fp16_spconv, s)) {
2691        return false;
2692    }
2693
2694    if (!vfp_access_check(s)) {
2695        return true;
2696    }
2697
2698    fpst = fpstatus_ptr(FPST_FPCR);
2699    ahp_mode = get_ahp_flag();
2700    tmp = tcg_temp_new_i32();
2701    /* The T bit tells us if we want the low or high 16 bits of Vm */
2702    tcg_gen_ld16u_i32(tmp, cpu_env, vfp_f16_offset(a->vm, a->t));
2703    gen_helper_vfp_fcvt_f16_to_f32(tmp, tmp, fpst, ahp_mode);
2704    vfp_store_reg32(tmp, a->vd);
2705    tcg_temp_free_i32(ahp_mode);
2706    tcg_temp_free_ptr(fpst);
2707    tcg_temp_free_i32(tmp);
2708    return true;
2709}
2710
2711static bool trans_VCVT_f64_f16(DisasContext *s, arg_VCVT_f64_f16 *a)
2712{
2713    TCGv_ptr fpst;
2714    TCGv_i32 ahp_mode;
2715    TCGv_i32 tmp;
2716    TCGv_i64 vd;
2717
2718    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
2719        return false;
2720    }
2721
2722    if (!dc_isar_feature(aa32_fp16_dpconv, s)) {
2723        return false;
2724    }
2725
2726    /* UNDEF accesses to D16-D31 if they don't exist. */
2727    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
2728        return false;
2729    }
2730
2731    if (!vfp_access_check(s)) {
2732        return true;
2733    }
2734
2735    fpst = fpstatus_ptr(FPST_FPCR);
2736    ahp_mode = get_ahp_flag();
2737    tmp = tcg_temp_new_i32();
2738    /* The T bit tells us if we want the low or high 16 bits of Vm */
2739    tcg_gen_ld16u_i32(tmp, cpu_env, vfp_f16_offset(a->vm, a->t));
2740    vd = tcg_temp_new_i64();
2741    gen_helper_vfp_fcvt_f16_to_f64(vd, tmp, fpst, ahp_mode);
2742    vfp_store_reg64(vd, a->vd);
2743    tcg_temp_free_i32(ahp_mode);
2744    tcg_temp_free_ptr(fpst);
2745    tcg_temp_free_i32(tmp);
2746    tcg_temp_free_i64(vd);
2747    return true;
2748}
2749
2750static bool trans_VCVT_b16_f32(DisasContext *s, arg_VCVT_b16_f32 *a)
2751{
2752    TCGv_ptr fpst;
2753    TCGv_i32 tmp;
2754
2755    if (!dc_isar_feature(aa32_bf16, s)) {
2756        return false;
2757    }
2758
2759    if (!vfp_access_check(s)) {
2760        return true;
2761    }
2762
2763    fpst = fpstatus_ptr(FPST_FPCR);
2764    tmp = tcg_temp_new_i32();
2765
2766    vfp_load_reg32(tmp, a->vm);
2767    gen_helper_bfcvt(tmp, tmp, fpst);
2768    tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t));
2769    tcg_temp_free_ptr(fpst);
2770    tcg_temp_free_i32(tmp);
2771    return true;
2772}
2773
2774static bool trans_VCVT_f16_f32(DisasContext *s, arg_VCVT_f16_f32 *a)
2775{
2776    TCGv_ptr fpst;
2777    TCGv_i32 ahp_mode;
2778    TCGv_i32 tmp;
2779
2780    if (!dc_isar_feature(aa32_fp16_spconv, s)) {
2781        return false;
2782    }
2783
2784    if (!vfp_access_check(s)) {
2785        return true;
2786    }
2787
2788    fpst = fpstatus_ptr(FPST_FPCR);
2789    ahp_mode = get_ahp_flag();
2790    tmp = tcg_temp_new_i32();
2791
2792    vfp_load_reg32(tmp, a->vm);
2793    gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp_mode);
2794    tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t));
2795    tcg_temp_free_i32(ahp_mode);
2796    tcg_temp_free_ptr(fpst);
2797    tcg_temp_free_i32(tmp);
2798    return true;
2799}
2800
2801static bool trans_VCVT_f16_f64(DisasContext *s, arg_VCVT_f16_f64 *a)
2802{
2803    TCGv_ptr fpst;
2804    TCGv_i32 ahp_mode;
2805    TCGv_i32 tmp;
2806    TCGv_i64 vm;
2807
2808    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
2809        return false;
2810    }
2811
2812    if (!dc_isar_feature(aa32_fp16_dpconv, s)) {
2813        return false;
2814    }
2815
2816    /* UNDEF accesses to D16-D31 if they don't exist. */
2817    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
2818        return false;
2819    }
2820
2821    if (!vfp_access_check(s)) {
2822        return true;
2823    }
2824
2825    fpst = fpstatus_ptr(FPST_FPCR);
2826    ahp_mode = get_ahp_flag();
2827    tmp = tcg_temp_new_i32();
2828    vm = tcg_temp_new_i64();
2829
2830    vfp_load_reg64(vm, a->vm);
2831    gen_helper_vfp_fcvt_f64_to_f16(tmp, vm, fpst, ahp_mode);
2832    tcg_temp_free_i64(vm);
2833    tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t));
2834    tcg_temp_free_i32(ahp_mode);
2835    tcg_temp_free_ptr(fpst);
2836    tcg_temp_free_i32(tmp);
2837    return true;
2838}
2839
2840static bool trans_VRINTR_hp(DisasContext *s, arg_VRINTR_sp *a)
2841{
2842    TCGv_ptr fpst;
2843    TCGv_i32 tmp;
2844
2845    if (!dc_isar_feature(aa32_fp16_arith, s)) {
2846        return false;
2847    }
2848
2849    if (!vfp_access_check(s)) {
2850        return true;
2851    }
2852
2853    tmp = tcg_temp_new_i32();
2854    vfp_load_reg32(tmp, a->vm);
2855    fpst = fpstatus_ptr(FPST_FPCR_F16);
2856    gen_helper_rinth(tmp, tmp, fpst);
2857    vfp_store_reg32(tmp, a->vd);
2858    tcg_temp_free_ptr(fpst);
2859    tcg_temp_free_i32(tmp);
2860    return true;
2861}
2862
2863static bool trans_VRINTR_sp(DisasContext *s, arg_VRINTR_sp *a)
2864{
2865    TCGv_ptr fpst;
2866    TCGv_i32 tmp;
2867
2868    if (!dc_isar_feature(aa32_vrint, s)) {
2869        return false;
2870    }
2871
2872    if (!vfp_access_check(s)) {
2873        return true;
2874    }
2875
2876    tmp = tcg_temp_new_i32();
2877    vfp_load_reg32(tmp, a->vm);
2878    fpst = fpstatus_ptr(FPST_FPCR);
2879    gen_helper_rints(tmp, tmp, fpst);
2880    vfp_store_reg32(tmp, a->vd);
2881    tcg_temp_free_ptr(fpst);
2882    tcg_temp_free_i32(tmp);
2883    return true;
2884}
2885
2886static bool trans_VRINTR_dp(DisasContext *s, arg_VRINTR_dp *a)
2887{
2888    TCGv_ptr fpst;
2889    TCGv_i64 tmp;
2890
2891    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
2892        return false;
2893    }
2894
2895    if (!dc_isar_feature(aa32_vrint, s)) {
2896        return false;
2897    }
2898
2899    /* UNDEF accesses to D16-D31 if they don't exist. */
2900    if (!dc_isar_feature(aa32_simd_r32, s) && ((a->vd | a->vm) & 0x10)) {
2901        return false;
2902    }
2903
2904    if (!vfp_access_check(s)) {
2905        return true;
2906    }
2907
2908    tmp = tcg_temp_new_i64();
2909    vfp_load_reg64(tmp, a->vm);
2910    fpst = fpstatus_ptr(FPST_FPCR);
2911    gen_helper_rintd(tmp, tmp, fpst);
2912    vfp_store_reg64(tmp, a->vd);
2913    tcg_temp_free_ptr(fpst);
2914    tcg_temp_free_i64(tmp);
2915    return true;
2916}
2917
2918static bool trans_VRINTZ_hp(DisasContext *s, arg_VRINTZ_sp *a)
2919{
2920    TCGv_ptr fpst;
2921    TCGv_i32 tmp;
2922    TCGv_i32 tcg_rmode;
2923
2924    if (!dc_isar_feature(aa32_fp16_arith, s)) {
2925        return false;
2926    }
2927
2928    if (!vfp_access_check(s)) {
2929        return true;
2930    }
2931
2932    tmp = tcg_temp_new_i32();
2933    vfp_load_reg32(tmp, a->vm);
2934    fpst = fpstatus_ptr(FPST_FPCR_F16);
2935    tcg_rmode = tcg_const_i32(float_round_to_zero);
2936    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
2937    gen_helper_rinth(tmp, tmp, fpst);
2938    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
2939    vfp_store_reg32(tmp, a->vd);
2940    tcg_temp_free_ptr(fpst);
2941    tcg_temp_free_i32(tcg_rmode);
2942    tcg_temp_free_i32(tmp);
2943    return true;
2944}
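
/*
 * gen_helper_set_rmode() installs the requested rounding mode and
 * returns the previous one in its destination, so the pattern above
 * forces round-to-zero for the rint and then the second call restores
 * whatever mode the guest had configured.
 */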
2945
2946static bool trans_VRINTZ_sp(DisasContext *s, arg_VRINTZ_sp *a)
2947{
2948    TCGv_ptr fpst;
2949    TCGv_i32 tmp;
2950    TCGv_i32 tcg_rmode;
2951
2952    if (!dc_isar_feature(aa32_vrint, s)) {
2953        return false;
2954    }
2955
2956    if (!vfp_access_check(s)) {
2957        return true;
2958    }
2959
2960    tmp = tcg_temp_new_i32();
2961    vfp_load_reg32(tmp, a->vm);
2962    fpst = fpstatus_ptr(FPST_FPCR);
2963    tcg_rmode = tcg_const_i32(float_round_to_zero);
2964    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
2965    gen_helper_rints(tmp, tmp, fpst);
2966    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
2967    vfp_store_reg32(tmp, a->vd);
2968    tcg_temp_free_ptr(fpst);
2969    tcg_temp_free_i32(tcg_rmode);
2970    tcg_temp_free_i32(tmp);
2971    return true;
2972}
2973
2974static bool trans_VRINTZ_dp(DisasContext *s, arg_VRINTZ_dp *a)
2975{
2976    TCGv_ptr fpst;
2977    TCGv_i64 tmp;
2978    TCGv_i32 tcg_rmode;
2979
2980    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
2981        return false;
2982    }
2983
2984    if (!dc_isar_feature(aa32_vrint, s)) {
2985        return false;
2986    }
2987
2988    /* UNDEF accesses to D16-D31 if they don't exist. */
2989    if (!dc_isar_feature(aa32_simd_r32, s) && ((a->vd | a->vm) & 0x10)) {
2990        return false;
2991    }
2992
2993    if (!vfp_access_check(s)) {
2994        return true;
2995    }
2996
2997    tmp = tcg_temp_new_i64();
2998    vfp_load_reg64(tmp, a->vm);
2999    fpst = fpstatus_ptr(FPST_FPCR);
3000    tcg_rmode = tcg_const_i32(float_round_to_zero);
3001    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
3002    gen_helper_rintd(tmp, tmp, fpst);
3003    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
3004    vfp_store_reg64(tmp, a->vd);
3005    tcg_temp_free_ptr(fpst);
3006    tcg_temp_free_i64(tmp);
3007    tcg_temp_free_i32(tcg_rmode);
3008    return true;
3009}
3010
3011static bool trans_VRINTX_hp(DisasContext *s, arg_VRINTX_sp *a)
3012{
3013    TCGv_ptr fpst;
3014    TCGv_i32 tmp;
3015
3016    if (!dc_isar_feature(aa32_fp16_arith, s)) {
3017        return false;
3018    }
3019
3020    if (!vfp_access_check(s)) {
3021        return true;
3022    }
3023
3024    tmp = tcg_temp_new_i32();
3025    vfp_load_reg32(tmp, a->vm);
3026    fpst = fpstatus_ptr(FPST_FPCR_F16);
3027    gen_helper_rinth_exact(tmp, tmp, fpst);
3028    vfp_store_reg32(tmp, a->vd);
3029    tcg_temp_free_ptr(fpst);
3030    tcg_temp_free_i32(tmp);
3031    return true;
3032}
3033
3034static bool trans_VRINTX_sp(DisasContext *s, arg_VRINTX_sp *a)
3035{
3036    TCGv_ptr fpst;
3037    TCGv_i32 tmp;
3038
3039    if (!dc_isar_feature(aa32_vrint, s)) {
3040        return false;
3041    }
3042
3043    if (!vfp_access_check(s)) {
3044        return true;
3045    }
3046
3047    tmp = tcg_temp_new_i32();
3048    vfp_load_reg32(tmp, a->vm);
3049    fpst = fpstatus_ptr(FPST_FPCR);
3050    gen_helper_rints_exact(tmp, tmp, fpst);
3051    vfp_store_reg32(tmp, a->vd);
3052    tcg_temp_free_ptr(fpst);
3053    tcg_temp_free_i32(tmp);
3054    return true;
3055}
3056
3057static bool trans_VRINTX_dp(DisasContext *s, arg_VRINTX_dp *a)
3058{
3059    TCGv_ptr fpst;
3060    TCGv_i64 tmp;
3061
3062    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
3063        return false;
3064    }
3065
3066    if (!dc_isar_feature(aa32_vrint, s)) {
3067        return false;
3068    }
3069
3070    /* UNDEF accesses to D16-D31 if they don't exist. */
3071    if (!dc_isar_feature(aa32_simd_r32, s) && ((a->vd | a->vm) & 0x10)) {
3072        return false;
3073    }
3074
3075    if (!vfp_access_check(s)) {
3076        return true;
3077    }
3078
3079    tmp = tcg_temp_new_i64();
3080    vfp_load_reg64(tmp, a->vm);
3081    fpst = fpstatus_ptr(FPST_FPCR);
3082    gen_helper_rintd_exact(tmp, tmp, fpst);
3083    vfp_store_reg64(tmp, a->vd);
3084    tcg_temp_free_ptr(fpst);
3085    tcg_temp_free_i64(tmp);
3086    return true;
3087}
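
/*
 * Note that in the next two functions the suffix names the source
 * format: trans_VCVT_sp widens a single-precision Vm into a
 * double-precision Vd, while trans_VCVT_dp narrows a double Vm into a
 * single-precision Vd.
 */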
3088
3089static bool trans_VCVT_sp(DisasContext *s, arg_VCVT_sp *a)
3090{
3091    TCGv_i64 vd;
3092    TCGv_i32 vm;
3093
3094    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
3095        return false;
3096    }
3097
3098    /* UNDEF accesses to D16-D31 if they don't exist. */
3099    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
3100        return false;
3101    }
3102
3103    if (!vfp_access_check(s)) {
3104        return true;
3105    }
3106
3107    vm = tcg_temp_new_i32();
3108    vd = tcg_temp_new_i64();
3109    vfp_load_reg32(vm, a->vm);
3110    gen_helper_vfp_fcvtds(vd, vm, cpu_env);
3111    vfp_store_reg64(vd, a->vd);
3112    tcg_temp_free_i32(vm);
3113    tcg_temp_free_i64(vd);
3114    return true;
3115}
3116
3117static bool trans_VCVT_dp(DisasContext *s, arg_VCVT_dp *a)
3118{
3119    TCGv_i64 vm;
3120    TCGv_i32 vd;
3121
3122    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
3123        return false;
3124    }
3125
3126    /* UNDEF accesses to D16-D31 if they don't exist. */
3127    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
3128        return false;
3129    }
3130
3131    if (!vfp_access_check(s)) {
3132        return true;
3133    }
3134
3135    vd = tcg_temp_new_i32();
3136    vm = tcg_temp_new_i64();
3137    vfp_load_reg64(vm, a->vm);
3138    gen_helper_vfp_fcvtsd(vd, vm, cpu_env);
3139    vfp_store_reg32(vd, a->vd);
3140    tcg_temp_free_i32(vd);
3141    tcg_temp_free_i64(vm);
3142    return true;
3143}
3144
3145static bool trans_VCVT_int_hp(DisasContext *s, arg_VCVT_int_sp *a)
3146{
3147    TCGv_i32 vm;
3148    TCGv_ptr fpst;
3149
3150    if (!dc_isar_feature(aa32_fp16_arith, s)) {
3151        return false;
3152    }
3153
3154    if (!vfp_access_check(s)) {
3155        return true;
3156    }
3157
3158    vm = tcg_temp_new_i32();
3159    vfp_load_reg32(vm, a->vm);
3160    fpst = fpstatus_ptr(FPST_FPCR_F16);
3161    if (a->s) {
3162        /* i32 -> f16 */
3163        gen_helper_vfp_sitoh(vm, vm, fpst);
3164    } else {
3165        /* u32 -> f16 */
3166        gen_helper_vfp_uitoh(vm, vm, fpst);
3167    }
3168    vfp_store_reg32(vm, a->vd);
3169    tcg_temp_free_i32(vm);
3170    tcg_temp_free_ptr(fpst);
3171    return true;
3172}
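
/*
 * Helper-naming sketch for the conversions in this area: "sitoh" and
 * "uitoh" are signed/unsigned i32 to f16.  The float-to-int helpers
 * "tosih"/"touih" round per the current FPSCR mode, while the
 * "tosizh"/"touizh" variants used further down force
 * round-toward-zero (the trailing 'z').
 */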
3173
3174static bool trans_VCVT_int_sp(DisasContext *s, arg_VCVT_int_sp *a)
3175{
3176    TCGv_i32 vm;
3177    TCGv_ptr fpst;
3178
3179    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
3180        return false;
3181    }
3182
3183    if (!vfp_access_check(s)) {
3184        return true;
3185    }
3186
3187    vm = tcg_temp_new_i32();
3188    vfp_load_reg32(vm, a->vm);
3189    fpst = fpstatus_ptr(FPST_FPCR);
3190    if (a->s) {
3191        /* i32 -> f32 */
3192        gen_helper_vfp_sitos(vm, vm, fpst);
3193    } else {
3194        /* u32 -> f32 */
3195        gen_helper_vfp_uitos(vm, vm, fpst);
3196    }
3197    vfp_store_reg32(vm, a->vd);
3198    tcg_temp_free_i32(vm);
3199    tcg_temp_free_ptr(fpst);
3200    return true;
3201}
3202
3203static bool trans_VCVT_int_dp(DisasContext *s, arg_VCVT_int_dp *a)
3204{
3205    TCGv_i32 vm;
3206    TCGv_i64 vd;
3207    TCGv_ptr fpst;
3208
3209    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
3210        return false;
3211    }
3212
3213    /* UNDEF accesses to D16-D31 if they don't exist. */
3214    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
3215        return false;
3216    }
3217
3218    if (!vfp_access_check(s)) {
3219        return true;
3220    }
3221
3222    vm = tcg_temp_new_i32();
3223    vd = tcg_temp_new_i64();
3224    vfp_load_reg32(vm, a->vm);
3225    fpst = fpstatus_ptr(FPST_FPCR);
3226    if (a->s) {
3227        /* i32 -> f64 */
3228        gen_helper_vfp_sitod(vd, vm, fpst);
3229    } else {
3230        /* u32 -> f64 */
3231        gen_helper_vfp_uitod(vd, vm, fpst);
3232    }
3233    vfp_store_reg64(vd, a->vd);
3234    tcg_temp_free_i32(vm);
3235    tcg_temp_free_i64(vd);
3236    tcg_temp_free_ptr(fpst);
3237    return true;
3238}
3239
3240static bool trans_VJCVT(DisasContext *s, arg_VJCVT *a)
3241{
3242    TCGv_i32 vd;
3243    TCGv_i64 vm;
3244
3245    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
3246        return false;
3247    }
3248
3249    if (!dc_isar_feature(aa32_jscvt, s)) {
3250        return false;
3251    }
3252
3253    /* UNDEF accesses to D16-D31 if they don't exist. */
3254    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
3255        return false;
3256    }
3257
3258    if (!vfp_access_check(s)) {
3259        return true;
3260    }
3261
3262    vm = tcg_temp_new_i64();
3263    vd = tcg_temp_new_i32();
3264    vfp_load_reg64(vm, a->vm);
3265    gen_helper_vjcvt(vd, vm, cpu_env);
3266    vfp_store_reg32(vd, a->vd);
3267    tcg_temp_free_i64(vm);
3268    tcg_temp_free_i32(vd);
3269    return true;
3270}
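
/*
 * Sketch of the architected behaviour implemented by the helper:
 * VJCVT is the JavaScript-style conversion, truncating the f64 input
 * toward zero and keeping only the low 32 bits of the integer result,
 * so e.g. 2^32 + 3.9 converts to 3; the FPSCR Z flag is set when the
 * result was exact and in range.
 */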
3271
3272static bool trans_VCVT_fix_hp(DisasContext *s, arg_VCVT_fix_sp *a)
3273{
3274    TCGv_i32 vd, shift;
3275    TCGv_ptr fpst;
3276    int frac_bits;
3277
3278    if (!dc_isar_feature(aa32_fp16_arith, s)) {
3279        return false;
3280    }
3281
3282    if (!vfp_access_check(s)) {
3283        return true;
3284    }
3285
3286    frac_bits = (a->opc & 1) ? (32 - a->imm) : (16 - a->imm);
3287
3288    vd = tcg_temp_new_i32();
3289    vfp_load_reg32(vd, a->vd);
3290
3291    fpst = fpstatus_ptr(FPST_FPCR_F16);
3292    shift = tcg_constant_i32(frac_bits);
3293
3294    /* Switch on op:U:sx bits */
3295    switch (a->opc) {
3296    case 0:
3297        gen_helper_vfp_shtoh_round_to_nearest(vd, vd, shift, fpst);
3298        break;
3299    case 1:
3300        gen_helper_vfp_sltoh_round_to_nearest(vd, vd, shift, fpst);
3301        break;
3302    case 2:
3303        gen_helper_vfp_uhtoh_round_to_nearest(vd, vd, shift, fpst);
3304        break;
3305    case 3:
3306        gen_helper_vfp_ultoh_round_to_nearest(vd, vd, shift, fpst);
3307        break;
3308    case 4:
3309        gen_helper_vfp_toshh_round_to_zero(vd, vd, shift, fpst);
3310        break;
3311    case 5:
3312        gen_helper_vfp_toslh_round_to_zero(vd, vd, shift, fpst);
3313        break;
3314    case 6:
3315        gen_helper_vfp_touhh_round_to_zero(vd, vd, shift, fpst);
3316        break;
3317    case 7:
3318        gen_helper_vfp_toulh_round_to_zero(vd, vd, shift, fpst);
3319        break;
3320    default:
3321        g_assert_not_reached();
3322    }
3323
3324    vfp_store_reg32(vd, a->vd);
3325    tcg_temp_free_i32(vd);
3326    tcg_temp_free_ptr(fpst);
3327    return true;
3328}
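
/*
 * Worked example for the fixed-point encodings above: opc == 0 is
 * "signed 16-bit fixed to f16", so with a->imm = 8 we get
 * frac_bits = 16 - 8 = 8 and the register value 0x0180 (384) is
 * interpreted as 384 / 2^8 = 1.5 before conversion.
 */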
3329
3330static bool trans_VCVT_fix_sp(DisasContext *s, arg_VCVT_fix_sp *a)
3331{
3332    TCGv_i32 vd, shift;
3333    TCGv_ptr fpst;
3334    int frac_bits;
3335
3336    if (!dc_isar_feature(aa32_fpsp_v3, s)) {
3337        return false;
3338    }
3339
3340    if (!vfp_access_check(s)) {
3341        return true;
3342    }
3343
3344    frac_bits = (a->opc & 1) ? (32 - a->imm) : (16 - a->imm);
3345
3346    vd = tcg_temp_new_i32();
3347    vfp_load_reg32(vd, a->vd);
3348
3349    fpst = fpstatus_ptr(FPST_FPCR);
3350    shift = tcg_constant_i32(frac_bits);
3351
3352    /* Switch on op:U:sx bits */
3353    switch (a->opc) {
3354    case 0:
3355        gen_helper_vfp_shtos_round_to_nearest(vd, vd, shift, fpst);
3356        break;
3357    case 1:
3358        gen_helper_vfp_sltos_round_to_nearest(vd, vd, shift, fpst);
3359        break;
3360    case 2:
3361        gen_helper_vfp_uhtos_round_to_nearest(vd, vd, shift, fpst);
3362        break;
3363    case 3:
3364        gen_helper_vfp_ultos_round_to_nearest(vd, vd, shift, fpst);
3365        break;
3366    case 4:
3367        gen_helper_vfp_toshs_round_to_zero(vd, vd, shift, fpst);
3368        break;
3369    case 5:
3370        gen_helper_vfp_tosls_round_to_zero(vd, vd, shift, fpst);
3371        break;
3372    case 6:
3373        gen_helper_vfp_touhs_round_to_zero(vd, vd, shift, fpst);
3374        break;
3375    case 7:
3376        gen_helper_vfp_touls_round_to_zero(vd, vd, shift, fpst);
3377        break;
3378    default:
3379        g_assert_not_reached();
3380    }
3381
3382    vfp_store_reg32(vd, a->vd);
3383    tcg_temp_free_i32(vd);
3384    tcg_temp_free_ptr(fpst);
3385    return true;
3386}
3387
3388static bool trans_VCVT_fix_dp(DisasContext *s, arg_VCVT_fix_dp *a)
3389{
3390    TCGv_i64 vd;
3391    TCGv_i32 shift;
3392    TCGv_ptr fpst;
3393    int frac_bits;
3394
3395    if (!dc_isar_feature(aa32_fpdp_v3, s)) {
3396        return false;
3397    }
3398
3399    /* UNDEF accesses to D16-D31 if they don't exist. */
3400    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
3401        return false;
3402    }
3403
3404    if (!vfp_access_check(s)) {
3405        return true;
3406    }
3407
3408    frac_bits = (a->opc & 1) ? (32 - a->imm) : (16 - a->imm);
3409
3410    vd = tcg_temp_new_i64();
3411    vfp_load_reg64(vd, a->vd);
3412
3413    fpst = fpstatus_ptr(FPST_FPCR);
3414    shift = tcg_constant_i32(frac_bits);
3415
3416    /* Switch on op:U:sx bits */
3417    switch (a->opc) {
3418    case 0:
3419        gen_helper_vfp_shtod_round_to_nearest(vd, vd, shift, fpst);
3420        break;
3421    case 1:
3422        gen_helper_vfp_sltod_round_to_nearest(vd, vd, shift, fpst);
3423        break;
3424    case 2:
3425        gen_helper_vfp_uhtod_round_to_nearest(vd, vd, shift, fpst);
3426        break;
3427    case 3:
3428        gen_helper_vfp_ultod_round_to_nearest(vd, vd, shift, fpst);
3429        break;
3430    case 4:
3431        gen_helper_vfp_toshd_round_to_zero(vd, vd, shift, fpst);
3432        break;
3433    case 5:
3434        gen_helper_vfp_tosld_round_to_zero(vd, vd, shift, fpst);
3435        break;
3436    case 6:
3437        gen_helper_vfp_touhd_round_to_zero(vd, vd, shift, fpst);
3438        break;
3439    case 7:
3440        gen_helper_vfp_tould_round_to_zero(vd, vd, shift, fpst);
3441        break;
3442    default:
3443        g_assert_not_reached();
3444    }
3445
3446    vfp_store_reg64(vd, a->vd);
3447    tcg_temp_free_i64(vd);
3448    tcg_temp_free_ptr(fpst);
3449    return true;
3450}
3451
3452static bool trans_VCVT_hp_int(DisasContext *s, arg_VCVT_sp_int *a)
3453{
3454    TCGv_i32 vm;
3455    TCGv_ptr fpst;
3456
3457    if (!dc_isar_feature(aa32_fp16_arith, s)) {
3458        return false;
3459    }
3460
3461    if (!vfp_access_check(s)) {
3462        return true;
3463    }
3464
3465    fpst = fpstatus_ptr(FPST_FPCR_F16);
3466    vm = tcg_temp_new_i32();
3467    vfp_load_reg32(vm, a->vm);
3468
3469    if (a->s) {
3470        if (a->rz) {
3471            gen_helper_vfp_tosizh(vm, vm, fpst);
3472        } else {
3473            gen_helper_vfp_tosih(vm, vm, fpst);
3474        }
3475    } else {
3476        if (a->rz) {
3477            gen_helper_vfp_touizh(vm, vm, fpst);
3478        } else {
3479            gen_helper_vfp_touih(vm, vm, fpst);
3480        }
3481    }
3482    vfp_store_reg32(vm, a->vd);
3483    tcg_temp_free_i32(vm);
3484    tcg_temp_free_ptr(fpst);
3485    return true;
3486}
3487
3488static bool trans_VCVT_sp_int(DisasContext *s, arg_VCVT_sp_int *a)
3489{
3490    TCGv_i32 vm;
3491    TCGv_ptr fpst;
3492
3493    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
3494        return false;
3495    }
3496
3497    if (!vfp_access_check(s)) {
3498        return true;
3499    }
3500
3501    fpst = fpstatus_ptr(FPST_FPCR);
3502    vm = tcg_temp_new_i32();
3503    vfp_load_reg32(vm, a->vm);
3504
3505    if (a->s) {
3506        if (a->rz) {
3507            gen_helper_vfp_tosizs(vm, vm, fpst);
3508        } else {
3509            gen_helper_vfp_tosis(vm, vm, fpst);
3510        }
3511    } else {
3512        if (a->rz) {
3513            gen_helper_vfp_touizs(vm, vm, fpst);
3514        } else {
3515            gen_helper_vfp_touis(vm, vm, fpst);
3516        }
3517    }
3518    vfp_store_reg32(vm, a->vd);
3519    tcg_temp_free_i32(vm);
3520    tcg_temp_free_ptr(fpst);
3521    return true;
3522}
3523
3524static bool trans_VCVT_dp_int(DisasContext *s, arg_VCVT_dp_int *a)
3525{
3526    TCGv_i32 vd;
3527    TCGv_i64 vm;
3528    TCGv_ptr fpst;
3529
3530    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
3531        return false;
3532    }
3533
3534    /* UNDEF accesses to D16-D31 if they don't exist. */
3535    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
3536        return false;
3537    }
3538
3539    if (!vfp_access_check(s)) {
3540        return true;
3541    }
3542
3543    fpst = fpstatus_ptr(FPST_FPCR);
3544    vm = tcg_temp_new_i64();
3545    vd = tcg_temp_new_i32();
3546    vfp_load_reg64(vm, a->vm);
3547
3548    if (a->s) {
3549        if (a->rz) {
3550            gen_helper_vfp_tosizd(vd, vm, fpst);
3551        } else {
3552            gen_helper_vfp_tosid(vd, vm, fpst);
3553        }
3554    } else {
3555        if (a->rz) {
3556            gen_helper_vfp_touizd(vd, vm, fpst);
3557        } else {
3558            gen_helper_vfp_touid(vd, vm, fpst);
3559        }
3560    }
3561    vfp_store_reg32(vd, a->vd);
3562    tcg_temp_free_i32(vd);
3563    tcg_temp_free_i64(vm);
3564    tcg_temp_free_ptr(fpst);
3565    return true;
3566}
3567
3568static bool trans_VINS(DisasContext *s, arg_VINS *a)
3569{
3570    TCGv_i32 rd, rm;
3571
3572    if (!dc_isar_feature(aa32_fp16_arith, s)) {
3573        return false;
3574    }
3575
3576    if (s->vec_len != 0 || s->vec_stride != 0) {
3577        return false;
3578    }
3579
3580    if (!vfp_access_check(s)) {
3581        return true;
3582    }
3583
3584    /* Insert low half of Vm into high half of Vd */
3585    rm = tcg_temp_new_i32();
3586    rd = tcg_temp_new_i32();
3587    vfp_load_reg32(rm, a->vm);
3588    vfp_load_reg32(rd, a->vd);
3589    tcg_gen_deposit_i32(rd, rd, rm, 16, 16);
3590    vfp_store_reg32(rd, a->vd);
3591    tcg_temp_free_i32(rm);
3592    tcg_temp_free_i32(rd);
3593    return true;
3594}
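
/*
 * tcg_gen_deposit_i32(rd, rd, rm, 16, 16) computes
 * rd = (rd & 0x0000ffff) | ((rm & 0xffff) << 16), i.e. Vm's low
 * half-word replaces Vd's top half-word while Vd's low half-word is
 * preserved, matching the VINS pseudocode.
 */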
3595
3596static bool trans_VMOVX(DisasContext *s, arg_VINS *a)
3597{
3598    TCGv_i32 rm;
3599
3600    if (!dc_isar_feature(aa32_fp16_arith, s)) {
3601        return false;
3602    }
3603
3604    if (s->vec_len != 0 || s->vec_stride != 0) {
3605        return false;
3606    }
3607
3608    if (!vfp_access_check(s)) {
3609        return true;
3610    }
3611
3612    /* Set Vd to high half of Vm */
3613    rm = tcg_temp_new_i32();
3614    vfp_load_reg32(rm, a->vm);
3615    tcg_gen_shri_i32(rm, rm, 16);
3616    vfp_store_reg32(rm, a->vd);
3617    tcg_temp_free_i32(rm);
3618    return true;
3619}
3620