/* qemu/target/arm/translate-a64.c */
/*
 *  AArch64 translation
 *
 *  Copyright (c) 2013 Alexander Graf <agraf@suse.de>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
  19#include "qemu/osdep.h"
  20
  21#include "cpu.h"
  22#include "exec/exec-all.h"
  23#include "tcg/tcg-op.h"
  24#include "tcg/tcg-op-gvec.h"
  25#include "qemu/log.h"
  26#include "arm_ldst.h"
  27#include "translate.h"
  28#include "internals.h"
  29#include "qemu/host-utils.h"
  30#include "semihosting/semihost.h"
  31#include "exec/gen-icount.h"
  32#include "exec/helper-proto.h"
  33#include "exec/helper-gen.h"
  34#include "exec/log.h"
  35#include "cpregs.h"
  36#include "translate-a64.h"
  37#include "qemu/atomic128.h"
  38
  39static TCGv_i64 cpu_X[32];
  40static TCGv_i64 cpu_pc;
  41
  42/* Load/store exclusive handling */
  43static TCGv_i64 cpu_exclusive_high;
  44
  45static const char *regnames[] = {
  46    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
  47    "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
  48    "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23",
  49    "x24", "x25", "x26", "x27", "x28", "x29", "lr", "sp"
  50};
  51
  52enum a64_shift_type {
  53    A64_SHIFT_TYPE_LSL = 0,
  54    A64_SHIFT_TYPE_LSR = 1,
  55    A64_SHIFT_TYPE_ASR = 2,
  56    A64_SHIFT_TYPE_ROR = 3
  57};
  58
  59/* Table based decoder typedefs - used when the relevant bits for decode
  60 * are too awkwardly scattered across the instruction (eg SIMD).
  61 */
  62typedef void AArch64DecodeFn(DisasContext *s, uint32_t insn);
  63
  64typedef struct AArch64DecodeTable {
  65    uint32_t pattern;
  66    uint32_t mask;
  67    AArch64DecodeFn *disas_fn;
  68} AArch64DecodeTable;
  69
  70/* initialize TCG globals.  */
  71void a64_translate_init(void)
  72{
  73    int i;
  74
  75    cpu_pc = tcg_global_mem_new_i64(cpu_env,
  76                                    offsetof(CPUARMState, pc),
  77                                    "pc");
  78    for (i = 0; i < 32; i++) {
  79        cpu_X[i] = tcg_global_mem_new_i64(cpu_env,
  80                                          offsetof(CPUARMState, xregs[i]),
  81                                          regnames[i]);
  82    }
  83
  84    cpu_exclusive_high = tcg_global_mem_new_i64(cpu_env,
  85        offsetof(CPUARMState, exclusive_high), "exclusive_high");
  86}
  87
  88/*
  89 * Return the core mmu_idx to use for A64 "unprivileged load/store" insns
  90 */
  91static int get_a64_user_mem_index(DisasContext *s)
  92{
  93    /*
  94     * If AccType_UNPRIV is not used, the insn uses AccType_NORMAL,
  95     * which is the usual mmu_idx for this cpu state.
  96     */
  97    ARMMMUIdx useridx = s->mmu_idx;
  98
  99    if (s->unpriv) {
 100        /*
 101         * We have pre-computed the condition for AccType_UNPRIV.
 102         * Therefore we should never get here with a mmu_idx for
 103         * which we do not know the corresponding user mmu_idx.
 104         */
 105        switch (useridx) {
 106        case ARMMMUIdx_E10_1:
 107        case ARMMMUIdx_E10_1_PAN:
 108            useridx = ARMMMUIdx_E10_0;
 109            break;
 110        case ARMMMUIdx_E20_2:
 111        case ARMMMUIdx_E20_2_PAN:
 112            useridx = ARMMMUIdx_E20_0;
 113            break;
 114        case ARMMMUIdx_SE10_1:
 115        case ARMMMUIdx_SE10_1_PAN:
 116            useridx = ARMMMUIdx_SE10_0;
 117            break;
 118        case ARMMMUIdx_SE20_2:
 119        case ARMMMUIdx_SE20_2_PAN:
 120            useridx = ARMMMUIdx_SE20_0;
 121            break;
 122        default:
 123            g_assert_not_reached();
 124        }
 125    }
 126    return arm_to_core_mmu_idx(useridx);
 127}
 128
 129static void set_btype_raw(int val)
 130{
 131    tcg_gen_st_i32(tcg_constant_i32(val), cpu_env,
 132                   offsetof(CPUARMState, btype));
 133}
 134
 135static void set_btype(DisasContext *s, int val)
 136{
 137    /* BTYPE is a 2-bit field, and 0 should be done with reset_btype.  */
 138    tcg_debug_assert(val >= 1 && val <= 3);
 139    set_btype_raw(val);
 140    s->btype = -1;
 141}
 142
 143static void reset_btype(DisasContext *s)
 144{
 145    if (s->btype != 0) {
 146        set_btype_raw(0);
 147        s->btype = 0;
 148    }
 149}
 150
 151void gen_a64_set_pc_im(uint64_t val)
 152{
 153    tcg_gen_movi_i64(cpu_pc, val);
 154}
 155
 156/*
 157 * Handle Top Byte Ignore (TBI) bits.
 158 *
 159 * If address tagging is enabled via the TCR TBI bits:
 160 *  + for EL2 and EL3 there is only one TBI bit, and if it is set
 161 *    then the address is zero-extended, clearing bits [63:56]
 162 *  + for EL0 and EL1, TBI0 controls addresses with bit 55 == 0
 163 *    and TBI1 controls addressses with bit 55 == 1.
 164 *    If the appropriate TBI bit is set for the address then
 165 *    the address is sign-extended from bit 55 into bits [63:56]
 166 *
 167 * Here We have concatenated TBI{1,0} into tbi.
 168 */
 169static void gen_top_byte_ignore(DisasContext *s, TCGv_i64 dst,
 170                                TCGv_i64 src, int tbi)
 171{
 172    if (tbi == 0) {
 173        /* Load unmodified address */
 174        tcg_gen_mov_i64(dst, src);
 175    } else if (!regime_has_2_ranges(s->mmu_idx)) {
 176        /* Force tag byte to all zero */
 177        tcg_gen_extract_i64(dst, src, 0, 56);
 178    } else {
 179        /* Sign-extend from bit 55.  */
 180        tcg_gen_sextract_i64(dst, src, 0, 56);
 181
 182        switch (tbi) {
 183        case 1:
 184            /* tbi0 but !tbi1: only use the extension if positive */
 185            tcg_gen_and_i64(dst, dst, src);
 186            break;
 187        case 2:
 188            /* !tbi0 but tbi1: only use the extension if negative */
 189            tcg_gen_or_i64(dst, dst, src);
 190            break;
 191        case 3:
 192            /* tbi0 and tbi1: always use the extension */
 193            break;
 194        default:
 195            g_assert_not_reached();
 196        }
 197    }
 198}
 199
 200static void gen_a64_set_pc(DisasContext *s, TCGv_i64 src)
 201{
 202    /*
 203     * If address tagging is enabled for instructions via the TCR TBI bits,
 204     * then loading an address into the PC will clear out any tag.
 205     */
 206    gen_top_byte_ignore(s, cpu_pc, src, s->tbii);
 207}
 208
 209/*
 210 * Handle MTE and/or TBI.
 211 *
 212 * For TBI, ideally, we would do nothing.  Proper behaviour on fault is
 213 * for the tag to be present in the FAR_ELx register.  But for user-only
 214 * mode we do not have a TLB with which to implement this, so we must
 215 * remove the top byte now.
 216 *
 217 * Always return a fresh temporary that we can increment independently
 218 * of the write-back address.
 219 */
 220
 221TCGv_i64 clean_data_tbi(DisasContext *s, TCGv_i64 addr)
 222{
 223    TCGv_i64 clean = new_tmp_a64(s);
 224#ifdef CONFIG_USER_ONLY
 225    gen_top_byte_ignore(s, clean, addr, s->tbid);
 226#else
 227    tcg_gen_mov_i64(clean, addr);
 228#endif
 229    return clean;
 230}
 231
 232/* Insert a zero tag into src, with the result at dst. */
 233static void gen_address_with_allocation_tag0(TCGv_i64 dst, TCGv_i64 src)
 234{
 235    tcg_gen_andi_i64(dst, src, ~MAKE_64BIT_MASK(56, 4));
 236}
 237
 238static void gen_probe_access(DisasContext *s, TCGv_i64 ptr,
 239                             MMUAccessType acc, int log2_size)
 240{
 241    gen_helper_probe_access(cpu_env, ptr,
 242                            tcg_constant_i32(acc),
 243                            tcg_constant_i32(get_mem_index(s)),
 244                            tcg_constant_i32(1 << log2_size));
 245}
 246
 247/*
 248 * For MTE, check a single logical or atomic access.  This probes a single
 249 * address, the exact one specified.  The size and alignment of the access
 250 * is not relevant to MTE, per se, but watchpoints do require the size,
 251 * and we want to recognize those before making any other changes to state.
 252 */
 253static TCGv_i64 gen_mte_check1_mmuidx(DisasContext *s, TCGv_i64 addr,
 254                                      bool is_write, bool tag_checked,
 255                                      int log2_size, bool is_unpriv,
 256                                      int core_idx)
 257{
 258    if (tag_checked && s->mte_active[is_unpriv]) {
 259        TCGv_i64 ret;
 260        int desc = 0;
 261
 262        desc = FIELD_DP32(desc, MTEDESC, MIDX, core_idx);
 263        desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
 264        desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
 265        desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
 266        desc = FIELD_DP32(desc, MTEDESC, SIZEM1, (1 << log2_size) - 1);
 267
 268        ret = new_tmp_a64(s);
 269        gen_helper_mte_check(ret, cpu_env, tcg_constant_i32(desc), addr);
 270
 271        return ret;
 272    }
 273    return clean_data_tbi(s, addr);
 274}
 275
 276TCGv_i64 gen_mte_check1(DisasContext *s, TCGv_i64 addr, bool is_write,
 277                        bool tag_checked, int log2_size)
 278{
 279    return gen_mte_check1_mmuidx(s, addr, is_write, tag_checked, log2_size,
 280                                 false, get_mem_index(s));
 281}
 282
 283/*
 284 * For MTE, check multiple logical sequential accesses.
 285 */
 286TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write,
 287                        bool tag_checked, int size)
 288{
 289    if (tag_checked && s->mte_active[0]) {
 290        TCGv_i64 ret;
 291        int desc = 0;
 292
 293        desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s));
 294        desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
 295        desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
 296        desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
 297        desc = FIELD_DP32(desc, MTEDESC, SIZEM1, size - 1);
 298
 299        ret = new_tmp_a64(s);
 300        gen_helper_mte_check(ret, cpu_env, tcg_constant_i32(desc), addr);
 301
 302        return ret;
 303    }
 304    return clean_data_tbi(s, addr);
 305}
 306
 307typedef struct DisasCompare64 {
 308    TCGCond cond;
 309    TCGv_i64 value;
 310} DisasCompare64;
 311
 312static void a64_test_cc(DisasCompare64 *c64, int cc)
 313{
 314    DisasCompare c32;
 315
 316    arm_test_cc(&c32, cc);
 317
 318    /* Sign-extend the 32-bit value so that the GE/LT comparisons work
 319       * properly.  The NE/EQ comparisons are also fine with this choice.  */
 320    c64->cond = c32.cond;
 321    c64->value = tcg_temp_new_i64();
 322    tcg_gen_ext_i32_i64(c64->value, c32.value);
 323
 324    arm_free_cc(&c32);
 325}
 326
 327static void a64_free_cc(DisasCompare64 *c64)
 328{
 329    tcg_temp_free_i64(c64->value);
 330}
 331
 332static void gen_rebuild_hflags(DisasContext *s)
 333{
 334    gen_helper_rebuild_hflags_a64(cpu_env, tcg_constant_i32(s->current_el));
 335}
 336
 337static void gen_exception_internal(int excp)
 338{
 339    assert(excp_is_internal(excp));
 340    gen_helper_exception_internal(cpu_env, tcg_constant_i32(excp));
 341}
 342
 343static void gen_exception_internal_insn(DisasContext *s, uint64_t pc, int excp)
 344{
 345    gen_a64_set_pc_im(pc);
 346    gen_exception_internal(excp);
 347    s->base.is_jmp = DISAS_NORETURN;
 348}
 349
 350static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syndrome)
 351{
 352    gen_a64_set_pc_im(s->pc_curr);
 353    gen_helper_exception_bkpt_insn(cpu_env, tcg_constant_i32(syndrome));
 354    s->base.is_jmp = DISAS_NORETURN;
 355}
 356
 357static void gen_step_complete_exception(DisasContext *s)
 358{
 359    /* We just completed step of an insn. Move from Active-not-pending
 360     * to Active-pending, and then also take the swstep exception.
 361     * This corresponds to making the (IMPDEF) choice to prioritize
 362     * swstep exceptions over asynchronous exceptions taken to an exception
 363     * level where debug is disabled. This choice has the advantage that
 364     * we do not need to maintain internal state corresponding to the
 365     * ISV/EX syndrome bits between completion of the step and generation
 366     * of the exception, and our syndrome information is always correct.
 367     */
 368    gen_ss_advance(s);
 369    gen_swstep_exception(s, 1, s->is_ldex);
 370    s->base.is_jmp = DISAS_NORETURN;
 371}
 372
 373static inline bool use_goto_tb(DisasContext *s, uint64_t dest)
 374{
 375    if (s->ss_active) {
 376        return false;
 377    }
 378    return translator_use_goto_tb(&s->base, dest);
 379}
 380
 381static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest)
 382{
 383    if (use_goto_tb(s, dest)) {
 384        tcg_gen_goto_tb(n);
 385        gen_a64_set_pc_im(dest);
 386        tcg_gen_exit_tb(s->base.tb, n);
 387        s->base.is_jmp = DISAS_NORETURN;
 388    } else {
 389        gen_a64_set_pc_im(dest);
 390        if (s->ss_active) {
 391            gen_step_complete_exception(s);
 392        } else {
 393            tcg_gen_lookup_and_goto_ptr();
 394            s->base.is_jmp = DISAS_NORETURN;
 395        }
 396    }
 397}
 398
 399static void init_tmp_a64_array(DisasContext *s)
 400{
 401#ifdef CONFIG_DEBUG_TCG
 402    memset(s->tmp_a64, 0, sizeof(s->tmp_a64));
 403#endif
 404    s->tmp_a64_count = 0;
 405}
 406
 407static void free_tmp_a64(DisasContext *s)
 408{
 409    int i;
 410    for (i = 0; i < s->tmp_a64_count; i++) {
 411        tcg_temp_free_i64(s->tmp_a64[i]);
 412    }
 413    init_tmp_a64_array(s);
 414}
 415
 416TCGv_i64 new_tmp_a64(DisasContext *s)
 417{
 418    assert(s->tmp_a64_count < TMP_A64_MAX);
 419    return s->tmp_a64[s->tmp_a64_count++] = tcg_temp_new_i64();
 420}
 421
 422TCGv_i64 new_tmp_a64_local(DisasContext *s)
 423{
 424    assert(s->tmp_a64_count < TMP_A64_MAX);
 425    return s->tmp_a64[s->tmp_a64_count++] = tcg_temp_local_new_i64();
 426}
 427
 428TCGv_i64 new_tmp_a64_zero(DisasContext *s)
 429{
 430    TCGv_i64 t = new_tmp_a64(s);
 431    tcg_gen_movi_i64(t, 0);
 432    return t;
 433}
 434
 435/*
 436 * Register access functions
 437 *
 438 * These functions are used for directly accessing a register in where
 439 * changes to the final register value are likely to be made. If you
 440 * need to use a register for temporary calculation (e.g. index type
 441 * operations) use the read_* form.
 442 *
 443 * B1.2.1 Register mappings
 444 *
 445 * In instruction register encoding 31 can refer to ZR (zero register) or
 446 * the SP (stack pointer) depending on context. In QEMU's case we map SP
 447 * to cpu_X[31] and ZR accesses to a temporary which can be discarded.
 448 * This is the point of the _sp forms.
 449 */
 450TCGv_i64 cpu_reg(DisasContext *s, int reg)
 451{
 452    if (reg == 31) {
 453        return new_tmp_a64_zero(s);
 454    } else {
 455        return cpu_X[reg];
 456    }
 457}
 458
 459/* register access for when 31 == SP */
 460TCGv_i64 cpu_reg_sp(DisasContext *s, int reg)
 461{
 462    return cpu_X[reg];
 463}
 464
 465/* read a cpu register in 32bit/64bit mode. Returns a TCGv_i64
 466 * representing the register contents. This TCGv is an auto-freed
 467 * temporary so it need not be explicitly freed, and may be modified.
 468 */
 469TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf)
 470{
 471    TCGv_i64 v = new_tmp_a64(s);
 472    if (reg != 31) {
 473        if (sf) {
 474            tcg_gen_mov_i64(v, cpu_X[reg]);
 475        } else {
 476            tcg_gen_ext32u_i64(v, cpu_X[reg]);
 477        }
 478    } else {
 479        tcg_gen_movi_i64(v, 0);
 480    }
 481    return v;
 482}
 483
 484TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf)
 485{
 486    TCGv_i64 v = new_tmp_a64(s);
 487    if (sf) {
 488        tcg_gen_mov_i64(v, cpu_X[reg]);
 489    } else {
 490        tcg_gen_ext32u_i64(v, cpu_X[reg]);
 491    }
 492    return v;
 493}
 494
 495/* Return the offset into CPUARMState of a slice (from
 496 * the least significant end) of FP register Qn (ie
 497 * Dn, Sn, Hn or Bn).
 498 * (Note that this is not the same mapping as for A32; see cpu.h)
 499 */
 500static inline int fp_reg_offset(DisasContext *s, int regno, MemOp size)
 501{
 502    return vec_reg_offset(s, regno, 0, size);
 503}
 504
 505/* Offset of the high half of the 128 bit vector Qn */
 506static inline int fp_reg_hi_offset(DisasContext *s, int regno)
 507{
 508    return vec_reg_offset(s, regno, 1, MO_64);
 509}
 510
 511/* Convenience accessors for reading and writing single and double
 512 * FP registers. Writing clears the upper parts of the associated
 513 * 128 bit vector register, as required by the architecture.
 514 * Note that unlike the GP register accessors, the values returned
 515 * by the read functions must be manually freed.
 516 */
 517static TCGv_i64 read_fp_dreg(DisasContext *s, int reg)
 518{
 519    TCGv_i64 v = tcg_temp_new_i64();
 520
 521    tcg_gen_ld_i64(v, cpu_env, fp_reg_offset(s, reg, MO_64));
 522    return v;
 523}
 524
 525static TCGv_i32 read_fp_sreg(DisasContext *s, int reg)
 526{
 527    TCGv_i32 v = tcg_temp_new_i32();
 528
 529    tcg_gen_ld_i32(v, cpu_env, fp_reg_offset(s, reg, MO_32));
 530    return v;
 531}
 532
 533static TCGv_i32 read_fp_hreg(DisasContext *s, int reg)
 534{
 535    TCGv_i32 v = tcg_temp_new_i32();
 536
 537    tcg_gen_ld16u_i32(v, cpu_env, fp_reg_offset(s, reg, MO_16));
 538    return v;
 539}
 540
 541/* Clear the bits above an N-bit vector, for N = (is_q ? 128 : 64).
 542 * If SVE is not enabled, then there are only 128 bits in the vector.
 543 */
 544static void clear_vec_high(DisasContext *s, bool is_q, int rd)
 545{
 546    unsigned ofs = fp_reg_offset(s, rd, MO_64);
 547    unsigned vsz = vec_full_reg_size(s);
 548
 549    /* Nop move, with side effect of clearing the tail. */
 550    tcg_gen_gvec_mov(MO_64, ofs, ofs, is_q ? 16 : 8, vsz);
 551}
 552
 553void write_fp_dreg(DisasContext *s, int reg, TCGv_i64 v)
 554{
 555    unsigned ofs = fp_reg_offset(s, reg, MO_64);
 556
 557    tcg_gen_st_i64(v, cpu_env, ofs);
 558    clear_vec_high(s, false, reg);
 559}
 560
 561static void write_fp_sreg(DisasContext *s, int reg, TCGv_i32 v)
 562{
 563    TCGv_i64 tmp = tcg_temp_new_i64();
 564
 565    tcg_gen_extu_i32_i64(tmp, v);
 566    write_fp_dreg(s, reg, tmp);
 567    tcg_temp_free_i64(tmp);
 568}
 569
 570/* Expand a 2-operand AdvSIMD vector operation using an expander function.  */
 571static void gen_gvec_fn2(DisasContext *s, bool is_q, int rd, int rn,
 572                         GVecGen2Fn *gvec_fn, int vece)
 573{
 574    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
 575            is_q ? 16 : 8, vec_full_reg_size(s));
 576}
 577
 578/* Expand a 2-operand + immediate AdvSIMD vector operation using
 579 * an expander function.
 580 */
 581static void gen_gvec_fn2i(DisasContext *s, bool is_q, int rd, int rn,
 582                          int64_t imm, GVecGen2iFn *gvec_fn, int vece)
 583{
 584    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
 585            imm, is_q ? 16 : 8, vec_full_reg_size(s));
 586}
 587
 588/* Expand a 3-operand AdvSIMD vector operation using an expander function.  */
 589static void gen_gvec_fn3(DisasContext *s, bool is_q, int rd, int rn, int rm,
 590                         GVecGen3Fn *gvec_fn, int vece)
 591{
 592    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
 593            vec_full_reg_offset(s, rm), is_q ? 16 : 8, vec_full_reg_size(s));
 594}
 595
 596/* Expand a 4-operand AdvSIMD vector operation using an expander function.  */
 597static void gen_gvec_fn4(DisasContext *s, bool is_q, int rd, int rn, int rm,
 598                         int rx, GVecGen4Fn *gvec_fn, int vece)
 599{
 600    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
 601            vec_full_reg_offset(s, rm), vec_full_reg_offset(s, rx),
 602            is_q ? 16 : 8, vec_full_reg_size(s));
 603}
 604
 605/* Expand a 2-operand operation using an out-of-line helper.  */
 606static void gen_gvec_op2_ool(DisasContext *s, bool is_q, int rd,
 607                             int rn, int data, gen_helper_gvec_2 *fn)
 608{
 609    tcg_gen_gvec_2_ool(vec_full_reg_offset(s, rd),
 610                       vec_full_reg_offset(s, rn),
 611                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
 612}
 613
 614/* Expand a 3-operand operation using an out-of-line helper.  */
 615static void gen_gvec_op3_ool(DisasContext *s, bool is_q, int rd,
 616                             int rn, int rm, int data, gen_helper_gvec_3 *fn)
 617{
 618    tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd),
 619                       vec_full_reg_offset(s, rn),
 620                       vec_full_reg_offset(s, rm),
 621                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
 622}
 623
 624/* Expand a 3-operand + fpstatus pointer + simd data value operation using
 625 * an out-of-line helper.
 626 */
 627static void gen_gvec_op3_fpst(DisasContext *s, bool is_q, int rd, int rn,
 628                              int rm, bool is_fp16, int data,
 629                              gen_helper_gvec_3_ptr *fn)
 630{
 631    TCGv_ptr fpst = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
 632    tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
 633                       vec_full_reg_offset(s, rn),
 634                       vec_full_reg_offset(s, rm), fpst,
 635                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
 636    tcg_temp_free_ptr(fpst);
 637}
 638
 639/* Expand a 3-operand + qc + operation using an out-of-line helper.  */
 640static void gen_gvec_op3_qc(DisasContext *s, bool is_q, int rd, int rn,
 641                            int rm, gen_helper_gvec_3_ptr *fn)
 642{
 643    TCGv_ptr qc_ptr = tcg_temp_new_ptr();
 644
 645    tcg_gen_addi_ptr(qc_ptr, cpu_env, offsetof(CPUARMState, vfp.qc));
 646    tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
 647                       vec_full_reg_offset(s, rn),
 648                       vec_full_reg_offset(s, rm), qc_ptr,
 649                       is_q ? 16 : 8, vec_full_reg_size(s), 0, fn);
 650    tcg_temp_free_ptr(qc_ptr);
 651}
 652
 653/* Expand a 4-operand operation using an out-of-line helper.  */
 654static void gen_gvec_op4_ool(DisasContext *s, bool is_q, int rd, int rn,
 655                             int rm, int ra, int data, gen_helper_gvec_4 *fn)
 656{
 657    tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
 658                       vec_full_reg_offset(s, rn),
 659                       vec_full_reg_offset(s, rm),
 660                       vec_full_reg_offset(s, ra),
 661                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
 662}
 663
 664/*
 665 * Expand a 4-operand + fpstatus pointer + simd data value operation using
 666 * an out-of-line helper.
 667 */
 668static void gen_gvec_op4_fpst(DisasContext *s, bool is_q, int rd, int rn,
 669                              int rm, int ra, bool is_fp16, int data,
 670                              gen_helper_gvec_4_ptr *fn)
 671{
 672    TCGv_ptr fpst = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
 673    tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, rd),
 674                       vec_full_reg_offset(s, rn),
 675                       vec_full_reg_offset(s, rm),
 676                       vec_full_reg_offset(s, ra), fpst,
 677                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
 678    tcg_temp_free_ptr(fpst);
 679}
 680
 681/* Set ZF and NF based on a 64 bit result. This is alas fiddlier
 682 * than the 32 bit equivalent.
 683 */
 684static inline void gen_set_NZ64(TCGv_i64 result)
 685{
 686    tcg_gen_extr_i64_i32(cpu_ZF, cpu_NF, result);
 687    tcg_gen_or_i32(cpu_ZF, cpu_ZF, cpu_NF);
 688}
 689
 690/* Set NZCV as for a logical operation: NZ as per result, CV cleared. */
 691static inline void gen_logic_CC(int sf, TCGv_i64 result)
 692{
 693    if (sf) {
 694        gen_set_NZ64(result);
 695    } else {
 696        tcg_gen_extrl_i64_i32(cpu_ZF, result);
 697        tcg_gen_mov_i32(cpu_NF, cpu_ZF);
 698    }
 699    tcg_gen_movi_i32(cpu_CF, 0);
 700    tcg_gen_movi_i32(cpu_VF, 0);
 701}
 702
 703/* dest = T0 + T1; compute C, N, V and Z flags */
 704static void gen_add_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
 705{
 706    if (sf) {
 707        TCGv_i64 result, flag, tmp;
 708        result = tcg_temp_new_i64();
 709        flag = tcg_temp_new_i64();
 710        tmp = tcg_temp_new_i64();
 711
 712        tcg_gen_movi_i64(tmp, 0);
 713        tcg_gen_add2_i64(result, flag, t0, tmp, t1, tmp);
 714
 715        tcg_gen_extrl_i64_i32(cpu_CF, flag);
 716
 717        gen_set_NZ64(result);
 718
 719        tcg_gen_xor_i64(flag, result, t0);
 720        tcg_gen_xor_i64(tmp, t0, t1);
 721        tcg_gen_andc_i64(flag, flag, tmp);
 722        tcg_temp_free_i64(tmp);
 723        tcg_gen_extrh_i64_i32(cpu_VF, flag);
 724
 725        tcg_gen_mov_i64(dest, result);
 726        tcg_temp_free_i64(result);
 727        tcg_temp_free_i64(flag);
 728    } else {
 729        /* 32 bit arithmetic */
 730        TCGv_i32 t0_32 = tcg_temp_new_i32();
 731        TCGv_i32 t1_32 = tcg_temp_new_i32();
 732        TCGv_i32 tmp = tcg_temp_new_i32();
 733
 734        tcg_gen_movi_i32(tmp, 0);
 735        tcg_gen_extrl_i64_i32(t0_32, t0);
 736        tcg_gen_extrl_i64_i32(t1_32, t1);
 737        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, t1_32, tmp);
 738        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
 739        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
 740        tcg_gen_xor_i32(tmp, t0_32, t1_32);
 741        tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
 742        tcg_gen_extu_i32_i64(dest, cpu_NF);
 743
 744        tcg_temp_free_i32(tmp);
 745        tcg_temp_free_i32(t0_32);
 746        tcg_temp_free_i32(t1_32);
 747    }
 748}
 749
 750/* dest = T0 - T1; compute C, N, V and Z flags */
 751static void gen_sub_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
 752{
 753    if (sf) {
 754        /* 64 bit arithmetic */
 755        TCGv_i64 result, flag, tmp;
 756
 757        result = tcg_temp_new_i64();
 758        flag = tcg_temp_new_i64();
 759        tcg_gen_sub_i64(result, t0, t1);
 760
 761        gen_set_NZ64(result);
 762
 763        tcg_gen_setcond_i64(TCG_COND_GEU, flag, t0, t1);
 764        tcg_gen_extrl_i64_i32(cpu_CF, flag);
 765
 766        tcg_gen_xor_i64(flag, result, t0);
 767        tmp = tcg_temp_new_i64();
 768        tcg_gen_xor_i64(tmp, t0, t1);
 769        tcg_gen_and_i64(flag, flag, tmp);
 770        tcg_temp_free_i64(tmp);
 771        tcg_gen_extrh_i64_i32(cpu_VF, flag);
 772        tcg_gen_mov_i64(dest, result);
 773        tcg_temp_free_i64(flag);
 774        tcg_temp_free_i64(result);
 775    } else {
 776        /* 32 bit arithmetic */
 777        TCGv_i32 t0_32 = tcg_temp_new_i32();
 778        TCGv_i32 t1_32 = tcg_temp_new_i32();
 779        TCGv_i32 tmp;
 780
 781        tcg_gen_extrl_i64_i32(t0_32, t0);
 782        tcg_gen_extrl_i64_i32(t1_32, t1);
 783        tcg_gen_sub_i32(cpu_NF, t0_32, t1_32);
 784        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
 785        tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0_32, t1_32);
 786        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
 787        tmp = tcg_temp_new_i32();
 788        tcg_gen_xor_i32(tmp, t0_32, t1_32);
 789        tcg_temp_free_i32(t0_32);
 790        tcg_temp_free_i32(t1_32);
 791        tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
 792        tcg_temp_free_i32(tmp);
 793        tcg_gen_extu_i32_i64(dest, cpu_NF);
 794    }
 795}
 796
 797/* dest = T0 + T1 + CF; do not compute flags. */
 798static void gen_adc(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
 799{
 800    TCGv_i64 flag = tcg_temp_new_i64();
 801    tcg_gen_extu_i32_i64(flag, cpu_CF);
 802    tcg_gen_add_i64(dest, t0, t1);
 803    tcg_gen_add_i64(dest, dest, flag);
 804    tcg_temp_free_i64(flag);
 805
 806    if (!sf) {
 807        tcg_gen_ext32u_i64(dest, dest);
 808    }
 809}
 810
 811/* dest = T0 + T1 + CF; compute C, N, V and Z flags. */
 812static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
 813{
 814    if (sf) {
 815        TCGv_i64 result = tcg_temp_new_i64();
 816        TCGv_i64 cf_64 = tcg_temp_new_i64();
 817        TCGv_i64 vf_64 = tcg_temp_new_i64();
 818        TCGv_i64 tmp = tcg_temp_new_i64();
 819        TCGv_i64 zero = tcg_constant_i64(0);
 820
 821        tcg_gen_extu_i32_i64(cf_64, cpu_CF);
 822        tcg_gen_add2_i64(result, cf_64, t0, zero, cf_64, zero);
 823        tcg_gen_add2_i64(result, cf_64, result, cf_64, t1, zero);
 824        tcg_gen_extrl_i64_i32(cpu_CF, cf_64);
 825        gen_set_NZ64(result);
 826
 827        tcg_gen_xor_i64(vf_64, result, t0);
 828        tcg_gen_xor_i64(tmp, t0, t1);
 829        tcg_gen_andc_i64(vf_64, vf_64, tmp);
 830        tcg_gen_extrh_i64_i32(cpu_VF, vf_64);
 831
 832        tcg_gen_mov_i64(dest, result);
 833
 834        tcg_temp_free_i64(tmp);
 835        tcg_temp_free_i64(vf_64);
 836        tcg_temp_free_i64(cf_64);
 837        tcg_temp_free_i64(result);
 838    } else {
 839        TCGv_i32 t0_32 = tcg_temp_new_i32();
 840        TCGv_i32 t1_32 = tcg_temp_new_i32();
 841        TCGv_i32 tmp = tcg_temp_new_i32();
 842        TCGv_i32 zero = tcg_constant_i32(0);
 843
 844        tcg_gen_extrl_i64_i32(t0_32, t0);
 845        tcg_gen_extrl_i64_i32(t1_32, t1);
 846        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, zero, cpu_CF, zero);
 847        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1_32, zero);
 848
 849        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
 850        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
 851        tcg_gen_xor_i32(tmp, t0_32, t1_32);
 852        tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
 853        tcg_gen_extu_i32_i64(dest, cpu_NF);
 854
 855        tcg_temp_free_i32(tmp);
 856        tcg_temp_free_i32(t1_32);
 857        tcg_temp_free_i32(t0_32);
 858    }
 859}
 860
 861/*
 862 * Load/Store generators
 863 */
 864
 865/*
 866 * Store from GPR register to memory.
 867 */
 868static void do_gpr_st_memidx(DisasContext *s, TCGv_i64 source,
 869                             TCGv_i64 tcg_addr, MemOp memop, int memidx,
 870                             bool iss_valid,
 871                             unsigned int iss_srt,
 872                             bool iss_sf, bool iss_ar)
 873{
 874    memop = finalize_memop(s, memop);
 875    tcg_gen_qemu_st_i64(source, tcg_addr, memidx, memop);
 876
 877    if (iss_valid) {
 878        uint32_t syn;
 879
 880        syn = syn_data_abort_with_iss(0,
 881                                      (memop & MO_SIZE),
 882                                      false,
 883                                      iss_srt,
 884                                      iss_sf,
 885                                      iss_ar,
 886                                      0, 0, 0, 0, 0, false);
 887        disas_set_insn_syndrome(s, syn);
 888    }
 889}
 890
 891static void do_gpr_st(DisasContext *s, TCGv_i64 source,
 892                      TCGv_i64 tcg_addr, MemOp memop,
 893                      bool iss_valid,
 894                      unsigned int iss_srt,
 895                      bool iss_sf, bool iss_ar)
 896{
 897    do_gpr_st_memidx(s, source, tcg_addr, memop, get_mem_index(s),
 898                     iss_valid, iss_srt, iss_sf, iss_ar);
 899}
 900
 901/*
 902 * Load from memory to GPR register
 903 */
 904static void do_gpr_ld_memidx(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
 905                             MemOp memop, bool extend, int memidx,
 906                             bool iss_valid, unsigned int iss_srt,
 907                             bool iss_sf, bool iss_ar)
 908{
 909    memop = finalize_memop(s, memop);
 910    tcg_gen_qemu_ld_i64(dest, tcg_addr, memidx, memop);
 911
 912    if (extend && (memop & MO_SIGN)) {
 913        g_assert((memop & MO_SIZE) <= MO_32);
 914        tcg_gen_ext32u_i64(dest, dest);
 915    }
 916
 917    if (iss_valid) {
 918        uint32_t syn;
 919
 920        syn = syn_data_abort_with_iss(0,
 921                                      (memop & MO_SIZE),
 922                                      (memop & MO_SIGN) != 0,
 923                                      iss_srt,
 924                                      iss_sf,
 925                                      iss_ar,
 926                                      0, 0, 0, 0, 0, false);
 927        disas_set_insn_syndrome(s, syn);
 928    }
 929}
 930
/* Load from memory to GPR, using the MMU index of the current context */
static void do_gpr_ld(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
                      MemOp memop, bool extend,
                      bool iss_valid, unsigned int iss_srt,
                      bool iss_sf, bool iss_ar)
{
    do_gpr_ld_memidx(s, dest, tcg_addr, memop, extend, get_mem_index(s),
                     iss_valid, iss_srt, iss_sf, iss_ar);
}
 939
 940/*
 941 * Store from FP register to memory
 942 */
 943static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, int size)
 944{
 945    /* This writes the bottom N bits of a 128 bit wide vector to memory */
 946    TCGv_i64 tmplo = tcg_temp_new_i64();
 947    MemOp mop;
 948
 949    tcg_gen_ld_i64(tmplo, cpu_env, fp_reg_offset(s, srcidx, MO_64));
 950
 951    if (size < 4) {
 952        mop = finalize_memop(s, size);
 953        tcg_gen_qemu_st_i64(tmplo, tcg_addr, get_mem_index(s), mop);
 954    } else {
 955        bool be = s->be_data == MO_BE;
 956        TCGv_i64 tcg_hiaddr = tcg_temp_new_i64();
 957        TCGv_i64 tmphi = tcg_temp_new_i64();
 958
 959        tcg_gen_ld_i64(tmphi, cpu_env, fp_reg_hi_offset(s, srcidx));
 960
 961        mop = s->be_data | MO_UQ;
 962        tcg_gen_qemu_st_i64(be ? tmphi : tmplo, tcg_addr, get_mem_index(s),
 963                            mop | (s->align_mem ? MO_ALIGN_16 : 0));
 964        tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
 965        tcg_gen_qemu_st_i64(be ? tmplo : tmphi, tcg_hiaddr,
 966                            get_mem_index(s), mop);
 967
 968        tcg_temp_free_i64(tcg_hiaddr);
 969        tcg_temp_free_i64(tmphi);
 970    }
 971
 972    tcg_temp_free_i64(tmplo);
 973}
 974
 975/*
 976 * Load from memory to FP register
 977 */
 978static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size)
 979{
 980    /* This always zero-extends and writes to a full 128 bit wide vector */
 981    TCGv_i64 tmplo = tcg_temp_new_i64();
 982    TCGv_i64 tmphi = NULL;
 983    MemOp mop;
 984
 985    if (size < 4) {
 986        mop = finalize_memop(s, size);
 987        tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), mop);
 988    } else {
 989        bool be = s->be_data == MO_BE;
 990        TCGv_i64 tcg_hiaddr;
 991
 992        tmphi = tcg_temp_new_i64();
 993        tcg_hiaddr = tcg_temp_new_i64();
 994
 995        mop = s->be_data | MO_UQ;
 996        tcg_gen_qemu_ld_i64(be ? tmphi : tmplo, tcg_addr, get_mem_index(s),
 997                            mop | (s->align_mem ? MO_ALIGN_16 : 0));
 998        tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
 999        tcg_gen_qemu_ld_i64(be ? tmplo : tmphi, tcg_hiaddr,
1000                            get_mem_index(s), mop);
1001        tcg_temp_free_i64(tcg_hiaddr);
1002    }
1003
1004    tcg_gen_st_i64(tmplo, cpu_env, fp_reg_offset(s, destidx, MO_64));
1005    tcg_temp_free_i64(tmplo);
1006
1007    if (tmphi) {
1008        tcg_gen_st_i64(tmphi, cpu_env, fp_reg_hi_offset(s, destidx));
1009        tcg_temp_free_i64(tmphi);
1010    }
1011    clear_vec_high(s, tmphi != NULL, destidx);
1012}
1013
1014/*
1015 * Vector load/store helpers.
1016 *
1017 * The principal difference between this and a FP load is that we don't
1018 * zero extend as we are filling a partial chunk of the vector register.
1019 * These functions don't support 128 bit loads/stores, which would be
1020 * normal load/store operations.
1021 *
1022 * The _i32 versions are useful when operating on 32 bit quantities
1023 * (eg for floating point single or using Neon helper functions).
1024 */
1025
1026/* Get value of an element within a vector register */
1027static void read_vec_element(DisasContext *s, TCGv_i64 tcg_dest, int srcidx,
1028                             int element, MemOp memop)
1029{
1030    int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
1031    switch ((unsigned)memop) {
1032    case MO_8:
1033        tcg_gen_ld8u_i64(tcg_dest, cpu_env, vect_off);
1034        break;
1035    case MO_16:
1036        tcg_gen_ld16u_i64(tcg_dest, cpu_env, vect_off);
1037        break;
1038    case MO_32:
1039        tcg_gen_ld32u_i64(tcg_dest, cpu_env, vect_off);
1040        break;
1041    case MO_8|MO_SIGN:
1042        tcg_gen_ld8s_i64(tcg_dest, cpu_env, vect_off);
1043        break;
1044    case MO_16|MO_SIGN:
1045        tcg_gen_ld16s_i64(tcg_dest, cpu_env, vect_off);
1046        break;
1047    case MO_32|MO_SIGN:
1048        tcg_gen_ld32s_i64(tcg_dest, cpu_env, vect_off);
1049        break;
1050    case MO_64:
1051    case MO_64|MO_SIGN:
1052        tcg_gen_ld_i64(tcg_dest, cpu_env, vect_off);
1053        break;
1054    default:
1055        g_assert_not_reached();
1056    }
1057}
1058
1059static void read_vec_element_i32(DisasContext *s, TCGv_i32 tcg_dest, int srcidx,
1060                                 int element, MemOp memop)
1061{
1062    int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
1063    switch (memop) {
1064    case MO_8:
1065        tcg_gen_ld8u_i32(tcg_dest, cpu_env, vect_off);
1066        break;
1067    case MO_16:
1068        tcg_gen_ld16u_i32(tcg_dest, cpu_env, vect_off);
1069        break;
1070    case MO_8|MO_SIGN:
1071        tcg_gen_ld8s_i32(tcg_dest, cpu_env, vect_off);
1072        break;
1073    case MO_16|MO_SIGN:
1074        tcg_gen_ld16s_i32(tcg_dest, cpu_env, vect_off);
1075        break;
1076    case MO_32:
1077    case MO_32|MO_SIGN:
1078        tcg_gen_ld_i32(tcg_dest, cpu_env, vect_off);
1079        break;
1080    default:
1081        g_assert_not_reached();
1082    }
1083}
1084
1085/* Set value of an element within a vector register */
1086static void write_vec_element(DisasContext *s, TCGv_i64 tcg_src, int destidx,
1087                              int element, MemOp memop)
1088{
1089    int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
1090    switch (memop) {
1091    case MO_8:
1092        tcg_gen_st8_i64(tcg_src, cpu_env, vect_off);
1093        break;
1094    case MO_16:
1095        tcg_gen_st16_i64(tcg_src, cpu_env, vect_off);
1096        break;
1097    case MO_32:
1098        tcg_gen_st32_i64(tcg_src, cpu_env, vect_off);
1099        break;
1100    case MO_64:
1101        tcg_gen_st_i64(tcg_src, cpu_env, vect_off);
1102        break;
1103    default:
1104        g_assert_not_reached();
1105    }
1106}
1107
1108static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src,
1109                                  int destidx, int element, MemOp memop)
1110{
1111    int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
1112    switch (memop) {
1113    case MO_8:
1114        tcg_gen_st8_i32(tcg_src, cpu_env, vect_off);
1115        break;
1116    case MO_16:
1117        tcg_gen_st16_i32(tcg_src, cpu_env, vect_off);
1118        break;
1119    case MO_32:
1120        tcg_gen_st_i32(tcg_src, cpu_env, vect_off);
1121        break;
1122    default:
1123        g_assert_not_reached();
1124    }
1125}
1126
1127/* Store from vector register to memory */
1128static void do_vec_st(DisasContext *s, int srcidx, int element,
1129                      TCGv_i64 tcg_addr, MemOp mop)
1130{
1131    TCGv_i64 tcg_tmp = tcg_temp_new_i64();
1132
1133    read_vec_element(s, tcg_tmp, srcidx, element, mop & MO_SIZE);
1134    tcg_gen_qemu_st_i64(tcg_tmp, tcg_addr, get_mem_index(s), mop);
1135
1136    tcg_temp_free_i64(tcg_tmp);
1137}
1138
1139/* Load from memory to vector register */
1140static void do_vec_ld(DisasContext *s, int destidx, int element,
1141                      TCGv_i64 tcg_addr, MemOp mop)
1142{
1143    TCGv_i64 tcg_tmp = tcg_temp_new_i64();
1144
1145    tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr, get_mem_index(s), mop);
1146    write_vec_element(s, tcg_tmp, destidx, element, mop & MO_SIZE);
1147
1148    tcg_temp_free_i64(tcg_tmp);
1149}
1150
1151/* Check that FP/Neon access is enabled. If it is, return
1152 * true. If not, emit code to generate an appropriate exception,
1153 * and return false; the caller should not emit any code for
1154 * the instruction. Note that this check must happen after all
1155 * unallocated-encoding checks (otherwise the syndrome information
1156 * for the resulting exception will be incorrect).
1157 */
1158static bool fp_access_check_only(DisasContext *s)
1159{
1160    if (s->fp_excp_el) {
1161        assert(!s->fp_access_checked);
1162        s->fp_access_checked = true;
1163
1164        gen_exception_insn_el(s, s->pc_curr, EXCP_UDEF,
1165                              syn_fp_access_trap(1, 0xe, false, 0),
1166                              s->fp_excp_el);
1167        return false;
1168    }
1169    s->fp_access_checked = true;
1170    return true;
1171}
1172
1173static bool fp_access_check(DisasContext *s)
1174{
1175    if (!fp_access_check_only(s)) {
1176        return false;
1177    }
1178    if (s->sme_trap_nonstreaming && s->is_nonstreaming) {
1179        gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
1180                           syn_smetrap(SME_ET_Streaming, false));
1181        return false;
1182    }
1183    return true;
1184}
1185
1186/*
1187 * Check that SVE access is enabled.  If it is, return true.
1188 * If not, emit code to generate an appropriate exception and return false.
1189 * This function corresponds to CheckSVEEnabled().
1190 */
1191bool sve_access_check(DisasContext *s)
1192{
1193    if (s->pstate_sm || !dc_isar_feature(aa64_sve, s)) {
1194        assert(dc_isar_feature(aa64_sme, s));
1195        if (!sme_sm_enabled_check(s)) {
1196            goto fail_exit;
1197        }
1198    } else if (s->sve_excp_el) {
1199        gen_exception_insn_el(s, s->pc_curr, EXCP_UDEF,
1200                              syn_sve_access_trap(), s->sve_excp_el);
1201        goto fail_exit;
1202    }
1203    s->sve_access_checked = true;
1204    return fp_access_check(s);
1205
1206 fail_exit:
1207    /* Assert that we only raise one exception per instruction. */
1208    assert(!s->sve_access_checked);
1209    s->sve_access_checked = true;
1210    return false;
1211}
1212
1213/*
1214 * Check that SME access is enabled, raise an exception if not.
1215 * Note that this function corresponds to CheckSMEAccess and is
1216 * only used directly for cpregs.
1217 */
1218static bool sme_access_check(DisasContext *s)
1219{
1220    if (s->sme_excp_el) {
1221        gen_exception_insn_el(s, s->pc_curr, EXCP_UDEF,
1222                              syn_smetrap(SME_ET_AccessTrap, false),
1223                              s->sme_excp_el);
1224        return false;
1225    }
1226    return true;
1227}
1228
/* This function corresponds to CheckSMEEnabled. */
bool sme_enabled_check(DisasContext *s)
{
    /*
     * Note that unlike sve_excp_el, we have not constrained sme_excp_el
     * to be zero when fp_excp_el has priority.  This is because we need
     * sme_excp_el by itself for cpregs access checks.
     */
    if (!s->fp_excp_el || s->sme_excp_el < s->fp_excp_el) {
        /* Any SME trap takes priority over (or replaces) the FP trap */
        s->fp_access_checked = true;
        return sme_access_check(s);
    }
    /* Otherwise raise the ordinary FP access trap */
    return fp_access_check_only(s);
}
1243
/*
 * Common subroutine for CheckSMEAnd*Enabled: after the base SME enable
 * check, also require PSTATE.SM and/or PSTATE.ZA as selected by the
 * SM/ZA fields of 'req', raising the corresponding SME trap if the
 * required state is not active.
 */
bool sme_enabled_check_with_svcr(DisasContext *s, unsigned req)
{
    if (!sme_enabled_check(s)) {
        return false;
    }
    if (FIELD_EX64(req, SVCR, SM) && !s->pstate_sm) {
        gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
                           syn_smetrap(SME_ET_NotStreaming, false));
        return false;
    }
    if (FIELD_EX64(req, SVCR, ZA) && !s->pstate_za) {
        gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
                           syn_smetrap(SME_ET_InactiveZA, false));
        return false;
    }
    return true;
}
1262
1263/*
1264 * This utility function is for doing register extension with an
1265 * optional shift. You will likely want to pass a temporary for the
1266 * destination register. See DecodeRegExtend() in the ARM ARM.
1267 */
1268static void ext_and_shift_reg(TCGv_i64 tcg_out, TCGv_i64 tcg_in,
1269                              int option, unsigned int shift)
1270{
1271    int extsize = extract32(option, 0, 2);
1272    bool is_signed = extract32(option, 2, 1);
1273
1274    if (is_signed) {
1275        switch (extsize) {
1276        case 0:
1277            tcg_gen_ext8s_i64(tcg_out, tcg_in);
1278            break;
1279        case 1:
1280            tcg_gen_ext16s_i64(tcg_out, tcg_in);
1281            break;
1282        case 2:
1283            tcg_gen_ext32s_i64(tcg_out, tcg_in);
1284            break;
1285        case 3:
1286            tcg_gen_mov_i64(tcg_out, tcg_in);
1287            break;
1288        }
1289    } else {
1290        switch (extsize) {
1291        case 0:
1292            tcg_gen_ext8u_i64(tcg_out, tcg_in);
1293            break;
1294        case 1:
1295            tcg_gen_ext16u_i64(tcg_out, tcg_in);
1296            break;
1297        case 2:
1298            tcg_gen_ext32u_i64(tcg_out, tcg_in);
1299            break;
1300        case 3:
1301            tcg_gen_mov_i64(tcg_out, tcg_in);
1302            break;
1303        }
1304    }
1305
1306    if (shift) {
1307        tcg_gen_shli_i64(tcg_out, tcg_out, shift);
1308    }
1309}
1310
static inline void gen_check_sp_alignment(DisasContext *s)
{
    /*
     * The AArch64 architecture mandates that (if enabled via PSTATE
     * or SCTLR bits) there is a check that SP is 16-aligned on every
     * SP-relative load or store (with an exception generated if it is not).
     * In line with general QEMU practice regarding misaligned accesses,
     * we omit these checks for the sake of guest program performance.
     * This function is provided as a hook so we can more easily add these
     * checks in future (possibly as a "favour catching guest program bugs
     * over speed" user selectable option).
     */
}
1323
1324/*
1325 * This provides a simple table based table lookup decoder. It is
1326 * intended to be used when the relevant bits for decode are too
1327 * awkwardly placed and switch/if based logic would be confusing and
1328 * deeply nested. Since it's a linear search through the table, tables
1329 * should be kept small.
1330 *
1331 * It returns the first handler where insn & mask == pattern, or
1332 * NULL if there is no match.
1333 * The table is terminated by an empty mask (i.e. 0)
1334 */
1335static inline AArch64DecodeFn *lookup_disas_fn(const AArch64DecodeTable *table,
1336                                               uint32_t insn)
1337{
1338    const AArch64DecodeTable *tptr = table;
1339
1340    while (tptr->mask) {
1341        if ((insn & tptr->mask) == tptr->pattern) {
1342            return tptr->disas_fn;
1343        }
1344        tptr++;
1345    }
1346    return NULL;
1347}
1348
1349/*
1350 * The instruction disassembly implemented here matches
1351 * the instruction encoding classifications in chapter C4
1352 * of the ARM Architecture Reference Manual (DDI0487B_a);
1353 * classification names and decode diagrams here should generally
1354 * match up with those in the manual.
1355 */
1356
1357/* Unconditional branch (immediate)
1358 *   31  30       26 25                                  0
1359 * +----+-----------+-------------------------------------+
1360 * | op | 0 0 1 0 1 |                 imm26               |
1361 * +----+-----------+-------------------------------------+
1362 */
1363static void disas_uncond_b_imm(DisasContext *s, uint32_t insn)
1364{
1365    uint64_t addr = s->pc_curr + sextract32(insn, 0, 26) * 4;
1366
1367    if (insn & (1U << 31)) {
1368        /* BL Branch with link */
1369        tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next);
1370    }
1371
1372    /* B Branch / BL Branch with link */
1373    reset_btype(s);
1374    gen_goto_tb(s, 0, addr);
1375}
1376
1377/* Compare and branch (immediate)
1378 *   31  30         25  24  23                  5 4      0
1379 * +----+-------------+----+---------------------+--------+
1380 * | sf | 0 1 1 0 1 0 | op |         imm19       |   Rt   |
1381 * +----+-------------+----+---------------------+--------+
1382 */
1383static void disas_comp_b_imm(DisasContext *s, uint32_t insn)
1384{
1385    unsigned int sf, op, rt;
1386    uint64_t addr;
1387    TCGLabel *label_match;
1388    TCGv_i64 tcg_cmp;
1389
1390    sf = extract32(insn, 31, 1);
1391    op = extract32(insn, 24, 1); /* 0: CBZ; 1: CBNZ */
1392    rt = extract32(insn, 0, 5);
1393    addr = s->pc_curr + sextract32(insn, 5, 19) * 4;
1394
1395    tcg_cmp = read_cpu_reg(s, rt, sf);
1396    label_match = gen_new_label();
1397
1398    reset_btype(s);
1399    tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
1400                        tcg_cmp, 0, label_match);
1401
1402    gen_goto_tb(s, 0, s->base.pc_next);
1403    gen_set_label(label_match);
1404    gen_goto_tb(s, 1, addr);
1405}
1406
1407/* Test and branch (immediate)
1408 *   31  30         25  24  23   19 18          5 4    0
1409 * +----+-------------+----+-------+-------------+------+
1410 * | b5 | 0 1 1 0 1 1 | op |  b40  |    imm14    |  Rt  |
1411 * +----+-------------+----+-------+-------------+------+
1412 */
1413static void disas_test_b_imm(DisasContext *s, uint32_t insn)
1414{
1415    unsigned int bit_pos, op, rt;
1416    uint64_t addr;
1417    TCGLabel *label_match;
1418    TCGv_i64 tcg_cmp;
1419
1420    bit_pos = (extract32(insn, 31, 1) << 5) | extract32(insn, 19, 5);
1421    op = extract32(insn, 24, 1); /* 0: TBZ; 1: TBNZ */
1422    addr = s->pc_curr + sextract32(insn, 5, 14) * 4;
1423    rt = extract32(insn, 0, 5);
1424
1425    tcg_cmp = tcg_temp_new_i64();
1426    tcg_gen_andi_i64(tcg_cmp, cpu_reg(s, rt), (1ULL << bit_pos));
1427    label_match = gen_new_label();
1428
1429    reset_btype(s);
1430    tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
1431                        tcg_cmp, 0, label_match);
1432    tcg_temp_free_i64(tcg_cmp);
1433    gen_goto_tb(s, 0, s->base.pc_next);
1434    gen_set_label(label_match);
1435    gen_goto_tb(s, 1, addr);
1436}
1437
1438/* Conditional branch (immediate)
1439 *  31           25  24  23                  5   4  3    0
1440 * +---------------+----+---------------------+----+------+
1441 * | 0 1 0 1 0 1 0 | o1 |         imm19       | o0 | cond |
1442 * +---------------+----+---------------------+----+------+
1443 */
1444static void disas_cond_b_imm(DisasContext *s, uint32_t insn)
1445{
1446    unsigned int cond;
1447    uint64_t addr;
1448
1449    if ((insn & (1 << 4)) || (insn & (1 << 24))) {
1450        unallocated_encoding(s);
1451        return;
1452    }
1453    addr = s->pc_curr + sextract32(insn, 5, 19) * 4;
1454    cond = extract32(insn, 0, 4);
1455
1456    reset_btype(s);
1457    if (cond < 0x0e) {
1458        /* genuinely conditional branches */
1459        TCGLabel *label_match = gen_new_label();
1460        arm_gen_test_cc(cond, label_match);
1461        gen_goto_tb(s, 0, s->base.pc_next);
1462        gen_set_label(label_match);
1463        gen_goto_tb(s, 1, addr);
1464    } else {
1465        /* 0xe and 0xf are both "always" conditions */
1466        gen_goto_tb(s, 0, addr);
1467    }
1468}
1469
/*
 * HINT instruction group, including various allocated HINTs.
 *
 * selector == crm:op2 from the encoding.  Unhandled selectors are NOPs,
 * as the architecture specifies; the pointer-authentication hints are
 * likewise NOPs when PAuth is disabled.
 */
static void handle_hint(DisasContext *s, uint32_t insn,
                        unsigned int op1, unsigned int op2, unsigned int crm)
{
    unsigned int selector = crm << 3 | op2;

    if (op1 != 3) {
        unallocated_encoding(s);
        return;
    }

    switch (selector) {
    case 0b00000: /* NOP */
        break;
    case 0b00011: /* WFI */
        s->base.is_jmp = DISAS_WFI;
        break;
    case 0b00001: /* YIELD */
        /* When running in MTTCG we don't generate jumps to the yield and
         * WFE helpers as it won't affect the scheduling of other vCPUs.
         * If we wanted to more completely model WFE/SEV so we don't busy
         * spin unnecessarily we would need to do something more involved.
         */
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            s->base.is_jmp = DISAS_YIELD;
        }
        break;
    case 0b00010: /* WFE */
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            s->base.is_jmp = DISAS_WFE;
        }
        break;
    case 0b00100: /* SEV */
    case 0b00101: /* SEVL */
    case 0b00110: /* DGH */
        /* we treat all as NOP at least for now */
        break;
    case 0b00111: /* XPACLRI */
        if (s->pauth_active) {
            gen_helper_xpaci(cpu_X[30], cpu_env, cpu_X[30]);
        }
        break;
    case 0b01000: /* PACIA1716 */
        if (s->pauth_active) {
            gen_helper_pacia(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
        }
        break;
    case 0b01010: /* PACIB1716 */
        if (s->pauth_active) {
            gen_helper_pacib(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
        }
        break;
    case 0b01100: /* AUTIA1716 */
        if (s->pauth_active) {
            gen_helper_autia(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
        }
        break;
    case 0b01110: /* AUTIB1716 */
        if (s->pauth_active) {
            gen_helper_autib(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
        }
        break;
    case 0b10000: /* ESB */
        /* Without RAS, we must implement this as NOP. */
        if (dc_isar_feature(aa64_ras, s)) {
            /*
             * QEMU does not have a source of physical SErrors,
             * so we are only concerned with virtual SErrors.
             * The pseudocode in the ARM for this case is
             *   if PSTATE.EL IN {EL0, EL1} && EL2Enabled() then
             *      AArch64.vESBOperation();
             * Most of the condition can be evaluated at translation time.
             * Test for EL2 present, and defer test for SEL2 to runtime.
             */
            if (s->current_el <= 1 && arm_dc_feature(s, ARM_FEATURE_EL2)) {
                gen_helper_vesb(cpu_env);
            }
        }
        break;
    case 0b11000: /* PACIAZ */
        if (s->pauth_active) {
            gen_helper_pacia(cpu_X[30], cpu_env, cpu_X[30],
                                new_tmp_a64_zero(s));
        }
        break;
    case 0b11001: /* PACIASP */
        if (s->pauth_active) {
            gen_helper_pacia(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
        }
        break;
    case 0b11010: /* PACIBZ */
        if (s->pauth_active) {
            gen_helper_pacib(cpu_X[30], cpu_env, cpu_X[30],
                                new_tmp_a64_zero(s));
        }
        break;
    case 0b11011: /* PACIBSP */
        if (s->pauth_active) {
            gen_helper_pacib(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
        }
        break;
    case 0b11100: /* AUTIAZ */
        if (s->pauth_active) {
            gen_helper_autia(cpu_X[30], cpu_env, cpu_X[30],
                              new_tmp_a64_zero(s));
        }
        break;
    case 0b11101: /* AUTIASP */
        if (s->pauth_active) {
            gen_helper_autia(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
        }
        break;
    case 0b11110: /* AUTIBZ */
        if (s->pauth_active) {
            gen_helper_autib(cpu_X[30], cpu_env, cpu_X[30],
                              new_tmp_a64_zero(s));
        }
        break;
    case 0b11111: /* AUTIBSP */
        if (s->pauth_active) {
            gen_helper_autib(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
        }
        break;
    default:
        /* default specified as NOP equivalent */
        break;
    }
}
1598
/* CLREX: invalidate the local exclusive monitor */
static void gen_clrex(DisasContext *s, uint32_t insn)
{
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
1603
/*
 * CLREX, DSB, DMB, ISB and SB.
 * op2 selects the operation; crm carries the barrier domain/type.
 */
static void handle_sync(DisasContext *s, uint32_t insn,
                        unsigned int op1, unsigned int op2, unsigned int crm)
{
    TCGBar bar;

    if (op1 != 3) {
        unallocated_encoding(s);
        return;
    }

    switch (op2) {
    case 2: /* CLREX */
        gen_clrex(s, insn);
        return;
    case 4: /* DSB */
    case 5: /* DMB */
        /* DSB and DMB are both implemented as a TCG memory barrier;
         * crm<1:0> selects which accesses are ordered.
         */
        switch (crm & 3) {
        case 1: /* MBReqTypes_Reads */
            bar = TCG_BAR_SC | TCG_MO_LD_LD | TCG_MO_LD_ST;
            break;
        case 2: /* MBReqTypes_Writes */
            bar = TCG_BAR_SC | TCG_MO_ST_ST;
            break;
        default: /* MBReqTypes_All */
            bar = TCG_BAR_SC | TCG_MO_ALL;
            break;
        }
        tcg_gen_mb(bar);
        return;
    case 6: /* ISB */
        /* We need to break the TB after this insn to execute
         * self-modifying code correctly and also to take
         * any pending interrupts immediately.
         */
        reset_btype(s);
        gen_goto_tb(s, 0, s->base.pc_next);
        return;

    case 7: /* SB */
        if (crm != 0 || !dc_isar_feature(aa64_sb, s)) {
            goto do_unallocated;
        }
        /*
         * TODO: There is no speculation barrier opcode for TCG;
         * MB and end the TB instead.
         */
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        gen_goto_tb(s, 0, s->base.pc_next);
        return;

    default:
    do_unallocated:
        unallocated_encoding(s);
        return;
    }
}
1661
/*
 * XAFlag (FEAT_FlagM2): recompute NZCV as specified by the MSR XAFlag
 * operation.  All new flag values are derived from the incoming C and Z,
 * so Z is captured into a temporary first and CF is overwritten last.
 */
static void gen_xaflag(void)
{
    TCGv_i32 z = tcg_temp_new_i32();

    /* z = (ZF == 0), i.e. the architectural Z flag as 0/1 */
    tcg_gen_setcondi_i32(TCG_COND_EQ, z, cpu_ZF, 0);

    /*
     * (!C & !Z) << 31
     * (!(C | Z)) << 31
     * ~((C | Z) << 31)
     * ~-(C | Z)
     * (C | Z) - 1
     */
    tcg_gen_or_i32(cpu_NF, cpu_CF, z);
    tcg_gen_subi_i32(cpu_NF, cpu_NF, 1);

    /* !(Z & C) */
    tcg_gen_and_i32(cpu_ZF, z, cpu_CF);
    tcg_gen_xori_i32(cpu_ZF, cpu_ZF, 1);

    /* (!C & Z) << 31 -> -(Z & ~C) */
    tcg_gen_andc_i32(cpu_VF, z, cpu_CF);
    tcg_gen_neg_i32(cpu_VF, cpu_VF);

    /* C | Z */
    tcg_gen_or_i32(cpu_CF, cpu_CF, z);

    tcg_temp_free_i32(z);
}
1691
/*
 * AXFlag (FEAT_FlagM2): convert NZCV as specified by the MSR AXFlag
 * operation: C := C & !V, Z := Z | V, N := 0, V := 0.
 */
static void gen_axflag(void)
{
    tcg_gen_sari_i32(cpu_VF, cpu_VF, 31);         /* V ? -1 : 0 */
    tcg_gen_andc_i32(cpu_CF, cpu_CF, cpu_VF);     /* C & !V */

    /* !(Z | V) -> !(!ZF | V) -> ZF & !V -> ZF & ~VF */
    tcg_gen_andc_i32(cpu_ZF, cpu_ZF, cpu_VF);

    tcg_gen_movi_i32(cpu_NF, 0);
    tcg_gen_movi_i32(cpu_VF, 0);
}
1703
/*
 * MSR (immediate) - move immediate to processor state field.
 * op1:op2 selects the PSTATE field, crm is the 4-bit immediate.
 */
static void handle_msr_i(DisasContext *s, uint32_t insn,
                         unsigned int op1, unsigned int op2, unsigned int crm)
{
    int op = op1 << 3 | op2;

    /* End the TB by default, chaining is ok.  */
    s->base.is_jmp = DISAS_TOO_MANY;

    switch (op) {
    case 0x00: /* CFINV */
        if (crm != 0 || !dc_isar_feature(aa64_condm_4, s)) {
            goto do_unallocated;
        }
        tcg_gen_xori_i32(cpu_CF, cpu_CF, 1);
        /* Only flags change: no need to end the TB */
        s->base.is_jmp = DISAS_NEXT;
        break;

    case 0x01: /* XAFlag */
        if (crm != 0 || !dc_isar_feature(aa64_condm_5, s)) {
            goto do_unallocated;
        }
        gen_xaflag();
        s->base.is_jmp = DISAS_NEXT;
        break;

    case 0x02: /* AXFlag */
        if (crm != 0 || !dc_isar_feature(aa64_condm_5, s)) {
            goto do_unallocated;
        }
        gen_axflag();
        s->base.is_jmp = DISAS_NEXT;
        break;

    case 0x03: /* UAO */
        if (!dc_isar_feature(aa64_uao, s) || s->current_el == 0) {
            goto do_unallocated;
        }
        /* crm<0> is the value to set */
        if (crm & 1) {
            set_pstate_bits(PSTATE_UAO);
        } else {
            clear_pstate_bits(PSTATE_UAO);
        }
        gen_rebuild_hflags(s);
        break;

    case 0x04: /* PAN */
        if (!dc_isar_feature(aa64_pan, s) || s->current_el == 0) {
            goto do_unallocated;
        }
        if (crm & 1) {
            set_pstate_bits(PSTATE_PAN);
        } else {
            clear_pstate_bits(PSTATE_PAN);
        }
        gen_rebuild_hflags(s);
        break;

    case 0x05: /* SPSel */
        if (s->current_el == 0) {
            goto do_unallocated;
        }
        gen_helper_msr_i_spsel(cpu_env, tcg_constant_i32(crm & PSTATE_SP));
        break;

    case 0x19: /* SSBS */
        if (!dc_isar_feature(aa64_ssbs, s)) {
            goto do_unallocated;
        }
        if (crm & 1) {
            set_pstate_bits(PSTATE_SSBS);
        } else {
            clear_pstate_bits(PSTATE_SSBS);
        }
        /* Don't need to rebuild hflags since SSBS is a nop */
        break;

    case 0x1a: /* DIT */
        if (!dc_isar_feature(aa64_dit, s)) {
            goto do_unallocated;
        }
        if (crm & 1) {
            set_pstate_bits(PSTATE_DIT);
        } else {
            clear_pstate_bits(PSTATE_DIT);
        }
        /* There's no need to rebuild hflags because DIT is a nop */
        break;

    case 0x1e: /* DAIFSet */
        gen_helper_msr_i_daifset(cpu_env, tcg_constant_i32(crm));
        break;

    case 0x1f: /* DAIFClear */
        gen_helper_msr_i_daifclear(cpu_env, tcg_constant_i32(crm));
        /* For DAIFClear, exit the cpu loop to re-evaluate pending IRQs.  */
        s->base.is_jmp = DISAS_UPDATE_EXIT;
        break;

    case 0x1c: /* TCO */
        if (dc_isar_feature(aa64_mte, s)) {
            /* Full MTE is enabled -- set the TCO bit as directed. */
            if (crm & 1) {
                set_pstate_bits(PSTATE_TCO);
            } else {
                clear_pstate_bits(PSTATE_TCO);
            }
            gen_rebuild_hflags(s);
            /* Many factors, including TCO, go into MTE_ACTIVE. */
            s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
        } else if (dc_isar_feature(aa64_mte_insn_reg, s)) {
            /* Only "instructions accessible at EL0" -- PSTATE.TCO is WI.  */
            s->base.is_jmp = DISAS_NEXT;
        } else {
            goto do_unallocated;
        }
        break;

    case 0x1b: /* SVCR* */
        if (!dc_isar_feature(aa64_sme, s) || crm < 2 || crm > 7) {
            goto do_unallocated;
        }
        if (sme_access_check(s)) {
            /* crm<0> is the value; crm<1>/crm<2> select SM/ZA */
            bool i = crm & 1;
            bool changed = false;

            if ((crm & 2) && i != s->pstate_sm) {
                gen_helper_set_pstate_sm(cpu_env, tcg_constant_i32(i));
                changed = true;
            }
            if ((crm & 4) && i != s->pstate_za) {
                gen_helper_set_pstate_za(cpu_env, tcg_constant_i32(i));
                changed = true;
            }
            if (changed) {
                gen_rebuild_hflags(s);
            } else {
                s->base.is_jmp = DISAS_NEXT;
            }
        }
        break;

    default:
    do_unallocated:
        unallocated_encoding(s);
        return;
    }
}
1852
1853static void gen_get_nzcv(TCGv_i64 tcg_rt)
1854{
1855    TCGv_i32 tmp = tcg_temp_new_i32();
1856    TCGv_i32 nzcv = tcg_temp_new_i32();
1857
1858    /* build bit 31, N */
1859    tcg_gen_andi_i32(nzcv, cpu_NF, (1U << 31));
1860    /* build bit 30, Z */
1861    tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_ZF, 0);
1862    tcg_gen_deposit_i32(nzcv, nzcv, tmp, 30, 1);
1863    /* build bit 29, C */
1864    tcg_gen_deposit_i32(nzcv, nzcv, cpu_CF, 29, 1);
1865    /* build bit 28, V */
1866    tcg_gen_shri_i32(tmp, cpu_VF, 31);
1867    tcg_gen_deposit_i32(nzcv, nzcv, tmp, 28, 1);
1868    /* generate result */
1869    tcg_gen_extu_i32_i64(tcg_rt, nzcv);
1870
1871    tcg_temp_free_i32(nzcv);
1872    tcg_temp_free_i32(tmp);
1873}
1874
1875static void gen_set_nzcv(TCGv_i64 tcg_rt)
1876{
1877    TCGv_i32 nzcv = tcg_temp_new_i32();
1878
1879    /* take NZCV from R[t] */
1880    tcg_gen_extrl_i64_i32(nzcv, tcg_rt);
1881
1882    /* bit 31, N */
1883    tcg_gen_andi_i32(cpu_NF, nzcv, (1U << 31));
1884    /* bit 30, Z */
1885    tcg_gen_andi_i32(cpu_ZF, nzcv, (1 << 30));
1886    tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_ZF, cpu_ZF, 0);
1887    /* bit 29, C */
1888    tcg_gen_andi_i32(cpu_CF, nzcv, (1 << 29));
1889    tcg_gen_shri_i32(cpu_CF, cpu_CF, 29);
1890    /* bit 28, V */
1891    tcg_gen_andi_i32(cpu_VF, nzcv, (1 << 28));
1892    tcg_gen_shli_i32(cpu_VF, cpu_VF, 3);
1893    tcg_temp_free_i32(nzcv);
1894}
1895
1896static void gen_sysreg_undef(DisasContext *s, bool isread,
1897                             uint8_t op0, uint8_t op1, uint8_t op2,
1898                             uint8_t crn, uint8_t crm, uint8_t rt)
1899{
1900    /*
1901     * Generate code to emit an UNDEF with correct syndrome
1902     * information for a failed system register access.
1903     * This is EC_UNCATEGORIZED (ie a standard UNDEF) in most cases,
1904     * but if FEAT_IDST is implemented then read accesses to registers
1905     * in the feature ID space are reported with the EC_SYSTEMREGISTERTRAP
1906     * syndrome.
1907     */
1908    uint32_t syndrome;
1909
1910    if (isread && dc_isar_feature(aa64_ids, s) &&
1911        arm_cpreg_encoding_in_idspace(op0, op1, op2, crn, crm)) {
1912        syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
1913    } else {
1914        syndrome = syn_uncategorized();
1915    }
1916    gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syndrome);
1917}
1918
/* MRS - move from system register
 * MSR (register) - move to system register
 * SYS
 * SYSL
 * These are all essentially the same insn in 'read' and 'write'
 * versions, with varying op0 fields.
 */
static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
                       unsigned int op0, unsigned int op1, unsigned int op2,
                       unsigned int crn, unsigned int crm, unsigned int rt)
{
    const ARMCPRegInfo *ri;
    TCGv_i64 tcg_rt;

    /* Look the register up by its full AArch64 encoding. */
    ri = get_arm_cp_reginfo(s->cp_regs,
                            ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP,
                                               crn, crm, op0, op1, op2));

    if (!ri) {
        /* Unknown register; this might be a guest error or a QEMU
         * unimplemented feature.
         */
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch64 "
                      "system register op0:%d op1:%d crn:%d crm:%d op2:%d\n",
                      isread ? "read" : "write", op0, op1, crn, crm, op2);
        gen_sysreg_undef(s, isread, op0, op1, op2, crn, crm, rt);
        return;
    }

    /* Check access permissions (static, translate-time check) */
    if (!cp_access_ok(s->current_el, ri, isread)) {
        gen_sysreg_undef(s, isread, op0, op1, op2, crn, crm, rt);
        return;
    }

    if (ri->accessfn) {
        /* Emit code to perform further access permissions checks at
         * runtime; this may result in an exception.
         */
        uint32_t syndrome;

        syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
        gen_a64_set_pc_im(s->pc_curr);
        gen_helper_access_check_cp_reg(cpu_env,
                                       tcg_constant_ptr(ri),
                                       tcg_constant_i32(syndrome),
                                       tcg_constant_i32(isread));
    } else if (ri->type & ARM_CP_RAISES_EXC) {
        /*
         * The readfn or writefn might raise an exception;
         * synchronize the CPU state in case it does.
         */
        gen_a64_set_pc_im(s->pc_curr);
    }

    /* Handle special cases first */
    switch (ri->type & ARM_CP_SPECIAL_MASK) {
    case 0:
        /* Not special: fall through to the generic read/write path. */
        break;
    case ARM_CP_NOP:
        return;
    case ARM_CP_NZCV:
        /* NZCV is kept in the split-out flag variables, not a field. */
        tcg_rt = cpu_reg(s, rt);
        if (isread) {
            gen_get_nzcv(tcg_rt);
        } else {
            gen_set_nzcv(tcg_rt);
        }
        return;
    case ARM_CP_CURRENTEL:
        /* Reads as current EL value from pstate, which is
         * guaranteed to be constant by the tb flags.
         */
        tcg_rt = cpu_reg(s, rt);
        tcg_gen_movi_i64(tcg_rt, s->current_el << 2);
        return;
    case ARM_CP_DC_ZVA:
        /* Writes clear the aligned block of memory which rt points into. */
        if (s->mte_active[0]) {
            /* MTE active: run the tag check on the destination first. */
            int desc = 0;

            desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s));
            desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
            desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);

            tcg_rt = new_tmp_a64(s);
            gen_helper_mte_check_zva(tcg_rt, cpu_env,
                                     tcg_constant_i32(desc), cpu_reg(s, rt));
        } else {
            tcg_rt = clean_data_tbi(s, cpu_reg(s, rt));
        }
        gen_helper_dc_zva(cpu_env, tcg_rt);
        return;
    case ARM_CP_DC_GVA:
        {
            TCGv_i64 clean_addr, tag;

            /*
             * DC_GVA, like DC_ZVA, requires that we supply the original
             * pointer for an invalid page.  Probe that address first.
             */
            tcg_rt = cpu_reg(s, rt);
            clean_addr = clean_data_tbi(s, tcg_rt);
            gen_probe_access(s, clean_addr, MMU_DATA_STORE, MO_8);

            if (s->ata) {
                /* Extract the tag from the register to match STZGM.  */
                tag = tcg_temp_new_i64();
                tcg_gen_shri_i64(tag, tcg_rt, 56);
                gen_helper_stzgm_tags(cpu_env, clean_addr, tag);
                tcg_temp_free_i64(tag);
            }
        }
        return;
    case ARM_CP_DC_GZVA:
        {
            TCGv_i64 clean_addr, tag;

            /* For DC_GZVA, we can rely on DC_ZVA for the proper fault. */
            tcg_rt = cpu_reg(s, rt);
            clean_addr = clean_data_tbi(s, tcg_rt);
            gen_helper_dc_zva(cpu_env, clean_addr);

            if (s->ata) {
                /* Extract the tag from the register to match STZGM.  */
                tag = tcg_temp_new_i64();
                tcg_gen_shri_i64(tag, tcg_rt, 56);
                gen_helper_stzgm_tags(cpu_env, clean_addr, tag);
                tcg_temp_free_i64(tag);
            }
        }
        return;
    default:
        g_assert_not_reached();
    }
    /* Registers flagged FPU/SVE/SME require that unit to be enabled. */
    if ((ri->type & ARM_CP_FPU) && !fp_access_check_only(s)) {
        return;
    } else if ((ri->type & ARM_CP_SVE) && !sve_access_check(s)) {
        return;
    } else if ((ri->type & ARM_CP_SME) && !sme_access_check(s)) {
        return;
    }

    if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
        gen_io_start();
    }

    tcg_rt = cpu_reg(s, rt);

    if (isread) {
        if (ri->type & ARM_CP_CONST) {
            tcg_gen_movi_i64(tcg_rt, ri->resetvalue);
        } else if (ri->readfn) {
            gen_helper_get_cp_reg64(tcg_rt, cpu_env, tcg_constant_ptr(ri));
        } else {
            tcg_gen_ld_i64(tcg_rt, cpu_env, ri->fieldoffset);
        }
    } else {
        if (ri->type & ARM_CP_CONST) {
            /* If not forbidden by access permissions, treat as WI */
            return;
        } else if (ri->writefn) {
            gen_helper_set_cp_reg64(cpu_env, tcg_constant_ptr(ri), tcg_rt);
        } else {
            tcg_gen_st_i64(tcg_rt, cpu_env, ri->fieldoffset);
        }
    }

    if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
        /* I/O operations must end the TB here (whether read or write) */
        s->base.is_jmp = DISAS_UPDATE_EXIT;
    }
    if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
        /*
         * A write to any coprocessor register that ends a TB
         * must rebuild the hflags for the next TB.
         */
        gen_rebuild_hflags(s);
        /*
         * We default to ending the TB on a coprocessor register write,
         * but allow this to be suppressed by the register definition
         * (usually only necessary to work around guest bugs).
         */
        s->base.is_jmp = DISAS_UPDATE_EXIT;
    }
}
2105
2106/* System
2107 *  31                 22 21  20 19 18 16 15   12 11    8 7   5 4    0
2108 * +---------------------+---+-----+-----+-------+-------+-----+------+
2109 * | 1 1 0 1 0 1 0 1 0 0 | L | op0 | op1 |  CRn  |  CRm  | op2 |  Rt  |
2110 * +---------------------+---+-----+-----+-------+-------+-----+------+
2111 */
2112static void disas_system(DisasContext *s, uint32_t insn)
2113{
2114    unsigned int l, op0, op1, crn, crm, op2, rt;
2115    l = extract32(insn, 21, 1);
2116    op0 = extract32(insn, 19, 2);
2117    op1 = extract32(insn, 16, 3);
2118    crn = extract32(insn, 12, 4);
2119    crm = extract32(insn, 8, 4);
2120    op2 = extract32(insn, 5, 3);
2121    rt = extract32(insn, 0, 5);
2122
2123    if (op0 == 0) {
2124        if (l || rt != 31) {
2125            unallocated_encoding(s);
2126            return;
2127        }
2128        switch (crn) {
2129        case 2: /* HINT (including allocated hints like NOP, YIELD, etc) */
2130            handle_hint(s, insn, op1, op2, crm);
2131            break;
2132        case 3: /* CLREX, DSB, DMB, ISB */
2133            handle_sync(s, insn, op1, op2, crm);
2134            break;
2135        case 4: /* MSR (immediate) */
2136            handle_msr_i(s, insn, op1, op2, crm);
2137            break;
2138        default:
2139            unallocated_encoding(s);
2140            break;
2141        }
2142        return;
2143    }
2144    handle_sys(s, insn, l, op0, op1, op2, crn, crm, rt);
2145}
2146
/* Exception generation
 *
 *  31             24 23 21 20                     5 4   2 1  0
 * +-----------------+-----+------------------------+-----+----+
 * | 1 1 0 1 0 1 0 0 | opc |          imm16         | op2 | LL |
 * +-----------------------+------------------------+----------+
 */
static void disas_exc(DisasContext *s, uint32_t insn)
{
    int opc = extract32(insn, 21, 3);
    int op2_ll = extract32(insn, 0, 5);
    int imm16 = extract32(insn, 5, 16);

    switch (opc) {
    case 0:
        /* For SVC, HVC and SMC we advance the single-step state
         * machine before taking the exception. This is architecturally
         * mandated, to ensure that single-stepping a system call
         * instruction works properly.
         */
        switch (op2_ll) {
        case 1:                                                     /* SVC */
            gen_ss_advance(s);
            gen_exception_insn(s, s->base.pc_next, EXCP_SWI,
                               syn_aa64_svc(imm16));
            break;
        case 2:                                                     /* HVC */
            /* HVC is UNDEFINED at EL0 */
            if (s->current_el == 0) {
                unallocated_encoding(s);
                break;
            }
            /* The pre HVC helper handles cases when HVC gets trapped
             * as an undefined insn by runtime configuration.
             */
            gen_a64_set_pc_im(s->pc_curr);
            gen_helper_pre_hvc(cpu_env);
            gen_ss_advance(s);
            /* HVC always targets EL2 */
            gen_exception_insn_el(s, s->base.pc_next, EXCP_HVC,
                                  syn_aa64_hvc(imm16), 2);
            break;
        case 3:                                                     /* SMC */
            /* SMC is UNDEFINED at EL0 */
            if (s->current_el == 0) {
                unallocated_encoding(s);
                break;
            }
            /* The pre-SMC helper may also UNDEF or trap per configuration */
            gen_a64_set_pc_im(s->pc_curr);
            gen_helper_pre_smc(cpu_env, tcg_constant_i32(syn_aa64_smc(imm16)));
            gen_ss_advance(s);
            /* SMC always targets EL3 */
            gen_exception_insn_el(s, s->base.pc_next, EXCP_SMC,
                                  syn_aa64_smc(imm16), 3);
            break;
        default:
            unallocated_encoding(s);
            break;
        }
        break;
    case 1:
        if (op2_ll != 0) {
            unallocated_encoding(s);
            break;
        }
        /* BRK */
        gen_exception_bkpt_insn(s, syn_aa64_bkpt(imm16));
        break;
    case 2:
        if (op2_ll != 0) {
            unallocated_encoding(s);
            break;
        }
        /* HLT. This has two purposes.
         * Architecturally, it is an external halting debug instruction.
         * Since QEMU doesn't implement external debug, we treat this as
         * it is required for halting debug disabled: it will UNDEF.
         * Secondly, "HLT 0xf000" is the A64 semihosting syscall instruction.
         */
        if (semihosting_enabled() && imm16 == 0xf000) {
#ifndef CONFIG_USER_ONLY
            /* In system mode, don't allow userspace access to semihosting,
             * to provide some semblance of security (and for consistency
             * with our 32-bit semihosting).
             */
            if (s->current_el == 0) {
                unallocated_encoding(s);
                break;
            }
#endif
            gen_exception_internal_insn(s, s->pc_curr, EXCP_SEMIHOST);
        } else {
            unallocated_encoding(s);
        }
        break;
    case 5:
        if (op2_ll < 1 || op2_ll > 3) {
            unallocated_encoding(s);
            break;
        }
        /* DCPS1, DCPS2, DCPS3: not implemented, so they also UNDEF */
        unallocated_encoding(s);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
2251
/* Unconditional branch (register)
 *  31           25 24   21 20   16 15   10 9    5 4     0
 * +---------------+-------+-------+-------+------+-------+
 * | 1 1 0 1 0 1 1 |  opc  |  op2  |  op3  |  Rn  |  op4  |
 * +---------------+-------+-------+-------+------+-------+
 *
 * Covers BR/BLR/RET, their pointer-authenticated variants
 * (BRAA/BLRAA/RETAA etc), ERET/ERETAA/ERETAB and DRPS.
 */
static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
{
    unsigned int opc, op2, op3, rn, op4;
    /* Selects the BTYPE update at the end: 0: BR, 1: BLR, 2: other */
    unsigned btype_mod = 2;   /* 0: BR, 1: BLR, 2: other */
    TCGv_i64 dst;
    TCGv_i64 modifier;

    opc = extract32(insn, 21, 4);
    op2 = extract32(insn, 16, 5);
    op3 = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    op4 = extract32(insn, 0, 5);

    /* op2 must be all-ones for every insn in this group */
    if (op2 != 0x1f) {
        goto do_unallocated;
    }

    switch (opc) {
    case 0: /* BR */
    case 1: /* BLR */
    case 2: /* RET */
        btype_mod = opc;
        switch (op3) {
        case 0:
            /* BR, BLR, RET */
            if (op4 != 0) {
                goto do_unallocated;
            }
            dst = cpu_reg(s, rn);
            break;

        case 2:
        case 3:
            /* Pointer-auth variants; op3 bit 0 selects key A vs key B */
            if (!dc_isar_feature(aa64_pauth, s)) {
                goto do_unallocated;
            }
            if (opc == 2) {
                /* RETAA, RETAB: target is LR, modifier is SP */
                if (rn != 0x1f || op4 != 0x1f) {
                    goto do_unallocated;
                }
                rn = 30;
                modifier = cpu_X[31];
            } else {
                /* BRAAZ, BRABZ, BLRAAZ, BLRABZ: modifier is zero */
                if (op4 != 0x1f) {
                    goto do_unallocated;
                }
                modifier = new_tmp_a64_zero(s);
            }
            if (s->pauth_active) {
                /* Authenticate the target address before branching */
                dst = new_tmp_a64(s);
                if (op3 == 2) {
                    gen_helper_autia(dst, cpu_env, cpu_reg(s, rn), modifier);
                } else {
                    gen_helper_autib(dst, cpu_env, cpu_reg(s, rn), modifier);
                }
            } else {
                /* Auth disabled: the instructions behave like BR/BLR/RET */
                dst = cpu_reg(s, rn);
            }
            break;

        default:
            goto do_unallocated;
        }
        gen_a64_set_pc(s, dst);
        /* BLR also needs to load return address */
        if (opc == 1) {
            tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next);
        }
        break;

    case 8: /* BRAA */
    case 9: /* BLRAA */
        if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        /* op3 must be 2 or 3 (key A or key B) */
        if ((op3 & ~1) != 2) {
            goto do_unallocated;
        }
        btype_mod = opc & 1;
        if (s->pauth_active) {
            /* Modifier comes from the register or SP named by op4 */
            dst = new_tmp_a64(s);
            modifier = cpu_reg_sp(s, op4);
            if (op3 == 2) {
                gen_helper_autia(dst, cpu_env, cpu_reg(s, rn), modifier);
            } else {
                gen_helper_autib(dst, cpu_env, cpu_reg(s, rn), modifier);
            }
        } else {
            dst = cpu_reg(s, rn);
        }
        gen_a64_set_pc(s, dst);
        /* BLRAA also needs to load return address */
        if (opc == 9) {
            tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next);
        }
        break;

    case 4: /* ERET */
        /* ERET is UNDEFINED at EL0 */
        if (s->current_el == 0) {
            goto do_unallocated;
        }
        switch (op3) {
        case 0: /* ERET */
            if (op4 != 0) {
                goto do_unallocated;
            }
            /* Return address comes from ELR of the current EL */
            dst = tcg_temp_new_i64();
            tcg_gen_ld_i64(dst, cpu_env,
                           offsetof(CPUARMState, elr_el[s->current_el]));
            break;

        case 2: /* ERETAA */
        case 3: /* ERETAB */
            if (!dc_isar_feature(aa64_pauth, s)) {
                goto do_unallocated;
            }
            if (rn != 0x1f || op4 != 0x1f) {
                goto do_unallocated;
            }
            dst = tcg_temp_new_i64();
            tcg_gen_ld_i64(dst, cpu_env,
                           offsetof(CPUARMState, elr_el[s->current_el]));
            if (s->pauth_active) {
                /* Authenticate ELR in place, with SP as the modifier */
                modifier = cpu_X[31];
                if (op3 == 2) {
                    gen_helper_autia(dst, cpu_env, dst, modifier);
                } else {
                    gen_helper_autib(dst, cpu_env, dst, modifier);
                }
            }
            break;

        default:
            goto do_unallocated;
        }
        if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
            gen_io_start();
        }

        gen_helper_exception_return(cpu_env, dst);
        tcg_temp_free_i64(dst);
        /* Must exit loop to check un-masked IRQs */
        s->base.is_jmp = DISAS_EXIT;
        return;

    case 5: /* DRPS */
        /* External debug is not implemented, so DRPS always UNDEFs */
        if (op3 != 0 || op4 != 0 || rn != 0x1f) {
            goto do_unallocated;
        } else {
            unallocated_encoding(s);
        }
        return;

    default:
    do_unallocated:
        unallocated_encoding(s);
        return;
    }

    /* Set PSTATE.BTYPE for the branch just emitted (FEAT_BTI) */
    switch (btype_mod) {
    case 0: /* BR */
        if (dc_isar_feature(aa64_bti, s)) {
            /* BR to {x16,x17} or !guard -> 1, else 3.  */
            set_btype(s, rn == 16 || rn == 17 || !s->guarded_page ? 1 : 3);
        }
        break;

    case 1: /* BLR */
        if (dc_isar_feature(aa64_bti, s)) {
            /* BLR sets BTYPE to 2, regardless of source guarded page.  */
            set_btype(s, 2);
        }
        break;

    default: /* RET or none of the above.  */
        /* BTYPE will be set to 0 by normal end-of-insn processing.  */
        break;
    }

    s->base.is_jmp = DISAS_JUMP;
}
2441
2442/* Branches, exception generating and system instructions */
2443static void disas_b_exc_sys(DisasContext *s, uint32_t insn)
2444{
2445    switch (extract32(insn, 25, 7)) {
2446    case 0x0a: case 0x0b:
2447    case 0x4a: case 0x4b: /* Unconditional branch (immediate) */
2448        disas_uncond_b_imm(s, insn);
2449        break;
2450    case 0x1a: case 0x5a: /* Compare & branch (immediate) */
2451        disas_comp_b_imm(s, insn);
2452        break;
2453    case 0x1b: case 0x5b: /* Test & branch (immediate) */
2454        disas_test_b_imm(s, insn);
2455        break;
2456    case 0x2a: /* Conditional branch (immediate) */
2457        disas_cond_b_imm(s, insn);
2458        break;
2459    case 0x6a: /* Exception generation / System */
2460        if (insn & (1 << 24)) {
2461            if (extract32(insn, 22, 2) == 0) {
2462                disas_system(s, insn);
2463            } else {
2464                unallocated_encoding(s);
2465            }
2466        } else {
2467            disas_exc(s, insn);
2468        }
2469        break;
2470    case 0x6b: /* Unconditional branch (register) */
2471        disas_uncond_b_reg(s, insn);
2472        break;
2473    default:
2474        unallocated_encoding(s);
2475        break;
2476    }
2477}
2478
2479/*
2480 * Load/Store exclusive instructions are implemented by remembering
2481 * the value/address loaded, and seeing if these are the same
2482 * when the store is performed. This is not actually the architecturally
2483 * mandated semantics, but it works for typical guest code sequences
2484 * and avoids having to monitor regular stores.
2485 *
2486 * The store exclusive uses the atomic cmpxchg primitives to avoid
2487 * races in multi-threaded linux-user and when MTTCG softmmu is
2488 * enabled.
2489 */
2490static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
2491                               TCGv_i64 addr, int size, bool is_pair)
2492{
2493    int idx = get_mem_index(s);
2494    MemOp memop = s->be_data;
2495
2496    g_assert(size <= 3);
2497    if (is_pair) {
2498        g_assert(size >= 2);
2499        if (size == 2) {
2500            /* The pair must be single-copy atomic for the doubleword.  */
2501            memop |= MO_64 | MO_ALIGN;
2502            tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx, memop);
2503            if (s->be_data == MO_LE) {
2504                tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 0, 32);
2505                tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 32, 32);
2506            } else {
2507                tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 32, 32);
2508                tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 0, 32);
2509            }
2510        } else {
2511            /* The pair must be single-copy atomic for *each* doubleword, not
2512               the entire quadword, however it must be quadword aligned.  */
2513            memop |= MO_64;
2514            tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx,
2515                                memop | MO_ALIGN_16);
2516
2517            TCGv_i64 addr2 = tcg_temp_new_i64();
2518            tcg_gen_addi_i64(addr2, addr, 8);
2519            tcg_gen_qemu_ld_i64(cpu_exclusive_high, addr2, idx, memop);
2520            tcg_temp_free_i64(addr2);
2521
2522            tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val);
2523            tcg_gen_mov_i64(cpu_reg(s, rt2), cpu_exclusive_high);
2524        }
2525    } else {
2526        memop |= size | MO_ALIGN;
2527        tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx, memop);
2528        tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val);
2529    }
2530    tcg_gen_mov_i64(cpu_exclusive_addr, addr);
2531}
2532
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i64 addr, int size, int is_pair)
{
    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]
     *     && (!is_pair || env->exclusive_high == [addr + datasize])) {
     *     [addr] = {Rt};
     *     if (is_pair) {
     *         [addr + datasize] = {Rt2};
     *     }
     *     {Rd} = 0;
     * } else {
     *     {Rd} = 1;
     * }
     * env->exclusive_addr = -1;
     */
    TCGLabel *fail_label = gen_new_label();
    TCGLabel *done_label = gen_new_label();
    TCGv_i64 tmp;

    /* Fast fail if the address doesn't match the recorded exclusive. */
    tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);

    tmp = tcg_temp_new_i64();
    if (is_pair) {
        if (size == 2) {
            /* 32-bit pair: pack both registers into one 64-bit cmpxchg. */
            if (s->be_data == MO_LE) {
                tcg_gen_concat32_i64(tmp, cpu_reg(s, rt), cpu_reg(s, rt2));
            } else {
                tcg_gen_concat32_i64(tmp, cpu_reg(s, rt2), cpu_reg(s, rt));
            }
            tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr,
                                       cpu_exclusive_val, tmp,
                                       get_mem_index(s),
                                       MO_64 | MO_ALIGN | s->be_data);
            /* tmp = 0 on success (old value matched), 1 on failure */
            tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
        } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
            /* 64-bit pair with MTTCG: needs a 128-bit cmpxchg helper. */
            if (!HAVE_CMPXCHG128) {
                gen_helper_exit_atomic(cpu_env);
                /*
                 * Produce a result so we have a well-formed opcode
                 * stream when the following (dead) code uses 'tmp'.
                 * TCG will remove the dead ops for us.
                 */
                tcg_gen_movi_i64(tmp, 0);
            } else if (s->be_data == MO_LE) {
                gen_helper_paired_cmpxchg64_le_parallel(tmp, cpu_env,
                                                        cpu_exclusive_addr,
                                                        cpu_reg(s, rt),
                                                        cpu_reg(s, rt2));
            } else {
                gen_helper_paired_cmpxchg64_be_parallel(tmp, cpu_env,
                                                        cpu_exclusive_addr,
                                                        cpu_reg(s, rt),
                                                        cpu_reg(s, rt2));
            }
        } else if (s->be_data == MO_LE) {
            /* 64-bit pair, single-threaded: non-parallel helper suffices. */
            gen_helper_paired_cmpxchg64_le(tmp, cpu_env, cpu_exclusive_addr,
                                           cpu_reg(s, rt), cpu_reg(s, rt2));
        } else {
            gen_helper_paired_cmpxchg64_be(tmp, cpu_env, cpu_exclusive_addr,
                                           cpu_reg(s, rt), cpu_reg(s, rt2));
        }
    } else {
        /* Single register: one cmpxchg of the recorded value. */
        tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr, cpu_exclusive_val,
                                   cpu_reg(s, rt), get_mem_index(s),
                                   size | MO_ALIGN | s->be_data);
        tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
    }
    /* Status result: 0 = store succeeded, 1 = failed */
    tcg_gen_mov_i64(cpu_reg(s, rd), tmp);
    tcg_temp_free_i64(tmp);
    tcg_gen_br(done_label);

    gen_set_label(fail_label);
    tcg_gen_movi_i64(cpu_reg(s, rd), 1);
    gen_set_label(done_label);
    /* The exclusive monitor is always cleared, pass or fail. */
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
2609
2610static void gen_compare_and_swap(DisasContext *s, int rs, int rt,
2611                                 int rn, int size)
2612{
2613    TCGv_i64 tcg_rs = cpu_reg(s, rs);
2614    TCGv_i64 tcg_rt = cpu_reg(s, rt);
2615    int memidx = get_mem_index(s);
2616    TCGv_i64 clean_addr;
2617
2618    if (rn == 31) {
2619        gen_check_sp_alignment(s);
2620    }
2621    clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), true, rn != 31, size);
2622    tcg_gen_atomic_cmpxchg_i64(tcg_rs, clean_addr, tcg_rs, tcg_rt, memidx,
2623                               size | MO_ALIGN | s->be_data);
2624}
2625
static void gen_compare_and_swap_pair(DisasContext *s, int rs, int rt,
                                      int rn, int size)
{
    /*
     * CASP: atomically compare the pair {Rs,Rs+1} with the pair of
     * words at [Xn|SP] and, if equal, store {Rt,Rt+1}; the previous
     * memory contents are written back to {Rs,Rs+1} either way.
     * size is 2 (32-bit pair) or 3 (64-bit pair).
     */
    TCGv_i64 s1 = cpu_reg(s, rs);
    TCGv_i64 s2 = cpu_reg(s, rs + 1);
    TCGv_i64 t1 = cpu_reg(s, rt);
    TCGv_i64 t2 = cpu_reg(s, rt + 1);
    TCGv_i64 clean_addr;
    int memidx = get_mem_index(s);

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    /* This is a single atomic access, despite the "pair". */
    clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), true, rn != 31, size + 1);

    if (size == 2) {
        /* 32-bit pair: pack into one 64-bit value and cmpxchg once. */
        TCGv_i64 cmp = tcg_temp_new_i64();
        TCGv_i64 val = tcg_temp_new_i64();

        if (s->be_data == MO_LE) {
            tcg_gen_concat32_i64(val, t1, t2);
            tcg_gen_concat32_i64(cmp, s1, s2);
        } else {
            tcg_gen_concat32_i64(val, t2, t1);
            tcg_gen_concat32_i64(cmp, s2, s1);
        }

        tcg_gen_atomic_cmpxchg_i64(cmp, clean_addr, cmp, val, memidx,
                                   MO_64 | MO_ALIGN | s->be_data);
        tcg_temp_free_i64(val);

        /* Unpack the old memory value back into the two Rs registers. */
        if (s->be_data == MO_LE) {
            tcg_gen_extr32_i64(s1, s2, cmp);
        } else {
            tcg_gen_extr32_i64(s2, s1, cmp);
        }
        tcg_temp_free_i64(cmp);
    } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        /* 64-bit pair with MTTCG: needs a true 128-bit cmpxchg. */
        if (HAVE_CMPXCHG128) {
            TCGv_i32 tcg_rs = tcg_constant_i32(rs);
            if (s->be_data == MO_LE) {
                gen_helper_casp_le_parallel(cpu_env, tcg_rs,
                                            clean_addr, t1, t2);
            } else {
                gen_helper_casp_be_parallel(cpu_env, tcg_rs,
                                            clean_addr, t1, t2);
            }
        } else {
            /* No host 128-bit cmpxchg: retry the insn under the big lock. */
            gen_helper_exit_atomic(cpu_env);
            s->base.is_jmp = DISAS_NORETURN;
        }
    } else {
        /* 64-bit pair, single-threaded: open-coded load/compare/store. */
        TCGv_i64 d1 = tcg_temp_new_i64();
        TCGv_i64 d2 = tcg_temp_new_i64();
        TCGv_i64 a2 = tcg_temp_new_i64();
        TCGv_i64 c1 = tcg_temp_new_i64();
        TCGv_i64 c2 = tcg_temp_new_i64();
        TCGv_i64 zero = tcg_constant_i64(0);

        /* Load the two words, in memory order.  */
        tcg_gen_qemu_ld_i64(d1, clean_addr, memidx,
                            MO_64 | MO_ALIGN_16 | s->be_data);
        tcg_gen_addi_i64(a2, clean_addr, 8);
        tcg_gen_qemu_ld_i64(d2, a2, memidx, MO_64 | s->be_data);

        /* Compare the two words, also in memory order.  */
        tcg_gen_setcond_i64(TCG_COND_EQ, c1, d1, s1);
        tcg_gen_setcond_i64(TCG_COND_EQ, c2, d2, s2);
        tcg_gen_and_i64(c2, c2, c1);

        /* If compare equal, write back new data, else write back old data.  */
        tcg_gen_movcond_i64(TCG_COND_NE, c1, c2, zero, t1, d1);
        tcg_gen_movcond_i64(TCG_COND_NE, c2, c2, zero, t2, d2);
        tcg_gen_qemu_st_i64(c1, clean_addr, memidx, MO_64 | s->be_data);
        tcg_gen_qemu_st_i64(c2, a2, memidx, MO_64 | s->be_data);
        tcg_temp_free_i64(a2);
        tcg_temp_free_i64(c1);
        tcg_temp_free_i64(c2);

        /* Write back the data from memory to Rs.  */
        tcg_gen_mov_i64(s1, d1);
        tcg_gen_mov_i64(s2, d2);
        tcg_temp_free_i64(d1);
        tcg_temp_free_i64(d2);
    }
}
2714
/* Compute the ISS Sixty-Four bit (SF) flag: true when the transfer
 * register is a 64-bit X register.  This logic is derived from the
 * ARMv8 specs for LDR (shared decode for all encodings).
 */
static bool disas_ldst_compute_iss_sf(int size, bool is_signed, int opc)
{
    int regsize;

    if (is_signed) {
        /* Sign-extending loads: opc<0> clear selects a 64-bit destination. */
        regsize = (opc & 1) ? 32 : 64;
    } else {
        /* Zero-extending loads: only the doubleword form targets X regs. */
        regsize = (size == 3) ? 64 : 32;
    }
    return regsize == 64;
}
2730
2731/* Load/store exclusive
2732 *
2733 *  31 30 29         24  23  22   21  20  16  15  14   10 9    5 4    0
2734 * +-----+-------------+----+---+----+------+----+-------+------+------+
2735 * | sz  | 0 0 1 0 0 0 | o2 | L | o1 |  Rs  | o0 |  Rt2  |  Rn  | Rt   |
2736 * +-----+-------------+----+---+----+------+----+-------+------+------+
2737 *
2738 *  sz: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64 bit
2739 *   L: 0 -> store, 1 -> load
2740 *  o2: 0 -> exclusive, 1 -> not
2741 *  o1: 0 -> single register, 1 -> register pair
2742 *  o0: 1 -> load-acquire/store-release, 0 -> not
2743 */
static void disas_ldst_excl(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rt2 = extract32(insn, 10, 5);
    int rs = extract32(insn, 16, 5);
    int is_lasr = extract32(insn, 15, 1);
    /* Fold o2:L:o1 (insn<23:21>) and o0 (insn<15>) into one 4-bit selector. */
    int o2_L_o1_o0 = extract32(insn, 21, 3) * 2 | is_lasr;
    int size = extract32(insn, 30, 2);
    TCGv_i64 clean_addr;

    switch (o2_L_o1_o0) {
    case 0x0: /* STXR */
    case 0x1: /* STLXR */
        if (rn == 31) {
            gen_check_sp_alignment(s);
        }
        if (is_lasr) {
            /* Release semantics: barrier before the store. */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
        }
        clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
                                    true, rn != 31, size);
        gen_store_exclusive(s, rs, rt, rt2, clean_addr, size, false);
        return;

    case 0x4: /* LDXR */
    case 0x5: /* LDAXR */
        if (rn == 31) {
            gen_check_sp_alignment(s);
        }
        clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
                                    false, rn != 31, size);
        s->is_ldex = true;
        gen_load_exclusive(s, rt, rt2, clean_addr, size, false);
        if (is_lasr) {
            /* Acquire semantics: barrier after the load. */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
        }
        return;

    case 0x8: /* STLLR */
        if (!dc_isar_feature(aa64_lor, s)) {
            break;
        }
        /* StoreLORelease is the same as Store-Release for QEMU.  */
        /* fall through */
    case 0x9: /* STLR */
        /* Generate ISS for non-exclusive accesses including LASR.  */
        if (rn == 31) {
            gen_check_sp_alignment(s);
        }
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
        clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
                                    true, rn != 31, size);
        /* TODO: ARMv8.4-LSE SCTLR.nAA */
        do_gpr_st(s, cpu_reg(s, rt), clean_addr, size | MO_ALIGN, true, rt,
                  disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
        return;

    case 0xc: /* LDLAR */
        if (!dc_isar_feature(aa64_lor, s)) {
            break;
        }
        /* LoadLOAcquire is the same as Load-Acquire for QEMU.  */
        /* fall through */
    case 0xd: /* LDAR */
        /* Generate ISS for non-exclusive accesses including LASR.  */
        if (rn == 31) {
            gen_check_sp_alignment(s);
        }
        clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
                                    false, rn != 31, size);
        /* TODO: ARMv8.4-LSE SCTLR.nAA */
        do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size | MO_ALIGN, false, true,
                  rt, disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
        return;

    case 0x2: case 0x3: /* CASP / STXP */
        if (size & 2) { /* STXP / STLXP */
            if (rn == 31) {
                gen_check_sp_alignment(s);
            }
            if (is_lasr) {
                tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
            }
            clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
                                        true, rn != 31, size);
            gen_store_exclusive(s, rs, rt, rt2, clean_addr, size, true);
            return;
        }
        /* CASP requires FEAT_LSE, Rt2 == 31 and even-numbered Rs/Rt pairs. */
        if (rt2 == 31
            && ((rt | rs) & 1) == 0
            && dc_isar_feature(aa64_atomics, s)) {
            /* CASP / CASPL */
            gen_compare_and_swap_pair(s, rs, rt, rn, size | 2);
            return;
        }
        break;

    case 0x6: case 0x7: /* CASPA / LDXP */
        if (size & 2) { /* LDXP / LDAXP */
            if (rn == 31) {
                gen_check_sp_alignment(s);
            }
            clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
                                        false, rn != 31, size);
            s->is_ldex = true;
            gen_load_exclusive(s, rt, rt2, clean_addr, size, true);
            if (is_lasr) {
                tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
            }
            return;
        }
        /* CASPA requires FEAT_LSE, Rt2 == 31 and even-numbered Rs/Rt pairs. */
        if (rt2 == 31
            && ((rt | rs) & 1) == 0
            && dc_isar_feature(aa64_atomics, s)) {
            /* CASPA / CASPAL */
            gen_compare_and_swap_pair(s, rs, rt, rn, size | 2);
            return;
        }
        break;

    case 0xa: /* CAS */
    case 0xb: /* CASL */
    case 0xe: /* CASA */
    case 0xf: /* CASAL */
        if (rt2 == 31 && dc_isar_feature(aa64_atomics, s)) {
            gen_compare_and_swap(s, rs, rt, rn, size);
            return;
        }
        break;
    }
    /* Any combination not handled above is an unallocated encoding. */
    unallocated_encoding(s);
}
2878
2879/*
2880 * Load register (literal)
2881 *
2882 *  31 30 29   27  26 25 24 23                5 4     0
2883 * +-----+-------+---+-----+-------------------+-------+
2884 * | opc | 0 1 1 | V | 0 0 |     imm19         |  Rt   |
2885 * +-----+-------+---+-----+-------------------+-------+
2886 *
2887 * V: 1 -> vector (simd/fp)
2888 * opc (non-vector): 00 -> 32 bit, 01 -> 64 bit,
2889 *                   10-> 32 bit signed, 11 -> prefetch
2890 * opc (vector): 00 -> 32 bit, 01 -> 64 bit, 10 -> 128 bit (11 unallocated)
2891 */
2892static void disas_ld_lit(DisasContext *s, uint32_t insn)
2893{
2894    int rt = extract32(insn, 0, 5);
2895    int64_t imm = sextract32(insn, 5, 19) << 2;
2896    bool is_vector = extract32(insn, 26, 1);
2897    int opc = extract32(insn, 30, 2);
2898    bool is_signed = false;
2899    int size = 2;
2900    TCGv_i64 tcg_rt, clean_addr;
2901
2902    if (is_vector) {
2903        if (opc == 3) {
2904            unallocated_encoding(s);
2905            return;
2906        }
2907        size = 2 + opc;
2908        if (!fp_access_check(s)) {
2909            return;
2910        }
2911    } else {
2912        if (opc == 3) {
2913            /* PRFM (literal) : prefetch */
2914            return;
2915        }
2916        size = 2 + extract32(opc, 0, 1);
2917        is_signed = extract32(opc, 1, 1);
2918    }
2919
2920    tcg_rt = cpu_reg(s, rt);
2921
2922    clean_addr = tcg_constant_i64(s->pc_curr + imm);
2923    if (is_vector) {
2924        do_fp_ld(s, rt, clean_addr, size);
2925    } else {
2926        /* Only unsigned 32bit loads target 32bit registers.  */
2927        bool iss_sf = opc != 0;
2928
2929        do_gpr_ld(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN,
2930                  false, true, rt, iss_sf, false);
2931    }
2932}
2933
2934/*
2935 * LDNP (Load Pair - non-temporal hint)
2936 * LDP (Load Pair - non vector)
2937 * LDPSW (Load Pair Signed Word - non vector)
2938 * STNP (Store Pair - non-temporal hint)
2939 * STP (Store Pair - non vector)
2940 * LDNP (Load Pair of SIMD&FP - non-temporal hint)
2941 * LDP (Load Pair of SIMD&FP)
2942 * STNP (Store Pair of SIMD&FP - non-temporal hint)
2943 * STP (Store Pair of SIMD&FP)
2944 *
2945 *  31 30 29   27  26  25 24   23  22 21   15 14   10 9    5 4    0
2946 * +-----+-------+---+---+-------+---+-----------------------------+
2947 * | opc | 1 0 1 | V | 0 | index | L |  imm7 |  Rt2  |  Rn  | Rt   |
2948 * +-----+-------+---+---+-------+---+-------+-------+------+------+
2949 *
2950 * opc: LDP/STP/LDNP/STNP        00 -> 32 bit, 10 -> 64 bit
2951 *      LDPSW/STGP               01
2952 *      LDP/STP/LDNP/STNP (SIMD) 00 -> 32 bit, 01 -> 64 bit, 10 -> 128 bit
2953 *   V: 0 -> GPR, 1 -> Vector
2954 * idx: 00 -> signed offset with non-temporal hint, 01 -> post-index,
2955 *      10 -> signed offset, 11 -> pre-index
2956 *   L: 0 -> Store 1 -> Load
2957 *
2958 * Rt, Rt2 = GPR or SIMD registers to be stored
2959 * Rn = general purpose register containing address
2960 * imm7 = signed offset (multiple of 4 or 8 depending on size)
2961 */
static void disas_ldst_pair(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rt2 = extract32(insn, 10, 5);
    uint64_t offset = sextract64(insn, 15, 7);
    int index = extract32(insn, 23, 2);
    bool is_vector = extract32(insn, 26, 1);
    bool is_load = extract32(insn, 22, 1);
    int opc = extract32(insn, 30, 2);

    bool is_signed = false;
    bool postindex = false;
    bool wback = false;
    bool set_tag = false;   /* true only for STGP */

    TCGv_i64 clean_addr, dirty_addr;

    int size;

    if (opc == 3) {
        unallocated_encoding(s);
        return;
    }

    if (is_vector) {
        size = 2 + opc;
    } else if (opc == 1 && !is_load) {
        /* STGP */
        if (!dc_isar_feature(aa64_mte_insn_reg, s) || index == 0) {
            unallocated_encoding(s);
            return;
        }
        size = 3;
        set_tag = true;
    } else {
        size = 2 + extract32(opc, 1, 1);
        is_signed = extract32(opc, 0, 1);
        if (!is_load && is_signed) {
            /* There is no signed store-pair variant. */
            unallocated_encoding(s);
            return;
        }
    }

    switch (index) {
    case 1: /* post-index */
        postindex = true;
        wback = true;
        break;
    case 0:
        /* signed offset with "non-temporal" hint. Since we don't emulate
         * caches we don't care about hints to the cache system about
         * data access patterns, and handle this identically to plain
         * signed offset.
         */
        if (is_signed) {
            /* There is no non-temporal-hint version of LDPSW */
            unallocated_encoding(s);
            return;
        }
        postindex = false;
        break;
    case 2: /* signed offset, rn not updated */
        postindex = false;
        break;
    case 3: /* pre-index */
        postindex = false;
        wback = true;
        break;
    }

    if (is_vector && !fp_access_check(s)) {
        return;
    }

    /* imm7 is scaled by the element size (by the tag granule for STGP). */
    offset <<= (set_tag ? LOG2_TAG_GRANULE : size);

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    dirty_addr = read_cpu_reg_sp(s, rn, 1);
    if (!postindex) {
        tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
    }

    if (set_tag) {
        /* STGP also stores the allocation tag for the address. */
        if (!s->ata) {
            /*
             * TODO: We could rely on the stores below, at least for
             * system mode, if we arrange to add MO_ALIGN_16.
             */
            gen_helper_stg_stub(cpu_env, dirty_addr);
        } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
            gen_helper_stg_parallel(cpu_env, dirty_addr, dirty_addr);
        } else {
            gen_helper_stg(cpu_env, dirty_addr, dirty_addr);
        }
    }

    /* A single MTE check covers both registers of the pair. */
    clean_addr = gen_mte_checkN(s, dirty_addr, !is_load,
                                (wback || rn != 31) && !set_tag, 2 << size);

    if (is_vector) {
        if (is_load) {
            do_fp_ld(s, rt, clean_addr, size);
        } else {
            do_fp_st(s, rt, clean_addr, size);
        }
        tcg_gen_addi_i64(clean_addr, clean_addr, 1 << size);
        if (is_load) {
            do_fp_ld(s, rt2, clean_addr, size);
        } else {
            do_fp_st(s, rt2, clean_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        TCGv_i64 tcg_rt2 = cpu_reg(s, rt2);

        if (is_load) {
            TCGv_i64 tmp = tcg_temp_new_i64();

            /* Do not modify tcg_rt before recognizing any exception
             * from the second load.
             */
            do_gpr_ld(s, tmp, clean_addr, size + is_signed * MO_SIGN,
                      false, false, 0, false, false);
            tcg_gen_addi_i64(clean_addr, clean_addr, 1 << size);
            do_gpr_ld(s, tcg_rt2, clean_addr, size + is_signed * MO_SIGN,
                      false, false, 0, false, false);

            tcg_gen_mov_i64(tcg_rt, tmp);
            tcg_temp_free_i64(tmp);
        } else {
            do_gpr_st(s, tcg_rt, clean_addr, size,
                      false, 0, false, false);
            tcg_gen_addi_i64(clean_addr, clean_addr, 1 << size);
            do_gpr_st(s, tcg_rt2, clean_addr, size,
                      false, 0, false, false);
        }
    }

    if (wback) {
        /* Base register writeback for pre/post-indexed forms. */
        if (postindex) {
            tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
        }
        tcg_gen_mov_i64(cpu_reg_sp(s, rn), dirty_addr);
    }
}
3111
3112/*
3113 * Load/store (immediate post-indexed)
3114 * Load/store (immediate pre-indexed)
3115 * Load/store (unscaled immediate)
3116 *
3117 * 31 30 29   27  26 25 24 23 22 21  20    12 11 10 9    5 4    0
3118 * +----+-------+---+-----+-----+---+--------+-----+------+------+
3119 * |size| 1 1 1 | V | 0 0 | opc | 0 |  imm9  | idx |  Rn  |  Rt  |
3120 * +----+-------+---+-----+-----+---+--------+-----+------+------+
3121 *
3122 * idx = 01 -> post-indexed, 11 pre-indexed, 00 unscaled imm. (no writeback)
 *       10 -> unprivileged
3124 * V = 0 -> non-vector
3125 * size: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64bit
3126 * opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
3127 */
static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
                                int opc,
                                int size,
                                int rt,
                                bool is_vector)
{
    int rn = extract32(insn, 5, 5);
    int imm9 = sextract32(insn, 12, 9);
    int idx = extract32(insn, 10, 2);
    bool is_signed = false;
    bool is_store = false;
    bool is_extended = false;
    bool is_unpriv = (idx == 2);    /* unprivileged (LDTR/STTR) forms */
    bool iss_valid;
    bool post_index;
    bool writeback;
    int memidx;

    TCGv_i64 clean_addr, dirty_addr;

    if (is_vector) {
        /* For SIMD/FP, opc<1> supplies the high bit of the size. */
        size |= (opc & 2) << 1;
        if (size > 4 || is_unpriv) {
            unallocated_encoding(s);
            return;
        }
        is_store = ((opc & 1) == 0);
        if (!fp_access_check(s)) {
            return;
        }
    } else {
        if (size == 3 && opc == 2) {
            /* PRFM - prefetch */
            if (idx != 0) {
                unallocated_encoding(s);
                return;
            }
            return;
        }
        if (opc == 3 && size > 1) {
            unallocated_encoding(s);
            return;
        }
        is_store = (opc == 0);
        is_signed = extract32(opc, 1, 1);
        is_extended = (size < 3) && extract32(opc, 0, 1);
    }

    switch (idx) {
    case 0:
    case 2:
        post_index = false;
        writeback = false;
        break;
    case 1:
        post_index = true;
        writeback = true;
        break;
    case 3:
        post_index = false;
        writeback = true;
        break;
    default:
        g_assert_not_reached();
    }

    /* ISS information is only generated for non-vector, non-writeback forms. */
    iss_valid = !is_vector && !writeback;

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    dirty_addr = read_cpu_reg_sp(s, rn, 1);
    if (!post_index) {
        tcg_gen_addi_i64(dirty_addr, dirty_addr, imm9);
    }

    /* Unprivileged accesses use a separate (user) mmu index. */
    memidx = is_unpriv ? get_a64_user_mem_index(s) : get_mem_index(s);
    clean_addr = gen_mte_check1_mmuidx(s, dirty_addr, is_store,
                                       writeback || rn != 31,
                                       size, is_unpriv, memidx);

    if (is_vector) {
        if (is_store) {
            do_fp_st(s, rt, clean_addr, size);
        } else {
            do_fp_ld(s, rt, clean_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);

        if (is_store) {
            do_gpr_st_memidx(s, tcg_rt, clean_addr, size, memidx,
                             iss_valid, rt, iss_sf, false);
        } else {
            do_gpr_ld_memidx(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN,
                             is_extended, memidx,
                             iss_valid, rt, iss_sf, false);
        }
    }

    if (writeback) {
        /* Base register writeback; post-index adds the offset afterwards. */
        TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
        if (post_index) {
            tcg_gen_addi_i64(dirty_addr, dirty_addr, imm9);
        }
        tcg_gen_mov_i64(tcg_rn, dirty_addr);
    }
}
3238
3239/*
3240 * Load/store (register offset)
3241 *
3242 * 31 30 29   27  26 25 24 23 22 21  20  16 15 13 12 11 10 9  5 4  0
3243 * +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+
3244 * |size| 1 1 1 | V | 0 0 | opc | 1 |  Rm  | opt | S| 1 0 | Rn | Rt |
3245 * +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+
3246 *
3247 * For non-vector:
3248 *   size: 00-> byte, 01 -> 16 bit, 10 -> 32bit, 11 -> 64bit
3249 *   opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
3250 * For vector:
3251 *   size is opc<1>:size<1:0> so 100 -> 128 bit; 110 and 111 unallocated
3252 *   opc<0>: 0 -> store, 1 -> load
3253 * V: 1 -> vector/simd
3254 * opt: extend encoding (see DecodeRegExtend)
3255 * S: if S=1 then scale (essentially index by sizeof(size))
3256 * Rt: register to transfer into/out of
3257 * Rn: address register or SP for base
3258 * Rm: offset register or ZR for offset
3259 */
3260static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn,
3261                                   int opc,
3262                                   int size,
3263                                   int rt,
3264                                   bool is_vector)
3265{
3266    int rn = extract32(insn, 5, 5);
3267    int shift = extract32(insn, 12, 1);
3268    int rm = extract32(insn, 16, 5);
3269    int opt = extract32(insn, 13, 3);
3270    bool is_signed = false;
3271    bool is_store = false;
3272    bool is_extended = false;
3273
3274    TCGv_i64 tcg_rm, clean_addr, dirty_addr;
3275
3276    if (extract32(opt, 1, 1) == 0) {
3277        unallocated_encoding(s);
3278        return;
3279    }
3280
3281    if (is_vector) {
3282        size |= (opc & 2) << 1;
3283        if (size > 4) {
3284            unallocated_encoding(s);
3285            return;
3286        }
3287        is_store = !extract32(opc, 0, 1);
3288        if (!fp_access_check(s)) {
3289            return;
3290        }
3291    } else {
3292        if (size == 3 && opc == 2) {
3293            /* PRFM - prefetch */
3294            return;
3295        }
3296        if (opc == 3 && size > 1) {
3297            unallocated_encoding(s);
3298            return;
3299        }
3300        is_store = (opc == 0);
3301        is_signed = extract32(opc, 1, 1);
3302        is_extended = (size < 3) && extract32(opc, 0, 1);
3303    }
3304
3305    if (rn == 31) {
3306        gen_check_sp_alignment(s);
3307    }
3308    dirty_addr = read_cpu_reg_sp(s, rn, 1);
3309
3310    tcg_rm = read_cpu_reg(s, rm, 1);
3311    ext_and_shift_reg(tcg_rm, tcg_rm, opt, shift ? size : 0);
3312
3313    tcg_gen_add_i64(dirty_addr, dirty_addr, tcg_rm);
3314    clean_addr = gen_mte_check1(s, dirty_addr, is_store, true, size);
3315
3316    if (is_vector) {
3317        if (is_store) {
3318            do_fp_st(s, rt, clean_addr, size);
3319        } else {
3320            do_fp_ld(s, rt, clean_addr, size);
3321        }
3322    } else {
3323        TCGv_i64 tcg_rt = cpu_reg(s, rt);
3324        bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
3325        if (is_store) {
3326            do_gpr_st(s, tcg_rt, clean_addr, size,
3327                      true, rt, iss_sf, false);
3328        } else {
3329            do_gpr_ld(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN,
3330                      is_extended, true, rt, iss_sf, false);
3331        }
3332    }
3333}
3334
3335/*
3336 * Load/store (unsigned immediate)
3337 *
3338 * 31 30 29   27  26 25 24 23 22 21        10 9     5
3339 * +----+-------+---+-----+-----+------------+-------+------+
3340 * |size| 1 1 1 | V | 0 1 | opc |   imm12    |  Rn   |  Rt  |
3341 * +----+-------+---+-----+-----+------------+-------+------+
3342 *
3343 * For non-vector:
3344 *   size: 00-> byte, 01 -> 16 bit, 10 -> 32bit, 11 -> 64bit
3345 *   opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
3346 * For vector:
3347 *   size is opc<1>:size<1:0> so 100 -> 128 bit; 110 and 111 unallocated
3348 *   opc<0>: 0 -> store, 1 -> load
3349 * Rn: base address register (inc SP)
3350 * Rt: target register
3351 */
3352static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn,
3353                                        int opc,
3354                                        int size,
3355                                        int rt,
3356                                        bool is_vector)
3357{
3358    int rn = extract32(insn, 5, 5);
3359    unsigned int imm12 = extract32(insn, 10, 12);
3360    unsigned int offset;
3361
3362    TCGv_i64 clean_addr, dirty_addr;
3363
3364    bool is_store;
3365    bool is_signed = false;
3366    bool is_extended = false;
3367
3368    if (is_vector) {
3369        size |= (opc & 2) << 1;
3370        if (size > 4) {
3371            unallocated_encoding(s);
3372            return;
3373        }
3374        is_store = !extract32(opc, 0, 1);
3375        if (!fp_access_check(s)) {
3376            return;
3377        }
3378    } else {
3379        if (size == 3 && opc == 2) {
3380            /* PRFM - prefetch */
3381            return;
3382        }
3383        if (opc == 3 && size > 1) {
3384            unallocated_encoding(s);
3385            return;
3386        }
3387        is_store = (opc == 0);
3388        is_signed = extract32(opc, 1, 1);
3389        is_extended = (size < 3) && extract32(opc, 0, 1);
3390    }
3391
3392    if (rn == 31) {
3393        gen_check_sp_alignment(s);
3394    }
3395    dirty_addr = read_cpu_reg_sp(s, rn, 1);
3396    offset = imm12 << size;
3397    tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
3398    clean_addr = gen_mte_check1(s, dirty_addr, is_store, rn != 31, size);
3399
3400    if (is_vector) {
3401        if (is_store) {
3402            do_fp_st(s, rt, clean_addr, size);
3403        } else {
3404            do_fp_ld(s, rt, clean_addr, size);
3405        }
3406    } else {
3407        TCGv_i64 tcg_rt = cpu_reg(s, rt);
3408        bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
3409        if (is_store) {
3410            do_gpr_st(s, tcg_rt, clean_addr, size,
3411                      true, rt, iss_sf, false);
3412        } else {
3413            do_gpr_ld(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN,
3414                      is_extended, true, rt, iss_sf, false);
3415        }
3416    }
3417}
3418
3419/* Atomic memory operations
3420 *
3421 *  31  30      27  26    24    22  21   16   15    12    10    5     0
3422 * +------+-------+---+-----+-----+---+----+----+-----+-----+----+-----+
3423 * | size | 1 1 1 | V | 0 0 | A R | 1 | Rs | o3 | opc | 0 0 | Rn |  Rt |
3424 * +------+-------+---+-----+-----+--------+----+-----+-----+----+-----+
3425 *
3426 * Rt: the result register
3427 * Rn: base address or SP
3428 * Rs: the source register for the operation
3429 * V: vector flag (always 0 as of v8.3)
3430 * A: acquire flag
3431 * R: release flag
3432 */
static void disas_ldst_atomic(DisasContext *s, uint32_t insn,
                              int size, int rt, bool is_vector)
{
    int rs = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int o3_opc = extract32(insn, 12, 4);
    bool r = extract32(insn, 22, 1);
    bool a = extract32(insn, 23, 1);
    TCGv_i64 tcg_rs, tcg_rt, clean_addr;
    AtomicThreeOpFn *fn = NULL;
    MemOp mop = s->be_data | size | MO_ALIGN;

    if (is_vector || !dc_isar_feature(aa64_atomics, s)) {
        unallocated_encoding(s);
        return;
    }
    /* NOTE: the o3_opc case labels below are octal literals. */
    switch (o3_opc) {
    case 000: /* LDADD */
        fn = tcg_gen_atomic_fetch_add_i64;
        break;
    case 001: /* LDCLR */
        fn = tcg_gen_atomic_fetch_and_i64;
        break;
    case 002: /* LDEOR */
        fn = tcg_gen_atomic_fetch_xor_i64;
        break;
    case 003: /* LDSET */
        fn = tcg_gen_atomic_fetch_or_i64;
        break;
    case 004: /* LDSMAX */
        fn = tcg_gen_atomic_fetch_smax_i64;
        mop |= MO_SIGN;
        break;
    case 005: /* LDSMIN */
        fn = tcg_gen_atomic_fetch_smin_i64;
        mop |= MO_SIGN;
        break;
    case 006: /* LDUMAX */
        fn = tcg_gen_atomic_fetch_umax_i64;
        break;
    case 007: /* LDUMIN */
        fn = tcg_gen_atomic_fetch_umin_i64;
        break;
    case 010: /* SWP */
        fn = tcg_gen_atomic_xchg_i64;
        break;
    case 014: /* LDAPR, LDAPRH, LDAPRB */
        if (!dc_isar_feature(aa64_rcpc_8_3, s) ||
            rs != 31 || a != 1 || r != 0) {
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), false, rn != 31, size);

    if (o3_opc == 014) {
        /*
         * LDAPR* are a special case because they are a simple load, not a
         * fetch-and-do-something op.
         * The architectural consistency requirements here are weaker than
         * full load-acquire (we only need "load-acquire processor consistent"),
         * but we choose to implement them as full LDAQ.
         */
        do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, false,
                  true, rt, disas_ldst_compute_iss_sf(size, false, 0), true);
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
        return;
    }

    tcg_rs = read_cpu_reg(s, rs, true);
    tcg_rt = cpu_reg(s, rt);

    if (o3_opc == 1) { /* LDCLR */
        /* Bit-clear is implemented as an atomic AND with ~Rs. */
        tcg_gen_not_i64(tcg_rs, tcg_rs);
    }

    /* The tcg atomic primitives are all full barriers.  Therefore we
     * can ignore the Acquire and Release bits of this instruction.
     */
    fn(tcg_rt, clean_addr, tcg_rs, get_mem_index(s), mop);

    if ((mop & MO_SIGN) && size != MO_64) {
        /* The MO_SIGN fetch sign-extended into the 64-bit temp; clear the
         * high half so the sub-64-bit result is zero-extended into Xt.
         */
        tcg_gen_ext32u_i64(tcg_rt, tcg_rt);
    }
}
3526
3527/*
3528 * PAC memory operations
3529 *
3530 *  31  30      27  26    24    22  21       12  11  10    5     0
3531 * +------+-------+---+-----+-----+---+--------+---+---+----+-----+
3532 * | size | 1 1 1 | V | 0 0 | M S | 1 |  imm9  | W | 1 | Rn |  Rt |
3533 * +------+-------+---+-----+-----+---+--------+---+---+----+-----+
3534 *
3535 * Rt: the result register
3536 * Rn: base address or SP
3537 * V: vector flag (always 0 as of v8.3)
3538 * M: clear for key DA, set for key DB
3539 * W: pre-indexing flag
3540 * S: sign for imm9.
3541 */
static void disas_ldst_pac(DisasContext *s, uint32_t insn,
                           int size, int rt, bool is_vector)
{
    int rn = extract32(insn, 5, 5);
    bool is_wback = extract32(insn, 11, 1);
    bool use_key_a = !extract32(insn, 23, 1);
    int offset;
    TCGv_i64 clean_addr, dirty_addr, tcg_rt;

    /* Only 64-bit, non-vector forms exist, and they require FEAT_PAuth. */
    if (size != 3 || is_vector || !dc_isar_feature(aa64_pauth, s)) {
        unallocated_encoding(s);
        return;
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    dirty_addr = read_cpu_reg_sp(s, rn, 1);

    if (s->pauth_active) {
        /* Authenticate the base address with key DA or DB, zero modifier. */
        if (use_key_a) {
            gen_helper_autda(dirty_addr, cpu_env, dirty_addr,
                             new_tmp_a64_zero(s));
        } else {
            gen_helper_autdb(dirty_addr, cpu_env, dirty_addr,
                             new_tmp_a64_zero(s));
        }
    }

    /* Form the 10-bit signed, scaled offset.  */
    offset = (extract32(insn, 22, 1) << 9) | extract32(insn, 12, 9);
    offset = sextract32(offset << size, 0, 10 + size);
    tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);

    /* Note that "clean" and "dirty" here refer to TBI not PAC.  */
    clean_addr = gen_mte_check1(s, dirty_addr, false,
                                is_wback || rn != 31, size);

    tcg_rt = cpu_reg(s, rt);
    do_gpr_ld(s, tcg_rt, clean_addr, size,
              /* extend */ false, /* iss_valid */ !is_wback,
              /* iss_srt */ rt, /* iss_sf */ true, /* iss_ar */ false);

    if (is_wback) {
        /* Pre-indexed writeback of the (authenticated) base address. */
        tcg_gen_mov_i64(cpu_reg_sp(s, rn), dirty_addr);
    }
}
3589
3590/*
3591 * LDAPR/STLR (unscaled immediate)
3592 *
3593 *  31  30            24    22  21       12    10    5     0
3594 * +------+-------------+-----+---+--------+-----+----+-----+
3595 * | size | 0 1 1 0 0 1 | opc | 0 |  imm9  | 0 0 | Rn |  Rt |
3596 * +------+-------------+-----+---+--------+-----+----+-----+
3597 *
3598 * Rt: source or destination register
3599 * Rn: base register
3600 * imm9: unscaled immediate offset
3601 * opc: 00: STLUR*, 01/10/11: various LDAPUR*
3602 * size: size of load/store
3603 */
static void disas_ldst_ldapr_stlr(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int offset = sextract32(insn, 12, 9);
    int opc = extract32(insn, 22, 2);
    int size = extract32(insn, 30, 2);
    TCGv_i64 clean_addr, dirty_addr;
    bool is_store = false;
    bool extend = false;
    bool iss_sf;
    MemOp mop;

    /* These encodings were introduced with FEAT_LRCPC2 (v8.4).  */
    if (!dc_isar_feature(aa64_rcpc_8_4, s)) {
        unallocated_encoding(s);
        return;
    }

    /* TODO: ARMv8.4-LSE SCTLR.nAA */
    mop = size | MO_ALIGN;

    switch (opc) {
    case 0: /* STLURB */
        is_store = true;
        break;
    case 1: /* LDAPUR* */
        break;
    case 2: /* LDAPURS* 64-bit variant */
        if (size == 3) {
            /* A 64-bit signed load of a 64-bit value has nothing to
             * sign-extend into; the encoding is reserved.
             */
            unallocated_encoding(s);
            return;
        }
        mop |= MO_SIGN;
        break;
    case 3: /* LDAPURS* 32-bit variant */
        if (size > 1) {
            unallocated_encoding(s);
            return;
        }
        mop |= MO_SIGN;
        extend = true; /* zero-extend 32->64 after signed load */
        break;
    default:
        g_assert_not_reached();
    }

    /* Syndrome sf flag for any ISS-reporting data abort.  */
    iss_sf = disas_ldst_compute_iss_sf(size, (mop & MO_SIGN) != 0, opc);

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    dirty_addr = read_cpu_reg_sp(s, rn, 1);
    tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
    clean_addr = clean_data_tbi(s, dirty_addr);

    if (is_store) {
        /* Store-Release semantics */
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
        do_gpr_st(s, cpu_reg(s, rt), clean_addr, mop, true, rt, iss_sf, true);
    } else {
        /*
         * Load-AcquirePC semantics; we implement as the slightly more
         * restrictive Load-Acquire.
         */
        do_gpr_ld(s, cpu_reg(s, rt), clean_addr, mop,
                  extend, true, rt, iss_sf, true);
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
    }
}
3674
3675/* Load/store register (all forms) */
3676static void disas_ldst_reg(DisasContext *s, uint32_t insn)
3677{
3678    int rt = extract32(insn, 0, 5);
3679    int opc = extract32(insn, 22, 2);
3680    bool is_vector = extract32(insn, 26, 1);
3681    int size = extract32(insn, 30, 2);
3682
3683    switch (extract32(insn, 24, 2)) {
3684    case 0:
3685        if (extract32(insn, 21, 1) == 0) {
3686            /* Load/store register (unscaled immediate)
3687             * Load/store immediate pre/post-indexed
3688             * Load/store register unprivileged
3689             */
3690            disas_ldst_reg_imm9(s, insn, opc, size, rt, is_vector);
3691            return;
3692        }
3693        switch (extract32(insn, 10, 2)) {
3694        case 0:
3695            disas_ldst_atomic(s, insn, size, rt, is_vector);
3696            return;
3697        case 2:
3698            disas_ldst_reg_roffset(s, insn, opc, size, rt, is_vector);
3699            return;
3700        default:
3701            disas_ldst_pac(s, insn, size, rt, is_vector);
3702            return;
3703        }
3704        break;
3705    case 1:
3706        disas_ldst_reg_unsigned_imm(s, insn, opc, size, rt, is_vector);
3707        return;
3708    }
3709    unallocated_encoding(s);
3710}
3711
3712/* AdvSIMD load/store multiple structures
3713 *
3714 *  31  30  29           23 22  21         16 15    12 11  10 9    5 4    0
3715 * +---+---+---------------+---+-------------+--------+------+------+------+
3716 * | 0 | Q | 0 0 1 1 0 0 0 | L | 0 0 0 0 0 0 | opcode | size |  Rn  |  Rt  |
3717 * +---+---+---------------+---+-------------+--------+------+------+------+
3718 *
3719 * AdvSIMD load/store multiple structures (post-indexed)
3720 *
3721 *  31  30  29           23 22  21  20     16 15    12 11  10 9    5 4    0
3722 * +---+---+---------------+---+---+---------+--------+------+------+------+
3723 * | 0 | Q | 0 0 1 1 0 0 1 | L | 0 |   Rm    | opcode | size |  Rn  |  Rt  |
3724 * +---+---+---------------+---+---+---------+--------+------+------+------+
3725 *
3726 * Rt: first (or only) SIMD&FP register to be transferred
3727 * Rn: base address or SP
3728 * Rm (post-index only): post-index register (when !31) or size dependent #imm
3729 */
static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 10, 2);
    int opcode = extract32(insn, 12, 4);
    bool is_store = !extract32(insn, 22, 1);
    bool is_postidx = extract32(insn, 23, 1);
    bool is_q = extract32(insn, 30, 1);
    TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
    MemOp endian, align, mop;

    int total;    /* total bytes */
    int elements; /* elements per vector */
    int rpt;    /* num iterations */
    int selem;  /* structure elements */
    int r;

    if (extract32(insn, 31, 1) || extract32(insn, 21, 1)) {
        unallocated_encoding(s);
        return;
    }

    /* In the non-post-index form, bits [20:16] (Rm) must be zero.  */
    if (!is_postidx && rm != 0) {
        unallocated_encoding(s);
        return;
    }

    /* From the shared decode logic */
    switch (opcode) {
    case 0x0: /* LD4/ST4 */
        rpt = 1;
        selem = 4;
        break;
    case 0x2: /* LD1/ST1, 4 registers */
        rpt = 4;
        selem = 1;
        break;
    case 0x4: /* LD3/ST3 */
        rpt = 1;
        selem = 3;
        break;
    case 0x6: /* LD1/ST1, 3 registers */
        rpt = 3;
        selem = 1;
        break;
    case 0x7: /* LD1/ST1, 1 register */
        rpt = 1;
        selem = 1;
        break;
    case 0x8: /* LD2/ST2 */
        rpt = 1;
        selem = 2;
        break;
    case 0xa: /* LD1/ST1, 2 registers */
        rpt = 2;
        selem = 1;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (size == 3 && !is_q && selem != 1) {
        /* reserved */
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    /* For our purposes, bytes are always little-endian.  */
    endian = s->be_data;
    if (size == 0) {
        endian = MO_LE;
    }

    total = rpt * selem * (is_q ? 16 : 8);
    tcg_rn = cpu_reg_sp(s, rn);

    /*
     * Issue the MTE check vs the logical repeat count, before we
     * promote consecutive little-endian elements below.
     */
    clean_addr = gen_mte_checkN(s, tcg_rn, is_store, is_postidx || rn != 31,
                                total);

    /*
     * Consecutive little-endian elements from a single register
     * can be promoted to a larger little-endian operation.
     */
    align = MO_ALIGN;
    if (selem == 1 && endian == MO_LE) {
        align = pow2_align(size);
        size = 3;
    }
    if (!s->align_mem) {
        align = 0;
    }
    mop = endian | size | align;

    /* Emit one access per element, advancing the address as we go.  */
    elements = (is_q ? 16 : 8) >> size;
    tcg_ebytes = tcg_constant_i64(1 << size);
    for (r = 0; r < rpt; r++) {
        int e;
        for (e = 0; e < elements; e++) {
            int xs;
            for (xs = 0; xs < selem; xs++) {
                int tt = (rt + r + xs) % 32;
                if (is_store) {
                    do_vec_st(s, tt, e, clean_addr, mop);
                } else {
                    do_vec_ld(s, tt, e, clean_addr, mop);
                }
                tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
            }
        }
    }

    if (!is_store) {
        /* For non-quad operations, setting a slice of the low
         * 64 bits of the register clears the high 64 bits (in
         * the ARM ARM pseudocode this is implicit in the fact
         * that 'rval' is a 64 bit wide variable).
         * For quad operations, we might still need to zero the
         * high bits of SVE.
         */
        for (r = 0; r < rpt * selem; r++) {
            int tt = (rt + r) % 32;
            clear_vec_high(s, is_q, tt);
        }
    }

    if (is_postidx) {
        /* Rm == 31 selects the size-dependent immediate post-increment.  */
        if (rm == 31) {
            tcg_gen_addi_i64(tcg_rn, tcg_rn, total);
        } else {
            tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
        }
    }
}
3878
3879/* AdvSIMD load/store single structure
3880 *
3881 *  31  30  29           23 22 21 20       16 15 13 12  11  10 9    5 4    0
3882 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
3883 * | 0 | Q | 0 0 1 1 0 1 0 | L R | 0 0 0 0 0 | opc | S | size |  Rn  |  Rt  |
3884 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
3885 *
3886 * AdvSIMD load/store single structure (post-indexed)
3887 *
3888 *  31  30  29           23 22 21 20       16 15 13 12  11  10 9    5 4    0
3889 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
3890 * | 0 | Q | 0 0 1 1 0 1 1 | L R |     Rm    | opc | S | size |  Rn  |  Rt  |
3891 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
3892 *
3893 * Rt: first (or only) SIMD&FP register to be transferred
3894 * Rn: base address or SP
3895 * Rm (post-index only): post-index register (when !31) or size dependent #imm
3896 * index = encoded in Q:S:size dependent on size
3897 *
3898 * lane_size = encoded in R, opc
3899 * transfer width = encoded in opc, S, size
3900 */
static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 10, 2);
    int S = extract32(insn, 12, 1);
    int opc = extract32(insn, 13, 3);
    int R = extract32(insn, 21, 1);
    int is_load = extract32(insn, 22, 1);
    int is_postidx = extract32(insn, 23, 1);
    int is_q = extract32(insn, 30, 1);

    int scale = extract32(opc, 1, 2);
    int selem = (extract32(opc, 0, 1) << 1 | R) + 1;
    bool replicate = false;
    int index = is_q << 3 | S << 2 | size;
    int xs, total;
    TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
    MemOp mop;

    if (extract32(insn, 31, 1)) {
        unallocated_encoding(s);
        return;
    }
    /* In the non-post-index form, bits [20:16] (Rm) must be zero.  */
    if (!is_postidx && rm != 0) {
        unallocated_encoding(s);
        return;
    }

    /* Validate opc/S/size and narrow @index to the element size
     * (@scale is log2 of the element size in bytes).
     */
    switch (scale) {
    case 3:
        /* LD1R..LD4R: load and replicate; loads only, S must be 0.  */
        if (!is_load || S) {
            unallocated_encoding(s);
            return;
        }
        scale = size;
        replicate = true;
        break;
    case 0:
        /* Byte element: index is all of Q:S:size.  */
        break;
    case 1:
        /* Halfword element: size<0> must be 0.  */
        if (extract32(size, 0, 1)) {
            unallocated_encoding(s);
            return;
        }
        index >>= 1;
        break;
    case 2:
        /* Word element (size<0> == 0) or doubleword (size == 01, S == 0). */
        if (extract32(size, 1, 1)) {
            unallocated_encoding(s);
            return;
        }
        if (!extract32(size, 0, 1)) {
            index >>= 2;
        } else {
            if (S) {
                unallocated_encoding(s);
                return;
            }
            index >>= 3;
            scale = 3;
        }
        break;
    default:
        g_assert_not_reached();
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    /* Total transfer size: one element per structure register.  */
    total = selem << scale;
    tcg_rn = cpu_reg_sp(s, rn);

    clean_addr = gen_mte_checkN(s, tcg_rn, !is_load, is_postidx || rn != 31,
                                total);
    mop = finalize_memop(s, scale);

    tcg_ebytes = tcg_constant_i64(1 << scale);
    for (xs = 0; xs < selem; xs++) {
        if (replicate) {
            /* Load and replicate to all elements */
            TCGv_i64 tcg_tmp = tcg_temp_new_i64();

            tcg_gen_qemu_ld_i64(tcg_tmp, clean_addr, get_mem_index(s), mop);
            tcg_gen_gvec_dup_i64(scale, vec_full_reg_offset(s, rt),
                                 (is_q + 1) * 8, vec_full_reg_size(s),
                                 tcg_tmp);
            tcg_temp_free_i64(tcg_tmp);
        } else {
            /* Load/store one element per register */
            if (is_load) {
                do_vec_ld(s, rt, index, clean_addr, mop);
            } else {
                do_vec_st(s, rt, index, clean_addr, mop);
            }
        }
        tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
        rt = (rt + 1) % 32;
    }

    if (is_postidx) {
        /* Rm == 31 selects the size-dependent immediate post-increment.  */
        if (rm == 31) {
            tcg_gen_addi_i64(tcg_rn, tcg_rn, total);
        } else {
            tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
        }
    }
}
4015
4016/*
4017 * Load/Store memory tags
4018 *
4019 *  31 30 29         24     22  21     12    10      5      0
4020 * +-----+-------------+-----+---+------+-----+------+------+
4021 * | 1 1 | 0 1 1 0 0 1 | op1 | 1 | imm9 | op2 |  Rn  |  Rt  |
4022 * +-----+-------------+-----+---+------+-----+------+------+
4023 */
static void disas_ldst_tag(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    uint64_t offset = sextract64(insn, 12, 9) << LOG2_TAG_GRANULE;
    int op2 = extract32(insn, 10, 2);
    int op1 = extract32(insn, 22, 2);
    bool is_load = false, is_pair = false, is_zero = false, is_mult = false;
    int index = 0;
    TCGv_i64 addr, clean_addr, tcg_rt;

    /* We checked insn bits [29:24,21] in the caller.  */
    if (extract32(insn, 30, 2) != 3) {
        goto do_unallocated;
    }

    /*
     * @index is a tri-state variable which has 3 states:
     * < 0 : post-index, writeback
     * = 0 : signed offset
     * > 0 : pre-index, writeback
     */
    switch (op1) {
    case 0:
        if (op2 != 0) {
            /* STG */
            index = op2 - 2;
        } else {
            /* STZGM */
            if (s->current_el == 0 || offset != 0) {
                goto do_unallocated;
            }
            is_mult = is_zero = true;
        }
        break;
    case 1:
        if (op2 != 0) {
            /* STZG */
            is_zero = true;
            index = op2 - 2;
        } else {
            /* LDG */
            is_load = true;
        }
        break;
    case 2:
        if (op2 != 0) {
            /* ST2G */
            is_pair = true;
            index = op2 - 2;
        } else {
            /* STGM */
            if (s->current_el == 0 || offset != 0) {
                goto do_unallocated;
            }
            is_mult = true;
        }
        break;
    case 3:
        if (op2 != 0) {
            /* STZ2G */
            is_pair = is_zero = true;
            index = op2 - 2;
        } else {
            /* LDGM */
            if (s->current_el == 0 || offset != 0) {
                goto do_unallocated;
            }
            is_mult = is_load = true;
        }
        break;

    default:
    do_unallocated:
        unallocated_encoding(s);
        return;
    }

    /* The bulk insns (LDGM/STGM/STZGM) require full MTE;
     * the others need only the insn-only subset.
     */
    if (is_mult
        ? !dc_isar_feature(aa64_mte, s)
        : !dc_isar_feature(aa64_mte_insn_reg, s)) {
        goto do_unallocated;
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    addr = read_cpu_reg_sp(s, rn, true);
    if (index >= 0) {
        /* pre-index or signed offset */
        tcg_gen_addi_i64(addr, addr, offset);
    }

    if (is_mult) {
        tcg_rt = cpu_reg(s, rt);

        if (is_zero) {
            int size = 4 << s->dcz_blocksize;

            if (s->ata) {
                gen_helper_stzgm_tags(cpu_env, addr, tcg_rt);
            }
            /*
             * The non-tags portion of STZGM is mostly like DC_ZVA,
             * except the alignment happens before the access.
             */
            clean_addr = clean_data_tbi(s, addr);
            tcg_gen_andi_i64(clean_addr, clean_addr, -size);
            gen_helper_dc_zva(cpu_env, clean_addr);
        } else if (s->ata) {
            /* Tag access enabled: real bulk tag load/store helpers.  */
            if (is_load) {
                gen_helper_ldgm(tcg_rt, cpu_env, addr);
            } else {
                gen_helper_stgm(cpu_env, addr, tcg_rt);
            }
        } else {
            /* Tag access disabled: just probe the memory for faults.  */
            MMUAccessType acc = is_load ? MMU_DATA_LOAD : MMU_DATA_STORE;
            int size = 4 << GMID_EL1_BS;

            clean_addr = clean_data_tbi(s, addr);
            tcg_gen_andi_i64(clean_addr, clean_addr, -size);
            gen_probe_access(s, clean_addr, acc, size);

            if (is_load) {
                /* The result tags are zeros.  */
                tcg_gen_movi_i64(tcg_rt, 0);
            }
        }
        return;
    }

    if (is_load) {
        /* LDG: read the allocation tag for the granule-aligned address.  */
        tcg_gen_andi_i64(addr, addr, -TAG_GRANULE);
        tcg_rt = cpu_reg(s, rt);
        if (s->ata) {
            gen_helper_ldg(tcg_rt, cpu_env, addr, tcg_rt);
        } else {
            clean_addr = clean_data_tbi(s, addr);
            gen_probe_access(s, clean_addr, MMU_DATA_LOAD, MO_8);
            gen_address_with_allocation_tag0(tcg_rt, addr);
        }
    } else {
        tcg_rt = cpu_reg_sp(s, rt);
        if (!s->ata) {
            /*
             * For STG and ST2G, we need to check alignment and probe memory.
             * TODO: For STZG and STZ2G, we could rely on the stores below,
             * at least for system mode; user-only won't enforce alignment.
             */
            if (is_pair) {
                gen_helper_st2g_stub(cpu_env, addr);
            } else {
                gen_helper_stg_stub(cpu_env, addr);
            }
        } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
            if (is_pair) {
                gen_helper_st2g_parallel(cpu_env, addr, tcg_rt);
            } else {
                gen_helper_stg_parallel(cpu_env, addr, tcg_rt);
            }
        } else {
            if (is_pair) {
                gen_helper_st2g(cpu_env, addr, tcg_rt);
            } else {
                gen_helper_stg(cpu_env, addr, tcg_rt);
            }
        }
    }

    /* STZG/STZ2G also zero the data of the granule(s).  */
    if (is_zero) {
        TCGv_i64 clean_addr = clean_data_tbi(s, addr);
        TCGv_i64 tcg_zero = tcg_constant_i64(0);
        int mem_index = get_mem_index(s);
        int i, n = (1 + is_pair) << LOG2_TAG_GRANULE;

        tcg_gen_qemu_st_i64(tcg_zero, clean_addr, mem_index,
                            MO_UQ | MO_ALIGN_16);
        for (i = 8; i < n; i += 8) {
            tcg_gen_addi_i64(clean_addr, clean_addr, 8);
            tcg_gen_qemu_st_i64(tcg_zero, clean_addr, mem_index, MO_UQ);
        }
    }

    if (index != 0) {
        /* pre-index or post-index */
        if (index < 0) {
            /* post-index */
            tcg_gen_addi_i64(addr, addr, offset);
        }
        tcg_gen_mov_i64(cpu_reg_sp(s, rn), addr);
    }
}
4217
4218/* Loads and stores */
4219static void disas_ldst(DisasContext *s, uint32_t insn)
4220{
4221    switch (extract32(insn, 24, 6)) {
4222    case 0x08: /* Load/store exclusive */
4223        disas_ldst_excl(s, insn);
4224        break;
4225    case 0x18: case 0x1c: /* Load register (literal) */
4226        disas_ld_lit(s, insn);
4227        break;
4228    case 0x28: case 0x29:
4229    case 0x2c: case 0x2d: /* Load/store pair (all forms) */
4230        disas_ldst_pair(s, insn);
4231        break;
4232    case 0x38: case 0x39:
4233    case 0x3c: case 0x3d: /* Load/store register (all forms) */
4234        disas_ldst_reg(s, insn);
4235        break;
4236    case 0x0c: /* AdvSIMD load/store multiple structures */
4237        disas_ldst_multiple_struct(s, insn);
4238        break;
4239    case 0x0d: /* AdvSIMD load/store single structure */
4240        disas_ldst_single_struct(s, insn);
4241        break;
4242    case 0x19:
4243        if (extract32(insn, 21, 1) != 0) {
4244            disas_ldst_tag(s, insn);
4245        } else if (extract32(insn, 10, 2) == 0) {
4246            disas_ldst_ldapr_stlr(s, insn);
4247        } else {
4248            unallocated_encoding(s);
4249        }
4250        break;
4251    default:
4252        unallocated_encoding(s);
4253        break;
4254    }
4255}
4256
4257/* PC-rel. addressing
4258 *   31  30   29 28       24 23                5 4    0
4259 * +----+-------+-----------+-------------------+------+
4260 * | op | immlo | 1 0 0 0 0 |       immhi       |  Rd  |
4261 * +----+-------+-----------+-------------------+------+
4262 */
4263static void disas_pc_rel_adr(DisasContext *s, uint32_t insn)
4264{
4265    unsigned int page, rd;
4266    uint64_t base;
4267    uint64_t offset;
4268
4269    page = extract32(insn, 31, 1);
4270    /* SignExtend(immhi:immlo) -> offset */
4271    offset = sextract64(insn, 5, 19);
4272    offset = offset << 2 | extract32(insn, 29, 2);
4273    rd = extract32(insn, 0, 5);
4274    base = s->pc_curr;
4275
4276    if (page) {
4277        /* ADRP (page based) */
4278        base &= ~0xfff;
4279        offset <<= 12;
4280    }
4281
4282    tcg_gen_movi_i64(cpu_reg(s, rd), base + offset);
4283}
4284
4285/*
4286 * Add/subtract (immediate)
4287 *
4288 *  31 30 29 28         23 22 21         10 9   5 4   0
4289 * +--+--+--+-------------+--+-------------+-----+-----+
4290 * |sf|op| S| 1 0 0 0 1 0 |sh|    imm12    |  Rn | Rd  |
4291 * +--+--+--+-------------+--+-------------+-----+-----+
4292 *
4293 *    sf: 0 -> 32bit, 1 -> 64bit
4294 *    op: 0 -> add  , 1 -> sub
4295 *     S: 1 -> set flags
4296 *    sh: 1 -> LSL imm by 12
4297 */
4298static void disas_add_sub_imm(DisasContext *s, uint32_t insn)
4299{
4300    int rd = extract32(insn, 0, 5);
4301    int rn = extract32(insn, 5, 5);
4302    uint64_t imm = extract32(insn, 10, 12);
4303    bool shift = extract32(insn, 22, 1);
4304    bool setflags = extract32(insn, 29, 1);
4305    bool sub_op = extract32(insn, 30, 1);
4306    bool is_64bit = extract32(insn, 31, 1);
4307
4308    TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
4309    TCGv_i64 tcg_rd = setflags ? cpu_reg(s, rd) : cpu_reg_sp(s, rd);
4310    TCGv_i64 tcg_result;
4311
4312    if (shift) {
4313        imm <<= 12;
4314    }
4315
4316    tcg_result = tcg_temp_new_i64();
4317    if (!setflags) {
4318        if (sub_op) {
4319            tcg_gen_subi_i64(tcg_result, tcg_rn, imm);
4320        } else {
4321            tcg_gen_addi_i64(tcg_result, tcg_rn, imm);
4322        }
4323    } else {
4324        TCGv_i64 tcg_imm = tcg_constant_i64(imm);
4325        if (sub_op) {
4326            gen_sub_CC(is_64bit, tcg_result, tcg_rn, tcg_imm);
4327        } else {
4328            gen_add_CC(is_64bit, tcg_result, tcg_rn, tcg_imm);
4329        }
4330    }
4331
4332    if (is_64bit) {
4333        tcg_gen_mov_i64(tcg_rd, tcg_result);
4334    } else {
4335        tcg_gen_ext32u_i64(tcg_rd, tcg_result);
4336    }
4337
4338    tcg_temp_free_i64(tcg_result);
4339}
4340
4341/*
4342 * Add/subtract (immediate, with tags)
4343 *
4344 *  31 30 29 28         23 22 21     16 14      10 9   5 4   0
4345 * +--+--+--+-------------+--+---------+--+-------+-----+-----+
4346 * |sf|op| S| 1 0 0 0 1 1 |o2|  uimm6  |o3| uimm4 |  Rn | Rd  |
4347 * +--+--+--+-------------+--+---------+--+-------+-----+-----+
4348 *
4349 *    op: 0 -> add, 1 -> sub
4350 */
static void disas_add_sub_imm_with_tags(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int uimm4 = extract32(insn, 10, 4);
    int uimm6 = extract32(insn, 16, 6);
    bool sub_op = extract32(insn, 30, 1);
    TCGv_i64 tcg_rn, tcg_rd;
    int imm;

    /* Test all of sf=1, S=0, o2=0, o3=0.  */
    if ((insn & 0xa040c000u) != 0x80000000u ||
        !dc_isar_feature(aa64_mte_insn_reg, s)) {
        unallocated_encoding(s);
        return;
    }

    /* The address offset is uimm6 scaled by the tag granule.  */
    imm = uimm6 << LOG2_TAG_GRANULE;
    if (sub_op) {
        imm = -imm;
    }

    tcg_rn = cpu_reg_sp(s, rn);
    tcg_rd = cpu_reg_sp(s, rd);

    if (s->ata) {
        /* ADDG/SUBG: adjust the address and pick a new allocation tag.  */
        gen_helper_addsubg(tcg_rd, cpu_env, tcg_rn,
                           tcg_constant_i32(imm),
                           tcg_constant_i32(uimm4));
    } else {
        /* Tags not enabled: do the arithmetic and force tag zero.  */
        tcg_gen_addi_i64(tcg_rd, tcg_rn, imm);
        gen_address_with_allocation_tag0(tcg_rd, tcg_rd);
    }
}
4385
/*
 * Replicate the low @e bits of @mask (higher bits must already be
 * zero) across every e-bit element of a 64-bit value and return it.
 */
static uint64_t bitfield_replicate(uint64_t mask, unsigned int e)
{
    uint64_t out = mask;

    assert(e != 0);
    for (; e < 64; e *= 2) {
        out |= out << e;
    }
    return out;
}
4399
/* Return a 64-bit value with the low @length bits set (0 < length <= 64). */
static inline uint64_t bitmask64(unsigned int length)
{
    assert(length >= 1 && length <= 64);
    return length == 64 ? UINT64_MAX : (1ULL << length) - 1;
}
4406
/* Simplified variant of pseudocode DecodeBitMasks() for the case where we
 * only require the wmask. Returns false if the imms/immr/immn are a reserved
 * value (ie should cause a guest UNDEF exception), and true if they are
 * valid, in which case the decoded bit pattern is written to result.
 */
bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn,
                            unsigned int imms, unsigned int immr)
{
    unsigned int combined, len, esize, levels, setbits, rot, w;
    uint64_t elem;

    assert(immn < 2 && imms < 64 && immr < 64);

    /*
     * The encoded immediate describes a 64-bit value built from identical
     * elements of esize = 2, 4, 8, 16, 32 or 64 bits.  Each element holds
     * a run of (setbits + 1) ones, rotated right by (immr % esize) bits.
     *
     * immn:NOT(imms) encodes the element size: the position of its most
     * significant set bit is log2(esize), and the bits of imms below that
     * position give the run length minus one:
     * 64 bit elements: immn = 1, imms = <length of run - 1>
     * 32 bit elements: immn = 0, imms = 0 : <length of run - 1>
     * 16 bit elements: immn = 0, imms = 10 : <length of run - 1>
     *  8 bit elements: immn = 0, imms = 110 : <length of run - 1>
     *  4 bit elements: immn = 0, imms = 1110 : <length of run - 1>
     *  2 bit elements: immn = 0, imms = 11110 : <length of run - 1>
     * immn = 0 with imms = 11111x is not covered above and is reserved,
     * as is a run of esize ones (<length of run - 1> all-ones).
     */
    combined = (immn << 6) | (~imms & 0x3f);
    if (combined < 2) {
        /* Reserved: immn == 0 && imms == 0b11111x.  */
        return false;
    }

    /* len = index of the highest set bit = log2(esize).  */
    len = 0;
    while (combined >> (len + 1)) {
        len++;
    }
    esize = 1u << len;

    levels = esize - 1;
    setbits = imms & levels;
    rot = immr & levels;

    if (setbits == levels) {
        /* Reserved: a run of esize ones.  */
        return false;
    }

    /* One element: (setbits + 1) ones; setbits + 1 <= 63 here.  */
    elem = (1ULL << (setbits + 1)) - 1;

    /* ...rotated right by rot within the esize-bit element...  */
    if (rot) {
        uint64_t elem_mask = esize == 64 ? ~0ULL : (1ULL << esize) - 1;
        elem = (elem >> rot) | (elem << (esize - rot));
        elem &= elem_mask;
    }

    /* ...then replicated over the whole 64 bit value.  */
    for (w = esize; w < 64; w *= 2) {
        elem |= elem << w;
    }

    *result = elem;
    return true;
}
4472
4473/* Logical (immediate)
4474 *   31  30 29 28         23 22  21  16 15  10 9    5 4    0
4475 * +----+-----+-------------+---+------+------+------+------+
4476 * | sf | opc | 1 0 0 1 0 0 | N | immr | imms |  Rn  |  Rd  |
4477 * +----+-----+-------------+---+------+------+------+------+
4478 */
4479static void disas_logic_imm(DisasContext *s, uint32_t insn)
4480{
4481    unsigned int sf, opc, is_n, immr, imms, rn, rd;
4482    TCGv_i64 tcg_rd, tcg_rn;
4483    uint64_t wmask;
4484    bool is_and = false;
4485
4486    sf = extract32(insn, 31, 1);
4487    opc = extract32(insn, 29, 2);
4488    is_n = extract32(insn, 22, 1);
4489    immr = extract32(insn, 16, 6);
4490    imms = extract32(insn, 10, 6);
4491    rn = extract32(insn, 5, 5);
4492    rd = extract32(insn, 0, 5);
4493
4494    if (!sf && is_n) {
4495        unallocated_encoding(s);
4496        return;
4497    }
4498
4499    if (opc == 0x3) { /* ANDS */
4500        tcg_rd = cpu_reg(s, rd);
4501    } else {
4502        tcg_rd = cpu_reg_sp(s, rd);
4503    }
4504    tcg_rn = cpu_reg(s, rn);
4505
4506    if (!logic_imm_decode_wmask(&wmask, is_n, imms, immr)) {
4507        /* some immediate field values are reserved */
4508        unallocated_encoding(s);
4509        return;
4510    }
4511
4512    if (!sf) {
4513        wmask &= 0xffffffff;
4514    }
4515
4516    switch (opc) {
4517    case 0x3: /* ANDS */
4518    case 0x0: /* AND */
4519        tcg_gen_andi_i64(tcg_rd, tcg_rn, wmask);
4520        is_and = true;
4521        break;
4522    case 0x1: /* ORR */
4523        tcg_gen_ori_i64(tcg_rd, tcg_rn, wmask);
4524        break;
4525    case 0x2: /* EOR */
4526        tcg_gen_xori_i64(tcg_rd, tcg_rn, wmask);
4527        break;
4528    default:
4529        assert(FALSE); /* must handle all above */
4530        break;
4531    }
4532
4533    if (!sf && !is_and) {
4534        /* zero extend final result; we know we can skip this for AND
4535         * since the immediate had the high 32 bits clear.
4536         */
4537        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
4538    }
4539
4540    if (opc == 3) { /* ANDS */
4541        gen_logic_CC(sf, tcg_rd);
4542    }
4543}
4544
4545/*
4546 * Move wide (immediate)
4547 *
4548 *  31 30 29 28         23 22 21 20             5 4    0
4549 * +--+-----+-------------+-----+----------------+------+
4550 * |sf| opc | 1 0 0 1 0 1 |  hw |  imm16         |  Rd  |
4551 * +--+-----+-------------+-----+----------------+------+
4552 *
4553 * sf: 0 -> 32 bit, 1 -> 64 bit
4554 * opc: 00 -> N, 10 -> Z, 11 -> K
4555 * hw: shift/16 (0,16, and sf only 32, 48)
4556 */
4557static void disas_movw_imm(DisasContext *s, uint32_t insn)
4558{
4559    int rd = extract32(insn, 0, 5);
4560    uint64_t imm = extract32(insn, 5, 16);
4561    int sf = extract32(insn, 31, 1);
4562    int opc = extract32(insn, 29, 2);
4563    int pos = extract32(insn, 21, 2) << 4;
4564    TCGv_i64 tcg_rd = cpu_reg(s, rd);
4565
4566    if (!sf && (pos >= 32)) {
4567        unallocated_encoding(s);
4568        return;
4569    }
4570
4571    switch (opc) {
4572    case 0: /* MOVN */
4573    case 2: /* MOVZ */
4574        imm <<= pos;
4575        if (opc == 0) {
4576            imm = ~imm;
4577        }
4578        if (!sf) {
4579            imm &= 0xffffffffu;
4580        }
4581        tcg_gen_movi_i64(tcg_rd, imm);
4582        break;
4583    case 3: /* MOVK */
4584        tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_constant_i64(imm), pos, 16);
4585        if (!sf) {
4586            tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
4587        }
4588        break;
4589    default:
4590        unallocated_encoding(s);
4591        break;
4592    }
4593}
4594
4595/* Bitfield
4596 *   31  30 29 28         23 22  21  16 15  10 9    5 4    0
4597 * +----+-----+-------------+---+------+------+------+------+
4598 * | sf | opc | 1 0 0 1 1 0 | N | immr | imms |  Rn  |  Rd  |
4599 * +----+-----+-------------+---+------+------+------+------+
4600 */
static void disas_bitfield(DisasContext *s, uint32_t insn)
{
    unsigned int sf, n, opc, ri, si, rn, rd, bitsize, pos, len;
    TCGv_i64 tcg_rd, tcg_tmp;

    sf = extract32(insn, 31, 1);
    opc = extract32(insn, 29, 2);   /* 0: SBFM, 1: BFM, 2: UBFM */
    n = extract32(insn, 22, 1);
    ri = extract32(insn, 16, 6);    /* immr: right-rotate/shift amount */
    si = extract32(insn, 10, 6);    /* imms: MSB of the source field */
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);
    bitsize = sf ? 64 : 32;

    /* N must match sf, immediates must fit the width, opc 3 is reserved */
    if (sf != n || ri >= bitsize || si >= bitsize || opc > 2) {
        unallocated_encoding(s);
        return;
    }

    tcg_rd = cpu_reg(s, rd);

    /* Suppress the zero-extend for !sf.  Since RI and SI are constrained
       to be smaller than bitsize, we'll never reference data outside the
       low 32-bits anyway.  */
    tcg_tmp = read_cpu_reg(s, rn, 1);

    /* Recognize simple(r) extractions.  */
    if (si >= ri) {
        /* Wd<s-r:0> = Wn<s:r> */
        len = (si - ri) + 1;
        if (opc == 0) { /* SBFM: ASR, SBFX, SXTB, SXTH, SXTW */
            tcg_gen_sextract_i64(tcg_rd, tcg_tmp, ri, len);
            goto done;
        } else if (opc == 2) { /* UBFM: UBFX, LSR, UXTB, UXTH */
            tcg_gen_extract_i64(tcg_rd, tcg_tmp, ri, len);
            return;
        }
        /* opc == 1, BFXIL fall through to deposit */
        tcg_gen_shri_i64(tcg_tmp, tcg_tmp, ri);
        pos = 0;
    } else {
        /* Handle the ri > si case with a deposit
         * Wd<32+s-r,32-r> = Wn<s:0>
         */
        len = si + 1;
        pos = (bitsize - ri) & (bitsize - 1);
    }

    if (opc == 0 && len < ri) {
        /* SBFM: sign extend the destination field from len to fill
           the balance of the word.  Let the deposit below insert all
           of those sign bits.  */
        tcg_gen_sextract_i64(tcg_tmp, tcg_tmp, 0, len);
        len = ri;
    }

    if (opc == 1) { /* BFM, BFXIL */
        tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, pos, len);
    } else {
        /* SBFM or UBFM: We start with zero, and we haven't modified
           any bits outside bitsize, therefore the zero-extension
           below is unneeded.  */
        tcg_gen_deposit_z_i64(tcg_rd, tcg_tmp, pos, len);
        return;
    }

 done:
    if (!sf) { /* zero extend final result */
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
}
4672
4673/* Extract
4674 *   31  30  29 28         23 22   21  20  16 15    10 9    5 4    0
4675 * +----+------+-------------+---+----+------+--------+------+------+
4676 * | sf | op21 | 1 0 0 1 1 1 | N | o0 |  Rm  |  imms  |  Rn  |  Rd  |
4677 * +----+------+-------------+---+----+------+--------+------+------+
4678 */
static void disas_extract(DisasContext *s, uint32_t insn)
{
    unsigned int sf, n, rm, imm, rn, rd, bitsize, op21, op0;

    sf = extract32(insn, 31, 1);
    n = extract32(insn, 22, 1);
    rm = extract32(insn, 16, 5);
    imm = extract32(insn, 10, 6);   /* imms: bit position of the extract LSB */
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);
    op21 = extract32(insn, 29, 2);
    op0 = extract32(insn, 21, 1);
    bitsize = sf ? 64 : 32;

    /* only EXTR is allocated here: op21/op0 zero, N == sf, imm in range */
    if (sf != n || op21 || op0 || imm >= bitsize) {
        unallocated_encoding(s);
    } else {
        TCGv_i64 tcg_rd, tcg_rm, tcg_rn;

        tcg_rd = cpu_reg(s, rd);

        if (unlikely(imm == 0)) {
            /* tcg shl_i32/shl_i64 is undefined for 32/64 bit shifts,
             * so an extract from bit 0 is a special case.
             */
            if (sf) {
                tcg_gen_mov_i64(tcg_rd, cpu_reg(s, rm));
            } else {
                tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, rm));
            }
        } else {
            tcg_rm = cpu_reg(s, rm);
            tcg_rn = cpu_reg(s, rn);

            if (sf) {
                /* Specialization to ROR happens in EXTRACT2.  */
                tcg_gen_extract2_i64(tcg_rd, tcg_rm, tcg_rn, imm);
            } else {
                TCGv_i32 t0 = tcg_temp_new_i32();

                tcg_gen_extrl_i64_i32(t0, tcg_rm);
                if (rm == rn) {
                    /* same source register: EXTR degenerates to ROR */
                    tcg_gen_rotri_i32(t0, t0, imm);
                } else {
                    TCGv_i32 t1 = tcg_temp_new_i32();
                    tcg_gen_extrl_i64_i32(t1, tcg_rn);
                    tcg_gen_extract2_i32(t0, t0, t1, imm);
                    tcg_temp_free_i32(t1);
                }
                tcg_gen_extu_i32_i64(tcg_rd, t0);
                tcg_temp_free_i32(t0);
            }
        }
    }
}
4734
/* Data processing - immediate
 *
 * Top-level dispatch on insn bits [28:23] for the data-processing
 * (immediate) group; each case forwards the whole instruction word
 * to the sub-decoder for that instruction class.
 */
static void disas_data_proc_imm(DisasContext *s, uint32_t insn)
{
    switch (extract32(insn, 23, 6)) {
    case 0x20: case 0x21: /* PC-rel. addressing */
        disas_pc_rel_adr(s, insn);
        break;
    case 0x22: /* Add/subtract (immediate) */
        disas_add_sub_imm(s, insn);
        break;
    case 0x23: /* Add/subtract (immediate, with tags) */
        disas_add_sub_imm_with_tags(s, insn);
        break;
    case 0x24: /* Logical (immediate) */
        disas_logic_imm(s, insn);
        break;
    case 0x25: /* Move wide (immediate) */
        disas_movw_imm(s, insn);
        break;
    case 0x26: /* Bitfield */
        disas_bitfield(s, insn);
        break;
    case 0x27: /* Extract */
        disas_extract(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
4765
4766/* Shift a TCGv src by TCGv shift_amount, put result in dst.
4767 * Note that it is the caller's responsibility to ensure that the
4768 * shift amount is in range (ie 0..31 or 0..63) and provide the ARM
4769 * mandated semantics for out of range shifts.
4770 */
4771static void shift_reg(TCGv_i64 dst, TCGv_i64 src, int sf,
4772                      enum a64_shift_type shift_type, TCGv_i64 shift_amount)
4773{
4774    switch (shift_type) {
4775    case A64_SHIFT_TYPE_LSL:
4776        tcg_gen_shl_i64(dst, src, shift_amount);
4777        break;
4778    case A64_SHIFT_TYPE_LSR:
4779        tcg_gen_shr_i64(dst, src, shift_amount);
4780        break;
4781    case A64_SHIFT_TYPE_ASR:
4782        if (!sf) {
4783            tcg_gen_ext32s_i64(dst, src);
4784        }
4785        tcg_gen_sar_i64(dst, sf ? src : dst, shift_amount);
4786        break;
4787    case A64_SHIFT_TYPE_ROR:
4788        if (sf) {
4789            tcg_gen_rotr_i64(dst, src, shift_amount);
4790        } else {
4791            TCGv_i32 t0, t1;
4792            t0 = tcg_temp_new_i32();
4793            t1 = tcg_temp_new_i32();
4794            tcg_gen_extrl_i64_i32(t0, src);
4795            tcg_gen_extrl_i64_i32(t1, shift_amount);
4796            tcg_gen_rotr_i32(t0, t0, t1);
4797            tcg_gen_extu_i32_i64(dst, t0);
4798            tcg_temp_free_i32(t0);
4799            tcg_temp_free_i32(t1);
4800        }
4801        break;
4802    default:
4803        assert(FALSE); /* all shift types should be handled */
4804        break;
4805    }
4806
4807    if (!sf) { /* zero extend final result */
4808        tcg_gen_ext32u_i64(dst, dst);
4809    }
4810}
4811
4812/* Shift a TCGv src by immediate, put result in dst.
4813 * The shift amount must be in range (this should always be true as the
4814 * relevant instructions will UNDEF on bad shift immediates).
4815 */
4816static void shift_reg_imm(TCGv_i64 dst, TCGv_i64 src, int sf,
4817                          enum a64_shift_type shift_type, unsigned int shift_i)
4818{
4819    assert(shift_i < (sf ? 64 : 32));
4820
4821    if (shift_i == 0) {
4822        tcg_gen_mov_i64(dst, src);
4823    } else {
4824        shift_reg(dst, src, sf, shift_type, tcg_constant_i64(shift_i));
4825    }
4826}
4827
4828/* Logical (shifted register)
4829 *   31  30 29 28       24 23   22 21  20  16 15    10 9    5 4    0
4830 * +----+-----+-----------+-------+---+------+--------+------+------+
4831 * | sf | opc | 0 1 0 1 0 | shift | N |  Rm  |  imm6  |  Rn  |  Rd  |
4832 * +----+-----+-----------+-------+---+------+--------+------+------+
4833 */
4834static void disas_logic_reg(DisasContext *s, uint32_t insn)
4835{
4836    TCGv_i64 tcg_rd, tcg_rn, tcg_rm;
4837    unsigned int sf, opc, shift_type, invert, rm, shift_amount, rn, rd;
4838
4839    sf = extract32(insn, 31, 1);
4840    opc = extract32(insn, 29, 2);
4841    shift_type = extract32(insn, 22, 2);
4842    invert = extract32(insn, 21, 1);
4843    rm = extract32(insn, 16, 5);
4844    shift_amount = extract32(insn, 10, 6);
4845    rn = extract32(insn, 5, 5);
4846    rd = extract32(insn, 0, 5);
4847
4848    if (!sf && (shift_amount & (1 << 5))) {
4849        unallocated_encoding(s);
4850        return;
4851    }
4852
4853    tcg_rd = cpu_reg(s, rd);
4854
4855    if (opc == 1 && shift_amount == 0 && shift_type == 0 && rn == 31) {
4856        /* Unshifted ORR and ORN with WZR/XZR is the standard encoding for
4857         * register-register MOV and MVN, so it is worth special casing.
4858         */
4859        tcg_rm = cpu_reg(s, rm);
4860        if (invert) {
4861            tcg_gen_not_i64(tcg_rd, tcg_rm);
4862            if (!sf) {
4863                tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
4864            }
4865        } else {
4866            if (sf) {
4867                tcg_gen_mov_i64(tcg_rd, tcg_rm);
4868            } else {
4869                tcg_gen_ext32u_i64(tcg_rd, tcg_rm);
4870            }
4871        }
4872        return;
4873    }
4874
4875    tcg_rm = read_cpu_reg(s, rm, sf);
4876
4877    if (shift_amount) {
4878        shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, shift_amount);
4879    }
4880
4881    tcg_rn = cpu_reg(s, rn);
4882
4883    switch (opc | (invert << 2)) {
4884    case 0: /* AND */
4885    case 3: /* ANDS */
4886        tcg_gen_and_i64(tcg_rd, tcg_rn, tcg_rm);
4887        break;
4888    case 1: /* ORR */
4889        tcg_gen_or_i64(tcg_rd, tcg_rn, tcg_rm);
4890        break;
4891    case 2: /* EOR */
4892        tcg_gen_xor_i64(tcg_rd, tcg_rn, tcg_rm);
4893        break;
4894    case 4: /* BIC */
4895    case 7: /* BICS */
4896        tcg_gen_andc_i64(tcg_rd, tcg_rn, tcg_rm);
4897        break;
4898    case 5: /* ORN */
4899        tcg_gen_orc_i64(tcg_rd, tcg_rn, tcg_rm);
4900        break;
4901    case 6: /* EON */
4902        tcg_gen_eqv_i64(tcg_rd, tcg_rn, tcg_rm);
4903        break;
4904    default:
4905        assert(FALSE);
4906        break;
4907    }
4908
4909    if (!sf) {
4910        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
4911    }
4912
4913    if (opc == 3) {
4914        gen_logic_CC(sf, tcg_rd);
4915    }
4916}
4917
4918/*
4919 * Add/subtract (extended register)
4920 *
4921 *  31|30|29|28       24|23 22|21|20   16|15  13|12  10|9  5|4  0|
4922 * +--+--+--+-----------+-----+--+-------+------+------+----+----+
4923 * |sf|op| S| 0 1 0 1 1 | opt | 1|  Rm   |option| imm3 | Rn | Rd |
4924 * +--+--+--+-----------+-----+--+-------+------+------+----+----+
4925 *
4926 *  sf: 0 -> 32bit, 1 -> 64bit
4927 *  op: 0 -> add  , 1 -> sub
4928 *   S: 1 -> set flags
4929 * opt: 00
4930 * option: extension type (see DecodeRegExtend)
4931 * imm3: optional shift to Rm
4932 *
4933 * Rd = Rn + LSL(extend(Rm), amount)
4934 */
static void disas_add_sub_ext_reg(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int imm3 = extract32(insn, 10, 3);    /* left shift applied after extension */
    int option = extract32(insn, 13, 3);  /* extension type */
    int rm = extract32(insn, 16, 5);
    int opt = extract32(insn, 22, 2);
    bool setflags = extract32(insn, 29, 1);
    bool sub_op = extract32(insn, 30, 1);
    bool sf = extract32(insn, 31, 1);

    TCGv_i64 tcg_rm, tcg_rn; /* temps */
    TCGv_i64 tcg_rd;
    TCGv_i64 tcg_result;

    /* shift amounts 5..7 and non-zero opt are reserved */
    if (imm3 > 4 || opt != 0) {
        unallocated_encoding(s);
        return;
    }

    /* non-flag setting ops may use SP */
    if (!setflags) {
        tcg_rd = cpu_reg_sp(s, rd);
    } else {
        tcg_rd = cpu_reg(s, rd);
    }
    tcg_rn = read_cpu_reg_sp(s, rn, sf);

    /* Rm is extended per 'option' then shifted left by imm3 */
    tcg_rm = read_cpu_reg(s, rm, sf);
    ext_and_shift_reg(tcg_rm, tcg_rm, option, imm3);

    tcg_result = tcg_temp_new_i64();

    if (!setflags) {
        if (sub_op) {
            tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
        } else {
            tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
        }
    } else {
        if (sub_op) {
            gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
        } else {
            gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
        }
    }

    /* write back, truncating to 32 bits for the W-register forms */
    if (sf) {
        tcg_gen_mov_i64(tcg_rd, tcg_result);
    } else {
        tcg_gen_ext32u_i64(tcg_rd, tcg_result);
    }

    tcg_temp_free_i64(tcg_result);
}
4991
4992/*
4993 * Add/subtract (shifted register)
4994 *
4995 *  31 30 29 28       24 23 22 21 20   16 15     10 9    5 4    0
4996 * +--+--+--+-----------+-----+--+-------+---------+------+------+
4997 * |sf|op| S| 0 1 0 1 1 |shift| 0|  Rm   |  imm6   |  Rn  |  Rd  |
4998 * +--+--+--+-----------+-----+--+-------+---------+------+------+
4999 *
5000 *    sf: 0 -> 32bit, 1 -> 64bit
5001 *    op: 0 -> add  , 1 -> sub
5002 *     S: 1 -> set flags
5003 * shift: 00 -> LSL, 01 -> LSR, 10 -> ASR, 11 -> RESERVED
5004 *  imm6: Shift amount to apply to Rm before the add/sub
5005 */
5006static void disas_add_sub_reg(DisasContext *s, uint32_t insn)
5007{
5008    int rd = extract32(insn, 0, 5);
5009    int rn = extract32(insn, 5, 5);
5010    int imm6 = extract32(insn, 10, 6);
5011    int rm = extract32(insn, 16, 5);
5012    int shift_type = extract32(insn, 22, 2);
5013    bool setflags = extract32(insn, 29, 1);
5014    bool sub_op = extract32(insn, 30, 1);
5015    bool sf = extract32(insn, 31, 1);
5016
5017    TCGv_i64 tcg_rd = cpu_reg(s, rd);
5018    TCGv_i64 tcg_rn, tcg_rm;
5019    TCGv_i64 tcg_result;
5020
5021    if ((shift_type == 3) || (!sf && (imm6 > 31))) {
5022        unallocated_encoding(s);
5023        return;
5024    }
5025
5026    tcg_rn = read_cpu_reg(s, rn, sf);
5027    tcg_rm = read_cpu_reg(s, rm, sf);
5028
5029    shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, imm6);
5030
5031    tcg_result = tcg_temp_new_i64();
5032
5033    if (!setflags) {
5034        if (sub_op) {
5035            tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
5036        } else {
5037            tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
5038        }
5039    } else {
5040        if (sub_op) {
5041            gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
5042        } else {
5043            gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
5044        }
5045    }
5046
5047    if (sf) {
5048        tcg_gen_mov_i64(tcg_rd, tcg_result);
5049    } else {
5050        tcg_gen_ext32u_i64(tcg_rd, tcg_result);
5051    }
5052
5053    tcg_temp_free_i64(tcg_result);
5054}
5055
5056/* Data-processing (3 source)
5057 *
5058 *    31 30  29 28       24 23 21  20  16  15  14  10 9    5 4    0
5059 *  +--+------+-----------+------+------+----+------+------+------+
5060 *  |sf| op54 | 1 1 0 1 1 | op31 |  Rm  | o0 |  Ra  |  Rn  |  Rd  |
5061 *  +--+------+-----------+------+------+----+------+------+------+
5062 */
static void disas_data_proc_3src(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int ra = extract32(insn, 10, 5);   /* addend/subtrahend register */
    int rm = extract32(insn, 16, 5);
    int op_id = (extract32(insn, 29, 3) << 4) |
        (extract32(insn, 21, 3) << 1) |
        extract32(insn, 15, 1);
    bool sf = extract32(insn, 31, 1);
    bool is_sub = extract32(op_id, 0, 1);
    bool is_high = extract32(op_id, 2, 1);
    bool is_signed = false;
    TCGv_i64 tcg_op1;
    TCGv_i64 tcg_op2;
    TCGv_i64 tcg_tmp;

    /* Note that op_id is sf:op54:op31:o0 so it includes the 32/64 size flag */
    switch (op_id) {
    case 0x42: /* SMADDL */
    case 0x43: /* SMSUBL */
    case 0x44: /* SMULH */
        is_signed = true;
        break;
    case 0x0: /* MADD (32bit) */
    case 0x1: /* MSUB (32bit) */
    case 0x40: /* MADD (64bit) */
    case 0x41: /* MSUB (64bit) */
    case 0x4a: /* UMADDL */
    case 0x4b: /* UMSUBL */
    case 0x4c: /* UMULH */
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (is_high) {
        /* SMULH/UMULH: keep only the high half of the 128-bit product */
        TCGv_i64 low_bits = tcg_temp_new_i64(); /* low bits discarded */
        TCGv_i64 tcg_rd = cpu_reg(s, rd);
        TCGv_i64 tcg_rn = cpu_reg(s, rn);
        TCGv_i64 tcg_rm = cpu_reg(s, rm);

        if (is_signed) {
            tcg_gen_muls2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
        } else {
            tcg_gen_mulu2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
        }

        tcg_temp_free_i64(low_bits);
        return;
    }

    tcg_op1 = tcg_temp_new_i64();
    tcg_op2 = tcg_temp_new_i64();
    tcg_tmp = tcg_temp_new_i64();

    if (op_id < 0x42) {
        /* MADD/MSUB: multiply operands at full register width */
        tcg_gen_mov_i64(tcg_op1, cpu_reg(s, rn));
        tcg_gen_mov_i64(tcg_op2, cpu_reg(s, rm));
    } else {
        /* widening multiplies: extend the 32-bit sources to 64 bits */
        if (is_signed) {
            tcg_gen_ext32s_i64(tcg_op1, cpu_reg(s, rn));
            tcg_gen_ext32s_i64(tcg_op2, cpu_reg(s, rm));
        } else {
            tcg_gen_ext32u_i64(tcg_op1, cpu_reg(s, rn));
            tcg_gen_ext32u_i64(tcg_op2, cpu_reg(s, rm));
        }
    }

    if (ra == 31 && !is_sub) {
        /* Special-case MADD with rA == XZR; it is the standard MUL alias */
        tcg_gen_mul_i64(cpu_reg(s, rd), tcg_op1, tcg_op2);
    } else {
        tcg_gen_mul_i64(tcg_tmp, tcg_op1, tcg_op2);
        if (is_sub) {
            tcg_gen_sub_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
        } else {
            tcg_gen_add_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
        }
    }

    if (!sf) {
        /* 32-bit forms produce a zero-extended W result */
        tcg_gen_ext32u_i64(cpu_reg(s, rd), cpu_reg(s, rd));
    }

    tcg_temp_free_i64(tcg_op1);
    tcg_temp_free_i64(tcg_op2);
    tcg_temp_free_i64(tcg_tmp);
}
5153
5154/* Add/subtract (with carry)
5155 *  31 30 29 28 27 26 25 24 23 22 21  20  16  15       10  9    5 4   0
5156 * +--+--+--+------------------------+------+-------------+------+-----+
5157 * |sf|op| S| 1  1  0  1  0  0  0  0 |  rm  | 0 0 0 0 0 0 |  Rn  |  Rd |
5158 * +--+--+--+------------------------+------+-------------+------+-----+
5159 */
5160
5161static void disas_adc_sbc(DisasContext *s, uint32_t insn)
5162{
5163    unsigned int sf, op, setflags, rm, rn, rd;
5164    TCGv_i64 tcg_y, tcg_rn, tcg_rd;
5165
5166    sf = extract32(insn, 31, 1);
5167    op = extract32(insn, 30, 1);
5168    setflags = extract32(insn, 29, 1);
5169    rm = extract32(insn, 16, 5);
5170    rn = extract32(insn, 5, 5);
5171    rd = extract32(insn, 0, 5);
5172
5173    tcg_rd = cpu_reg(s, rd);
5174    tcg_rn = cpu_reg(s, rn);
5175
5176    if (op) {
5177        tcg_y = new_tmp_a64(s);
5178        tcg_gen_not_i64(tcg_y, cpu_reg(s, rm));
5179    } else {
5180        tcg_y = cpu_reg(s, rm);
5181    }
5182
5183    if (setflags) {
5184        gen_adc_CC(sf, tcg_rd, tcg_rn, tcg_y);
5185    } else {
5186        gen_adc(sf, tcg_rd, tcg_rn, tcg_y);
5187    }
5188}
5189
5190/*
5191 * Rotate right into flags
5192 *  31 30 29                21       15          10      5  4      0
5193 * +--+--+--+-----------------+--------+-----------+------+--+------+
5194 * |sf|op| S| 1 1 0 1 0 0 0 0 |  imm6  | 0 0 0 0 1 |  Rn  |o2| mask |
5195 * +--+--+--+-----------------+--------+-----------+------+--+------+
5196 */
static void disas_rotate_right_into_flags(DisasContext *s, uint32_t insn)
{
    int mask = extract32(insn, 0, 4);
    int o2 = extract32(insn, 4, 1);
    int rn = extract32(insn, 5, 5);
    int imm6 = extract32(insn, 15, 6);
    int sf_op_s = extract32(insn, 29, 3);
    TCGv_i64 tcg_rn;
    TCGv_i32 nzcv;

    /* RMIF: requires sf=1, op=0, S=1 (sf_op_s == 5) and FEAT_FlagM */
    if (sf_op_s != 5 || o2 != 0 || !dc_isar_feature(aa64_condm_4, s)) {
        unallocated_encoding(s);
        return;
    }

    /* Rotate Xn right by imm6; bits <3:0> of the result are the candidate
     * NZCV values, applied only where the corresponding mask bit is set.
     */
    tcg_rn = read_cpu_reg(s, rn, 1);
    tcg_gen_rotri_i64(tcg_rn, tcg_rn, imm6);

    nzcv = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(nzcv, tcg_rn);

    if (mask & 8) { /* N: move bit 3 into the sign bit of NF */
        tcg_gen_shli_i32(cpu_NF, nzcv, 31 - 3);
    }
    if (mask & 4) { /* Z: ZF holds zero iff Z is set, hence the invert */
        tcg_gen_not_i32(cpu_ZF, nzcv);
        tcg_gen_andi_i32(cpu_ZF, cpu_ZF, 4);
    }
    if (mask & 2) { /* C: CF is the bare 0/1 value of bit 1 */
        tcg_gen_extract_i32(cpu_CF, nzcv, 1, 1);
    }
    if (mask & 1) { /* V: move bit 0 into the sign bit of VF */
        tcg_gen_shli_i32(cpu_VF, nzcv, 31 - 0);
    }

    tcg_temp_free_i32(nzcv);
}
5234
5235/*
5236 * Evaluate into flags
5237 *  31 30 29                21        15   14        10      5  4      0
5238 * +--+--+--+-----------------+---------+----+---------+------+--+------+
5239 * |sf|op| S| 1 1 0 1 0 0 0 0 | opcode2 | sz | 0 0 1 0 |  Rn  |o3| mask |
5240 * +--+--+--+-----------------+---------+----+---------+------+--+------+
5241 */
static void disas_evaluate_into_flags(DisasContext *s, uint32_t insn)
{
    int o3_mask = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int o2 = extract32(insn, 15, 6);   /* opcode2 field */
    int sz = extract32(insn, 14, 1);
    int sf_op_s = extract32(insn, 29, 3);
    TCGv_i32 tmp;
    int shift;

    /* Only SETF8/SETF16 are allocated here: sf=0, op=0, S=1 with
     * o3:mask == 0b01101, and they require FEAT_FlagM.
     */
    if (sf_op_s != 1 || o2 != 0 || o3_mask != 0xd ||
        !dc_isar_feature(aa64_condm_4, s)) {
        unallocated_encoding(s);
        return;
    }
    shift = sz ? 16 : 24;  /* SETF16 or SETF8 */

    tmp = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(tmp, cpu_reg(s, rn));
    /* Shift the 8/16-bit value so its sign bit lands in NF's sign bit;
     * ZF (zero iff the value's low byte/halfword is zero) is a copy of
     * NF; VF's sign bit becomes bit<sz> XOR bit<sz-1>.  CF is untouched.
     */
    tcg_gen_shli_i32(cpu_NF, tmp, shift);
    tcg_gen_shli_i32(cpu_VF, tmp, shift - 1);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_VF, cpu_NF);
    tcg_temp_free_i32(tmp);
}
5267
5268/* Conditional compare (immediate / register)
5269 *  31 30 29 28 27 26 25 24 23 22 21  20    16 15  12  11  10  9   5  4 3   0
5270 * +--+--+--+------------------------+--------+------+----+--+------+--+-----+
5271 * |sf|op| S| 1  1  0  1  0  0  1  0 |imm5/rm | cond |i/r |o2|  Rn  |o3|nzcv |
5272 * +--+--+--+------------------------+--------+------+----+--+------+--+-----+
5273 *        [1]                             y                [0]       [0]
5274 */
static void disas_cc(DisasContext *s, uint32_t insn)
{
    unsigned int sf, op, y, cond, rn, nzcv, is_imm;
    TCGv_i32 tcg_t0, tcg_t1, tcg_t2;
    TCGv_i64 tcg_tmp, tcg_y, tcg_rn;
    DisasCompare c;

    /* S must be set for a conditional compare */
    if (!extract32(insn, 29, 1)) {
        unallocated_encoding(s);
        return;
    }
    /* o2 (bit 10) and o3 (bit 4) must both be zero */
    if (insn & (1 << 10 | 1 << 4)) {
        unallocated_encoding(s);
        return;
    }
    sf = extract32(insn, 31, 1);
    op = extract32(insn, 30, 1);   /* 0: CCMN (add), 1: CCMP (sub) */
    is_imm = extract32(insn, 11, 1);
    y = extract32(insn, 16, 5); /* y = rm (reg) or imm5 (imm) */
    cond = extract32(insn, 12, 4);
    rn = extract32(insn, 5, 5);
    nzcv = extract32(insn, 0, 4);

    /* Set T0 = !COND.  */
    tcg_t0 = tcg_temp_new_i32();
    arm_test_cc(&c, cond);
    tcg_gen_setcondi_i32(tcg_invert_cond(c.cond), tcg_t0, c.value, 0);
    arm_free_cc(&c);

    /* Load the arguments for the new comparison.  */
    if (is_imm) {
        tcg_y = new_tmp_a64(s);
        tcg_gen_movi_i64(tcg_y, y);
    } else {
        tcg_y = cpu_reg(s, y);
    }
    tcg_rn = cpu_reg(s, rn);

    /* Set the flags for the new comparison.  */
    tcg_tmp = tcg_temp_new_i64();
    if (op) {
        gen_sub_CC(sf, tcg_tmp, tcg_rn, tcg_y);
    } else {
        gen_add_CC(sf, tcg_tmp, tcg_rn, tcg_y);
    }
    tcg_temp_free_i64(tcg_tmp);

    /* If COND was false, force the flags to #nzcv.  Compute two masks
     * to help with this: T1 = (COND ? 0 : -1), T2 = (COND ? -1 : 0).
     * For tcg hosts that support ANDC, we can make do with just T1.
     * In either case, allow the tcg optimizer to delete any unused mask.
     */
    tcg_t1 = tcg_temp_new_i32();
    tcg_t2 = tcg_temp_new_i32();
    tcg_gen_neg_i32(tcg_t1, tcg_t0);
    tcg_gen_subi_i32(tcg_t2, tcg_t0, 1);

    /* For each flag, OR in the forced value when the bit of #nzcv is set,
     * otherwise AND it away; the masks make both cases branch-free.
     */
    if (nzcv & 8) { /* N */
        tcg_gen_or_i32(cpu_NF, cpu_NF, tcg_t1);
    } else {
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_NF, cpu_NF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_NF, cpu_NF, tcg_t2);
        }
    }
    if (nzcv & 4) { /* Z */
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_ZF, cpu_ZF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_ZF, cpu_ZF, tcg_t2);
        }
    } else {
        tcg_gen_or_i32(cpu_ZF, cpu_ZF, tcg_t0);
    }
    if (nzcv & 2) { /* C */
        tcg_gen_or_i32(cpu_CF, cpu_CF, tcg_t0);
    } else {
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_CF, cpu_CF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_CF, cpu_CF, tcg_t2);
        }
    }
    if (nzcv & 1) { /* V */
        tcg_gen_or_i32(cpu_VF, cpu_VF, tcg_t1);
    } else {
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_VF, cpu_VF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_VF, cpu_VF, tcg_t2);
        }
    }
    tcg_temp_free_i32(tcg_t0);
    tcg_temp_free_i32(tcg_t1);
    tcg_temp_free_i32(tcg_t2);
}
5372
5373/* Conditional select
5374 *   31   30  29  28             21 20  16 15  12 11 10 9    5 4    0
5375 * +----+----+---+-----------------+------+------+-----+------+------+
5376 * | sf | op | S | 1 1 0 1 0 1 0 0 |  Rm  | cond | op2 |  Rn  |  Rd  |
5377 * +----+----+---+-----------------+------+------+-----+------+------+
5378 */
static void disas_cond_select(DisasContext *s, uint32_t insn)
{
    unsigned int sf, else_inv, rm, cond, else_inc, rn, rd;
    TCGv_i64 tcg_rd, zero;
    DisasCompare64 c;

    if (extract32(insn, 29, 1) || extract32(insn, 11, 1)) {
        /* S == 1 or op2<1> == 1 */
        unallocated_encoding(s);
        return;
    }
    sf = extract32(insn, 31, 1);
    else_inv = extract32(insn, 30, 1);   /* op: invert the 'else' value */
    rm = extract32(insn, 16, 5);
    cond = extract32(insn, 12, 4);
    else_inc = extract32(insn, 10, 1);   /* op2<0>: increment the 'else' value */
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    /* else_inv/else_inc select among CSEL, CSINC, CSINV, CSNEG */
    tcg_rd = cpu_reg(s, rd);

    a64_test_cc(&c, cond);
    zero = tcg_constant_i64(0);

    if (rn == 31 && rm == 31 && (else_inc ^ else_inv)) {
        /* CSET & CSETM.  */
        tcg_gen_setcond_i64(tcg_invert_cond(c.cond), tcg_rd, c.value, zero);
        if (else_inv) {
            tcg_gen_neg_i64(tcg_rd, tcg_rd);
        }
    } else {
        TCGv_i64 t_true = cpu_reg(s, rn);
        TCGv_i64 t_false = read_cpu_reg(s, rm, 1);
        if (else_inv && else_inc) {
            tcg_gen_neg_i64(t_false, t_false);
        } else if (else_inv) {
            tcg_gen_not_i64(t_false, t_false);
        } else if (else_inc) {
            tcg_gen_addi_i64(t_false, t_false, 1);
        }
        tcg_gen_movcond_i64(c.cond, tcg_rd, c.value, zero, t_true, t_false);
    }

    a64_free_cc(&c);

    if (!sf) { /* zero extend final result */
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
}
5428
5429static void handle_clz(DisasContext *s, unsigned int sf,
5430                       unsigned int rn, unsigned int rd)
5431{
5432    TCGv_i64 tcg_rd, tcg_rn;
5433    tcg_rd = cpu_reg(s, rd);
5434    tcg_rn = cpu_reg(s, rn);
5435
5436    if (sf) {
5437        tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64);
5438    } else {
5439        TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
5440        tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
5441        tcg_gen_clzi_i32(tcg_tmp32, tcg_tmp32, 32);
5442        tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
5443        tcg_temp_free_i32(tcg_tmp32);
5444    }
5445}
5446
5447static void handle_cls(DisasContext *s, unsigned int sf,
5448                       unsigned int rn, unsigned int rd)
5449{
5450    TCGv_i64 tcg_rd, tcg_rn;
5451    tcg_rd = cpu_reg(s, rd);
5452    tcg_rn = cpu_reg(s, rn);
5453
5454    if (sf) {
5455        tcg_gen_clrsb_i64(tcg_rd, tcg_rn);
5456    } else {
5457        TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
5458        tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
5459        tcg_gen_clrsb_i32(tcg_tmp32, tcg_tmp32);
5460        tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
5461        tcg_temp_free_i32(tcg_tmp32);
5462    }
5463}
5464
5465static void handle_rbit(DisasContext *s, unsigned int sf,
5466                        unsigned int rn, unsigned int rd)
5467{
5468    TCGv_i64 tcg_rd, tcg_rn;
5469    tcg_rd = cpu_reg(s, rd);
5470    tcg_rn = cpu_reg(s, rn);
5471
5472    if (sf) {
5473        gen_helper_rbit64(tcg_rd, tcg_rn);
5474    } else {
5475        TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
5476        tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
5477        gen_helper_rbit(tcg_tmp32, tcg_tmp32);
5478        tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
5479        tcg_temp_free_i32(tcg_tmp32);
5480    }
5481}
5482
5483/* REV with sf==1, opcode==3 ("REV64") */
5484static void handle_rev64(DisasContext *s, unsigned int sf,
5485                         unsigned int rn, unsigned int rd)
5486{
5487    if (!sf) {
5488        unallocated_encoding(s);
5489        return;
5490    }
5491    tcg_gen_bswap64_i64(cpu_reg(s, rd), cpu_reg(s, rn));
5492}
5493
5494/* REV with sf==0, opcode==2
5495 * REV32 (sf==1, opcode==2)
5496 */
5497static void handle_rev32(DisasContext *s, unsigned int sf,
5498                         unsigned int rn, unsigned int rd)
5499{
5500    TCGv_i64 tcg_rd = cpu_reg(s, rd);
5501    TCGv_i64 tcg_rn = cpu_reg(s, rn);
5502
5503    if (sf) {
5504        tcg_gen_bswap64_i64(tcg_rd, tcg_rn);
5505        tcg_gen_rotri_i64(tcg_rd, tcg_rd, 32);
5506    } else {
5507        tcg_gen_bswap32_i64(tcg_rd, tcg_rn, TCG_BSWAP_OZ);
5508    }
5509}
5510
5511/* REV16 (opcode==1) */
5512static void handle_rev16(DisasContext *s, unsigned int sf,
5513                         unsigned int rn, unsigned int rd)
5514{
5515    TCGv_i64 tcg_rd = cpu_reg(s, rd);
5516    TCGv_i64 tcg_tmp = tcg_temp_new_i64();
5517    TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
5518    TCGv_i64 mask = tcg_constant_i64(sf ? 0x00ff00ff00ff00ffull : 0x00ff00ff);
5519
5520    tcg_gen_shri_i64(tcg_tmp, tcg_rn, 8);
5521    tcg_gen_and_i64(tcg_rd, tcg_rn, mask);
5522    tcg_gen_and_i64(tcg_tmp, tcg_tmp, mask);
5523    tcg_gen_shli_i64(tcg_rd, tcg_rd, 8);
5524    tcg_gen_or_i64(tcg_rd, tcg_rd, tcg_tmp);
5525
5526    tcg_temp_free_i64(tcg_tmp);
5527}
5528
/* Data-processing (1 source)
 *   31  30  29  28             21 20     16 15    10 9    5 4    0
 * +----+---+---+-----------------+---------+--------+------+------+
 * | sf | 1 | S | 1 1 0 1 0 1 1 0 | opcode2 | opcode |  Rn  |  Rd  |
 * +----+---+---+-----------------+---------+--------+------+------+
 */
static void disas_data_proc_1src(DisasContext *s, uint32_t insn)
{
    unsigned int sf, opcode, opcode2, rn, rd;
    TCGv_i64 tcg_rd;

    /* The S bit (29) must be zero for all of these encodings. */
    if (extract32(insn, 29, 1)) {
        unallocated_encoding(s);
        return;
    }

    sf = extract32(insn, 31, 1);
    opcode = extract32(insn, 10, 6);
    opcode2 = extract32(insn, 16, 5);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    /* Pack sf, opcode2 and opcode into a single switch value:
     * bit 0 = sf, bits [6:1] = opcode, bits [11:7] = opcode2.
     */
#define MAP(SF, O2, O1) ((SF) | (O1 << 1) | (O2 << 7))

    switch (MAP(sf, opcode2, opcode)) {
    case MAP(0, 0x00, 0x00): /* RBIT */
    case MAP(1, 0x00, 0x00):
        handle_rbit(s, sf, rn, rd);
        break;
    case MAP(0, 0x00, 0x01): /* REV16 */
    case MAP(1, 0x00, 0x01):
        handle_rev16(s, sf, rn, rd);
        break;
    case MAP(0, 0x00, 0x02): /* REV/REV32 */
    case MAP(1, 0x00, 0x02):
        handle_rev32(s, sf, rn, rd);
        break;
    case MAP(1, 0x00, 0x03): /* REV64 */
        handle_rev64(s, sf, rn, rd);
        break;
    case MAP(0, 0x00, 0x04): /* CLZ */
    case MAP(1, 0x00, 0x04):
        handle_clz(s, sf, rn, rd);
        break;
    case MAP(0, 0x00, 0x05): /* CLS */
    case MAP(1, 0x00, 0x05):
        handle_cls(s, sf, rn, rd);
        break;
    /* For the PAC*, AUT* and XPAC* cases below: if the pauth feature is
     * implemented but authentication is not currently active, no code is
     * emitted and the instruction executes as a NOP.  Only when the
     * feature is absent do we fall through to do_unallocated.
     */
    case MAP(1, 0x01, 0x00): /* PACIA */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacia(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x01): /* PACIB */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacib(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x02): /* PACDA */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacda(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x03): /* PACDB */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacdb(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x04): /* AUTIA */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autia(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x05): /* AUTIB */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autib(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x06): /* AUTDA */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autda(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x07): /* AUTDB */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autdb(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    /* The *Z variants use a zero modifier instead of Rn|SP and require
     * the Rn field itself to be 31.
     */
    case MAP(1, 0x01, 0x08): /* PACIZA */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacia(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
        }
        break;
    case MAP(1, 0x01, 0x09): /* PACIZB */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacib(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
        }
        break;
    case MAP(1, 0x01, 0x0a): /* PACDZA */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacda(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
        }
        break;
    case MAP(1, 0x01, 0x0b): /* PACDZB */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacdb(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
        }
        break;
    case MAP(1, 0x01, 0x0c): /* AUTIZA */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autia(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
        }
        break;
    case MAP(1, 0x01, 0x0d): /* AUTIZB */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autib(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
        }
        break;
    case MAP(1, 0x01, 0x0e): /* AUTDZA */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autda(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
        }
        break;
    case MAP(1, 0x01, 0x0f): /* AUTDZB */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autdb(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
        }
        break;
    case MAP(1, 0x01, 0x10): /* XPACI */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_xpaci(tcg_rd, cpu_env, tcg_rd);
        }
        break;
    case MAP(1, 0x01, 0x11): /* XPACD */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_xpacd(tcg_rd, cpu_env, tcg_rd);
        }
        break;
    default:
    do_unallocated:
        unallocated_encoding(s);
        break;
    }

#undef MAP
}
5729
5730static void handle_div(DisasContext *s, bool is_signed, unsigned int sf,
5731                       unsigned int rm, unsigned int rn, unsigned int rd)
5732{
5733    TCGv_i64 tcg_n, tcg_m, tcg_rd;
5734    tcg_rd = cpu_reg(s, rd);
5735
5736    if (!sf && is_signed) {
5737        tcg_n = new_tmp_a64(s);
5738        tcg_m = new_tmp_a64(s);
5739        tcg_gen_ext32s_i64(tcg_n, cpu_reg(s, rn));
5740        tcg_gen_ext32s_i64(tcg_m, cpu_reg(s, rm));
5741    } else {
5742        tcg_n = read_cpu_reg(s, rn, sf);
5743        tcg_m = read_cpu_reg(s, rm, sf);
5744    }
5745
5746    if (is_signed) {
5747        gen_helper_sdiv64(tcg_rd, tcg_n, tcg_m);
5748    } else {
5749        gen_helper_udiv64(tcg_rd, tcg_n, tcg_m);
5750    }
5751
5752    if (!sf) { /* zero extend final result */
5753        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
5754    }
5755}
5756
5757/* LSLV, LSRV, ASRV, RORV */
5758static void handle_shift_reg(DisasContext *s,
5759                             enum a64_shift_type shift_type, unsigned int sf,
5760                             unsigned int rm, unsigned int rn, unsigned int rd)
5761{
5762    TCGv_i64 tcg_shift = tcg_temp_new_i64();
5763    TCGv_i64 tcg_rd = cpu_reg(s, rd);
5764    TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
5765
5766    tcg_gen_andi_i64(tcg_shift, cpu_reg(s, rm), sf ? 63 : 31);
5767    shift_reg(tcg_rd, tcg_rn, sf, shift_type, tcg_shift);
5768    tcg_temp_free_i64(tcg_shift);
5769}
5770
/* CRC32[BHWX], CRC32C[BHWX]
 * Accumulate 1 << sz bytes of Rm into the CRC in Rn, writing Rd.
 */
static void handle_crc32(DisasContext *s,
                         unsigned int sf, unsigned int sz, bool crc32c,
                         unsigned int rm, unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_acc, tcg_val;
    TCGv_i32 tcg_bytes;

    /* sf==1 is only valid for the X (sz==3) form, and vice versa. */
    if (!dc_isar_feature(aa64_crc32, s)
        || (sf == 1 && sz != 3)
        || (sf == 0 && sz == 3)) {
        unallocated_encoding(s);
        return;
    }

    if (sz == 3) {
        tcg_val = cpu_reg(s, rm);
    } else {
        /* Mask the operand down to the 1, 2 or 4 low bytes in use. */
        uint64_t mask;
        switch (sz) {
        case 0:
            mask = 0xFF;
            break;
        case 1:
            mask = 0xFFFF;
            break;
        case 2:
            mask = 0xFFFFFFFF;
            break;
        default:
            g_assert_not_reached();
        }
        tcg_val = new_tmp_a64(s);
        tcg_gen_andi_i64(tcg_val, cpu_reg(s, rm), mask);
    }

    tcg_acc = cpu_reg(s, rn);
    tcg_bytes = tcg_constant_i32(1 << sz);

    if (crc32c) {
        gen_helper_crc32c_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes);
    } else {
        gen_helper_crc32_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes);
    }
}
5816
/* Data-processing (2 source)
 *   31   30  29 28             21 20  16 15    10 9    5 4    0
 * +----+---+---+-----------------+------+--------+------+------+
 * | sf | 0 | S | 1 1 0 1 0 1 1 0 |  Rm  | opcode |  Rn  |  Rd  |
 * +----+---+---+-----------------+------+--------+------+------+
 */
static void disas_data_proc_2src(DisasContext *s, uint32_t insn)
{
    unsigned int sf, rm, opcode, rn, rd, setflag;
    sf = extract32(insn, 31, 1);
    setflag = extract32(insn, 29, 1);
    rm = extract32(insn, 16, 5);
    opcode = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    /* The S bit is only allocated for SUBPS (opcode 0). */
    if (setflag && opcode != 0) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0: /* SUBP(S) */
        if (sf == 0 || !dc_isar_feature(aa64_mte_insn_reg, s)) {
            goto do_unallocated;
        } else {
            TCGv_i64 tcg_n, tcg_m, tcg_d;

            /* Pointer subtraction on the 56-bit address portion:
             * sign-extend from bit 55, discarding the top byte.
             */
            tcg_n = read_cpu_reg_sp(s, rn, true);
            tcg_m = read_cpu_reg_sp(s, rm, true);
            tcg_gen_sextract_i64(tcg_n, tcg_n, 0, 56);
            tcg_gen_sextract_i64(tcg_m, tcg_m, 0, 56);
            tcg_d = cpu_reg(s, rd);

            if (setflag) {
                gen_sub_CC(true, tcg_d, tcg_n, tcg_m);
            } else {
                tcg_gen_sub_i64(tcg_d, tcg_n, tcg_m);
            }
        }
        break;
    case 2: /* UDIV */
        handle_div(s, false, sf, rm, rn, rd);
        break;
    case 3: /* SDIV */
        handle_div(s, true, sf, rm, rn, rd);
        break;
    case 4: /* IRG */
        if (sf == 0 || !dc_isar_feature(aa64_mte_insn_reg, s)) {
            goto do_unallocated;
        }
        if (s->ata) {
            /* Allocation tags enabled: insert a random tag via helper. */
            gen_helper_irg(cpu_reg_sp(s, rd), cpu_env,
                           cpu_reg_sp(s, rn), cpu_reg(s, rm));
        } else {
            /* Tags disabled: produce the address with tag zero. */
            gen_address_with_allocation_tag0(cpu_reg_sp(s, rd),
                                             cpu_reg_sp(s, rn));
        }
        break;
    case 5: /* GMI */
        if (sf == 0 || !dc_isar_feature(aa64_mte_insn_reg, s)) {
            goto do_unallocated;
        } else {
            TCGv_i64 t = tcg_temp_new_i64();

            /* Set bit (1 << tag) in Xm, where tag = Xn|SP bits [59:56]. */
            tcg_gen_extract_i64(t, cpu_reg_sp(s, rn), 56, 4);
            tcg_gen_shl_i64(t, tcg_constant_i64(1), t);
            tcg_gen_or_i64(cpu_reg(s, rd), cpu_reg(s, rm), t);

            tcg_temp_free_i64(t);
        }
        break;
    case 8: /* LSLV */
        handle_shift_reg(s, A64_SHIFT_TYPE_LSL, sf, rm, rn, rd);
        break;
    case 9: /* LSRV */
        handle_shift_reg(s, A64_SHIFT_TYPE_LSR, sf, rm, rn, rd);
        break;
    case 10: /* ASRV */
        handle_shift_reg(s, A64_SHIFT_TYPE_ASR, sf, rm, rn, rd);
        break;
    case 11: /* RORV */
        handle_shift_reg(s, A64_SHIFT_TYPE_ROR, sf, rm, rn, rd);
        break;
    case 12: /* PACGA */
        if (sf == 0 || !dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        gen_helper_pacga(cpu_reg(s, rd), cpu_env,
                         cpu_reg(s, rn), cpu_reg_sp(s, rm));
        break;
    case 16:
    case 17:
    case 18:
    case 19:
    case 20:
    case 21:
    case 22:
    case 23: /* CRC32 */
    {
        /* opcode bits [1:0] give the size, bit 2 selects CRC32C. */
        int sz = extract32(opcode, 0, 2);
        bool crc32c = extract32(opcode, 2, 1);
        handle_crc32(s, sf, sz, crc32c, rm, rn, rd);
        break;
    }
    default:
    do_unallocated:
        unallocated_encoding(s);
        break;
    }
}
5928
5929/*
5930 * Data processing - register
5931 *  31  30 29  28      25    21  20  16      10         0
5932 * +--+---+--+---+-------+-----+-------+-------+---------+
5933 * |  |op0|  |op1| 1 0 1 | op2 |       |  op3  |         |
5934 * +--+---+--+---+-------+-----+-------+-------+---------+
5935 */
5936static void disas_data_proc_reg(DisasContext *s, uint32_t insn)
5937{
5938    int op0 = extract32(insn, 30, 1);
5939    int op1 = extract32(insn, 28, 1);
5940    int op2 = extract32(insn, 21, 4);
5941    int op3 = extract32(insn, 10, 6);
5942
5943    if (!op1) {
5944        if (op2 & 8) {
5945            if (op2 & 1) {
5946                /* Add/sub (extended register) */
5947                disas_add_sub_ext_reg(s, insn);
5948            } else {
5949                /* Add/sub (shifted register) */
5950                disas_add_sub_reg(s, insn);
5951            }
5952        } else {
5953            /* Logical (shifted register) */
5954            disas_logic_reg(s, insn);
5955        }
5956        return;
5957    }
5958
5959    switch (op2) {
5960    case 0x0:
5961        switch (op3) {
5962        case 0x00: /* Add/subtract (with carry) */
5963            disas_adc_sbc(s, insn);
5964            break;
5965
5966        case 0x01: /* Rotate right into flags */
5967        case 0x21:
5968            disas_rotate_right_into_flags(s, insn);
5969            break;
5970
5971        case 0x02: /* Evaluate into flags */
5972        case 0x12:
5973        case 0x22:
5974        case 0x32:
5975            disas_evaluate_into_flags(s, insn);
5976            break;
5977
5978        default:
5979            goto do_unallocated;
5980        }
5981        break;
5982
5983    case 0x2: /* Conditional compare */
5984        disas_cc(s, insn); /* both imm and reg forms */
5985        break;
5986
5987    case 0x4: /* Conditional select */
5988        disas_cond_select(s, insn);
5989        break;
5990
5991    case 0x6: /* Data-processing */
5992        if (op0) {    /* (1 source) */
5993            disas_data_proc_1src(s, insn);
5994        } else {      /* (2 source) */
5995            disas_data_proc_2src(s, insn);
5996        }
5997        break;
5998    case 0x8 ... 0xf: /* (3 source) */
5999        disas_data_proc_3src(s, insn);
6000        break;
6001
6002    default:
6003    do_unallocated:
6004        unallocated_encoding(s);
6005        break;
6006    }
6007}
6008
/* Emit an FP compare of Vn against Vm (or zero) at the given element
 * size (MO_16/MO_32/MO_64) and install the resulting NZCV flags.
 * signal_all_nans selects the signalling (FCMPE-style) helpers.
 */
static void handle_fp_compare(DisasContext *s, int size,
                              unsigned int rn, unsigned int rm,
                              bool cmp_with_zero, bool signal_all_nans)
{
    TCGv_i64 tcg_flags = tcg_temp_new_i64();
    /* Half precision uses its own FP status (separate FZ16 control). */
    TCGv_ptr fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);

    if (size == MO_64) {
        TCGv_i64 tcg_vn, tcg_vm;

        tcg_vn = read_fp_dreg(s, rn);
        if (cmp_with_zero) {
            tcg_vm = tcg_constant_i64(0);
        } else {
            tcg_vm = read_fp_dreg(s, rm);
        }
        if (signal_all_nans) {
            gen_helper_vfp_cmped_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
        } else {
            gen_helper_vfp_cmpd_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
        }
        tcg_temp_free_i64(tcg_vn);
        tcg_temp_free_i64(tcg_vm);
    } else {
        /* Single and half precision share the 32-bit read path. */
        TCGv_i32 tcg_vn = tcg_temp_new_i32();
        TCGv_i32 tcg_vm = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_vn, rn, 0, size);
        if (cmp_with_zero) {
            tcg_gen_movi_i32(tcg_vm, 0);
        } else {
            read_vec_element_i32(s, tcg_vm, rm, 0, size);
        }

        switch (size) {
        case MO_32:
            if (signal_all_nans) {
                gen_helper_vfp_cmpes_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
            } else {
                gen_helper_vfp_cmps_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
            }
            break;
        case MO_16:
            if (signal_all_nans) {
                gen_helper_vfp_cmpeh_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
            } else {
                gen_helper_vfp_cmph_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
            }
            break;
        default:
            g_assert_not_reached();
        }

        tcg_temp_free_i32(tcg_vn);
        tcg_temp_free_i32(tcg_vm);
    }

    tcg_temp_free_ptr(fpst);

    /* The helpers produced the NZCV value in tcg_flags; install it. */
    gen_set_nzcv(tcg_flags);

    tcg_temp_free_i64(tcg_flags);
}
6072
6073/* Floating point compare
6074 *   31  30  29 28       24 23  22  21 20  16 15 14 13  10    9    5 4     0
6075 * +---+---+---+-----------+------+---+------+-----+---------+------+-------+
6076 * | M | 0 | S | 1 1 1 1 0 | type | 1 |  Rm  | op  | 1 0 0 0 |  Rn  |  op2  |
6077 * +---+---+---+-----------+------+---+------+-----+---------+------+-------+
6078 */
6079static void disas_fp_compare(DisasContext *s, uint32_t insn)
6080{
6081    unsigned int mos, type, rm, op, rn, opc, op2r;
6082    int size;
6083
6084    mos = extract32(insn, 29, 3);
6085    type = extract32(insn, 22, 2);
6086    rm = extract32(insn, 16, 5);
6087    op = extract32(insn, 14, 2);
6088    rn = extract32(insn, 5, 5);
6089    opc = extract32(insn, 3, 2);
6090    op2r = extract32(insn, 0, 3);
6091
6092    if (mos || op || op2r) {
6093        unallocated_encoding(s);
6094        return;
6095    }
6096
6097    switch (type) {
6098    case 0:
6099        size = MO_32;
6100        break;
6101    case 1:
6102        size = MO_64;
6103        break;
6104    case 3:
6105        size = MO_16;
6106        if (dc_isar_feature(aa64_fp16, s)) {
6107            break;
6108        }
6109        /* fallthru */
6110    default:
6111        unallocated_encoding(s);
6112        return;
6113    }
6114
6115    if (!fp_access_check(s)) {
6116        return;
6117    }
6118
6119    handle_fp_compare(s, size, rn, rm, opc & 1, opc & 2);
6120}
6121
/* Floating point conditional compare
 *   31  30  29 28       24 23  22  21 20  16 15  12 11 10 9    5  4   3    0
 * +---+---+---+-----------+------+---+------+------+-----+------+----+------+
 * | M | 0 | S | 1 1 1 1 0 | type | 1 |  Rm  | cond | 0 1 |  Rn  | op | nzcv |
 * +---+---+---+-----------+------+---+------+------+-----+------+----+------+
 *
 * If cond holds, perform the FP compare; otherwise install the
 * immediate nzcv value directly.
 */
static void disas_fp_ccomp(DisasContext *s, uint32_t insn)
{
    unsigned int mos, type, rm, cond, rn, op, nzcv;
    TCGLabel *label_continue = NULL;
    int size;

    mos = extract32(insn, 29, 3);
    type = extract32(insn, 22, 2);
    rm = extract32(insn, 16, 5);
    cond = extract32(insn, 12, 4);
    rn = extract32(insn, 5, 5);
    op = extract32(insn, 4, 1);
    nzcv = extract32(insn, 0, 4);

    if (mos) {
        unallocated_encoding(s);
        return;
    }

    /* Decode element size; half precision requires FEAT_FP16. */
    switch (type) {
    case 0:
        size = MO_32;
        break;
    case 1:
        size = MO_64;
        break;
    case 3:
        size = MO_16;
        if (dc_isar_feature(aa64_fp16, s)) {
            break;
        }
        /* fallthru */
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (cond < 0x0e) { /* not always */
        TCGLabel *label_match = gen_new_label();
        label_continue = gen_new_label();
        arm_gen_test_cc(cond, label_match);
        /* nomatch: set NZCV from the immediate and skip the compare. */
        gen_set_nzcv(tcg_constant_i64(nzcv << 28));
        tcg_gen_br(label_continue);
        gen_set_label(label_match);
    }

    /* op selects the signalling (FCCMPE) form. */
    handle_fp_compare(s, size, rn, rm, false, op);

    if (cond < 0x0e) {
        gen_set_label(label_continue);
    }
}
6185
/* Floating point conditional select
 *   31  30  29 28       24 23  22  21 20  16 15  12 11 10 9    5 4    0
 * +---+---+---+-----------+------+---+------+------+-----+------+------+
 * | M | 0 | S | 1 1 1 1 0 | type | 1 |  Rm  | cond | 1 1 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+---+------+------+-----+------+------+
 *
 * Vd = cond ? Vn : Vm, for 16/32/64-bit scalar FP values.
 */
static void disas_fp_csel(DisasContext *s, uint32_t insn)
{
    unsigned int mos, type, rm, cond, rn, rd;
    TCGv_i64 t_true, t_false;
    DisasCompare64 c;
    MemOp sz;

    mos = extract32(insn, 29, 3);
    type = extract32(insn, 22, 2);
    rm = extract32(insn, 16, 5);
    cond = extract32(insn, 12, 4);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    if (mos) {
        unallocated_encoding(s);
        return;
    }

    /* Decode element size; half precision requires FEAT_FP16. */
    switch (type) {
    case 0:
        sz = MO_32;
        break;
    case 1:
        sz = MO_64;
        break;
    case 3:
        sz = MO_16;
        if (dc_isar_feature(aa64_fp16, s)) {
            break;
        }
        /* fallthru */
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* Zero extend sreg & hreg inputs to 64 bits now.  */
    t_true = tcg_temp_new_i64();
    t_false = tcg_temp_new_i64();
    read_vec_element(s, t_true, rn, 0, sz);
    read_vec_element(s, t_false, rm, 0, sz);

    /* Select t_true when the condition's comparison holds, else t_false. */
    a64_test_cc(&c, cond);
    tcg_gen_movcond_i64(c.cond, t_true, c.value, tcg_constant_i64(0),
                        t_true, t_false);
    tcg_temp_free_i64(t_false);
    a64_free_cc(&c);

    /* Note that sregs & hregs write back zeros to the high bits,
       and we've already done the zero-extension.  */
    write_fp_dreg(s, rd, t_true);
    tcg_temp_free_i64(t_true);
}
6250
/* Floating-point data-processing (1 source) - half precision
 * Handles FMOV/FABS/FNEG/FSQRT and the FRINT* family on H registers.
 */
static void handle_fp_1src_half(DisasContext *s, int opcode, int rd, int rn)
{
    TCGv_ptr fpst = NULL;
    TCGv_i32 tcg_op = read_fp_hreg(s, rn);
    TCGv_i32 tcg_res = tcg_temp_new_i32();

    switch (opcode) {
    case 0x0: /* FMOV */
        tcg_gen_mov_i32(tcg_res, tcg_op);
        break;
    case 0x1: /* FABS */
        /* Clear the f16 sign bit. */
        tcg_gen_andi_i32(tcg_res, tcg_op, 0x7fff);
        break;
    case 0x2: /* FNEG */
        /* Flip the f16 sign bit. */
        tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
        break;
    case 0x3: /* FSQRT */
        fpst = fpstatus_ptr(FPST_FPCR_F16);
        gen_helper_sqrt_f16(tcg_res, tcg_op, fpst);
        break;
    case 0x8: /* FRINTN */
    case 0x9: /* FRINTP */
    case 0xa: /* FRINTM */
    case 0xb: /* FRINTZ */
    case 0xc: /* FRINTA */
    {
        /* Directed rounding: opcode低 bits select the rounding mode. */
        TCGv_i32 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(opcode & 7));
        fpst = fpstatus_ptr(FPST_FPCR_F16);

        /* set_rmode swaps in the new mode; calling it again with the
         * returned value restores the original one afterwards.
         */
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
        gen_helper_advsimd_rinth(tcg_res, tcg_op, fpst);

        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
        tcg_temp_free_i32(tcg_rmode);
        break;
    }
    case 0xe: /* FRINTX */
        fpst = fpstatus_ptr(FPST_FPCR_F16);
        gen_helper_advsimd_rinth_exact(tcg_res, tcg_op, fpst);
        break;
    case 0xf: /* FRINTI */
        fpst = fpstatus_ptr(FPST_FPCR_F16);
        gen_helper_advsimd_rinth(tcg_res, tcg_op, fpst);
        break;
    default:
        g_assert_not_reached();
    }

    write_fp_sreg(s, rd, tcg_res);

    if (fpst) {
        tcg_temp_free_ptr(fpst);
    }
    tcg_temp_free_i32(tcg_op);
    tcg_temp_free_i32(tcg_res);
}
6308
/* Floating-point data-processing (1 source) - single precision
 * Simple ops are emitted directly; the FRINT*/BFCVT family routes
 * through a helper function pointer, optionally with a forced
 * rounding mode (rmode >= 0).
 */
static void handle_fp_1src_single(DisasContext *s, int opcode, int rd, int rn)
{
    void (*gen_fpst)(TCGv_i32, TCGv_i32, TCGv_ptr);
    TCGv_i32 tcg_op, tcg_res;
    TCGv_ptr fpst;
    int rmode = -1;  /* -1: use the current FPCR rounding mode */

    tcg_op = read_fp_sreg(s, rn);
    tcg_res = tcg_temp_new_i32();

    switch (opcode) {
    case 0x0: /* FMOV */
        tcg_gen_mov_i32(tcg_res, tcg_op);
        goto done;
    case 0x1: /* FABS */
        gen_helper_vfp_abss(tcg_res, tcg_op);
        goto done;
    case 0x2: /* FNEG */
        gen_helper_vfp_negs(tcg_res, tcg_op);
        goto done;
    case 0x3: /* FSQRT */
        gen_helper_vfp_sqrts(tcg_res, tcg_op, cpu_env);
        goto done;
    case 0x6: /* BFCVT */
        gen_fpst = gen_helper_bfcvt;
        break;
    case 0x8: /* FRINTN */
    case 0x9: /* FRINTP */
    case 0xa: /* FRINTM */
    case 0xb: /* FRINTZ */
    case 0xc: /* FRINTA */
        rmode = arm_rmode_to_sf(opcode & 7);
        gen_fpst = gen_helper_rints;
        break;
    case 0xe: /* FRINTX */
        gen_fpst = gen_helper_rints_exact;
        break;
    case 0xf: /* FRINTI */
        gen_fpst = gen_helper_rints;
        break;
    case 0x10: /* FRINT32Z */
        rmode = float_round_to_zero;
        gen_fpst = gen_helper_frint32_s;
        break;
    case 0x11: /* FRINT32X */
        gen_fpst = gen_helper_frint32_s;
        break;
    case 0x12: /* FRINT64Z */
        rmode = float_round_to_zero;
        gen_fpst = gen_helper_frint64_s;
        break;
    case 0x13: /* FRINT64X */
        gen_fpst = gen_helper_frint64_s;
        break;
    default:
        g_assert_not_reached();
    }

    fpst = fpstatus_ptr(FPST_FPCR);
    if (rmode >= 0) {
        /* Swap in the forced rounding mode around the operation, then
         * restore the previous one via the second set_rmode call.
         */
        TCGv_i32 tcg_rmode = tcg_const_i32(rmode);
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
        gen_fpst(tcg_res, tcg_op, fpst);
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
        tcg_temp_free_i32(tcg_rmode);
    } else {
        gen_fpst(tcg_res, tcg_op, fpst);
    }
    tcg_temp_free_ptr(fpst);

 done:
    write_fp_sreg(s, rd, tcg_res);
    tcg_temp_free_i32(tcg_op);
    tcg_temp_free_i32(tcg_res);
}
6385
/* Floating-point data-processing (1 source) - double precision
 * Mirrors the single-precision path, with FMOV special-cased as a
 * vector register move.
 */
static void handle_fp_1src_double(DisasContext *s, int opcode, int rd, int rn)
{
    void (*gen_fpst)(TCGv_i64, TCGv_i64, TCGv_ptr);
    TCGv_i64 tcg_op, tcg_res;
    TCGv_ptr fpst;
    int rmode = -1;  /* -1: use the current FPCR rounding mode */

    switch (opcode) {
    case 0x0: /* FMOV */
        /* Plain register-to-register move; no FP status needed. */
        gen_gvec_fn2(s, false, rd, rn, tcg_gen_gvec_mov, 0);
        return;
    }

    tcg_op = read_fp_dreg(s, rn);
    tcg_res = tcg_temp_new_i64();

    switch (opcode) {
    case 0x1: /* FABS */
        gen_helper_vfp_absd(tcg_res, tcg_op);
        goto done;
    case 0x2: /* FNEG */
        gen_helper_vfp_negd(tcg_res, tcg_op);
        goto done;
    case 0x3: /* FSQRT */
        gen_helper_vfp_sqrtd(tcg_res, tcg_op, cpu_env);
        goto done;
    case 0x8: /* FRINTN */
    case 0x9: /* FRINTP */
    case 0xa: /* FRINTM */
    case 0xb: /* FRINTZ */
    case 0xc: /* FRINTA */
        rmode = arm_rmode_to_sf(opcode & 7);
        gen_fpst = gen_helper_rintd;
        break;
    case 0xe: /* FRINTX */
        gen_fpst = gen_helper_rintd_exact;
        break;
    case 0xf: /* FRINTI */
        gen_fpst = gen_helper_rintd;
        break;
    case 0x10: /* FRINT32Z */
        rmode = float_round_to_zero;
        gen_fpst = gen_helper_frint32_d;
        break;
    case 0x11: /* FRINT32X */
        gen_fpst = gen_helper_frint32_d;
        break;
    case 0x12: /* FRINT64Z */
        rmode = float_round_to_zero;
        gen_fpst = gen_helper_frint64_d;
        break;
    case 0x13: /* FRINT64X */
        gen_fpst = gen_helper_frint64_d;
        break;
    default:
        g_assert_not_reached();
    }

    fpst = fpstatus_ptr(FPST_FPCR);
    if (rmode >= 0) {
        /* Swap in the forced rounding mode around the operation, then
         * restore the previous one via the second set_rmode call.
         */
        TCGv_i32 tcg_rmode = tcg_const_i32(rmode);
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
        gen_fpst(tcg_res, tcg_op, fpst);
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
        tcg_temp_free_i32(tcg_rmode);
    } else {
        gen_fpst(tcg_res, tcg_op, fpst);
    }
    tcg_temp_free_ptr(fpst);

 done:
    write_fp_dreg(s, rd, tcg_res);
    tcg_temp_free_i64(tcg_op);
    tcg_temp_free_i64(tcg_res);
}
6462
/*
 * FCVT between scalar FP precisions.
 * @dtype: destination type, @ntype: source type; encoding is
 * 0 = single, 1 = double, 3 = half.  The decoder (disas_fp_1src)
 * rejects dtype == ntype and type == 2 before calling us.
 */
static void handle_fp_fcvt(DisasContext *s, int opcode,
                           int rd, int rn, int dtype, int ntype)
{
    switch (ntype) {
    case 0x0: /* source: single precision */
    {
        TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
        if (dtype == 1) {
            /* Single to double */
            TCGv_i64 tcg_rd = tcg_temp_new_i64();
            gen_helper_vfp_fcvtds(tcg_rd, tcg_rn, cpu_env);
            write_fp_dreg(s, rd, tcg_rd);
            tcg_temp_free_i64(tcg_rd);
        } else {
            /* Single to half */
            TCGv_i32 tcg_rd = tcg_temp_new_i32();
            /* AHP (alternative half-precision) flag */
            TCGv_i32 ahp = get_ahp_flag();
            TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);

            gen_helper_vfp_fcvt_f32_to_f16(tcg_rd, tcg_rn, fpst, ahp);
            /* write_fp_sreg is OK here because top half of tcg_rd is zero */
            write_fp_sreg(s, rd, tcg_rd);
            tcg_temp_free_i32(tcg_rd);
            tcg_temp_free_i32(ahp);
            tcg_temp_free_ptr(fpst);
        }
        tcg_temp_free_i32(tcg_rn);
        break;
    }
    case 0x1: /* source: double precision */
    {
        TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
        TCGv_i32 tcg_rd = tcg_temp_new_i32();
        if (dtype == 0) {
            /* Double to single */
            gen_helper_vfp_fcvtsd(tcg_rd, tcg_rn, cpu_env);
        } else {
            TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
            TCGv_i32 ahp = get_ahp_flag();
            /* Double to half */
            gen_helper_vfp_fcvt_f64_to_f16(tcg_rd, tcg_rn, fpst, ahp);
            /* write_fp_sreg is OK here because top half of tcg_rd is zero */
            tcg_temp_free_ptr(fpst);
            tcg_temp_free_i32(ahp);
        }
        write_fp_sreg(s, rd, tcg_rd);
        tcg_temp_free_i32(tcg_rd);
        tcg_temp_free_i64(tcg_rn);
        break;
    }
    case 0x3: /* source: half precision */
    {
        TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
        TCGv_ptr tcg_fpst = fpstatus_ptr(FPST_FPCR);
        TCGv_i32 tcg_ahp = get_ahp_flag();
        /* Only the low 16 bits of the register hold the f16 input */
        tcg_gen_ext16u_i32(tcg_rn, tcg_rn);
        if (dtype == 0) {
            /* Half to single */
            TCGv_i32 tcg_rd = tcg_temp_new_i32();
            gen_helper_vfp_fcvt_f16_to_f32(tcg_rd, tcg_rn, tcg_fpst, tcg_ahp);
            write_fp_sreg(s, rd, tcg_rd);
            tcg_temp_free_i32(tcg_rd);
        } else {
            /* Half to double */
            TCGv_i64 tcg_rd = tcg_temp_new_i64();
            gen_helper_vfp_fcvt_f16_to_f64(tcg_rd, tcg_rn, tcg_fpst, tcg_ahp);
            write_fp_dreg(s, rd, tcg_rd);
            tcg_temp_free_i64(tcg_rd);
        }
        tcg_temp_free_i32(tcg_rn);
        tcg_temp_free_ptr(tcg_fpst);
        tcg_temp_free_i32(tcg_ahp);
        break;
    }
    default:
        g_assert_not_reached();
    }
}
6541
/* Floating point data-processing (1 source)
 *   31  30  29 28       24 23  22  21 20    15 14       10 9    5 4    0
 * +---+---+---+-----------+------+---+--------+-----------+------+------+
 * | M | 0 | S | 1 1 1 1 0 | type | 1 | opcode | 1 0 0 0 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+---+--------+-----------+------+------+
 */
static void disas_fp_1src(DisasContext *s, uint32_t insn)
{
    int mos = extract32(insn, 29, 3);
    int type = extract32(insn, 22, 2);
    int opcode = extract32(insn, 15, 6);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    /* Bits [31:29] (M, bit 30, S) must all be zero */
    if (mos) {
        goto do_unallocated;
    }

    switch (opcode) {
    case 0x4: case 0x5: case 0x7:
    {
        /* FCVT between half, single and double precision */
        int dtype = extract32(opcode, 0, 2);
        /* type 2 is reserved; converting a precision to itself is invalid */
        if (type == 2 || dtype == type) {
            goto do_unallocated;
        }
        if (!fp_access_check(s)) {
            return;
        }

        handle_fp_fcvt(s, opcode, rd, rn, dtype, type);
        break;
    }

    case 0x10 ... 0x13: /* FRINT{32,64}{X,Z} */
        /* These exist only for single/double and need FEAT_FRINTTS */
        if (type > 1 || !dc_isar_feature(aa64_frint, s)) {
            goto do_unallocated;
        }
        /* fall through */
    case 0x0 ... 0x3:
    case 0x8 ... 0xc:
    case 0xe ... 0xf:
        /* 32-to-32 and 64-to-64 ops */
        switch (type) {
        case 0: /* single precision */
            if (!fp_access_check(s)) {
                return;
            }
            handle_fp_1src_single(s, opcode, rd, rn);
            break;
        case 1: /* double precision */
            if (!fp_access_check(s)) {
                return;
            }
            handle_fp_1src_double(s, opcode, rd, rn);
            break;
        case 3: /* half precision, needs FEAT_FP16 */
            if (!dc_isar_feature(aa64_fp16, s)) {
                goto do_unallocated;
            }

            if (!fp_access_check(s)) {
                return;
            }
            handle_fp_1src_half(s, opcode, rd, rn);
            break;
        default:
            goto do_unallocated;
        }
        break;

    case 0x6:
        switch (type) {
        case 1: /* BFCVT */
            if (!dc_isar_feature(aa64_bf16, s)) {
                goto do_unallocated;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_fp_1src_single(s, opcode, rd, rn);
            break;
        default:
            goto do_unallocated;
        }
        break;

    default:
    do_unallocated:
        unallocated_encoding(s);
        break;
    }
}
6635
6636/* Floating-point data-processing (2 source) - single precision */
6637static void handle_fp_2src_single(DisasContext *s, int opcode,
6638                                  int rd, int rn, int rm)
6639{
6640    TCGv_i32 tcg_op1;
6641    TCGv_i32 tcg_op2;
6642    TCGv_i32 tcg_res;
6643    TCGv_ptr fpst;
6644
6645    tcg_res = tcg_temp_new_i32();
6646    fpst = fpstatus_ptr(FPST_FPCR);
6647    tcg_op1 = read_fp_sreg(s, rn);
6648    tcg_op2 = read_fp_sreg(s, rm);
6649
6650    switch (opcode) {
6651    case 0x0: /* FMUL */
6652        gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
6653        break;
6654    case 0x1: /* FDIV */
6655        gen_helper_vfp_divs(tcg_res, tcg_op1, tcg_op2, fpst);
6656        break;
6657    case 0x2: /* FADD */
6658        gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
6659        break;
6660    case 0x3: /* FSUB */
6661        gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
6662        break;
6663    case 0x4: /* FMAX */
6664        gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
6665        break;
6666    case 0x5: /* FMIN */
6667        gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
6668        break;
6669    case 0x6: /* FMAXNM */
6670        gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
6671        break;
6672    case 0x7: /* FMINNM */
6673        gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
6674        break;
6675    case 0x8: /* FNMUL */
6676        gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
6677        gen_helper_vfp_negs(tcg_res, tcg_res);
6678        break;
6679    }
6680
6681    write_fp_sreg(s, rd, tcg_res);
6682
6683    tcg_temp_free_ptr(fpst);
6684    tcg_temp_free_i32(tcg_op1);
6685    tcg_temp_free_i32(tcg_op2);
6686    tcg_temp_free_i32(tcg_res);
6687}
6688
6689/* Floating-point data-processing (2 source) - double precision */
6690static void handle_fp_2src_double(DisasContext *s, int opcode,
6691                                  int rd, int rn, int rm)
6692{
6693    TCGv_i64 tcg_op1;
6694    TCGv_i64 tcg_op2;
6695    TCGv_i64 tcg_res;
6696    TCGv_ptr fpst;
6697
6698    tcg_res = tcg_temp_new_i64();
6699    fpst = fpstatus_ptr(FPST_FPCR);
6700    tcg_op1 = read_fp_dreg(s, rn);
6701    tcg_op2 = read_fp_dreg(s, rm);
6702
6703    switch (opcode) {
6704    case 0x0: /* FMUL */
6705        gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
6706        break;
6707    case 0x1: /* FDIV */
6708        gen_helper_vfp_divd(tcg_res, tcg_op1, tcg_op2, fpst);
6709        break;
6710    case 0x2: /* FADD */
6711        gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
6712        break;
6713    case 0x3: /* FSUB */
6714        gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
6715        break;
6716    case 0x4: /* FMAX */
6717        gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
6718        break;
6719    case 0x5: /* FMIN */
6720        gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
6721        break;
6722    case 0x6: /* FMAXNM */
6723        gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
6724        break;
6725    case 0x7: /* FMINNM */
6726        gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
6727        break;
6728    case 0x8: /* FNMUL */
6729        gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
6730        gen_helper_vfp_negd(tcg_res, tcg_res);
6731        break;
6732    }
6733
6734    write_fp_dreg(s, rd, tcg_res);
6735
6736    tcg_temp_free_ptr(fpst);
6737    tcg_temp_free_i64(tcg_op1);
6738    tcg_temp_free_i64(tcg_op2);
6739    tcg_temp_free_i64(tcg_res);
6740}
6741
/* Floating-point data-processing (2 source) - half precision
 *
 * Generates Hd = Hn <op> Hm using the FP16-specific status flags.
 */
static void handle_fp_2src_half(DisasContext *s, int opcode,
                                int rd, int rn, int rm)
{
    TCGv_i32 tcg_op1;
    TCGv_i32 tcg_op2;
    TCGv_i32 tcg_res;
    TCGv_ptr fpst;

    tcg_res = tcg_temp_new_i32();
    fpst = fpstatus_ptr(FPST_FPCR_F16);
    tcg_op1 = read_fp_hreg(s, rn);
    tcg_op2 = read_fp_hreg(s, rm);

    switch (opcode) {
    case 0x0: /* FMUL */
        gen_helper_advsimd_mulh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x1: /* FDIV */
        gen_helper_advsimd_divh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x2: /* FADD */
        gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x3: /* FSUB */
        gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x4: /* FMAX */
        gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x5: /* FMIN */
        gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x6: /* FMAXNM */
        gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x7: /* FMINNM */
        gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x8: /* FNMUL */
        /* Negate the product by flipping the f16 sign bit (bit 15) */
        gen_helper_advsimd_mulh(tcg_res, tcg_op1, tcg_op2, fpst);
        tcg_gen_xori_i32(tcg_res, tcg_res, 0x8000);
        break;
    default:
        g_assert_not_reached();
    }

    write_fp_sreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tcg_op1);
    tcg_temp_free_i32(tcg_op2);
    tcg_temp_free_i32(tcg_res);
}
6796
6797/* Floating point data-processing (2 source)
6798 *   31  30  29 28       24 23  22  21 20  16 15    12 11 10 9    5 4    0
6799 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
6800 * | M | 0 | S | 1 1 1 1 0 | type | 1 |  Rm  | opcode | 1 0 |  Rn  |  Rd  |
6801 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
6802 */
6803static void disas_fp_2src(DisasContext *s, uint32_t insn)
6804{
6805    int mos = extract32(insn, 29, 3);
6806    int type = extract32(insn, 22, 2);
6807    int rd = extract32(insn, 0, 5);
6808    int rn = extract32(insn, 5, 5);
6809    int rm = extract32(insn, 16, 5);
6810    int opcode = extract32(insn, 12, 4);
6811
6812    if (opcode > 8 || mos) {
6813        unallocated_encoding(s);
6814        return;
6815    }
6816
6817    switch (type) {
6818    case 0:
6819        if (!fp_access_check(s)) {
6820            return;
6821        }
6822        handle_fp_2src_single(s, opcode, rd, rn, rm);
6823        break;
6824    case 1:
6825        if (!fp_access_check(s)) {
6826            return;
6827        }
6828        handle_fp_2src_double(s, opcode, rd, rn, rm);
6829        break;
6830    case 3:
6831        if (!dc_isar_feature(aa64_fp16, s)) {
6832            unallocated_encoding(s);
6833            return;
6834        }
6835        if (!fp_access_check(s)) {
6836            return;
6837        }
6838        handle_fp_2src_half(s, opcode, rd, rn, rm);
6839        break;
6840    default:
6841        unallocated_encoding(s);
6842    }
6843}
6844
6845/* Floating-point data-processing (3 source) - single precision */
6846static void handle_fp_3src_single(DisasContext *s, bool o0, bool o1,
6847                                  int rd, int rn, int rm, int ra)
6848{
6849    TCGv_i32 tcg_op1, tcg_op2, tcg_op3;
6850    TCGv_i32 tcg_res = tcg_temp_new_i32();
6851    TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
6852
6853    tcg_op1 = read_fp_sreg(s, rn);
6854    tcg_op2 = read_fp_sreg(s, rm);
6855    tcg_op3 = read_fp_sreg(s, ra);
6856
6857    /* These are fused multiply-add, and must be done as one
6858     * floating point operation with no rounding between the
6859     * multiplication and addition steps.
6860     * NB that doing the negations here as separate steps is
6861     * correct : an input NaN should come out with its sign bit
6862     * flipped if it is a negated-input.
6863     */
6864    if (o1 == true) {
6865        gen_helper_vfp_negs(tcg_op3, tcg_op3);
6866    }
6867
6868    if (o0 != o1) {
6869        gen_helper_vfp_negs(tcg_op1, tcg_op1);
6870    }
6871
6872    gen_helper_vfp_muladds(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);
6873
6874    write_fp_sreg(s, rd, tcg_res);
6875
6876    tcg_temp_free_ptr(fpst);
6877    tcg_temp_free_i32(tcg_op1);
6878    tcg_temp_free_i32(tcg_op2);
6879    tcg_temp_free_i32(tcg_op3);
6880    tcg_temp_free_i32(tcg_res);
6881}
6882
6883/* Floating-point data-processing (3 source) - double precision */
6884static void handle_fp_3src_double(DisasContext *s, bool o0, bool o1,
6885                                  int rd, int rn, int rm, int ra)
6886{
6887    TCGv_i64 tcg_op1, tcg_op2, tcg_op3;
6888    TCGv_i64 tcg_res = tcg_temp_new_i64();
6889    TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
6890
6891    tcg_op1 = read_fp_dreg(s, rn);
6892    tcg_op2 = read_fp_dreg(s, rm);
6893    tcg_op3 = read_fp_dreg(s, ra);
6894
6895    /* These are fused multiply-add, and must be done as one
6896     * floating point operation with no rounding between the
6897     * multiplication and addition steps.
6898     * NB that doing the negations here as separate steps is
6899     * correct : an input NaN should come out with its sign bit
6900     * flipped if it is a negated-input.
6901     */
6902    if (o1 == true) {
6903        gen_helper_vfp_negd(tcg_op3, tcg_op3);
6904    }
6905
6906    if (o0 != o1) {
6907        gen_helper_vfp_negd(tcg_op1, tcg_op1);
6908    }
6909
6910    gen_helper_vfp_muladdd(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);
6911
6912    write_fp_dreg(s, rd, tcg_res);
6913
6914    tcg_temp_free_ptr(fpst);
6915    tcg_temp_free_i64(tcg_op1);
6916    tcg_temp_free_i64(tcg_op2);
6917    tcg_temp_free_i64(tcg_op3);
6918    tcg_temp_free_i64(tcg_res);
6919}
6920
6921/* Floating-point data-processing (3 source) - half precision */
6922static void handle_fp_3src_half(DisasContext *s, bool o0, bool o1,
6923                                int rd, int rn, int rm, int ra)
6924{
6925    TCGv_i32 tcg_op1, tcg_op2, tcg_op3;
6926    TCGv_i32 tcg_res = tcg_temp_new_i32();
6927    TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR_F16);
6928
6929    tcg_op1 = read_fp_hreg(s, rn);
6930    tcg_op2 = read_fp_hreg(s, rm);
6931    tcg_op3 = read_fp_hreg(s, ra);
6932
6933    /* These are fused multiply-add, and must be done as one
6934     * floating point operation with no rounding between the
6935     * multiplication and addition steps.
6936     * NB that doing the negations here as separate steps is
6937     * correct : an input NaN should come out with its sign bit
6938     * flipped if it is a negated-input.
6939     */
6940    if (o1 == true) {
6941        tcg_gen_xori_i32(tcg_op3, tcg_op3, 0x8000);
6942    }
6943
6944    if (o0 != o1) {
6945        tcg_gen_xori_i32(tcg_op1, tcg_op1, 0x8000);
6946    }
6947
6948    gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);
6949
6950    write_fp_sreg(s, rd, tcg_res);
6951
6952    tcg_temp_free_ptr(fpst);
6953    tcg_temp_free_i32(tcg_op1);
6954    tcg_temp_free_i32(tcg_op2);
6955    tcg_temp_free_i32(tcg_op3);
6956    tcg_temp_free_i32(tcg_res);
6957}
6958
6959/* Floating point data-processing (3 source)
6960 *   31  30  29 28       24 23  22  21  20  16  15  14  10 9    5 4    0
6961 * +---+---+---+-----------+------+----+------+----+------+------+------+
6962 * | M | 0 | S | 1 1 1 1 1 | type | o1 |  Rm  | o0 |  Ra  |  Rn  |  Rd  |
6963 * +---+---+---+-----------+------+----+------+----+------+------+------+
6964 */
6965static void disas_fp_3src(DisasContext *s, uint32_t insn)
6966{
6967    int mos = extract32(insn, 29, 3);
6968    int type = extract32(insn, 22, 2);
6969    int rd = extract32(insn, 0, 5);
6970    int rn = extract32(insn, 5, 5);
6971    int ra = extract32(insn, 10, 5);
6972    int rm = extract32(insn, 16, 5);
6973    bool o0 = extract32(insn, 15, 1);
6974    bool o1 = extract32(insn, 21, 1);
6975
6976    if (mos) {
6977        unallocated_encoding(s);
6978        return;
6979    }
6980
6981    switch (type) {
6982    case 0:
6983        if (!fp_access_check(s)) {
6984            return;
6985        }
6986        handle_fp_3src_single(s, o0, o1, rd, rn, rm, ra);
6987        break;
6988    case 1:
6989        if (!fp_access_check(s)) {
6990            return;
6991        }
6992        handle_fp_3src_double(s, o0, o1, rd, rn, rm, ra);
6993        break;
6994    case 3:
6995        if (!dc_isar_feature(aa64_fp16, s)) {
6996            unallocated_encoding(s);
6997            return;
6998        }
6999        if (!fp_access_check(s)) {
7000            return;
7001        }
7002        handle_fp_3src_half(s, o0, o1, rd, rn, rm, ra);
7003        break;
7004    default:
7005        unallocated_encoding(s);
7006    }
7007}
7008
7009/* Floating point immediate
7010 *   31  30  29 28       24 23  22  21 20        13 12   10 9    5 4    0
7011 * +---+---+---+-----------+------+---+------------+-------+------+------+
7012 * | M | 0 | S | 1 1 1 1 0 | type | 1 |    imm8    | 1 0 0 | imm5 |  Rd  |
7013 * +---+---+---+-----------+------+---+------------+-------+------+------+
7014 */
7015static void disas_fp_imm(DisasContext *s, uint32_t insn)
7016{
7017    int rd = extract32(insn, 0, 5);
7018    int imm5 = extract32(insn, 5, 5);
7019    int imm8 = extract32(insn, 13, 8);
7020    int type = extract32(insn, 22, 2);
7021    int mos = extract32(insn, 29, 3);
7022    uint64_t imm;
7023    MemOp sz;
7024
7025    if (mos || imm5) {
7026        unallocated_encoding(s);
7027        return;
7028    }
7029
7030    switch (type) {
7031    case 0:
7032        sz = MO_32;
7033        break;
7034    case 1:
7035        sz = MO_64;
7036        break;
7037    case 3:
7038        sz = MO_16;
7039        if (dc_isar_feature(aa64_fp16, s)) {
7040            break;
7041        }
7042        /* fallthru */
7043    default:
7044        unallocated_encoding(s);
7045        return;
7046    }
7047
7048    if (!fp_access_check(s)) {
7049        return;
7050    }
7051
7052    imm = vfp_expand_imm(sz, imm8);
7053    write_fp_dreg(s, rd, tcg_constant_i64(imm));
7054}
7055
/* Handle floating point <=> fixed point conversions. Note that we can
 * also deal with fp <=> integer conversions as a special case (scale == 64)
 * OPTME: consider handling that special case specially or at least skipping
 * the call to scalbn in the helpers for zero shifts.
 *
 * @itof:  true for integer/fixed -> FP, false for FP -> integer/fixed
 * @rmode: rounding mode (overridden to TIEAWAY for FCVTA*, see below)
 * @scale: 64 minus the number of fractional bits (64 = pure integer)
 * @sf:    true for a 64-bit general register, false for 32-bit
 * @type:  0 = single, 1 = double, 3 = half precision
 */
static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode,
                           bool itof, int rmode, int scale, int sf, int type)
{
    bool is_signed = !(opcode & 1); /* even opcodes are the signed forms */
    TCGv_ptr tcg_fpstatus;
    TCGv_i32 tcg_shift, tcg_single;
    TCGv_i64 tcg_double;

    /* Half precision uses the FP16-specific status flags */
    tcg_fpstatus = fpstatus_ptr(type == 3 ? FPST_FPCR_F16 : FPST_FPCR);

    /* The helpers take the number of fractional bits (0 when scale == 64) */
    tcg_shift = tcg_constant_i32(64 - scale);

    if (itof) {
        /* Integer/fixed-point to FP: SCVTF / UCVTF */
        TCGv_i64 tcg_int = cpu_reg(s, rn);
        if (!sf) {
            /* Widen the 32-bit source to 64 bits before converting */
            TCGv_i64 tcg_extend = new_tmp_a64(s);

            if (is_signed) {
                tcg_gen_ext32s_i64(tcg_extend, tcg_int);
            } else {
                tcg_gen_ext32u_i64(tcg_extend, tcg_int);
            }

            tcg_int = tcg_extend;
        }

        switch (type) {
        case 1: /* float64 */
            tcg_double = tcg_temp_new_i64();
            if (is_signed) {
                gen_helper_vfp_sqtod(tcg_double, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_uqtod(tcg_double, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            }
            write_fp_dreg(s, rd, tcg_double);
            tcg_temp_free_i64(tcg_double);
            break;

        case 0: /* float32 */
            tcg_single = tcg_temp_new_i32();
            if (is_signed) {
                gen_helper_vfp_sqtos(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_uqtos(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            }
            write_fp_sreg(s, rd, tcg_single);
            tcg_temp_free_i32(tcg_single);
            break;

        case 3: /* float16 */
            tcg_single = tcg_temp_new_i32();
            if (is_signed) {
                gen_helper_vfp_sqtoh(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_uqtoh(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            }
            write_fp_sreg(s, rd, tcg_single);
            tcg_temp_free_i32(tcg_single);
            break;

        default:
            g_assert_not_reached();
        }
    } else {
        /* FP to integer/fixed-point: FCVT*S / FCVT*U */
        TCGv_i64 tcg_int = cpu_reg(s, rd);
        TCGv_i32 tcg_rmode;

        if (extract32(opcode, 2, 1)) {
            /* There are too many rounding modes to all fit into rmode,
             * so FCVTA[US] is a special case.
             */
            rmode = FPROUNDING_TIEAWAY;
        }

        /* Install the requested rounding mode; restored at the end */
        tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));

        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);

        switch (type) {
        case 1: /* float64 */
            tcg_double = read_fp_dreg(s, rn);
            if (is_signed) {
                if (!sf) {
                    gen_helper_vfp_tosld(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_tosqd(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                }
            } else {
                if (!sf) {
                    gen_helper_vfp_tould(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touqd(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                }
            }
            if (!sf) {
                /* 32-bit results are zero-extended into Xd */
                tcg_gen_ext32u_i64(tcg_int, tcg_int);
            }
            tcg_temp_free_i64(tcg_double);
            break;

        case 0: /* float32 */
            tcg_single = read_fp_sreg(s, rn);
            if (sf) {
                if (is_signed) {
                    gen_helper_vfp_tosqs(tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touqs(tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
            } else {
                TCGv_i32 tcg_dest = tcg_temp_new_i32();
                if (is_signed) {
                    gen_helper_vfp_tosls(tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touls(tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
                /* Zero-extend the 32-bit result into Xd */
                tcg_gen_extu_i32_i64(tcg_int, tcg_dest);
                tcg_temp_free_i32(tcg_dest);
            }
            tcg_temp_free_i32(tcg_single);
            break;

        case 3: /* float16 */
            tcg_single = read_fp_sreg(s, rn);
            if (sf) {
                if (is_signed) {
                    gen_helper_vfp_tosqh(tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touqh(tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
            } else {
                TCGv_i32 tcg_dest = tcg_temp_new_i32();
                if (is_signed) {
                    gen_helper_vfp_toslh(tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_toulh(tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
                /* Zero-extend the 32-bit result into Xd */
                tcg_gen_extu_i32_i64(tcg_int, tcg_dest);
                tcg_temp_free_i32(tcg_dest);
            }
            tcg_temp_free_i32(tcg_single);
            break;

        default:
            g_assert_not_reached();
        }

        /* Restore the previous rounding mode */
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
        tcg_temp_free_i32(tcg_rmode);
    }

    tcg_temp_free_ptr(tcg_fpstatus);
}
7231
7232/* Floating point <-> fixed point conversions
7233 *   31   30  29 28       24 23  22  21 20   19 18    16 15   10 9    5 4    0
7234 * +----+---+---+-----------+------+---+-------+--------+-------+------+------+
7235 * | sf | 0 | S | 1 1 1 1 0 | type | 0 | rmode | opcode | scale |  Rn  |  Rd  |
7236 * +----+---+---+-----------+------+---+-------+--------+-------+------+------+
7237 */
7238static void disas_fp_fixed_conv(DisasContext *s, uint32_t insn)
7239{
7240    int rd = extract32(insn, 0, 5);
7241    int rn = extract32(insn, 5, 5);
7242    int scale = extract32(insn, 10, 6);
7243    int opcode = extract32(insn, 16, 3);
7244    int rmode = extract32(insn, 19, 2);
7245    int type = extract32(insn, 22, 2);
7246    bool sbit = extract32(insn, 29, 1);
7247    bool sf = extract32(insn, 31, 1);
7248    bool itof;
7249
7250    if (sbit || (!sf && scale < 32)) {
7251        unallocated_encoding(s);
7252        return;
7253    }
7254
7255    switch (type) {
7256    case 0: /* float32 */
7257    case 1: /* float64 */
7258        break;
7259    case 3: /* float16 */
7260        if (dc_isar_feature(aa64_fp16, s)) {
7261            break;
7262        }
7263        /* fallthru */
7264    default:
7265        unallocated_encoding(s);
7266        return;
7267    }
7268
7269    switch ((rmode << 3) | opcode) {
7270    case 0x2: /* SCVTF */
7271    case 0x3: /* UCVTF */
7272        itof = true;
7273        break;
7274    case 0x18: /* FCVTZS */
7275    case 0x19: /* FCVTZU */
7276        itof = false;
7277        break;
7278    default:
7279        unallocated_encoding(s);
7280        return;
7281    }
7282
7283    if (!fp_access_check(s)) {
7284        return;
7285    }
7286
7287    handle_fpfpcvt(s, rd, rn, opcode, itof, FPROUNDING_ZERO, scale, sf, type);
7288}
7289
static void handle_fmov(DisasContext *s, int rd, int rn, int type, bool itof)
{
    /* FMOV: gpr to or from float, double, or top half of quad fp reg,
     * without conversion.
     * type: 0 = 32-bit, 1 = 64-bit, 2 = upper 64 bits of the 128-bit
     * vector register, 3 = 16-bit; itof selects the direction.
     */

    if (itof) {
        /* General register -> FP register */
        TCGv_i64 tcg_rn = cpu_reg(s, rn);
        TCGv_i64 tmp;

        switch (type) {
        case 0:
            /* 32 bit */
            tmp = tcg_temp_new_i64();
            tcg_gen_ext32u_i64(tmp, tcg_rn);
            write_fp_dreg(s, rd, tmp);
            tcg_temp_free_i64(tmp);
            break;
        case 1:
            /* 64 bit */
            write_fp_dreg(s, rd, tcg_rn);
            break;
        case 2:
            /* 64 bit to top half. */
            tcg_gen_st_i64(tcg_rn, cpu_env, fp_reg_hi_offset(s, rd));
            /* Only the low 128 bits are written; clear anything above */
            clear_vec_high(s, true, rd);
            break;
        case 3:
            /* 16 bit */
            tmp = tcg_temp_new_i64();
            tcg_gen_ext16u_i64(tmp, tcg_rn);
            write_fp_dreg(s, rd, tmp);
            tcg_temp_free_i64(tmp);
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        /* FP register -> general register (zero-extending loads) */
        TCGv_i64 tcg_rd = cpu_reg(s, rd);

        switch (type) {
        case 0:
            /* 32 bit */
            tcg_gen_ld32u_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_32));
            break;
        case 1:
            /* 64 bit */
            tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_64));
            break;
        case 2:
            /* 64 bits from top half */
            tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_hi_offset(s, rn));
            break;
        case 3:
            /* 16 bit */
            tcg_gen_ld16u_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_16));
            break;
        default:
            g_assert_not_reached();
        }
    }
}
7352
/*
 * FJCVTZS: convert double to 32-bit signed int with JavaScript
 * (round-towards-zero, modulo-2^32) semantics; also sets NZCV.
 */
static void handle_fjcvtzs(DisasContext *s, int rd, int rn)
{
    TCGv_i64 t = read_fp_dreg(s, rn);
    TCGv_ptr fpstatus = fpstatus_ptr(FPST_FPCR);

    /* Helper packs the 32-bit result in the low half of t and the
     * value destined for cpu_ZF in the high half.
     */
    gen_helper_fjcvtzs(t, t, fpstatus);

    tcg_temp_free_ptr(fpstatus);

    /* Result is zero-extended into Xd */
    tcg_gen_ext32u_i64(cpu_reg(s, rd), t);
    /* High half -> Z flag state; N, C and V are cleared */
    tcg_gen_extrh_i64_i32(cpu_ZF, t);
    tcg_gen_movi_i32(cpu_CF, 0);
    tcg_gen_movi_i32(cpu_NF, 0);
    tcg_gen_movi_i32(cpu_VF, 0);

    tcg_temp_free_i64(t);
}
7370
/* Floating point <-> integer conversions
 *   31   30  29 28       24 23  22  21 20   19 18 16 15         10 9  5 4  0
 * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
 * | sf | 0 | S | 1 1 1 1 0 | type | 1 | rmode | opc | 0 0 0 0 0 0 | Rn | Rd |
 * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
 */
static void disas_fp_int_conv(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 16, 3);
    int rmode = extract32(insn, 19, 2);
    int type = extract32(insn, 22, 2);
    bool sbit = extract32(insn, 29, 1);
    bool sf = extract32(insn, 31, 1);
    bool itof = false;

    /* The S bit must be zero for every encoding in this group. */
    if (sbit) {
        goto do_unallocated;
    }

    switch (opcode) {
    case 2: /* SCVTF */
    case 3: /* UCVTF */
        itof = true;   /* these convert int -> float */
        /* fallthru */
    case 4: /* FCVTAS */
    case 5: /* FCVTAU */
        /* [SU]CVTF and FCVTA[SU] only exist with rmode == 0. */
        if (rmode != 0) {
            goto do_unallocated;
        }
        /* fallthru */
    case 0: /* FCVT[NPMZ]S */
    case 1: /* FCVT[NPMZ]U */
        /* type selects the FP size: 0 single, 1 double, 3 half (FP16). */
        switch (type) {
        case 0: /* float32 */
        case 1: /* float64 */
            break;
        case 3: /* float16 */
            if (!dc_isar_feature(aa64_fp16, s)) {
                goto do_unallocated;
            }
            break;
        default:
            goto do_unallocated;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_fpfpcvt(s, rd, rn, opcode, itof, rmode, 64, sf, type);
        break;

    default:
        /*
         * The remaining encodings (FMOV variants and FJCVTZS) are
         * distinguished by the whole sf:type:rmode:opcode tuple, so
         * switch on that directly.
         */
        switch (sf << 7 | type << 5 | rmode << 3 | opcode) {
        case 0b01100110: /* FMOV half <-> 32-bit int */
        case 0b01100111:
        case 0b11100110: /* FMOV half <-> 64-bit int */
        case 0b11100111:
            if (!dc_isar_feature(aa64_fp16, s)) {
                goto do_unallocated;
            }
            /* fallthru */
        case 0b00000110: /* FMOV 32-bit */
        case 0b00000111:
        case 0b10100110: /* FMOV 64-bit */
        case 0b10100111:
        case 0b11001110: /* FMOV top half of 128-bit */
        case 0b11001111:
            if (!fp_access_check(s)) {
                return;
            }
            /* opcode bit 0 selects the int->float direction */
            itof = opcode & 1;
            handle_fmov(s, rd, rn, type, itof);
            break;

        case 0b00111110: /* FJCVTZS */
            if (!dc_isar_feature(aa64_jscvt, s)) {
                goto do_unallocated;
            } else if (fp_access_check(s)) {
                handle_fjcvtzs(s, rd, rn);
            }
            break;

        default:
        do_unallocated:
            unallocated_encoding(s);
            return;
        }
        break;
    }
}
7462
7463/* FP-specific subcases of table C3-6 (SIMD and FP data processing)
7464 *   31  30  29 28     25 24                          0
7465 * +---+---+---+---------+-----------------------------+
7466 * |   | 0 |   | 1 1 1 1 |                             |
7467 * +---+---+---+---------+-----------------------------+
7468 */
7469static void disas_data_proc_fp(DisasContext *s, uint32_t insn)
7470{
7471    if (extract32(insn, 24, 1)) {
7472        /* Floating point data-processing (3 source) */
7473        disas_fp_3src(s, insn);
7474    } else if (extract32(insn, 21, 1) == 0) {
7475        /* Floating point to fixed point conversions */
7476        disas_fp_fixed_conv(s, insn);
7477    } else {
7478        switch (extract32(insn, 10, 2)) {
7479        case 1:
7480            /* Floating point conditional compare */
7481            disas_fp_ccomp(s, insn);
7482            break;
7483        case 2:
7484            /* Floating point data-processing (2 source) */
7485            disas_fp_2src(s, insn);
7486            break;
7487        case 3:
7488            /* Floating point conditional select */
7489            disas_fp_csel(s, insn);
7490            break;
7491        case 0:
7492            switch (ctz32(extract32(insn, 12, 4))) {
7493            case 0: /* [15:12] == xxx1 */
7494                /* Floating point immediate */
7495                disas_fp_imm(s, insn);
7496                break;
7497            case 1: /* [15:12] == xx10 */
7498                /* Floating point compare */
7499                disas_fp_compare(s, insn);
7500                break;
7501            case 2: /* [15:12] == x100 */
7502                /* Floating point data-processing (1 source) */
7503                disas_fp_1src(s, insn);
7504                break;
7505            case 3: /* [15:12] == 1000 */
7506                unallocated_encoding(s);
7507                break;
7508            default: /* [15:12] == 0000 */
7509                /* Floating point <-> integer conversions */
7510                disas_fp_int_conv(s, insn);
7511                break;
7512            }
7513            break;
7514        }
7515    }
7516}
7517
7518static void do_ext64(DisasContext *s, TCGv_i64 tcg_left, TCGv_i64 tcg_right,
7519                     int pos)
7520{
7521    /* Extract 64 bits from the middle of two concatenated 64 bit
7522     * vector register slices left:right. The extracted bits start
7523     * at 'pos' bits into the right (least significant) side.
7524     * We return the result in tcg_right, and guarantee not to
7525     * trash tcg_left.
7526     */
7527    TCGv_i64 tcg_tmp = tcg_temp_new_i64();
7528    assert(pos > 0 && pos < 64);
7529
7530    tcg_gen_shri_i64(tcg_right, tcg_right, pos);
7531    tcg_gen_shli_i64(tcg_tmp, tcg_left, 64 - pos);
7532    tcg_gen_or_i64(tcg_right, tcg_right, tcg_tmp);
7533
7534    tcg_temp_free_i64(tcg_tmp);
7535}
7536
7537/* EXT
7538 *   31  30 29         24 23 22  21 20  16 15  14  11 10  9    5 4    0
7539 * +---+---+-------------+-----+---+------+---+------+---+------+------+
7540 * | 0 | Q | 1 0 1 1 1 0 | op2 | 0 |  Rm  | 0 | imm4 | 0 |  Rn  |  Rd  |
7541 * +---+---+-------------+-----+---+------+---+------+---+------+------+
7542 */
7543static void disas_simd_ext(DisasContext *s, uint32_t insn)
7544{
7545    int is_q = extract32(insn, 30, 1);
7546    int op2 = extract32(insn, 22, 2);
7547    int imm4 = extract32(insn, 11, 4);
7548    int rm = extract32(insn, 16, 5);
7549    int rn = extract32(insn, 5, 5);
7550    int rd = extract32(insn, 0, 5);
7551    int pos = imm4 << 3;
7552    TCGv_i64 tcg_resl, tcg_resh;
7553
7554    if (op2 != 0 || (!is_q && extract32(imm4, 3, 1))) {
7555        unallocated_encoding(s);
7556        return;
7557    }
7558
7559    if (!fp_access_check(s)) {
7560        return;
7561    }
7562
7563    tcg_resh = tcg_temp_new_i64();
7564    tcg_resl = tcg_temp_new_i64();
7565
7566    /* Vd gets bits starting at pos bits into Vm:Vn. This is
7567     * either extracting 128 bits from a 128:128 concatenation, or
7568     * extracting 64 bits from a 64:64 concatenation.
7569     */
7570    if (!is_q) {
7571        read_vec_element(s, tcg_resl, rn, 0, MO_64);
7572        if (pos != 0) {
7573            read_vec_element(s, tcg_resh, rm, 0, MO_64);
7574            do_ext64(s, tcg_resh, tcg_resl, pos);
7575        }
7576    } else {
7577        TCGv_i64 tcg_hh;
7578        typedef struct {
7579            int reg;
7580            int elt;
7581        } EltPosns;
7582        EltPosns eltposns[] = { {rn, 0}, {rn, 1}, {rm, 0}, {rm, 1} };
7583        EltPosns *elt = eltposns;
7584
7585        if (pos >= 64) {
7586            elt++;
7587            pos -= 64;
7588        }
7589
7590        read_vec_element(s, tcg_resl, elt->reg, elt->elt, MO_64);
7591        elt++;
7592        read_vec_element(s, tcg_resh, elt->reg, elt->elt, MO_64);
7593        elt++;
7594        if (pos != 0) {
7595            do_ext64(s, tcg_resh, tcg_resl, pos);
7596            tcg_hh = tcg_temp_new_i64();
7597            read_vec_element(s, tcg_hh, elt->reg, elt->elt, MO_64);
7598            do_ext64(s, tcg_hh, tcg_resh, pos);
7599            tcg_temp_free_i64(tcg_hh);
7600        }
7601    }
7602
7603    write_vec_element(s, tcg_resl, rd, 0, MO_64);
7604    tcg_temp_free_i64(tcg_resl);
7605    if (is_q) {
7606        write_vec_element(s, tcg_resh, rd, 1, MO_64);
7607    }
7608    tcg_temp_free_i64(tcg_resh);
7609    clear_vec_high(s, is_q, rd);
7610}
7611
7612/* TBL/TBX
7613 *   31  30 29         24 23 22  21 20  16 15  14 13  12  11 10 9    5 4    0
7614 * +---+---+-------------+-----+---+------+---+-----+----+-----+------+------+
7615 * | 0 | Q | 0 0 1 1 1 0 | op2 | 0 |  Rm  | 0 | len | op | 0 0 |  Rn  |  Rd  |
7616 * +---+---+-------------+-----+---+------+---+-----+----+-----+------+------+
7617 */
7618static void disas_simd_tb(DisasContext *s, uint32_t insn)
7619{
7620    int op2 = extract32(insn, 22, 2);
7621    int is_q = extract32(insn, 30, 1);
7622    int rm = extract32(insn, 16, 5);
7623    int rn = extract32(insn, 5, 5);
7624    int rd = extract32(insn, 0, 5);
7625    int is_tbx = extract32(insn, 12, 1);
7626    int len = (extract32(insn, 13, 2) + 1) * 16;
7627
7628    if (op2 != 0) {
7629        unallocated_encoding(s);
7630        return;
7631    }
7632
7633    if (!fp_access_check(s)) {
7634        return;
7635    }
7636
7637    tcg_gen_gvec_2_ptr(vec_full_reg_offset(s, rd),
7638                       vec_full_reg_offset(s, rm), cpu_env,
7639                       is_q ? 16 : 8, vec_full_reg_size(s),
7640                       (len << 6) | (is_tbx << 5) | rn,
7641                       gen_helper_simd_tblx);
7642}
7643
/* ZIP/UZP/TRN
 *   31  30 29         24 23  22  21 20   16 15 14 12 11 10 9    5 4    0
 * +---+---+-------------+------+---+------+---+------------------+------+
 * | 0 | Q | 0 0 1 1 1 0 | size | 0 |  Rm  | 0 | opc | 1 0 |  Rn  |  Rd  |
 * +---+---+-------------+------+---+------+---+------------------+------+
 */
static void disas_simd_zip_trn(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    /* opc field bits [1:0] indicate ZIP/UZP/TRN;
     * bit 2 indicates 1 vs 2 variant of the insn.
     */
    int opcode = extract32(insn, 12, 2);
    bool part = extract32(insn, 14, 1);
    bool is_q = extract32(insn, 30, 1);
    int esize = 8 << size;
    int i, ofs;
    int datasize = is_q ? 128 : 64;
    int elements = datasize / esize;
    TCGv_i64 tcg_res, tcg_resl, tcg_resh;

    /* opcode 0 is unallocated; 64-bit elements require the Q form. */
    if (opcode == 0 || (size == 3 && !is_q)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* Build the result in temporaries: rd is only written after all
     * source elements have been read, so rd may alias rn or rm.
     */
    tcg_resl = tcg_const_i64(0);
    tcg_resh = is_q ? tcg_const_i64(0) : NULL;
    tcg_res = tcg_temp_new_i64();

    for (i = 0; i < elements; i++) {
        /* Pick the source element that becomes result element i. */
        switch (opcode) {
        case 1: /* UZP1/2 */
        {
            /* Concatenate the even (part==0) or odd (part==1)
             * numbered elements of Rn, then of Rm.
             */
            int midpoint = elements / 2;
            if (i < midpoint) {
                read_vec_element(s, tcg_res, rn, 2 * i + part, size);
            } else {
                read_vec_element(s, tcg_res, rm,
                                 2 * (i - midpoint) + part, size);
            }
            break;
        }
        case 2: /* TRN1/2 */
            /* Interleave element pairs: even result slots come from
             * Rn, odd slots from Rm, at index (i & ~1) + part.
             */
            if (i & 1) {
                read_vec_element(s, tcg_res, rm, (i & ~1) + part, size);
            } else {
                read_vec_element(s, tcg_res, rn, (i & ~1) + part, size);
            }
            break;
        case 3: /* ZIP1/2 */
        {
            /* Zip alternating elements from the low (part==0) or
             * high (part==1) halves of Rn and Rm.
             */
            int base = part * elements / 2;
            if (i & 1) {
                read_vec_element(s, tcg_res, rm, base + (i >> 1), size);
            } else {
                read_vec_element(s, tcg_res, rn, base + (i >> 1), size);
            }
            break;
        }
        default:
            g_assert_not_reached();
        }

        /* OR the element into the low or high 64-bit accumulator. */
        ofs = i * esize;
        if (ofs < 64) {
            tcg_gen_shli_i64(tcg_res, tcg_res, ofs);
            tcg_gen_or_i64(tcg_resl, tcg_resl, tcg_res);
        } else {
            tcg_gen_shli_i64(tcg_res, tcg_res, ofs - 64);
            tcg_gen_or_i64(tcg_resh, tcg_resh, tcg_res);
        }
    }

    tcg_temp_free_i64(tcg_res);

    write_vec_element(s, tcg_resl, rd, 0, MO_64);
    tcg_temp_free_i64(tcg_resl);

    if (is_q) {
        write_vec_element(s, tcg_resh, rd, 1, MO_64);
        tcg_temp_free_i64(tcg_resh);
    }
    clear_vec_high(s, is_q, rd);
}
7736
7737/*
7738 * do_reduction_op helper
7739 *
7740 * This mirrors the Reduce() pseudocode in the ARM ARM. It is
7741 * important for correct NaN propagation that we do these
7742 * operations in exactly the order specified by the pseudocode.
7743 *
7744 * This is a recursive function, TCG temps should be freed by the
7745 * calling function once it is done with the values.
7746 */
7747static TCGv_i32 do_reduction_op(DisasContext *s, int fpopcode, int rn,
7748                                int esize, int size, int vmap, TCGv_ptr fpst)
7749{
7750    if (esize == size) {
7751        int element;
7752        MemOp msize = esize == 16 ? MO_16 : MO_32;
7753        TCGv_i32 tcg_elem;
7754
7755        /* We should have one register left here */
7756        assert(ctpop8(vmap) == 1);
7757        element = ctz32(vmap);
7758        assert(element < 8);
7759
7760        tcg_elem = tcg_temp_new_i32();
7761        read_vec_element_i32(s, tcg_elem, rn, element, msize);
7762        return tcg_elem;
7763    } else {
7764        int bits = size / 2;
7765        int shift = ctpop8(vmap) / 2;
7766        int vmap_lo = (vmap >> shift) & vmap;
7767        int vmap_hi = (vmap & ~vmap_lo);
7768        TCGv_i32 tcg_hi, tcg_lo, tcg_res;
7769
7770        tcg_hi = do_reduction_op(s, fpopcode, rn, esize, bits, vmap_hi, fpst);
7771        tcg_lo = do_reduction_op(s, fpopcode, rn, esize, bits, vmap_lo, fpst);
7772        tcg_res = tcg_temp_new_i32();
7773
7774        switch (fpopcode) {
7775        case 0x0c: /* fmaxnmv half-precision */
7776            gen_helper_advsimd_maxnumh(tcg_res, tcg_lo, tcg_hi, fpst);
7777            break;
7778        case 0x0f: /* fmaxv half-precision */
7779            gen_helper_advsimd_maxh(tcg_res, tcg_lo, tcg_hi, fpst);
7780            break;
7781        case 0x1c: /* fminnmv half-precision */
7782            gen_helper_advsimd_minnumh(tcg_res, tcg_lo, tcg_hi, fpst);
7783            break;
7784        case 0x1f: /* fminv half-precision */
7785            gen_helper_advsimd_minh(tcg_res, tcg_lo, tcg_hi, fpst);
7786            break;
7787        case 0x2c: /* fmaxnmv */
7788            gen_helper_vfp_maxnums(tcg_res, tcg_lo, tcg_hi, fpst);
7789            break;
7790        case 0x2f: /* fmaxv */
7791            gen_helper_vfp_maxs(tcg_res, tcg_lo, tcg_hi, fpst);
7792            break;
7793        case 0x3c: /* fminnmv */
7794            gen_helper_vfp_minnums(tcg_res, tcg_lo, tcg_hi, fpst);
7795            break;
7796        case 0x3f: /* fminv */
7797            gen_helper_vfp_mins(tcg_res, tcg_lo, tcg_hi, fpst);
7798            break;
7799        default:
7800            g_assert_not_reached();
7801        }
7802
7803        tcg_temp_free_i32(tcg_hi);
7804        tcg_temp_free_i32(tcg_lo);
7805        return tcg_res;
7806    }
7807}
7808
/* AdvSIMD across lanes
 *   31  30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
 * | 0 | Q | U | 0 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
 */
static void disas_simd_across_lanes(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    bool is_q = extract32(insn, 30, 1);
    bool is_u = extract32(insn, 29, 1);
    bool is_fp = false;
    bool is_min = false;
    int esize;
    int elements;
    int i;
    TCGv_i64 tcg_res, tcg_elt;

    /* Validate the opcode/size/U/Q combination for each instruction. */
    switch (opcode) {
    case 0x1b: /* ADDV */
        if (is_u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x3: /* SADDLV, UADDLV */
    case 0xa: /* SMAXV, UMAXV */
    case 0x1a: /* SMINV, UMINV */
        /* No 64-bit elements; 32-bit elements need the Q form. */
        if (size == 3 || (size == 2 && !is_q)) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0xc: /* FMAXNMV, FMINNMV */
    case 0xf: /* FMAXV, FMINV */
        /* Bit 1 of size field encodes min vs max and the actual size
         * depends on the encoding of the U bit. If not set (and FP16
         * enabled) then we do half-precision float instead of single
         * precision.
         */
        is_min = extract32(size, 1, 1);
        is_fp = true;
        if (!is_u && dc_isar_feature(aa64_fp16, s)) {
            size = 1;
        } else if (!is_u || !is_q || extract32(size, 0, 1)) {
            unallocated_encoding(s);
            return;
        } else {
            size = 2;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    esize = 8 << size;
    elements = (is_q ? 128 : 64) / esize;

    tcg_res = tcg_temp_new_i64();
    tcg_elt = tcg_temp_new_i64();

    /* These instructions operate across all lanes of a vector
     * to produce a single result. We can guarantee that a 64
     * bit intermediate is sufficient:
     *  + for [US]ADDLV the maximum element size is 32 bits, and
     *    the result type is 64 bits
     *  + for FMAX*V, FMIN*V, ADDV the intermediate type is the
     *    same as the element size, which is 32 bits at most
     * For the integer operations we can choose to work at 64
     * or 32 bits and truncate at the end; for simplicity
     * we use 64 bits always. The floating point
     * ops do require 32 bit intermediates, though.
     */
    if (!is_fp) {
        /* Fold the elements left to right into tcg_res, starting
         * from element 0, sign-extending for the signed variants.
         */
        read_vec_element(s, tcg_res, rn, 0, size | (is_u ? 0 : MO_SIGN));

        for (i = 1; i < elements; i++) {
            read_vec_element(s, tcg_elt, rn, i, size | (is_u ? 0 : MO_SIGN));

            switch (opcode) {
            case 0x03: /* SADDLV / UADDLV */
            case 0x1b: /* ADDV */
                tcg_gen_add_i64(tcg_res, tcg_res, tcg_elt);
                break;
            case 0x0a: /* SMAXV / UMAXV */
                if (is_u) {
                    tcg_gen_umax_i64(tcg_res, tcg_res, tcg_elt);
                } else {
                    tcg_gen_smax_i64(tcg_res, tcg_res, tcg_elt);
                }
                break;
            case 0x1a: /* SMINV / UMINV */
                if (is_u) {
                    tcg_gen_umin_i64(tcg_res, tcg_res, tcg_elt);
                } else {
                    tcg_gen_smin_i64(tcg_res, tcg_res, tcg_elt);
                }
                break;
            default:
                g_assert_not_reached();
            }

        }
    } else {
        /* Floating point vector reduction ops which work across 32
         * bit (single) or 16 bit (half-precision) intermediates.
         * Note that correct NaN propagation requires that we do these
         * operations in exactly the order specified by the pseudocode.
         */
        TCGv_ptr fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
        int fpopcode = opcode | is_min << 4 | is_u << 5;
        int vmap = (1 << elements) - 1;
        TCGv_i32 tcg_res32 = do_reduction_op(s, fpopcode, rn, esize,
                                             (is_q ? 128 : 64), vmap, fpst);
        tcg_gen_extu_i32_i64(tcg_res, tcg_res32);
        tcg_temp_free_i32(tcg_res32);
        tcg_temp_free_ptr(fpst);
    }

    tcg_temp_free_i64(tcg_elt);

    /* Now truncate the result to the width required for the final output */
    if (opcode == 0x03) {
        /* SADDLV, UADDLV: result is 2*esize */
        size++;
    }

    switch (size) {
    case 0:
        tcg_gen_ext8u_i64(tcg_res, tcg_res);
        break;
    case 1:
        tcg_gen_ext16u_i64(tcg_res, tcg_res);
        break;
    case 2:
        tcg_gen_ext32u_i64(tcg_res, tcg_res);
        break;
    case 3:
        break;
    default:
        g_assert_not_reached();
    }

    write_fp_dreg(s, rd, tcg_res);
    tcg_temp_free_i64(tcg_res);
}
7963
7964/* DUP (Element, Vector)
7965 *
7966 *  31  30   29              21 20    16 15        10  9    5 4    0
7967 * +---+---+-------------------+--------+-------------+------+------+
7968 * | 0 | Q | 0 0 1 1 1 0 0 0 0 |  imm5  | 0 0 0 0 0 1 |  Rn  |  Rd  |
7969 * +---+---+-------------------+--------+-------------+------+------+
7970 *
7971 * size: encoded in imm5 (see ARM ARM LowestSetBit())
7972 */
7973static void handle_simd_dupe(DisasContext *s, int is_q, int rd, int rn,
7974                             int imm5)
7975{
7976    int size = ctz32(imm5);
7977    int index;
7978
7979    if (size > 3 || (size == 3 && !is_q)) {
7980        unallocated_encoding(s);
7981        return;
7982    }
7983
7984    if (!fp_access_check(s)) {
7985        return;
7986    }
7987
7988    index = imm5 >> (size + 1);
7989    tcg_gen_gvec_dup_mem(size, vec_full_reg_offset(s, rd),
7990                         vec_reg_offset(s, rn, index, size),
7991                         is_q ? 16 : 8, vec_full_reg_size(s));
7992}
7993
7994/* DUP (element, scalar)
7995 *  31                   21 20    16 15        10  9    5 4    0
7996 * +-----------------------+--------+-------------+------+------+
7997 * | 0 1 0 1 1 1 1 0 0 0 0 |  imm5  | 0 0 0 0 0 1 |  Rn  |  Rd  |
7998 * +-----------------------+--------+-------------+------+------+
7999 */
8000static void handle_simd_dupes(DisasContext *s, int rd, int rn,
8001                              int imm5)
8002{
8003    int size = ctz32(imm5);
8004    int index;
8005    TCGv_i64 tmp;
8006
8007    if (size > 3) {
8008        unallocated_encoding(s);
8009        return;
8010    }
8011
8012    if (!fp_access_check(s)) {
8013        return;
8014    }
8015
8016    index = imm5 >> (size + 1);
8017
8018    /* This instruction just extracts the specified element and
8019     * zero-extends it into the bottom of the destination register.
8020     */
8021    tmp = tcg_temp_new_i64();
8022    read_vec_element(s, tmp, rn, index, size);
8023    write_fp_dreg(s, rd, tmp);
8024    tcg_temp_free_i64(tmp);
8025}
8026
8027/* DUP (General)
8028 *
8029 *  31  30   29              21 20    16 15        10  9    5 4    0
8030 * +---+---+-------------------+--------+-------------+------+------+
8031 * | 0 | Q | 0 0 1 1 1 0 0 0 0 |  imm5  | 0 0 0 0 1 1 |  Rn  |  Rd  |
8032 * +---+---+-------------------+--------+-------------+------+------+
8033 *
8034 * size: encoded in imm5 (see ARM ARM LowestSetBit())
8035 */
8036static void handle_simd_dupg(DisasContext *s, int is_q, int rd, int rn,
8037                             int imm5)
8038{
8039    int size = ctz32(imm5);
8040    uint32_t dofs, oprsz, maxsz;
8041
8042    if (size > 3 || ((size == 3) && !is_q)) {
8043        unallocated_encoding(s);
8044        return;
8045    }
8046
8047    if (!fp_access_check(s)) {
8048        return;
8049    }
8050
8051    dofs = vec_full_reg_offset(s, rd);
8052    oprsz = is_q ? 16 : 8;
8053    maxsz = vec_full_reg_size(s);
8054
8055    tcg_gen_gvec_dup_i64(size, dofs, oprsz, maxsz, cpu_reg(s, rn));
8056}
8057
8058/* INS (Element)
8059 *
8060 *  31                   21 20    16 15  14    11  10 9    5 4    0
8061 * +-----------------------+--------+------------+---+------+------+
8062 * | 0 1 1 0 1 1 1 0 0 0 0 |  imm5  | 0 |  imm4  | 1 |  Rn  |  Rd  |
8063 * +-----------------------+--------+------------+---+------+------+
8064 *
8065 * size: encoded in imm5 (see ARM ARM LowestSetBit())
8066 * index: encoded in imm5<4:size+1>
8067 */
8068static void handle_simd_inse(DisasContext *s, int rd, int rn,
8069                             int imm4, int imm5)
8070{
8071    int size = ctz32(imm5);
8072    int src_index, dst_index;
8073    TCGv_i64 tmp;
8074
8075    if (size > 3) {
8076        unallocated_encoding(s);
8077        return;
8078    }
8079
8080    if (!fp_access_check(s)) {
8081        return;
8082    }
8083
8084    dst_index = extract32(imm5, 1+size, 5);
8085    src_index = extract32(imm4, size, 4);
8086
8087    tmp = tcg_temp_new_i64();
8088
8089    read_vec_element(s, tmp, rn, src_index, size);
8090    write_vec_element(s, tmp, rd, dst_index, size);
8091
8092    tcg_temp_free_i64(tmp);
8093
8094    /* INS is considered a 128-bit write for SVE. */
8095    clear_vec_high(s, true, rd);
8096}
8097
8098
8099/* INS (General)
8100 *
8101 *  31                   21 20    16 15        10  9    5 4    0
8102 * +-----------------------+--------+-------------+------+------+
8103 * | 0 1 0 0 1 1 1 0 0 0 0 |  imm5  | 0 0 0 1 1 1 |  Rn  |  Rd  |
8104 * +-----------------------+--------+-------------+------+------+
8105 *
8106 * size: encoded in imm5 (see ARM ARM LowestSetBit())
8107 * index: encoded in imm5<4:size+1>
8108 */
8109static void handle_simd_insg(DisasContext *s, int rd, int rn, int imm5)
8110{
8111    int size = ctz32(imm5);
8112    int idx;
8113
8114    if (size > 3) {
8115        unallocated_encoding(s);
8116        return;
8117    }
8118
8119    if (!fp_access_check(s)) {
8120        return;
8121    }
8122
8123    idx = extract32(imm5, 1 + size, 4 - size);
8124    write_vec_element(s, cpu_reg(s, rn), rd, idx, size);
8125
8126    /* INS is considered a 128-bit write for SVE. */
8127    clear_vec_high(s, true, rd);
8128}
8129
8130/*
8131 * UMOV (General)
8132 * SMOV (General)
8133 *
8134 *  31  30   29              21 20    16 15    12   10 9    5 4    0
8135 * +---+---+-------------------+--------+-------------+------+------+
8136 * | 0 | Q | 0 0 1 1 1 0 0 0 0 |  imm5  | 0 0 1 U 1 1 |  Rn  |  Rd  |
8137 * +---+---+-------------------+--------+-------------+------+------+
8138 *
8139 * U: unsigned when set
8140 * size: encoded in imm5 (see ARM ARM LowestSetBit())
8141 */
8142static void handle_simd_umov_smov(DisasContext *s, int is_q, int is_signed,
8143                                  int rn, int rd, int imm5)
8144{
8145    int size = ctz32(imm5);
8146    int element;
8147    TCGv_i64 tcg_rd;
8148
8149    /* Check for UnallocatedEncodings */
8150    if (is_signed) {
8151        if (size > 2 || (size == 2 && !is_q)) {
8152            unallocated_encoding(s);
8153            return;
8154        }
8155    } else {
8156        if (size > 3
8157            || (size < 3 && is_q)
8158            || (size == 3 && !is_q)) {
8159            unallocated_encoding(s);
8160            return;
8161        }
8162    }
8163
8164    if (!fp_access_check(s)) {
8165        return;
8166    }
8167
8168    element = extract32(imm5, 1+size, 4);
8169
8170    tcg_rd = cpu_reg(s, rd);
8171    read_vec_element(s, tcg_rd, rn, element, size | (is_signed ? MO_SIGN : 0));
8172    if (is_signed && !is_q) {
8173        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
8174    }
8175}
8176
8177/* AdvSIMD copy
8178 *   31  30  29  28             21 20  16 15  14  11 10  9    5 4    0
8179 * +---+---+----+-----------------+------+---+------+---+------+------+
8180 * | 0 | Q | op | 0 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 |  Rn  |  Rd  |
8181 * +---+---+----+-----------------+------+---+------+---+------+------+
8182 */
8183static void disas_simd_copy(DisasContext *s, uint32_t insn)
8184{
8185    int rd = extract32(insn, 0, 5);
8186    int rn = extract32(insn, 5, 5);
8187    int imm4 = extract32(insn, 11, 4);
8188    int op = extract32(insn, 29, 1);
8189    int is_q = extract32(insn, 30, 1);
8190    int imm5 = extract32(insn, 16, 5);
8191
8192    if (op) {
8193        if (is_q) {
8194            /* INS (element) */
8195            handle_simd_inse(s, rd, rn, imm4, imm5);
8196        } else {
8197            unallocated_encoding(s);
8198        }
8199    } else {
8200        switch (imm4) {
8201        case 0:
8202            /* DUP (element - vector) */
8203            handle_simd_dupe(s, is_q, rd, rn, imm5);
8204            break;
8205        case 1:
8206            /* DUP (general) */
8207            handle_simd_dupg(s, is_q, rd, rn, imm5);
8208            break;
8209        case 3:
8210            if (is_q) {
8211                /* INS (general) */
8212                handle_simd_insg(s, rd, rn, imm5);
8213            } else {
8214                unallocated_encoding(s);
8215            }
8216            break;
8217        case 5:
8218        case 7:
8219            /* UMOV/SMOV (is_q indicates 32/64; imm4 indicates signedness) */
8220            handle_simd_umov_smov(s, is_q, (imm4 == 5), rn, rd, imm5);
8221            break;
8222        default:
8223            unallocated_encoding(s);
8224            break;
8225        }
8226    }
8227}
8228
/* AdvSIMD modified immediate
 *  31  30   29  28                 19 18 16 15   12  11  10  9     5 4    0
 * +---+---+----+---------------------+-----+-------+----+---+-------+------+
 * | 0 | Q | op | 0 1 1 1 1 0 0 0 0 0 | abc | cmode | o2 | 1 | defgh |  Rd  |
 * +---+---+----+---------------------+-----+-------+----+---+-------+------+
 *
 * There are a number of operations that can be carried out here:
 *   MOVI - move (shifted) imm into register
 *   MVNI - move inverted (shifted) imm into register
 *   ORR  - bitwise OR of (shifted) imm with register
 *   BIC  - bitwise clear of (shifted) imm with register
 * With ARMv8.2 we also have:
 *   FMOV half-precision
 */
static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int cmode = extract32(insn, 12, 4);
    int o2 = extract32(insn, 11, 1);
    /* The 8-bit immediate is split across the abc and defgh fields. */
    uint64_t abcdefgh = extract32(insn, 5, 5) | (extract32(insn, 16, 3) << 5);
    bool is_neg = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);
    uint64_t imm = 0;

    /* o2 == 1 (or cmode 0xf with op set in the 64-bit form) is only
     * allocated for the FP16 FMOV (vector, immediate) encoding.
     */
    if (o2 != 0 || ((cmode == 0xf) && is_neg && !is_q)) {
        /* Check for FMOV (vector, immediate) - half-precision */
        if (!(dc_isar_feature(aa64_fp16, s) && o2 && cmode == 0xf)) {
            unallocated_encoding(s);
            return;
        }
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (cmode == 15 && o2 && !is_neg) {
        /* FMOV (vector, immediate) - half-precision */
        imm = vfp_expand_imm(MO_16, abcdefgh);
        /* now duplicate across the lanes */
        imm = dup_const(MO_16, imm);
    } else {
        /* Expand abcdefgh per cmode, with MVNI/BIC negation applied. */
        imm = asimd_imm_const(abcdefgh, cmode, is_neg);
    }

    /* cmode patterns x0x1 and 10x1 are the ORR/BIC forms. */
    if (!((cmode & 0x9) == 0x1 || (cmode & 0xd) == 0x9)) {
        /* MOVI or MVNI, with MVNI negation handled above.  */
        tcg_gen_gvec_dup_imm(MO_64, vec_full_reg_offset(s, rd), is_q ? 16 : 8,
                             vec_full_reg_size(s), imm);
    } else {
        /* ORR or BIC, with BIC negation to AND handled above.  */
        if (is_neg) {
            gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_andi, MO_64);
        } else {
            gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_ori, MO_64);
        }
    }
}
8287
8288/* AdvSIMD scalar copy
8289 *  31 30  29  28             21 20  16 15  14  11 10  9    5 4    0
8290 * +-----+----+-----------------+------+---+------+---+------+------+
8291 * | 0 1 | op | 1 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 |  Rn  |  Rd  |
8292 * +-----+----+-----------------+------+---+------+---+------+------+
8293 */
8294static void disas_simd_scalar_copy(DisasContext *s, uint32_t insn)
8295{
8296    int rd = extract32(insn, 0, 5);
8297    int rn = extract32(insn, 5, 5);
8298    int imm4 = extract32(insn, 11, 4);
8299    int imm5 = extract32(insn, 16, 5);
8300    int op = extract32(insn, 29, 1);
8301
8302    if (op != 0 || imm4 != 0) {
8303        unallocated_encoding(s);
8304        return;
8305    }
8306
8307    /* DUP (element, scalar) */
8308    handle_simd_dupes(s, rd, rn, imm5);
8309}
8310
8311/* AdvSIMD scalar pairwise
8312 *  31 30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
8313 * +-----+---+-----------+------+-----------+--------+-----+------+------+
8314 * | 0 1 | U | 1 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
8315 * +-----+---+-----------+------+-----------+--------+-----+------+------+
8316 */
static void disas_simd_scalar_pairwise(DisasContext *s, uint32_t insn)
{
    int u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    TCGv_ptr fpst;

    /* For some ops (the FP ones), size[1] is part of the encoding.
     * For ADDP strictly it is not but size[1] is always 1 for valid
     * encodings.
     */
    opcode |= (extract32(size, 1, 1) << 5);

    switch (opcode) {
    case 0x3b: /* ADDP */
        if (u || size != 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }

        /* ADDP is an integer op: no FP status pointer needed */
        fpst = NULL;
        break;
    case 0xc: /* FMAXNMP */
    case 0xd: /* FADDP */
    case 0xf: /* FMAXP */
    case 0x2c: /* FMINNMP */
    case 0x2f: /* FMINP */
        /* FP op, size[0] is 32 or 64 bit */
        if (!u) {
            /* U == 0 selects the half-precision form (ARMv8.2-FP16) */
            if (!dc_isar_feature(aa64_fp16, s)) {
                unallocated_encoding(s);
                return;
            } else {
                size = MO_16;
            }
        } else {
            size = extract32(size, 0, 1) ? MO_64 : MO_32;
        }

        if (!fp_access_check(s)) {
            return;
        }

        /* fp16 ops use their own FP status with the FZ16 flags */
        fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (size == MO_64) {
        /* 64-bit elements: the pair is the two halves of Vn */
        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
        TCGv_i64 tcg_op2 = tcg_temp_new_i64();
        TCGv_i64 tcg_res = tcg_temp_new_i64();

        read_vec_element(s, tcg_op1, rn, 0, MO_64);
        read_vec_element(s, tcg_op2, rn, 1, MO_64);

        switch (opcode) {
        case 0x3b: /* ADDP */
            tcg_gen_add_i64(tcg_res, tcg_op1, tcg_op2);
            break;
        case 0xc: /* FMAXNMP */
            gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0xd: /* FADDP */
            gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0xf: /* FMAXP */
            gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0x2c: /* FMINNMP */
            gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0x2f: /* FMINP */
            gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        default:
            g_assert_not_reached();
        }

        write_fp_dreg(s, rd, tcg_res);

        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2);
        tcg_temp_free_i64(tcg_res);
    } else {
        /* 16- or 32-bit elements: pair is elements 0 and 1 of Vn */
        TCGv_i32 tcg_op1 = tcg_temp_new_i32();
        TCGv_i32 tcg_op2 = tcg_temp_new_i32();
        TCGv_i32 tcg_res = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_op1, rn, 0, size);
        read_vec_element_i32(s, tcg_op2, rn, 1, size);

        if (size == MO_16) {
            switch (opcode) {
            case 0xc: /* FMAXNMP */
                gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xd: /* FADDP */
                gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xf: /* FMAXP */
                gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x2c: /* FMINNMP */
                gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x2f: /* FMINP */
                gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            switch (opcode) {
            case 0xc: /* FMAXNMP */
                gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xd: /* FADDP */
                gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xf: /* FMAXP */
                gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x2c: /* FMINNMP */
                gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x2f: /* FMINP */
                gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }
        }

        write_fp_sreg(s, rd, tcg_res);

        tcg_temp_free_i32(tcg_op1);
        tcg_temp_free_i32(tcg_op2);
        tcg_temp_free_i32(tcg_res);
    }

    if (fpst) {
        tcg_temp_free_ptr(fpst);
    }
}
8469
8470/*
8471 * Common SSHR[RA]/USHR[RA] - Shift right (optional rounding/accumulate)
8472 *
 * This code handles the common shifting logic and is used by both
8474 * the vector and scalar code.
8475 */
static void handle_shri_with_rndacc(TCGv_i64 tcg_res, TCGv_i64 tcg_src,
                                    TCGv_i64 tcg_rnd, bool accumulate,
                                    bool is_u, int size, int shift)
{
    bool extended_result = false;
    /* A non-NULL tcg_rnd carries the rounding constant (1 << (shift - 1)) */
    bool round = tcg_rnd != NULL;
    int ext_lshift = 0;
    TCGv_i64 tcg_src_hi;

    if (round && size == 3) {
        /* Rounding a 64-bit element can carry out of bit 63, so we
         * need more than 64 bits of precision; keep the overflow in
         * tcg_src_hi.
         */
        extended_result = true;
        ext_lshift = 64 - shift;
        tcg_src_hi = tcg_temp_new_i64();
    } else if (shift == 64) {
        if (!accumulate && is_u) {
            /* result is zero */
            tcg_gen_movi_i64(tcg_res, 0);
            return;
        }
    }

    /* Deal with the rounding step */
    if (round) {
        if (extended_result) {
            TCGv_i64 tcg_zero = tcg_constant_i64(0);
            if (!is_u) {
                /* take care of sign extending tcg_res */
                tcg_gen_sari_i64(tcg_src_hi, tcg_src, 63);
                tcg_gen_add2_i64(tcg_src, tcg_src_hi,
                                 tcg_src, tcg_src_hi,
                                 tcg_rnd, tcg_zero);
            } else {
                tcg_gen_add2_i64(tcg_src, tcg_src_hi,
                                 tcg_src, tcg_zero,
                                 tcg_rnd, tcg_zero);
            }
        } else {
            tcg_gen_add_i64(tcg_src, tcg_src, tcg_rnd);
        }
    }

    /* Now do the shift right */
    if (round && extended_result) {
        /* extended case, >64 bit precision required */
        if (ext_lshift == 0) {
            /* special case, only high bits matter */
            tcg_gen_mov_i64(tcg_src, tcg_src_hi);
        } else {
            /* combine the shifted-down low word with the bits the
             * high word contributes at the top
             */
            tcg_gen_shri_i64(tcg_src, tcg_src, shift);
            tcg_gen_shli_i64(tcg_src_hi, tcg_src_hi, ext_lshift);
            tcg_gen_or_i64(tcg_src, tcg_src, tcg_src_hi);
        }
    } else {
        if (is_u) {
            if (shift == 64) {
                /* essentially shifting in 64 zeros */
                tcg_gen_movi_i64(tcg_src, 0);
            } else {
                tcg_gen_shri_i64(tcg_src, tcg_src, shift);
            }
        } else {
            if (shift == 64) {
                /* effectively extending the sign-bit */
                tcg_gen_sari_i64(tcg_src, tcg_src, 63);
            } else {
                tcg_gen_sari_i64(tcg_src, tcg_src, shift);
            }
        }
    }

    if (accumulate) {
        /* accumulating forms (xSRA/xRSRA) add into the destination */
        tcg_gen_add_i64(tcg_res, tcg_res, tcg_src);
    } else {
        tcg_gen_mov_i64(tcg_res, tcg_src);
    }

    if (extended_result) {
        tcg_temp_free_i64(tcg_src_hi);
    }
}
8556
8557/* SSHR[RA]/USHR[RA] - Scalar shift right (optional rounding/accumulate) */
static void handle_scalar_simd_shri(DisasContext *s,
                                    bool is_u, int immh, int immb,
                                    int opcode, int rn, int rd)
{
    const int size = 3;
    int immhb = immh << 3 | immb;
    int shift = 2 * (8 << size) - immhb;
    bool accumulate = false;
    bool round = false;
    bool insert = false;
    TCGv_i64 tcg_rn;
    TCGv_i64 tcg_rd;
    TCGv_i64 tcg_round;

    /* Scalar shift-right forms only exist for 64-bit elements,
     * encoded with immh<3> set.
     */
    if (!extract32(immh, 3, 1)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* opcode 0x00 (SSHR / USHR) leaves all three flags false */
    switch (opcode) {
    case 0x02: /* SSRA / USRA (accumulate) */
        accumulate = true;
        break;
    case 0x04: /* SRSHR / URSHR (rounding) */
        round = true;
        break;
    case 0x06: /* SRSRA / URSRA (accum + rounding) */
        accumulate = round = true;
        break;
    case 0x08: /* SRI */
        insert = true;
        break;
    }

    if (round) {
        tcg_round = tcg_constant_i64(1ULL << (shift - 1));
    } else {
        tcg_round = NULL;
    }

    tcg_rn = read_fp_dreg(s, rn);
    /* SRI and the accumulating forms read the old destination value */
    tcg_rd = (accumulate || insert) ? read_fp_dreg(s, rd) : tcg_temp_new_i64();

    if (insert) {
        /* shift count same as element size is valid but does nothing;
         * special case to avoid potential shift by 64.
         */
        int esize = 8 << size;
        if (shift != esize) {
            tcg_gen_shri_i64(tcg_rn, tcg_rn, shift);
            tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, 0, esize - shift);
        }
    } else {
        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
                                accumulate, is_u, size, shift);
    }

    write_fp_dreg(s, rd, tcg_rd);

    tcg_temp_free_i64(tcg_rn);
    tcg_temp_free_i64(tcg_rd);
}
8624
8625/* SHL/SLI - Scalar shift left */
8626static void handle_scalar_simd_shli(DisasContext *s, bool insert,
8627                                    int immh, int immb, int opcode,
8628                                    int rn, int rd)
8629{
8630    int size = 32 - clz32(immh) - 1;
8631    int immhb = immh << 3 | immb;
8632    int shift = immhb - (8 << size);
8633    TCGv_i64 tcg_rn;
8634    TCGv_i64 tcg_rd;
8635
8636    if (!extract32(immh, 3, 1)) {
8637        unallocated_encoding(s);
8638        return;
8639    }
8640
8641    if (!fp_access_check(s)) {
8642        return;
8643    }
8644
8645    tcg_rn = read_fp_dreg(s, rn);
8646    tcg_rd = insert ? read_fp_dreg(s, rd) : tcg_temp_new_i64();
8647
8648    if (insert) {
8649        tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, shift, 64 - shift);
8650    } else {
8651        tcg_gen_shli_i64(tcg_rd, tcg_rn, shift);
8652    }
8653
8654    write_fp_dreg(s, rd, tcg_rd);
8655
8656    tcg_temp_free_i64(tcg_rn);
8657    tcg_temp_free_i64(tcg_rd);
8658}
8659
8660/* SQSHRN/SQSHRUN - Saturating (signed/unsigned) shift right with
8661 * (signed/unsigned) narrowing */
static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q,
                                   bool is_u_shift, bool is_u_narrow,
                                   int immh, int immb, int opcode,
                                   int rn, int rd)
{
    int immhb = immh << 3 | immb;
    int size = 32 - clz32(immh) - 1;
    int esize = 8 << size;
    int shift = (2 * esize) - immhb;
    /* number of narrowed elements packed into the destination's 64 bits */
    int elements = is_scalar ? 1 : (64 / esize);
    bool round = extract32(opcode, 0, 1);
    /* source elements are double-width; sign-extend for signed shifts */
    MemOp ldop = (size + 1) | (is_u_shift ? 0 : MO_SIGN);
    TCGv_i64 tcg_rn, tcg_rd, tcg_round;
    TCGv_i32 tcg_rd_narrowed;
    TCGv_i64 tcg_final;

    /* narrowing fns indexed by size, then by signed-sat vs unsigned-sat */
    static NeonGenNarrowEnvFn * const signed_narrow_fns[4][2] = {
        { gen_helper_neon_narrow_sat_s8,
          gen_helper_neon_unarrow_sat8 },
        { gen_helper_neon_narrow_sat_s16,
          gen_helper_neon_unarrow_sat16 },
        { gen_helper_neon_narrow_sat_s32,
          gen_helper_neon_unarrow_sat32 },
        { NULL, NULL },
    };
    static NeonGenNarrowEnvFn * const unsigned_narrow_fns[4] = {
        gen_helper_neon_narrow_sat_u8,
        gen_helper_neon_narrow_sat_u16,
        gen_helper_neon_narrow_sat_u32,
        NULL
    };
    NeonGenNarrowEnvFn *narrowfn;

    int i;

    assert(size < 4);

    /* immh<3> set would mean 64-bit elements, which cannot be narrowed */
    if (extract32(immh, 3, 1)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (is_u_shift) {
        narrowfn = unsigned_narrow_fns[size];
    } else {
        narrowfn = signed_narrow_fns[size][is_u_narrow ? 1 : 0];
    }

    tcg_rn = tcg_temp_new_i64();
    tcg_rd = tcg_temp_new_i64();
    tcg_rd_narrowed = tcg_temp_new_i32();
    tcg_final = tcg_const_i64(0);

    if (round) {
        tcg_round = tcg_constant_i64(1ULL << (shift - 1));
    } else {
        tcg_round = NULL;
    }

    for (i = 0; i < elements; i++) {
        /* shift (with optional rounding), saturate/narrow, then pack
         * the result into its slot of tcg_final
         */
        read_vec_element(s, tcg_rn, rn, i, ldop);
        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
                                false, is_u_shift, size+1, shift);
        narrowfn(tcg_rd_narrowed, cpu_env, tcg_rd);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_rd_narrowed);
        tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
    }

    if (!is_q) {
        write_vec_element(s, tcg_final, rd, 0, MO_64);
    } else {
        /* the "2" variants write the upper half of the destination */
        write_vec_element(s, tcg_final, rd, 1, MO_64);
    }

    tcg_temp_free_i64(tcg_rn);
    tcg_temp_free_i64(tcg_rd);
    tcg_temp_free_i32(tcg_rd_narrowed);
    tcg_temp_free_i64(tcg_final);

    clear_vec_high(s, is_q, rd);
}
8747
8748/* SQSHLU, UQSHL, SQSHL: saturating left shifts */
static void handle_simd_qshl(DisasContext *s, bool scalar, bool is_q,
                             bool src_unsigned, bool dst_unsigned,
                             int immh, int immb, int rn, int rd)
{
    int immhb = immh << 3 | immb;
    int size = 32 - clz32(immh) - 1;
    int shift = immhb - (8 << size);
    int pass;

    assert(immh != 0);
    assert(!(scalar && is_q));

    if (!scalar) {
        if (!is_q && extract32(immh, 3, 1)) {
            /* vector form with 64-bit elements requires Q == 1 */
            unallocated_encoding(s);
            return;
        }

        /* Since we use the variable-shift helpers we must
         * replicate the shift count into each element of
         * the tcg_shift value.
         */
        switch (size) {
        case 0:
            shift |= shift << 8;
            /* fall through */
        case 1:
            shift |= shift << 16;
            break;
        case 2:
        case 3:
            break;
        default:
            g_assert_not_reached();
        }
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (size == 3) {
        TCGv_i64 tcg_shift = tcg_constant_i64(shift);
        /* indexed by [src_unsigned][dst_unsigned];
         * the unsigned-src/signed-dst combination does not exist
         */
        static NeonGenTwo64OpEnvFn * const fns[2][2] = {
            { gen_helper_neon_qshl_s64, gen_helper_neon_qshlu_s64 },
            { NULL, gen_helper_neon_qshl_u64 },
        };
        NeonGenTwo64OpEnvFn *genfn = fns[src_unsigned][dst_unsigned];
        int maxpass = is_q ? 2 : 1;

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);
            genfn(tcg_op, cpu_env, tcg_op, tcg_shift);
            write_vec_element(s, tcg_op, rd, pass, MO_64);

            tcg_temp_free_i64(tcg_op);
        }
        clear_vec_high(s, is_q, rd);
    } else {
        TCGv_i32 tcg_shift = tcg_constant_i32(shift);
        /* indexed by [src_unsigned][dst_unsigned][size] */
        static NeonGenTwoOpEnvFn * const fns[2][2][3] = {
            {
                { gen_helper_neon_qshl_s8,
                  gen_helper_neon_qshl_s16,
                  gen_helper_neon_qshl_s32 },
                { gen_helper_neon_qshlu_s8,
                  gen_helper_neon_qshlu_s16,
                  gen_helper_neon_qshlu_s32 }
            }, {
                { NULL, NULL, NULL },
                { gen_helper_neon_qshl_u8,
                  gen_helper_neon_qshl_u16,
                  gen_helper_neon_qshl_u32 }
            }
        };
        NeonGenTwoOpEnvFn *genfn = fns[src_unsigned][dst_unsigned][size];
        MemOp memop = scalar ? size : MO_32;
        int maxpass = scalar ? 1 : is_q ? 4 : 2;

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op, rn, pass, memop);
            genfn(tcg_op, cpu_env, tcg_op, tcg_shift);
            if (scalar) {
                /* scalar: zero-extend the narrow result before writing */
                switch (size) {
                case 0:
                    tcg_gen_ext8u_i32(tcg_op, tcg_op);
                    break;
                case 1:
                    tcg_gen_ext16u_i32(tcg_op, tcg_op);
                    break;
                case 2:
                    break;
                default:
                    g_assert_not_reached();
                }
                write_fp_sreg(s, rd, tcg_op);
            } else {
                write_vec_element_i32(s, tcg_op, rd, pass, MO_32);
            }

            tcg_temp_free_i32(tcg_op);
        }

        if (!scalar) {
            clear_vec_high(s, is_q, rd);
        }
    }
}
8861
8862/* Common vector code for handling integer to FP conversion */
static void handle_simd_intfp_conv(DisasContext *s, int rd, int rn,
                                   int elements, int is_signed,
                                   int fracbits, int size)
{
    TCGv_ptr tcg_fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
    TCGv_i32 tcg_shift = NULL;

    MemOp mop = size | (is_signed ? MO_SIGN : 0);
    int pass;

    /* The 64-bit helpers always take a shift argument; for 32/16-bit
     * elements the fixed-point helpers are only used when fracbits != 0.
     */
    if (fracbits || size == MO_64) {
        tcg_shift = tcg_constant_i32(fracbits);
    }

    if (size == MO_64) {
        TCGv_i64 tcg_int64 = tcg_temp_new_i64();
        TCGv_i64 tcg_double = tcg_temp_new_i64();

        for (pass = 0; pass < elements; pass++) {
            read_vec_element(s, tcg_int64, rn, pass, mop);

            if (is_signed) {
                gen_helper_vfp_sqtod(tcg_double, tcg_int64,
                                     tcg_shift, tcg_fpst);
            } else {
                gen_helper_vfp_uqtod(tcg_double, tcg_int64,
                                     tcg_shift, tcg_fpst);
            }
            if (elements == 1) {
                /* scalar result */
                write_fp_dreg(s, rd, tcg_double);
            } else {
                write_vec_element(s, tcg_double, rd, pass, MO_64);
            }
        }

        tcg_temp_free_i64(tcg_int64);
        tcg_temp_free_i64(tcg_double);

    } else {
        TCGv_i32 tcg_int32 = tcg_temp_new_i32();
        TCGv_i32 tcg_float = tcg_temp_new_i32();

        for (pass = 0; pass < elements; pass++) {
            read_vec_element_i32(s, tcg_int32, rn, pass, mop);

            switch (size) {
            case MO_32:
                if (fracbits) {
                    if (is_signed) {
                        gen_helper_vfp_sltos(tcg_float, tcg_int32,
                                             tcg_shift, tcg_fpst);
                    } else {
                        gen_helper_vfp_ultos(tcg_float, tcg_int32,
                                             tcg_shift, tcg_fpst);
                    }
                } else {
                    if (is_signed) {
                        gen_helper_vfp_sitos(tcg_float, tcg_int32, tcg_fpst);
                    } else {
                        gen_helper_vfp_uitos(tcg_float, tcg_int32, tcg_fpst);
                    }
                }
                break;
            case MO_16:
                if (fracbits) {
                    if (is_signed) {
                        gen_helper_vfp_sltoh(tcg_float, tcg_int32,
                                             tcg_shift, tcg_fpst);
                    } else {
                        gen_helper_vfp_ultoh(tcg_float, tcg_int32,
                                             tcg_shift, tcg_fpst);
                    }
                } else {
                    if (is_signed) {
                        gen_helper_vfp_sitoh(tcg_float, tcg_int32, tcg_fpst);
                    } else {
                        gen_helper_vfp_uitoh(tcg_float, tcg_int32, tcg_fpst);
                    }
                }
                break;
            default:
                g_assert_not_reached();
            }

            if (elements == 1) {
                write_fp_sreg(s, rd, tcg_float);
            } else {
                write_vec_element_i32(s, tcg_float, rd, pass, size);
            }
        }

        tcg_temp_free_i32(tcg_int32);
        tcg_temp_free_i32(tcg_float);
    }

    tcg_temp_free_ptr(tcg_fpst);

    /* elements << size == 16 iff this was a full 128-bit operation */
    clear_vec_high(s, elements << size == 16, rd);
}
8962
8963/* UCVTF/SCVTF - Integer to FP conversion */
8964static void handle_simd_shift_intfp_conv(DisasContext *s, bool is_scalar,
8965                                         bool is_q, bool is_u,
8966                                         int immh, int immb, int opcode,
8967                                         int rn, int rd)
8968{
8969    int size, elements, fracbits;
8970    int immhb = immh << 3 | immb;
8971
8972    if (immh & 8) {
8973        size = MO_64;
8974        if (!is_scalar && !is_q) {
8975            unallocated_encoding(s);
8976            return;
8977        }
8978    } else if (immh & 4) {
8979        size = MO_32;
8980    } else if (immh & 2) {
8981        size = MO_16;
8982        if (!dc_isar_feature(aa64_fp16, s)) {
8983            unallocated_encoding(s);
8984            return;
8985        }
8986    } else {
8987        /* immh == 0 would be a failure of the decode logic */
8988        g_assert(immh == 1);
8989        unallocated_encoding(s);
8990        return;
8991    }
8992
8993    if (is_scalar) {
8994        elements = 1;
8995    } else {
8996        elements = (8 << is_q) >> size;
8997    }
8998    fracbits = (16 << size) - immhb;
8999
9000    if (!fp_access_check(s)) {
9001        return;
9002    }
9003
9004    handle_simd_intfp_conv(s, rd, rn, elements, !is_u, fracbits, size);
9005}
9006
/* FCVTZS, FCVTZU - FP to fixed-point conversion */
static void handle_simd_shift_fpint_conv(DisasContext *s, bool is_scalar,
                                         bool is_q, bool is_u,
                                         int immh, int immb, int rn, int rd)
{
    int immhb = immh << 3 | immb;
    int pass, size, fracbits;
    TCGv_ptr tcg_fpstatus;
    TCGv_i32 tcg_rmode, tcg_shift;

    /* Element size comes from the highest set bit of immh */
    if (immh & 0x8) {
        size = MO_64;
        if (!is_scalar && !is_q) {
            unallocated_encoding(s);
            return;
        }
    } else if (immh & 0x4) {
        size = MO_32;
    } else if (immh & 0x2) {
        size = MO_16;
        if (!dc_isar_feature(aa64_fp16, s)) {
            unallocated_encoding(s);
            return;
        }
    } else {
        /* Should have split out AdvSIMD modified immediate earlier.  */
        assert(immh == 1);
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    assert(!(is_scalar && is_q));

    /* FCVTZ* truncates: force round-to-zero for the conversion */
    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(FPROUNDING_ZERO));
    tcg_fpstatus = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
    fracbits = (16 << size) - immhb;
    tcg_shift = tcg_constant_i32(fracbits);

    if (size == MO_64) {
        int maxpass = is_scalar ? 1 : 2;

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);
            if (is_u) {
                gen_helper_vfp_touqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_tosqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
            }
            write_vec_element(s, tcg_op, rd, pass, MO_64);
            tcg_temp_free_i64(tcg_op);
        }
        clear_vec_high(s, is_q, rd);
    } else {
        void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
        int maxpass = is_scalar ? 1 : ((8 << is_q) >> size);

        switch (size) {
        case MO_16:
            if (is_u) {
                fn = gen_helper_vfp_touhh;
            } else {
                fn = gen_helper_vfp_toshh;
            }
            break;
        case MO_32:
            if (is_u) {
                fn = gen_helper_vfp_touls;
            } else {
                fn = gen_helper_vfp_tosls;
            }
            break;
        default:
            g_assert_not_reached();
        }

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op, rn, pass, size);
            fn(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
            if (is_scalar) {
                write_fp_sreg(s, rd, tcg_op);
            } else {
                write_vec_element_i32(s, tcg_op, rd, pass, size);
            }
            tcg_temp_free_i32(tcg_op);
        }
        if (!is_scalar) {
            clear_vec_high(s, is_q, rd);
        }
    }

    /* restore the previous rounding mode (the first set_rmode call
     * above left the old mode in tcg_rmode)
     */
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
    tcg_temp_free_ptr(tcg_fpstatus);
    tcg_temp_free_i32(tcg_rmode);
}
9110
9111/* AdvSIMD scalar shift by immediate
9112 *  31 30  29 28         23 22  19 18  16 15    11  10 9    5 4    0
9113 * +-----+---+-------------+------+------+--------+---+------+------+
9114 * | 0 1 | U | 1 1 1 1 1 0 | immh | immb | opcode | 1 |  Rn  |  Rd  |
9115 * +-----+---+-------------+------+------+--------+---+------+------+
9116 *
 * This is the scalar version so it works on fixed-size registers
9118 */
9119static void disas_simd_scalar_shift_imm(DisasContext *s, uint32_t insn)
9120{
9121    int rd = extract32(insn, 0, 5);
9122    int rn = extract32(insn, 5, 5);
9123    int opcode = extract32(insn, 11, 5);
9124    int immb = extract32(insn, 16, 3);
9125    int immh = extract32(insn, 19, 4);
9126    bool is_u = extract32(insn, 29, 1);
9127
9128    if (immh == 0) {
9129        unallocated_encoding(s);
9130        return;
9131    }
9132
9133    switch (opcode) {
9134    case 0x08: /* SRI */
9135        if (!is_u) {
9136            unallocated_encoding(s);
9137            return;
9138        }
9139        /* fall through */
9140    case 0x00: /* SSHR / USHR */
9141    case 0x02: /* SSRA / USRA */
9142    case 0x04: /* SRSHR / URSHR */
9143    case 0x06: /* SRSRA / URSRA */
9144        handle_scalar_simd_shri(s, is_u, immh, immb, opcode, rn, rd);
9145        break;
9146    case 0x0a: /* SHL / SLI */
9147        handle_scalar_simd_shli(s, is_u, immh, immb, opcode, rn, rd);
9148        break;
9149    case 0x1c: /* SCVTF, UCVTF */
9150        handle_simd_shift_intfp_conv(s, true, false, is_u, immh, immb,
9151                                     opcode, rn, rd);
9152        break;
9153    case 0x10: /* SQSHRUN, SQSHRUN2 */
9154    case 0x11: /* SQRSHRUN, SQRSHRUN2 */
9155        if (!is_u) {
9156            unallocated_encoding(s);
9157            return;
9158        }
9159        handle_vec_simd_sqshrn(s, true, false, false, true,
9160                               immh, immb, opcode, rn, rd);
9161        break;
9162    case 0x12: /* SQSHRN, SQSHRN2, UQSHRN */
9163    case 0x13: /* SQRSHRN, SQRSHRN2, UQRSHRN, UQRSHRN2 */
9164        handle_vec_simd_sqshrn(s, true, false, is_u, is_u,
9165                               immh, immb, opcode, rn, rd);
9166        break;
9167    case 0xc: /* SQSHLU */
9168        if (!is_u) {
9169            unallocated_encoding(s);
9170            return;
9171        }
9172        handle_simd_qshl(s, true, false, false, true, immh, immb, rn, rd);
9173        break;
9174    case 0xe: /* SQSHL, UQSHL */
9175        handle_simd_qshl(s, true, false, is_u, is_u, immh, immb, rn, rd);
9176        break;
9177    case 0x1f: /* FCVTZS, FCVTZU */
9178        handle_simd_shift_fpint_conv(s, true, false, is_u, immh, immb, rn, rd);
9179        break;
9180    default:
9181        unallocated_encoding(s);
9182        break;
9183    }
9184}
9185
9186/* AdvSIMD scalar three different
9187 *  31 30  29 28       24 23  22  21 20  16 15    12 11 10 9    5 4    0
9188 * +-----+---+-----------+------+---+------+--------+-----+------+------+
9189 * | 0 1 | U | 1 1 1 1 0 | size | 1 |  Rm  | opcode | 0 0 |  Rn  |  Rd  |
9190 * +-----+---+-----------+------+---+------+--------+-----+------+------+
9191 */
static void disas_simd_scalar_three_reg_diff(DisasContext *s, uint32_t insn)
{
    bool is_u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 4);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    /* U must be 0: every allocated encoding in this group is signed */
    if (is_u) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0x9: /* SQDMLAL, SQDMLAL2 */
    case 0xb: /* SQDMLSL, SQDMLSL2 */
    case 0xd: /* SQDMULL, SQDMULL2 */
        /* Only 16->32 (size 1) and 32->64 (size 2) element widths exist */
        if (size == 0 || size == 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (size == 2) {
        /* 32 x 32 -> 64 bit case */
        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
        TCGv_i64 tcg_op2 = tcg_temp_new_i64();
        TCGv_i64 tcg_res = tcg_temp_new_i64();

        /* Sign-extend the low 32-bit element of each source */
        read_vec_element(s, tcg_op1, rn, 0, MO_32 | MO_SIGN);
        read_vec_element(s, tcg_op2, rm, 0, MO_32 | MO_SIGN);

        /* Widening multiply; the "doubling" is done as a saturating
         * add of the product to itself.
         */
        tcg_gen_mul_i64(tcg_res, tcg_op1, tcg_op2);
        gen_helper_neon_addl_saturate_s64(tcg_res, cpu_env, tcg_res, tcg_res);

        switch (opcode) {
        case 0xd: /* SQDMULL, SQDMULL2 */
            break;
        case 0xb: /* SQDMLSL, SQDMLSL2 */
            /* Subtract-accumulate: negate, then saturating accumulate */
            tcg_gen_neg_i64(tcg_res, tcg_res);
            /* fall through */
        case 0x9: /* SQDMLAL, SQDMLAL2 */
            /* tcg_op1 is dead; reuse it for the accumulator value */
            read_vec_element(s, tcg_op1, rd, 0, MO_64);
            gen_helper_neon_addl_saturate_s64(tcg_res, cpu_env,
                                              tcg_res, tcg_op1);
            break;
        default:
            g_assert_not_reached();
        }

        write_fp_dreg(s, rd, tcg_res);

        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2);
        tcg_temp_free_i64(tcg_res);
    } else {
        /* 16 x 16 -> 32 bit case: size == 1 is the only remaining value.
         * The Neon helpers work on packed pairs of 16-bit lanes; only
         * the low lane is significant here and the final zero-extension
         * below keeps just the 32-bit scalar result.
         */
        TCGv_i32 tcg_op1 = read_fp_hreg(s, rn);
        TCGv_i32 tcg_op2 = read_fp_hreg(s, rm);
        TCGv_i64 tcg_res = tcg_temp_new_i64();

        gen_helper_neon_mull_s16(tcg_res, tcg_op1, tcg_op2);
        gen_helper_neon_addl_saturate_s32(tcg_res, cpu_env, tcg_res, tcg_res);

        switch (opcode) {
        case 0xd: /* SQDMULL, SQDMULL2 */
            break;
        case 0xb: /* SQDMLSL, SQDMLSL2 */
            gen_helper_neon_negl_u32(tcg_res, tcg_res);
            /* fall through */
        case 0x9: /* SQDMLAL, SQDMLAL2 */
        {
            TCGv_i64 tcg_op3 = tcg_temp_new_i64();
            read_vec_element(s, tcg_op3, rd, 0, MO_32);
            gen_helper_neon_addl_saturate_s32(tcg_res, cpu_env,
                                              tcg_res, tcg_op3);
            tcg_temp_free_i64(tcg_op3);
            break;
        }
        default:
            g_assert_not_reached();
        }

        /* Scalar write: keep only the low 32 bits of the result */
        tcg_gen_ext32u_i64(tcg_res, tcg_res);
        write_fp_dreg(s, rd, tcg_res);

        tcg_temp_free_i32(tcg_op1);
        tcg_temp_free_i32(tcg_op2);
        tcg_temp_free_i64(tcg_res);
    }
}
9290
9291static void handle_3same_64(DisasContext *s, int opcode, bool u,
9292                            TCGv_i64 tcg_rd, TCGv_i64 tcg_rn, TCGv_i64 tcg_rm)
9293{
9294    /* Handle 64x64->64 opcodes which are shared between the scalar
9295     * and vector 3-same groups. We cover every opcode where size == 3
9296     * is valid in either the three-reg-same (integer, not pairwise)
9297     * or scalar-three-reg-same groups.
9298     */
9299    TCGCond cond;
9300
9301    switch (opcode) {
9302    case 0x1: /* SQADD */
9303        if (u) {
9304            gen_helper_neon_qadd_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
9305        } else {
9306            gen_helper_neon_qadd_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
9307        }
9308        break;
9309    case 0x5: /* SQSUB */
9310        if (u) {
9311            gen_helper_neon_qsub_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
9312        } else {
9313            gen_helper_neon_qsub_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
9314        }
9315        break;
9316    case 0x6: /* CMGT, CMHI */
9317        /* 64 bit integer comparison, result = test ? (2^64 - 1) : 0.
9318         * We implement this using setcond (test) and then negating.
9319         */
9320        cond = u ? TCG_COND_GTU : TCG_COND_GT;
9321    do_cmop:
9322        tcg_gen_setcond_i64(cond, tcg_rd, tcg_rn, tcg_rm);
9323        tcg_gen_neg_i64(tcg_rd, tcg_rd);
9324        break;
9325    case 0x7: /* CMGE, CMHS */
9326        cond = u ? TCG_COND_GEU : TCG_COND_GE;
9327        goto do_cmop;
9328    case 0x11: /* CMTST, CMEQ */
9329        if (u) {
9330            cond = TCG_COND_EQ;
9331            goto do_cmop;
9332        }
9333        gen_cmtst_i64(tcg_rd, tcg_rn, tcg_rm);
9334        break;
9335    case 0x8: /* SSHL, USHL */
9336        if (u) {
9337            gen_ushl_i64(tcg_rd, tcg_rn, tcg_rm);
9338        } else {
9339            gen_sshl_i64(tcg_rd, tcg_rn, tcg_rm);
9340        }
9341        break;
9342    case 0x9: /* SQSHL, UQSHL */
9343        if (u) {
9344            gen_helper_neon_qshl_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
9345        } else {
9346            gen_helper_neon_qshl_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
9347        }
9348        break;
9349    case 0xa: /* SRSHL, URSHL */
9350        if (u) {
9351            gen_helper_neon_rshl_u64(tcg_rd, tcg_rn, tcg_rm);
9352        } else {
9353            gen_helper_neon_rshl_s64(tcg_rd, tcg_rn, tcg_rm);
9354        }
9355        break;
9356    case 0xb: /* SQRSHL, UQRSHL */
9357        if (u) {
9358            gen_helper_neon_qrshl_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
9359        } else {
9360            gen_helper_neon_qrshl_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
9361        }
9362        break;
9363    case 0x10: /* ADD, SUB */
9364        if (u) {
9365            tcg_gen_sub_i64(tcg_rd, tcg_rn, tcg_rm);
9366        } else {
9367            tcg_gen_add_i64(tcg_rd, tcg_rn, tcg_rm);
9368        }
9369        break;
9370    default:
9371        g_assert_not_reached();
9372    }
9373}
9374
9375/* Handle the 3-same-operands float operations; shared by the scalar
9376 * and vector encodings. The caller must filter out any encodings
9377 * not allocated for the encoding it is dealing with.
9378 */
static void handle_3same_float(DisasContext *s, int size, int elements,
                               int fpopcode, int rd, int rn, int rm)
{
    int pass;
    /* Single/double precision only here; FP16 uses FPST_FPCR_F16 elsewhere */
    TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);

    for (pass = 0; pass < elements; pass++) {
        if (size) {
            /* Double */
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
            TCGv_i64 tcg_res = tcg_temp_new_i64();

            read_vec_element(s, tcg_op1, rn, pass, MO_64);
            read_vec_element(s, tcg_op2, rm, pass, MO_64);

            switch (fpopcode) {
            case 0x39: /* FMLS */
                /* As usual for ARM, separate negation for fused multiply-add */
                gen_helper_vfp_negd(tcg_op1, tcg_op1);
                /* fall through */
            case 0x19: /* FMLA */
                /* Fused multiply-accumulate into the destination element */
                read_vec_element(s, tcg_res, rd, pass, MO_64);
                gen_helper_vfp_muladdd(tcg_res, tcg_op1, tcg_op2,
                                       tcg_res, fpst);
                break;
            case 0x18: /* FMAXNM */
                gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1a: /* FADD */
                gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1b: /* FMULX */
                gen_helper_vfp_mulxd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1c: /* FCMEQ */
                gen_helper_neon_ceq_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1e: /* FMAX */
                gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1f: /* FRECPS */
                gen_helper_recpsf_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x38: /* FMINNM */
                gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3a: /* FSUB */
                gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3e: /* FMIN */
                gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3f: /* FRSQRTS */
                gen_helper_rsqrtsf_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5b: /* FMUL */
                gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5c: /* FCMGE */
                gen_helper_neon_cge_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5d: /* FACGE */
                gen_helper_neon_acge_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5f: /* FDIV */
                gen_helper_vfp_divd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7a: /* FABD */
                /* No dedicated helper: subtract then take absolute value */
                gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
                gen_helper_vfp_absd(tcg_res, tcg_res);
                break;
            case 0x7c: /* FCMGT */
                gen_helper_neon_cgt_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7d: /* FACGT */
                gen_helper_neon_acgt_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            write_vec_element(s, tcg_res, rd, pass, MO_64);

            tcg_temp_free_i64(tcg_res);
            tcg_temp_free_i64(tcg_op1);
            tcg_temp_free_i64(tcg_op2);
        } else {
            /* Single */
            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op1, rn, pass, MO_32);
            read_vec_element_i32(s, tcg_op2, rm, pass, MO_32);

            switch (fpopcode) {
            case 0x39: /* FMLS */
                /* As usual for ARM, separate negation for fused multiply-add */
                gen_helper_vfp_negs(tcg_op1, tcg_op1);
                /* fall through */
            case 0x19: /* FMLA */
                read_vec_element_i32(s, tcg_res, rd, pass, MO_32);
                gen_helper_vfp_muladds(tcg_res, tcg_op1, tcg_op2,
                                       tcg_res, fpst);
                break;
            case 0x1a: /* FADD */
                gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1b: /* FMULX */
                gen_helper_vfp_mulxs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1c: /* FCMEQ */
                gen_helper_neon_ceq_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1e: /* FMAX */
                gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1f: /* FRECPS */
                gen_helper_recpsf_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x18: /* FMAXNM */
                gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x38: /* FMINNM */
                gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3a: /* FSUB */
                gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3e: /* FMIN */
                gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3f: /* FRSQRTS */
                gen_helper_rsqrtsf_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5b: /* FMUL */
                gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5c: /* FCMGE */
                gen_helper_neon_cge_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5d: /* FACGE */
                gen_helper_neon_acge_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5f: /* FDIV */
                gen_helper_vfp_divs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7a: /* FABD */
                /* No dedicated helper: subtract then take absolute value */
                gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
                gen_helper_vfp_abss(tcg_res, tcg_res);
                break;
            case 0x7c: /* FCMGT */
                gen_helper_neon_cgt_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7d: /* FACGT */
                gen_helper_neon_acgt_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            if (elements == 1) {
                /* scalar single so clear high part */
                TCGv_i64 tcg_tmp = tcg_temp_new_i64();

                tcg_gen_extu_i32_i64(tcg_tmp, tcg_res);
                write_vec_element(s, tcg_tmp, rd, pass, MO_64);
                tcg_temp_free_i64(tcg_tmp);
            } else {
                write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
            }

            tcg_temp_free_i32(tcg_res);
            tcg_temp_free_i32(tcg_op1);
            tcg_temp_free_i32(tcg_op2);
        }
    }

    tcg_temp_free_ptr(fpst);

    /* Tidy the untouched high bits of the destination; the second
     * argument is true when we wrote more than 8 bytes of data.
     */
    clear_vec_high(s, elements * (size ? 8 : 4) > 8, rd);
}
9562
9563/* AdvSIMD scalar three same
9564 *  31 30  29 28       24 23  22  21 20  16 15    11  10 9    5 4    0
9565 * +-----+---+-----------+------+---+------+--------+---+------+------+
9566 * | 0 1 | U | 1 1 1 1 0 | size | 1 |  Rm  | opcode | 1 |  Rn  |  Rd  |
9567 * +-----+---+-----------+------+---+------+--------+---+------+------+
9568 */
static void disas_simd_scalar_three_reg_same(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 5);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    bool u = extract32(insn, 29, 1);
    TCGv_i64 tcg_rd;

    if (opcode >= 0x18) {
        /* Floating point: U, size[1] and opcode indicate operation */
        int fpopcode = opcode | (extract32(size, 1, 1) << 5) | (u << 6);
        switch (fpopcode) {
        case 0x1b: /* FMULX */
        case 0x1f: /* FRECPS */
        case 0x3f: /* FRSQRTS */
        case 0x5d: /* FACGE */
        case 0x7d: /* FACGT */
        case 0x1c: /* FCMEQ */
        case 0x5c: /* FCMGE */
        case 0x7c: /* FCMGT */
        case 0x7a: /* FABD */
            break;
        default:
            unallocated_encoding(s);
            return;
        }

        if (!fp_access_check(s)) {
            return;
        }

        /* size[0] selects single (0) vs double (1) precision;
         * elements == 1 because this is the scalar form.
         */
        handle_3same_float(s, extract32(size, 0, 1), 1, fpopcode, rd, rn, rm);
        return;
    }

    switch (opcode) {
    /* Saturating ops: scalar form is valid for every element size */
    case 0x1: /* SQADD, UQADD */
    case 0x5: /* SQSUB, UQSUB */
    case 0x9: /* SQSHL, UQSHL */
    case 0xb: /* SQRSHL, UQRSHL */
        break;
    /* Non-saturating ops: scalar form exists only for 64-bit elements */
    case 0x8: /* SSHL, USHL */
    case 0xa: /* SRSHL, URSHL */
    case 0x6: /* CMGT, CMHI */
    case 0x7: /* CMGE, CMHS */
    case 0x11: /* CMTST, CMEQ */
    case 0x10: /* ADD, SUB (vector) */
        if (size != 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x16: /* SQDMULH, SQRDMULH (vector) */
        /* Only 16-bit and 32-bit element sizes are allocated */
        if (size != 1 && size != 2) {
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rd = tcg_temp_new_i64();

    if (size == 3) {
        TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
        TCGv_i64 tcg_rm = read_fp_dreg(s, rm);

        handle_3same_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rm);
        tcg_temp_free_i64(tcg_rn);
        tcg_temp_free_i64(tcg_rm);
    } else {
        /* Do a single operation on the lowest element in the vector.
         * We use the standard Neon helpers and rely on 0 OP 0 == 0 with
         * no side effects for all these operations.
         * OPTME: special-purpose helpers would avoid doing some
         * unnecessary work in the helper for the 8 and 16 bit cases.
         */
        NeonGenTwoOpEnvFn *genenvfn;
        TCGv_i32 tcg_rn = tcg_temp_new_i32();
        TCGv_i32 tcg_rm = tcg_temp_new_i32();
        TCGv_i32 tcg_rd32 = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_rn, rn, 0, size);
        read_vec_element_i32(s, tcg_rm, rm, 0, size);

        /* Select the helper; tables are indexed [size][u] */
        switch (opcode) {
        case 0x1: /* SQADD, UQADD */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qadd_s8, gen_helper_neon_qadd_u8 },
                { gen_helper_neon_qadd_s16, gen_helper_neon_qadd_u16 },
                { gen_helper_neon_qadd_s32, gen_helper_neon_qadd_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x5: /* SQSUB, UQSUB */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qsub_s8, gen_helper_neon_qsub_u8 },
                { gen_helper_neon_qsub_s16, gen_helper_neon_qsub_u16 },
                { gen_helper_neon_qsub_s32, gen_helper_neon_qsub_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x9: /* SQSHL, UQSHL */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 },
                { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 },
                { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0xb: /* SQRSHL, UQRSHL */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 },
                { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 },
                { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x16: /* SQDMULH, SQRDMULH */
        {
            /* Here U distinguishes SQDMULH (0) from SQRDMULH (1) */
            static NeonGenTwoOpEnvFn * const fns[2][2] = {
                { gen_helper_neon_qdmulh_s16, gen_helper_neon_qrdmulh_s16 },
                { gen_helper_neon_qdmulh_s32, gen_helper_neon_qrdmulh_s32 },
            };
            assert(size == 1 || size == 2);
            genenvfn = fns[size - 1][u];
            break;
        }
        default:
            g_assert_not_reached();
        }

        /* Zero-extend the narrow result into the full 64-bit scalar */
        genenvfn(tcg_rd32, cpu_env, tcg_rn, tcg_rm);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_rd32);
        tcg_temp_free_i32(tcg_rd32);
        tcg_temp_free_i32(tcg_rn);
        tcg_temp_free_i32(tcg_rm);
    }

    write_fp_dreg(s, rd, tcg_rd);

    tcg_temp_free_i64(tcg_rd);
}
9728
9729/* AdvSIMD scalar three same FP16
9730 *  31 30  29 28       24 23  22 21 20  16 15 14 13    11 10  9  5 4  0
9731 * +-----+---+-----------+---+-----+------+-----+--------+---+----+----+
9732 * | 0 1 | U | 1 1 1 1 0 | a | 1 0 |  Rm  | 0 0 | opcode | 1 | Rn | Rd |
9733 * +-----+---+-----------+---+-----+------+-----+--------+---+----+----+
9734 * v: 0101 1110 0100 0000 0000 0100 0000 0000 => 5e400400
9735 * m: 1101 1111 0110 0000 1100 0100 0000 0000 => df60c400
9736 */
9737static void disas_simd_scalar_three_reg_same_fp16(DisasContext *s,
9738                                                  uint32_t insn)
9739{
9740    int rd = extract32(insn, 0, 5);
9741    int rn = extract32(insn, 5, 5);
9742    int opcode = extract32(insn, 11, 3);
9743    int rm = extract32(insn, 16, 5);
9744    bool u = extract32(insn, 29, 1);
9745    bool a = extract32(insn, 23, 1);
9746    int fpopcode = opcode | (a << 3) |  (u << 4);
9747    TCGv_ptr fpst;
9748    TCGv_i32 tcg_op1;
9749    TCGv_i32 tcg_op2;
9750    TCGv_i32 tcg_res;
9751
9752    switch (fpopcode) {
9753    case 0x03: /* FMULX */
9754    case 0x04: /* FCMEQ (reg) */
9755    case 0x07: /* FRECPS */
9756    case 0x0f: /* FRSQRTS */
9757    case 0x14: /* FCMGE (reg) */
9758    case 0x15: /* FACGE */
9759    case 0x1a: /* FABD */
9760    case 0x1c: /* FCMGT (reg) */
9761    case 0x1d: /* FACGT */
9762        break;
9763    default:
9764        unallocated_encoding(s);
9765        return;
9766    }
9767
9768    if (!dc_isar_feature(aa64_fp16, s)) {
9769        unallocated_encoding(s);
9770    }
9771
9772    if (!fp_access_check(s)) {
9773        return;
9774    }
9775
9776    fpst = fpstatus_ptr(FPST_FPCR_F16);
9777
9778    tcg_op1 = read_fp_hreg(s, rn);
9779    tcg_op2 = read_fp_hreg(s, rm);
9780    tcg_res = tcg_temp_new_i32();
9781
9782    switch (fpopcode) {
9783    case 0x03: /* FMULX */
9784        gen_helper_advsimd_mulxh(tcg_res, tcg_op1, tcg_op2, fpst);
9785        break;
9786    case 0x04: /* FCMEQ (reg) */
9787        gen_helper_advsimd_ceq_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9788        break;
9789    case 0x07: /* FRECPS */
9790        gen_helper_recpsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9791        break;
9792    case 0x0f: /* FRSQRTS */
9793        gen_helper_rsqrtsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9794        break;
9795    case 0x14: /* FCMGE (reg) */
9796        gen_helper_advsimd_cge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9797        break;
9798    case 0x15: /* FACGE */
9799        gen_helper_advsimd_acge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9800        break;
9801    case 0x1a: /* FABD */
9802        gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
9803        tcg_gen_andi_i32(tcg_res, tcg_res, 0x7fff);
9804        break;
9805    case 0x1c: /* FCMGT (reg) */
9806        gen_helper_advsimd_cgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9807        break;
9808    case 0x1d: /* FACGT */
9809        gen_helper_advsimd_acgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9810        break;
9811    default:
9812        g_assert_not_reached();
9813    }
9814
9815    write_fp_sreg(s, rd, tcg_res);
9816
9817
9818    tcg_temp_free_i32(tcg_res);
9819    tcg_temp_free_i32(tcg_op1);
9820    tcg_temp_free_i32(tcg_op2);
9821    tcg_temp_free_ptr(fpst);
9822}
9823
9824/* AdvSIMD scalar three same extra
9825 *  31 30  29 28       24 23  22  21 20  16  15 14    11  10 9  5 4  0
9826 * +-----+---+-----------+------+---+------+---+--------+---+----+----+
9827 * | 0 1 | U | 1 1 1 1 0 | size | 0 |  Rm  | 1 | opcode | 1 | Rn | Rd |
9828 * +-----+---+-----------+------+---+------+---+--------+---+----+----+
9829 */
9830static void disas_simd_scalar_three_reg_same_extra(DisasContext *s,
9831                                                   uint32_t insn)
9832{
9833    int rd = extract32(insn, 0, 5);
9834    int rn = extract32(insn, 5, 5);
9835    int opcode = extract32(insn, 11, 4);
9836    int rm = extract32(insn, 16, 5);
9837    int size = extract32(insn, 22, 2);
9838    bool u = extract32(insn, 29, 1);
9839    TCGv_i32 ele1, ele2, ele3;
9840    TCGv_i64 res;
9841    bool feature;
9842
9843    switch (u * 16 + opcode) {
9844    case 0x10: /* SQRDMLAH (vector) */
9845    case 0x11: /* SQRDMLSH (vector) */
9846        if (size != 1 && size != 2) {
9847            unallocated_encoding(s);
9848            return;
9849        }
9850        feature = dc_isar_feature(aa64_rdm, s);
9851        break;
9852    default:
9853        unallocated_encoding(s);
9854        return;
9855    }
9856    if (!feature) {
9857        unallocated_encoding(s);
9858        return;
9859    }
9860    if (!fp_access_check(s)) {
9861        return;
9862    }
9863
9864    /* Do a single operation on the lowest element in the vector.
9865     * We use the standard Neon helpers and rely on 0 OP 0 == 0
9866     * with no side effects for all these operations.
9867     * OPTME: special-purpose helpers would avoid doing some
9868     * unnecessary work in the helper for the 16 bit cases.
9869     */
9870    ele1 = tcg_temp_new_i32();
9871    ele2 = tcg_temp_new_i32();
9872    ele3 = tcg_temp_new_i32();
9873
9874    read_vec_element_i32(s, ele1, rn, 0, size);
9875    read_vec_element_i32(s, ele2, rm, 0, size);
9876    read_vec_element_i32(s, ele3, rd, 0, size);
9877
9878    switch (opcode) {
9879    case 0x0: /* SQRDMLAH */
9880        if (size == 1) {
9881            gen_helper_neon_qrdmlah_s16(ele3, cpu_env, ele1, ele2, ele3);
9882        } else {
9883            gen_helper_neon_qrdmlah_s32(ele3, cpu_env, ele1, ele2, ele3);
9884        }
9885        break;
9886    case 0x1: /* SQRDMLSH */
9887        if (size == 1) {
9888            gen_helper_neon_qrdmlsh_s16(ele3, cpu_env, ele1, ele2, ele3);
9889        } else {
9890            gen_helper_neon_qrdmlsh_s32(ele3, cpu_env, ele1, ele2, ele3);
9891        }
9892        break;
9893    default:
9894        g_assert_not_reached();
9895    }
9896    tcg_temp_free_i32(ele1);
9897    tcg_temp_free_i32(ele2);
9898
9899    res = tcg_temp_new_i64();
9900    tcg_gen_extu_i32_i64(res, ele3);
9901    tcg_temp_free_i32(ele3);
9902
9903    write_fp_dreg(s, rd, res);
9904    tcg_temp_free_i64(res);
9905}
9906
static void handle_2misc_64(DisasContext *s, int opcode, bool u,
                            TCGv_i64 tcg_rd, TCGv_i64 tcg_rn,
                            TCGv_i32 tcg_rmode, TCGv_ptr tcg_fpstatus)
{
    /* Handle 64->64 opcodes which are shared between the scalar and
     * vector 2-reg-misc groups. We cover every integer opcode where size == 3
     * is valid in either group and also the double-precision fp ops.
     * The caller only need provide tcg_rmode and tcg_fpstatus if the op
     * requires them.
     */
    TCGCond cond;

    switch (opcode) {
    case 0x4: /* CLS, CLZ */
        if (u) {
            /* CLZ: clz of 0 is defined as 64 (the operand width) */
            tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64);
        } else {
            /* CLS counts leading sign bits (excluding the sign bit itself) */
            tcg_gen_clrsb_i64(tcg_rd, tcg_rn);
        }
        break;
    case 0x5: /* NOT */
        /* This opcode is shared with CNT and RBIT but we have earlier
         * enforced that size == 3 if and only if this is the NOT insn.
         */
        tcg_gen_not_i64(tcg_rd, tcg_rn);
        break;
    case 0x7: /* SQABS, SQNEG */
        if (u) {
            gen_helper_neon_qneg_s64(tcg_rd, cpu_env, tcg_rn);
        } else {
            gen_helper_neon_qabs_s64(tcg_rd, cpu_env, tcg_rn);
        }
        break;
    case 0xa: /* CMLT */
        /* 64 bit integer comparison against zero, result is
         * test ? (2^64 - 1) : 0. We implement this via setcond
         * (giving 0 or 1) and then negating (giving 0 or all-ones).
         */
        cond = TCG_COND_LT;
    do_cmop:
        tcg_gen_setcondi_i64(cond, tcg_rd, tcg_rn, 0);
        tcg_gen_neg_i64(tcg_rd, tcg_rd);
        break;
    case 0x8: /* CMGT, CMGE */
        /* U selects CMGE, otherwise CMGT */
        cond = u ? TCG_COND_GE : TCG_COND_GT;
        goto do_cmop;
    case 0x9: /* CMEQ, CMLE */
        /* U selects CMLE, otherwise CMEQ */
        cond = u ? TCG_COND_LE : TCG_COND_EQ;
        goto do_cmop;
    case 0xb: /* ABS, NEG */
        if (u) {
            tcg_gen_neg_i64(tcg_rd, tcg_rn);
        } else {
            tcg_gen_abs_i64(tcg_rd, tcg_rn);
        }
        break;
    case 0x2f: /* FABS */
        gen_helper_vfp_absd(tcg_rd, tcg_rn);
        break;
    case 0x6f: /* FNEG */
        gen_helper_vfp_negd(tcg_rd, tcg_rn);
        break;
    case 0x7f: /* FSQRT */
        gen_helper_vfp_sqrtd(tcg_rd, tcg_rn, cpu_env);
        break;
    /* The FCVT* flavours below all share one helper per signedness;
     * they differ only in the FP rounding mode, which the caller is
     * expected to have installed already (hence the unused tcg_rmode
     * parameter).  The constant 0 argument is the fixed-point shift
     * amount, i.e. no fractional bits.
     */
    case 0x1a: /* FCVTNS */
    case 0x1b: /* FCVTMS */
    case 0x1c: /* FCVTAS */
    case 0x3a: /* FCVTPS */
    case 0x3b: /* FCVTZS */
        gen_helper_vfp_tosqd(tcg_rd, tcg_rn, tcg_constant_i32(0), tcg_fpstatus);
        break;
    case 0x5a: /* FCVTNU */
    case 0x5b: /* FCVTMU */
    case 0x5c: /* FCVTAU */
    case 0x7a: /* FCVTPU */
    case 0x7b: /* FCVTZU */
        gen_helper_vfp_touqd(tcg_rd, tcg_rn, tcg_constant_i32(0), tcg_fpstatus);
        break;
    /* Similarly the FRINT* variants differ only in the caller-installed
     * rounding mode; FRINTX additionally signals Inexact via the
     * "_exact" helper.
     */
    case 0x18: /* FRINTN */
    case 0x19: /* FRINTM */
    case 0x38: /* FRINTP */
    case 0x39: /* FRINTZ */
    case 0x58: /* FRINTA */
    case 0x79: /* FRINTI */
        gen_helper_rintd(tcg_rd, tcg_rn, tcg_fpstatus);
        break;
    case 0x59: /* FRINTX */
        gen_helper_rintd_exact(tcg_rd, tcg_rn, tcg_fpstatus);
        break;
    case 0x1e: /* FRINT32Z */
    case 0x5e: /* FRINT32X */
        gen_helper_frint32_d(tcg_rd, tcg_rn, tcg_fpstatus);
        break;
    case 0x1f: /* FRINT64Z */
    case 0x5f: /* FRINT64X */
        gen_helper_frint64_d(tcg_rd, tcg_rn, tcg_fpstatus);
        break;
    default:
        g_assert_not_reached();
    }
}
10009
/* Handle 2-reg-misc floating point compare-against-zero insns
 * (FCMGT/FCMEQ/FCMLT/FCMGE/FCMLE (zero)), in both scalar and vector
 * forms, for half, single and double precision elements.
 * FCMLT/FCMLE have no helpers of their own: they are generated by
 * calling the GT/GE helper with the operands swapped ("swap" below).
 */
static void handle_2misc_fcmp_zero(DisasContext *s, int opcode,
                                   bool is_scalar, bool is_u, bool is_q,
                                   int size, int rn, int rd)
{
    bool is_double = (size == MO_64);
    TCGv_ptr fpst;

    if (!fp_access_check(s)) {
        return;
    }

    /* Half-precision ops use the separate FP16 status flags */
    fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);

    if (is_double) {
        TCGv_i64 tcg_op = tcg_temp_new_i64();
        TCGv_i64 tcg_zero = tcg_constant_i64(0);
        TCGv_i64 tcg_res = tcg_temp_new_i64();
        NeonGenTwoDoubleOpFn *genfn;
        bool swap = false;
        int pass;

        switch (opcode) {
        case 0x2e: /* FCMLT (zero) */
            swap = true;
            /* fallthrough */
        case 0x2c: /* FCMGT (zero) */
            genfn = gen_helper_neon_cgt_f64;
            break;
        case 0x2d: /* FCMEQ (zero) */
            genfn = gen_helper_neon_ceq_f64;
            break;
        case 0x6d: /* FCMLE (zero) */
            swap = true;
            /* fall through */
        case 0x6c: /* FCMGE (zero) */
            genfn = gen_helper_neon_cge_f64;
            break;
        default:
            g_assert_not_reached();
        }

        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
            read_vec_element(s, tcg_op, rn, pass, MO_64);
            if (swap) {
                /* Operands reversed: LT/LE expressed via GT/GE */
                genfn(tcg_res, tcg_zero, tcg_op, fpst);
            } else {
                genfn(tcg_res, tcg_op, tcg_zero, fpst);
            }
            write_vec_element(s, tcg_res, rd, pass, MO_64);
        }
        tcg_temp_free_i64(tcg_res);
        tcg_temp_free_i64(tcg_op);

        clear_vec_high(s, !is_scalar, rd);
    } else {
        TCGv_i32 tcg_op = tcg_temp_new_i32();
        TCGv_i32 tcg_zero = tcg_constant_i32(0);
        TCGv_i32 tcg_res = tcg_temp_new_i32();
        NeonGenTwoSingleOpFn *genfn;
        bool swap = false;
        int pass, maxpasses;

        if (size == MO_16) {
            switch (opcode) {
            case 0x2e: /* FCMLT (zero) */
                swap = true;
                /* fall through */
            case 0x2c: /* FCMGT (zero) */
                genfn = gen_helper_advsimd_cgt_f16;
                break;
            case 0x2d: /* FCMEQ (zero) */
                genfn = gen_helper_advsimd_ceq_f16;
                break;
            case 0x6d: /* FCMLE (zero) */
                swap = true;
                /* fall through */
            case 0x6c: /* FCMGE (zero) */
                genfn = gen_helper_advsimd_cge_f16;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            switch (opcode) {
            case 0x2e: /* FCMLT (zero) */
                swap = true;
                /* fall through */
            case 0x2c: /* FCMGT (zero) */
                genfn = gen_helper_neon_cgt_f32;
                break;
            case 0x2d: /* FCMEQ (zero) */
                genfn = gen_helper_neon_ceq_f32;
                break;
            case 0x6d: /* FCMLE (zero) */
                swap = true;
                /* fall through */
            case 0x6c: /* FCMGE (zero) */
                genfn = gen_helper_neon_cge_f32;
                break;
            default:
                g_assert_not_reached();
            }
        }

        if (is_scalar) {
            maxpasses = 1;
        } else {
            /* 64-bit (8 byte) or 128-bit (16 byte) vector, esize bytes
             * per element.
             */
            int vector_size = 8 << is_q;
            maxpasses = vector_size >> size;
        }

        for (pass = 0; pass < maxpasses; pass++) {
            read_vec_element_i32(s, tcg_op, rn, pass, size);
            if (swap) {
                genfn(tcg_res, tcg_zero, tcg_op, fpst);
            } else {
                genfn(tcg_res, tcg_op, tcg_zero, fpst);
            }
            if (is_scalar) {
                write_fp_sreg(s, rd, tcg_res);
            } else {
                write_vec_element_i32(s, tcg_res, rd, pass, size);
            }
        }
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_op);
        if (!is_scalar) {
            clear_vec_high(s, is_q, rd);
        }
    }

    tcg_temp_free_ptr(fpst);
}
10143
/* Handle the 2-reg-misc reciprocal-estimate family: URECPE (32-bit
 * integer only), FRECPE, FRECPX and FRSQRTE, scalar and vector, single
 * and double precision. Callers have already performed fp_access_check
 * (see disas_simd_scalar_two_reg_misc).
 */
static void handle_2misc_reciprocal(DisasContext *s, int opcode,
                                    bool is_scalar, bool is_u, bool is_q,
                                    int size, int rn, int rd)
{
    bool is_double = (size == 3);
    TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);

    if (is_double) {
        TCGv_i64 tcg_op = tcg_temp_new_i64();
        TCGv_i64 tcg_res = tcg_temp_new_i64();
        int pass;

        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
            read_vec_element(s, tcg_op, rn, pass, MO_64);
            switch (opcode) {
            case 0x3d: /* FRECPE */
                gen_helper_recpe_f64(tcg_res, tcg_op, fpst);
                break;
            case 0x3f: /* FRECPX */
                gen_helper_frecpx_f64(tcg_res, tcg_op, fpst);
                break;
            case 0x7d: /* FRSQRTE */
                gen_helper_rsqrte_f64(tcg_res, tcg_op, fpst);
                break;
            default:
                g_assert_not_reached();
            }
            write_vec_element(s, tcg_res, rd, pass, MO_64);
        }
        tcg_temp_free_i64(tcg_res);
        tcg_temp_free_i64(tcg_op);
        clear_vec_high(s, !is_scalar, rd);
    } else {
        TCGv_i32 tcg_op = tcg_temp_new_i32();
        TCGv_i32 tcg_res = tcg_temp_new_i32();
        int pass, maxpasses;

        if (is_scalar) {
            maxpasses = 1;
        } else {
            /* 2 or 4 32-bit elements depending on vector width */
            maxpasses = is_q ? 4 : 2;
        }

        for (pass = 0; pass < maxpasses; pass++) {
            read_vec_element_i32(s, tcg_op, rn, pass, MO_32);

            switch (opcode) {
            case 0x3c: /* URECPE */
                /* Integer estimate: no FP status needed */
                gen_helper_recpe_u32(tcg_res, tcg_op);
                break;
            case 0x3d: /* FRECPE */
                gen_helper_recpe_f32(tcg_res, tcg_op, fpst);
                break;
            case 0x3f: /* FRECPX */
                gen_helper_frecpx_f32(tcg_res, tcg_op, fpst);
                break;
            case 0x7d: /* FRSQRTE */
                gen_helper_rsqrte_f32(tcg_res, tcg_op, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            if (is_scalar) {
                write_fp_sreg(s, rd, tcg_res);
            } else {
                write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
            }
        }
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_op);
        if (!is_scalar) {
            clear_vec_high(s, is_q, rd);
        }
    }
    tcg_temp_free_ptr(fpst);
}
10221
static void handle_2misc_narrow(DisasContext *s, bool scalar,
                                int opcode, bool u, bool is_q,
                                int size, int rn, int rd)
{
    /* Handle 2-reg-misc ops which are narrowing (so each 2*size element
     * in the source becomes a size element in the destination).
     *
     * Results are buffered in tcg_res[] and only written back after all
     * source elements have been read, since rd may alias rn. For the
     * "2" variants (is_q set) the results go into the upper half of rd
     * (destelt == 2), leaving the lower half untouched.
     */
    int pass;
    TCGv_i32 tcg_res[2];
    int destelt = is_q ? 2 : 0;
    int passes = scalar ? 1 : 2;

    if (scalar) {
        /* Scalar writes zero into the second result word */
        tcg_res[1] = tcg_constant_i32(0);
    }

    for (pass = 0; pass < passes; pass++) {
        TCGv_i64 tcg_op = tcg_temp_new_i64();
        NeonGenNarrowFn *genfn = NULL;
        NeonGenNarrowEnvFn *genenvfn = NULL;

        if (scalar) {
            read_vec_element(s, tcg_op, rn, pass, size + 1);
        } else {
            read_vec_element(s, tcg_op, rn, pass, MO_64);
        }
        tcg_res[pass] = tcg_temp_new_i32();

        switch (opcode) {
        case 0x12: /* XTN, SQXTUN */
        {
            static NeonGenNarrowFn * const xtnfns[3] = {
                gen_helper_neon_narrow_u8,
                gen_helper_neon_narrow_u16,
                tcg_gen_extrl_i64_i32,
            };
            static NeonGenNarrowEnvFn * const sqxtunfns[3] = {
                gen_helper_neon_unarrow_sat8,
                gen_helper_neon_unarrow_sat16,
                gen_helper_neon_unarrow_sat32,
            };
            if (u) {
                genenvfn = sqxtunfns[size];
            } else {
                genfn = xtnfns[size];
            }
            break;
        }
        case 0x14: /* SQXTN, UQXTN */
        {
            static NeonGenNarrowEnvFn * const fns[3][2] = {
                { gen_helper_neon_narrow_sat_s8,
                  gen_helper_neon_narrow_sat_u8 },
                { gen_helper_neon_narrow_sat_s16,
                  gen_helper_neon_narrow_sat_u16 },
                { gen_helper_neon_narrow_sat_s32,
                  gen_helper_neon_narrow_sat_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x16: /* FCVTN, FCVTN2 */
            /* 32 bit to 16 bit or 64 bit to 32 bit float conversion */
            if (size == 2) {
                gen_helper_vfp_fcvtsd(tcg_res[pass], tcg_op, cpu_env);
            } else {
                /* Two single-to-half conversions, packed into one word */
                TCGv_i32 tcg_lo = tcg_temp_new_i32();
                TCGv_i32 tcg_hi = tcg_temp_new_i32();
                TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
                TCGv_i32 ahp = get_ahp_flag();

                tcg_gen_extr_i64_i32(tcg_lo, tcg_hi, tcg_op);
                gen_helper_vfp_fcvt_f32_to_f16(tcg_lo, tcg_lo, fpst, ahp);
                gen_helper_vfp_fcvt_f32_to_f16(tcg_hi, tcg_hi, fpst, ahp);
                tcg_gen_deposit_i32(tcg_res[pass], tcg_lo, tcg_hi, 16, 16);
                tcg_temp_free_i32(tcg_lo);
                tcg_temp_free_i32(tcg_hi);
                tcg_temp_free_ptr(fpst);
                tcg_temp_free_i32(ahp);
            }
            break;
        case 0x36: /* BFCVTN, BFCVTN2 */
            {
                TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
                gen_helper_bfcvt_pair(tcg_res[pass], tcg_op, fpst);
                tcg_temp_free_ptr(fpst);
            }
            break;
        case 0x56:  /* FCVTXN, FCVTXN2 */
            /* 64 bit to 32 bit float conversion
             * with von Neumann rounding (round to odd)
             */
            assert(size == 2);
            gen_helper_fcvtx_f64_to_f32(tcg_res[pass], tcg_op, cpu_env);
            break;
        default:
            g_assert_not_reached();
        }

        /* The FP conversion cases above wrote tcg_res[pass] directly;
         * the table-driven cases left a narrowing fn to call here.
         */
        if (genfn) {
            genfn(tcg_res[pass], tcg_op);
        } else if (genenvfn) {
            genenvfn(tcg_res[pass], cpu_env, tcg_op);
        }

        tcg_temp_free_i64(tcg_op);
    }

    for (pass = 0; pass < 2; pass++) {
        write_vec_element_i32(s, tcg_res[pass], rd, destelt + pass, MO_32);
        tcg_temp_free_i32(tcg_res[pass]);
    }
    clear_vec_high(s, is_q, rd);
}
10336
/* Remaining saturating accumulating ops: SUQADD and USQADD, scalar and
 * vector, all element sizes. The helpers take cpu_env, which lets them
 * record saturation state.
 */
static void handle_2misc_satacc(DisasContext *s, bool is_scalar, bool is_u,
                                bool is_q, int size, int rn, int rd)
{
    bool is_double = (size == 3);

    if (is_double) {
        TCGv_i64 tcg_rn = tcg_temp_new_i64();
        TCGv_i64 tcg_rd = tcg_temp_new_i64();
        int pass;

        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
            read_vec_element(s, tcg_rn, rn, pass, MO_64);
            read_vec_element(s, tcg_rd, rd, pass, MO_64);

            if (is_u) { /* USQADD */
                gen_helper_neon_uqadd_s64(tcg_rd, cpu_env, tcg_rn, tcg_rd);
            } else { /* SUQADD */
                gen_helper_neon_sqadd_u64(tcg_rd, cpu_env, tcg_rn, tcg_rd);
            }
            write_vec_element(s, tcg_rd, rd, pass, MO_64);
        }
        tcg_temp_free_i64(tcg_rd);
        tcg_temp_free_i64(tcg_rn);
        clear_vec_high(s, !is_scalar, rd);
    } else {
        TCGv_i32 tcg_rn = tcg_temp_new_i32();
        TCGv_i32 tcg_rd = tcg_temp_new_i32();
        int pass, maxpasses;

        if (is_scalar) {
            maxpasses = 1;
        } else {
            maxpasses = is_q ? 4 : 2;
        }

        for (pass = 0; pass < maxpasses; pass++) {
            if (is_scalar) {
                /* Scalar: read at the real element size */
                read_vec_element_i32(s, tcg_rn, rn, pass, size);
                read_vec_element_i32(s, tcg_rd, rd, pass, size);
            } else {
                /* Vector: always 32-bit passes; the helpers operate on
                 * the sub-word lanes within each 32-bit chunk.
                 */
                read_vec_element_i32(s, tcg_rn, rn, pass, MO_32);
                read_vec_element_i32(s, tcg_rd, rd, pass, MO_32);
            }

            if (is_u) { /* USQADD */
                switch (size) {
                case 0:
                    gen_helper_neon_uqadd_s8(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                case 1:
                    gen_helper_neon_uqadd_s16(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                case 2:
                    gen_helper_neon_uqadd_s32(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                default:
                    g_assert_not_reached();
                }
            } else { /* SUQADD */
                switch (size) {
                case 0:
                    gen_helper_neon_sqadd_u8(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                case 1:
                    gen_helper_neon_sqadd_u16(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                case 2:
                    gen_helper_neon_sqadd_u32(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                default:
                    g_assert_not_reached();
                }
            }

            if (is_scalar) {
                /* Zero the whole low 64 bits of rd before depositing
                 * the scalar result into it.
                 */
                write_vec_element(s, tcg_constant_i64(0), rd, 0, MO_64);
            }
            write_vec_element_i32(s, tcg_rd, rd, pass, MO_32);
        }
        tcg_temp_free_i32(tcg_rd);
        tcg_temp_free_i32(tcg_rn);
        clear_vec_high(s, is_q, rd);
    }
}
10422
/* AdvSIMD scalar two reg misc
 *  31 30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
 * +-----+---+-----------+------+-----------+--------+-----+------+------+
 * | 0 1 | U | 1 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +-----+---+-----------+------+-----------+--------+-----+------+------+
 */
static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 12, 5);
    int size = extract32(insn, 22, 2);
    bool u = extract32(insn, 29, 1);
    bool is_fcvt = false;
    int rmode;
    TCGv_i32 tcg_rmode;
    TCGv_ptr tcg_fpstatus;

    /* Most cases either dispatch to a specific handler and return, or
     * fall out of the switch to be handled by the common code below.
     */
    switch (opcode) {
    case 0x3: /* USQADD / SUQADD*/
        if (!fp_access_check(s)) {
            return;
        }
        handle_2misc_satacc(s, true, u, false, size, rn, rd);
        return;
    case 0x7: /* SQABS / SQNEG */
        break;
    case 0xa: /* CMLT */
        if (u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x8: /* CMGT, CMGE */
    case 0x9: /* CMEQ, CMLE */
    case 0xb: /* ABS, NEG */
        /* Scalar integer compare/abs/neg only exist for 64-bit elements */
        if (size != 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x12: /* SQXTUN */
        if (!u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x14: /* SQXTN, UQXTN */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_2misc_narrow(s, true, opcode, u, false, size, rn, rd);
        return;
    case 0xc ... 0xf:
    case 0x16 ... 0x1d:
    case 0x1f:
        /* Floating point: U, size[1] and opcode indicate operation;
         * size[0] indicates single or double precision.
         */
        opcode |= (extract32(size, 1, 1) << 5) | (u << 6);
        size = extract32(size, 0, 1) ? 3 : 2;
        switch (opcode) {
        case 0x2c: /* FCMGT (zero) */
        case 0x2d: /* FCMEQ (zero) */
        case 0x2e: /* FCMLT (zero) */
        case 0x6c: /* FCMGE (zero) */
        case 0x6d: /* FCMLE (zero) */
            handle_2misc_fcmp_zero(s, opcode, true, u, true, size, rn, rd);
            return;
        case 0x1d: /* SCVTF */
        case 0x5d: /* UCVTF */
        {
            bool is_signed = (opcode == 0x1d);
            if (!fp_access_check(s)) {
                return;
            }
            handle_simd_intfp_conv(s, rd, rn, 1, is_signed, 0, size);
            return;
        }
        case 0x3d: /* FRECPE */
        case 0x3f: /* FRECPX */
        case 0x7d: /* FRSQRTE */
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_reciprocal(s, opcode, true, u, true, size, rn, rd);
            return;
        case 0x1a: /* FCVTNS */
        case 0x1b: /* FCVTMS */
        case 0x3a: /* FCVTPS */
        case 0x3b: /* FCVTZS */
        case 0x5a: /* FCVTNU */
        case 0x5b: /* FCVTMU */
        case 0x7a: /* FCVTPU */
        case 0x7b: /* FCVTZU */
            is_fcvt = true;
            /* Rounding mode is encoded in opcode bits 5 and 0 */
            rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
            break;
        case 0x1c: /* FCVTAS */
        case 0x5c: /* FCVTAU */
            /* TIEAWAY doesn't fit in the usual rounding mode encoding */
            is_fcvt = true;
            rmode = FPROUNDING_TIEAWAY;
            break;
        case 0x56: /* FCVTXN, FCVTXN2 */
            if (size == 2) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_narrow(s, true, opcode, u, false, size - 1, rn, rd);
            return;
        default:
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (is_fcvt) {
        /* Install the requested rounding mode; set_rmode leaves the
         * previous mode in tcg_rmode so it can be restored below.
         */
        tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
        tcg_fpstatus = fpstatus_ptr(FPST_FPCR);
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
    } else {
        tcg_rmode = NULL;
        tcg_fpstatus = NULL;
    }

    if (size == 3) {
        TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
        TCGv_i64 tcg_rd = tcg_temp_new_i64();

        handle_2misc_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rmode, tcg_fpstatus);
        write_fp_dreg(s, rd, tcg_rd);
        tcg_temp_free_i64(tcg_rd);
        tcg_temp_free_i64(tcg_rn);
    } else {
        TCGv_i32 tcg_rn = tcg_temp_new_i32();
        TCGv_i32 tcg_rd = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_rn, rn, 0, size);

        switch (opcode) {
        case 0x7: /* SQABS, SQNEG */
        {
            NeonGenOneOpEnvFn *genfn;
            static NeonGenOneOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 },
                { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 },
                { gen_helper_neon_qabs_s32, gen_helper_neon_qneg_s32 },
            };
            genfn = fns[size][u];
            genfn(tcg_rd, cpu_env, tcg_rn);
            break;
        }
        case 0x1a: /* FCVTNS */
        case 0x1b: /* FCVTMS */
        case 0x1c: /* FCVTAS */
        case 0x3a: /* FCVTPS */
        case 0x3b: /* FCVTZS */
            /* Shift argument of zero: plain conversion, no fixed-point */
            gen_helper_vfp_tosls(tcg_rd, tcg_rn, tcg_constant_i32(0),
                                 tcg_fpstatus);
            break;
        case 0x5a: /* FCVTNU */
        case 0x5b: /* FCVTMU */
        case 0x5c: /* FCVTAU */
        case 0x7a: /* FCVTPU */
        case 0x7b: /* FCVTZU */
            gen_helper_vfp_touls(tcg_rd, tcg_rn, tcg_constant_i32(0),
                                 tcg_fpstatus);
            break;
        default:
            g_assert_not_reached();
        }

        write_fp_sreg(s, rd, tcg_rd);
        tcg_temp_free_i32(tcg_rd);
        tcg_temp_free_i32(tcg_rn);
    }

    if (is_fcvt) {
        /* Restore the original rounding mode */
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
        tcg_temp_free_i32(tcg_rmode);
        tcg_temp_free_ptr(tcg_fpstatus);
    }
}
10622
10623/* SSHR[RA]/USHR[RA] - Vector shift right (optional rounding/accumulate) */
10624static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
10625                                 int immh, int immb, int opcode, int rn, int rd)
10626{
10627    int size = 32 - clz32(immh) - 1;
10628    int immhb = immh << 3 | immb;
10629    int shift = 2 * (8 << size) - immhb;
10630    GVecGen2iFn *gvec_fn;
10631
10632    if (extract32(immh, 3, 1) && !is_q) {
10633        unallocated_encoding(s);
10634        return;
10635    }
10636    tcg_debug_assert(size <= 3);
10637
10638    if (!fp_access_check(s)) {
10639        return;
10640    }
10641
10642    switch (opcode) {
10643    case 0x02: /* SSRA / USRA (accumulate) */
10644        gvec_fn = is_u ? gen_gvec_usra : gen_gvec_ssra;
10645        break;
10646
10647    case 0x08: /* SRI */
10648        gvec_fn = gen_gvec_sri;
10649        break;
10650
10651    case 0x00: /* SSHR / USHR */
10652        if (is_u) {
10653            if (shift == 8 << size) {
10654                /* Shift count the same size as element size produces zero.  */
10655                tcg_gen_gvec_dup_imm(size, vec_full_reg_offset(s, rd),
10656                                     is_q ? 16 : 8, vec_full_reg_size(s), 0);
10657                return;
10658            }
10659            gvec_fn = tcg_gen_gvec_shri;
10660        } else {
10661            /* Shift count the same size as element size produces all sign.  */
10662            if (shift == 8 << size) {
10663                shift -= 1;
10664            }
10665            gvec_fn = tcg_gen_gvec_sari;
10666        }
10667        break;
10668
10669    case 0x04: /* SRSHR / URSHR (rounding) */
10670        gvec_fn = is_u ? gen_gvec_urshr : gen_gvec_srshr;
10671        break;
10672
10673    case 0x06: /* SRSRA / URSRA (accum + rounding) */
10674        gvec_fn = is_u ? gen_gvec_ursra : gen_gvec_srsra;
10675        break;
10676
10677    default:
10678        g_assert_not_reached();
10679    }
10680
10681    gen_gvec_fn2i(s, is_q, rd, rn, shift, gvec_fn, size);
10682}
10683
10684/* SHL/SLI - Vector shift left */
10685static void handle_vec_simd_shli(DisasContext *s, bool is_q, bool insert,
10686                                 int immh, int immb, int opcode, int rn, int rd)
10687{
10688    int size = 32 - clz32(immh) - 1;
10689    int immhb = immh << 3 | immb;
10690    int shift = immhb - (8 << size);
10691
10692    /* Range of size is limited by decode: immh is a non-zero 4 bit field */
10693    assert(size >= 0 && size <= 3);
10694
10695    if (extract32(immh, 3, 1) && !is_q) {
10696        unallocated_encoding(s);
10697        return;
10698    }
10699
10700    if (!fp_access_check(s)) {
10701        return;
10702    }
10703
10704    if (insert) {
10705        gen_gvec_fn2i(s, is_q, rd, rn, shift, gen_gvec_sli, size);
10706    } else {
10707        gen_gvec_fn2i(s, is_q, rd, rn, shift, tcg_gen_gvec_shli, size);
10708    }
10709}
10710
10711/* USHLL/SHLL - Vector shift left with widening */
10712static void handle_vec_simd_wshli(DisasContext *s, bool is_q, bool is_u,
10713                                 int immh, int immb, int opcode, int rn, int rd)
10714{
10715    int size = 32 - clz32(immh) - 1;
10716    int immhb = immh << 3 | immb;
10717    int shift = immhb - (8 << size);
10718    int dsize = 64;
10719    int esize = 8 << size;
10720    int elements = dsize/esize;
10721    TCGv_i64 tcg_rn = new_tmp_a64(s);
10722    TCGv_i64 tcg_rd = new_tmp_a64(s);
10723    int i;
10724
10725    if (size >= 3) {
10726        unallocated_encoding(s);
10727        return;
10728    }
10729
10730    if (!fp_access_check(s)) {
10731        return;
10732    }
10733
10734    /* For the LL variants the store is larger than the load,
10735     * so if rd == rn we would overwrite parts of our input.
10736     * So load everything right now and use shifts in the main loop.
10737     */
10738    read_vec_element(s, tcg_rn, rn, is_q ? 1 : 0, MO_64);
10739
10740    for (i = 0; i < elements; i++) {
10741        tcg_gen_shri_i64(tcg_rd, tcg_rn, i * esize);
10742        ext_and_shift_reg(tcg_rd, tcg_rd, size | (!is_u << 2), 0);
10743        tcg_gen_shli_i64(tcg_rd, tcg_rd, shift);
10744        write_vec_element(s, tcg_rd, rd, i, size + 1);
10745    }
10746}
10747
/* SHRN/RSHRN - Shift right with narrowing (and potential rounding) */
static void handle_vec_simd_shrn(DisasContext *s, bool is_q,
                                 int immh, int immb, int opcode, int rn, int rd)
{
    int immhb = immh << 3 | immb;
    int size = 32 - clz32(immh) - 1;
    int dsize = 64;
    int esize = 8 << size;
    int elements = dsize/esize;
    int shift = (2 * esize) - immhb;
    /* opcode bit 0 distinguishes RSHRN (rounding) from SHRN */
    bool round = extract32(opcode, 0, 1);
    TCGv_i64 tcg_rn, tcg_rd, tcg_final;
    TCGv_i64 tcg_round;
    int i;

    /* Narrowing from 128-bit elements has no encoding */
    if (extract32(immh, 3, 1)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rn = tcg_temp_new_i64();
    tcg_rd = tcg_temp_new_i64();
    tcg_final = tcg_temp_new_i64();
    /* Accumulate narrowed elements in tcg_final, then store it as one
     * 64-bit half of rd: low half for SHRN, high half for SHRN2 (is_q),
     * which leaves the other half of rd untouched.
     */
    read_vec_element(s, tcg_final, rd, is_q ? 1 : 0, MO_64);

    if (round) {
        /* Rounding constant: half of the shifted-out range */
        tcg_round = tcg_constant_i64(1ULL << (shift - 1));
    } else {
        tcg_round = NULL;
    }

    for (i = 0; i < elements; i++) {
        read_vec_element(s, tcg_rn, rn, i, size+1);
        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
                                false, true, size+1, shift);

        tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
    }

    if (!is_q) {
        write_vec_element(s, tcg_final, rd, 0, MO_64);
    } else {
        write_vec_element(s, tcg_final, rd, 1, MO_64);
    }
    tcg_temp_free_i64(tcg_rn);
    tcg_temp_free_i64(tcg_rd);
    tcg_temp_free_i64(tcg_final);

    clear_vec_high(s, is_q, rd);
}
10802
10803
/* AdvSIMD shift by immediate
 *  31  30   29 28         23 22  19 18  16 15    11  10 9    5 4    0
 * +---+---+---+-------------+------+------+--------+---+------+------+
 * | 0 | Q | U | 0 1 1 1 1 0 | immh | immb | opcode | 1 |  Rn  |  Rd  |
 * +---+---+---+-------------+------+------+--------+---+------+------+
 */
static void disas_simd_shift_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 5);
    int immb = extract32(insn, 16, 3);
    int immh = extract32(insn, 19, 4);
    bool is_u = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);

    /* data_proc_simd[] has sent immh == 0 to disas_simd_mod_imm. */
    assert(immh != 0);

    /* Dispatch to the per-operation handlers; each one decodes the
     * element size and shift amount from immh:immb itself.
     */
    switch (opcode) {
    case 0x08: /* SRI */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x00: /* SSHR / USHR */
    case 0x02: /* SSRA / USRA (accumulate) */
    case 0x04: /* SRSHR / URSHR (rounding) */
    case 0x06: /* SRSRA / URSRA (accum + rounding) */
        handle_vec_simd_shri(s, is_q, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x0a: /* SHL / SLI */
        handle_vec_simd_shli(s, is_q, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x10: /* SHRN */
    case 0x11: /* RSHRN / SQRSHRUN */
        if (is_u) {
            handle_vec_simd_sqshrn(s, false, is_q, false, true, immh, immb,
                                   opcode, rn, rd);
        } else {
            handle_vec_simd_shrn(s, is_q, immh, immb, opcode, rn, rd);
        }
        break;
    case 0x12: /* SQSHRN / UQSHRN */
    case 0x13: /* SQRSHRN / UQRSHRN */
        handle_vec_simd_sqshrn(s, false, is_q, is_u, is_u, immh, immb,
                               opcode, rn, rd);
        break;
    case 0x14: /* SSHLL / USHLL */
        handle_vec_simd_wshli(s, is_q, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x1c: /* SCVTF / UCVTF */
        handle_simd_shift_intfp_conv(s, false, is_q, is_u, immh, immb,
                                     opcode, rn, rd);
        break;
    case 0xc: /* SQSHLU */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        handle_simd_qshl(s, false, is_q, false, true, immh, immb, rn, rd);
        break;
    case 0xe: /* SQSHL, UQSHL */
        handle_simd_qshl(s, false, is_q, is_u, is_u, immh, immb, rn, rd);
        break;
    case 0x1f: /* FCVTZS/ FCVTZU */
        handle_simd_shift_fpint_conv(s, false, is_q, is_u, immh, immb, rn, rd);
        return;
    default:
        unallocated_encoding(s);
        return;
    }
}
10878
10879/* Generate code to do a "long" addition or subtraction, ie one done in
10880 * TCGv_i64 on vector lanes twice the width specified by size.
10881 */
10882static void gen_neon_addl(int size, bool is_sub, TCGv_i64 tcg_res,
10883                          TCGv_i64 tcg_op1, TCGv_i64 tcg_op2)
10884{
10885    static NeonGenTwo64OpFn * const fns[3][2] = {
10886        { gen_helper_neon_addl_u16, gen_helper_neon_subl_u16 },
10887        { gen_helper_neon_addl_u32, gen_helper_neon_subl_u32 },
10888        { tcg_gen_add_i64, tcg_gen_sub_i64 },
10889    };
10890    NeonGenTwo64OpFn *genfn;
10891    assert(size < 3);
10892
10893    genfn = fns[size][is_sub];
10894    genfn(tcg_res, tcg_op1, tcg_op2);
10895}
10896
/* Handle the "widening" class of AdvSIMD 3-reg-different insns:
 * each result element is double the width of the source elements
 * (64 x 64 -> 128 at the vector level).  The opcode values decoded
 * here correspond to the caller's switch in disas_simd_three_reg_diff().
 */
static void handle_3rd_widening(DisasContext *s, int is_q, int is_u, int size,
                                int opcode, int rd, int rn, int rm)
{
    /* 3-reg-different widening insns: 64 x 64 -> 128 */
    TCGv_i64 tcg_res[2];
    int pass, accop;

    tcg_res[0] = tcg_temp_new_i64();
    tcg_res[1] = tcg_temp_new_i64();

    /* Does this op do an adding accumulate, a subtracting accumulate,
     * or no accumulate at all?
     */
    switch (opcode) {
    case 5:  /* SABAL */
    case 8:  /* SMLAL/UMLAL */
    case 9:  /* SQDMLAL */
        accop = 1;
        break;
    case 10: /* SMLSL/UMLSL */
    case 11: /* SQDMLSL */
        accop = -1;
        break;
    default:
        accop = 0;
        break;
    }

    /* Accumulating ops need the current contents of Vd as two 64-bit lanes */
    if (accop != 0) {
        read_vec_element(s, tcg_res[0], rd, 0, MO_64);
        read_vec_element(s, tcg_res[1], rd, 1, MO_64);
    }

    /* size == 2 means two 32x32->64 operations; this is worth special
     * casing because we can generally handle it inline.
     */
    if (size == 2) {
        for (pass = 0; pass < 2; pass++) {
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
            TCGv_i64 tcg_passres;
            /* Sign- or zero-extend the 32-bit sources on load */
            MemOp memop = MO_32 | (is_u ? 0 : MO_SIGN);

            /* The "2" (high-half) forms read elements 2 and 3 */
            int elt = pass + is_q * 2;

            read_vec_element(s, tcg_op1, rn, elt, memop);
            read_vec_element(s, tcg_op2, rm, elt, memop);

            /* Without accumulation we can compute directly into the result */
            if (accop == 0) {
                tcg_passres = tcg_res[pass];
            } else {
                tcg_passres = tcg_temp_new_i64();
            }

            switch (opcode) {
            case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
                tcg_gen_add_i64(tcg_passres, tcg_op1, tcg_op2);
                break;
            case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
                tcg_gen_sub_i64(tcg_passres, tcg_op1, tcg_op2);
                break;
            case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
            case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
            {
                /* Absolute difference: compute both a-b and b-a and
                 * select the non-negative one with a movcond.
                 */
                TCGv_i64 tcg_tmp1 = tcg_temp_new_i64();
                TCGv_i64 tcg_tmp2 = tcg_temp_new_i64();

                tcg_gen_sub_i64(tcg_tmp1, tcg_op1, tcg_op2);
                tcg_gen_sub_i64(tcg_tmp2, tcg_op2, tcg_op1);
                tcg_gen_movcond_i64(is_u ? TCG_COND_GEU : TCG_COND_GE,
                                    tcg_passres,
                                    tcg_op1, tcg_op2, tcg_tmp1, tcg_tmp2);
                tcg_temp_free_i64(tcg_tmp1);
                tcg_temp_free_i64(tcg_tmp2);
                break;
            }
            case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
            case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
            case 12: /* UMULL, UMULL2, SMULL, SMULL2 */
                tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2);
                break;
            case 9: /* SQDMLAL, SQDMLAL2 */
            case 11: /* SQDMLSL, SQDMLSL2 */
            case 13: /* SQDMULL, SQDMULL2 */
                /* Saturating doubling multiply: multiply, then use the
                 * saturating-add helper to double the product.
                 */
                tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2);
                gen_helper_neon_addl_saturate_s64(tcg_passres, cpu_env,
                                                  tcg_passres, tcg_passres);
                break;
            default:
                g_assert_not_reached();
            }

            if (opcode == 9 || opcode == 11) {
                /* saturating accumulate ops */
                if (accop < 0) {
                    /* Negate the product before the saturating add for SQDMLSL */
                    tcg_gen_neg_i64(tcg_passres, tcg_passres);
                }
                gen_helper_neon_addl_saturate_s64(tcg_res[pass], cpu_env,
                                                  tcg_res[pass], tcg_passres);
            } else if (accop > 0) {
                tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
            } else if (accop < 0) {
                tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
            }

            if (accop != 0) {
                tcg_temp_free_i64(tcg_passres);
            }

            tcg_temp_free_i64(tcg_op1);
            tcg_temp_free_i64(tcg_op2);
        }
    } else {
        /* size 0 or 1, generally helper functions */
        for (pass = 0; pass < 2; pass++) {
            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
            TCGv_i64 tcg_passres;
            /* High-half ("2") forms read the upper pair of 32-bit elements */
            int elt = pass + is_q * 2;

            read_vec_element_i32(s, tcg_op1, rn, elt, MO_32);
            read_vec_element_i32(s, tcg_op2, rm, elt, MO_32);

            if (accop == 0) {
                tcg_passres = tcg_res[pass];
            } else {
                tcg_passres = tcg_temp_new_i64();
            }

            switch (opcode) {
            case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
            case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
            {
                TCGv_i64 tcg_op2_64 = tcg_temp_new_i64();
                static NeonGenWidenFn * const widenfns[2][2] = {
                    { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 },
                    { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 },
                };
                NeonGenWidenFn *widenfn = widenfns[size][is_u];

                /* Widen both operands, then add or subtract long */
                widenfn(tcg_op2_64, tcg_op2);
                widenfn(tcg_passres, tcg_op1);
                gen_neon_addl(size, (opcode == 2), tcg_passres,
                              tcg_passres, tcg_op2_64);
                tcg_temp_free_i64(tcg_op2_64);
                break;
            }
            case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
            case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
                if (size == 0) {
                    if (is_u) {
                        gen_helper_neon_abdl_u16(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_abdl_s16(tcg_passres, tcg_op1, tcg_op2);
                    }
                } else {
                    if (is_u) {
                        gen_helper_neon_abdl_u32(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_abdl_s32(tcg_passres, tcg_op1, tcg_op2);
                    }
                }
                break;
            case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
            case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
            case 12: /* UMULL, UMULL2, SMULL, SMULL2 */
                if (size == 0) {
                    if (is_u) {
                        gen_helper_neon_mull_u8(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_mull_s8(tcg_passres, tcg_op1, tcg_op2);
                    }
                } else {
                    if (is_u) {
                        gen_helper_neon_mull_u16(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2);
                    }
                }
                break;
            case 9: /* SQDMLAL, SQDMLAL2 */
            case 11: /* SQDMLSL, SQDMLSL2 */
            case 13: /* SQDMULL, SQDMULL2 */
                /* SQDMULL with byte elements is unallocated (decoded by caller) */
                assert(size == 1);
                gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2);
                /* Double the product via saturating self-add */
                gen_helper_neon_addl_saturate_s32(tcg_passres, cpu_env,
                                                  tcg_passres, tcg_passres);
                break;
            default:
                g_assert_not_reached();
            }
            tcg_temp_free_i32(tcg_op1);
            tcg_temp_free_i32(tcg_op2);

            if (accop != 0) {
                if (opcode == 9 || opcode == 11) {
                    /* saturating accumulate ops */
                    if (accop < 0) {
                        gen_helper_neon_negl_u32(tcg_passres, tcg_passres);
                    }
                    gen_helper_neon_addl_saturate_s32(tcg_res[pass], cpu_env,
                                                      tcg_res[pass],
                                                      tcg_passres);
                } else {
                    gen_neon_addl(size, (accop < 0), tcg_res[pass],
                                  tcg_res[pass], tcg_passres);
                }
                tcg_temp_free_i64(tcg_passres);
            }
        }
    }

    write_vec_element(s, tcg_res[0], rd, 0, MO_64);
    write_vec_element(s, tcg_res[1], rd, 1, MO_64);
    tcg_temp_free_i64(tcg_res[0]);
    tcg_temp_free_i64(tcg_res[1]);
}
11114
/* Handle the "wide" class of AdvSIMD 3-reg-different insns
 * (SADDW/UADDW opcode 1, SSUBW/USUBW opcode 3): a 128-bit Vn of
 * double-width elements combined with a widened 64-bit half of Vm,
 * i.e. 64 x 128 -> 128 at the vector level.
 */
static void handle_3rd_wide(DisasContext *s, int is_q, int is_u, int size,
                            int opcode, int rd, int rn, int rm)
{
    TCGv_i64 tcg_res[2];
    int part = is_q ? 2 : 0;  /* the "2" forms read the upper half of Vm */
    int pass;

    for (pass = 0; pass < 2; pass++) {
        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
        TCGv_i32 tcg_op2 = tcg_temp_new_i32();
        TCGv_i64 tcg_op2_wide = tcg_temp_new_i64();
        /* Widening function indexed by [size][is_u]: sign- vs zero-extend */
        static NeonGenWidenFn * const widenfns[3][2] = {
            { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 },
            { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 },
            { tcg_gen_ext_i32_i64, tcg_gen_extu_i32_i64 },
        };
        NeonGenWidenFn *widenfn = widenfns[size][is_u];

        /* op1 is already double-width; op2 is narrow and must be widened */
        read_vec_element(s, tcg_op1, rn, pass, MO_64);
        read_vec_element_i32(s, tcg_op2, rm, part + pass, MO_32);
        widenfn(tcg_op2_wide, tcg_op2);
        tcg_temp_free_i32(tcg_op2);
        tcg_res[pass] = tcg_temp_new_i64();
        /* opcode 3 is the subtracting form (SSUBW/USUBW) */
        gen_neon_addl(size, (opcode == 3),
                      tcg_res[pass], tcg_op1, tcg_op2_wide);
        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2_wide);
    }

    /* Write back only after both passes so rd may alias rn/rm */
    for (pass = 0; pass < 2; pass++) {
        write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
        tcg_temp_free_i64(tcg_res[pass]);
    }
}
11149
/* Narrow a 64-bit value to its high 32 bits with rounding: add the
 * rounding constant (1 << 31) and then extract the top half.
 * Note: this clobbers the input temporary 'in'.
 */
static void do_narrow_round_high_u32(TCGv_i32 res, TCGv_i64 in)
{
    tcg_gen_addi_i64(in, in, 1U << 31);
    tcg_gen_extrh_i64_i32(res, in);
}
11155
/* Handle the "narrowing" class of AdvSIMD 3-reg-different insns
 * (ADDHN/RADDHN opcode 4, SUBHN/RSUBHN opcode 6): add or subtract
 * pairs of double-width elements and keep the high half of each
 * result, i.e. 128 x 128 -> 64 at the vector level.  is_u selects
 * the rounding ("R") variants.
 */
static void handle_3rd_narrowing(DisasContext *s, int is_q, int is_u, int size,
                                 int opcode, int rd, int rn, int rm)
{
    TCGv_i32 tcg_res[2];
    int part = is_q ? 2 : 0;  /* the "2" forms write the upper half of Vd */
    int pass;

    for (pass = 0; pass < 2; pass++) {
        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
        TCGv_i64 tcg_op2 = tcg_temp_new_i64();
        TCGv_i64 tcg_wideres = tcg_temp_new_i64();
        /* Narrowing function indexed by [size][is_u]: plain vs rounding */
        static NeonGenNarrowFn * const narrowfns[3][2] = {
            { gen_helper_neon_narrow_high_u8,
              gen_helper_neon_narrow_round_high_u8 },
            { gen_helper_neon_narrow_high_u16,
              gen_helper_neon_narrow_round_high_u16 },
            { tcg_gen_extrh_i64_i32, do_narrow_round_high_u32 },
        };
        NeonGenNarrowFn *gennarrow = narrowfns[size][is_u];

        read_vec_element(s, tcg_op1, rn, pass, MO_64);
        read_vec_element(s, tcg_op2, rm, pass, MO_64);

        /* opcode 6 is the subtracting form (SUBHN/RSUBHN) */
        gen_neon_addl(size, (opcode == 6), tcg_wideres, tcg_op1, tcg_op2);

        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2);

        tcg_res[pass] = tcg_temp_new_i32();
        gennarrow(tcg_res[pass], tcg_wideres);
        tcg_temp_free_i64(tcg_wideres);
    }

    for (pass = 0; pass < 2; pass++) {
        write_vec_element_i32(s, tcg_res[pass], rd, pass + part, MO_32);
        tcg_temp_free_i32(tcg_res[pass]);
    }
    /* For the non-Q forms, zero the untouched upper half of Vd */
    clear_vec_high(s, is_q, rd);
}
11195
/* AdvSIMD three different
 *   31  30  29 28       24 23  22  21 20  16 15    12 11 10 9    5 4    0
 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
 * | 0 | Q | U | 0 1 1 1 0 | size | 1 |  Rm  | opcode | 0 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
 */
static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn)
{
    /* Instructions in this group fall into three basic classes
     * (in each case with the operation working on each element in
     * the input vectors):
     * (1) widening 64 x 64 -> 128 (with possibly Vd as an extra
     *     128 bit input)
     * (2) wide 64 x 128 -> 128
     * (3) narrowing 128 x 128 -> 64
     * Here we do initial decode, catch unallocated cases and
     * dispatch to separate functions for each class.
     */
    int is_q = extract32(insn, 30, 1);      /* Q: selects hi/lo half ("2" forms) */
    int is_u = extract32(insn, 29, 1);      /* U: unsigned / variant select */
    int size = extract32(insn, 22, 2);      /* source element size */
    int opcode = extract32(insn, 12, 4);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    switch (opcode) {
    case 1: /* SADDW, SADDW2, UADDW, UADDW2 */
    case 3: /* SSUBW, SSUBW2, USUBW, USUBW2 */
        /* 64 x 128 -> 128 */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_3rd_wide(s, is_q, is_u, size, opcode, rd, rn, rm);
        break;
    case 4: /* ADDHN, ADDHN2, RADDHN, RADDHN2 */
    case 6: /* SUBHN, SUBHN2, RSUBHN, RSUBHN2 */
        /* 128 x 128 -> 64 */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_3rd_narrowing(s, is_q, is_u, size, opcode, rd, rn, rm);
        break;
    case 14: /* PMULL, PMULL2 */
        /* Polynomial multiply long: handled entirely here via gvec helpers;
         * only sizes 0 (P8) and 3 (P64) are allocated.
         */
        if (is_u) {
            unallocated_encoding(s);
            return;
        }
        switch (size) {
        case 0: /* PMULL.P8 */
            if (!fp_access_check(s)) {
                return;
            }
            /* The Q field specifies lo/hi half input for this insn.  */
            gen_gvec_op3_ool(s, true, rd, rn, rm, is_q,
                             gen_helper_neon_pmull_h);
            break;

        case 3: /* PMULL.P64 */
            /* The 64x64->128 form requires the PMULL feature */
            if (!dc_isar_feature(aa64_pmull, s)) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            /* The Q field specifies lo/hi half input for this insn.  */
            gen_gvec_op3_ool(s, true, rd, rn, rm, is_q,
                             gen_helper_gvec_pmull_q);
            break;

        default:
            unallocated_encoding(s);
            break;
        }
        return;
    case 9: /* SQDMLAL, SQDMLAL2 */
    case 11: /* SQDMLSL, SQDMLSL2 */
    case 13: /* SQDMULL, SQDMULL2 */
        /* Saturating doubling multiplies exist only for 16- and 32-bit
         * elements; byte elements (size 0) are unallocated, as is U=1.
         */
        if (is_u || size == 0) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
    case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
    case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
    case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
    case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
    case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
    case 12: /* SMULL, SMULL2, UMULL, UMULL2 */
        /* 64 x 64 -> 128 */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }

        handle_3rd_widening(s, is_q, is_u, size, opcode, rd, rn, rm);
        break;
    default:
        /* opcode 15 not allocated */
        unallocated_encoding(s);
        break;
    }
}
11312
11313/* Logic op (opcode == 3) subgroup of C3.6.16. */
11314static void disas_simd_3same_logic(DisasContext *s, uint32_t insn)
11315{
11316    int rd = extract32(insn, 0, 5);
11317    int rn = extract32(insn, 5, 5);
11318    int rm = extract32(insn, 16, 5);
11319    int size = extract32(insn, 22, 2);
11320    bool is_u = extract32(insn, 29, 1);
11321    bool is_q = extract32(insn, 30, 1);
11322
11323    if (!fp_access_check(s)) {
11324        return;
11325    }
11326
11327    switch (size + 4 * is_u) {
11328    case 0: /* AND */
11329        gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_and, 0);
11330        return;
11331    case 1: /* BIC */
11332        gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_andc, 0);
11333        return;
11334    case 2: /* ORR */
11335        gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_or, 0);
11336        return;
11337    case 3: /* ORN */
11338        gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_orc, 0);
11339        return;
11340    case 4: /* EOR */
11341        gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_xor, 0);
11342        return;
11343
11344    case 5: /* BSL bitwise select */
11345        gen_gvec_fn4(s, is_q, rd, rd, rn, rm, tcg_gen_gvec_bitsel, 0);
11346        return;
11347    case 6: /* BIT, bitwise insert if true */
11348        gen_gvec_fn4(s, is_q, rd, rm, rn, rd, tcg_gen_gvec_bitsel, 0);
11349        return;
11350    case 7: /* BIF, bitwise insert if false */
11351        gen_gvec_fn4(s, is_q, rd, rm, rd, rn, tcg_gen_gvec_bitsel, 0);
11352        return;
11353
11354    default:
11355        g_assert_not_reached();
11356    }
11357}
11358
11359/* Pairwise op subgroup of C3.6.16.
11360 *
11361 * This is called directly or via the handle_3same_float for float pairwise
11362 * operations where the opcode and size are calculated differently.
11363 */
11364static void handle_simd_3same_pair(DisasContext *s, int is_q, int u, int opcode,
11365                                   int size, int rn, int rm, int rd)
11366{
11367    TCGv_ptr fpst;
11368    int pass;
11369
11370    /* Floating point operations need fpst */
11371    if (opcode >= 0x58) {
11372        fpst = fpstatus_ptr(FPST_FPCR);
11373    } else {
11374        fpst = NULL;
11375    }
11376
11377    if (!fp_access_check(s)) {
11378        return;
11379    }
11380
11381    /* These operations work on the concatenated rm:rn, with each pair of
11382     * adjacent elements being operated on to produce an element in the result.
11383     */
11384    if (size == 3) {
11385        TCGv_i64 tcg_res[2];
11386
11387        for (pass = 0; pass < 2; pass++) {
11388            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
11389            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
11390            int passreg = (pass == 0) ? rn : rm;
11391
11392            read_vec_element(s, tcg_op1, passreg, 0, MO_64);
11393            read_vec_element(s, tcg_op2, passreg, 1, MO_64);
11394            tcg_res[pass] = tcg_temp_new_i64();
11395
11396            switch (opcode) {
11397            case 0x17: /* ADDP */
11398                tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2);
11399                break;
11400            case 0x58: /* FMAXNMP */
11401                gen_helper_vfp_maxnumd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11402                break;
11403            case 0x5a: /* FADDP */
11404                gen_helper_vfp_addd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11405                break;
11406            case 0x5e: /* FMAXP */
11407                gen_helper_vfp_maxd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11408                break;
11409            case 0x78: /* FMINNMP */
11410                gen_helper_vfp_minnumd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11411                break;
11412            case 0x7e: /* FMINP */
11413                gen_helper_vfp_mind(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11414                break;
11415            default:
11416                g_assert_not_reached();
11417            }
11418
11419            tcg_temp_free_i64(tcg_op1);
11420            tcg_temp_free_i64(tcg_op2);
11421        }
11422
11423        for (pass = 0; pass < 2; pass++) {
11424            write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
11425            tcg_temp_free_i64(tcg_res[pass]);
11426        }
11427    } else {
11428        int maxpass = is_q ? 4 : 2;
11429        TCGv_i32 tcg_res[4];
11430
11431        for (pass = 0; pass < maxpass; pass++) {
11432            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
11433            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
11434            NeonGenTwoOpFn *genfn = NULL;
11435            int passreg = pass < (maxpass / 2) ? rn : rm;
11436            int passelt = (is_q && (pass & 1)) ? 2 : 0;
11437
11438            read_vec_element_i32(s, tcg_op1, passreg, passelt, MO_32);
11439            read_vec_element_i32(s, tcg_op2, passreg, passelt + 1, MO_32);
11440            tcg_res[pass] = tcg_temp_new_i32();
11441
11442            switch (opcode) {
11443            case 0x17: /* ADDP */
11444            {
11445                static NeonGenTwoOpFn * const fns[3] = {
11446                    gen_helper_neon_padd_u8,
11447                    gen_helper_neon_padd_u16,
11448                    tcg_gen_add_i32,
11449                };
11450                genfn = fns[size];
11451                break;
11452            }
11453            case 0x14: /* SMAXP, UMAXP */
11454            {
11455                static NeonGenTwoOpFn * const fns[3][2] = {
11456                    { gen_helper_neon_pmax_s8, gen_helper_neon_pmax_u8 },
11457                    { gen_helper_neon_pmax_s16, gen_helper_neon_pmax_u16 },
11458                    { tcg_gen_smax_i32, tcg_gen_umax_i32 },
11459                };
11460                genfn = fns[size][u];
11461                break;
11462            }
11463            case 0x15: /* SMINP, UMINP */
11464            {
11465                static NeonGenTwoOpFn * const fns[3][2] = {
11466                    { gen_helper_neon_pmin_s8, gen_helper_neon_pmin_u8 },
11467                    { gen_helper_neon_pmin_s16, gen_helper_neon_pmin_u16 },
11468                    { tcg_gen_smin_i32, tcg_gen_umin_i32 },
11469                };
11470                genfn = fns[size][u];
11471                break;
11472            }
11473            /* The FP operations are all on single floats (32 bit) */
11474            case 0x58: /* FMAXNMP */
11475                gen_helper_vfp_maxnums(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11476                break;
11477            case 0x5a: /* FADDP */
11478                gen_helper_vfp_adds(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11479                break;
11480            case 0x5e: /* FMAXP */
11481                gen_helper_vfp_maxs(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11482                break;
11483            case 0x78: /* FMINNMP */
11484                gen_helper_vfp_minnums(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11485                break;
11486            case 0x7e: /* FMINP */
11487                gen_helper_vfp_mins(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11488                break;
11489            default:
11490                g_assert_not_reached();
11491            }
11492
11493            /* FP ops called directly, otherwise call now */
11494            if (genfn) {
11495                genfn(tcg_res[pass], tcg_op1, tcg_op2);
11496            }
11497
11498            tcg_temp_free_i32(tcg_op1);
11499            tcg_temp_free_i32(tcg_op2);
11500        }
11501
11502        for (pass = 0; pass < maxpass; pass++) {
11503            write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32);
11504            tcg_temp_free_i32(tcg_res[pass]);
11505        }
11506        clear_vec_high(s, is_q, rd);
11507    }
11508
11509    if (fpst) {
11510        tcg_temp_free_ptr(fpst);
11511    }
11512}
11513
/* Floating point op subgroup of C3.6.16. */
static void disas_simd_3same_float(DisasContext *s, uint32_t insn)
{
    /* For floating point ops, the U, size[1] and opcode bits
     * together indicate the operation. size[0] indicates single
     * or double.
     */
    int fpopcode = extract32(insn, 11, 5)   /* bits 0-4: opcode */
        | (extract32(insn, 23, 1) << 5)     /* bit 5: size[1] */
        | (extract32(insn, 29, 1) << 6);    /* bit 6: U */
    int is_q = extract32(insn, 30, 1);
    int size = extract32(insn, 22, 1);      /* size[0]: 0 = single, 1 = double */
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    int datasize = is_q ? 128 : 64;
    int esize = 32 << size;
    int elements = datasize / esize;

    /* Double-precision with only a 64-bit vector is unallocated */
    if (size == 1 && !is_q) {
        unallocated_encoding(s);
        return;
    }

    switch (fpopcode) {
    case 0x58: /* FMAXNMP */
    case 0x5a: /* FADDP */
    case 0x5e: /* FMAXP */
    case 0x78: /* FMINNMP */
    case 0x7e: /* FMINP */
        if (size && !is_q) {
            unallocated_encoding(s);
            return;
        }
        /* handle_simd_3same_pair does its own fp_access_check */
        handle_simd_3same_pair(s, is_q, 0, fpopcode, size ? MO_64 : MO_32,
                               rn, rm, rd);
        return;
    case 0x1b: /* FMULX */
    case 0x1f: /* FRECPS */
    case 0x3f: /* FRSQRTS */
    case 0x5d: /* FACGE */
    case 0x7d: /* FACGT */
    case 0x19: /* FMLA */
    case 0x39: /* FMLS */
    case 0x18: /* FMAXNM */
    case 0x1a: /* FADD */
    case 0x1c: /* FCMEQ */
    case 0x1e: /* FMAX */
    case 0x38: /* FMINNM */
    case 0x3a: /* FSUB */
    case 0x3e: /* FMIN */
    case 0x5b: /* FMUL */
    case 0x5c: /* FCMGE */
    case 0x5f: /* FDIV */
    case 0x7a: /* FABD */
    case 0x7c: /* FCMGT */
        if (!fp_access_check(s)) {
            return;
        }
        handle_3same_float(s, size, elements, fpopcode, rd, rn, rm);
        return;

    case 0x1d: /* FMLAL  */
    case 0x3d: /* FMLSL  */
    case 0x59: /* FMLAL2 */
    case 0x79: /* FMLSL2 */
        /* Widening half->single FMLA group: requires FEAT_FHM and
         * only the size[0] == 0 encodings are allocated.
         */
        if (size & 1 || !dc_isar_feature(aa64_fhm, s)) {
            unallocated_encoding(s);
            return;
        }
        if (fp_access_check(s)) {
            int is_s = extract32(insn, 23, 1);  /* subtracting (FMLSL) form */
            int is_2 = extract32(insn, 29, 1);  /* "2" (upper-half) form */
            /* Pack the variant selectors into the helper's data argument */
            int data = (is_2 << 1) | is_s;
            tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
                               vec_full_reg_offset(s, rn),
                               vec_full_reg_offset(s, rm), cpu_env,
                               is_q ? 16 : 8, vec_full_reg_size(s),
                               data, gen_helper_gvec_fmlal_a64);
        }
        return;

    default:
        unallocated_encoding(s);
        return;
    }
}
11602
11603/* Integer op subgroup of C3.6.16. */
11604static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
11605{
11606    int is_q = extract32(insn, 30, 1);
11607    int u = extract32(insn, 29, 1);
11608    int size = extract32(insn, 22, 2);
11609    int opcode = extract32(insn, 11, 5);
11610    int rm = extract32(insn, 16, 5);
11611    int rn = extract32(insn, 5, 5);
11612    int rd = extract32(insn, 0, 5);
11613    int pass;
11614    TCGCond cond;
11615
11616    switch (opcode) {
11617    case 0x13: /* MUL, PMUL */
11618        if (u && size != 0) {
11619            unallocated_encoding(s);
11620            return;
11621        }
11622        /* fall through */
11623    case 0x0: /* SHADD, UHADD */
11624    case 0x2: /* SRHADD, URHADD */
11625    case 0x4: /* SHSUB, UHSUB */
11626    case 0xc: /* SMAX, UMAX */
11627    case 0xd: /* SMIN, UMIN */
11628    case 0xe: /* SABD, UABD */
11629    case 0xf: /* SABA, UABA */
11630    case 0x12: /* MLA, MLS */
11631        if (size == 3) {
11632            unallocated_encoding(s);
11633            return;
11634        }
11635        break;
11636    case 0x16: /* SQDMULH, SQRDMULH */
11637        if (size == 0 || size == 3) {
11638            unallocated_encoding(s);
11639            return;
11640        }
11641        break;
11642    default:
11643        if (size == 3 && !is_q) {
11644            unallocated_encoding(s);
11645            return;
11646        }
11647        break;
11648    }
11649
11650    if (!fp_access_check(s)) {
11651        return;
11652    }
11653
11654    switch (opcode) {
11655    case 0x01: /* SQADD, UQADD */
11656        if (u) {
11657            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_uqadd_qc, size);
11658        } else {
11659            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sqadd_qc, size);
11660        }
11661        return;
11662    case 0x05: /* SQSUB, UQSUB */
11663        if (u) {
11664            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_uqsub_qc, size);
11665        } else {
11666            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sqsub_qc, size);
11667        }
11668        return;
11669    case 0x08: /* SSHL, USHL */
11670        if (u) {
11671            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_ushl, size);
11672        } else {
11673            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sshl, size);
11674        }
11675        return;
11676    case 0x0c: /* SMAX, UMAX */
11677        if (u) {
11678            gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_umax, size);
11679        } else {
11680            gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_smax, size);
11681        }
11682        return;
11683    case 0x0d: /* SMIN, UMIN */
11684        if (u) {
11685            gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_umin, size);
11686        } else {
11687            gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_smin, size);
11688        }
11689        return;
11690    case 0xe: /* SABD, UABD */
11691        if (u) {
11692            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_uabd, size);
11693        } else {
11694            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sabd, size);
11695        }
11696        return;
11697    case 0xf: /* SABA, UABA */
11698        if (u) {
11699            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_uaba, size);
11700        } else {
11701            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_saba, size);
11702        }
11703        return;
11704    case 0x10: /* ADD, SUB */
11705        if (u) {
11706            gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_sub, size);
11707        } else {
11708            gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_add, size);
11709        }
11710        return;
11711    case 0x13: /* MUL, PMUL */
11712        if (!u) { /* MUL */
11713            gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_mul, size);
11714        } else {  /* PMUL */
11715            gen_gvec_op3_ool(s, is_q, rd, rn, rm, 0, gen_helper_gvec_pmul_b);
11716        }
11717        return;
11718    case 0x12: /* MLA, MLS */
11719        if (u) {
11720            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_mls, size);
11721        } else {
11722            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_mla, size);
11723        }
11724        return;
11725    case 0x16: /* SQDMULH, SQRDMULH */
11726        {
11727            static gen_helper_gvec_3_ptr * const fns[2][2] = {
11728                { gen_helper_neon_sqdmulh_h, gen_helper_neon_sqrdmulh_h },
11729                { gen_helper_neon_sqdmulh_s, gen_helper_neon_sqrdmulh_s },
11730            };
11731            gen_gvec_op3_qc(s, is_q, rd, rn, rm, fns[size - 1][u]);
11732        }
11733        return;
11734    case 0x11:
11735        if (!u) { /* CMTST */
11736            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_cmtst, size);
11737            return;
11738        }
11739        /* else CMEQ */
11740        cond = TCG_COND_EQ;
11741        goto do_gvec_cmp;
11742    case 0x06: /* CMGT, CMHI */
11743        cond = u ? TCG_COND_GTU : TCG_COND_GT;
11744        goto do_gvec_cmp;
11745    case 0x07: /* CMGE, CMHS */
11746        cond = u ? TCG_COND_GEU : TCG_COND_GE;
11747    do_gvec_cmp:
11748        tcg_gen_gvec_cmp(cond, size, vec_full_reg_offset(s, rd),
11749                         vec_full_reg_offset(s, rn),
11750                         vec_full_reg_offset(s, rm),
11751                         is_q ? 16 : 8, vec_full_reg_size(s));
11752        return;
11753    }
11754
11755    if (size == 3) {
11756        assert(is_q);
11757        for (pass = 0; pass < 2; pass++) {
11758            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
11759            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
11760            TCGv_i64 tcg_res = tcg_temp_new_i64();
11761
11762            read_vec_element(s, tcg_op1, rn, pass, MO_64);
11763            read_vec_element(s, tcg_op2, rm, pass, MO_64);
11764
11765            handle_3same_64(s, opcode, u, tcg_res, tcg_op1, tcg_op2);
11766
11767            write_vec_element(s, tcg_res, rd, pass, MO_64);
11768
11769            tcg_temp_free_i64(tcg_res);
11770            tcg_temp_free_i64(tcg_op1);
11771            tcg_temp_free_i64(tcg_op2);
11772        }
11773    } else {
11774        for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
11775            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
11776            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
11777            TCGv_i32 tcg_res = tcg_temp_new_i32();
11778            NeonGenTwoOpFn *genfn = NULL;
11779            NeonGenTwoOpEnvFn *genenvfn = NULL;
11780
11781            read_vec_element_i32(s, tcg_op1, rn, pass, MO_32);
11782            read_vec_element_i32(s, tcg_op2, rm, pass, MO_32);
11783
11784            switch (opcode) {
11785            case 0x0: /* SHADD, UHADD */
11786            {
11787                static NeonGenTwoOpFn * const fns[3][2] = {
11788                    { gen_helper_neon_hadd_s8, gen_helper_neon_hadd_u8 },
11789                    { gen_helper_neon_hadd_s16, gen_helper_neon_hadd_u16 },
11790                    { gen_helper_neon_hadd_s32, gen_helper_neon_hadd_u32 },
11791                };
11792                genfn = fns[size][u];
11793                break;
11794            }
11795            case 0x2: /* SRHADD, URHADD */
11796            {
11797                static NeonGenTwoOpFn * const fns[3][2] = {
11798                    { gen_helper_neon_rhadd_s8, gen_helper_neon_rhadd_u8 },
11799                    { gen_helper_neon_rhadd_s16, gen_helper_neon_rhadd_u16 },
11800                    { gen_helper_neon_rhadd_s32, gen_helper_neon_rhadd_u32 },
11801                };
11802                genfn = fns[size][u];
11803                break;
11804            }
11805            case 0x4: /* SHSUB, UHSUB */
11806            {
11807                static NeonGenTwoOpFn * const fns[3][2] = {
11808                    { gen_helper_neon_hsub_s8, gen_helper_neon_hsub_u8 },
11809                    { gen_helper_neon_hsub_s16, gen_helper_neon_hsub_u16 },
11810                    { gen_helper_neon_hsub_s32, gen_helper_neon_hsub_u32 },
11811                };
11812                genfn = fns[size][u];
11813                break;
11814            }
11815            case 0x9: /* SQSHL, UQSHL */
11816            {
11817                static NeonGenTwoOpEnvFn * const fns[3][2] = {
11818                    { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 },
11819                    { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 },
11820                    { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 },
11821                };
11822                genenvfn = fns[size][u];
11823                break;
11824            }
11825            case 0xa: /* SRSHL, URSHL */
11826            {
11827                static NeonGenTwoOpFn * const fns[3][2] = {
11828                    { gen_helper_neon_rshl_s8, gen_helper_neon_rshl_u8 },
11829                    { gen_helper_neon_rshl_s16, gen_helper_neon_rshl_u16 },
11830                    { gen_helper_neon_rshl_s32, gen_helper_neon_rshl_u32 },
11831                };
11832                genfn = fns[size][u];
11833                break;
11834            }
11835            case 0xb: /* SQRSHL, UQRSHL */
11836            {
11837                static NeonGenTwoOpEnvFn * const fns[3][2] = {
11838                    { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 },
11839                    { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 },
11840                    { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 },
11841                };
11842                genenvfn = fns[size][u];
11843                break;
11844            }
11845            default:
11846                g_assert_not_reached();
11847            }
11848
11849            if (genenvfn) {
11850                genenvfn(tcg_res, cpu_env, tcg_op1, tcg_op2);
11851            } else {
11852                genfn(tcg_res, tcg_op1, tcg_op2);
11853            }
11854
11855            write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
11856
11857            tcg_temp_free_i32(tcg_res);
11858            tcg_temp_free_i32(tcg_op1);
11859            tcg_temp_free_i32(tcg_op2);
11860        }
11861    }
11862    clear_vec_high(s, is_q, rd);
11863}
11864
11865/* AdvSIMD three same
11866 *  31  30  29  28       24 23  22  21 20  16 15    11  10 9    5 4    0
11867 * +---+---+---+-----------+------+---+------+--------+---+------+------+
11868 * | 0 | Q | U | 0 1 1 1 0 | size | 1 |  Rm  | opcode | 1 |  Rn  |  Rd  |
11869 * +---+---+---+-----------+------+---+------+--------+---+------+------+
11870 */
11871static void disas_simd_three_reg_same(DisasContext *s, uint32_t insn)
11872{
11873    int opcode = extract32(insn, 11, 5);
11874
11875    switch (opcode) {
11876    case 0x3: /* logic ops */
11877        disas_simd_3same_logic(s, insn);
11878        break;
11879    case 0x17: /* ADDP */
11880    case 0x14: /* SMAXP, UMAXP */
11881    case 0x15: /* SMINP, UMINP */
11882    {
11883        /* Pairwise operations */
11884        int is_q = extract32(insn, 30, 1);
11885        int u = extract32(insn, 29, 1);
11886        int size = extract32(insn, 22, 2);
11887        int rm = extract32(insn, 16, 5);
11888        int rn = extract32(insn, 5, 5);
11889        int rd = extract32(insn, 0, 5);
11890        if (opcode == 0x17) {
11891            if (u || (size == 3 && !is_q)) {
11892                unallocated_encoding(s);
11893                return;
11894            }
11895        } else {
11896            if (size == 3) {
11897                unallocated_encoding(s);
11898                return;
11899            }
11900        }
11901        handle_simd_3same_pair(s, is_q, u, opcode, size, rn, rm, rd);
11902        break;
11903    }
11904    case 0x18 ... 0x31:
11905        /* floating point ops, sz[1] and U are part of opcode */
11906        disas_simd_3same_float(s, insn);
11907        break;
11908    default:
11909        disas_simd_3same_int(s, insn);
11910        break;
11911    }
11912}
11913
/*
 * Advanced SIMD three same (ARMv8.2 FP16 variants)
 *
 *  31  30  29  28       24 23  22 21 20  16 15 14 13    11 10  9    5 4    0
 * +---+---+---+-----------+---------+------+-----+--------+---+------+------+
 * | 0 | Q | U | 0 1 1 1 0 | a | 1 0 |  Rm  | 0 0 | opcode | 1 |  Rn  |  Rd  |
 * +---+---+---+-----------+---------+------+-----+--------+---+------+------+
 *
 * This includes FMULX, FCMEQ (register), FRECPS, FRSQRTS, FCMGE
 * (register), FACGE, FABD, FCMGT (register) and FACGT.
 *
 */
static void disas_simd_three_reg_same_fp16(DisasContext *s, uint32_t insn)
{
    int opcode = extract32(insn, 11, 3);
    int u = extract32(insn, 29, 1);
    int a = extract32(insn, 23, 1);
    int is_q = extract32(insn, 30, 1);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    /*
     * For these floating point ops, the U, a and opcode bits
     * together indicate the operation.
     */
    int fpopcode = opcode | (a << 3) | (u << 4);
    int datasize = is_q ? 128 : 64;
    /* number of 16-bit lanes operated on */
    int elements = datasize / 16;
    bool pairwise;
    TCGv_ptr fpst;
    int pass;

    /* Classify the op (pairwise vs element-wise) and reject
     * unallocated fpopcode values.
     */
    switch (fpopcode) {
    case 0x0: /* FMAXNM */
    case 0x1: /* FMLA */
    case 0x2: /* FADD */
    case 0x3: /* FMULX */
    case 0x4: /* FCMEQ */
    case 0x6: /* FMAX */
    case 0x7: /* FRECPS */
    case 0x8: /* FMINNM */
    case 0x9: /* FMLS */
    case 0xa: /* FSUB */
    case 0xe: /* FMIN */
    case 0xf: /* FRSQRTS */
    case 0x13: /* FMUL */
    case 0x14: /* FCMGE */
    case 0x15: /* FACGE */
    case 0x17: /* FDIV */
    case 0x1a: /* FABD */
    case 0x1c: /* FCMGT */
    case 0x1d: /* FACGT */
        pairwise = false;
        break;
    case 0x10: /* FMAXNMP */
    case 0x12: /* FADDP */
    case 0x16: /* FMAXP */
    case 0x18: /* FMINNMP */
    case 0x1e: /* FMINP */
        pairwise = true;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    /* The whole group is gated on the FP16 extension. */
    if (!dc_isar_feature(aa64_fp16, s)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* Use the half-precision variant of the FP status/control flags. */
    fpst = fpstatus_ptr(FPST_FPCR_F16);

    if (pairwise) {
        /*
         * Pairwise: the low half of the result comes from adjacent
         * pairs of Rn, the high half from adjacent pairs of Rm
         * (see the passreg selection below).  Results are buffered in
         * tcg_res[] and written back only after all inputs have been
         * read, since Rd may alias Rn or Rm.
         */
        int maxpass = is_q ? 8 : 4;
        TCGv_i32 tcg_op1 = tcg_temp_new_i32();
        TCGv_i32 tcg_op2 = tcg_temp_new_i32();
        TCGv_i32 tcg_res[8];

        for (pass = 0; pass < maxpass; pass++) {
            int passreg = pass < (maxpass / 2) ? rn : rm;
            int passelt = (pass << 1) & (maxpass - 1);

            read_vec_element_i32(s, tcg_op1, passreg, passelt, MO_16);
            read_vec_element_i32(s, tcg_op2, passreg, passelt + 1, MO_16);
            tcg_res[pass] = tcg_temp_new_i32();

            switch (fpopcode) {
            case 0x10: /* FMAXNMP */
                gen_helper_advsimd_maxnumh(tcg_res[pass], tcg_op1, tcg_op2,
                                           fpst);
                break;
            case 0x12: /* FADDP */
                gen_helper_advsimd_addh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x16: /* FMAXP */
                gen_helper_advsimd_maxh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x18: /* FMINNMP */
                gen_helper_advsimd_minnumh(tcg_res[pass], tcg_op1, tcg_op2,
                                           fpst);
                break;
            case 0x1e: /* FMINP */
                gen_helper_advsimd_minh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }
        }

        /* Write-back pass, separate from the compute pass (see above). */
        for (pass = 0; pass < maxpass; pass++) {
            write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_16);
            tcg_temp_free_i32(tcg_res[pass]);
        }

        tcg_temp_free_i32(tcg_op1);
        tcg_temp_free_i32(tcg_op2);

    } else {
        /* Element-wise: Rd[i] = op(Rn[i], Rm[i]) for each f16 lane. */
        for (pass = 0; pass < elements; pass++) {
            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op1, rn, pass, MO_16);
            read_vec_element_i32(s, tcg_op2, rm, pass, MO_16);

            switch (fpopcode) {
            case 0x0: /* FMAXNM */
                gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1: /* FMLA */
                /* Fused multiply-accumulate into the existing Rd element */
                read_vec_element_i32(s, tcg_res, rd, pass, MO_16);
                gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_res,
                                           fpst);
                break;
            case 0x2: /* FADD */
                gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3: /* FMULX */
                gen_helper_advsimd_mulxh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x4: /* FCMEQ */
                gen_helper_advsimd_ceq_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x6: /* FMAX */
                gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7: /* FRECPS */
                gen_helper_recpsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x8: /* FMINNM */
                gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x9: /* FMLS */
                /* As usual for ARM, separate negation for fused multiply-add */
                tcg_gen_xori_i32(tcg_op1, tcg_op1, 0x8000);
                read_vec_element_i32(s, tcg_res, rd, pass, MO_16);
                gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_res,
                                           fpst);
                break;
            case 0xa: /* FSUB */
                gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xe: /* FMIN */
                gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xf: /* FRSQRTS */
                gen_helper_rsqrtsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x13: /* FMUL */
                gen_helper_advsimd_mulh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x14: /* FCMGE */
                gen_helper_advsimd_cge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x15: /* FACGE */
                gen_helper_advsimd_acge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x17: /* FDIV */
                gen_helper_advsimd_divh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1a: /* FABD */
                /* Subtract, then clear the f16 sign bit for |a - b| */
                gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
                tcg_gen_andi_i32(tcg_res, tcg_res, 0x7fff);
                break;
            case 0x1c: /* FCMGT */
                gen_helper_advsimd_cgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1d: /* FACGT */
                gen_helper_advsimd_acgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            write_vec_element_i32(s, tcg_res, rd, pass, MO_16);
            tcg_temp_free_i32(tcg_res);
            tcg_temp_free_i32(tcg_op1);
            tcg_temp_free_i32(tcg_op2);
        }
    }

    tcg_temp_free_ptr(fpst);

    clear_vec_high(s, is_q, rd);
}
12125
/* AdvSIMD three same extra
 *  31   30  29 28       24 23  22  21 20  16  15 14    11  10 9  5 4  0
 * +---+---+---+-----------+------+---+------+---+--------+---+----+----+
 * | 0 | Q | U | 0 1 1 1 0 | size | 0 |  Rm  | 1 | opcode | 1 | Rn | Rd |
 * +---+---+---+-----------+------+---+------+---+--------+---+----+----+
 *
 * Two-phase decode: the first switch validates size/Q and determines
 * which ISA feature gates the encoding; the second switch (reached only
 * after the feature and FP-access checks pass) emits the operation.
 */
static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 4);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    bool u = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);
    bool feature;
    int rot;

    /* Fold U into the decode value so each U/opcode pair gets its own case */
    switch (u * 16 + opcode) {
    case 0x10: /* SQRDMLAH (vector) */
    case 0x11: /* SQRDMLSH (vector) */
        /* Only 16- and 32-bit element sizes are allocated */
        if (size != 1 && size != 2) {
            unallocated_encoding(s);
            return;
        }
        feature = dc_isar_feature(aa64_rdm, s);
        break;
    case 0x02: /* SDOT (vector) */
    case 0x12: /* UDOT (vector) */
        if (size != MO_32) {
            unallocated_encoding(s);
            return;
        }
        feature = dc_isar_feature(aa64_dp, s);
        break;
    case 0x03: /* USDOT */
        if (size != MO_32) {
            unallocated_encoding(s);
            return;
        }
        feature = dc_isar_feature(aa64_i8mm, s);
        break;
    case 0x04: /* SMMLA */
    case 0x14: /* UMMLA */
    case 0x05: /* USMMLA */
        /* Matrix multiplies exist only in the Q=1 form */
        if (!is_q || size != MO_32) {
            unallocated_encoding(s);
            return;
        }
        feature = dc_isar_feature(aa64_i8mm, s);
        break;
    case 0x18: /* FCMLA, #0 */
    case 0x19: /* FCMLA, #90 */
    case 0x1a: /* FCMLA, #180 */
    case 0x1b: /* FCMLA, #270 */
    case 0x1c: /* FCADD, #90 */
    case 0x1e: /* FCADD, #270 */
        /* size 0 is unallocated; size 1 (f16) also needs FEAT_FP16;
         * size 3 (f64) needs Q=1.
         */
        if (size == 0
            || (size == 1 && !dc_isar_feature(aa64_fp16, s))
            || (size == 3 && !is_q)) {
            unallocated_encoding(s);
            return;
        }
        feature = dc_isar_feature(aa64_fcma, s);
        break;
    case 0x1d: /* BFMMLA */
        if (size != MO_16 || !is_q) {
            unallocated_encoding(s);
            return;
        }
        feature = dc_isar_feature(aa64_bf16, s);
        break;
    case 0x1f:
        switch (size) {
        case 1: /* BFDOT */
        case 3: /* BFMLAL{B,T} */
            feature = dc_isar_feature(aa64_bf16, s);
            break;
        default:
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }
    if (!feature) {
        unallocated_encoding(s);
        return;
    }
    if (!fp_access_check(s)) {
        return;
    }

    /* Emission phase: switch on opcode alone, consulting u where needed */
    switch (opcode) {
    case 0x0: /* SQRDMLAH (vector) */
        gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sqrdmlah_qc, size);
        return;

    case 0x1: /* SQRDMLSH (vector) */
        gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sqrdmlsh_qc, size);
        return;

    case 0x2: /* SDOT / UDOT */
        gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0,
                         u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b);
        return;

    case 0x3: /* USDOT */
        gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0, gen_helper_gvec_usdot_b);
        return;

    case 0x04: /* SMMLA, UMMLA */
        gen_gvec_op4_ool(s, 1, rd, rn, rm, rd, 0,
                         u ? gen_helper_gvec_ummla_b
                         : gen_helper_gvec_smmla_b);
        return;
    case 0x05: /* USMMLA */
        gen_gvec_op4_ool(s, 1, rd, rn, rm, rd, 0, gen_helper_gvec_usmmla_b);
        return;

    case 0x8: /* FCMLA, #0 */
    case 0x9: /* FCMLA, #90 */
    case 0xa: /* FCMLA, #180 */
    case 0xb: /* FCMLA, #270 */
        /* The low two opcode bits encode the rotation */
        rot = extract32(opcode, 0, 2);
        switch (size) {
        case 1:
            gen_gvec_op4_fpst(s, is_q, rd, rn, rm, rd, true, rot,
                              gen_helper_gvec_fcmlah);
            break;
        case 2:
            gen_gvec_op4_fpst(s, is_q, rd, rn, rm, rd, false, rot,
                              gen_helper_gvec_fcmlas);
            break;
        case 3:
            gen_gvec_op4_fpst(s, is_q, rd, rn, rm, rd, false, rot,
                              gen_helper_gvec_fcmlad);
            break;
        default:
            g_assert_not_reached();
        }
        return;

    case 0xc: /* FCADD, #90 */
    case 0xe: /* FCADD, #270 */
        /* Bit 1 of opcode selects #90 vs #270 */
        rot = extract32(opcode, 1, 1);
        switch (size) {
        case 1:
            gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
                              gen_helper_gvec_fcaddh);
            break;
        case 2:
            gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
                              gen_helper_gvec_fcadds);
            break;
        case 3:
            gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
                              gen_helper_gvec_fcaddd);
            break;
        default:
            g_assert_not_reached();
        }
        return;

    case 0xd: /* BFMMLA */
        gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0, gen_helper_gvec_bfmmla);
        return;
    case 0xf:
        switch (size) {
        case 1: /* BFDOT */
            gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0, gen_helper_gvec_bfdot);
            break;
        case 3: /* BFMLAL{B,T} */
            gen_gvec_op4_fpst(s, 1, rd, rn, rm, rd, false, is_q,
                              gen_helper_gvec_bfmlal);
            break;
        default:
            g_assert_not_reached();
        }
        return;

    default:
        g_assert_not_reached();
    }
}
12313
12314static void handle_2misc_widening(DisasContext *s, int opcode, bool is_q,
12315                                  int size, int rn, int rd)
12316{
12317    /* Handle 2-reg-misc ops which are widening (so each size element
12318     * in the source becomes a 2*size element in the destination.
12319     * The only instruction like this is FCVTL.
12320     */
12321    int pass;
12322
12323    if (size == 3) {
12324        /* 32 -> 64 bit fp conversion */
12325        TCGv_i64 tcg_res[2];
12326        int srcelt = is_q ? 2 : 0;
12327
12328        for (pass = 0; pass < 2; pass++) {
12329            TCGv_i32 tcg_op = tcg_temp_new_i32();
12330            tcg_res[pass] = tcg_temp_new_i64();
12331
12332            read_vec_element_i32(s, tcg_op, rn, srcelt + pass, MO_32);
12333            gen_helper_vfp_fcvtds(tcg_res[pass], tcg_op, cpu_env);
12334            tcg_temp_free_i32(tcg_op);
12335        }
12336        for (pass = 0; pass < 2; pass++) {
12337            write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
12338            tcg_temp_free_i64(tcg_res[pass]);
12339        }
12340    } else {
12341        /* 16 -> 32 bit fp conversion */
12342        int srcelt = is_q ? 4 : 0;
12343        TCGv_i32 tcg_res[4];
12344        TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
12345        TCGv_i32 ahp = get_ahp_flag();
12346
12347        for (pass = 0; pass < 4; pass++) {
12348            tcg_res[pass] = tcg_temp_new_i32();
12349
12350            read_vec_element_i32(s, tcg_res[pass], rn, srcelt + pass, MO_16);
12351            gen_helper_vfp_fcvt_f16_to_f32(tcg_res[pass], tcg_res[pass],
12352                                           fpst, ahp);
12353        }
12354        for (pass = 0; pass < 4; pass++) {
12355            write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32);
12356            tcg_temp_free_i32(tcg_res[pass]);
12357        }
12358
12359        tcg_temp_free_ptr(fpst);
12360        tcg_temp_free_i32(ahp);
12361    }
12362}
12363
12364static void handle_rev(DisasContext *s, int opcode, bool u,
12365                       bool is_q, int size, int rn, int rd)
12366{
12367    int op = (opcode << 1) | u;
12368    int opsz = op + size;
12369    int grp_size = 3 - opsz;
12370    int dsize = is_q ? 128 : 64;
12371    int i;
12372
12373    if (opsz >= 3) {
12374        unallocated_encoding(s);
12375        return;
12376    }
12377
12378    if (!fp_access_check(s)) {
12379        return;
12380    }
12381
12382    if (size == 0) {
12383        /* Special case bytes, use bswap op on each group of elements */
12384        int groups = dsize / (8 << grp_size);
12385
12386        for (i = 0; i < groups; i++) {
12387            TCGv_i64 tcg_tmp = tcg_temp_new_i64();
12388
12389            read_vec_element(s, tcg_tmp, rn, i, grp_size);
12390            switch (grp_size) {
12391            case MO_16:
12392                tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp, TCG_BSWAP_IZ);
12393                break;
12394            case MO_32:
12395                tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp, TCG_BSWAP_IZ);
12396                break;
12397            case MO_64:
12398                tcg_gen_bswap64_i64(tcg_tmp, tcg_tmp);
12399                break;
12400            default:
12401                g_assert_not_reached();
12402            }
12403            write_vec_element(s, tcg_tmp, rd, i, grp_size);
12404            tcg_temp_free_i64(tcg_tmp);
12405        }
12406        clear_vec_high(s, is_q, rd);
12407    } else {
12408        int revmask = (1 << grp_size) - 1;
12409        int esize = 8 << size;
12410        int elements = dsize / esize;
12411        TCGv_i64 tcg_rn = tcg_temp_new_i64();
12412        TCGv_i64 tcg_rd = tcg_const_i64(0);
12413        TCGv_i64 tcg_rd_hi = tcg_const_i64(0);
12414
12415        for (i = 0; i < elements; i++) {
12416            int e_rev = (i & 0xf) ^ revmask;
12417            int off = e_rev * esize;
12418            read_vec_element(s, tcg_rn, rn, i, size);
12419            if (off >= 64) {
12420                tcg_gen_deposit_i64(tcg_rd_hi, tcg_rd_hi,
12421                                    tcg_rn, off - 64, esize);
12422            } else {
12423                tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, off, esize);
12424            }
12425        }
12426        write_vec_element(s, tcg_rd, rd, 0, MO_64);
12427        write_vec_element(s, tcg_rd_hi, rd, 1, MO_64);
12428
12429        tcg_temp_free_i64(tcg_rd_hi);
12430        tcg_temp_free_i64(tcg_rd);
12431        tcg_temp_free_i64(tcg_rn);
12432    }
12433}
12434
static void handle_2misc_pairwise(DisasContext *s, int opcode, bool u,
                                  bool is_q, int size, int rn, int rd)
{
    /* Implement the pairwise operations from 2-misc:
     * SADDLP, UADDLP, SADALP, UADALP.
     * These all add pairs of elements in the input to produce a
     * double-width result element in the output (possibly accumulating).
     */
    /* opcode 0x6 is SADALP/UADALP, which also accumulate into Rd */
    bool accum = (opcode == 0x6);
    int maxpass = is_q ? 2 : 1;
    int pass;
    TCGv_i64 tcg_res[2];

    if (size == 2) {
        /* 32 + 32 -> 64 op */
        /* Sign-extend the element loads for the signed (non-u) variants */
        MemOp memop = size + (u ? 0 : MO_SIGN);

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();

            tcg_res[pass] = tcg_temp_new_i64();

            read_vec_element(s, tcg_op1, rn, pass * 2, memop);
            read_vec_element(s, tcg_op2, rn, pass * 2 + 1, memop);
            tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2);
            if (accum) {
                read_vec_element(s, tcg_op1, rd, pass, MO_64);
                tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
            }

            tcg_temp_free_i64(tcg_op1);
            tcg_temp_free_i64(tcg_op2);
        }
    } else {
        /* 8- and 16-bit elements: helpers pair-add all lanes of a
         * 64-bit input in one call.
         */
        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();
            NeonGenOne64OpFn *genfn;
            static NeonGenOne64OpFn * const fns[2][2] = {
                { gen_helper_neon_addlp_s8,  gen_helper_neon_addlp_u8 },
                { gen_helper_neon_addlp_s16,  gen_helper_neon_addlp_u16 },
            };

            genfn = fns[size][u];

            tcg_res[pass] = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);
            genfn(tcg_res[pass], tcg_op);

            if (accum) {
                read_vec_element(s, tcg_op, rd, pass, MO_64);
                /* Accumulate lane-wise at the doubled element width */
                if (size == 0) {
                    gen_helper_neon_addl_u16(tcg_res[pass],
                                             tcg_res[pass], tcg_op);
                } else {
                    gen_helper_neon_addl_u32(tcg_res[pass],
                                             tcg_res[pass], tcg_op);
                }
            }
            tcg_temp_free_i64(tcg_op);
        }
    }
    if (!is_q) {
        /* 64-bit source form: the high half of Rd is zeroed */
        tcg_res[1] = tcg_constant_i64(0);
    }
    /* NOTE(review): when !is_q, tcg_res[1] is a tcg_constant; the free
     * below is assumed to tolerate constant temps — confirm against the
     * TCG version in use.
     */
    for (pass = 0; pass < 2; pass++) {
        write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
        tcg_temp_free_i64(tcg_res[pass]);
    }
}
12506
12507static void handle_shll(DisasContext *s, bool is_q, int size, int rn, int rd)
12508{
12509    /* Implement SHLL and SHLL2 */
12510    int pass;
12511    int part = is_q ? 2 : 0;
12512    TCGv_i64 tcg_res[2];
12513
12514    for (pass = 0; pass < 2; pass++) {
12515        static NeonGenWidenFn * const widenfns[3] = {
12516            gen_helper_neon_widen_u8,
12517            gen_helper_neon_widen_u16,
12518            tcg_gen_extu_i32_i64,
12519        };
12520        NeonGenWidenFn *widenfn = widenfns[size];
12521        TCGv_i32 tcg_op = tcg_temp_new_i32();
12522
12523        read_vec_element_i32(s, tcg_op, rn, part + pass, MO_32);
12524        tcg_res[pass] = tcg_temp_new_i64();
12525        widenfn(tcg_res[pass], tcg_op);
12526        tcg_gen_shli_i64(tcg_res[pass], tcg_res[pass], 8 << size);
12527
12528        tcg_temp_free_i32(tcg_op);
12529    }
12530
12531    for (pass = 0; pass < 2; pass++) {
12532        write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
12533        tcg_temp_free_i64(tcg_res[pass]);
12534    }
12535}
12536
12537/* AdvSIMD two reg misc
12538 *   31  30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
12539 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
12540 * | 0 | Q | U | 0 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
12541 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
12542 */
static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
{
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    bool u = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    bool need_fpstatus = false;
    bool need_rmode = false;
    int rmode = -1;
    TCGv_i32 tcg_rmode;
    TCGv_ptr tcg_fpstatus;

    /*
     * First decode pass: reject unallocated encodings and hand off
     * insns which have dedicated expanders (those cases "return").
     * Opcodes which take the shared per-element loop further down
     * just break out of this switch, possibly after setting
     * need_fpstatus / need_rmode / rmode for the common code.
     */
    switch (opcode) {
    case 0x0: /* REV64, REV32 */
    case 0x1: /* REV16 */
        handle_rev(s, opcode, u, is_q, size, rn, rd);
        return;
    case 0x5: /* CNT, NOT, RBIT */
        if (u && size == 0) {
            /* NOT */
            break;
        } else if (u && size == 1) {
            /* RBIT */
            break;
        } else if (!u && size == 0) {
            /* CNT */
            break;
        }
        unallocated_encoding(s);
        return;
    case 0x12: /* XTN, XTN2, SQXTUN, SQXTUN2 */
    case 0x14: /* SQXTN, SQXTN2, UQXTN, UQXTN2 */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }

        handle_2misc_narrow(s, false, opcode, u, is_q, size, rn, rd);
        return;
    case 0x4: /* CLS, CLZ */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x2: /* SADDLP, UADDLP */
    case 0x6: /* SADALP, UADALP */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_2misc_pairwise(s, opcode, u, is_q, size, rn, rd);
        return;
    case 0x13: /* SHLL, SHLL2 */
        if (u == 0 || size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_shll(s, is_q, size, rn, rd);
        return;
    case 0xa: /* CMLT */
        if (u == 1) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x8: /* CMGT, CMGE */
    case 0x9: /* CMEQ, CMLE */
    case 0xb: /* ABS, NEG */
        if (size == 3 && !is_q) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x3: /* SUQADD, USQADD */
        if (size == 3 && !is_q) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_2misc_satacc(s, false, u, is_q, size, rn, rd);
        return;
    case 0x7: /* SQABS, SQNEG */
        if (size == 3 && !is_q) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0xc ... 0xf:
    case 0x16 ... 0x1f:
    {
        /* Floating point: U, size[1] and opcode indicate operation;
         * size[0] indicates single or double precision.
         */
        int is_double = extract32(size, 0, 1);
        /* Fold U and size[1] into opcode, giving a single 7-bit op index. */
        opcode |= (extract32(size, 1, 1) << 5) | (u << 6);
        size = is_double ? 3 : 2;
        switch (opcode) {
        case 0x2f: /* FABS */
        case 0x6f: /* FNEG */
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x1d: /* SCVTF */
        case 0x5d: /* UCVTF */
        {
            bool is_signed = (opcode == 0x1d) ? true : false;
            int elements = is_double ? 2 : is_q ? 4 : 2;
            if (is_double && !is_q) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_simd_intfp_conv(s, rd, rn, elements, is_signed, 0, size);
            return;
        }
        case 0x2c: /* FCMGT (zero) */
        case 0x2d: /* FCMEQ (zero) */
        case 0x2e: /* FCMLT (zero) */
        case 0x6c: /* FCMGE (zero) */
        case 0x6d: /* FCMLE (zero) */
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            handle_2misc_fcmp_zero(s, opcode, false, u, is_q, size, rn, rd);
            return;
        case 0x7f: /* FSQRT */
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x1a: /* FCVTNS */
        case 0x1b: /* FCVTMS */
        case 0x3a: /* FCVTPS */
        case 0x3b: /* FCVTZS */
        case 0x5a: /* FCVTNU */
        case 0x5b: /* FCVTMU */
        case 0x7a: /* FCVTPU */
        case 0x7b: /* FCVTZU */
            need_fpstatus = true;
            need_rmode = true;
            /* Rounding mode is encoded in opcode bits [5] and [0]. */
            rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x5c: /* FCVTAU */
        case 0x1c: /* FCVTAS */
            need_fpstatus = true;
            need_rmode = true;
            rmode = FPROUNDING_TIEAWAY;
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x3c: /* URECPE */
            if (size == 3) {
                unallocated_encoding(s);
                return;
            }
            /* fall through */
        case 0x3d: /* FRECPE */
        case 0x7d: /* FRSQRTE */
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_reciprocal(s, opcode, false, u, is_q, size, rn, rd);
            return;
        case 0x56: /* FCVTXN, FCVTXN2 */
            if (size == 2) {
                unallocated_encoding(s);
                return;
            }
            /* fall through */
        case 0x16: /* FCVTN, FCVTN2 */
            /* handle_2misc_narrow does a 2*size -> size operation, but these
             * instructions encode the source size rather than dest size.
             */
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_narrow(s, false, opcode, 0, is_q, size - 1, rn, rd);
            return;
        case 0x36: /* BFCVTN, BFCVTN2 */
            if (!dc_isar_feature(aa64_bf16, s) || size != 2) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_narrow(s, false, opcode, 0, is_q, size - 1, rn, rd);
            return;
        case 0x17: /* FCVTL, FCVTL2 */
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_widening(s, opcode, is_q, size, rn, rd);
            return;
        case 0x18: /* FRINTN */
        case 0x19: /* FRINTM */
        case 0x38: /* FRINTP */
        case 0x39: /* FRINTZ */
            need_rmode = true;
            rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
            /* fall through */
        case 0x59: /* FRINTX */
        case 0x79: /* FRINTI */
            need_fpstatus = true;
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x58: /* FRINTA */
            need_rmode = true;
            rmode = FPROUNDING_TIEAWAY;
            need_fpstatus = true;
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x7c: /* URSQRTE */
            if (size == 3) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x1e: /* FRINT32Z */
        case 0x1f: /* FRINT64Z */
            need_rmode = true;
            rmode = FPROUNDING_ZERO;
            /* fall through */
        case 0x5e: /* FRINT32X */
        case 0x5f: /* FRINT64X */
            need_fpstatus = true;
            if ((size == 3 && !is_q) || !dc_isar_feature(aa64_frint, s)) {
                unallocated_encoding(s);
                return;
            }
            break;
        default:
            unallocated_encoding(s);
            return;
        }
        break;
    }
    default:
        unallocated_encoding(s);
        return;
    }

    /* Shared path: everything reaching here needs the FP/SIMD access check. */
    if (!fp_access_check(s)) {
        return;
    }

    if (need_fpstatus || need_rmode) {
        tcg_fpstatus = fpstatus_ptr(FPST_FPCR);
    } else {
        tcg_fpstatus = NULL;
    }
    if (need_rmode) {
        /* Install the required rounding mode; set_rmode leaves the
         * previous mode in tcg_rmode so it can be restored at the end.
         */
        tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
    } else {
        tcg_rmode = NULL;
    }

    /* Insns with whole-vector (gvec) expansions. None of these opcodes
     * set need_rmode or need_fpstatus above, so returning early here
     * skips no cleanup.
     */
    switch (opcode) {
    case 0x5:
        if (u && size == 0) { /* NOT */
            gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_not, 0);
            return;
        }
        break;
    case 0x8: /* CMGT, CMGE */
        if (u) {
            gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cge0, size);
        } else {
            gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cgt0, size);
        }
        return;
    case 0x9: /* CMEQ, CMLE */
        if (u) {
            gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cle0, size);
        } else {
            gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_ceq0, size);
        }
        return;
    case 0xa: /* CMLT */
        gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_clt0, size);
        return;
    case 0xb:
        if (u) { /* ABS, NEG */
            gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_neg, size);
        } else {
            gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_abs, size);
        }
        return;
    }

    if (size == 3) {
        /* All 64-bit element operations can be shared with scalar 2misc */
        int pass;

        /* Coverity claims (size == 3 && !is_q) has been eliminated
         * from all paths leading to here.
         */
        tcg_debug_assert(is_q);
        for (pass = 0; pass < 2; pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();
            TCGv_i64 tcg_res = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);

            handle_2misc_64(s, opcode, u, tcg_res, tcg_op,
                            tcg_rmode, tcg_fpstatus);

            write_vec_element(s, tcg_res, rd, pass, MO_64);

            tcg_temp_free_i64(tcg_res);
            tcg_temp_free_i64(tcg_op);
        }
    } else {
        int pass;

        /* 8/16/32-bit elements: operate on one 32-bit lane per pass. */
        for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op, rn, pass, MO_32);

            if (size == 2) {
                /* Special cases for 32 bit elements */
                switch (opcode) {
                case 0x4: /* CLS */
                    if (u) {
                        tcg_gen_clzi_i32(tcg_res, tcg_op, 32);
                    } else {
                        tcg_gen_clrsb_i32(tcg_res, tcg_op);
                    }
                    break;
                case 0x7: /* SQABS, SQNEG */
                    if (u) {
                        gen_helper_neon_qneg_s32(tcg_res, cpu_env, tcg_op);
                    } else {
                        gen_helper_neon_qabs_s32(tcg_res, cpu_env, tcg_op);
                    }
                    break;
                case 0x2f: /* FABS */
                    gen_helper_vfp_abss(tcg_res, tcg_op);
                    break;
                case 0x6f: /* FNEG */
                    gen_helper_vfp_negs(tcg_res, tcg_op);
                    break;
                case 0x7f: /* FSQRT */
                    gen_helper_vfp_sqrts(tcg_res, tcg_op, cpu_env);
                    break;
                case 0x1a: /* FCVTNS */
                case 0x1b: /* FCVTMS */
                case 0x1c: /* FCVTAS */
                case 0x3a: /* FCVTPS */
                case 0x3b: /* FCVTZS */
                    gen_helper_vfp_tosls(tcg_res, tcg_op,
                                         tcg_constant_i32(0), tcg_fpstatus);
                    break;
                case 0x5a: /* FCVTNU */
                case 0x5b: /* FCVTMU */
                case 0x5c: /* FCVTAU */
                case 0x7a: /* FCVTPU */
                case 0x7b: /* FCVTZU */
                    gen_helper_vfp_touls(tcg_res, tcg_op,
                                         tcg_constant_i32(0), tcg_fpstatus);
                    break;
                case 0x18: /* FRINTN */
                case 0x19: /* FRINTM */
                case 0x38: /* FRINTP */
                case 0x39: /* FRINTZ */
                case 0x58: /* FRINTA */
                case 0x79: /* FRINTI */
                    gen_helper_rints(tcg_res, tcg_op, tcg_fpstatus);
                    break;
                case 0x59: /* FRINTX */
                    gen_helper_rints_exact(tcg_res, tcg_op, tcg_fpstatus);
                    break;
                case 0x7c: /* URSQRTE */
                    gen_helper_rsqrte_u32(tcg_res, tcg_op);
                    break;
                case 0x1e: /* FRINT32Z */
                case 0x5e: /* FRINT32X */
                    gen_helper_frint32_s(tcg_res, tcg_op, tcg_fpstatus);
                    break;
                case 0x1f: /* FRINT64Z */
                case 0x5f: /* FRINT64X */
                    gen_helper_frint64_s(tcg_res, tcg_op, tcg_fpstatus);
                    break;
                default:
                    g_assert_not_reached();
                }
            } else {
                /* Use helpers for 8 and 16 bit elements */
                switch (opcode) {
                case 0x5: /* CNT, RBIT */
                    /* For these two insns size is part of the opcode specifier
                     * (handled earlier); they always operate on byte elements.
                     */
                    if (u) {
                        gen_helper_neon_rbit_u8(tcg_res, tcg_op);
                    } else {
                        gen_helper_neon_cnt_u8(tcg_res, tcg_op);
                    }
                    break;
                case 0x7: /* SQABS, SQNEG */
                {
                    NeonGenOneOpEnvFn *genfn;
                    /* Indexed by [size][u]: qabs for u==0, qneg for u==1. */
                    static NeonGenOneOpEnvFn * const fns[2][2] = {
                        { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 },
                        { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 },
                    };
                    genfn = fns[size][u];
                    genfn(tcg_res, cpu_env, tcg_op);
                    break;
                }
                case 0x4: /* CLS, CLZ */
                    if (u) {
                        if (size == 0) {
                            gen_helper_neon_clz_u8(tcg_res, tcg_op);
                        } else {
                            gen_helper_neon_clz_u16(tcg_res, tcg_op);
                        }
                    } else {
                        if (size == 0) {
                            gen_helper_neon_cls_s8(tcg_res, tcg_op);
                        } else {
                            gen_helper_neon_cls_s16(tcg_res, tcg_op);
                        }
                    }
                    break;
                default:
                    g_assert_not_reached();
                }
            }

            write_vec_element_i32(s, tcg_res, rd, pass, MO_32);

            tcg_temp_free_i32(tcg_res);
            tcg_temp_free_i32(tcg_op);
        }
    }
    clear_vec_high(s, is_q, rd);

    if (need_rmode) {
        /* Restore the rounding mode saved by the earlier set_rmode call. */
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
        tcg_temp_free_i32(tcg_rmode);
    }
    if (need_fpstatus) {
        tcg_temp_free_ptr(tcg_fpstatus);
    }
}
13028
13029/* AdvSIMD [scalar] two register miscellaneous (FP16)
13030 *
13031 *   31  30  29 28  27     24  23 22 21       17 16    12 11 10 9    5 4    0
13032 * +---+---+---+---+---------+---+-------------+--------+-----+------+------+
13033 * | 0 | Q | U | S | 1 1 1 0 | a | 1 1 1 1 0 0 | opcode | 1 0 |  Rn  |  Rd  |
13034 * +---+---+---+---+---------+---+-------------+--------+-----+------+------+
13035 *   mask: 1000 1111 0111 1110 0000 1100 0000 0000 0x8f7e 0c00
13036 *   val:  0000 1110 0111 1000 0000 1000 0000 0000 0x0e78 0800
13037 *
13038 * This actually covers two groups where scalar access is governed by
13039 * bit 28. A bunch of the instructions (float to integral) only exist
13040 * in the vector form and are un-allocated for the scalar decode. Also
13041 * in the scalar decode Q is always 1.
13042 */
static void disas_simd_two_reg_misc_fp16(DisasContext *s, uint32_t insn)
{
    int fpop, opcode, a, u;
    int rn, rd;
    bool is_q;
    bool is_scalar;
    bool only_in_vector = false;

    int pass;
    TCGv_i32 tcg_rmode = NULL;
    TCGv_ptr tcg_fpstatus = NULL;
    bool need_rmode = false;
    bool need_fpst = true;
    int rmode;

    if (!dc_isar_feature(aa64_fp16, s)) {
        unallocated_encoding(s);
        return;
    }

    rd = extract32(insn, 0, 5);
    rn = extract32(insn, 5, 5);

    a = extract32(insn, 23, 1);
    u = extract32(insn, 29, 1);
    is_scalar = extract32(insn, 28, 1);
    is_q = extract32(insn, 30, 1);

    opcode = extract32(insn, 12, 5);
    /* Fold the 'a' and 'u' bits into the opcode, giving a single 7-bit
     * operation index (fpop) used by the switches below.
     */
    fpop = deposit32(opcode, 5, 1, a);
    fpop = deposit32(fpop, 6, 1, u);

    /* First pass: validate the encoding, dispatch insns with dedicated
     * expanders, and record the rounding mode / fpstatus requirements
     * for the common per-element code below.
     */
    switch (fpop) {
    case 0x1d: /* SCVTF */
    case 0x5d: /* UCVTF */
    {
        int elements;

        if (is_scalar) {
            elements = 1;
        } else {
            elements = (is_q ? 8 : 4);
        }

        if (!fp_access_check(s)) {
            return;
        }
        handle_simd_intfp_conv(s, rd, rn, elements, !u, 0, MO_16);
        return;
    }
    break;
    case 0x2c: /* FCMGT (zero) */
    case 0x2d: /* FCMEQ (zero) */
    case 0x2e: /* FCMLT (zero) */
    case 0x6c: /* FCMGE (zero) */
    case 0x6d: /* FCMLE (zero) */
        handle_2misc_fcmp_zero(s, fpop, is_scalar, 0, is_q, MO_16, rn, rd);
        return;
    case 0x3d: /* FRECPE */
    case 0x3f: /* FRECPX */
        break;
    case 0x18: /* FRINTN */
        need_rmode = true;
        only_in_vector = true;
        rmode = FPROUNDING_TIEEVEN;
        break;
    case 0x19: /* FRINTM */
        need_rmode = true;
        only_in_vector = true;
        rmode = FPROUNDING_NEGINF;
        break;
    case 0x38: /* FRINTP */
        need_rmode = true;
        only_in_vector = true;
        rmode = FPROUNDING_POSINF;
        break;
    case 0x39: /* FRINTZ */
        need_rmode = true;
        only_in_vector = true;
        rmode = FPROUNDING_ZERO;
        break;
    case 0x58: /* FRINTA */
        need_rmode = true;
        only_in_vector = true;
        rmode = FPROUNDING_TIEAWAY;
        break;
    case 0x59: /* FRINTX */
    case 0x79: /* FRINTI */
        only_in_vector = true;
        /* current rounding mode */
        break;
    case 0x1a: /* FCVTNS */
        need_rmode = true;
        rmode = FPROUNDING_TIEEVEN;
        break;
    case 0x1b: /* FCVTMS */
        need_rmode = true;
        rmode = FPROUNDING_NEGINF;
        break;
    case 0x1c: /* FCVTAS */
        need_rmode = true;
        rmode = FPROUNDING_TIEAWAY;
        break;
    case 0x3a: /* FCVTPS */
        need_rmode = true;
        rmode = FPROUNDING_POSINF;
        break;
    case 0x3b: /* FCVTZS */
        need_rmode = true;
        rmode = FPROUNDING_ZERO;
        break;
    case 0x5a: /* FCVTNU */
        need_rmode = true;
        rmode = FPROUNDING_TIEEVEN;
        break;
    case 0x5b: /* FCVTMU */
        need_rmode = true;
        rmode = FPROUNDING_NEGINF;
        break;
    case 0x5c: /* FCVTAU */
        need_rmode = true;
        rmode = FPROUNDING_TIEAWAY;
        break;
    case 0x7a: /* FCVTPU */
        need_rmode = true;
        rmode = FPROUNDING_POSINF;
        break;
    case 0x7b: /* FCVTZU */
        need_rmode = true;
        rmode = FPROUNDING_ZERO;
        break;
    case 0x2f: /* FABS */
    case 0x6f: /* FNEG */
        /* Pure sign-bit manipulation: no fpstatus needed. */
        need_fpst = false;
        break;
    case 0x7d: /* FRSQRTE */
    case 0x7f: /* FSQRT (vector) */
        break;
    default:
        unallocated_encoding(s);
        return;
    }


    /* Check additional constraints for the scalar encoding */
    if (is_scalar) {
        if (!is_q) {
            unallocated_encoding(s);
            return;
        }
        /* FRINTxx is only in the vector form */
        if (only_in_vector) {
            unallocated_encoding(s);
            return;
        }
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (need_rmode || need_fpst) {
        tcg_fpstatus = fpstatus_ptr(FPST_FPCR_F16);
    }

    if (need_rmode) {
        /* Install the required rounding mode; set_rmode leaves the
         * previous mode in tcg_rmode so it can be restored at the end.
         */
        tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
    }

    if (is_scalar) {
        TCGv_i32 tcg_op = read_fp_hreg(s, rn);
        TCGv_i32 tcg_res = tcg_temp_new_i32();

        switch (fpop) {
        case 0x1a: /* FCVTNS */
        case 0x1b: /* FCVTMS */
        case 0x1c: /* FCVTAS */
        case 0x3a: /* FCVTPS */
        case 0x3b: /* FCVTZS */
            gen_helper_advsimd_f16tosinth(tcg_res, tcg_op, tcg_fpstatus);
            break;
        case 0x3d: /* FRECPE */
            gen_helper_recpe_f16(tcg_res, tcg_op, tcg_fpstatus);
            break;
        case 0x3f: /* FRECPX */
            gen_helper_frecpx_f16(tcg_res, tcg_op, tcg_fpstatus);
            break;
        case 0x5a: /* FCVTNU */
        case 0x5b: /* FCVTMU */
        case 0x5c: /* FCVTAU */
        case 0x7a: /* FCVTPU */
        case 0x7b: /* FCVTZU */
            gen_helper_advsimd_f16touinth(tcg_res, tcg_op, tcg_fpstatus);
            break;
        case 0x6f: /* FNEG */
            tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
            break;
        case 0x7d: /* FRSQRTE */
            gen_helper_rsqrte_f16(tcg_res, tcg_op, tcg_fpstatus);
            break;
        default:
            g_assert_not_reached();
        }

        /* limit any sign extension going on */
        tcg_gen_andi_i32(tcg_res, tcg_res, 0xffff);
        write_fp_sreg(s, rd, tcg_res);

        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_op);
    } else {
        /* Vector form: one 16-bit element per pass. */
        for (pass = 0; pass < (is_q ? 8 : 4); pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op, rn, pass, MO_16);

            switch (fpop) {
            case 0x1a: /* FCVTNS */
            case 0x1b: /* FCVTMS */
            case 0x1c: /* FCVTAS */
            case 0x3a: /* FCVTPS */
            case 0x3b: /* FCVTZS */
                gen_helper_advsimd_f16tosinth(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x3d: /* FRECPE */
                gen_helper_recpe_f16(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x5a: /* FCVTNU */
            case 0x5b: /* FCVTMU */
            case 0x5c: /* FCVTAU */
            case 0x7a: /* FCVTPU */
            case 0x7b: /* FCVTZU */
                gen_helper_advsimd_f16touinth(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x18: /* FRINTN */
            case 0x19: /* FRINTM */
            case 0x38: /* FRINTP */
            case 0x39: /* FRINTZ */
            case 0x58: /* FRINTA */
            case 0x79: /* FRINTI */
                gen_helper_advsimd_rinth(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x59: /* FRINTX */
                gen_helper_advsimd_rinth_exact(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x2f: /* FABS */
                tcg_gen_andi_i32(tcg_res, tcg_op, 0x7fff);
                break;
            case 0x6f: /* FNEG */
                tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
                break;
            case 0x7d: /* FRSQRTE */
                gen_helper_rsqrte_f16(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x7f: /* FSQRT */
                gen_helper_sqrt_f16(tcg_res, tcg_op, tcg_fpstatus);
                break;
            default:
                g_assert_not_reached();
            }

            write_vec_element_i32(s, tcg_res, rd, pass, MO_16);

            tcg_temp_free_i32(tcg_res);
            tcg_temp_free_i32(tcg_op);
        }

        clear_vec_high(s, is_q, rd);
    }

    if (tcg_rmode) {
        /* Restore the rounding mode saved by the earlier set_rmode call. */
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
        tcg_temp_free_i32(tcg_rmode);
    }

    if (tcg_fpstatus) {
        tcg_temp_free_ptr(tcg_fpstatus);
    }
}
13324
13325/* AdvSIMD scalar x indexed element
13326 *  31 30  29 28       24 23  22 21  20  19  16 15 12  11  10 9    5 4    0
13327 * +-----+---+-----------+------+---+---+------+-----+---+---+------+------+
13328 * | 0 1 | U | 1 1 1 1 1 | size | L | M |  Rm  | opc | H | 0 |  Rn  |  Rd  |
13329 * +-----+---+-----------+------+---+---+------+-----+---+---+------+------+
13330 * AdvSIMD vector x indexed element
13331 *   31  30  29 28       24 23  22 21  20  19  16 15 12  11  10 9    5 4    0
13332 * +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+
13333 * | 0 | Q | U | 0 1 1 1 1 | size | L | M |  Rm  | opc | H | 0 |  Rn  |  Rd  |
13334 * +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+
13335 */
13336static void disas_simd_indexed(DisasContext *s, uint32_t insn)
13337{
13338    /* This encoding has two kinds of instruction:
13339     *  normal, where we perform elt x idxelt => elt for each
13340     *     element in the vector
13341     *  long, where we perform elt x idxelt and generate a result of
13342     *     double the width of the input element
13343     * The long ops have a 'part' specifier (ie come in INSN, INSN2 pairs).
13344     */
13345    bool is_scalar = extract32(insn, 28, 1);
13346    bool is_q = extract32(insn, 30, 1);
13347    bool u = extract32(insn, 29, 1);
13348    int size = extract32(insn, 22, 2);
13349    int l = extract32(insn, 21, 1);
13350    int m = extract32(insn, 20, 1);
13351    /* Note that the Rm field here is only 4 bits, not 5 as it usually is */
13352    int rm = extract32(insn, 16, 4);
13353    int opcode = extract32(insn, 12, 4);
13354    int h = extract32(insn, 11, 1);
13355    int rn = extract32(insn, 5, 5);
13356    int rd = extract32(insn, 0, 5);
13357    bool is_long = false;
13358    int is_fp = 0;
13359    bool is_fp16 = false;
13360    int index;
13361    TCGv_ptr fpst;
13362
13363    switch (16 * u + opcode) {
13364    case 0x08: /* MUL */
13365    case 0x10: /* MLA */
13366    case 0x14: /* MLS */
13367        if (is_scalar) {
13368            unallocated_encoding(s);
13369            return;
13370        }
13371        break;
13372    case 0x02: /* SMLAL, SMLAL2 */
13373    case 0x12: /* UMLAL, UMLAL2 */
13374    case 0x06: /* SMLSL, SMLSL2 */
13375    case 0x16: /* UMLSL, UMLSL2 */
13376    case 0x0a: /* SMULL, SMULL2 */
13377    case 0x1a: /* UMULL, UMULL2 */
13378        if (is_scalar) {
13379            unallocated_encoding(s);
13380            return;
13381        }
13382        is_long = true;
13383        break;
13384    case 0x03: /* SQDMLAL, SQDMLAL2 */
13385    case 0x07: /* SQDMLSL, SQDMLSL2 */
13386    case 0x0b: /* SQDMULL, SQDMULL2 */
13387        is_long = true;
13388        break;
13389    case 0x0c: /* SQDMULH */
13390    case 0x0d: /* SQRDMULH */
13391        break;
13392    case 0x01: /* FMLA */
13393    case 0x05: /* FMLS */
13394    case 0x09: /* FMUL */
13395    case 0x19: /* FMULX */
13396        is_fp = 1;
13397        break;
13398    case 0x1d: /* SQRDMLAH */
13399    case 0x1f: /* SQRDMLSH */
13400        if (!dc_isar_feature(aa64_rdm, s)) {
13401            unallocated_encoding(s);
13402            return;
13403        }
13404        break;
13405    case 0x0e: /* SDOT */
13406    case 0x1e: /* UDOT */
13407        if (is_scalar || size != MO_32 || !dc_isar_feature(aa64_dp, s)) {
13408            unallocated_encoding(s);
13409            return;
13410        }
13411        break;
13412    case 0x0f:
13413        switch (size) {
13414        case 0: /* SUDOT */
13415        case 2: /* USDOT */
13416            if (is_scalar || !dc_isar_feature(aa64_i8mm, s)) {
13417                unallocated_encoding(s);
13418                return;
13419            }
13420            size = MO_32;
13421            break;
13422        case 1: /* BFDOT */
13423            if (is_scalar || !dc_isar_feature(aa64_bf16, s)) {
13424                unallocated_encoding(s);
13425                return;
13426            }
13427            size = MO_32;
13428            break;
13429        case 3: /* BFMLAL{B,T} */
13430            if (is_scalar || !dc_isar_feature(aa64_bf16, s)) {
13431                unallocated_encoding(s);
13432                return;
13433            }
13434            /* can't set is_fp without other incorrect size checks */
13435            size = MO_16;
13436            break;
13437        default:
13438            unallocated_encoding(s);
13439            return;
13440        }
13441        break;
13442    case 0x11: /* FCMLA #0 */
13443    case 0x13: /* FCMLA #90 */
13444    case 0x15: /* FCMLA #180 */
13445    case 0x17: /* FCMLA #270 */
13446        if (is_scalar || !dc_isar_feature(aa64_fcma, s)) {
13447            unallocated_encoding(s);
13448            return;
13449        }
13450        is_fp = 2;
13451        break;
13452    case 0x00: /* FMLAL */
13453    case 0x04: /* FMLSL */
13454    case 0x18: /* FMLAL2 */
13455    case 0x1c: /* FMLSL2 */
13456        if (is_scalar || size != MO_32 || !dc_isar_feature(aa64_fhm, s)) {
13457            unallocated_encoding(s);
13458            return;
13459        }
13460        size = MO_16;
13461        /* is_fp, but we pass cpu_env not fp_status.  */
13462        break;
13463    default:
13464        unallocated_encoding(s);
13465        return;
13466    }
13467
13468    switch (is_fp) {
13469    case 1: /* normal fp */
13470        /* convert insn encoded size to MemOp size */
13471        switch (size) {
13472        case 0: /* half-precision */
13473            size = MO_16;
13474            is_fp16 = true;
13475            break;
13476        case MO_32: /* single precision */
13477        case MO_64: /* double precision */
13478            break;
13479        default:
13480            unallocated_encoding(s);
13481            return;
13482        }
13483        break;
13484
13485    case 2: /* complex fp */
13486        /* Each indexable element is a complex pair.  */
13487        size += 1;
13488        switch (size) {
13489        case MO_32:
13490            if (h && !is_q) {
13491                unallocated_encoding(s);
13492                return;
13493            }
13494            is_fp16 = true;
13495            break;
13496        case MO_64:
13497            break;
13498        default:
13499            unallocated_encoding(s);
13500            return;
13501        }
13502        break;
13503
13504    default: /* integer */
13505        switch (size) {
13506        case MO_8:
13507        case MO_64:
13508            unallocated_encoding(s);
13509            return;
13510        }
13511        break;
13512    }
13513    if (is_fp16 && !dc_isar_feature(aa64_fp16, s)) {
13514        unallocated_encoding(s);
13515        return;
13516    }
13517
13518    /* Given MemOp size, adjust register and indexing.  */
13519    switch (size) {
13520    case MO_16:
13521        index = h << 2 | l << 1 | m;
13522        break;
13523    case MO_32:
13524        index = h << 1 | l;
13525        rm |= m << 4;
13526        break;
13527    case MO_64:
13528        if (l || !is_q) {
13529            unallocated_encoding(s);
13530            return;
13531        }
13532        index = h;
13533        rm |= m << 4;
13534        break;
13535    default:
13536        g_assert_not_reached();
13537    }
13538
13539    if (!fp_access_check(s)) {
13540        return;
13541    }
13542
13543    if (is_fp) {
13544        fpst = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
13545    } else {
13546        fpst = NULL;
13547    }
13548
13549    switch (16 * u + opcode) {
13550    case 0x0e: /* SDOT */
13551    case 0x1e: /* UDOT */
13552        gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
13553                         u ? gen_helper_gvec_udot_idx_b
13554                         : gen_helper_gvec_sdot_idx_b);
13555        return;
13556    case 0x0f:
13557        switch (extract32(insn, 22, 2)) {
13558        case 0: /* SUDOT */
13559            gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
13560                             gen_helper_gvec_sudot_idx_b);
13561            return;
13562        case 1: /* BFDOT */
13563            gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
13564                             gen_helper_gvec_bfdot_idx);
13565            return;
13566        case 2: /* USDOT */
13567            gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
13568                             gen_helper_gvec_usdot_idx_b);
13569            return;
13570        case 3: /* BFMLAL{B,T} */
13571            gen_gvec_op4_fpst(s, 1, rd, rn, rm, rd, 0, (index << 1) | is_q,
13572                              gen_helper_gvec_bfmlal_idx);
13573            return;
13574        }
13575        g_assert_not_reached();
13576    case 0x11: /* FCMLA #0 */
13577    case 0x13: /* FCMLA #90 */
13578    case 0x15: /* FCMLA #180 */
13579    case 0x17: /* FCMLA #270 */
13580        {
13581            int rot = extract32(insn, 13, 2);
13582            int data = (index << 2) | rot;
13583            tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, rd),
13584                               vec_full_reg_offset(s, rn),
13585                               vec_full_reg_offset(s, rm),
13586                               vec_full_reg_offset(s, rd), fpst,
13587                               is_q ? 16 : 8, vec_full_reg_size(s), data,
13588                               size == MO_64
13589                               ? gen_helper_gvec_fcmlas_idx
13590                               : gen_helper_gvec_fcmlah_idx);
13591            tcg_temp_free_ptr(fpst);
13592        }
13593        return;
13594
13595    case 0x00: /* FMLAL */
13596    case 0x04: /* FMLSL */
13597    case 0x18: /* FMLAL2 */
13598    case 0x1c: /* FMLSL2 */
13599        {
13600            int is_s = extract32(opcode, 2, 1);
13601            int is_2 = u;
13602            int data = (index << 2) | (is_2 << 1) | is_s;
13603            tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
13604                               vec_full_reg_offset(s, rn),
13605                               vec_full_reg_offset(s, rm), cpu_env,
13606                               is_q ? 16 : 8, vec_full_reg_size(s),
13607                               data, gen_helper_gvec_fmlal_idx_a64);
13608        }
13609        return;
13610
13611    case 0x08: /* MUL */
13612        if (!is_long && !is_scalar) {
13613            static gen_helper_gvec_3 * const fns[3] = {
13614                gen_helper_gvec_mul_idx_h,
13615                gen_helper_gvec_mul_idx_s,
13616                gen_helper_gvec_mul_idx_d,
13617            };
13618            tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd),
13619                               vec_full_reg_offset(s, rn),
13620                               vec_full_reg_offset(s, rm),
13621                               is_q ? 16 : 8, vec_full_reg_size(s),
13622                               index, fns[size - 1]);
13623            return;
13624        }
13625        break;
13626
13627    case 0x10: /* MLA */
13628        if (!is_long && !is_scalar) {
13629            static gen_helper_gvec_4 * const fns[3] = {
13630                gen_helper_gvec_mla_idx_h,
13631                gen_helper_gvec_mla_idx_s,
13632                gen_helper_gvec_mla_idx_d,
13633            };
13634            tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
13635                               vec_full_reg_offset(s, rn),
13636                               vec_full_reg_offset(s, rm),
13637                               vec_full_reg_offset(s, rd),
13638                               is_q ? 16 : 8, vec_full_reg_size(s),
13639                               index, fns[size - 1]);
13640            return;
13641        }
13642        break;
13643
13644    case 0x14: /* MLS */
13645        if (!is_long && !is_scalar) {
13646            static gen_helper_gvec_4 * const fns[3] = {
13647                gen_helper_gvec_mls_idx_h,
13648                gen_helper_gvec_mls_idx_s,
13649                gen_helper_gvec_mls_idx_d,
13650            };
13651            tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
13652                               vec_full_reg_offset(s, rn),
13653                               vec_full_reg_offset(s, rm),
13654                               vec_full_reg_offset(s, rd),
13655                               is_q ? 16 : 8, vec_full_reg_size(s),
13656                               index, fns[size - 1]);
13657            return;
13658        }
13659        break;
13660    }
13661
13662    if (size == 3) {
13663        TCGv_i64 tcg_idx = tcg_temp_new_i64();
13664        int pass;
13665
13666        assert(is_fp && is_q && !is_long);
13667
13668        read_vec_element(s, tcg_idx, rm, index, MO_64);
13669
13670        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
13671            TCGv_i64 tcg_op = tcg_temp_new_i64();
13672            TCGv_i64 tcg_res = tcg_temp_new_i64();
13673
13674            read_vec_element(s, tcg_op, rn, pass, MO_64);
13675
13676            switch (16 * u + opcode) {
13677            case 0x05: /* FMLS */
13678                /* As usual for ARM, separate negation for fused multiply-add */
13679                gen_helper_vfp_negd(tcg_op, tcg_op);
13680                /* fall through */
13681            case 0x01: /* FMLA */
13682                read_vec_element(s, tcg_res, rd, pass, MO_64);
13683                gen_helper_vfp_muladdd(tcg_res, tcg_op, tcg_idx, tcg_res, fpst);
13684                break;
13685            case 0x09: /* FMUL */
13686                gen_helper_vfp_muld(tcg_res, tcg_op, tcg_idx, fpst);
13687                break;
13688            case 0x19: /* FMULX */
13689                gen_helper_vfp_mulxd(tcg_res, tcg_op, tcg_idx, fpst);
13690                break;
13691            default:
13692                g_assert_not_reached();
13693            }
13694
13695            write_vec_element(s, tcg_res, rd, pass, MO_64);
13696            tcg_temp_free_i64(tcg_op);
13697            tcg_temp_free_i64(tcg_res);
13698        }
13699
13700        tcg_temp_free_i64(tcg_idx);
13701        clear_vec_high(s, !is_scalar, rd);
13702    } else if (!is_long) {
13703        /* 32 bit floating point, or 16 or 32 bit integer.
13704         * For the 16 bit scalar case we use the usual Neon helpers and
13705         * rely on the fact that 0 op 0 == 0 with no side effects.
13706         */
13707        TCGv_i32 tcg_idx = tcg_temp_new_i32();
13708        int pass, maxpasses;
13709
13710        if (is_scalar) {
13711            maxpasses = 1;
13712        } else {
13713            maxpasses = is_q ? 4 : 2;
13714        }
13715
13716        read_vec_element_i32(s, tcg_idx, rm, index, size);
13717
13718        if (size == 1 && !is_scalar) {
13719            /* The simplest way to handle the 16x16 indexed ops is to duplicate
13720             * the index into both halves of the 32 bit tcg_idx and then use
13721             * the usual Neon helpers.
13722             */
13723            tcg_gen_deposit_i32(tcg_idx, tcg_idx, tcg_idx, 16, 16);
13724        }
13725
13726        for (pass = 0; pass < maxpasses; pass++) {
13727            TCGv_i32 tcg_op = tcg_temp_new_i32();
13728            TCGv_i32 tcg_res = tcg_temp_new_i32();
13729
13730            read_vec_element_i32(s, tcg_op, rn, pass, is_scalar ? size : MO_32);
13731
13732            switch (16 * u + opcode) {
13733            case 0x08: /* MUL */
13734            case 0x10: /* MLA */
13735            case 0x14: /* MLS */
13736            {
13737                static NeonGenTwoOpFn * const fns[2][2] = {
13738                    { gen_helper_neon_add_u16, gen_helper_neon_sub_u16 },
13739                    { tcg_gen_add_i32, tcg_gen_sub_i32 },
13740                };
13741                NeonGenTwoOpFn *genfn;
13742                bool is_sub = opcode == 0x4;
13743
13744                if (size == 1) {
13745                    gen_helper_neon_mul_u16(tcg_res, tcg_op, tcg_idx);
13746                } else {
13747                    tcg_gen_mul_i32(tcg_res, tcg_op, tcg_idx);
13748                }
13749                if (opcode == 0x8) {
13750                    break;
13751                }
13752                read_vec_element_i32(s, tcg_op, rd, pass, MO_32);
13753                genfn = fns[size - 1][is_sub];
13754                genfn(tcg_res, tcg_op, tcg_res);
13755                break;
13756            }
13757            case 0x05: /* FMLS */
13758            case 0x01: /* FMLA */
13759                read_vec_element_i32(s, tcg_res, rd, pass,
13760                                     is_scalar ? size : MO_32);
13761                switch (size) {
13762                case 1:
13763                    if (opcode == 0x5) {
13764                        /* As usual for ARM, separate negation for fused
13765                         * multiply-add */
13766                        tcg_gen_xori_i32(tcg_op, tcg_op, 0x80008000);
13767                    }
13768                    if (is_scalar) {
13769                        gen_helper_advsimd_muladdh(tcg_res, tcg_op, tcg_idx,
13770                                                   tcg_res, fpst);
13771                    } else {
13772                        gen_helper_advsimd_muladd2h(tcg_res, tcg_op, tcg_idx,
13773                                                    tcg_res, fpst);
13774                    }
13775                    break;
13776                case 2:
13777                    if (opcode == 0x5) {
13778                        /* As usual for ARM, separate negation for
13779                         * fused multiply-add */
13780                        tcg_gen_xori_i32(tcg_op, tcg_op, 0x80000000);
13781                    }
13782                    gen_helper_vfp_muladds(tcg_res, tcg_op, tcg_idx,
13783                                           tcg_res, fpst);
13784                    break;
13785                default:
13786                    g_assert_not_reached();
13787                }
13788                break;
13789            case 0x09: /* FMUL */
13790                switch (size) {
13791                case 1:
13792                    if (is_scalar) {
13793                        gen_helper_advsimd_mulh(tcg_res, tcg_op,
13794                                                tcg_idx, fpst);
13795                    } else {
13796                        gen_helper_advsimd_mul2h(tcg_res, tcg_op,
13797                                                 tcg_idx, fpst);
13798                    }
13799                    break;
13800                case 2:
13801                    gen_helper_vfp_muls(tcg_res, tcg_op, tcg_idx, fpst);
13802                    break;
13803                default:
13804                    g_assert_not_reached();
13805                }
13806                break;
13807            case 0x19: /* FMULX */
13808                switch (size) {
13809                case 1:
13810                    if (is_scalar) {
13811                        gen_helper_advsimd_mulxh(tcg_res, tcg_op,
13812                                                 tcg_idx, fpst);
13813                    } else {
13814                        gen_helper_advsimd_mulx2h(tcg_res, tcg_op,
13815                                                  tcg_idx, fpst);
13816                    }
13817                    break;
13818                case 2:
13819                    gen_helper_vfp_mulxs(tcg_res, tcg_op, tcg_idx, fpst);
13820                    break;
13821                default:
13822                    g_assert_not_reached();
13823                }
13824                break;
13825            case 0x0c: /* SQDMULH */
13826                if (size == 1) {
13827                    gen_helper_neon_qdmulh_s16(tcg_res, cpu_env,
13828                                               tcg_op, tcg_idx);
13829                } else {
13830                    gen_helper_neon_qdmulh_s32(tcg_res, cpu_env,
13831                                               tcg_op, tcg_idx);
13832                }
13833                break;
13834            case 0x0d: /* SQRDMULH */
13835                if (size == 1) {
13836                    gen_helper_neon_qrdmulh_s16(tcg_res, cpu_env,
13837                                                tcg_op, tcg_idx);
13838                } else {
13839                    gen_helper_neon_qrdmulh_s32(tcg_res, cpu_env,
13840                                                tcg_op, tcg_idx);
13841                }
13842                break;
13843            case 0x1d: /* SQRDMLAH */
13844                read_vec_element_i32(s, tcg_res, rd, pass,
13845                                     is_scalar ? size : MO_32);
13846                if (size == 1) {
13847                    gen_helper_neon_qrdmlah_s16(tcg_res, cpu_env,
13848                                                tcg_op, tcg_idx, tcg_res);
13849                } else {
13850                    gen_helper_neon_qrdmlah_s32(tcg_res, cpu_env,
13851                                                tcg_op, tcg_idx, tcg_res);
13852                }
13853                break;
13854            case 0x1f: /* SQRDMLSH */
13855                read_vec_element_i32(s, tcg_res, rd, pass,
13856                                     is_scalar ? size : MO_32);
13857                if (size == 1) {
13858                    gen_helper_neon_qrdmlsh_s16(tcg_res, cpu_env,
13859                                                tcg_op, tcg_idx, tcg_res);
13860                } else {
13861                    gen_helper_neon_qrdmlsh_s32(tcg_res, cpu_env,
13862                                                tcg_op, tcg_idx, tcg_res);
13863                }
13864                break;
13865            default:
13866                g_assert_not_reached();
13867            }
13868
13869            if (is_scalar) {
13870                write_fp_sreg(s, rd, tcg_res);
13871            } else {
13872                write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
13873            }
13874
13875            tcg_temp_free_i32(tcg_op);
13876            tcg_temp_free_i32(tcg_res);
13877        }
13878
13879        tcg_temp_free_i32(tcg_idx);
13880        clear_vec_high(s, is_q, rd);
13881    } else {
13882        /* long ops: 16x16->32 or 32x32->64 */
13883        TCGv_i64 tcg_res[2];
13884        int pass;
13885        bool satop = extract32(opcode, 0, 1);
13886        MemOp memop = MO_32;
13887
13888        if (satop || !u) {
13889            memop |= MO_SIGN;
13890        }
13891
13892        if (size == 2) {
13893            TCGv_i64 tcg_idx = tcg_temp_new_i64();
13894
13895            read_vec_element(s, tcg_idx, rm, index, memop);
13896
13897            for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
13898                TCGv_i64 tcg_op = tcg_temp_new_i64();
13899                TCGv_i64 tcg_passres;
13900                int passelt;
13901
13902                if (is_scalar) {
13903                    passelt = 0;
13904                } else {
13905                    passelt = pass + (is_q * 2);
13906                }
13907
13908                read_vec_element(s, tcg_op, rn, passelt, memop);
13909
13910                tcg_res[pass] = tcg_temp_new_i64();
13911
13912                if (opcode == 0xa || opcode == 0xb) {
13913                    /* Non-accumulating ops */
13914                    tcg_passres = tcg_res[pass];
13915                } else {
13916                    tcg_passres = tcg_temp_new_i64();
13917                }
13918
13919                tcg_gen_mul_i64(tcg_passres, tcg_op, tcg_idx);
13920                tcg_temp_free_i64(tcg_op);
13921
13922                if (satop) {
13923                    /* saturating, doubling */
13924                    gen_helper_neon_addl_saturate_s64(tcg_passres, cpu_env,
13925                                                      tcg_passres, tcg_passres);
13926                }
13927
13928                if (opcode == 0xa || opcode == 0xb) {
13929                    continue;
13930                }
13931
13932                /* Accumulating op: handle accumulate step */
13933                read_vec_element(s, tcg_res[pass], rd, pass, MO_64);
13934
13935                switch (opcode) {
13936                case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
13937                    tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
13938                    break;
13939                case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
13940                    tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
13941                    break;
13942                case 0x7: /* SQDMLSL, SQDMLSL2 */
13943                    tcg_gen_neg_i64(tcg_passres, tcg_passres);
13944                    /* fall through */
13945                case 0x3: /* SQDMLAL, SQDMLAL2 */
13946                    gen_helper_neon_addl_saturate_s64(tcg_res[pass], cpu_env,
13947                                                      tcg_res[pass],
13948                                                      tcg_passres);
13949                    break;
13950                default:
13951                    g_assert_not_reached();
13952                }
13953                tcg_temp_free_i64(tcg_passres);
13954            }
13955            tcg_temp_free_i64(tcg_idx);
13956
13957            clear_vec_high(s, !is_scalar, rd);
13958        } else {
13959            TCGv_i32 tcg_idx = tcg_temp_new_i32();
13960
13961            assert(size == 1);
13962            read_vec_element_i32(s, tcg_idx, rm, index, size);
13963
13964            if (!is_scalar) {
13965                /* The simplest way to handle the 16x16 indexed ops is to
13966                 * duplicate the index into both halves of the 32 bit tcg_idx
13967                 * and then use the usual Neon helpers.
13968                 */
13969                tcg_gen_deposit_i32(tcg_idx, tcg_idx, tcg_idx, 16, 16);
13970            }
13971
13972            for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
13973                TCGv_i32 tcg_op = tcg_temp_new_i32();
13974                TCGv_i64 tcg_passres;
13975
13976                if (is_scalar) {
13977                    read_vec_element_i32(s, tcg_op, rn, pass, size);
13978                } else {
13979                    read_vec_element_i32(s, tcg_op, rn,
13980                                         pass + (is_q * 2), MO_32);
13981                }
13982
13983                tcg_res[pass] = tcg_temp_new_i64();
13984
13985                if (opcode == 0xa || opcode == 0xb) {
13986                    /* Non-accumulating ops */
13987                    tcg_passres = tcg_res[pass];
13988                } else {
13989                    tcg_passres = tcg_temp_new_i64();
13990                }
13991
13992                if (memop & MO_SIGN) {
13993                    gen_helper_neon_mull_s16(tcg_passres, tcg_op, tcg_idx);
13994                } else {
13995                    gen_helper_neon_mull_u16(tcg_passres, tcg_op, tcg_idx);
13996                }
13997                if (satop) {
13998                    gen_helper_neon_addl_saturate_s32(tcg_passres, cpu_env,
13999                                                      tcg_passres, tcg_passres);
14000                }
14001                tcg_temp_free_i32(tcg_op);
14002
14003                if (opcode == 0xa || opcode == 0xb) {
14004                    continue;
14005                }
14006
14007                /* Accumulating op: handle accumulate step */
14008                read_vec_element(s, tcg_res[pass], rd, pass, MO_64);
14009
14010                switch (opcode) {
14011                case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
14012                    gen_helper_neon_addl_u32(tcg_res[pass], tcg_res[pass],
14013                                             tcg_passres);
14014                    break;
14015                case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
14016                    gen_helper_neon_subl_u32(tcg_res[pass], tcg_res[pass],
14017                                             tcg_passres);
14018                    break;
14019                case 0x7: /* SQDMLSL, SQDMLSL2 */
14020                    gen_helper_neon_negl_u32(tcg_passres, tcg_passres);
14021                    /* fall through */
14022                case 0x3: /* SQDMLAL, SQDMLAL2 */
14023                    gen_helper_neon_addl_saturate_s32(tcg_res[pass], cpu_env,
14024                                                      tcg_res[pass],
14025                                                      tcg_passres);
14026                    break;
14027                default:
14028                    g_assert_not_reached();
14029                }
14030                tcg_temp_free_i64(tcg_passres);
14031            }
14032            tcg_temp_free_i32(tcg_idx);
14033
14034            if (is_scalar) {
14035                tcg_gen_ext32u_i64(tcg_res[0], tcg_res[0]);
14036            }
14037        }
14038
14039        if (is_scalar) {
14040            tcg_res[1] = tcg_constant_i64(0);
14041        }
14042
14043        for (pass = 0; pass < 2; pass++) {
14044            write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
14045            tcg_temp_free_i64(tcg_res[pass]);
14046        }
14047    }
14048
14049    if (fpst) {
14050        tcg_temp_free_ptr(fpst);
14051    }
14052}
14053
14054/* Crypto AES
14055 *  31             24 23  22 21       17 16    12 11 10 9    5 4    0
14056 * +-----------------+------+-----------+--------+-----+------+------+
14057 * | 0 1 0 0 1 1 1 0 | size | 1 0 1 0 0 | opcode | 1 0 |  Rn  |  Rd  |
14058 * +-----------------+------+-----------+--------+-----+------+------+
14059 */
14060static void disas_crypto_aes(DisasContext *s, uint32_t insn)
14061{
14062    int size = extract32(insn, 22, 2);
14063    int opcode = extract32(insn, 12, 5);
14064    int rn = extract32(insn, 5, 5);
14065    int rd = extract32(insn, 0, 5);
14066    int decrypt;
14067    gen_helper_gvec_2 *genfn2 = NULL;
14068    gen_helper_gvec_3 *genfn3 = NULL;
14069
14070    if (!dc_isar_feature(aa64_aes, s) || size != 0) {
14071        unallocated_encoding(s);
14072        return;
14073    }
14074
14075    switch (opcode) {
14076    case 0x4: /* AESE */
14077        decrypt = 0;
14078        genfn3 = gen_helper_crypto_aese;
14079        break;
14080    case 0x6: /* AESMC */
14081        decrypt = 0;
14082        genfn2 = gen_helper_crypto_aesmc;
14083        break;
14084    case 0x5: /* AESD */
14085        decrypt = 1;
14086        genfn3 = gen_helper_crypto_aese;
14087        break;
14088    case 0x7: /* AESIMC */
14089        decrypt = 1;
14090        genfn2 = gen_helper_crypto_aesmc;
14091        break;
14092    default:
14093        unallocated_encoding(s);
14094        return;
14095    }
14096
14097    if (!fp_access_check(s)) {
14098        return;
14099    }
14100    if (genfn2) {
14101        gen_gvec_op2_ool(s, true, rd, rn, decrypt, genfn2);
14102    } else {
14103        gen_gvec_op3_ool(s, true, rd, rd, rn, decrypt, genfn3);
14104    }
14105}
14106
14107/* Crypto three-reg SHA
14108 *  31             24 23  22  21 20  16  15 14    12 11 10 9    5 4    0
14109 * +-----------------+------+---+------+---+--------+-----+------+------+
14110 * | 0 1 0 1 1 1 1 0 | size | 0 |  Rm  | 0 | opcode | 0 0 |  Rn  |  Rd  |
14111 * +-----------------+------+---+------+---+--------+-----+------+------+
14112 */
14113static void disas_crypto_three_reg_sha(DisasContext *s, uint32_t insn)
14114{
14115    int size = extract32(insn, 22, 2);
14116    int opcode = extract32(insn, 12, 3);
14117    int rm = extract32(insn, 16, 5);
14118    int rn = extract32(insn, 5, 5);
14119    int rd = extract32(insn, 0, 5);
14120    gen_helper_gvec_3 *genfn;
14121    bool feature;
14122
14123    if (size != 0) {
14124        unallocated_encoding(s);
14125        return;
14126    }
14127
14128    switch (opcode) {
14129    case 0: /* SHA1C */
14130        genfn = gen_helper_crypto_sha1c;
14131        feature = dc_isar_feature(aa64_sha1, s);
14132        break;
14133    case 1: /* SHA1P */
14134        genfn = gen_helper_crypto_sha1p;
14135        feature = dc_isar_feature(aa64_sha1, s);
14136        break;
14137    case 2: /* SHA1M */
14138        genfn = gen_helper_crypto_sha1m;
14139        feature = dc_isar_feature(aa64_sha1, s);
14140        break;
14141    case 3: /* SHA1SU0 */
14142        genfn = gen_helper_crypto_sha1su0;
14143        feature = dc_isar_feature(aa64_sha1, s);
14144        break;
14145    case 4: /* SHA256H */
14146        genfn = gen_helper_crypto_sha256h;
14147        feature = dc_isar_feature(aa64_sha256, s);
14148        break;
14149    case 5: /* SHA256H2 */
14150        genfn = gen_helper_crypto_sha256h2;
14151        feature = dc_isar_feature(aa64_sha256, s);
14152        break;
14153    case 6: /* SHA256SU1 */
14154        genfn = gen_helper_crypto_sha256su1;
14155        feature = dc_isar_feature(aa64_sha256, s);
14156        break;
14157    default:
14158        unallocated_encoding(s);
14159        return;
14160    }
14161
14162    if (!feature) {
14163        unallocated_encoding(s);
14164        return;
14165    }
14166
14167    if (!fp_access_check(s)) {
14168        return;
14169    }
14170    gen_gvec_op3_ool(s, true, rd, rn, rm, 0, genfn);
14171}
14172
14173/* Crypto two-reg SHA
14174 *  31             24 23  22 21       17 16    12 11 10 9    5 4    0
14175 * +-----------------+------+-----------+--------+-----+------+------+
14176 * | 0 1 0 1 1 1 1 0 | size | 1 0 1 0 0 | opcode | 1 0 |  Rn  |  Rd  |
14177 * +-----------------+------+-----------+--------+-----+------+------+
14178 */
14179static void disas_crypto_two_reg_sha(DisasContext *s, uint32_t insn)
14180{
14181    int size = extract32(insn, 22, 2);
14182    int opcode = extract32(insn, 12, 5);
14183    int rn = extract32(insn, 5, 5);
14184    int rd = extract32(insn, 0, 5);
14185    gen_helper_gvec_2 *genfn;
14186    bool feature;
14187
14188    if (size != 0) {
14189        unallocated_encoding(s);
14190        return;
14191    }
14192
14193    switch (opcode) {
14194    case 0: /* SHA1H */
14195        feature = dc_isar_feature(aa64_sha1, s);
14196        genfn = gen_helper_crypto_sha1h;
14197        break;
14198    case 1: /* SHA1SU1 */
14199        feature = dc_isar_feature(aa64_sha1, s);
14200        genfn = gen_helper_crypto_sha1su1;
14201        break;
14202    case 2: /* SHA256SU0 */
14203        feature = dc_isar_feature(aa64_sha256, s);
14204        genfn = gen_helper_crypto_sha256su0;
14205        break;
14206    default:
14207        unallocated_encoding(s);
14208        return;
14209    }
14210
14211    if (!feature) {
14212        unallocated_encoding(s);
14213        return;
14214    }
14215
14216    if (!fp_access_check(s)) {
14217        return;
14218    }
14219    gen_gvec_op2_ool(s, true, rd, rn, 0, genfn);
14220}
14221
14222static void gen_rax1_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m)
14223{
14224    tcg_gen_rotli_i64(d, m, 1);
14225    tcg_gen_xor_i64(d, d, n);
14226}
14227
14228static void gen_rax1_vec(unsigned vece, TCGv_vec d, TCGv_vec n, TCGv_vec m)
14229{
14230    tcg_gen_rotli_vec(vece, d, m, 1);
14231    tcg_gen_xor_vec(vece, d, d, n);
14232}
14233
14234void gen_gvec_rax1(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
14235                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
14236{
14237    static const TCGOpcode vecop_list[] = { INDEX_op_rotli_vec, 0 };
14238    static const GVecGen3 op = {
14239        .fni8 = gen_rax1_i64,
14240        .fniv = gen_rax1_vec,
14241        .opt_opc = vecop_list,
14242        .fno = gen_helper_crypto_rax1,
14243        .vece = MO_64,
14244    };
14245    tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &op);
14246}
14247
14248/* Crypto three-reg SHA512
14249 *  31                   21 20  16 15  14  13 12  11  10  9    5 4    0
14250 * +-----------------------+------+---+---+-----+--------+------+------+
14251 * | 1 1 0 0 1 1 1 0 0 1 1 |  Rm  | 1 | O | 0 0 | opcode |  Rn  |  Rd  |
14252 * +-----------------------+------+---+---+-----+--------+------+------+
14253 */
static void disas_crypto_three_reg_sha512(DisasContext *s, uint32_t insn)
{
    int opcode = extract32(insn, 10, 2);
    int o =  extract32(insn, 14, 1);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    bool feature;
    /* Exactly one of the following is set for each valid encoding. */
    gen_helper_gvec_3 *oolfn = NULL;  /* out-of-line helper call */
    GVecGen3Fn *gvecfn = NULL;        /* inline gvec expansion */

    if (o == 0) {
        /* O == 0: SHA512 group, plus RAX1 which belongs to SHA3. */
        switch (opcode) {
        case 0: /* SHA512H */
            feature = dc_isar_feature(aa64_sha512, s);
            oolfn = gen_helper_crypto_sha512h;
            break;
        case 1: /* SHA512H2 */
            feature = dc_isar_feature(aa64_sha512, s);
            oolfn = gen_helper_crypto_sha512h2;
            break;
        case 2: /* SHA512SU1 */
            feature = dc_isar_feature(aa64_sha512, s);
            oolfn = gen_helper_crypto_sha512su1;
            break;
        case 3: /* RAX1 */
            feature = dc_isar_feature(aa64_sha3, s);
            gvecfn = gen_gvec_rax1;
            break;
        default:
            /* opcode is a 2-bit field; all four values are handled above. */
            g_assert_not_reached();
        }
    } else {
        /* O == 1: SM3/SM4 group; opcode == 3 is unallocated here. */
        switch (opcode) {
        case 0: /* SM3PARTW1 */
            feature = dc_isar_feature(aa64_sm3, s);
            oolfn = gen_helper_crypto_sm3partw1;
            break;
        case 1: /* SM3PARTW2 */
            feature = dc_isar_feature(aa64_sm3, s);
            oolfn = gen_helper_crypto_sm3partw2;
            break;
        case 2: /* SM4EKEY */
            feature = dc_isar_feature(aa64_sm4, s);
            oolfn = gen_helper_crypto_sm4ekey;
            break;
        default:
            unallocated_encoding(s);
            return;
        }
    }

    /* A missing ISA feature UNDEFs before the FP access check is made. */
    if (!feature) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (oolfn) {
        gen_gvec_op3_ool(s, true, rd, rn, rm, 0, oolfn);
    } else {
        gen_gvec_fn3(s, true, rd, rn, rm, gvecfn, MO_64);
    }
}
14321
14322/* Crypto two-reg SHA512
14323 *  31                                     12  11  10  9    5 4    0
14324 * +-----------------------------------------+--------+------+------+
14325 * | 1 1 0 0 1 1 1 0 1 1 0 0 0 0 0 0 1 0 0 0 | opcode |  Rn  |  Rd  |
14326 * +-----------------------------------------+--------+------+------+
14327 */
static void disas_crypto_two_reg_sha512(DisasContext *s, uint32_t insn)
{
    int opcode = extract32(insn, 10, 2);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    bool feature;

    /* First pass over opcode: map to the ISA feature that must be present. */
    switch (opcode) {
    case 0: /* SHA512SU0 */
        feature = dc_isar_feature(aa64_sha512, s);
        break;
    case 1: /* SM4E */
        feature = dc_isar_feature(aa64_sm4, s);
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!feature) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* Second pass: emit the helper call now that all checks have passed. */
    switch (opcode) {
    case 0: /* SHA512SU0 */
        gen_gvec_op2_ool(s, true, rd, rn, 0, gen_helper_crypto_sha512su0);
        break;
    case 1: /* SM4E */
        /* SM4E both reads and writes Vd: pass rd as dest and first source. */
        gen_gvec_op3_ool(s, true, rd, rd, rn, 0, gen_helper_crypto_sm4e);
        break;
    default:
        /* Any other opcode already took the unallocated path above. */
        g_assert_not_reached();
    }
}
14367
14368/* Crypto four-register
14369 *  31               23 22 21 20  16 15  14  10 9    5 4    0
14370 * +-------------------+-----+------+---+------+------+------+
14371 * | 1 1 0 0 1 1 1 0 0 | Op0 |  Rm  | 0 |  Ra  |  Rn  |  Rd  |
14372 * +-------------------+-----+------+---+------+------+------+
14373 */
static void disas_crypto_four_reg(DisasContext *s, uint32_t insn)
{
    int op0 = extract32(insn, 21, 2);
    int rm = extract32(insn, 16, 5);
    int ra = extract32(insn, 10, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    bool feature;

    switch (op0) {
    case 0: /* EOR3 */
    case 1: /* BCAX */
        feature = dc_isar_feature(aa64_sha3, s);
        break;
    case 2: /* SM3SS1 */
        feature = dc_isar_feature(aa64_sm3, s);
        break;
    default: /* op0 == 3 is unallocated */
        unallocated_encoding(s);
        return;
    }

    if (!feature) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (op0 < 2) {
        /*
         * EOR3: Vd = Vn ^ Vm ^ Va
         * BCAX: Vd = Vn ^ (Vm & ~Va)
         * Both act on the full 128-bit vector, one 64-bit lane per pass.
         */
        TCGv_i64 tcg_op1, tcg_op2, tcg_op3, tcg_res[2];
        int pass;

        tcg_op1 = tcg_temp_new_i64();
        tcg_op2 = tcg_temp_new_i64();
        tcg_op3 = tcg_temp_new_i64();
        tcg_res[0] = tcg_temp_new_i64();
        tcg_res[1] = tcg_temp_new_i64();

        for (pass = 0; pass < 2; pass++) {
            read_vec_element(s, tcg_op1, rn, pass, MO_64);
            read_vec_element(s, tcg_op2, rm, pass, MO_64);
            read_vec_element(s, tcg_op3, ra, pass, MO_64);

            if (op0 == 0) {
                /* EOR3 */
                tcg_gen_xor_i64(tcg_res[pass], tcg_op2, tcg_op3);
            } else {
                /* BCAX */
                tcg_gen_andc_i64(tcg_res[pass], tcg_op2, tcg_op3);
            }
            tcg_gen_xor_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
        }
        /* Results are written only after both lanes are computed. */
        write_vec_element(s, tcg_res[0], rd, 0, MO_64);
        write_vec_element(s, tcg_res[1], rd, 1, MO_64);

        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2);
        tcg_temp_free_i64(tcg_op3);
        tcg_temp_free_i64(tcg_res[0]);
        tcg_temp_free_i64(tcg_res[1]);
    } else {
        /*
         * SM3SS1: only 32-bit element 3 of each source participates;
         * elements 0..2 of Vd are written with zero.
         *   Vd<3> = ror32(ror32(Vn<3>, 20) + Vm<3> + Va<3>, 25)
         */
        TCGv_i32 tcg_op1, tcg_op2, tcg_op3, tcg_res, tcg_zero;

        tcg_op1 = tcg_temp_new_i32();
        tcg_op2 = tcg_temp_new_i32();
        tcg_op3 = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        tcg_zero = tcg_constant_i32(0);

        read_vec_element_i32(s, tcg_op1, rn, 3, MO_32);
        read_vec_element_i32(s, tcg_op2, rm, 3, MO_32);
        read_vec_element_i32(s, tcg_op3, ra, 3, MO_32);

        tcg_gen_rotri_i32(tcg_res, tcg_op1, 20);
        tcg_gen_add_i32(tcg_res, tcg_res, tcg_op2);
        tcg_gen_add_i32(tcg_res, tcg_res, tcg_op3);
        tcg_gen_rotri_i32(tcg_res, tcg_res, 25);

        write_vec_element_i32(s, tcg_zero, rd, 0, MO_32);
        write_vec_element_i32(s, tcg_zero, rd, 1, MO_32);
        write_vec_element_i32(s, tcg_zero, rd, 2, MO_32);
        write_vec_element_i32(s, tcg_res, rd, 3, MO_32);

        tcg_temp_free_i32(tcg_op1);
        tcg_temp_free_i32(tcg_op2);
        tcg_temp_free_i32(tcg_op3);
        tcg_temp_free_i32(tcg_res);
    }
}
14466
14467/* Crypto XAR
14468 *  31                   21 20  16 15    10 9    5 4    0
14469 * +-----------------------+------+--------+------+------+
14470 * | 1 1 0 0 1 1 1 0 1 0 0 |  Rm  |  imm6  |  Rn  |  Rd  |
14471 * +-----------------------+------+--------+------+------+
14472 */
14473static void disas_crypto_xar(DisasContext *s, uint32_t insn)
14474{
14475    int rm = extract32(insn, 16, 5);
14476    int imm6 = extract32(insn, 10, 6);
14477    int rn = extract32(insn, 5, 5);
14478    int rd = extract32(insn, 0, 5);
14479
14480    if (!dc_isar_feature(aa64_sha3, s)) {
14481        unallocated_encoding(s);
14482        return;
14483    }
14484
14485    if (!fp_access_check(s)) {
14486        return;
14487    }
14488
14489    gen_gvec_xar(MO_64, vec_full_reg_offset(s, rd),
14490                 vec_full_reg_offset(s, rn),
14491                 vec_full_reg_offset(s, rm), imm6, 16,
14492                 vec_full_reg_size(s));
14493}
14494
14495/* Crypto three-reg imm2
14496 *  31                   21 20  16 15  14 13 12  11  10  9    5 4    0
14497 * +-----------------------+------+-----+------+--------+------+------+
14498 * | 1 1 0 0 1 1 1 0 0 1 0 |  Rm  | 1 0 | imm2 | opcode |  Rn  |  Rd  |
14499 * +-----------------------+------+-----+------+--------+------+------+
14500 */
14501static void disas_crypto_three_reg_imm2(DisasContext *s, uint32_t insn)
14502{
14503    static gen_helper_gvec_3 * const fns[4] = {
14504        gen_helper_crypto_sm3tt1a, gen_helper_crypto_sm3tt1b,
14505        gen_helper_crypto_sm3tt2a, gen_helper_crypto_sm3tt2b,
14506    };
14507    int opcode = extract32(insn, 10, 2);
14508    int imm2 = extract32(insn, 12, 2);
14509    int rm = extract32(insn, 16, 5);
14510    int rn = extract32(insn, 5, 5);
14511    int rd = extract32(insn, 0, 5);
14512
14513    if (!dc_isar_feature(aa64_sm3, s)) {
14514        unallocated_encoding(s);
14515        return;
14516    }
14517
14518    if (!fp_access_check(s)) {
14519        return;
14520    }
14521
14522    gen_gvec_op3_ool(s, true, rd, rn, rm, imm2, fns[opcode]);
14523}
14524
14525/* C3.6 Data processing - SIMD, inc Crypto
14526 *
14527 * As the decode gets a little complex we are using a table based
14528 * approach for this part of the decode.
14529 */
static const AArch64DecodeTable data_proc_simd[] = {
    /* pattern  ,  mask     ,  fn                        */
    /*
     * Entries are tried in order by lookup_disas_fn; where one pattern
     * is a subset of another, the more specific entry must come first
     * (see the simd_mod_imm note below).
     */
    { 0x0e200400, 0x9f200400, disas_simd_three_reg_same },
    { 0x0e008400, 0x9f208400, disas_simd_three_reg_same_extra },
    { 0x0e200000, 0x9f200c00, disas_simd_three_reg_diff },
    { 0x0e200800, 0x9f3e0c00, disas_simd_two_reg_misc },
    { 0x0e300800, 0x9f3e0c00, disas_simd_across_lanes },
    { 0x0e000400, 0x9fe08400, disas_simd_copy },
    { 0x0f000000, 0x9f000400, disas_simd_indexed }, /* vector indexed */
    /* simd_mod_imm decode is a subset of simd_shift_imm, so must precede it */
    { 0x0f000400, 0x9ff80400, disas_simd_mod_imm },
    { 0x0f000400, 0x9f800400, disas_simd_shift_imm },
    { 0x0e000000, 0xbf208c00, disas_simd_tb },
    { 0x0e000800, 0xbf208c00, disas_simd_zip_trn },
    { 0x2e000000, 0xbf208400, disas_simd_ext },
    { 0x5e200400, 0xdf200400, disas_simd_scalar_three_reg_same },
    { 0x5e008400, 0xdf208400, disas_simd_scalar_three_reg_same_extra },
    { 0x5e200000, 0xdf200c00, disas_simd_scalar_three_reg_diff },
    { 0x5e200800, 0xdf3e0c00, disas_simd_scalar_two_reg_misc },
    { 0x5e300800, 0xdf3e0c00, disas_simd_scalar_pairwise },
    { 0x5e000400, 0xdfe08400, disas_simd_scalar_copy },
    { 0x5f000000, 0xdf000400, disas_simd_indexed }, /* scalar indexed */
    { 0x5f000400, 0xdf800400, disas_simd_scalar_shift_imm },
    { 0x4e280800, 0xff3e0c00, disas_crypto_aes },
    { 0x5e000000, 0xff208c00, disas_crypto_three_reg_sha },
    { 0x5e280800, 0xff3e0c00, disas_crypto_two_reg_sha },
    { 0xce608000, 0xffe0b000, disas_crypto_three_reg_sha512 },
    { 0xcec08000, 0xfffff000, disas_crypto_two_reg_sha512 },
    { 0xce000000, 0xff808000, disas_crypto_four_reg },
    { 0xce800000, 0xffe00000, disas_crypto_xar },
    { 0xce408000, 0xffe0c000, disas_crypto_three_reg_imm2 },
    { 0x0e400400, 0x9f60c400, disas_simd_three_reg_same_fp16 },
    { 0x0e780800, 0x8f7e0c00, disas_simd_two_reg_misc_fp16 },
    { 0x5e400400, 0xdf60c400, disas_simd_scalar_three_reg_same_fp16 },
    { 0x00000000, 0x00000000, NULL } /* end-of-table sentinel */
};
14566
14567static void disas_data_proc_simd(DisasContext *s, uint32_t insn)
14568{
14569    /* Note that this is called with all non-FP cases from
14570     * table C3-6 so it must UNDEF for entries not specifically
14571     * allocated to instructions in that table.
14572     */
14573    AArch64DecodeFn *fn = lookup_disas_fn(&data_proc_simd[0], insn);
14574    if (fn) {
14575        fn(s, insn);
14576    } else {
14577        unallocated_encoding(s);
14578    }
14579}
14580
14581/* C3.6 Data processing - SIMD and floating point */
14582static void disas_data_proc_simd_fp(DisasContext *s, uint32_t insn)
14583{
14584    if (extract32(insn, 28, 1) == 1 && extract32(insn, 30, 1) == 0) {
14585        disas_data_proc_fp(s, insn);
14586    } else {
14587        /* SIMD, including crypto */
14588        disas_data_proc_simd(s, insn);
14589    }
14590}
14591
14592/*
14593 * Include the generated SME FA64 decoder.
14594 */
14595
14596#include "decode-sme-fa64.c.inc"
14597
/* decode-sme-fa64 callback: matched insn needs no special handling. */
static bool trans_OK(DisasContext *s, arg_OK *a)
{
    return true;
}
14602
/*
 * decode-sme-fa64 callback: flag the insn by setting s->is_nonstreaming,
 * which later translation code uses to decide whether to trap.
 */
static bool trans_FAIL(DisasContext *s, arg_OK *a)
{
    s->is_nonstreaming = true;
    return true;
}
14608
14609/**
14610 * is_guarded_page:
14611 * @env: The cpu environment
14612 * @s: The DisasContext
14613 *
14614 * Return true if the page is guarded.
14615 */
static bool is_guarded_page(CPUARMState *env, DisasContext *s)
{
    uint64_t addr = s->base.pc_first;
#ifdef CONFIG_USER_ONLY
    /* User-only: the guarded (GP) state is tracked as a page flag. */
    return page_get_flags(addr) & PAGE_BTI;
#else
    /* System: probe the softmmu TLB entry for this code address. */
    int mmu_idx = arm_to_core_mmu_idx(s->mmu_idx);
    unsigned int index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);

    /*
     * We test this immediately after reading an insn, which means
     * that any normal page must be in the TLB.  The only exception
     * would be for executing from flash or device memory, which
     * does not retain the TLB entry.
     *
     * FIXME: Assume false for those, for now.  We could use
     * arm_cpu_get_phys_page_attrs_debug to re-read the page
     * table entry even for that case.
     */
    return (tlb_hit(entry->addr_code, addr) &&
            arm_tlb_bti_gp(&env_tlb(env)->d[mmu_idx].iotlb[index].attrs));
#endif
}
14640
14641/**
14642 * btype_destination_ok:
14643 * @insn: The instruction at the branch destination
14644 * @bt: SCTLR_ELx.BT
14645 * @btype: PSTATE.BTYPE, and is non-zero
14646 *
14647 * On a guarded page, there are a limited number of insns
14648 * that may be present at the branch target:
14649 *   - branch target identifiers,
14650 *   - paciasp, pacibsp,
14651 *   - BRK insn
14652 *   - HLT insn
14653 * Anything else causes a Branch Target Exception.
14654 *
14655 * Return true if the branch is compatible, false to raise BTITRAP.
14656 */
static bool btype_destination_ok(uint32_t insn, bool bt, int btype)
{
    if ((insn & 0xfffff01fu) == 0xd503201fu) {
        /* HINT space: CRm:op2 (bits [11:5]) selects the specific hint. */
        uint32_t hint = (insn >> 5) & 0x7f;

        switch (hint) {
        case 0x19: /* PACIASP */
        case 0x1b: /* PACIBSP */
            /*
             * If SCTLR_ELx.BT, then PACI*SP are not compatible
             * with btype == 3.  Otherwise all btype are ok.
             */
            return !bt || btype != 3;
        case 0x20: /* BTI */
            /* Not compatible with any btype.  */
            return false;
        case 0x22: /* BTI c */
            /* Not compatible with btype == 3 */
            return btype != 3;
        case 0x24: /* BTI j */
            /* Not compatible with btype == 2 */
            return btype != 2;
        case 0x26: /* BTI jc */
            /* Compatible with any btype.  */
            return true;
        default:
            /* Any other hint falls through to the failure case below. */
            break;
        }
    } else {
        uint32_t masked = insn & 0xffe0001fu;

        if (masked == 0xd4200000u || /* BRK */
            masked == 0xd4400000u) { /* HLT */
            /* Give priority to the breakpoint exception.  */
            return true;
        }
    }
    return false;
}
14692
/* Populate the DisasContext from the CPU state and cached TB flags. */
static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
                                          CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;
    ARMCPU *arm_cpu = env_archcpu(env);
    CPUARMTBFlags tb_flags = arm_tbflags_from_tb(dc->base.tb);
    int bound, core_mmu_idx;

    dc->isar = &arm_cpu->isar;
    dc->condjmp = 0;

    /* Fixed properties of an AArch64 TB: no Thumb, no IT-block state. */
    dc->aarch64 = true;
    dc->thumb = false;
    dc->sctlr_b = 0;
    dc->be_data = EX_TBFLAG_ANY(tb_flags, BE_DATA) ? MO_BE : MO_LE;
    dc->condexec_mask = 0;
    dc->condexec_cond = 0;
    /* Unpack the remaining per-TB flags into individual fields. */
    core_mmu_idx = EX_TBFLAG_ANY(tb_flags, MMUIDX);
    dc->mmu_idx = core_to_aa64_mmu_idx(core_mmu_idx);
    dc->tbii = EX_TBFLAG_A64(tb_flags, TBII);
    dc->tbid = EX_TBFLAG_A64(tb_flags, TBID);
    dc->tcma = EX_TBFLAG_A64(tb_flags, TCMA);
    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
    dc->user = (dc->current_el == 0);
#endif
    dc->fp_excp_el = EX_TBFLAG_ANY(tb_flags, FPEXC_EL);
    dc->align_mem = EX_TBFLAG_ANY(tb_flags, ALIGN_MEM);
    dc->pstate_il = EX_TBFLAG_ANY(tb_flags, PSTATE__IL);
    dc->sve_excp_el = EX_TBFLAG_A64(tb_flags, SVEEXC_EL);
    dc->sme_excp_el = EX_TBFLAG_A64(tb_flags, SMEEXC_EL);
    /* Vector lengths are stored in the flags as (len / 16) - 1. */
    dc->vl = (EX_TBFLAG_A64(tb_flags, VL) + 1) * 16;
    dc->svl = (EX_TBFLAG_A64(tb_flags, SVL) + 1) * 16;
    dc->pauth_active = EX_TBFLAG_A64(tb_flags, PAUTH_ACTIVE);
    dc->bt = EX_TBFLAG_A64(tb_flags, BT);
    dc->btype = EX_TBFLAG_A64(tb_flags, BTYPE);
    dc->unpriv = EX_TBFLAG_A64(tb_flags, UNPRIV);
    dc->ata = EX_TBFLAG_A64(tb_flags, ATA);
    dc->mte_active[0] = EX_TBFLAG_A64(tb_flags, MTE_ACTIVE);
    dc->mte_active[1] = EX_TBFLAG_A64(tb_flags, MTE0_ACTIVE);
    dc->pstate_sm = EX_TBFLAG_A64(tb_flags, PSTATE_SM);
    dc->pstate_za = EX_TBFLAG_A64(tb_flags, PSTATE_ZA);
    dc->sme_trap_nonstreaming = EX_TBFLAG_A64(tb_flags, SME_TRAP_NONSTREAMING);
    dc->vec_len = 0;
    dc->vec_stride = 0;
    dc->cp_regs = arm_cpu->cp_regs;
    dc->features = env->features;
    dc->dcz_blocksize = arm_cpu->dcz_blocksize;

#ifdef CONFIG_USER_ONLY
    /* In sve_probe_page, we assume TBI is enabled. */
    tcg_debug_assert(dc->tbid & 1);
#endif

    /* Single step state. The code-generation logic here is:
     *  SS_ACTIVE == 0:
     *   generate code with no special handling for single-stepping (except
     *   that anything that can make us go to SS_ACTIVE == 1 must end the TB;
     *   this happens anyway because those changes are all system register or
     *   PSTATE writes).
     *  SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
     *   emit code for one insn
     *   emit code to clear PSTATE.SS
     *   emit code to generate software step exception for completed step
     *   end TB (as usual for having generated an exception)
     *  SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
     *   emit code to generate a software step exception
     *   end the TB
     */
    dc->ss_active = EX_TBFLAG_ANY(tb_flags, SS_ACTIVE);
    dc->pstate_ss = EX_TBFLAG_ANY(tb_flags, PSTATE__SS);
    dc->is_ldex = false;

    /* Bound the number of insns to execute to those left on the page.  */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;

    /* If architectural single step active, limit to 1.  */
    if (dc->ss_active) {
        bound = 1;
    }
    dc->base.max_insns = MIN(dc->base.max_insns, bound);

    init_tmp_a64_array(dc);
}
14778
/* No per-TB setup is required for AArch64; hook intentionally empty. */
static void aarch64_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}
14782
static void aarch64_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /*
     * Mark the insn boundary in the TCG opcode stream; the two extra
     * target words are always zero here.  Remember the op so the
     * syndrome data can be back-patched later.
     */
    tcg_gen_insn_start(dc->base.pc_next, 0, 0);
    dc->insn_start = tcg_last_op();
}
14790
14791static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
14792{
14793    DisasContext *s = container_of(dcbase, DisasContext, base);
14794    CPUARMState *env = cpu->env_ptr;
14795    uint64_t pc = s->base.pc_next;
14796    uint32_t insn;
14797
14798    /* Singlestep exceptions have the highest priority. */
14799    if (s->ss_active && !s->pstate_ss) {
14800        /* Singlestep state is Active-pending.
14801         * If we're in this state at the start of a TB then either
14802         *  a) we just took an exception to an EL which is being debugged
14803         *     and this is the first insn in the exception handler
14804         *  b) debug exceptions were masked and we just unmasked them
14805         *     without changing EL (eg by clearing PSTATE.D)
14806         * In either case we're going to take a swstep exception in the
14807         * "did not step an insn" case, and so the syndrome ISV and EX
14808         * bits should be zero.
14809         */
14810        assert(s->base.num_insns == 1);
14811        gen_swstep_exception(s, 0, 0);
14812        s->base.is_jmp = DISAS_NORETURN;
14813        s->base.pc_next = pc + 4;
14814        return;
14815    }
14816
14817    if (pc & 3) {
14818        /*
14819         * PC alignment fault.  This has priority over the instruction abort
14820         * that we would receive from a translation fault via arm_ldl_code.
14821         * This should only be possible after an indirect branch, at the
14822         * start of the TB.
14823         */
14824        assert(s->base.num_insns == 1);
14825        gen_helper_exception_pc_alignment(cpu_env, tcg_constant_tl(pc));
14826        s->base.is_jmp = DISAS_NORETURN;
14827        s->base.pc_next = QEMU_ALIGN_UP(pc, 4);
14828        return;
14829    }
14830
14831    s->pc_curr = pc;
14832    insn = arm_ldl_code(env, &s->base, pc, s->sctlr_b);
14833    s->insn = insn;
14834    s->base.pc_next = pc + 4;
14835
14836    s->fp_access_checked = false;
14837    s->sve_access_checked = false;
14838
14839    if (s->pstate_il) {
14840        /*
14841         * Illegal execution state. This has priority over BTI
14842         * exceptions, but comes after instruction abort exceptions.
14843         */
14844        gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_illegalstate());
14845        return;
14846    }
14847
14848    if (dc_isar_feature(aa64_bti, s)) {
14849        if (s->base.num_insns == 1) {
14850            /*
14851             * At the first insn of the TB, compute s->guarded_page.
14852             * We delayed computing this until successfully reading
14853             * the first insn of the TB, above.  This (mostly) ensures
14854             * that the softmmu tlb entry has been populated, and the
14855             * page table GP bit is available.
14856             *
14857             * Note that we need to compute this even if btype == 0,
14858             * because this value is used for BR instructions later
14859             * where ENV is not available.
14860             */
14861            s->guarded_page = is_guarded_page(env, s);
14862
14863            /* First insn can have btype set to non-zero.  */
14864            tcg_debug_assert(s->btype >= 0);
14865
14866            /*
14867             * Note that the Branch Target Exception has fairly high
14868             * priority -- below debugging exceptions but above most
14869             * everything else.  This allows us to handle this now
14870             * instead of waiting until the insn is otherwise decoded.
14871             */
14872            if (s->btype != 0
14873                && s->guarded_page
14874                && !btype_destination_ok(insn, s->bt, s->btype)) {
14875                gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
14876                                   syn_btitrap(s->btype));
14877                return;
14878            }
14879        } else {
14880            /* Not the first insn: btype must be 0.  */
14881            tcg_debug_assert(s->btype == 0);
14882        }
14883    }
14884
14885    s->is_nonstreaming = false;
14886    if (s->sme_trap_nonstreaming) {
14887        disas_sme_fa64(s, insn);
14888    }
14889
14890    switch (extract32(insn, 25, 4)) {
14891    case 0x0:
14892        if (!extract32(insn, 31, 1) || !disas_sme(s, insn)) {
14893            unallocated_encoding(s);
14894        }
14895        break;
14896    case 0x1: case 0x3: /* UNALLOCATED */
14897        unallocated_encoding(s);
14898        break;
14899    case 0x2:
14900        if (!disas_sve(s, insn)) {
14901            unallocated_encoding(s);
14902        }
14903        break;
14904    case 0x8: case 0x9: /* Data processing - immediate */
14905        disas_data_proc_imm(s, insn);
14906        break;
14907    case 0xa: case 0xb: /* Branch, exception generation and system insns */
14908        disas_b_exc_sys(s, insn);
14909        break;
14910    case 0x4:
14911    case 0x6:
14912    case 0xc:
14913    case 0xe:      /* Loads and stores */
14914        disas_ldst(s, insn);
14915        break;
14916    case 0x5:
14917    case 0xd:      /* Data processing - register */
14918        disas_data_proc_reg(s, insn);
14919        break;
14920    case 0x7:
14921    case 0xf:      /* Data processing - SIMD and floating point */
14922        disas_data_proc_simd_fp(s, insn);
14923        break;
14924    default:
14925        assert(FALSE); /* all 15 cases should be handled above */
14926        break;
14927    }
14928
14929    /* if we allocated any temporaries, free them here */
14930    free_tmp_a64(s);
14931
14932    /*
14933     * After execution of most insns, btype is reset to 0.
14934     * Note that we set btype == -1 when the insn sets btype.
14935     */
14936    if (s->btype > 0 && s->base.is_jmp != DISAS_NORETURN) {
14937        reset_btype(s);
14938    }
14939
14940    translator_loop_temp_check(&s->base);
14941}
14942
/* Emit the TB epilogue appropriate to how translation finished. */
static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (unlikely(dc->ss_active)) {
        /* Note that this means single stepping WFI doesn't halt the CPU.
         * For conditional branch insns this is harmless unreachable code as
         * gen_goto_tb() has already handled emitting the debug exception
         * (and thus a tb-jump is not possible when singlestepping).
         */
        switch (dc->base.is_jmp) {
        default:
            gen_a64_set_pc_im(dc->base.pc_next);
            /* fall through */
        case DISAS_EXIT:
        case DISAS_JUMP:
            gen_step_complete_exception(dc);
            break;
        case DISAS_NORETURN:
            break;
        }
    } else {
        /* Normal (non-single-step) TB exit. */
        switch (dc->base.is_jmp) {
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
            gen_goto_tb(dc, 1, dc->base.pc_next);
            break;
        default:
        case DISAS_UPDATE_EXIT:
            gen_a64_set_pc_im(dc->base.pc_next);
            /* fall through */
        case DISAS_EXIT:
            tcg_gen_exit_tb(NULL, 0);
            break;
        case DISAS_UPDATE_NOCHAIN:
            gen_a64_set_pc_im(dc->base.pc_next);
            /* fall through */
        case DISAS_JUMP:
            tcg_gen_lookup_and_goto_ptr();
            break;
        case DISAS_NORETURN:
        case DISAS_SWI:
            /* Nothing to emit: an exception has already been generated. */
            break;
        case DISAS_WFE:
            gen_a64_set_pc_im(dc->base.pc_next);
            gen_helper_wfe(cpu_env);
            break;
        case DISAS_YIELD:
            gen_a64_set_pc_im(dc->base.pc_next);
            gen_helper_yield(cpu_env);
            break;
        case DISAS_WFI:
            /*
             * This is a special case because we don't want to just halt
             * the CPU if trying to debug across a WFI.
             */
            gen_a64_set_pc_im(dc->base.pc_next);
            gen_helper_wfi(cpu_env, tcg_constant_i32(4));
            /*
             * The helper doesn't necessarily throw an exception, but we
             * must go back to the main loop to check for interrupts anyway.
             */
            tcg_gen_exit_tb(NULL, 0);
            break;
        }
    }
}
15010
/* Log the guest symbol (if any) and a disassembly of the whole TB. */
static void aarch64_tr_disas_log(const DisasContextBase *dcbase,
                                 CPUState *cpu, FILE *logfile)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    fprintf(logfile, "IN: %s\n", lookup_symbol(dc->base.pc_first));
    target_disas(logfile, cpu, dc->base.pc_first, dc->base.tb->size);
}
15019
/* Hook table consumed by the generic translator loop for AArch64. */
const TranslatorOps aarch64_translator_ops = {
    .init_disas_context = aarch64_tr_init_disas_context,
    .tb_start           = aarch64_tr_tb_start,
    .insn_start         = aarch64_tr_insn_start,
    .translate_insn     = aarch64_tr_translate_insn,
    .tb_stop            = aarch64_tr_tb_stop,
    .disas_log          = aarch64_tr_disas_log,
};
15028