qemu/target/arm/translate-a64.c
/*
 *  AArch64 translation
 *
 *  Copyright (c) 2013 Alexander Graf <agraf@suse.de>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "qemu/log.h"
#include "arm_ldst.h"
#include "translate.h"
#include "internals.h"
#include "qemu/host-utils.h"

#include "exec/semihost.h"
#include "exec/gen-icount.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/log.h"

#include "trace-tcg.h"

static TCGv_i64 cpu_X[32];
static TCGv_i64 cpu_pc;

/* Load/store exclusive handling */
static TCGv_i64 cpu_exclusive_high;
static TCGv_i64 cpu_reg(DisasContext *s, int reg);

static const char *regnames[] = {
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
    "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
    "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23",
    "x24", "x25", "x26", "x27", "x28", "x29", "lr", "sp"
};

enum a64_shift_type {
    A64_SHIFT_TYPE_LSL = 0,
    A64_SHIFT_TYPE_LSR = 1,
    A64_SHIFT_TYPE_ASR = 2,
    A64_SHIFT_TYPE_ROR = 3
};

/* Table based decoder typedefs - used when the relevant bits for decode
 * are too awkwardly scattered across the instruction (eg SIMD).
 */
typedef void AArch64DecodeFn(DisasContext *s, uint32_t insn);

typedef struct AArch64DecodeTable {
    uint32_t pattern;
    uint32_t mask;
    AArch64DecodeFn *disas_fn;
} AArch64DecodeTable;

/* Function prototype for gen_ functions for calling Neon helpers */
typedef void NeonGenOneOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32);
typedef void NeonGenTwoOpFn(TCGv_i32, TCGv_i32, TCGv_i32);
typedef void NeonGenTwoOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32);
typedef void NeonGenTwo64OpFn(TCGv_i64, TCGv_i64, TCGv_i64);
typedef void NeonGenTwo64OpEnvFn(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64);
typedef void NeonGenNarrowFn(TCGv_i32, TCGv_i64);
typedef void NeonGenNarrowEnvFn(TCGv_i32, TCGv_ptr, TCGv_i64);
typedef void NeonGenWidenFn(TCGv_i64, TCGv_i32);
typedef void NeonGenTwoSingleOPFn(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
typedef void NeonGenTwoDoubleOPFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_ptr);
typedef void NeonGenOneOpFn(TCGv_i64, TCGv_i64);
typedef void CryptoTwoOpEnvFn(TCGv_ptr, TCGv_i32, TCGv_i32);
typedef void CryptoThreeOpEnvFn(TCGv_ptr, TCGv_i32, TCGv_i32, TCGv_i32);

/* initialize TCG globals.  */
void a64_translate_init(void)
{
    int i;

    cpu_pc = tcg_global_mem_new_i64(cpu_env,
                                    offsetof(CPUARMState, pc),
                                    "pc");
    for (i = 0; i < 32; i++) {
        cpu_X[i] = tcg_global_mem_new_i64(cpu_env,
                                          offsetof(CPUARMState, xregs[i]),
                                          regnames[i]);
    }

    cpu_exclusive_high = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_high), "exclusive_high");
}

static inline int get_a64_user_mem_index(DisasContext *s)
{
    /* Return the core mmu_idx to use for A64 "unprivileged load/store" insns:
     *  if EL1, access as if EL0; otherwise access at current EL
     */
    ARMMMUIdx useridx;

    switch (s->mmu_idx) {
    case ARMMMUIdx_S12NSE1:
        useridx = ARMMMUIdx_S12NSE0;
        break;
    case ARMMMUIdx_S1SE1:
        useridx = ARMMMUIdx_S1SE0;
        break;
    case ARMMMUIdx_S2NS:
        g_assert_not_reached();
    default:
        useridx = s->mmu_idx;
        break;
    }
    return arm_to_core_mmu_idx(useridx);
}
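/* For example (illustrative): an LDTR/STTR executed while s->mmu_idx is
 * ARMMMUIdx_S12NSE1 (EL1) is translated using the ARMMMUIdx_S12NSE0 (EL0)
 * regime, while the same insn at EL2 or EL3 falls through to the default
 * case and simply uses the current regime.
 */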

void aarch64_cpu_dump_state(CPUState *cs, FILE *f,
                            fprintf_function cpu_fprintf, int flags)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t psr = pstate_read(env);
    int i;
    int el = arm_current_el(env);
    const char *ns_status;

    cpu_fprintf(f, "PC=%016"PRIx64"  SP=%016"PRIx64"\n",
            env->pc, env->xregs[31]);
    for (i = 0; i < 31; i++) {
        cpu_fprintf(f, "X%02d=%016"PRIx64, i, env->xregs[i]);
        if ((i % 4) == 3) {
            cpu_fprintf(f, "\n");
        } else {
            cpu_fprintf(f, " ");
        }
    }

    if (arm_feature(env, ARM_FEATURE_EL3) && el != 3) {
        ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
    } else {
        ns_status = "";
    }

    cpu_fprintf(f, "\nPSTATE=%08x %c%c%c%c %sEL%d%c\n",
                psr,
                psr & PSTATE_N ? 'N' : '-',
                psr & PSTATE_Z ? 'Z' : '-',
                psr & PSTATE_C ? 'C' : '-',
                psr & PSTATE_V ? 'V' : '-',
                ns_status,
                el,
                psr & PSTATE_SP ? 'h' : 't');

    if (flags & CPU_DUMP_FPU) {
        int numvfpregs = 32;
        for (i = 0; i < numvfpregs; i += 2) {
            uint64_t vlo = float64_val(env->vfp.regs[i * 2]);
            uint64_t vhi = float64_val(env->vfp.regs[(i * 2) + 1]);
            cpu_fprintf(f, "q%02d=%016" PRIx64 ":%016" PRIx64 " ",
                        i, vhi, vlo);
            vlo = float64_val(env->vfp.regs[(i + 1) * 2]);
            vhi = float64_val(env->vfp.regs[((i + 1) * 2) + 1]);
            cpu_fprintf(f, "q%02d=%016" PRIx64 ":%016" PRIx64 "\n",
                        i + 1, vhi, vlo);
        }
        cpu_fprintf(f, "FPCR: %08x  FPSR: %08x\n",
                    vfp_get_fpcr(env), vfp_get_fpsr(env));
    }
}

void gen_a64_set_pc_im(uint64_t val)
{
    tcg_gen_movi_i64(cpu_pc, val);
}

/* Load the PC from a generic TCG variable.
 *
 * If address tagging is enabled via the TCR TBI bits, then loading
 * an address into the PC will clear out any tag in it:
 *  + for EL2 and EL3 there is only one TBI bit, and if it is set
 *    then the address is zero-extended, clearing bits [63:56]
 *  + for EL0 and EL1, TBI0 controls addresses with bit 55 == 0
 *    and TBI1 controls addresses with bit 55 == 1.
 *    If the appropriate TBI bit is set for the address then
 *    the address is sign-extended from bit 55 into bits [63:56]
 *
 * We can avoid doing this for relative branches, because the
 * PC + offset can never overflow into the tag bits (assuming
 * that virtual addresses are less than 56 bits wide, as they
 * are currently), but we must handle it for branch-to-register.
 */
static void gen_a64_set_pc(DisasContext *s, TCGv_i64 src)
{
    if (s->current_el <= 1) {
        /* Test if NEITHER or BOTH TBI values are set.  If so, no need to
         * examine bit 55 of address, can just generate code.
         * If mixed, then test via generated code
         */
        if (s->tbi0 && s->tbi1) {
            TCGv_i64 tmp_reg = tcg_temp_new_i64();
            /* Both bits set, sign extension from bit 55 into [63:56] will
             * cover both cases
             */
            tcg_gen_shli_i64(tmp_reg, src, 8);
            tcg_gen_sari_i64(cpu_pc, tmp_reg, 8);
            tcg_temp_free_i64(tmp_reg);
        } else if (!s->tbi0 && !s->tbi1) {
            /* Neither bit set, just load it as-is */
            tcg_gen_mov_i64(cpu_pc, src);
        } else {
            TCGv_i64 tcg_tmpval = tcg_temp_new_i64();
            TCGv_i64 tcg_bit55  = tcg_temp_new_i64();
            TCGv_i64 tcg_zero   = tcg_const_i64(0);

            tcg_gen_andi_i64(tcg_bit55, src, (1ull << 55));

            if (s->tbi0) {
                /* tbi0==1, tbi1==0, so 0-fill upper byte if bit 55 = 0 */
                tcg_gen_andi_i64(tcg_tmpval, src,
                                 0x00FFFFFFFFFFFFFFull);
                tcg_gen_movcond_i64(TCG_COND_EQ, cpu_pc, tcg_bit55, tcg_zero,
                                    tcg_tmpval, src);
            } else {
                /* tbi0==0, tbi1==1, so 1-fill upper byte if bit 55 = 1 */
                tcg_gen_ori_i64(tcg_tmpval, src,
                                0xFF00000000000000ull);
                tcg_gen_movcond_i64(TCG_COND_NE, cpu_pc, tcg_bit55, tcg_zero,
                                    tcg_tmpval, src);
            }
            tcg_temp_free_i64(tcg_zero);
            tcg_temp_free_i64(tcg_bit55);
            tcg_temp_free_i64(tcg_tmpval);
        }
    } else {  /* EL > 1 */
        if (s->tbi0) {
            /* Force tag byte to all zero */
            tcg_gen_andi_i64(cpu_pc, src, 0x00FFFFFFFFFFFFFFull);
        } else {
            /* Load unmodified address */
            tcg_gen_mov_i64(cpu_pc, src);
        }
    }
}
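/* Worked example (illustrative): with TBI0 == TBI1 == 1, an input of
 * 0x00FF800000001234 (bit 55 set) becomes 0xFFFF800000001234 after the
 * shl/sar pair, while 0x00007FFFFFFF1234 (bit 55 clear) keeps a zero tag
 * byte; both match the sign-extend-from-bit-55 rule described above.
 */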

typedef struct DisasCompare64 {
    TCGCond cond;
    TCGv_i64 value;
} DisasCompare64;

static void a64_test_cc(DisasCompare64 *c64, int cc)
{
    DisasCompare c32;

    arm_test_cc(&c32, cc);

    /* Sign-extend the 32-bit value so that the GE/LT comparisons work
     * properly.  The NE/EQ comparisons are also fine with this choice.
     */
    c64->cond = c32.cond;
    c64->value = tcg_temp_new_i64();
    tcg_gen_ext_i32_i64(c64->value, c32.value);

    arm_free_cc(&c32);
}

static void a64_free_cc(DisasCompare64 *c64)
{
    tcg_temp_free_i64(c64->value);
}

static void gen_exception_internal(int excp)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);

    assert(excp_is_internal(excp));
    gen_helper_exception_internal(cpu_env, tcg_excp);
    tcg_temp_free_i32(tcg_excp);
}

static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);
    TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
    TCGv_i32 tcg_el = tcg_const_i32(target_el);

    gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
                                       tcg_syn, tcg_el);
    tcg_temp_free_i32(tcg_el);
    tcg_temp_free_i32(tcg_syn);
    tcg_temp_free_i32(tcg_excp);
}

static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
{
    gen_a64_set_pc_im(s->pc - offset);
    gen_exception_internal(excp);
    s->is_jmp = DISAS_EXC;
}

static void gen_exception_insn(DisasContext *s, int offset, int excp,
                               uint32_t syndrome, uint32_t target_el)
{
    gen_a64_set_pc_im(s->pc - offset);
    gen_exception(excp, syndrome, target_el);
    s->is_jmp = DISAS_EXC;
}

static void gen_ss_advance(DisasContext *s)
{
    /* If the singlestep state is Active-not-pending, advance to
     * Active-pending.
     */
    if (s->ss_active) {
        s->pstate_ss = 0;
        gen_helper_clear_pstate_ss(cpu_env);
    }
}

static void gen_step_complete_exception(DisasContext *s)
{
    /* We just completed step of an insn. Move from Active-not-pending
     * to Active-pending, and then also take the swstep exception.
     * This corresponds to making the (IMPDEF) choice to prioritize
     * swstep exceptions over asynchronous exceptions taken to an exception
     * level where debug is disabled. This choice has the advantage that
     * we do not need to maintain internal state corresponding to the
     * ISV/EX syndrome bits between completion of the step and generation
     * of the exception, and our syndrome information is always correct.
     */
    gen_ss_advance(s);
    gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
                  default_exception_el(s));
    s->is_jmp = DISAS_EXC;
}

static inline bool use_goto_tb(DisasContext *s, int n, uint64_t dest)
{
    /* No direct tb linking with singlestep (either QEMU's or the ARM
     * debug architecture kind) or deterministic io
     */
    if (s->singlestep_enabled || s->ss_active || (s->tb->cflags & CF_LAST_IO)) {
        return false;
    }

#ifndef CONFIG_USER_ONLY
    /* Only link tbs from inside the same guest page */
    if ((s->tb->pc & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) {
        return false;
    }
#endif

    return true;
}

static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest)
{
    TranslationBlock *tb;

    tb = s->tb;
    if (use_goto_tb(s, n, dest)) {
        tcg_gen_goto_tb(n);
        gen_a64_set_pc_im(dest);
        tcg_gen_exit_tb((intptr_t)tb + n);
        s->is_jmp = DISAS_TB_JUMP;
    } else {
        gen_a64_set_pc_im(dest);
        if (s->ss_active) {
            gen_step_complete_exception(s);
        } else if (s->singlestep_enabled) {
            gen_exception_internal(EXCP_DEBUG);
        } else {
            tcg_gen_lookup_and_goto_ptr(cpu_pc);
            s->is_jmp = DISAS_TB_JUMP;
        }
    }
}

static void unallocated_encoding(DisasContext *s)
{
    /* Unallocated and reserved encodings are uncategorized */
    gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
}

#define unsupported_encoding(s, insn)                                    \
    do {                                                                 \
        qemu_log_mask(LOG_UNIMP,                                         \
                      "%s:%d: unsupported instruction encoding 0x%08x "  \
                      "at pc=%016" PRIx64 "\n",                          \
                      __FILE__, __LINE__, insn, s->pc - 4);              \
        unallocated_encoding(s);                                         \
    } while (0)

static void init_tmp_a64_array(DisasContext *s)
{
#ifdef CONFIG_DEBUG_TCG
    int i;
    for (i = 0; i < ARRAY_SIZE(s->tmp_a64); i++) {
        TCGV_UNUSED_I64(s->tmp_a64[i]);
    }
#endif
    s->tmp_a64_count = 0;
}

static void free_tmp_a64(DisasContext *s)
{
    int i;
    for (i = 0; i < s->tmp_a64_count; i++) {
        tcg_temp_free_i64(s->tmp_a64[i]);
    }
    init_tmp_a64_array(s);
}

static TCGv_i64 new_tmp_a64(DisasContext *s)
{
    assert(s->tmp_a64_count < TMP_A64_MAX);
    return s->tmp_a64[s->tmp_a64_count++] = tcg_temp_new_i64();
}

static TCGv_i64 new_tmp_a64_zero(DisasContext *s)
{
    TCGv_i64 t = new_tmp_a64(s);
    tcg_gen_movi_i64(t, 0);
    return t;
}

/*
 * Register access functions
 *
 * These functions are used for directly accessing a register where
 * changes to the final register value are likely to be made. If you
 * need to use a register for temporary calculation (e.g. index type
 * operations) use the read_* form.
 *
 * B1.2.1 Register mappings
 *
 * In instruction register encoding 31 can refer to ZR (zero register) or
 * the SP (stack pointer) depending on context. In QEMU's case we map SP
 * to cpu_X[31] and ZR accesses to a temporary which can be discarded.
 * This is the point of the _sp forms.
 */
static TCGv_i64 cpu_reg(DisasContext *s, int reg)
{
    if (reg == 31) {
        return new_tmp_a64_zero(s);
    } else {
        return cpu_X[reg];
    }
}

/* register access for when 31 == SP */
static TCGv_i64 cpu_reg_sp(DisasContext *s, int reg)
{
    return cpu_X[reg];
}

/* read a cpu register in 32bit/64bit mode. Returns a TCGv_i64
 * representing the register contents. This TCGv is an auto-freed
 * temporary so it need not be explicitly freed, and may be modified.
 */
static TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf)
{
    TCGv_i64 v = new_tmp_a64(s);
    if (reg != 31) {
        if (sf) {
            tcg_gen_mov_i64(v, cpu_X[reg]);
        } else {
            tcg_gen_ext32u_i64(v, cpu_X[reg]);
        }
    } else {
        tcg_gen_movi_i64(v, 0);
    }
    return v;
}

static TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf)
{
    TCGv_i64 v = new_tmp_a64(s);
    if (sf) {
        tcg_gen_mov_i64(v, cpu_X[reg]);
    } else {
        tcg_gen_ext32u_i64(v, cpu_X[reg]);
    }
    return v;
}

/* We should have at some point before trying to access an FP register
 * done the necessary access check, so assert that
 * (a) we did the check and
 * (b) we didn't then just plough ahead anyway if it failed.
 * Print the instruction pattern in the abort message so we can figure
 * out what we need to fix if a user encounters this problem in the wild.
 */
static inline void assert_fp_access_checked(DisasContext *s)
{
#ifdef CONFIG_DEBUG_TCG
    if (unlikely(!s->fp_access_checked || s->fp_excp_el)) {
        fprintf(stderr, "target-arm: FP access check missing for "
                "instruction 0x%08x\n", s->insn);
        abort();
    }
#endif
}

/* Return the offset into CPUARMState of an element of specified
 * size, 'element' places in from the least significant end of
 * the FP/vector register Qn.
 */
static inline int vec_reg_offset(DisasContext *s, int regno,
                                 int element, TCGMemOp size)
{
    int offs = 0;
#ifdef HOST_WORDS_BIGENDIAN
    /* This is complicated slightly because vfp.regs[2n] is
     * still the low half and vfp.regs[2n+1] the high half
     * of the 128 bit vector, even on big endian systems.
     * Calculate the offset assuming a fully bigendian 128 bits,
     * then XOR to account for the order of the two 64 bit halves.
     */
    offs += (16 - ((element + 1) * (1 << size)));
    offs ^= 8;
#else
    offs += element * (1 << size);
#endif
    offs += offsetof(CPUARMState, vfp.regs[regno * 2]);
    assert_fp_access_checked(s);
    return offs;
}
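/* Worked example (illustrative): for a MO_16 element with index 2, the
 * little-endian branch yields offs == 2 * 2 == 4 from the start of the
 * register, while the big-endian branch computes
 * (16 - 3 * 2) ^ 8 == 10 ^ 8 == 2, landing on the same architectural
 * element within the byte-swapped 64 bit halves.
 */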

/* Return the offset into CPUARMState of a slice (from
 * the least significant end) of FP register Qn (ie
 * Dn, Sn, Hn or Bn).
 * (Note that this is not the same mapping as for A32; see cpu.h)
 */
static inline int fp_reg_offset(DisasContext *s, int regno, TCGMemOp size)
{
    int offs = offsetof(CPUARMState, vfp.regs[regno * 2]);
#ifdef HOST_WORDS_BIGENDIAN
    offs += (8 - (1 << size));
#endif
    assert_fp_access_checked(s);
    return offs;
}

/* Offset of the high half of the 128 bit vector Qn */
static inline int fp_reg_hi_offset(DisasContext *s, int regno)
{
    assert_fp_access_checked(s);
    return offsetof(CPUARMState, vfp.regs[regno * 2 + 1]);
}

/* Convenience accessors for reading and writing single and double
 * FP registers. Writing clears the upper parts of the associated
 * 128 bit vector register, as required by the architecture.
 * Note that unlike the GP register accessors, the values returned
 * by the read functions must be manually freed.
 */
static TCGv_i64 read_fp_dreg(DisasContext *s, int reg)
{
    TCGv_i64 v = tcg_temp_new_i64();

    tcg_gen_ld_i64(v, cpu_env, fp_reg_offset(s, reg, MO_64));
    return v;
}

static TCGv_i32 read_fp_sreg(DisasContext *s, int reg)
{
    TCGv_i32 v = tcg_temp_new_i32();

    tcg_gen_ld_i32(v, cpu_env, fp_reg_offset(s, reg, MO_32));
    return v;
}

static void write_fp_dreg(DisasContext *s, int reg, TCGv_i64 v)
{
    TCGv_i64 tcg_zero = tcg_const_i64(0);

    tcg_gen_st_i64(v, cpu_env, fp_reg_offset(s, reg, MO_64));
    tcg_gen_st_i64(tcg_zero, cpu_env, fp_reg_hi_offset(s, reg));
    tcg_temp_free_i64(tcg_zero);
}

static void write_fp_sreg(DisasContext *s, int reg, TCGv_i32 v)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp, v);
    write_fp_dreg(s, reg, tmp);
    tcg_temp_free_i64(tmp);
}

static TCGv_ptr get_fpstatus_ptr(void)
{
    TCGv_ptr statusptr = tcg_temp_new_ptr();
    int offset;

    /* In A64 all instructions (both FP and Neon) use the FPCR;
     * there is no equivalent of the A32 Neon "standard FPSCR value"
     * and all operations use vfp.fp_status.
     */
    offset = offsetof(CPUARMState, vfp.fp_status);
    tcg_gen_addi_ptr(statusptr, cpu_env, offset);
    return statusptr;
}

/* Set ZF and NF based on a 64 bit result. This is alas fiddlier
 * than the 32 bit equivalent.
 */
static inline void gen_set_NZ64(TCGv_i64 result)
{
    tcg_gen_extr_i64_i32(cpu_ZF, cpu_NF, result);
    tcg_gen_or_i32(cpu_ZF, cpu_ZF, cpu_NF);
}
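/* The extr above splits the 64 bit result into its low half (cpu_ZF) and
 * high half (cpu_NF); OR-ing the halves into cpu_ZF makes it zero exactly
 * when the full result is zero, and bit 31 of the high half is the 64 bit
 * sign bit, which is what the N flag logic reads from cpu_NF.
 */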

/* Set NZCV as for a logical operation: NZ as per result, CV cleared. */
static inline void gen_logic_CC(int sf, TCGv_i64 result)
{
    if (sf) {
        gen_set_NZ64(result);
    } else {
        tcg_gen_extrl_i64_i32(cpu_ZF, result);
        tcg_gen_mov_i32(cpu_NF, cpu_ZF);
    }
    tcg_gen_movi_i32(cpu_CF, 0);
    tcg_gen_movi_i32(cpu_VF, 0);
}

/* dest = T0 + T1; compute C, N, V and Z flags */
static void gen_add_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        TCGv_i64 result, flag, tmp;
        result = tcg_temp_new_i64();
        flag = tcg_temp_new_i64();
        tmp = tcg_temp_new_i64();

        tcg_gen_movi_i64(tmp, 0);
        tcg_gen_add2_i64(result, flag, t0, tmp, t1, tmp);

        tcg_gen_extrl_i64_i32(cpu_CF, flag);

        gen_set_NZ64(result);

        tcg_gen_xor_i64(flag, result, t0);
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_andc_i64(flag, flag, tmp);
        tcg_temp_free_i64(tmp);
        tcg_gen_extrh_i64_i32(cpu_VF, flag);

        tcg_gen_mov_i64(dest, result);
        tcg_temp_free_i64(result);
        tcg_temp_free_i64(flag);
    } else {
        /* 32 bit arithmetic */
        TCGv_i32 t0_32 = tcg_temp_new_i32();
        TCGv_i32 t1_32 = tcg_temp_new_i32();
        TCGv_i32 tmp = tcg_temp_new_i32();

        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_extrl_i64_i32(t0_32, t0);
        tcg_gen_extrl_i64_i32(t1_32, t1);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, t1_32, tmp);
        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
        tcg_gen_extu_i32_i64(dest, cpu_NF);

        tcg_temp_free_i32(tmp);
        tcg_temp_free_i32(t0_32);
        tcg_temp_free_i32(t1_32);
    }
}
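/* The V flag computation above is the standard two's-complement rule:
 * signed overflow occurred iff the addends have the same sign and the
 * result's sign differs, i.e. (result ^ t0) & ~(t0 ^ t1) has its top
 * bit set; the extrh/bit-31 extraction moves that top bit into cpu_VF.
 */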

/* dest = T0 - T1; compute C, N, V and Z flags */
static void gen_sub_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        /* 64 bit arithmetic */
        TCGv_i64 result, flag, tmp;

        result = tcg_temp_new_i64();
        flag = tcg_temp_new_i64();
        tcg_gen_sub_i64(result, t0, t1);

        gen_set_NZ64(result);

        tcg_gen_setcond_i64(TCG_COND_GEU, flag, t0, t1);
        tcg_gen_extrl_i64_i32(cpu_CF, flag);

        tcg_gen_xor_i64(flag, result, t0);
        tmp = tcg_temp_new_i64();
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_and_i64(flag, flag, tmp);
        tcg_temp_free_i64(tmp);
        tcg_gen_extrh_i64_i32(cpu_VF, flag);
        tcg_gen_mov_i64(dest, result);
        tcg_temp_free_i64(flag);
        tcg_temp_free_i64(result);
    } else {
        /* 32 bit arithmetic */
        TCGv_i32 t0_32 = tcg_temp_new_i32();
        TCGv_i32 t1_32 = tcg_temp_new_i32();
        TCGv_i32 tmp;

        tcg_gen_extrl_i64_i32(t0_32, t0);
        tcg_gen_extrl_i64_i32(t1_32, t1);
        tcg_gen_sub_i32(cpu_NF, t0_32, t1_32);
        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0_32, t1_32);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_temp_free_i32(t0_32);
        tcg_temp_free_i32(t1_32);
        tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
        tcg_temp_free_i32(tmp);
        tcg_gen_extu_i32_i64(dest, cpu_NF);
    }
}
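/* Note the AArch64 convention that subtraction sets C as NOT(borrow):
 * the setcond above makes C == 1 exactly when t0 >= t1 as unsigned
 * values, so e.g. SUBS with equal operands produces C == 1 and Z == 1.
 */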

/* dest = T0 + T1 + CF; do not compute flags. */
static void gen_adc(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    TCGv_i64 flag = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(flag, cpu_CF);
    tcg_gen_add_i64(dest, t0, t1);
    tcg_gen_add_i64(dest, dest, flag);
    tcg_temp_free_i64(flag);

    if (!sf) {
        tcg_gen_ext32u_i64(dest, dest);
    }
}

/* dest = T0 + T1 + CF; compute C, N, V and Z flags. */
static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        TCGv_i64 result, cf_64, vf_64, tmp;
        result = tcg_temp_new_i64();
        cf_64 = tcg_temp_new_i64();
        vf_64 = tcg_temp_new_i64();
        tmp = tcg_const_i64(0);

        tcg_gen_extu_i32_i64(cf_64, cpu_CF);
        tcg_gen_add2_i64(result, cf_64, t0, tmp, cf_64, tmp);
        tcg_gen_add2_i64(result, cf_64, result, cf_64, t1, tmp);
        tcg_gen_extrl_i64_i32(cpu_CF, cf_64);
        gen_set_NZ64(result);

        tcg_gen_xor_i64(vf_64, result, t0);
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_andc_i64(vf_64, vf_64, tmp);
        tcg_gen_extrh_i64_i32(cpu_VF, vf_64);

        tcg_gen_mov_i64(dest, result);

        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(vf_64);
        tcg_temp_free_i64(cf_64);
        tcg_temp_free_i64(result);
    } else {
        TCGv_i32 t0_32, t1_32, tmp;
        t0_32 = tcg_temp_new_i32();
        t1_32 = tcg_temp_new_i32();
        tmp = tcg_const_i32(0);

        tcg_gen_extrl_i64_i32(t0_32, t0);
        tcg_gen_extrl_i64_i32(t1_32, t1);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1_32, tmp);

        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
        tcg_gen_extu_i32_i64(dest, cpu_NF);

        tcg_temp_free_i32(tmp);
        tcg_temp_free_i32(t1_32);
        tcg_temp_free_i32(t0_32);
    }
}

/*
 * Load/Store generators
 */

/*
 * Store from GPR register to memory.
 */
static void do_gpr_st_memidx(DisasContext *s, TCGv_i64 source,
                             TCGv_i64 tcg_addr, int size, int memidx,
                             bool iss_valid,
                             unsigned int iss_srt,
                             bool iss_sf, bool iss_ar)
{
    g_assert(size <= 3);
    tcg_gen_qemu_st_i64(source, tcg_addr, memidx, s->be_data + size);

    if (iss_valid) {
        uint32_t syn;

        syn = syn_data_abort_with_iss(0,
                                      size,
                                      false,
                                      iss_srt,
                                      iss_sf,
                                      iss_ar,
                                      0, 0, 0, 0, 0, false);
        disas_set_insn_syndrome(s, syn);
    }
}

static void do_gpr_st(DisasContext *s, TCGv_i64 source,
                      TCGv_i64 tcg_addr, int size,
                      bool iss_valid,
                      unsigned int iss_srt,
                      bool iss_sf, bool iss_ar)
{
    do_gpr_st_memidx(s, source, tcg_addr, size, get_mem_index(s),
                     iss_valid, iss_srt, iss_sf, iss_ar);
}

/*
 * Load from memory to GPR register
 */
static void do_gpr_ld_memidx(DisasContext *s,
                             TCGv_i64 dest, TCGv_i64 tcg_addr,
                             int size, bool is_signed,
                             bool extend, int memidx,
                             bool iss_valid, unsigned int iss_srt,
                             bool iss_sf, bool iss_ar)
{
    TCGMemOp memop = s->be_data + size;

    g_assert(size <= 3);

    if (is_signed) {
        memop += MO_SIGN;
    }

    tcg_gen_qemu_ld_i64(dest, tcg_addr, memidx, memop);

    if (extend && is_signed) {
        g_assert(size < 3);
        tcg_gen_ext32u_i64(dest, dest);
    }

    if (iss_valid) {
        uint32_t syn;

        syn = syn_data_abort_with_iss(0,
                                      size,
                                      is_signed,
                                      iss_srt,
                                      iss_sf,
                                      iss_ar,
                                      0, 0, 0, 0, 0, false);
        disas_set_insn_syndrome(s, syn);
    }
}

static void do_gpr_ld(DisasContext *s,
                      TCGv_i64 dest, TCGv_i64 tcg_addr,
                      int size, bool is_signed, bool extend,
                      bool iss_valid, unsigned int iss_srt,
                      bool iss_sf, bool iss_ar)
{
    do_gpr_ld_memidx(s, dest, tcg_addr, size, is_signed, extend,
                     get_mem_index(s),
                     iss_valid, iss_srt, iss_sf, iss_ar);
}

/*
 * Store from FP register to memory
 */
static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, int size)
{
    /* This writes the bottom N bits of a 128 bit wide vector to memory */
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_ld_i64(tmp, cpu_env, fp_reg_offset(s, srcidx, MO_64));
    if (size < 4) {
        tcg_gen_qemu_st_i64(tmp, tcg_addr, get_mem_index(s),
                            s->be_data + size);
    } else {
        bool be = s->be_data == MO_BE;
        TCGv_i64 tcg_hiaddr = tcg_temp_new_i64();

        tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
        tcg_gen_qemu_st_i64(tmp, be ? tcg_hiaddr : tcg_addr, get_mem_index(s),
                            s->be_data | MO_Q);
        tcg_gen_ld_i64(tmp, cpu_env, fp_reg_hi_offset(s, srcidx));
        tcg_gen_qemu_st_i64(tmp, be ? tcg_addr : tcg_hiaddr, get_mem_index(s),
                            s->be_data | MO_Q);
        tcg_temp_free_i64(tcg_hiaddr);
    }

    tcg_temp_free_i64(tmp);
}
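/* In the 128 bit case above the two 64 bit halves are laid out so the
 * guest sees architectural byte order: with s->be_data == MO_BE the high
 * half goes to the lower address and the low half to addr + 8, the
 * reverse of the little-endian layout.
 */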

/*
 * Load from memory to FP register
 */
static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size)
{
    /* This always zero-extends and writes to a full 128 bit wide vector */
    TCGv_i64 tmplo = tcg_temp_new_i64();
    TCGv_i64 tmphi;

    if (size < 4) {
        TCGMemOp memop = s->be_data + size;
        tmphi = tcg_const_i64(0);
        tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), memop);
    } else {
        bool be = s->be_data == MO_BE;
        TCGv_i64 tcg_hiaddr;

        tmphi = tcg_temp_new_i64();
        tcg_hiaddr = tcg_temp_new_i64();

        tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
        tcg_gen_qemu_ld_i64(tmplo, be ? tcg_hiaddr : tcg_addr, get_mem_index(s),
                            s->be_data | MO_Q);
        tcg_gen_qemu_ld_i64(tmphi, be ? tcg_addr : tcg_hiaddr, get_mem_index(s),
                            s->be_data | MO_Q);
        tcg_temp_free_i64(tcg_hiaddr);
    }

    tcg_gen_st_i64(tmplo, cpu_env, fp_reg_offset(s, destidx, MO_64));
    tcg_gen_st_i64(tmphi, cpu_env, fp_reg_hi_offset(s, destidx));

    tcg_temp_free_i64(tmplo);
    tcg_temp_free_i64(tmphi);
}

/*
 * Vector load/store helpers.
 *
 * The principal difference between this and a FP load is that we don't
 * zero extend as we are filling a partial chunk of the vector register.
 * These functions don't support 128 bit loads/stores, which would be
 * normal load/store operations.
 *
 * The _i32 versions are useful when operating on 32 bit quantities
 * (eg for floating point single or using Neon helper functions).
 */

/* Get value of an element within a vector register */
static void read_vec_element(DisasContext *s, TCGv_i64 tcg_dest, int srcidx,
                             int element, TCGMemOp memop)
{
    int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_ld8u_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_ld16u_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_32:
        tcg_gen_ld32u_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_8|MO_SIGN:
        tcg_gen_ld8s_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16|MO_SIGN:
        tcg_gen_ld16s_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_32|MO_SIGN:
        tcg_gen_ld32s_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_64:
    case MO_64|MO_SIGN:
        tcg_gen_ld_i64(tcg_dest, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}

static void read_vec_element_i32(DisasContext *s, TCGv_i32 tcg_dest, int srcidx,
                                 int element, TCGMemOp memop)
{
    int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_ld8u_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_ld16u_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_8|MO_SIGN:
        tcg_gen_ld8s_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16|MO_SIGN:
        tcg_gen_ld16s_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_32:
    case MO_32|MO_SIGN:
        tcg_gen_ld_i32(tcg_dest, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}

/* Set value of an element within a vector register */
static void write_vec_element(DisasContext *s, TCGv_i64 tcg_src, int destidx,
                              int element, TCGMemOp memop)
{
    int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_st8_i64(tcg_src, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_st16_i64(tcg_src, cpu_env, vect_off);
        break;
    case MO_32:
        tcg_gen_st32_i64(tcg_src, cpu_env, vect_off);
        break;
    case MO_64:
        tcg_gen_st_i64(tcg_src, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}

static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src,
                                  int destidx, int element, TCGMemOp memop)
{
    int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_st8_i32(tcg_src, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_st16_i32(tcg_src, cpu_env, vect_off);
        break;
    case MO_32:
        tcg_gen_st_i32(tcg_src, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}

/* Clear the high 64 bits of a 128 bit vector (in general non-quad
 * vector ops all need to do this).
 */
static void clear_vec_high(DisasContext *s, int rd)
{
    TCGv_i64 tcg_zero = tcg_const_i64(0);

    write_vec_element(s, tcg_zero, rd, 1, MO_64);
    tcg_temp_free_i64(tcg_zero);
}

/* Store from vector register to memory */
static void do_vec_st(DisasContext *s, int srcidx, int element,
                      TCGv_i64 tcg_addr, int size)
{
    TCGMemOp memop = s->be_data + size;
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();

    read_vec_element(s, tcg_tmp, srcidx, element, size);
    tcg_gen_qemu_st_i64(tcg_tmp, tcg_addr, get_mem_index(s), memop);

    tcg_temp_free_i64(tcg_tmp);
}

/* Load from memory to vector register */
static void do_vec_ld(DisasContext *s, int destidx, int element,
                      TCGv_i64 tcg_addr, int size)
{
    TCGMemOp memop = s->be_data + size;
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();

    tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr, get_mem_index(s), memop);
    write_vec_element(s, tcg_tmp, destidx, element, size);

    tcg_temp_free_i64(tcg_tmp);
}

/* Check that FP/Neon access is enabled. If it is, return
 * true. If not, emit code to generate an appropriate exception,
 * and return false; the caller should not emit any code for
 * the instruction. Note that this check must happen after all
 * unallocated-encoding checks (otherwise the syndrome information
 * for the resulting exception will be incorrect).
 */
static inline bool fp_access_check(DisasContext *s)
{
    assert(!s->fp_access_checked);
    s->fp_access_checked = true;

    if (!s->fp_excp_el) {
        return true;
    }

    gen_exception_insn(s, 4, EXCP_UDEF, syn_fp_access_trap(1, 0xe, false),
                       s->fp_excp_el);
    return false;
}
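/* Typical call pattern in the decode functions below (illustrative):
 *
 *     if (!fp_access_check(s)) {
 *         return;
 *     }
 *     ... emit TCG ops that touch FP/vector registers ...
 */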

/*
 * This utility function is for doing register extension with an
 * optional shift. You will likely want to pass a temporary for the
 * destination register. See DecodeRegExtend() in the ARM ARM.
 */
static void ext_and_shift_reg(TCGv_i64 tcg_out, TCGv_i64 tcg_in,
                              int option, unsigned int shift)
{
    int extsize = extract32(option, 0, 2);
    bool is_signed = extract32(option, 2, 1);

    if (is_signed) {
        switch (extsize) {
        case 0:
            tcg_gen_ext8s_i64(tcg_out, tcg_in);
            break;
        case 1:
            tcg_gen_ext16s_i64(tcg_out, tcg_in);
            break;
        case 2:
            tcg_gen_ext32s_i64(tcg_out, tcg_in);
            break;
        case 3:
            tcg_gen_mov_i64(tcg_out, tcg_in);
            break;
        }
    } else {
        switch (extsize) {
        case 0:
            tcg_gen_ext8u_i64(tcg_out, tcg_in);
            break;
        case 1:
            tcg_gen_ext16u_i64(tcg_out, tcg_in);
            break;
        case 2:
            tcg_gen_ext32u_i64(tcg_out, tcg_in);
            break;
        case 3:
            tcg_gen_mov_i64(tcg_out, tcg_in);
            break;
        }
    }

    if (shift) {
        tcg_gen_shli_i64(tcg_out, tcg_out, shift);
    }
}
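/* Example (illustrative): option == 2 (UXTW) with shift == 2 zero-extends
 * the low 32 bits of tcg_in and multiplies by 4, the scaled-index form
 * used by register-offset loads and stores; option == 6 (SXTW) is the
 * sign-extending equivalent.
 */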

static inline void gen_check_sp_alignment(DisasContext *s)
{
    /* The AArch64 architecture mandates that (if enabled via PSTATE
     * or SCTLR bits) there is a check that SP is 16-aligned on every
     * SP-relative load or store (with an exception generated if it is not).
     * In line with general QEMU practice regarding misaligned accesses,
     * we omit these checks for the sake of guest program performance.
     * This function is provided as a hook so we can more easily add these
     * checks in future (possibly as a "favour catching guest program bugs
     * over speed" user selectable option).
     */
}

/*
 * This provides a simple table-based lookup decoder. It is intended
 * to be used when the relevant bits for decode are too awkwardly
 * placed and switch/if based logic would be confusing and deeply
 * nested. Since it's a linear search through the table, tables
 * should be kept small.
 *
 * It returns the first handler where insn & mask == pattern, or
 * NULL if there is no match.
 * The table is terminated by an empty mask (i.e. 0)
 */
static inline AArch64DecodeFn *lookup_disas_fn(const AArch64DecodeTable *table,
                                               uint32_t insn)
{
    const AArch64DecodeTable *tptr = table;

    while (tptr->mask) {
        if ((insn & tptr->mask) == tptr->pattern) {
            return tptr->disas_fn;
        }
        tptr++;
    }
    return NULL;
}
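/* Usage sketch (table contents hypothetical): a decode table pairs a
 * pattern with the mask of decisive bits, terminated by a zero mask:
 *
 *     static const AArch64DecodeTable example_table[] = {
 *         { 0x0e200400, 0x9f200400, disas_example_fn },
 *         { 0x00000000, 0x00000000, NULL }
 *     };
 *     AArch64DecodeFn *fn = lookup_disas_fn(&example_table[0], insn);
 *     if (fn) {
 *         fn(s, insn);
 *     }
 */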

/*
 * the instruction disassembly implemented here matches
 * the instruction encoding classifications in chapter 3 (C3)
 * of the ARM Architecture Reference Manual (DDI0487A_a)
 */

/* C3.2.7 Unconditional branch (immediate)
 *   31  30       26 25                                  0
 * +----+-----------+-------------------------------------+
 * | op | 0 0 1 0 1 |                 imm26               |
 * +----+-----------+-------------------------------------+
 */
static void disas_uncond_b_imm(DisasContext *s, uint32_t insn)
{
    uint64_t addr = s->pc + sextract32(insn, 0, 26) * 4 - 4;

    if (insn & (1U << 31)) {
        /* C5.6.26 BL Branch with link */
        tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
    }

    /* C5.6.20 B Branch / C5.6.26 BL Branch with link */
    gen_goto_tb(s, 0, addr);
}

/* C3.2.1 Compare & branch (immediate)
 *   31  30         25  24  23                  5 4      0
 * +----+-------------+----+---------------------+--------+
 * | sf | 0 1 1 0 1 0 | op |         imm19       |   Rt   |
 * +----+-------------+----+---------------------+--------+
 */
static void disas_comp_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int sf, op, rt;
    uint64_t addr;
    TCGLabel *label_match;
    TCGv_i64 tcg_cmp;

    sf = extract32(insn, 31, 1);
    op = extract32(insn, 24, 1); /* 0: CBZ; 1: CBNZ */
    rt = extract32(insn, 0, 5);
    addr = s->pc + sextract32(insn, 5, 19) * 4 - 4;

    tcg_cmp = read_cpu_reg(s, rt, sf);
    label_match = gen_new_label();

    tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
                        tcg_cmp, 0, label_match);

    gen_goto_tb(s, 0, s->pc);
    gen_set_label(label_match);
    gen_goto_tb(s, 1, addr);
}

/* C3.2.5 Test & branch (immediate)
 *   31  30         25  24  23   19 18          5 4    0
 * +----+-------------+----+-------+-------------+------+
 * | b5 | 0 1 1 0 1 1 | op |  b40  |    imm14    |  Rt  |
 * +----+-------------+----+-------+-------------+------+
 */
static void disas_test_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int bit_pos, op, rt;
    uint64_t addr;
    TCGLabel *label_match;
    TCGv_i64 tcg_cmp;

    bit_pos = (extract32(insn, 31, 1) << 5) | extract32(insn, 19, 5);
    op = extract32(insn, 24, 1); /* 0: TBZ; 1: TBNZ */
    addr = s->pc + sextract32(insn, 5, 14) * 4 - 4;
    rt = extract32(insn, 0, 5);

    tcg_cmp = tcg_temp_new_i64();
    tcg_gen_andi_i64(tcg_cmp, cpu_reg(s, rt), (1ULL << bit_pos));
    label_match = gen_new_label();
    tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
                        tcg_cmp, 0, label_match);
    tcg_temp_free_i64(tcg_cmp);
    gen_goto_tb(s, 0, s->pc);
    gen_set_label(label_match);
    gen_goto_tb(s, 1, addr);
}
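/* Example (illustrative): TBZ x3, #37, <label> encodes b5 == 1 and
 * b40 == 5, so bit_pos == (1 << 5) | 5 == 37 and the generated code
 * tests x3 & (1ULL << 37), taking the branch when the bit is zero.
 */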

/* C3.2.2 / C5.6.19 Conditional branch (immediate)
 *  31           25  24  23                  5   4  3    0
 * +---------------+----+---------------------+----+------+
 * | 0 1 0 1 0 1 0 | o1 |         imm19       | o0 | cond |
 * +---------------+----+---------------------+----+------+
 */
static void disas_cond_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int cond;
    uint64_t addr;

    if ((insn & (1 << 4)) || (insn & (1 << 24))) {
        unallocated_encoding(s);
        return;
    }
    addr = s->pc + sextract32(insn, 5, 19) * 4 - 4;
    cond = extract32(insn, 0, 4);

    if (cond < 0x0e) {
        /* genuinely conditional branches */
        TCGLabel *label_match = gen_new_label();
        arm_gen_test_cc(cond, label_match);
        gen_goto_tb(s, 0, s->pc);
        gen_set_label(label_match);
        gen_goto_tb(s, 1, addr);
    } else {
        /* 0xe and 0xf are both "always" conditions */
        gen_goto_tb(s, 0, addr);
    }
}

/* C5.6.68 HINT */
static void handle_hint(DisasContext *s, uint32_t insn,
                        unsigned int op1, unsigned int op2, unsigned int crm)
{
    unsigned int selector = crm << 3 | op2;

    if (op1 != 3) {
        unallocated_encoding(s);
        return;
    }

    switch (selector) {
    case 0: /* NOP */
        return;
    case 3: /* WFI */
        s->is_jmp = DISAS_WFI;
        return;
    case 1: /* YIELD */
        if (!parallel_cpus) {
            s->is_jmp = DISAS_YIELD;
        }
        return;
    case 2: /* WFE */
        if (!parallel_cpus) {
            s->is_jmp = DISAS_WFE;
        }
        return;
    case 4: /* SEV */
    case 5: /* SEVL */
        /* we treat all as NOP at least for now */
        return;
    default:
        /* default specified as NOP equivalent */
        return;
    }
}

static void gen_clrex(DisasContext *s, uint32_t insn)
{
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}

/* CLREX, DSB, DMB, ISB */
static void handle_sync(DisasContext *s, uint32_t insn,
                        unsigned int op1, unsigned int op2, unsigned int crm)
{
    TCGBar bar;

    if (op1 != 3) {
        unallocated_encoding(s);
        return;
    }

    switch (op2) {
    case 2: /* CLREX */
        gen_clrex(s, insn);
        return;
    case 4: /* DSB */
    case 5: /* DMB */
        switch (crm & 3) {
        case 1: /* MBReqTypes_Reads */
            bar = TCG_BAR_SC | TCG_MO_LD_LD | TCG_MO_LD_ST;
            break;
        case 2: /* MBReqTypes_Writes */
            bar = TCG_BAR_SC | TCG_MO_ST_ST;
            break;
        default: /* MBReqTypes_All */
            bar = TCG_BAR_SC | TCG_MO_ALL;
            break;
        }
        tcg_gen_mb(bar);
        return;
    case 6: /* ISB */
        /* We need to break the TB after this insn to execute
         * self-modifying code correctly and also to take
         * any pending interrupts immediately.
         */
        gen_goto_tb(s, 0, s->pc);
        return;
    default:
        unallocated_encoding(s);
        return;
    }
}

/* C5.6.130 MSR (immediate) - move immediate to processor state field */
static void handle_msr_i(DisasContext *s, uint32_t insn,
                         unsigned int op1, unsigned int op2, unsigned int crm)
{
    int op = op1 << 3 | op2;
    switch (op) {
    case 0x05: /* SPSel */
        if (s->current_el == 0) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x1e: /* DAIFSet */
    case 0x1f: /* DAIFClear */
    {
        TCGv_i32 tcg_imm = tcg_const_i32(crm);
        TCGv_i32 tcg_op = tcg_const_i32(op);
        gen_a64_set_pc_im(s->pc - 4);
        gen_helper_msr_i_pstate(cpu_env, tcg_op, tcg_imm);
        tcg_temp_free_i32(tcg_imm);
        tcg_temp_free_i32(tcg_op);
        /* For DAIFClear, exit the cpu loop to re-evaluate pending IRQs.  */
        gen_a64_set_pc_im(s->pc);
        s->is_jmp = (op == 0x1f ? DISAS_EXIT : DISAS_JUMP);
        break;
    }
    default:
        unallocated_encoding(s);
        return;
    }
}

static void gen_get_nzcv(TCGv_i64 tcg_rt)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    TCGv_i32 nzcv = tcg_temp_new_i32();

    /* build bit 31, N */
    tcg_gen_andi_i32(nzcv, cpu_NF, (1U << 31));
    /* build bit 30, Z */
    tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_ZF, 0);
    tcg_gen_deposit_i32(nzcv, nzcv, tmp, 30, 1);
    /* build bit 29, C */
    tcg_gen_deposit_i32(nzcv, nzcv, cpu_CF, 29, 1);
    /* build bit 28, V */
    tcg_gen_shri_i32(tmp, cpu_VF, 31);
    tcg_gen_deposit_i32(nzcv, nzcv, tmp, 28, 1);
    /* generate result */
    tcg_gen_extu_i32_i64(tcg_rt, nzcv);

    tcg_temp_free_i32(nzcv);
    tcg_temp_free_i32(tmp);
}

static void gen_set_nzcv(TCGv_i64 tcg_rt)
{
    TCGv_i32 nzcv = tcg_temp_new_i32();

    /* take NZCV from R[t] */
    tcg_gen_extrl_i64_i32(nzcv, tcg_rt);

    /* bit 31, N */
    tcg_gen_andi_i32(cpu_NF, nzcv, (1U << 31));
    /* bit 30, Z */
    tcg_gen_andi_i32(cpu_ZF, nzcv, (1 << 30));
    tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_ZF, cpu_ZF, 0);
    /* bit 29, C */
    tcg_gen_andi_i32(cpu_CF, nzcv, (1 << 29));
    tcg_gen_shri_i32(cpu_CF, cpu_CF, 29);
    /* bit 28, V */
    tcg_gen_andi_i32(cpu_VF, nzcv, (1 << 28));
    tcg_gen_shli_i32(cpu_VF, cpu_VF, 3);
    tcg_temp_free_i32(nzcv);
}
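/* Both helpers above use the architected NZCV layout: N in bit 31,
 * Z in bit 30, C in bit 29 and V in bit 28 of the transferred value,
 * with the remaining bits read-as-zero / ignored-on-write.
 */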
1479
1480/* C5.6.129 MRS - move from system register
1481 * C5.6.131 MSR (register) - move to system register
1482 * C5.6.204 SYS
1483 * C5.6.205 SYSL
1484 * These are all essentially the same insn in 'read' and 'write'
1485 * versions, with varying op0 fields.
1486 */
1487static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
1488                       unsigned int op0, unsigned int op1, unsigned int op2,
1489                       unsigned int crn, unsigned int crm, unsigned int rt)
1490{
1491    const ARMCPRegInfo *ri;
1492    TCGv_i64 tcg_rt;
1493
1494    ri = get_arm_cp_reginfo(s->cp_regs,
1495                            ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP,
1496                                               crn, crm, op0, op1, op2));
1497
1498    if (!ri) {
1499        /* Unknown register; this might be a guest error or a QEMU
1500         * unimplemented feature.
1501         */
1502        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch64 "
1503                      "system register op0:%d op1:%d crn:%d crm:%d op2:%d\n",
1504                      isread ? "read" : "write", op0, op1, crn, crm, op2);
1505        unallocated_encoding(s);
1506        return;
1507    }
1508
1509    /* Check access permissions */
1510    if (!cp_access_ok(s->current_el, ri, isread)) {
1511        unallocated_encoding(s);
1512        return;
1513    }
1514
1515    if (ri->accessfn) {
1516        /* Emit code to perform further access permissions checks at
1517         * runtime; this may result in an exception.
1518         */
1519        TCGv_ptr tmpptr;
1520        TCGv_i32 tcg_syn, tcg_isread;
1521        uint32_t syndrome;
1522
1523        gen_a64_set_pc_im(s->pc - 4);
1524        tmpptr = tcg_const_ptr(ri);
1525        syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
1526        tcg_syn = tcg_const_i32(syndrome);
1527        tcg_isread = tcg_const_i32(isread);
1528        gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn, tcg_isread);
1529        tcg_temp_free_ptr(tmpptr);
1530        tcg_temp_free_i32(tcg_syn);
1531        tcg_temp_free_i32(tcg_isread);
1532    }
1533
1534    /* Handle special cases first */
1535    switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
1536    case ARM_CP_NOP:
1537        return;
1538    case ARM_CP_NZCV:
1539        tcg_rt = cpu_reg(s, rt);
1540        if (isread) {
1541            gen_get_nzcv(tcg_rt);
1542        } else {
1543            gen_set_nzcv(tcg_rt);
1544        }
1545        return;
1546    case ARM_CP_CURRENTEL:
1547        /* Reads as current EL value from pstate, which is
1548         * guaranteed to be constant by the tb flags.
1549         */
1550        tcg_rt = cpu_reg(s, rt);
1551        tcg_gen_movi_i64(tcg_rt, s->current_el << 2);
1552        return;
1553    case ARM_CP_DC_ZVA:
1554        /* Writes clear the aligned block of memory which rt points into. */
1555        tcg_rt = cpu_reg(s, rt);
1556        gen_helper_dc_zva(cpu_env, tcg_rt);
1557        return;
1558    default:
1559        break;
1560    }
1561
1562    if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
1563        gen_io_start();
1564    }
1565
1566    tcg_rt = cpu_reg(s, rt);
1567
1568    if (isread) {
1569        if (ri->type & ARM_CP_CONST) {
1570            tcg_gen_movi_i64(tcg_rt, ri->resetvalue);
1571        } else if (ri->readfn) {
1572            TCGv_ptr tmpptr;
1573            tmpptr = tcg_const_ptr(ri);
1574            gen_helper_get_cp_reg64(tcg_rt, cpu_env, tmpptr);
1575            tcg_temp_free_ptr(tmpptr);
1576        } else {
1577            tcg_gen_ld_i64(tcg_rt, cpu_env, ri->fieldoffset);
1578        }
1579    } else {
1580        if (ri->type & ARM_CP_CONST) {
1581            /* If not forbidden by access permissions, treat as WI */
1582            return;
1583        } else if (ri->writefn) {
1584            TCGv_ptr tmpptr;
1585            tmpptr = tcg_const_ptr(ri);
1586            gen_helper_set_cp_reg64(cpu_env, tmpptr, tcg_rt);
1587            tcg_temp_free_ptr(tmpptr);
1588        } else {
1589            tcg_gen_st_i64(tcg_rt, cpu_env, ri->fieldoffset);
1590        }
1591    }
1592
1593    if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
1594        /* I/O operations must end the TB here (whether read or write) */
1595        gen_io_end();
1596        s->is_jmp = DISAS_UPDATE;
1597    } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
1598        /* We default to ending the TB on a coprocessor register write,
1599         * but allow this to be suppressed by the register definition
1600         * (usually only necessary to work around guest bugs).
1601         */
1602        s->is_jmp = DISAS_UPDATE;
1603    }
1604}
1605
1606/* C3.2.4 System
1607 *  31                 22 21  20 19 18 16 15   12 11    8 7   5 4    0
1608 * +---------------------+---+-----+-----+-------+-------+-----+------+
1609 * | 1 1 0 1 0 1 0 1 0 0 | L | op0 | op1 |  CRn  |  CRm  | op2 |  Rt  |
1610 * +---------------------+---+-----+-----+-------+-------+-----+------+
1611 */
1612static void disas_system(DisasContext *s, uint32_t insn)
1613{
1614    unsigned int l, op0, op1, crn, crm, op2, rt;
1615    l = extract32(insn, 21, 1);
1616    op0 = extract32(insn, 19, 2);
1617    op1 = extract32(insn, 16, 3);
1618    crn = extract32(insn, 12, 4);
1619    crm = extract32(insn, 8, 4);
1620    op2 = extract32(insn, 5, 3);
1621    rt = extract32(insn, 0, 5);
1622
1623    if (op0 == 0) {
1624        if (l || rt != 31) {
1625            unallocated_encoding(s);
1626            return;
1627        }
1628        switch (crn) {
1629        case 2: /* C5.6.68 HINT */
1630            handle_hint(s, insn, op1, op2, crm);
1631            break;
1632        case 3: /* CLREX, DSB, DMB, ISB */
1633            handle_sync(s, insn, op1, op2, crm);
1634            break;
1635        case 4: /* C5.6.130 MSR (immediate) */
1636            handle_msr_i(s, insn, op1, op2, crm);
1637            break;
1638        default:
1639            unallocated_encoding(s);
1640            break;
1641        }
1642        return;
1643    }
1644    handle_sys(s, insn, l, op0, op1, op2, crn, crm, rt);
1645}
1646
1647/* C3.2.3 Exception generation
1648 *
1649 *  31             24 23 21 20                     5 4   2 1  0
1650 * +-----------------+-----+------------------------+-----+----+
1651 * | 1 1 0 1 0 1 0 0 | opc |          imm16         | op2 | LL |
1652 * +-----------------+-----+------------------------+-----+----+
1653 */
1654static void disas_exc(DisasContext *s, uint32_t insn)
1655{
1656    int opc = extract32(insn, 21, 3);
1657    int op2_ll = extract32(insn, 0, 5);
1658    int imm16 = extract32(insn, 5, 16);
1659    TCGv_i32 tmp;
1660
1661    switch (opc) {
1662    case 0:
1663        /* For SVC, HVC and SMC we advance the single-step state
1664         * machine before taking the exception. This is architecturally
1665         * mandated, to ensure that single-stepping a system call
1666         * instruction works properly.
1667         */
1668        switch (op2_ll) {
1669        case 1:                                                     /* SVC */
1670            gen_ss_advance(s);
1671            gen_exception_insn(s, 0, EXCP_SWI, syn_aa64_svc(imm16),
1672                               default_exception_el(s));
1673            break;
1674        case 2:                                                     /* HVC */
1675            if (s->current_el == 0) {
1676                unallocated_encoding(s);
1677                break;
1678            }
1679            /* The pre HVC helper handles cases when HVC gets trapped
1680             * as an undefined insn by runtime configuration.
1681             */
1682            gen_a64_set_pc_im(s->pc - 4);
1683            gen_helper_pre_hvc(cpu_env);
1684            gen_ss_advance(s);
1685            gen_exception_insn(s, 0, EXCP_HVC, syn_aa64_hvc(imm16), 2);
1686            break;
1687        case 3:                                                     /* SMC */
1688            if (s->current_el == 0) {
1689                unallocated_encoding(s);
1690                break;
1691            }
1692            gen_a64_set_pc_im(s->pc - 4);
1693            tmp = tcg_const_i32(syn_aa64_smc(imm16));
1694            gen_helper_pre_smc(cpu_env, tmp);
1695            tcg_temp_free_i32(tmp);
1696            gen_ss_advance(s);
1697            gen_exception_insn(s, 0, EXCP_SMC, syn_aa64_smc(imm16), 3);
1698            break;
1699        default:
1700            unallocated_encoding(s);
1701            break;
1702        }
1703        break;
1704    case 1:
1705        if (op2_ll != 0) {
1706            unallocated_encoding(s);
1707            break;
1708        }
1709        /* BRK */
1710        gen_exception_insn(s, 4, EXCP_BKPT, syn_aa64_bkpt(imm16),
1711                           default_exception_el(s));
1712        break;
1713    case 2:
1714        if (op2_ll != 0) {
1715            unallocated_encoding(s);
1716            break;
1717        }
1718        /* HLT. This has two purposes.
1719         * Architecturally, it is an external halting debug instruction.
1720         * Since QEMU doesn't implement external debug, we treat this as
1721         * the architecture requires when halting debug is disabled: it will UNDEF.
1722         * Secondly, "HLT 0xf000" is the A64 semihosting syscall instruction.
1723         */
1724        if (semihosting_enabled() && imm16 == 0xf000) {
1725#ifndef CONFIG_USER_ONLY
1726            /* In system mode, don't allow userspace access to semihosting,
1727             * to provide some semblance of security (and for consistency
1728             * with our 32-bit semihosting).
1729             */
1730            if (s->current_el == 0) {
1731                unsupported_encoding(s, insn);
1732                break;
1733            }
1734#endif
1735            gen_exception_internal_insn(s, 0, EXCP_SEMIHOST);
1736        } else {
1737            unsupported_encoding(s, insn);
1738        }
1739        break;
1740    case 5:
1741        if (op2_ll < 1 || op2_ll > 3) {
1742            unallocated_encoding(s);
1743            break;
1744        }
1745        /* DCPS1, DCPS2, DCPS3 */
1746        unsupported_encoding(s, insn);
1747        break;
1748    default:
1749        unallocated_encoding(s);
1750        break;
1751    }
1752}
1753
1754/* C3.2.7 Unconditional branch (register)
1755 *  31           25 24   21 20   16 15   10 9    5 4     0
1756 * +---------------+-------+-------+-------+------+-------+
1757 * | 1 1 0 1 0 1 1 |  opc  |  op2  |  op3  |  Rn  |  op4  |
1758 * +---------------+-------+-------+-------+------+-------+
1759 */
1760static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
1761{
1762    unsigned int opc, op2, op3, rn, op4;
1763
1764    opc = extract32(insn, 21, 4);
1765    op2 = extract32(insn, 16, 5);
1766    op3 = extract32(insn, 10, 6);
1767    rn = extract32(insn, 5, 5);
1768    op4 = extract32(insn, 0, 5);
1769
1770    if (op4 != 0x0 || op3 != 0x0 || op2 != 0x1f) {
1771        unallocated_encoding(s);
1772        return;
1773    }
1774
1775    switch (opc) {
1776    case 0: /* BR */
1777    case 1: /* BLR */
1778    case 2: /* RET */
1779        gen_a64_set_pc(s, cpu_reg(s, rn));
1780        /* BLR also needs to load return address */
1781        if (opc == 1) {
1782            tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
1783        }
1784        break;
1785    case 4: /* ERET */
1786        if (s->current_el == 0) {
1787            unallocated_encoding(s);
1788            return;
1789        }
1790        gen_helper_exception_return(cpu_env);
1791        /* Must exit loop to check unmasked IRQs */
1792        s->is_jmp = DISAS_EXIT;
1793        return;
1794    case 5: /* DRPS */
1795        if (rn != 0x1f) {
1796            unallocated_encoding(s);
1797        } else {
1798            unsupported_encoding(s, insn);
1799        }
1800        return;
1801    default:
1802        unallocated_encoding(s);
1803        return;
1804    }
1805
1806    s->is_jmp = DISAS_JUMP;
1807}
1808
1809/* C3.2 Branches, exception generating and system instructions */
1810static void disas_b_exc_sys(DisasContext *s, uint32_t insn)
1811{
1812    switch (extract32(insn, 25, 7)) {
1813    case 0x0a: case 0x0b:
1814    case 0x4a: case 0x4b: /* Unconditional branch (immediate) */
1815        disas_uncond_b_imm(s, insn);
1816        break;
1817    case 0x1a: case 0x5a: /* Compare & branch (immediate) */
1818        disas_comp_b_imm(s, insn);
1819        break;
1820    case 0x1b: case 0x5b: /* Test & branch (immediate) */
1821        disas_test_b_imm(s, insn);
1822        break;
1823    case 0x2a: /* Conditional branch (immediate) */
1824        disas_cond_b_imm(s, insn);
1825        break;
1826    case 0x6a: /* Exception generation / System */
1827        if (insn & (1 << 24)) {
1828            disas_system(s, insn);
1829        } else {
1830            disas_exc(s, insn);
1831        }
1832        break;
1833    case 0x6b: /* Unconditional branch (register) */
1834        disas_uncond_b_reg(s, insn);
1835        break;
1836    default:
1837        unallocated_encoding(s);
1838        break;
1839    }
1840}
1841
1842/*
1843 * Load/Store exclusive instructions are implemented by remembering
1844 * the value/address loaded, and seeing if these are the same
1845 * when the store is performed. This does not match the architecturally
1846 * mandated semantics exactly, but it works for typical guest code sequences
1847 * and avoids having to monitor regular stores.
1848 *
1849 * The store exclusive uses the atomic cmpxchg primitives to avoid
1850 * races in multi-threaded linux-user and when MTTCG softmmu is
1851 * enabled.
1852 */
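    /* For example, a typical guest atomic-increment sequence:
     *
     *   retry:
     *     ldxr  x0, [x1]        ; record [x1] and the loaded value
     *     add   x0, x0, #1
     *     stxr  w2, x0, [x1]    ; cmpxchg succeeds only if [x1] is unchanged
     *     cbnz  w2, retry
     *
     * behaves correctly under this scheme: the store-exclusive fails
     * whenever another CPU has modified the value in the meantime.
     */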
1853static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
1854                               TCGv_i64 addr, int size, bool is_pair)
1855{
1856    int idx = get_mem_index(s);
1857    TCGMemOp memop = s->be_data;
1858
1859    g_assert(size <= 3);
1860    if (is_pair) {
1861        g_assert(size >= 2);
1862        if (size == 2) {
1863            /* The pair must be single-copy atomic for the doubleword.  */
1864            memop |= MO_64 | MO_ALIGN;
1865            tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx, memop);
1866            if (s->be_data == MO_LE) {
1867                tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 0, 32);
1868                tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 32, 32);
1869            } else {
1870                tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 32, 32);
1871                tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 0, 32);
1872            }
1873        } else {
1874            /* The pair must be single-copy atomic for *each* doubleword, not
1875               the entire quadword; however, it must be quadword aligned.  */
1876            memop |= MO_64;
1877            tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx,
1878                                memop | MO_ALIGN_16);
1879
1880            TCGv_i64 addr2 = tcg_temp_new_i64();
1881            tcg_gen_addi_i64(addr2, addr, 8);
1882            tcg_gen_qemu_ld_i64(cpu_exclusive_high, addr2, idx, memop);
1883            tcg_temp_free_i64(addr2);
1884
1885            tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val);
1886            tcg_gen_mov_i64(cpu_reg(s, rt2), cpu_exclusive_high);
1887        }
1888    } else {
1889        memop |= size | MO_ALIGN;
1890        tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx, memop);
1891        tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val);
1892    }
1893    tcg_gen_mov_i64(cpu_exclusive_addr, addr);
1894}
1895
1896static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
1897                                TCGv_i64 inaddr, int size, int is_pair)
1898{
1899    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]
1900     *     && (!is_pair || env->exclusive_high == [addr + datasize])) {
1901     *     [addr] = {Rt};
1902     *     if (is_pair) {
1903     *         [addr + datasize] = {Rt2};
1904     *     }
1905     *     {Rd} = 0;
1906     * } else {
1907     *     {Rd} = 1;
1908     * }
1909     * env->exclusive_addr = -1;
1910     */
1911    TCGLabel *fail_label = gen_new_label();
1912    TCGLabel *done_label = gen_new_label();
1913    TCGv_i64 addr = tcg_temp_local_new_i64();
1914    TCGv_i64 tmp;
1915
1916    /* Copy input into a local temp so it is not trashed when the
1917     * basic block ends at the branch insn.
1918     */
1919    tcg_gen_mov_i64(addr, inaddr);
1920    tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
1921
1922    tmp = tcg_temp_new_i64();
1923    if (is_pair) {
1924        if (size == 2) {
1925            if (s->be_data == MO_LE) {
1926                tcg_gen_concat32_i64(tmp, cpu_reg(s, rt), cpu_reg(s, rt2));
1927            } else {
1928                tcg_gen_concat32_i64(tmp, cpu_reg(s, rt2), cpu_reg(s, rt));
1929            }
1930            tcg_gen_atomic_cmpxchg_i64(tmp, addr, cpu_exclusive_val, tmp,
1931                                       get_mem_index(s),
1932                                       MO_64 | MO_ALIGN | s->be_data);
1933            tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
1934        } else if (s->be_data == MO_LE) {
1935            gen_helper_paired_cmpxchg64_le(tmp, cpu_env, addr, cpu_reg(s, rt),
1936                                           cpu_reg(s, rt2));
1937        } else {
1938            gen_helper_paired_cmpxchg64_be(tmp, cpu_env, addr, cpu_reg(s, rt),
1939                                           cpu_reg(s, rt2));
1940        }
1941    } else {
1942        TCGv_i64 val = cpu_reg(s, rt);
1943        tcg_gen_atomic_cmpxchg_i64(tmp, addr, cpu_exclusive_val, val,
1944                                   get_mem_index(s),
1945                                   size | MO_ALIGN | s->be_data);
1946        tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
1947    }
1948
1949    tcg_temp_free_i64(addr);
1950
1951    tcg_gen_mov_i64(cpu_reg(s, rd), tmp);
1952    tcg_temp_free_i64(tmp);
1953    tcg_gen_br(done_label);
1954
1955    gen_set_label(fail_label);
1956    tcg_gen_movi_i64(cpu_reg(s, rd), 1);
1957    gen_set_label(done_label);
1958    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
1959}
1960
1961/* Compute the Sixty-Four bit (SF) register size flag for the ISS. This
1962 * logic is derived from the ARMv8 specs for LDR (Shared decode for all encodings).
1963 */
1964static bool disas_ldst_compute_iss_sf(int size, bool is_signed, int opc)
1965{
1966    int opc0 = extract32(opc, 0, 1);
1967    int regsize;
1968
1969    if (is_signed) {
1970        regsize = opc0 ? 32 : 64;
1971    } else {
1972        regsize = size == 3 ? 64 : 32;
1973    }
1974    return regsize == 64;
1975}
1976
1977/* C3.3.6 Load/store exclusive
1978 *
1979 *  31 30 29         24  23  22   21  20  16  15  14   10 9    5 4    0
1980 * +-----+-------------+----+---+----+------+----+-------+------+------+
1981 * | sz  | 0 0 1 0 0 0 | o2 | L | o1 |  Rs  | o0 |  Rt2  |  Rn  | Rt   |
1982 * +-----+-------------+----+---+----+------+----+-------+------+------+
1983 *
1984 *  sz: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64 bit
1985 *   L: 0 -> store, 1 -> load
1986 *  o2: 0 -> exclusive, 1 -> not
1987 *  o1: 0 -> single register, 1 -> register pair
1988 *  o0: 1 -> load-acquire/store-release, 0 -> not
1989 */
1990static void disas_ldst_excl(DisasContext *s, uint32_t insn)
1991{
1992    int rt = extract32(insn, 0, 5);
1993    int rn = extract32(insn, 5, 5);
1994    int rt2 = extract32(insn, 10, 5);
1995    int is_lasr = extract32(insn, 15, 1);
1996    int rs = extract32(insn, 16, 5);
1997    int is_pair = extract32(insn, 21, 1);
1998    int is_store = !extract32(insn, 22, 1);
1999    int is_excl = !extract32(insn, 23, 1);
2000    int size = extract32(insn, 30, 2);
2001    TCGv_i64 tcg_addr;
2002
2003    if ((!is_excl && !is_pair && !is_lasr) ||
2004        (!is_excl && is_pair) ||
2005        (is_pair && size < 2)) {
2006        unallocated_encoding(s);
2007        return;
2008    }
2009
2010    if (rn == 31) {
2011        gen_check_sp_alignment(s);
2012    }
2013    tcg_addr = read_cpu_reg_sp(s, rn, 1);
2014
2015    /* Load-acquire/store-release semantics are provided by the explicit
2016     * memory barriers (tcg_gen_mb) emitted in the is_lasr cases below.
2017     */
2018
2019    if (is_excl) {
2020        if (!is_store) {
2021            s->is_ldex = true;
2022            gen_load_exclusive(s, rt, rt2, tcg_addr, size, is_pair);
2023            if (is_lasr) {
2024                tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
2025            }
2026        } else {
2027            if (is_lasr) {
2028                tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
2029            }
2030            gen_store_exclusive(s, rs, rt, rt2, tcg_addr, size, is_pair);
2031        }
2032    } else {
2033        TCGv_i64 tcg_rt = cpu_reg(s, rt);
2034        bool iss_sf = disas_ldst_compute_iss_sf(size, false, 0);
2035
2036        /* Generate ISS for non-exclusive accesses including LASR.  */
2037        if (is_store) {
2038            if (is_lasr) {
2039                tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
2040            }
2041            do_gpr_st(s, tcg_rt, tcg_addr, size,
2042                      true, rt, iss_sf, is_lasr);
2043        } else {
2044            do_gpr_ld(s, tcg_rt, tcg_addr, size, false, false,
2045                      true, rt, iss_sf, is_lasr);
2046            if (is_lasr) {
2047                tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
2048            }
2049        }
2050    }
2051}
2052
2053/*
2054 * C3.3.5 Load register (literal)
2055 *
2056 *  31 30 29   27  26 25 24 23                5 4     0
2057 * +-----+-------+---+-----+-------------------+-------+
2058 * | opc | 0 1 1 | V | 0 0 |     imm19         |  Rt   |
2059 * +-----+-------+---+-----+-------------------+-------+
2060 *
2061 * V: 1 -> vector (simd/fp)
2062 * opc (non-vector): 00 -> 32 bit, 01 -> 64 bit,
2063 *                   10 -> 32 bit signed, 11 -> prefetch
2064 * opc (vector): 00 -> 32 bit, 01 -> 64 bit, 10 -> 128 bit (11 unallocated)
2065 */
2066static void disas_ld_lit(DisasContext *s, uint32_t insn)
2067{
2068    int rt = extract32(insn, 0, 5);
2069    int64_t imm = sextract32(insn, 5, 19) << 2;
2070    bool is_vector = extract32(insn, 26, 1);
2071    int opc = extract32(insn, 30, 2);
2072    bool is_signed = false;
2073    int size = 2;
2074    TCGv_i64 tcg_rt, tcg_addr;
2075
2076    if (is_vector) {
2077        if (opc == 3) {
2078            unallocated_encoding(s);
2079            return;
2080        }
2081        size = 2 + opc;
2082        if (!fp_access_check(s)) {
2083            return;
2084        }
2085    } else {
2086        if (opc == 3) {
2087            /* PRFM (literal) : prefetch */
2088            return;
2089        }
2090        size = 2 + extract32(opc, 0, 1);
2091        is_signed = extract32(opc, 1, 1);
2092    }
2093
2094    tcg_rt = cpu_reg(s, rt);
2095
2096    tcg_addr = tcg_const_i64((s->pc - 4) + imm);
2097    if (is_vector) {
2098        do_fp_ld(s, rt, tcg_addr, size);
2099    } else {
2100        /* Only unsigned 32bit loads target 32bit registers.  */
2101        bool iss_sf = opc != 0;
2102
2103        do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, false,
2104                  true, rt, iss_sf, false);
2105    }
2106    tcg_temp_free_i64(tcg_addr);
2107}
2108
2109/*
2110 * C5.6.80 LDNP (Load Pair - non-temporal hint)
2111 * C5.6.81 LDP (Load Pair - non vector)
2112 * C5.6.82 LDPSW (Load Pair Signed Word - non vector)
2113 * C5.6.176 STNP (Store Pair - non-temporal hint)
2114 * C5.6.177 STP (Store Pair - non vector)
2115 * C6.3.165 LDNP (Load Pair of SIMD&FP - non-temporal hint)
2116 * C6.3.165 LDP (Load Pair of SIMD&FP)
2117 * C6.3.284 STNP (Store Pair of SIMD&FP - non-temporal hint)
2118 * C6.3.284 STP (Store Pair of SIMD&FP)
2119 *
2120 *  31 30 29   27  26  25 24   23  22 21   15 14   10 9    5 4    0
2121 * +-----+-------+---+---+-------+---+-------+-------+------+------+
2122 * | opc | 1 0 1 | V | 0 | index | L |  imm7 |  Rt2  |  Rn  | Rt   |
2123 * +-----+-------+---+---+-------+---+-------+-------+------+------+
2124 *
2125 * opc: LDP/STP/LDNP/STNP        00 -> 32 bit, 10 -> 64 bit
2126 *      LDPSW                    01
2127 *      LDP/STP/LDNP/STNP (SIMD) 00 -> 32 bit, 01 -> 64 bit, 10 -> 128 bit
2128 *   V: 0 -> GPR, 1 -> Vector
2129 * idx: 00 -> signed offset with non-temporal hint, 01 -> post-index,
2130 *      10 -> signed offset, 11 -> pre-index
2131 *   L: 0 -> Store 1 -> Load
2132 *
2133 * Rt, Rt2 = GPR or SIMD registers to be stored
2134 * Rn = general purpose register containing address
2135 * imm7 = signed offset (multiple of 4 or 8 depending on size)
2136 */
2137static void disas_ldst_pair(DisasContext *s, uint32_t insn)
2138{
2139    int rt = extract32(insn, 0, 5);
2140    int rn = extract32(insn, 5, 5);
2141    int rt2 = extract32(insn, 10, 5);
2142    uint64_t offset = sextract64(insn, 15, 7);
2143    int index = extract32(insn, 23, 2);
2144    bool is_vector = extract32(insn, 26, 1);
2145    bool is_load = extract32(insn, 22, 1);
2146    int opc = extract32(insn, 30, 2);
2147
2148    bool is_signed = false;
2149    bool postindex = false;
2150    bool wback = false;
2151
2152    TCGv_i64 tcg_addr; /* calculated address */
2153    int size;
2154
2155    if (opc == 3) {
2156        unallocated_encoding(s);
2157        return;
2158    }
2159
2160    if (is_vector) {
2161        size = 2 + opc;
2162    } else {
2163        size = 2 + extract32(opc, 1, 1);
2164        is_signed = extract32(opc, 0, 1);
2165        if (!is_load && is_signed) {
2166            unallocated_encoding(s);
2167            return;
2168        }
2169    }
2170
2171    switch (index) {
2172    case 1: /* post-index */
2173        postindex = true;
2174        wback = true;
2175        break;
2176    case 0:
2177        /* signed offset with "non-temporal" hint. Since we don't emulate
2178         * caches, we don't care about hints to the cache system about
2179         * data access patterns, and handle this identically to plain
2180         * signed offset.
2181         */
2182        if (is_signed) {
2183            /* There is no non-temporal-hint version of LDPSW */
2184            unallocated_encoding(s);
2185            return;
2186        }
2187        postindex = false;
2188        break;
2189    case 2: /* signed offset, rn not updated */
2190        postindex = false;
2191        break;
2192    case 3: /* pre-index */
2193        postindex = false;
2194        wback = true;
2195        break;
2196    }
2197
2198    if (is_vector && !fp_access_check(s)) {
2199        return;
2200    }
2201
2202    offset <<= size;
2203
2204    if (rn == 31) {
2205        gen_check_sp_alignment(s);
2206    }
2207
2208    tcg_addr = read_cpu_reg_sp(s, rn, 1);
2209
2210    if (!postindex) {
2211        tcg_gen_addi_i64(tcg_addr, tcg_addr, offset);
2212    }
2213
2214    if (is_vector) {
2215        if (is_load) {
2216            do_fp_ld(s, rt, tcg_addr, size);
2217        } else {
2218            do_fp_st(s, rt, tcg_addr, size);
2219        }
2220        tcg_gen_addi_i64(tcg_addr, tcg_addr, 1 << size);
2221        if (is_load) {
2222            do_fp_ld(s, rt2, tcg_addr, size);
2223        } else {
2224            do_fp_st(s, rt2, tcg_addr, size);
2225        }
2226    } else {
2227        TCGv_i64 tcg_rt = cpu_reg(s, rt);
2228        TCGv_i64 tcg_rt2 = cpu_reg(s, rt2);
2229
2230        if (is_load) {
2231            TCGv_i64 tmp = tcg_temp_new_i64();
2232
2233            /* Do not modify tcg_rt before recognizing any exception
2234             * from the second load.
2235             */
2236            do_gpr_ld(s, tmp, tcg_addr, size, is_signed, false,
2237                      false, 0, false, false);
2238            tcg_gen_addi_i64(tcg_addr, tcg_addr, 1 << size);
2239            do_gpr_ld(s, tcg_rt2, tcg_addr, size, is_signed, false,
2240                      false, 0, false, false);
2241
2242            tcg_gen_mov_i64(tcg_rt, tmp);
2243            tcg_temp_free_i64(tmp);
2244        } else {
2245            do_gpr_st(s, tcg_rt, tcg_addr, size,
2246                      false, 0, false, false);
2247            tcg_gen_addi_i64(tcg_addr, tcg_addr, 1 << size);
2248            do_gpr_st(s, tcg_rt2, tcg_addr, size,
2249                      false, 0, false, false);
2250        }
2251    }
2252
2253    if (wback) {
2254        if (postindex) {
2255            tcg_gen_addi_i64(tcg_addr, tcg_addr, offset - (1 << size));
2256        } else {
2257            tcg_gen_subi_i64(tcg_addr, tcg_addr, 1 << size);
2258        }
2259        tcg_gen_mov_i64(cpu_reg_sp(s, rn), tcg_addr);
2260    }
2261}
2262
2263/*
2264 * C3.3.8 Load/store (immediate post-indexed)
2265 * C3.3.9 Load/store (immediate pre-indexed)
2266 * C3.3.12 Load/store (unscaled immediate)
2267 *
2268 * 31 30 29   27  26 25 24 23 22 21  20    12 11 10 9    5 4    0
2269 * +----+-------+---+-----+-----+---+--------+-----+------+------+
2270 * |size| 1 1 1 | V | 0 0 | opc | 0 |  imm9  | idx |  Rn  |  Rt  |
2271 * +----+-------+---+-----+-----+---+--------+-----+------+------+
2272 *
2273 * idx = 01 -> post-indexed, 11 pre-indexed, 00 unscaled imm. (no writeback)
2274 *       10 -> unprivileged
2275 * V = 0 -> non-vector
2276 * size: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64bit
2277 * opc: 00 -> store, 01 -> unsigned load, 10 -> signed load (64 bit), 11 -> signed load (32 bit)
2278 */
2279static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
2280                                int opc,
2281                                int size,
2282                                int rt,
2283                                bool is_vector)
2284{
2285    int rn = extract32(insn, 5, 5);
2286    int imm9 = sextract32(insn, 12, 9);
2287    int idx = extract32(insn, 10, 2);
2288    bool is_signed = false;
2289    bool is_store = false;
2290    bool is_extended = false;
2291    bool is_unpriv = (idx == 2);
2292    bool iss_valid = !is_vector;
2293    bool post_index;
2294    bool writeback;
2295
2296    TCGv_i64 tcg_addr;
2297
2298    if (is_vector) {
2299        size |= (opc & 2) << 1;
2300        if (size > 4 || is_unpriv) {
2301            unallocated_encoding(s);
2302            return;
2303        }
2304        is_store = ((opc & 1) == 0);
2305        if (!fp_access_check(s)) {
2306            return;
2307        }
2308    } else {
2309        if (size == 3 && opc == 2) {
2310            /* PRFM - prefetch */
2311            if (is_unpriv) {
2312                unallocated_encoding(s);
2313                return;
2314            }
2315            return;
2316        }
2317        if (opc == 3 && size > 1) {
2318            unallocated_encoding(s);
2319            return;
2320        }
2321        is_store = (opc == 0);
2322        is_signed = extract32(opc, 1, 1);
2323        is_extended = (size < 3) && extract32(opc, 0, 1);
2324    }
2325
2326    switch (idx) {
2327    case 0:
2328    case 2:
2329        post_index = false;
2330        writeback = false;
2331        break;
2332    case 1:
2333        post_index = true;
2334        writeback = true;
2335        break;
2336    case 3:
2337        post_index = false;
2338        writeback = true;
2339        break;
2340    }
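    /* Illustrative assembly for the four idx encodings:
     *   LDUR x0, [x1, #-8]    idx = 00: unscaled offset, no writeback
     *   LDR  x0, [x1], #8     idx = 01: post-index (offset applied after the access)
     *   LDTR x0, [x1, #8]     idx = 10: unprivileged access, no writeback
     *   LDR  x0, [x1, #8]!    idx = 11: pre-index (offset applied before the access)
     */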
2341
2342    if (rn == 31) {
2343        gen_check_sp_alignment(s);
2344    }
2345    tcg_addr = read_cpu_reg_sp(s, rn, 1);
2346
2347    if (!post_index) {
2348        tcg_gen_addi_i64(tcg_addr, tcg_addr, imm9);
2349    }
2350
2351    if (is_vector) {
2352        if (is_store) {
2353            do_fp_st(s, rt, tcg_addr, size);
2354        } else {
2355            do_fp_ld(s, rt, tcg_addr, size);
2356        }
2357    } else {
2358        TCGv_i64 tcg_rt = cpu_reg(s, rt);
2359        int memidx = is_unpriv ? get_a64_user_mem_index(s) : get_mem_index(s);
2360        bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
2361
2362        if (is_store) {
2363            do_gpr_st_memidx(s, tcg_rt, tcg_addr, size, memidx,
2364                             iss_valid, rt, iss_sf, false);
2365        } else {
2366            do_gpr_ld_memidx(s, tcg_rt, tcg_addr, size,
2367                             is_signed, is_extended, memidx,
2368                             iss_valid, rt, iss_sf, false);
2369        }
2370    }
2371
2372    if (writeback) {
2373        TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
2374        if (post_index) {
2375            tcg_gen_addi_i64(tcg_addr, tcg_addr, imm9);
2376        }
2377        tcg_gen_mov_i64(tcg_rn, tcg_addr);
2378    }
2379}
2380
2381/*
2382 * C3.3.10 Load/store (register offset)
2383 *
2384 * 31 30 29   27  26 25 24 23 22 21  20  16 15 13 12 11 10 9  5 4  0
2385 * +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+
2386 * |size| 1 1 1 | V | 0 0 | opc | 1 |  Rm  | opt | S| 1 0 | Rn | Rt |
2387 * +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+
2388 *
2389 * For non-vector:
2390 *   size: 00-> byte, 01 -> 16 bit, 10 -> 32bit, 11 -> 64bit
2391 *   opc: 00 -> store, 01 -> unsigned load, 10 -> signed load (64 bit), 11 -> signed load (32 bit)
2392 * For vector:
2393 *   size is opc<1>:size<1:0> so 100 -> 128 bit; 110 and 111 unallocated
2394 *   opc<0>: 0 -> store, 1 -> load
2395 * V: 1 -> vector/simd
2396 * opt: extend encoding (see DecodeRegExtend)
2397 * S: if S=1 then the offset register is scaled by the access size (1 << size)
2398 * Rt: register to transfer into/out of
2399 * Rn: address register or SP for base
2400 * Rm: offset register or ZR for offset
2401 */
2402static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn,
2403                                   int opc,
2404                                   int size,
2405                                   int rt,
2406                                   bool is_vector)
2407{
2408    int rn = extract32(insn, 5, 5);
2409    int shift = extract32(insn, 12, 1);
2410    int rm = extract32(insn, 16, 5);
2411    int opt = extract32(insn, 13, 3);
2412    bool is_signed = false;
2413    bool is_store = false;
2414    bool is_extended = false;
2415
2416    TCGv_i64 tcg_rm;
2417    TCGv_i64 tcg_addr;
2418
2419    if (extract32(opt, 1, 1) == 0) {
2420        unallocated_encoding(s);
2421        return;
2422    }
2423
2424    if (is_vector) {
2425        size |= (opc & 2) << 1;
2426        if (size > 4) {
2427            unallocated_encoding(s);
2428            return;
2429        }
2430        is_store = !extract32(opc, 0, 1);
2431        if (!fp_access_check(s)) {
2432            return;
2433        }
2434    } else {
2435        if (size == 3 && opc == 2) {
2436            /* PRFM - prefetch */
2437            return;
2438        }
2439        if (opc == 3 && size > 1) {
2440            unallocated_encoding(s);
2441            return;
2442        }
2443        is_store = (opc == 0);
2444        is_signed = extract32(opc, 1, 1);
2445        is_extended = (size < 3) && extract32(opc, 0, 1);
2446    }
2447
2448    if (rn == 31) {
2449        gen_check_sp_alignment(s);
2450    }
2451    tcg_addr = read_cpu_reg_sp(s, rn, 1);
2452
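    /* e.g. for LDR x0, [x1, w2, SXTW #3]: opt = 110 sign-extends w2 and
     * S = 1 scales it by the access size (8 bytes) before the add below.
     */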
2453    tcg_rm = read_cpu_reg(s, rm, 1);
2454    ext_and_shift_reg(tcg_rm, tcg_rm, opt, shift ? size : 0);
2455
2456    tcg_gen_add_i64(tcg_addr, tcg_addr, tcg_rm);
2457
2458    if (is_vector) {
2459        if (is_store) {
2460            do_fp_st(s, rt, tcg_addr, size);
2461        } else {
2462            do_fp_ld(s, rt, tcg_addr, size);
2463        }
2464    } else {
2465        TCGv_i64 tcg_rt = cpu_reg(s, rt);
2466        bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
2467        if (is_store) {
2468            do_gpr_st(s, tcg_rt, tcg_addr, size,
2469                      true, rt, iss_sf, false);
2470        } else {
2471            do_gpr_ld(s, tcg_rt, tcg_addr, size,
2472                      is_signed, is_extended,
2473                      true, rt, iss_sf, false);
2474        }
2475    }
2476}
2477
2478/*
2479 * C3.3.13 Load/store (unsigned immediate)
2480 *
2481 * 31 30 29   27  26 25 24 23 22 21        10 9     5 4    0
2482 * +----+-------+---+-----+-----+------------+-------+------+
2483 * |size| 1 1 1 | V | 0 1 | opc |   imm12    |  Rn   |  Rt  |
2484 * +----+-------+---+-----+-----+------------+-------+------+
2485 *
2486 * For non-vector:
2487 *   size: 00-> byte, 01 -> 16 bit, 10 -> 32bit, 11 -> 64bit
2488 *   opc: 00 -> store, 01 -> unsigned load, 10 -> signed load (64 bit), 11 -> signed load (32 bit)
2489 * For vector:
2490 *   size is opc<1>:size<1:0> so 100 -> 128 bit; 110 and 111 unallocated
2491 *   opc<0>: 0 -> store, 1 -> load
2492 * Rn: base address register (inc SP)
2493 * Rt: target register
2494 */
2495static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn,
2496                                        int opc,
2497                                        int size,
2498                                        int rt,
2499                                        bool is_vector)
2500{
2501    int rn = extract32(insn, 5, 5);
2502    unsigned int imm12 = extract32(insn, 10, 12);
2503    unsigned int offset;
2504
2505    TCGv_i64 tcg_addr;
2506
2507    bool is_store;
2508    bool is_signed = false;
2509    bool is_extended = false;
2510
2511    if (is_vector) {
2512        size |= (opc & 2) << 1;
2513        if (size > 4) {
2514            unallocated_encoding(s);
2515            return;
2516        }
2517        is_store = !extract32(opc, 0, 1);
2518        if (!fp_access_check(s)) {
2519            return;
2520        }
2521    } else {
2522        if (size == 3 && opc == 2) {
2523            /* PRFM - prefetch */
2524            return;
2525        }
2526        if (opc == 3 && size > 1) {
2527            unallocated_encoding(s);
2528            return;
2529        }
2530        is_store = (opc == 0);
2531        is_signed = extract32(opc, 1, 1);
2532        is_extended = (size < 3) && extract32(opc, 0, 1);
2533    }
2534
2535    if (rn == 31) {
2536        gen_check_sp_alignment(s);
2537    }
2538    tcg_addr = read_cpu_reg_sp(s, rn, 1);
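    /* The 12-bit immediate is scaled by the access size: e.g.
     * LDR x0, [x1, #16] encodes imm12 = 2 with size = 3 (2 << 3 == 16).
     */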
2539    offset = imm12 << size;
2540    tcg_gen_addi_i64(tcg_addr, tcg_addr, offset);
2541
2542    if (is_vector) {
2543        if (is_store) {
2544            do_fp_st(s, rt, tcg_addr, size);
2545        } else {
2546            do_fp_ld(s, rt, tcg_addr, size);
2547        }
2548    } else {
2549        TCGv_i64 tcg_rt = cpu_reg(s, rt);
2550        bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
2551        if (is_store) {
2552            do_gpr_st(s, tcg_rt, tcg_addr, size,
2553                      true, rt, iss_sf, false);
2554        } else {
2555            do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, is_extended,
2556                      true, rt, iss_sf, false);
2557        }
2558    }
2559}
2560
2561/* Load/store register (all forms) */
2562static void disas_ldst_reg(DisasContext *s, uint32_t insn)
2563{
2564    int rt = extract32(insn, 0, 5);
2565    int opc = extract32(insn, 22, 2);
2566    bool is_vector = extract32(insn, 26, 1);
2567    int size = extract32(insn, 30, 2);
2568
2569    switch (extract32(insn, 24, 2)) {
2570    case 0:
2571        if (extract32(insn, 21, 1) == 1 && extract32(insn, 10, 2) == 2) {
2572            disas_ldst_reg_roffset(s, insn, opc, size, rt, is_vector);
2573        } else {
2574            /* Load/store register (unscaled immediate)
2575             * Load/store immediate pre/post-indexed
2576             * Load/store register unprivileged
2577             */
2578            disas_ldst_reg_imm9(s, insn, opc, size, rt, is_vector);
2579        }
2580        break;
2581    case 1:
2582        disas_ldst_reg_unsigned_imm(s, insn, opc, size, rt, is_vector);
2583        break;
2584    default:
2585        unallocated_encoding(s);
2586        break;
2587    }
2588}
2589
2590/* C3.3.1 AdvSIMD load/store multiple structures
2591 *
2592 *  31  30  29           23 22  21         16 15    12 11  10 9    5 4    0
2593 * +---+---+---------------+---+-------------+--------+------+------+------+
2594 * | 0 | Q | 0 0 1 1 0 0 0 | L | 0 0 0 0 0 0 | opcode | size |  Rn  |  Rt  |
2595 * +---+---+---------------+---+-------------+--------+------+------+------+
2596 *
2597 * C3.3.2 AdvSIMD load/store multiple structures (post-indexed)
2598 *
2599 *  31  30  29           23 22  21  20     16 15    12 11  10 9    5 4    0
2600 * +---+---+---------------+---+---+---------+--------+------+------+------+
2601 * | 0 | Q | 0 0 1 1 0 0 1 | L | 0 |   Rm    | opcode | size |  Rn  |  Rt  |
2602 * +---+---+---------------+---+---+---------+--------+------+------+------+
2603 *
2604 * Rt: first (or only) SIMD&FP register to be transferred
2605 * Rn: base address or SP
2606 * Rm (post-index only): post-index register (when !31) or size dependent #imm
2607 */
2608static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
2609{
2610    int rt = extract32(insn, 0, 5);
2611    int rn = extract32(insn, 5, 5);
2612    int size = extract32(insn, 10, 2);
2613    int opcode = extract32(insn, 12, 4);
2614    bool is_store = !extract32(insn, 22, 1);
2615    bool is_postidx = extract32(insn, 23, 1);
2616    bool is_q = extract32(insn, 30, 1);
2617    TCGv_i64 tcg_addr, tcg_rn;
2618
2619    int ebytes = 1 << size;
2620    int elements = (is_q ? 128 : 64) / (8 << size);
2621    int rpt;    /* num iterations */
2622    int selem;  /* structure elements */
2623    int r;
2624
2625    if (extract32(insn, 31, 1) || extract32(insn, 21, 1)) {
2626        unallocated_encoding(s);
2627        return;
2628    }
2629
2630    /* From the shared decode logic */
2631    switch (opcode) {
2632    case 0x0: /* LD4/ST4 (4 Registers) */
2633        rpt = 1;
2634        selem = 4;
2635        break;
2636    case 0x2: /* LD1/ST1 (4 Registers) */
2637        rpt = 4;
2638        selem = 1;
2639        break;
2640    case 0x4: /* LD3/ST3 (3 Registers) */
2641        rpt = 1;
2642        selem = 3;
2643        break;
2644    case 0x6: /* LD1/ST1 (3 Registers) */
2645        rpt = 3;
2646        selem = 1;
2647        break;
2648    case 0x7: /* LD1/ST1 (1 Register) */
2649        rpt = 1;
2650        selem = 1;
2651        break;
2652    case 0x8: /* LD2/ST2 (2 Registers) */
2653        rpt = 1;
2654        selem = 2;
2655        break;
2656    case 0xa: /* LD1/ST1 (2 Registers) */
2657        rpt = 2;
2658        selem = 1;
2659        break;
2660    default:
2661        unallocated_encoding(s);
2662        return;
2663    }
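    /* e.g. LD4 {v0.16b-v3.16b}, [x0] (opcode 0x0) makes one pass over
     * four-element structures (rpt = 1, selem = 4), whereas
     * LD1 {v0.16b-v3.16b}, [x0] (opcode 0x2) is four independent
     * one-element transfers (rpt = 4, selem = 1).
     */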
2664
2665    if (size == 3 && !is_q && selem != 1) {
2666        /* reserved */
2667        unallocated_encoding(s);
2668        return;
2669    }
2670
2671    if (!fp_access_check(s)) {
2672        return;
2673    }
2674
2675    if (rn == 31) {
2676        gen_check_sp_alignment(s);
2677    }
2678
2679    tcg_rn = cpu_reg_sp(s, rn);
2680    tcg_addr = tcg_temp_new_i64();
2681    tcg_gen_mov_i64(tcg_addr, tcg_rn);
2682
2683    for (r = 0; r < rpt; r++) {
2684        int e;
2685        for (e = 0; e < elements; e++) {
2686            int tt = (rt + r) % 32;
2687            int xs;
2688            for (xs = 0; xs < selem; xs++) {
2689                if (is_store) {
2690                    do_vec_st(s, tt, e, tcg_addr, size);
2691                } else {
2692                    do_vec_ld(s, tt, e, tcg_addr, size);
2693
2694                    /* For non-quad operations, setting a slice of the low
2695                     * 64 bits of the register clears the high 64 bits (in
2696                     * the ARM ARM pseudocode this is implicit in the fact
2697                     * that 'rval' is a 64 bit wide variable). We optimize
2698                     * by noticing that we only need to do this the first
2699                     * time we touch a register.
2700                     */
2701                    if (!is_q && e == 0 && (r == 0 || xs == selem - 1)) {
2702                        clear_vec_high(s, tt);
2703                    }
2704                }
2705                tcg_gen_addi_i64(tcg_addr, tcg_addr, ebytes);
2706                tt = (tt + 1) % 32;
2707            }
2708        }
2709    }
2710
2711    if (is_postidx) {
2712        int rm = extract32(insn, 16, 5);
2713        if (rm == 31) {
2714            tcg_gen_mov_i64(tcg_rn, tcg_addr);
2715        } else {
2716            tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
2717        }
2718    }
2719    tcg_temp_free_i64(tcg_addr);
2720}
2721
2722/* C3.3.3 AdvSIMD load/store single structure
2723 *
2724 *  31  30  29           23 22 21 20       16 15 13 12  11  10 9    5 4    0
2725 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
2726 * | 0 | Q | 0 0 1 1 0 1 0 | L R | 0 0 0 0 0 | opc | S | size |  Rn  |  Rt  |
2727 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
2728 *
2729 * C3.3.4 AdvSIMD load/store single structure (post-indexed)
2730 *
2731 *  31  30  29           23 22 21 20       16 15 13 12  11  10 9    5 4    0
2732 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
2733 * | 0 | Q | 0 0 1 1 0 1 1 | L R |     Rm    | opc | S | size |  Rn  |  Rt  |
2734 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
2735 *
2736 * Rt: first (or only) SIMD&FP register to be transferred
2737 * Rn: base address or SP
2738 * Rm (post-index only): post-index register (when !31) or size dependent #imm
2739 * index = encoded in Q:S:size dependent on size
2740 *
2741 * lane_size = encoded in R, opc
2742 * transfer width = encoded in opc, S, size
2743 */
2744static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
2745{
2746    int rt = extract32(insn, 0, 5);
2747    int rn = extract32(insn, 5, 5);
2748    int size = extract32(insn, 10, 2);
2749    int S = extract32(insn, 12, 1);
2750    int opc = extract32(insn, 13, 3);
2751    int R = extract32(insn, 21, 1);
2752    int is_load = extract32(insn, 22, 1);
2753    int is_postidx = extract32(insn, 23, 1);
2754    int is_q = extract32(insn, 30, 1);
2755
2756    int scale = extract32(opc, 1, 2);
2757    int selem = (extract32(opc, 0, 1) << 1 | R) + 1;
2758    bool replicate = false;
2759    int index = is_q << 3 | S << 2 | size;
2760    int ebytes, xs;
2761    TCGv_i64 tcg_addr, tcg_rn;
2762
2763    switch (scale) {
2764    case 3: /* load and replicate (LD1R to LD4R) */
2765        if (!is_load || S) {
2766            unallocated_encoding(s);
2767            return;
2768        }
2769        scale = size;
2770        replicate = true;
2771        break;
2772    case 0:
2773        break;
2774    case 1:
2775        if (extract32(size, 0, 1)) {
2776            unallocated_encoding(s);
2777            return;
2778        }
2779        index >>= 1;
2780        break;
2781    case 2:
2782        if (extract32(size, 1, 1)) {
2783            unallocated_encoding(s);
2784            return;
2785        }
2786        if (!extract32(size, 0, 1)) {
2787            index >>= 2;
2788        } else {
2789            if (S) {
2790                unallocated_encoding(s);
2791                return;
2792            }
2793            index >>= 3;
2794            scale = 3;
2795        }
2796        break;
2797    default:
2798        g_assert_not_reached();
2799    }
2800
2801    if (!fp_access_check(s)) {
2802        return;
2803    }
2804
2805    ebytes = 1 << scale;
2806
2807    if (rn == 31) {
2808        gen_check_sp_alignment(s);
2809    }
2810
2811    tcg_rn = cpu_reg_sp(s, rn);
2812    tcg_addr = tcg_temp_new_i64();
2813    tcg_gen_mov_i64(tcg_addr, tcg_rn);
2814
2815    for (xs = 0; xs < selem; xs++) {
2816        if (replicate) {
2817            /* Load and replicate to all elements */
2818            uint64_t mulconst;
2819            TCGv_i64 tcg_tmp = tcg_temp_new_i64();
2820
2821            tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr,
2822                                get_mem_index(s), s->be_data + scale);
2823            switch (scale) {
2824            case 0:
2825                mulconst = 0x0101010101010101ULL;
2826                break;
2827            case 1:
2828                mulconst = 0x0001000100010001ULL;
2829                break;
2830            case 2:
2831                mulconst = 0x0000000100000001ULL;
2832                break;
2833            case 3:
2834                mulconst = 0;
2835                break;
2836            default:
2837                g_assert_not_reached();
2838            }
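            /* Multiplying by mulconst replicates the loaded element into
             * every element of the 64-bit value, e.g. for scale 0:
             * 0xab * 0x0101010101010101 == 0xabababababababab.
             */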
2839            if (mulconst) {
2840                tcg_gen_muli_i64(tcg_tmp, tcg_tmp, mulconst);
2841            }
2842            write_vec_element(s, tcg_tmp, rt, 0, MO_64);
2843            if (is_q) {
2844                write_vec_element(s, tcg_tmp, rt, 1, MO_64);
2845            } else {
2846                clear_vec_high(s, rt);
2847            }
2848            tcg_temp_free_i64(tcg_tmp);
2849        } else {
2850            /* Load/store one element per register */
2851            if (is_load) {
2852                do_vec_ld(s, rt, index, tcg_addr, scale);
2853            } else {
2854                do_vec_st(s, rt, index, tcg_addr, scale);
2855            }
2856        }
2857        tcg_gen_addi_i64(tcg_addr, tcg_addr, ebytes);
2858        rt = (rt + 1) % 32;
2859    }
2860
2861    if (is_postidx) {
2862        int rm = extract32(insn, 16, 5);
2863        if (rm == 31) {
2864            tcg_gen_mov_i64(tcg_rn, tcg_addr);
2865        } else {
2866            tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
2867        }
2868    }
2869    tcg_temp_free_i64(tcg_addr);
2870}
2871
2872/* C3.3 Loads and stores */
2873static void disas_ldst(DisasContext *s, uint32_t insn)
2874{
2875    switch (extract32(insn, 24, 6)) {
2876    case 0x08: /* Load/store exclusive */
2877        disas_ldst_excl(s, insn);
2878        break;
2879    case 0x18: case 0x1c: /* Load register (literal) */
2880        disas_ld_lit(s, insn);
2881        break;
2882    case 0x28: case 0x29:
2883    case 0x2c: case 0x2d: /* Load/store pair (all forms) */
2884        disas_ldst_pair(s, insn);
2885        break;
2886    case 0x38: case 0x39:
2887    case 0x3c: case 0x3d: /* Load/store register (all forms) */
2888        disas_ldst_reg(s, insn);
2889        break;
2890    case 0x0c: /* AdvSIMD load/store multiple structures */
2891        disas_ldst_multiple_struct(s, insn);
2892        break;
2893    case 0x0d: /* AdvSIMD load/store single structure */
2894        disas_ldst_single_struct(s, insn);
2895        break;
2896    default:
2897        unallocated_encoding(s);
2898        break;
2899    }
2900}
2901
2902/* C3.4.6 PC-rel. addressing
2903 *   31  30   29 28       24 23                5 4    0
2904 * +----+-------+-----------+-------------------+------+
2905 * | op | immlo | 1 0 0 0 0 |       immhi       |  Rd  |
2906 * +----+-------+-----------+-------------------+------+
2907 */
2908static void disas_pc_rel_adr(DisasContext *s, uint32_t insn)
2909{
2910    unsigned int page, rd;
2911    uint64_t base;
2912    uint64_t offset;
2913
2914    page = extract32(insn, 31, 1);
2915    /* SignExtend(immhi:immlo) -> offset */
2916    offset = sextract64(insn, 5, 19);
2917    offset = offset << 2 | extract32(insn, 29, 2);
2918    rd = extract32(insn, 0, 5);
2919    base = s->pc - 4;
2920
2921    if (page) {
2922        /* ADRP (page based) */
2923        base &= ~0xfff;
2924        offset <<= 12;
2925    }
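    /* e.g. ADRP x0, label yields the 4KB page address of the target:
     * the offset is a signed 21-bit page count, i.e. +/- 4GB from PC.
     */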
2926
2927    tcg_gen_movi_i64(cpu_reg(s, rd), base + offset);
2928}
2929
2930/*
2931 * C3.4.1 Add/subtract (immediate)
2932 *
2933 *  31 30 29 28       24 23 22 21         10 9   5 4   0
2934 * +--+--+--+-----------+-----+-------------+-----+-----+
2935 * |sf|op| S| 1 0 0 0 1 |shift|    imm12    |  Rn | Rd  |
2936 * +--+--+--+-----------+-----+-------------+-----+-----+
2937 *
2938 *    sf: 0 -> 32bit, 1 -> 64bit
2939 *    op: 0 -> add  , 1 -> sub
2940 *     S: 1 -> set flags
2941 * shift: 00 -> LSL imm by 0, 01 -> LSL imm by 12
2942 */
2943static void disas_add_sub_imm(DisasContext *s, uint32_t insn)
2944{
2945    int rd = extract32(insn, 0, 5);
2946    int rn = extract32(insn, 5, 5);
2947    uint64_t imm = extract32(insn, 10, 12);
2948    int shift = extract32(insn, 22, 2);
2949    bool setflags = extract32(insn, 29, 1);
2950    bool sub_op = extract32(insn, 30, 1);
2951    bool is_64bit = extract32(insn, 31, 1);
2952
2953    TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
2954    TCGv_i64 tcg_rd = setflags ? cpu_reg(s, rd) : cpu_reg_sp(s, rd);
2955    TCGv_i64 tcg_result;
2956
2957    switch (shift) {
2958    case 0x0:
2959        break;
2960    case 0x1:
2961        imm <<= 12;
2962        break;
2963    default:
2964        unallocated_encoding(s);
2965        return;
2966    }
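    /* e.g. ADD x0, x1, #1, LSL #12 adds 0x1000; the two shift choices
     * let an ADD/SUB pair synthesise any 24-bit immediate.
     */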
2967
2968    tcg_result = tcg_temp_new_i64();
2969    if (!setflags) {
2970        if (sub_op) {
2971            tcg_gen_subi_i64(tcg_result, tcg_rn, imm);
2972        } else {
2973            tcg_gen_addi_i64(tcg_result, tcg_rn, imm);
2974        }
2975    } else {
2976        TCGv_i64 tcg_imm = tcg_const_i64(imm);
2977        if (sub_op) {
2978            gen_sub_CC(is_64bit, tcg_result, tcg_rn, tcg_imm);
2979        } else {
2980            gen_add_CC(is_64bit, tcg_result, tcg_rn, tcg_imm);
2981        }
2982        tcg_temp_free_i64(tcg_imm);
2983    }
2984
2985    if (is_64bit) {
2986        tcg_gen_mov_i64(tcg_rd, tcg_result);
2987    } else {
2988        tcg_gen_ext32u_i64(tcg_rd, tcg_result);
2989    }
2990
2991    tcg_temp_free_i64(tcg_result);
2992}
2993
2994/* The input should be a value in the bottom e bits (with higher
2995 * bits zero); returns that value replicated into every element
2996 * of size e in a 64 bit integer.
2997 */
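    /* e.g. bitfield_replicate(0b0101, 4) == 0x5555555555555555 */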
2998static uint64_t bitfield_replicate(uint64_t mask, unsigned int e)
2999{
3000    assert(e != 0);
3001    while (e < 64) {
3002        mask |= mask << e;
3003        e *= 2;
3004    }
3005    return mask;
3006}
3007
3008/* Return a value with the bottom len bits set (where 0 < len <= 64) */
3009static inline uint64_t bitmask64(unsigned int length)
3010{
3011    assert(length > 0 && length <= 64);
3012    return ~0ULL >> (64 - length);
3013}
3014
3015/* Simplified variant of pseudocode DecodeBitMasks() for the case where we
3016 * only require the wmask. Returns false if the imms/immr/immn are a reserved
3017 * value (ie should cause a guest UNDEF exception), and true if they are
3018 * valid, in which case the decoded bit pattern is written to result.
3019 */
3020static bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn,
3021                                   unsigned int imms, unsigned int immr)
3022{
3023    uint64_t mask;
3024    unsigned e, levels, s, r;
3025    int len;
3026
3027    assert(immn < 2 && imms < 64 && immr < 64);
3028
3029    /* The bit patterns we create here are 64 bit patterns which
3030     * are vectors of identical elements of size e = 2, 4, 8, 16, 32 or
3031     * 64 bits each. Each element contains the same value: a run
3032     * of between 1 and e-1 non-zero bits, rotated within the
3033     * element by between 0 and e-1 bits.
3034     *
3035     * The element size and run length are encoded into immn (1 bit)
3036     * and imms (6 bits) as follows:
3037     * 64 bit elements: immn = 1, imms = <length of run - 1>
3038     * 32 bit elements: immn = 0, imms = 0 : <length of run - 1>
3039     * 16 bit elements: immn = 0, imms = 10 : <length of run - 1>
3040     *  8 bit elements: immn = 0, imms = 110 : <length of run - 1>
3041     *  4 bit elements: immn = 0, imms = 1110 : <length of run - 1>
3042     *  2 bit elements: immn = 0, imms = 11110 : <length of run - 1>
3043     * Notice that immn = 0, imms = 11111x is the only combination
3044     * not covered by one of the above options; this is reserved.
3045     * Further, <length of run - 1> all-ones is a reserved pattern.
3046     *
3047     * In all cases the rotation is by immr % e (and immr is 6 bits).
3048     */
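    /* Worked examples:
     *   immn = 0, imms = 111100, immr = 0: e = 2 with a run length of 1,
     *     giving 0x5555555555555555;
     *   immn = 1, imms = 000111, immr = 4: e = 64 with a run of 8 bits
     *     rotated right by 4, giving 0xf00000000000000f.
     */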
3049
3050    /* First determine the element size */
3051    len = 31 - clz32((immn << 6) | (~imms & 0x3f));
3052    if (len < 1) {
3053        /* This is the immn == 0, imms == 11111x case */
3054        return false;
3055    }
3056    e = 1 << len;
3057
3058    levels = e - 1;
3059    s = imms & levels;
3060    r = immr & levels;
3061
3062    if (s == levels) {
3063        /* <length of run - 1> mustn't be all-ones. */
3064        return false;
3065    }
3066
3067    /* Create the value of one element: s+1 set bits rotated
3068     * by r within the element (which is e bits wide)...
3069     */
3070    mask = bitmask64(s + 1);
3071    if (r) {
3072        mask = (mask >> r) | (mask << (e - r));
3073        mask &= bitmask64(e);
3074    }
3075    /* ...then replicate the element over the whole 64 bit value */
3076    mask = bitfield_replicate(mask, e);
3077    *result = mask;
3078    return true;
3079}
3080
3081/* C3.4.4 Logical (immediate)
3082 *   31  30 29 28         23 22  21  16 15  10 9    5 4    0
3083 * +----+-----+-------------+---+------+------+------+------+
3084 * | sf | opc | 1 0 0 1 0 0 | N | immr | imms |  Rn  |  Rd  |
3085 * +----+-----+-------------+---+------+------+------+------+
3086 */
3087static void disas_logic_imm(DisasContext *s, uint32_t insn)
3088{
3089    unsigned int sf, opc, is_n, immr, imms, rn, rd;
3090    TCGv_i64 tcg_rd, tcg_rn;
3091    uint64_t wmask;
3092    bool is_and = false;
3093
3094    sf = extract32(insn, 31, 1);
3095    opc = extract32(insn, 29, 2);
3096    is_n = extract32(insn, 22, 1);
3097    immr = extract32(insn, 16, 6);
3098    imms = extract32(insn, 10, 6);
3099    rn = extract32(insn, 5, 5);
3100    rd = extract32(insn, 0, 5);
3101
3102    if (!sf && is_n) {
3103        unallocated_encoding(s);
3104        return;
3105    }
3106
3107    if (opc == 0x3) { /* ANDS */
3108        tcg_rd = cpu_reg(s, rd);
3109    } else {
3110        tcg_rd = cpu_reg_sp(s, rd);
3111    }
3112    tcg_rn = cpu_reg(s, rn);
3113
3114    if (!logic_imm_decode_wmask(&wmask, is_n, imms, immr)) {
3115        /* some immediate field values are reserved */
3116        unallocated_encoding(s);
3117        return;
3118    }
3119
3120    if (!sf) {
3121        wmask &= 0xffffffff;
3122    }
3123
3124    switch (opc) {
3125    case 0x3: /* ANDS */
3126    case 0x0: /* AND */
3127        tcg_gen_andi_i64(tcg_rd, tcg_rn, wmask);
3128        is_and = true;
3129        break;
3130    case 0x1: /* ORR */
3131        tcg_gen_ori_i64(tcg_rd, tcg_rn, wmask);
3132        break;
3133    case 0x2: /* EOR */
3134        tcg_gen_xori_i64(tcg_rd, tcg_rn, wmask);
3135        break;
3136    default:
3137        assert(FALSE); /* must handle all above */
3138        break;
3139    }
3140
3141    if (!sf && !is_and) {
3142        /* zero extend final result; we know we can skip this for AND
3143         * since the immediate had the high 32 bits clear.
3144         */
3145        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
3146    }
3147
3148    if (opc == 3) { /* ANDS */
3149        gen_logic_CC(sf, tcg_rd);
3150    }
3151}
3152
3153/*
3154 * C3.4.5 Move wide (immediate)
3155 *
3156 *  31 30 29 28         23 22 21 20             5 4    0
3157 * +--+-----+-------------+-----+----------------+------+
3158 * |sf| opc | 1 0 0 1 0 1 |  hw |  imm16         |  Rd  |
3159 * +--+-----+-------------+-----+----------------+------+
3160 *
3161 * sf: 0 -> 32 bit, 1 -> 64 bit
3162 * opc: 00 -> MOVN, 10 -> MOVZ, 11 -> MOVK (01 is unallocated)
3163 * hw: shift/16, i.e. shift by 0 or 16 (32 and 48 only valid when sf=1)
3164 */
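/* For example, loading the constant 0x12345678 typically takes two insns:
 *   MOVZ x0, #0x5678            ; x0 = 0x5678
 *   MOVK x0, #0x1234, LSL #16   ; x0 = 0x12345678
 * MOVZ seeds the register, then MOVK patches in one 16-bit slice at a time.
 */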
3165static void disas_movw_imm(DisasContext *s, uint32_t insn)
3166{
3167    int rd = extract32(insn, 0, 5);
3168    uint64_t imm = extract32(insn, 5, 16);
3169    int sf = extract32(insn, 31, 1);
3170    int opc = extract32(insn, 29, 2);
3171    int pos = extract32(insn, 21, 2) << 4;
3172    TCGv_i64 tcg_rd = cpu_reg(s, rd);
3173    TCGv_i64 tcg_imm;
3174
3175    if (!sf && (pos >= 32)) {
3176        unallocated_encoding(s);
3177        return;
3178    }
3179
3180    switch (opc) {
3181    case 0: /* MOVN */
3182    case 2: /* MOVZ */
3183        imm <<= pos;
3184        if (opc == 0) {
3185            imm = ~imm;
3186        }
3187        if (!sf) {
3188            imm &= 0xffffffffu;
3189        }
3190        tcg_gen_movi_i64(tcg_rd, imm);
3191        break;
3192    case 3: /* MOVK */
3193        tcg_imm = tcg_const_i64(imm);
3194        tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_imm, pos, 16);
3195        tcg_temp_free_i64(tcg_imm);
3196        if (!sf) {
3197            tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
3198        }
3199        break;
3200    default:
3201        unallocated_encoding(s);
3202        break;
3203    }
3204}
3205
3206/* C3.4.2 Bitfield
3207 *   31  30 29 28         23 22  21  16 15  10 9    5 4    0
3208 * +----+-----+-------------+---+------+------+------+------+
3209 * | sf | opc | 1 0 0 1 1 0 | N | immr | imms |  Rn  |  Rd  |
3210 * +----+-----+-------------+---+------+------+------+------+
3211 */
3212static void disas_bitfield(DisasContext *s, uint32_t insn)
3213{
3214    unsigned int sf, n, opc, ri, si, rn, rd, bitsize, pos, len;
3215    TCGv_i64 tcg_rd, tcg_tmp;
3216
3217    sf = extract32(insn, 31, 1);
3218    opc = extract32(insn, 29, 2);
3219    n = extract32(insn, 22, 1);
3220    ri = extract32(insn, 16, 6);
3221    si = extract32(insn, 10, 6);
3222    rn = extract32(insn, 5, 5);
3223    rd = extract32(insn, 0, 5);
3224    bitsize = sf ? 64 : 32;
3225
3226    if (sf != n || ri >= bitsize || si >= bitsize || opc > 2) {
3227        unallocated_encoding(s);
3228        return;
3229    }
3230
3231    tcg_rd = cpu_reg(s, rd);
3232
3233    /* Suppress the zero-extend for !sf.  Since RI and SI are constrained
3234       to be smaller than bitsize, we'll never reference data outside the
3235       low 32-bits anyway.  */
3236    tcg_tmp = read_cpu_reg(s, rn, 1);
3237
3238    /* Recognize simple(r) extractions.  */
3239    if (si >= ri) {
3240        /* Wd<s-r:0> = Wn<s:r> */
3241        len = (si - ri) + 1;
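        /* e.g. UBFX x0, x1, #8, #4 arrives here with ri = 8 and si = 11,
         * so len = 4: extract x1<11:8> into x0<3:0>.
         */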
3242        if (opc == 0) { /* SBFM: ASR, SBFX, SXTB, SXTH, SXTW */
3243            tcg_gen_sextract_i64(tcg_rd, tcg_tmp, ri, len);
3244            goto done;
3245        } else if (opc == 2) { /* UBFM: UBFX, LSR, UXTB, UXTH */
3246            tcg_gen_extract_i64(tcg_rd, tcg_tmp, ri, len);
3247            return;
3248        }
3249        /* opc == 1, BFXIL: fall through to deposit */
3250        tcg_gen_extract_i64(tcg_tmp, tcg_tmp, ri, len);
3251        pos = 0;
3252    } else {
3253        /* Handle the ri > si case with a deposit
3254         * Wd<32+s-r,32-r> = Wn<s:0>
3255         */
3256        len = si + 1;
3257        pos = (bitsize - ri) & (bitsize - 1);
3258    }
3259
3260    if (opc == 0 && len < ri) {
3261        /* SBFM: sign extend the destination field from len to fill
3262           the balance of the word.  Let the deposit below insert all
3263           of those sign bits.  */
3264        tcg_gen_sextract_i64(tcg_tmp, tcg_tmp, 0, len);
3265        len = ri;
3266    }
3267
3268    if (opc == 1) { /* BFM, BFXIL */
3269        tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, pos, len);
3270    } else {
3271        /* SBFM or UBFM: We start with zero, and we haven't modified
3272           any bits outside bitsize, therefore the zero-extension
3273           below is unneeded.  */
3274        tcg_gen_deposit_z_i64(tcg_rd, tcg_tmp, pos, len);
3275        return;
3276    }
3277
3278 done:
3279    if (!sf) { /* zero extend final result */
3280        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
3281    }
3282}
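
/* Examples of the aliases that map onto SBFM/UBFM here:
 *   LSR   X0, X1, #8       == UBFM X0, X1, #8, #63   (si >= ri: extract)
 *   UBFX  W0, W1, #4, #8   == UBFM W0, W1, #4, #11
 *   SBFIZ X0, X1, #16, #8  == SBFM X0, X1, #48, #7   (ri > si: deposit)
 * For the insert forms immr == (-lsb) MOD bitsize, which is why pos is
 * computed as (bitsize - ri) & (bitsize - 1) above.
 */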
3283
3284/* C3.4.3 Extract
3285 *   31  30  29 28         23 22   21  20  16 15    10 9    5 4    0
3286 * +----+------+-------------+---+----+------+--------+------+------+
3287 * | sf | op21 | 1 0 0 1 1 1 | N | o0 |  Rm  |  imms  |  Rn  |  Rd  |
3288 * +----+------+-------------+---+----+------+--------+------+------+
3289 */
3290static void disas_extract(DisasContext *s, uint32_t insn)
3291{
3292    unsigned int sf, n, rm, imm, rn, rd, bitsize, op21, op0;
3293
3294    sf = extract32(insn, 31, 1);
3295    n = extract32(insn, 22, 1);
3296    rm = extract32(insn, 16, 5);
3297    imm = extract32(insn, 10, 6);
3298    rn = extract32(insn, 5, 5);
3299    rd = extract32(insn, 0, 5);
3300    op21 = extract32(insn, 29, 2);
3301    op0 = extract32(insn, 21, 1);
3302    bitsize = sf ? 64 : 32;
3303
3304    if (sf != n || op21 || op0 || imm >= bitsize) {
3305        unallocated_encoding(s);
3306    } else {
3307        TCGv_i64 tcg_rd, tcg_rm, tcg_rn;
3308
3309        tcg_rd = cpu_reg(s, rd);
3310
3311        if (unlikely(imm == 0)) {
3312            /* tcg shl_i32/shl_i64 is undefined for 32/64 bit shifts,
3313             * so an extract from bit 0 is a special case.
3314             */
3315            if (sf) {
3316                tcg_gen_mov_i64(tcg_rd, cpu_reg(s, rm));
3317            } else {
3318                tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, rm));
3319            }
3320        } else if (rm == rn) { /* ROR */
3321            tcg_rm = cpu_reg(s, rm);
3322            if (sf) {
3323                tcg_gen_rotri_i64(tcg_rd, tcg_rm, imm);
3324            } else {
3325                TCGv_i32 tmp = tcg_temp_new_i32();
3326                tcg_gen_extrl_i64_i32(tmp, tcg_rm);
3327                tcg_gen_rotri_i32(tmp, tmp, imm);
3328                tcg_gen_extu_i32_i64(tcg_rd, tmp);
3329                tcg_temp_free_i32(tmp);
3330            }
3331        } else {
3332            tcg_rm = read_cpu_reg(s, rm, sf);
3333            tcg_rn = read_cpu_reg(s, rn, sf);
3334            tcg_gen_shri_i64(tcg_rm, tcg_rm, imm);
3335            tcg_gen_shli_i64(tcg_rn, tcg_rn, bitsize - imm);
3336            tcg_gen_or_i64(tcg_rd, tcg_rm, tcg_rn);
3337            if (!sf) {
3338                tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
3339            }
3340        }
3341    }
3342}
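
/* For example, EXTR X0, X1, X2, #16 extracts bits <79:16> of the
 * 128-bit concatenation X1:X2, i.e. (X2 >> 16) | (X1 << 48); with
 * Rn == Rm this degenerates to ROR, hence the special case above.
 */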
3343
3344/* C3.4 Data processing - immediate */
3345static void disas_data_proc_imm(DisasContext *s, uint32_t insn)
3346{
3347    switch (extract32(insn, 23, 6)) {
3348    case 0x20: case 0x21: /* PC-rel. addressing */
3349        disas_pc_rel_adr(s, insn);
3350        break;
3351    case 0x22: case 0x23: /* Add/subtract (immediate) */
3352        disas_add_sub_imm(s, insn);
3353        break;
3354    case 0x24: /* Logical (immediate) */
3355        disas_logic_imm(s, insn);
3356        break;
3357    case 0x25: /* Move wide (immediate) */
3358        disas_movw_imm(s, insn);
3359        break;
3360    case 0x26: /* Bitfield */
3361        disas_bitfield(s, insn);
3362        break;
3363    case 0x27: /* Extract */
3364        disas_extract(s, insn);
3365        break;
3366    default:
3367        unallocated_encoding(s);
3368        break;
3369    }
3370}
3371
3372/* Shift a TCGv src by TCGv shift_amount, put result in dst.
3373 * Note that it is the caller's responsibility to ensure that the
3374 * shift amount is in range (ie 0..31 or 0..63) and provide the ARM
3375 * mandated semantics for out of range shifts.
3376 */
3377static void shift_reg(TCGv_i64 dst, TCGv_i64 src, int sf,
3378                      enum a64_shift_type shift_type, TCGv_i64 shift_amount)
3379{
3380    switch (shift_type) {
3381    case A64_SHIFT_TYPE_LSL:
3382        tcg_gen_shl_i64(dst, src, shift_amount);
3383        break;
3384    case A64_SHIFT_TYPE_LSR:
3385        tcg_gen_shr_i64(dst, src, shift_amount);
3386        break;
3387    case A64_SHIFT_TYPE_ASR:
3388        if (!sf) {
3389            tcg_gen_ext32s_i64(dst, src);
3390        }
3391        tcg_gen_sar_i64(dst, sf ? src : dst, shift_amount);
3392        break;
3393    case A64_SHIFT_TYPE_ROR:
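        /* A 64-bit rotate of the zero-extended 32-bit value would pull
         * zeros into the top of the low word, so the !sf case must
         * rotate in 32 bits and then zero-extend.
         */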
3394        if (sf) {
3395            tcg_gen_rotr_i64(dst, src, shift_amount);
3396        } else {
3397            TCGv_i32 t0, t1;
3398            t0 = tcg_temp_new_i32();
3399            t1 = tcg_temp_new_i32();
3400            tcg_gen_extrl_i64_i32(t0, src);
3401            tcg_gen_extrl_i64_i32(t1, shift_amount);
3402            tcg_gen_rotr_i32(t0, t0, t1);
3403            tcg_gen_extu_i32_i64(dst, t0);
3404            tcg_temp_free_i32(t0);
3405            tcg_temp_free_i32(t1);
3406        }
3407        break;
3408    default:
3409        g_assert_not_reached(); /* all shift types should be handled */
3410        break;
3411    }
3412
3413    if (!sf) { /* zero extend final result */
3414        tcg_gen_ext32u_i64(dst, dst);
3415    }
3416}
3417
3418/* Shift a TCGv src by immediate, put result in dst.
3419 * The shift amount must be in range (this should always be true as the
3420 * relevant instructions will UNDEF on bad shift immediates).
3421 */
3422static void shift_reg_imm(TCGv_i64 dst, TCGv_i64 src, int sf,
3423                          enum a64_shift_type shift_type, unsigned int shift_i)
3424{
3425    assert(shift_i < (sf ? 64 : 32));
3426
3427    if (shift_i == 0) {
3428        tcg_gen_mov_i64(dst, src);
3429    } else {
3430        TCGv_i64 shift_const;
3431
3432        shift_const = tcg_const_i64(shift_i);
3433        shift_reg(dst, src, sf, shift_type, shift_const);
3434        tcg_temp_free_i64(shift_const);
3435    }
3436}
3437
3438/* C3.5.10 Logical (shifted register)
3439 *   31  30 29 28       24 23   22 21  20  16 15    10 9    5 4    0
3440 * +----+-----+-----------+-------+---+------+--------+------+------+
3441 * | sf | opc | 0 1 0 1 0 | shift | N |  Rm  |  imm6  |  Rn  |  Rd  |
3442 * +----+-----+-----------+-------+---+------+--------+------+------+
3443 */
3444static void disas_logic_reg(DisasContext *s, uint32_t insn)
3445{
3446    TCGv_i64 tcg_rd, tcg_rn, tcg_rm;
3447    unsigned int sf, opc, shift_type, invert, rm, shift_amount, rn, rd;
3448
3449    sf = extract32(insn, 31, 1);
3450    opc = extract32(insn, 29, 2);
3451    shift_type = extract32(insn, 22, 2);
3452    invert = extract32(insn, 21, 1);
3453    rm = extract32(insn, 16, 5);
3454    shift_amount = extract32(insn, 10, 6);
3455    rn = extract32(insn, 5, 5);
3456    rd = extract32(insn, 0, 5);
3457
3458    if (!sf && (shift_amount & (1 << 5))) {
3459        unallocated_encoding(s);
3460        return;
3461    }
3462
3463    tcg_rd = cpu_reg(s, rd);
3464
3465    if (opc == 1 && shift_amount == 0 && shift_type == 0 && rn == 31) {
3466        /* Unshifted ORR and ORN with WZR/XZR is the standard encoding for
3467         * register-register MOV and MVN, so it is worth special casing.
3468         */
3469        tcg_rm = cpu_reg(s, rm);
3470        if (invert) {
3471            tcg_gen_not_i64(tcg_rd, tcg_rm);
3472            if (!sf) {
3473                tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
3474            }
3475        } else {
3476            if (sf) {
3477                tcg_gen_mov_i64(tcg_rd, tcg_rm);
3478            } else {
3479                tcg_gen_ext32u_i64(tcg_rd, tcg_rm);
3480            }
3481        }
3482        return;
3483    }
3484
3485    tcg_rm = read_cpu_reg(s, rm, sf);
3486
3487    if (shift_amount) {
3488        shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, shift_amount);
3489    }
3490
3491    tcg_rn = cpu_reg(s, rn);
3492
3493    switch (opc | (invert << 2)) {
3494    case 0: /* AND */
3495    case 3: /* ANDS */
3496        tcg_gen_and_i64(tcg_rd, tcg_rn, tcg_rm);
3497        break;
3498    case 1: /* ORR */
3499        tcg_gen_or_i64(tcg_rd, tcg_rn, tcg_rm);
3500        break;
3501    case 2: /* EOR */
3502        tcg_gen_xor_i64(tcg_rd, tcg_rn, tcg_rm);
3503        break;
3504    case 4: /* BIC */
3505    case 7: /* BICS */
3506        tcg_gen_andc_i64(tcg_rd, tcg_rn, tcg_rm);
3507        break;
3508    case 5: /* ORN */
3509        tcg_gen_orc_i64(tcg_rd, tcg_rn, tcg_rm);
3510        break;
3511    case 6: /* EON */
3512        tcg_gen_eqv_i64(tcg_rd, tcg_rn, tcg_rm);
3513        break;
3514    default:
3515        g_assert_not_reached();
3516        break;
3517    }
3518
3519    if (!sf) {
3520        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
3521    }
3522
3523    if (opc == 3) {
3524        gen_logic_CC(sf, tcg_rd);
3525    }
3526}
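
/* The opc | (invert << 2) index maps encodings such as ORN (opc == 01,
 * N == 1) to case 5.  The common MVN alias is simply
 *   MVN W0, W1  ==  ORN W0, WZR, W1
 * i.e. the unshifted special case handled before the switch.
 */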
3527
3528/*
3529 * C3.5.1 Add/subtract (extended register)
3530 *
3531 *  31|30|29|28       24|23 22|21|20   16|15  13|12  10|9  5|4  0|
3532 * +--+--+--+-----------+-----+--+-------+------+------+----+----+
3533 * |sf|op| S| 0 1 0 1 1 | opt | 1|  Rm   |option| imm3 | Rn | Rd |
3534 * +--+--+--+-----------+-----+--+-------+------+------+----+----+
3535 *
3536 *  sf: 0 -> 32bit, 1 -> 64bit
3537 *  op: 0 -> add  , 1 -> sub
3538 *   S: 1 -> set flags
3539 * opt: 00 (all other values are unallocated)
3540 * option: extension type (see DecodeRegExtend)
3541 * imm3: shift amount applied to the extended Rm (0-4; larger values UNDEF)
3542 *
3543 * Rd = Rn + LSL(extend(Rm), amount)
3544 */
3545static void disas_add_sub_ext_reg(DisasContext *s, uint32_t insn)
3546{
3547    int rd = extract32(insn, 0, 5);
3548    int rn = extract32(insn, 5, 5);
3549    int imm3 = extract32(insn, 10, 3);
3550    int option = extract32(insn, 13, 3);
3551    int rm = extract32(insn, 16, 5);
3552    bool setflags = extract32(insn, 29, 1);
3553    bool sub_op = extract32(insn, 30, 1);
3554    bool sf = extract32(insn, 31, 1);
3555
3556    TCGv_i64 tcg_rm, tcg_rn; /* temps */
3557    TCGv_i64 tcg_rd;
3558    TCGv_i64 tcg_result;
3559
3560    if (imm3 > 4) {
3561        unallocated_encoding(s);
3562        return;
3563    }
3564
3565    /* non-flag setting ops may use SP */
3566    if (!setflags) {
3567        tcg_rd = cpu_reg_sp(s, rd);
3568    } else {
3569        tcg_rd = cpu_reg(s, rd);
3570    }
3571    tcg_rn = read_cpu_reg_sp(s, rn, sf);
3572
3573    tcg_rm = read_cpu_reg(s, rm, sf);
3574    ext_and_shift_reg(tcg_rm, tcg_rm, option, imm3);
3575
3576    tcg_result = tcg_temp_new_i64();
3577
3578    if (!setflags) {
3579        if (sub_op) {
3580            tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
3581        } else {
3582            tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
3583        }
3584    } else {
3585        if (sub_op) {
3586            gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
3587        } else {
3588            gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
3589        }
3590    }
3591
3592    if (sf) {
3593        tcg_gen_mov_i64(tcg_rd, tcg_result);
3594    } else {
3595        tcg_gen_ext32u_i64(tcg_rd, tcg_result);
3596    }
3597
3598    tcg_temp_free_i64(tcg_result);
3599}
3600
3601/*
3602 * C3.5.2 Add/subtract (shifted register)
3603 *
3604 *  31 30 29 28       24 23 22 21 20   16 15     10 9    5 4    0
3605 * +--+--+--+-----------+-----+--+-------+---------+------+------+
3606 * |sf|op| S| 0 1 0 1 1 |shift| 0|  Rm   |  imm6   |  Rn  |  Rd  |
3607 * +--+--+--+-----------+-----+--+-------+---------+------+------+
3608 *
3609 *    sf: 0 -> 32bit, 1 -> 64bit
3610 *    op: 0 -> add  , 1 -> sub
3611 *     S: 1 -> set flags
3612 * shift: 00 -> LSL, 01 -> LSR, 10 -> ASR, 11 -> RESERVED
3613 *  imm6: Shift amount to apply to Rm before the add/sub
3614 */
3615static void disas_add_sub_reg(DisasContext *s, uint32_t insn)
3616{
3617    int rd = extract32(insn, 0, 5);
3618    int rn = extract32(insn, 5, 5);
3619    int imm6 = extract32(insn, 10, 6);
3620    int rm = extract32(insn, 16, 5);
3621    int shift_type = extract32(insn, 22, 2);
3622    bool setflags = extract32(insn, 29, 1);
3623    bool sub_op = extract32(insn, 30, 1);
3624    bool sf = extract32(insn, 31, 1);
3625
3626    TCGv_i64 tcg_rd = cpu_reg(s, rd);
3627    TCGv_i64 tcg_rn, tcg_rm;
3628    TCGv_i64 tcg_result;
3629
3630    if ((shift_type == 3) || (!sf && (imm6 > 31))) {
3631        unallocated_encoding(s);
3632        return;
3633    }
3634
3635    tcg_rn = read_cpu_reg(s, rn, sf);
3636    tcg_rm = read_cpu_reg(s, rm, sf);
3637
3638    shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, imm6);
3639
3640    tcg_result = tcg_temp_new_i64();
3641
3642    if (!setflags) {
3643        if (sub_op) {
3644            tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
3645        } else {
3646            tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
3647        }
3648    } else {
3649        if (sub_op) {
3650            gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
3651        } else {
3652            gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
3653        }
3654    }
3655
3656    if (sf) {
3657        tcg_gen_mov_i64(tcg_rd, tcg_result);
3658    } else {
3659        tcg_gen_ext32u_i64(tcg_rd, tcg_result);
3660    }
3661
3662    tcg_temp_free_i64(tcg_result);
3663}
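
/* CMP and CMN are aliases of this form with Rd == ZR, e.g.
 *   CMP X1, X2, LSL #4  ==  SUBS XZR, X1, X2, LSL #4
 * No special casing is needed: a write to cpu_reg(s, 31) goes to a
 * discarded temporary, giving the zero-register behaviour.
 */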
3664
3665/* C3.5.9 Data-processing (3 source)
3666 *
3667 *  31 30  29 28       24 23 21  20  16  15  14  10 9    5 4    0
3668 * +--+------+-----------+------+------+----+------+------+------+
3669 * |sf| op54 | 1 1 0 1 1 | op31 |  Rm  | o0 |  Ra  |  Rn  |  Rd  |
3670 * +--+------+-----------+------+------+----+------+------+------+
3671 */
3673static void disas_data_proc_3src(DisasContext *s, uint32_t insn)
3674{
3675    int rd = extract32(insn, 0, 5);
3676    int rn = extract32(insn, 5, 5);
3677    int ra = extract32(insn, 10, 5);
3678    int rm = extract32(insn, 16, 5);
3679    int op_id = (extract32(insn, 29, 3) << 4) |
3680        (extract32(insn, 21, 3) << 1) |
3681        extract32(insn, 15, 1);
3682    bool sf = extract32(insn, 31, 1);
3683    bool is_sub = extract32(op_id, 0, 1);
3684    bool is_high = extract32(op_id, 2, 1);
3685    bool is_signed = false;
3686    TCGv_i64 tcg_op1;
3687    TCGv_i64 tcg_op2;
3688    TCGv_i64 tcg_tmp;
3689
3690    /* Note that op_id is sf:op54:op31:o0 so it includes the 32/64 size flag */
3691    switch (op_id) {
3692    case 0x42: /* SMADDL */
3693    case 0x43: /* SMSUBL */
3694    case 0x44: /* SMULH */
3695        is_signed = true;
3696        break;
3697    case 0x0: /* MADD (32bit) */
3698    case 0x1: /* MSUB (32bit) */
3699    case 0x40: /* MADD (64bit) */
3700    case 0x41: /* MSUB (64bit) */
3701    case 0x4a: /* UMADDL */
3702    case 0x4b: /* UMSUBL */
3703    case 0x4c: /* UMULH */
3704        break;
3705    default:
3706        unallocated_encoding(s);
3707        return;
3708    }
3709
3710    if (is_high) {
3711        TCGv_i64 low_bits = tcg_temp_new_i64(); /* low bits discarded */
3712        TCGv_i64 tcg_rd = cpu_reg(s, rd);
3713        TCGv_i64 tcg_rn = cpu_reg(s, rn);
3714        TCGv_i64 tcg_rm = cpu_reg(s, rm);
3715
3716        if (is_signed) {
3717            tcg_gen_muls2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
3718        } else {
3719            tcg_gen_mulu2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
3720        }
3721
3722        tcg_temp_free_i64(low_bits);
3723        return;
3724    }
3725
3726    tcg_op1 = tcg_temp_new_i64();
3727    tcg_op2 = tcg_temp_new_i64();
3728    tcg_tmp = tcg_temp_new_i64();
3729
3730    if (op_id < 0x42) {
3731        tcg_gen_mov_i64(tcg_op1, cpu_reg(s, rn));
3732        tcg_gen_mov_i64(tcg_op2, cpu_reg(s, rm));
3733    } else {
3734        if (is_signed) {
3735            tcg_gen_ext32s_i64(tcg_op1, cpu_reg(s, rn));
3736            tcg_gen_ext32s_i64(tcg_op2, cpu_reg(s, rm));
3737        } else {
3738            tcg_gen_ext32u_i64(tcg_op1, cpu_reg(s, rn));
3739            tcg_gen_ext32u_i64(tcg_op2, cpu_reg(s, rm));
3740        }
3741    }
3742
3743    if (ra == 31 && !is_sub) {
3744        /* Special-case MADD with rA == XZR; it is the standard MUL alias */
3745        tcg_gen_mul_i64(cpu_reg(s, rd), tcg_op1, tcg_op2);
3746    } else {
3747        tcg_gen_mul_i64(tcg_tmp, tcg_op1, tcg_op2);
3748        if (is_sub) {
3749            tcg_gen_sub_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
3750        } else {
3751            tcg_gen_add_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
3752        }
3753    }
3754
3755    if (!sf) {
3756        tcg_gen_ext32u_i64(cpu_reg(s, rd), cpu_reg(s, rd));
3757    }
3758
3759    tcg_temp_free_i64(tcg_op1);
3760    tcg_temp_free_i64(tcg_op2);
3761    tcg_temp_free_i64(tcg_tmp);
3762}
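
/* Worked example of the op_id packing: SMADDL has sf:op54 == 100,
 * op31 == 001 and o0 == 0, giving (4 << 4) | (1 << 1) | 0 == 0x42; it
 * computes Xd = Xa + sext64(Wn) * sext64(Wm) via the ext32s path above.
 */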
3763
3764/* C3.5.3 - Add/subtract (with carry)
3765 *  31 30 29 28 27 26 25 24 23 22 21  20  16  15   10  9    5 4   0
3766 * +--+--+--+------------------------+------+---------+------+-----+
3767 * |sf|op| S| 1  1  0  1  0  0  0  0 |  rm  | opcode2 |  Rn  |  Rd |
3768 * +--+--+--+------------------------+------+---------+------+-----+
3769 *                                            [000000]
3770 */
3771
3772static void disas_adc_sbc(DisasContext *s, uint32_t insn)
3773{
3774    unsigned int sf, op, setflags, rm, rn, rd;
3775    TCGv_i64 tcg_y, tcg_rn, tcg_rd;
3776
3777    if (extract32(insn, 10, 6) != 0) {
3778        unallocated_encoding(s);
3779        return;
3780    }
3781
3782    sf = extract32(insn, 31, 1);
3783    op = extract32(insn, 30, 1);
3784    setflags = extract32(insn, 29, 1);
3785    rm = extract32(insn, 16, 5);
3786    rn = extract32(insn, 5, 5);
3787    rd = extract32(insn, 0, 5);
3788
3789    tcg_rd = cpu_reg(s, rd);
3790    tcg_rn = cpu_reg(s, rn);
3791
3792    if (op) {
3793        tcg_y = new_tmp_a64(s);
3794        tcg_gen_not_i64(tcg_y, cpu_reg(s, rm));
3795    } else {
3796        tcg_y = cpu_reg(s, rm);
3797    }
3798
3799    if (setflags) {
3800        gen_adc_CC(sf, tcg_rd, tcg_rn, tcg_y);
3801    } else {
3802        gen_adc(sf, tcg_rd, tcg_rn, tcg_y);
3803    }
3804}
3805
3806/* C3.5.4 - C3.5.5 Conditional compare (immediate / register)
3807 *  31 30 29 28 27 26 25 24 23 22 21  20    16 15  12  11  10  9   5  4 3   0
3808 * +--+--+--+------------------------+--------+------+----+--+------+--+-----+
3809 * |sf|op| S| 1  1  0  1  0  0  1  0 |imm5/rm | cond |i/r |o2|  Rn  |o3|nzcv |
3810 * +--+--+--+------------------------+--------+------+----+--+------+--+-----+
3811 *        [1]                             y                [0]       [0]
3812 */
3813static void disas_cc(DisasContext *s, uint32_t insn)
3814{
3815    unsigned int sf, op, y, cond, rn, nzcv, is_imm;
3816    TCGv_i32 tcg_t0, tcg_t1, tcg_t2;
3817    TCGv_i64 tcg_tmp, tcg_y, tcg_rn;
3818    DisasCompare c;
3819
3820    if (!extract32(insn, 29, 1)) {
3821        unallocated_encoding(s);
3822        return;
3823    }
3824    if (insn & (1 << 10 | 1 << 4)) {
3825        unallocated_encoding(s);
3826        return;
3827    }
3828    sf = extract32(insn, 31, 1);
3829    op = extract32(insn, 30, 1);
3830    is_imm = extract32(insn, 11, 1);
3831    y = extract32(insn, 16, 5); /* y = rm (reg) or imm5 (imm) */
3832    cond = extract32(insn, 12, 4);
3833    rn = extract32(insn, 5, 5);
3834    nzcv = extract32(insn, 0, 4);
3835
3836    /* Set T0 = !COND.  */
3837    tcg_t0 = tcg_temp_new_i32();
3838    arm_test_cc(&c, cond);
3839    tcg_gen_setcondi_i32(tcg_invert_cond(c.cond), tcg_t0, c.value, 0);
3840    arm_free_cc(&c);
3841
3842    /* Load the arguments for the new comparison.  */
3843    if (is_imm) {
3844        tcg_y = new_tmp_a64(s);
3845        tcg_gen_movi_i64(tcg_y, y);
3846    } else {
3847        tcg_y = cpu_reg(s, y);
3848    }
3849    tcg_rn = cpu_reg(s, rn);
3850
3851    /* Set the flags for the new comparison.  */
3852    tcg_tmp = tcg_temp_new_i64();
3853    if (op) {
3854        gen_sub_CC(sf, tcg_tmp, tcg_rn, tcg_y);
3855    } else {
3856        gen_add_CC(sf, tcg_tmp, tcg_rn, tcg_y);
3857    }
3858    tcg_temp_free_i64(tcg_tmp);
3859
3860    /* If COND was false, force the flags to #nzcv.  Compute two masks
3861     * to help with this: T1 = (COND ? 0 : -1), T2 = (COND ? -1 : 0).
3862     * For tcg hosts that support ANDC, we can make do with just T1.
3863     * In either case, allow the tcg optimizer to delete any unused mask.
3864     */
3865    tcg_t1 = tcg_temp_new_i32();
3866    tcg_t2 = tcg_temp_new_i32();
3867    tcg_gen_neg_i32(tcg_t1, tcg_t0);
3868    tcg_gen_subi_i32(tcg_t2, tcg_t0, 1);
3869
3870    if (nzcv & 8) { /* N */
3871        tcg_gen_or_i32(cpu_NF, cpu_NF, tcg_t1);
3872    } else {
3873        if (TCG_TARGET_HAS_andc_i32) {
3874            tcg_gen_andc_i32(cpu_NF, cpu_NF, tcg_t1);
3875        } else {
3876            tcg_gen_and_i32(cpu_NF, cpu_NF, tcg_t2);
3877        }
3878    }
3879    if (nzcv & 4) { /* Z */
3880        if (TCG_TARGET_HAS_andc_i32) {
3881            tcg_gen_andc_i32(cpu_ZF, cpu_ZF, tcg_t1);
3882        } else {
3883            tcg_gen_and_i32(cpu_ZF, cpu_ZF, tcg_t2);
3884        }
3885    } else {
3886        tcg_gen_or_i32(cpu_ZF, cpu_ZF, tcg_t0);
3887    }
3888    if (nzcv & 2) { /* C */
3889        tcg_gen_or_i32(cpu_CF, cpu_CF, tcg_t0);
3890    } else {
3891        if (TCG_TARGET_HAS_andc_i32) {
3892            tcg_gen_andc_i32(cpu_CF, cpu_CF, tcg_t1);
3893        } else {
3894            tcg_gen_and_i32(cpu_CF, cpu_CF, tcg_t2);
3895        }
3896    }
3897    if (nzcv & 1) { /* V */
3898        tcg_gen_or_i32(cpu_VF, cpu_VF, tcg_t1);
3899    } else {
3900        if (TCG_TARGET_HAS_andc_i32) {
3901            tcg_gen_andc_i32(cpu_VF, cpu_VF, tcg_t1);
3902        } else {
3903            tcg_gen_and_i32(cpu_VF, cpu_VF, tcg_t2);
3904        }
3905    }
3906    tcg_temp_free_i32(tcg_t0);
3907    tcg_temp_free_i32(tcg_t1);
3908    tcg_temp_free_i32(tcg_t2);
3909}
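
/* Example using the QEMU flag conventions (NF and VF hold the flag in
 * bit 31, CF holds it in bit 0, ZF is "zero means Z set"): for a CCMP
 * with nzcv == 0100, if COND fails then T0 = 1, T1 = -1 and T2 = 0, so
 * ZF &= 0 sets Z while NF, CF and VF are all forced clear.
 */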
3910
3911/* C3.5.6 Conditional select
3912 *   31   30  29  28             21 20  16 15  12 11 10 9    5 4    0
3913 * +----+----+---+-----------------+------+------+-----+------+------+
3914 * | sf | op | S | 1 1 0 1 0 1 0 0 |  Rm  | cond | op2 |  Rn  |  Rd  |
3915 * +----+----+---+-----------------+------+------+-----+------+------+
3916 */
3917static void disas_cond_select(DisasContext *s, uint32_t insn)
3918{
3919    unsigned int sf, else_inv, rm, cond, else_inc, rn, rd;
3920    TCGv_i64 tcg_rd, zero;
3921    DisasCompare64 c;
3922
3923    if (extract32(insn, 29, 1) || extract32(insn, 11, 1)) {
3924        /* S == 1 or op2<1> == 1 */
3925        unallocated_encoding(s);
3926        return;
3927    }
3928    sf = extract32(insn, 31, 1);
3929    else_inv = extract32(insn, 30, 1);
3930    rm = extract32(insn, 16, 5);
3931    cond = extract32(insn, 12, 4);
3932    else_inc = extract32(insn, 10, 1);
3933    rn = extract32(insn, 5, 5);
3934    rd = extract32(insn, 0, 5);
3935
3936    tcg_rd = cpu_reg(s, rd);
3937
3938    a64_test_cc(&c, cond);
3939    zero = tcg_const_i64(0);
3940
3941    if (rn == 31 && rm == 31 && (else_inc ^ else_inv)) {
3942        /* CSET & CSETM.  */
3943        tcg_gen_setcond_i64(tcg_invert_cond(c.cond), tcg_rd, c.value, zero);
3944        if (else_inv) {
3945            tcg_gen_neg_i64(tcg_rd, tcg_rd);
3946        }
3947    } else {
3948        TCGv_i64 t_true = cpu_reg(s, rn);
3949        TCGv_i64 t_false = read_cpu_reg(s, rm, 1);
3950        if (else_inv && else_inc) {
3951            tcg_gen_neg_i64(t_false, t_false);
3952        } else if (else_inv) {
3953            tcg_gen_not_i64(t_false, t_false);
3954        } else if (else_inc) {
3955            tcg_gen_addi_i64(t_false, t_false, 1);
3956        }
3957        tcg_gen_movcond_i64(c.cond, tcg_rd, c.value, zero, t_true, t_false);
3958    }
3959
3960    tcg_temp_free_i64(zero);
3961    a64_free_cc(&c);
3962
3963    if (!sf) {
3964        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
3965    }
3966}
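
/* The usual aliases fall out of else_inv/else_inc, e.g.
 *   CSET W0, cond      ==  CSINC W0, WZR, WZR, invert(cond)
 *   CSETM X0, cond     ==  CSINV X0, XZR, XZR, invert(cond)
 *   CINC X0, X1, cond  ==  CSINC X0, X1, X1, invert(cond)
 * the first two being the setcond fast path above.
 */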
3967
3968static void handle_clz(DisasContext *s, unsigned int sf,
3969                       unsigned int rn, unsigned int rd)
3970{
3971    TCGv_i64 tcg_rd, tcg_rn;
3972    tcg_rd = cpu_reg(s, rd);
3973    tcg_rn = cpu_reg(s, rn);
3974
3975    if (sf) {
3976        tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64);
3977    } else {
3978        TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
3979        tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
3980        tcg_gen_clzi_i32(tcg_tmp32, tcg_tmp32, 32);
3981        tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
3982        tcg_temp_free_i32(tcg_tmp32);
3983    }
3984}
3985
3986static void handle_cls(DisasContext *s, unsigned int sf,
3987                       unsigned int rn, unsigned int rd)
3988{
3989    TCGv_i64 tcg_rd, tcg_rn;
3990    tcg_rd = cpu_reg(s, rd);
3991    tcg_rn = cpu_reg(s, rn);
3992
3993    if (sf) {
3994        tcg_gen_clrsb_i64(tcg_rd, tcg_rn);
3995    } else {
3996        TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
3997        tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
3998        tcg_gen_clrsb_i32(tcg_tmp32, tcg_tmp32);
3999        tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
4000        tcg_temp_free_i32(tcg_tmp32);
4001    }
4002}
4003
4004static void handle_rbit(DisasContext *s, unsigned int sf,
4005                        unsigned int rn, unsigned int rd)
4006{
4007    TCGv_i64 tcg_rd, tcg_rn;
4008    tcg_rd = cpu_reg(s, rd);
4009    tcg_rn = cpu_reg(s, rn);
4010
4011    if (sf) {
4012        gen_helper_rbit64(tcg_rd, tcg_rn);
4013    } else {
4014        TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
4015        tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
4016        gen_helper_rbit(tcg_tmp32, tcg_tmp32);
4017        tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
4018        tcg_temp_free_i32(tcg_tmp32);
4019    }
4020}
4021
4022/* C5.6.149 REV with sf==1, opcode==3 ("REV64") */
4023static void handle_rev64(DisasContext *s, unsigned int sf,
4024                         unsigned int rn, unsigned int rd)
4025{
4026    if (!sf) {
4027        unallocated_encoding(s);
4028        return;
4029    }
4030    tcg_gen_bswap64_i64(cpu_reg(s, rd), cpu_reg(s, rn));
4031}
4032
4033/* C5.6.149 REV with sf==0, opcode==2
4034 * C5.6.151 REV32 (sf==1, opcode==2)
4035 */
4036static void handle_rev32(DisasContext *s, unsigned int sf,
4037                         unsigned int rn, unsigned int rd)
4038{
4039    TCGv_i64 tcg_rd = cpu_reg(s, rd);
4040
4041    if (sf) {
4042        TCGv_i64 tcg_tmp = tcg_temp_new_i64();
4043        TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
4044
4045        /* bswap32_i64 requires zero high word */
4046        tcg_gen_ext32u_i64(tcg_tmp, tcg_rn);
4047        tcg_gen_bswap32_i64(tcg_rd, tcg_tmp);
4048        tcg_gen_shri_i64(tcg_tmp, tcg_rn, 32);
4049        tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp);
4050        tcg_gen_concat32_i64(tcg_rd, tcg_rd, tcg_tmp);
4051
4052        tcg_temp_free_i64(tcg_tmp);
4053    } else {
4054        tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, rn));
4055        tcg_gen_bswap32_i64(tcg_rd, tcg_rd);
4056    }
4057}
4058
4059/* C5.6.150 REV16 (opcode==1) */
4060static void handle_rev16(DisasContext *s, unsigned int sf,
4061                         unsigned int rn, unsigned int rd)
4062{
4063    TCGv_i64 tcg_rd = cpu_reg(s, rd);
4064    TCGv_i64 tcg_tmp = tcg_temp_new_i64();
4065    TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
4066    TCGv_i64 mask = tcg_const_i64(sf ? 0x00ff00ff00ff00ffull : 0x00ff00ff);
4067
4068    tcg_gen_shri_i64(tcg_tmp, tcg_rn, 8);
4069    tcg_gen_and_i64(tcg_rd, tcg_rn, mask);
4070    tcg_gen_and_i64(tcg_tmp, tcg_tmp, mask);
4071    tcg_gen_shli_i64(tcg_rd, tcg_rd, 8);
4072    tcg_gen_or_i64(tcg_rd, tcg_rd, tcg_tmp);
4073
4074    tcg_temp_free_i64(mask);
4075    tcg_temp_free_i64(tcg_tmp);
4076}
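
/* The mask-and-shift sequence swaps the bytes within each halfword,
 * e.g. 0x11223344aabbccdd -> 0x22114433bbaaddcc for the sf == 1 case.
 */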
4077
4078/* C3.5.7 Data-processing (1 source)
4079 *   31  30  29  28             21 20     16 15    10 9    5 4    0
4080 * +----+---+---+-----------------+---------+--------+------+------+
4081 * | sf | 1 | S | 1 1 0 1 0 1 1 0 | opcode2 | opcode |  Rn  |  Rd  |
4082 * +----+---+---+-----------------+---------+--------+------+------+
4083 */
4084static void disas_data_proc_1src(DisasContext *s, uint32_t insn)
4085{
4086    unsigned int sf, opcode, rn, rd;
4087
4088    if (extract32(insn, 29, 1) || extract32(insn, 16, 5)) {
4089        unallocated_encoding(s);
4090        return;
4091    }
4092
4093    sf = extract32(insn, 31, 1);
4094    opcode = extract32(insn, 10, 6);
4095    rn = extract32(insn, 5, 5);
4096    rd = extract32(insn, 0, 5);
4097
4098    switch (opcode) {
4099    case 0: /* RBIT */
4100        handle_rbit(s, sf, rn, rd);
4101        break;
4102    case 1: /* REV16 */
4103        handle_rev16(s, sf, rn, rd);
4104        break;
4105    case 2: /* REV32 */
4106        handle_rev32(s, sf, rn, rd);
4107        break;
4108    case 3: /* REV64 */
4109        handle_rev64(s, sf, rn, rd);
4110        break;
4111    case 4: /* CLZ */
4112        handle_clz(s, sf, rn, rd);
4113        break;
4114    case 5: /* CLS */
4115        handle_cls(s, sf, rn, rd);
4116        break;
4117    default:
        unallocated_encoding(s);
        break;
    }
4118}
4119
4120static void handle_div(DisasContext *s, bool is_signed, unsigned int sf,
4121                       unsigned int rm, unsigned int rn, unsigned int rd)
4122{
4123    TCGv_i64 tcg_n, tcg_m, tcg_rd;
4124    tcg_rd = cpu_reg(s, rd);
4125
4126    if (!sf && is_signed) {
4127        tcg_n = new_tmp_a64(s);
4128        tcg_m = new_tmp_a64(s);
4129        tcg_gen_ext32s_i64(tcg_n, cpu_reg(s, rn));
4130        tcg_gen_ext32s_i64(tcg_m, cpu_reg(s, rm));
4131    } else {
4132        tcg_n = read_cpu_reg(s, rn, sf);
4133        tcg_m = read_cpu_reg(s, rm, sf);
4134    }
4135
4136    if (is_signed) {
4137        gen_helper_sdiv64(tcg_rd, tcg_n, tcg_m);
4138    } else {
4139        gen_helper_udiv64(tcg_rd, tcg_n, tcg_m);
4140    }
4141
4142    if (!sf) { /* zero extend final result */
4143        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
4144    }
4145}
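
/* The architecture defines division by zero to return zero rather than
 * trap, which the sdiv64/udiv64 helpers implement; the ext32s above
 * ensures e.g. SDIV W0, W1, W2 sees sign-extended 32-bit operands.
 */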
4146
4147/* C5.6.115 LSLV, C5.6.118 LSRV, C5.6.17 ASRV, C5.6.154 RORV */
4148static void handle_shift_reg(DisasContext *s,
4149                             enum a64_shift_type shift_type, unsigned int sf,
4150                             unsigned int rm, unsigned int rn, unsigned int rd)
4151{
4152    TCGv_i64 tcg_shift = tcg_temp_new_i64();
4153    TCGv_i64 tcg_rd = cpu_reg(s, rd);
4154    TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
4155
4156    tcg_gen_andi_i64(tcg_shift, cpu_reg(s, rm), sf ? 63 : 31);
4157    shift_reg(tcg_rd, tcg_rn, sf, shift_type, tcg_shift);
4158    tcg_temp_free_i64(tcg_shift);
4159}
4160
4161/* CRC32[BHWX], CRC32C[BHWX] */
4162static void handle_crc32(DisasContext *s,
4163                         unsigned int sf, unsigned int sz, bool crc32c,
4164                         unsigned int rm, unsigned int rn, unsigned int rd)
4165{
4166    TCGv_i64 tcg_acc, tcg_val;
4167    TCGv_i32 tcg_bytes;
4168
4169    if (!arm_dc_feature(s, ARM_FEATURE_CRC)
4170        || (sf == 1 && sz != 3)
4171        || (sf == 0 && sz == 3)) {
4172        unallocated_encoding(s);
4173        return;
4174    }
4175
4176    if (sz == 3) {
4177        tcg_val = cpu_reg(s, rm);
4178    } else {
4179        uint64_t mask;
4180        switch (sz) {
4181        case 0:
4182            mask = 0xFF;
4183            break;
4184        case 1:
4185            mask = 0xFFFF;
4186            break;
4187        case 2:
4188            mask = 0xFFFFFFFF;
4189            break;
4190        default:
4191            g_assert_not_reached();
4192        }
4193        tcg_val = new_tmp_a64(s);
4194        tcg_gen_andi_i64(tcg_val, cpu_reg(s, rm), mask);
4195    }
4196
4197    tcg_acc = cpu_reg(s, rn);
4198    tcg_bytes = tcg_const_i32(1 << sz);
4199
4200    if (crc32c) {
4201        gen_helper_crc32c_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes);
4202    } else {
4203        gen_helper_crc32_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes);
4204    }
4205
4206    tcg_temp_free_i32(tcg_bytes);
4207}
4208
4209/* C3.5.8 Data-processing (2 source)
4210 *   31   30  29 28             21 20  16 15    10 9    5 4    0
4211 * +----+---+---+-----------------+------+--------+------+------+
4212 * | sf | 0 | S | 1 1 0 1 0 1 1 0 |  Rm  | opcode |  Rn  |  Rd  |
4213 * +----+---+---+-----------------+------+--------+------+------+
4214 */
4215static void disas_data_proc_2src(DisasContext *s, uint32_t insn)
4216{
4217    unsigned int sf, rm, opcode, rn, rd;
4218    sf = extract32(insn, 31, 1);
4219    rm = extract32(insn, 16, 5);
4220    opcode = extract32(insn, 10, 6);
4221    rn = extract32(insn, 5, 5);
4222    rd = extract32(insn, 0, 5);
4223
4224    if (extract32(insn, 29, 1)) {
4225        unallocated_encoding(s);
4226        return;
4227    }
4228
4229    switch (opcode) {
4230    case 2: /* UDIV */
4231        handle_div(s, false, sf, rm, rn, rd);
4232        break;
4233    case 3: /* SDIV */
4234        handle_div(s, true, sf, rm, rn, rd);
4235        break;
4236    case 8: /* LSLV */
4237        handle_shift_reg(s, A64_SHIFT_TYPE_LSL, sf, rm, rn, rd);
4238        break;
4239    case 9: /* LSRV */
4240        handle_shift_reg(s, A64_SHIFT_TYPE_LSR, sf, rm, rn, rd);
4241        break;
4242    case 10: /* ASRV */
4243        handle_shift_reg(s, A64_SHIFT_TYPE_ASR, sf, rm, rn, rd);
4244        break;
4245    case 11: /* RORV */
4246        handle_shift_reg(s, A64_SHIFT_TYPE_ROR, sf, rm, rn, rd);
4247        break;
4248    case 16:
4249    case 17:
4250    case 18:
4251    case 19:
4252    case 20:
4253    case 21:
4254    case 22:
4255    case 23: /* CRC32 */
4256    {
4257        int sz = extract32(opcode, 0, 2);
4258        bool crc32c = extract32(opcode, 2, 1);
4259        handle_crc32(s, sf, sz, crc32c, rm, rn, rd);
4260        break;
4261    }
4262    default:
4263        unallocated_encoding(s);
4264        break;
4265    }
4266}
4267
4268/* C3.5 Data processing - register */
4269static void disas_data_proc_reg(DisasContext *s, uint32_t insn)
4270{
4271    switch (extract32(insn, 24, 5)) {
4272    case 0x0a: /* Logical (shifted register) */
4273        disas_logic_reg(s, insn);
4274        break;
4275    case 0x0b: /* Add/subtract */
4276        if (insn & (1 << 21)) { /* (extended register) */
4277            disas_add_sub_ext_reg(s, insn);
4278        } else {
4279            disas_add_sub_reg(s, insn);
4280        }
4281        break;
4282    case 0x1b: /* Data-processing (3 source) */
4283        disas_data_proc_3src(s, insn);
4284        break;
4285    case 0x1a:
4286        switch (extract32(insn, 21, 3)) {
4287        case 0x0: /* Add/subtract (with carry) */
4288            disas_adc_sbc(s, insn);
4289            break;
4290        case 0x2: /* Conditional compare */
4291            disas_cc(s, insn); /* both imm and reg forms */
4292            break;
4293        case 0x4: /* Conditional select */
4294            disas_cond_select(s, insn);
4295            break;
4296        case 0x6: /* Data-processing */
4297            if (insn & (1 << 30)) { /* (1 source) */
4298                disas_data_proc_1src(s, insn);
4299            } else {            /* (2 source) */
4300                disas_data_proc_2src(s, insn);
4301            }
4302            break;
4303        default:
4304            unallocated_encoding(s);
4305            break;
4306        }
4307        break;
4308    default:
4309        unallocated_encoding(s);
4310        break;
4311    }
4312}
4313
4314static void handle_fp_compare(DisasContext *s, bool is_double,
4315                              unsigned int rn, unsigned int rm,
4316                              bool cmp_with_zero, bool signal_all_nans)
4317{
4318    TCGv_i64 tcg_flags = tcg_temp_new_i64();
4319    TCGv_ptr fpst = get_fpstatus_ptr();
4320
4321    if (is_double) {
4322        TCGv_i64 tcg_vn, tcg_vm;
4323
4324        tcg_vn = read_fp_dreg(s, rn);
4325        if (cmp_with_zero) {
4326            tcg_vm = tcg_const_i64(0);
4327        } else {
4328            tcg_vm = read_fp_dreg(s, rm);
4329        }
4330        if (signal_all_nans) {
4331            gen_helper_vfp_cmped_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
4332        } else {
4333            gen_helper_vfp_cmpd_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
4334        }
4335        tcg_temp_free_i64(tcg_vn);
4336        tcg_temp_free_i64(tcg_vm);
4337    } else {
4338        TCGv_i32 tcg_vn, tcg_vm;
4339
4340        tcg_vn = read_fp_sreg(s, rn);
4341        if (cmp_with_zero) {
4342            tcg_vm = tcg_const_i32(0);
4343        } else {
4344            tcg_vm = read_fp_sreg(s, rm);
4345        }
4346        if (signal_all_nans) {
4347            gen_helper_vfp_cmpes_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
4348        } else {
4349            gen_helper_vfp_cmps_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
4350        }
4351        tcg_temp_free_i32(tcg_vn);
4352        tcg_temp_free_i32(tcg_vm);
4353    }
4354
4355    tcg_temp_free_ptr(fpst);
4356
4357    gen_set_nzcv(tcg_flags);
4358
4359    tcg_temp_free_i64(tcg_flags);
4360}
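
/* The compare helpers produce the architected NZCV results:
 * equal -> 0110, less than -> 1000, greater than -> 0010, and
 * unordered (at least one NaN input) -> 0011.
 */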
4361
4362/* C3.6.22 Floating point compare
4363 *   31  30  29 28       24 23  22  21 20  16 15 14 13  10    9    5 4     0
4364 * +---+---+---+-----------+------+---+------+-----+---------+------+-------+
4365 * | M | 0 | S | 1 1 1 1 0 | type | 1 |  Rm  | op  | 1 0 0 0 |  Rn  |  op2  |
4366 * +---+---+---+-----------+------+---+------+-----+---------+------+-------+
4367 */
4368static void disas_fp_compare(DisasContext *s, uint32_t insn)
4369{
4370    unsigned int mos, type, rm, op, rn, opc, op2r;
4371
4372    mos = extract32(insn, 29, 3);
4373    type = extract32(insn, 22, 2); /* 0 = single, 1 = double */
4374    rm = extract32(insn, 16, 5);
4375    op = extract32(insn, 14, 2);
4376    rn = extract32(insn, 5, 5);
4377    opc = extract32(insn, 3, 2);
4378    op2r = extract32(insn, 0, 3);
4379
4380    if (mos || op || op2r || type > 1) {
4381        unallocated_encoding(s);
4382        return;
4383    }
4384
4385    if (!fp_access_check(s)) {
4386        return;
4387    }
4388
4389    handle_fp_compare(s, type, rn, rm, opc & 1, opc & 2);
4390}
4391
4392/* C3.6.23 Floating point conditional compare
4393 *   31  30  29 28       24 23  22  21 20  16 15  12 11 10 9    5  4   3    0
4394 * +---+---+---+-----------+------+---+------+------+-----+------+----+------+
4395 * | M | 0 | S | 1 1 1 1 0 | type | 1 |  Rm  | cond | 0 1 |  Rn  | op | nzcv |
4396 * +---+---+---+-----------+------+---+------+------+-----+------+----+------+
4397 */
4398static void disas_fp_ccomp(DisasContext *s, uint32_t insn)
4399{
4400    unsigned int mos, type, rm, cond, rn, op, nzcv;
4401    TCGv_i64 tcg_flags;
4402    TCGLabel *label_continue = NULL;
4403
4404    mos = extract32(insn, 29, 3);
4405    type = extract32(insn, 22, 2); /* 0 = single, 1 = double */
4406    rm = extract32(insn, 16, 5);
4407    cond = extract32(insn, 12, 4);
4408    rn = extract32(insn, 5, 5);
4409    op = extract32(insn, 4, 1);
4410    nzcv = extract32(insn, 0, 4);
4411
4412    if (mos || type > 1) {
4413        unallocated_encoding(s);
4414        return;
4415    }
4416
4417    if (!fp_access_check(s)) {
4418        return;
4419    }
4420
4421    if (cond < 0x0e) { /* not always */
4422        TCGLabel *label_match = gen_new_label();
4423        label_continue = gen_new_label();
4424        arm_gen_test_cc(cond, label_match);
4425        /* nomatch: */
4426        tcg_flags = tcg_const_i64(nzcv << 28);
4427        gen_set_nzcv(tcg_flags);
4428        tcg_temp_free_i64(tcg_flags);
4429        tcg_gen_br(label_continue);
4430        gen_set_label(label_match);
4431    }
4432
4433    handle_fp_compare(s, type, rn, rm, false, op);
4434
4435    if (cond < 0x0e) {
4436        gen_set_label(label_continue);
4437    }
4438}
4439
4440/* C3.6.24 Floating point conditional select
4441 *   31  30  29 28       24 23  22  21 20  16 15  12 11 10 9    5 4    0
4442 * +---+---+---+-----------+------+---+------+------+-----+------+------+
4443 * | M | 0 | S | 1 1 1 1 0 | type | 1 |  Rm  | cond | 1 1 |  Rn  |  Rd  |
4444 * +---+---+---+-----------+------+---+------+------+-----+------+------+
4445 */
4446static void disas_fp_csel(DisasContext *s, uint32_t insn)
4447{
4448    unsigned int mos, type, rm, cond, rn, rd;
4449    TCGv_i64 t_true, t_false, t_zero;
4450    DisasCompare64 c;
4451
4452    mos = extract32(insn, 29, 3);
4453    type = extract32(insn, 22, 2); /* 0 = single, 1 = double */
4454    rm = extract32(insn, 16, 5);
4455    cond = extract32(insn, 12, 4);
4456    rn = extract32(insn, 5, 5);
4457    rd = extract32(insn, 0, 5);
4458
4459    if (mos || type > 1) {
4460        unallocated_encoding(s);
4461        return;
4462    }
4463
4464    if (!fp_access_check(s)) {
4465        return;
4466    }
4467
4468    /* Zero extend sreg inputs to 64 bits now.  */
4469    t_true = tcg_temp_new_i64();
4470    t_false = tcg_temp_new_i64();
4471    read_vec_element(s, t_true, rn, 0, type ? MO_64 : MO_32);
4472    read_vec_element(s, t_false, rm, 0, type ? MO_64 : MO_32);
4473
4474    a64_test_cc(&c, cond);
4475    t_zero = tcg_const_i64(0);
4476    tcg_gen_movcond_i64(c.cond, t_true, c.value, t_zero, t_true, t_false);
4477    tcg_temp_free_i64(t_zero);
4478    tcg_temp_free_i64(t_false);
4479    a64_free_cc(&c);
4480
4481    /* Note that sregs write back zeros to the high bits,
4482       and we've already done the zero-extension.  */
4483    write_fp_dreg(s, rd, t_true);
4484    tcg_temp_free_i64(t_true);
4485}
4486
4487/* C3.6.25 Floating-point data-processing (1 source) - single precision */
4488static void handle_fp_1src_single(DisasContext *s, int opcode, int rd, int rn)
4489{
4490    TCGv_ptr fpst;
4491    TCGv_i32 tcg_op;
4492    TCGv_i32 tcg_res;
4493
4494    fpst = get_fpstatus_ptr();
4495    tcg_op = read_fp_sreg(s, rn);
4496    tcg_res = tcg_temp_new_i32();
4497
4498    switch (opcode) {
4499    case 0x0: /* FMOV */
4500        tcg_gen_mov_i32(tcg_res, tcg_op);
4501        break;
4502    case 0x1: /* FABS */
4503        gen_helper_vfp_abss(tcg_res, tcg_op);
4504        break;
4505    case 0x2: /* FNEG */
4506        gen_helper_vfp_negs(tcg_res, tcg_op);
4507        break;
4508    case 0x3: /* FSQRT */
4509        gen_helper_vfp_sqrts(tcg_res, tcg_op, cpu_env);
4510        break;
4511    case 0x8: /* FRINTN */
4512    case 0x9: /* FRINTP */
4513    case 0xa: /* FRINTM */
4514    case 0xb: /* FRINTZ */
4515    case 0xc: /* FRINTA */
4516    {
4517        TCGv_i32 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(opcode & 7));
4518
4519        gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
4520        gen_helper_rints(tcg_res, tcg_op, fpst);
4521
4522        gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
4523        tcg_temp_free_i32(tcg_rmode);
4524        break;
4525    }
4526    case 0xe: /* FRINTX */
4527        gen_helper_rints_exact(tcg_res, tcg_op, fpst);
4528        break;
4529    case 0xf: /* FRINTI */
4530        gen_helper_rints(tcg_res, tcg_op, fpst);
4531        break;
4532    default:
4533        abort();
4534    }
4535
4536    write_fp_sreg(s, rd, tcg_res);
4537
4538    tcg_temp_free_ptr(fpst);
4539    tcg_temp_free_i32(tcg_op);
4540    tcg_temp_free_i32(tcg_res);
4541}
4542
4543/* C3.6.25 Floating-point data-processing (1 source) - double precision */
4544static void handle_fp_1src_double(DisasContext *s, int opcode, int rd, int rn)
4545{
4546    TCGv_ptr fpst;
4547    TCGv_i64 tcg_op;
4548    TCGv_i64 tcg_res;
4549
4550    fpst = get_fpstatus_ptr();
4551    tcg_op = read_fp_dreg(s, rn);
4552    tcg_res = tcg_temp_new_i64();
4553
4554    switch (opcode) {
4555    case 0x0: /* FMOV */
4556        tcg_gen_mov_i64(tcg_res, tcg_op);
4557        break;
4558    case 0x1: /* FABS */
4559        gen_helper_vfp_absd(tcg_res, tcg_op);
4560        break;
4561    case 0x2: /* FNEG */
4562        gen_helper_vfp_negd(tcg_res, tcg_op);
4563        break;
4564    case 0x3: /* FSQRT */
4565        gen_helper_vfp_sqrtd(tcg_res, tcg_op, cpu_env);
4566        break;
4567    case 0x8: /* FRINTN */
4568    case 0x9: /* FRINTP */
4569    case 0xa: /* FRINTM */
4570    case 0xb: /* FRINTZ */
4571    case 0xc: /* FRINTA */
4572    {
4573        TCGv_i32 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(opcode & 7));
4574
4575        gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
4576        gen_helper_rintd(tcg_res, tcg_op, fpst);
4577
4578        gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
4579        tcg_temp_free_i32(tcg_rmode);
4580        break;
4581    }
4582    case 0xe: /* FRINTX */
4583        gen_helper_rintd_exact(tcg_res, tcg_op, fpst);
4584        break;
4585    case 0xf: /* FRINTI */
4586        gen_helper_rintd(tcg_res, tcg_op, fpst);
4587        break;
4588    default:
4589        abort();
4590    }
4591
4592    write_fp_dreg(s, rd, tcg_res);
4593
4594    tcg_temp_free_ptr(fpst);
4595    tcg_temp_free_i64(tcg_op);
4596    tcg_temp_free_i64(tcg_res);
4597}
4598
4599static void handle_fp_fcvt(DisasContext *s, int opcode,
4600                           int rd, int rn, int dtype, int ntype)
4601{
4602    switch (ntype) {
4603    case 0x0:
4604    {
4605        TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
4606        if (dtype == 1) {
4607            /* Single to double */
4608            TCGv_i64 tcg_rd = tcg_temp_new_i64();
4609            gen_helper_vfp_fcvtds(tcg_rd, tcg_rn, cpu_env);
4610            write_fp_dreg(s, rd, tcg_rd);
4611            tcg_temp_free_i64(tcg_rd);
4612        } else {
4613            /* Single to half */
4614            TCGv_i32 tcg_rd = tcg_temp_new_i32();
4615            gen_helper_vfp_fcvt_f32_to_f16(tcg_rd, tcg_rn, cpu_env);
4616            /* write_fp_sreg is OK here because top half of tcg_rd is zero */
4617            write_fp_sreg(s, rd, tcg_rd);
4618            tcg_temp_free_i32(tcg_rd);
4619        }
4620        tcg_temp_free_i32(tcg_rn);
4621        break;
4622    }
4623    case 0x1:
4624    {
4625        TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
4626        TCGv_i32 tcg_rd = tcg_temp_new_i32();
4627        if (dtype == 0) {
4628            /* Double to single */
4629            gen_helper_vfp_fcvtsd(tcg_rd, tcg_rn, cpu_env);
4630        } else {
4631            /* Double to half */
4632            gen_helper_vfp_fcvt_f64_to_f16(tcg_rd, tcg_rn, cpu_env);
4633            /* write_fp_sreg is OK here because top half of tcg_rd is zero */
4634        }
4635        write_fp_sreg(s, rd, tcg_rd);
4636        tcg_temp_free_i32(tcg_rd);
4637        tcg_temp_free_i64(tcg_rn);
4638        break;
4639    }
4640    case 0x3:
4641    {
4642        TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
4643        tcg_gen_ext16u_i32(tcg_rn, tcg_rn);
4644        if (dtype == 0) {
4645            /* Half to single */
4646            TCGv_i32 tcg_rd = tcg_temp_new_i32();
4647            gen_helper_vfp_fcvt_f16_to_f32(tcg_rd, tcg_rn, cpu_env);
4648            write_fp_sreg(s, rd, tcg_rd);
4649            tcg_temp_free_i32(tcg_rd);
4650        } else {
4651            /* Half to double */
4652            TCGv_i64 tcg_rd = tcg_temp_new_i64();
4653            gen_helper_vfp_fcvt_f16_to_f64(tcg_rd, tcg_rn, cpu_env);
4654            write_fp_dreg(s, rd, tcg_rd);
4655            tcg_temp_free_i64(tcg_rd);
4656        }
4657        tcg_temp_free_i32(tcg_rn);
4658        break;
4659    }
4660    default:
4661        abort();
4662    }
4663}
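
/* ntype/dtype use the FP "type" encoding (0 = single, 1 = double,
 * 3 = half), so e.g. FCVT D0, H1 arrives with ntype == 3, dtype == 1
 * and takes the half-to-double path above.
 */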
4664
4665/* C3.6.25 Floating point data-processing (1 source)
4666 *   31  30  29 28       24 23  22  21 20    15 14       10 9    5 4    0
4667 * +---+---+---+-----------+------+---+--------+-----------+------+------+
4668 * | M | 0 | S | 1 1 1 1 0 | type | 1 | opcode | 1 0 0 0 0 |  Rn  |  Rd  |
4669 * +---+---+---+-----------+------+---+--------+-----------+------+------+
4670 */
4671static void disas_fp_1src(DisasContext *s, uint32_t insn)
4672{
4673    int type = extract32(insn, 22, 2);
4674    int opcode = extract32(insn, 15, 6);
4675    int rn = extract32(insn, 5, 5);
4676    int rd = extract32(insn, 0, 5);
4677
4678    switch (opcode) {
4679    case 0x4: case 0x5: case 0x7:
4680    {
4681        /* FCVT between half, single and double precision */
4682        int dtype = extract32(opcode, 0, 2);
4683        if (type == 2 || dtype == type) {
4684            unallocated_encoding(s);
4685            return;
4686        }
4687        if (!fp_access_check(s)) {
4688            return;
4689        }
4690
4691        handle_fp_fcvt(s, opcode, rd, rn, dtype, type);
4692        break;
4693    }
4694    case 0x0 ... 0x3:
4695    case 0x8 ... 0xc:
4696    case 0xe ... 0xf:
4697        /* 32-to-32 and 64-to-64 ops */
4698        switch (type) {
4699        case 0:
4700            if (!fp_access_check(s)) {
4701                return;
4702            }
4703
4704            handle_fp_1src_single(s, opcode, rd, rn);
4705            break;
4706        case 1:
4707            if (!fp_access_check(s)) {
4708                return;
4709            }
4710
4711            handle_fp_1src_double(s, opcode, rd, rn);
4712            break;
4713        default:
4714            unallocated_encoding(s);
4715        }
4716        break;
4717    default:
4718        unallocated_encoding(s);
4719        break;
4720    }
4721}
4722
4723/* C3.6.26 Floating-point data-processing (2 source) - single precision */
4724static void handle_fp_2src_single(DisasContext *s, int opcode,
4725                                  int rd, int rn, int rm)
4726{
4727    TCGv_i32 tcg_op1;
4728    TCGv_i32 tcg_op2;
4729    TCGv_i32 tcg_res;
4730    TCGv_ptr fpst;
4731
4732    tcg_res = tcg_temp_new_i32();
4733    fpst = get_fpstatus_ptr();
4734    tcg_op1 = read_fp_sreg(s, rn);
4735    tcg_op2 = read_fp_sreg(s, rm);
4736
4737    switch (opcode) {
4738    case 0x0: /* FMUL */
4739        gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
4740        break;
4741    case 0x1: /* FDIV */
4742        gen_helper_vfp_divs(tcg_res, tcg_op1, tcg_op2, fpst);
4743        break;
4744    case 0x2: /* FADD */
4745        gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
4746        break;
4747    case 0x3: /* FSUB */
4748        gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
4749        break;
4750    case 0x4: /* FMAX */
4751        gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
4752        break;
4753    case 0x5: /* FMIN */
4754        gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
4755        break;
4756    case 0x6: /* FMAXNM */
4757        gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
4758        break;
4759    case 0x7: /* FMINNM */
4760        gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
4761        break;
4762    case 0x8: /* FNMUL */
4763        gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
4764        gen_helper_vfp_negs(tcg_res, tcg_res);
4765        break;
4766    }
4767
4768    write_fp_sreg(s, rd, tcg_res);
4769
4770    tcg_temp_free_ptr(fpst);
4771    tcg_temp_free_i32(tcg_op1);
4772    tcg_temp_free_i32(tcg_op2);
4773    tcg_temp_free_i32(tcg_res);
4774}
4775
4776/* C3.6.26 Floating-point data-processing (2 source) - double precision */
4777static void handle_fp_2src_double(DisasContext *s, int opcode,
4778                                  int rd, int rn, int rm)
4779{
4780    TCGv_i64 tcg_op1;
4781    TCGv_i64 tcg_op2;
4782    TCGv_i64 tcg_res;
4783    TCGv_ptr fpst;
4784
4785    tcg_res = tcg_temp_new_i64();
4786    fpst = get_fpstatus_ptr();
4787    tcg_op1 = read_fp_dreg(s, rn);
4788    tcg_op2 = read_fp_dreg(s, rm);
4789
4790    switch (opcode) {
4791    case 0x0: /* FMUL */
4792        gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
4793        break;
4794    case 0x1: /* FDIV */
4795        gen_helper_vfp_divd(tcg_res, tcg_op1, tcg_op2, fpst);
4796        break;
4797    case 0x2: /* FADD */
4798        gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
4799        break;
4800    case 0x3: /* FSUB */
4801        gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
4802        break;
4803    case 0x4: /* FMAX */
4804        gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
4805        break;
4806    case 0x5: /* FMIN */
4807        gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
4808        break;
4809    case 0x6: /* FMAXNM */
4810        gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
4811        break;
4812    case 0x7: /* FMINNM */
4813        gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
4814        break;
4815    case 0x8: /* FNMUL */
4816        gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
4817        gen_helper_vfp_negd(tcg_res, tcg_res);
4818        break;
4819    }
4820
4821    write_fp_dreg(s, rd, tcg_res);
4822
4823    tcg_temp_free_ptr(fpst);
4824    tcg_temp_free_i64(tcg_op1);
4825    tcg_temp_free_i64(tcg_op2);
4826    tcg_temp_free_i64(tcg_res);
4827}
4828
4829/* C3.6.26 Floating point data-processing (2 source)
4830 *   31  30  29 28       24 23  22  21 20  16 15    12 11 10 9    5 4    0
4831 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
4832 * | M | 0 | S | 1 1 1 1 0 | type | 1 |  Rm  | opcode | 1 0 |  Rn  |  Rd  |
4833 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
4834 */
4835static void disas_fp_2src(DisasContext *s, uint32_t insn)
4836{
4837    int type = extract32(insn, 22, 2);
4838    int rd = extract32(insn, 0, 5);
4839    int rn = extract32(insn, 5, 5);
4840    int rm = extract32(insn, 16, 5);
4841    int opcode = extract32(insn, 12, 4);
4842
4843    if (opcode > 8) {
4844        unallocated_encoding(s);
4845        return;
4846    }
4847
4848    switch (type) {
4849    case 0:
4850        if (!fp_access_check(s)) {
4851            return;
4852        }
4853        handle_fp_2src_single(s, opcode, rd, rn, rm);
4854        break;
4855    case 1:
4856        if (!fp_access_check(s)) {
4857            return;
4858        }
4859        handle_fp_2src_double(s, opcode, rd, rn, rm);
4860        break;
4861    default:
4862        unallocated_encoding(s);
4863    }
4864}
4865
4866/* C3.6.27 Floating-point data-processing (3 source) - single precision */
4867static void handle_fp_3src_single(DisasContext *s, bool o0, bool o1,
4868                                  int rd, int rn, int rm, int ra)
4869{
4870    TCGv_i32 tcg_op1, tcg_op2, tcg_op3;
4871    TCGv_i32 tcg_res = tcg_temp_new_i32();
4872    TCGv_ptr fpst = get_fpstatus_ptr();
4873
4874    tcg_op1 = read_fp_sreg(s, rn);
4875    tcg_op2 = read_fp_sreg(s, rm);
4876    tcg_op3 = read_fp_sreg(s, ra);
4877
4878    /* These are fused multiply-add, and must be done as one
4879     * floating point operation with no rounding between the
4880     * multiplication and addition steps.
4881     * NB that doing the negations here as separate steps is
4882     * correct: an input NaN should come out with its sign bit
4883     * flipped if it is a negated input.
4884     */
4885    if (o1 == true) {
4886        gen_helper_vfp_negs(tcg_op3, tcg_op3);
4887    }
4888
4889    if (o0 != o1) {
4890        gen_helper_vfp_negs(tcg_op1, tcg_op1);
4891    }
4892
4893    gen_helper_vfp_muladds(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);
4894
4895    write_fp_sreg(s, rd, tcg_res);
4896
4897    tcg_temp_free_ptr(fpst);
4898    tcg_temp_free_i32(tcg_op1);
4899    tcg_temp_free_i32(tcg_op2);
4900    tcg_temp_free_i32(tcg_op3);
4901    tcg_temp_free_i32(tcg_res);
4902}
4903
4904/* C3.6.27 Floating-point data-processing (3 source) - double precision */
4905static void handle_fp_3src_double(DisasContext *s, bool o0, bool o1,
4906                                  int rd, int rn, int rm, int ra)
4907{
4908    TCGv_i64 tcg_op1, tcg_op2, tcg_op3;
4909    TCGv_i64 tcg_res = tcg_temp_new_i64();
4910    TCGv_ptr fpst = get_fpstatus_ptr();
4911
4912    tcg_op1 = read_fp_dreg(s, rn);
4913    tcg_op2 = read_fp_dreg(s, rm);
4914    tcg_op3 = read_fp_dreg(s, ra);
4915
4916    /* These are fused multiply-add, and must be done as one
4917     * floating point operation with no rounding between the
4918     * multiplication and addition steps.
4919     * NB that doing the negations here as separate steps is
4920     * correct: an input NaN should come out with its sign bit
4921     * flipped if it is a negated input.
4922     */
4923    if (o1 == true) {
4924        gen_helper_vfp_negd(tcg_op3, tcg_op3);
4925    }
4926
4927    if (o0 != o1) {
4928        gen_helper_vfp_negd(tcg_op1, tcg_op1);
4929    }
4930
4931    gen_helper_vfp_muladdd(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);
4932
4933    write_fp_dreg(s, rd, tcg_res);
4934
4935    tcg_temp_free_ptr(fpst);
4936    tcg_temp_free_i64(tcg_op1);
4937    tcg_temp_free_i64(tcg_op2);
4938    tcg_temp_free_i64(tcg_op3);
4939    tcg_temp_free_i64(tcg_res);
4940}
4941
4942/* C3.6.27 Floating point data-processing (3 source)
4943 *   31  30  29 28       24 23  22  21  20  16  15  14  10 9    5 4    0
4944 * +---+---+---+-----------+------+----+------+----+------+------+------+
4945 * | M | 0 | S | 1 1 1 1 1 | type | o1 |  Rm  | o0 |  Ra  |  Rn  |  Rd  |
4946 * +---+---+---+-----------+------+----+------+----+------+------+------+
4947 */
4948static void disas_fp_3src(DisasContext *s, uint32_t insn)
4949{
4950    int type = extract32(insn, 22, 2);
4951    int rd = extract32(insn, 0, 5);
4952    int rn = extract32(insn, 5, 5);
4953    int ra = extract32(insn, 10, 5);
4954    int rm = extract32(insn, 16, 5);
4955    bool o0 = extract32(insn, 15, 1);
4956    bool o1 = extract32(insn, 21, 1);
4957
4958    switch (type) {
4959    case 0:
4960        if (!fp_access_check(s)) {
4961            return;
4962        }
4963        handle_fp_3src_single(s, o0, o1, rd, rn, rm, ra);
4964        break;
4965    case 1:
4966        if (!fp_access_check(s)) {
4967            return;
4968        }
4969        handle_fp_3src_double(s, o0, o1, rd, rn, rm, ra);
4970        break;
4971    default:
4972        unallocated_encoding(s);
4973    }
4974}
4975
4976/* C3.6.28 Floating point immediate
4977 *   31  30  29 28       24 23  22  21 20        13 12   10 9    5 4    0
4978 * +---+---+---+-----------+------+---+------------+-------+------+------+
4979 * | M | 0 | S | 1 1 1 1 0 | type | 1 |    imm8    | 1 0 0 | imm5 |  Rd  |
4980 * +---+---+---+-----------+------+---+------------+-------+------+------+
4981 */
4982static void disas_fp_imm(DisasContext *s, uint32_t insn)
4983{
4984    int rd = extract32(insn, 0, 5);
4985    int imm8 = extract32(insn, 13, 8);
4986    int is_double = extract32(insn, 22, 2);
4987    uint64_t imm;
4988    TCGv_i64 tcg_res;
4989
4990    if (is_double > 1) {
4991        unallocated_encoding(s);
4992        return;
4993    }
4994
4995    if (!fp_access_check(s)) {
4996        return;
4997    }
4998
4999    /* The imm8 encodes the sign bit, enough bits to represent
5000     * an exponent in the range 01....1xx to 10....0xx,
5001     * and the most significant 4 bits of the mantissa; see
5002     * VFPExpandImm() in the v8 ARM ARM.
5003     */
5004    if (is_double) {
5005        imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
5006            (extract32(imm8, 6, 1) ? 0x3fc0 : 0x4000) |
5007            extract32(imm8, 0, 6);
5008        imm <<= 48;
5009    } else {
5010        imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
5011            (extract32(imm8, 6, 1) ? 0x3e00 : 0x4000) |
5012            (extract32(imm8, 0, 6) << 3);
5013        imm <<= 16;
5014    }
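        /* Worked example (illustrative, not from the ARM ARM text):
         * imm8 = 0x70 has sign 0, imm8<6> = 1 and low mantissa bits
         * 110000, so it expands to 0x3ff0000000000000 (+1.0) as a
         * double and to 0x3f800000 (+1.0f) as a single.
         */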
5015
5016    tcg_res = tcg_const_i64(imm);
5017    write_fp_dreg(s, rd, tcg_res);
5018    tcg_temp_free_i64(tcg_res);
5019}
5020
5021/* Handle floating point <=> fixed point conversions. Note that we can
5022 * also deal with fp <=> integer conversions as a special case (scale == 64)
5023 * OPTME: consider handling that special case separately, or at least skipping
5024 * the call to scalbn in the helpers for zero shifts.
5025 */
5026static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode,
5027                           bool itof, int rmode, int scale, int sf, int type)
5028{
5029    bool is_signed = !(opcode & 1);
5030    bool is_double = type;
5031    TCGv_ptr tcg_fpstatus;
5032    TCGv_i32 tcg_shift;
5033
5034    tcg_fpstatus = get_fpstatus_ptr();
5035
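        /* 64 - scale is the number of fractional bits; for the pure
         * integer conversions (scale == 64) it is zero, so the scalbn
         * step inside the helpers is a no-op.
         */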
5036    tcg_shift = tcg_const_i32(64 - scale);
5037
5038    if (itof) {
5039        TCGv_i64 tcg_int = cpu_reg(s, rn);
5040        if (!sf) {
5041            TCGv_i64 tcg_extend = new_tmp_a64(s);
5042
5043            if (is_signed) {
5044                tcg_gen_ext32s_i64(tcg_extend, tcg_int);
5045            } else {
5046                tcg_gen_ext32u_i64(tcg_extend, tcg_int);
5047            }
5048
5049            tcg_int = tcg_extend;
5050        }
5051
5052        if (is_double) {
5053            TCGv_i64 tcg_double = tcg_temp_new_i64();
5054            if (is_signed) {
5055                gen_helper_vfp_sqtod(tcg_double, tcg_int,
5056                                     tcg_shift, tcg_fpstatus);
5057            } else {
5058                gen_helper_vfp_uqtod(tcg_double, tcg_int,
5059                                     tcg_shift, tcg_fpstatus);
5060            }
5061            write_fp_dreg(s, rd, tcg_double);
5062            tcg_temp_free_i64(tcg_double);
5063        } else {
5064            TCGv_i32 tcg_single = tcg_temp_new_i32();
5065            if (is_signed) {
5066                gen_helper_vfp_sqtos(tcg_single, tcg_int,
5067                                     tcg_shift, tcg_fpstatus);
5068            } else {
5069                gen_helper_vfp_uqtos(tcg_single, tcg_int,
5070                                     tcg_shift, tcg_fpstatus);
5071            }
5072            write_fp_sreg(s, rd, tcg_single);
5073            tcg_temp_free_i32(tcg_single);
5074        }
5075    } else {
5076        TCGv_i64 tcg_int = cpu_reg(s, rd);
5077        TCGv_i32 tcg_rmode;
5078
5079        if (extract32(opcode, 2, 1)) {
5080            /* There are too many rounding modes to all fit into rmode,
5081             * so FCVTA[US] is a special case.
5082             */
5083            rmode = FPROUNDING_TIEAWAY;
5084        }
5085
5086        tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
5087
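            /* gen_helper_set_rmode returns the previous rounding mode
             * in tcg_rmode, so the second, identical call after the
             * conversion restores the original mode.
             */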
5088        gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
5089
5090        if (is_double) {
5091            TCGv_i64 tcg_double = read_fp_dreg(s, rn);
5092            if (is_signed) {
5093                if (!sf) {
5094                    gen_helper_vfp_tosld(tcg_int, tcg_double,
5095                                         tcg_shift, tcg_fpstatus);
5096                } else {
5097                    gen_helper_vfp_tosqd(tcg_int, tcg_double,
5098                                         tcg_shift, tcg_fpstatus);
5099                }
5100            } else {
5101                if (!sf) {
5102                    gen_helper_vfp_tould(tcg_int, tcg_double,
5103                                         tcg_shift, tcg_fpstatus);
5104                } else {
5105                    gen_helper_vfp_touqd(tcg_int, tcg_double,
5106                                         tcg_shift, tcg_fpstatus);
5107                }
5108            }
5109            tcg_temp_free_i64(tcg_double);
5110        } else {
5111            TCGv_i32 tcg_single = read_fp_sreg(s, rn);
5112            if (sf) {
5113                if (is_signed) {
5114                    gen_helper_vfp_tosqs(tcg_int, tcg_single,
5115                                         tcg_shift, tcg_fpstatus);
5116                } else {
5117                    gen_helper_vfp_touqs(tcg_int, tcg_single,
5118                                         tcg_shift, tcg_fpstatus);
5119                }
5120            } else {
5121                TCGv_i32 tcg_dest = tcg_temp_new_i32();
5122                if (is_signed) {
5123                    gen_helper_vfp_tosls(tcg_dest, tcg_single,
5124                                         tcg_shift, tcg_fpstatus);
5125                } else {
5126                    gen_helper_vfp_touls(tcg_dest, tcg_single,
5127                                         tcg_shift, tcg_fpstatus);
5128                }
5129                tcg_gen_extu_i32_i64(tcg_int, tcg_dest);
5130                tcg_temp_free_i32(tcg_dest);
5131            }
5132            tcg_temp_free_i32(tcg_single);
5133        }
5134
5135        gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
5136        tcg_temp_free_i32(tcg_rmode);
5137
5138        if (!sf) {
5139            tcg_gen_ext32u_i64(tcg_int, tcg_int);
5140        }
5141    }
5142
5143    tcg_temp_free_ptr(tcg_fpstatus);
5144    tcg_temp_free_i32(tcg_shift);
5145}
5146
5147/* C3.6.29 Floating point <-> fixed point conversions
5148 *   31   30  29 28       24 23  22  21 20   19 18    16 15   10 9    5 4    0
5149 * +----+---+---+-----------+------+---+-------+--------+-------+------+------+
5150 * | sf | 0 | S | 1 1 1 1 0 | type | 0 | rmode | opcode | scale |  Rn  |  Rd  |
5151 * +----+---+---+-----------+------+---+-------+--------+-------+------+------+
5152 */
5153static void disas_fp_fixed_conv(DisasContext *s, uint32_t insn)
5154{
5155    int rd = extract32(insn, 0, 5);
5156    int rn = extract32(insn, 5, 5);
5157    int scale = extract32(insn, 10, 6);
5158    int opcode = extract32(insn, 16, 3);
5159    int rmode = extract32(insn, 19, 2);
5160    int type = extract32(insn, 22, 2);
5161    bool sbit = extract32(insn, 29, 1);
5162    bool sf = extract32(insn, 31, 1);
5163    bool itof;
5164
5165    if (sbit || (type > 1)
5166        || (!sf && scale < 32)) {
5167        unallocated_encoding(s);
5168        return;
5169    }
5170
5171    switch ((rmode << 3) | opcode) {
5172    case 0x2: /* SCVTF */
5173    case 0x3: /* UCVTF */
5174        itof = true;
5175        break;
5176    case 0x18: /* FCVTZS */
5177    case 0x19: /* FCVTZU */
5178        itof = false;
5179        break;
5180    default:
5181        unallocated_encoding(s);
5182        return;
5183    }
5184
5185    if (!fp_access_check(s)) {
5186        return;
5187    }
5188
5189    handle_fpfpcvt(s, rd, rn, opcode, itof, FPROUNDING_ZERO, scale, sf, type);
5190}
5191
5192static void handle_fmov(DisasContext *s, int rd, int rn, int type, bool itof)
5193{
5194    /* FMOV: gpr to or from float, double, or top half of quad fp reg,
5195     * without conversion.
5196     */
5197
5198    if (itof) {
5199        TCGv_i64 tcg_rn = cpu_reg(s, rn);
5200
5201        switch (type) {
5202        case 0:
5203        {
5204            /* 32 bit */
5205            TCGv_i64 tmp = tcg_temp_new_i64();
5206            tcg_gen_ext32u_i64(tmp, tcg_rn);
5207            tcg_gen_st_i64(tmp, cpu_env, fp_reg_offset(s, rd, MO_64));
5208            tcg_gen_movi_i64(tmp, 0);
5209            tcg_gen_st_i64(tmp, cpu_env, fp_reg_hi_offset(s, rd));
5210            tcg_temp_free_i64(tmp);
5211            break;
5212        }
5213        case 1:
5214        {
5215            /* 64 bit */
5216            TCGv_i64 tmp = tcg_const_i64(0);
5217            tcg_gen_st_i64(tcg_rn, cpu_env, fp_reg_offset(s, rd, MO_64));
5218            tcg_gen_st_i64(tmp, cpu_env, fp_reg_hi_offset(s, rd));
5219            tcg_temp_free_i64(tmp);
5220            break;
5221        }
5222        case 2:
5223            /* 64 bit to top half. */
5224            tcg_gen_st_i64(tcg_rn, cpu_env, fp_reg_hi_offset(s, rd));
5225            break;
5226        }
5227    } else {
5228        TCGv_i64 tcg_rd = cpu_reg(s, rd);
5229
5230        switch (type) {
5231        case 0:
5232            /* 32 bit */
5233            tcg_gen_ld32u_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_32));
5234            break;
5235        case 1:
5236            /* 64 bit */
5237            tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_64));
5238            break;
5239        case 2:
5240            /* 64 bits from top half */
5241            tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_hi_offset(s, rn));
5242            break;
5243        }
5244    }
5245}
5246
5247/* C3.6.30 Floating point <-> integer conversions
5248 *   31   30  29 28       24 23  22  21 20   19 18 16 15         10 9  5 4  0
5249 * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
5250 * | sf | 0 | S | 1 1 1 1 0 | type | 1 | rmode | opc | 0 0 0 0 0 0 | Rn | Rd |
5251 * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
5252 */
5253static void disas_fp_int_conv(DisasContext *s, uint32_t insn)
5254{
5255    int rd = extract32(insn, 0, 5);
5256    int rn = extract32(insn, 5, 5);
5257    int opcode = extract32(insn, 16, 3);
5258    int rmode = extract32(insn, 19, 2);
5259    int type = extract32(insn, 22, 2);
5260    bool sbit = extract32(insn, 29, 1);
5261    bool sf = extract32(insn, 31, 1);
5262
5263    if (sbit) {
5264        unallocated_encoding(s);
5265        return;
5266    }
5267
5268    if (opcode > 5) {
5269        /* FMOV */
5270        bool itof = opcode & 1;
5271
5272        if (rmode >= 2) {
5273            unallocated_encoding(s);
5274            return;
5275        }
5276
5277        switch (sf << 3 | type << 1 | rmode) {
5278        case 0x0: /* 32 bit */
5279        case 0xa: /* 64 bit */
5280        case 0xd: /* 64 bit to top half of quad */
5281            break;
5282        default:
5283            /* all other sf/type/rmode combinations are invalid */
5284            unallocated_encoding(s);
5285            return;
5286        }
5287
5288        if (!fp_access_check(s)) {
5289            return;
5290        }
5291        handle_fmov(s, rd, rn, type, itof);
5292    } else {
5293        /* actual FP conversions */
5294        bool itof = extract32(opcode, 1, 1);
5295
5296        if (type > 1 || (rmode != 0 && opcode > 1)) {
5297            unallocated_encoding(s);
5298            return;
5299        }
5300
5301        if (!fp_access_check(s)) {
5302            return;
5303        }
5304        handle_fpfpcvt(s, rd, rn, opcode, itof, rmode, 64, sf, type);
5305    }
5306}
5307
5308/* FP-specific subcases of table C3-6 (SIMD and FP data processing)
5309 *   31  30  29 28     25 24                          0
5310 * +---+---+---+---------+-----------------------------+
5311 * |   | 0 |   | 1 1 1 1 |                             |
5312 * +---+---+---+---------+-----------------------------+
5313 */
5314static void disas_data_proc_fp(DisasContext *s, uint32_t insn)
5315{
5316    if (extract32(insn, 24, 1)) {
5317        /* Floating point data-processing (3 source) */
5318        disas_fp_3src(s, insn);
5319    } else if (extract32(insn, 21, 1) == 0) {
5320        /* Floating point to fixed point conversions */
5321        disas_fp_fixed_conv(s, insn);
5322    } else {
5323        switch (extract32(insn, 10, 2)) {
5324        case 1:
5325            /* Floating point conditional compare */
5326            disas_fp_ccomp(s, insn);
5327            break;
5328        case 2:
5329            /* Floating point data-processing (2 source) */
5330            disas_fp_2src(s, insn);
5331            break;
5332        case 3:
5333            /* Floating point conditional select */
5334            disas_fp_csel(s, insn);
5335            break;
5336        case 0:
5337            switch (ctz32(extract32(insn, 12, 4))) {
5338            case 0: /* [15:12] == xxx1 */
5339                /* Floating point immediate */
5340                disas_fp_imm(s, insn);
5341                break;
5342            case 1: /* [15:12] == xx10 */
5343                /* Floating point compare */
5344                disas_fp_compare(s, insn);
5345                break;
5346            case 2: /* [15:12] == x100 */
5347                /* Floating point data-processing (1 source) */
5348                disas_fp_1src(s, insn);
5349                break;
5350            case 3: /* [15:12] == 1000 */
5351                unallocated_encoding(s);
5352                break;
5353            default: /* [15:12] == 0000 */
5354                /* Floating point <-> integer conversions */
5355                disas_fp_int_conv(s, insn);
5356                break;
5357            }
5358            break;
5359        }
5360    }
5361}
5362
5363static void do_ext64(DisasContext *s, TCGv_i64 tcg_left, TCGv_i64 tcg_right,
5364                     int pos)
5365{
5366    /* Extract 64 bits from the middle of two concatenated 64 bit
5367     * vector register slices left:right. The extracted bits start
5368     * at 'pos' bits into the right (least significant) side.
5369     * We return the result in tcg_right, and guarantee not to
5370     * trash tcg_left.
5371     */
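        /* For example, pos == 8 gives (right >> 8) | (left << 56):
         * the low seven bytes come from the right slice and the top
         * byte from the left slice.
         */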
5372    TCGv_i64 tcg_tmp = tcg_temp_new_i64();
5373    assert(pos > 0 && pos < 64);
5374
5375    tcg_gen_shri_i64(tcg_right, tcg_right, pos);
5376    tcg_gen_shli_i64(tcg_tmp, tcg_left, 64 - pos);
5377    tcg_gen_or_i64(tcg_right, tcg_right, tcg_tmp);
5378
5379    tcg_temp_free_i64(tcg_tmp);
5380}
5381
5382/* C3.6.1 EXT
5383 *   31  30 29         24 23 22  21 20  16 15  14  11 10  9    5 4    0
5384 * +---+---+-------------+-----+---+------+---+------+---+------+------+
5385 * | 0 | Q | 1 0 1 1 1 0 | op2 | 0 |  Rm  | 0 | imm4 | 0 |  Rn  |  Rd  |
5386 * +---+---+-------------+-----+---+------+---+------+---+------+------+
5387 */
5388static void disas_simd_ext(DisasContext *s, uint32_t insn)
5389{
5390    int is_q = extract32(insn, 30, 1);
5391    int op2 = extract32(insn, 22, 2);
5392    int imm4 = extract32(insn, 11, 4);
5393    int rm = extract32(insn, 16, 5);
5394    int rn = extract32(insn, 5, 5);
5395    int rd = extract32(insn, 0, 5);
5396    int pos = imm4 << 3;
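        /* imm4 is a byte index into Vm:Vn, so pos is a bit offset:
         * 0..56 for the 64-bit form (imm4<3> must be zero, as checked
         * below) and 0..120 for the 128-bit form.
         */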
5397    TCGv_i64 tcg_resl, tcg_resh;
5398
5399    if (op2 != 0 || (!is_q && extract32(imm4, 3, 1))) {
5400        unallocated_encoding(s);
5401        return;
5402    }
5403
5404    if (!fp_access_check(s)) {
5405        return;
5406    }
5407
5408    tcg_resh = tcg_temp_new_i64();
5409    tcg_resl = tcg_temp_new_i64();
5410
5411    /* Vd gets bits starting at pos bits into Vm:Vn. This is
5412     * either extracting 128 bits from a 128:128 concatenation, or
5413     * extracting 64 bits from a 64:64 concatenation.
5414     */
5415    if (!is_q) {
5416        read_vec_element(s, tcg_resl, rn, 0, MO_64);
5417        if (pos != 0) {
5418            read_vec_element(s, tcg_resh, rm, 0, MO_64);
5419            do_ext64(s, tcg_resh, tcg_resl, pos);
5420        }
5421        tcg_gen_movi_i64(tcg_resh, 0);
5422    } else {
5423        TCGv_i64 tcg_hh;
5424        typedef struct {
5425            int reg;
5426            int elt;
5427        } EltPosns;
5428        EltPosns eltposns[] = { {rn, 0}, {rn, 1}, {rm, 0}, {rm, 1} };
5429        EltPosns *elt = eltposns;
5430
5431        if (pos >= 64) {
5432            elt++;
5433            pos -= 64;
5434        }
5435
5436        read_vec_element(s, tcg_resl, elt->reg, elt->elt, MO_64);
5437        elt++;
5438        read_vec_element(s, tcg_resh, elt->reg, elt->elt, MO_64);
5439        elt++;
5440        if (pos != 0) {
5441            do_ext64(s, tcg_resh, tcg_resl, pos);
5442            tcg_hh = tcg_temp_new_i64();
5443            read_vec_element(s, tcg_hh, elt->reg, elt->elt, MO_64);
5444            do_ext64(s, tcg_hh, tcg_resh, pos);
5445            tcg_temp_free_i64(tcg_hh);
5446        }
5447    }
5448
5449    write_vec_element(s, tcg_resl, rd, 0, MO_64);
5450    tcg_temp_free_i64(tcg_resl);
5451    write_vec_element(s, tcg_resh, rd, 1, MO_64);
5452    tcg_temp_free_i64(tcg_resh);
5453}
5454
5455/* C3.6.2 TBL/TBX
5456 *   31  30 29         24 23 22  21 20  16 15  14 13  12  11 10 9    5 4    0
5457 * +---+---+-------------+-----+---+------+---+-----+----+-----+------+------+
5458 * | 0 | Q | 0 0 1 1 1 0 | op2 | 0 |  Rm  | 0 | len | op | 0 0 |  Rn  |  Rd  |
5459 * +---+---+-------------+-----+---+------+---+-----+----+-----+------+------+
5460 */
5461static void disas_simd_tb(DisasContext *s, uint32_t insn)
5462{
5463    int op2 = extract32(insn, 22, 2);
5464    int is_q = extract32(insn, 30, 1);
5465    int rm = extract32(insn, 16, 5);
5466    int rn = extract32(insn, 5, 5);
5467    int rd = extract32(insn, 0, 5);
5468    int is_tblx = extract32(insn, 12, 1);
5469    int len = extract32(insn, 13, 2);
5470    TCGv_i64 tcg_resl, tcg_resh, tcg_idx;
5471    TCGv_i32 tcg_regno, tcg_numregs;
5472
5473    if (op2 != 0) {
5474        unallocated_encoding(s);
5475        return;
5476    }
5477
5478    if (!fp_access_check(s)) {
5479        return;
5480    }
5481
5482    /* This does a table lookup: for every byte element in the input
5483     * we index into a table formed from up to four vector registers,
5484     * and then the output is the result of the lookups. Our helper
5485     * function does the lookup operation for a single 64 bit part of
5486     * the input.
5487     */
5488    tcg_resl = tcg_temp_new_i64();
5489    tcg_resh = tcg_temp_new_i64();
5490
5491    if (is_tblx) {
5492        read_vec_element(s, tcg_resl, rd, 0, MO_64);
5493    } else {
5494        tcg_gen_movi_i64(tcg_resl, 0);
5495    }
5496    if (is_tblx && is_q) {
5497        read_vec_element(s, tcg_resh, rd, 1, MO_64);
5498    } else {
5499        tcg_gen_movi_i64(tcg_resh, 0);
5500    }
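        /* Starting from the old Vd contents gives the TBX behaviour:
         * bytes whose index is out of range keep their destination
         * value, whereas TBL zeroes them.
         */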
5501
5502    tcg_idx = tcg_temp_new_i64();
5503    tcg_regno = tcg_const_i32(rn);
5504    tcg_numregs = tcg_const_i32(len + 1);
5505    read_vec_element(s, tcg_idx, rm, 0, MO_64);
5506    gen_helper_simd_tbl(tcg_resl, cpu_env, tcg_resl, tcg_idx,
5507                        tcg_regno, tcg_numregs);
5508    if (is_q) {
5509        read_vec_element(s, tcg_idx, rm, 1, MO_64);
5510        gen_helper_simd_tbl(tcg_resh, cpu_env, tcg_resh, tcg_idx,
5511                            tcg_regno, tcg_numregs);
5512    }
5513    tcg_temp_free_i64(tcg_idx);
5514    tcg_temp_free_i32(tcg_regno);
5515    tcg_temp_free_i32(tcg_numregs);
5516
5517    write_vec_element(s, tcg_resl, rd, 0, MO_64);
5518    tcg_temp_free_i64(tcg_resl);
5519    write_vec_element(s, tcg_resh, rd, 1, MO_64);
5520    tcg_temp_free_i64(tcg_resh);
5521}
5522
5523/* C3.6.3 ZIP/UZP/TRN
5524 *   31  30 29         24 23  22  21 20   16 15 14 12 11 10 9    5 4    0
5525 * +---+---+-------------+------+---+------+---+-----+-----+------+------+
5526 * | 0 | Q | 0 0 1 1 1 0 | size | 0 |  Rm  | 0 | opc | 1 0 |  Rn  |  Rd  |
5527 * +---+---+-------------+------+---+------+---+-----+-----+------+------+
5528 */
5529static void disas_simd_zip_trn(DisasContext *s, uint32_t insn)
5530{
5531    int rd = extract32(insn, 0, 5);
5532    int rn = extract32(insn, 5, 5);
5533    int rm = extract32(insn, 16, 5);
5534    int size = extract32(insn, 22, 2);
5535    /* opc field bits [1:0] indicate ZIP/UZP/TRN;
5536     * bit 2 indicates 1 vs 2 variant of the insn.
5537     */
5538    int opcode = extract32(insn, 12, 2);
5539    bool part = extract32(insn, 14, 1);
5540    bool is_q = extract32(insn, 30, 1);
5541    int esize = 8 << size;
5542    int i, ofs;
5543    int datasize = is_q ? 128 : 64;
5544    int elements = datasize / esize;
5545    TCGv_i64 tcg_res, tcg_resl, tcg_resh;
5546
5547    if (opcode == 0 || (size == 3 && !is_q)) {
5548        unallocated_encoding(s);
5549        return;
5550    }
5551
5552    if (!fp_access_check(s)) {
5553        return;
5554    }
5555
5556    tcg_resl = tcg_const_i64(0);
5557    tcg_resh = tcg_const_i64(0);
5558    tcg_res = tcg_temp_new_i64();
5559
5560    for (i = 0; i < elements; i++) {
5561        switch (opcode) {
5562        case 1: /* UZP1/2 */
5563        {
5564            int midpoint = elements / 2;
5565            if (i < midpoint) {
5566                read_vec_element(s, tcg_res, rn, 2 * i + part, size);
5567            } else {
5568                read_vec_element(s, tcg_res, rm,
5569                                 2 * (i - midpoint) + part, size);
5570            }
5571            break;
5572        }
5573        case 2: /* TRN1/2 */
5574            if (i & 1) {
5575                read_vec_element(s, tcg_res, rm, (i & ~1) + part, size);
5576            } else {
5577                read_vec_element(s, tcg_res, rn, (i & ~1) + part, size);
5578            }
5579            break;
5580        case 3: /* ZIP1/2 */
5581        {
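                /* ZIP interleaves: result element 2k comes from Rn and
                 * element 2k + 1 from Rm, starting at the low (ZIP1)
                 * or high (ZIP2) half of each source register.
                 */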
5582            int base = part * elements / 2;
5583            if (i & 1) {
5584                read_vec_element(s, tcg_res, rm, base + (i >> 1), size);
5585            } else {
5586                read_vec_element(s, tcg_res, rn, base + (i >> 1), size);
5587            }
5588            break;
5589        }
5590        default:
5591            g_assert_not_reached();
5592        }
5593
5594        ofs = i * esize;
5595        if (ofs < 64) {
5596            tcg_gen_shli_i64(tcg_res, tcg_res, ofs);
5597            tcg_gen_or_i64(tcg_resl, tcg_resl, tcg_res);
5598        } else {
5599            tcg_gen_shli_i64(tcg_res, tcg_res, ofs - 64);
5600            tcg_gen_or_i64(tcg_resh, tcg_resh, tcg_res);
5601        }
5602    }
5603
5604    tcg_temp_free_i64(tcg_res);
5605
5606    write_vec_element(s, tcg_resl, rd, 0, MO_64);
5607    tcg_temp_free_i64(tcg_resl);
5608    write_vec_element(s, tcg_resh, rd, 1, MO_64);
5609    tcg_temp_free_i64(tcg_resh);
5610}
5611
5612static void do_minmaxop(DisasContext *s, TCGv_i32 tcg_elt1, TCGv_i32 tcg_elt2,
5613                        int opc, bool is_min, TCGv_ptr fpst)
5614{
5615    /* Helper function for disas_simd_across_lanes: do a single precision
5616     * min/max operation on the specified two inputs,
5617     * and return the result in tcg_elt1.
5618     */
5619    if (opc == 0xc) {
5620        if (is_min) {
5621            gen_helper_vfp_minnums(tcg_elt1, tcg_elt1, tcg_elt2, fpst);
5622        } else {
5623            gen_helper_vfp_maxnums(tcg_elt1, tcg_elt1, tcg_elt2, fpst);
5624        }
5625    } else {
5626        assert(opc == 0xf);
5627        if (is_min) {
5628            gen_helper_vfp_mins(tcg_elt1, tcg_elt1, tcg_elt2, fpst);
5629        } else {
5630            gen_helper_vfp_maxs(tcg_elt1, tcg_elt1, tcg_elt2, fpst);
5631        }
5632    }
5633}
5634
5635/* C3.6.4 AdvSIMD across lanes
5636 *   31  30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
5637 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
5638 * | 0 | Q | U | 0 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
5639 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
5640 */
5641static void disas_simd_across_lanes(DisasContext *s, uint32_t insn)
5642{
5643    int rd = extract32(insn, 0, 5);
5644    int rn = extract32(insn, 5, 5);
5645    int size = extract32(insn, 22, 2);
5646    int opcode = extract32(insn, 12, 5);
5647    bool is_q = extract32(insn, 30, 1);
5648    bool is_u = extract32(insn, 29, 1);
5649    bool is_fp = false;
5650    bool is_min = false;
5651    int esize;
5652    int elements;
5653    int i;
5654    TCGv_i64 tcg_res, tcg_elt;
5655
5656    switch (opcode) {
5657    case 0x1b: /* ADDV */
5658        if (is_u) {
5659            unallocated_encoding(s);
5660            return;
5661        }
5662        /* fall through */
5663    case 0x3: /* SADDLV, UADDLV */
5664    case 0xa: /* SMAXV, UMAXV */
5665    case 0x1a: /* SMINV, UMINV */
5666        if (size == 3 || (size == 2 && !is_q)) {
5667            unallocated_encoding(s);
5668            return;
5669        }
5670        break;
5671    case 0xc: /* FMAXNMV, FMINNMV */
5672    case 0xf: /* FMAXV, FMINV */
5673        if (!is_u || !is_q || extract32(size, 0, 1)) {
5674            unallocated_encoding(s);
5675            return;
5676        }
5677        /* Bit 1 of size field encodes min vs max, and actual size is always
5678         * 32 bits: adjust the size variable so following code can rely on it
5679         */
5680        is_min = extract32(size, 1, 1);
5681        is_fp = true;
5682        size = 2;
5683        break;
5684    default:
5685        unallocated_encoding(s);
5686        return;
5687    }
5688
5689    if (!fp_access_check(s)) {
5690        return;
5691    }
5692
5693    esize = 8 << size;
5694    elements = (is_q ? 128 : 64) / esize;
5695
5696    tcg_res = tcg_temp_new_i64();
5697    tcg_elt = tcg_temp_new_i64();
5698
5699    /* These instructions operate across all lanes of a vector
5700     * to produce a single result. We can guarantee that a 64
5701     * bit intermediate is sufficient:
5702     *  + for [US]ADDLV the maximum element size is 32 bits, and
5703     *    the result type is 64 bits
5704     *  + for FMAX*V, FMIN*V, ADDV the intermediate type is the
5705     *    same as the element size, which is 32 bits at most
5706     * For the integer operations we can choose to work at 64
5707     * or 32 bits and truncate at the end; for simplicity
5708     * we use 64 bits always. The floating point
5709     * ops do require 32 bit intermediates, though.
5710     */
5711    if (!is_fp) {
5712        read_vec_element(s, tcg_res, rn, 0, size | (is_u ? 0 : MO_SIGN));
5713
5714        for (i = 1; i < elements; i++) {
5715            read_vec_element(s, tcg_elt, rn, i, size | (is_u ? 0 : MO_SIGN));
5716
5717            switch (opcode) {
5718            case 0x03: /* SADDLV / UADDLV */
5719            case 0x1b: /* ADDV */
5720                tcg_gen_add_i64(tcg_res, tcg_res, tcg_elt);
5721                break;
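                /* The movcond below computes the max: if tcg_res >=
                 * tcg_elt (GE, or GEU for unsigned) keep tcg_res,
                 * otherwise take tcg_elt; the min case uses LE/LEU in
                 * the same way.
                 */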
5722            case 0x0a: /* SMAXV / UMAXV */
5723                tcg_gen_movcond_i64(is_u ? TCG_COND_GEU : TCG_COND_GE,
5724                                    tcg_res,
5725                                    tcg_res, tcg_elt, tcg_res, tcg_elt);
5726                break;
5727            case 0x1a: /* SMINV / UMINV */
5728                tcg_gen_movcond_i64(is_u ? TCG_COND_LEU : TCG_COND_LE,
5729                                    tcg_res,
5730                                    tcg_res, tcg_elt, tcg_res, tcg_elt);
5731                break;
5733            default:
5734                g_assert_not_reached();
5735            }
5736
5737        }
5738    } else {
5739        /* Floating point ops which work on 32 bit (single) intermediates.
5740         * Note that correct NaN propagation requires that we do these
5741         * operations in exactly the order specified by the pseudocode.
5742         */
5743        TCGv_i32 tcg_elt1 = tcg_temp_new_i32();
5744        TCGv_i32 tcg_elt2 = tcg_temp_new_i32();
5745        TCGv_i32 tcg_elt3 = tcg_temp_new_i32();
5746        TCGv_ptr fpst = get_fpstatus_ptr();
5747
5748        assert(esize == 32);
5749        assert(elements == 4);
5750
5751        read_vec_element(s, tcg_elt, rn, 0, MO_32);
5752        tcg_gen_extrl_i64_i32(tcg_elt1, tcg_elt);
5753        read_vec_element(s, tcg_elt, rn, 1, MO_32);
5754        tcg_gen_extrl_i64_i32(tcg_elt2, tcg_elt);
5755
5756        do_minmaxop(s, tcg_elt1, tcg_elt2, opcode, is_min, fpst);
5757
5758        read_vec_element(s, tcg_elt, rn, 2, MO_32);
5759        tcg_gen_extrl_i64_i32(tcg_elt2, tcg_elt);
5760        read_vec_element(s, tcg_elt, rn, 3, MO_32);
5761        tcg_gen_extrl_i64_i32(tcg_elt3, tcg_elt);
5762
5763        do_minmaxop(s, tcg_elt2, tcg_elt3, opcode, is_min, fpst);
5764
5765        do_minmaxop(s, tcg_elt1, tcg_elt2, opcode, is_min, fpst);
5766
5767        tcg_gen_extu_i32_i64(tcg_res, tcg_elt1);
5768        tcg_temp_free_i32(tcg_elt1);
5769        tcg_temp_free_i32(tcg_elt2);
5770        tcg_temp_free_i32(tcg_elt3);
5771        tcg_temp_free_ptr(fpst);
5772    }
5773
5774    tcg_temp_free_i64(tcg_elt);
5775
5776    /* Now truncate the result to the width required for the final output */
5777    if (opcode == 0x03) {
5778        /* SADDLV, UADDLV: result is 2*esize */
5779        size++;
5780    }
5781
5782    switch (size) {
5783    case 0:
5784        tcg_gen_ext8u_i64(tcg_res, tcg_res);
5785        break;
5786    case 1:
5787        tcg_gen_ext16u_i64(tcg_res, tcg_res);
5788        break;
5789    case 2:
5790        tcg_gen_ext32u_i64(tcg_res, tcg_res);
5791        break;
5792    case 3:
5793        break;
5794    default:
5795        g_assert_not_reached();
5796    }
5797
5798    write_fp_dreg(s, rd, tcg_res);
5799    tcg_temp_free_i64(tcg_res);
5800}
5801
5802/* C6.3.31 DUP (Element, Vector)
5803 *
5804 *  31  30   29              21 20    16 15        10  9    5 4    0
5805 * +---+---+-------------------+--------+-------------+------+------+
5806 * | 0 | Q | 0 0 1 1 1 0 0 0 0 |  imm5  | 0 0 0 0 0 1 |  Rn  |  Rd  |
5807 * +---+---+-------------------+--------+-------------+------+------+
5808 *
5809 * size: encoded in imm5 (see ARM ARM LowestSetBit())
5810 */
5811static void handle_simd_dupe(DisasContext *s, int is_q, int rd, int rn,
5812                             int imm5)
5813{
5814    int size = ctz32(imm5);
5815    int esize = 8 << size;
5816    int elements = (is_q ? 128 : 64) / esize;
5817    int index, i;
5818    TCGv_i64 tmp;
5819
5820    if (size > 3 || (size == 3 && !is_q)) {
5821        unallocated_encoding(s);
5822        return;
5823    }
5824
5825    if (!fp_access_check(s)) {
5826        return;
5827    }
5828
5829    index = imm5 >> (size + 1);
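        /* imm5 is index:1:Zeros(size) (see LowestSetBit in the ARM
         * ARM), so shifting out the low size + 1 bits leaves the
         * element index.
         */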
5830
5831    tmp = tcg_temp_new_i64();
5832    read_vec_element(s, tmp, rn, index, size);
5833
5834    for (i = 0; i < elements; i++) {
5835        write_vec_element(s, tmp, rd, i, size);
5836    }
5837
5838    if (!is_q) {
5839        clear_vec_high(s, rd);
5840    }
5841
5842    tcg_temp_free_i64(tmp);
5843}
5844
5845/* C6.3.31 DUP (element, scalar)
5846 *  31                   21 20    16 15        10  9    5 4    0
5847 * +-----------------------+--------+-------------+------+------+
5848 * | 0 1 0 1 1 1 1 0 0 0 0 |  imm5  | 0 0 0 0 0 1 |  Rn  |  Rd  |
5849 * +-----------------------+--------+-------------+------+------+
5850 */
5851static void handle_simd_dupes(DisasContext *s, int rd, int rn,
5852                              int imm5)
5853{
5854    int size = ctz32(imm5);
5855    int index;
5856    TCGv_i64 tmp;
5857
5858    if (size > 3) {
5859        unallocated_encoding(s);
5860        return;
5861    }
5862
5863    if (!fp_access_check(s)) {
5864        return;
5865    }
5866
5867    index = imm5 >> (size + 1);
5868
5869    /* This instruction just extracts the specified element and
5870     * zero-extends it into the bottom of the destination register.
5871     */
5872    tmp = tcg_temp_new_i64();
5873    read_vec_element(s, tmp, rn, index, size);
5874    write_fp_dreg(s, rd, tmp);
5875    tcg_temp_free_i64(tmp);
5876}
5877
5878/* C6.3.32 DUP (General)
5879 *
5880 *  31  30   29              21 20    16 15        10  9    5 4    0
5881 * +---+---+-------------------+--------+-------------+------+------+
5882 * | 0 | Q | 0 0 1 1 1 0 0 0 0 |  imm5  | 0 0 0 0 1 1 |  Rn  |  Rd  |
5883 * +---+---+-------------------+--------+-------------+------+------+
5884 *
5885 * size: encoded in imm5 (see ARM ARM LowestSetBit())
5886 */
5887static void handle_simd_dupg(DisasContext *s, int is_q, int rd, int rn,
5888                             int imm5)
5889{
5890    int size = ctz32(imm5);
5891    int esize = 8 << size;
5892    int elements = (is_q ? 128 : 64) / esize;
5893    int i = 0;
5894
5895    if (size > 3 || ((size == 3) && !is_q)) {
5896        unallocated_encoding(s);
5897        return;
5898    }
5899
5900    if (!fp_access_check(s)) {
5901        return;
5902    }
5903
5904    for (i = 0; i < elements; i++) {
5905        write_vec_element(s, cpu_reg(s, rn), rd, i, size);
5906    }
5907    if (!is_q) {
5908        clear_vec_high(s, rd);
5909    }
5910}
5911
5912/* C6.3.150 INS (Element)
5913 *
5914 *  31                   21 20    16 15  14    11  10 9    5 4    0
5915 * +-----------------------+--------+------------+---+------+------+
5916 * | 0 1 1 0 1 1 1 0 0 0 0 |  imm5  | 0 |  imm4  | 1 |  Rn  |  Rd  |
5917 * +-----------------------+--------+------------+---+------+------+
5918 *
5919 * size: encoded in imm5 (see ARM ARM LowestSetBit())
5920 * index: encoded in imm5<4:size+1>
5921 */
5922static void handle_simd_inse(DisasContext *s, int rd, int rn,
5923                             int imm4, int imm5)
5924{
5925    int size = ctz32(imm5);
5926    int src_index, dst_index;
5927    TCGv_i64 tmp;
5928
5929    if (size > 3) {
5930        unallocated_encoding(s);
5931        return;
5932    }
5933
5934    if (!fp_access_check(s)) {
5935        return;
5936    }
5937
5938    dst_index = extract32(imm5, 1 + size, 5);
5939    src_index = extract32(imm4, size, 4);
5940
5941    tmp = tcg_temp_new_i64();
5942
5943    read_vec_element(s, tmp, rn, src_index, size);
5944    write_vec_element(s, tmp, rd, dst_index, size);
5945
5946    tcg_temp_free_i64(tmp);
5947}
5948
5950/* C6.3.151 INS (General)
5951 *
5952 *  31                   21 20    16 15        10  9    5 4    0
5953 * +-----------------------+--------+-------------+------+------+
5954 * | 0 1 0 0 1 1 1 0 0 0 0 |  imm5  | 0 0 0 1 1 1 |  Rn  |  Rd  |
5955 * +-----------------------+--------+-------------+------+------+
5956 *
5957 * size: encoded in imm5 (see ARM ARM LowestSetBit())
5958 * index: encoded in imm5<4:size+1>
5959 */
5960static void handle_simd_insg(DisasContext *s, int rd, int rn, int imm5)
5961{
5962    int size = ctz32(imm5);
5963    int idx;
5964
5965    if (size > 3) {
5966        unallocated_encoding(s);
5967        return;
5968    }
5969
5970    if (!fp_access_check(s)) {
5971        return;
5972    }
5973
5974    idx = extract32(imm5, 1 + size, 4 - size);
5975    write_vec_element(s, cpu_reg(s, rn), rd, idx, size);
5976}
5977
5978/*
5979 * C6.3.321 UMOV (General)
5980 * C6.3.237 SMOV (General)
5981 *
5982 *  31  30   29              21 20    16 15    12   10 9    5 4    0
5983 * +---+---+-------------------+--------+-------------+------+------+
5984 * | 0 | Q | 0 0 1 1 1 0 0 0 0 |  imm5  | 0 0 1 U 1 1 |  Rn  |  Rd  |
5985 * +---+---+-------------------+--------+-------------+------+------+
5986 *
5987 * U: unsigned when set
5988 * size: encoded in imm5 (see ARM ARM LowestSetBit())
5989 */
5990static void handle_simd_umov_smov(DisasContext *s, int is_q, int is_signed,
5991                                  int rn, int rd, int imm5)
5992{
5993    int size = ctz32(imm5);
5994    int element;
5995    TCGv_i64 tcg_rd;
5996
5997    /* Check for UnallocatedEncodings */
5998    if (is_signed) {
5999        if (size > 2 || (size == 2 && !is_q)) {
6000            unallocated_encoding(s);
6001            return;
6002        }
6003    } else {
6004        if (size > 3
6005            || (size < 3 && is_q)
6006            || (size == 3 && !is_q)) {
6007            unallocated_encoding(s);
6008            return;
6009        }
6010    }
6011
6012    if (!fp_access_check(s)) {
6013        return;
6014    }
6015
6016    element = extract32(imm5, 1 + size, 4);
6017
6018    tcg_rd = cpu_reg(s, rd);
6019    read_vec_element(s, tcg_rd, rn, element, size | (is_signed ? MO_SIGN : 0));
6020    if (is_signed && !is_q) {
6021        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
6022    }
6023}
6024
6025/* C3.6.5 AdvSIMD copy
6026 *   31  30  29  28             21 20  16 15  14  11 10  9    5 4    0
6027 * +---+---+----+-----------------+------+---+------+---+------+------+
6028 * | 0 | Q | op | 0 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 |  Rn  |  Rd  |
6029 * +---+---+----+-----------------+------+---+------+---+------+------+
6030 */
6031static void disas_simd_copy(DisasContext *s, uint32_t insn)
6032{
6033    int rd = extract32(insn, 0, 5);
6034    int rn = extract32(insn, 5, 5);
6035    int imm4 = extract32(insn, 11, 4);
6036    int op = extract32(insn, 29, 1);
6037    int is_q = extract32(insn, 30, 1);
6038    int imm5 = extract32(insn, 16, 5);
6039
6040    if (op) {
6041        if (is_q) {
6042            /* INS (element) */
6043            handle_simd_inse(s, rd, rn, imm4, imm5);
6044        } else {
6045            unallocated_encoding(s);
6046        }
6047    } else {
6048        switch (imm4) {
6049        case 0:
6050            /* DUP (element - vector) */
6051            handle_simd_dupe(s, is_q, rd, rn, imm5);
6052            break;
6053        case 1:
6054            /* DUP (general) */
6055            handle_simd_dupg(s, is_q, rd, rn, imm5);
6056            break;
6057        case 3:
6058            if (is_q) {
6059                /* INS (general) */
6060                handle_simd_insg(s, rd, rn, imm5);
6061            } else {
6062                unallocated_encoding(s);
6063            }
6064            break;
6065        case 5:
6066        case 7:
6067            /* UMOV/SMOV (is_q indicates 32/64; imm4 indicates signedness) */
6068            handle_simd_umov_smov(s, is_q, (imm4 == 5), rn, rd, imm5);
6069            break;
6070        default:
6071            unallocated_encoding(s);
6072            break;
6073        }
6074    }
6075}
6076
6077/* C3.6.6 AdvSIMD modified immediate
6078 *  31  30   29  28                 19 18 16 15   12  11  10  9     5 4    0
6079 * +---+---+----+---------------------+-----+-------+----+---+-------+------+
6080 * | 0 | Q | op | 0 1 1 1 1 0 0 0 0 0 | abc | cmode | o2 | 1 | defgh |  Rd  |
6081 * +---+---+----+---------------------+-----+-------+----+---+-------+------+
6082 *
6083 * There are a number of operations that can be carried out here:
6084 *   MOVI - move (shifted) imm into register
6085 *   MVNI - move inverted (shifted) imm into register
6086 *   ORR  - bitwise OR of (shifted) imm with register
6087 *   BIC  - bitwise clear of (shifted) imm with register
6088 */
6089static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
6090{
6091    int rd = extract32(insn, 0, 5);
6092    int cmode = extract32(insn, 12, 4);
6093    int cmode_3_1 = extract32(cmode, 1, 3);
6094    int cmode_0 = extract32(cmode, 0, 1);
6095    int o2 = extract32(insn, 11, 1);
6096    uint64_t abcdefgh = extract32(insn, 5, 5) | (extract32(insn, 16, 3) << 5);
6097    bool is_neg = extract32(insn, 29, 1);
6098    bool is_q = extract32(insn, 30, 1);
6099    uint64_t imm = 0;
6100    TCGv_i64 tcg_rd, tcg_imm;
6101    int i;
6102
6103    if (o2 != 0 || ((cmode == 0xf) && is_neg && !is_q)) {
6104        unallocated_encoding(s);
6105        return;
6106    }
6107
6108    if (!fp_access_check(s)) {
6109        return;
6110    }
6111
6112    /* See AdvSIMDExpandImm() in ARM ARM */
6113    switch (cmode_3_1) {
6114    case 0: /* Replicate(Zeros(24):imm8, 2) */
6115    case 1: /* Replicate(Zeros(16):imm8:Zeros(8), 2) */
6116    case 2: /* Replicate(Zeros(8):imm8:Zeros(16), 2) */
6117    case 3: /* Replicate(imm8:Zeros(24), 2) */
6118    {
6119        int shift = cmode_3_1 * 8;
6120        imm = bitfield_replicate(abcdefgh << shift, 32);
6121        break;
6122    }
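        /* e.g. cmode<3:1> = 0 with abcdefgh = 0xab expands to
         * imm = 0x000000ab000000ab (imm8 replicated into each 32-bit
         * half).
         */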
6123    case 4: /* Replicate(Zeros(8):imm8, 4) */
6124    case 5: /* Replicate(imm8:Zeros(8), 4) */
6125    {
6126        int shift = (cmode_3_1 & 0x1) * 8;
6127        imm = bitfield_replicate(abcdefgh << shift, 16);
6128        break;
6129    }
6130    case 6:
6131        if (cmode_0) {
6132            /* Replicate(Zeros(8):imm8:Ones(16), 2) */
6133            imm = (abcdefgh << 16) | 0xffff;
6134        } else {
6135            /* Replicate(Zeros(16):imm8:Ones(8), 2) */
6136            imm = (abcdefgh << 8) | 0xff;
6137        }
6138        imm = bitfield_replicate(imm, 32);
6139        break;
6140    case 7:
6141        if (!cmode_0 && !is_neg) {
6142            imm = bitfield_replicate(abcdefgh, 8);
6143        } else if (!cmode_0 && is_neg) {
6144            int i;
6145            imm = 0;
6146            for (i = 0; i < 8; i++) {
6147                if ((abcdefgh) & (1 << i)) {
6148                    imm |= 0xffULL << (i * 8);
6149                }
6150            }
6151        } else if (cmode_0) {
6152            if (is_neg) {
6153                imm = (abcdefgh & 0x3f) << 48;
6154                if (abcdefgh & 0x80) {
6155                    imm |= 0x8000000000000000ULL;
6156                }
6157                if (abcdefgh & 0x40) {
6158                    imm |= 0x3fc0000000000000ULL;
6159                } else {
6160                    imm |= 0x4000000000000000ULL;
6161                }
6162            } else {
6163                imm = (abcdefgh & 0x3f) << 19;
6164                if (abcdefgh & 0x80) {
6165                    imm |= 0x80000000;
6166                }
6167                if (abcdefgh & 0x40) {
6168                    imm |= 0x3e000000;
6169                } else {
6170                    imm |= 0x40000000;
6171                }
6172                imm |= (imm << 32);
6173            }
6174        }
6175        break;
6176    }
6177
6178    if (cmode_3_1 != 7 && is_neg) {
6179        imm = ~imm;
6180    }
6181
6182    tcg_imm = tcg_const_i64(imm);
6183    tcg_rd = new_tmp_a64(s);
6184
6185    for (i = 0; i < 2; i++) {
6186        int foffs = i ? fp_reg_hi_offset(s, rd) : fp_reg_offset(s, rd, MO_64);
6187
6188        if (i == 1 && !is_q) {
6189            /* non-quad ops clear high half of vector */
6190            tcg_gen_movi_i64(tcg_rd, 0);
6191        } else if ((cmode & 0x9) == 0x1 || (cmode & 0xd) == 0x9) {
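                /* cmode 0xx1 (32-bit shifted) or 10x1 (16-bit shifted)
                 * are the ORR/BIC forms, which read-modify-write the
                 * register rather than overwriting it.
                 */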
6192            tcg_gen_ld_i64(tcg_rd, cpu_env, foffs);
6193            if (is_neg) {
6194                /* AND (BIC) */
6195                tcg_gen_and_i64(tcg_rd, tcg_rd, tcg_imm);
6196            } else {
6197                /* ORR */
6198                tcg_gen_or_i64(tcg_rd, tcg_rd, tcg_imm);
6199            }
6200        } else {
6201            /* MOVI */
6202            tcg_gen_mov_i64(tcg_rd, tcg_imm);
6203        }
6204        tcg_gen_st_i64(tcg_rd, cpu_env, foffs);
6205    }
6206
6207    tcg_temp_free_i64(tcg_imm);
6208}
6209
6210/* C3.6.7 AdvSIMD scalar copy
6211 *  31 30  29  28             21 20  16 15  14  11 10  9    5 4    0
6212 * +-----+----+-----------------+------+---+------+---+------+------+
6213 * | 0 1 | op | 1 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 |  Rn  |  Rd  |
6214 * +-----+----+-----------------+------+---+------+---+------+------+
6215 */
6216static void disas_simd_scalar_copy(DisasContext *s, uint32_t insn)
6217{
6218    int rd = extract32(insn, 0, 5);
6219    int rn = extract32(insn, 5, 5);
6220    int imm4 = extract32(insn, 11, 4);
6221    int imm5 = extract32(insn, 16, 5);
6222    int op = extract32(insn, 29, 1);
6223
6224    if (op != 0 || imm4 != 0) {
6225        unallocated_encoding(s);
6226        return;
6227    }
6228
6229    /* DUP (element, scalar) */
6230    handle_simd_dupes(s, rd, rn, imm5);
6231}
6232
6233/* C3.6.8 AdvSIMD scalar pairwise
6234 *  31 30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
6235 * +-----+---+-----------+------+-----------+--------+-----+------+------+
6236 * | 0 1 | U | 1 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
6237 * +-----+---+-----------+------+-----------+--------+-----+------+------+
6238 */
6239static void disas_simd_scalar_pairwise(DisasContext *s, uint32_t insn)
6240{
6241    int u = extract32(insn, 29, 1);
6242    int size = extract32(insn, 22, 2);
6243    int opcode = extract32(insn, 12, 5);
6244    int rn = extract32(insn, 5, 5);
6245    int rd = extract32(insn, 0, 5);
6246    TCGv_ptr fpst;
6247
6248    /* For some ops (the FP ones), size[1] is part of the encoding.
6249     * For ADDP strictly it is not but size[1] is always 1 for valid
6250     * encodings.
6251     */
6252    opcode |= (extract32(size, 1, 1) << 5);
6253
6254    switch (opcode) {
6255    case 0x3b: /* ADDP */
6256        if (u || size != 3) {
6257            unallocated_encoding(s);
6258            return;
6259        }
6260        if (!fp_access_check(s)) {
6261            return;
6262        }
6263
6264        TCGV_UNUSED_PTR(fpst);
6265        break;
6266    case 0xc: /* FMAXNMP */
6267    case 0xd: /* FADDP */
6268    case 0xf: /* FMAXP */
6269    case 0x2c: /* FMINNMP */
6270    case 0x2f: /* FMINP */
6271        /* FP op, size[0] is 32 or 64 bit */
6272        if (!u) {
6273            unallocated_encoding(s);
6274            return;
6275        }
6276        if (!fp_access_check(s)) {
6277            return;
6278        }
6279
6280        size = extract32(size, 0, 1) ? 3 : 2;
6281        fpst = get_fpstatus_ptr();
6282        break;
6283    default:
6284        unallocated_encoding(s);
6285        return;
6286    }
6287
6288    if (size == 3) {
6289        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
6290        TCGv_i64 tcg_op2 = tcg_temp_new_i64();
6291        TCGv_i64 tcg_res = tcg_temp_new_i64();
6292
6293        read_vec_element(s, tcg_op1, rn, 0, MO_64);
6294        read_vec_element(s, tcg_op2, rn, 1, MO_64);
6295
6296        switch (opcode) {
6297        case 0x3b: /* ADDP */
6298            tcg_gen_add_i64(tcg_res, tcg_op1, tcg_op2);
6299            break;
6300        case 0xc: /* FMAXNMP */
6301            gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
6302            break;
6303        case 0xd: /* FADDP */
6304            gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
6305            break;
6306        case 0xf: /* FMAXP */
6307            gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
6308            break;
6309        case 0x2c: /* FMINNMP */
6310            gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
6311            break;
6312        case 0x2f: /* FMINP */
6313            gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
6314            break;
6315        default:
6316            g_assert_not_reached();
6317        }
6318
6319        write_fp_dreg(s, rd, tcg_res);
6320
6321        tcg_temp_free_i64(tcg_op1);
6322        tcg_temp_free_i64(tcg_op2);
6323        tcg_temp_free_i64(tcg_res);
6324    } else {
6325        TCGv_i32 tcg_op1 = tcg_temp_new_i32();
6326        TCGv_i32 tcg_op2 = tcg_temp_new_i32();
6327        TCGv_i32 tcg_res = tcg_temp_new_i32();
6328
6329        read_vec_element_i32(s, tcg_op1, rn, 0, MO_32);
6330        read_vec_element_i32(s, tcg_op2, rn, 1, MO_32);
6331
6332        switch (opcode) {
6333        case 0xc: /* FMAXNMP */
6334            gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
6335            break;
6336        case 0xd: /* FADDP */
6337            gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
6338            break;
6339        case 0xf: /* FMAXP */
6340            gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
6341            break;
6342        case 0x2c: /* FMINNMP */
6343            gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
6344            break;
6345        case 0x2f: /* FMINP */
6346            gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
6347            break;
6348        default:
6349            g_assert_not_reached();
6350        }
6351
6352        write_fp_sreg(s, rd, tcg_res);
6353
6354        tcg_temp_free_i32(tcg_op1);
6355        tcg_temp_free_i32(tcg_op2);
6356        tcg_temp_free_i32(tcg_res);
6357    }
6358
6359    if (!TCGV_IS_UNUSED_PTR(fpst)) {
6360        tcg_temp_free_ptr(fpst);
6361    }
6362}
6363
6364/*
6365 * Common SSHR[RA]/USHR[RA] - Shift right (optional rounding/accumulate)
6366 *
6367 * This handles the common shifting logic and is used by both
6368 * the vector and scalar code.
6369 */
6370static void handle_shri_with_rndacc(TCGv_i64 tcg_res, TCGv_i64 tcg_src,
6371                                    TCGv_i64 tcg_rnd, bool accumulate,
6372                                    bool is_u, int size, int shift)
6373{
6374    bool extended_result = false;
6375    bool round = !TCGV_IS_UNUSED_I64(tcg_rnd);
6376    int ext_lshift = 0;
6377    TCGv_i64 tcg_src_hi;
6378
6379    if (round && size == 3) {
6380        extended_result = true;
6381        ext_lshift = 64 - shift;
6382        tcg_src_hi = tcg_temp_new_i64();
6383    } else if (shift == 64) {
6384        if (!accumulate && is_u) {
6385            /* result is zero */
6386            tcg_gen_movi_i64(tcg_res, 0);
6387            return;
6388        }
6389    }
6390
6391    /* Deal with the rounding step */
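        /* Rounding adds 1 << (shift - 1) before shifting, e.g. a
         * rounding shift right by 3 turns 12 into (12 + 4) >> 3 = 2,
         * i.e. 1.5 rounded up.
         */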
6392    if (round) {
6393        if (extended_result) {
6394            TCGv_i64 tcg_zero = tcg_const_i64(0);
6395            if (!is_u) {
6396                /* take care of sign extending tcg_res */
6397                tcg_gen_sari_i64(tcg_src_hi, tcg_src, 63);
6398                tcg_gen_add2_i64(tcg_src, tcg_src_hi,
6399                                 tcg_src, tcg_src_hi,
6400                                 tcg_rnd, tcg_zero);
6401            } else {
6402                tcg_gen_add2_i64(tcg_src, tcg_src_hi,
6403                                 tcg_src, tcg_zero,
6404                                 tcg_rnd, tcg_zero);
6405            }
6406            tcg_temp_free_i64(tcg_zero);
6407        } else {
6408            tcg_gen_add_i64(tcg_src, tcg_src, tcg_rnd);
6409        }
6410    }
6411
6412    /* Now do the shift right */
6413    if (round && extended_result) {
6414        /* extended case, >64 bit precision required */
6415        if (ext_lshift == 0) {
6416            /* special case, only high bits matter */
6417            tcg_gen_mov_i64(tcg_src, tcg_src_hi);
6418        } else {
6419            tcg_gen_shri_i64(tcg_src, tcg_src, shift);
6420            tcg_gen_shli_i64(tcg_src_hi, tcg_src_hi, ext_lshift);
6421            tcg_gen_or_i64(tcg_src, tcg_src, tcg_src_hi);
6422        }
6423    } else {
6424        if (is_u) {
6425            if (shift == 64) {
6426                /* essentially shifting in 64 zeros */
6427                tcg_gen_movi_i64(tcg_src, 0);
6428            } else {
6429                tcg_gen_shri_i64(tcg_src, tcg_src, shift);
6430            }
6431        } else {
6432            if (shift == 64) {
6433                /* effectively extending the sign-bit */
6434                tcg_gen_sari_i64(tcg_src, tcg_src, 63);
6435            } else {
6436                tcg_gen_sari_i64(tcg_src, tcg_src, shift);
6437            }
6438        }
6439    }
6440
6441    if (accumulate) {
6442        tcg_gen_add_i64(tcg_res, tcg_res, tcg_src);
6443    } else {
6444        tcg_gen_mov_i64(tcg_res, tcg_src);
6445    }
6446
6447    if (extended_result) {
6448        tcg_temp_free_i64(tcg_src_hi);
6449    }
6450}
6451
6452/* Common SHL/SLI - Shift left with an optional insert */
6453static void handle_shli_with_ins(TCGv_i64 tcg_res, TCGv_i64 tcg_src,
6454                                 bool insert, int shift)
6455{
6456    if (insert) { /* SLI */
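            /* The deposit keeps the low 'shift' bits of tcg_res and
             * writes bits [63 - shift:0] of tcg_src above them, so SLI
             * only fills the bit positions vacated by the shift.
             */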
6457        tcg_gen_deposit_i64(tcg_res, tcg_res, tcg_src, shift, 64 - shift);
6458    } else { /* SHL */
6459        tcg_gen_shli_i64(tcg_res, tcg_src, shift);
6460    }
6461}
6462
6463/* SRI: shift right with insert */
6464static void handle_shri_with_ins(TCGv_i64 tcg_res, TCGv_i64 tcg_src,
6465                                 int size, int shift)
6466{
6467    int esize = 8 << size;
6468
6469    /* shift count same as element size is valid but does nothing;
6470     * special case to avoid potential shift by 64.
6471     */
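        /* e.g. esize == 64, shift == 8: the low 56 bits of the result
         * come from tcg_src >> 8 while the top byte of tcg_res is
         * preserved.
         */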
6472    if (shift != esize) {
6473        tcg_gen_shri_i64(tcg_src, tcg_src, shift);
6474        tcg_gen_deposit_i64(tcg_res, tcg_res, tcg_src, 0, esize - shift);
6475    }
6476}
6477
6478/* SSHR[RA]/USHR[RA] - Scalar shift right (optional rounding/accumulate) */
6479static void handle_scalar_simd_shri(DisasContext *s,
6480                                    bool is_u, int immh, int immb,
6481                                    int opcode, int rn, int rd)
6482{
6483    const int size = 3;
6484    int immhb = immh << 3 | immb;
6485    int shift = 2 * (8 << size) - immhb;
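        /* immh<3> must be set (checked below), so immhb is in the
         * range 64..127 and the right-shift amount 128 - immhb is in
         * the range 1..64.
         */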
6486    bool accumulate = false;
6487    bool round = false;
6488    bool insert = false;
6489    TCGv_i64 tcg_rn;
6490    TCGv_i64 tcg_rd;
6491    TCGv_i64 tcg_round;
6492
6493    if (!extract32(immh, 3, 1)) {
6494        unallocated_encoding(s);
6495        return;
6496    }
6497
6498    if (!fp_access_check(s)) {
6499        return;
6500    }
6501
6502    switch (opcode) {
6503    case 0x02: /* SSRA / USRA (accumulate) */
6504        accumulate = true;
6505        break;
6506    case 0x04: /* SRSHR / URSHR (rounding) */
6507        round = true;
6508        break;
6509    case 0x06: /* SRSRA / URSRA (accum + rounding) */
6510        accumulate = round = true;
6511        break;
6512    case 0x08: /* SRI */
6513        insert = true;
6514        break;
6515    }
6516
6517    if (round) {
6518        uint64_t round_const = 1ULL << (shift - 1);
6519        tcg_round = tcg_const_i64(round_const);
6520    } else {
6521        TCGV_UNUSED_I64(tcg_round);
6522    }
6523
6524    tcg_rn = read_fp_dreg(s, rn);
6525    tcg_rd = (accumulate || insert) ? read_fp_dreg(s, rd) : tcg_temp_new_i64();
6526
6527    if (insert) {
6528        handle_shri_with_ins(tcg_rd, tcg_rn, size, shift);
6529    } else {
6530        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
6531                                accumulate, is_u, size, shift);
6532    }
6533
6534    write_fp_dreg(s, rd, tcg_rd);
6535
6536    tcg_temp_free_i64(tcg_rn);
6537    tcg_temp_free_i64(tcg_rd);
6538    if (round) {
6539        tcg_temp_free_i64(tcg_round);
6540    }
6541}
6542
6543/* SHL/SLI - Scalar shift left */
6544static void handle_scalar_simd_shli(DisasContext *s, bool insert,
6545                                    int immh, int immb, int opcode,
6546                                    int rn, int rd)
6547{
6548    int size = 32 - clz32(immh) - 1;
6549    int immhb = immh << 3 | immb;
6550    int shift = immhb - (8 << size);
6551    TCGv_i64 tcg_rn;
6552    TCGv_i64 tcg_rd;
6553
6554    if (!extract32(immh, 3, 1)) {
6555        unallocated_encoding(s);
6556        return;
6557    }
6558
6559    if (!fp_access_check(s)) {
6560        return;
6561    }
6562
6563    tcg_rn = read_fp_dreg(s, rn);
6564    tcg_rd = insert ? read_fp_dreg(s, rd) : tcg_temp_new_i64();
6565
6566    handle_shli_with_ins(tcg_rd, tcg_rn, insert, shift);
6567
6568    write_fp_dreg(s, rd, tcg_rd);
6569
6570    tcg_temp_free_i64(tcg_rn);
6571    tcg_temp_free_i64(tcg_rd);
6572}
6573
6574/* SQSHRN/SQSHRUN - Saturating (signed/unsigned) shift right with
6575 * (signed/unsigned) narrowing */
6576static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q,
6577                                   bool is_u_shift, bool is_u_narrow,
6578                                   int immh, int immb, int opcode,
6579                                   int rn, int rd)
6580{
6581    int immhb = immh << 3 | immb;
6582    int size = 32 - clz32(immh) - 1;
6583    int esize = 8 << size;
6584    int shift = (2 * esize) - immhb;
6585    int elements = is_scalar ? 1 : (64 / esize);
6586    bool round = extract32(opcode, 0, 1);
6587    TCGMemOp ldop = (size + 1) | (is_u_shift ? 0 : MO_SIGN);
6588    TCGv_i64 tcg_rn, tcg_rd, tcg_round;
6589    TCGv_i32 tcg_rd_narrowed;
6590    TCGv_i64 tcg_final;
6591
6592    static NeonGenNarrowEnvFn * const signed_narrow_fns[4][2] = {
6593        { gen_helper_neon_narrow_sat_s8,
6594          gen_helper_neon_unarrow_sat8 },
6595        { gen_helper_neon_narrow_sat_s16,
6596          gen_helper_neon_unarrow_sat16 },
6597        { gen_helper_neon_narrow_sat_s32,
6598          gen_helper_neon_unarrow_sat32 },
6599        { NULL, NULL },
6600    };
6601    static NeonGenNarrowEnvFn * const unsigned_narrow_fns[4] = {
6602        gen_helper_neon_narrow_sat_u8,
6603        gen_helper_neon_narrow_sat_u16,
6604        gen_helper_neon_narrow_sat_u32,
6605        NULL
6606    };
6607    NeonGenNarrowEnvFn *narrowfn;
6608
6609    int i;
6610
6611    assert(size < 4);
6612
6613    if (extract32(immh, 3, 1)) {
6614        unallocated_encoding(s);
6615        return;
6616    }
6617
6618    if (!fp_access_check(s)) {
6619        return;
6620    }
6621
6622    if (is_u_shift) {
6623        narrowfn = unsigned_narrow_fns[size];
6624    } else {
6625        narrowfn = signed_narrow_fns[size][is_u_narrow ? 1 : 0];
6626    }
6627
6628    tcg_rn = tcg_temp_new_i64();
6629    tcg_rd = tcg_temp_new_i64();
6630    tcg_rd_narrowed = tcg_temp_new_i32();
6631    tcg_final = tcg_const_i64(0);
6632
6633    if (round) {
6634        uint64_t round_const = 1ULL << (shift - 1);
6635        tcg_round = tcg_const_i64(round_const);
6636    } else {
6637        TCGV_UNUSED_I64(tcg_round);
6638    }
6639
6640    for (i = 0; i < elements; i++) {
6641        read_vec_element(s, tcg_rn, rn, i, ldop);
6642        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
6643                                false, is_u_shift, size+1, shift);
6644        narrowfn(tcg_rd_narrowed, cpu_env, tcg_rd);
6645        tcg_gen_extu_i32_i64(tcg_rd, tcg_rd_narrowed);
6646        tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
6647    }
6648
6649    if (!is_q) {
6650        clear_vec_high(s, rd);
6651        write_vec_element(s, tcg_final, rd, 0, MO_64);
6652    } else {
6653        write_vec_element(s, tcg_final, rd, 1, MO_64);
6654    }
6655
6656    if (round) {
6657        tcg_temp_free_i64(tcg_round);
6658    }
6659    tcg_temp_free_i64(tcg_rn);
6660    tcg_temp_free_i64(tcg_rd);
6661    tcg_temp_free_i32(tcg_rd_narrowed);
6662    tcg_temp_free_i64(tcg_final);
6664}
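
/* Sketch of one pass of the narrowing loop above, assuming SQSHRN from
 * 32-bit to 16-bit elements (size == 1, esize == 16):
 *   1. read a sign-extended 32-bit source element into tcg_rn
 *   2. shift it right by "shift" (with optional rounding)
 *   3. saturate-narrow to 16 bits (gen_helper_neon_narrow_sat_s16)
 *   4. deposit the result into tcg_final at bit offset 16 * i
 * The assembled 64-bit tcg_final then goes to the low half of the
 * destination (SQSHRN) or the high half (SQSHRN2).
 */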
6665
6666/* SQSHLU, UQSHL, SQSHL: saturating left shifts */
6667static void handle_simd_qshl(DisasContext *s, bool scalar, bool is_q,
6668                             bool src_unsigned, bool dst_unsigned,
6669                             int immh, int immb, int rn, int rd)
6670{
6671    int immhb = immh << 3 | immb;
6672    int size = 32 - clz32(immh) - 1;
6673    int shift = immhb - (8 << size);
6674    int pass;
6675
6676    assert(immh != 0);
6677    assert(!(scalar && is_q));
6678
6679    if (!scalar) {
6680        if (!is_q && extract32(immh, 3, 1)) {
6681            unallocated_encoding(s);
6682            return;
6683        }
6684
6685        /* Since we use the variable-shift helpers we must
6686         * replicate the shift count into each element of
6687         * the tcg_shift value.
6688         */
6689        switch (size) {
6690        case 0:
6691            shift |= shift << 8;
6692            /* fall through */
6693        case 1:
6694            shift |= shift << 16;
6695            break;
6696        case 2:
6697        case 3:
6698            break;
6699        default:
6700            g_assert_not_reached();
6701        }
6702    }
6703
6704    if (!fp_access_check(s)) {
6705        return;
6706    }
6707
6708    if (size == 3) {
6709        TCGv_i64 tcg_shift = tcg_const_i64(shift);
6710        static NeonGenTwo64OpEnvFn * const fns[2][2] = {
6711            { gen_helper_neon_qshl_s64, gen_helper_neon_qshlu_s64 },
6712            { NULL, gen_helper_neon_qshl_u64 },
6713        };
6714        NeonGenTwo64OpEnvFn *genfn = fns[src_unsigned][dst_unsigned];
6715        int maxpass = is_q ? 2 : 1;
6716
6717        for (pass = 0; pass < maxpass; pass++) {
6718            TCGv_i64 tcg_op = tcg_temp_new_i64();
6719
6720            read_vec_element(s, tcg_op, rn, pass, MO_64);
6721            genfn(tcg_op, cpu_env, tcg_op, tcg_shift);
6722            write_vec_element(s, tcg_op, rd, pass, MO_64);
6723
6724            tcg_temp_free_i64(tcg_op);
6725        }
6726        tcg_temp_free_i64(tcg_shift);
6727
6728        if (!is_q) {
6729            clear_vec_high(s, rd);
6730        }
6731    } else {
6732        TCGv_i32 tcg_shift = tcg_const_i32(shift);
6733        static NeonGenTwoOpEnvFn * const fns[2][2][3] = {
6734            {
6735                { gen_helper_neon_qshl_s8,
6736                  gen_helper_neon_qshl_s16,
6737                  gen_helper_neon_qshl_s32 },
6738                { gen_helper_neon_qshlu_s8,
6739                  gen_helper_neon_qshlu_s16,
6740                  gen_helper_neon_qshlu_s32 }
6741            }, {
6742                { NULL, NULL, NULL },
6743                { gen_helper_neon_qshl_u8,
6744                  gen_helper_neon_qshl_u16,
6745                  gen_helper_neon_qshl_u32 }
6746            }
6747        };
6748        NeonGenTwoOpEnvFn *genfn = fns[src_unsigned][dst_unsigned][size];
6749        TCGMemOp memop = scalar ? size : MO_32;
6750        int maxpass = scalar ? 1 : is_q ? 4 : 2;
6751
6752        for (pass = 0; pass < maxpass; pass++) {
6753            TCGv_i32 tcg_op = tcg_temp_new_i32();
6754
6755            read_vec_element_i32(s, tcg_op, rn, pass, memop);
6756            genfn(tcg_op, cpu_env, tcg_op, tcg_shift);
6757            if (scalar) {
6758                switch (size) {
6759                case 0:
6760                    tcg_gen_ext8u_i32(tcg_op, tcg_op);
6761                    break;
6762                case 1:
6763                    tcg_gen_ext16u_i32(tcg_op, tcg_op);
6764                    break;
6765                case 2:
6766                    break;
6767                default:
6768                    g_assert_not_reached();
6769                }
6770                write_fp_sreg(s, rd, tcg_op);
6771            } else {
6772                write_vec_element_i32(s, tcg_op, rd, pass, MO_32);
6773            }
6774
6775            tcg_temp_free_i32(tcg_op);
6776        }
6777        tcg_temp_free_i32(tcg_shift);
6778
6779        if (!is_q && !scalar) {
6780            clear_vec_high(s, rd);
6781        }
6782    }
6783}
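
/* Worked example of the shift-count replication above (illustrative):
 * for 8-bit elements (size == 0) with shift == 3,
 *   shift |= shift << 8;    -> 0x00000303
 *   shift |= shift << 16;   -> 0x03030303   (via the fall through)
 * so the packed variable-shift helpers see the same count in each byte.
 */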
6784
6785/* Common vector code for handling integer to FP conversion */
6786static void handle_simd_intfp_conv(DisasContext *s, int rd, int rn,
6787                                   int elements, int is_signed,
6788                                   int fracbits, int size)
6789{
6790    bool is_double = (size == 3);
6791    TCGv_ptr tcg_fpst = get_fpstatus_ptr();
6792    TCGv_i32 tcg_shift = tcg_const_i32(fracbits);
6793    TCGv_i64 tcg_int = tcg_temp_new_i64();
6794    TCGMemOp mop = size | (is_signed ? MO_SIGN : 0);
6795    int pass;
6796
6797    for (pass = 0; pass < elements; pass++) {
6798        read_vec_element(s, tcg_int, rn, pass, mop);
6799
6800        if (is_double) {
6801            TCGv_i64 tcg_double = tcg_temp_new_i64();
6802            if (is_signed) {
6803                gen_helper_vfp_sqtod(tcg_double, tcg_int,
6804                                     tcg_shift, tcg_fpst);
6805            } else {
6806                gen_helper_vfp_uqtod(tcg_double, tcg_int,
6807                                     tcg_shift, tcg_fpst);
6808            }
6809            if (elements == 1) {
6810                write_fp_dreg(s, rd, tcg_double);
6811            } else {
6812                write_vec_element(s, tcg_double, rd, pass, MO_64);
6813            }
6814            tcg_temp_free_i64(tcg_double);
6815        } else {
6816            TCGv_i32 tcg_single = tcg_temp_new_i32();
6817            if (is_signed) {
6818                gen_helper_vfp_sqtos(tcg_single, tcg_int,
6819                                     tcg_shift, tcg_fpst);
6820            } else {
6821                gen_helper_vfp_uqtos(tcg_single, tcg_int,
6822                                     tcg_shift, tcg_fpst);
6823            }
6824            if (elements == 1) {
6825                write_fp_sreg(s, rd, tcg_single);
6826            } else {
6827                write_vec_element_i32(s, tcg_single, rd, pass, MO_32);
6828            }
6829            tcg_temp_free_i32(tcg_single);
6830        }
6831    }
6832
6833    if (!is_double && elements == 2) {
6834        clear_vec_high(s, rd);
6835    }
6836
6837    tcg_temp_free_i64(tcg_int);
6838    tcg_temp_free_ptr(tcg_fpst);
6839    tcg_temp_free_i32(tcg_shift);
6840}
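
/* Note (illustrative): the helpers above interpret tcg_shift as a number
 * of fraction bits, converting each element as value * 2^-fracbits.
 * For example, SCVTF with fracbits == 8 turns the fixed-point integer
 * 512 into 2.0; fracbits == 0 is a plain integer-to-float conversion.
 */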
6841
6842/* UCVTF/SCVTF - Integer to FP conversion */
6843static void handle_simd_shift_intfp_conv(DisasContext *s, bool is_scalar,
6844                                         bool is_q, bool is_u,
6845                                         int immh, int immb, int opcode,
6846                                         int rn, int rd)
6847{
6848    bool is_double = extract32(immh, 3, 1);
6849    int size = is_double ? MO_64 : MO_32;
6850    int elements;
6851    int immhb = immh << 3 | immb;
6852    int fracbits = (is_double ? 128 : 64) - immhb;
6853
6854    if (!extract32(immh, 2, 2)) {
6855        unallocated_encoding(s);
6856        return;
6857    }
6858
6859    if (is_scalar) {
6860        elements = 1;
6861    } else {
6862        elements = is_double ? 2 : is_q ? 4 : 2;
6863        if (is_double && !is_q) {
6864            unallocated_encoding(s);
6865            return;
6866        }
6867    }
6868
6869    if (!fp_access_check(s)) {
6870        return;
6871    }
6872
6873    /* immh == 0 would be a failure of the decode logic */
6874    g_assert(immh);
6875
6876    handle_simd_intfp_conv(s, rd, rn, elements, !is_u, fracbits, size);
6877}
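
/* Worked decode (illustrative): for a single-precision UCVTF #24,
 * immh:immb == 0101:000, so immhb == 40 and fracbits == 64 - 40 == 24;
 * for double precision the bias is 128 rather than 64.
 */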
6878
6879/* FCVTZS, FCVTZU - FP to fixed-point conversion */
6880static void handle_simd_shift_fpint_conv(DisasContext *s, bool is_scalar,
6881                                         bool is_q, bool is_u,
6882                                         int immh, int immb, int rn, int rd)
6883{
6884    bool is_double = extract32(immh, 3, 1);
6885    int immhb = immh << 3 | immb;
6886    int fracbits = (is_double ? 128 : 64) - immhb;
6887    int pass;
6888    TCGv_ptr tcg_fpstatus;
6889    TCGv_i32 tcg_rmode, tcg_shift;
6890
6891    if (!extract32(immh, 2, 2)) {
6892        unallocated_encoding(s);
6893        return;
6894    }
6895
6896    if (!is_scalar && !is_q && is_double) {
6897        unallocated_encoding(s);
6898        return;
6899    }
6900
6901    if (!fp_access_check(s)) {
6902        return;
6903    }
6904
6905    assert(!(is_scalar && is_q));
6906
6907    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(FPROUNDING_ZERO));
6908    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
6909    tcg_fpstatus = get_fpstatus_ptr();
6910    tcg_shift = tcg_const_i32(fracbits);
6911
6912    if (is_double) {
6913        int maxpass = is_scalar ? 1 : 2;
6914
6915        for (pass = 0; pass < maxpass; pass++) {
6916            TCGv_i64 tcg_op = tcg_temp_new_i64();
6917
6918            read_vec_element(s, tcg_op, rn, pass, MO_64);
6919            if (is_u) {
6920                gen_helper_vfp_touqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
6921            } else {
6922                gen_helper_vfp_tosqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
6923            }
6924            write_vec_element(s, tcg_op, rd, pass, MO_64);
6925            tcg_temp_free_i64(tcg_op);
6926        }
6927        if (!is_q) {
6928            clear_vec_high(s, rd);
6929        }
6930    } else {
6931        int maxpass = is_scalar ? 1 : is_q ? 4 : 2;
6932        for (pass = 0; pass < maxpass; pass++) {
6933            TCGv_i32 tcg_op = tcg_temp_new_i32();
6934
6935            read_vec_element_i32(s, tcg_op, rn, pass, MO_32);
6936            if (is_u) {
6937                gen_helper_vfp_touls(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
6938            } else {
6939                gen_helper_vfp_tosls(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
6940            }
6941            if (is_scalar) {
6942                write_fp_sreg(s, rd, tcg_op);
6943            } else {
6944                write_vec_element_i32(s, tcg_op, rd, pass, MO_32);
6945            }
6946            tcg_temp_free_i32(tcg_op);
6947        }
6948        if (!is_q && !is_scalar) {
6949            clear_vec_high(s, rd);
6950        }
6951    }
6952
6953    tcg_temp_free_ptr(tcg_fpstatus);
6954    tcg_temp_free_i32(tcg_shift);
6955    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
6956    tcg_temp_free_i32(tcg_rmode);
6957}
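
/* Note: gen_helper_set_rmode returns the previous rounding mode in its
 * result operand, which is why it appears twice above: once to force
 * round-towards-zero for the FCVTZ* conversion, and once more with the
 * saved value to restore the guest's original rounding mode.
 */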
6958
6959/* C3.6.9 AdvSIMD scalar shift by immediate
6960 *  31 30  29 28         23 22  19 18  16 15    11  10 9    5 4    0
6961 * +-----+---+-------------+------+------+--------+---+------+------+
6962 * | 0 1 | U | 1 1 1 1 1 0 | immh | immb | opcode | 1 |  Rn  |  Rd  |
6963 * +-----+---+-------------+------+------+--------+---+------+------+
6964 *
6965 * This is the scalar version, so it works on fixed-size registers
6966 */
6967static void disas_simd_scalar_shift_imm(DisasContext *s, uint32_t insn)
6968{
6969    int rd = extract32(insn, 0, 5);
6970    int rn = extract32(insn, 5, 5);
6971    int opcode = extract32(insn, 11, 5);
6972    int immb = extract32(insn, 16, 3);
6973    int immh = extract32(insn, 19, 4);
6974    bool is_u = extract32(insn, 29, 1);
6975
6976    if (immh == 0) {
6977        unallocated_encoding(s);
6978        return;
6979    }
6980
6981    switch (opcode) {
6982    case 0x08: /* SRI */
6983        if (!is_u) {
6984            unallocated_encoding(s);
6985            return;
6986        }
6987        /* fall through */
6988    case 0x00: /* SSHR / USHR */
6989    case 0x02: /* SSRA / USRA */
6990    case 0x04: /* SRSHR / URSHR */
6991    case 0x06: /* SRSRA / URSRA */
6992        handle_scalar_simd_shri(s, is_u, immh, immb, opcode, rn, rd);
6993        break;
6994    case 0x0a: /* SHL / SLI */
6995        handle_scalar_simd_shli(s, is_u, immh, immb, opcode, rn, rd);
6996        break;
6997    case 0x1c: /* SCVTF, UCVTF */
6998        handle_simd_shift_intfp_conv(s, true, false, is_u, immh, immb,
6999                                     opcode, rn, rd);
7000        break;
7001    case 0x10: /* SQSHRUN, SQSHRUN2 */
7002    case 0x11: /* SQRSHRUN, SQRSHRUN2 */
7003        if (!is_u) {
7004            unallocated_encoding(s);
7005            return;
7006        }
7007        handle_vec_simd_sqshrn(s, true, false, false, true,
7008                               immh, immb, opcode, rn, rd);
7009        break;
7010    case 0x12: /* SQSHRN, SQSHRN2, UQSHRN */
7011    case 0x13: /* SQRSHRN, SQRSHRN2, UQRSHRN, UQRSHRN2 */
7012        handle_vec_simd_sqshrn(s, true, false, is_u, is_u,
7013                               immh, immb, opcode, rn, rd);
7014        break;
7015    case 0xc: /* SQSHLU */
7016        if (!is_u) {
7017            unallocated_encoding(s);
7018            return;
7019        }
7020        handle_simd_qshl(s, true, false, false, true, immh, immb, rn, rd);
7021        break;
7022    case 0xe: /* SQSHL, UQSHL */
7023        handle_simd_qshl(s, true, false, is_u, is_u, immh, immb, rn, rd);
7024        break;
7025    case 0x1f: /* FCVTZS, FCVTZU */
7026        handle_simd_shift_fpint_conv(s, true, false, is_u, immh, immb, rn, rd);
7027        break;
7028    default:
7029        unallocated_encoding(s);
7030        break;
7031    }
7032}
7033
7034/* C3.6.10 AdvSIMD scalar three different
7035 *  31 30  29 28       24 23  22  21 20  16 15    12 11 10 9    5 4    0
7036 * +-----+---+-----------+------+---+------+--------+-----+------+------+
7037 * | 0 1 | U | 1 1 1 1 0 | size | 1 |  Rm  | opcode | 0 0 |  Rn  |  Rd  |
7038 * +-----+---+-----------+------+---+------+--------+-----+------+------+
7039 */
7040static void disas_simd_scalar_three_reg_diff(DisasContext *s, uint32_t insn)
7041{
7042    bool is_u = extract32(insn, 29, 1);
7043    int size = extract32(insn, 22, 2);
7044    int opcode = extract32(insn, 12, 4);
7045    int rm = extract32(insn, 16, 5);
7046    int rn = extract32(insn, 5, 5);
7047    int rd = extract32(insn, 0, 5);
7048
7049    if (is_u) {
7050        unallocated_encoding(s);
7051        return;
7052    }
7053
7054    switch (opcode) {
7055    case 0x9: /* SQDMLAL, SQDMLAL2 */
7056    case 0xb: /* SQDMLSL, SQDMLSL2 */
7057    case 0xd: /* SQDMULL, SQDMULL2 */
7058        if (size == 0 || size == 3) {
7059            unallocated_encoding(s);
7060            return;
7061        }
7062        break;
7063    default:
7064        unallocated_encoding(s);
7065        return;
7066    }
7067
7068    if (!fp_access_check(s)) {
7069        return;
7070    }
7071
7072    if (size == 2) {
7073        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
7074        TCGv_i64 tcg_op2 = tcg_temp_new_i64();
7075        TCGv_i64 tcg_res = tcg_temp_new_i64();
7076
7077        read_vec_element(s, tcg_op1, rn, 0, MO_32 | MO_SIGN);
7078        read_vec_element(s, tcg_op2, rm, 0, MO_32 | MO_SIGN);
7079
7080        tcg_gen_mul_i64(tcg_res, tcg_op1, tcg_op2);
7081        gen_helper_neon_addl_saturate_s64(tcg_res, cpu_env, tcg_res, tcg_res);
7082
7083        switch (opcode) {
7084        case 0xd: /* SQDMULL, SQDMULL2 */
7085            break;
7086        case 0xb: /* SQDMLSL, SQDMLSL2 */
7087            tcg_gen_neg_i64(tcg_res, tcg_res);
7088            /* fall through */
7089        case 0x9: /* SQDMLAL, SQDMLAL2 */
7090            read_vec_element(s, tcg_op1, rd, 0, MO_64);
7091            gen_helper_neon_addl_saturate_s64(tcg_res, cpu_env,
7092                                              tcg_res, tcg_op1);
7093            break;
7094        default:
7095            g_assert_not_reached();
7096        }
7097
7098        write_fp_dreg(s, rd, tcg_res);
7099
7100        tcg_temp_free_i64(tcg_op1);
7101        tcg_temp_free_i64(tcg_op2);
7102        tcg_temp_free_i64(tcg_res);
7103    } else {
7104        TCGv_i32 tcg_op1 = tcg_temp_new_i32();
7105        TCGv_i32 tcg_op2 = tcg_temp_new_i32();
7106        TCGv_i64 tcg_res = tcg_temp_new_i64();
7107
7108        read_vec_element_i32(s, tcg_op1, rn, 0, MO_16);
7109        read_vec_element_i32(s, tcg_op2, rm, 0, MO_16);
7110
7111        gen_helper_neon_mull_s16(tcg_res, tcg_op1, tcg_op2);
7112        gen_helper_neon_addl_saturate_s32(tcg_res, cpu_env, tcg_res, tcg_res);
7113
7114        switch (opcode) {
7115        case 0xd: /* SQDMULL, SQDMULL2 */
7116            break;
7117        case 0xb: /* SQDMLSL, SQDMLSL2 */
7118            gen_helper_neon_negl_u32(tcg_res, tcg_res);
7119            /* fall through */
7120        case 0x9: /* SQDMLAL, SQDMLAL2 */
7121        {
7122            TCGv_i64 tcg_op3 = tcg_temp_new_i64();
7123            read_vec_element(s, tcg_op3, rd, 0, MO_32);
7124            gen_helper_neon_addl_saturate_s32(tcg_res, cpu_env,
7125                                              tcg_res, tcg_op3);
7126            tcg_temp_free_i64(tcg_op3);
7127            break;
7128        }
7129        default:
7130            g_assert_not_reached();
7131        }
7132
7133        tcg_gen_ext32u_i64(tcg_res, tcg_res);
7134        write_fp_dreg(s, rd, tcg_res);
7135
7136        tcg_temp_free_i32(tcg_op1);
7137        tcg_temp_free_i32(tcg_op2);
7138        tcg_temp_free_i64(tcg_res);
7139    }
7140}
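
/* Note (illustrative): the "doubling" in SQDMULL is done by saturating-
 * adding the product to itself, e.g. for the 32x32->64 case above:
 *   tcg_gen_mul_i64(tcg_res, tcg_op1, tcg_op2);
 *   gen_helper_neon_addl_saturate_s64(tcg_res, cpu_env, tcg_res, tcg_res);
 * which also sets QC when 2*a*b overflows (the a == b == INT32_MIN case).
 */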
7141
7142static void handle_3same_64(DisasContext *s, int opcode, bool u,
7143                            TCGv_i64 tcg_rd, TCGv_i64 tcg_rn, TCGv_i64 tcg_rm)
7144{
7145    /* Handle 64x64->64 opcodes which are shared between the scalar
7146     * and vector 3-same groups. We cover every opcode where size == 3
7147     * is valid in either the three-reg-same (integer, not pairwise)
7148     * or scalar-three-reg-same groups. (Some opcodes are not yet
7149     * implemented.)
7150     */
7151    TCGCond cond;
7152
7153    switch (opcode) {
7154    case 0x1: /* SQADD */
7155        if (u) {
7156            gen_helper_neon_qadd_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
7157        } else {
7158            gen_helper_neon_qadd_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
7159        }
7160        break;
7161    case 0x5: /* SQSUB */
7162        if (u) {
7163            gen_helper_neon_qsub_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
7164        } else {
7165            gen_helper_neon_qsub_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
7166        }
7167        break;
7168    case 0x6: /* CMGT, CMHI */
7169        /* 64 bit integer comparison, result = test ? (2^64 - 1) : 0.
7170         * We implement this using setcond (test) and then negating.
7171         */
7172        cond = u ? TCG_COND_GTU : TCG_COND_GT;
7173    do_cmop:
7174        tcg_gen_setcond_i64(cond, tcg_rd, tcg_rn, tcg_rm);
7175        tcg_gen_neg_i64(tcg_rd, tcg_rd);
7176        break;
7177    case 0x7: /* CMGE, CMHS */
7178        cond = u ? TCG_COND_GEU : TCG_COND_GE;
7179        goto do_cmop;
7180    case 0x11: /* CMTST, CMEQ */
7181        if (u) {
7182            cond = TCG_COND_EQ;
7183            goto do_cmop;
7184        }
7185        /* CMTST : test is "if ((X & Y) != 0)". */
7186        tcg_gen_and_i64(tcg_rd, tcg_rn, tcg_rm);
7187        tcg_gen_setcondi_i64(TCG_COND_NE, tcg_rd, tcg_rd, 0);
7188        tcg_gen_neg_i64(tcg_rd, tcg_rd);
7189        break;
7190    case 0x8: /* SSHL, USHL */
7191        if (u) {
7192            gen_helper_neon_shl_u64(tcg_rd, tcg_rn, tcg_rm);
7193        } else {
7194            gen_helper_neon_shl_s64(tcg_rd, tcg_rn, tcg_rm);
7195        }
7196        break;
7197    case 0x9: /* SQSHL, UQSHL */
7198        if (u) {
7199            gen_helper_neon_qshl_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
7200        } else {
7201            gen_helper_neon_qshl_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
7202        }
7203        break;
7204    case 0xa: /* SRSHL, URSHL */
7205        if (u) {
7206            gen_helper_neon_rshl_u64(tcg_rd, tcg_rn, tcg_rm);
7207        } else {
7208            gen_helper_neon_rshl_s64(tcg_rd, tcg_rn, tcg_rm);
7209        }
7210        break;
7211    case 0xb: /* SQRSHL, UQRSHL */
7212        if (u) {
7213            gen_helper_neon_qrshl_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
7214        } else {
7215            gen_helper_neon_qrshl_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
7216        }
7217        break;
7218    case 0x10: /* ADD, SUB */
7219        if (u) {
7220            tcg_gen_sub_i64(tcg_rd, tcg_rn, tcg_rm);
7221        } else {
7222            tcg_gen_add_i64(tcg_rd, tcg_rn, tcg_rm);
7223        }
7224        break;
7225    default:
7226        g_assert_not_reached();
7227    }
7228}
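
/* Note: the setcond-then-negate idiom above relies on setcond producing
 * 0 or 1; negation then yields the all-zeroes / all-ones element masks
 * the comparison insns must return:
 *   setcond: rd = (rn COND rm) ? 1 : 0
 *   neg:     rd = 0 or 0xffffffffffffffff
 */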
7229
7230/* Handle the 3-same-operands float operations; shared by the scalar
7231 * and vector encodings. The caller must filter out any opcodes
7232 * not allocated for the group it is dealing with.
7233 */
7234static void handle_3same_float(DisasContext *s, int size, int elements,
7235                               int fpopcode, int rd, int rn, int rm)
7236{
7237    int pass;
7238    TCGv_ptr fpst = get_fpstatus_ptr();
7239
7240    for (pass = 0; pass < elements; pass++) {
7241        if (size) {
7242            /* Double */
7243            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
7244            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
7245            TCGv_i64 tcg_res = tcg_temp_new_i64();
7246
7247            read_vec_element(s, tcg_op1, rn, pass, MO_64);
7248            read_vec_element(s, tcg_op2, rm, pass, MO_64);
7249
7250            switch (fpopcode) {
7251            case 0x39: /* FMLS */
7252                /* As usual for ARM, separate negation for fused multiply-add */
7253                gen_helper_vfp_negd(tcg_op1, tcg_op1);
7254                /* fall through */
7255            case 0x19: /* FMLA */
7256                read_vec_element(s, tcg_res, rd, pass, MO_64);
7257                gen_helper_vfp_muladdd(tcg_res, tcg_op1, tcg_op2,
7258                                       tcg_res, fpst);
7259                break;
7260            case 0x18: /* FMAXNM */
7261                gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
7262                break;
7263            case 0x1a: /* FADD */
7264                gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
7265                break;
7266            case 0x1b: /* FMULX */
7267                gen_helper_vfp_mulxd(tcg_res, tcg_op1, tcg_op2, fpst);
7268                break;
7269            case 0x1c: /* FCMEQ */
7270                gen_helper_neon_ceq_f64(tcg_res, tcg_op1, tcg_op2, fpst);
7271                break;
7272            case 0x1e: /* FMAX */
7273                gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
7274                break;
7275            case 0x1f: /* FRECPS */
7276                gen_helper_recpsf_f64(tcg_res, tcg_op1, tcg_op2, fpst);
7277                break;
7278            case 0x38: /* FMINNM */
7279                gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
7280                break;
7281            case 0x3a: /* FSUB */
7282                gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
7283                break;
7284            case 0x3e: /* FMIN */
7285                gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
7286                break;
7287            case 0x3f: /* FRSQRTS */
7288                gen_helper_rsqrtsf_f64(tcg_res, tcg_op1, tcg_op2, fpst);
7289                break;
7290            case 0x5b: /* FMUL */
7291                gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
7292                break;
7293            case 0x5c: /* FCMGE */
7294                gen_helper_neon_cge_f64(tcg_res, tcg_op1, tcg_op2, fpst);
7295                break;
7296            case 0x5d: /* FACGE */
7297                gen_helper_neon_acge_f64(tcg_res, tcg_op1, tcg_op2, fpst);
7298                break;
7299            case 0x5f: /* FDIV */
7300                gen_helper_vfp_divd(tcg_res, tcg_op1, tcg_op2, fpst);
7301                break;
7302            case 0x7a: /* FABD */
7303                gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
7304                gen_helper_vfp_absd(tcg_res, tcg_res);
7305                break;
7306            case 0x7c: /* FCMGT */
7307                gen_helper_neon_cgt_f64(tcg_res, tcg_op1, tcg_op2, fpst);
7308                break;
7309            case 0x7d: /* FACGT */
7310                gen_helper_neon_acgt_f64(tcg_res, tcg_op1, tcg_op2, fpst);
7311                break;
7312            default:
7313                g_assert_not_reached();
7314            }
7315
7316            write_vec_element(s, tcg_res, rd, pass, MO_64);
7317
7318            tcg_temp_free_i64(tcg_res);
7319            tcg_temp_free_i64(tcg_op1);
7320            tcg_temp_free_i64(tcg_op2);
7321        } else {
7322            /* Single */
7323            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
7324            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
7325            TCGv_i32 tcg_res = tcg_temp_new_i32();
7326
7327            read_vec_element_i32(s, tcg_op1, rn, pass, MO_32);
7328            read_vec_element_i32(s, tcg_op2, rm, pass, MO_32);
7329
7330            switch (fpopcode) {
7331            case 0x39: /* FMLS */
7332                /* As usual for ARM, separate negation for fused multiply-add */
7333                gen_helper_vfp_negs(tcg_op1, tcg_op1);
7334                /* fall through */
7335            case 0x19: /* FMLA */
7336                read_vec_element_i32(s, tcg_res, rd, pass, MO_32);
7337                gen_helper_vfp_muladds(tcg_res, tcg_op1, tcg_op2,
7338                                       tcg_res, fpst);
7339                break;
7340            case 0x1a: /* FADD */
7341                gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
7342                break;
7343            case 0x1b: /* FMULX */
7344                gen_helper_vfp_mulxs(tcg_res, tcg_op1, tcg_op2, fpst);
7345                break;
7346            case 0x1c: /* FCMEQ */
7347                gen_helper_neon_ceq_f32(tcg_res, tcg_op1, tcg_op2, fpst);
7348                break;
7349            case 0x1e: /* FMAX */
7350                gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
7351                break;
7352            case 0x1f: /* FRECPS */
7353                gen_helper_recpsf_f32(tcg_res, tcg_op1, tcg_op2, fpst);
7354                break;
7355            case 0x18: /* FMAXNM */
7356                gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
7357                break;
7358            case 0x38: /* FMINNM */
7359                gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
7360                break;
7361            case 0x3a: /* FSUB */
7362                gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
7363                break;
7364            case 0x3e: /* FMIN */
7365                gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
7366                break;
7367            case 0x3f: /* FRSQRTS */
7368                gen_helper_rsqrtsf_f32(tcg_res, tcg_op1, tcg_op2, fpst);
7369                break;
7370            case 0x5b: /* FMUL */
7371                gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
7372                break;
7373            case 0x5c: /* FCMGE */
7374                gen_helper_neon_cge_f32(tcg_res, tcg_op1, tcg_op2, fpst);
7375                break;
7376            case 0x5d: /* FACGE */
7377                gen_helper_neon_acge_f32(tcg_res, tcg_op1, tcg_op2, fpst);
7378                break;
7379            case 0x5f: /* FDIV */
7380                gen_helper_vfp_divs(tcg_res, tcg_op1, tcg_op2, fpst);
7381                break;
7382            case 0x7a: /* FABD */
7383                gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
7384                gen_helper_vfp_abss(tcg_res, tcg_res);
7385                break;
7386            case 0x7c: /* FCMGT */
7387                gen_helper_neon_cgt_f32(tcg_res, tcg_op1, tcg_op2, fpst);
7388                break;
7389            case 0x7d: /* FACGT */
7390                gen_helper_neon_acgt_f32(tcg_res, tcg_op1, tcg_op2, fpst);
7391                break;
7392            default:
7393                g_assert_not_reached();
7394            }
7395
7396            if (elements == 1) {
7397                /* scalar single so clear high part */
7398                TCGv_i64 tcg_tmp = tcg_temp_new_i64();
7399
7400                tcg_gen_extu_i32_i64(tcg_tmp, tcg_res);
7401                write_vec_element(s, tcg_tmp, rd, pass, MO_64);
7402                tcg_temp_free_i64(tcg_tmp);
7403            } else {
7404                write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
7405            }
7406
7407            tcg_temp_free_i32(tcg_res);
7408            tcg_temp_free_i32(tcg_op1);
7409            tcg_temp_free_i32(tcg_op2);
7410        }
7411    }
7412
7413    tcg_temp_free_ptr(fpst);
7414
7415    if ((elements << size) < 4) {
7416        /* scalar, or non-quad vector op */
7417        clear_vec_high(s, rd);
7418    }
7419}
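
/* Two compositions in the table above worth noting: FABD is an fp
 * subtract followed by an fp absolute value, and FMLS negates one
 * multiplicand before the fused multiply-add rather than negating the
 * product, as the Arm pseudocode requires.
 */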
7420
7421/* C3.6.11 AdvSIMD scalar three same
7422 *  31 30  29 28       24 23  22  21 20  16 15    11  10 9    5 4    0
7423 * +-----+---+-----------+------+---+------+--------+---+------+------+
7424 * | 0 1 | U | 1 1 1 1 0 | size | 1 |  Rm  | opcode | 1 |  Rn  |  Rd  |
7425 * +-----+---+-----------+------+---+------+--------+---+------+------+
7426 */
7427static void disas_simd_scalar_three_reg_same(DisasContext *s, uint32_t insn)
7428{
7429    int rd = extract32(insn, 0, 5);
7430    int rn = extract32(insn, 5, 5);
7431    int opcode = extract32(insn, 11, 5);
7432    int rm = extract32(insn, 16, 5);
7433    int size = extract32(insn, 22, 2);
7434    bool u = extract32(insn, 29, 1);
7435    TCGv_i64 tcg_rd;
7436
7437    if (opcode >= 0x18) {
7438        /* Floating point: U, size[1] and opcode indicate operation */
7439        int fpopcode = opcode | (extract32(size, 1, 1) << 5) | (u << 6);
7440        switch (fpopcode) {
7441        case 0x1b: /* FMULX */
7442        case 0x1f: /* FRECPS */
7443        case 0x3f: /* FRSQRTS */
7444        case 0x5d: /* FACGE */
7445        case 0x7d: /* FACGT */
7446        case 0x1c: /* FCMEQ */
7447        case 0x5c: /* FCMGE */
7448        case 0x7c: /* FCMGT */
7449        case 0x7a: /* FABD */
7450            break;
7451        default:
7452            unallocated_encoding(s);
7453            return;
7454        }
7455
7456        if (!fp_access_check(s)) {
7457            return;
7458        }
7459
7460        handle_3same_float(s, extract32(size, 0, 1), 1, fpopcode, rd, rn, rm);
7461        return;
7462    }
7463
7464    switch (opcode) {
7465    case 0x1: /* SQADD, UQADD */
7466    case 0x5: /* SQSUB, UQSUB */
7467    case 0x9: /* SQSHL, UQSHL */
7468    case 0xb: /* SQRSHL, UQRSHL */
7469        break;
7470    case 0x8: /* SSHL, USHL */
7471    case 0xa: /* SRSHL, URSHL */
7472    case 0x6: /* CMGT, CMHI */
7473    case 0x7: /* CMGE, CMHS */
7474    case 0x11: /* CMTST, CMEQ */
7475    case 0x10: /* ADD, SUB (vector) */
7476        if (size != 3) {
7477            unallocated_encoding(s);
7478            return;
7479        }
7480        break;
7481    case 0x16: /* SQDMULH, SQRDMULH (vector) */
7482        if (size != 1 && size != 2) {
7483            unallocated_encoding(s);
7484            return;
7485        }
7486        break;
7487    default:
7488        unallocated_encoding(s);
7489        return;
7490    }
7491
7492    if (!fp_access_check(s)) {
7493        return;
7494    }
7495
7496    tcg_rd = tcg_temp_new_i64();
7497
7498    if (size == 3) {
7499        TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
7500        TCGv_i64 tcg_rm = read_fp_dreg(s, rm);
7501
7502        handle_3same_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rm);
7503        tcg_temp_free_i64(tcg_rn);
7504        tcg_temp_free_i64(tcg_rm);
7505    } else {
7506        /* Do a single operation on the lowest element in the vector.
7507         * We use the standard Neon helpers and rely on 0 OP 0 == 0 with
7508         * no side effects for all these operations.
7509         * OPTME: special-purpose helpers would avoid doing some
7510         * unnecessary work in the helper for the 8 and 16 bit cases.
7511         */
7512        NeonGenTwoOpEnvFn *genenvfn;
7513        TCGv_i32 tcg_rn = tcg_temp_new_i32();
7514        TCGv_i32 tcg_rm = tcg_temp_new_i32();
7515        TCGv_i32 tcg_rd32 = tcg_temp_new_i32();
7516
7517        read_vec_element_i32(s, tcg_rn, rn, 0, size);
7518        read_vec_element_i32(s, tcg_rm, rm, 0, size);
7519
7520        switch (opcode) {
7521        case 0x1: /* SQADD, UQADD */
7522        {
7523            static NeonGenTwoOpEnvFn * const fns[3][2] = {
7524                { gen_helper_neon_qadd_s8, gen_helper_neon_qadd_u8 },
7525                { gen_helper_neon_qadd_s16, gen_helper_neon_qadd_u16 },
7526                { gen_helper_neon_qadd_s32, gen_helper_neon_qadd_u32 },
7527            };
7528            genenvfn = fns[size][u];
7529            break;
7530        }
7531        case 0x5: /* SQSUB, UQSUB */
7532        {
7533            static NeonGenTwoOpEnvFn * const fns[3][2] = {
7534                { gen_helper_neon_qsub_s8, gen_helper_neon_qsub_u8 },
7535                { gen_helper_neon_qsub_s16, gen_helper_neon_qsub_u16 },
7536                { gen_helper_neon_qsub_s32, gen_helper_neon_qsub_u32 },
7537            };
7538            genenvfn = fns[size][u];
7539            break;
7540        }
7541        case 0x9: /* SQSHL, UQSHL */
7542        {
7543            static NeonGenTwoOpEnvFn * const fns[3][2] = {
7544                { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 },
7545                { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 },
7546                { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 },
7547            };
7548            genenvfn = fns[size][u];
7549            break;
7550        }
7551        case 0xb: /* SQRSHL, UQRSHL */
7552        {
7553            static NeonGenTwoOpEnvFn * const fns[3][2] = {
7554                { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 },
7555                { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 },
7556                { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 },
7557            };
7558            genenvfn = fns[size][u];
7559            break;
7560        }
7561        case 0x16: /* SQDMULH, SQRDMULH */
7562        {
7563            static NeonGenTwoOpEnvFn * const fns[2][2] = {
7564                { gen_helper_neon_qdmulh_s16, gen_helper_neon_qrdmulh_s16 },
7565                { gen_helper_neon_qdmulh_s32, gen_helper_neon_qrdmulh_s32 },
7566            };
7567            assert(size == 1 || size == 2);
7568            genenvfn = fns[size - 1][u];
7569            break;
7570        }
7571        default:
7572            g_assert_not_reached();
7573        }
7574
7575        genenvfn(tcg_rd32, cpu_env, tcg_rn, tcg_rm);
7576        tcg_gen_extu_i32_i64(tcg_rd, tcg_rd32);
7577        tcg_temp_free_i32(tcg_rd32);
7578        tcg_temp_free_i32(tcg_rn);
7579        tcg_temp_free_i32(tcg_rm);
7580    }
7581
7582    write_fp_dreg(s, rd, tcg_rd);
7583
7584    tcg_temp_free_i64(tcg_rd);
7585}
7586
7587static void handle_2misc_64(DisasContext *s, int opcode, bool u,
7588                            TCGv_i64 tcg_rd, TCGv_i64 tcg_rn,
7589                            TCGv_i32 tcg_rmode, TCGv_ptr tcg_fpstatus)
7590{
7591    /* Handle 64->64 opcodes which are shared between the scalar and
7592     * vector 2-reg-misc groups. We cover every integer opcode where size == 3
7593     * is valid in either group and also the double-precision fp ops.
7594     * The caller need only provide tcg_rmode and tcg_fpstatus if the op
7595     * requires them.
7596     */
7597    TCGCond cond;
7598
7599    switch (opcode) {
7600    case 0x4: /* CLS, CLZ */
7601        if (u) {
7602            tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64);
7603        } else {
7604            tcg_gen_clrsb_i64(tcg_rd, tcg_rn);
7605        }
7606        break;
7607    case 0x5: /* NOT */
7608        /* This opcode is shared with CNT and RBIT but we have earlier
7609         * enforced that size == 3 if and only if this is the NOT insn.
7610         */
7611        tcg_gen_not_i64(tcg_rd, tcg_rn);
7612        break;
7613    case 0x7: /* SQABS, SQNEG */
7614        if (u) {
7615            gen_helper_neon_qneg_s64(tcg_rd, cpu_env, tcg_rn);
7616        } else {
7617            gen_helper_neon_qabs_s64(tcg_rd, cpu_env, tcg_rn);
7618        }
7619        break;
7620    case 0xa: /* CMLT */
7621        /* 64 bit integer comparison against zero, result is
7622         * test ? (2^64 - 1) : 0. We implement this via setcond(test)
7623         * and then negating.
7624         */
7625        cond = TCG_COND_LT;
7626    do_cmop:
7627        tcg_gen_setcondi_i64(cond, tcg_rd, tcg_rn, 0);
7628        tcg_gen_neg_i64(tcg_rd, tcg_rd);
7629        break;
7630    case 0x8: /* CMGT, CMGE */
7631        cond = u ? TCG_COND_GE : TCG_COND_GT;
7632        goto do_cmop;
7633    case 0x9: /* CMEQ, CMLE */
7634        cond = u ? TCG_COND_LE : TCG_COND_EQ;
7635        goto do_cmop;
7636    case 0xb: /* ABS, NEG */
7637        if (u) {
7638            tcg_gen_neg_i64(tcg_rd, tcg_rn);
7639        } else {
7640            TCGv_i64 tcg_zero = tcg_const_i64(0);
7641            tcg_gen_neg_i64(tcg_rd, tcg_rn);
7642            tcg_gen_movcond_i64(TCG_COND_GT, tcg_rd, tcg_rn, tcg_zero,
7643                                tcg_rn, tcg_rd);
7644            tcg_temp_free_i64(tcg_zero);
7645        }
7646        break;
7647    case 0x2f: /* FABS */
7648        gen_helper_vfp_absd(tcg_rd, tcg_rn);
7649        break;
7650    case 0x6f: /* FNEG */
7651        gen_helper_vfp_negd(tcg_rd, tcg_rn);
7652        break;
7653    case 0x7f: /* FSQRT */
7654        gen_helper_vfp_sqrtd(tcg_rd, tcg_rn, cpu_env);
7655        break;
7656    case 0x1a: /* FCVTNS */
7657    case 0x1b: /* FCVTMS */
7658    case 0x1c: /* FCVTAS */
7659    case 0x3a: /* FCVTPS */
7660    case 0x3b: /* FCVTZS */
7661    {
7662        TCGv_i32 tcg_shift = tcg_const_i32(0);
7663        gen_helper_vfp_tosqd(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
7664        tcg_temp_free_i32(tcg_shift);
7665        break;
7666    }
7667    case 0x5a: /* FCVTNU */
7668    case 0x5b: /* FCVTMU */
7669    case 0x5c: /* FCVTAU */
7670    case 0x7a: /* FCVTPU */
7671    case 0x7b: /* FCVTZU */
7672    {
7673        TCGv_i32 tcg_shift = tcg_const_i32(0);
7674        gen_helper_vfp_touqd(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
7675        tcg_temp_free_i32(tcg_shift);
7676        break;
7677    }
7678    case 0x18: /* FRINTN */
7679    case 0x19: /* FRINTM */
7680    case 0x38: /* FRINTP */
7681    case 0x39: /* FRINTZ */
7682    case 0x58: /* FRINTA */
7683    case 0x79: /* FRINTI */
7684        gen_helper_rintd(tcg_rd, tcg_rn, tcg_fpstatus);
7685        break;
7686    case 0x59: /* FRINTX */
7687        gen_helper_rintd_exact(tcg_rd, tcg_rn, tcg_fpstatus);
7688        break;
7689    default:
7690        g_assert_not_reached();
7691    }
7692}
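
/* Note: the ABS case above is branchless; it computes rd = -rn and then
 * uses movcond to select rd = (rn > 0) ? rn : -rn. For rn == 0 the
 * negated value is also 0, so using GT rather than GE is harmless.
 */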
7693
7694static void handle_2misc_fcmp_zero(DisasContext *s, int opcode,
7695                                   bool is_scalar, bool is_u, bool is_q,
7696                                   int size, int rn, int rd)
7697{
7698    bool is_double = (size == 3);
7699    TCGv_ptr fpst;
7700
7701    if (!fp_access_check(s)) {
7702        return;
7703    }
7704
7705    fpst = get_fpstatus_ptr();
7706
7707    if (is_double) {
7708        TCGv_i64 tcg_op = tcg_temp_new_i64();
7709        TCGv_i64 tcg_zero = tcg_const_i64(0);
7710        TCGv_i64 tcg_res = tcg_temp_new_i64();
7711        NeonGenTwoDoubleOPFn *genfn;
7712        bool swap = false;
7713        int pass;
7714
7715        switch (opcode) {
7716        case 0x2e: /* FCMLT (zero) */
7717            swap = true;
7718            /* fall through */
7719        case 0x2c: /* FCMGT (zero) */
7720            genfn = gen_helper_neon_cgt_f64;
7721            break;
7722        case 0x2d: /* FCMEQ (zero) */
7723            genfn = gen_helper_neon_ceq_f64;
7724            break;
7725        case 0x6d: /* FCMLE (zero) */
7726            swap = true;
7727            /* fall through */
7728        case 0x6c: /* FCMGE (zero) */
7729            genfn = gen_helper_neon_cge_f64;
7730            break;
7731        default:
7732            g_assert_not_reached();
7733        }
7734
7735        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
7736            read_vec_element(s, tcg_op, rn, pass, MO_64);
7737            if (swap) {
7738                genfn(tcg_res, tcg_zero, tcg_op, fpst);
7739            } else {
7740                genfn(tcg_res, tcg_op, tcg_zero, fpst);
7741            }
7742            write_vec_element(s, tcg_res, rd, pass, MO_64);
7743        }
7744        if (is_scalar) {
7745            clear_vec_high(s, rd);
7746        }
7747
7748        tcg_temp_free_i64(tcg_res);
7749        tcg_temp_free_i64(tcg_zero);
7750        tcg_temp_free_i64(tcg_op);
7751    } else {
7752        TCGv_i32 tcg_op = tcg_temp_new_i32();
7753        TCGv_i32 tcg_zero = tcg_const_i32(0);
7754        TCGv_i32 tcg_res = tcg_temp_new_i32();
7755        NeonGenTwoSingleOPFn *genfn;
7756        bool swap = false;
7757        int pass, maxpasses;
7758
7759        switch (opcode) {
7760        case 0x2e: /* FCMLT (zero) */
7761            swap = true;
7762            /* fall through */
7763        case 0x2c: /* FCMGT (zero) */
7764            genfn = gen_helper_neon_cgt_f32;
7765            break;
7766        case 0x2d: /* FCMEQ (zero) */
7767            genfn = gen_helper_neon_ceq_f32;
7768            break;
7769        case 0x6d: /* FCMLE (zero) */
7770            swap = true;
7771            /* fall through */
7772        case 0x6c: /* FCMGE (zero) */
7773            genfn = gen_helper_neon_cge_f32;
7774            break;
7775        default:
7776            g_assert_not_reached();
7777        }
7778
7779        if (is_scalar) {
7780            maxpasses = 1;
7781        } else {
7782            maxpasses = is_q ? 4 : 2;
7783        }
7784
7785        for (pass = 0; pass < maxpasses; pass++) {
7786            read_vec_element_i32(s, tcg_op, rn, pass, MO_32);
7787            if (swap) {
7788                genfn(tcg_res, tcg_zero, tcg_op, fpst);
7789            } else {
7790                genfn(tcg_res, tcg_op, tcg_zero, fpst);
7791            }
7792            if (is_scalar) {
7793                write_fp_sreg(s, rd, tcg_res);
7794            } else {
7795                write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
7796            }
7797        }
7798        tcg_temp_free_i32(tcg_res);
7799        tcg_temp_free_i32(tcg_zero);
7800        tcg_temp_free_i32(tcg_op);
7801        if (!is_q && !is_scalar) {
7802            clear_vec_high(s, rd);
7803        }
7804    }
7805
7806    tcg_temp_free_ptr(fpst);
7807}
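
/* Note: only "greater than" and "greater or equal" helpers exist, so the
 * less-than comparisons above are synthesized by swapping the operands:
 * FCMLT (zero) computes (0.0 > op) via the CGT helper, and FCMLE (zero)
 * computes (0.0 >= op) via the CGE helper.
 */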
7808
7809static void handle_2misc_reciprocal(DisasContext *s, int opcode,
7810                                    bool is_scalar, bool is_u, bool is_q,
7811                                    int size, int rn, int rd)
7812{
7813    bool is_double = (size == 3);
7814    TCGv_ptr fpst = get_fpstatus_ptr();
7815
7816    if (is_double) {
7817        TCGv_i64 tcg_op = tcg_temp_new_i64();
7818        TCGv_i64 tcg_res = tcg_temp_new_i64();
7819        int pass;
7820
7821        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
7822            read_vec_element(s, tcg_op, rn, pass, MO_64);
7823            switch (opcode) {
7824            case 0x3d: /* FRECPE */
7825                gen_helper_recpe_f64(tcg_res, tcg_op, fpst);
7826                break;
7827            case 0x3f: /* FRECPX */
7828                gen_helper_frecpx_f64(tcg_res, tcg_op, fpst);
7829                break;
7830            case 0x7d: /* FRSQRTE */
7831                gen_helper_rsqrte_f64(tcg_res, tcg_op, fpst);
7832                break;
7833            default:
7834                g_assert_not_reached();
7835            }
7836            write_vec_element(s, tcg_res, rd, pass, MO_64);
7837        }
7838        if (is_scalar) {
7839            clear_vec_high(s, rd);
7840        }
7841
7842        tcg_temp_free_i64(tcg_res);
7843        tcg_temp_free_i64(tcg_op);
7844    } else {
7845        TCGv_i32 tcg_op = tcg_temp_new_i32();
7846        TCGv_i32 tcg_res = tcg_temp_new_i32();
7847        int pass, maxpasses;
7848
7849        if (is_scalar) {
7850            maxpasses = 1;
7851        } else {
7852            maxpasses = is_q ? 4 : 2;
7853        }
7854
7855        for (pass = 0; pass < maxpasses; pass++) {
7856            read_vec_element_i32(s, tcg_op, rn, pass, MO_32);
7857
7858            switch (opcode) {
7859            case 0x3c: /* URECPE */
7860                gen_helper_recpe_u32(tcg_res, tcg_op, fpst);
7861                break;
7862            case 0x3d: /* FRECPE */
7863                gen_helper_recpe_f32(tcg_res, tcg_op, fpst);
7864                break;
7865            case 0x3f: /* FRECPX */
7866                gen_helper_frecpx_f32(tcg_res, tcg_op, fpst);
7867                break;
7868            case 0x7d: /* FRSQRTE */
7869                gen_helper_rsqrte_f32(tcg_res, tcg_op, fpst);
7870                break;
7871            default:
7872                g_assert_not_reached();
7873            }
7874
7875            if (is_scalar) {
7876                write_fp_sreg(s, rd, tcg_res);
7877            } else {
7878                write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
7879            }
7880        }
7881        tcg_temp_free_i32(tcg_res);
7882        tcg_temp_free_i32(tcg_op);
7883        if (!is_q && !is_scalar) {
7884            clear_vec_high(s, rd);
7885        }
7886    }
7887    tcg_temp_free_ptr(fpst);
7888}
7889
7890static void handle_2misc_narrow(DisasContext *s, bool scalar,
7891                                int opcode, bool u, bool is_q,
7892                                int size, int rn, int rd)
7893{
7894    /* Handle 2-reg-misc ops which are narrowing (so each 2*size element
7895     * in the source becomes a size element in the destination).
7896     */
7897    int pass;
7898    TCGv_i32 tcg_res[2];
7899    int destelt = is_q ? 2 : 0;
7900    int passes = scalar ? 1 : 2;
7901
7902    if (scalar) {
7903        tcg_res[1] = tcg_const_i32(0);
7904    }
7905
7906    for (pass = 0; pass < passes; pass++) {
7907        TCGv_i64 tcg_op = tcg_temp_new_i64();
7908        NeonGenNarrowFn *genfn = NULL;
7909        NeonGenNarrowEnvFn *genenvfn = NULL;
7910
7911        if (scalar) {
7912            read_vec_element(s, tcg_op, rn, pass, size + 1);
7913        } else {
7914            read_vec_element(s, tcg_op, rn, pass, MO_64);
7915        }
7916        tcg_res[pass] = tcg_temp_new_i32();
7917
7918        switch (opcode) {
7919        case 0x12: /* XTN, SQXTUN */
7920        {
7921            static NeonGenNarrowFn * const xtnfns[3] = {
7922                gen_helper_neon_narrow_u8,
7923                gen_helper_neon_narrow_u16,
7924                tcg_gen_extrl_i64_i32,
7925            };
7926            static NeonGenNarrowEnvFn * const sqxtunfns[3] = {
7927                gen_helper_neon_unarrow_sat8,
7928                gen_helper_neon_unarrow_sat16,
7929                gen_helper_neon_unarrow_sat32,
7930            };
7931            if (u) {
7932                genenvfn = sqxtunfns[size];
7933            } else {
7934                genfn = xtnfns[size];
7935            }
7936            break;
7937        }
7938        case 0x14: /* SQXTN, UQXTN */
7939        {
7940            static NeonGenNarrowEnvFn * const fns[3][2] = {
7941                { gen_helper_neon_narrow_sat_s8,
7942                  gen_helper_neon_narrow_sat_u8 },
7943                { gen_helper_neon_narrow_sat_s16,
7944                  gen_helper_neon_narrow_sat_u16 },
7945                { gen_helper_neon_narrow_sat_s32,
7946                  gen_helper_neon_narrow_sat_u32 },
7947            };
7948            genenvfn = fns[size][u];
7949            break;
7950        }
7951        case 0x16: /* FCVTN, FCVTN2 */
7952            /* 32 bit to 16 bit or 64 bit to 32 bit float conversion */
7953            if (size == 2) {
7954                gen_helper_vfp_fcvtsd(tcg_res[pass], tcg_op, cpu_env);
7955            } else {
7956                TCGv_i32 tcg_lo = tcg_temp_new_i32();
7957                TCGv_i32 tcg_hi = tcg_temp_new_i32();
7958                tcg_gen_extr_i64_i32(tcg_lo, tcg_hi, tcg_op);
7959                gen_helper_vfp_fcvt_f32_to_f16(tcg_lo, tcg_lo, cpu_env);
7960                gen_helper_vfp_fcvt_f32_to_f16(tcg_hi, tcg_hi, cpu_env);
7961                tcg_gen_deposit_i32(tcg_res[pass], tcg_lo, tcg_hi, 16, 16);
7962                tcg_temp_free_i32(tcg_lo);
7963                tcg_temp_free_i32(tcg_hi);
7964            }
7965            break;
7966        case 0x56:  /* FCVTXN, FCVTXN2 */
7967            /* 64 bit to 32 bit float conversion
7968             * with von Neumann rounding (round to odd)
7969             */
7970            assert(size == 2);
7971            gen_helper_fcvtx_f64_to_f32(tcg_res[pass], tcg_op, cpu_env);
7972            break;
7973        default:
7974            g_assert_not_reached();
7975        }
7976
7977        if (genfn) {
7978            genfn(tcg_res[pass], tcg_op);
7979        } else if (genenvfn) {
7980            genenvfn(tcg_res[pass], cpu_env, tcg_op);
7981        }
7982
7983        tcg_temp_free_i64(tcg_op);
7984    }
7985
7986    for (pass = 0; pass < 2; pass++) {
7987        write_vec_element_i32(s, tcg_res[pass], rd, destelt + pass, MO_32);
7988        tcg_temp_free_i32(tcg_res[pass]);
7989    }
7990    if (!is_q) {
7991        clear_vec_high(s, rd);
7992    }
7993}
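
/* Sketch of the FCVTN single-to-half path above (illustrative): each
 * 64-bit source element holds two packed single-precision floats;
 *   extr:    lo = op[31:0], hi = op[63:32]
 *   convert: lo = f32_to_f16(lo), hi = f32_to_f16(hi)
 *   deposit: res = lo | (hi << 16)
 * reassembles the two half-precision results into one 32-bit lane.
 */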
7994
7995/* Remaining saturating accumulating ops */
7996static void handle_2misc_satacc(DisasContext *s, bool is_scalar, bool is_u,
7997                                bool is_q, int size, int rn, int rd)
7998{
7999    bool is_double = (size == 3);
8000
8001    if (is_double) {
8002        TCGv_i64 tcg_rn = tcg_temp_new_i64();
8003        TCGv_i64 tcg_rd = tcg_temp_new_i64();
8004        int pass;
8005
8006        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
8007            read_vec_element(s, tcg_rn, rn, pass, MO_64);
8008            read_vec_element(s, tcg_rd, rd, pass, MO_64);
8009
8010            if (is_u) { /* USQADD */
8011                gen_helper_neon_uqadd_s64(tcg_rd, cpu_env, tcg_rn, tcg_rd);
8012            } else { /* SUQADD */
8013                gen_helper_neon_sqadd_u64(tcg_rd, cpu_env, tcg_rn, tcg_rd);
8014            }
8015            write_vec_element(s, tcg_rd, rd, pass, MO_64);
8016        }
8017        if (is_scalar) {
8018            clear_vec_high(s, rd);
8019        }
8020
8021        tcg_temp_free_i64(tcg_rd);
8022        tcg_temp_free_i64(tcg_rn);
8023    } else {
8024        TCGv_i32 tcg_rn = tcg_temp_new_i32();
8025        TCGv_i32 tcg_rd = tcg_temp_new_i32();
8026        int pass, maxpasses;
8027
8028        if (is_scalar) {
8029            maxpasses = 1;
8030        } else {
8031            maxpasses = is_q ? 4 : 2;
8032        }
8033
8034        for (pass = 0; pass < maxpasses; pass++) {
8035            if (is_scalar) {
8036                read_vec_element_i32(s, tcg_rn, rn, pass, size);
8037                read_vec_element_i32(s, tcg_rd, rd, pass, size);
8038            } else {
8039                read_vec_element_i32(s, tcg_rn, rn, pass, MO_32);
8040                read_vec_element_i32(s, tcg_rd, rd, pass, MO_32);
8041            }
8042
8043            if (is_u) { /* USQADD */
8044                switch (size) {
8045                case 0:
8046                    gen_helper_neon_uqadd_s8(tcg_rd, cpu_env, tcg_rn, tcg_rd);
8047                    break;
8048                case 1:
8049                    gen_helper_neon_uqadd_s16(tcg_rd, cpu_env, tcg_rn, tcg_rd);
8050                    break;
8051                case 2:
8052                    gen_helper_neon_uqadd_s32(tcg_rd, cpu_env, tcg_rn, tcg_rd);
8053                    break;
8054                default:
8055                    g_assert_not_reached();
8056                }
8057            } else { /* SUQADD */
8058                switch (size) {
8059                case 0:
8060                    gen_helper_neon_sqadd_u8(tcg_rd, cpu_env, tcg_rn, tcg_rd);
8061                    break;
8062                case 1:
8063                    gen_helper_neon_sqadd_u16(tcg_rd, cpu_env, tcg_rn, tcg_rd);
8064                    break;
8065                case 2:
8066                    gen_helper_neon_sqadd_u32(tcg_rd, cpu_env, tcg_rn, tcg_rd);
8067                    break;
8068                default:
8069                    g_assert_not_reached();
8070                }
8071            }
8072
8073            if (is_scalar) {
8074                TCGv_i64 tcg_zero = tcg_const_i64(0);
8075                write_vec_element(s, tcg_zero, rd, 0, MO_64);
8076                tcg_temp_free_i64(tcg_zero);
8077            }
8078            write_vec_element_i32(s, tcg_rd, rd, pass, MO_32);
8079        }
8080
8081        if (!is_q) {
8082            clear_vec_high(s, rd);
8083        }
8084
8085        tcg_temp_free_i32(tcg_rd);
8086        tcg_temp_free_i32(tcg_rn);
8087    }
8088}
8089
8090/* C3.6.12 AdvSIMD scalar two reg misc
8091 *  31 30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
8092 * +-----+---+-----------+------+-----------+--------+-----+------+------+
8093 * | 0 1 | U | 1 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
8094 * +-----+---+-----------+------+-----------+--------+-----+------+------+
8095 */
8096static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn)
8097{
8098    int rd = extract32(insn, 0, 5);
8099    int rn = extract32(insn, 5, 5);
8100    int opcode = extract32(insn, 12, 5);
8101    int size = extract32(insn, 22, 2);
8102    bool u = extract32(insn, 29, 1);
8103    bool is_fcvt = false;
8104    int rmode;
8105    TCGv_i32 tcg_rmode;
8106    TCGv_ptr tcg_fpstatus;
8107
8108    switch (opcode) {
8109    case 0x3: /* USQADD / SUQADD */
8110        if (!fp_access_check(s)) {
8111            return;
8112        }
8113        handle_2misc_satacc(s, true, u, false, size, rn, rd);
8114        return;
8115    case 0x7: /* SQABS / SQNEG */
8116        break;
8117    case 0xa: /* CMLT */
8118        if (u) {
8119            unallocated_encoding(s);
8120            return;
8121        }
8122        /* fall through */
8123    case 0x8: /* CMGT, CMGE */
8124    case 0x9: /* CMEQ, CMLE */
8125    case 0xb: /* ABS, NEG */
8126        if (size != 3) {
8127            unallocated_encoding(s);
8128            return;
8129        }
8130        break;
8131    case 0x12: /* SQXTUN */
8132        if (!u) {
8133            unallocated_encoding(s);
8134            return;
8135        }
8136        /* fall through */
8137    case 0x14: /* SQXTN, UQXTN */
8138        if (size == 3) {
8139            unallocated_encoding(s);
8140            return;
8141        }
8142        if (!fp_access_check(s)) {
8143            return;
8144        }
8145        handle_2misc_narrow(s, true, opcode, u, false, size, rn, rd);
8146        return;
8147    case 0xc ... 0xf:
8148    case 0x16 ... 0x1d:
8149    case 0x1f:
8150        /* Floating point: U, size[1] and opcode indicate operation;
8151         * size[0] indicates single or double precision.
8152         */
8153        opcode |= (extract32(size, 1, 1) << 5) | (u << 6);
8154        size = extract32(size, 0, 1) ? 3 : 2;
8155        switch (opcode) {
8156        case 0x2c: /* FCMGT (zero) */
8157        case 0x2d: /* FCMEQ (zero) */
8158        case 0x2e: /* FCMLT (zero) */
8159        case 0x6c: /* FCMGE (zero) */
8160        case 0x6d: /* FCMLE (zero) */
8161            handle_2misc_fcmp_zero(s, opcode, true, u, true, size, rn, rd);
8162            return;
8163        case 0x1d: /* SCVTF */
8164        case 0x5d: /* UCVTF */
8165        {
8166            bool is_signed = (opcode == 0x1d);
8167            if (!fp_access_check(s)) {
8168                return;
8169            }
8170            handle_simd_intfp_conv(s, rd, rn, 1, is_signed, 0, size);
8171            return;
8172        }
8173        case 0x3d: /* FRECPE */
8174        case 0x3f: /* FRECPX */
8175        case 0x7d: /* FRSQRTE */
8176            if (!fp_access_check(s)) {
8177                return;
8178            }
8179            handle_2misc_reciprocal(s, opcode, true, u, true, size, rn, rd);
8180            return;
8181        case 0x1a: /* FCVTNS */
8182        case 0x1b: /* FCVTMS */
8183        case 0x3a: /* FCVTPS */
8184        case 0x3b: /* FCVTZS */
8185        case 0x5a: /* FCVTNU */
8186        case 0x5b: /* FCVTMU */
8187        case 0x7a: /* FCVTPU */
8188        case 0x7b: /* FCVTZU */
8189            is_fcvt = true;
8190            rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
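            /* This maps FCVT{N,P,M,Z} onto FPROUNDING_{TIEEVEN,POSINF,
             * NEGINF,ZERO} respectively; e.g. FCVTMS (0x1b) has
             * opcode<5> = 0 and opcode<0> = 1, giving rmode 2 (NEGINF).
             */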
8191            break;
8192        case 0x1c: /* FCVTAS */
8193        case 0x5c: /* FCVTAU */
8194            /* TIEAWAY doesn't fit in the usual rounding mode encoding */
8195            is_fcvt = true;
8196            rmode = FPROUNDING_TIEAWAY;
8197            break;
8198        case 0x56: /* FCVTXN, FCVTXN2 */
8199            if (size == 2) {
8200                unallocated_encoding(s);
8201                return;
8202            }
8203            if (!fp_access_check(s)) {
8204                return;
8205            }
8206            handle_2misc_narrow(s, true, opcode, u, false, size - 1, rn, rd);
8207            return;
8208        default:
8209            unallocated_encoding(s);
8210            return;
8211        }
8212        break;
8213    default:
8214        unallocated_encoding(s);
8215        return;
8216    }
8217
8218    if (!fp_access_check(s)) {
8219        return;
8220    }
8221
8222    if (is_fcvt) {
8223        tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
8224        gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
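        /* set_rmode hands back the previous rounding mode in tcg_rmode,
         * so the matching call at the end of the function restores it.
         */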
8225        tcg_fpstatus = get_fpstatus_ptr();
8226    } else {
8227        TCGV_UNUSED_I32(tcg_rmode);
8228        TCGV_UNUSED_PTR(tcg_fpstatus);
8229    }
8230
8231    if (size == 3) {
8232        TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
8233        TCGv_i64 tcg_rd = tcg_temp_new_i64();
8234
8235        handle_2misc_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rmode, tcg_fpstatus);
8236        write_fp_dreg(s, rd, tcg_rd);
8237        tcg_temp_free_i64(tcg_rd);
8238        tcg_temp_free_i64(tcg_rn);
8239    } else {
8240        TCGv_i32 tcg_rn = tcg_temp_new_i32();
8241        TCGv_i32 tcg_rd = tcg_temp_new_i32();
8242
8243        read_vec_element_i32(s, tcg_rn, rn, 0, size);
8244
8245        switch (opcode) {
8246        case 0x7: /* SQABS, SQNEG */
8247        {
8248            NeonGenOneOpEnvFn *genfn;
8249            static NeonGenOneOpEnvFn * const fns[3][2] = {
8250                { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 },
8251                { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 },
8252                { gen_helper_neon_qabs_s32, gen_helper_neon_qneg_s32 },
8253            };
8254            genfn = fns[size][u];
8255            genfn(tcg_rd, cpu_env, tcg_rn);
8256            break;
8257        }
8258        case 0x1a: /* FCVTNS */
8259        case 0x1b: /* FCVTMS */
8260        case 0x1c: /* FCVTAS */
8261        case 0x3a: /* FCVTPS */
8262        case 0x3b: /* FCVTZS */
8263        {
8264            TCGv_i32 tcg_shift = tcg_const_i32(0);
8265            gen_helper_vfp_tosls(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
8266            tcg_temp_free_i32(tcg_shift);
8267            break;
8268        }
8269        case 0x5a: /* FCVTNU */
8270        case 0x5b: /* FCVTMU */
8271        case 0x5c: /* FCVTAU */
8272        case 0x7a: /* FCVTPU */
8273        case 0x7b: /* FCVTZU */
8274        {
8275            TCGv_i32 tcg_shift = tcg_const_i32(0);
8276            gen_helper_vfp_touls(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
8277            tcg_temp_free_i32(tcg_shift);
8278            break;
8279        }
8280        default:
8281            g_assert_not_reached();
8282        }
8283
8284        write_fp_sreg(s, rd, tcg_rd);
8285        tcg_temp_free_i32(tcg_rd);
8286        tcg_temp_free_i32(tcg_rn);
8287    }
8288
8289    if (is_fcvt) {
8290        gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
8291        tcg_temp_free_i32(tcg_rmode);
8292        tcg_temp_free_ptr(tcg_fpstatus);
8293    }
8294}
8295
8296/* SSHR[RA]/USHR[RA] - Vector shift right (optional rounding/accumulate) */
8297static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
8298                                 int immh, int immb, int opcode, int rn, int rd)
8299{
8300    int size = 32 - clz32(immh) - 1;
8301    int immhb = immh << 3 | immb;
8302    int shift = 2 * (8 << size) - immhb;
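    /* e.g. immh = 0b0100, immb = 0b000: size = 2 (32-bit elements),
     * immhb = 32, so shift = 64 - 32 = 32, the maximum right shift.
     */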
8303    bool accumulate = false;
8304    bool round = false;
8305    bool insert = false;
8306    int dsize = is_q ? 128 : 64;
8307    int esize = 8 << size;
8308    int elements = dsize/esize;
8309    TCGMemOp memop = size | (is_u ? 0 : MO_SIGN);
8310    TCGv_i64 tcg_rn = new_tmp_a64(s);
8311    TCGv_i64 tcg_rd = new_tmp_a64(s);
8312    TCGv_i64 tcg_round;
8313    int i;
8314
8315    if (extract32(immh, 3, 1) && !is_q) {
8316        unallocated_encoding(s);
8317        return;
8318    }
8319
8320    if (size > 3 && !is_q) {
8321        unallocated_encoding(s);
8322        return;
8323    }
8324
8325    if (!fp_access_check(s)) {
8326        return;
8327    }
8328
8329    switch (opcode) {
8330    case 0x02: /* SSRA / USRA (accumulate) */
8331        accumulate = true;
8332        break;
8333    case 0x04: /* SRSHR / URSHR (rounding) */
8334        round = true;
8335        break;
8336    case 0x06: /* SRSRA / URSRA (accum + rounding) */
8337        accumulate = round = true;
8338        break;
8339    case 0x08: /* SRI */
8340        insert = true;
8341        break;
8342    }
8343
8344    if (round) {
8345        uint64_t round_const = 1ULL << (shift - 1);
8346        tcg_round = tcg_const_i64(round_const);
8347    } else {
8348        TCGV_UNUSED_I64(tcg_round);
8349    }
8350
8351    for (i = 0; i < elements; i++) {
8352        read_vec_element(s, tcg_rn, rn, i, memop);
8353        if (accumulate || insert) {
8354            read_vec_element(s, tcg_rd, rd, i, memop);
8355        }
8356
8357        if (insert) {
8358            handle_shri_with_ins(tcg_rd, tcg_rn, size, shift);
8359        } else {
8360            handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
8361                                    accumulate, is_u, size, shift);
8362        }
8363
8364        write_vec_element(s, tcg_rd, rd, i, size);
8365    }
8366
8367    if (!is_q) {
8368        clear_vec_high(s, rd);
8369    }
8370
8371    if (round) {
8372        tcg_temp_free_i64(tcg_round);
8373    }
8374}
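
/* Illustrative only -- the rounding right shift generated above has,
 * per element, roughly this reference behaviour. Hypothetical name,
 * valid for 0 < shift < 64; the translator's handle_shri_with_rndacc
 * also copes with the shift == 64 boundary case and with accumulation:
 */
#if 0
static int64_t ref_srshr64(int64_t x, unsigned int shift)
{
    /* Adding half of the discarded range before shifting rounds to
     * nearest, ties toward +infinity. A full implementation would use
     * a wider intermediate so the addition itself cannot overflow.
     */
    return (x + (1LL << (shift - 1))) >> shift;
}
#endif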
8375
8376/* SHL/SLI - Vector shift left */
8377static void handle_vec_simd_shli(DisasContext *s, bool is_q, bool insert,
8378                                int immh, int immb, int opcode, int rn, int rd)
8379{
8380    int size = 32 - clz32(immh) - 1;
8381    int immhb = immh << 3 | immb;
8382    int shift = immhb - (8 << size);
8383    int dsize = is_q ? 128 : 64;
8384    int esize = 8 << size;
8385    int elements = dsize/esize;
8386    TCGv_i64 tcg_rn = new_tmp_a64(s);
8387    TCGv_i64 tcg_rd = new_tmp_a64(s);
8388    int i;
8389
8390    if (extract32(immh, 3, 1) && !is_q) {
8391        unallocated_encoding(s);
8392        return;
8393    }
8394
8395    if (size > 3 && !is_q) {
8396        unallocated_encoding(s);
8397        return;
8398    }
8399
8400    if (!fp_access_check(s)) {
8401        return;
8402    }
8403
8404    for (i = 0; i < elements; i++) {
8405        read_vec_element(s, tcg_rn, rn, i, size);
8406        if (insert) {
8407            read_vec_element(s, tcg_rd, rd, i, size);
8408        }
8409
8410        handle_shli_with_ins(tcg_rd, tcg_rn, insert, shift);
8411
8412        write_vec_element(s, tcg_rd, rd, i, size);
8413    }
8414
8415    if (!is_q) {
8416        clear_vec_high(s, rd);
8417    }
8418}
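
/* Illustrative only -- per-element reference semantics of SLI as emitted
 * via handle_shli_with_ins above (hypothetical name, not QEMU API;
 * shift is 0..esize-1 for SLI):
 */
#if 0
static uint64_t ref_sli64(uint64_t dst, uint64_t src, unsigned int shift)
{
    uint64_t kept = (1ULL << shift) - 1;    /* dst bits below the shift */

    return (dst & kept) | (src << shift);
}
#endif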
8419
8420/* SSHLL/USHLL - Vector shift left with widening */
8421static void handle_vec_simd_wshli(DisasContext *s, bool is_q, bool is_u,
8422                                 int immh, int immb, int opcode, int rn, int rd)
8423{
8424    int size = 32 - clz32(immh) - 1;
8425    int immhb = immh << 3 | immb;
8426    int shift = immhb - (8 << size);
8427    int dsize = 64;
8428    int esize = 8 << size;
8429    int elements = dsize/esize;
8430    TCGv_i64 tcg_rn = new_tmp_a64(s);
8431    TCGv_i64 tcg_rd = new_tmp_a64(s);
8432    int i;
8433
8434    if (size >= 3) {
8435        unallocated_encoding(s);
8436        return;
8437    }
8438
8439    if (!fp_access_check(s)) {
8440        return;
8441    }
8442
8443    /* For the LL variants the store is larger than the load,
8444     * so if rd == rn we would overwrite parts of our input.
8445     * So load everything right now and use shifts in the main loop.
8446     */
8447    read_vec_element(s, tcg_rn, rn, is_q ? 1 : 0, MO_64);
8448
8449    for (i = 0; i < elements; i++) {
8450        tcg_gen_shri_i64(tcg_rd, tcg_rn, i * esize);
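        /* size | (!is_u << 2) selects {U,S}XT{B,H,W} in
         * ext_and_shift_reg's extend encoding (0-3 unsigned, 4-7 signed).
         */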
8451        ext_and_shift_reg(tcg_rd, tcg_rd, size | (!is_u << 2), 0);
8452        tcg_gen_shli_i64(tcg_rd, tcg_rd, shift);
8453        write_vec_element(s, tcg_rd, rd, i, size + 1);
8454    }
8455}
8456
8457/* SHRN/RSHRN - Shift right with narrowing (and potential rounding) */
8458static void handle_vec_simd_shrn(DisasContext *s, bool is_q,
8459                                 int immh, int immb, int opcode, int rn, int rd)
8460{
8461    int immhb = immh << 3 | immb;
8462    int size = 32 - clz32(immh) - 1;
8463    int dsize = 64;
8464    int esize = 8 << size;
8465    int elements = dsize/esize;
8466    int shift = (2 * esize) - immhb;
8467    bool round = extract32(opcode, 0, 1);
8468    TCGv_i64 tcg_rn, tcg_rd, tcg_final;
8469    TCGv_i64 tcg_round;
8470    int i;
8471
8472    if (extract32(immh, 3, 1)) {
8473        unallocated_encoding(s);
8474        return;
8475    }
8476
8477    if (!fp_access_check(s)) {
8478        return;
8479    }
8480
8481    tcg_rn = tcg_temp_new_i64();
8482    tcg_rd = tcg_temp_new_i64();
8483    tcg_final = tcg_temp_new_i64();
8484    read_vec_element(s, tcg_final, rd, is_q ? 1 : 0, MO_64);
8485
8486    if (round) {
8487        uint64_t round_const = 1ULL << (shift - 1);
8488        tcg_round = tcg_const_i64(round_const);
8489    } else {
8490        TCGV_UNUSED_I64(tcg_round);
8491    }
8492
8493    for (i = 0; i < elements; i++) {
8494        read_vec_element(s, tcg_rn, rn, i, size+1);
8495        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
8496                                false, true, size+1, shift);
8497
8498        tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
8499    }
8500
8501    if (!is_q) {
8502        clear_vec_high(s, rd);
8503        write_vec_element(s, tcg_final, rd, 0, MO_64);
8504    } else {
8505        write_vec_element(s, tcg_final, rd, 1, MO_64);
8506    }
8507
8508    if (round) {
8509        tcg_temp_free_i64(tcg_round);
8510    }
8511    tcg_temp_free_i64(tcg_rn);
8512    tcg_temp_free_i64(tcg_rd);
8513    tcg_temp_free_i64(tcg_final);
8515}
8516
8518/* C3.6.14 AdvSIMD shift by immediate
8519 *  31  30   29 28         23 22  19 18  16 15    11  10 9    5 4    0
8520 * +---+---+---+-------------+------+------+--------+---+------+------+
8521 * | 0 | Q | U | 0 1 1 1 1 0 | immh | immb | opcode | 1 |  Rn  |  Rd  |
8522 * +---+---+---+-------------+------+------+--------+---+------+------+
8523 */
8524static void disas_simd_shift_imm(DisasContext *s, uint32_t insn)
8525{
8526    int rd = extract32(insn, 0, 5);
8527    int rn = extract32(insn, 5, 5);
8528    int opcode = extract32(insn, 11, 5);
8529    int immb = extract32(insn, 16, 3);
8530    int immh = extract32(insn, 19, 4);
8531    bool is_u = extract32(insn, 29, 1);
8532    bool is_q = extract32(insn, 30, 1);
8533
8534    switch (opcode) {
8535    case 0x08: /* SRI */
8536        if (!is_u) {
8537            unallocated_encoding(s);
8538            return;
8539        }
8540        /* fall through */
8541    case 0x00: /* SSHR / USHR */
8542    case 0x02: /* SSRA / USRA (accumulate) */
8543    case 0x04: /* SRSHR / URSHR (rounding) */
8544    case 0x06: /* SRSRA / URSRA (accum + rounding) */
8545        handle_vec_simd_shri(s, is_q, is_u, immh, immb, opcode, rn, rd);
8546        break;
8547    case 0x0a: /* SHL / SLI */
8548        handle_vec_simd_shli(s, is_q, is_u, immh, immb, opcode, rn, rd);
8549        break;
8550    case 0x10: /* SHRN */
8551    case 0x11: /* RSHRN / SQRSHRUN */
8552        if (is_u) {
8553            handle_vec_simd_sqshrn(s, false, is_q, false, true, immh, immb,
8554                                   opcode, rn, rd);
8555        } else {
8556            handle_vec_simd_shrn(s, is_q, immh, immb, opcode, rn, rd);
8557        }
8558        break;
8559    case 0x12: /* SQSHRN / UQSHRN */
8560    case 0x13: /* SQRSHRN / UQRSHRN */
8561        handle_vec_simd_sqshrn(s, false, is_q, is_u, is_u, immh, immb,
8562                               opcode, rn, rd);
8563        break;
8564    case 0x14: /* SSHLL / USHLL */
8565        handle_vec_simd_wshli(s, is_q, is_u, immh, immb, opcode, rn, rd);
8566        break;
8567    case 0x1c: /* SCVTF / UCVTF */
8568        handle_simd_shift_intfp_conv(s, false, is_q, is_u, immh, immb,
8569                                     opcode, rn, rd);
8570        break;
8571    case 0xc: /* SQSHLU */
8572        if (!is_u) {
8573            unallocated_encoding(s);
8574            return;
8575        }
8576        handle_simd_qshl(s, false, is_q, false, true, immh, immb, rn, rd);
8577        break;
8578    case 0xe: /* SQSHL, UQSHL */
8579        handle_simd_qshl(s, false, is_q, is_u, is_u, immh, immb, rn, rd);
8580        break;
8581    case 0x1f: /* FCVTZS / FCVTZU */
8582        handle_simd_shift_fpint_conv(s, false, is_q, is_u, immh, immb, rn, rd);
8583        return;
8584    default:
8585        unallocated_encoding(s);
8586        return;
8587    }
8588}
8589
8590/* Generate code to do a "long" addition or subtraction, i.e. one done in
8591 * TCGv_i64 on vector lanes twice the width specified by size.
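 * Note the unsigned helpers serve both signednesses: lane-wise modulo
 * add/sub is sign-agnostic, the helpers merely keep carries from
 * crossing the packed lane boundaries.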
8592 */
8593static void gen_neon_addl(int size, bool is_sub, TCGv_i64 tcg_res,
8594                          TCGv_i64 tcg_op1, TCGv_i64 tcg_op2)
8595{
8596    static NeonGenTwo64OpFn * const fns[3][2] = {
8597        { gen_helper_neon_addl_u16, gen_helper_neon_subl_u16 },
8598        { gen_helper_neon_addl_u32, gen_helper_neon_subl_u32 },
8599        { tcg_gen_add_i64, tcg_gen_sub_i64 },
8600    };
8601    NeonGenTwo64OpFn *genfn;
8602    assert(size < 3);
8603
8604    genfn = fns[size][is_sub];
8605    genfn(tcg_res, tcg_op1, tcg_op2);
8606}
8607
8608static void handle_3rd_widening(DisasContext *s, int is_q, int is_u, int size,
8609                                int opcode, int rd, int rn, int rm)
8610{
8611    /* 3-reg-different widening insns: 64 x 64 -> 128 */
8612    TCGv_i64 tcg_res[2];
8613    int pass, accop;
8614
8615    tcg_res[0] = tcg_temp_new_i64();
8616    tcg_res[1] = tcg_temp_new_i64();
8617
8618    /* Does this op do an adding accumulate, a subtracting accumulate,
8619     * or no accumulate at all?
8620     */
8621    switch (opcode) {
8622    case 5:
8623    case 8:
8624    case 9:
8625        accop = 1;
8626        break;
8627    case 10:
8628    case 11:
8629        accop = -1;
8630        break;
8631    default:
8632        accop = 0;
8633        break;
8634    }
8635
8636    if (accop != 0) {
8637        read_vec_element(s, tcg_res[0], rd, 0, MO_64);
8638        read_vec_element(s, tcg_res[1], rd, 1, MO_64);
8639    }
8640
8641    /* size == 2 means two 32x32->64 operations; this is worth special
8642     * casing because we can generally handle it inline.
8643     */
8644    if (size == 2) {
8645        for (pass = 0; pass < 2; pass++) {
8646            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
8647            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
8648            TCGv_i64 tcg_passres;
8649            TCGMemOp memop = MO_32 | (is_u ? 0 : MO_SIGN);
8650
8651            int elt = pass + is_q * 2;
8652
8653            read_vec_element(s, tcg_op1, rn, elt, memop);
8654            read_vec_element(s, tcg_op2, rm, elt, memop);
8655
8656            if (accop == 0) {
8657                tcg_passres = tcg_res[pass];
8658            } else {
8659                tcg_passres = tcg_temp_new_i64();
8660            }
8661
8662            switch (opcode) {
8663            case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
8664                tcg_gen_add_i64(tcg_passres, tcg_op1, tcg_op2);
8665                break;
8666            case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
8667                tcg_gen_sub_i64(tcg_passres, tcg_op1, tcg_op2);
8668                break;
8669            case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
8670            case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
8671            {
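                /* Absolute difference: compute both op1 - op2 and
                 * op2 - op1, then movcond-select whichever is
                 * non-negative, comparing signed or unsigned as needed.
                 */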
8672                TCGv_i64 tcg_tmp1 = tcg_temp_new_i64();
8673                TCGv_i64 tcg_tmp2 = tcg_temp_new_i64();
8674
8675                tcg_gen_sub_i64(tcg_tmp1, tcg_op1, tcg_op2);
8676                tcg_gen_sub_i64(tcg_tmp2, tcg_op2, tcg_op1);
8677                tcg_gen_movcond_i64(is_u ? TCG_COND_GEU : TCG_COND_GE,
8678                                    tcg_passres,
8679                                    tcg_op1, tcg_op2, tcg_tmp1, tcg_tmp2);
8680                tcg_temp_free_i64(tcg_tmp1);
8681                tcg_temp_free_i64(tcg_tmp2);
8682                break;
8683            }
8684            case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
8685            case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
8686            case 12: /* UMULL, UMULL2, SMULL, SMULL2 */
8687                tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2);
8688                break;
8689            case 9: /* SQDMLAL, SQDMLAL2 */
8690            case 11: /* SQDMLSL, SQDMLSL2 */
8691            case 13: /* SQDMULL, SQDMULL2 */
8692                tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2);
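                /* The 'doubling' is a saturating self-add of the
                 * product; the only case that can overflow (and thus
                 * saturate) here is INT32_MIN * INT32_MIN doubled.
                 */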
8693                gen_helper_neon_addl_saturate_s64(tcg_passres, cpu_env,
8694                                                  tcg_passres, tcg_passres);
8695                break;
8696            default:
8697                g_assert_not_reached();
8698            }
8699
8700            if (opcode == 9 || opcode == 11) {
8701                /* saturating accumulate ops */
8702                if (accop < 0) {
8703                    tcg_gen_neg_i64(tcg_passres, tcg_passres);
8704                }
8705                gen_helper_neon_addl_saturate_s64(tcg_res[pass], cpu_env,
8706                                                  tcg_res[pass], tcg_passres);
8707            } else if (accop > 0) {
8708                tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
8709            } else if (accop < 0) {
8710                tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
8711            }
8712
8713            if (accop != 0) {
8714                tcg_temp_free_i64(tcg_passres);
8715            }
8716
8717            tcg_temp_free_i64(tcg_op1);
8718            tcg_temp_free_i64(tcg_op2);
8719        }
8720    } else {
8721        /* size 0 or 1, generally helper functions */
8722        for (pass = 0; pass < 2; pass++) {
8723            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
8724            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
8725            TCGv_i64 tcg_passres;
8726            int elt = pass + is_q * 2;
8727
8728            read_vec_element_i32(s, tcg_op1, rn, elt, MO_32);
8729            read_vec_element_i32(s, tcg_op2, rm, elt, MO_32);
8730
8731            if (accop == 0) {
8732                tcg_passres = tcg_res[pass];
8733            } else {
8734                tcg_passres = tcg_temp_new_i64();
8735            }
8736
8737            switch (opcode) {
8738            case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
8739            case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
8740            {
8741                TCGv_i64 tcg_op2_64 = tcg_temp_new_i64();
8742                static NeonGenWidenFn * const widenfns[2][2] = {
8743                    { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 },
8744                    { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 },
8745                };
8746                NeonGenWidenFn *widenfn = widenfns[size][is_u];
8747
8748                widenfn(tcg_op2_64, tcg_op2);
8749                widenfn(tcg_passres, tcg_op1);
8750                gen_neon_addl(size, (opcode == 2), tcg_passres,
8751                              tcg_passres, tcg_op2_64);
8752                tcg_temp_free_i64(tcg_op2_64);
8753                break;
8754            }
8755            case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
8756            case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
8757                if (size == 0) {
8758                    if (is_u) {
8759                        gen_helper_neon_abdl_u16(tcg_passres, tcg_op1, tcg_op2);
8760                    } else {
8761                        gen_helper_neon_abdl_s16(tcg_passres, tcg_op1, tcg_op2);
8762                    }
8763                } else {
8764                    if (is_u) {
8765                        gen_helper_neon_abdl_u32(tcg_passres, tcg_op1, tcg_op2);
8766                    } else {
8767                        gen_helper_neon_abdl_s32(tcg_passres, tcg_op1, tcg_op2);
8768                    }
8769                }
8770                break;
8771            case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
8772            case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
8773            case 12: /* UMULL, UMULL2, SMULL, SMULL2 */
8774                if (size == 0) {
8775                    if (is_u) {
8776                        gen_helper_neon_mull_u8(tcg_passres, tcg_op1, tcg_op2);
8777                    } else {
8778                        gen_helper_neon_mull_s8(tcg_passres, tcg_op1, tcg_op2);
8779                    }
8780                } else {
8781                    if (is_u) {
8782                        gen_helper_neon_mull_u16(tcg_passres, tcg_op1, tcg_op2);
8783                    } else {
8784                        gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2);
8785                    }
8786                }
8787                break;
8788            case 9: /* SQDMLAL, SQDMLAL2 */
8789            case 11: /* SQDMLSL, SQDMLSL2 */
8790            case 13: /* SQDMULL, SQDMULL2 */
8791                assert(size == 1);
8792                gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2);
8793                gen_helper_neon_addl_saturate_s32(tcg_passres, cpu_env,
8794                                                  tcg_passres, tcg_passres);
8795                break;
8796            case 14: /* PMULL */
8797                assert(size == 0);
8798                gen_helper_neon_mull_p8(tcg_passres, tcg_op1, tcg_op2);
8799                break;
8800            default:
8801                g_assert_not_reached();
8802            }
8803            tcg_temp_free_i32(tcg_op1);
8804            tcg_temp_free_i32(tcg_op2);
8805
8806            if (accop != 0) {
8807                if (opcode == 9 || opcode == 11) {
8808                    /* saturating accumulate ops */
8809                    if (accop < 0) {
8810                        gen_helper_neon_negl_u32(tcg_passres, tcg_passres);
8811                    }
8812                    gen_helper_neon_addl_saturate_s32(tcg_res[pass], cpu_env,
8813                                                      tcg_res[pass],
8814                                                      tcg_passres);
8815                } else {
8816                    gen_neon_addl(size, (accop < 0), tcg_res[pass],
8817                                  tcg_res[pass], tcg_passres);
8818                }
8819                tcg_temp_free_i64(tcg_passres);
8820            }
8821        }
8822    }
8823
8824    write_vec_element(s, tcg_res[0], rd, 0, MO_64);
8825    write_vec_element(s, tcg_res[1], rd, 1, MO_64);
8826    tcg_temp_free_i64(tcg_res[0]);
8827    tcg_temp_free_i64(tcg_res[1]);
8828}
8829
8830static void handle_3rd_wide(DisasContext *s, int is_q, int is_u, int size,
8831                            int opcode, int rd, int rn, int rm)
8832{
8833    TCGv_i64 tcg_res[2];
8834    int part = is_q ? 2 : 0;
8835    int pass;
8836
8837    for (pass = 0; pass < 2; pass++) {
8838        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
8839        TCGv_i32 tcg_op2 = tcg_temp_new_i32();
8840        TCGv_i64 tcg_op2_wide = tcg_temp_new_i64();
8841        static NeonGenWidenFn * const widenfns[3][2] = {
8842            { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 },
8843            { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 },
8844            { tcg_gen_ext_i32_i64, tcg_gen_extu_i32_i64 },
8845        };
8846        NeonGenWidenFn *widenfn = widenfns[size][is_u];
8847
8848        read_vec_element(s, tcg_op1, rn, pass, MO_64);
8849        read_vec_element_i32(s, tcg_op2, rm, part + pass, MO_32);
8850        widenfn(tcg_op2_wide, tcg_op2);
8851        tcg_temp_free_i32(tcg_op2);
8852        tcg_res[pass] = tcg_temp_new_i64();
8853        gen_neon_addl(size, (opcode == 3),
8854                      tcg_res[pass], tcg_op1, tcg_op2_wide);
8855        tcg_temp_free_i64(tcg_op1);
8856        tcg_temp_free_i64(tcg_op2_wide);
8857    }
8858
8859    for (pass = 0; pass < 2; pass++) {
8860        write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
8861        tcg_temp_free_i64(tcg_res[pass]);
8862    }
8863}
8864
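/* Rounding narrow-to-high-half for 32-bit lanes: adding 1 << 31 (half of
 * the discarded low word) before taking the top 32 bits rounds to
 * nearest, with ties rounded up.
 */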
8865static void do_narrow_round_high_u32(TCGv_i32 res, TCGv_i64 in)
8866{
8867    tcg_gen_addi_i64(in, in, 1U << 31);
8868    tcg_gen_extrh_i64_i32(res, in);
8869}
8870
8871static void handle_3rd_narrowing(DisasContext *s, int is_q, int is_u, int size,
8872                                 int opcode, int rd, int rn, int rm)
8873{
8874    TCGv_i32 tcg_res[2];
8875    int part = is_q ? 2 : 0;
8876    int pass;
8877
8878    for (pass = 0; pass < 2; pass++) {
8879        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
8880        TCGv_i64 tcg_op2 = tcg_temp_new_i64();
8881        TCGv_i64 tcg_wideres = tcg_temp_new_i64();
8882        static NeonGenNarrowFn * const narrowfns[3][2] = {
8883            { gen_helper_neon_narrow_high_u8,
8884              gen_helper_neon_narrow_round_high_u8 },
8885            { gen_helper_neon_narrow_high_u16,
8886              gen_helper_neon_narrow_round_high_u16 },
8887            { tcg_gen_extrh_i64_i32, do_narrow_round_high_u32 },
8888        };
8889        NeonGenNarrowFn *gennarrow = narrowfns[size][is_u];
8890
8891        read_vec_element(s, tcg_op1, rn, pass, MO_64);
8892        read_vec_element(s, tcg_op2, rm, pass, MO_64);
8893
8894        gen_neon_addl(size, (opcode == 6), tcg_wideres, tcg_op1, tcg_op2);
8895
8896        tcg_temp_free_i64(tcg_op1);
8897        tcg_temp_free_i64(tcg_op2);
8898
8899        tcg_res[pass] = tcg_temp_new_i32();
8900        gennarrow(tcg_res[pass], tcg_wideres);
8901        tcg_temp_free_i64(tcg_wideres);
8902    }
8903
8904    for (pass = 0; pass < 2; pass++) {
8905        write_vec_element_i32(s, tcg_res[pass], rd, pass + part, MO_32);
8906        tcg_temp_free_i32(tcg_res[pass]);
8907    }
8908    if (!is_q) {
8909        clear_vec_high(s, rd);
8910    }
8911}
8912
8913static void handle_pmull_64(DisasContext *s, int is_q, int rd, int rn, int rm)
8914{
8915    /* PMULL of 64 x 64 -> 128 is an odd special case because it
8916     * is the only three-reg-diff instruction which produces a
8917     * 128-bit wide result from a single operation. However since
8918     * it's possible to calculate the two halves more or less
8919     * separately we just use two helper calls.
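     * (The multiply is polynomial, i.e. carry-less: partial products
     * are combined with XOR rather than addition.)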
8920     */
8921    TCGv_i64 tcg_op1 = tcg_temp_new_i64();
8922    TCGv_i64 tcg_op2 = tcg_temp_new_i64();
8923    TCGv_i64 tcg_res = tcg_temp_new_i64();
8924
8925    read_vec_element(s, tcg_op1, rn, is_q, MO_64);
8926    read_vec_element(s, tcg_op2, rm, is_q, MO_64);
8927    gen_helper_neon_pmull_64_lo(tcg_res, tcg_op1, tcg_op2);
8928    write_vec_element(s, tcg_res, rd, 0, MO_64);
8929    gen_helper_neon_pmull_64_hi(tcg_res, tcg_op1, tcg_op2);
8930    write_vec_element(s, tcg_res, rd, 1, MO_64);
8931
8932    tcg_temp_free_i64(tcg_op1);
8933    tcg_temp_free_i64(tcg_op2);
8934    tcg_temp_free_i64(tcg_res);
8935}
8936
8937/* C3.6.15 AdvSIMD three different
8938 *   31  30  29 28       24 23  22  21 20  16 15    12 11 10 9    5 4    0
8939 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
8940 * | 0 | Q | U | 0 1 1 1 0 | size | 1 |  Rm  | opcode | 0 0 |  Rn  |  Rd  |
8941 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
8942 */
8943static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn)
8944{
8945    /* Instructions in this group fall into three basic classes
8946     * (in each case with the operation working on each element in
8947     * the input vectors):
8948     * (1) widening 64 x 64 -> 128 (with possibly Vd as an extra
8949     *     128 bit input)
8950     * (2) wide 64 x 128 -> 128
8951     * (3) narrowing 128 x 128 -> 64
8952     * Here we do initial decode, catch unallocated cases and
8953     * dispatch to separate functions for each class.
8954     */
8955    int is_q = extract32(insn, 30, 1);
8956    int is_u = extract32(insn, 29, 1);
8957    int size = extract32(insn, 22, 2);
8958    int opcode = extract32(insn, 12, 4);
8959    int rm = extract32(insn, 16, 5);
8960    int rn = extract32(insn, 5, 5);
8961    int rd = extract32(insn, 0, 5);
8962
8963    switch (opcode) {
8964    case 1: /* SADDW, SADDW2, UADDW, UADDW2 */
8965    case 3: /* SSUBW, SSUBW2, USUBW, USUBW2 */
8966        /* 64 x 128 -> 128 */
8967        if (size == 3) {
8968            unallocated_encoding(s);
8969            return;
8970        }
8971        if (!fp_access_check(s)) {
8972            return;
8973        }
8974        handle_3rd_wide(s, is_q, is_u, size, opcode, rd, rn, rm);
8975        break;
8976    case 4: /* ADDHN, ADDHN2, RADDHN, RADDHN2 */
8977    case 6: /* SUBHN, SUBHN2, RSUBHN, RSUBHN2 */
8978        /* 128 x 128 -> 64 */
8979        if (size == 3) {
8980            unallocated_encoding(s);
8981            return;
8982        }
8983        if (!fp_access_check(s)) {
8984            return;
8985        }
8986        handle_3rd_narrowing(s, is_q, is_u, size, opcode, rd, rn, rm);
8987        break;
8988    case 14: /* PMULL, PMULL2 */
8989        if (is_u || size == 1 || size == 2) {
8990            unallocated_encoding(s);
8991            return;
8992        }
8993        if (size == 3) {
8994            if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) {
8995                unallocated_encoding(s);
8996                return;
8997            }
8998            if (!fp_access_check(s)) {
8999                return;
9000            }
9001            handle_pmull_64(s, is_q, rd, rn, rm);
9002            return;
9003        }
9004        goto is_widening;
9005    case 9: /* SQDMLAL, SQDMLAL2 */
9006    case 11: /* SQDMLSL, SQDMLSL2 */
9007    case 13: /* SQDMULL, SQDMULL2 */
9008        if (is_u || size == 0) {
9009            unallocated_encoding(s);
9010            return;
9011        }
9012        /* fall through */
9013    case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
9014    case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
9015    case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
9016    case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
9017    case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
9018    case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
9019    case 12: /* SMULL, SMULL2, UMULL, UMULL2 */
9020        /* 64 x 64 -> 128 */
9021        if (size == 3) {
9022            unallocated_encoding(s);
9023            return;
9024        }
9025    is_widening:
9026        if (!fp_access_check(s)) {
9027            return;
9028        }
9029
9030        handle_3rd_widening(s, is_q, is_u, size, opcode, rd, rn, rm);
9031        break;
9032    default:
9033        /* opcode 15 not allocated */
9034        unallocated_encoding(s);
9035        break;
9036    }
9037}
9038
9039/* Logic op (opcode == 3) subgroup of C3.6.16. */
9040static void disas_simd_3same_logic(DisasContext *s, uint32_t insn)
9041{
9042    int rd = extract32(insn, 0, 5);
9043    int rn = extract32(insn, 5, 5);
9044    int rm = extract32(insn, 16, 5);
9045    int size = extract32(insn, 22, 2);
9046    bool is_u = extract32(insn, 29, 1);
9047    bool is_q = extract32(insn, 30, 1);
9048    TCGv_i64 tcg_op1, tcg_op2, tcg_res[2];
9049    int pass;
9050
9051    if (!fp_access_check(s)) {
9052        return;
9053    }
9054
9055    tcg_op1 = tcg_temp_new_i64();
9056    tcg_op2 = tcg_temp_new_i64();
9057    tcg_res[0] = tcg_temp_new_i64();
9058    tcg_res[1] = tcg_temp_new_i64();
9059
9060    for (pass = 0; pass < (is_q ? 2 : 1); pass++) {
9061        read_vec_element(s, tcg_op1, rn, pass, MO_64);
9062        read_vec_element(s, tcg_op2, rm, pass, MO_64);
9063
9064        if (!is_u) {
9065            switch (size) {
9066            case 0: /* AND */
9067                tcg_gen_and_i64(tcg_res[pass], tcg_op1, tcg_op2);
9068                break;
9069            case 1: /* BIC */
9070                tcg_gen_andc_i64(tcg_res[pass], tcg_op1, tcg_op2);
9071                break;
9072            case 2: /* ORR */
9073                tcg_gen_or_i64(tcg_res[pass], tcg_op1, tcg_op2);
9074                break;
9075            case 3: /* ORN */
9076                tcg_gen_orc_i64(tcg_res[pass], tcg_op1, tcg_op2);
9077                break;
9078            }
9079        } else {
9080            if (size != 0) {
9081                /* BSL/BIT/BIF need rd loaded as an extra operand */
9082                read_vec_element(s, tcg_res[pass], rd, pass, MO_64);
9083            }
9084
9085            switch (size) {
9086            case 0: /* EOR */
9087                tcg_gen_xor_i64(tcg_res[pass], tcg_op1, tcg_op2);
9088                break;
9089            case 1: /* BSL bitwise select */
9090                tcg_gen_xor_i64(tcg_op1, tcg_op1, tcg_op2);
9091                tcg_gen_and_i64(tcg_op1, tcg_op1, tcg_res[pass]);
9092                tcg_gen_xor_i64(tcg_res[pass], tcg_op2, tcg_op1);
9093                break;
9094            case 2: /* BIT, bitwise insert if true */
9095                tcg_gen_xor_i64(tcg_op1, tcg_op1, tcg_res[pass]);
9096                tcg_gen_and_i64(tcg_op1, tcg_op1, tcg_op2);
9097                tcg_gen_xor_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
9098                break;
9099            case 3: /* BIF, bitwise insert if false */
9100                tcg_gen_xor_i64(tcg_op1, tcg_op1, tcg_res[pass]);
9101                tcg_gen_andc_i64(tcg_op1, tcg_op1, tcg_op2);
9102                tcg_gen_xor_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
9103                break;
9104            }
9105        }
9106    }
9107
9108    write_vec_element(s, tcg_res[0], rd, 0, MO_64);
9109    if (!is_q) {
9110        tcg_gen_movi_i64(tcg_res[1], 0);
9111    }
9112    write_vec_element(s, tcg_res[1], rd, 1, MO_64);
9113
9114    tcg_temp_free_i64(tcg_op1);
9115    tcg_temp_free_i64(tcg_op2);
9116    tcg_temp_free_i64(tcg_res[0]);
9117    tcg_temp_free_i64(tcg_res[1]);
9118}
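
/* Illustrative only -- reference semantics of the three xor-trick bitwise
 * selects generated above (hypothetical names, not part of the translator):
 */
#if 0
static uint64_t ref_bsl(uint64_t rd, uint64_t rn, uint64_t rm)
{
    /* BSL: rd is the selector; rn where set, rm where clear.
     * Generated above as rm ^ ((rn ^ rm) & rd) to avoid a NOT.
     */
    return (rd & rn) | (~rd & rm);
}

static uint64_t ref_bit(uint64_t rd, uint64_t rn, uint64_t rm)
{
    /* BIT: insert rn bits into rd where rm is set */
    return (rm & rn) | (~rm & rd);
}

static uint64_t ref_bif(uint64_t rd, uint64_t rn, uint64_t rm)
{
    /* BIF: insert rn bits into rd where rm is clear */
    return (~rm & rn) | (rm & rd);
}
#endif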
9119
9120/* Helper functions for 32 bit comparisons */
9121static void gen_max_s32(TCGv_i32 res, TCGv_i32 op1, TCGv_i32 op2)
9122{
9123    tcg_gen_movcond_i32(TCG_COND_GE, res, op1, op2, op1, op2);
9124}
9125
9126static void gen_max_u32(TCGv_i32 res, TCGv_i32 op1, TCGv_i32 op2)
9127{
9128    tcg_gen_movcond_i32(TCG_COND_GEU, res, op1, op2, op1, op2);
9129}
9130
9131static void gen_min_s32(TCGv_i32 res, TCGv_i32 op1, TCGv_i32 op2)
9132{
9133    tcg_gen_movcond_i32(TCG_COND_LE, res, op1, op2, op1, op2);
9134}
9135
9136static void gen_min_u32(TCGv_i32 res, TCGv_i32 op1, TCGv_i32 op2)
9137{
9138    tcg_gen_movcond_i32(TCG_COND_LEU, res, op1, op2, op1, op2);
9139}
9140
9141/* Pairwise op subgroup of C3.6.16.
9142 *
9143 * This is called directly, or via handle_3same_float for float pairwise
9144 * operations where the opcode and size are calculated differently.
9145 */
9146static void handle_simd_3same_pair(DisasContext *s, int is_q, int u, int opcode,
9147                                   int size, int rn, int rm, int rd)
9148{
9149    TCGv_ptr fpst;
9150    int pass;
9151
9152    if (!fp_access_check(s)) {
9153        return;
9154    }
9155
9156    /* FP ops need fpst; allocate it only after the access check passes */
9157    if (opcode >= 0x58) {
9158        fpst = get_fpstatus_ptr();
9159    } else {
9160        TCGV_UNUSED_PTR(fpst);
9161    }
9162
9163    /* These operations work on the concatenated rm:rn, with each pair of
9164     * adjacent elements being operated on to produce an element in the result.
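     * For example, ADDP with Q=1 and 32-bit lanes yields
     *   { rn[0]+rn[1], rn[2]+rn[3], rm[0]+rm[1], rm[2]+rm[3] }.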
9165     */
9166    if (size == 3) {
9167        TCGv_i64 tcg_res[2];
9168
9169        for (pass = 0; pass < 2; pass++) {
9170            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
9171            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
9172            int passreg = (pass == 0) ? rn : rm;
9173
9174            read_vec_element(s, tcg_op1, passreg, 0, MO_64);
9175            read_vec_element(s, tcg_op2, passreg, 1, MO_64);
9176            tcg_res[pass] = tcg_temp_new_i64();
9177
9178            switch (opcode) {
9179            case 0x17: /* ADDP */
9180                tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2);
9181                break;
9182            case 0x58: /* FMAXNMP */
9183                gen_helper_vfp_maxnumd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
9184                break;
9185            case 0x5a: /* FADDP */
9186                gen_helper_vfp_addd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
9187                break;
9188            case 0x5e: /* FMAXP */
9189                gen_helper_vfp_maxd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
9190                break;
9191            case 0x78: /* FMINNMP */
9192                gen_helper_vfp_minnumd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
9193                break;
9194            case 0x7e: /* FMINP */
9195                gen_helper_vfp_mind(tcg_res[pass], tcg_op1, tcg_op2, fpst);
9196                break;
9197            default:
9198                g_assert_not_reached();
9199            }
9200
9201            tcg_temp_free_i64(tcg_op1);
9202            tcg_temp_free_i64(tcg_op2);
9203        }
9204
9205        for (pass = 0; pass < 2; pass++) {
9206            write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
9207            tcg_temp_free_i64(tcg_res[pass]);
9208        }
9209    } else {
9210        int maxpass = is_q ? 4 : 2;
9211        TCGv_i32 tcg_res[4];
9212
9213        for (pass = 0; pass < maxpass; pass++) {
9214            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
9215            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
9216            NeonGenTwoOpFn *genfn = NULL;
9217            int passreg = pass < (maxpass / 2) ? rn : rm;
9218            int passelt = (is_q && (pass & 1)) ? 2 : 0;
9219
9220            read_vec_element_i32(s, tcg_op1, passreg, passelt, MO_32);
9221            read_vec_element_i32(s, tcg_op2, passreg, passelt + 1, MO_32);
9222            tcg_res[pass] = tcg_temp_new_i32();
9223
9224            switch (opcode) {
9225            case 0x17: /* ADDP */
9226            {
9227                static NeonGenTwoOpFn * const fns[3] = {
9228                    gen_helper_neon_padd_u8,
9229                    gen_helper_neon_padd_u16,
9230                    tcg_gen_add_i32,
9231                };
9232                genfn = fns[size];
9233                break;
9234            }
9235            case 0x14: /* SMAXP, UMAXP */
9236            {
9237                static NeonGenTwoOpFn * const fns[3][2] = {
9238                    { gen_helper_neon_pmax_s8, gen_helper_neon_pmax_u8 },
9239                    { gen_helper_neon_pmax_s16, gen_helper_neon_pmax_u16 },
9240                    { gen_max_s32, gen_max_u32 },
9241                };
9242                genfn = fns[size][u];
9243                break;
9244            }
9245            case 0x15: /* SMINP, UMINP */
9246            {
9247                static NeonGenTwoOpFn * const fns[3][2] = {
9248                    { gen_helper_neon_pmin_s8, gen_helper_neon_pmin_u8 },
9249                    { gen_helper_neon_pmin_s16, gen_helper_neon_pmin_u16 },
9250                    { gen_min_s32, gen_min_u32 },
9251                };
9252                genfn = fns[size][u];
9253                break;
9254            }
9255            /* The FP operations are all on single floats (32 bit) */
9256            case 0x58: /* FMAXNMP */
9257                gen_helper_vfp_maxnums(tcg_res[pass], tcg_op1, tcg_op2, fpst);
9258                break;
9259            case 0x5a: /* FADDP */
9260                gen_helper_vfp_adds(tcg_res[pass], tcg_op1, tcg_op2, fpst);
9261                break;
9262            case 0x5e: /* FMAXP */
9263                gen_helper_vfp_maxs(tcg_res[pass], tcg_op1, tcg_op2, fpst);
9264                break;
9265            case 0x78: /* FMINNMP */
9266                gen_helper_vfp_minnums(tcg_res[pass], tcg_op1, tcg_op2, fpst);
9267                break;
9268            case 0x7e: /* FMINP */
9269                gen_helper_vfp_mins(tcg_res[pass], tcg_op1, tcg_op2, fpst);
9270                break;
9271            default:
9272                g_assert_not_reached();
9273            }
9274
9275            /* FP ops were emitted directly above; call integer genfn now */
9276            if (genfn) {
9277                genfn(tcg_res[pass], tcg_op1, tcg_op2);
9278            }
9279
9280            tcg_temp_free_i32(tcg_op1);
9281            tcg_temp_free_i32(tcg_op2);
9282        }
9283
9284        for (pass = 0; pass < maxpass; pass++) {
9285            write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32);
9286            tcg_temp_free_i32(tcg_res[pass]);
9287        }
9288        if (!is_q) {
9289            clear_vec_high(s, rd);
9290        }
9291    }
9292
9293    if (!TCGV_IS_UNUSED_PTR(fpst)) {
9294        tcg_temp_free_ptr(fpst);
9295    }
9296}
9297
9298/* Floating point op subgroup of C3.6.16. */
9299static void disas_simd_3same_float(DisasContext *s, uint32_t insn)
9300{
9301    /* For floating point ops, the U, size[1] and opcode bits
9302     * together indicate the operation. size[0] indicates single
9303     * or double.
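     * For example FADD has fpopcode 0x1a; setting size[1] gives FSUB
     * (0x3a), while setting U instead gives the pairwise FADDP (0x5a).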
9304     */
9305    int fpopcode = extract32(insn, 11, 5)
9306        | (extract32(insn, 23, 1) << 5)
9307        | (extract32(insn, 29, 1) << 6);
9308    int is_q = extract32(insn, 30, 1);
9309    int size = extract32(insn, 22, 1);
9310    int rm = extract32(insn, 16, 5);
9311    int rn = extract32(insn, 5, 5);
9312    int rd = extract32(insn, 0, 5);
9313
9314    int datasize = is_q ? 128 : 64;
9315    int esize = 32 << size;
9316    int elements = datasize / esize;
9317
9318    if (size == 1 && !is_q) {
9319        unallocated_encoding(s);
9320        return;
9321    }
9322
9323    switch (fpopcode) {
9324    case 0x58: /* FMAXNMP */
9325    case 0x5a: /* FADDP */
9326    case 0x5e: /* FMAXP */
9327    case 0x78: /* FMINNMP */
9328    case 0x7e: /* FMINP */
9329        if (size && !is_q) {
9330            unallocated_encoding(s);
9331            return;
9332        }
9333        handle_simd_3same_pair(s, is_q, 0, fpopcode, size ? MO_64 : MO_32,
9334                               rn, rm, rd);
9335        return;
9336    case 0x1b: /* FMULX */
9337    case 0x1f: /* FRECPS */
9338    case 0x3f: /* FRSQRTS */
9339    case 0x5d: /* FACGE */
9340    case 0x7d: /* FACGT */
9341    case 0x19: /* FMLA */
9342    case 0x39: /* FMLS */
9343    case 0x18: /* FMAXNM */
9344    case 0x1a: /* FADD */
9345    case 0x1c: /* FCMEQ */
9346    case 0x1e: /* FMAX */
9347    case 0x38: /* FMINNM */
9348    case 0x3a: /* FSUB */
9349    case 0x3e: /* FMIN */
9350    case 0x5b: /* FMUL */
9351    case 0x5c: /* FCMGE */
9352    case 0x5f: /* FDIV */
9353    case 0x7a: /* FABD */
9354    case 0x7c: /* FCMGT */
9355        if (!fp_access_check(s)) {
9356            return;
9357        }
9358
9359        handle_3same_float(s, size, elements, fpopcode, rd, rn, rm);
9360        return;
9361    default:
9362        unallocated_encoding(s);
9363        return;
9364    }
9365}
9366
9367/* Integer op subgroup of C3.6.16. */
9368static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
9369{
9370    int is_q = extract32(insn, 30, 1);
9371    int u = extract32(insn, 29, 1);
9372    int size = extract32(insn, 22, 2);
9373    int opcode = extract32(insn, 11, 5);
9374    int rm = extract32(insn, 16, 5);
9375    int rn = extract32(insn, 5, 5);
9376    int rd = extract32(insn, 0, 5);
9377    int pass;
9378
9379    switch (opcode) {
9380    case 0x13: /* MUL, PMUL */
9381        if (u && size != 0) {
9382            unallocated_encoding(s);
9383            return;
9384        }
9385        /* fall through */
9386    case 0x0: /* SHADD, UHADD */
9387    case 0x2: /* SRHADD, URHADD */
9388    case 0x4: /* SHSUB, UHSUB */
9389    case 0xc: /* SMAX, UMAX */
9390    case 0xd: /* SMIN, UMIN */
9391    case 0xe: /* SABD, UABD */
9392    case 0xf: /* SABA, UABA */
9393    case 0x12: /* MLA, MLS */
9394        if (size == 3) {
9395            unallocated_encoding(s);
9396            return;
9397        }
9398        break;
9399    case 0x16: /* SQDMULH, SQRDMULH */
9400        if (size == 0 || size == 3) {
9401            unallocated_encoding(s);
9402            return;
9403        }
9404        break;
9405    default:
9406        if (size == 3 && !is_q) {
9407            unallocated_encoding(s);
9408            return;
9409        }
9410        break;
9411    }
9412
9413    if (!fp_access_check(s)) {
9414        return;
9415    }
9416
9417    if (size == 3) {
9418        assert(is_q);
9419        for (pass = 0; pass < 2; pass++) {
9420            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
9421            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
9422            TCGv_i64 tcg_res = tcg_temp_new_i64();
9423
9424            read_vec_element(s, tcg_op1, rn, pass, MO_64);
9425            read_vec_element(s, tcg_op2, rm, pass, MO_64);
9426
9427            handle_3same_64(s, opcode, u, tcg_res, tcg_op1, tcg_op2);
9428
9429            write_vec_element(s, tcg_res, rd, pass, MO_64);
9430
9431            tcg_temp_free_i64(tcg_res);
9432            tcg_temp_free_i64(tcg_op1);
9433            tcg_temp_free_i64(tcg_op2);
9434        }
9435    } else {
9436        for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
9437            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
9438            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
9439            TCGv_i32 tcg_res = tcg_temp_new_i32();
9440            NeonGenTwoOpFn *genfn = NULL;
9441            NeonGenTwoOpEnvFn *genenvfn = NULL;
9442
9443            read_vec_element_i32(s, tcg_op1, rn, pass, MO_32);
9444            read_vec_element_i32(s, tcg_op2, rm, pass, MO_32);
9445
9446            switch (opcode) {
9447            case 0x0: /* SHADD, UHADD */
9448            {
9449                static NeonGenTwoOpFn * const fns[3][2] = {
9450                    { gen_helper_neon_hadd_s8, gen_helper_neon_hadd_u8 },
9451                    { gen_helper_neon_hadd_s16, gen_helper_neon_hadd_u16 },
9452                    { gen_helper_neon_hadd_s32, gen_helper_neon_hadd_u32 },
9453                };
9454                genfn = fns[size][u];
9455                break;
9456            }
9457            case 0x1: /* SQADD, UQADD */
9458            {
9459                static NeonGenTwoOpEnvFn * const fns[3][2] = {
9460                    { gen_helper_neon_qadd_s8, gen_helper_neon_qadd_u8 },
9461                    { gen_helper_neon_qadd_s16, gen_helper_neon_qadd_u16 },
9462                    { gen_helper_neon_qadd_s32, gen_helper_neon_qadd_u32 },
9463                };
9464                genenvfn = fns[size][u];
9465                break;
9466            }
9467            case 0x2: /* SRHADD, URHADD */
9468            {
9469                static NeonGenTwoOpFn * const fns[3][2] = {
9470                    { gen_helper_neon_rhadd_s8, gen_helper_neon_rhadd_u8 },
9471                    { gen_helper_neon_rhadd_s16, gen_helper_neon_rhadd_u16 },
9472                    { gen_helper_neon_rhadd_s32, gen_helper_neon_rhadd_u32 },
9473                };
9474                genfn = fns[size][u];
9475                break;
9476            }
9477            case 0x4: /* SHSUB, UHSUB */
9478            {
9479                static NeonGenTwoOpFn * const fns[3][2] = {
9480                    { gen_helper_neon_hsub_s8, gen_helper_neon_hsub_u8 },
9481                    { gen_helper_neon_hsub_s16, gen_helper_neon_hsub_u16 },
9482                    { gen_helper_neon_hsub_s32, gen_helper_neon_hsub_u32 },
9483                };
9484                genfn = fns[size][u];
9485                break;
9486            }
9487            case 0x5: /* SQSUB, UQSUB */
9488            {
9489                static NeonGenTwoOpEnvFn * const fns[3][2] = {
9490                    { gen_helper_neon_qsub_s8, gen_helper_neon_qsub_u8 },
9491                    { gen_helper_neon_qsub_s16, gen_helper_neon_qsub_u16 },
9492                    { gen_helper_neon_qsub_s32, gen_helper_neon_qsub_u32 },
9493                };
9494                genenvfn = fns[size][u];
9495                break;
9496            }
9497            case 0x6: /* CMGT, CMHI */
9498            {
9499                static NeonGenTwoOpFn * const fns[3][2] = {
9500                    { gen_helper_neon_cgt_s8, gen_helper_neon_cgt_u8 },
9501                    { gen_helper_neon_cgt_s16, gen_helper_neon_cgt_u16 },
9502                    { gen_helper_neon_cgt_s32, gen_helper_neon_cgt_u32 },
9503                };
9504                genfn = fns[size][u];
9505                break;
9506            }
9507            case 0x7: /* CMGE, CMHS */
9508            {
9509                static NeonGenTwoOpFn * const fns[3][2] = {
9510                    { gen_helper_neon_cge_s8, gen_helper_neon_cge_u8 },
9511                    { gen_helper_neon_cge_s16, gen_helper_neon_cge_u16 },
9512                    { gen_helper_neon_cge_s32, gen_helper_neon_cge_u32 },
9513                };
9514                genfn = fns[size][u];
9515                break;
9516            }
9517            case 0x8: /* SSHL, USHL */
9518            {
9519                static NeonGenTwoOpFn * const fns[3][2] = {
9520                    { gen_helper_neon_shl_s8, gen_helper_neon_shl_u8 },
9521                    { gen_helper_neon_shl_s16, gen_helper_neon_shl_u16 },
9522                    { gen_helper_neon_shl_s32, gen_helper_neon_shl_u32 },
9523                };
9524                genfn = fns[size][u];
9525                break;
9526            }
9527            case 0x9: /* SQSHL, UQSHL */
9528            {
9529                static NeonGenTwoOpEnvFn * const fns[3][2] = {
9530                    { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 },
9531                    { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 },
9532                    { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 },
9533                };
9534                genenvfn = fns[size][u];
9535                break;
9536            }
9537            case 0xa: /* SRSHL, URSHL */
9538            {
9539                static NeonGenTwoOpFn * const fns[3][2] = {
9540                    { gen_helper_neon_rshl_s8, gen_helper_neon_rshl_u8 },
9541                    { gen_helper_neon_rshl_s16, gen_helper_neon_rshl_u16 },
9542                    { gen_helper_neon_rshl_s32, gen_helper_neon_rshl_u32 },
9543                };
9544                genfn = fns[size][u];
9545                break;
9546            }
9547            case 0xb: /* SQRSHL, UQRSHL */
9548            {
9549                static NeonGenTwoOpEnvFn * const fns[3][2] = {
9550                    { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 },
9551                    { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 },
9552                    { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 },
9553                };
9554                genenvfn = fns[size][u];
9555                break;
9556            }
9557            case 0xc: /* SMAX, UMAX */
9558            {
9559                static NeonGenTwoOpFn * const fns[3][2] = {
9560                    { gen_helper_neon_max_s8, gen_helper_neon_max_u8 },
9561                    { gen_helper_neon_max_s16, gen_helper_neon_max_u16 },
9562                    { gen_max_s32, gen_max_u32 },
9563                };
9564                genfn = fns[size][u];
9565                break;
9566            }
9568            case 0xd: /* SMIN, UMIN */
9569            {
9570                static NeonGenTwoOpFn * const fns[3][2] = {
9571                    { gen_helper_neon_min_s8, gen_helper_neon_min_u8 },
9572                    { gen_helper_neon_min_s16, gen_helper_neon_min_u16 },
9573                    { gen_min_s32, gen_min_u32 },
9574                };
9575                genfn = fns[size][u];
9576                break;
9577            }
9578            case 0xe: /* SABD, UABD */
9579            case 0xf: /* SABA, UABA */
9580            {
9581                static NeonGenTwoOpFn * const fns[3][2] = {
9582                    { gen_helper_neon_abd_s8, gen_helper_neon_abd_u8 },
9583                    { gen_helper_neon_abd_s16, gen_helper_neon_abd_u16 },
9584                    { gen_helper_neon_abd_s32, gen_helper_neon_abd_u32 },
9585                };
9586                genfn = fns[size][u];
9587                break;
9588            }
9589            case 0x10: /* ADD, SUB */
9590            {
9591                static NeonGenTwoOpFn * const fns[3][2] = {
9592                    { gen_helper_neon_add_u8, gen_helper_neon_sub_u8 },
9593                    { gen_helper_neon_add_u16, gen_helper_neon_sub_u16 },
9594                    { tcg_gen_add_i32, tcg_gen_sub_i32 },
9595                };
9596                genfn = fns[size][u];
9597                break;
9598            }
9599            case 0x11: /* CMTST, CMEQ */
9600            {
9601                static NeonGenTwoOpFn * const fns[3][2] = {
9602                    { gen_helper_neon_tst_u8, gen_helper_neon_ceq_u8 },
9603                    { gen_helper_neon_tst_u16, gen_helper_neon_ceq_u16 },
9604                    { gen_helper_neon_tst_u32, gen_helper_neon_ceq_u32 },
9605                };
9606                genfn = fns[size][u];
9607                break;
9608            }
9609            case 0x13: /* MUL, PMUL */
9610                if (u) {
9611                    /* PMUL */
9612                    assert(size == 0);
9613                    genfn = gen_helper_neon_mul_p8;
9614                    break;
9615                }
9616                /* fall through: MUL */
9617            case 0x12: /* MLA, MLS */
9618            {
9619                static NeonGenTwoOpFn * const fns[3] = {
9620                    gen_helper_neon_mul_u8,
9621                    gen_helper_neon_mul_u16,
9622                    tcg_gen_mul_i32,
9623                };
9624                genfn = fns[size];
9625                break;
9626            }
9627            case 0x16: /* SQDMULH, SQRDMULH */
9628            {
9629                static NeonGenTwoOpEnvFn * const fns[2][2] = {
9630                    { gen_helper_neon_qdmulh_s16, gen_helper_neon_qrdmulh_s16 },
9631                    { gen_helper_neon_qdmulh_s32, gen_helper_neon_qrdmulh_s32 },
9632                };
9633                assert(size == 1 || size == 2);
9634                genenvfn = fns[size - 1][u];
9635                break;
9636            }
9637            default:
9638                g_assert_not_reached();
9639            }
9640
9641            if (genenvfn) {
9642                genenvfn(tcg_res, cpu_env, tcg_op1, tcg_op2);
9643            } else {
9644                genfn(tcg_res, tcg_op1, tcg_op2);
9645            }
9646
9647            if (opcode == 0xf || opcode == 0x12) {
9648                /* SABA, UABA, MLA, MLS: accumulating ops */
9649                static NeonGenTwoOpFn * const fns[3][2] = {
9650                    { gen_helper_neon_add_u8, gen_helper_neon_sub_u8 },
9651                    { gen_helper_neon_add_u16, gen_helper_neon_sub_u16 },
9652                    { tcg_gen_add_i32, tcg_gen_sub_i32 },
9653                };
9654                bool is_sub = (opcode == 0x12 && u); /* MLS */
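                    /* SABA/UABA always accumulate by addition; only MLS
                     * (opcode 0x12 with U set) subtracts, computing
                     * rd[i] = rd[i] - rn[i] * rm[i] per element.
                     */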
9655
9656                genfn = fns[size][is_sub];
9657                read_vec_element_i32(s, tcg_op1, rd, pass, MO_32);
9658                genfn(tcg_res, tcg_op1, tcg_res);
9659            }
9660
9661            write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
9662
9663            tcg_temp_free_i32(tcg_res);
9664            tcg_temp_free_i32(tcg_op1);
9665            tcg_temp_free_i32(tcg_op2);
9666        }
9667    }
9668
9669    if (!is_q) {
9670        clear_vec_high(s, rd);
9671    }
9672}
9673
9674/* C3.6.16 AdvSIMD three same
9675 *  31  30  29  28       24 23  22  21 20  16 15    11  10 9    5 4    0
9676 * +---+---+---+-----------+------+---+------+--------+---+------+------+
9677 * | 0 | Q | U | 0 1 1 1 0 | size | 1 |  Rm  | opcode | 1 |  Rn  |  Rd  |
9678 * +---+---+---+-----------+------+---+------+--------+---+------+------+
9679 */
9680static void disas_simd_three_reg_same(DisasContext *s, uint32_t insn)
9681{
9682    int opcode = extract32(insn, 11, 5);
9683
9684    switch (opcode) {
9685    case 0x3: /* logic ops */
9686        disas_simd_3same_logic(s, insn);
9687        break;
9688    case 0x17: /* ADDP */
9689    case 0x14: /* SMAXP, UMAXP */
9690    case 0x15: /* SMINP, UMINP */
9691    {
9692        /* Pairwise operations */
9693        int is_q = extract32(insn, 30, 1);
9694        int u = extract32(insn, 29, 1);
9695        int size = extract32(insn, 22, 2);
9696        int rm = extract32(insn, 16, 5);
9697        int rn = extract32(insn, 5, 5);
9698        int rd = extract32(insn, 0, 5);
9699        if (opcode == 0x17) {
9700            if (u || (size == 3 && !is_q)) {
9701                unallocated_encoding(s);
9702                return;
9703            }
9704        } else {
9705            if (size == 3) {
9706                unallocated_encoding(s);
9707                return;
9708            }
9709        }
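            /* In other words ADDP is the only pairwise op with a 64-bit
             * element form, and it needs the full 128-bit vector
             * (ADDP.2D); the pairwise max/min ops stop at 32-bit elements.
             */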
9710        handle_simd_3same_pair(s, is_q, u, opcode, size, rn, rm, rd);
9711        break;
9712    }
9713    case 0x18 ... 0x31:
9714        /* floating point ops, sz[1] and U are part of opcode */
9715        disas_simd_3same_float(s, insn);
9716        break;
9717    default:
9718        disas_simd_3same_int(s, insn);
9719        break;
9720    }
9721}
9722
9723static void handle_2misc_widening(DisasContext *s, int opcode, bool is_q,
9724                                  int size, int rn, int rd)
9725{
9726    /* Handle 2-reg-misc ops which are widening (so each size element
9727     * in the source becomes a 2*size element in the destination).
9728     * The only instruction like this is FCVTL.
9729     */
9730    int pass;
9731
9732    if (size == 3) {
9733        /* 32 -> 64 bit fp conversion */
9734        TCGv_i64 tcg_res[2];
9735        int srcelt = is_q ? 2 : 0;
9736
9737        for (pass = 0; pass < 2; pass++) {
9738            TCGv_i32 tcg_op = tcg_temp_new_i32();
9739            tcg_res[pass] = tcg_temp_new_i64();
9740
9741            read_vec_element_i32(s, tcg_op, rn, srcelt + pass, MO_32);
9742            gen_helper_vfp_fcvtds(tcg_res[pass], tcg_op, cpu_env);
9743            tcg_temp_free_i32(tcg_op);
9744        }
9745        for (pass = 0; pass < 2; pass++) {
9746            write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
9747            tcg_temp_free_i64(tcg_res[pass]);
9748        }
9749    } else {
9750        /* 16 -> 32 bit fp conversion */
9751        int srcelt = is_q ? 4 : 0;
9752        TCGv_i32 tcg_res[4];
9753
9754        for (pass = 0; pass < 4; pass++) {
9755            tcg_res[pass] = tcg_temp_new_i32();
9756
9757            read_vec_element_i32(s, tcg_res[pass], rn, srcelt + pass, MO_16);
9758            gen_helper_vfp_fcvt_f16_to_f32(tcg_res[pass], tcg_res[pass],
9759                                           cpu_env);
9760        }
9761        for (pass = 0; pass < 4; pass++) {
9762            write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32);
9763            tcg_temp_free_i32(tcg_res[pass]);
9764        }
9765    }
9766}
9767
9768static void handle_rev(DisasContext *s, int opcode, bool u,
9769                       bool is_q, int size, int rn, int rd)
9770{
9771    int op = (opcode << 1) | u;
9772    int opsz = op + size;
9773    int grp_size = 3 - opsz;
9774    int dsize = is_q ? 128 : 64;
9775    int i;
9776
9777    if (opsz >= 3) {
9778        unallocated_encoding(s);
9779        return;
9780    }
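        /* grp_size is log2(elements per reversed group): e.g. REV32
         * (op == 1) on 16-bit elements (size == 1) gives grp_size == 1,
         * swapping pairs of halfwords within each 32-bit container,
         * while REV64 on bytes (op == 0, size == 0) gives grp_size == 3.
         */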
9781
9782    if (!fp_access_check(s)) {
9783        return;
9784    }
9785
9786    if (size == 0) {
9787        /* Special case bytes, use bswap op on each group of elements */
9788        int groups = dsize / (8 << grp_size);
9789
9790        for (i = 0; i < groups; i++) {
9791            TCGv_i64 tcg_tmp = tcg_temp_new_i64();
9792
9793            read_vec_element(s, tcg_tmp, rn, i, grp_size);
9794            switch (grp_size) {
9795            case MO_16:
9796                tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp);
9797                break;
9798            case MO_32:
9799                tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp);
9800                break;
9801            case MO_64:
9802                tcg_gen_bswap64_i64(tcg_tmp, tcg_tmp);
9803                break;
9804            default:
9805                g_assert_not_reached();
9806            }
9807            write_vec_element(s, tcg_tmp, rd, i, grp_size);
9808            tcg_temp_free_i64(tcg_tmp);
9809        }
9810        if (!is_q) {
9811            clear_vec_high(s, rd);
9812        }
9813    } else {
9814        int revmask = (1 << grp_size) - 1;
9815        int esize = 8 << size;
9816        int elements = dsize / esize;
9817        TCGv_i64 tcg_rn = tcg_temp_new_i64();
9818        TCGv_i64 tcg_rd = tcg_const_i64(0);
9819        TCGv_i64 tcg_rd_hi = tcg_const_i64(0);
9820
9821        for (i = 0; i < elements; i++) {
9822            int e_rev = (i & 0xf) ^ revmask;
9823            int off = e_rev * esize;
9824            read_vec_element(s, tcg_rn, rn, i, size);
9825            if (off >= 64) {
9826                tcg_gen_deposit_i64(tcg_rd_hi, tcg_rd_hi,
9827                                    tcg_rn, off - 64, esize);
9828            } else {
9829                tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, off, esize);
9830            }
9831        }
9832        write_vec_element(s, tcg_rd, rd, 0, MO_64);
9833        write_vec_element(s, tcg_rd_hi, rd, 1, MO_64);
9834
9835        tcg_temp_free_i64(tcg_rd_hi);
9836        tcg_temp_free_i64(tcg_rd);
9837        tcg_temp_free_i64(tcg_rn);
9838    }
9839}
9840
9841static void handle_2misc_pairwise(DisasContext *s, int opcode, bool u,
9842                                  bool is_q, int size, int rn, int rd)
9843{
9844    /* Implement the pairwise operations from 2-misc:
9845     * SADDLP, UADDLP, SADALP, UADALP.
9846     * These all add pairs of elements in the input to produce a
9847     * double-width result element in the output (possibly accumulating).
9848     */
9849    bool accum = (opcode == 0x6);
9850    int maxpass = is_q ? 2 : 1;
9851    int pass;
9852    TCGv_i64 tcg_res[2];
9853
9854    if (size == 2) {
9855        /* 32 + 32 -> 64 op */
9856        TCGMemOp memop = size + (u ? 0 : MO_SIGN);
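            /* i.e. each 32-bit source element is read sign-extended for
             * SADDLP/SADALP (u == 0) and zero-extended for the unsigned
             * forms, so a plain 64-bit add yields the widened sum.
             */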
9857
9858        for (pass = 0; pass < maxpass; pass++) {
9859            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
9860            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
9861
9862            tcg_res[pass] = tcg_temp_new_i64();
9863
9864            read_vec_element(s, tcg_op1, rn, pass * 2, memop);
9865            read_vec_element(s, tcg_op2, rn, pass * 2 + 1, memop);
9866            tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2);
9867            if (accum) {
9868                read_vec_element(s, tcg_op1, rd, pass, MO_64);
9869                tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
9870            }
9871
9872            tcg_temp_free_i64(tcg_op1);
9873            tcg_temp_free_i64(tcg_op2);
9874        }
9875    } else {
9876        for (pass = 0; pass < maxpass; pass++) {
9877            TCGv_i64 tcg_op = tcg_temp_new_i64();
9878            NeonGenOneOpFn *genfn;
9879            static NeonGenOneOpFn * const fns[2][2] = {
9880                { gen_helper_neon_addlp_s8,  gen_helper_neon_addlp_u8 },
9881                { gen_helper_neon_addlp_s16,  gen_helper_neon_addlp_u16 },
9882            };
9883
9884            genfn = fns[size][u];
9885
9886            tcg_res[pass] = tcg_temp_new_i64();
9887
9888            read_vec_element(s, tcg_op, rn, pass, MO_64);
9889            genfn(tcg_res[pass], tcg_op);
9890
9891            if (accum) {
9892                read_vec_element(s, tcg_op, rd, pass, MO_64);
9893                if (size == 0) {
9894                    gen_helper_neon_addl_u16(tcg_res[pass],
9895                                             tcg_res[pass], tcg_op);
9896                } else {
9897                    gen_helper_neon_addl_u32(tcg_res[pass],
9898                                             tcg_res[pass], tcg_op);
9899                }
9900            }
9901            tcg_temp_free_i64(tcg_op);
9902        }
9903    }
9904    if (!is_q) {
9905        tcg_res[1] = tcg_const_i64(0);
9906    }
9907    for (pass = 0; pass < 2; pass++) {
9908        write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
9909        tcg_temp_free_i64(tcg_res[pass]);
9910    }
9911}
9912
9913static void handle_shll(DisasContext *s, bool is_q, int size, int rn, int rd)
9914{
9915    /* Implement SHLL and SHLL2 */
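    /* Each source element is widened and then shifted left by its
     * original width, e.g. SHLL Vd.8H, Vn.8B, #8 zero-extends each
     * byte to 16 bits and shifts by 8; SHLL2 (is_q) instead reads
     * the high half of the source register.
     */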
9916    int pass;
9917    int part = is_q ? 2 : 0;
9918    TCGv_i64 tcg_res[2];
9919
9920    for (pass = 0; pass < 2; pass++) {
9921        static NeonGenWidenFn * const widenfns[3] = {
9922            gen_helper_neon_widen_u8,
9923            gen_helper_neon_widen_u16,
9924            tcg_gen_extu_i32_i64,
9925        };
9926        NeonGenWidenFn *widenfn = widenfns[size];
9927        TCGv_i32 tcg_op = tcg_temp_new_i32();
9928
9929        read_vec_element_i32(s, tcg_op, rn, part + pass, MO_32);
9930        tcg_res[pass] = tcg_temp_new_i64();
9931        widenfn(tcg_res[pass], tcg_op);
9932        tcg_gen_shli_i64(tcg_res[pass], tcg_res[pass], 8 << size);
9933
9934        tcg_temp_free_i32(tcg_op);
9935    }
9936
9937    for (pass = 0; pass < 2; pass++) {
9938        write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
9939        tcg_temp_free_i64(tcg_res[pass]);
9940    }
9941}
9942
9943/* C3.6.17 AdvSIMD two reg misc
9944 *   31  30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
9945 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
9946 * | 0 | Q | U | 0 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
9947 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
9948 */
9949static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
9950{
9951    int size = extract32(insn, 22, 2);
9952    int opcode = extract32(insn, 12, 5);
9953    bool u = extract32(insn, 29, 1);
9954    bool is_q = extract32(insn, 30, 1);
9955    int rn = extract32(insn, 5, 5);
9956    int rd = extract32(insn, 0, 5);
9957    bool need_fpstatus = false;
9958    bool need_rmode = false;
9959    int rmode = -1;
9960    TCGv_i32 tcg_rmode;
9961    TCGv_ptr tcg_fpstatus;
9962
9963    switch (opcode) {
9964    case 0x0: /* REV64, REV32 */
9965    case 0x1: /* REV16 */
9966        handle_rev(s, opcode, u, is_q, size, rn, rd);
9967        return;
9968    case 0x5: /* CNT, NOT, RBIT */
9969        if (u && size == 0) {
9970            /* NOT: adjust size so we can use the 64-bits-at-a-time loop. */
9971            size = 3;
9972            break;
9973        } else if (u && size == 1) {
9974            /* RBIT */
9975            break;
9976        } else if (!u && size == 0) {
9977            /* CNT */
9978            break;
9979        }
9980        unallocated_encoding(s);
9981        return;
9982    case 0x12: /* XTN, XTN2, SQXTUN, SQXTUN2 */
9983    case 0x14: /* SQXTN, SQXTN2, UQXTN, UQXTN2 */
9984        if (size == 3) {
9985            unallocated_encoding(s);
9986            return;
9987        }
9988        if (!fp_access_check(s)) {
9989            return;
9990        }
9991
9992        handle_2misc_narrow(s, false, opcode, u, is_q, size, rn, rd);
9993        return;
9994    case 0x4: /* CLS, CLZ */
9995        if (size == 3) {
9996            unallocated_encoding(s);
9997            return;
9998        }
9999        break;
10000    case 0x2: /* SADDLP, UADDLP */
10001    case 0x6: /* SADALP, UADALP */
10002        if (size == 3) {
10003            unallocated_encoding(s);
10004            return;
10005        }
10006        if (!fp_access_check(s)) {
10007            return;
10008        }
10009        handle_2misc_pairwise(s, opcode, u, is_q, size, rn, rd);
10010        return;
10011    case 0x13: /* SHLL, SHLL2 */
10012        if (u == 0 || size == 3) {
10013            unallocated_encoding(s);
10014            return;
10015        }
10016        if (!fp_access_check(s)) {
10017            return;
10018        }
10019        handle_shll(s, is_q, size, rn, rd);
10020        return;
10021    case 0xa: /* CMLT (zero) */
10022        if (u == 1) {
10023            unallocated_encoding(s);
10024            return;
10025        }
10026        /* fall through */
10027    case 0x8: /* CMGT, CMGE */
10028    case 0x9: /* CMEQ, CMLE */
10029    case 0xb: /* ABS, NEG */
10030        if (size == 3 && !is_q) {
10031            unallocated_encoding(s);
10032            return;
10033        }
10034        break;
10035    case 0x3: /* SUQADD, USQADD */
10036        if (size == 3 && !is_q) {
10037            unallocated_encoding(s);
10038            return;
10039        }
10040        if (!fp_access_check(s)) {
10041            return;
10042        }
10043        handle_2misc_satacc(s, false, u, is_q, size, rn, rd);
10044        return;
10045    case 0x7: /* SQABS, SQNEG */
10046        if (size == 3 && !is_q) {
10047            unallocated_encoding(s);
10048            return;
10049        }
10050        break;
10051    case 0xc ... 0xf:
10052    case 0x16 ... 0x1d:
10053    case 0x1f:
10054    {
10055        /* Floating point: U, size[1] and opcode indicate operation;
10056         * size[0] indicates single or double precision.
10057         */
10058        int is_double = extract32(size, 0, 1);
10059        opcode |= (extract32(size, 1, 1) << 5) | (u << 6);
10060        size = is_double ? 3 : 2;
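             /* e.g. FNEG has base opcode 0xf with U == 1 and size[1] == 1,
              * which remaps to 0x6f in the switch below; FABS (U == 0)
              * becomes 0x2f.
              */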
10061        switch (opcode) {
10062        case 0x2f: /* FABS */
10063        case 0x6f: /* FNEG */
10064            if (size == 3 && !is_q) {
10065                unallocated_encoding(s);
10066                return;
10067            }
10068            break;
10069        case 0x1d: /* SCVTF */
10070        case 0x5d: /* UCVTF */
10071        {
10072            bool is_signed = (opcode == 0x1d);
10073            int elements = is_double ? 2 : is_q ? 4 : 2;
10074            if (is_double && !is_q) {
10075                unallocated_encoding(s);
10076                return;
10077            }
10078            if (!fp_access_check(s)) {
10079                return;
10080            }
10081            handle_simd_intfp_conv(s, rd, rn, elements, is_signed, 0, size);
10082            return;
10083        }
10084        case 0x2c: /* FCMGT (zero) */
10085        case 0x2d: /* FCMEQ (zero) */
10086        case 0x2e: /* FCMLT (zero) */
10087        case 0x6c: /* FCMGE (zero) */
10088        case 0x6d: /* FCMLE (zero) */
10089            if (size == 3 && !is_q) {
10090                unallocated_encoding(s);
10091                return;
10092            }
10093            handle_2misc_fcmp_zero(s, opcode, false, u, is_q, size, rn, rd);
10094            return;
10095        case 0x7f: /* FSQRT */
10096            if (size == 3 && !is_q) {
10097                unallocated_encoding(s);
10098                return;
10099            }
10100            break;
10101        case 0x1a: /* FCVTNS */
10102        case 0x1b: /* FCVTMS */
10103        case 0x3a: /* FCVTPS */
10104        case 0x3b: /* FCVTZS */
10105        case 0x5a: /* FCVTNU */
10106        case 0x5b: /* FCVTMU */
10107        case 0x7a: /* FCVTPU */
10108        case 0x7b: /* FCVTZU */
10109            need_fpstatus = true;
10110            need_rmode = true;
10111            rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
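                 /* These two bits map directly onto the FPROUNDING_* enum:
                  * e.g. FCVTMS (0x1b) gives 2 (FPROUNDING_NEGINF) and
                  * FCVTZS (0x3b) gives 3 (FPROUNDING_ZERO).
                  */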
10112            if (size == 3 && !is_q) {
10113                unallocated_encoding(s);
10114                return;
10115            }
10116            break;
10117        case 0x5c: /* FCVTAU */
10118        case 0x1c: /* FCVTAS */
10119            need_fpstatus = true;
10120            need_rmode = true;
10121            rmode = FPROUNDING_TIEAWAY;
10122            if (size == 3 && !is_q) {
10123                unallocated_encoding(s);
10124                return;
10125            }
10126            break;
10127        case 0x3c: /* URECPE */
10128            if (size == 3) {
10129                unallocated_encoding(s);
10130                return;
10131            }
10132            /* fall through */
10133        case 0x3d: /* FRECPE */
10134        case 0x7d: /* FRSQRTE */
10135            if (size == 3 && !is_q) {
10136                unallocated_encoding(s);
10137                return;
10138            }
10139            if (!fp_access_check(s)) {
10140                return;
10141            }
10142            handle_2misc_reciprocal(s, opcode, false, u, is_q, size, rn, rd);
10143            return;
10144        case 0x56: /* FCVTXN, FCVTXN2 */
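                 /* FCVTXN narrows double to single using round-to-odd,
                  * so that narrowing the result again does not incur
                  * double rounding.
                  */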
10145            if (size == 2) {
10146                unallocated_encoding(s);
10147                return;
10148            }
10149            /* fall through */
10150        case 0x16: /* FCVTN, FCVTN2 */
10151            /* handle_2misc_narrow does a 2*size -> size operation, but these
10152             * instructions encode the source size rather than dest size.
10153             */
10154            if (!fp_access_check(s)) {
10155                return;
10156            }
10157            handle_2misc_narrow(s, false, opcode, 0, is_q, size - 1, rn, rd);
10158            return;
10159        case 0x17: /* FCVTL, FCVTL2 */
10160            if (!fp_access_check(s)) {
10161                return;
10162            }
10163            handle_2misc_widening(s, opcode, is_q, size, rn, rd);
10164            return;
10165        case 0x18: /* FRINTN */
10166        case 0x19: /* FRINTM */
10167        case 0x38: /* FRINTP */
10168        case 0x39: /* FRINTZ */
10169            need_rmode = true;
10170            rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
10171            /* fall through */
10172        case 0x59: /* FRINTX */
10173        case 0x79: /* FRINTI */
10174            need_fpstatus = true;
10175            if (size == 3 && !is_q) {
10176                unallocated_encoding(s);
10177                return;
10178            }
10179            break;
10180        case 0x58: /* FRINTA */
10181            need_rmode = true;
10182            rmode = FPROUNDING_TIEAWAY;
10183            need_fpstatus = true;
10184            if (size == 3 && !is_q) {
10185                unallocated_encoding(s);
10186                return;
10187            }
10188            break;
10189        case 0x7c: /* URSQRTE */
10190            if (size == 3) {
10191                unallocated_encoding(s);
10192                return;
10193            }
10194            need_fpstatus = true;
10195            break;
10196        default:
10197            unallocated_encoding(s);
10198            return;
10199        }
10200        break;
10201    }
10202    default:
10203        unallocated_encoding(s);
10204        return;
10205    }
10206
10207    if (!fp_access_check(s)) {
10208        return;
10209    }
10210
10211    if (need_fpstatus) {
10212        tcg_fpstatus = get_fpstatus_ptr();
10213    } else {
10214        TCGV_UNUSED_PTR(tcg_fpstatus);
10215    }
10216    if (need_rmode) {
10217        tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
10218        gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
10219    } else {
10220        TCGV_UNUSED_I32(tcg_rmode);
10221    }
10222
10223    if (size == 3) {
10224        /* All 64-bit element operations can be shared with scalar 2misc */
10225        int pass;
10226
10227        for (pass = 0; pass < (is_q ? 2 : 1); pass++) {
10228            TCGv_i64 tcg_op = tcg_temp_new_i64();
10229            TCGv_i64 tcg_res = tcg_temp_new_i64();
10230
10231            read_vec_element(s, tcg_op, rn, pass, MO_64);
10232
10233            handle_2misc_64(s, opcode, u, tcg_res, tcg_op,
10234                            tcg_rmode, tcg_fpstatus);
10235
10236            write_vec_element(s, tcg_res, rd, pass, MO_64);
10237
10238            tcg_temp_free_i64(tcg_res);
10239            tcg_temp_free_i64(tcg_op);
10240        }
10241    } else {
10242        int pass;
10243
10244        for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
10245            TCGv_i32 tcg_op = tcg_temp_new_i32();
10246            TCGv_i32 tcg_res = tcg_temp_new_i32();
10247            TCGCond cond;
10248
10249            read_vec_element_i32(s, tcg_op, rn, pass, MO_32);
10250
10251            if (size == 2) {
10252                /* Special cases for 32 bit elements */
10253                switch (opcode) {
10254                case 0xa: /* CMLT */
10255                    /* 32 bit integer comparison against zero, result is
10256                     * test ? (2^32 - 1) : 0. We implement via setcond(test),
10257                     * which yields 0 or 1, then negate to get 0 or all-ones.
10258                     */
10259                    cond = TCG_COND_LT;
10260                do_cmop:
10261                    tcg_gen_setcondi_i32(cond, tcg_res, tcg_op, 0);
10262                    tcg_gen_neg_i32(tcg_res, tcg_res);
10263                    break;
10264                case 0x8: /* CMGT, CMGE */
10265                    cond = u ? TCG_COND_GE : TCG_COND_GT;
10266                    goto do_cmop;
10267                case 0x9: /* CMEQ, CMLE */
10268                    cond = u ? TCG_COND_LE : TCG_COND_EQ;
10269                    goto do_cmop;
10270                case 0x4: /* CLS */
10271                    if (u) {
10272                        tcg_gen_clzi_i32(tcg_res, tcg_op, 32);
10273                    } else {
10274                        tcg_gen_clrsb_i32(tcg_res, tcg_op);
10275                    }
10276                    break;
10277                case 0x7: /* SQABS, SQNEG */
10278                    if (u) {
10279                        gen_helper_neon_qneg_s32(tcg_res, cpu_env, tcg_op);
10280                    } else {
10281                        gen_helper_neon_qabs_s32(tcg_res, cpu_env, tcg_op);
10282                    }
10283                    break;
10284                case 0xb: /* ABS, NEG */
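                         /* NEG is a plain negate; ABS is implemented as
                          * res = (op > 0) ? op : -op via movcond, giving
                          * the wrapping architectural behaviour (INT_MIN
                          * stays INT_MIN; only SQABS saturates).
                          */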
10285                    if (u) {
10286                        tcg_gen_neg_i32(tcg_res, tcg_op);
10287                    } else {
10288                        TCGv_i32 tcg_zero = tcg_const_i32(0);
10289                        tcg_gen_neg_i32(tcg_res, tcg_op);
10290                        tcg_gen_movcond_i32(TCG_COND_GT, tcg_res, tcg_op,
10291                                            tcg_zero, tcg_op, tcg_res);
10292                        tcg_temp_free_i32(tcg_zero);
10293                    }
10294                    break;
10295                case 0x2f: /* FABS */
10296                    gen_helper_vfp_abss(tcg_res, tcg_op);
10297                    break;
10298                case 0x6f: /* FNEG */
10299                    gen_helper_vfp_negs(tcg_res, tcg_op);
10300                    break;
10301                case 0x7f: /* FSQRT */
10302                    gen_helper_vfp_sqrts(tcg_res, tcg_op, cpu_env);
10303                    break;
10304                case 0x1a: /* FCVTNS */
10305                case 0x1b: /* FCVTMS */
10306                case 0x1c: /* FCVTAS */
10307                case 0x3a: /* FCVTPS */
10308                case 0x3b: /* FCVTZS */
10309                {
10310                    TCGv_i32 tcg_shift = tcg_const_i32(0);
10311                    gen_helper_vfp_tosls(tcg_res, tcg_op,
10312                                         tcg_shift, tcg_fpstatus);
10313                    tcg_temp_free_i32(tcg_shift);
10314                    break;
10315                }
10316                case 0x5a: /* FCVTNU */
10317                case 0x5b: /* FCVTMU */
10318                case 0x5c: /* FCVTAU */
10319                case 0x7a: /* FCVTPU */
10320                case 0x7b: /* FCVTZU */
10321                {
10322                    TCGv_i32 tcg_shift = tcg_const_i32(0);
10323                    gen_helper_vfp_touls(tcg_res, tcg_op,
10324                                         tcg_shift, tcg_fpstatus);
10325                    tcg_temp_free_i32(tcg_shift);
10326                    break;
10327                }
10328                case 0x18: /* FRINTN */
10329                case 0x19: /* FRINTM */
10330                case 0x38: /* FRINTP */
10331                case 0x39: /* FRINTZ */
10332                case 0x58: /* FRINTA */
10333                case 0x79: /* FRINTI */
10334                    gen_helper_rints(tcg_res, tcg_op, tcg_fpstatus);
10335                    break;
10336                case 0x59: /* FRINTX */
10337                    gen_helper_rints_exact(tcg_res, tcg_op, tcg_fpstatus);
10338                    break;
10339                case 0x7c: /* URSQRTE */
10340                    gen_helper_rsqrte_u32(tcg_res, tcg_op, tcg_fpstatus);
10341                    break;
10342                default:
10343                    g_assert_not_reached();
10344                }
10345            } else {
10346                /* Use helpers for 8 and 16 bit elements */
10347                switch (opcode) {
10348                case 0x5: /* CNT, RBIT */
10349                    /* For these two insns size is part of the opcode specifier
10350                     * (handled earlier); they always operate on byte elements.
10351                     */
10352                    if (u) {
10353                        gen_helper_neon_rbit_u8(tcg_res, tcg_op);
10354                    } else {
10355                        gen_helper_neon_cnt_u8(tcg_res, tcg_op);
10356                    }
10357                    break;
10358                case 0x7: /* SQABS, SQNEG */
10359                {
10360                    NeonGenOneOpEnvFn *genfn;
10361                    static NeonGenOneOpEnvFn * const fns[2][2] = {
10362                        { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 },
10363                        { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 },
10364                    };
10365                    genfn = fns[size][u];
10366                    genfn(tcg_res, cpu_env, tcg_op);
10367                    break;
10368                }
10369                case 0x8: /* CMGT, CMGE */
10370                case 0x9: /* CMEQ, CMLE */
10371                case 0xa: /* CMLT */
10372                {
10373                    static NeonGenTwoOpFn * const fns[3][2] = {
10374                        { gen_helper_neon_cgt_s8, gen_helper_neon_cgt_s16 },
10375                        { gen_helper_neon_cge_s8, gen_helper_neon_cge_s16 },
10376                        { gen_helper_neon_ceq_u8, gen_helper_neon_ceq_u16 },
10377                    };
10378                    NeonGenTwoOpFn *genfn;
10379                    int comp;
10380                    bool reverse;
10381                    TCGv_i32 tcg_zero = tcg_const_i32(0);
10382
10383                    /* comp = index into [CMGT, CMGE, CMEQ, CMLE, CMLT] */
10384                    comp = (opcode - 0x8) * 2 + u;
10385                    /* ...but LE, LT are implemented as reverse GE, GT */
10386                    reverse = (comp > 2);
10387                    if (reverse) {
10388                        comp = 4 - comp;
10389                    }
10390                    genfn = fns[comp][size];
10391                    if (reverse) {
10392                        genfn(tcg_res, tcg_zero, tcg_op);
10393                    } else {
10394                        genfn(tcg_res, tcg_op, tcg_zero);
10395                    }
10396                    tcg_temp_free_i32(tcg_zero);
10397                    break;
10398                }
10399                case 0xb: /* ABS, NEG */
10400                    if (u) {
10401                        TCGv_i32 tcg_zero = tcg_const_i32(0);
10402                        if (size) {
10403                            gen_helper_neon_sub_u16(tcg_res, tcg_zero, tcg_op);
10404                        } else {
10405                            gen_helper_neon_sub_u8(tcg_res, tcg_zero, tcg_op);
10406                        }
10407                        tcg_temp_free_i32(tcg_zero);
10408                    } else {
10409                        if (size) {
10410                            gen_helper_neon_abs_s16(tcg_res, tcg_op);
10411                        } else {
10412                            gen_helper_neon_abs_s8(tcg_res, tcg_op);
10413                        }
10414                    }
10415                    break;
10416                case 0x4: /* CLS, CLZ */
10417                    if (u) {
10418                        if (size == 0) {
10419                            gen_helper_neon_clz_u8(tcg_res, tcg_op);
10420                        } else {
10421                            gen_helper_neon_clz_u16(tcg_res, tcg_op);
10422                        }
10423                    } else {
10424                        if (size == 0) {
10425                            gen_helper_neon_cls_s8(tcg_res, tcg_op);
10426                        } else {
10427                            gen_helper_neon_cls_s16(tcg_res, tcg_op);
10428                        }
10429                    }
10430                    break;
10431                default:
10432                    g_assert_not_reached();
10433                }
10434            }
10435
10436            write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
10437
10438            tcg_temp_free_i32(tcg_res);
10439            tcg_temp_free_i32(tcg_op);
10440        }
10441    }
10442    if (!is_q) {
10443        clear_vec_high(s, rd);
10444    }
10445
10446    if (need_rmode) {
10447        gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
10448        tcg_temp_free_i32(tcg_rmode);
10449    }
10450    if (need_fpstatus) {
10451        tcg_temp_free_ptr(tcg_fpstatus);
10452    }
10453}
10454
10455/* C3.6.13 AdvSIMD scalar x indexed element
10456 *  31 30  29 28       24 23  22 21  20  19  16 15 12  11  10 9    5 4    0
10457 * +-----+---+-----------+------+---+---+------+-----+---+---+------+------+
10458 * | 0 1 | U | 1 1 1 1 1 | size | L | M |  Rm  | opc | H | 0 |  Rn  |  Rd  |
10459 * +-----+---+-----------+------+---+---+------+-----+---+---+------+------+
10460 * C3.6.18 AdvSIMD vector x indexed element
10461 *   31  30  29 28       24 23  22 21  20  19  16 15 12  11  10 9    5 4    0
10462 * +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+
10463 * | 0 | Q | U | 0 1 1 1 1 | size | L | M |  Rm  | opc | H | 0 |  Rn  |  Rd  |
10464 * +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+
10465 */
10466static void disas_simd_indexed(DisasContext *s, uint32_t insn)
10467{
10468    /* This encoding has two kinds of instruction:
10469     *  normal, where we perform elt x idxelt => elt for each
10470     *     element in the vector
10471     *  long, where we perform elt x idxelt and generate a result of
10472     *     double the width of the input element
10473     * The long ops have a 'part' specifier (ie come in INSN, INSN2 pairs).
10474     */
10475    bool is_scalar = extract32(insn, 28, 1);
10476    bool is_q = extract32(insn, 30, 1);
10477    bool u = extract32(insn, 29, 1);
10478    int size = extract32(insn, 22, 2);
10479    int l = extract32(insn, 21, 1);
10480    int m = extract32(insn, 20, 1);
10481    /* Note that the Rm field here is only 4 bits, not 5 as it usually is */
10482    int rm = extract32(insn, 16, 4);
10483    int opcode = extract32(insn, 12, 4);
10484    int h = extract32(insn, 11, 1);
10485    int rn = extract32(insn, 5, 5);
10486    int rd = extract32(insn, 0, 5);
10487    bool is_long = false;
10488    bool is_fp = false;
10489    int index;
10490    TCGv_ptr fpst;
10491
10492    switch (opcode) {
10493    case 0x0: /* MLA */
10494    case 0x4: /* MLS */
10495        if (!u || is_scalar) {
10496            unallocated_encoding(s);
10497            return;
10498        }
10499        break;
10500    case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
10501    case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
10502    case 0xa: /* SMULL, SMULL2, UMULL, UMULL2 */
10503        if (is_scalar) {
10504            unallocated_encoding(s);
10505            return;
10506        }
10507        is_long = true;
10508        break;
10509    case 0x3: /* SQDMLAL, SQDMLAL2 */
10510    case 0x7: /* SQDMLSL, SQDMLSL2 */
10511    case 0xb: /* SQDMULL, SQDMULL2 */
10512        is_long = true;
10513        /* fall through */
10514    case 0xc: /* SQDMULH */
10515    case 0xd: /* SQRDMULH */
10516        if (u) {
10517            unallocated_encoding(s);
10518            return;
10519        }
10520        break;
10521    case 0x8: /* MUL */
10522        if (u || is_scalar) {
10523            unallocated_encoding(s);
10524            return;
10525        }
10526        break;
10527    case 0x1: /* FMLA */
10528    case 0x5: /* FMLS */
10529        if (u) {
10530            unallocated_encoding(s);
10531            return;
10532        }
10533        /* fall through */
10534    case 0x9: /* FMUL, FMULX */
10535        if (!extract32(size, 1, 1)) {
10536            unallocated_encoding(s);
10537            return;
10538        }
10539        is_fp = true;
10540        break;
10541    default:
10542        unallocated_encoding(s);
10543        return;
10544    }
10545
10546    if (is_fp) {
10547        /* low bit of size indicates single/double */
10548        size = extract32(size, 0, 1) ? 3 : 2;
10549        if (size == 2) {
10550            index = h << 1 | l;
10551        } else {
10552            if (l || !is_q) {
10553                unallocated_encoding(s);
10554                return;
10555            }
10556            index = h;
10557        }
10558        rm |= (m << 4);
10559    } else {
10560        switch (size) {
10561        case 1:
10562            index = h << 2 | l << 1 | m;
10563            break;
10564        case 2:
10565            index = h << 1 | l;
10566            rm |= (m << 4);
10567            break;
10568        default:
10569            unallocated_encoding(s);
10570            return;
10571        }
10572    }
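         /* So e.g. a 16-bit element (size == 1) uses H:L:M as a 3-bit
          * index into eight halfwords and keeps Rm at 4 bits, while the
          * 32-bit case indexes with H:L and folds M into Rm as its top bit.
          */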
10573
10574    if (!fp_access_check(s)) {
10575        return;
10576    }
10577
10578    if (is_fp) {
10579        fpst = get_fpstatus_ptr();
10580    } else {
10581        TCGV_UNUSED_PTR(fpst);
10582    }
10583
10584    if (size == 3) {
10585        TCGv_i64 tcg_idx = tcg_temp_new_i64();
10586        int pass;
10587
10588        assert(is_fp && is_q && !is_long);
10589
10590        read_vec_element(s, tcg_idx, rm, index, MO_64);
10591
10592        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
10593            TCGv_i64 tcg_op = tcg_temp_new_i64();
10594            TCGv_i64 tcg_res = tcg_temp_new_i64();
10595
10596            read_vec_element(s, tcg_op, rn, pass, MO_64);
10597
10598            switch (opcode) {
10599            case 0x5: /* FMLS */
10600                /* As usual for ARM, separate negation for fused multiply-add */
10601                gen_helper_vfp_negd(tcg_op, tcg_op);
10602                /* fall through */
10603            case 0x1: /* FMLA */
10604                read_vec_element(s, tcg_res, rd, pass, MO_64);
10605                gen_helper_vfp_muladdd(tcg_res, tcg_op, tcg_idx, tcg_res, fpst);
10606                break;
10607            case 0x9: /* FMUL, FMULX */
10608                if (u) {
10609                    gen_helper_vfp_mulxd(tcg_res, tcg_op, tcg_idx, fpst);
10610                } else {
10611                    gen_helper_vfp_muld(tcg_res, tcg_op, tcg_idx, fpst);
10612                }
10613                break;
10614            default:
10615                g_assert_not_reached();
10616            }
10617
10618            write_vec_element(s, tcg_res, rd, pass, MO_64);
10619            tcg_temp_free_i64(tcg_op);
10620            tcg_temp_free_i64(tcg_res);
10621        }
10622
10623        if (is_scalar) {
10624            clear_vec_high(s, rd);
10625        }
10626
10627        tcg_temp_free_i64(tcg_idx);
10628    } else if (!is_long) {
10629        /* 32 bit floating point, or 16 or 32 bit integer.
10630         * For the 16 bit scalar case we use the usual Neon helpers and
10631         * rely on the fact that 0 op 0 == 0 with no side effects.
10632         */
10633        TCGv_i32 tcg_idx = tcg_temp_new_i32();
10634        int pass, maxpasses;
10635
10636        if (is_scalar) {
10637            maxpasses = 1;
10638        } else {
10639            maxpasses = is_q ? 4 : 2;
10640        }
10641
10642        read_vec_element_i32(s, tcg_idx, rm, index, size);
10643
10644        if (size == 1 && !is_scalar) {
10645            /* The simplest way to handle the 16x16 indexed ops is to duplicate
10646             * the index into both halves of the 32 bit tcg_idx and then use
10647             * the usual Neon helpers.
10648             */
10649            tcg_gen_deposit_i32(tcg_idx, tcg_idx, tcg_idx, 16, 16);
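                 /* This deposit copies bits [15:0] of tcg_idx into bits
                  * [31:16], so both 16-bit lanes hold the selected scalar
                  * and the packed helpers operate on it lane-wise.
                  */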
10650        }
10651
10652        for (pass = 0; pass < maxpasses; pass++) {
10653            TCGv_i32 tcg_op = tcg_temp_new_i32();
10654            TCGv_i32 tcg_res = tcg_temp_new_i32();
10655
10656            read_vec_element_i32(s, tcg_op, rn, pass, is_scalar ? size : MO_32);
10657
10658            switch (opcode) {
10659            case 0x0: /* MLA */
10660            case 0x4: /* MLS */
10661            case 0x8: /* MUL */
10662            {
10663                static NeonGenTwoOpFn * const fns[2][2] = {
10664                    { gen_helper_neon_add_u16, gen_helper_neon_sub_u16 },
10665                    { tcg_gen_add_i32, tcg_gen_sub_i32 },
10666                };
10667                NeonGenTwoOpFn *genfn;
10668                bool is_sub = opcode == 0x4;
10669
10670                if (size == 1) {
10671                    gen_helper_neon_mul_u16(tcg_res, tcg_op, tcg_idx);
10672                } else {
10673                    tcg_gen_mul_i32(tcg_res, tcg_op, tcg_idx);
10674                }
10675                if (opcode == 0x8) {
10676                    break;
10677                }
10678                read_vec_element_i32(s, tcg_op, rd, pass, MO_32);
10679                genfn = fns[size - 1][is_sub];
10680                genfn(tcg_res, tcg_op, tcg_res);
10681                break;
10682            }
10683            case 0x5: /* FMLS */
10684                /* As usual for ARM, separate negation for fused multiply-add */
10685                gen_helper_vfp_negs(tcg_op, tcg_op);
10686                /* fall through */
10687            case 0x1: /* FMLA */
10688                read_vec_element_i32(s, tcg_res, rd, pass, MO_32);
10689                gen_helper_vfp_muladds(tcg_res, tcg_op, tcg_idx, tcg_res, fpst);
10690                break;
10691            case 0x9: /* FMUL, FMULX */
10692                if (u) {
10693                    gen_helper_vfp_mulxs(tcg_res, tcg_op, tcg_idx, fpst);
10694                } else {
10695                    gen_helper_vfp_muls(tcg_res, tcg_op, tcg_idx, fpst);
10696                }
10697                break;
10698            case 0xc: /* SQDMULH */
10699                if (size == 1) {
10700                    gen_helper_neon_qdmulh_s16(tcg_res, cpu_env,
10701                                               tcg_op, tcg_idx);
10702                } else {
10703                    gen_helper_neon_qdmulh_s32(tcg_res, cpu_env,
10704                                               tcg_op, tcg_idx);
10705                }
10706                break;
10707            case 0xd: /* SQRDMULH */
10708                if (size == 1) {
10709                    gen_helper_neon_qrdmulh_s16(tcg_res, cpu_env,
10710                                                tcg_op, tcg_idx);
10711                } else {
10712                    gen_helper_neon_qrdmulh_s32(tcg_res, cpu_env,
10713                                                tcg_op, tcg_idx);
10714                }
10715                break;
10716            default:
10717                g_assert_not_reached();
10718            }
10719
10720            if (is_scalar) {
10721                write_fp_sreg(s, rd, tcg_res);
10722            } else {
10723                write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
10724            }
10725
10726            tcg_temp_free_i32(tcg_op);
10727            tcg_temp_free_i32(tcg_res);
10728        }
10729
10730        tcg_temp_free_i32(tcg_idx);
10731
10732        if (!is_q) {
10733            clear_vec_high(s, rd);
10734        }
10735    } else {
10736        /* long ops: 16x16->32 or 32x32->64 */
10737        TCGv_i64 tcg_res[2];
10738        int pass;
10739        bool satop = extract32(opcode, 0, 1);
10740        TCGMemOp memop = MO_32;
10741
10742        if (satop || !u) {
10743            memop |= MO_SIGN;
10744        }
10745
10746        if (size == 2) {
10747            TCGv_i64 tcg_idx = tcg_temp_new_i64();
10748
10749            read_vec_element(s, tcg_idx, rm, index, memop);
10750
10751            for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
10752                TCGv_i64 tcg_op = tcg_temp_new_i64();
10753                TCGv_i64 tcg_passres;
10754                int passelt;
10755
10756                if (is_scalar) {
10757                    passelt = 0;
10758                } else {
10759                    passelt = pass + (is_q * 2);
10760                }
10761
10762                read_vec_element(s, tcg_op, rn, passelt, memop);
10763
10764                tcg_res[pass] = tcg_temp_new_i64();
10765
10766                if (opcode == 0xa || opcode == 0xb) {
10767                    /* Non-accumulating ops */
10768                    tcg_passres = tcg_res[pass];
10769                } else {
10770                    tcg_passres = tcg_temp_new_i64();
10771                }
10772
10773                tcg_gen_mul_i64(tcg_passres, tcg_op, tcg_idx);
10774                tcg_temp_free_i64(tcg_op);
10775
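                     /* For the SQDMULL family (satop) the doubling is
                      * done by adding the 64-bit product to itself via
                      * the saturating-add helper, which also sets QC on
                      * overflow.
                      */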
10776                if (satop) {
10777                    /* saturating, doubling */
10778                    gen_helper_neon_addl_saturate_s64(tcg_passres, cpu_env,
10779                                                      tcg_passres, tcg_passres);
10780                }
10781
10782                if (opcode == 0xa || opcode == 0xb) {
10783                    continue;
10784                }
10785
10786                /* Accumulating op: handle accumulate step */
10787                read_vec_element(s, tcg_res[pass], rd, pass, MO_64);
10788
10789                switch (opcode) {
10790                case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
10791                    tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
10792                    break;
10793                case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
10794                    tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
10795                    break;
10796                case 0x7: /* SQDMLSL, SQDMLSL2 */
10797                    tcg_gen_neg_i64(tcg_passres, tcg_passres);
10798                    /* fall through */
10799                case 0x3: /* SQDMLAL, SQDMLAL2 */
10800                    gen_helper_neon_addl_saturate_s64(tcg_res[pass], cpu_env,
10801                                                      tcg_res[pass],
10802                                                      tcg_passres);
10803                    break;
10804                default:
10805                    g_assert_not_reached();
10806                }
10807                tcg_temp_free_i64(tcg_passres);
10808            }
10809            tcg_temp_free_i64(tcg_idx);
10810
10811            if (is_scalar) {
10812                clear_vec_high(s, rd);
10813            }
10814        } else {
10815            TCGv_i32 tcg_idx = tcg_temp_new_i32();
10816
10817            assert(size == 1);
10818            read_vec_element_i32(s, tcg_idx, rm, index, size);
10819
10820            if (!is_scalar) {
10821                /* The simplest way to handle the 16x16 indexed ops is to
10822                 * duplicate the index into both halves of the 32 bit tcg_idx
10823                 * and then use the usual Neon helpers.
10824                 */
10825                tcg_gen_deposit_i32(tcg_idx, tcg_idx, tcg_idx, 16, 16);
10826            }
10827
10828            for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
10829                TCGv_i32 tcg_op = tcg_temp_new_i32();
10830                TCGv_i64 tcg_passres;
10831
10832                if (is_scalar) {
10833                    read_vec_element_i32(s, tcg_op, rn, pass, size);
10834                } else {
10835                    read_vec_element_i32(s, tcg_op, rn,
10836                                         pass + (is_q * 2), MO_32);
10837                }
10838
10839                tcg_res[pass] = tcg_temp_new_i64();
10840
10841                if (opcode == 0xa || opcode == 0xb) {
10842                    /* Non-accumulating ops */
10843                    tcg_passres = tcg_res[pass];
10844                } else {
10845                    tcg_passres = tcg_temp_new_i64();
10846                }
10847
10848                if (memop & MO_SIGN) {
10849                    gen_helper_neon_mull_s16(tcg_passres, tcg_op, tcg_idx);
10850                } else {
10851                    gen_helper_neon_mull_u16(tcg_passres, tcg_op, tcg_idx);
10852                }
10853                if (satop) {
10854                    gen_helper_neon_addl_saturate_s32(tcg_passres, cpu_env,
10855                                                      tcg_passres, tcg_passres);
10856                }
10857                tcg_temp_free_i32(tcg_op);
10858
10859                if (opcode == 0xa || opcode == 0xb) {
10860                    continue;
10861                }
10862
10863                /* Accumulating op: handle accumulate step */
10864                read_vec_element(s, tcg_res[pass], rd, pass, MO_64);
10865
10866                switch (opcode) {
10867                case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
10868                    gen_helper_neon_addl_u32(tcg_res[pass], tcg_res[pass],
10869                                             tcg_passres);
10870                    break;
10871                case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
10872                    gen_helper_neon_subl_u32(tcg_res[pass], tcg_res[pass],
10873                                             tcg_passres);
10874                    break;
10875                case 0x7: /* SQDMLSL, SQDMLSL2 */
10876                    gen_helper_neon_negl_u32(tcg_passres, tcg_passres);
10877                    /* fall through */
10878                case 0x3: /* SQDMLAL, SQDMLAL2 */
10879                    gen_helper_neon_addl_saturate_s32(tcg_res[pass], cpu_env,
10880                                                      tcg_res[pass],
10881                                                      tcg_passres);
10882                    break;
10883                default:
10884                    g_assert_not_reached();
10885                }
10886                tcg_temp_free_i64(tcg_passres);
10887            }
10888            tcg_temp_free_i32(tcg_idx);
10889
10890            if (is_scalar) {
10891                tcg_gen_ext32u_i64(tcg_res[0], tcg_res[0]);
10892            }
10893        }
10894
10895        if (is_scalar) {
10896            tcg_res[1] = tcg_const_i64(0);
10897        }
10898
10899        for (pass = 0; pass < 2; pass++) {
10900            write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
10901            tcg_temp_free_i64(tcg_res[pass]);
10902        }
10903    }
10904
10905    if (!TCGV_IS_UNUSED_PTR(fpst)) {
10906        tcg_temp_free_ptr(fpst);
10907    }
10908}
10909
10910/* C3.6.19 Crypto AES
10911 *  31             24 23  22 21       17 16    12 11 10 9    5 4    0
10912 * +-----------------+------+-----------+--------+-----+------+------+
10913 * | 0 1 0 0 1 1 1 0 | size | 1 0 1 0 0 | opcode | 1 0 |  Rn  |  Rd  |
10914 * +-----------------+------+-----------+--------+-----+------+------+
10915 */
10916static void disas_crypto_aes(DisasContext *s, uint32_t insn)
10917{
10918    int size = extract32(insn, 22, 2);
10919    int opcode = extract32(insn, 12, 5);
10920    int rn = extract32(insn, 5, 5);
10921    int rd = extract32(insn, 0, 5);
10922    int decrypt;
10923    TCGv_i32 tcg_rd_regno, tcg_rn_regno, tcg_decrypt;
10924    CryptoThreeOpEnvFn *genfn;
10925
10926    if (!arm_dc_feature(s, ARM_FEATURE_V8_AES)
10927        || size != 0) {
10928        unallocated_encoding(s);
10929        return;
10930    }
10931
10932    switch (opcode) {
10933    case 0x4: /* AESE */
10934        decrypt = 0;
10935        genfn = gen_helper_crypto_aese;
10936        break;
10937    case 0x6: /* AESMC */
10938        decrypt = 0;
10939        genfn = gen_helper_crypto_aesmc;
10940        break;
10941    case 0x5: /* AESD */
10942        decrypt = 1;
10943        genfn = gen_helper_crypto_aese;
10944        break;
10945    case 0x7: /* AESIMC */
10946        decrypt = 1;
10947        genfn = gen_helper_crypto_aesmc;
10948        break;
10949    default:
10950        unallocated_encoding(s);
10951        return;
10952    }
10953
10954    if (!fp_access_check(s)) {
10955        return;
10956    }
10957
10958    /* Note that we convert the Vx register indexes into the
10959     * index within the vfp.regs[] array, so we can share the
10960     * helper with the AArch32 instructions.
10961     */
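         /* Each AArch64 Vn register is 128 bits wide, i.e. two
          * consecutive 64-bit entries in vfp.regs[], hence the << 1
          * scaling of the register numbers.
          */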
10962    tcg_rd_regno = tcg_const_i32(rd << 1);
10963    tcg_rn_regno = tcg_const_i32(rn << 1);
10964    tcg_decrypt = tcg_const_i32(decrypt);
10965
10966    genfn(cpu_env, tcg_rd_regno, tcg_rn_regno, tcg_decrypt);
10967
10968    tcg_temp_free_i32(tcg_rd_regno);
10969    tcg_temp_free_i32(tcg_rn_regno);
10970    tcg_temp_free_i32(tcg_decrypt);
10971}
10972
10973/* C3.6.20 Crypto three-reg SHA
10974 *  31             24 23  22  21 20  16  15 14    12 11 10 9    5 4    0
10975 * +-----------------+------+---+------+---+--------+-----+------+------+
10976 * | 0 1 0 1 1 1 1 0 | size | 0 |  Rm  | 0 | opcode | 0 0 |  Rn  |  Rd  |
10977 * +-----------------+------+---+------+---+--------+-----+------+------+
10978 */
10979static void disas_crypto_three_reg_sha(DisasContext *s, uint32_t insn)
10980{
10981    int size = extract32(insn, 22, 2);
10982    int opcode = extract32(insn, 12, 3);
10983    int rm = extract32(insn, 16, 5);
10984    int rn = extract32(insn, 5, 5);
10985    int rd = extract32(insn, 0, 5);
10986    CryptoThreeOpEnvFn *genfn;
10987    TCGv_i32 tcg_rd_regno, tcg_rn_regno, tcg_rm_regno;
10988    int feature = ARM_FEATURE_V8_SHA256;
10989
10990    if (size != 0) {
10991        unallocated_encoding(s);
10992        return;
10993    }
10994
10995    switch (opcode) {
10996    case 0: /* SHA1C */
10997    case 1: /* SHA1P */
10998    case 2: /* SHA1M */
10999    case 3: /* SHA1SU0 */
11000        genfn = NULL;
11001        feature = ARM_FEATURE_V8_SHA1;
11002        break;
11003    case 4: /* SHA256H */
11004        genfn = gen_helper_crypto_sha256h;
11005        break;
11006    case 5: /* SHA256H2 */
11007        genfn = gen_helper_crypto_sha256h2;
11008        break;
11009    case 6: /* SHA256SU1 */
11010        genfn = gen_helper_crypto_sha256su1;
11011        break;
11012    default:
11013        unallocated_encoding(s);
11014        return;
11015    }
11016
11017    if (!arm_dc_feature(s, feature)) {
11018        unallocated_encoding(s);
11019        return;
11020    }
11021
11022    if (!fp_access_check(s)) {
11023        return;
11024    }
11025
11026    tcg_rd_regno = tcg_const_i32(rd << 1);
11027    tcg_rn_regno = tcg_const_i32(rn << 1);
11028    tcg_rm_regno = tcg_const_i32(rm << 1);
11029
11030    if (genfn) {
11031        genfn(cpu_env, tcg_rd_regno, tcg_rn_regno, tcg_rm_regno);
11032    } else {
11033        TCGv_i32 tcg_opcode = tcg_const_i32(opcode);
11034
11035        gen_helper_crypto_sha1_3reg(cpu_env, tcg_rd_regno,
11036                                    tcg_rn_regno, tcg_rm_regno, tcg_opcode);
11037        tcg_temp_free_i32(tcg_opcode);
11038    }
11039
11040    tcg_temp_free_i32(tcg_rd_regno);
11041    tcg_temp_free_i32(tcg_rn_regno);
11042    tcg_temp_free_i32(tcg_rm_regno);
11043}
11044
11045/* C3.6.21 Crypto two-reg SHA
11046 *  31             24 23  22 21       17 16    12 11 10 9    5 4    0
11047 * +-----------------+------+-----------+--------+-----+------+------+
11048 * | 0 1 0 1 1 1 1 0 | size | 1 0 1 0 0 | opcode | 1 0 |  Rn  |  Rd  |
11049 * +-----------------+------+-----------+--------+-----+------+------+
11050 */
11051static void disas_crypto_two_reg_sha(DisasContext *s, uint32_t insn)
11052{
11053    int size = extract32(insn, 22, 2);
11054    int opcode = extract32(insn, 12, 5);
11055    int rn = extract32(insn, 5, 5);
11056    int rd = extract32(insn, 0, 5);
11057    CryptoTwoOpEnvFn *genfn;
11058    int feature;
11059    TCGv_i32 tcg_rd_regno, tcg_rn_regno;
11060
11061    if (size != 0) {
11062        unallocated_encoding(s);
11063        return;
11064    }
11065
11066    switch (opcode) {
11067    case 0: /* SHA1H */
11068        feature = ARM_FEATURE_V8_SHA1;
11069        genfn = gen_helper_crypto_sha1h;
11070        break;
11071    case 1: /* SHA1SU1 */
11072        feature = ARM_FEATURE_V8_SHA1;
11073        genfn = gen_helper_crypto_sha1su1;
11074        break;
11075    case 2: /* SHA256SU0 */
11076        feature = ARM_FEATURE_V8_SHA256;
11077        genfn = gen_helper_crypto_sha256su0;
11078        break;
11079    default:
11080        unallocated_encoding(s);
11081        return;
11082    }
11083
11084    if (!arm_dc_feature(s, feature)) {
11085        unallocated_encoding(s);
11086        return;
11087    }
11088
11089    if (!fp_access_check(s)) {
11090        return;
11091    }
11092
11093    tcg_rd_regno = tcg_const_i32(rd << 1);
11094    tcg_rn_regno = tcg_const_i32(rn << 1);
11095
11096    genfn(cpu_env, tcg_rd_regno, tcg_rn_regno);
11097
11098    tcg_temp_free_i32(tcg_rd_regno);
11099    tcg_temp_free_i32(tcg_rn_regno);
11100}
11101
11102/* C3.6 Data processing - SIMD, inc Crypto
11103 *
11104 * As the decode gets a little complex we are using a table based
11105 * approach for this part of the decode.
11106 */
static const AArch64DecodeTable data_proc_simd[] = {
    /* pattern  ,  mask     ,  fn                        */
    { 0x0e200400, 0x9f200400, disas_simd_three_reg_same },
    { 0x0e200000, 0x9f200c00, disas_simd_three_reg_diff },
    { 0x0e200800, 0x9f3e0c00, disas_simd_two_reg_misc },
    { 0x0e300800, 0x9f3e0c00, disas_simd_across_lanes },
    { 0x0e000400, 0x9fe08400, disas_simd_copy },
    { 0x0f000000, 0x9f000400, disas_simd_indexed }, /* vector indexed */
    /* simd_mod_imm decode is a subset of simd_shift_imm, so must precede it */
    { 0x0f000400, 0x9ff80400, disas_simd_mod_imm },
    { 0x0f000400, 0x9f800400, disas_simd_shift_imm },
    { 0x0e000000, 0xbf208c00, disas_simd_tb },
    { 0x0e000800, 0xbf208c00, disas_simd_zip_trn },
    { 0x2e000000, 0xbf208400, disas_simd_ext },
    { 0x5e200400, 0xdf200400, disas_simd_scalar_three_reg_same },
    { 0x5e200000, 0xdf200c00, disas_simd_scalar_three_reg_diff },
    { 0x5e200800, 0xdf3e0c00, disas_simd_scalar_two_reg_misc },
    { 0x5e300800, 0xdf3e0c00, disas_simd_scalar_pairwise },
    { 0x5e000400, 0xdfe08400, disas_simd_scalar_copy },
    { 0x5f000000, 0xdf000400, disas_simd_indexed }, /* scalar indexed */
    { 0x5f000400, 0xdf800400, disas_simd_scalar_shift_imm },
    { 0x4e280800, 0xff3e0c00, disas_crypto_aes },
    { 0x5e000000, 0xff208c00, disas_crypto_three_reg_sha },
    { 0x5e280800, 0xff3e0c00, disas_crypto_two_reg_sha },
    { 0x00000000, 0x00000000, NULL }
};

static void disas_data_proc_simd(DisasContext *s, uint32_t insn)
{
    /* Note that this is called with all non-FP cases from
     * table C3-6 so it must UNDEF for entries not specifically
     * allocated to instructions in that table.
     */
    AArch64DecodeFn *fn = lookup_disas_fn(&data_proc_simd[0], insn);
    if (fn) {
        fn(s, insn);
    } else {
        unallocated_encoding(s);
    }
}

/* C3.6 Data processing - SIMD and floating point */
static void disas_data_proc_simd_fp(DisasContext *s, uint32_t insn)
{
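    /* Bit 28 set with bit 30 clear selects the scalar floating point
     * group; everything else in this space is SIMD, including crypto.
     */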
    if (extract32(insn, 28, 1) == 1 && extract32(insn, 30, 1) == 0) {
        disas_data_proc_fp(s, insn);
    } else {
        /* SIMD, including crypto */
        disas_data_proc_simd(s, insn);
    }
}

/* C3.1 A64 instruction index by encoding */
static void disas_a64_insn(CPUARMState *env, DisasContext *s)
{
    uint32_t insn;

    insn = arm_ldl_code(env, s->pc, s->sctlr_b);
    s->insn = insn;
    s->pc += 4;

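    /* Reset the per-insn flag which fp_access_check() uses to assert
     * that each FP/SIMD insn checks for traps at most once.
     */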
    s->fp_access_checked = false;

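    /* The major opcode group is in insn[28:25]; see the C3.1 encoding
     * index table.
     */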
    switch (extract32(insn, 25, 4)) {
    case 0x0: case 0x1: case 0x2: case 0x3: /* UNALLOCATED */
        unallocated_encoding(s);
        break;
    case 0x8: case 0x9: /* Data processing - immediate */
        disas_data_proc_imm(s, insn);
        break;
    case 0xa: case 0xb: /* Branch, exception generation and system insns */
        disas_b_exc_sys(s, insn);
        break;
    case 0x4:
    case 0x6:
    case 0xc:
    case 0xe:      /* Loads and stores */
        disas_ldst(s, insn);
        break;
    case 0x5:
    case 0xd:      /* Data processing - register */
        disas_data_proc_reg(s, insn);
        break;
    case 0x7:
    case 0xf:      /* Data processing - SIMD and floating point */
        disas_data_proc_simd_fp(s, insn);
        break;
    default:
        assert(FALSE); /* all 16 cases should be handled above */
        break;
    }

    /* if we allocated any temporaries, free them here */
    free_tmp_a64(s);
}

void gen_intermediate_code_a64(CPUState *cs, TranslationBlock *tb)
{
    CPUARMState *env = cs->env_ptr;
    ARMCPU *cpu = arm_env_get_cpu(env);
    DisasContext dc1, *dc = &dc1;
    target_ulong pc_start;
    target_ulong next_page_start;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;

    dc->tb = tb;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->condjmp = 0;

    dc->aarch64 = 1;
    /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
     * there is no secure EL1, so we route exceptions to EL3.
     */
    dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
                               !arm_el_is_aa64(env, 3);
    dc->thumb = 0;
    dc->sctlr_b = 0;
    dc->be_data = ARM_TBFLAG_BE_DATA(tb->flags) ? MO_BE : MO_LE;
    dc->condexec_mask = 0;
    dc->condexec_cond = 0;
    dc->mmu_idx = core_to_arm_mmu_idx(env, ARM_TBFLAG_MMUIDX(tb->flags));
    dc->tbi0 = ARM_TBFLAG_TBI0(tb->flags);
    dc->tbi1 = ARM_TBFLAG_TBI1(tb->flags);
    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
    dc->user = (dc->current_el == 0);
#endif
    dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(tb->flags);
    dc->vec_len = 0;
    dc->vec_stride = 0;
    dc->cp_regs = cpu->cp_regs;
    dc->features = env->features;

    /* Single step state. The code-generation logic here is:
     *  SS_ACTIVE == 0:
     *   generate code with no special handling for single-stepping (except
     *   that anything that can make us go to SS_ACTIVE == 1 must end the TB;
     *   this happens anyway because those changes are all system register or
     *   PSTATE writes).
     *  SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
     *   emit code for one insn
     *   emit code to clear PSTATE.SS
     *   emit code to generate software step exception for completed step
     *   end TB (as usual for having generated an exception)
     *  SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
     *   emit code to generate a software step exception
     *   end the TB
     */
    dc->ss_active = ARM_TBFLAG_SS_ACTIVE(tb->flags);
    dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(tb->flags);
    dc->is_ldex = false;
    dc->ss_same_el = (arm_debug_target_el(env) == dc->current_el);

    init_tmp_a64_array(dc);

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    num_insns = 0;
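    /* CF_COUNT_MASK holds the requested insn budget for this TB
     * (nonzero when icount needs an exact count); zero means no limit,
     * and in all cases we clamp to TCG_MAX_INSNS.
     */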
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);

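    /* Reset the TCG temporary counter so that tcg_check_temp_count()
     * in the loop below can flag temporaries an insn leaks.
     */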
    tcg_clear_temp_count();

    do {
        dc->insn_start_idx = tcg_op_buf_count();
        tcg_gen_insn_start(dc->pc, 0, 0);
        num_insns++;

        if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
            CPUBreakpoint *bp;
            QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    if (bp->flags & BP_CPU) {
                        gen_a64_set_pc_im(dc->pc);
                        gen_helper_check_breakpoints(cpu_env);
                        /* End the TB early; it likely won't be executed */
                        dc->is_jmp = DISAS_UPDATE;
                    } else {
                        gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
                        /* The address covered by the breakpoint must be
                           included in [tb->pc, tb->pc + tb->size) in order
                           for it to be properly cleared -- thus we increment
                           the PC here so that the logic setting tb->size
                           below does the right thing.  */
                        dc->pc += 4;
                        goto done_generating;
                    }
                    break;
                }
            }
        }

        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        if (dc->ss_active && !dc->pstate_ss) {
            /* Singlestep state is Active-pending.
             * If we're in this state at the start of a TB then either
             *  a) we just took an exception to an EL which is being debugged
             *     and this is the first insn in the exception handler
             *  b) debug exceptions were masked and we just unmasked them
             *     without changing EL (eg by clearing PSTATE.D)
             * In either case we're going to take a swstep exception in the
             * "did not step an insn" case, and so the syndrome ISV and EX
             * bits should be zero.
             */
            assert(num_insns == 1);
            gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
                          default_exception_el(dc));
            dc->is_jmp = DISAS_EXC;
            break;
        }

        disas_a64_insn(env, dc);

        if (tcg_check_temp_count()) {
            fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n",
                    dc->pc);
        }

        /* Translation stops when a branch is encountered (dc->is_jmp is
         * set); otherwise the subsequent code could get translated
         * several times.  Also stop translation when a page boundary is
         * reached: this ensures prefetch aborts occur at the right place.
         */
    } while (!dc->is_jmp && !tcg_op_buf_full() &&
             !cs->singlestep_enabled &&
             !singlestep &&
             !dc->ss_active &&
             dc->pc < next_page_start &&
             num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    if (unlikely(cs->singlestep_enabled || dc->ss_active)
        && dc->is_jmp != DISAS_EXC) {
        /* Note that this means single stepping WFI doesn't halt the CPU.
         * For conditional branch insns this is harmless unreachable code as
         * gen_goto_tb() has already handled emitting the debug exception
         * (and thus a tb-jump is not possible when singlestepping).
         */
        assert(dc->is_jmp != DISAS_TB_JUMP);
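        /* DISAS_JUMP means the insn has already written its target to
         * cpu_pc, so only write back dc->pc for the other cases.
         */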
        if (dc->is_jmp != DISAS_JUMP) {
            gen_a64_set_pc_im(dc->pc);
        }
        if (cs->singlestep_enabled) {
            gen_exception_internal(EXCP_DEBUG);
        } else {
            gen_step_complete_exception(dc);
        }
    } else {
        switch (dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        case DISAS_JUMP:
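            /* Chain straight to the TB for the address already in
             * cpu_pc if one exists; otherwise exit to the main loop.
             */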
            tcg_gen_lookup_and_goto_ptr(cpu_pc);
            break;
        case DISAS_TB_JUMP:
        case DISAS_EXC:
        case DISAS_SWI:
            break;
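        /* WFE and YIELD are treated as mere hints here: the helpers
         * give other vCPUs or threads a chance to run rather than
         * halting this one.
         */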
        case DISAS_WFE:
            gen_a64_set_pc_im(dc->pc);
            gen_helper_wfe(cpu_env);
            break;
        case DISAS_YIELD:
            gen_a64_set_pc_im(dc->pc);
            gen_helper_yield(cpu_env);
            break;
        case DISAS_WFI:
            /* This is a special case because we don't want to just halt the CPU
             * if trying to debug across a WFI.
             */
            gen_a64_set_pc_im(dc->pc);
            gen_helper_wfi(cpu_env);
            /* The helper doesn't necessarily throw an exception, but we
             * must go back to the main loop to check for interrupts anyway.
             */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_UPDATE:
            gen_a64_set_pc_im(dc->pc);
            /* fall through */
        case DISAS_EXIT:
        default:
            tcg_gen_exit_tb(0);
            break;
        }
    }

done_generating:
    gen_tb_end(tb, num_insns);

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM) &&
        qemu_log_in_addr_range(pc_start)) {
        qemu_log_lock();
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(cs, pc_start, dc->pc - pc_start,
                         4 | (bswap_code(dc->sctlr_b) ? 2 : 0));
        qemu_log("\n");
        qemu_log_unlock();
    }
#endif
    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;
}
