qemu/target/arm/translate-a64.c
/*
 *  AArch64 translation
 *
 *  Copyright (c) 2013 Alexander Graf <agraf@suse.de>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "tcg-op-gvec.h"
#include "qemu/log.h"
#include "arm_ldst.h"
#include "translate.h"
#include "internals.h"
#include "qemu/host-utils.h"

#include "exec/semihost.h"
#include "exec/gen-icount.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/log.h"

#include "trace-tcg.h"
#include "translate-a64.h"

static TCGv_i64 cpu_X[32];
static TCGv_i64 cpu_pc;

/* Load/store exclusive handling */
static TCGv_i64 cpu_exclusive_high;

static const char *regnames[] = {
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
    "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
    "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23",
    "x24", "x25", "x26", "x27", "x28", "x29", "lr", "sp"
};

enum a64_shift_type {
    A64_SHIFT_TYPE_LSL = 0,
    A64_SHIFT_TYPE_LSR = 1,
    A64_SHIFT_TYPE_ASR = 2,
    A64_SHIFT_TYPE_ROR = 3
};

/* Table based decoder typedefs - used when the relevant bits for decode
 * are too awkwardly scattered across the instruction (eg SIMD).
 */
typedef void AArch64DecodeFn(DisasContext *s, uint32_t insn);

typedef struct AArch64DecodeTable {
    uint32_t pattern;
    uint32_t mask;
    AArch64DecodeFn *disas_fn;
} AArch64DecodeTable;
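
/* For example (illustrative only; the handler name is hypothetical), a
 * table entry matching any insn where (insn & 0xff000000) == 0x0e000000
 * would be written as
 *     { 0x0e000000, 0xff000000, disas_simd_foo },
 * with the table terminated by an entry whose mask is 0.
 */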

/* Function prototype for gen_ functions for calling Neon helpers */
typedef void NeonGenOneOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32);
typedef void NeonGenTwoOpFn(TCGv_i32, TCGv_i32, TCGv_i32);
typedef void NeonGenTwoOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32);
typedef void NeonGenTwo64OpFn(TCGv_i64, TCGv_i64, TCGv_i64);
typedef void NeonGenTwo64OpEnvFn(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64);
typedef void NeonGenNarrowFn(TCGv_i32, TCGv_i64);
typedef void NeonGenNarrowEnvFn(TCGv_i32, TCGv_ptr, TCGv_i64);
typedef void NeonGenWidenFn(TCGv_i64, TCGv_i32);
typedef void NeonGenTwoSingleOPFn(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
typedef void NeonGenTwoDoubleOPFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_ptr);
typedef void NeonGenOneOpFn(TCGv_i64, TCGv_i64);
typedef void CryptoTwoOpFn(TCGv_ptr, TCGv_ptr);
typedef void CryptoThreeOpIntFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
typedef void CryptoThreeOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, TCGMemOp);

/* initialize TCG globals.  */
void a64_translate_init(void)
{
    int i;

    cpu_pc = tcg_global_mem_new_i64(cpu_env,
                                    offsetof(CPUARMState, pc),
                                    "pc");
    for (i = 0; i < 32; i++) {
        cpu_X[i] = tcg_global_mem_new_i64(cpu_env,
                                          offsetof(CPUARMState, xregs[i]),
                                          regnames[i]);
    }

    cpu_exclusive_high = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_high), "exclusive_high");
}

static inline int get_a64_user_mem_index(DisasContext *s)
{
    /* Return the core mmu_idx to use for A64 "unprivileged load/store" insns:
     *  if EL1, access as if EL0; otherwise access at current EL
     */
    ARMMMUIdx useridx;

    switch (s->mmu_idx) {
    case ARMMMUIdx_S12NSE1:
        useridx = ARMMMUIdx_S12NSE0;
        break;
    case ARMMMUIdx_S1SE1:
        useridx = ARMMMUIdx_S1SE0;
        break;
    case ARMMMUIdx_S2NS:
        g_assert_not_reached();
    default:
        useridx = s->mmu_idx;
        break;
    }
    return arm_to_core_mmu_idx(useridx);
}

void aarch64_cpu_dump_state(CPUState *cs, FILE *f,
                            fprintf_function cpu_fprintf, int flags)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t psr = pstate_read(env);
    int i;
    int el = arm_current_el(env);
    const char *ns_status;

    cpu_fprintf(f, " PC=%016" PRIx64 " ", env->pc);
    for (i = 0; i < 32; i++) {
        if (i == 31) {
            cpu_fprintf(f, " SP=%016" PRIx64 "\n", env->xregs[i]);
        } else {
            cpu_fprintf(f, "X%02d=%016" PRIx64 "%s", i, env->xregs[i],
                        (i + 2) % 3 ? " " : "\n");
        }
    }

    if (arm_feature(env, ARM_FEATURE_EL3) && el != 3) {
        ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
    } else {
        ns_status = "";
    }
    cpu_fprintf(f, "PSTATE=%08x %c%c%c%c %sEL%d%c",
                psr,
                psr & PSTATE_N ? 'N' : '-',
                psr & PSTATE_Z ? 'Z' : '-',
                psr & PSTATE_C ? 'C' : '-',
                psr & PSTATE_V ? 'V' : '-',
                ns_status,
                el,
                psr & PSTATE_SP ? 'h' : 't');

    if (!(flags & CPU_DUMP_FPU)) {
        cpu_fprintf(f, "\n");
        return;
    }
    cpu_fprintf(f, "     FPCR=%08x FPSR=%08x\n",
                vfp_get_fpcr(env), vfp_get_fpsr(env));

    if (arm_feature(env, ARM_FEATURE_SVE)) {
        int j, zcr_len = env->vfp.zcr_el[1] & 0xf; /* fix for system mode */

        for (i = 0; i <= FFR_PRED_NUM; i++) {
            bool eol;
            if (i == FFR_PRED_NUM) {
                cpu_fprintf(f, "FFR=");
                /* It's last, so end the line.  */
                eol = true;
            } else {
                cpu_fprintf(f, "P%02d=", i);
                switch (zcr_len) {
                case 0:
                    eol = i % 8 == 7;
                    break;
                case 1:
                    eol = i % 6 == 5;
                    break;
                case 2:
                case 3:
                    eol = i % 3 == 2;
                    break;
                default:
                    /* More than one quadword per predicate.  */
                    eol = true;
                    break;
                }
            }
            for (j = zcr_len / 4; j >= 0; j--) {
                int digits;
                if (j * 4 + 4 <= zcr_len + 1) {
                    digits = 16;
                } else {
                    digits = (zcr_len % 4 + 1) * 4;
                }
                cpu_fprintf(f, "%0*" PRIx64 "%s", digits,
                            env->vfp.pregs[i].p[j],
                            j ? ":" : eol ? "\n" : " ");
            }
        }

        for (i = 0; i < 32; i++) {
            if (zcr_len == 0) {
                cpu_fprintf(f, "Z%02d=%016" PRIx64 ":%016" PRIx64 "%s",
                            i, env->vfp.zregs[i].d[1],
                            env->vfp.zregs[i].d[0], i & 1 ? "\n" : " ");
            } else if (zcr_len == 1) {
                cpu_fprintf(f, "Z%02d=%016" PRIx64 ":%016" PRIx64
                            ":%016" PRIx64 ":%016" PRIx64 "\n",
                            i, env->vfp.zregs[i].d[3], env->vfp.zregs[i].d[2],
                            env->vfp.zregs[i].d[1], env->vfp.zregs[i].d[0]);
            } else {
                for (j = zcr_len; j >= 0; j--) {
                    bool odd = (zcr_len - j) % 2 != 0;
                    if (j == zcr_len) {
                        cpu_fprintf(f, "Z%02d[%x-%x]=", i, j, j - 1);
                    } else if (!odd) {
                        if (j > 0) {
                            cpu_fprintf(f, "   [%x-%x]=", j, j - 1);
                        } else {
                            cpu_fprintf(f, "     [%x]=", j);
                        }
                    }
                    cpu_fprintf(f, "%016" PRIx64 ":%016" PRIx64 "%s",
                                env->vfp.zregs[i].d[j * 2 + 1],
                                env->vfp.zregs[i].d[j * 2],
                                odd || j == 0 ? "\n" : ":");
                }
            }
        }
    } else {
        for (i = 0; i < 32; i++) {
            uint64_t *q = aa64_vfp_qreg(env, i);
            cpu_fprintf(f, "Q%02d=%016" PRIx64 ":%016" PRIx64 "%s",
                        i, q[1], q[0], (i & 1 ? "\n" : " "));
        }
    }
}

void gen_a64_set_pc_im(uint64_t val)
{
    tcg_gen_movi_i64(cpu_pc, val);
}

/* Load the PC from a generic TCG variable.
 *
 * If address tagging is enabled via the TCR TBI bits, then loading
 * an address into the PC will clear out any tag in it:
 *  + for EL2 and EL3 there is only one TBI bit, and if it is set
 *    then the address is zero-extended, clearing bits [63:56]
 *  + for EL0 and EL1, TBI0 controls addresses with bit 55 == 0
 *    and TBI1 controls addresses with bit 55 == 1.
 *    If the appropriate TBI bit is set for the address then
 *    the address is sign-extended from bit 55 into bits [63:56]
 *
 * We can avoid doing this for relative branches, because the
 * PC + offset can never overflow into the tag bits (assuming
 * that virtual addresses are less than 56 bits wide, as they
 * are currently), but we must handle it for branch-to-register.
 */
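/* Worked example (illustrative values): at EL0/EL1 with TBI1 set, an
 * address such as 0x00ffffffc0000000 has bit 55 == 1, so its tag byte
 * is replaced by sign extension from bit 55:
 *     (0x00ffffffc0000000 << 8) >> 8 (arithmetic) == 0xffffffffc0000000
 * which is exactly what the shli/sari pair below computes.
 */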
static void gen_a64_set_pc(DisasContext *s, TCGv_i64 src)
{
    if (s->current_el <= 1) {
        /* Test if NEITHER or BOTH TBI values are set.  If so, no need to
         * examine bit 55 of address, can just generate code.
         * If mixed, then test via generated code.
         */
        if (s->tbi0 && s->tbi1) {
            TCGv_i64 tmp_reg = tcg_temp_new_i64();
            /* Both bits set, sign extension from bit 55 into [63:56] will
             * cover both cases
             */
            tcg_gen_shli_i64(tmp_reg, src, 8);
            tcg_gen_sari_i64(cpu_pc, tmp_reg, 8);
            tcg_temp_free_i64(tmp_reg);
        } else if (!s->tbi0 && !s->tbi1) {
            /* Neither bit set, just load it as-is */
            tcg_gen_mov_i64(cpu_pc, src);
        } else {
            TCGv_i64 tcg_tmpval = tcg_temp_new_i64();
            TCGv_i64 tcg_bit55  = tcg_temp_new_i64();
            TCGv_i64 tcg_zero   = tcg_const_i64(0);

            tcg_gen_andi_i64(tcg_bit55, src, (1ull << 55));

            if (s->tbi0) {
                /* tbi0==1, tbi1==0, so 0-fill upper byte if bit 55 = 0 */
                tcg_gen_andi_i64(tcg_tmpval, src,
                                 0x00FFFFFFFFFFFFFFull);
                tcg_gen_movcond_i64(TCG_COND_EQ, cpu_pc, tcg_bit55, tcg_zero,
                                    tcg_tmpval, src);
            } else {
                /* tbi0==0, tbi1==1, so 1-fill upper byte if bit 55 = 1 */
                tcg_gen_ori_i64(tcg_tmpval, src,
                                0xFF00000000000000ull);
                tcg_gen_movcond_i64(TCG_COND_NE, cpu_pc, tcg_bit55, tcg_zero,
                                    tcg_tmpval, src);
            }
            tcg_temp_free_i64(tcg_zero);
            tcg_temp_free_i64(tcg_bit55);
            tcg_temp_free_i64(tcg_tmpval);
        }
    } else {  /* EL > 1 */
        if (s->tbi0) {
            /* Force tag byte to all zero */
            tcg_gen_andi_i64(cpu_pc, src, 0x00FFFFFFFFFFFFFFull);
        } else {
            /* Load unmodified address */
            tcg_gen_mov_i64(cpu_pc, src);
        }
    }
}

typedef struct DisasCompare64 {
    TCGCond cond;
    TCGv_i64 value;
} DisasCompare64;

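/* Build a 64-bit DisasCompare64 for condition code CC by widening the
 * 32-bit test; the value must be released with a64_free_cc().
 */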
static void a64_test_cc(DisasCompare64 *c64, int cc)
{
    DisasCompare c32;

    arm_test_cc(&c32, cc);

    /* Sign-extend the 32-bit value so that the GE/LT comparisons work
     * properly.  The NE/EQ comparisons are also fine with this choice.
     */
    c64->cond = c32.cond;
    c64->value = tcg_temp_new_i64();
    tcg_gen_ext_i32_i64(c64->value, c32.value);

    arm_free_cc(&c32);
}

static void a64_free_cc(DisasCompare64 *c64)
{
    tcg_temp_free_i64(c64->value);
}

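/* Raise an exception that is internal to QEMU (e.g. EXCP_DEBUG). */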
static void gen_exception_internal(int excp)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);

    assert(excp_is_internal(excp));
    gen_helper_exception_internal(cpu_env, tcg_excp);
    tcg_temp_free_i32(tcg_excp);
}

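/* Raise an architectural exception of type EXCP with the given
 * syndrome, targeting exception level TARGET_EL.
 */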
static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);
    TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
    TCGv_i32 tcg_el = tcg_const_i32(target_el);

    gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
                                       tcg_syn, tcg_el);
    tcg_temp_free_i32(tcg_el);
    tcg_temp_free_i32(tcg_syn);
    tcg_temp_free_i32(tcg_excp);
}

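/* Wind the PC back to this insn (PC - offset) before raising an
 * internal exception.
 */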
static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
{
    gen_a64_set_pc_im(s->pc - offset);
    gen_exception_internal(excp);
    s->base.is_jmp = DISAS_NORETURN;
}

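/* As above, but for an architectural exception with syndrome information. */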
static void gen_exception_insn(DisasContext *s, int offset, int excp,
                               uint32_t syndrome, uint32_t target_el)
{
    gen_a64_set_pc_im(s->pc - offset);
    gen_exception(excp, syndrome, target_el);
    s->base.is_jmp = DISAS_NORETURN;
}

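/* Raise the exception for a breakpoint-class insn (e.g. BRK); the
 * helper is used so that the target exception level can be determined
 * at runtime rather than at translate time.
 */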
static void gen_exception_bkpt_insn(DisasContext *s, int offset,
                                    uint32_t syndrome)
{
    TCGv_i32 tcg_syn;

    gen_a64_set_pc_im(s->pc - offset);
    tcg_syn = tcg_const_i32(syndrome);
    gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
    tcg_temp_free_i32(tcg_syn);
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_ss_advance(DisasContext *s)
{
    /* If the singlestep state is Active-not-pending, advance to
     * Active-pending.
     */
    if (s->ss_active) {
        s->pstate_ss = 0;
        gen_helper_clear_pstate_ss(cpu_env);
    }
}

static void gen_step_complete_exception(DisasContext *s)
{
    /* We just completed a step of an insn. Move from Active-not-pending
     * to Active-pending, and then also take the swstep exception.
     * This corresponds to making the (IMPDEF) choice to prioritize
     * swstep exceptions over asynchronous exceptions taken to an exception
     * level where debug is disabled. This choice has the advantage that
     * we do not need to maintain internal state corresponding to the
     * ISV/EX syndrome bits between completion of the step and generation
     * of the exception, and our syndrome information is always correct.
     */
    gen_ss_advance(s);
    gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
                  default_exception_el(s));
    s->base.is_jmp = DISAS_NORETURN;
}

static inline bool use_goto_tb(DisasContext *s, int n, uint64_t dest)
{
    /* No direct tb linking with singlestep (either QEMU's or the ARM
     * debug architecture kind) or deterministic io
     */
    if (s->base.singlestep_enabled || s->ss_active ||
        (tb_cflags(s->base.tb) & CF_LAST_IO)) {
        return false;
    }

#ifndef CONFIG_USER_ONLY
    /* Only link tbs from inside the same guest page */
    if ((s->base.tb->pc & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) {
        return false;
    }
#endif

    return true;
}

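/* Jump to DEST, chaining this TB directly to the target TB when
 * use_goto_tb() allows it, and otherwise ending the TB with an
 * indirect jump (or the appropriate single-step exception).
 */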
static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest)
{
    TranslationBlock *tb;

    tb = s->base.tb;
    if (use_goto_tb(s, n, dest)) {
        tcg_gen_goto_tb(n);
        gen_a64_set_pc_im(dest);
        tcg_gen_exit_tb(tb, n);
        s->base.is_jmp = DISAS_NORETURN;
    } else {
        gen_a64_set_pc_im(dest);
        if (s->ss_active) {
            gen_step_complete_exception(s);
        } else if (s->base.singlestep_enabled) {
            gen_exception_internal(EXCP_DEBUG);
        } else {
            tcg_gen_lookup_and_goto_ptr();
            s->base.is_jmp = DISAS_NORETURN;
        }
    }
}

void unallocated_encoding(DisasContext *s)
{
    /* Unallocated and reserved encodings are uncategorized */
    gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
}

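/* Each instruction may allocate up to TMP_A64_MAX i64 temporaries via
 * new_tmp_a64(); they are recorded in tmp_a64[] and released in bulk
 * by free_tmp_a64() once the instruction has been translated.
 */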
static void init_tmp_a64_array(DisasContext *s)
{
#ifdef CONFIG_DEBUG_TCG
    memset(s->tmp_a64, 0, sizeof(s->tmp_a64));
#endif
    s->tmp_a64_count = 0;
}

static void free_tmp_a64(DisasContext *s)
{
    int i;
    for (i = 0; i < s->tmp_a64_count; i++) {
        tcg_temp_free_i64(s->tmp_a64[i]);
    }
    init_tmp_a64_array(s);
}

TCGv_i64 new_tmp_a64(DisasContext *s)
{
    assert(s->tmp_a64_count < TMP_A64_MAX);
    return s->tmp_a64[s->tmp_a64_count++] = tcg_temp_new_i64();
}

TCGv_i64 new_tmp_a64_zero(DisasContext *s)
{
    TCGv_i64 t = new_tmp_a64(s);
    tcg_gen_movi_i64(t, 0);
    return t;
}

/*
 * Register access functions
 *
 * These functions are used for directly accessing a register where
 * changes to the final register value are likely to be made. If you
 * need to use a register for temporary calculation (e.g. index type
 * operations) use the read_* form.
 *
 * B1.2.1 Register mappings
 *
 * In instruction register encoding 31 can refer to ZR (zero register) or
 * the SP (stack pointer) depending on context. In QEMU's case we map SP
 * to cpu_X[31] and ZR accesses to a temporary which can be discarded.
 * This is the point of the _sp forms.
 */
TCGv_i64 cpu_reg(DisasContext *s, int reg)
{
    if (reg == 31) {
        return new_tmp_a64_zero(s);
    } else {
        return cpu_X[reg];
    }
}

/* register access for when 31 == SP */
TCGv_i64 cpu_reg_sp(DisasContext *s, int reg)
{
    return cpu_X[reg];
}

/* read a cpu register in 32bit/64bit mode. Returns a TCGv_i64
 * representing the register contents. This TCGv is an auto-freed
 * temporary so it need not be explicitly freed, and may be modified.
 */
TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf)
{
    TCGv_i64 v = new_tmp_a64(s);
    if (reg != 31) {
        if (sf) {
            tcg_gen_mov_i64(v, cpu_X[reg]);
        } else {
            tcg_gen_ext32u_i64(v, cpu_X[reg]);
        }
    } else {
        tcg_gen_movi_i64(v, 0);
    }
    return v;
}

TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf)
{
    TCGv_i64 v = new_tmp_a64(s);
    if (sf) {
        tcg_gen_mov_i64(v, cpu_X[reg]);
    } else {
        tcg_gen_ext32u_i64(v, cpu_X[reg]);
    }
    return v;
}

/* Return the offset into CPUARMState of a slice (from
 * the least significant end) of FP register Qn (ie
 * Dn, Sn, Hn or Bn).
 * (Note that this is not the same mapping as for A32; see cpu.h)
 */
static inline int fp_reg_offset(DisasContext *s, int regno, TCGMemOp size)
{
    return vec_reg_offset(s, regno, 0, size);
}

/* Offset of the high half of the 128 bit vector Qn */
static inline int fp_reg_hi_offset(DisasContext *s, int regno)
{
    return vec_reg_offset(s, regno, 1, MO_64);
}

/* Convenience accessors for reading and writing single and double
 * FP registers. Writing clears the upper parts of the associated
 * 128 bit vector register, as required by the architecture.
 * Note that unlike the GP register accessors, the values returned
 * by the read functions must be manually freed.
 */
static TCGv_i64 read_fp_dreg(DisasContext *s, int reg)
{
    TCGv_i64 v = tcg_temp_new_i64();

    tcg_gen_ld_i64(v, cpu_env, fp_reg_offset(s, reg, MO_64));
    return v;
}

static TCGv_i32 read_fp_sreg(DisasContext *s, int reg)
{
    TCGv_i32 v = tcg_temp_new_i32();

    tcg_gen_ld_i32(v, cpu_env, fp_reg_offset(s, reg, MO_32));
    return v;
}

static TCGv_i32 read_fp_hreg(DisasContext *s, int reg)
{
    TCGv_i32 v = tcg_temp_new_i32();

    tcg_gen_ld16u_i32(v, cpu_env, fp_reg_offset(s, reg, MO_16));
    return v;
}

/* Clear the bits above an N-bit vector, for N = (is_q ? 128 : 64).
 * If SVE is not enabled, then there are only 128 bits in the vector.
 */
static void clear_vec_high(DisasContext *s, bool is_q, int rd)
{
    unsigned ofs = fp_reg_offset(s, rd, MO_64);
    unsigned vsz = vec_full_reg_size(s);

    if (!is_q) {
        TCGv_i64 tcg_zero = tcg_const_i64(0);
        tcg_gen_st_i64(tcg_zero, cpu_env, ofs + 8);
        tcg_temp_free_i64(tcg_zero);
    }
    if (vsz > 16) {
        tcg_gen_gvec_dup8i(ofs + 16, vsz - 16, vsz - 16, 0);
    }
}

void write_fp_dreg(DisasContext *s, int reg, TCGv_i64 v)
{
    unsigned ofs = fp_reg_offset(s, reg, MO_64);

    tcg_gen_st_i64(v, cpu_env, ofs);
    clear_vec_high(s, false, reg);
}

static void write_fp_sreg(DisasContext *s, int reg, TCGv_i32 v)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp, v);
    write_fp_dreg(s, reg, tmp);
    tcg_temp_free_i64(tmp);
}

TCGv_ptr get_fpstatus_ptr(bool is_f16)
{
    TCGv_ptr statusptr = tcg_temp_new_ptr();
    int offset;

    /* In A64 all instructions (both FP and Neon) use the FPCR; there
     * is no equivalent of the A32 Neon "standard FPSCR value".
     * However half-precision operations operate under a different
     * FZ16 flag and use vfp.fp_status_f16 instead of vfp.fp_status.
     */
    if (is_f16) {
        offset = offsetof(CPUARMState, vfp.fp_status_f16);
    } else {
        offset = offsetof(CPUARMState, vfp.fp_status);
    }
    tcg_gen_addi_ptr(statusptr, cpu_env, offset);
    return statusptr;
}

/* Expand a 2-operand AdvSIMD vector operation using an expander function.  */
static void gen_gvec_fn2(DisasContext *s, bool is_q, int rd, int rn,
                         GVecGen2Fn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            is_q ? 16 : 8, vec_full_reg_size(s));
}

/* Expand a 2-operand + immediate AdvSIMD vector operation using
 * an expander function.
 */
static void gen_gvec_fn2i(DisasContext *s, bool is_q, int rd, int rn,
                          int64_t imm, GVecGen2iFn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            imm, is_q ? 16 : 8, vec_full_reg_size(s));
}

/* Expand a 3-operand AdvSIMD vector operation using an expander function.  */
static void gen_gvec_fn3(DisasContext *s, bool is_q, int rd, int rn, int rm,
                         GVecGen3Fn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            vec_full_reg_offset(s, rm), is_q ? 16 : 8, vec_full_reg_size(s));
}

/* Expand a 2-operand + immediate AdvSIMD vector operation using
 * an op descriptor.
 */
static void gen_gvec_op2i(DisasContext *s, bool is_q, int rd,
                          int rn, int64_t imm, const GVecGen2i *gvec_op)
{
    tcg_gen_gvec_2i(vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
                    is_q ? 16 : 8, vec_full_reg_size(s), imm, gvec_op);
}

/* Expand a 3-operand AdvSIMD vector operation using an op descriptor.  */
static void gen_gvec_op3(DisasContext *s, bool is_q, int rd,
                         int rn, int rm, const GVecGen3 *gvec_op)
{
    tcg_gen_gvec_3(vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
                   vec_full_reg_offset(s, rm), is_q ? 16 : 8,
                   vec_full_reg_size(s), gvec_op);
}

/* Expand a 3-operand operation using an out-of-line helper.  */
static void gen_gvec_op3_ool(DisasContext *s, bool is_q, int rd,
                             int rn, int rm, int data, gen_helper_gvec_3 *fn)
{
    tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm),
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
}

/* Expand a 3-operand + env pointer operation using
 * an out-of-line helper.
 */
static void gen_gvec_op3_env(DisasContext *s, bool is_q, int rd,
                             int rn, int rm, gen_helper_gvec_3_ptr *fn)
{
    tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm), cpu_env,
                       is_q ? 16 : 8, vec_full_reg_size(s), 0, fn);
}

/* Expand a 3-operand + fpstatus pointer + simd data value operation using
 * an out-of-line helper.
 */
static void gen_gvec_op3_fpst(DisasContext *s, bool is_q, int rd, int rn,
                              int rm, bool is_fp16, int data,
                              gen_helper_gvec_3_ptr *fn)
{
    TCGv_ptr fpst = get_fpstatus_ptr(is_fp16);
    tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm), fpst,
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
    tcg_temp_free_ptr(fpst);
}

/* Set ZF and NF based on a 64 bit result. This is alas fiddlier
 * than the 32 bit equivalent.
 */
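/* (The extract below splits the result into its two 32-bit halves,
 * low into ZF and high into NF; OR-ing them into ZF makes ZF zero
 * exactly when the full 64-bit result is zero, while bit 31 of NF
 * is the sign bit of the result.)
 */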
static inline void gen_set_NZ64(TCGv_i64 result)
{
    tcg_gen_extr_i64_i32(cpu_ZF, cpu_NF, result);
    tcg_gen_or_i32(cpu_ZF, cpu_ZF, cpu_NF);
}

/* Set NZCV as for a logical operation: NZ as per result, CV cleared. */
static inline void gen_logic_CC(int sf, TCGv_i64 result)
{
    if (sf) {
        gen_set_NZ64(result);
    } else {
        tcg_gen_extrl_i64_i32(cpu_ZF, result);
        tcg_gen_mov_i32(cpu_NF, cpu_ZF);
    }
    tcg_gen_movi_i32(cpu_CF, 0);
    tcg_gen_movi_i32(cpu_VF, 0);
}

/* dest = T0 + T1; compute C, N, V and Z flags */
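/* (Signed overflow on addition is (result ^ t0) & ~(t0 ^ t1): set when
 * the operands have the same sign but the result's sign differs; the
 * top bit of that value becomes cpu_VF.)
 */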
static void gen_add_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        TCGv_i64 result, flag, tmp;
        result = tcg_temp_new_i64();
        flag = tcg_temp_new_i64();
        tmp = tcg_temp_new_i64();

        tcg_gen_movi_i64(tmp, 0);
        tcg_gen_add2_i64(result, flag, t0, tmp, t1, tmp);

        tcg_gen_extrl_i64_i32(cpu_CF, flag);

        gen_set_NZ64(result);

        tcg_gen_xor_i64(flag, result, t0);
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_andc_i64(flag, flag, tmp);
        tcg_temp_free_i64(tmp);
        tcg_gen_extrh_i64_i32(cpu_VF, flag);

        tcg_gen_mov_i64(dest, result);
        tcg_temp_free_i64(result);
        tcg_temp_free_i64(flag);
    } else {
        /* 32 bit arithmetic */
        TCGv_i32 t0_32 = tcg_temp_new_i32();
        TCGv_i32 t1_32 = tcg_temp_new_i32();
        TCGv_i32 tmp = tcg_temp_new_i32();

        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_extrl_i64_i32(t0_32, t0);
        tcg_gen_extrl_i64_i32(t1_32, t1);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, t1_32, tmp);
        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
        tcg_gen_extu_i32_i64(dest, cpu_NF);

        tcg_temp_free_i32(tmp);
        tcg_temp_free_i32(t0_32);
        tcg_temp_free_i32(t1_32);
    }
}

/* dest = T0 - T1; compute C, N, V and Z flags */
static void gen_sub_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        /* 64 bit arithmetic */
        TCGv_i64 result, flag, tmp;

        result = tcg_temp_new_i64();
        flag = tcg_temp_new_i64();
        tcg_gen_sub_i64(result, t0, t1);

        gen_set_NZ64(result);

        tcg_gen_setcond_i64(TCG_COND_GEU, flag, t0, t1);
        tcg_gen_extrl_i64_i32(cpu_CF, flag);

        tcg_gen_xor_i64(flag, result, t0);
        tmp = tcg_temp_new_i64();
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_and_i64(flag, flag, tmp);
        tcg_temp_free_i64(tmp);
        tcg_gen_extrh_i64_i32(cpu_VF, flag);
        tcg_gen_mov_i64(dest, result);
        tcg_temp_free_i64(flag);
        tcg_temp_free_i64(result);
    } else {
        /* 32 bit arithmetic */
        TCGv_i32 t0_32 = tcg_temp_new_i32();
        TCGv_i32 t1_32 = tcg_temp_new_i32();
        TCGv_i32 tmp;

        tcg_gen_extrl_i64_i32(t0_32, t0);
        tcg_gen_extrl_i64_i32(t1_32, t1);
        tcg_gen_sub_i32(cpu_NF, t0_32, t1_32);
        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0_32, t1_32);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_temp_free_i32(t0_32);
        tcg_temp_free_i32(t1_32);
        tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
        tcg_temp_free_i32(tmp);
        tcg_gen_extu_i32_i64(dest, cpu_NF);
    }
}

/* dest = T0 + T1 + CF; do not compute flags. */
static void gen_adc(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    TCGv_i64 flag = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(flag, cpu_CF);
    tcg_gen_add_i64(dest, t0, t1);
    tcg_gen_add_i64(dest, dest, flag);
    tcg_temp_free_i64(flag);

    if (!sf) {
        tcg_gen_ext32u_i64(dest, dest);
    }
}

/* dest = T0 + T1 + CF; compute C, N, V and Z flags. */
static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        TCGv_i64 result, cf_64, vf_64, tmp;
        result = tcg_temp_new_i64();
        cf_64 = tcg_temp_new_i64();
        vf_64 = tcg_temp_new_i64();
        tmp = tcg_const_i64(0);

        tcg_gen_extu_i32_i64(cf_64, cpu_CF);
        tcg_gen_add2_i64(result, cf_64, t0, tmp, cf_64, tmp);
        tcg_gen_add2_i64(result, cf_64, result, cf_64, t1, tmp);
        tcg_gen_extrl_i64_i32(cpu_CF, cf_64);
        gen_set_NZ64(result);

        tcg_gen_xor_i64(vf_64, result, t0);
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_andc_i64(vf_64, vf_64, tmp);
        tcg_gen_extrh_i64_i32(cpu_VF, vf_64);

        tcg_gen_mov_i64(dest, result);

        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(vf_64);
        tcg_temp_free_i64(cf_64);
        tcg_temp_free_i64(result);
    } else {
        TCGv_i32 t0_32, t1_32, tmp;
        t0_32 = tcg_temp_new_i32();
        t1_32 = tcg_temp_new_i32();
        tmp = tcg_const_i32(0);

        tcg_gen_extrl_i64_i32(t0_32, t0);
        tcg_gen_extrl_i64_i32(t1_32, t1);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1_32, tmp);

        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
        tcg_gen_extu_i32_i64(dest, cpu_NF);

        tcg_temp_free_i32(tmp);
        tcg_temp_free_i32(t1_32);
        tcg_temp_free_i32(t0_32);
    }
}

/*
 * Load/Store generators
 */

/*
 * Store from GPR register to memory.
 */
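/* The iss_* arguments describe the instruction syndrome (ISS) to
 * report if the access faults: iss_srt is the register number,
 * iss_sf the 64-bit flag and iss_ar the acquire/release flag, as
 * consumed by syn_data_abort_with_iss() below.
 */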
static void do_gpr_st_memidx(DisasContext *s, TCGv_i64 source,
                             TCGv_i64 tcg_addr, int size, int memidx,
                             bool iss_valid,
                             unsigned int iss_srt,
                             bool iss_sf, bool iss_ar)
{
    g_assert(size <= 3);
    tcg_gen_qemu_st_i64(source, tcg_addr, memidx, s->be_data + size);

    if (iss_valid) {
        uint32_t syn;

        syn = syn_data_abort_with_iss(0,
                                      size,
                                      false,
                                      iss_srt,
                                      iss_sf,
                                      iss_ar,
                                      0, 0, 0, 0, 0, false);
        disas_set_insn_syndrome(s, syn);
    }
}

static void do_gpr_st(DisasContext *s, TCGv_i64 source,
                      TCGv_i64 tcg_addr, int size,
                      bool iss_valid,
                      unsigned int iss_srt,
                      bool iss_sf, bool iss_ar)
{
    do_gpr_st_memidx(s, source, tcg_addr, size, get_mem_index(s),
                     iss_valid, iss_srt, iss_sf, iss_ar);
}

/*
 * Load from memory to GPR register
 */
static void do_gpr_ld_memidx(DisasContext *s,
                             TCGv_i64 dest, TCGv_i64 tcg_addr,
                             int size, bool is_signed,
                             bool extend, int memidx,
                             bool iss_valid, unsigned int iss_srt,
                             bool iss_sf, bool iss_ar)
{
    TCGMemOp memop = s->be_data + size;

    g_assert(size <= 3);

    if (is_signed) {
        memop += MO_SIGN;
    }

    tcg_gen_qemu_ld_i64(dest, tcg_addr, memidx, memop);

    if (extend && is_signed) {
        g_assert(size < 3);
        tcg_gen_ext32u_i64(dest, dest);
    }

    if (iss_valid) {
        uint32_t syn;

        syn = syn_data_abort_with_iss(0,
                                      size,
                                      is_signed,
                                      iss_srt,
                                      iss_sf,
                                      iss_ar,
                                      0, 0, 0, 0, 0, false);
        disas_set_insn_syndrome(s, syn);
    }
}

static void do_gpr_ld(DisasContext *s,
                      TCGv_i64 dest, TCGv_i64 tcg_addr,
                      int size, bool is_signed, bool extend,
                      bool iss_valid, unsigned int iss_srt,
                      bool iss_sf, bool iss_ar)
{
    do_gpr_ld_memidx(s, dest, tcg_addr, size, is_signed, extend,
                     get_mem_index(s),
                     iss_valid, iss_srt, iss_sf, iss_ar);
}

/*
 * Store from FP register to memory
 */
static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, int size)
{
    /* This writes the bottom N bits of a 128 bit wide vector to memory */
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_ld_i64(tmp, cpu_env, fp_reg_offset(s, srcidx, MO_64));
    if (size < 4) {
        tcg_gen_qemu_st_i64(tmp, tcg_addr, get_mem_index(s),
                            s->be_data + size);
    } else {
        bool be = s->be_data == MO_BE;
        TCGv_i64 tcg_hiaddr = tcg_temp_new_i64();

        tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
        tcg_gen_qemu_st_i64(tmp, be ? tcg_hiaddr : tcg_addr, get_mem_index(s),
                            s->be_data | MO_Q);
        tcg_gen_ld_i64(tmp, cpu_env, fp_reg_hi_offset(s, srcidx));
        tcg_gen_qemu_st_i64(tmp, be ? tcg_addr : tcg_hiaddr, get_mem_index(s),
                            s->be_data | MO_Q);
        tcg_temp_free_i64(tcg_hiaddr);
    }

    tcg_temp_free_i64(tmp);
}

/*
 * Load from memory to FP register
 */
static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size)
{
    /* This always zero-extends and writes to a full 128 bit wide vector */
    TCGv_i64 tmplo = tcg_temp_new_i64();
    TCGv_i64 tmphi;

    if (size < 4) {
        TCGMemOp memop = s->be_data + size;
        tmphi = tcg_const_i64(0);
        tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), memop);
    } else {
        bool be = s->be_data == MO_BE;
        TCGv_i64 tcg_hiaddr;

        tmphi = tcg_temp_new_i64();
        tcg_hiaddr = tcg_temp_new_i64();

        tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
        tcg_gen_qemu_ld_i64(tmplo, be ? tcg_hiaddr : tcg_addr, get_mem_index(s),
                            s->be_data | MO_Q);
        tcg_gen_qemu_ld_i64(tmphi, be ? tcg_addr : tcg_hiaddr, get_mem_index(s),
                            s->be_data | MO_Q);
        tcg_temp_free_i64(tcg_hiaddr);
    }

    tcg_gen_st_i64(tmplo, cpu_env, fp_reg_offset(s, destidx, MO_64));
    tcg_gen_st_i64(tmphi, cpu_env, fp_reg_hi_offset(s, destidx));

    tcg_temp_free_i64(tmplo);
    tcg_temp_free_i64(tmphi);

    clear_vec_high(s, true, destidx);
}

/*
 * Vector load/store helpers.
 *
 * The principal difference between this and a FP load is that we don't
 * zero extend as we are filling a partial chunk of the vector register.
 * These functions don't support 128 bit loads/stores, which would be
 * normal load/store operations.
 *
 * The _i32 versions are useful when operating on 32 bit quantities
 * (eg for floating point single or using Neon helper functions).
 */

/* Get value of an element within a vector register */
static void read_vec_element(DisasContext *s, TCGv_i64 tcg_dest, int srcidx,
                             int element, TCGMemOp memop)
{
    int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_ld8u_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_ld16u_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_32:
        tcg_gen_ld32u_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_8|MO_SIGN:
        tcg_gen_ld8s_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16|MO_SIGN:
        tcg_gen_ld16s_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_32|MO_SIGN:
        tcg_gen_ld32s_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_64:
    case MO_64|MO_SIGN:
        tcg_gen_ld_i64(tcg_dest, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}

static void read_vec_element_i32(DisasContext *s, TCGv_i32 tcg_dest, int srcidx,
                                 int element, TCGMemOp memop)
{
    int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_ld8u_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_ld16u_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_8|MO_SIGN:
        tcg_gen_ld8s_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16|MO_SIGN:
        tcg_gen_ld16s_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_32:
    case MO_32|MO_SIGN:
        tcg_gen_ld_i32(tcg_dest, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}

/* Set value of an element within a vector register */
static void write_vec_element(DisasContext *s, TCGv_i64 tcg_src, int destidx,
                              int element, TCGMemOp memop)
{
    int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_st8_i64(tcg_src, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_st16_i64(tcg_src, cpu_env, vect_off);
        break;
    case MO_32:
        tcg_gen_st32_i64(tcg_src, cpu_env, vect_off);
        break;
    case MO_64:
        tcg_gen_st_i64(tcg_src, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}

static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src,
                                  int destidx, int element, TCGMemOp memop)
{
    int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_st8_i32(tcg_src, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_st16_i32(tcg_src, cpu_env, vect_off);
        break;
    case MO_32:
        tcg_gen_st_i32(tcg_src, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}

/* Store from vector register to memory */
static void do_vec_st(DisasContext *s, int srcidx, int element,
                      TCGv_i64 tcg_addr, int size)
{
    TCGMemOp memop = s->be_data + size;
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();

    read_vec_element(s, tcg_tmp, srcidx, element, size);
    tcg_gen_qemu_st_i64(tcg_tmp, tcg_addr, get_mem_index(s), memop);

    tcg_temp_free_i64(tcg_tmp);
}

/* Load from memory to vector register */
static void do_vec_ld(DisasContext *s, int destidx, int element,
                      TCGv_i64 tcg_addr, int size)
{
    TCGMemOp memop = s->be_data + size;
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();

    tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr, get_mem_index(s), memop);
    write_vec_element(s, tcg_tmp, destidx, element, size);

    tcg_temp_free_i64(tcg_tmp);
}

/* Check that FP/Neon access is enabled. If it is, return
 * true. If not, emit code to generate an appropriate exception,
 * and return false; the caller should not emit any code for
 * the instruction. Note that this check must happen after all
 * unallocated-encoding checks (otherwise the syndrome information
 * for the resulting exception will be incorrect).
 */
static inline bool fp_access_check(DisasContext *s)
{
    assert(!s->fp_access_checked);
    s->fp_access_checked = true;

    if (!s->fp_excp_el) {
        return true;
    }

    gen_exception_insn(s, 4, EXCP_UDEF, syn_fp_access_trap(1, 0xe, false),
                       s->fp_excp_el);
    return false;
}

/* Check that SVE access is enabled.  If it is, return true.
 * If not, emit code to generate an appropriate exception and return false.
 */
bool sve_access_check(DisasContext *s)
{
    if (s->sve_excp_el) {
        gen_exception_insn(s, 4, EXCP_UDEF, syn_sve_access_trap(),
                           s->sve_excp_el);
        return false;
    }
    return fp_access_check(s);
}

/*
 * This utility function is for doing register extension with an
 * optional shift. You will likely want to pass a temporary for the
 * destination register. See DecodeRegExtend() in the ARM ARM.
 */
static void ext_and_shift_reg(TCGv_i64 tcg_out, TCGv_i64 tcg_in,
                              int option, unsigned int shift)
{
    int extsize = extract32(option, 0, 2);
    bool is_signed = extract32(option, 2, 1);

    if (is_signed) {
        switch (extsize) {
        case 0:
            tcg_gen_ext8s_i64(tcg_out, tcg_in);
            break;
        case 1:
            tcg_gen_ext16s_i64(tcg_out, tcg_in);
            break;
        case 2:
            tcg_gen_ext32s_i64(tcg_out, tcg_in);
            break;
        case 3:
            tcg_gen_mov_i64(tcg_out, tcg_in);
            break;
        }
    } else {
        switch (extsize) {
        case 0:
            tcg_gen_ext8u_i64(tcg_out, tcg_in);
            break;
        case 1:
            tcg_gen_ext16u_i64(tcg_out, tcg_in);
            break;
        case 2:
            tcg_gen_ext32u_i64(tcg_out, tcg_in);
            break;
        case 3:
            tcg_gen_mov_i64(tcg_out, tcg_in);
            break;
        }
    }

    if (shift) {
        tcg_gen_shli_i64(tcg_out, tcg_out, shift);
    }
}

static inline void gen_check_sp_alignment(DisasContext *s)
{
    /* The AArch64 architecture mandates that (if enabled via PSTATE
     * or SCTLR bits) there is a check that SP is 16-aligned on every
     * SP-relative load or store (with an exception generated if it is not).
     * In line with general QEMU practice regarding misaligned accesses,
     * we omit these checks for the sake of guest program performance.
     * This function is provided as a hook so we can more easily add these
     * checks in future (possibly as a "favour catching guest program bugs
     * over speed" user selectable option).
     */
}

/*
 * This provides a simple table-based lookup decoder. It is intended
 * to be used when the relevant bits for decode are too awkwardly
 * placed and switch/if based logic would be confusing and deeply
 * nested. Since it's a linear search through the table, tables
 * should be kept small.
 *
 * It returns the first handler where insn & mask == pattern, or
 * NULL if there is no match.
 * The table is terminated by an empty mask (i.e. 0)
 */
static inline AArch64DecodeFn *lookup_disas_fn(const AArch64DecodeTable *table,
                                               uint32_t insn)
{
    const AArch64DecodeTable *tptr = table;

    while (tptr->mask) {
        if ((insn & tptr->mask) == tptr->pattern) {
            return tptr->disas_fn;
        }
        tptr++;
    }
    return NULL;
}

/*
 * The instruction disassembly implemented here matches
 * the instruction encoding classifications in chapter C4
 * of the ARM Architecture Reference Manual (DDI0487B_a);
 * classification names and decode diagrams here should generally
 * match up with those in the manual.
 */

/* Unconditional branch (immediate)
 *   31  30       26 25                                  0
 * +----+-----------+-------------------------------------+
 * | op | 0 0 1 0 1 |                 imm26               |
 * +----+-----------+-------------------------------------+
 */
static void disas_uncond_b_imm(DisasContext *s, uint32_t insn)
{
    uint64_t addr = s->pc + sextract32(insn, 0, 26) * 4 - 4;

    if (insn & (1U << 31)) {
        /* BL Branch with link */
        tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
    }

    /* B Branch / BL Branch with link */
    gen_goto_tb(s, 0, addr);
}

/* Compare and branch (immediate)
 *   31  30         25  24  23                  5 4      0
 * +----+-------------+----+---------------------+--------+
 * | sf | 0 1 1 0 1 0 | op |         imm19       |   Rt   |
 * +----+-------------+----+---------------------+--------+
 */
static void disas_comp_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int sf, op, rt;
    uint64_t addr;
    TCGLabel *label_match;
    TCGv_i64 tcg_cmp;

    sf = extract32(insn, 31, 1);
    op = extract32(insn, 24, 1); /* 0: CBZ; 1: CBNZ */
    rt = extract32(insn, 0, 5);
    addr = s->pc + sextract32(insn, 5, 19) * 4 - 4;

    tcg_cmp = read_cpu_reg(s, rt, sf);
    label_match = gen_new_label();

    tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
                        tcg_cmp, 0, label_match);

    gen_goto_tb(s, 0, s->pc);
    gen_set_label(label_match);
    gen_goto_tb(s, 1, addr);
}

/* Test and branch (immediate)
 *   31  30         25  24  23   19 18          5 4    0
 * +----+-------------+----+-------+-------------+------+
 * | b5 | 0 1 1 0 1 1 | op |  b40  |    imm14    |  Rt  |
 * +----+-------------+----+-------+-------------+------+
 */
static void disas_test_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int bit_pos, op, rt;
    uint64_t addr;
    TCGLabel *label_match;
    TCGv_i64 tcg_cmp;

    bit_pos = (extract32(insn, 31, 1) << 5) | extract32(insn, 19, 5);
    op = extract32(insn, 24, 1); /* 0: TBZ; 1: TBNZ */
    addr = s->pc + sextract32(insn, 5, 14) * 4 - 4;
    rt = extract32(insn, 0, 5);

    tcg_cmp = tcg_temp_new_i64();
    tcg_gen_andi_i64(tcg_cmp, cpu_reg(s, rt), (1ULL << bit_pos));
    label_match = gen_new_label();
    tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
                        tcg_cmp, 0, label_match);
    tcg_temp_free_i64(tcg_cmp);
    gen_goto_tb(s, 0, s->pc);
    gen_set_label(label_match);
    gen_goto_tb(s, 1, addr);
}

/* Conditional branch (immediate)
 *  31           25  24  23                  5   4  3    0
 * +---------------+----+---------------------+----+------+
 * | 0 1 0 1 0 1 0 | o1 |         imm19       | o0 | cond |
 * +---------------+----+---------------------+----+------+
 */
static void disas_cond_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int cond;
    uint64_t addr;

    if ((insn & (1 << 4)) || (insn & (1 << 24))) {
        unallocated_encoding(s);
        return;
    }
    addr = s->pc + sextract32(insn, 5, 19) * 4 - 4;
    cond = extract32(insn, 0, 4);

    if (cond < 0x0e) {
        /* genuinely conditional branches */
        TCGLabel *label_match = gen_new_label();
        arm_gen_test_cc(cond, label_match);
        gen_goto_tb(s, 0, s->pc);
        gen_set_label(label_match);
        gen_goto_tb(s, 1, addr);
    } else {
        /* 0xe and 0xf are both "always" conditions */
        gen_goto_tb(s, 0, addr);
    }
}

/* HINT instruction group, including various allocated HINTs */
static void handle_hint(DisasContext *s, uint32_t insn,
                        unsigned int op1, unsigned int op2, unsigned int crm)
{
    unsigned int selector = crm << 3 | op2;

    if (op1 != 3) {
        unallocated_encoding(s);
        return;
    }

    switch (selector) {
    case 0: /* NOP */
        return;
    case 3: /* WFI */
        s->base.is_jmp = DISAS_WFI;
        return;
        /* When running in MTTCG we don't generate jumps to the yield and
         * WFE helpers as it won't affect the scheduling of other vCPUs.
         * If we wanted to more completely model WFE/SEV so we don't busy
         * spin unnecessarily we would need to do something more involved.
         */
    case 1: /* YIELD */
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            s->base.is_jmp = DISAS_YIELD;
        }
        return;
    case 2: /* WFE */
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            s->base.is_jmp = DISAS_WFE;
        }
        return;
    case 4: /* SEV */
    case 5: /* SEVL */
        /* we treat all as NOP at least for now */
        return;
    default:
        /* default specified as NOP equivalent */
        return;
    }
}

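/* CLREX: clear the local exclusive monitor by invalidating the
 * recorded exclusive address.
 */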
static void gen_clrex(DisasContext *s, uint32_t insn)
{
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}

/* CLREX, DSB, DMB, ISB */
static void handle_sync(DisasContext *s, uint32_t insn,
                        unsigned int op1, unsigned int op2, unsigned int crm)
{
    TCGBar bar;

    if (op1 != 3) {
        unallocated_encoding(s);
        return;
    }

    switch (op2) {
    case 2: /* CLREX */
        gen_clrex(s, insn);
        return;
    case 4: /* DSB */
    case 5: /* DMB */
        switch (crm & 3) {
        case 1: /* MBReqTypes_Reads */
            bar = TCG_BAR_SC | TCG_MO_LD_LD | TCG_MO_LD_ST;
            break;
        case 2: /* MBReqTypes_Writes */
            bar = TCG_BAR_SC | TCG_MO_ST_ST;
            break;
        default: /* MBReqTypes_All */
            bar = TCG_BAR_SC | TCG_MO_ALL;
            break;
        }
        tcg_gen_mb(bar);
        return;
    case 6: /* ISB */
        /* We need to break the TB after this insn to execute
         * self-modifying code correctly and also to take
         * any pending interrupts immediately.
         */
        gen_goto_tb(s, 0, s->pc);
        return;
    default:
        unallocated_encoding(s);
        return;
    }
}

/* MSR (immediate) - move immediate to processor state field */
static void handle_msr_i(DisasContext *s, uint32_t insn,
                         unsigned int op1, unsigned int op2, unsigned int crm)
{
    int op = op1 << 3 | op2;
    switch (op) {
    case 0x05: /* SPSel */
        if (s->current_el == 0) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x1e: /* DAIFSet */
    case 0x1f: /* DAIFClear */
    {
        TCGv_i32 tcg_imm = tcg_const_i32(crm);
        TCGv_i32 tcg_op = tcg_const_i32(op);
        gen_a64_set_pc_im(s->pc - 4);
        gen_helper_msr_i_pstate(cpu_env, tcg_op, tcg_imm);
        tcg_temp_free_i32(tcg_imm);
        tcg_temp_free_i32(tcg_op);
        /* For DAIFClear, exit the cpu loop to re-evaluate pending IRQs.  */
        gen_a64_set_pc_im(s->pc);
        s->base.is_jmp = (op == 0x1f ? DISAS_EXIT : DISAS_JUMP);
        break;
    }
    default:
        unallocated_encoding(s);
        return;
    }
}

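/* Pack the NZCV flags into bits [31:28] of tcg_rt, as for a read of
 * the NZCV system register.
 */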
static void gen_get_nzcv(TCGv_i64 tcg_rt)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    TCGv_i32 nzcv = tcg_temp_new_i32();

    /* build bit 31, N */
    tcg_gen_andi_i32(nzcv, cpu_NF, (1U << 31));
    /* build bit 30, Z */
    tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_ZF, 0);
    tcg_gen_deposit_i32(nzcv, nzcv, tmp, 30, 1);
    /* build bit 29, C */
    tcg_gen_deposit_i32(nzcv, nzcv, cpu_CF, 29, 1);
    /* build bit 28, V */
    tcg_gen_shri_i32(tmp, cpu_VF, 31);
    tcg_gen_deposit_i32(nzcv, nzcv, tmp, 28, 1);
    /* generate result */
    tcg_gen_extu_i32_i64(tcg_rt, nzcv);

    tcg_temp_free_i32(nzcv);
    tcg_temp_free_i32(tmp);
}

1603static void gen_set_nzcv(TCGv_i64 tcg_rt)
1605{
1606    TCGv_i32 nzcv = tcg_temp_new_i32();
1607
1608    /* take NZCV from R[t] */
1609    tcg_gen_extrl_i64_i32(nzcv, tcg_rt);
1610
1611    /* bit 31, N */
1612    tcg_gen_andi_i32(cpu_NF, nzcv, (1U << 31));
1613    /* bit 30, Z */
1614    tcg_gen_andi_i32(cpu_ZF, nzcv, (1 << 30));
1615    tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_ZF, cpu_ZF, 0);
1616    /* bit 29, C */
1617    tcg_gen_andi_i32(cpu_CF, nzcv, (1 << 29));
1618    tcg_gen_shri_i32(cpu_CF, cpu_CF, 29);
1619    /* bit 28, V */
1620    tcg_gen_andi_i32(cpu_VF, nzcv, (1 << 28));
1621    tcg_gen_shli_i32(cpu_VF, cpu_VF, 3);
1622    tcg_temp_free_i32(nzcv);
1623}
1624
1625/* MRS - move from system register
1626 * MSR (register) - move to system register
1627 * SYS
1628 * SYSL
1629 * These are all essentially the same insn in 'read' and 'write'
1630 * versions, with varying op0 fields.
1631 */
1632static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
1633                       unsigned int op0, unsigned int op1, unsigned int op2,
1634                       unsigned int crn, unsigned int crm, unsigned int rt)
1635{
1636    const ARMCPRegInfo *ri;
1637    TCGv_i64 tcg_rt;
1638
1639    ri = get_arm_cp_reginfo(s->cp_regs,
1640                            ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP,
1641                                               crn, crm, op0, op1, op2));
1642
1643    if (!ri) {
1644        /* Unknown register; this might be a guest error or a QEMU
1645         * unimplemented feature.
1646         */
1647        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch64 "
1648                      "system register op0:%d op1:%d crn:%d crm:%d op2:%d\n",
1649                      isread ? "read" : "write", op0, op1, crn, crm, op2);
1650        unallocated_encoding(s);
1651        return;
1652    }
1653
1654    /* Check access permissions */
1655    if (!cp_access_ok(s->current_el, ri, isread)) {
1656        unallocated_encoding(s);
1657        return;
1658    }
1659
1660    if (ri->accessfn) {
1661        /* Emit code to perform further access permissions checks at
1662         * runtime; this may result in an exception.
1663         */
1664        TCGv_ptr tmpptr;
1665        TCGv_i32 tcg_syn, tcg_isread;
1666        uint32_t syndrome;
1667
1668        gen_a64_set_pc_im(s->pc - 4);
1669        tmpptr = tcg_const_ptr(ri);
1670        syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
1671        tcg_syn = tcg_const_i32(syndrome);
1672        tcg_isread = tcg_const_i32(isread);
1673        gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn, tcg_isread);
1674        tcg_temp_free_ptr(tmpptr);
1675        tcg_temp_free_i32(tcg_syn);
1676        tcg_temp_free_i32(tcg_isread);
1677    }
1678
1679    /* Handle special cases first */
1680    switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
1681    case ARM_CP_NOP:
1682        return;
1683    case ARM_CP_NZCV:
1684        tcg_rt = cpu_reg(s, rt);
1685        if (isread) {
1686            gen_get_nzcv(tcg_rt);
1687        } else {
1688            gen_set_nzcv(tcg_rt);
1689        }
1690        return;
1691    case ARM_CP_CURRENTEL:
1692        /* Reads return the current EL value from PSTATE, which is
1693         * guaranteed to be constant by the tb flags.
1694         */
1695        tcg_rt = cpu_reg(s, rt);
1696        tcg_gen_movi_i64(tcg_rt, s->current_el << 2);
1697        return;
1698    case ARM_CP_DC_ZVA:
1699        /* Writes clear the aligned block of memory which rt points into. */
1700        tcg_rt = cpu_reg(s, rt);
1701        gen_helper_dc_zva(cpu_env, tcg_rt);
1702        return;
1703    default:
1704        break;
1705    }
1706    if ((ri->type & ARM_CP_FPU) && !fp_access_check(s)) {
1707        return;
1708    } else if ((ri->type & ARM_CP_SVE) && !sve_access_check(s)) {
1709        return;
1710    }
1711
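    /* Under icount, accesses to I/O registers need to be bracketed by
     * gen_io_start()/gen_io_end() so that they happen at a deterministic
     * point in the instruction stream.
     */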
1712    if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
1713        gen_io_start();
1714    }
1715
1716    tcg_rt = cpu_reg(s, rt);
1717
1718    if (isread) {
1719        if (ri->type & ARM_CP_CONST) {
1720            tcg_gen_movi_i64(tcg_rt, ri->resetvalue);
1721        } else if (ri->readfn) {
1722            TCGv_ptr tmpptr;
1723            tmpptr = tcg_const_ptr(ri);
1724            gen_helper_get_cp_reg64(tcg_rt, cpu_env, tmpptr);
1725            tcg_temp_free_ptr(tmpptr);
1726        } else {
1727            tcg_gen_ld_i64(tcg_rt, cpu_env, ri->fieldoffset);
1728        }
1729    } else {
1730        if (ri->type & ARM_CP_CONST) {
1731            /* If not forbidden by access permissions, treat as WI */
1732            return;
1733        } else if (ri->writefn) {
1734            TCGv_ptr tmpptr;
1735            tmpptr = tcg_const_ptr(ri);
1736            gen_helper_set_cp_reg64(cpu_env, tmpptr, tcg_rt);
1737            tcg_temp_free_ptr(tmpptr);
1738        } else {
1739            tcg_gen_st_i64(tcg_rt, cpu_env, ri->fieldoffset);
1740        }
1741    }
1742
1743    if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
1744        /* I/O operations must end the TB here (whether read or write) */
1745        gen_io_end();
1746        s->base.is_jmp = DISAS_UPDATE;
1747    } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
1748        /* We default to ending the TB on a coprocessor register write,
1749         * but allow this to be suppressed by the register definition
1750         * (usually only necessary to work around guest bugs).
1751         */
1752        s->base.is_jmp = DISAS_UPDATE;
1753    }
1754}
1755
1756/* System
1757 *  31                 22 21  20 19 18 16 15   12 11    8 7   5 4    0
1758 * +---------------------+---+-----+-----+-------+-------+-----+------+
1759 * | 1 1 0 1 0 1 0 1 0 0 | L | op0 | op1 |  CRn  |  CRm  | op2 |  Rt  |
1760 * +---------------------+---+-----+-----+-------+-------+-----+------+
1761 */
1762static void disas_system(DisasContext *s, uint32_t insn)
1763{
1764    unsigned int l, op0, op1, crn, crm, op2, rt;
1765    l = extract32(insn, 21, 1);
1766    op0 = extract32(insn, 19, 2);
1767    op1 = extract32(insn, 16, 3);
1768    crn = extract32(insn, 12, 4);
1769    crm = extract32(insn, 8, 4);
1770    op2 = extract32(insn, 5, 3);
1771    rt = extract32(insn, 0, 5);
1772
1773    if (op0 == 0) {
1774        if (l || rt != 31) {
1775            unallocated_encoding(s);
1776            return;
1777        }
1778        switch (crn) {
1779        case 2: /* HINT (including allocated hints like NOP, YIELD, etc) */
1780            handle_hint(s, insn, op1, op2, crm);
1781            break;
1782        case 3: /* CLREX, DSB, DMB, ISB */
1783            handle_sync(s, insn, op1, op2, crm);
1784            break;
1785        case 4: /* MSR (immediate) */
1786            handle_msr_i(s, insn, op1, op2, crm);
1787            break;
1788        default:
1789            unallocated_encoding(s);
1790            break;
1791        }
1792        return;
1793    }
1794    handle_sys(s, insn, l, op0, op1, op2, crn, crm, rt);
1795}
1796
1797/* Exception generation
1798 *
1799 *  31             24 23 21 20                     5 4   2 1  0
1800 * +-----------------+-----+------------------------+-----+----+
1801 * | 1 1 0 1 0 1 0 0 | opc |          imm16         | op2 | LL |
1802 * +-----------------+-----+------------------------+-----+----+
1803 */
1804static void disas_exc(DisasContext *s, uint32_t insn)
1805{
1806    int opc = extract32(insn, 21, 3);
1807    int op2_ll = extract32(insn, 0, 5);
1808    int imm16 = extract32(insn, 5, 16);
1809    TCGv_i32 tmp;
1810
1811    switch (opc) {
1812    case 0:
1813        /* For SVC, HVC and SMC we advance the single-step state
1814         * machine before taking the exception. This is architecturally
1815         * mandated, to ensure that single-stepping a system call
1816         * instruction works properly.
1817         */
1818        switch (op2_ll) {
1819        case 1:                                                     /* SVC */
1820            gen_ss_advance(s);
1821            gen_exception_insn(s, 0, EXCP_SWI, syn_aa64_svc(imm16),
1822                               default_exception_el(s));
1823            break;
1824        case 2:                                                     /* HVC */
1825            if (s->current_el == 0) {
1826                unallocated_encoding(s);
1827                break;
1828            }
1829            /* The pre HVC helper handles cases when HVC gets trapped
1830             * as an undefined insn by runtime configuration.
1831             */
1832            gen_a64_set_pc_im(s->pc - 4);
1833            gen_helper_pre_hvc(cpu_env);
1834            gen_ss_advance(s);
1835            gen_exception_insn(s, 0, EXCP_HVC, syn_aa64_hvc(imm16), 2);
1836            break;
1837        case 3:                                                     /* SMC */
1838            if (s->current_el == 0) {
1839                unallocated_encoding(s);
1840                break;
1841            }
1842            gen_a64_set_pc_im(s->pc - 4);
1843            tmp = tcg_const_i32(syn_aa64_smc(imm16));
1844            gen_helper_pre_smc(cpu_env, tmp);
1845            tcg_temp_free_i32(tmp);
1846            gen_ss_advance(s);
1847            gen_exception_insn(s, 0, EXCP_SMC, syn_aa64_smc(imm16), 3);
1848            break;
1849        default:
1850            unallocated_encoding(s);
1851            break;
1852        }
1853        break;
1854    case 1:
1855        if (op2_ll != 0) {
1856            unallocated_encoding(s);
1857            break;
1858        }
1859        /* BRK */
1860        gen_exception_bkpt_insn(s, 4, syn_aa64_bkpt(imm16));
1861        break;
1862    case 2:
1863        if (op2_ll != 0) {
1864            unallocated_encoding(s);
1865            break;
1866        }
1867        /* HLT. This has two purposes.
1868         * Architecturally, it is an external halting debug instruction.
1869         * Since QEMU doesn't implement external debug, we treat this as
1870         * the architecture requires when halting debug is disabled: it will UNDEF.
1871         * Secondly, "HLT 0xf000" is the A64 semihosting syscall instruction.
1872         */
1873        if (semihosting_enabled() && imm16 == 0xf000) {
1874#ifndef CONFIG_USER_ONLY
1875            /* In system mode, don't allow userspace access to semihosting,
1876             * to provide some semblance of security (and for consistency
1877             * with our 32-bit semihosting).
1878             */
1879            if (s->current_el == 0) {
1880                unsupported_encoding(s, insn);
1881                break;
1882            }
1883#endif
1884            gen_exception_internal_insn(s, 0, EXCP_SEMIHOST);
1885        } else {
1886            unsupported_encoding(s, insn);
1887        }
1888        break;
1889    case 5:
1890        if (op2_ll < 1 || op2_ll > 3) {
1891            unallocated_encoding(s);
1892            break;
1893        }
1894        /* DCPS1, DCPS2, DCPS3 */
1895        unsupported_encoding(s, insn);
1896        break;
1897    default:
1898        unallocated_encoding(s);
1899        break;
1900    }
1901}
1902
1903/* Unconditional branch (register)
1904 *  31           25 24   21 20   16 15   10 9    5 4     0
1905 * +---------------+-------+-------+-------+------+-------+
1906 * | 1 1 0 1 0 1 1 |  opc  |  op2  |  op3  |  Rn  |  op4  |
1907 * +---------------+-------+-------+-------+------+-------+
1908 */
1909static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
1910{
1911    unsigned int opc, op2, op3, rn, op4;
1912
1913    opc = extract32(insn, 21, 4);
1914    op2 = extract32(insn, 16, 5);
1915    op3 = extract32(insn, 10, 6);
1916    rn = extract32(insn, 5, 5);
1917    op4 = extract32(insn, 0, 5);
1918
1919    if (op4 != 0x0 || op3 != 0x0 || op2 != 0x1f) {
1920        unallocated_encoding(s);
1921        return;
1922    }
1923
1924    switch (opc) {
1925    case 0: /* BR */
1926    case 1: /* BLR */
1927    case 2: /* RET */
1928        gen_a64_set_pc(s, cpu_reg(s, rn));
1929        /* BLR also needs to load return address */
1930        if (opc == 1) {
1931            tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
1932        }
1933        break;
1934    case 4: /* ERET */
1935        if (s->current_el == 0) {
1936            unallocated_encoding(s);
1937            return;
1938        }
1939        if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
1940            gen_io_start();
1941        }
1942        gen_helper_exception_return(cpu_env);
1943        if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
1944            gen_io_end();
1945        }
1946        /* Must exit the cpu loop to check unmasked IRQs */
1947        s->base.is_jmp = DISAS_EXIT;
1948        return;
1949    case 5: /* DRPS */
1950        if (rn != 0x1f) {
1951            unallocated_encoding(s);
1952        } else {
1953            unsupported_encoding(s, insn);
1954        }
1955        return;
1956    default:
1957        unallocated_encoding(s);
1958        return;
1959    }
1960
1961    s->base.is_jmp = DISAS_JUMP;
1962}
1963
1964/* Branches, exception generating and system instructions */
1965static void disas_b_exc_sys(DisasContext *s, uint32_t insn)
1966{
1967    switch (extract32(insn, 25, 7)) {
1968    case 0x0a: case 0x0b:
1969    case 0x4a: case 0x4b: /* Unconditional branch (immediate) */
1970        disas_uncond_b_imm(s, insn);
1971        break;
1972    case 0x1a: case 0x5a: /* Compare & branch (immediate) */
1973        disas_comp_b_imm(s, insn);
1974        break;
1975    case 0x1b: case 0x5b: /* Test & branch (immediate) */
1976        disas_test_b_imm(s, insn);
1977        break;
1978    case 0x2a: /* Conditional branch (immediate) */
1979        disas_cond_b_imm(s, insn);
1980        break;
1981    case 0x6a: /* Exception generation / System */
1982        if (insn & (1 << 24)) {
1983            disas_system(s, insn);
1984        } else {
1985            disas_exc(s, insn);
1986        }
1987        break;
1988    case 0x6b: /* Unconditional branch (register) */
1989        disas_uncond_b_reg(s, insn);
1990        break;
1991    default:
1992        unallocated_encoding(s);
1993        break;
1994    }
1995}
1996
1997/*
1998 * Load/Store exclusive instructions are implemented by remembering
1999 * the value/address loaded, and seeing if these are the same
2000 * when the store is performed. This is not actually the architecturally
2001 * mandated semantics, but it works for typical guest code sequences
2002 * and avoids having to monitor regular stores.
2003 *
2004 * The store exclusive uses the atomic cmpxchg primitives to avoid
2005 * races in multi-threaded linux-user and when MTTCG softmmu is
2006 * enabled.
2007 */
2008static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
2009                               TCGv_i64 addr, int size, bool is_pair)
2010{
2011    int idx = get_mem_index(s);
2012    TCGMemOp memop = s->be_data;
2013
2014    g_assert(size <= 3);
2015    if (is_pair) {
2016        g_assert(size >= 2);
2017        if (size == 2) {
2018            /* The pair must be single-copy atomic for the doubleword.  */
2019            memop |= MO_64 | MO_ALIGN;
2020            tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx, memop);
2021            if (s->be_data == MO_LE) {
2022                tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 0, 32);
2023                tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 32, 32);
2024            } else {
2025                tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 32, 32);
2026                tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 0, 32);
2027            }
2028        } else {
2029            /* The pair must be single-copy atomic for *each* doubleword,
2030               not the entire quadword; it must, however, be quadword aligned.  */
2031            memop |= MO_64;
2032            tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx,
2033                                memop | MO_ALIGN_16);
2034
2035            TCGv_i64 addr2 = tcg_temp_new_i64();
2036            tcg_gen_addi_i64(addr2, addr, 8);
2037            tcg_gen_qemu_ld_i64(cpu_exclusive_high, addr2, idx, memop);
2038            tcg_temp_free_i64(addr2);
2039
2040            tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val);
2041            tcg_gen_mov_i64(cpu_reg(s, rt2), cpu_exclusive_high);
2042        }
2043    } else {
2044        memop |= size | MO_ALIGN;
2045        tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx, memop);
2046        tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val);
2047    }
2048    tcg_gen_mov_i64(cpu_exclusive_addr, addr);
2049}
2050
2051static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
2052                                TCGv_i64 addr, int size, int is_pair)
2053{
2054    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]
2055     *     && (!is_pair || env->exclusive_high == [addr + datasize])) {
2056     *     [addr] = {Rt};
2057     *     if (is_pair) {
2058     *         [addr + datasize] = {Rt2};
2059     *     }
2060     *     {Rd} = 0;
2061     * } else {
2062     *     {Rd} = 1;
2063     * }
2064     * env->exclusive_addr = -1;
2065     */
2066    TCGLabel *fail_label = gen_new_label();
2067    TCGLabel *done_label = gen_new_label();
2068    TCGv_i64 tmp;
2069
2070    tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
2071
2072    tmp = tcg_temp_new_i64();
2073    if (is_pair) {
2074        if (size == 2) {
2075            if (s->be_data == MO_LE) {
2076                tcg_gen_concat32_i64(tmp, cpu_reg(s, rt), cpu_reg(s, rt2));
2077            } else {
2078                tcg_gen_concat32_i64(tmp, cpu_reg(s, rt2), cpu_reg(s, rt));
2079            }
2080            tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr,
2081                                       cpu_exclusive_val, tmp,
2082                                       get_mem_index(s),
2083                                       MO_64 | MO_ALIGN | s->be_data);
2084            tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
2085        } else if (s->be_data == MO_LE) {
2086            if (tb_cflags(s->base.tb) & CF_PARALLEL) {
2087                gen_helper_paired_cmpxchg64_le_parallel(tmp, cpu_env,
2088                                                        cpu_exclusive_addr,
2089                                                        cpu_reg(s, rt),
2090                                                        cpu_reg(s, rt2));
2091            } else {
2092                gen_helper_paired_cmpxchg64_le(tmp, cpu_env, cpu_exclusive_addr,
2093                                               cpu_reg(s, rt), cpu_reg(s, rt2));
2094            }
2095        } else {
2096            if (tb_cflags(s->base.tb) & CF_PARALLEL) {
2097                gen_helper_paired_cmpxchg64_be_parallel(tmp, cpu_env,
2098                                                        cpu_exclusive_addr,
2099                                                        cpu_reg(s, rt),
2100                                                        cpu_reg(s, rt2));
2101            } else {
2102                gen_helper_paired_cmpxchg64_be(tmp, cpu_env, cpu_exclusive_addr,
2103                                               cpu_reg(s, rt), cpu_reg(s, rt2));
2104            }
2105        }
2106    } else {
2107        tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr, cpu_exclusive_val,
2108                                   cpu_reg(s, rt), get_mem_index(s),
2109                                   size | MO_ALIGN | s->be_data);
2110        tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
2111    }
2112    tcg_gen_mov_i64(cpu_reg(s, rd), tmp);
2113    tcg_temp_free_i64(tmp);
2114    tcg_gen_br(done_label);
2115
2116    gen_set_label(fail_label);
2117    tcg_gen_movi_i64(cpu_reg(s, rd), 1);
2118    gen_set_label(done_label);
2119    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
2120}
2121
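    /* CAS: atomically compare Rs with [Rn] and, if equal, store Rt.
     * Either way Rs receives the value read from memory, since
     * tcg_gen_atomic_cmpxchg_i64 returns the loaded value in its
     * first argument.
     */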
2122static void gen_compare_and_swap(DisasContext *s, int rs, int rt,
2123                                 int rn, int size)
2124{
2125    TCGv_i64 tcg_rs = cpu_reg(s, rs);
2126    TCGv_i64 tcg_rt = cpu_reg(s, rt);
2127    int memidx = get_mem_index(s);
2128    TCGv_i64 addr = cpu_reg_sp(s, rn);
2129
2130    if (rn == 31) {
2131        gen_check_sp_alignment(s);
2132    }
2133    tcg_gen_atomic_cmpxchg_i64(tcg_rs, addr, tcg_rs, tcg_rt, memidx,
2134                               size | MO_ALIGN | s->be_data);
2135}
2136
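    /* CASP: a 128-bit compare-and-swap over the register pairs
     * {Rs, Rs+1} and {Rt, Rt+1}. Roughly:
     *
     *     if ([addr] == Rs && [addr + 8] == Rs+1) {
     *         [addr] = Rt; [addr + 8] = Rt+1;
     *     }
     *     {Rs, Rs+1} = value read from memory;
     */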
2137static void gen_compare_and_swap_pair(DisasContext *s, int rs, int rt,
2138                                      int rn, int size)
2139{
2140    TCGv_i64 s1 = cpu_reg(s, rs);
2141    TCGv_i64 s2 = cpu_reg(s, rs + 1);
2142    TCGv_i64 t1 = cpu_reg(s, rt);
2143    TCGv_i64 t2 = cpu_reg(s, rt + 1);
2144    TCGv_i64 addr = cpu_reg_sp(s, rn);
2145    int memidx = get_mem_index(s);
2146
2147    if (rn == 31) {
2148        gen_check_sp_alignment(s);
2149    }
2150
2151    if (size == 2) {
2152        TCGv_i64 cmp = tcg_temp_new_i64();
2153        TCGv_i64 val = tcg_temp_new_i64();
2154
2155        if (s->be_data == MO_LE) {
2156            tcg_gen_concat32_i64(val, t1, t2);
2157            tcg_gen_concat32_i64(cmp, s1, s2);
2158        } else {
2159            tcg_gen_concat32_i64(val, t2, t1);
2160            tcg_gen_concat32_i64(cmp, s2, s1);
2161        }
2162
2163        tcg_gen_atomic_cmpxchg_i64(cmp, addr, cmp, val, memidx,
2164                                   MO_64 | MO_ALIGN | s->be_data);
2165        tcg_temp_free_i64(val);
2166
2167        if (s->be_data == MO_LE) {
2168            tcg_gen_extr32_i64(s1, s2, cmp);
2169        } else {
2170            tcg_gen_extr32_i64(s2, s1, cmp);
2171        }
2172        tcg_temp_free_i64(cmp);
2173    } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
2174        TCGv_i32 tcg_rs = tcg_const_i32(rs);
2175
2176        if (s->be_data == MO_LE) {
2177            gen_helper_casp_le_parallel(cpu_env, tcg_rs, addr, t1, t2);
2178        } else {
2179            gen_helper_casp_be_parallel(cpu_env, tcg_rs, addr, t1, t2);
2180        }
2181        tcg_temp_free_i32(tcg_rs);
2182    } else {
2183        TCGv_i64 d1 = tcg_temp_new_i64();
2184        TCGv_i64 d2 = tcg_temp_new_i64();
2185        TCGv_i64 a2 = tcg_temp_new_i64();
2186        TCGv_i64 c1 = tcg_temp_new_i64();
2187        TCGv_i64 c2 = tcg_temp_new_i64();
2188        TCGv_i64 zero = tcg_const_i64(0);
2189
2190        /* Load the two words, in memory order.  */
2191        tcg_gen_qemu_ld_i64(d1, addr, memidx,
2192                            MO_64 | MO_ALIGN_16 | s->be_data);
2193        tcg_gen_addi_i64(a2, addr, 8);
2194        tcg_gen_qemu_ld_i64(d2, a2, memidx, MO_64 | s->be_data);
2195
2196        /* Compare the two words, also in memory order.  */
2197        tcg_gen_setcond_i64(TCG_COND_EQ, c1, d1, s1);
2198        tcg_gen_setcond_i64(TCG_COND_EQ, c2, d2, s2);
2199        tcg_gen_and_i64(c2, c2, c1);
2200
2201        /* If compare equal, write back new data, else write back old data.  */
2202        tcg_gen_movcond_i64(TCG_COND_NE, c1, c2, zero, t1, d1);
2203        tcg_gen_movcond_i64(TCG_COND_NE, c2, c2, zero, t2, d2);
2204        tcg_gen_qemu_st_i64(c1, addr, memidx, MO_64 | s->be_data);
2205        tcg_gen_qemu_st_i64(c2, a2, memidx, MO_64 | s->be_data);
2206        tcg_temp_free_i64(a2);
2207        tcg_temp_free_i64(c1);
2208        tcg_temp_free_i64(c2);
2209        tcg_temp_free_i64(zero);
2210
2211        /* Write back the data from memory to Rs.  */
2212        tcg_gen_mov_i64(s1, d1);
2213        tcg_gen_mov_i64(s2, d2);
2214        tcg_temp_free_i64(d1);
2215        tcg_temp_free_i64(d2);
2216    }
2217}
2218
2219/* Compute the ISS.SF (Sixty-Four bit register size) value. This logic
2220 * is derived from the ARMv8 specs for LDR (Shared decode for all encodings).
2221 */
2222static bool disas_ldst_compute_iss_sf(int size, bool is_signed, int opc)
2223{
2224    int opc0 = extract32(opc, 0, 1);
2225    int regsize;
2226
2227    if (is_signed) {
2228        regsize = opc0 ? 32 : 64;
2229    } else {
2230        regsize = size == 3 ? 64 : 32;
2231    }
2232    return regsize == 64;
2233}
2234
2235/* Load/store exclusive
2236 *
2237 *  31 30 29         24  23  22   21  20  16  15  14   10 9    5 4    0
2238 * +-----+-------------+----+---+----+------+----+-------+------+------+
2239 * | sz  | 0 0 1 0 0 0 | o2 | L | o1 |  Rs  | o0 |  Rt2  |  Rn  | Rt   |
2240 * +-----+-------------+----+---+----+------+----+-------+------+------+
2241 *
2242 *  sz: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64 bit
2243 *   L: 0 -> store, 1 -> load
2244 *  o2: 0 -> exclusive, 1 -> not
2245 *  o1: 0 -> single register, 1 -> register pair
2246 *  o0: 1 -> load-acquire/store-release, 0 -> not
2247 */
2248static void disas_ldst_excl(DisasContext *s, uint32_t insn)
2249{
2250    int rt = extract32(insn, 0, 5);
2251    int rn = extract32(insn, 5, 5);
2252    int rt2 = extract32(insn, 10, 5);
2253    int rs = extract32(insn, 16, 5);
2254    int is_lasr = extract32(insn, 15, 1);
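    /* Pack o2:L:o1 (bits 23:21) and o0 (bit 15) into a 4-bit selector;
     * e.g. LDAXR has o2=0, L=1, o1=0, o0=1, giving 0x5.
     */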
2255    int o2_L_o1_o0 = extract32(insn, 21, 3) * 2 | is_lasr;
2256    int size = extract32(insn, 30, 2);
2257    TCGv_i64 tcg_addr;
2258
2259    switch (o2_L_o1_o0) {
2260    case 0x0: /* STXR */
2261    case 0x1: /* STLXR */
2262        if (rn == 31) {
2263            gen_check_sp_alignment(s);
2264        }
2265        if (is_lasr) {
2266            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
2267        }
2268        tcg_addr = read_cpu_reg_sp(s, rn, 1);
2269        gen_store_exclusive(s, rs, rt, rt2, tcg_addr, size, false);
2270        return;
2271
2272    case 0x4: /* LDXR */
2273    case 0x5: /* LDAXR */
2274        if (rn == 31) {
2275            gen_check_sp_alignment(s);
2276        }
2277        tcg_addr = read_cpu_reg_sp(s, rn, 1);
2278        s->is_ldex = true;
2279        gen_load_exclusive(s, rt, rt2, tcg_addr, size, false);
2280        if (is_lasr) {
2281            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
2282        }
2283        return;
2284
2285    case 0x9: /* STLR */
2286        /* Generate ISS for non-exclusive accesses including LASR.  */
2287        if (rn == 31) {
2288            gen_check_sp_alignment(s);
2289        }
2290        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
2291        tcg_addr = read_cpu_reg_sp(s, rn, 1);
2292        do_gpr_st(s, cpu_reg(s, rt), tcg_addr, size, true, rt,
2293                  disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
2294        return;
2295
2296    case 0xd: /* LDAR */
2297        /* Generate ISS for non-exclusive accesses including LASR.  */
2298        if (rn == 31) {
2299            gen_check_sp_alignment(s);
2300        }
2301        tcg_addr = read_cpu_reg_sp(s, rn, 1);
2302        do_gpr_ld(s, cpu_reg(s, rt), tcg_addr, size, false, false, true, rt,
2303                  disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
2304        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
2305        return;
2306
2307    case 0x2: case 0x3: /* CASP / STXP */
2308        if (size & 2) { /* STXP / STLXP */
2309            if (rn == 31) {
2310                gen_check_sp_alignment(s);
2311            }
2312            if (is_lasr) {
2313                tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
2314            }
2315            tcg_addr = read_cpu_reg_sp(s, rn, 1);
2316            gen_store_exclusive(s, rs, rt, rt2, tcg_addr, size, true);
2317            return;
2318        }
2319        if (rt2 == 31
2320            && ((rt | rs) & 1) == 0
2321            && arm_dc_feature(s, ARM_FEATURE_V8_ATOMICS)) {
2322            /* CASP / CASPL */
2323            gen_compare_and_swap_pair(s, rs, rt, rn, size | 2);
2324            return;
2325        }
2326        break;
2327
2328    case 0x6: case 0x7: /* CASPA / LDXP */
2329        if (size & 2) { /* LDXP / LDAXP */
2330            if (rn == 31) {
2331                gen_check_sp_alignment(s);
2332            }
2333            tcg_addr = read_cpu_reg_sp(s, rn, 1);
2334            s->is_ldex = true;
2335            gen_load_exclusive(s, rt, rt2, tcg_addr, size, true);
2336            if (is_lasr) {
2337                tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
2338            }
2339            return;
2340        }
2341        if (rt2 == 31
2342            && ((rt | rs) & 1) == 0
2343            && arm_dc_feature(s, ARM_FEATURE_V8_ATOMICS)) {
2344            /* CASPA / CASPAL */
2345            gen_compare_and_swap_pair(s, rs, rt, rn, size | 2);
2346            return;
2347        }
2348        break;
2349
2350    case 0xa: /* CAS */
2351    case 0xb: /* CASL */
2352    case 0xe: /* CASA */
2353    case 0xf: /* CASAL */
2354        if (rt2 == 31 && arm_dc_feature(s, ARM_FEATURE_V8_ATOMICS)) {
2355            gen_compare_and_swap(s, rs, rt, rn, size);
2356            return;
2357        }
2358        break;
2359    }
2360    unallocated_encoding(s);
2361}
2362
2363/*
2364 * Load register (literal)
2365 *
2366 *  31 30 29   27  26 25 24 23                5 4     0
2367 * +-----+-------+---+-----+-------------------+-------+
2368 * | opc | 0 1 1 | V | 0 0 |     imm19         |  Rt   |
2369 * +-----+-------+---+-----+-------------------+-------+
2370 *
2371 * V: 1 -> vector (simd/fp)
2372 * opc (non-vector): 00 -> 32 bit, 01 -> 64 bit,
2373 *                   10-> 32 bit signed, 11 -> prefetch
2374 * opc (vector): 00 -> 32 bit, 01 -> 64 bit, 10 -> 128 bit (11 unallocated)
2375 */
2376static void disas_ld_lit(DisasContext *s, uint32_t insn)
2377{
2378    int rt = extract32(insn, 0, 5);
2379    int64_t imm = sextract32(insn, 5, 19) << 2;
2380    bool is_vector = extract32(insn, 26, 1);
2381    int opc = extract32(insn, 30, 2);
2382    bool is_signed = false;
2383    int size = 2;
2384    TCGv_i64 tcg_rt, tcg_addr;
2385
2386    if (is_vector) {
2387        if (opc == 3) {
2388            unallocated_encoding(s);
2389            return;
2390        }
2391        size = 2 + opc;
2392        if (!fp_access_check(s)) {
2393            return;
2394        }
2395    } else {
2396        if (opc == 3) {
2397            /* PRFM (literal) : prefetch */
2398            return;
2399        }
2400        size = 2 + extract32(opc, 0, 1);
2401        is_signed = extract32(opc, 1, 1);
2402    }
2403
2404    tcg_rt = cpu_reg(s, rt);
2405
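    /* s->pc has already advanced past this insn, so the literal is
     * addressed relative to the instruction itself (s->pc - 4).
     */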
2406    tcg_addr = tcg_const_i64((s->pc - 4) + imm);
2407    if (is_vector) {
2408        do_fp_ld(s, rt, tcg_addr, size);
2409    } else {
2410        /* Only unsigned 32bit loads target 32bit registers.  */
2411        bool iss_sf = opc != 0;
2412
2413        do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, false,
2414                  true, rt, iss_sf, false);
2415    }
2416    tcg_temp_free_i64(tcg_addr);
2417}
2418
2419/*
2420 * LDNP (Load Pair - non-temporal hint)
2421 * LDP (Load Pair - non vector)
2422 * LDPSW (Load Pair Signed Word - non vector)
2423 * STNP (Store Pair - non-temporal hint)
2424 * STP (Store Pair - non vector)
2425 * LDNP (Load Pair of SIMD&FP - non-temporal hint)
2426 * LDP (Load Pair of SIMD&FP)
2427 * STNP (Store Pair of SIMD&FP - non-temporal hint)
2428 * STP (Store Pair of SIMD&FP)
2429 *
2430 *  31 30 29   27  26  25 24   23  22 21   15 14   10 9    5 4    0
2431 * +-----+-------+---+---+-------+---+-------+-------+------+------+
2432 * | opc | 1 0 1 | V | 0 | index | L |  imm7 |  Rt2  |  Rn  | Rt   |
2433 * +-----+-------+---+---+-------+---+-------+-------+------+------+
2434 *
2435 * opc: LDP/STP/LDNP/STNP        00 -> 32 bit, 10 -> 64 bit
2436 *      LDPSW                    01
2437 *      LDP/STP/LDNP/STNP (SIMD) 00 -> 32 bit, 01 -> 64 bit, 10 -> 128 bit
2438 *   V: 0 -> GPR, 1 -> Vector
2439 * idx: 00 -> signed offset with non-temporal hint, 01 -> post-index,
2440 *      10 -> signed offset, 11 -> pre-index
2441 *   L: 0 -> Store 1 -> Load
2442 *
2443 * Rt, Rt2 = GPR or SIMD registers to be transferred
2444 * Rn = general purpose register containing address
2445 * imm7 = signed offset (multiple of 4 or 8 depending on size)
2446 */
2447static void disas_ldst_pair(DisasContext *s, uint32_t insn)
2448{
2449    int rt = extract32(insn, 0, 5);
2450    int rn = extract32(insn, 5, 5);
2451    int rt2 = extract32(insn, 10, 5);
2452    uint64_t offset = sextract64(insn, 15, 7);
2453    int index = extract32(insn, 23, 2);
2454    bool is_vector = extract32(insn, 26, 1);
2455    bool is_load = extract32(insn, 22, 1);
2456    int opc = extract32(insn, 30, 2);
2457
2458    bool is_signed = false;
2459    bool postindex = false;
2460    bool wback = false;
2461
2462    TCGv_i64 tcg_addr; /* calculated address */
2463    int size;
2464
2465    if (opc == 3) {
2466        unallocated_encoding(s);
2467        return;
2468    }
2469
2470    if (is_vector) {
2471        size = 2 + opc;
2472    } else {
2473        size = 2 + extract32(opc, 1, 1);
2474        is_signed = extract32(opc, 0, 1);
2475        if (!is_load && is_signed) {
2476            unallocated_encoding(s);
2477            return;
2478        }
2479    }
2480
2481    switch (index) {
2482    case 1: /* post-index */
2483        postindex = true;
2484        wback = true;
2485        break;
2486    case 0:
2487        /* signed offset with "non-temporal" hint. Since we don't emulate
2488         * caches we don't care about hints to the cache system about
2489         * data access patterns, and handle this identically to plain
2490         * signed offset.
2491         */
2492        if (is_signed) {
2493            /* There is no non-temporal-hint version of LDPSW */
2494            unallocated_encoding(s);
2495            return;
2496        }
2497        postindex = false;
2498        break;
2499    case 2: /* signed offset, rn not updated */
2500        postindex = false;
2501        break;
2502    case 3: /* pre-index */
2503        postindex = false;
2504        wback = true;
2505        break;
2506    }
2507
2508    if (is_vector && !fp_access_check(s)) {
2509        return;
2510    }
2511
2512    offset <<= size;
2513
2514    if (rn == 31) {
2515        gen_check_sp_alignment(s);
2516    }
2517
2518    tcg_addr = read_cpu_reg_sp(s, rn, 1);
2519
2520    if (!postindex) {
2521        tcg_gen_addi_i64(tcg_addr, tcg_addr, offset);
2522    }
2523
2524    if (is_vector) {
2525        if (is_load) {
2526            do_fp_ld(s, rt, tcg_addr, size);
2527        } else {
2528            do_fp_st(s, rt, tcg_addr, size);
2529        }
2530        tcg_gen_addi_i64(tcg_addr, tcg_addr, 1 << size);
2531        if (is_load) {
2532            do_fp_ld(s, rt2, tcg_addr, size);
2533        } else {
2534            do_fp_st(s, rt2, tcg_addr, size);
2535        }
2536    } else {
2537        TCGv_i64 tcg_rt = cpu_reg(s, rt);
2538        TCGv_i64 tcg_rt2 = cpu_reg(s, rt2);
2539
2540        if (is_load) {
2541            TCGv_i64 tmp = tcg_temp_new_i64();
2542
2543            /* Do not modify tcg_rt before recognizing any exception
2544             * from the second load.
2545             */
2546            do_gpr_ld(s, tmp, tcg_addr, size, is_signed, false,
2547                      false, 0, false, false);
2548            tcg_gen_addi_i64(tcg_addr, tcg_addr, 1 << size);
2549            do_gpr_ld(s, tcg_rt2, tcg_addr, size, is_signed, false,
2550                      false, 0, false, false);
2551
2552            tcg_gen_mov_i64(tcg_rt, tmp);
2553            tcg_temp_free_i64(tmp);
2554        } else {
2555            do_gpr_st(s, tcg_rt, tcg_addr, size,
2556                      false, 0, false, false);
2557            tcg_gen_addi_i64(tcg_addr, tcg_addr, 1 << size);
2558            do_gpr_st(s, tcg_rt2, tcg_addr, size,
2559                      false, 0, false, false);
2560        }
2561    }
2562
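    /* tcg_addr now points at the second element; step back one element
     * before applying the writeback adjustment.
     */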
2563    if (wback) {
2564        if (postindex) {
2565            tcg_gen_addi_i64(tcg_addr, tcg_addr, offset - (1 << size));
2566        } else {
2567            tcg_gen_subi_i64(tcg_addr, tcg_addr, 1 << size);
2568        }
2569        tcg_gen_mov_i64(cpu_reg_sp(s, rn), tcg_addr);
2570    }
2571}
2572
2573/*
2574 * Load/store (immediate post-indexed)
2575 * Load/store (immediate pre-indexed)
2576 * Load/store (unscaled immediate)
2577 *
2578 * 31 30 29   27  26 25 24 23 22 21  20    12 11 10 9    5 4    0
2579 * +----+-------+---+-----+-----+---+--------+-----+------+------+
2580 * |size| 1 1 1 | V | 0 0 | opc | 0 |  imm9  | idx |  Rn  |  Rt  |
2581 * +----+-------+---+-----+-----+---+--------+-----+------+------+
2582 *
2583 * idx = 01 -> post-indexed, 11 pre-indexed, 00 unscaled imm. (no writeback)
2584 *       10 -> unprivileged
2585 * V = 0 -> non-vector
2586 * size: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64bit
2587 * opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
2588 */
2589static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
2590                                int opc,
2591                                int size,
2592                                int rt,
2593                                bool is_vector)
2594{
2595    int rn = extract32(insn, 5, 5);
2596    int imm9 = sextract32(insn, 12, 9);
2597    int idx = extract32(insn, 10, 2);
2598    bool is_signed = false;
2599    bool is_store = false;
2600    bool is_extended = false;
2601    bool is_unpriv = (idx == 2);
2602    bool iss_valid = !is_vector;
2603    bool post_index;
2604    bool writeback;
2605
2606    TCGv_i64 tcg_addr;
2607
2608    if (is_vector) {
2609        size |= (opc & 2) << 1;
2610        if (size > 4 || is_unpriv) {
2611            unallocated_encoding(s);
2612            return;
2613        }
2614        is_store = ((opc & 1) == 0);
2615        if (!fp_access_check(s)) {
2616            return;
2617        }
2618    } else {
2619        if (size == 3 && opc == 2) {
2620            /* PRFM - prefetch */
2621            if (is_unpriv) {
2622                unallocated_encoding(s);
2623                return;
2624            }
2625            return;
2626        }
2627        if (opc == 3 && size > 1) {
2628            unallocated_encoding(s);
2629            return;
2630        }
2631        is_store = (opc == 0);
2632        is_signed = extract32(opc, 1, 1);
2633        is_extended = (size < 3) && extract32(opc, 0, 1);
2634    }
2635
2636    switch (idx) {
2637    case 0:
2638    case 2:
2639        post_index = false;
2640        writeback = false;
2641        break;
2642    case 1:
2643        post_index = true;
2644        writeback = true;
2645        break;
2646    case 3:
2647        post_index = false;
2648        writeback = true;
2649        break;
2650    default:
2651        g_assert_not_reached();
2652    }
2653
2654    if (rn == 31) {
2655        gen_check_sp_alignment(s);
2656    }
2657    tcg_addr = read_cpu_reg_sp(s, rn, 1);
2658
2659    if (!post_index) {
2660        tcg_gen_addi_i64(tcg_addr, tcg_addr, imm9);
2661    }
2662
2663    if (is_vector) {
2664        if (is_store) {
2665            do_fp_st(s, rt, tcg_addr, size);
2666        } else {
2667            do_fp_ld(s, rt, tcg_addr, size);
2668        }
2669    } else {
2670        TCGv_i64 tcg_rt = cpu_reg(s, rt);
2671        int memidx = is_unpriv ? get_a64_user_mem_index(s) : get_mem_index(s);
2672        bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
2673
2674        if (is_store) {
2675            do_gpr_st_memidx(s, tcg_rt, tcg_addr, size, memidx,
2676                             iss_valid, rt, iss_sf, false);
2677        } else {
2678            do_gpr_ld_memidx(s, tcg_rt, tcg_addr, size,
2679                             is_signed, is_extended, memidx,
2680                             iss_valid, rt, iss_sf, false);
2681        }
2682    }
2683
2684    if (writeback) {
2685        TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
2686        if (post_index) {
2687            tcg_gen_addi_i64(tcg_addr, tcg_addr, imm9);
2688        }
2689        tcg_gen_mov_i64(tcg_rn, tcg_addr);
2690    }
2691}
2692
2693/*
2694 * Load/store (register offset)
2695 *
2696 * 31 30 29   27  26 25 24 23 22 21  20  16 15 13 12 11 10 9  5 4  0
2697 * +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+
2698 * |size| 1 1 1 | V | 0 0 | opc | 1 |  Rm  | opt | S| 1 0 | Rn | Rt |
2699 * +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+
2700 *
2701 * For non-vector:
2702 *   size: 00-> byte, 01 -> 16 bit, 10 -> 32bit, 11 -> 64bit
2703 *   opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
2704 * For vector:
2705 *   size is opc<1>:size<1:0> so 100 -> 128 bit; 110 and 111 unallocated
2706 *   opc<0>: 0 -> store, 1 -> load
2707 * V: 1 -> vector/simd
2708 * opt: extend encoding (see DecodeRegExtend)
2709 * S: if S=1 then scale (essentially index by sizeof(size))
2710 * Rt: register to transfer into/out of
2711 * Rn: address register or SP for base
2712 * Rm: offset register or ZR for offset
2713 */
2714static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn,
2715                                   int opc,
2716                                   int size,
2717                                   int rt,
2718                                   bool is_vector)
2719{
2720    int rn = extract32(insn, 5, 5);
2721    int shift = extract32(insn, 12, 1);
2722    int rm = extract32(insn, 16, 5);
2723    int opt = extract32(insn, 13, 3);
2724    bool is_signed = false;
2725    bool is_store = false;
2726    bool is_extended = false;
2727
2728    TCGv_i64 tcg_rm;
2729    TCGv_i64 tcg_addr;
2730
2731    if (extract32(opt, 1, 1) == 0) {
2732        unallocated_encoding(s);
2733        return;
2734    }
2735
2736    if (is_vector) {
2737        size |= (opc & 2) << 1;
2738        if (size > 4) {
2739            unallocated_encoding(s);
2740            return;
2741        }
2742        is_store = !extract32(opc, 0, 1);
2743        if (!fp_access_check(s)) {
2744            return;
2745        }
2746    } else {
2747        if (size == 3 && opc == 2) {
2748            /* PRFM - prefetch */
2749            return;
2750        }
2751        if (opc == 3 && size > 1) {
2752            unallocated_encoding(s);
2753            return;
2754        }
2755        is_store = (opc == 0);
2756        is_signed = extract32(opc, 1, 1);
2757        is_extended = (size < 3) && extract32(opc, 0, 1);
2758    }
2759
2760    if (rn == 31) {
2761        gen_check_sp_alignment(s);
2762    }
2763    tcg_addr = read_cpu_reg_sp(s, rn, 1);
2764
2765    tcg_rm = read_cpu_reg(s, rm, 1);
2766    ext_and_shift_reg(tcg_rm, tcg_rm, opt, shift ? size : 0);
2767
2768    tcg_gen_add_i64(tcg_addr, tcg_addr, tcg_rm);
2769
2770    if (is_vector) {
2771        if (is_store) {
2772            do_fp_st(s, rt, tcg_addr, size);
2773        } else {
2774            do_fp_ld(s, rt, tcg_addr, size);
2775        }
2776    } else {
2777        TCGv_i64 tcg_rt = cpu_reg(s, rt);
2778        bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
2779        if (is_store) {
2780            do_gpr_st(s, tcg_rt, tcg_addr, size,
2781                      true, rt, iss_sf, false);
2782        } else {
2783            do_gpr_ld(s, tcg_rt, tcg_addr, size,
2784                      is_signed, is_extended,
2785                      true, rt, iss_sf, false);
2786        }
2787    }
2788}
2789
2790/*
2791 * Load/store (unsigned immediate)
2792 *
2793 * 31 30 29   27  26 25 24 23 22 21        10 9     5 4    0
2794 * +----+-------+---+-----+-----+------------+-------+------+
2795 * |size| 1 1 1 | V | 0 1 | opc |   imm12    |  Rn   |  Rt  |
2796 * +----+-------+---+-----+-----+------------+-------+------+
2797 *
2798 * For non-vector:
2799 *   size: 00-> byte, 01 -> 16 bit, 10 -> 32bit, 11 -> 64bit
2800 *   opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
2801 * For vector:
2802 *   size is opc<1>:size<1:0> so 100 -> 128 bit; 110 and 111 unallocated
2803 *   opc<0>: 0 -> store, 1 -> load
2804 * Rn: base address register (inc SP)
2805 * Rt: target register
2806 */
2807static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn,
2808                                        int opc,
2809                                        int size,
2810                                        int rt,
2811                                        bool is_vector)
2812{
2813    int rn = extract32(insn, 5, 5);
2814    unsigned int imm12 = extract32(insn, 10, 12);
2815    unsigned int offset;
2816
2817    TCGv_i64 tcg_addr;
2818
2819    bool is_store;
2820    bool is_signed = false;
2821    bool is_extended = false;
2822
2823    if (is_vector) {
2824        size |= (opc & 2) << 1;
2825        if (size > 4) {
2826            unallocated_encoding(s);
2827            return;
2828        }
2829        is_store = !extract32(opc, 0, 1);
2830        if (!fp_access_check(s)) {
2831            return;
2832        }
2833    } else {
2834        if (size == 3 && opc == 2) {
2835            /* PRFM - prefetch */
2836            return;
2837        }
2838        if (opc == 3 && size > 1) {
2839            unallocated_encoding(s);
2840            return;
2841        }
2842        is_store = (opc == 0);
2843        is_signed = extract32(opc, 1, 1);
2844        is_extended = (size < 3) && extract32(opc, 0, 1);
2845    }
2846
2847    if (rn == 31) {
2848        gen_check_sp_alignment(s);
2849    }
2850    tcg_addr = read_cpu_reg_sp(s, rn, 1);
2851    offset = imm12 << size;
2852    tcg_gen_addi_i64(tcg_addr, tcg_addr, offset);
2853
2854    if (is_vector) {
2855        if (is_store) {
2856            do_fp_st(s, rt, tcg_addr, size);
2857        } else {
2858            do_fp_ld(s, rt, tcg_addr, size);
2859        }
2860    } else {
2861        TCGv_i64 tcg_rt = cpu_reg(s, rt);
2862        bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
2863        if (is_store) {
2864            do_gpr_st(s, tcg_rt, tcg_addr, size,
2865                      true, rt, iss_sf, false);
2866        } else {
2867            do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, is_extended,
2868                      true, rt, iss_sf, false);
2869        }
2870    }
2871}
2872
2873/* Atomic memory operations
2874 *
2875 *  31  30      27  26    24    22  21   16   15    12    10    5     0
2876 * +------+-------+---+-----+-----+---+----+----+-----+-----+----+-----+
2877 * | size | 1 1 1 | V | 0 0 | A R | 1 | Rs | o3 | opc | 0 0 | Rn |  Rt |
2878 * +------+-------+---+-----+-----+---+----+----+-----+-----+----+-----+
2879 *
2880 * Rt: the result register
2881 * Rn: base address or SP
2882 * Rs: the source register for the operation
2883 * V: vector flag (always 0 as of v8.3)
2884 * A: acquire flag
2885 * R: release flag
2886 */
2887static void disas_ldst_atomic(DisasContext *s, uint32_t insn,
2888                              int size, int rt, bool is_vector)
2889{
2890    int rs = extract32(insn, 16, 5);
2891    int rn = extract32(insn, 5, 5);
2892    int o3_opc = extract32(insn, 12, 4);
2893    int feature = ARM_FEATURE_V8_ATOMICS;
2894    TCGv_i64 tcg_rn, tcg_rs;
2895    AtomicThreeOpFn *fn;
2896
2897    if (is_vector) {
2898        unallocated_encoding(s);
2899        return;
2900    }
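    /* The case values below are octal: the leading digit is the o3 bit
     * and the low digit is opc, e.g. 010 means o3=1, opc=0 (SWP).
     */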
2901    switch (o3_opc) {
2902    case 000: /* LDADD */
2903        fn = tcg_gen_atomic_fetch_add_i64;
2904        break;
2905    case 001: /* LDCLR */
2906        fn = tcg_gen_atomic_fetch_and_i64;
2907        break;
2908    case 002: /* LDEOR */
2909        fn = tcg_gen_atomic_fetch_xor_i64;
2910        break;
2911    case 003: /* LDSET */
2912        fn = tcg_gen_atomic_fetch_or_i64;
2913        break;
2914    case 004: /* LDSMAX */
2915        fn = tcg_gen_atomic_fetch_smax_i64;
2916        break;
2917    case 005: /* LDSMIN */
2918        fn = tcg_gen_atomic_fetch_smin_i64;
2919        break;
2920    case 006: /* LDUMAX */
2921        fn = tcg_gen_atomic_fetch_umax_i64;
2922        break;
2923    case 007: /* LDUMIN */
2924        fn = tcg_gen_atomic_fetch_umin_i64;
2925        break;
2926    case 010: /* SWP */
2927        fn = tcg_gen_atomic_xchg_i64;
2928        break;
2929    default:
2930        unallocated_encoding(s);
2931        return;
2932    }
2933    if (!arm_dc_feature(s, feature)) {
2934        unallocated_encoding(s);
2935        return;
2936    }
2937
2938    if (rn == 31) {
2939        gen_check_sp_alignment(s);
2940    }
2941    tcg_rn = cpu_reg_sp(s, rn);
2942    tcg_rs = read_cpu_reg(s, rs, true);
2943
2944    if (o3_opc == 1) { /* LDCLR */
2945        tcg_gen_not_i64(tcg_rs, tcg_rs);
2946    }
2947
2948    /* The tcg atomic primitives are all full barriers.  Therefore we
2949     * can ignore the Acquire and Release bits of this instruction.
2950     */
2951    fn(cpu_reg(s, rt), tcg_rn, tcg_rs, get_mem_index(s),
2952       s->be_data | size | MO_ALIGN);
2953}
2954
2955/* Load/store register (all forms) */
2956static void disas_ldst_reg(DisasContext *s, uint32_t insn)
2957{
2958    int rt = extract32(insn, 0, 5);
2959    int opc = extract32(insn, 22, 2);
2960    bool is_vector = extract32(insn, 26, 1);
2961    int size = extract32(insn, 30, 2);
2962
2963    switch (extract32(insn, 24, 2)) {
2964    case 0:
2965        if (extract32(insn, 21, 1) == 0) {
2966            /* Load/store register (unscaled immediate)
2967             * Load/store immediate pre/post-indexed
2968             * Load/store register unprivileged
2969             */
2970            disas_ldst_reg_imm9(s, insn, opc, size, rt, is_vector);
2971            return;
2972        }
2973        switch (extract32(insn, 10, 2)) {
2974        case 0:
2975            disas_ldst_atomic(s, insn, size, rt, is_vector);
2976            return;
2977        case 2:
2978            disas_ldst_reg_roffset(s, insn, opc, size, rt, is_vector);
2979            return;
2980        }
2981        break;
2982    case 1:
2983        disas_ldst_reg_unsigned_imm(s, insn, opc, size, rt, is_vector);
2984        return;
2985    }
2986    unallocated_encoding(s);
2987}
2988
2989/* AdvSIMD load/store multiple structures
2990 *
2991 *  31  30  29           23 22  21         16 15    12 11  10 9    5 4    0
2992 * +---+---+---------------+---+-------------+--------+------+------+------+
2993 * | 0 | Q | 0 0 1 1 0 0 0 | L | 0 0 0 0 0 0 | opcode | size |  Rn  |  Rt  |
2994 * +---+---+---------------+---+-------------+--------+------+------+------+
2995 *
2996 * AdvSIMD load/store multiple structures (post-indexed)
2997 *
2998 *  31  30  29           23 22  21  20     16 15    12 11  10 9    5 4    0
2999 * +---+---+---------------+---+---+---------+--------+------+------+------+
3000 * | 0 | Q | 0 0 1 1 0 0 1 | L | 0 |   Rm    | opcode | size |  Rn  |  Rt  |
3001 * +---+---+---------------+---+---+---------+--------+------+------+------+
3002 *
3003 * Rt: first (or only) SIMD&FP register to be transferred
3004 * Rn: base address or SP
3005 * Rm (post-index only): post-index register (when !31) or size dependent #imm
3006 */
3007static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
3008{
3009    int rt = extract32(insn, 0, 5);
3010    int rn = extract32(insn, 5, 5);
3011    int size = extract32(insn, 10, 2);
3012    int opcode = extract32(insn, 12, 4);
3013    bool is_store = !extract32(insn, 22, 1);
3014    bool is_postidx = extract32(insn, 23, 1);
3015    bool is_q = extract32(insn, 30, 1);
3016    TCGv_i64 tcg_addr, tcg_rn;
3017
3018    int ebytes = 1 << size;
3019    int elements = (is_q ? 128 : 64) / (8 << size);
3020    int rpt;    /* num iterations */
3021    int selem;  /* structure elements */
3022    int r;
3023
3024    if (extract32(insn, 31, 1) || extract32(insn, 21, 1)) {
3025        unallocated_encoding(s);
3026        return;
3027    }
3028
3029    /* From the shared decode logic */
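    /* opcodes 0x0/0x4/0x8 are the interleaved LD4/LD3/LD2 (and ST*)
     * forms (selem = 4/3/2); 0x2/0x6/0xa/0x7 are LD1/ST1 with
     * 4/3/2/1 registers (rpt = 4/3/2/1, selem = 1).
     */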
3030    switch (opcode) {
3031    case 0x0:
3032        rpt = 1;
3033        selem = 4;
3034        break;
3035    case 0x2:
3036        rpt = 4;
3037        selem = 1;
3038        break;
3039    case 0x4:
3040        rpt = 1;
3041        selem = 3;
3042        break;
3043    case 0x6:
3044        rpt = 3;
3045        selem = 1;
3046        break;
3047    case 0x7:
3048        rpt = 1;
3049        selem = 1;
3050        break;
3051    case 0x8:
3052        rpt = 1;
3053        selem = 2;
3054        break;
3055    case 0xa:
3056        rpt = 2;
3057        selem = 1;
3058        break;
3059    default:
3060        unallocated_encoding(s);
3061        return;
3062    }
3063
3064    if (size == 3 && !is_q && selem != 1) {
3065        /* reserved */
3066        unallocated_encoding(s);
3067        return;
3068    }
3069
3070    if (!fp_access_check(s)) {
3071        return;
3072    }
3073
3074    if (rn == 31) {
3075        gen_check_sp_alignment(s);
3076    }
3077
3078    tcg_rn = cpu_reg_sp(s, rn);
3079    tcg_addr = tcg_temp_new_i64();
3080    tcg_gen_mov_i64(tcg_addr, tcg_rn);
3081
3082    for (r = 0; r < rpt; r++) {
3083        int e;
3084        for (e = 0; e < elements; e++) {
3085            int tt = (rt + r) % 32;
3086            int xs;
3087            for (xs = 0; xs < selem; xs++) {
3088                if (is_store) {
3089                    do_vec_st(s, tt, e, tcg_addr, size);
3090                } else {
3091                    do_vec_ld(s, tt, e, tcg_addr, size);
3092
3093                    /* For non-quad operations, setting a slice of the low
3094                     * 64 bits of the register clears the high 64 bits (in
3095                     * the ARM ARM pseudocode this is implicit in the fact
3096                     * that 'rval' is a 64 bit wide variable).
3097                     * For quad operations, we might still need to zero the
3098                     * high bits of the SVE register.  We optimize by noticing
3099                     * that we only need to do this the first time we touch a register.
3100                     */
3101                    if (e == 0 && (r == 0 || xs == selem - 1)) {
3102                        clear_vec_high(s, is_q, tt);
3103                    }
3104                }
3105                tcg_gen_addi_i64(tcg_addr, tcg_addr, ebytes);
3106                tt = (tt + 1) % 32;
3107            }
3108        }
3109    }
3110
3111    if (is_postidx) {
3112        int rm = extract32(insn, 16, 5);
3113        if (rm == 31) {
3114            tcg_gen_mov_i64(tcg_rn, tcg_addr);
3115        } else {
3116            tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
3117        }
3118    }
3119    tcg_temp_free_i64(tcg_addr);
3120}
3121
3122/* AdvSIMD load/store single structure
3123 *
3124 *  31  30  29           23 22 21 20       16 15 13 12  11  10 9    5 4    0
3125 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
3126 * | 0 | Q | 0 0 1 1 0 1 0 | L R | 0 0 0 0 0 | opc | S | size |  Rn  |  Rt  |
3127 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
3128 *
3129 * AdvSIMD load/store single structure (post-indexed)
3130 *
3131 *  31  30  29           23 22 21 20       16 15 13 12  11  10 9    5 4    0
3132 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
3133 * | 0 | Q | 0 0 1 1 0 1 1 | L R |     Rm    | opc | S | size |  Rn  |  Rt  |
3134 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
3135 *
3136 * Rt: first (or only) SIMD&FP register to be transferred
3137 * Rn: base address or SP
3138 * Rm (post-index only): post-index register (when !31) or size dependent #imm
3139 * index = encoded in Q:S:size dependent on size
3140 *
3141 * lane_size = encoded in R, opc
3142 * transfer width = encoded in opc, S, size
3143 */
3144static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
3145{
3146    int rt = extract32(insn, 0, 5);
3147    int rn = extract32(insn, 5, 5);
3148    int size = extract32(insn, 10, 2);
3149    int S = extract32(insn, 12, 1);
3150    int opc = extract32(insn, 13, 3);
3151    int R = extract32(insn, 21, 1);
3152    int is_load = extract32(insn, 22, 1);
3153    int is_postidx = extract32(insn, 23, 1);
3154    int is_q = extract32(insn, 30, 1);
3155
3156    int scale = extract32(opc, 1, 2);
3157    int selem = (extract32(opc, 0, 1) << 1 | R) + 1;
3158    bool replicate = false;
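    /* index starts out as Q:S:size; its low bits are consumed as the
     * lane size grows, hence the shifts in the switch below.
     */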
3159    int index = is_q << 3 | S << 2 | size;
3160    int ebytes, xs;
3161    TCGv_i64 tcg_addr, tcg_rn;
3162
3163    switch (scale) {
3164    case 3:
3165        if (!is_load || S) {
3166            unallocated_encoding(s);
3167            return;
3168        }
3169        scale = size;
3170        replicate = true;
3171        break;
3172    case 0:
3173        break;
3174    case 1:
3175        if (extract32(size, 0, 1)) {
3176            unallocated_encoding(s);
3177            return;
3178        }
3179        index >>= 1;
3180        break;
3181    case 2:
3182        if (extract32(size, 1, 1)) {
3183            unallocated_encoding(s);
3184            return;
3185        }
3186        if (!extract32(size, 0, 1)) {
3187            index >>= 2;
3188        } else {
3189            if (S) {
3190                unallocated_encoding(s);
3191                return;
3192            }
3193            index >>= 3;
3194            scale = 3;
3195        }
3196        break;
3197    default:
3198        g_assert_not_reached();
3199    }
3200
3201    if (!fp_access_check(s)) {
3202        return;
3203    }
3204
3205    ebytes = 1 << scale;
3206
3207    if (rn == 31) {
3208        gen_check_sp_alignment(s);
3209    }
3210
3211    tcg_rn = cpu_reg_sp(s, rn);
3212    tcg_addr = tcg_temp_new_i64();
3213    tcg_gen_mov_i64(tcg_addr, tcg_rn);
3214
3215    for (xs = 0; xs < selem; xs++) {
3216        if (replicate) {
3217            /* Load and replicate to all elements */
3218            uint64_t mulconst;
3219            TCGv_i64 tcg_tmp = tcg_temp_new_i64();
3220
3221            tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr,
3222                                get_mem_index(s), s->be_data + scale);
3223            switch (scale) {
3224            case 0:
3225                mulconst = 0x0101010101010101ULL;
3226                break;
3227            case 1:
3228                mulconst = 0x0001000100010001ULL;
3229                break;
3230            case 2:
3231                mulconst = 0x0000000100000001ULL;
3232                break;
3233            case 3:
3234                mulconst = 0;
3235                break;
3236            default:
3237                g_assert_not_reached();
3238            }
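            /* The multiply below smears the loaded element across the
             * 64-bit temporary: e.g. a byte load of 0xab multiplied by
             * 0x0101010101010101 gives 0xabababababababab.  For scale 3
             * the element already fills 64 bits, so mulconst is 0 and
             * the multiply is skipped.
             */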
3239            if (mulconst) {
3240                tcg_gen_muli_i64(tcg_tmp, tcg_tmp, mulconst);
3241            }
3242            write_vec_element(s, tcg_tmp, rt, 0, MO_64);
3243            if (is_q) {
3244                write_vec_element(s, tcg_tmp, rt, 1, MO_64);
3245            }
3246            tcg_temp_free_i64(tcg_tmp);
3247            clear_vec_high(s, is_q, rt);
3248        } else {
3249            /* Load/store one element per register */
3250            if (is_load) {
3251                do_vec_ld(s, rt, index, tcg_addr, scale);
3252            } else {
3253                do_vec_st(s, rt, index, tcg_addr, scale);
3254            }
3255        }
3256        tcg_gen_addi_i64(tcg_addr, tcg_addr, ebytes);
3257        rt = (rt + 1) % 32;
3258    }
3259
3260    if (is_postidx) {
3261        int rm = extract32(insn, 16, 5);
3262        if (rm == 31) {
3263            tcg_gen_mov_i64(tcg_rn, tcg_addr);
3264        } else {
3265            tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
3266        }
3267    }
3268    tcg_temp_free_i64(tcg_addr);
3269}
3270
3271/* Loads and stores */
3272static void disas_ldst(DisasContext *s, uint32_t insn)
3273{
3274    switch (extract32(insn, 24, 6)) {
3275    case 0x08: /* Load/store exclusive */
3276        disas_ldst_excl(s, insn);
3277        break;
3278    case 0x18: case 0x1c: /* Load register (literal) */
3279        disas_ld_lit(s, insn);
3280        break;
3281    case 0x28: case 0x29:
3282    case 0x2c: case 0x2d: /* Load/store pair (all forms) */
3283        disas_ldst_pair(s, insn);
3284        break;
3285    case 0x38: case 0x39:
3286    case 0x3c: case 0x3d: /* Load/store register (all forms) */
3287        disas_ldst_reg(s, insn);
3288        break;
3289    case 0x0c: /* AdvSIMD load/store multiple structures */
3290        disas_ldst_multiple_struct(s, insn);
3291        break;
3292    case 0x0d: /* AdvSIMD load/store single structure */
3293        disas_ldst_single_struct(s, insn);
3294        break;
3295    default:
3296        unallocated_encoding(s);
3297        break;
3298    }
3299}
3300
3301/* PC-rel. addressing
3302 *   31  30   29 28       24 23                5 4    0
3303 * +----+-------+-----------+-------------------+------+
3304 * | op | immlo | 1 0 0 0 0 |       immhi       |  Rd  |
3305 * +----+-------+-----------+-------------------+------+
3306 */
3307static void disas_pc_rel_adr(DisasContext *s, uint32_t insn)
3308{
3309    unsigned int page, rd;
3310    uint64_t base;
3311    uint64_t offset;
3312
3313    page = extract32(insn, 31, 1);
3314    /* SignExtend(immhi:immlo) -> offset */
3315    offset = sextract64(insn, 5, 19);
3316    offset = offset << 2 | extract32(insn, 29, 2);
3317    rd = extract32(insn, 0, 5);
3318    base = s->pc - 4;
3319
3320    if (page) {
3321        /* ADRP (page based) */
3322        base &= ~0xfff;
3323        offset <<= 12;
3324    }
3325
3326    tcg_gen_movi_i64(cpu_reg(s, rd), base + offset);
3327}
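
/* Worked example: an ADRP at address 0x40001008 with immhi:immlo = 3
 * computes base = 0x40001008 & ~0xfff = 0x40001000 and
 * offset = 3 << 12 = 0x3000, so Rd = 0x40004000.  (s->pc has already
 * advanced past the insn when we get here, hence the -4 above.)
 */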
3328
3329/*
3330 * Add/subtract (immediate)
3331 *
3332 *  31 30 29 28       24 23 22 21         10 9   5 4   0
3333 * +--+--+--+-----------+-----+-------------+-----+-----+
3334 * |sf|op| S| 1 0 0 0 1 |shift|    imm12    |  Rn | Rd  |
3335 * +--+--+--+-----------+-----+-------------+-----+-----+
3336 *
3337 *    sf: 0 -> 32bit, 1 -> 64bit
3338 *    op: 0 -> add  , 1 -> sub
3339 *     S: 1 -> set flags
3340 * shift: 00 -> LSL imm by 0, 01 -> LSL imm by 12
3341 */
3342static void disas_add_sub_imm(DisasContext *s, uint32_t insn)
3343{
3344    int rd = extract32(insn, 0, 5);
3345    int rn = extract32(insn, 5, 5);
3346    uint64_t imm = extract32(insn, 10, 12);
3347    int shift = extract32(insn, 22, 2);
3348    bool setflags = extract32(insn, 29, 1);
3349    bool sub_op = extract32(insn, 30, 1);
3350    bool is_64bit = extract32(insn, 31, 1);
3351
3352    TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
3353    TCGv_i64 tcg_rd = setflags ? cpu_reg(s, rd) : cpu_reg_sp(s, rd);
3354    TCGv_i64 tcg_result;
3355
3356    switch (shift) {
3357    case 0x0:
3358        break;
3359    case 0x1:
3360        imm <<= 12;
3361        break;
3362    default:
3363        unallocated_encoding(s);
3364        return;
3365    }
3366
3367    tcg_result = tcg_temp_new_i64();
3368    if (!setflags) {
3369        if (sub_op) {
3370            tcg_gen_subi_i64(tcg_result, tcg_rn, imm);
3371        } else {
3372            tcg_gen_addi_i64(tcg_result, tcg_rn, imm);
3373        }
3374    } else {
3375        TCGv_i64 tcg_imm = tcg_const_i64(imm);
3376        if (sub_op) {
3377            gen_sub_CC(is_64bit, tcg_result, tcg_rn, tcg_imm);
3378        } else {
3379            gen_add_CC(is_64bit, tcg_result, tcg_rn, tcg_imm);
3380        }
3381        tcg_temp_free_i64(tcg_imm);
3382    }
3383
3384    if (is_64bit) {
3385        tcg_gen_mov_i64(tcg_rd, tcg_result);
3386    } else {
3387        tcg_gen_ext32u_i64(tcg_rd, tcg_result);
3388    }
3389
3390    tcg_temp_free_i64(tcg_result);
3391}
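
/* For example, with shift = 01 the 12-bit immediate is pre-shifted, so
 * "ADD X0, X1, #5, LSL #12" encodes imm12 = 5 and computes
 * X0 = X1 + 0x5000.
 */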
3392
3393/* The input should be a value in the bottom e bits (with higher
3394 * bits zero); returns that value replicated into every element
3395 * of size e in a 64 bit integer.
3396 */
3397static uint64_t bitfield_replicate(uint64_t mask, unsigned int e)
3398{
3399    assert(e != 0);
3400    while (e < 64) {
3401        mask |= mask << e;
3402        e *= 2;
3403    }
3404    return mask;
3405}
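
/* For example, replicating a two-bit run across byte-sized elements:
 *   bitfield_replicate(0x03, 8) -> 0x0303 -> 0x03030303
 *                               -> 0x0303030303030303
 * doubling the filled width on each iteration of the loop above.
 */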
3406
3407/* Return a value with the bottom len bits set (where 0 < len <= 64) */
3408static inline uint64_t bitmask64(unsigned int length)
3409{
3410    assert(length > 0 && length <= 64);
3411    return ~0ULL >> (64 - length);
3412}
3413
3414/* Simplified variant of pseudocode DecodeBitMasks() for the case where we
3415 * only require the wmask. Returns false if the imms/immr/immn are a reserved
3416 * value (ie should cause a guest UNDEF exception), and true if they are
3417 * valid, in which case the decoded bit pattern is written to result.
3418 */
3419bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn,
3420                            unsigned int imms, unsigned int immr)
3421{
3422    uint64_t mask;
3423    unsigned e, levels, s, r;
3424    int len;
3425
3426    assert(immn < 2 && imms < 64 && immr < 64);
3427
3428    /* The bit patterns we create here are 64 bit patterns which
3429     * are vectors of identical elements of size e = 2, 4, 8, 16, 32 or
3430     * 64 bits each. Each element contains the same value: a run
3431     * of between 1 and e-1 non-zero bits, rotated within the
3432     * element by between 0 and e-1 bits.
3433     *
3434     * The element size and run length are encoded into immn (1 bit)
3435     * and imms (6 bits) as follows:
3436     * 64 bit elements: immn = 1, imms = <length of run - 1>
3437     * 32 bit elements: immn = 0, imms = 0 : <length of run - 1>
3438     * 16 bit elements: immn = 0, imms = 10 : <length of run - 1>
3439     *  8 bit elements: immn = 0, imms = 110 : <length of run - 1>
3440     *  4 bit elements: immn = 0, imms = 1110 : <length of run - 1>
3441     *  2 bit elements: immn = 0, imms = 11110 : <length of run - 1>
3442     * Notice that immn = 0, imms = 11111x is the only combination
3443     * not covered by one of the above options; this is reserved.
3444     * Further, <length of run - 1> all-ones is a reserved pattern.
3445     *
3446     * In all cases the rotation is by immr % e (and immr is 6 bits).
3447     */
3448
3449    /* First determine the element size */
3450    len = 31 - clz32((immn << 6) | (~imms & 0x3f));
3451    if (len < 1) {
3452        /* This is the immn == 0, imms == 11111x case */
3453        return false;
3454    }
3455    e = 1 << len;
3456
3457    levels = e - 1;
3458    s = imms & levels;
3459    r = immr & levels;
3460
3461    if (s == levels) {
3462        /* <length of run - 1> mustn't be all-ones. */
3463        return false;
3464    }
3465
3466    /* Create the value of one element: s+1 set bits rotated
3467     * by r within the element (which is e bits wide)...
3468     */
3469    mask = bitmask64(s + 1);
3470    if (r) {
3471        mask = (mask >> r) | (mask << (e - r));
3472        mask &= bitmask64(e);
3473    }
3474    /* ...then replicate the element over the whole 64 bit value */
3475    mask = bitfield_replicate(mask, e);
3476    *result = mask;
3477    return true;
3478}
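
/* Worked example: immn = 0, imms = 000111, immr = 000000 selects 32-bit
 * elements (len = 5, so e = 32) with a run of s + 1 = 8 set bits and no
 * rotation, giving 0x000000ff000000ff.  With immr = 001000 the run is
 * rotated right by 8 within each element, giving 0xff000000ff000000.
 */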
3479
3480/* Logical (immediate)
3481 *   31  30 29 28         23 22  21  16 15  10 9    5 4    0
3482 * +----+-----+-------------+---+------+------+------+------+
3483 * | sf | opc | 1 0 0 1 0 0 | N | immr | imms |  Rn  |  Rd  |
3484 * +----+-----+-------------+---+------+------+------+------+
3485 */
3486static void disas_logic_imm(DisasContext *s, uint32_t insn)
3487{
3488    unsigned int sf, opc, is_n, immr, imms, rn, rd;
3489    TCGv_i64 tcg_rd, tcg_rn;
3490    uint64_t wmask;
3491    bool is_and = false;
3492
3493    sf = extract32(insn, 31, 1);
3494    opc = extract32(insn, 29, 2);
3495    is_n = extract32(insn, 22, 1);
3496    immr = extract32(insn, 16, 6);
3497    imms = extract32(insn, 10, 6);
3498    rn = extract32(insn, 5, 5);
3499    rd = extract32(insn, 0, 5);
3500
3501    if (!sf && is_n) {
3502        unallocated_encoding(s);
3503        return;
3504    }
3505
3506    if (opc == 0x3) { /* ANDS */
3507        tcg_rd = cpu_reg(s, rd);
3508    } else {
3509        tcg_rd = cpu_reg_sp(s, rd);
3510    }
3511    tcg_rn = cpu_reg(s, rn);
3512
3513    if (!logic_imm_decode_wmask(&wmask, is_n, imms, immr)) {
3514        /* some immediate field values are reserved */
3515        unallocated_encoding(s);
3516        return;
3517    }
3518
3519    if (!sf) {
3520        wmask &= 0xffffffff;
3521    }
3522
3523    switch (opc) {
3524    case 0x3: /* ANDS */
3525    case 0x0: /* AND */
3526        tcg_gen_andi_i64(tcg_rd, tcg_rn, wmask);
3527        is_and = true;
3528        break;
3529    case 0x1: /* ORR */
3530        tcg_gen_ori_i64(tcg_rd, tcg_rn, wmask);
3531        break;
3532    case 0x2: /* EOR */
3533        tcg_gen_xori_i64(tcg_rd, tcg_rn, wmask);
3534        break;
3535    default:
3536        g_assert_not_reached(); /* must handle all above */
3537        break;
3538    }
3539
3540    if (!sf && !is_and) {
3541        /* zero extend final result; we know we can skip this for AND
3542         * since the immediate had the high 32 bits clear.
3543         */
3544        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
3545    }
3546
3547    if (opc == 3) { /* ANDS */
3548        gen_logic_CC(sf, tcg_rd);
3549    }
3550}
3551
3552/*
3553 * Move wide (immediate)
3554 *
3555 *  31 30 29 28         23 22 21 20             5 4    0
3556 * +--+-----+-------------+-----+----------------+------+
3557 * |sf| opc | 1 0 0 1 0 1 |  hw |  imm16         |  Rd  |
3558 * +--+-----+-------------+-----+----------------+------+
3559 *
3560 * sf: 0 -> 32 bit, 1 -> 64 bit
3561 * opc: 00 -> MOVN, 10 -> MOVZ, 11 -> MOVK (01 is unallocated)
3562 * hw: shift/16 (0 or 16; 32 and 48 are valid only when sf=1)
3563 */
3564static void disas_movw_imm(DisasContext *s, uint32_t insn)
3565{
3566    int rd = extract32(insn, 0, 5);
3567    uint64_t imm = extract32(insn, 5, 16);
3568    int sf = extract32(insn, 31, 1);
3569    int opc = extract32(insn, 29, 2);
3570    int pos = extract32(insn, 21, 2) << 4;
3571    TCGv_i64 tcg_rd = cpu_reg(s, rd);
3572    TCGv_i64 tcg_imm;
3573
3574    if (!sf && (pos >= 32)) {
3575        unallocated_encoding(s);
3576        return;
3577    }
3578
3579    switch (opc) {
3580    case 0: /* MOVN */
3581    case 2: /* MOVZ */
3582        imm <<= pos;
3583        if (opc == 0) {
3584            imm = ~imm;
3585        }
3586        if (!sf) {
3587            imm &= 0xffffffffu;
3588        }
3589        tcg_gen_movi_i64(tcg_rd, imm);
3590        break;
3591    case 3: /* MOVK */
3592        tcg_imm = tcg_const_i64(imm);
3593        tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_imm, pos, 16);
3594        tcg_temp_free_i64(tcg_imm);
3595        if (!sf) {
3596            tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
3597        }
3598        break;
3599    default:
3600        unallocated_encoding(s);
3601        break;
3602    }
3603}
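
/* For example, a 64-bit constant can be built piecewise:
 *   MOVZ X0, #0x1234, LSL #16    -> X0 = 0x0000000012340000
 *   MOVK X0, #0x5678             -> X0 = 0x0000000012345678
 * MOVK deposits 16 bits at position pos and leaves the rest intact.
 */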
3604
3605/* Bitfield
3606 *   31  30 29 28         23 22  21  16 15  10 9    5 4    0
3607 * +----+-----+-------------+---+------+------+------+------+
3608 * | sf | opc | 1 0 0 1 1 0 | N | immr | imms |  Rn  |  Rd  |
3609 * +----+-----+-------------+---+------+------+------+------+
3610 */
3611static void disas_bitfield(DisasContext *s, uint32_t insn)
3612{
3613    unsigned int sf, n, opc, ri, si, rn, rd, bitsize, pos, len;
3614    TCGv_i64 tcg_rd, tcg_tmp;
3615
3616    sf = extract32(insn, 31, 1);
3617    opc = extract32(insn, 29, 2);
3618    n = extract32(insn, 22, 1);
3619    ri = extract32(insn, 16, 6);
3620    si = extract32(insn, 10, 6);
3621    rn = extract32(insn, 5, 5);
3622    rd = extract32(insn, 0, 5);
3623    bitsize = sf ? 64 : 32;
3624
3625    if (sf != n || ri >= bitsize || si >= bitsize || opc > 2) {
3626        unallocated_encoding(s);
3627        return;
3628    }
3629
3630    tcg_rd = cpu_reg(s, rd);
3631
3632    /* Suppress the zero-extend for !sf.  Since RI and SI are constrained
3633       to be smaller than bitsize, we'll never reference data outside the
3634       low 32-bits anyway.  */
3635    tcg_tmp = read_cpu_reg(s, rn, 1);
3636
3637    /* Recognize simple(r) extractions.  */
3638    if (si >= ri) {
3639        /* Wd<s-r:0> = Wn<s:r> */
3640        len = (si - ri) + 1;
3641        if (opc == 0) { /* SBFM: ASR, SBFX, SXTB, SXTH, SXTW */
3642            tcg_gen_sextract_i64(tcg_rd, tcg_tmp, ri, len);
3643            goto done;
3644        } else if (opc == 2) { /* UBFM: UBFX, LSR, UXTB, UXTH */
3645            tcg_gen_extract_i64(tcg_rd, tcg_tmp, ri, len);
3646            return;
3647        }
3648        /* opc == 1, BFXIL: fall through to deposit */
3649        tcg_gen_extract_i64(tcg_tmp, tcg_tmp, ri, len);
3650        pos = 0;
3651    } else {
3652        /* Handle the ri > si case with a deposit
3653         * Wd<32+s-r,32-r> = Wn<s:0>
3654         */
3655        len = si + 1;
3656        pos = (bitsize - ri) & (bitsize - 1);
3657    }
3658
3659    if (opc == 0 && len < ri) {
3660        /* SBFM: sign extend the destination field from len to fill
3661           the balance of the word.  Let the deposit below insert all
3662           of those sign bits.  */
3663        tcg_gen_sextract_i64(tcg_tmp, tcg_tmp, 0, len);
3664        len = ri;
3665    }
3666
3667    if (opc == 1) { /* BFM, BFXIL */
3668        tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, pos, len);
3669    } else {
3670        /* SBFM or UBFM: We start with zero, and we haven't modified
3671           any bits outside bitsize, therefore the zero-extension
3672           below is unneeded.  */
3673        tcg_gen_deposit_z_i64(tcg_rd, tcg_tmp, pos, len);
3674        return;
3675    }
3676
3677 done:
3678    if (!sf) { /* zero extend final result */
3679        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
3680    }
3681}
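
/* Worked examples of the two arms above:
 * UBFX W0, W1, #8, #8 encodes ri = 8, si = 15 (si >= ri), so len = 8
 * and the result is extract(W1, 8, 8).
 * BFI W0, W1, #8, #4 encodes ri = 24, si = 3 (si < ri), so len = 4 and
 * pos = (32 - 24) & 31 = 8: bits 11:8 of W0 are replaced by bits 3:0
 * of W1 via the deposit.
 */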
3682
3683/* Extract
3684 *   31  30  29 28         23 22   21  20  16 15    10 9    5 4    0
3685 * +----+------+-------------+---+----+------+--------+------+------+
3686 * | sf | op21 | 1 0 0 1 1 1 | N | o0 |  Rm  |  imms  |  Rn  |  Rd  |
3687 * +----+------+-------------+---+----+------+--------+------+------+
3688 */
3689static void disas_extract(DisasContext *s, uint32_t insn)
3690{
3691    unsigned int sf, n, rm, imm, rn, rd, bitsize, op21, op0;
3692
3693    sf = extract32(insn, 31, 1);
3694    n = extract32(insn, 22, 1);
3695    rm = extract32(insn, 16, 5);
3696    imm = extract32(insn, 10, 6);
3697    rn = extract32(insn, 5, 5);
3698    rd = extract32(insn, 0, 5);
3699    op21 = extract32(insn, 29, 2);
3700    op0 = extract32(insn, 21, 1);
3701    bitsize = sf ? 64 : 32;
3702
3703    if (sf != n || op21 || op0 || imm >= bitsize) {
3704        unallocated_encoding(s);
3705    } else {
3706        TCGv_i64 tcg_rd, tcg_rm, tcg_rn;
3707
3708        tcg_rd = cpu_reg(s, rd);
3709
3710        if (unlikely(imm == 0)) {
3711            /* tcg shl_i32/shl_i64 is undefined for 32/64 bit shifts,
3712             * so an extract from bit 0 is a special case.
3713             */
3714            if (sf) {
3715                tcg_gen_mov_i64(tcg_rd, cpu_reg(s, rm));
3716            } else {
3717                tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, rm));
3718            }
3719        } else if (rm == rn) { /* ROR */
3720            tcg_rm = cpu_reg(s, rm);
3721            if (sf) {
3722                tcg_gen_rotri_i64(tcg_rd, tcg_rm, imm);
3723            } else {
3724                TCGv_i32 tmp = tcg_temp_new_i32();
3725                tcg_gen_extrl_i64_i32(tmp, tcg_rm);
3726                tcg_gen_rotri_i32(tmp, tmp, imm);
3727                tcg_gen_extu_i32_i64(tcg_rd, tmp);
3728                tcg_temp_free_i32(tmp);
3729            }
3730        } else {
3731            tcg_rm = read_cpu_reg(s, rm, sf);
3732            tcg_rn = read_cpu_reg(s, rn, sf);
3733            tcg_gen_shri_i64(tcg_rm, tcg_rm, imm);
3734            tcg_gen_shli_i64(tcg_rn, tcg_rn, bitsize - imm);
3735            tcg_gen_or_i64(tcg_rd, tcg_rm, tcg_rn);
3736            if (!sf) {
3737                tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
3738            }
3739        }
3740    }
3741}
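
/* The general arm above computes Rd = (Rm >> imm) | (Rn << (bitsize - imm)),
 * i.e. bits imm+bitsize-1:imm of the concatenation Rn:Rm.  For example
 * EXTR X0, X1, X2, #8 gives X0 = (X2 >> 8) | (X1 << 56); when Rn == Rm
 * this degenerates into the rotate handled by the ROR special case.
 */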
3742
3743/* Data processing - immediate */
3744static void disas_data_proc_imm(DisasContext *s, uint32_t insn)
3745{
3746    switch (extract32(insn, 23, 6)) {
3747    case 0x20: case 0x21: /* PC-rel. addressing */
3748        disas_pc_rel_adr(s, insn);
3749        break;
3750    case 0x22: case 0x23: /* Add/subtract (immediate) */
3751        disas_add_sub_imm(s, insn);
3752        break;
3753    case 0x24: /* Logical (immediate) */
3754        disas_logic_imm(s, insn);
3755        break;
3756    case 0x25: /* Move wide (immediate) */
3757        disas_movw_imm(s, insn);
3758        break;
3759    case 0x26: /* Bitfield */
3760        disas_bitfield(s, insn);
3761        break;
3762    case 0x27: /* Extract */
3763        disas_extract(s, insn);
3764        break;
3765    default:
3766        unallocated_encoding(s);
3767        break;
3768    }
3769}
3770
3771/* Shift a TCGv src by TCGv shift_amount, put result in dst.
3772 * Note that it is the caller's responsibility to ensure that the
3773 * shift amount is in range (ie 0..31 or 0..63) and provide the ARM
3774 * mandated semantics for out of range shifts.
3775 */
3776static void shift_reg(TCGv_i64 dst, TCGv_i64 src, int sf,
3777                      enum a64_shift_type shift_type, TCGv_i64 shift_amount)
3778{
3779    switch (shift_type) {
3780    case A64_SHIFT_TYPE_LSL:
3781        tcg_gen_shl_i64(dst, src, shift_amount);
3782        break;
3783    case A64_SHIFT_TYPE_LSR:
3784        tcg_gen_shr_i64(dst, src, shift_amount);
3785        break;
3786    case A64_SHIFT_TYPE_ASR:
3787        if (!sf) {
3788            tcg_gen_ext32s_i64(dst, src);
3789        }
3790        tcg_gen_sar_i64(dst, sf ? src : dst, shift_amount);
3791        break;
3792    case A64_SHIFT_TYPE_ROR:
3793        if (sf) {
3794            tcg_gen_rotr_i64(dst, src, shift_amount);
3795        } else {
3796            TCGv_i32 t0, t1;
3797            t0 = tcg_temp_new_i32();
3798            t1 = tcg_temp_new_i32();
3799            tcg_gen_extrl_i64_i32(t0, src);
3800            tcg_gen_extrl_i64_i32(t1, shift_amount);
3801            tcg_gen_rotr_i32(t0, t0, t1);
3802            tcg_gen_extu_i32_i64(dst, t0);
3803            tcg_temp_free_i32(t0);
3804            tcg_temp_free_i32(t1);
3805        }
3806        break;
3807    default:
3808        g_assert_not_reached(); /* all shift types should be handled */
3809        break;
3810    }
3811
3812    if (!sf) { /* zero extend final result */
3813        tcg_gen_ext32u_i64(dst, dst);
3814    }
3815}
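
/* For example, a 32-bit ROR must wrap within 32 bits: rotating
 * 0x80000001 right by 1 gives 0xc0000000, which is why the !sf path
 * drops to 32-bit arithmetic before the final zero extension.
 */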
3816
3817/* Shift a TCGv src by immediate, put result in dst.
3818 * The shift amount must be in range (this should always be true as the
3819 * relevant instructions will UNDEF on bad shift immediates).
3820 */
3821static void shift_reg_imm(TCGv_i64 dst, TCGv_i64 src, int sf,
3822                          enum a64_shift_type shift_type, unsigned int shift_i)
3823{
3824    assert(shift_i < (sf ? 64 : 32));
3825
3826    if (shift_i == 0) {
3827        tcg_gen_mov_i64(dst, src);
3828    } else {
3829        TCGv_i64 shift_const;
3830
3831        shift_const = tcg_const_i64(shift_i);
3832        shift_reg(dst, src, sf, shift_type, shift_const);
3833        tcg_temp_free_i64(shift_const);
3834    }
3835}
3836
3837/* Logical (shifted register)
3838 *   31  30 29 28       24 23   22 21  20  16 15    10 9    5 4    0
3839 * +----+-----+-----------+-------+---+------+--------+------+------+
3840 * | sf | opc | 0 1 0 1 0 | shift | N |  Rm  |  imm6  |  Rn  |  Rd  |
3841 * +----+-----+-----------+-------+---+------+--------+------+------+
3842 */
3843static void disas_logic_reg(DisasContext *s, uint32_t insn)
3844{
3845    TCGv_i64 tcg_rd, tcg_rn, tcg_rm;
3846    unsigned int sf, opc, shift_type, invert, rm, shift_amount, rn, rd;
3847
3848    sf = extract32(insn, 31, 1);
3849    opc = extract32(insn, 29, 2);
3850    shift_type = extract32(insn, 22, 2);
3851    invert = extract32(insn, 21, 1);
3852    rm = extract32(insn, 16, 5);
3853    shift_amount = extract32(insn, 10, 6);
3854    rn = extract32(insn, 5, 5);
3855    rd = extract32(insn, 0, 5);
3856
3857    if (!sf && (shift_amount & (1 << 5))) {
3858        unallocated_encoding(s);
3859        return;
3860    }
3861
3862    tcg_rd = cpu_reg(s, rd);
3863
3864    if (opc == 1 && shift_amount == 0 && shift_type == 0 && rn == 31) {
3865        /* Unshifted ORR and ORN with WZR/XZR is the standard encoding for
3866         * register-register MOV and MVN, so it is worth special casing.
3867         */
3868        tcg_rm = cpu_reg(s, rm);
3869        if (invert) {
3870            tcg_gen_not_i64(tcg_rd, tcg_rm);
3871            if (!sf) {
3872                tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
3873            }
3874        } else {
3875            if (sf) {
3876                tcg_gen_mov_i64(tcg_rd, tcg_rm);
3877            } else {
3878                tcg_gen_ext32u_i64(tcg_rd, tcg_rm);
3879            }
3880        }
3881        return;
3882    }
3883
3884    tcg_rm = read_cpu_reg(s, rm, sf);
3885
3886    if (shift_amount) {
3887        shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, shift_amount);
3888    }
3889
3890    tcg_rn = cpu_reg(s, rn);
3891
3892    switch (opc | (invert << 2)) {
3893    case 0: /* AND */
3894    case 3: /* ANDS */
3895        tcg_gen_and_i64(tcg_rd, tcg_rn, tcg_rm);
3896        break;
3897    case 1: /* ORR */
3898        tcg_gen_or_i64(tcg_rd, tcg_rn, tcg_rm);
3899        break;
3900    case 2: /* EOR */
3901        tcg_gen_xor_i64(tcg_rd, tcg_rn, tcg_rm);
3902        break;
3903    case 4: /* BIC */
3904    case 7: /* BICS */
3905        tcg_gen_andc_i64(tcg_rd, tcg_rn, tcg_rm);
3906        break;
3907    case 5: /* ORN */
3908        tcg_gen_orc_i64(tcg_rd, tcg_rn, tcg_rm);
3909        break;
3910    case 6: /* EON */
3911        tcg_gen_eqv_i64(tcg_rd, tcg_rn, tcg_rm);
3912        break;
3913    default:
3914        g_assert_not_reached();
3915        break;
3916    }
3917
3918    if (!sf) {
3919        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
3920    }
3921
3922    if (opc == 3) {
3923        gen_logic_CC(sf, tcg_rd);
3924    }
3925}
3926
3927/*
3928 * Add/subtract (extended register)
3929 *
3930 *  31|30|29|28       24|23 22|21|20   16|15  13|12  10|9  5|4  0|
3931 * +--+--+--+-----------+-----+--+-------+------+------+----+----+
3932 * |sf|op| S| 0 1 0 1 1 | opt | 1|  Rm   |option| imm3 | Rn | Rd |
3933 * +--+--+--+-----------+-----+--+-------+------+------+----+----+
3934 *
3935 *  sf: 0 -> 32bit, 1 -> 64bit
3936 *  op: 0 -> add  , 1 -> sub
3937 *   S: 1 -> set flags
3938 * opt: 00
3939 * option: extension type (see DecodeRegExtend)
3940 * imm3: optional shift to Rm
3941 *
3942 * Rd = Rn + LSL(extend(Rm), amount)
3943 */
3944static void disas_add_sub_ext_reg(DisasContext *s, uint32_t insn)
3945{
3946    int rd = extract32(insn, 0, 5);
3947    int rn = extract32(insn, 5, 5);
3948    int imm3 = extract32(insn, 10, 3);
3949    int option = extract32(insn, 13, 3);
3950    int rm = extract32(insn, 16, 5);
3951    bool setflags = extract32(insn, 29, 1);
3952    bool sub_op = extract32(insn, 30, 1);
3953    bool sf = extract32(insn, 31, 1);
3954
3955    TCGv_i64 tcg_rm, tcg_rn; /* temps */
3956    TCGv_i64 tcg_rd;
3957    TCGv_i64 tcg_result;
3958
3959    if (imm3 > 4) {
3960        unallocated_encoding(s);
3961        return;
3962    }
3963
3964    /* non-flag setting ops may use SP */
3965    if (!setflags) {
3966        tcg_rd = cpu_reg_sp(s, rd);
3967    } else {
3968        tcg_rd = cpu_reg(s, rd);
3969    }
3970    tcg_rn = read_cpu_reg_sp(s, rn, sf);
3971
3972    tcg_rm = read_cpu_reg(s, rm, sf);
3973    ext_and_shift_reg(tcg_rm, tcg_rm, option, imm3);
3974
3975    tcg_result = tcg_temp_new_i64();
3976
3977    if (!setflags) {
3978        if (sub_op) {
3979            tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
3980        } else {
3981            tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
3982        }
3983    } else {
3984        if (sub_op) {
3985            gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
3986        } else {
3987            gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
3988        }
3989    }
3990
3991    if (sf) {
3992        tcg_gen_mov_i64(tcg_rd, tcg_result);
3993    } else {
3994        tcg_gen_ext32u_i64(tcg_rd, tcg_result);
3995    }
3996
3997    tcg_temp_free_i64(tcg_result);
3998}
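
/* For example "ADD X0, SP, W1, UXTW #2" reads the base with
 * read_cpu_reg_sp, then ext_and_shift_reg zero-extends the low 32 bits
 * of X1 and shifts left by imm3 = 2, so X0 = SP + (zext32(W1) << 2).
 */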
3999
4000/*
4001 * Add/subtract (shifted register)
4002 *
4003 *  31 30 29 28       24 23 22 21 20   16 15     10 9    5 4    0
4004 * +--+--+--+-----------+-----+--+-------+---------+------+------+
4005 * |sf|op| S| 0 1 0 1 1 |shift| 0|  Rm   |  imm6   |  Rn  |  Rd  |
4006 * +--+--+--+-----------+-----+--+-------+---------+------+------+
4007 *
4008 *    sf: 0 -> 32bit, 1 -> 64bit
4009 *    op: 0 -> add  , 1 -> sub
4010 *     S: 1 -> set flags
4011 * shift: 00 -> LSL, 01 -> LSR, 10 -> ASR, 11 -> RESERVED
4012 *  imm6: Shift amount to apply to Rm before the add/sub
4013 */
4014static void disas_add_sub_reg(DisasContext *s, uint32_t insn)
4015{
4016    int rd = extract32(insn, 0, 5);
4017    int rn = extract32(insn, 5, 5);
4018    int imm6 = extract32(insn, 10, 6);
4019    int rm = extract32(insn, 16, 5);
4020    int shift_type = extract32(insn, 22, 2);
4021    bool setflags = extract32(insn, 29, 1);
4022    bool sub_op = extract32(insn, 30, 1);
4023    bool sf = extract32(insn, 31, 1);
4024
4025    TCGv_i64 tcg_rd = cpu_reg(s, rd);
4026    TCGv_i64 tcg_rn, tcg_rm;
4027    TCGv_i64 tcg_result;
4028
4029    if ((shift_type == 3) || (!sf && (imm6 > 31))) {
4030        unallocated_encoding(s);
4031        return;
4032    }
4033
4034    tcg_rn = read_cpu_reg(s, rn, sf);
4035    tcg_rm = read_cpu_reg(s, rm, sf);
4036
4037    shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, imm6);
4038
4039    tcg_result = tcg_temp_new_i64();
4040
4041    if (!setflags) {
4042        if (sub_op) {
4043            tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
4044        } else {
4045            tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
4046        }
4047    } else {
4048        if (sub_op) {
4049            gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
4050        } else {
4051            gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
4052        }
4053    }
4054
4055    if (sf) {
4056        tcg_gen_mov_i64(tcg_rd, tcg_result);
4057    } else {
4058        tcg_gen_ext32u_i64(tcg_rd, tcg_result);
4059    }
4060
4061    tcg_temp_free_i64(tcg_result);
4062}
4063
4064/* Data-processing (3 source)
4065 *
4066 *    31 30  29 28       24 23 21  20  16  15  14  10 9    5 4    0
4067 *  +--+------+-----------+------+------+----+------+------+------+
4068 *  |sf| op54 | 1 1 0 1 1 | op31 |  Rm  | o0 |  Ra  |  Rn  |  Rd  |
4069 *  +--+------+-----------+------+------+----+------+------+------+
4070 */
4071static void disas_data_proc_3src(DisasContext *s, uint32_t insn)
4072{
4073    int rd = extract32(insn, 0, 5);
4074    int rn = extract32(insn, 5, 5);
4075    int ra = extract32(insn, 10, 5);
4076    int rm = extract32(insn, 16, 5);
4077    int op_id = (extract32(insn, 29, 3) << 4) |
4078        (extract32(insn, 21, 3) << 1) |
4079        extract32(insn, 15, 1);
4080    bool sf = extract32(insn, 31, 1);
4081    bool is_sub = extract32(op_id, 0, 1);
4082    bool is_high = extract32(op_id, 2, 1);
4083    bool is_signed = false;
4084    TCGv_i64 tcg_op1;
4085    TCGv_i64 tcg_op2;
4086    TCGv_i64 tcg_tmp;
4087
4088    /* Note that op_id is sf:op54:op31:o0 so it includes the 32/64 size flag */
4089    switch (op_id) {
4090    case 0x42: /* SMADDL */
4091    case 0x43: /* SMSUBL */
4092    case 0x44: /* SMULH */
4093        is_signed = true;
4094        break;
4095    case 0x0: /* MADD (32bit) */
4096    case 0x1: /* MSUB (32bit) */
4097    case 0x40: /* MADD (64bit) */
4098    case 0x41: /* MSUB (64bit) */
4099    case 0x4a: /* UMADDL */
4100    case 0x4b: /* UMSUBL */
4101    case 0x4c: /* UMULH */
4102        break;
4103    default:
4104        unallocated_encoding(s);
4105        return;
4106    }
4107
4108    if (is_high) {
4109        TCGv_i64 low_bits = tcg_temp_new_i64(); /* low bits discarded */
4110        TCGv_i64 tcg_rd = cpu_reg(s, rd);
4111        TCGv_i64 tcg_rn = cpu_reg(s, rn);
4112        TCGv_i64 tcg_rm = cpu_reg(s, rm);
4113
4114        if (is_signed) {
4115            tcg_gen_muls2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
4116        } else {
4117            tcg_gen_mulu2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
4118        }
4119
4120        tcg_temp_free_i64(low_bits);
4121        return;
4122    }
4123
4124    tcg_op1 = tcg_temp_new_i64();
4125    tcg_op2 = tcg_temp_new_i64();
4126    tcg_tmp = tcg_temp_new_i64();
4127
4128    if (op_id < 0x42) {
4129        tcg_gen_mov_i64(tcg_op1, cpu_reg(s, rn));
4130        tcg_gen_mov_i64(tcg_op2, cpu_reg(s, rm));
4131    } else {
4132        if (is_signed) {
4133            tcg_gen_ext32s_i64(tcg_op1, cpu_reg(s, rn));
4134            tcg_gen_ext32s_i64(tcg_op2, cpu_reg(s, rm));
4135        } else {
4136            tcg_gen_ext32u_i64(tcg_op1, cpu_reg(s, rn));
4137            tcg_gen_ext32u_i64(tcg_op2, cpu_reg(s, rm));
4138        }
4139    }
4140
4141    if (ra == 31 && !is_sub) {
4142        /* Special-case MADD with rA == XZR; it is the standard MUL alias */
4143        tcg_gen_mul_i64(cpu_reg(s, rd), tcg_op1, tcg_op2);
4144    } else {
4145        tcg_gen_mul_i64(tcg_tmp, tcg_op1, tcg_op2);
4146        if (is_sub) {
4147            tcg_gen_sub_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
4148        } else {
4149            tcg_gen_add_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
4150        }
4151    }
4152
4153    if (!sf) {
4154        tcg_gen_ext32u_i64(cpu_reg(s, rd), cpu_reg(s, rd));
4155    }
4156
4157    tcg_temp_free_i64(tcg_op1);
4158    tcg_temp_free_i64(tcg_op2);
4159    tcg_temp_free_i64(tcg_tmp);
4160}
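
/* op_id examples: 64-bit MADD (sf=1, op54=00, op31=000, o0=0) gives
 * op_id = 0b100 << 4 = 0x40, while SMULH (op31=010, o0=0) gives
 * 0x40 | (0b010 << 1) = 0x44, matching the cases above.
 */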
4161
4162/* Add/subtract (with carry)
4163 *  31 30 29 28 27 26 25 24 23 22 21  20  16  15   10  9    5 4   0
4164 * +--+--+--+------------------------+------+---------+------+-----+
4165 * |sf|op| S| 1  1  0  1  0  0  0  0 |  rm  | opcode2 |  Rn  |  Rd |
4166 * +--+--+--+------------------------+------+---------+------+-----+
4167 *                                            [000000]
4168 */
4169
4170static void disas_adc_sbc(DisasContext *s, uint32_t insn)
4171{
4172    unsigned int sf, op, setflags, rm, rn, rd;
4173    TCGv_i64 tcg_y, tcg_rn, tcg_rd;
4174
4175    if (extract32(insn, 10, 6) != 0) {
4176        unallocated_encoding(s);
4177        return;
4178    }
4179
4180    sf = extract32(insn, 31, 1);
4181    op = extract32(insn, 30, 1);
4182    setflags = extract32(insn, 29, 1);
4183    rm = extract32(insn, 16, 5);
4184    rn = extract32(insn, 5, 5);
4185    rd = extract32(insn, 0, 5);
4186
4187    tcg_rd = cpu_reg(s, rd);
4188    tcg_rn = cpu_reg(s, rn);
4189
4190    if (op) {
4191        tcg_y = new_tmp_a64(s);
4192        tcg_gen_not_i64(tcg_y, cpu_reg(s, rm));
4193    } else {
4194        tcg_y = cpu_reg(s, rm);
4195    }
4196
4197    if (setflags) {
4198        gen_adc_CC(sf, tcg_rd, tcg_rn, tcg_y);
4199    } else {
4200        gen_adc(sf, tcg_rd, tcg_rn, tcg_y);
4201    }
4202}
4203
4204/* Conditional compare (immediate / register)
4205 *  31 30 29 28 27 26 25 24 23 22 21  20    16 15  12  11  10  9   5  4 3   0
4206 * +--+--+--+------------------------+--------+------+----+--+------+--+-----+
4207 * |sf|op| S| 1  1  0  1  0  0  1  0 |imm5/rm | cond |i/r |o2|  Rn  |o3|nzcv |
4208 * +--+--+--+------------------------+--------+------+----+--+------+--+-----+
4209 *        [1]                             y                [0]       [0]
4210 */
4211static void disas_cc(DisasContext *s, uint32_t insn)
4212{
4213    unsigned int sf, op, y, cond, rn, nzcv, is_imm;
4214    TCGv_i32 tcg_t0, tcg_t1, tcg_t2;
4215    TCGv_i64 tcg_tmp, tcg_y, tcg_rn;
4216    DisasCompare c;
4217
4218    if (!extract32(insn, 29, 1)) {
4219        unallocated_encoding(s);
4220        return;
4221    }
4222    if (insn & (1 << 10 | 1 << 4)) {
4223        unallocated_encoding(s);
4224        return;
4225    }
4226    sf = extract32(insn, 31, 1);
4227    op = extract32(insn, 30, 1);
4228    is_imm = extract32(insn, 11, 1);
4229    y = extract32(insn, 16, 5); /* y = rm (reg) or imm5 (imm) */
4230    cond = extract32(insn, 12, 4);
4231    rn = extract32(insn, 5, 5);
4232    nzcv = extract32(insn, 0, 4);
4233
4234    /* Set T0 = !COND.  */
4235    tcg_t0 = tcg_temp_new_i32();
4236    arm_test_cc(&c, cond);
4237    tcg_gen_setcondi_i32(tcg_invert_cond(c.cond), tcg_t0, c.value, 0);
4238    arm_free_cc(&c);
4239
4240    /* Load the arguments for the new comparison.  */
4241    if (is_imm) {
4242        tcg_y = new_tmp_a64(s);
4243        tcg_gen_movi_i64(tcg_y, y);
4244    } else {
4245        tcg_y = cpu_reg(s, y);
4246    }
4247    tcg_rn = cpu_reg(s, rn);
4248
4249    /* Set the flags for the new comparison.  */
4250    tcg_tmp = tcg_temp_new_i64();
4251    if (op) {
4252        gen_sub_CC(sf, tcg_tmp, tcg_rn, tcg_y);
4253    } else {
4254        gen_add_CC(sf, tcg_tmp, tcg_rn, tcg_y);
4255    }
4256    tcg_temp_free_i64(tcg_tmp);
4257
4258    /* If COND was false, force the flags to #nzcv.  Compute two masks
4259     * to help with this: T1 = (COND ? 0 : -1), T2 = (COND ? -1 : 0).
4260     * For tcg hosts that support ANDC, we can make do with just T1.
4261     * In either case, allow the tcg optimizer to delete any unused mask.
4262     */
4263    tcg_t1 = tcg_temp_new_i32();
4264    tcg_t2 = tcg_temp_new_i32();
4265    tcg_gen_neg_i32(tcg_t1, tcg_t0);
4266    tcg_gen_subi_i32(tcg_t2, tcg_t0, 1);
4267
4268    if (nzcv & 8) { /* N */
4269        tcg_gen_or_i32(cpu_NF, cpu_NF, tcg_t1);
4270    } else {
4271        if (TCG_TARGET_HAS_andc_i32) {
4272            tcg_gen_andc_i32(cpu_NF, cpu_NF, tcg_t1);
4273        } else {
4274            tcg_gen_and_i32(cpu_NF, cpu_NF, tcg_t2);
4275        }
4276    }
4277    if (nzcv & 4) { /* Z */
4278        if (TCG_TARGET_HAS_andc_i32) {
4279            tcg_gen_andc_i32(cpu_ZF, cpu_ZF, tcg_t1);
4280        } else {
4281            tcg_gen_and_i32(cpu_ZF, cpu_ZF, tcg_t2);
4282        }
4283    } else {
4284        tcg_gen_or_i32(cpu_ZF, cpu_ZF, tcg_t0);
4285    }
4286    if (nzcv & 2) { /* C */
4287        tcg_gen_or_i32(cpu_CF, cpu_CF, tcg_t0);
4288    } else {
4289        if (TCG_TARGET_HAS_andc_i32) {
4290            tcg_gen_andc_i32(cpu_CF, cpu_CF, tcg_t1);
4291        } else {
4292            tcg_gen_and_i32(cpu_CF, cpu_CF, tcg_t2);
4293        }
4294    }
4295    if (nzcv & 1) { /* V */
4296        tcg_gen_or_i32(cpu_VF, cpu_VF, tcg_t1);
4297    } else {
4298        if (TCG_TARGET_HAS_andc_i32) {
4299            tcg_gen_andc_i32(cpu_VF, cpu_VF, tcg_t1);
4300        } else {
4301            tcg_gen_and_i32(cpu_VF, cpu_VF, tcg_t2);
4302        }
4303    }
4304    tcg_temp_free_i32(tcg_t0);
4305    tcg_temp_free_i32(tcg_t1);
4306    tcg_temp_free_i32(tcg_t2);
4307}
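
/* A worked pass through the masking above: if COND holds, T0 = 0, so
 * T1 = 0 and T2 = 0xffffffff, and the ORs/ANDs leave the freshly
 * computed flags alone.  If COND fails, T0 = 1, T1 = 0xffffffff,
 * T2 = 0, and each flag is forced to its corresponding #nzcv bit.
 */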
4308
4309/* Conditional select
4310 *   31   30  29  28             21 20  16 15  12 11 10 9    5 4    0
4311 * +----+----+---+-----------------+------+------+-----+------+------+
4312 * | sf | op | S | 1 1 0 1 0 1 0 0 |  Rm  | cond | op2 |  Rn  |  Rd  |
4313 * +----+----+---+-----------------+------+------+-----+------+------+
4314 */
4315static void disas_cond_select(DisasContext *s, uint32_t insn)
4316{
4317    unsigned int sf, else_inv, rm, cond, else_inc, rn, rd;
4318    TCGv_i64 tcg_rd, zero;
4319    DisasCompare64 c;
4320
4321    if (extract32(insn, 29, 1) || extract32(insn, 11, 1)) {
4322        /* S == 1 or op2<1> == 1 */
4323        unallocated_encoding(s);
4324        return;
4325    }
4326    sf = extract32(insn, 31, 1);
4327    else_inv = extract32(insn, 30, 1);
4328    rm = extract32(insn, 16, 5);
4329    cond = extract32(insn, 12, 4);
4330    else_inc = extract32(insn, 10, 1);
4331    rn = extract32(insn, 5, 5);
4332    rd = extract32(insn, 0, 5);
4333
4334    tcg_rd = cpu_reg(s, rd);
4335
4336    a64_test_cc(&c, cond);
4337    zero = tcg_const_i64(0);
4338
4339    if (rn == 31 && rm == 31 && (else_inc ^ else_inv)) {
4340        /* CSET & CSETM.  */
4341        tcg_gen_setcond_i64(tcg_invert_cond(c.cond), tcg_rd, c.value, zero);
4342        if (else_inv) {
4343            tcg_gen_neg_i64(tcg_rd, tcg_rd);
4344        }
4345    } else {
4346        TCGv_i64 t_true = cpu_reg(s, rn);
4347        TCGv_i64 t_false = read_cpu_reg(s, rm, 1);
4348        if (else_inv && else_inc) {
4349            tcg_gen_neg_i64(t_false, t_false);
4350        } else if (else_inv) {
4351            tcg_gen_not_i64(t_false, t_false);
4352        } else if (else_inc) {
4353            tcg_gen_addi_i64(t_false, t_false, 1);
4354        }
4355        tcg_gen_movcond_i64(c.cond, tcg_rd, c.value, zero, t_true, t_false);
4356    }
4357
4358    tcg_temp_free_i64(zero);
4359    a64_free_cc(&c);
4360
4361    if (!sf) {
4362        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
4363    }
4364}
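
/* For example "CSET W0, EQ" is "CSINC W0, WZR, WZR, NE": rn = rm = 31
 * with else_inc set, so the setcond special case writes 1 when EQ holds
 * and 0 otherwise; CSETM (else_inv set instead) negates that to 0/-1.
 */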
4365
4366static void handle_clz(DisasContext *s, unsigned int sf,
4367                       unsigned int rn, unsigned int rd)
4368{
4369    TCGv_i64 tcg_rd, tcg_rn;
4370    tcg_rd = cpu_reg(s, rd);
4371    tcg_rn = cpu_reg(s, rn);
4372
4373    if (sf) {
4374        tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64);
4375    } else {
4376        TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
4377        tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
4378        tcg_gen_clzi_i32(tcg_tmp32, tcg_tmp32, 32);
4379        tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
4380        tcg_temp_free_i32(tcg_tmp32);
4381    }
4382}
4383
4384static void handle_cls(DisasContext *s, unsigned int sf,
4385                       unsigned int rn, unsigned int rd)
4386{
4387    TCGv_i64 tcg_rd, tcg_rn;
4388    tcg_rd = cpu_reg(s, rd);
4389    tcg_rn = cpu_reg(s, rn);
4390
4391    if (sf) {
4392        tcg_gen_clrsb_i64(tcg_rd, tcg_rn);
4393    } else {
4394        TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
4395        tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
4396        tcg_gen_clrsb_i32(tcg_tmp32, tcg_tmp32);
4397        tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
4398        tcg_temp_free_i32(tcg_tmp32);
4399    }
4400}
4401
4402static void handle_rbit(DisasContext *s, unsigned int sf,
4403                        unsigned int rn, unsigned int rd)
4404{
4405    TCGv_i64 tcg_rd, tcg_rn;
4406    tcg_rd = cpu_reg(s, rd);
4407    tcg_rn = cpu_reg(s, rn);
4408
4409    if (sf) {
4410        gen_helper_rbit64(tcg_rd, tcg_rn);
4411    } else {
4412        TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
4413        tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
4414        gen_helper_rbit(tcg_tmp32, tcg_tmp32);
4415        tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
4416        tcg_temp_free_i32(tcg_tmp32);
4417    }
4418}
4419
4420/* REV with sf==1, opcode==3 ("REV64") */
4421static void handle_rev64(DisasContext *s, unsigned int sf,
4422                         unsigned int rn, unsigned int rd)
4423{
4424    if (!sf) {
4425        unallocated_encoding(s);
4426        return;
4427    }
4428    tcg_gen_bswap64_i64(cpu_reg(s, rd), cpu_reg(s, rn));
4429}
4430
4431/* REV (sf==0, opcode==2): byte-reverse a 32-bit word
4432 * REV32 (sf==1, opcode==2): byte-reverse each 32-bit word of a 64-bit register
4433 */
4434static void handle_rev32(DisasContext *s, unsigned int sf,
4435                         unsigned int rn, unsigned int rd)
4436{
4437    TCGv_i64 tcg_rd = cpu_reg(s, rd);
4438
4439    if (sf) {
4440        TCGv_i64 tcg_tmp = tcg_temp_new_i64();
4441        TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
4442
4443        /* bswap32_i64 requires zero high word */
4444        tcg_gen_ext32u_i64(tcg_tmp, tcg_rn);
4445        tcg_gen_bswap32_i64(tcg_rd, tcg_tmp);
4446        tcg_gen_shri_i64(tcg_tmp, tcg_rn, 32);
4447        tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp);
4448        tcg_gen_concat32_i64(tcg_rd, tcg_rd, tcg_tmp);
4449
4450        tcg_temp_free_i64(tcg_tmp);
4451    } else {
4452        tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, rn));
4453        tcg_gen_bswap32_i64(tcg_rd, tcg_rd);
4454    }
4455}
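
/* For example REV32 X0, X1 with X1 = 0x1122334455667788 byte-reverses
 * each 32-bit word independently, giving 0x4433221188776655.
 */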
4456
4457/* REV16 (opcode==1) */
4458static void handle_rev16(DisasContext *s, unsigned int sf,
4459                         unsigned int rn, unsigned int rd)
4460{
4461    TCGv_i64 tcg_rd = cpu_reg(s, rd);
4462    TCGv_i64 tcg_tmp = tcg_temp_new_i64();
4463    TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
4464    TCGv_i64 mask = tcg_const_i64(sf ? 0x00ff00ff00ff00ffull : 0x00ff00ff);
4465
4466    tcg_gen_shri_i64(tcg_tmp, tcg_rn, 8);
4467    tcg_gen_and_i64(tcg_rd, tcg_rn, mask);
4468    tcg_gen_and_i64(tcg_tmp, tcg_tmp, mask);
4469    tcg_gen_shli_i64(tcg_rd, tcg_rd, 8);
4470    tcg_gen_or_i64(tcg_rd, tcg_rd, tcg_tmp);
4471
4472    tcg_temp_free_i64(mask);
4473    tcg_temp_free_i64(tcg_tmp);
4474}
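
/* Worked example: REV16 with Xn = 0x1122334455667788 swaps the bytes in
 * each halfword to give 0x2211443366558877: the masked low bytes shift
 * up, the masked high bytes shift down, and the halves are ORed together.
 */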
4475
4476/* Data-processing (1 source)
4477 *   31  30  29  28             21 20     16 15    10 9    5 4    0
4478 * +----+---+---+-----------------+---------+--------+------+------+
4479 * | sf | 1 | S | 1 1 0 1 0 1 1 0 | opcode2 | opcode |  Rn  |  Rd  |
4480 * +----+---+---+-----------------+---------+--------+------+------+
4481 */
4482static void disas_data_proc_1src(DisasContext *s, uint32_t insn)
4483{
4484    unsigned int sf, opcode, rn, rd;
4485
4486    if (extract32(insn, 29, 1) || extract32(insn, 16, 5)) {
4487        unallocated_encoding(s);
4488        return;
4489    }
4490
4491    sf = extract32(insn, 31, 1);
4492    opcode = extract32(insn, 10, 6);
4493    rn = extract32(insn, 5, 5);
4494    rd = extract32(insn, 0, 5);
4495
4496    switch (opcode) {
4497    case 0: /* RBIT */
4498        handle_rbit(s, sf, rn, rd);
4499        break;
4500    case 1: /* REV16 */
4501        handle_rev16(s, sf, rn, rd);
4502        break;
4503    case 2: /* REV32 */
4504        handle_rev32(s, sf, rn, rd);
4505        break;
4506    case 3: /* REV64 */
4507        handle_rev64(s, sf, rn, rd);
4508        break;
4509    case 4: /* CLZ */
4510        handle_clz(s, sf, rn, rd);
4511        break;
4512    case 5: /* CLS */
4513        handle_cls(s, sf, rn, rd);
4514        break;
    default:
        unallocated_encoding(s);
        break;
4515    }
4516}
4517
4518static void handle_div(DisasContext *s, bool is_signed, unsigned int sf,
4519                       unsigned int rm, unsigned int rn, unsigned int rd)
4520{
4521    TCGv_i64 tcg_n, tcg_m, tcg_rd;
4522    tcg_rd = cpu_reg(s, rd);
4523
4524    if (!sf && is_signed) {
4525        tcg_n = new_tmp_a64(s);
4526        tcg_m = new_tmp_a64(s);
4527        tcg_gen_ext32s_i64(tcg_n, cpu_reg(s, rn));
4528        tcg_gen_ext32s_i64(tcg_m, cpu_reg(s, rm));
4529    } else {
4530        tcg_n = read_cpu_reg(s, rn, sf);
4531        tcg_m = read_cpu_reg(s, rm, sf);
4532    }
4533
4534    if (is_signed) {
4535        gen_helper_sdiv64(tcg_rd, tcg_n, tcg_m);
4536    } else {
4537        gen_helper_udiv64(tcg_rd, tcg_n, tcg_m);
4538    }
4539
4540    if (!sf) { /* zero extend final result */
4541        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
4542    }
4543}
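
/* The 32-bit signed case needs the explicit sign extension above: for
 * example SDIV W0, W1, W2 with W1 = 0xfffffffc (-4) and W2 = 2 must
 * compute -4 / 2 = -2, i.e. 0xfffffffe after the final zero extension.
 */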
4544
4545/* LSLV, LSRV, ASRV, RORV */
4546static void handle_shift_reg(DisasContext *s,
4547                             enum a64_shift_type shift_type, unsigned int sf,
4548                             unsigned int rm, unsigned int rn, unsigned int rd)
4549{
4550    TCGv_i64 tcg_shift = tcg_temp_new_i64();
4551    TCGv_i64 tcg_rd = cpu_reg(s, rd);
4552    TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
4553
4554    tcg_gen_andi_i64(tcg_shift, cpu_reg(s, rm), sf ? 63 : 31);
4555    shift_reg(tcg_rd, tcg_rn, sf, shift_type, tcg_shift);
4556    tcg_temp_free_i64(tcg_shift);
4557}
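
/* The AND above implements the architected modulo of the shift amount:
 * for example LSLV W0, W1, W2 with W2 = 33 shifts by 33 & 31 = 1 rather
 * than producing zero.
 */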
4558
4559/* CRC32[BHWX], CRC32C[BHWX] */
4560static void handle_crc32(DisasContext *s,
4561                         unsigned int sf, unsigned int sz, bool crc32c,
4562                         unsigned int rm, unsigned int rn, unsigned int rd)
4563{
4564    TCGv_i64 tcg_acc, tcg_val;
4565    TCGv_i32 tcg_bytes;
4566
4567    if (!arm_dc_feature(s, ARM_FEATURE_CRC)
4568        || (sf == 1 && sz != 3)
4569        || (sf == 0 && sz == 3)) {
4570        unallocated_encoding(s);
4571        return;
4572    }
4573
4574    if (sz == 3) {
4575        tcg_val = cpu_reg(s, rm);
4576    } else {
4577        uint64_t mask;
4578        switch (sz) {
4579        case 0:
4580            mask = 0xFF;
4581            break;
4582        case 1:
4583            mask = 0xFFFF;
4584            break;
4585        case 2:
4586            mask = 0xFFFFFFFF;
4587            break;
4588        default:
4589            g_assert_not_reached();
4590        }
4591        tcg_val = new_tmp_a64(s);
4592        tcg_gen_andi_i64(tcg_val, cpu_reg(s, rm), mask);
4593    }
4594
4595    tcg_acc = cpu_reg(s, rn);
4596    tcg_bytes = tcg_const_i32(1 << sz);
4597
4598    if (crc32c) {
4599        gen_helper_crc32c_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes);
4600    } else {
4601        gen_helper_crc32_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes);
4602    }
4603
4604    tcg_temp_free_i32(tcg_bytes);
4605}
4606
4607/* Data-processing (2 source)
4608 *   31   30  29 28             21 20  16 15    10 9    5 4    0
4609 * +----+---+---+-----------------+------+--------+------+------+
4610 * | sf | 0 | S | 1 1 0 1 0 1 1 0 |  Rm  | opcode |  Rn  |  Rd  |
4611 * +----+---+---+-----------------+------+--------+------+------+
4612 */
4613static void disas_data_proc_2src(DisasContext *s, uint32_t insn)
4614{
4615    unsigned int sf, rm, opcode, rn, rd;
4616    sf = extract32(insn, 31, 1);
4617    rm = extract32(insn, 16, 5);
4618    opcode = extract32(insn, 10, 6);
4619    rn = extract32(insn, 5, 5);
4620    rd = extract32(insn, 0, 5);
4621
4622    if (extract32(insn, 29, 1)) {
4623        unallocated_encoding(s);
4624        return;
4625    }
4626
4627    switch (opcode) {
4628    case 2: /* UDIV */
4629        handle_div(s, false, sf, rm, rn, rd);
4630        break;
4631    case 3: /* SDIV */
4632        handle_div(s, true, sf, rm, rn, rd);
4633        break;
4634    case 8: /* LSLV */
4635        handle_shift_reg(s, A64_SHIFT_TYPE_LSL, sf, rm, rn, rd);
4636        break;
4637    case 9: /* LSRV */
4638        handle_shift_reg(s, A64_SHIFT_TYPE_LSR, sf, rm, rn, rd);
4639        break;
4640    case 10: /* ASRV */
4641        handle_shift_reg(s, A64_SHIFT_TYPE_ASR, sf, rm, rn, rd);
4642        break;
4643    case 11: /* RORV */
4644        handle_shift_reg(s, A64_SHIFT_TYPE_ROR, sf, rm, rn, rd);
4645        break;
4646    case 16:
4647    case 17:
4648    case 18:
4649    case 19:
4650    case 20:
4651    case 21:
4652    case 22:
4653    case 23: /* CRC32 */
4654    {
4655        int sz = extract32(opcode, 0, 2);
4656        bool crc32c = extract32(opcode, 2, 1);
4657        handle_crc32(s, sf, sz, crc32c, rm, rn, rd);
4658        break;
4659    }
4660    default:
4661        unallocated_encoding(s);
4662        break;
4663    }
4664}
4665
4666/* Data processing - register */
4667static void disas_data_proc_reg(DisasContext *s, uint32_t insn)
4668{
4669    switch (extract32(insn, 24, 5)) {
4670    case 0x0a: /* Logical (shifted register) */
4671        disas_logic_reg(s, insn);
4672        break;
4673    case 0x0b: /* Add/subtract */
4674        if (insn & (1 << 21)) { /* (extended register) */
4675            disas_add_sub_ext_reg(s, insn);
4676        } else {
4677            disas_add_sub_reg(s, insn);
4678        }
4679        break;
4680    case 0x1b: /* Data-processing (3 source) */
4681        disas_data_proc_3src(s, insn);
4682        break;
4683    case 0x1a:
4684        switch (extract32(insn, 21, 3)) {
4685        case 0x0: /* Add/subtract (with carry) */
4686            disas_adc_sbc(s, insn);
4687            break;
4688        case 0x2: /* Conditional compare */
4689            disas_cc(s, insn); /* both imm and reg forms */
4690            break;
4691        case 0x4: /* Conditional select */
4692            disas_cond_select(s, insn);
4693            break;
4694        case 0x6: /* Data-processing */
4695            if (insn & (1 << 30)) { /* (1 source) */
4696                disas_data_proc_1src(s, insn);
4697            } else {            /* (2 source) */
4698                disas_data_proc_2src(s, insn);
4699            }
4700            break;
4701        default:
4702            unallocated_encoding(s);
4703            break;
4704        }
4705        break;
4706    default:
4707        unallocated_encoding(s);
4708        break;
4709    }
4710}
4711
4712static void handle_fp_compare(DisasContext *s, int size,
4713                              unsigned int rn, unsigned int rm,
4714                              bool cmp_with_zero, bool signal_all_nans)
4715{
4716    TCGv_i64 tcg_flags = tcg_temp_new_i64();
4717    TCGv_ptr fpst = get_fpstatus_ptr(size == MO_16);
4718
4719    if (size == MO_64) {
4720        TCGv_i64 tcg_vn, tcg_vm;
4721
4722        tcg_vn = read_fp_dreg(s, rn);
4723        if (cmp_with_zero) {
4724            tcg_vm = tcg_const_i64(0);
4725        } else {
4726            tcg_vm = read_fp_dreg(s, rm);
4727        }
4728        if (signal_all_nans) {
4729            gen_helper_vfp_cmped_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
4730        } else {
4731            gen_helper_vfp_cmpd_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
4732        }
4733        tcg_temp_free_i64(tcg_vn);
4734        tcg_temp_free_i64(tcg_vm);
4735    } else {
4736        TCGv_i32 tcg_vn = tcg_temp_new_i32();
4737        TCGv_i32 tcg_vm = tcg_temp_new_i32();
4738
4739        read_vec_element_i32(s, tcg_vn, rn, 0, size);
4740        if (cmp_with_zero) {
4741            tcg_gen_movi_i32(tcg_vm, 0);
4742        } else {
4743            read_vec_element_i32(s, tcg_vm, rm, 0, size);
4744        }
4745
4746        switch (size) {
4747        case MO_32:
4748            if (signal_all_nans) {
4749                gen_helper_vfp_cmpes_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
4750            } else {
4751                gen_helper_vfp_cmps_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
4752            }
4753            break;
4754        case MO_16:
4755            if (signal_all_nans) {
4756                gen_helper_vfp_cmpeh_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
4757            } else {
4758                gen_helper_vfp_cmph_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
4759            }
4760            break;
4761        default:
4762            g_assert_not_reached();
4763        }
4764
4765        tcg_temp_free_i32(tcg_vn);
4766        tcg_temp_free_i32(tcg_vm);
4767    }
4768
4769    tcg_temp_free_ptr(fpst);
4770
4771    gen_set_nzcv(tcg_flags);
4772
4773    tcg_temp_free_i64(tcg_flags);
4774}
4775
4776/* Floating point compare
4777 *   31  30  29 28       24 23  22  21 20  16 15 14 13  10    9    5 4     0
4778 * +---+---+---+-----------+------+---+------+-----+---------+------+-------+
4779 * | M | 0 | S | 1 1 1 1 0 | type | 1 |  Rm  | op  | 1 0 0 0 |  Rn  |  op2  |
4780 * +---+---+---+-----------+------+---+------+-----+---------+------+-------+
4781 */
4782static void disas_fp_compare(DisasContext *s, uint32_t insn)
4783{
4784    unsigned int mos, type, rm, op, rn, opc, op2r;
4785    int size;
4786
4787    mos = extract32(insn, 29, 3);
4788    type = extract32(insn, 22, 2);
4789    rm = extract32(insn, 16, 5);
4790    op = extract32(insn, 14, 2);
4791    rn = extract32(insn, 5, 5);
4792    opc = extract32(insn, 3, 2);
4793    op2r = extract32(insn, 0, 3);
4794
4795    if (mos || op || op2r) {
4796        unallocated_encoding(s);
4797        return;
4798    }
4799
4800    switch (type) {
4801    case 0:
4802        size = MO_32;
4803        break;
4804    case 1:
4805        size = MO_64;
4806        break;
4807    case 3:
4808        size = MO_16;
4809        if (arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
4810            break;
4811        }
4812        /* fallthru */
4813    default:
4814        unallocated_encoding(s);
4815        return;
4816    }
4817
4818    if (!fp_access_check(s)) {
4819        return;
4820    }
4821
4822    handle_fp_compare(s, size, rn, rm, opc & 1, opc & 2);
4823}
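
/* Here opc & 1 selects the compare-with-zero forms (the #0.0 variants
 * of FCMP/FCMPE) and opc & 2 the signaling forms (FCMPE), which raise
 * Invalid Operation for quiet NaN operands as well as signaling ones.
 */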
4824
4825/* Floating point conditional compare
4826 *   31  30  29 28       24 23  22  21 20  16 15  12 11 10 9    5  4   3    0
4827 * +---+---+---+-----------+------+---+------+------+-----+------+----+------+
4828 * | M | 0 | S | 1 1 1 1 0 | type | 1 |  Rm  | cond | 0 1 |  Rn  | op | nzcv |
4829 * +---+---+---+-----------+------+---+------+------+-----+------+----+------+
4830 */
4831static void disas_fp_ccomp(DisasContext *s, uint32_t insn)
4832{
4833    unsigned int mos, type, rm, cond, rn, op, nzcv;
4834    TCGv_i64 tcg_flags;
4835    TCGLabel *label_continue = NULL;
4836    int size;
4837
4838    mos = extract32(insn, 29, 3);
4839    type = extract32(insn, 22, 2);
4840    rm = extract32(insn, 16, 5);
4841    cond = extract32(insn, 12, 4);
4842    rn = extract32(insn, 5, 5);
4843    op = extract32(insn, 4, 1);
4844    nzcv = extract32(insn, 0, 4);
4845
4846    if (mos) {
4847        unallocated_encoding(s);
4848        return;
4849    }
4850
4851    switch (type) {
4852    case 0:
4853        size = MO_32;
4854        break;
4855    case 1:
4856        size = MO_64;
4857        break;
4858    case 3:
4859        size = MO_16;
4860        if (arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
4861            break;
4862        }
4863        /* fallthru */
4864    default:
4865        unallocated_encoding(s);
4866        return;
4867    }
4868
4869    if (!fp_access_check(s)) {
4870        return;
4871    }
4872
4873    if (cond < 0x0e) { /* not always */
4874        TCGLabel *label_match = gen_new_label();
4875        label_continue = gen_new_label();
4876        arm_gen_test_cc(cond, label_match);
4877        /* nomatch: */
4878        tcg_flags = tcg_const_i64(nzcv << 28);
4879        gen_set_nzcv(tcg_flags);
4880        tcg_temp_free_i64(tcg_flags);
4881        tcg_gen_br(label_continue);
4882        gen_set_label(label_match);
4883    }
4884
4885    handle_fp_compare(s, size, rn, rm, false, op);
4886
4887    if (cond < 0x0e) {
4888        gen_set_label(label_continue);
4889    }
4890}
4891
4892/* Floating point conditional select
4893 *   31  30  29 28       24 23  22  21 20  16 15  12 11 10 9    5 4    0
4894 * +---+---+---+-----------+------+---+------+------+-----+------+------+
4895 * | M | 0 | S | 1 1 1 1 0 | type | 1 |  Rm  | cond | 1 1 |  Rn  |  Rd  |
4896 * +---+---+---+-----------+------+---+------+------+-----+------+------+
4897 */
4898static void disas_fp_csel(DisasContext *s, uint32_t insn)
4899{
4900    unsigned int mos, type, rm, cond, rn, rd;
4901    TCGv_i64 t_true, t_false, t_zero;
4902    DisasCompare64 c;
4903    TCGMemOp sz;
4904
4905    mos = extract32(insn, 29, 3);
4906    type = extract32(insn, 22, 2);
4907    rm = extract32(insn, 16, 5);
4908    cond = extract32(insn, 12, 4);
4909    rn = extract32(insn, 5, 5);
4910    rd = extract32(insn, 0, 5);
4911
4912    if (mos) {
4913        unallocated_encoding(s);
4914        return;
4915    }
4916
4917    switch (type) {
4918    case 0:
4919        sz = MO_32;
4920        break;
4921    case 1:
4922        sz = MO_64;
4923        break;
4924    case 3:
4925        sz = MO_16;
4926        if (arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
4927            break;
4928        }
4929        /* fallthru */
4930    default:
4931        unallocated_encoding(s);
4932        return;
4933    }
4934
4935    if (!fp_access_check(s)) {
4936        return;
4937    }
4938
4939    /* Zero extend sreg & hreg inputs to 64 bits now.  */
4940    t_true = tcg_temp_new_i64();
4941    t_false = tcg_temp_new_i64();
4942    read_vec_element(s, t_true, rn, 0, sz);
4943    read_vec_element(s, t_false, rm, 0, sz);
4944
4945    a64_test_cc(&c, cond);
4946    t_zero = tcg_const_i64(0);
4947    tcg_gen_movcond_i64(c.cond, t_true, c.value, t_zero, t_true, t_false);
4948    tcg_temp_free_i64(t_zero);
4949    tcg_temp_free_i64(t_false);
4950    a64_free_cc(&c);
4951
    /* Note that sregs & hregs write back zeros to the high bits,
     * and we've already done the zero-extension.
     */
4954    write_fp_dreg(s, rd, t_true);
4955    tcg_temp_free_i64(t_true);
4956}
4957
4958/* Floating-point data-processing (1 source) - half precision */
4959static void handle_fp_1src_half(DisasContext *s, int opcode, int rd, int rn)
4960{
4961    TCGv_ptr fpst = NULL;
4962    TCGv_i32 tcg_op = read_fp_hreg(s, rn);
4963    TCGv_i32 tcg_res = tcg_temp_new_i32();
4964
4965    switch (opcode) {
4966    case 0x0: /* FMOV */
4967        tcg_gen_mov_i32(tcg_res, tcg_op);
4968        break;
4969    case 0x1: /* FABS */
4970        tcg_gen_andi_i32(tcg_res, tcg_op, 0x7fff);
4971        break;
4972    case 0x2: /* FNEG */
4973        tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
4974        break;
4975    case 0x3: /* FSQRT */
4976        fpst = get_fpstatus_ptr(true);
4977        gen_helper_sqrt_f16(tcg_res, tcg_op, fpst);
4978        break;
4979    case 0x8: /* FRINTN */
4980    case 0x9: /* FRINTP */
4981    case 0xa: /* FRINTM */
4982    case 0xb: /* FRINTZ */
4983    case 0xc: /* FRINTA */
4984    {
4985        TCGv_i32 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(opcode & 7));
4986        fpst = get_fpstatus_ptr(true);
4987
4988        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
4989        gen_helper_advsimd_rinth(tcg_res, tcg_op, fpst);
4990
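        /* set_rmode wrote the previous rounding mode back into tcg_rmode,
         * so this second call restores the original FPCR rounding mode.
         */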
4991        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
4992        tcg_temp_free_i32(tcg_rmode);
4993        break;
4994    }
4995    case 0xe: /* FRINTX */
4996        fpst = get_fpstatus_ptr(true);
4997        gen_helper_advsimd_rinth_exact(tcg_res, tcg_op, fpst);
4998        break;
4999    case 0xf: /* FRINTI */
5000        fpst = get_fpstatus_ptr(true);
5001        gen_helper_advsimd_rinth(tcg_res, tcg_op, fpst);
5002        break;
5003    default:
        g_assert_not_reached();
5005    }
5006
5007    write_fp_sreg(s, rd, tcg_res);
5008
5009    if (fpst) {
5010        tcg_temp_free_ptr(fpst);
5011    }
5012    tcg_temp_free_i32(tcg_op);
5013    tcg_temp_free_i32(tcg_res);
5014}
5015
5016/* Floating-point data-processing (1 source) - single precision */
5017static void handle_fp_1src_single(DisasContext *s, int opcode, int rd, int rn)
5018{
5019    TCGv_ptr fpst;
5020    TCGv_i32 tcg_op;
5021    TCGv_i32 tcg_res;
5022
5023    fpst = get_fpstatus_ptr(false);
5024    tcg_op = read_fp_sreg(s, rn);
5025    tcg_res = tcg_temp_new_i32();
5026
5027    switch (opcode) {
5028    case 0x0: /* FMOV */
5029        tcg_gen_mov_i32(tcg_res, tcg_op);
5030        break;
5031    case 0x1: /* FABS */
5032        gen_helper_vfp_abss(tcg_res, tcg_op);
5033        break;
5034    case 0x2: /* FNEG */
5035        gen_helper_vfp_negs(tcg_res, tcg_op);
5036        break;
5037    case 0x3: /* FSQRT */
5038        gen_helper_vfp_sqrts(tcg_res, tcg_op, cpu_env);
5039        break;
5040    case 0x8: /* FRINTN */
5041    case 0x9: /* FRINTP */
5042    case 0xa: /* FRINTM */
5043    case 0xb: /* FRINTZ */
5044    case 0xc: /* FRINTA */
5045    {
5046        TCGv_i32 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(opcode & 7));
5047
5048        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
5049        gen_helper_rints(tcg_res, tcg_op, fpst);
5050
5051        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
5052        tcg_temp_free_i32(tcg_rmode);
5053        break;
5054    }
5055    case 0xe: /* FRINTX */
5056        gen_helper_rints_exact(tcg_res, tcg_op, fpst);
5057        break;
5058    case 0xf: /* FRINTI */
5059        gen_helper_rints(tcg_res, tcg_op, fpst);
5060        break;
5061    default:
        g_assert_not_reached();
5063    }
5064
5065    write_fp_sreg(s, rd, tcg_res);
5066
5067    tcg_temp_free_ptr(fpst);
5068    tcg_temp_free_i32(tcg_op);
5069    tcg_temp_free_i32(tcg_res);
5070}
5071
5072/* Floating-point data-processing (1 source) - double precision */
5073static void handle_fp_1src_double(DisasContext *s, int opcode, int rd, int rn)
5074{
5075    TCGv_ptr fpst;
5076    TCGv_i64 tcg_op;
5077    TCGv_i64 tcg_res;
5078
5079    switch (opcode) {
5080    case 0x0: /* FMOV */
5081        gen_gvec_fn2(s, false, rd, rn, tcg_gen_gvec_mov, 0);
5082        return;
5083    }
5084
5085    fpst = get_fpstatus_ptr(false);
5086    tcg_op = read_fp_dreg(s, rn);
5087    tcg_res = tcg_temp_new_i64();
5088
5089    switch (opcode) {
5090    case 0x1: /* FABS */
5091        gen_helper_vfp_absd(tcg_res, tcg_op);
5092        break;
5093    case 0x2: /* FNEG */
5094        gen_helper_vfp_negd(tcg_res, tcg_op);
5095        break;
5096    case 0x3: /* FSQRT */
5097        gen_helper_vfp_sqrtd(tcg_res, tcg_op, cpu_env);
5098        break;
5099    case 0x8: /* FRINTN */
5100    case 0x9: /* FRINTP */
5101    case 0xa: /* FRINTM */
5102    case 0xb: /* FRINTZ */
5103    case 0xc: /* FRINTA */
5104    {
5105        TCGv_i32 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(opcode & 7));
5106
5107        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
5108        gen_helper_rintd(tcg_res, tcg_op, fpst);
5109
5110        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
5111        tcg_temp_free_i32(tcg_rmode);
5112        break;
5113    }
5114    case 0xe: /* FRINTX */
5115        gen_helper_rintd_exact(tcg_res, tcg_op, fpst);
5116        break;
5117    case 0xf: /* FRINTI */
5118        gen_helper_rintd(tcg_res, tcg_op, fpst);
5119        break;
5120    default:
        g_assert_not_reached();
5122    }
5123
5124    write_fp_dreg(s, rd, tcg_res);
5125
5126    tcg_temp_free_ptr(fpst);
5127    tcg_temp_free_i64(tcg_op);
5128    tcg_temp_free_i64(tcg_res);
5129}
5130
5131static void handle_fp_fcvt(DisasContext *s, int opcode,
5132                           int rd, int rn, int dtype, int ntype)
5133{
5134    switch (ntype) {
5135    case 0x0:
5136    {
5137        TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
5138        if (dtype == 1) {
5139            /* Single to double */
5140            TCGv_i64 tcg_rd = tcg_temp_new_i64();
5141            gen_helper_vfp_fcvtds(tcg_rd, tcg_rn, cpu_env);
5142            write_fp_dreg(s, rd, tcg_rd);
5143            tcg_temp_free_i64(tcg_rd);
5144        } else {
5145            /* Single to half */
5146            TCGv_i32 tcg_rd = tcg_temp_new_i32();
5147            TCGv_i32 ahp = get_ahp_flag();
5148            TCGv_ptr fpst = get_fpstatus_ptr(false);
5149
5150            gen_helper_vfp_fcvt_f32_to_f16(tcg_rd, tcg_rn, fpst, ahp);
5151            /* write_fp_sreg is OK here because top half of tcg_rd is zero */
5152            write_fp_sreg(s, rd, tcg_rd);
5153            tcg_temp_free_i32(tcg_rd);
5154            tcg_temp_free_i32(ahp);
5155            tcg_temp_free_ptr(fpst);
5156        }
5157        tcg_temp_free_i32(tcg_rn);
5158        break;
5159    }
5160    case 0x1:
5161    {
5162        TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
5163        TCGv_i32 tcg_rd = tcg_temp_new_i32();
5164        if (dtype == 0) {
5165            /* Double to single */
5166            gen_helper_vfp_fcvtsd(tcg_rd, tcg_rn, cpu_env);
5167        } else {
5168            TCGv_ptr fpst = get_fpstatus_ptr(false);
5169            TCGv_i32 ahp = get_ahp_flag();
5170            /* Double to half */
5171            gen_helper_vfp_fcvt_f64_to_f16(tcg_rd, tcg_rn, fpst, ahp);
5172            /* write_fp_sreg is OK here because top half of tcg_rd is zero */
5173            tcg_temp_free_ptr(fpst);
5174            tcg_temp_free_i32(ahp);
5175        }
5176        write_fp_sreg(s, rd, tcg_rd);
5177        tcg_temp_free_i32(tcg_rd);
5178        tcg_temp_free_i64(tcg_rn);
5179        break;
5180    }
5181    case 0x3:
5182    {
5183        TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
5184        TCGv_ptr tcg_fpst = get_fpstatus_ptr(false);
5185        TCGv_i32 tcg_ahp = get_ahp_flag();
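        /* Only the low 16 bits of the source hold a half-precision value,
         * so explicitly zero-extend before converting.
         */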
5186        tcg_gen_ext16u_i32(tcg_rn, tcg_rn);
5187        if (dtype == 0) {
5188            /* Half to single */
5189            TCGv_i32 tcg_rd = tcg_temp_new_i32();
5190            gen_helper_vfp_fcvt_f16_to_f32(tcg_rd, tcg_rn, tcg_fpst, tcg_ahp);
5191            write_fp_sreg(s, rd, tcg_rd);
5192            tcg_temp_free_ptr(tcg_fpst);
5193            tcg_temp_free_i32(tcg_ahp);
5194            tcg_temp_free_i32(tcg_rd);
5195        } else {
5196            /* Half to double */
5197            TCGv_i64 tcg_rd = tcg_temp_new_i64();
5198            gen_helper_vfp_fcvt_f16_to_f64(tcg_rd, tcg_rn, tcg_fpst, tcg_ahp);
5199            write_fp_dreg(s, rd, tcg_rd);
5200            tcg_temp_free_i64(tcg_rd);
5201        }
5202        tcg_temp_free_i32(tcg_rn);
5203        break;
5204    }
5205    default:
        g_assert_not_reached();
5207    }
5208}
5209
5210/* Floating point data-processing (1 source)
5211 *   31  30  29 28       24 23  22  21 20    15 14       10 9    5 4    0
5212 * +---+---+---+-----------+------+---+--------+-----------+------+------+
5213 * | M | 0 | S | 1 1 1 1 0 | type | 1 | opcode | 1 0 0 0 0 |  Rn  |  Rd  |
5214 * +---+---+---+-----------+------+---+--------+-----------+------+------+
5215 */
5216static void disas_fp_1src(DisasContext *s, uint32_t insn)
5217{
5218    int type = extract32(insn, 22, 2);
5219    int opcode = extract32(insn, 15, 6);
5220    int rn = extract32(insn, 5, 5);
5221    int rd = extract32(insn, 0, 5);
5222
5223    switch (opcode) {
5224    case 0x4: case 0x5: case 0x7:
5225    {
5226        /* FCVT between half, single and double precision */
5227        int dtype = extract32(opcode, 0, 2);
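        /* type 2 is not a valid FP type, and converting to the source
         * precision (dtype == type) is likewise unallocated.
         */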
5228        if (type == 2 || dtype == type) {
5229            unallocated_encoding(s);
5230            return;
5231        }
5232        if (!fp_access_check(s)) {
5233            return;
5234        }
5235
5236        handle_fp_fcvt(s, opcode, rd, rn, dtype, type);
5237        break;
5238    }
5239    case 0x0 ... 0x3:
5240    case 0x8 ... 0xc:
5241    case 0xe ... 0xf:
5242        /* 32-to-32 and 64-to-64 ops */
5243        switch (type) {
5244        case 0:
5245            if (!fp_access_check(s)) {
5246                return;
5247            }
5248
5249            handle_fp_1src_single(s, opcode, rd, rn);
5250            break;
5251        case 1:
5252            if (!fp_access_check(s)) {
5253                return;
5254            }
5255
5256            handle_fp_1src_double(s, opcode, rd, rn);
5257            break;
5258        case 3:
5259            if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
5260                unallocated_encoding(s);
5261                return;
5262            }
5263
5264            if (!fp_access_check(s)) {
5265                return;
5266            }
5267
5268            handle_fp_1src_half(s, opcode, rd, rn);
5269            break;
5270        default:
5271            unallocated_encoding(s);
5272        }
5273        break;
5274    default:
5275        unallocated_encoding(s);
5276        break;
5277    }
5278}
5279
5280/* Floating-point data-processing (2 source) - single precision */
5281static void handle_fp_2src_single(DisasContext *s, int opcode,
5282                                  int rd, int rn, int rm)
5283{
5284    TCGv_i32 tcg_op1;
5285    TCGv_i32 tcg_op2;
5286    TCGv_i32 tcg_res;
5287    TCGv_ptr fpst;
5288
5289    tcg_res = tcg_temp_new_i32();
5290    fpst = get_fpstatus_ptr(false);
5291    tcg_op1 = read_fp_sreg(s, rn);
5292    tcg_op2 = read_fp_sreg(s, rm);
5293
5294    switch (opcode) {
5295    case 0x0: /* FMUL */
5296        gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
5297        break;
5298    case 0x1: /* FDIV */
5299        gen_helper_vfp_divs(tcg_res, tcg_op1, tcg_op2, fpst);
5300        break;
5301    case 0x2: /* FADD */
5302        gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
5303        break;
5304    case 0x3: /* FSUB */
5305        gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
5306        break;
5307    case 0x4: /* FMAX */
5308        gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
5309        break;
5310    case 0x5: /* FMIN */
5311        gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
5312        break;
5313    case 0x6: /* FMAXNM */
5314        gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
5315        break;
5316    case 0x7: /* FMINNM */
5317        gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
5318        break;
5319    case 0x8: /* FNMUL */
5320        gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
5321        gen_helper_vfp_negs(tcg_res, tcg_res);
5322        break;
    default:
        g_assert_not_reached();
    }
5324
5325    write_fp_sreg(s, rd, tcg_res);
5326
5327    tcg_temp_free_ptr(fpst);
5328    tcg_temp_free_i32(tcg_op1);
5329    tcg_temp_free_i32(tcg_op2);
5330    tcg_temp_free_i32(tcg_res);
5331}
5332
5333/* Floating-point data-processing (2 source) - double precision */
5334static void handle_fp_2src_double(DisasContext *s, int opcode,
5335                                  int rd, int rn, int rm)
5336{
5337    TCGv_i64 tcg_op1;
5338    TCGv_i64 tcg_op2;
5339    TCGv_i64 tcg_res;
5340    TCGv_ptr fpst;
5341
5342    tcg_res = tcg_temp_new_i64();
5343    fpst = get_fpstatus_ptr(false);
5344    tcg_op1 = read_fp_dreg(s, rn);
5345    tcg_op2 = read_fp_dreg(s, rm);
5346
5347    switch (opcode) {
5348    case 0x0: /* FMUL */
5349        gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
5350        break;
5351    case 0x1: /* FDIV */
5352        gen_helper_vfp_divd(tcg_res, tcg_op1, tcg_op2, fpst);
5353        break;
5354    case 0x2: /* FADD */
5355        gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
5356        break;
5357    case 0x3: /* FSUB */
5358        gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
5359        break;
5360    case 0x4: /* FMAX */
5361        gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
5362        break;
5363    case 0x5: /* FMIN */
5364        gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
5365        break;
5366    case 0x6: /* FMAXNM */
5367        gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
5368        break;
5369    case 0x7: /* FMINNM */
5370        gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
5371        break;
5372    case 0x8: /* FNMUL */
5373        gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
5374        gen_helper_vfp_negd(tcg_res, tcg_res);
5375        break;
    default:
        g_assert_not_reached();
    }
5377
5378    write_fp_dreg(s, rd, tcg_res);
5379
5380    tcg_temp_free_ptr(fpst);
5381    tcg_temp_free_i64(tcg_op1);
5382    tcg_temp_free_i64(tcg_op2);
5383    tcg_temp_free_i64(tcg_res);
5384}
5385
5386/* Floating-point data-processing (2 source) - half precision */
5387static void handle_fp_2src_half(DisasContext *s, int opcode,
5388                                int rd, int rn, int rm)
5389{
5390    TCGv_i32 tcg_op1;
5391    TCGv_i32 tcg_op2;
5392    TCGv_i32 tcg_res;
5393    TCGv_ptr fpst;
5394
5395    tcg_res = tcg_temp_new_i32();
5396    fpst = get_fpstatus_ptr(true);
5397    tcg_op1 = read_fp_hreg(s, rn);
5398    tcg_op2 = read_fp_hreg(s, rm);
5399
5400    switch (opcode) {
5401    case 0x0: /* FMUL */
5402        gen_helper_advsimd_mulh(tcg_res, tcg_op1, tcg_op2, fpst);
5403        break;
5404    case 0x1: /* FDIV */
5405        gen_helper_advsimd_divh(tcg_res, tcg_op1, tcg_op2, fpst);
5406        break;
5407    case 0x2: /* FADD */
5408        gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
5409        break;
5410    case 0x3: /* FSUB */
5411        gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
5412        break;
5413    case 0x4: /* FMAX */
5414        gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
5415        break;
5416    case 0x5: /* FMIN */
5417        gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
5418        break;
5419    case 0x6: /* FMAXNM */
5420        gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
5421        break;
5422    case 0x7: /* FMINNM */
5423        gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
5424        break;
5425    case 0x8: /* FNMUL */
5426        gen_helper_advsimd_mulh(tcg_res, tcg_op1, tcg_op2, fpst);
5427        tcg_gen_xori_i32(tcg_res, tcg_res, 0x8000);
5428        break;
5429    default:
5430        g_assert_not_reached();
5431    }
5432
5433    write_fp_sreg(s, rd, tcg_res);
5434
5435    tcg_temp_free_ptr(fpst);
5436    tcg_temp_free_i32(tcg_op1);
5437    tcg_temp_free_i32(tcg_op2);
5438    tcg_temp_free_i32(tcg_res);
5439}
5440
5441/* Floating point data-processing (2 source)
5442 *   31  30  29 28       24 23  22  21 20  16 15    12 11 10 9    5 4    0
5443 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
5444 * | M | 0 | S | 1 1 1 1 0 | type | 1 |  Rm  | opcode | 1 0 |  Rn  |  Rd  |
5445 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
5446 */
5447static void disas_fp_2src(DisasContext *s, uint32_t insn)
5448{
5449    int type = extract32(insn, 22, 2);
5450    int rd = extract32(insn, 0, 5);
5451    int rn = extract32(insn, 5, 5);
5452    int rm = extract32(insn, 16, 5);
5453    int opcode = extract32(insn, 12, 4);
5454
5455    if (opcode > 8) {
5456        unallocated_encoding(s);
5457        return;
5458    }
5459
5460    switch (type) {
5461    case 0:
5462        if (!fp_access_check(s)) {
5463            return;
5464        }
5465        handle_fp_2src_single(s, opcode, rd, rn, rm);
5466        break;
5467    case 1:
5468        if (!fp_access_check(s)) {
5469            return;
5470        }
5471        handle_fp_2src_double(s, opcode, rd, rn, rm);
5472        break;
5473    case 3:
5474        if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
5475            unallocated_encoding(s);
5476            return;
5477        }
5478        if (!fp_access_check(s)) {
5479            return;
5480        }
5481        handle_fp_2src_half(s, opcode, rd, rn, rm);
5482        break;
5483    default:
5484        unallocated_encoding(s);
5485    }
5486}
5487
5488/* Floating-point data-processing (3 source) - single precision */
5489static void handle_fp_3src_single(DisasContext *s, bool o0, bool o1,
5490                                  int rd, int rn, int rm, int ra)
5491{
5492    TCGv_i32 tcg_op1, tcg_op2, tcg_op3;
5493    TCGv_i32 tcg_res = tcg_temp_new_i32();
5494    TCGv_ptr fpst = get_fpstatus_ptr(false);
5495
5496    tcg_op1 = read_fp_sreg(s, rn);
5497    tcg_op2 = read_fp_sreg(s, rm);
5498    tcg_op3 = read_fp_sreg(s, ra);
5499
5500    /* These are fused multiply-add, and must be done as one
5501     * floating point operation with no rounding between the
5502     * multiplication and addition steps.
     * NB that doing the negations here as separate steps is
     * correct: an input NaN should come out with its sign bit
     * flipped if it is a negated input.
     */
    if (o1) {
5508        gen_helper_vfp_negs(tcg_op3, tcg_op3);
5509    }
5510
5511    if (o0 != o1) {
5512        gen_helper_vfp_negs(tcg_op1, tcg_op1);
5513    }
5514
5515    gen_helper_vfp_muladds(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);
5516
5517    write_fp_sreg(s, rd, tcg_res);
5518
5519    tcg_temp_free_ptr(fpst);
5520    tcg_temp_free_i32(tcg_op1);
5521    tcg_temp_free_i32(tcg_op2);
5522    tcg_temp_free_i32(tcg_op3);
5523    tcg_temp_free_i32(tcg_res);
5524}
5525
5526/* Floating-point data-processing (3 source) - double precision */
5527static void handle_fp_3src_double(DisasContext *s, bool o0, bool o1,
5528                                  int rd, int rn, int rm, int ra)
5529{
5530    TCGv_i64 tcg_op1, tcg_op2, tcg_op3;
5531    TCGv_i64 tcg_res = tcg_temp_new_i64();
5532    TCGv_ptr fpst = get_fpstatus_ptr(false);
5533
5534    tcg_op1 = read_fp_dreg(s, rn);
5535    tcg_op2 = read_fp_dreg(s, rm);
5536    tcg_op3 = read_fp_dreg(s, ra);
5537
5538    /* These are fused multiply-add, and must be done as one
5539     * floating point operation with no rounding between the
5540     * multiplication and addition steps.
     * NB that doing the negations here as separate steps is
     * correct: an input NaN should come out with its sign bit
     * flipped if it is a negated input.
     */
    if (o1) {
5546        gen_helper_vfp_negd(tcg_op3, tcg_op3);
5547    }
5548
5549    if (o0 != o1) {
5550        gen_helper_vfp_negd(tcg_op1, tcg_op1);
5551    }
5552
5553    gen_helper_vfp_muladdd(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);
5554
5555    write_fp_dreg(s, rd, tcg_res);
5556
5557    tcg_temp_free_ptr(fpst);
5558    tcg_temp_free_i64(tcg_op1);
5559    tcg_temp_free_i64(tcg_op2);
5560    tcg_temp_free_i64(tcg_op3);
5561    tcg_temp_free_i64(tcg_res);
5562}
5563
5564/* Floating-point data-processing (3 source) - half precision */
5565static void handle_fp_3src_half(DisasContext *s, bool o0, bool o1,
5566                                int rd, int rn, int rm, int ra)
5567{
5568    TCGv_i32 tcg_op1, tcg_op2, tcg_op3;
5569    TCGv_i32 tcg_res = tcg_temp_new_i32();
5570    TCGv_ptr fpst = get_fpstatus_ptr(true);
5571
5572    tcg_op1 = read_fp_hreg(s, rn);
5573    tcg_op2 = read_fp_hreg(s, rm);
5574    tcg_op3 = read_fp_hreg(s, ra);
5575
5576    /* These are fused multiply-add, and must be done as one
5577     * floating point operation with no rounding between the
5578     * multiplication and addition steps.
     * NB that doing the negations here as separate steps is
     * correct: an input NaN should come out with its sign bit
     * flipped if it is a negated input.
     */
    if (o1) {
5584        tcg_gen_xori_i32(tcg_op3, tcg_op3, 0x8000);
5585    }
5586
5587    if (o0 != o1) {
5588        tcg_gen_xori_i32(tcg_op1, tcg_op1, 0x8000);
5589    }
5590
5591    gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);
5592
5593    write_fp_sreg(s, rd, tcg_res);
5594
5595    tcg_temp_free_ptr(fpst);
5596    tcg_temp_free_i32(tcg_op1);
5597    tcg_temp_free_i32(tcg_op2);
5598    tcg_temp_free_i32(tcg_op3);
5599    tcg_temp_free_i32(tcg_res);
5600}
5601
5602/* Floating point data-processing (3 source)
5603 *   31  30  29 28       24 23  22  21  20  16  15  14  10 9    5 4    0
5604 * +---+---+---+-----------+------+----+------+----+------+------+------+
5605 * | M | 0 | S | 1 1 1 1 1 | type | o1 |  Rm  | o0 |  Ra  |  Rn  |  Rd  |
5606 * +---+---+---+-----------+------+----+------+----+------+------+------+
5607 */
5608static void disas_fp_3src(DisasContext *s, uint32_t insn)
5609{
5610    int type = extract32(insn, 22, 2);
5611    int rd = extract32(insn, 0, 5);
5612    int rn = extract32(insn, 5, 5);
5613    int ra = extract32(insn, 10, 5);
5614    int rm = extract32(insn, 16, 5);
5615    bool o0 = extract32(insn, 15, 1);
5616    bool o1 = extract32(insn, 21, 1);
5617
5618    switch (type) {
5619    case 0:
5620        if (!fp_access_check(s)) {
5621            return;
5622        }
5623        handle_fp_3src_single(s, o0, o1, rd, rn, rm, ra);
5624        break;
5625    case 1:
5626        if (!fp_access_check(s)) {
5627            return;
5628        }
5629        handle_fp_3src_double(s, o0, o1, rd, rn, rm, ra);
5630        break;
5631    case 3:
5632        if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
5633            unallocated_encoding(s);
5634            return;
5635        }
5636        if (!fp_access_check(s)) {
5637            return;
5638        }
5639        handle_fp_3src_half(s, o0, o1, rd, rn, rm, ra);
5640        break;
5641    default:
5642        unallocated_encoding(s);
5643    }
5644}
5645
5646/* The imm8 encodes the sign bit, enough bits to represent an exponent in
5647 * the range 01....1xx to 10....0xx, and the most significant 4 bits of
5648 * the mantissa; see VFPExpandImm() in the v8 ARM ARM.
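 *
 * As a worked example (illustrative values, not from the ARM ARM text):
 * imm8 == 0x70 expands to 1.0 at every size; for MO_32 the code below
 * builds 0x3e00 | (0x30 << 3) = 0x3f80, then shifts to 0x3f800000.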
5649 */
5650uint64_t vfp_expand_imm(int size, uint8_t imm8)
5651{
5652    uint64_t imm;
5653
5654    switch (size) {
5655    case MO_64:
5656        imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
5657            (extract32(imm8, 6, 1) ? 0x3fc0 : 0x4000) |
5658            extract32(imm8, 0, 6);
5659        imm <<= 48;
5660        break;
5661    case MO_32:
5662        imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
5663            (extract32(imm8, 6, 1) ? 0x3e00 : 0x4000) |
5664            (extract32(imm8, 0, 6) << 3);
5665        imm <<= 16;
5666        break;
5667    case MO_16:
5668        imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
5669            (extract32(imm8, 6, 1) ? 0x3000 : 0x4000) |
5670            (extract32(imm8, 0, 6) << 6);
5671        break;
5672    default:
5673        g_assert_not_reached();
5674    }
5675    return imm;
5676}
5677
5678/* Floating point immediate
5679 *   31  30  29 28       24 23  22  21 20        13 12   10 9    5 4    0
5680 * +---+---+---+-----------+------+---+------------+-------+------+------+
5681 * | M | 0 | S | 1 1 1 1 0 | type | 1 |    imm8    | 1 0 0 | imm5 |  Rd  |
5682 * +---+---+---+-----------+------+---+------------+-------+------+------+
5683 */
5684static void disas_fp_imm(DisasContext *s, uint32_t insn)
5685{
5686    int rd = extract32(insn, 0, 5);
5687    int imm8 = extract32(insn, 13, 8);
5688    int type = extract32(insn, 22, 2);
5689    uint64_t imm;
5690    TCGv_i64 tcg_res;
5691    TCGMemOp sz;
5692
5693    switch (type) {
5694    case 0:
5695        sz = MO_32;
5696        break;
5697    case 1:
5698        sz = MO_64;
5699        break;
5700    case 3:
5701        sz = MO_16;
5702        if (arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
5703            break;
5704        }
5705        /* fallthru */
5706    default:
5707        unallocated_encoding(s);
5708        return;
5709    }
5710
5711    if (!fp_access_check(s)) {
5712        return;
5713    }
5714
5715    imm = vfp_expand_imm(sz, imm8);
5716
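    /* The expanded immediate always sits in the low bits for its size, so
     * a single 64-bit write (which also zeroes the high vector bits) works.
     */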
5717    tcg_res = tcg_const_i64(imm);
5718    write_fp_dreg(s, rd, tcg_res);
5719    tcg_temp_free_i64(tcg_res);
5720}
5721
5722/* Handle floating point <=> fixed point conversions. Note that we can
 * also deal with fp <=> integer conversions as a special case (scale == 64).
 * OPTME: consider handling that special case specially, or at least skipping
5725 * the call to scalbn in the helpers for zero shifts.
5726 */
5727static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode,
5728                           bool itof, int rmode, int scale, int sf, int type)
5729{
5730    bool is_signed = !(opcode & 1);
5731    TCGv_ptr tcg_fpstatus;
5732    TCGv_i32 tcg_shift, tcg_single;
5733    TCGv_i64 tcg_double;
5734
5735    tcg_fpstatus = get_fpstatus_ptr(type == 3);
5736
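    /* tcg_shift is the number of fraction bits; for the fp <=> integer
     * special case (scale == 64) it is zero, and the scalbn step inside
     * the fixed-point helpers becomes a no-op.
     */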
5737    tcg_shift = tcg_const_i32(64 - scale);
5738
5739    if (itof) {
5740        TCGv_i64 tcg_int = cpu_reg(s, rn);
5741        if (!sf) {
5742            TCGv_i64 tcg_extend = new_tmp_a64(s);
5743
5744            if (is_signed) {
5745                tcg_gen_ext32s_i64(tcg_extend, tcg_int);
5746            } else {
5747                tcg_gen_ext32u_i64(tcg_extend, tcg_int);
5748            }
5749
5750            tcg_int = tcg_extend;
5751        }
5752
5753        switch (type) {
5754        case 1: /* float64 */
5755            tcg_double = tcg_temp_new_i64();
5756            if (is_signed) {
5757                gen_helper_vfp_sqtod(tcg_double, tcg_int,
5758                                     tcg_shift, tcg_fpstatus);
5759            } else {
5760                gen_helper_vfp_uqtod(tcg_double, tcg_int,
5761                                     tcg_shift, tcg_fpstatus);
5762            }
5763            write_fp_dreg(s, rd, tcg_double);
5764            tcg_temp_free_i64(tcg_double);
5765            break;
5766
5767        case 0: /* float32 */
5768            tcg_single = tcg_temp_new_i32();
5769            if (is_signed) {
5770                gen_helper_vfp_sqtos(tcg_single, tcg_int,
5771                                     tcg_shift, tcg_fpstatus);
5772            } else {
5773                gen_helper_vfp_uqtos(tcg_single, tcg_int,
5774                                     tcg_shift, tcg_fpstatus);
5775            }
5776            write_fp_sreg(s, rd, tcg_single);
5777            tcg_temp_free_i32(tcg_single);
5778            break;
5779
5780        case 3: /* float16 */
5781            tcg_single = tcg_temp_new_i32();
5782            if (is_signed) {
5783                gen_helper_vfp_sqtoh(tcg_single, tcg_int,
5784                                     tcg_shift, tcg_fpstatus);
5785            } else {
5786                gen_helper_vfp_uqtoh(tcg_single, tcg_int,
5787                                     tcg_shift, tcg_fpstatus);
5788            }
5789            write_fp_sreg(s, rd, tcg_single);
5790            tcg_temp_free_i32(tcg_single);
5791            break;
5792
5793        default:
5794            g_assert_not_reached();
5795        }
5796    } else {
5797        TCGv_i64 tcg_int = cpu_reg(s, rd);
5798        TCGv_i32 tcg_rmode;
5799
5800        if (extract32(opcode, 2, 1)) {
5801            /* There are too many rounding modes to all fit into rmode,
5802             * so FCVTA[US] is a special case.
5803             */
5804            rmode = FPROUNDING_TIEAWAY;
5805        }
5806
5807        tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
5808
5809        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
5810
5811        switch (type) {
5812        case 1: /* float64 */
5813            tcg_double = read_fp_dreg(s, rn);
5814            if (is_signed) {
5815                if (!sf) {
5816                    gen_helper_vfp_tosld(tcg_int, tcg_double,
5817                                         tcg_shift, tcg_fpstatus);
5818                } else {
5819                    gen_helper_vfp_tosqd(tcg_int, tcg_double,
5820                                         tcg_shift, tcg_fpstatus);
5821                }
5822            } else {
5823                if (!sf) {
5824                    gen_helper_vfp_tould(tcg_int, tcg_double,
5825                                         tcg_shift, tcg_fpstatus);
5826                } else {
5827                    gen_helper_vfp_touqd(tcg_int, tcg_double,
5828                                         tcg_shift, tcg_fpstatus);
5829                }
5830            }
5831            if (!sf) {
5832                tcg_gen_ext32u_i64(tcg_int, tcg_int);
5833            }
5834            tcg_temp_free_i64(tcg_double);
5835            break;
5836
5837        case 0: /* float32 */
5838            tcg_single = read_fp_sreg(s, rn);
5839            if (sf) {
5840                if (is_signed) {
5841                    gen_helper_vfp_tosqs(tcg_int, tcg_single,
5842                                         tcg_shift, tcg_fpstatus);
5843                } else {
5844                    gen_helper_vfp_touqs(tcg_int, tcg_single,
5845                                         tcg_shift, tcg_fpstatus);
5846                }
5847            } else {
5848                TCGv_i32 tcg_dest = tcg_temp_new_i32();
5849                if (is_signed) {
5850                    gen_helper_vfp_tosls(tcg_dest, tcg_single,
5851                                         tcg_shift, tcg_fpstatus);
5852                } else {
5853                    gen_helper_vfp_touls(tcg_dest, tcg_single,
5854                                         tcg_shift, tcg_fpstatus);
5855                }
5856                tcg_gen_extu_i32_i64(tcg_int, tcg_dest);
5857                tcg_temp_free_i32(tcg_dest);
5858            }
5859            tcg_temp_free_i32(tcg_single);
5860            break;
5861
5862        case 3: /* float16 */
5863            tcg_single = read_fp_sreg(s, rn);
5864            if (sf) {
5865                if (is_signed) {
5866                    gen_helper_vfp_tosqh(tcg_int, tcg_single,
5867                                         tcg_shift, tcg_fpstatus);
5868                } else {
5869                    gen_helper_vfp_touqh(tcg_int, tcg_single,
5870                                         tcg_shift, tcg_fpstatus);
5871                }
5872            } else {
5873                TCGv_i32 tcg_dest = tcg_temp_new_i32();
5874                if (is_signed) {
5875                    gen_helper_vfp_toslh(tcg_dest, tcg_single,
5876                                         tcg_shift, tcg_fpstatus);
5877                } else {
5878                    gen_helper_vfp_toulh(tcg_dest, tcg_single,
5879                                         tcg_shift, tcg_fpstatus);
5880                }
5881                tcg_gen_extu_i32_i64(tcg_int, tcg_dest);
5882                tcg_temp_free_i32(tcg_dest);
5883            }
5884            tcg_temp_free_i32(tcg_single);
5885            break;
5886
5887        default:
5888            g_assert_not_reached();
5889        }
5890
5891        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
5892        tcg_temp_free_i32(tcg_rmode);
5893    }
5894
5895    tcg_temp_free_ptr(tcg_fpstatus);
5896    tcg_temp_free_i32(tcg_shift);
5897}
5898
5899/* Floating point <-> fixed point conversions
5900 *   31   30  29 28       24 23  22  21 20   19 18    16 15   10 9    5 4    0
5901 * +----+---+---+-----------+------+---+-------+--------+-------+------+------+
5902 * | sf | 0 | S | 1 1 1 1 0 | type | 0 | rmode | opcode | scale |  Rn  |  Rd  |
5903 * +----+---+---+-----------+------+---+-------+--------+-------+------+------+
5904 */
5905static void disas_fp_fixed_conv(DisasContext *s, uint32_t insn)
5906{
5907    int rd = extract32(insn, 0, 5);
5908    int rn = extract32(insn, 5, 5);
5909    int scale = extract32(insn, 10, 6);
5910    int opcode = extract32(insn, 16, 3);
5911    int rmode = extract32(insn, 19, 2);
5912    int type = extract32(insn, 22, 2);
5913    bool sbit = extract32(insn, 29, 1);
5914    bool sf = extract32(insn, 31, 1);
5915    bool itof;
5916
5917    if (sbit || (!sf && scale < 32)) {
5918        unallocated_encoding(s);
5919        return;
5920    }
5921
5922    switch (type) {
5923    case 0: /* float32 */
5924    case 1: /* float64 */
5925        break;
5926    case 3: /* float16 */
5927        if (arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
5928            break;
5929        }
5930        /* fallthru */
5931    default:
5932        unallocated_encoding(s);
5933        return;
5934    }
5935
5936    switch ((rmode << 3) | opcode) {
5937    case 0x2: /* SCVTF */
5938    case 0x3: /* UCVTF */
5939        itof = true;
5940        break;
5941    case 0x18: /* FCVTZS */
5942    case 0x19: /* FCVTZU */
5943        itof = false;
5944        break;
5945    default:
5946        unallocated_encoding(s);
5947        return;
5948    }
5949
5950    if (!fp_access_check(s)) {
5951        return;
5952    }
5953
5954    handle_fpfpcvt(s, rd, rn, opcode, itof, FPROUNDING_ZERO, scale, sf, type);
5955}
5956
5957static void handle_fmov(DisasContext *s, int rd, int rn, int type, bool itof)
5958{
5959    /* FMOV: gpr to or from float, double, or top half of quad fp reg,
5960     * without conversion.
5961     */
5962
5963    if (itof) {
5964        TCGv_i64 tcg_rn = cpu_reg(s, rn);
5965        TCGv_i64 tmp;
5966
5967        switch (type) {
5968        case 0:
5969            /* 32 bit */
5970            tmp = tcg_temp_new_i64();
5971            tcg_gen_ext32u_i64(tmp, tcg_rn);
5972            write_fp_dreg(s, rd, tmp);
5973            tcg_temp_free_i64(tmp);
5974            break;
5975        case 1:
5976            /* 64 bit */
5977            write_fp_dreg(s, rd, tcg_rn);
5978            break;
5979        case 2:
5980            /* 64 bit to top half. */
5981            tcg_gen_st_i64(tcg_rn, cpu_env, fp_reg_hi_offset(s, rd));
5982            clear_vec_high(s, true, rd);
5983            break;
5984        case 3:
5985            /* 16 bit */
5986            tmp = tcg_temp_new_i64();
5987            tcg_gen_ext16u_i64(tmp, tcg_rn);
5988            write_fp_dreg(s, rd, tmp);
5989            tcg_temp_free_i64(tmp);
5990            break;
5991        default:
5992            g_assert_not_reached();
5993        }
5994    } else {
5995        TCGv_i64 tcg_rd = cpu_reg(s, rd);
5996
5997        switch (type) {
5998        case 0:
5999            /* 32 bit */
6000            tcg_gen_ld32u_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_32));
6001            break;
6002        case 1:
6003            /* 64 bit */
6004            tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_64));
6005            break;
6006        case 2:
6007            /* 64 bits from top half */
6008            tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_hi_offset(s, rn));
6009            break;
6010        case 3:
6011            /* 16 bit */
6012            tcg_gen_ld16u_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_16));
6013            break;
6014        default:
6015            g_assert_not_reached();
6016        }
6017    }
6018}
6019
6020/* Floating point <-> integer conversions
6021 *   31   30  29 28       24 23  22  21 20   19 18 16 15         10 9  5 4  0
6022 * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
6023 * | sf | 0 | S | 1 1 1 1 0 | type | 1 | rmode | opc | 0 0 0 0 0 0 | Rn | Rd |
6024 * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
6025 */
6026static void disas_fp_int_conv(DisasContext *s, uint32_t insn)
6027{
6028    int rd = extract32(insn, 0, 5);
6029    int rn = extract32(insn, 5, 5);
6030    int opcode = extract32(insn, 16, 3);
6031    int rmode = extract32(insn, 19, 2);
6032    int type = extract32(insn, 22, 2);
6033    bool sbit = extract32(insn, 29, 1);
6034    bool sf = extract32(insn, 31, 1);
6035
6036    if (sbit) {
6037        unallocated_encoding(s);
6038        return;
6039    }
6040
6041    if (opcode > 5) {
6042        /* FMOV */
6043        bool itof = opcode & 1;
6044
6045        if (rmode >= 2) {
6046            unallocated_encoding(s);
6047            return;
6048        }
6049
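        /* sf, type and rmode are packed into one value for the decode
         * below; e.g. case 0xd is sf=1/type=2/rmode=1, the FMOV variant
         * that moves 64 bits to or from Vn.D[1].
         */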
6050        switch (sf << 3 | type << 1 | rmode) {
6051        case 0x0: /* 32 bit */
6052        case 0xa: /* 64 bit */
6053        case 0xd: /* 64 bit to top half of quad */
6054            break;
6055        case 0x6: /* 16-bit float, 32-bit int */
6056        case 0xe: /* 16-bit float, 64-bit int */
6057            if (arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
6058                break;
6059            }
6060            /* fallthru */
6061        default:
6062            /* all other sf/type/rmode combinations are invalid */
6063            unallocated_encoding(s);
6064            return;
6065        }
6066
6067        if (!fp_access_check(s)) {
6068            return;
6069        }
6070        handle_fmov(s, rd, rn, type, itof);
6071    } else {
6072        /* actual FP conversions */
6073        bool itof = extract32(opcode, 1, 1);
6074
6075        if (rmode != 0 && opcode > 1) {
6076            unallocated_encoding(s);
6077            return;
6078        }
6079        switch (type) {
6080        case 0: /* float32 */
6081        case 1: /* float64 */
6082            break;
6083        case 3: /* float16 */
6084            if (arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
6085                break;
6086            }
6087            /* fallthru */
6088        default:
6089            unallocated_encoding(s);
6090            return;
6091        }
6092
6093        if (!fp_access_check(s)) {
6094            return;
6095        }
6096        handle_fpfpcvt(s, rd, rn, opcode, itof, rmode, 64, sf, type);
6097    }
6098}
6099
6100/* FP-specific subcases of table C3-6 (SIMD and FP data processing)
6101 *   31  30  29 28     25 24                          0
6102 * +---+---+---+---------+-----------------------------+
6103 * |   | 0 |   | 1 1 1 1 |                             |
6104 * +---+---+---+---------+-----------------------------+
6105 */
6106static void disas_data_proc_fp(DisasContext *s, uint32_t insn)
6107{
6108    if (extract32(insn, 24, 1)) {
6109        /* Floating point data-processing (3 source) */
6110        disas_fp_3src(s, insn);
6111    } else if (extract32(insn, 21, 1) == 0) {
6112        /* Floating point to fixed point conversions */
6113        disas_fp_fixed_conv(s, insn);
6114    } else {
6115        switch (extract32(insn, 10, 2)) {
6116        case 1:
6117            /* Floating point conditional compare */
6118            disas_fp_ccomp(s, insn);
6119            break;
6120        case 2:
6121            /* Floating point data-processing (2 source) */
6122            disas_fp_2src(s, insn);
6123            break;
6124        case 3:
6125            /* Floating point conditional select */
6126            disas_fp_csel(s, insn);
6127            break;
6128        case 0:
6129            switch (ctz32(extract32(insn, 12, 4))) {
6130            case 0: /* [15:12] == xxx1 */
6131                /* Floating point immediate */
6132                disas_fp_imm(s, insn);
6133                break;
6134            case 1: /* [15:12] == xx10 */
6135                /* Floating point compare */
6136                disas_fp_compare(s, insn);
6137                break;
6138            case 2: /* [15:12] == x100 */
6139                /* Floating point data-processing (1 source) */
6140                disas_fp_1src(s, insn);
6141                break;
6142            case 3: /* [15:12] == 1000 */
6143                unallocated_encoding(s);
6144                break;
6145            default: /* [15:12] == 0000 */
6146                /* Floating point <-> integer conversions */
6147                disas_fp_int_conv(s, insn);
6148                break;
6149            }
6150            break;
6151        }
6152    }
6153}
6154
6155static void do_ext64(DisasContext *s, TCGv_i64 tcg_left, TCGv_i64 tcg_right,
6156                     int pos)
6157{
6158    /* Extract 64 bits from the middle of two concatenated 64 bit
6159     * vector register slices left:right. The extracted bits start
6160     * at 'pos' bits into the right (least significant) side.
6161     * We return the result in tcg_right, and guarantee not to
6162     * trash tcg_left.
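     * For example, pos == 8 yields (right >> 8) | (left << 56).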
6163     */
6164    TCGv_i64 tcg_tmp = tcg_temp_new_i64();
6165    assert(pos > 0 && pos < 64);
6166
6167    tcg_gen_shri_i64(tcg_right, tcg_right, pos);
6168    tcg_gen_shli_i64(tcg_tmp, tcg_left, 64 - pos);
6169    tcg_gen_or_i64(tcg_right, tcg_right, tcg_tmp);
6170
6171    tcg_temp_free_i64(tcg_tmp);
6172}
6173
6174/* EXT
6175 *   31  30 29         24 23 22  21 20  16 15  14  11 10  9    5 4    0
6176 * +---+---+-------------+-----+---+------+---+------+---+------+------+
6177 * | 0 | Q | 1 0 1 1 1 0 | op2 | 0 |  Rm  | 0 | imm4 | 0 |  Rn  |  Rd  |
6178 * +---+---+-------------+-----+---+------+---+------+---+------+------+
6179 */
6180static void disas_simd_ext(DisasContext *s, uint32_t insn)
6181{
6182    int is_q = extract32(insn, 30, 1);
6183    int op2 = extract32(insn, 22, 2);
6184    int imm4 = extract32(insn, 11, 4);
6185    int rm = extract32(insn, 16, 5);
6186    int rn = extract32(insn, 5, 5);
6187    int rd = extract32(insn, 0, 5);
6188    int pos = imm4 << 3;
6189    TCGv_i64 tcg_resl, tcg_resh;
6190
6191    if (op2 != 0 || (!is_q && extract32(imm4, 3, 1))) {
6192        unallocated_encoding(s);
6193        return;
6194    }
6195
6196    if (!fp_access_check(s)) {
6197        return;
6198    }
6199
6200    tcg_resh = tcg_temp_new_i64();
6201    tcg_resl = tcg_temp_new_i64();
6202
6203    /* Vd gets bits starting at pos bits into Vm:Vn. This is
6204     * either extracting 128 bits from a 128:128 concatenation, or
6205     * extracting 64 bits from a 64:64 concatenation.
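     * For instance (an illustrative case), is_q with imm4 == 9 means
     * pos == 72: skip one whole 64-bit element, then shift by 8 bits.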
6206     */
6207    if (!is_q) {
6208        read_vec_element(s, tcg_resl, rn, 0, MO_64);
6209        if (pos != 0) {
6210            read_vec_element(s, tcg_resh, rm, 0, MO_64);
6211            do_ext64(s, tcg_resh, tcg_resl, pos);
6212        }
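        /* The high half of the result is always zero for the 64-bit form. */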
6213        tcg_gen_movi_i64(tcg_resh, 0);
6214    } else {
6215        TCGv_i64 tcg_hh;
6216        typedef struct {
6217            int reg;
6218            int elt;
6219        } EltPosns;
6220        EltPosns eltposns[] = { {rn, 0}, {rn, 1}, {rm, 0}, {rm, 1} };
6221        EltPosns *elt = eltposns;
6222
6223        if (pos >= 64) {
6224            elt++;
6225            pos -= 64;
6226        }
6227
6228        read_vec_element(s, tcg_resl, elt->reg, elt->elt, MO_64);
6229        elt++;
6230        read_vec_element(s, tcg_resh, elt->reg, elt->elt, MO_64);
6231        elt++;
6232        if (pos != 0) {
6233            do_ext64(s, tcg_resh, tcg_resl, pos);
6234            tcg_hh = tcg_temp_new_i64();
6235            read_vec_element(s, tcg_hh, elt->reg, elt->elt, MO_64);
6236            do_ext64(s, tcg_hh, tcg_resh, pos);
6237            tcg_temp_free_i64(tcg_hh);
6238        }
6239    }
6240
6241    write_vec_element(s, tcg_resl, rd, 0, MO_64);
6242    tcg_temp_free_i64(tcg_resl);
6243    write_vec_element(s, tcg_resh, rd, 1, MO_64);
6244    tcg_temp_free_i64(tcg_resh);
6245}
6246
6247/* TBL/TBX
6248 *   31  30 29         24 23 22  21 20  16 15  14 13  12  11 10 9    5 4    0
6249 * +---+---+-------------+-----+---+------+---+-----+----+-----+------+------+
6250 * | 0 | Q | 0 0 1 1 1 0 | op2 | 0 |  Rm  | 0 | len | op | 0 0 |  Rn  |  Rd  |
6251 * +---+---+-------------+-----+---+------+---+-----+----+-----+------+------+
6252 */
6253static void disas_simd_tb(DisasContext *s, uint32_t insn)
6254{
6255    int op2 = extract32(insn, 22, 2);
6256    int is_q = extract32(insn, 30, 1);
6257    int rm = extract32(insn, 16, 5);
6258    int rn = extract32(insn, 5, 5);
6259    int rd = extract32(insn, 0, 5);
6260    int is_tblx = extract32(insn, 12, 1);
6261    int len = extract32(insn, 13, 2);
6262    TCGv_i64 tcg_resl, tcg_resh, tcg_idx;
6263    TCGv_i32 tcg_regno, tcg_numregs;
6264
6265    if (op2 != 0) {
6266        unallocated_encoding(s);
6267        return;
6268    }
6269
6270    if (!fp_access_check(s)) {
6271        return;
6272    }
6273
6274    /* This does a table lookup: for every byte element in the input
6275     * we index into a table formed from up to four vector registers,
6276     * and then the output is the result of the lookups. Our helper
6277     * function does the lookup operation for a single 64 bit part of
6278     * the input.
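     * Out-of-range indices return zero for TBL but leave the destination
     * element unchanged for TBX, which is why only TBX pre-loads the
     * result from Rd below.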
6279     */
6280    tcg_resl = tcg_temp_new_i64();
6281    tcg_resh = tcg_temp_new_i64();
6282
6283    if (is_tblx) {
6284        read_vec_element(s, tcg_resl, rd, 0, MO_64);
6285    } else {
6286        tcg_gen_movi_i64(tcg_resl, 0);
6287    }
6288    if (is_tblx && is_q) {
6289        read_vec_element(s, tcg_resh, rd, 1, MO_64);
6290    } else {
6291        tcg_gen_movi_i64(tcg_resh, 0);
6292    }
6293
6294    tcg_idx = tcg_temp_new_i64();
6295    tcg_regno = tcg_const_i32(rn);
6296    tcg_numregs = tcg_const_i32(len + 1);
6297    read_vec_element(s, tcg_idx, rm, 0, MO_64);
6298    gen_helper_simd_tbl(tcg_resl, cpu_env, tcg_resl, tcg_idx,
6299                        tcg_regno, tcg_numregs);
6300    if (is_q) {
6301        read_vec_element(s, tcg_idx, rm, 1, MO_64);
6302        gen_helper_simd_tbl(tcg_resh, cpu_env, tcg_resh, tcg_idx,
6303                            tcg_regno, tcg_numregs);
6304    }
6305    tcg_temp_free_i64(tcg_idx);
6306    tcg_temp_free_i32(tcg_regno);
6307    tcg_temp_free_i32(tcg_numregs);
6308
6309    write_vec_element(s, tcg_resl, rd, 0, MO_64);
6310    tcg_temp_free_i64(tcg_resl);
6311    write_vec_element(s, tcg_resh, rd, 1, MO_64);
6312    tcg_temp_free_i64(tcg_resh);
6313}
6314
6315/* ZIP/UZP/TRN
6316 *   31  30 29         24 23  22  21 20   16 15 14 12 11 10 9    5 4    0
6317 * +---+---+-------------+------+---+------+---+------------------+------+
6318 * | 0 | Q | 0 0 1 1 1 0 | size | 0 |  Rm  | 0 | opc | 1 0 |  Rn  |  Rd  |
6319 * +---+---+-------------+------+---+------+---+------------------+------+
6320 */
6321static void disas_simd_zip_trn(DisasContext *s, uint32_t insn)
6322{
6323    int rd = extract32(insn, 0, 5);
6324    int rn = extract32(insn, 5, 5);
6325    int rm = extract32(insn, 16, 5);
6326    int size = extract32(insn, 22, 2);
6327    /* opc field bits [1:0] indicate ZIP/UZP/TRN;
6328     * bit 2 indicates 1 vs 2 variant of the insn.
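     * For example, ZIP1 (part == 0) interleaves the low halves: result
     * element 2*i comes from Vn[i] and element 2*i+1 from Vm[i].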
6329     */
6330    int opcode = extract32(insn, 12, 2);
6331    bool part = extract32(insn, 14, 1);
6332    bool is_q = extract32(insn, 30, 1);
6333    int esize = 8 << size;
6334    int i, ofs;
6335    int datasize = is_q ? 128 : 64;
6336    int elements = datasize / esize;
6337    TCGv_i64 tcg_res, tcg_resl, tcg_resh;
6338
6339    if (opcode == 0 || (size == 3 && !is_q)) {
6340        unallocated_encoding(s);
6341        return;
6342    }
6343
6344    if (!fp_access_check(s)) {
6345        return;
6346    }
6347
6348    tcg_resl = tcg_const_i64(0);
6349    tcg_resh = tcg_const_i64(0);
6350    tcg_res = tcg_temp_new_i64();
6351
6352    for (i = 0; i < elements; i++) {
6353        switch (opcode) {
6354        case 1: /* UZP1/2 */
6355        {
6356            int midpoint = elements / 2;
6357            if (i < midpoint) {
6358                read_vec_element(s, tcg_res, rn, 2 * i + part, size);
6359            } else {
6360                read_vec_element(s, tcg_res, rm,
6361                                 2 * (i - midpoint) + part, size);
6362            }
6363            break;
6364        }
6365        case 2: /* TRN1/2 */
6366            if (i & 1) {
6367                read_vec_element(s, tcg_res, rm, (i & ~1) + part, size);
6368            } else {
6369                read_vec_element(s, tcg_res, rn, (i & ~1) + part, size);
6370            }
6371            break;
6372        case 3: /* ZIP1/2 */
6373        {
6374            int base = part * elements / 2;
6375            if (i & 1) {
6376                read_vec_element(s, tcg_res, rm, base + (i >> 1), size);
6377            } else {
6378                read_vec_element(s, tcg_res, rn, base + (i >> 1), size);
6379            }
6380            break;
6381        }
6382        default:
6383            g_assert_not_reached();
6384        }
6385
6386        ofs = i * esize;
6387        if (ofs < 64) {
6388            tcg_gen_shli_i64(tcg_res, tcg_res, ofs);
6389            tcg_gen_or_i64(tcg_resl, tcg_resl, tcg_res);
6390        } else {
6391            tcg_gen_shli_i64(tcg_res, tcg_res, ofs - 64);
6392            tcg_gen_or_i64(tcg_resh, tcg_resh, tcg_res);
6393        }
6394    }
6395
6396    tcg_temp_free_i64(tcg_res);
6397
6398    write_vec_element(s, tcg_resl, rd, 0, MO_64);
6399    tcg_temp_free_i64(tcg_resl);
6400    write_vec_element(s, tcg_resh, rd, 1, MO_64);
6401    tcg_temp_free_i64(tcg_resh);
6402}
6403
6404/*
6405 * do_reduction_op helper
6406 *
6407 * This mirrors the Reduce() pseudocode in the ARM ARM. It is
6408 * important for correct NaN propagation that we do these
6409 * operations in exactly the order specified by the pseudocode.
6410 *
 * This is a recursive function; the TCG temp it returns should be
 * freed by the calling function once it is done with the value.
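 *
 * For example (illustrative), four 32-bit lanes enter with vmap == 0b1111;
 * the first split recurses on 0b0011 and 0b1100, reproducing Reduce()'s
 * pairwise tree.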
6413 */
6414static TCGv_i32 do_reduction_op(DisasContext *s, int fpopcode, int rn,
6415                                int esize, int size, int vmap, TCGv_ptr fpst)
6416{
6417    if (esize == size) {
6418        int element;
6419        TCGMemOp msize = esize == 16 ? MO_16 : MO_32;
6420        TCGv_i32 tcg_elem;
6421
6422        /* We should have one register left here */
6423        assert(ctpop8(vmap) == 1);
6424        element = ctz32(vmap);
6425        assert(element < 8);
6426
6427        tcg_elem = tcg_temp_new_i32();
6428        read_vec_element_i32(s, tcg_elem, rn, element, msize);
6429        return tcg_elem;
6430    } else {
6431        int bits = size / 2;
6432        int shift = ctpop8(vmap) / 2;
6433        int vmap_lo = (vmap >> shift) & vmap;
6434        int vmap_hi = (vmap & ~vmap_lo);
6435        TCGv_i32 tcg_hi, tcg_lo, tcg_res;
6436
6437        tcg_hi = do_reduction_op(s, fpopcode, rn, esize, bits, vmap_hi, fpst);
6438        tcg_lo = do_reduction_op(s, fpopcode, rn, esize, bits, vmap_lo, fpst);
6439        tcg_res = tcg_temp_new_i32();
6440
6441        switch (fpopcode) {
6442        case 0x0c: /* fmaxnmv half-precision */
6443            gen_helper_advsimd_maxnumh(tcg_res, tcg_lo, tcg_hi, fpst);
6444            break;
6445        case 0x0f: /* fmaxv half-precision */
6446            gen_helper_advsimd_maxh(tcg_res, tcg_lo, tcg_hi, fpst);
6447            break;
6448        case 0x1c: /* fminnmv half-precision */
6449            gen_helper_advsimd_minnumh(tcg_res, tcg_lo, tcg_hi, fpst);
6450            break;
6451        case 0x1f: /* fminv half-precision */
6452            gen_helper_advsimd_minh(tcg_res, tcg_lo, tcg_hi, fpst);
6453            break;
6454        case 0x2c: /* fmaxnmv */
6455            gen_helper_vfp_maxnums(tcg_res, tcg_lo, tcg_hi, fpst);
6456            break;
6457        case 0x2f: /* fmaxv */
6458            gen_helper_vfp_maxs(tcg_res, tcg_lo, tcg_hi, fpst);
6459            break;
6460        case 0x3c: /* fminnmv */
6461            gen_helper_vfp_minnums(tcg_res, tcg_lo, tcg_hi, fpst);
6462            break;
6463        case 0x3f: /* fminv */
6464            gen_helper_vfp_mins(tcg_res, tcg_lo, tcg_hi, fpst);
6465            break;
6466        default:
6467            g_assert_not_reached();
6468        }
6469
6470        tcg_temp_free_i32(tcg_hi);
6471        tcg_temp_free_i32(tcg_lo);
6472        return tcg_res;
6473    }
6474}
6475
6476/* AdvSIMD across lanes
6477 *   31  30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
6478 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
6479 * | 0 | Q | U | 0 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
6480 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
6481 */
6482static void disas_simd_across_lanes(DisasContext *s, uint32_t insn)
6483{
6484    int rd = extract32(insn, 0, 5);
6485    int rn = extract32(insn, 5, 5);
6486    int size = extract32(insn, 22, 2);
6487    int opcode = extract32(insn, 12, 5);
6488    bool is_q = extract32(insn, 30, 1);
6489    bool is_u = extract32(insn, 29, 1);
6490    bool is_fp = false;
6491    bool is_min = false;
6492    int esize;
6493    int elements;
6494    int i;
6495    TCGv_i64 tcg_res, tcg_elt;
6496
6497    switch (opcode) {
6498    case 0x1b: /* ADDV */
6499        if (is_u) {
6500            unallocated_encoding(s);
6501            return;
6502        }
6503        /* fall through */
6504    case 0x3: /* SADDLV, UADDLV */
6505    case 0xa: /* SMAXV, UMAXV */
6506    case 0x1a: /* SMINV, UMINV */
6507        if (size == 3 || (size == 2 && !is_q)) {
6508            unallocated_encoding(s);
6509            return;
6510        }
6511        break;
6512    case 0xc: /* FMAXNMV, FMINNMV */
6513    case 0xf: /* FMAXV, FMINV */
        /* Bit 1 of the size field encodes min vs max, and the actual size
         * depends on the encoding of the U bit. If the U bit is not set
         * (and FP16 is enabled) we use half precision instead of single
         * precision.
         */
6519        is_min = extract32(size, 1, 1);
6520        is_fp = true;
6521        if (!is_u && arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
6522            size = 1;
6523        } else if (!is_u || !is_q || extract32(size, 0, 1)) {
6524            unallocated_encoding(s);
6525            return;
6526        } else {
6527            size = 2;
6528        }
6529        break;
6530    default:
6531        unallocated_encoding(s);
6532        return;
6533    }
6534
6535    if (!fp_access_check(s)) {
6536        return;
6537    }
6538
6539    esize = 8 << size;
6540    elements = (is_q ? 128 : 64) / esize;
6541
6542    tcg_res = tcg_temp_new_i64();
6543    tcg_elt = tcg_temp_new_i64();
6544
6545    /* These instructions operate across all lanes of a vector
6546     * to produce a single result. We can guarantee that a 64
6547     * bit intermediate is sufficient:
6548     *  + for [US]ADDLV the maximum element size is 32 bits, and
6549     *    the result type is 64 bits
6550     *  + for FMAX*V, FMIN*V, ADDV the intermediate type is the
6551     *    same as the element size, which is 32 bits at most
6552     * For the integer operations we can choose to work at 64
6553     * or 32 bits and truncate at the end; for simplicity
6554     * we use 64 bits always. The floating point
6555     * ops do require 32 bit intermediates, though.
6556     */
6557    if (!is_fp) {
6558        read_vec_element(s, tcg_res, rn, 0, size | (is_u ? 0 : MO_SIGN));
6559
6560        for (i = 1; i < elements; i++) {
6561            read_vec_element(s, tcg_elt, rn, i, size | (is_u ? 0 : MO_SIGN));
6562
6563            switch (opcode) {
6564            case 0x03: /* SADDLV / UADDLV */
6565            case 0x1b: /* ADDV */
6566                tcg_gen_add_i64(tcg_res, tcg_res, tcg_elt);
6567                break;
6568            case 0x0a: /* SMAXV / UMAXV */
6569                if (is_u) {
6570                    tcg_gen_umax_i64(tcg_res, tcg_res, tcg_elt);
6571                } else {
6572                    tcg_gen_smax_i64(tcg_res, tcg_res, tcg_elt);
6573                }
6574                break;
6575            case 0x1a: /* SMINV / UMINV */
6576                if (is_u) {
6577                    tcg_gen_umin_i64(tcg_res, tcg_res, tcg_elt);
6578                } else {
6579                    tcg_gen_smin_i64(tcg_res, tcg_res, tcg_elt);
6580                }
6581                break;
6582            default:
6583                g_assert_not_reached();
6584            }
6585
6586        }
6587    } else {
6588        /* Floating point vector reduction ops which work across 32
6589         * bit (single) or 16 bit (half-precision) intermediates.
6590         * Note that correct NaN propagation requires that we do these
6591         * operations in exactly the order specified by the pseudocode.
6592         */
6593        TCGv_ptr fpst = get_fpstatus_ptr(size == MO_16);
6594        int fpopcode = opcode | is_min << 4 | is_u << 5;
6595        int vmap = (1 << elements) - 1;
6596        TCGv_i32 tcg_res32 = do_reduction_op(s, fpopcode, rn, esize,
6597                                             (is_q ? 128 : 64), vmap, fpst);
6598        tcg_gen_extu_i32_i64(tcg_res, tcg_res32);
6599        tcg_temp_free_i32(tcg_res32);
6600        tcg_temp_free_ptr(fpst);
6601    }
6602
6603    tcg_temp_free_i64(tcg_elt);
6604
6605    /* Now truncate the result to the width required for the final output */
6606    if (opcode == 0x03) {
6607        /* SADDLV, UADDLV: result is 2*esize */
6608        size++;
6609    }
6610
6611    switch (size) {
6612    case 0:
6613        tcg_gen_ext8u_i64(tcg_res, tcg_res);
6614        break;
6615    case 1:
6616        tcg_gen_ext16u_i64(tcg_res, tcg_res);
6617        break;
6618    case 2:
6619        tcg_gen_ext32u_i64(tcg_res, tcg_res);
6620        break;
6621    case 3:
6622        break;
6623    default:
6624        g_assert_not_reached();
6625    }
6626
6627    write_fp_dreg(s, rd, tcg_res);
6628    tcg_temp_free_i64(tcg_res);
6629}
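
/* Illustrative sketch (an editorial addition, not part of QEMU): the
 * across-lanes ops above fold a whole vector into one scalar. A minimal
 * C model of the integer path, assuming only <stdint.h>; the helper name
 * is made up for illustration:
 *
 *   // SADDLV.8B: sum eight signed bytes; mirrors the "a 64-bit
 *   // intermediate is always sufficient" argument in the comment above.
 *   int64_t saddlv_8b(const int8_t v[8])
 *   {
 *       int64_t acc = v[0];
 *       for (int i = 1; i < 8; i++) {
 *           acc += v[i];     // worst case 8 * 127, far inside 64 bits
 *       }
 *       return acc;          // caller truncates to 2*esize = 16 bits
 *   }
 */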
6630
6631/* DUP (Element, Vector)
6632 *
6633 *  31  30   29              21 20    16 15        10  9    5 4    0
6634 * +---+---+-------------------+--------+-------------+------+------+
6635 * | 0 | Q | 0 0 1 1 1 0 0 0 0 |  imm5  | 0 0 0 0 0 1 |  Rn  |  Rd  |
6636 * +---+---+-------------------+--------+-------------+------+------+
6637 *
6638 * size: encoded in imm5 (see ARM ARM LowestSetBit())
6639 */
6640static void handle_simd_dupe(DisasContext *s, int is_q, int rd, int rn,
6641                             int imm5)
6642{
6643    int size = ctz32(imm5);
6644    int index = imm5 >> (size + 1);
6645
6646    if (size > 3 || (size == 3 && !is_q)) {
6647        unallocated_encoding(s);
6648        return;
6649    }
6650
6651    if (!fp_access_check(s)) {
6652        return;
6653    }
6654
6655    tcg_gen_gvec_dup_mem(size, vec_full_reg_offset(s, rd),
6656                         vec_reg_offset(s, rn, index, size),
6657                         is_q ? 16 : 8, vec_full_reg_size(s));
6658}
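
/* Illustrative sketch (an editorial addition, not part of QEMU): the imm5
 * "LowestSetBit" encoding used by the DUP/INS/MOV family packs element
 * size and index into one field: the lowest set bit gives the size, the
 * bits above it give the index. A sketch of the decode, using GCC's
 * __builtin_ctz as a stand-in for ctz32(); the function name is made up:
 *
 *   // imm5 = 0b01010: lowest set bit at position 1 => size 1
 *   // (16-bit elements), index = imm5 >> 2 = 2, i.e. lane V[2].H
 *   void decode_imm5(uint32_t imm5, int *size, int *index)
 *   {
 *       *size = __builtin_ctz(imm5);     // log2 of element size in bytes
 *       *index = imm5 >> (*size + 1);    // remaining high bits
 *   }
 */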
6659
6660/* DUP (element, scalar)
6661 *  31                   21 20    16 15        10  9    5 4    0
6662 * +-----------------------+--------+-------------+------+------+
6663 * | 0 1 0 1 1 1 1 0 0 0 0 |  imm5  | 0 0 0 0 0 1 |  Rn  |  Rd  |
6664 * +-----------------------+--------+-------------+------+------+
6665 */
6666static void handle_simd_dupes(DisasContext *s, int rd, int rn,
6667                              int imm5)
6668{
6669    int size = ctz32(imm5);
6670    int index;
6671    TCGv_i64 tmp;
6672
6673    if (size > 3) {
6674        unallocated_encoding(s);
6675        return;
6676    }
6677
6678    if (!fp_access_check(s)) {
6679        return;
6680    }
6681
6682    index = imm5 >> (size + 1);
6683
6684    /* This instruction just extracts the specified element and
6685     * zero-extends it into the bottom of the destination register.
6686     */
6687    tmp = tcg_temp_new_i64();
6688    read_vec_element(s, tmp, rn, index, size);
6689    write_fp_dreg(s, rd, tmp);
6690    tcg_temp_free_i64(tmp);
6691}
6692
6693/* DUP (General)
6694 *
6695 *  31  30   29              21 20    16 15        10  9    5 4    0
6696 * +---+---+-------------------+--------+-------------+------+------+
6697 * | 0 | Q | 0 0 1 1 1 0 0 0 0 |  imm5  | 0 0 0 0 1 1 |  Rn  |  Rd  |
6698 * +---+---+-------------------+--------+-------------+------+------+
6699 *
6700 * size: encoded in imm5 (see ARM ARM LowestSetBit())
6701 */
6702static void handle_simd_dupg(DisasContext *s, int is_q, int rd, int rn,
6703                             int imm5)
6704{
6705    int size = ctz32(imm5);
6706    uint32_t dofs, oprsz, maxsz;
6707
6708    if (size > 3 || ((size == 3) && !is_q)) {
6709        unallocated_encoding(s);
6710        return;
6711    }
6712
6713    if (!fp_access_check(s)) {
6714        return;
6715    }
6716
6717    dofs = vec_full_reg_offset(s, rd);
6718    oprsz = is_q ? 16 : 8;
6719    maxsz = vec_full_reg_size(s);
6720
6721    tcg_gen_gvec_dup_i64(size, dofs, oprsz, maxsz, cpu_reg(s, rn));
6722}
6723
6724/* INS (Element)
6725 *
6726 *  31                   21 20    16 15  14    11  10 9    5 4    0
6727 * +-----------------------+--------+------------+---+------+------+
6728 * | 0 1 1 0 1 1 1 0 0 0 0 |  imm5  | 0 |  imm4  | 1 |  Rn  |  Rd  |
6729 * +-----------------------+--------+------------+---+------+------+
6730 *
6731 * size: encoded in imm5 (see ARM ARM LowestSetBit())
6732 * index: encoded in imm5<4:size+1>
6733 */
6734static void handle_simd_inse(DisasContext *s, int rd, int rn,
6735                             int imm4, int imm5)
6736{
6737    int size = ctz32(imm5);
6738    int src_index, dst_index;
6739    TCGv_i64 tmp;
6740
6741    if (size > 3) {
6742        unallocated_encoding(s);
6743        return;
6744    }
6745
6746    if (!fp_access_check(s)) {
6747        return;
6748    }
6749
6750    dst_index = extract32(imm5, 1+size, 5);
6751    src_index = extract32(imm4, size, 4);
6752
6753    tmp = tcg_temp_new_i64();
6754
6755    read_vec_element(s, tmp, rn, src_index, size);
6756    write_vec_element(s, tmp, rd, dst_index, size);
6757
6758    tcg_temp_free_i64(tmp);
6759}
6760
6761
6762/* INS (General)
6763 *
6764 *  31                   21 20    16 15        10  9    5 4    0
6765 * +-----------------------+--------+-------------+------+------+
6766 * | 0 1 0 0 1 1 1 0 0 0 0 |  imm5  | 0 0 0 1 1 1 |  Rn  |  Rd  |
6767 * +-----------------------+--------+-------------+------+------+
6768 *
6769 * size: encoded in imm5 (see ARM ARM LowestSetBit())
6770 * index: encoded in imm5<4:size+1>
6771 */
6772static void handle_simd_insg(DisasContext *s, int rd, int rn, int imm5)
6773{
6774    int size = ctz32(imm5);
6775    int idx;
6776
6777    if (size > 3) {
6778        unallocated_encoding(s);
6779        return;
6780    }
6781
6782    if (!fp_access_check(s)) {
6783        return;
6784    }
6785
6786    idx = extract32(imm5, 1 + size, 4 - size);
6787    write_vec_element(s, cpu_reg(s, rn), rd, idx, size);
6788}
6789
6790/*
6791 * UMOV (General)
6792 * SMOV (General)
6793 *
6794 *  31  30   29              21 20    16 15    12   10 9    5 4    0
6795 * +---+---+-------------------+--------+-------------+------+------+
6796 * | 0 | Q | 0 0 1 1 1 0 0 0 0 |  imm5  | 0 0 1 U 1 1 |  Rn  |  Rd  |
6797 * +---+---+-------------------+--------+-------------+------+------+
6798 *
6799 * U: unsigned when set
6800 * size: encoded in imm5 (see ARM ARM LowestSetBit())
6801 */
6802static void handle_simd_umov_smov(DisasContext *s, int is_q, int is_signed,
6803                                  int rn, int rd, int imm5)
6804{
6805    int size = ctz32(imm5);
6806    int element;
6807    TCGv_i64 tcg_rd;
6808
6809    /* Check for unallocated encodings */
6810    if (is_signed) {
6811        if (size > 2 || (size == 2 && !is_q)) {
6812            unallocated_encoding(s);
6813            return;
6814        }
6815    } else {
6816        if (size > 3
6817            || (size < 3 && is_q)
6818            || (size == 3 && !is_q)) {
6819            unallocated_encoding(s);
6820            return;
6821        }
6822    }
6823
6824    if (!fp_access_check(s)) {
6825        return;
6826    }
6827
6828    element = extract32(imm5, 1+size, 4);
6829
6830    tcg_rd = cpu_reg(s, rd);
6831    read_vec_element(s, tcg_rd, rn, element, size | (is_signed ? MO_SIGN : 0));
6832    if (is_signed && !is_q) {
6833        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
6834    }
6835}
6836
6837/* AdvSIMD copy
6838 *   31  30  29  28             21 20  16 15  14  11 10  9    5 4    0
6839 * +---+---+----+-----------------+------+---+------+---+------+------+
6840 * | 0 | Q | op | 0 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 |  Rn  |  Rd  |
6841 * +---+---+----+-----------------+------+---+------+---+------+------+
6842 */
6843static void disas_simd_copy(DisasContext *s, uint32_t insn)
6844{
6845    int rd = extract32(insn, 0, 5);
6846    int rn = extract32(insn, 5, 5);
6847    int imm4 = extract32(insn, 11, 4);
6848    int op = extract32(insn, 29, 1);
6849    int is_q = extract32(insn, 30, 1);
6850    int imm5 = extract32(insn, 16, 5);
6851
6852    if (op) {
6853        if (is_q) {
6854            /* INS (element) */
6855            handle_simd_inse(s, rd, rn, imm4, imm5);
6856        } else {
6857            unallocated_encoding(s);
6858        }
6859    } else {
6860        switch (imm4) {
6861        case 0:
6862            /* DUP (element - vector) */
6863            handle_simd_dupe(s, is_q, rd, rn, imm5);
6864            break;
6865        case 1:
6866            /* DUP (general) */
6867            handle_simd_dupg(s, is_q, rd, rn, imm5);
6868            break;
6869        case 3:
6870            if (is_q) {
6871                /* INS (general) */
6872                handle_simd_insg(s, rd, rn, imm5);
6873            } else {
6874                unallocated_encoding(s);
6875            }
6876            break;
6877        case 5:
6878        case 7:
6879            /* UMOV/SMOV (is_q indicates 32/64; imm4 indicates signedness) */
6880            handle_simd_umov_smov(s, is_q, (imm4 == 5), rn, rd, imm5);
6881            break;
6882        default:
6883            unallocated_encoding(s);
6884            break;
6885        }
6886    }
6887}
6888
6889/* AdvSIMD modified immediate
6890 *  31  30   29  28                 19 18 16 15   12  11  10  9     5 4    0
6891 * +---+---+----+---------------------+-----+-------+----+---+-------+------+
6892 * | 0 | Q | op | 0 1 1 1 1 0 0 0 0 0 | abc | cmode | o2 | 1 | defgh |  Rd  |
6893 * +---+---+----+---------------------+-----+-------+----+---+-------+------+
6894 *
6895 * There are a number of operations that can be carried out here:
6896 *   MOVI - move (shifted) imm into register
6897 *   MVNI - move inverted (shifted) imm into register
6898 *   ORR  - bitwise OR of (shifted) imm with register
6899 *   BIC  - bitwise clear of (shifted) imm with register
6900 * With ARMv8.2 we also have:
6901 *   FMOV half-precision
6902 */
6903static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
6904{
6905    int rd = extract32(insn, 0, 5);
6906    int cmode = extract32(insn, 12, 4);
6907    int cmode_3_1 = extract32(cmode, 1, 3);
6908    int cmode_0 = extract32(cmode, 0, 1);
6909    int o2 = extract32(insn, 11, 1);
6910    uint64_t abcdefgh = extract32(insn, 5, 5) | (extract32(insn, 16, 3) << 5);
6911    bool is_neg = extract32(insn, 29, 1);
6912    bool is_q = extract32(insn, 30, 1);
6913    uint64_t imm = 0;
6914
6915    if (o2 != 0 || ((cmode == 0xf) && is_neg && !is_q)) {
6916        /* Check for FMOV (vector, immediate) - half-precision */
6917        if (!(arm_dc_feature(s, ARM_FEATURE_V8_FP16) && o2 && cmode == 0xf)) {
6918            unallocated_encoding(s);
6919            return;
6920        }
6921    }
6922
6923    if (!fp_access_check(s)) {
6924        return;
6925    }
6926
6927    /* See AdvSIMDExpandImm() in ARM ARM */
6928    switch (cmode_3_1) {
6929    case 0: /* Replicate(Zeros(24):imm8, 2) */
6930    case 1: /* Replicate(Zeros(16):imm8:Zeros(8), 2) */
6931    case 2: /* Replicate(Zeros(8):imm8:Zeros(16), 2) */
6932    case 3: /* Replicate(imm8:Zeros(24), 2) */
6933    {
6934        int shift = cmode_3_1 * 8;
6935        imm = bitfield_replicate(abcdefgh << shift, 32);
6936        break;
6937    }
6938    case 4: /* Replicate(Zeros(8):imm8, 4) */
6939    case 5: /* Replicate(imm8:Zeros(8), 4) */
6940    {
6941        int shift = (cmode_3_1 & 0x1) * 8;
6942        imm = bitfield_replicate(abcdefgh << shift, 16);
6943        break;
6944    }
6945    case 6:
6946        if (cmode_0) {
6947            /* Replicate(Zeros(8):imm8:Ones(16), 2) */
6948            imm = (abcdefgh << 16) | 0xffff;
6949        } else {
6950            /* Replicate(Zeros(16):imm8:Ones(8), 2) */
6951            imm = (abcdefgh << 8) | 0xff;
6952        }
6953        imm = bitfield_replicate(imm, 32);
6954        break;
6955    case 7:
6956        if (!cmode_0 && !is_neg) {
6957            imm = bitfield_replicate(abcdefgh, 8);
6958        } else if (!cmode_0 && is_neg) {
6959            int i;
6960            imm = 0;
6961            for (i = 0; i < 8; i++) {
6962                if ((abcdefgh) & (1 << i)) {
6963                    imm |= 0xffULL << (i * 8);
6964                }
6965            }
6966        } else if (cmode_0) {
6967            if (is_neg) {
6968                imm = (abcdefgh & 0x3f) << 48;
6969                if (abcdefgh & 0x80) {
6970                    imm |= 0x8000000000000000ULL;
6971                }
6972                if (abcdefgh & 0x40) {
6973                    imm |= 0x3fc0000000000000ULL;
6974                } else {
6975                    imm |= 0x4000000000000000ULL;
6976                }
6977            } else {
6978                if (o2) {
6979                    /* FMOV (vector, immediate) - half-precision */
6980                    imm = vfp_expand_imm(MO_16, abcdefgh);
6981                    /* now duplicate across the lanes */
6982                    imm = bitfield_replicate(imm, 16);
6983                } else {
6984                    imm = (abcdefgh & 0x3f) << 19;
6985                    if (abcdefgh & 0x80) {
6986                        imm |= 0x80000000;
6987                    }
6988                    if (abcdefgh & 0x40) {
6989                        imm |= 0x3e000000;
6990                    } else {
6991                        imm |= 0x40000000;
6992                    }
6993                    imm |= (imm << 32);
6994                }
6995            }
6996        }
6997        break;
6998    default:
6999        fprintf(stderr, "%s: cmode_3_1: %x\n", __func__, cmode_3_1);
7000        g_assert_not_reached();
7001    }
7002
7003    if (cmode_3_1 != 7 && is_neg) {
7004        imm = ~imm;
7005    }
7006
7007    if (!((cmode & 0x9) == 0x1 || (cmode & 0xd) == 0x9)) {
7008        /* MOVI or MVNI, with MVNI negation handled above.  */
7009        tcg_gen_gvec_dup64i(vec_full_reg_offset(s, rd), is_q ? 16 : 8,
7010                            vec_full_reg_size(s), imm);
7011    } else {
7012        /* ORR or BIC, with BIC negation to AND handled above.  */
7013        if (is_neg) {
7014            gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_andi, MO_64);
7015        } else {
7016            gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_ori, MO_64);
7017        }
7018    }
7019}
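
/* Illustrative sketch (an editorial addition, not part of QEMU): a worked
 * example of the AdvSIMDExpandImm cases above, assuming <stdint.h>; the
 * function name is made up. For cmode_3_1 == 1 the 8-bit immediate is
 * shifted left by 8 and replicated across the two 32-bit halves:
 *
 *   uint64_t expand_cmode1(uint8_t imm8)
 *   {
 *       uint64_t elt = (uint64_t)imm8 << 8;   // Zeros(16):imm8:Zeros(8)
 *       return elt | (elt << 32);             // Replicate(..., 2)
 *   }
 *   // expand_cmode1(0xab) == 0x0000ab000000ab00ull
 */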
7020
7021/* AdvSIMD scalar copy
7022 *  31 30  29  28             21 20  16 15  14  11 10  9    5 4    0
7023 * +-----+----+-----------------+------+---+------+---+------+------+
7024 * | 0 1 | op | 1 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 |  Rn  |  Rd  |
7025 * +-----+----+-----------------+------+---+------+---+------+------+
7026 */
7027static void disas_simd_scalar_copy(DisasContext *s, uint32_t insn)
7028{
7029    int rd = extract32(insn, 0, 5);
7030    int rn = extract32(insn, 5, 5);
7031    int imm4 = extract32(insn, 11, 4);
7032    int imm5 = extract32(insn, 16, 5);
7033    int op = extract32(insn, 29, 1);
7034
7035    if (op != 0 || imm4 != 0) {
7036        unallocated_encoding(s);
7037        return;
7038    }
7039
7040    /* DUP (element, scalar) */
7041    handle_simd_dupes(s, rd, rn, imm5);
7042}
7043
7044/* AdvSIMD scalar pairwise
7045 *  31 30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
7046 * +-----+---+-----------+------+-----------+--------+-----+------+------+
7047 * | 0 1 | U | 1 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
7048 * +-----+---+-----------+------+-----------+--------+-----+------+------+
7049 */
7050static void disas_simd_scalar_pairwise(DisasContext *s, uint32_t insn)
7051{
7052    int u = extract32(insn, 29, 1);
7053    int size = extract32(insn, 22, 2);
7054    int opcode = extract32(insn, 12, 5);
7055    int rn = extract32(insn, 5, 5);
7056    int rd = extract32(insn, 0, 5);
7057    TCGv_ptr fpst;
7058
7059    /* For some ops (the FP ones), size[1] is part of the encoding.
7060     * For ADDP strictly it is not but size[1] is always 1 for valid
7061     * encodings.
7062     */
7063    opcode |= (extract32(size, 1, 1) << 5);
7064
7065    switch (opcode) {
7066    case 0x3b: /* ADDP */
7067        if (u || size != 3) {
7068            unallocated_encoding(s);
7069            return;
7070        }
7071        if (!fp_access_check(s)) {
7072            return;
7073        }
7074
7075        fpst = NULL;
7076        break;
7077    case 0xc: /* FMAXNMP */
7078    case 0xd: /* FADDP */
7079    case 0xf: /* FMAXP */
7080    case 0x2c: /* FMINNMP */
7081    case 0x2f: /* FMINP */
7082        /* FP op; size[0] selects 32 vs 64 bit */
7083        if (!u) {
7084            if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
7085                unallocated_encoding(s);
7086                return;
7087            } else {
7088                size = MO_16;
7089            }
7090        } else {
7091            size = extract32(size, 0, 1) ? MO_64 : MO_32;
7092        }
7093
7094        if (!fp_access_check(s)) {
7095            return;
7096        }
7097
7098        fpst = get_fpstatus_ptr(size == MO_16);
7099        break;
7100    default:
7101        unallocated_encoding(s);
7102        return;
7103    }
7104
7105    if (size == MO_64) {
7106        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
7107        TCGv_i64 tcg_op2 = tcg_temp_new_i64();
7108        TCGv_i64 tcg_res = tcg_temp_new_i64();
7109
7110        read_vec_element(s, tcg_op1, rn, 0, MO_64);
7111        read_vec_element(s, tcg_op2, rn, 1, MO_64);
7112
7113        switch (opcode) {
7114        case 0x3b: /* ADDP */
7115            tcg_gen_add_i64(tcg_res, tcg_op1, tcg_op2);
7116            break;
7117        case 0xc: /* FMAXNMP */
7118            gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
7119            break;
7120        case 0xd: /* FADDP */
7121            gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
7122            break;
7123        case 0xf: /* FMAXP */
7124            gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
7125            break;
7126        case 0x2c: /* FMINNMP */
7127            gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
7128            break;
7129        case 0x2f: /* FMINP */
7130            gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
7131            break;
7132        default:
7133            g_assert_not_reached();
7134        }
7135
7136        write_fp_dreg(s, rd, tcg_res);
7137
7138        tcg_temp_free_i64(tcg_op1);
7139        tcg_temp_free_i64(tcg_op2);
7140        tcg_temp_free_i64(tcg_res);
7141    } else {
7142        TCGv_i32 tcg_op1 = tcg_temp_new_i32();
7143        TCGv_i32 tcg_op2 = tcg_temp_new_i32();
7144        TCGv_i32 tcg_res = tcg_temp_new_i32();
7145
7146        read_vec_element_i32(s, tcg_op1, rn, 0, size);
7147        read_vec_element_i32(s, tcg_op2, rn, 1, size);
7148
7149        if (size == MO_16) {
7150            switch (opcode) {
7151            case 0xc: /* FMAXNMP */
7152                gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
7153                break;
7154            case 0xd: /* FADDP */
7155                gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
7156                break;
7157            case 0xf: /* FMAXP */
7158                gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
7159                break;
7160            case 0x2c: /* FMINNMP */
7161                gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
7162                break;
7163            case 0x2f: /* FMINP */
7164                gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
7165                break;
7166            default:
7167                g_assert_not_reached();
7168            }
7169        } else {
7170            switch (opcode) {
7171            case 0xc: /* FMAXNMP */
7172                gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
7173                break;
7174            case 0xd: /* FADDP */
7175                gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
7176                break;
7177            case 0xf: /* FMAXP */
7178                gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
7179                break;
7180            case 0x2c: /* FMINNMP */
7181                gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
7182                break;
7183            case 0x2f: /* FMINP */
7184                gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
7185                break;
7186            default:
7187                g_assert_not_reached();
7188            }
7189        }
7190
7191        write_fp_sreg(s, rd, tcg_res);
7192
7193        tcg_temp_free_i32(tcg_op1);
7194        tcg_temp_free_i32(tcg_op2);
7195        tcg_temp_free_i32(tcg_res);
7196    }
7197
7198    if (fpst) {
7199        tcg_temp_free_ptr(fpst);
7200    }
7201}
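
/* Illustrative sketch (an editorial addition, not part of QEMU): the
 * scalar pairwise ops reduce exactly two lanes of the source into one
 * scalar, e.g. ADDP D0, V1.2D computes V1.D[0] + V1.D[1]. A minimal model
 * with a made-up helper name:
 *
 *   uint64_t addp_2d(const uint64_t v[2])
 *   {
 *       return v[0] + v[1];   // modulo-2^64 add of the two lanes
 *   }
 */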
7202
7203/*
7204 * Common SSHR[RA]/USHR[RA] - Shift right (optional rounding/accumulate)
7205 *
7206 * This code handles the common shifting logic and is used by both
7207 * the vector and scalar code.
7208 */
7209static void handle_shri_with_rndacc(TCGv_i64 tcg_res, TCGv_i64 tcg_src,
7210                                    TCGv_i64 tcg_rnd, bool accumulate,
7211                                    bool is_u, int size, int shift)
7212{
7213    bool extended_result = false;
7214    bool round = tcg_rnd != NULL;
7215    int ext_lshift = 0;
7216    TCGv_i64 tcg_src_hi;
7217
7218    if (round && size == 3) {
7219        extended_result = true;
7220        ext_lshift = 64 - shift;
7221        tcg_src_hi = tcg_temp_new_i64();
7222    } else if (shift == 64) {
7223        if (!accumulate && is_u) {
7224            /* result is zero */
7225            tcg_gen_movi_i64(tcg_res, 0);
7226            return;
7227        }
7228    }
7229
7230    /* Deal with the rounding step */
7231    if (round) {
7232        if (extended_result) {
7233            TCGv_i64 tcg_zero = tcg_const_i64(0);
7234            if (!is_u) {
7235                /* take care of sign extending tcg_res */
7236                tcg_gen_sari_i64(tcg_src_hi, tcg_src, 63);
7237                tcg_gen_add2_i64(tcg_src, tcg_src_hi,
7238                                 tcg_src, tcg_src_hi,
7239                                 tcg_rnd, tcg_zero);
7240            } else {
7241                tcg_gen_add2_i64(tcg_src, tcg_src_hi,
7242                                 tcg_src, tcg_zero,
7243                                 tcg_rnd, tcg_zero);
7244            }
7245            tcg_temp_free_i64(tcg_zero);
7246        } else {
7247            tcg_gen_add_i64(tcg_src, tcg_src, tcg_rnd);
7248        }
7249    }
7250
7251    /* Now do the shift right */
7252    if (round && extended_result) {
7253        /* extended case, >64 bit precision required */
7254        if (ext_lshift == 0) {
7255            /* special case, only high bits matter */
7256            tcg_gen_mov_i64(tcg_src, tcg_src_hi);
7257        } else {
7258            tcg_gen_shri_i64(tcg_src, tcg_src, shift);
7259            tcg_gen_shli_i64(tcg_src_hi, tcg_src_hi, ext_lshift);
7260            tcg_gen_or_i64(tcg_src, tcg_src, tcg_src_hi);
7261        }
7262    } else {
7263        if (is_u) {
7264            if (shift == 64) {
7265                /* essentially shifting in 64 zeros */
7266                tcg_gen_movi_i64(tcg_src, 0);
7267            } else {
7268                tcg_gen_shri_i64(tcg_src, tcg_src, shift);
7269            }
7270        } else {
7271            if (shift == 64) {
7272                /* effectively extending the sign-bit */
7273                tcg_gen_sari_i64(tcg_src, tcg_src, 63);
7274            } else {
7275                tcg_gen_sari_i64(tcg_src, tcg_src, shift);
7276            }
7277        }
7278    }
7279
7280    if (accumulate) {
7281        tcg_gen_add_i64(tcg_res, tcg_res, tcg_src);
7282    } else {
7283        tcg_gen_mov_i64(tcg_res, tcg_src);
7284    }
7285
7286    if (extended_result) {
7287        tcg_temp_free_i64(tcg_src_hi);
7288    }
7289}
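
/* Illustrative sketch (an editorial addition, not part of QEMU): the
 * rounding step above is "add half the last-discarded bit, then shift".
 * The add2/high-half path exists because src + round can carry out of
 * 64 bits when size == 3. A model of the narrow case, assuming
 * <stdint.h>; the function name is made up:
 *
 *   uint64_t urshr(uint64_t src, int shift)   // 0 < shift < 64
 *   {
 *       uint64_t round = 1ULL << (shift - 1);
 *       return (src + round) >> shift;
 *   }
 *   // urshr(0x18, 4) == 2, whereas plain 0x18 >> 4 == 1
 */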
7290
7291/* SSHR[RA]/USHR[RA] - Scalar shift right (optional rounding/accumulate) */
7292static void handle_scalar_simd_shri(DisasContext *s,
7293                                    bool is_u, int immh, int immb,
7294                                    int opcode, int rn, int rd)
7295{
7296    const int size = 3;
7297    int immhb = immh << 3 | immb;
7298    int shift = 2 * (8 << size) - immhb;
7299    bool accumulate = false;
7300    bool round = false;
7301    bool insert = false;
7302    TCGv_i64 tcg_rn;
7303    TCGv_i64 tcg_rd;
7304    TCGv_i64 tcg_round;
7305
7306    if (!extract32(immh, 3, 1)) {
7307        unallocated_encoding(s);
7308        return;
7309    }
7310
7311    if (!fp_access_check(s)) {
7312        return;
7313    }
7314
7315    switch (opcode) {
7316    case 0x02: /* SSRA / USRA (accumulate) */
7317        accumulate = true;
7318        break;
7319    case 0x04: /* SRSHR / URSHR (rounding) */
7320        round = true;
7321        break;
7322    case 0x06: /* SRSRA / URSRA (accum + rounding) */
7323        accumulate = round = true;
7324        break;
7325    case 0x08: /* SRI */
7326        insert = true;
7327        break;
7328    }
7329
7330    if (round) {
7331        uint64_t round_const = 1ULL << (shift - 1);
7332        tcg_round = tcg_const_i64(round_const);
7333    } else {
7334        tcg_round = NULL;
7335    }
7336
7337    tcg_rn = read_fp_dreg(s, rn);
7338    tcg_rd = (accumulate || insert) ? read_fp_dreg(s, rd) : tcg_temp_new_i64();
7339
7340    if (insert) {
7341        /* shift count same as element size is valid but does nothing;
7342         * special case to avoid potential shift by 64.
7343         */
7344        int esize = 8 << size;
7345        if (shift != esize) {
7346            tcg_gen_shri_i64(tcg_rn, tcg_rn, shift);
7347            tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, 0, esize - shift);
7348        }
7349    } else {
7350        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
7351                                accumulate, is_u, size, shift);
7352    }
7353
7354    write_fp_dreg(s, rd, tcg_rd);
7355
7356    tcg_temp_free_i64(tcg_rn);
7357    tcg_temp_free_i64(tcg_rd);
7358    if (round) {
7359        tcg_temp_free_i64(tcg_round);
7360    }
7361}
7362
7363/* SHL/SLI - Scalar shift left */
7364static void handle_scalar_simd_shli(DisasContext *s, bool insert,
7365                                    int immh, int immb, int opcode,
7366                                    int rn, int rd)
7367{
7368    int size = 32 - clz32(immh) - 1;
7369    int immhb = immh << 3 | immb;
7370    int shift = immhb - (8 << size);
7371    TCGv_i64 tcg_rn;
7372    TCGv_i64 tcg_rd;
7373
7374    if (!extract32(immh, 3, 1)) {
7375        unallocated_encoding(s);
7376        return;
7377    }
7378
7379    if (!fp_access_check(s)) {
7380        return;
7381    }
7382
7383    tcg_rn = read_fp_dreg(s, rn);
7384    tcg_rd = insert ? read_fp_dreg(s, rd) : tcg_temp_new_i64();
7385
7386    if (insert) {
7387        tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, shift, 64 - shift);
7388    } else {
7389        tcg_gen_shli_i64(tcg_rd, tcg_rn, shift);
7390    }
7391
7392    write_fp_dreg(s, rd, tcg_rd);
7393
7394    tcg_temp_free_i64(tcg_rn);
7395    tcg_temp_free_i64(tcg_rd);
7396}
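
/* Illustrative sketch (an editorial addition, not part of QEMU): SLI (and
 * SRI above) are "insert" shifts: only the bits covered by the shifted
 * source are written, the rest of the destination is preserved, which is
 * what the deposit op expresses. A bitwise model of SLI with a made-up
 * helper name:
 *
 *   uint64_t sli64(uint64_t rd, uint64_t rn, int shift)   // shift < 64
 *   {
 *       uint64_t mask = ~0ULL << shift;        // bits written by the insert
 *       return (rd & ~mask) | ((rn << shift) & mask);
 *   }
 */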
7397
7398/* SQSHRN/SQSHRUN - Saturating (signed/unsigned) shift right with
7399 * (signed/unsigned) narrowing */
7400static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q,
7401                                   bool is_u_shift, bool is_u_narrow,
7402                                   int immh, int immb, int opcode,
7403                                   int rn, int rd)
7404{
7405    int immhb = immh << 3 | immb;
7406    int size = 32 - clz32(immh) - 1;
7407    int esize = 8 << size;
7408    int shift = (2 * esize) - immhb;
7409    int elements = is_scalar ? 1 : (64 / esize);
7410    bool round = extract32(opcode, 0, 1);
7411    TCGMemOp ldop = (size + 1) | (is_u_shift ? 0 : MO_SIGN);
7412    TCGv_i64 tcg_rn, tcg_rd, tcg_round;
7413    TCGv_i32 tcg_rd_narrowed;
7414    TCGv_i64 tcg_final;
7415
7416    static NeonGenNarrowEnvFn * const signed_narrow_fns[4][2] = {
7417        { gen_helper_neon_narrow_sat_s8,
7418          gen_helper_neon_unarrow_sat8 },
7419        { gen_helper_neon_narrow_sat_s16,
7420          gen_helper_neon_unarrow_sat16 },
7421        { gen_helper_neon_narrow_sat_s32,
7422          gen_helper_neon_unarrow_sat32 },
7423        { NULL, NULL },
7424    };
7425    static NeonGenNarrowEnvFn * const unsigned_narrow_fns[4] = {
7426        gen_helper_neon_narrow_sat_u8,
7427        gen_helper_neon_narrow_sat_u16,
7428        gen_helper_neon_narrow_sat_u32,
7429        NULL
7430    };
7431    NeonGenNarrowEnvFn *narrowfn;
7432
7433    int i;
7434
7435    assert(size < 4);
7436
7437    if (extract32(immh, 3, 1)) {
7438        unallocated_encoding(s);
7439        return;
7440    }
7441
7442    if (!fp_access_check(s)) {
7443        return;
7444    }
7445
7446    if (is_u_shift) {
7447        narrowfn = unsigned_narrow_fns[size];
7448    } else {
7449        narrowfn = signed_narrow_fns[size][is_u_narrow ? 1 : 0];
7450    }
7451
7452    tcg_rn = tcg_temp_new_i64();
7453    tcg_rd = tcg_temp_new_i64();
7454    tcg_rd_narrowed = tcg_temp_new_i32();
7455    tcg_final = tcg_const_i64(0);
7456
7457    if (round) {
7458        uint64_t round_const = 1ULL << (shift - 1);
7459        tcg_round = tcg_const_i64(round_const);
7460    } else {
7461        tcg_round = NULL;
7462    }
7463
7464    for (i = 0; i < elements; i++) {
7465        read_vec_element(s, tcg_rn, rn, i, ldop);
7466        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
7467                                false, is_u_shift, size+1, shift);
7468        narrowfn(tcg_rd_narrowed, cpu_env, tcg_rd);
7469        tcg_gen_extu_i32_i64(tcg_rd, tcg_rd_narrowed);
7470        tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
7471    }
7472
7473    if (!is_q) {
7474        write_vec_element(s, tcg_final, rd, 0, MO_64);
7475    } else {
7476        write_vec_element(s, tcg_final, rd, 1, MO_64);
7477    }
7478
7479    if (round) {
7480        tcg_temp_free_i64(tcg_round);
7481    }
7482    tcg_temp_free_i64(tcg_rn);
7483    tcg_temp_free_i64(tcg_rd);
7484    tcg_temp_free_i32(tcg_rd_narrowed);
7485    tcg_temp_free_i64(tcg_final);
7486
7487    clear_vec_high(s, is_q, rd);
7488}
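
/* Illustrative sketch (an editorial addition, not part of QEMU): after the
 * (optionally rounding) right shift, the narrowing step saturates to the
 * destination width rather than truncating. A model of the signed 32->16
 * case, assuming <stdint.h>; the function name is made up:
 *
 *   int16_t sqxtn_s32(int32_t v)
 *   {
 *       if (v > INT16_MAX) return INT16_MAX;   // saturate high
 *       if (v < INT16_MIN) return INT16_MIN;   // saturate low
 *       return (int16_t)v;
 *   }
 */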
7489
7490/* SQSHLU, UQSHL, SQSHL: saturating left shifts */
7491static void handle_simd_qshl(DisasContext *s, bool scalar, bool is_q,
7492                             bool src_unsigned, bool dst_unsigned,
7493                             int immh, int immb, int rn, int rd)
7494{
7495    int immhb = immh << 3 | immb;
7496    int size = 32 - clz32(immh) - 1;
7497    int shift = immhb - (8 << size);
7498    int pass;
7499
7500    assert(immh != 0);
7501    assert(!(scalar && is_q));
7502
7503    if (!scalar) {
7504        if (!is_q && extract32(immh, 3, 1)) {
7505            unallocated_encoding(s);
7506            return;
7507        }
7508
7509        /* Since we use the variable-shift helpers we must
7510         * replicate the shift count into each element of
7511         * the tcg_shift value.
7512         */
7513        switch (size) {
7514        case 0:
7515            shift |= shift << 8;
7516            /* fall through */
7517        case 1:
7518            shift |= shift << 16;
7519            break;
7520        case 2:
7521        case 3:
7522            break;
7523        default:
7524            g_assert_not_reached();
7525        }
7526    }
7527
7528    if (!fp_access_check(s)) {
7529        return;
7530    }
7531
7532    if (size == 3) {
7533        TCGv_i64 tcg_shift = tcg_const_i64(shift);
7534        static NeonGenTwo64OpEnvFn * const fns[2][2] = {
7535            { gen_helper_neon_qshl_s64, gen_helper_neon_qshlu_s64 },
7536            { NULL, gen_helper_neon_qshl_u64 },
7537        };
7538        NeonGenTwo64OpEnvFn *genfn = fns[src_unsigned][dst_unsigned];
7539        int maxpass = is_q ? 2 : 1;
7540
7541        for (pass = 0; pass < maxpass; pass++) {
7542            TCGv_i64 tcg_op = tcg_temp_new_i64();
7543
7544            read_vec_element(s, tcg_op, rn, pass, MO_64);
7545            genfn(tcg_op, cpu_env, tcg_op, tcg_shift);
7546            write_vec_element(s, tcg_op, rd, pass, MO_64);
7547
7548            tcg_temp_free_i64(tcg_op);
7549        }
7550        tcg_temp_free_i64(tcg_shift);
7551        clear_vec_high(s, is_q, rd);
7552    } else {
7553        TCGv_i32 tcg_shift = tcg_const_i32(shift);
7554        static NeonGenTwoOpEnvFn * const fns[2][2][3] = {
7555            {
7556                { gen_helper_neon_qshl_s8,
7557                  gen_helper_neon_qshl_s16,
7558                  gen_helper_neon_qshl_s32 },
7559                { gen_helper_neon_qshlu_s8,
7560                  gen_helper_neon_qshlu_s16,
7561                  gen_helper_neon_qshlu_s32 }
7562            }, {
7563                { NULL, NULL, NULL },
7564                { gen_helper_neon_qshl_u8,
7565                  gen_helper_neon_qshl_u16,
7566                  gen_helper_neon_qshl_u32 }
7567            }
7568        };
7569        NeonGenTwoOpEnvFn *genfn = fns[src_unsigned][dst_unsigned][size];
7570        TCGMemOp memop = scalar ? size : MO_32;
7571        int maxpass = scalar ? 1 : is_q ? 4 : 2;
7572
7573        for (pass = 0; pass < maxpass; pass++) {
7574            TCGv_i32 tcg_op = tcg_temp_new_i32();
7575
7576            read_vec_element_i32(s, tcg_op, rn, pass, memop);
7577            genfn(tcg_op, cpu_env, tcg_op, tcg_shift);
7578            if (scalar) {
7579                switch (size) {
7580                case 0:
7581                    tcg_gen_ext8u_i32(tcg_op, tcg_op);
7582                    break;
7583                case 1:
7584                    tcg_gen_ext16u_i32(tcg_op, tcg_op);
7585                    break;
7586                case 2:
7587                    break;
7588                default:
7589                    g_assert_not_reached();
7590                }
7591                write_fp_sreg(s, rd, tcg_op);
7592            } else {
7593                write_vec_element_i32(s, tcg_op, rd, pass, MO_32);
7594            }
7595
7596            tcg_temp_free_i32(tcg_op);
7597        }
7598        tcg_temp_free_i32(tcg_shift);
7599
7600        if (!scalar) {
7601            clear_vec_high(s, is_q, rd);
7602        }
7603    }
7604}
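
/* Illustrative sketch (an editorial addition, not part of QEMU): the
 * byte/halfword replication above exists because the variable-shift
 * helpers take a per-element shift count in each lane of their operand.
 * The size == 0 case, as a standalone function with a made-up name:
 *
 *   uint32_t replicate_shift8(uint32_t shift)   // shift < 8
 *   {
 *       shift |= shift << 8;    // one byte -> two bytes
 *       shift |= shift << 16;   // two bytes -> four bytes
 *       return shift;           // e.g. 5 -> 0x05050505
 *   }
 */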
7605
7606/* Common vector code for handling integer to FP conversion */
7607static void handle_simd_intfp_conv(DisasContext *s, int rd, int rn,
7608                                   int elements, int is_signed,
7609                                   int fracbits, int size)
7610{
7611    TCGv_ptr tcg_fpst = get_fpstatus_ptr(size == MO_16);
7612    TCGv_i32 tcg_shift = NULL;
7613
7614    TCGMemOp mop = size | (is_signed ? MO_SIGN : 0);
7615    int pass;
7616
7617    if (fracbits || size == MO_64) {
7618        tcg_shift = tcg_const_i32(fracbits);
7619    }
7620
7621    if (size == MO_64) {
7622        TCGv_i64 tcg_int64 = tcg_temp_new_i64();
7623        TCGv_i64 tcg_double = tcg_temp_new_i64();
7624
7625        for (pass = 0; pass < elements; pass++) {
7626            read_vec_element(s, tcg_int64, rn, pass, mop);
7627
7628            if (is_signed) {
7629                gen_helper_vfp_sqtod(tcg_double, tcg_int64,
7630                                     tcg_shift, tcg_fpst);
7631            } else {
7632                gen_helper_vfp_uqtod(tcg_double, tcg_int64,
7633                                     tcg_shift, tcg_fpst);
7634            }
7635            if (elements == 1) {
7636                write_fp_dreg(s, rd, tcg_double);
7637            } else {
7638                write_vec_element(s, tcg_double, rd, pass, MO_64);
7639            }
7640        }
7641
7642        tcg_temp_free_i64(tcg_int64);
7643        tcg_temp_free_i64(tcg_double);
7644
7645    } else {
7646        TCGv_i32 tcg_int32 = tcg_temp_new_i32();
7647        TCGv_i32 tcg_float = tcg_temp_new_i32();
7648
7649        for (pass = 0; pass < elements; pass++) {
7650            read_vec_element_i32(s, tcg_int32, rn, pass, mop);
7651
7652            switch (size) {
7653            case MO_32:
7654                if (fracbits) {
7655                    if (is_signed) {
7656                        gen_helper_vfp_sltos(tcg_float, tcg_int32,
7657                                             tcg_shift, tcg_fpst);
7658                    } else {
7659                        gen_helper_vfp_ultos(tcg_float, tcg_int32,
7660                                             tcg_shift, tcg_fpst);
7661                    }
7662                } else {
7663                    if (is_signed) {
7664                        gen_helper_vfp_sitos(tcg_float, tcg_int32, tcg_fpst);
7665                    } else {
7666                        gen_helper_vfp_uitos(tcg_float, tcg_int32, tcg_fpst);
7667                    }
7668                }
7669                break;
7670            case MO_16:
7671                if (fracbits) {
7672                    if (is_signed) {
7673                        gen_helper_vfp_sltoh(tcg_float, tcg_int32,
7674                                             tcg_shift, tcg_fpst);
7675                    } else {
7676                        gen_helper_vfp_ultoh(tcg_float, tcg_int32,
7677                                             tcg_shift, tcg_fpst);
7678                    }
7679                } else {
7680                    if (is_signed) {
7681                        gen_helper_vfp_sitoh(tcg_float, tcg_int32, tcg_fpst);
7682                    } else {
7683                        gen_helper_vfp_uitoh(tcg_float, tcg_int32, tcg_fpst);
7684                    }
7685                }
7686                break;
7687            default:
7688                g_assert_not_reached();
7689            }
7690
7691            if (elements == 1) {
7692                write_fp_sreg(s, rd, tcg_float);
7693            } else {
7694                write_vec_element_i32(s, tcg_float, rd, pass, size);
7695            }
7696        }
7697
7698        tcg_temp_free_i32(tcg_int32);
7699        tcg_temp_free_i32(tcg_float);
7700    }
7701
7702    tcg_temp_free_ptr(tcg_fpst);
7703    if (tcg_shift) {
7704        tcg_temp_free_i32(tcg_shift);
7705    }
7706
7707    clear_vec_high(s, elements << size == 16, rd);
7708}
7709
7710/* UCVTF/SCVTF - Integer to FP conversion */
7711static void handle_simd_shift_intfp_conv(DisasContext *s, bool is_scalar,
7712                                         bool is_q, bool is_u,
7713                                         int immh, int immb, int opcode,
7714                                         int rn, int rd)
7715{
7716    int size, elements, fracbits;
7717    int immhb = immh << 3 | immb;
7718
7719    if (immh & 8) {
7720        size = MO_64;
7721        if (!is_scalar && !is_q) {
7722            unallocated_encoding(s);
7723            return;
7724        }
7725    } else if (immh & 4) {
7726        size = MO_32;
7727    } else if (immh & 2) {
7728        size = MO_16;
7729        if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
7730            unallocated_encoding(s);
7731            return;
7732        }
7733    } else {
7734        /* immh == 0 would be a failure of the decode logic */
7735        g_assert(immh == 1);
7736        unallocated_encoding(s);
7737        return;
7738    }
7739
7740    if (is_scalar) {
7741        elements = 1;
7742    } else {
7743        elements = (8 << is_q) >> size;
7744    }
7745    fracbits = (16 << size) - immhb;
7746
7747    if (!fp_access_check(s)) {
7748        return;
7749    }
7750
7751    handle_simd_intfp_conv(s, rd, rn, elements, !is_u, fracbits, size);
7752}
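
/* Worked example (an editorial addition, not part of QEMU): in the
 * shift-immediate encodings the highest set bit of immh selects the
 * element size, and the fixed-point fraction width falls out as
 * (2 * esize) - immhb. E.g. immh = 0b0100, immb = 0b110 gives
 * size = MO_32, immhb = 0b0100110 = 38, so fracbits = 64 - 38 = 26:
 * SCVTF then divides the converted integer by 2^26.
 */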
7753
7754/* FCVTZS, FCVTZU - FP to fixed-point conversion */
7755static void handle_simd_shift_fpint_conv(DisasContext *s, bool is_scalar,
7756                                         bool is_q, bool is_u,
7757                                         int immh, int immb, int rn, int rd)
7758{
7759    int immhb = immh << 3 | immb;
7760    int pass, size, fracbits;
7761    TCGv_ptr tcg_fpstatus;
7762    TCGv_i32 tcg_rmode, tcg_shift;
7763
7764    if (immh & 0x8) {
7765        size = MO_64;
7766        if (!is_scalar && !is_q) {
7767            unallocated_encoding(s);
7768            return;
7769        }
7770    } else if (immh & 0x4) {
7771        size = MO_32;
7772    } else if (immh & 0x2) {
7773        size = MO_16;
7774        if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
7775            unallocated_encoding(s);
7776            return;
7777        }
7778    } else {
7779        /* Should have split out AdvSIMD modified immediate earlier.  */
7780        assert(immh == 1);
7781        unallocated_encoding(s);
7782        return;
7783    }
7784
7785    if (!fp_access_check(s)) {
7786        return;
7787    }
7788
7789    assert(!(is_scalar && is_q));
7790
7791    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(FPROUNDING_ZERO));
7792    tcg_fpstatus = get_fpstatus_ptr(size == MO_16);
7793    gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
7794    fracbits = (16 << size) - immhb;
7795    tcg_shift = tcg_const_i32(fracbits);
7796
7797    if (size == MO_64) {
7798        int maxpass = is_scalar ? 1 : 2;
7799
7800        for (pass = 0; pass < maxpass; pass++) {
7801            TCGv_i64 tcg_op = tcg_temp_new_i64();
7802
7803            read_vec_element(s, tcg_op, rn, pass, MO_64);
7804            if (is_u) {
7805                gen_helper_vfp_touqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
7806            } else {
7807                gen_helper_vfp_tosqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
7808            }
7809            write_vec_element(s, tcg_op, rd, pass, MO_64);
7810            tcg_temp_free_i64(tcg_op);
7811        }
7812        clear_vec_high(s, is_q, rd);
7813    } else {
7814        void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
7815        int maxpass = is_scalar ? 1 : ((8 << is_q) >> size);
7816
7817        switch (size) {
7818        case MO_16:
7819            if (is_u) {
7820                fn = gen_helper_vfp_touhh;
7821            } else {
7822                fn = gen_helper_vfp_toshh;
7823            }
7824            break;
7825        case MO_32:
7826            if (is_u) {
7827                fn = gen_helper_vfp_touls;
7828            } else {
7829                fn = gen_helper_vfp_tosls;
7830            }
7831            break;
7832        default:
7833            g_assert_not_reached();
7834        }
7835
7836        for (pass = 0; pass < maxpass; pass++) {
7837            TCGv_i32 tcg_op = tcg_temp_new_i32();
7838
7839            read_vec_element_i32(s, tcg_op, rn, pass, size);
7840            fn(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
7841            if (is_scalar) {
7842                write_fp_sreg(s, rd, tcg_op);
7843            } else {
7844                write_vec_element_i32(s, tcg_op, rd, pass, size);
7845            }
7846            tcg_temp_free_i32(tcg_op);
7847        }
7848        if (!is_scalar) {
7849            clear_vec_high(s, is_q, rd);
7850        }
7851    }
7852
7853    tcg_temp_free_i32(tcg_shift);
7854    gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
7855    tcg_temp_free_ptr(tcg_fpstatus);
7856    tcg_temp_free_i32(tcg_rmode);
7857}
7858
7859/* AdvSIMD scalar shift by immediate
7860 *  31 30  29 28         23 22  19 18  16 15    11  10 9    5 4    0
7861 * +-----+---+-------------+------+------+--------+---+------+------+
7862 * | 0 1 | U | 1 1 1 1 1 0 | immh | immb | opcode | 1 |  Rn  |  Rd  |
7863 * +-----+---+-------------+------+------+--------+---+------+------+
7864 *
7865 * This is the scalar version, so it works on fixed-size (64-bit) registers.
7866 */
7867static void disas_simd_scalar_shift_imm(DisasContext *s, uint32_t insn)
7868{
7869    int rd = extract32(insn, 0, 5);
7870    int rn = extract32(insn, 5, 5);
7871    int opcode = extract32(insn, 11, 5);
7872    int immb = extract32(insn, 16, 3);
7873    int immh = extract32(insn, 19, 4);
7874    bool is_u = extract32(insn, 29, 1);
7875
7876    if (immh == 0) {
7877        unallocated_encoding(s);
7878        return;
7879    }
7880
7881    switch (opcode) {
7882    case 0x08: /* SRI */
7883        if (!is_u) {
7884            unallocated_encoding(s);
7885            return;
7886        }
7887        /* fall through */
7888    case 0x00: /* SSHR / USHR */
7889    case 0x02: /* SSRA / USRA */
7890    case 0x04: /* SRSHR / URSHR */
7891    case 0x06: /* SRSRA / URSRA */
7892        handle_scalar_simd_shri(s, is_u, immh, immb, opcode, rn, rd);
7893        break;
7894    case 0x0a: /* SHL / SLI */
7895        handle_scalar_simd_shli(s, is_u, immh, immb, opcode, rn, rd);
7896        break;
7897    case 0x1c: /* SCVTF, UCVTF */
7898        handle_simd_shift_intfp_conv(s, true, false, is_u, immh, immb,
7899                                     opcode, rn, rd);
7900        break;
7901    case 0x10: /* SQSHRUN, SQSHRUN2 */
7902    case 0x11: /* SQRSHRUN, SQRSHRUN2 */
7903        if (!is_u) {
7904            unallocated_encoding(s);
7905            return;
7906        }
7907        handle_vec_simd_sqshrn(s, true, false, false, true,
7908                               immh, immb, opcode, rn, rd);
7909        break;
7910    case 0x12: /* SQSHRN, SQSHRN2, UQSHRN */
7911    case 0x13: /* SQRSHRN, SQRSHRN2, UQRSHRN, UQRSHRN2 */
7912        handle_vec_simd_sqshrn(s, true, false, is_u, is_u,
7913                               immh, immb, opcode, rn, rd);
7914        break;
7915    case 0xc: /* SQSHLU */
7916        if (!is_u) {
7917            unallocated_encoding(s);
7918            return;
7919        }
7920        handle_simd_qshl(s, true, false, false, true, immh, immb, rn, rd);
7921        break;
7922    case 0xe: /* SQSHL, UQSHL */
7923        handle_simd_qshl(s, true, false, is_u, is_u, immh, immb, rn, rd);
7924        break;
7925    case 0x1f: /* FCVTZS, FCVTZU */
7926        handle_simd_shift_fpint_conv(s, true, false, is_u, immh, immb, rn, rd);
7927        break;
7928    default:
7929        unallocated_encoding(s);
7930        break;
7931    }
7932}
7933
7934/* AdvSIMD scalar three different
7935 *  31 30  29 28       24 23  22  21 20  16 15    12 11 10 9    5 4    0
7936 * +-----+---+-----------+------+---+------+--------+-----+------+------+
7937 * | 0 1 | U | 1 1 1 1 0 | size | 1 |  Rm  | opcode | 0 0 |  Rn  |  Rd  |
7938 * +-----+---+-----------+------+---+------+--------+-----+------+------+
7939 */
7940static void disas_simd_scalar_three_reg_diff(DisasContext *s, uint32_t insn)
7941{
7942    bool is_u = extract32(insn, 29, 1);
7943    int size = extract32(insn, 22, 2);
7944    int opcode = extract32(insn, 12, 4);
7945    int rm = extract32(insn, 16, 5);
7946    int rn = extract32(insn, 5, 5);
7947    int rd = extract32(insn, 0, 5);
7948
7949    if (is_u) {
7950        unallocated_encoding(s);
7951        return;
7952    }
7953
7954    switch (opcode) {
7955    case 0x9: /* SQDMLAL, SQDMLAL2 */
7956    case 0xb: /* SQDMLSL, SQDMLSL2 */
7957    case 0xd: /* SQDMULL, SQDMULL2 */
7958        if (size == 0 || size == 3) {
7959            unallocated_encoding(s);
7960            return;
7961        }
7962        break;
7963    default:
7964        unallocated_encoding(s);
7965        return;
7966    }
7967
7968    if (!fp_access_check(s)) {
7969        return;
7970    }
7971
7972    if (size == 2) {
7973        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
7974        TCGv_i64 tcg_op2 = tcg_temp_new_i64();
7975        TCGv_i64 tcg_res = tcg_temp_new_i64();
7976
7977        read_vec_element(s, tcg_op1, rn, 0, MO_32 | MO_SIGN);
7978        read_vec_element(s, tcg_op2, rm, 0, MO_32 | MO_SIGN);
7979
7980        tcg_gen_mul_i64(tcg_res, tcg_op1, tcg_op2);
7981        gen_helper_neon_addl_saturate_s64(tcg_res, cpu_env, tcg_res, tcg_res);
7982
7983        switch (opcode) {
7984        case 0xd: /* SQDMULL, SQDMULL2 */
7985            break;
7986        case 0xb: /* SQDMLSL, SQDMLSL2 */
7987            tcg_gen_neg_i64(tcg_res, tcg_res);
7988            /* fall through */
7989        case 0x9: /* SQDMLAL, SQDMLAL2 */
7990            read_vec_element(s, tcg_op1, rd, 0, MO_64);
7991            gen_helper_neon_addl_saturate_s64(tcg_res, cpu_env,
7992                                              tcg_res, tcg_op1);
7993            break;
7994        default:
7995            g_assert_not_reached();
7996        }
7997
7998        write_fp_dreg(s, rd, tcg_res);
7999
8000        tcg_temp_free_i64(tcg_op1);
8001        tcg_temp_free_i64(tcg_op2);
8002        tcg_temp_free_i64(tcg_res);
8003    } else {
8004        TCGv_i32 tcg_op1 = read_fp_hreg(s, rn);
8005        TCGv_i32 tcg_op2 = read_fp_hreg(s, rm);
8006        TCGv_i64 tcg_res = tcg_temp_new_i64();
8007
8008        gen_helper_neon_mull_s16(tcg_res, tcg_op1, tcg_op2);
8009        gen_helper_neon_addl_saturate_s32(tcg_res, cpu_env, tcg_res, tcg_res);
8010
8011        switch (opcode) {
8012        case 0xd: /* SQDMULL, SQDMULL2 */
8013            break;
8014        case 0xb: /* SQDMLSL, SQDMLSL2 */
8015            gen_helper_neon_negl_u32(tcg_res, tcg_res);
8016            /* fall through */
8017        case 0x9: /* SQDMLAL, SQDMLAL2 */
8018        {
8019            TCGv_i64 tcg_op3 = tcg_temp_new_i64();
8020            read_vec_element(s, tcg_op3, rd, 0, MO_32);
8021            gen_helper_neon_addl_saturate_s32(tcg_res, cpu_env,
8022                                              tcg_res, tcg_op3);
8023            tcg_temp_free_i64(tcg_op3);
8024            break;
8025        }
8026        default:
8027            g_assert_not_reached();
8028        }
8029
8030        tcg_gen_ext32u_i64(tcg_res, tcg_res);
8031        write_fp_dreg(s, rd, tcg_res);
8032
8033        tcg_temp_free_i32(tcg_op1);
8034        tcg_temp_free_i32(tcg_op2);
8035        tcg_temp_free_i64(tcg_res);
8036    }
8037}
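
/* Illustrative sketch (an editorial addition, not part of QEMU): SQDMULL
 * is "signed saturating doubling multiply long"; the code above gets the
 * doubling by saturating-adding the product to itself. A scalar model of
 * the 32x32->64 case, assuming <stdint.h>; the function name is made up:
 *
 *   int64_t sqdmull_s32(int32_t a, int32_t b)
 *   {
 *       int64_t p = (int64_t)a * b;
 *       // 2*p only overflows for a == b == INT32_MIN (p == 2^62):
 *       return (p == 0x4000000000000000LL) ? INT64_MAX : 2 * p;
 *   }
 */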
8038
8039/* CMTST : test is "if ((X & Y) != 0)". */
8040static void gen_cmtst_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
8041{
8042    tcg_gen_and_i32(d, a, b);
8043    tcg_gen_setcondi_i32(TCG_COND_NE, d, d, 0);
8044    tcg_gen_neg_i32(d, d);
8045}
8046
8047static void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
8048{
8049    tcg_gen_and_i64(d, a, b);
8050    tcg_gen_setcondi_i64(TCG_COND_NE, d, d, 0);
8051    tcg_gen_neg_i64(d, d);
8052}
8053
8054static void gen_cmtst_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
8055{
8056    tcg_gen_and_vec(vece, d, a, b);
8057    tcg_gen_dupi_vec(vece, a, 0);
8058    tcg_gen_cmp_vec(TCG_COND_NE, vece, d, d, a);
8059}
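
/* Illustrative sketch (an editorial addition, not part of QEMU): the
 * setcond+neg idiom above turns a 0/1 comparison result into the
 * 0/all-ones element mask that AdvSIMD compares produce. Equivalent
 * scalar C, with a made-up helper name:
 *
 *   uint64_t cmtst64(uint64_t a, uint64_t b)
 *   {
 *       return ((a & b) != 0) ? ~0ULL : 0;   // i.e. -(uint64_t)(cond)
 *   }
 */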
8060
8061static void handle_3same_64(DisasContext *s, int opcode, bool u,
8062                            TCGv_i64 tcg_rd, TCGv_i64 tcg_rn, TCGv_i64 tcg_rm)
8063{
8064    /* Handle 64x64->64 opcodes which are shared between the scalar
8065     * and vector 3-same groups. We cover every opcode where size == 3
8066     * is valid in either the three-reg-same (integer, not pairwise)
8067     * or scalar-three-reg-same groups.
8068     */
8069    TCGCond cond;
8070
8071    switch (opcode) {
8072    case 0x1: /* SQADD */
8073        if (u) {
8074            gen_helper_neon_qadd_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
8075        } else {
8076            gen_helper_neon_qadd_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
8077        }
8078        break;
8079    case 0x5: /* SQSUB */
8080        if (u) {
8081            gen_helper_neon_qsub_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
8082        } else {
8083            gen_helper_neon_qsub_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
8084        }
8085        break;
8086    case 0x6: /* CMGT, CMHI */
8087        /* 64 bit integer comparison, result = test ? (2^64 - 1) : 0.
8088         * We implement this using setcond (test) and then negating.
8089         */
8090        cond = u ? TCG_COND_GTU : TCG_COND_GT;
8091    do_cmop:
8092        tcg_gen_setcond_i64(cond, tcg_rd, tcg_rn, tcg_rm);
8093        tcg_gen_neg_i64(tcg_rd, tcg_rd);
8094        break;
8095    case 0x7: /* CMGE, CMHS */
8096        cond = u ? TCG_COND_GEU : TCG_COND_GE;
8097        goto do_cmop;
8098    case 0x11: /* CMTST, CMEQ */
8099        if (u) {
8100            cond = TCG_COND_EQ;
8101            goto do_cmop;
8102        }
8103        gen_cmtst_i64(tcg_rd, tcg_rn, tcg_rm);
8104        break;
8105    case 0x8: /* SSHL, USHL */
8106        if (u) {
8107            gen_helper_neon_shl_u64(tcg_rd, tcg_rn, tcg_rm);
8108        } else {
8109            gen_helper_neon_shl_s64(tcg_rd, tcg_rn, tcg_rm);
8110        }
8111        break;
8112    case 0x9: /* SQSHL, UQSHL */
8113        if (u) {
8114            gen_helper_neon_qshl_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
8115        } else {
8116            gen_helper_neon_qshl_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
8117        }
8118        break;
8119    case 0xa: /* SRSHL, URSHL */
8120        if (u) {
8121            gen_helper_neon_rshl_u64(tcg_rd, tcg_rn, tcg_rm);
8122        } else {
8123            gen_helper_neon_rshl_s64(tcg_rd, tcg_rn, tcg_rm);
8124        }
8125        break;
8126    case 0xb: /* SQRSHL, UQRSHL */
8127        if (u) {
8128            gen_helper_neon_qrshl_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
8129        } else {
8130            gen_helper_neon_qrshl_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
8131        }
8132        break;
8133    case 0x10: /* ADD, SUB */
8134        if (u) {
8135            tcg_gen_sub_i64(tcg_rd, tcg_rn, tcg_rm);
8136        } else {
8137            tcg_gen_add_i64(tcg_rd, tcg_rn, tcg_rm);
8138        }
8139        break;
8140    default:
8141        g_assert_not_reached();
8142    }
8143}
8144
8145/* Handle the 3-same-operands float operations; shared by the scalar
8146 * and vector encodings. The caller must filter out any encodings
8147 * not allocated for the group it is dealing with.
8148 */
8149static void handle_3same_float(DisasContext *s, int size, int elements,
8150                               int fpopcode, int rd, int rn, int rm)
8151{
8152    int pass;
8153    TCGv_ptr fpst = get_fpstatus_ptr(false);
8154
8155    for (pass = 0; pass < elements; pass++) {
8156        if (size) {
8157            /* Double */
8158            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
8159            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
8160            TCGv_i64 tcg_res = tcg_temp_new_i64();
8161
8162            read_vec_element(s, tcg_op1, rn, pass, MO_64);
8163            read_vec_element(s, tcg_op2, rm, pass, MO_64);
8164
8165            switch (fpopcode) {
8166            case 0x39: /* FMLS */
8167                /* As usual for ARM, separate negation for fused multiply-add */
8168                gen_helper_vfp_negd(tcg_op1, tcg_op1);
8169                /* fall through */
8170            case 0x19: /* FMLA */
8171                read_vec_element(s, tcg_res, rd, pass, MO_64);
8172                gen_helper_vfp_muladdd(tcg_res, tcg_op1, tcg_op2,
8173                                       tcg_res, fpst);
8174                break;
8175            case 0x18: /* FMAXNM */
8176                gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
8177                break;
8178            case 0x1a: /* FADD */
8179                gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
8180                break;
8181            case 0x1b: /* FMULX */
8182                gen_helper_vfp_mulxd(tcg_res, tcg_op1, tcg_op2, fpst);
8183                break;
8184            case 0x1c: /* FCMEQ */
8185                gen_helper_neon_ceq_f64(tcg_res, tcg_op1, tcg_op2, fpst);
8186                break;
8187            case 0x1e: /* FMAX */
8188                gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
8189                break;
8190            case 0x1f: /* FRECPS */
8191                gen_helper_recpsf_f64(tcg_res, tcg_op1, tcg_op2, fpst);
8192                break;
8193            case 0x38: /* FMINNM */
8194                gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
8195                break;
8196            case 0x3a: /* FSUB */
8197                gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
8198                break;
8199            case 0x3e: /* FMIN */
8200                gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
8201                break;
8202            case 0x3f: /* FRSQRTS */
8203                gen_helper_rsqrtsf_f64(tcg_res, tcg_op1, tcg_op2, fpst);
8204                break;
8205            case 0x5b: /* FMUL */
8206                gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
8207                break;
8208            case 0x5c: /* FCMGE */
8209                gen_helper_neon_cge_f64(tcg_res, tcg_op1, tcg_op2, fpst);
8210                break;
8211            case 0x5d: /* FACGE */
8212                gen_helper_neon_acge_f64(tcg_res, tcg_op1, tcg_op2, fpst);
8213                break;
8214            case 0x5f: /* FDIV */
8215                gen_helper_vfp_divd(tcg_res, tcg_op1, tcg_op2, fpst);
8216                break;
8217            case 0x7a: /* FABD */
8218                gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
8219                gen_helper_vfp_absd(tcg_res, tcg_res);
8220                break;
8221            case 0x7c: /* FCMGT */
8222                gen_helper_neon_cgt_f64(tcg_res, tcg_op1, tcg_op2, fpst);
8223                break;
8224            case 0x7d: /* FACGT */
8225                gen_helper_neon_acgt_f64(tcg_res, tcg_op1, tcg_op2, fpst);
8226                break;
8227            default:
8228                g_assert_not_reached();
8229            }
8230
8231            write_vec_element(s, tcg_res, rd, pass, MO_64);
8232
8233            tcg_temp_free_i64(tcg_res);
8234            tcg_temp_free_i64(tcg_op1);
8235            tcg_temp_free_i64(tcg_op2);
8236        } else {
8237            /* Single */
8238            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
8239            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
8240            TCGv_i32 tcg_res = tcg_temp_new_i32();
8241
8242            read_vec_element_i32(s, tcg_op1, rn, pass, MO_32);
8243            read_vec_element_i32(s, tcg_op2, rm, pass, MO_32);
8244
8245            switch (fpopcode) {
8246            case 0x39: /* FMLS */
8247                /* As usual for ARM, separate negation for fused multiply-add */
8248                gen_helper_vfp_negs(tcg_op1, tcg_op1);
8249                /* fall through */
8250            case 0x19: /* FMLA */
8251                read_vec_element_i32(s, tcg_res, rd, pass, MO_32);
8252                gen_helper_vfp_muladds(tcg_res, tcg_op1, tcg_op2,
8253                                       tcg_res, fpst);
8254                break;
8255            case 0x1a: /* FADD */
8256                gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
8257                break;
8258            case 0x1b: /* FMULX */
8259                gen_helper_vfp_mulxs(tcg_res, tcg_op1, tcg_op2, fpst);
8260                break;
8261            case 0x1c: /* FCMEQ */
8262                gen_helper_neon_ceq_f32(tcg_res, tcg_op1, tcg_op2, fpst);
8263                break;
8264            case 0x1e: /* FMAX */
8265                gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
8266                break;
8267            case 0x1f: /* FRECPS */
8268                gen_helper_recpsf_f32(tcg_res, tcg_op1, tcg_op2, fpst);
8269                break;
8270            case 0x18: /* FMAXNM */
8271                gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
8272                break;
8273            case 0x38: /* FMINNM */
8274                gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
8275                break;
8276            case 0x3a: /* FSUB */
8277                gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
8278                break;
8279            case 0x3e: /* FMIN */
8280                gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
8281                break;
8282            case 0x3f: /* FRSQRTS */
8283                gen_helper_rsqrtsf_f32(tcg_res, tcg_op1, tcg_op2, fpst);
8284                break;
8285            case 0x5b: /* FMUL */
8286                gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
8287                break;
8288            case 0x5c: /* FCMGE */
8289                gen_helper_neon_cge_f32(tcg_res, tcg_op1, tcg_op2, fpst);
8290                break;
8291            case 0x5d: /* FACGE */
8292                gen_helper_neon_acge_f32(tcg_res, tcg_op1, tcg_op2, fpst);
8293                break;
8294            case 0x5f: /* FDIV */
8295                gen_helper_vfp_divs(tcg_res, tcg_op1, tcg_op2, fpst);
8296                break;
8297            case 0x7a: /* FABD */
8298                gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
8299                gen_helper_vfp_abss(tcg_res, tcg_res);
8300                break;
8301            case 0x7c: /* FCMGT */
8302                gen_helper_neon_cgt_f32(tcg_res, tcg_op1, tcg_op2, fpst);
8303                break;
8304            case 0x7d: /* FACGT */
8305                gen_helper_neon_acgt_f32(tcg_res, tcg_op1, tcg_op2, fpst);
8306                break;
8307            default:
8308                g_assert_not_reached();
8309            }
8310
8311            if (elements == 1) {
8312                /* scalar single so clear high part */
8313                TCGv_i64 tcg_tmp = tcg_temp_new_i64();
8314
8315                tcg_gen_extu_i32_i64(tcg_tmp, tcg_res);
8316                write_vec_element(s, tcg_tmp, rd, pass, MO_64);
8317                tcg_temp_free_i64(tcg_tmp);
8318            } else {
8319                write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
8320            }
8321
8322            tcg_temp_free_i32(tcg_res);
8323            tcg_temp_free_i32(tcg_op1);
8324            tcg_temp_free_i32(tcg_op2);
8325        }
8326    }
8327
8328    tcg_temp_free_ptr(fpst);
8329
8330    clear_vec_high(s, elements * (size ? 8 : 4) > 8, rd);
8331}
8332
8333/* AdvSIMD scalar three same
8334 *  31 30  29 28       24 23  22  21 20  16 15    11  10 9    5 4    0
8335 * +-----+---+-----------+------+---+------+--------+---+------+------+
8336 * | 0 1 | U | 1 1 1 1 0 | size | 1 |  Rm  | opcode | 1 |  Rn  |  Rd  |
8337 * +-----+---+-----------+------+---+------+--------+---+------+------+
8338 */
8339static void disas_simd_scalar_three_reg_same(DisasContext *s, uint32_t insn)
8340{
8341    int rd = extract32(insn, 0, 5);
8342    int rn = extract32(insn, 5, 5);
8343    int opcode = extract32(insn, 11, 5);
8344    int rm = extract32(insn, 16, 5);
8345    int size = extract32(insn, 22, 2);
8346    bool u = extract32(insn, 29, 1);
8347    TCGv_i64 tcg_rd;
8348
8349    if (opcode >= 0x18) {
8350        /* Floating point: U, size[1] and opcode indicate operation */
8351        int fpopcode = opcode | (extract32(size, 1, 1) << 5) | (u << 6);
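        /* For example FABD is opcode 0x1a with size[1] = 1 and U = 1,
         * which assembles to fpopcode 0x7a below.
         */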
8352        switch (fpopcode) {
8353        case 0x1b: /* FMULX */
8354        case 0x1f: /* FRECPS */
8355        case 0x3f: /* FRSQRTS */
8356        case 0x5d: /* FACGE */
8357        case 0x7d: /* FACGT */
8358        case 0x1c: /* FCMEQ */
8359        case 0x5c: /* FCMGE */
8360        case 0x7c: /* FCMGT */
8361        case 0x7a: /* FABD */
8362            break;
8363        default:
8364            unallocated_encoding(s);
8365            return;
8366        }
8367
8368        if (!fp_access_check(s)) {
8369            return;
8370        }
8371
8372        handle_3same_float(s, extract32(size, 0, 1), 1, fpopcode, rd, rn, rm);
8373        return;
8374    }
8375
8376    switch (opcode) {
8377    case 0x1: /* SQADD, UQADD */
8378    case 0x5: /* SQSUB, UQSUB */
8379    case 0x9: /* SQSHL, UQSHL */
8380    case 0xb: /* SQRSHL, UQRSHL */
8381        break;
8382    case 0x8: /* SSHL, USHL */
8383    case 0xa: /* SRSHL, URSHL */
8384    case 0x6: /* CMGT, CMHI */
8385    case 0x7: /* CMGE, CMHS */
8386    case 0x11: /* CMTST, CMEQ */
8387    case 0x10: /* ADD, SUB (vector) */
8388        if (size != 3) {
8389            unallocated_encoding(s);
8390            return;
8391        }
8392        break;
8393    case 0x16: /* SQDMULH, SQRDMULH (vector) */
8394        if (size != 1 && size != 2) {
8395            unallocated_encoding(s);
8396            return;
8397        }
8398        break;
8399    default:
8400        unallocated_encoding(s);
8401        return;
8402    }
8403
8404    if (!fp_access_check(s)) {
8405        return;
8406    }
8407
8408    tcg_rd = tcg_temp_new_i64();
8409
8410    if (size == 3) {
8411        TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
8412        TCGv_i64 tcg_rm = read_fp_dreg(s, rm);
8413
8414        handle_3same_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rm);
8415        tcg_temp_free_i64(tcg_rn);
8416        tcg_temp_free_i64(tcg_rm);
8417    } else {
8418        /* Do a single operation on the lowest element in the vector.
8419         * We use the standard Neon helpers and rely on 0 OP 0 == 0 with
8420         * no side effects for all these operations.
8421         * OPTME: special-purpose helpers would avoid doing some
8422         * unnecessary work in the helper for the 8 and 16 bit cases.
8423         */
8424        NeonGenTwoOpEnvFn *genenvfn;
8425        TCGv_i32 tcg_rn = tcg_temp_new_i32();
8426        TCGv_i32 tcg_rm = tcg_temp_new_i32();
8427        TCGv_i32 tcg_rd32 = tcg_temp_new_i32();
8428
8429        read_vec_element_i32(s, tcg_rn, rn, 0, size);
8430        read_vec_element_i32(s, tcg_rm, rm, 0, size);
8431
8432        switch (opcode) {
8433        case 0x1: /* SQADD, UQADD */
8434        {
8435            static NeonGenTwoOpEnvFn * const fns[3][2] = {
8436                { gen_helper_neon_qadd_s8, gen_helper_neon_qadd_u8 },
8437                { gen_helper_neon_qadd_s16, gen_helper_neon_qadd_u16 },
8438                { gen_helper_neon_qadd_s32, gen_helper_neon_qadd_u32 },
8439            };
8440            genenvfn = fns[size][u];
8441            break;
8442        }
8443        case 0x5: /* SQSUB, UQSUB */
8444        {
8445            static NeonGenTwoOpEnvFn * const fns[3][2] = {
8446                { gen_helper_neon_qsub_s8, gen_helper_neon_qsub_u8 },
8447                { gen_helper_neon_qsub_s16, gen_helper_neon_qsub_u16 },
8448                { gen_helper_neon_qsub_s32, gen_helper_neon_qsub_u32 },
8449            };
8450            genenvfn = fns[size][u];
8451            break;
8452        }
8453        case 0x9: /* SQSHL, UQSHL */
8454        {
8455            static NeonGenTwoOpEnvFn * const fns[3][2] = {
8456                { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 },
8457                { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 },
8458                { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 },
8459            };
8460            genenvfn = fns[size][u];
8461            break;
8462        }
8463        case 0xb: /* SQRSHL, UQRSHL */
8464        {
8465            static NeonGenTwoOpEnvFn * const fns[3][2] = {
8466                { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 },
8467                { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 },
8468                { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 },
8469            };
8470            genenvfn = fns[size][u];
8471            break;
8472        }
8473        case 0x16: /* SQDMULH, SQRDMULH */
8474        {
8475            static NeonGenTwoOpEnvFn * const fns[2][2] = {
8476                { gen_helper_neon_qdmulh_s16, gen_helper_neon_qrdmulh_s16 },
8477                { gen_helper_neon_qdmulh_s32, gen_helper_neon_qrdmulh_s32 },
8478            };
8479            assert(size == 1 || size == 2);
8480            genenvfn = fns[size - 1][u];
8481            break;
8482        }
8483        default:
8484            g_assert_not_reached();
8485        }
8486
8487        genenvfn(tcg_rd32, cpu_env, tcg_rn, tcg_rm);
8488        tcg_gen_extu_i32_i64(tcg_rd, tcg_rd32);
8489        tcg_temp_free_i32(tcg_rd32);
8490        tcg_temp_free_i32(tcg_rn);
8491        tcg_temp_free_i32(tcg_rm);
8492    }
8493
8494    write_fp_dreg(s, rd, tcg_rd);
8495
8496    tcg_temp_free_i64(tcg_rd);
8497}
8498
8499/* AdvSIMD scalar three same FP16
8500 *  31 30  29 28       24 23  22 21 20  16 15 14 13    11 10  9  5 4  0
8501 * +-----+---+-----------+---+-----+------+-----+--------+---+----+----+
8502 * | 0 1 | U | 1 1 1 1 0 | a | 1 0 |  Rm  | 0 0 | opcode | 1 | Rn | Rd |
8503 * +-----+---+-----------+---+-----+------+-----+--------+---+----+----+
8504 * v: 0101 1110 0100 0000 0000 0100 0000 0000 => 5e400400
8505 * m: 1101 1111 0110 0000 1100 0100 0000 0000 => df60c400
8506 */
8507static void disas_simd_scalar_three_reg_same_fp16(DisasContext *s,
8508                                                  uint32_t insn)
8509{
8510    int rd = extract32(insn, 0, 5);
8511    int rn = extract32(insn, 5, 5);
8512    int opcode = extract32(insn, 11, 3);
8513    int rm = extract32(insn, 16, 5);
8514    bool u = extract32(insn, 29, 1);
8515    bool a = extract32(insn, 23, 1);
8516    int fpopcode = opcode | (a << 3) | (u << 4);
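    /* For example FABD is opcode 0x2 with a = 1 and U = 1, which
     * assembles to fpopcode 0x1a below.
     */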
8517    TCGv_ptr fpst;
8518    TCGv_i32 tcg_op1;
8519    TCGv_i32 tcg_op2;
8520    TCGv_i32 tcg_res;
8521
8522    switch (fpopcode) {
8523    case 0x03: /* FMULX */
8524    case 0x04: /* FCMEQ (reg) */
8525    case 0x07: /* FRECPS */
8526    case 0x0f: /* FRSQRTS */
8527    case 0x14: /* FCMGE (reg) */
8528    case 0x15: /* FACGE */
8529    case 0x1a: /* FABD */
8530    case 0x1c: /* FCMGT (reg) */
8531    case 0x1d: /* FACGT */
8532        break;
8533    default:
8534        unallocated_encoding(s);
8535        return;
8536    }
8537
8538    if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
8539        unallocated_encoding(s);
            return;
8540    }
8541
8542    if (!fp_access_check(s)) {
8543        return;
8544    }
8545
8546    fpst = get_fpstatus_ptr(true);
8547
8548    tcg_op1 = read_fp_hreg(s, rn);
8549    tcg_op2 = read_fp_hreg(s, rm);
8550    tcg_res = tcg_temp_new_i32();
8551
8552    switch (fpopcode) {
8553    case 0x03: /* FMULX */
8554        gen_helper_advsimd_mulxh(tcg_res, tcg_op1, tcg_op2, fpst);
8555        break;
8556    case 0x04: /* FCMEQ (reg) */
8557        gen_helper_advsimd_ceq_f16(tcg_res, tcg_op1, tcg_op2, fpst);
8558        break;
8559    case 0x07: /* FRECPS */
8560        gen_helper_recpsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
8561        break;
8562    case 0x0f: /* FRSQRTS */
8563        gen_helper_rsqrtsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
8564        break;
8565    case 0x14: /* FCMGE (reg) */
8566        gen_helper_advsimd_cge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
8567        break;
8568    case 0x15: /* FACGE */
8569        gen_helper_advsimd_acge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
8570        break;
8571    case 0x1a: /* FABD */
8572        gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
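            /* absolute value: clear the sign bit of the f16 result */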
8573        tcg_gen_andi_i32(tcg_res, tcg_res, 0x7fff);
8574        break;
8575    case 0x1c: /* FCMGT (reg) */
8576        gen_helper_advsimd_cgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
8577        break;
8578    case 0x1d: /* FACGT */
8579        gen_helper_advsimd_acgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
8580        break;
8581    default:
8582        g_assert_not_reached();
8583    }
8584
8585    write_fp_sreg(s, rd, tcg_res);
8586
8588    tcg_temp_free_i32(tcg_res);
8589    tcg_temp_free_i32(tcg_op1);
8590    tcg_temp_free_i32(tcg_op2);
8591    tcg_temp_free_ptr(fpst);
8592}
8593
8594/* AdvSIMD scalar three same extra
8595 *  31 30  29 28       24 23  22  21 20  16  15 14    11  10 9  5 4  0
8596 * +-----+---+-----------+------+---+------+---+--------+---+----+----+
8597 * | 0 1 | U | 1 1 1 1 0 | size | 0 |  Rm  | 1 | opcode | 1 | Rn | Rd |
8598 * +-----+---+-----------+------+---+------+---+--------+---+----+----+
8599 */
8600static void disas_simd_scalar_three_reg_same_extra(DisasContext *s,
8601                                                   uint32_t insn)
8602{
8603    int rd = extract32(insn, 0, 5);
8604    int rn = extract32(insn, 5, 5);
8605    int opcode = extract32(insn, 11, 4);
8606    int rm = extract32(insn, 16, 5);
8607    int size = extract32(insn, 22, 2);
8608    bool u = extract32(insn, 29, 1);
8609    TCGv_i32 ele1, ele2, ele3;
8610    TCGv_i64 res;
8611    int feature;
8612
8613    switch (u * 16 + opcode) {
8614    case 0x10: /* SQRDMLAH (vector) */
8615    case 0x11: /* SQRDMLSH (vector) */
8616        if (size != 1 && size != 2) {
8617            unallocated_encoding(s);
8618            return;
8619        }
8620        feature = ARM_FEATURE_V8_RDM;
8621        break;
8622    default:
8623        unallocated_encoding(s);
8624        return;
8625    }
8626    if (!arm_dc_feature(s, feature)) {
8627        unallocated_encoding(s);
8628        return;
8629    }
8630    if (!fp_access_check(s)) {
8631        return;
8632    }
8633
8634    /* Do a single operation on the lowest element in the vector.
8635     * We use the standard Neon helpers and rely on 0 OP 0 == 0
8636     * with no side effects for all these operations.
8637     * OPTME: special-purpose helpers would avoid doing some
8638     * unnecessary work in the helper for the 16 bit cases.
8639     */
8640    ele1 = tcg_temp_new_i32();
8641    ele2 = tcg_temp_new_i32();
8642    ele3 = tcg_temp_new_i32();
8643
8644    read_vec_element_i32(s, ele1, rn, 0, size);
8645    read_vec_element_i32(s, ele2, rm, 0, size);
8646    read_vec_element_i32(s, ele3, rd, 0, size);
8647
8648    switch (opcode) {
8649    case 0x0: /* SQRDMLAH */
8650        if (size == 1) {
8651            gen_helper_neon_qrdmlah_s16(ele3, cpu_env, ele1, ele2, ele3);
8652        } else {
8653            gen_helper_neon_qrdmlah_s32(ele3, cpu_env, ele1, ele2, ele3);
8654        }
8655        break;
8656    case 0x1: /* SQRDMLSH */
8657        if (size == 1) {
8658            gen_helper_neon_qrdmlsh_s16(ele3, cpu_env, ele1, ele2, ele3);
8659        } else {
8660            gen_helper_neon_qrdmlsh_s32(ele3, cpu_env, ele1, ele2, ele3);
8661        }
8662        break;
8663    default:
8664        g_assert_not_reached();
8665    }
8666    tcg_temp_free_i32(ele1);
8667    tcg_temp_free_i32(ele2);
8668
8669    res = tcg_temp_new_i64();
8670    tcg_gen_extu_i32_i64(res, ele3);
8671    tcg_temp_free_i32(ele3);
8672
8673    write_fp_dreg(s, rd, res);
8674    tcg_temp_free_i64(res);
8675}
8676
8677static void handle_2misc_64(DisasContext *s, int opcode, bool u,
8678                            TCGv_i64 tcg_rd, TCGv_i64 tcg_rn,
8679                            TCGv_i32 tcg_rmode, TCGv_ptr tcg_fpstatus)
8680{
8681    /* Handle 64->64 opcodes which are shared between the scalar and
8682     * vector 2-reg-misc groups. We cover every integer opcode where size == 3
8683     * is valid in either group and also the double-precision fp ops.
8684     * The caller need only provide tcg_rmode and tcg_fpstatus if the op
8685     * requires them.
8686     */
8687    TCGCond cond;
8688
8689    switch (opcode) {
8690    case 0x4: /* CLS, CLZ */
8691        if (u) {
8692            tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64);
8693        } else {
8694            tcg_gen_clrsb_i64(tcg_rd, tcg_rn);
8695        }
8696        break;
8697    case 0x5: /* NOT */
8698        /* This opcode is shared with CNT and RBIT but we have earlier
8699         * enforced that size == 3 if and only if this is the NOT insn.
8700         */
8701        tcg_gen_not_i64(tcg_rd, tcg_rn);
8702        break;
8703    case 0x7: /* SQABS, SQNEG */
8704        if (u) {
8705            gen_helper_neon_qneg_s64(tcg_rd, cpu_env, tcg_rn);
8706        } else {
8707            gen_helper_neon_qabs_s64(tcg_rd, cpu_env, tcg_rn);
8708        }
8709        break;
8710    case 0xa: /* CMLT */
8711        /* 64 bit integer comparison against zero, result is
8712         * test ? (2^64 - 1) : 0. We implement this using setcond (test)
8713         * and then negating, as in handle_3same_64() above.
8714         */
8715        cond = TCG_COND_LT;
8716    do_cmop:
8717        tcg_gen_setcondi_i64(cond, tcg_rd, tcg_rn, 0);
8718        tcg_gen_neg_i64(tcg_rd, tcg_rd);
8719        break;
8720    case 0x8: /* CMGT, CMGE */
8721        cond = u ? TCG_COND_GE : TCG_COND_GT;
8722        goto do_cmop;
8723    case 0x9: /* CMEQ, CMLE */
8724        cond = u ? TCG_COND_LE : TCG_COND_EQ;
8725        goto do_cmop;
8726    case 0xb: /* ABS, NEG */
8727        if (u) {
8728            tcg_gen_neg_i64(tcg_rd, tcg_rn);
8729        } else {
8730            TCGv_i64 tcg_zero = tcg_const_i64(0);
8731            tcg_gen_neg_i64(tcg_rd, tcg_rn);
8732            tcg_gen_movcond_i64(TCG_COND_GT, tcg_rd, tcg_rn, tcg_zero,
8733                                tcg_rn, tcg_rd);
8734            tcg_temp_free_i64(tcg_zero);
8735        }
8736        break;
8737    case 0x2f: /* FABS */
8738        gen_helper_vfp_absd(tcg_rd, tcg_rn);
8739        break;
8740    case 0x6f: /* FNEG */
8741        gen_helper_vfp_negd(tcg_rd, tcg_rn);
8742        break;
8743    case 0x7f: /* FSQRT */
8744        gen_helper_vfp_sqrtd(tcg_rd, tcg_rn, cpu_env);
8745        break;
8746    case 0x1a: /* FCVTNS */
8747    case 0x1b: /* FCVTMS */
8748    case 0x1c: /* FCVTAS */
8749    case 0x3a: /* FCVTPS */
8750    case 0x3b: /* FCVTZS */
8751    {
8752        TCGv_i32 tcg_shift = tcg_const_i32(0);
8753        gen_helper_vfp_tosqd(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
8754        tcg_temp_free_i32(tcg_shift);
8755        break;
8756    }
8757    case 0x5a: /* FCVTNU */
8758    case 0x5b: /* FCVTMU */
8759    case 0x5c: /* FCVTAU */
8760    case 0x7a: /* FCVTPU */
8761    case 0x7b: /* FCVTZU */
8762    {
8763        TCGv_i32 tcg_shift = tcg_const_i32(0);
8764        gen_helper_vfp_touqd(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
8765        tcg_temp_free_i32(tcg_shift);
8766        break;
8767    }
8768    case 0x18: /* FRINTN */
8769    case 0x19: /* FRINTM */
8770    case 0x38: /* FRINTP */
8771    case 0x39: /* FRINTZ */
8772    case 0x58: /* FRINTA */
8773    case 0x79: /* FRINTI */
8774        gen_helper_rintd(tcg_rd, tcg_rn, tcg_fpstatus);
8775        break;
8776    case 0x59: /* FRINTX */
8777        gen_helper_rintd_exact(tcg_rd, tcg_rn, tcg_fpstatus);
8778        break;
8779    default:
8780        g_assert_not_reached();
8781    }
8782}
8783
8784static void handle_2misc_fcmp_zero(DisasContext *s, int opcode,
8785                                   bool is_scalar, bool is_u, bool is_q,
8786                                   int size, int rn, int rd)
8787{
8788    bool is_double = (size == MO_64);
8789    TCGv_ptr fpst;
8790
8791    if (!fp_access_check(s)) {
8792        return;
8793    }
8794
8795    fpst = get_fpstatus_ptr(size == MO_16);
8796
8797    if (is_double) {
8798        TCGv_i64 tcg_op = tcg_temp_new_i64();
8799        TCGv_i64 tcg_zero = tcg_const_i64(0);
8800        TCGv_i64 tcg_res = tcg_temp_new_i64();
8801        NeonGenTwoDoubleOPFn *genfn;
8802        bool swap = false;
8803        int pass;
8804
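        /* There is no helper for the LT/LE comparisons: swap the
         * operands and reuse the GT/GE helper instead, since
         * (x < 0) == (0 > x) and (x <= 0) == (0 >= x).
         */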
8805        switch (opcode) {
8806        case 0x2e: /* FCMLT (zero) */
8807            swap = true;
8808            /* fallthrough */
8809        case 0x2c: /* FCMGT (zero) */
8810            genfn = gen_helper_neon_cgt_f64;
8811            break;
8812        case 0x2d: /* FCMEQ (zero) */
8813            genfn = gen_helper_neon_ceq_f64;
8814            break;
8815        case 0x6d: /* FCMLE (zero) */
8816            swap = true;
8817            /* fall through */
8818        case 0x6c: /* FCMGE (zero) */
8819            genfn = gen_helper_neon_cge_f64;
8820            break;
8821        default:
8822            g_assert_not_reached();
8823        }
8824
8825        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
8826            read_vec_element(s, tcg_op, rn, pass, MO_64);
8827            if (swap) {
8828                genfn(tcg_res, tcg_zero, tcg_op, fpst);
8829            } else {
8830                genfn(tcg_res, tcg_op, tcg_zero, fpst);
8831            }
8832            write_vec_element(s, tcg_res, rd, pass, MO_64);
8833        }
8834        tcg_temp_free_i64(tcg_res);
8835        tcg_temp_free_i64(tcg_zero);
8836        tcg_temp_free_i64(tcg_op);
8837
8838        clear_vec_high(s, !is_scalar, rd);
8839    } else {
8840        TCGv_i32 tcg_op = tcg_temp_new_i32();
8841        TCGv_i32 tcg_zero = tcg_const_i32(0);
8842        TCGv_i32 tcg_res = tcg_temp_new_i32();
8843        NeonGenTwoSingleOPFn *genfn;
8844        bool swap = false;
8845        int pass, maxpasses;
8846
8847        if (size == MO_16) {
8848            switch (opcode) {
8849            case 0x2e: /* FCMLT (zero) */
8850                swap = true;
8851                /* fall through */
8852            case 0x2c: /* FCMGT (zero) */
8853                genfn = gen_helper_advsimd_cgt_f16;
8854                break;
8855            case 0x2d: /* FCMEQ (zero) */
8856                genfn = gen_helper_advsimd_ceq_f16;
8857                break;
8858            case 0x6d: /* FCMLE (zero) */
8859                swap = true;
8860                /* fall through */
8861            case 0x6c: /* FCMGE (zero) */
8862                genfn = gen_helper_advsimd_cge_f16;
8863                break;
8864            default:
8865                g_assert_not_reached();
8866            }
8867        } else {
8868            switch (opcode) {
8869            case 0x2e: /* FCMLT (zero) */
8870                swap = true;
8871                /* fall through */
8872            case 0x2c: /* FCMGT (zero) */
8873                genfn = gen_helper_neon_cgt_f32;
8874                break;
8875            case 0x2d: /* FCMEQ (zero) */
8876                genfn = gen_helper_neon_ceq_f32;
8877                break;
8878            case 0x6d: /* FCMLE (zero) */
8879                swap = true;
8880                /* fall through */
8881            case 0x6c: /* FCMGE (zero) */
8882                genfn = gen_helper_neon_cge_f32;
8883                break;
8884            default:
8885                g_assert_not_reached();
8886            }
8887        }
8888
8889        if (is_scalar) {
8890            maxpasses = 1;
8891        } else {
8892            int vector_size = 8 << is_q;
8893            maxpasses = vector_size >> size;
8894        }
8895
8896        for (pass = 0; pass < maxpasses; pass++) {
8897            read_vec_element_i32(s, tcg_op, rn, pass, size);
8898            if (swap) {
8899                genfn(tcg_res, tcg_zero, tcg_op, fpst);
8900            } else {
8901                genfn(tcg_res, tcg_op, tcg_zero, fpst);
8902            }
8903            if (is_scalar) {
8904                write_fp_sreg(s, rd, tcg_res);
8905            } else {
8906                write_vec_element_i32(s, tcg_res, rd, pass, size);
8907            }
8908        }
8909        tcg_temp_free_i32(tcg_res);
8910        tcg_temp_free_i32(tcg_zero);
8911        tcg_temp_free_i32(tcg_op);
8912        if (!is_scalar) {
8913            clear_vec_high(s, is_q, rd);
8914        }
8915    }
8916
8917    tcg_temp_free_ptr(fpst);
8918}
8919
8920static void handle_2misc_reciprocal(DisasContext *s, int opcode,
8921                                    bool is_scalar, bool is_u, bool is_q,
8922                                    int size, int rn, int rd)
8923{
8924    bool is_double = (size == 3);
8925    TCGv_ptr fpst = get_fpstatus_ptr(false);
8926
8927    if (is_double) {
8928        TCGv_i64 tcg_op = tcg_temp_new_i64();
8929        TCGv_i64 tcg_res = tcg_temp_new_i64();
8930        int pass;
8931
8932        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
8933            read_vec_element(s, tcg_op, rn, pass, MO_64);
8934            switch (opcode) {
8935            case 0x3d: /* FRECPE */
8936                gen_helper_recpe_f64(tcg_res, tcg_op, fpst);
8937                break;
8938            case 0x3f: /* FRECPX */
8939                gen_helper_frecpx_f64(tcg_res, tcg_op, fpst);
8940                break;
8941            case 0x7d: /* FRSQRTE */
8942                gen_helper_rsqrte_f64(tcg_res, tcg_op, fpst);
8943                break;
8944            default:
8945                g_assert_not_reached();
8946            }
8947            write_vec_element(s, tcg_res, rd, pass, MO_64);
8948        }
8949        tcg_temp_free_i64(tcg_res);
8950        tcg_temp_free_i64(tcg_op);
8951        clear_vec_high(s, !is_scalar, rd);
8952    } else {
8953        TCGv_i32 tcg_op = tcg_temp_new_i32();
8954        TCGv_i32 tcg_res = tcg_temp_new_i32();
8955        int pass, maxpasses;
8956
8957        if (is_scalar) {
8958            maxpasses = 1;
8959        } else {
8960            maxpasses = is_q ? 4 : 2;
8961        }
8962
8963        for (pass = 0; pass < maxpasses; pass++) {
8964            read_vec_element_i32(s, tcg_op, rn, pass, MO_32);
8965
8966            switch (opcode) {
8967            case 0x3c: /* URECPE */
8968                gen_helper_recpe_u32(tcg_res, tcg_op, fpst);
8969                break;
8970            case 0x3d: /* FRECPE */
8971                gen_helper_recpe_f32(tcg_res, tcg_op, fpst);
8972                break;
8973            case 0x3f: /* FRECPX */
8974                gen_helper_frecpx_f32(tcg_res, tcg_op, fpst);
8975                break;
8976            case 0x7d: /* FRSQRTE */
8977                gen_helper_rsqrte_f32(tcg_res, tcg_op, fpst);
8978                break;
8979            default:
8980                g_assert_not_reached();
8981            }
8982
8983            if (is_scalar) {
8984                write_fp_sreg(s, rd, tcg_res);
8985            } else {
8986                write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
8987            }
8988        }
8989        tcg_temp_free_i32(tcg_res);
8990        tcg_temp_free_i32(tcg_op);
8991        if (!is_scalar) {
8992            clear_vec_high(s, is_q, rd);
8993        }
8994    }
8995    tcg_temp_free_ptr(fpst);
8996}
8997
8998static void handle_2misc_narrow(DisasContext *s, bool scalar,
8999                                int opcode, bool u, bool is_q,
9000                                int size, int rn, int rd)
9001{
9002    /* Handle 2-reg-misc ops which are narrowing (so each 2*size element
9003     * in the source becomes a size element in the destination).
9004     */
9005    int pass;
9006    TCGv_i32 tcg_res[2];
9007    int destelt = is_q ? 2 : 0;
9008    int passes = scalar ? 1 : 2;
9009
9010    if (scalar) {
9011        tcg_res[1] = tcg_const_i32(0);
9012    }
9013
9014    for (pass = 0; pass < passes; pass++) {
9015        TCGv_i64 tcg_op = tcg_temp_new_i64();
9016        NeonGenNarrowFn *genfn = NULL;
9017        NeonGenNarrowEnvFn *genenvfn = NULL;
9018
9019        if (scalar) {
9020            read_vec_element(s, tcg_op, rn, pass, size + 1);
9021        } else {
9022            read_vec_element(s, tcg_op, rn, pass, MO_64);
9023        }
9024        tcg_res[pass] = tcg_temp_new_i32();
9025
9026        switch (opcode) {
9027        case 0x12: /* XTN, SQXTUN */
9028        {
9029            static NeonGenNarrowFn * const xtnfns[3] = {
9030                gen_helper_neon_narrow_u8,
9031                gen_helper_neon_narrow_u16,
9032                tcg_gen_extrl_i64_i32,
9033            };
9034            static NeonGenNarrowEnvFn * const sqxtunfns[3] = {
9035                gen_helper_neon_unarrow_sat8,
9036                gen_helper_neon_unarrow_sat16,
9037                gen_helper_neon_unarrow_sat32,
9038            };
9039            if (u) {
9040                genenvfn = sqxtunfns[size];
9041            } else {
9042                genfn = xtnfns[size];
9043            }
9044            break;
9045        }
9046        case 0x14: /* SQXTN, UQXTN */
9047        {
9048            static NeonGenNarrowEnvFn * const fns[3][2] = {
9049                { gen_helper_neon_narrow_sat_s8,
9050                  gen_helper_neon_narrow_sat_u8 },
9051                { gen_helper_neon_narrow_sat_s16,
9052                  gen_helper_neon_narrow_sat_u16 },
9053                { gen_helper_neon_narrow_sat_s32,
9054                  gen_helper_neon_narrow_sat_u32 },
9055            };
9056            genenvfn = fns[size][u];
9057            break;
9058        }
9059        case 0x16: /* FCVTN, FCVTN2 */
9060            /* 32 bit to 16 bit or 64 bit to 32 bit float conversion */
9061            if (size == 2) {
9062                gen_helper_vfp_fcvtsd(tcg_res[pass], tcg_op, cpu_env);
9063            } else {
9064                TCGv_i32 tcg_lo = tcg_temp_new_i32();
9065                TCGv_i32 tcg_hi = tcg_temp_new_i32();
9066                TCGv_ptr fpst = get_fpstatus_ptr(false);
9067                TCGv_i32 ahp = get_ahp_flag();
9068
9069                tcg_gen_extr_i64_i32(tcg_lo, tcg_hi, tcg_op);
9070                gen_helper_vfp_fcvt_f32_to_f16(tcg_lo, tcg_lo, fpst, ahp);
9071                gen_helper_vfp_fcvt_f32_to_f16(tcg_hi, tcg_hi, fpst, ahp);
9072                tcg_gen_deposit_i32(tcg_res[pass], tcg_lo, tcg_hi, 16, 16);
9073                tcg_temp_free_i32(tcg_lo);
9074                tcg_temp_free_i32(tcg_hi);
9075                tcg_temp_free_ptr(fpst);
9076                tcg_temp_free_i32(ahp);
9077            }
9078            break;
9079        case 0x56:  /* FCVTXN, FCVTXN2 */
9080            /* 64 bit to 32 bit float conversion with von Neumann
9081             * rounding (round to odd): the low bit of the result is
                 * set whenever the conversion is inexact, which avoids
                 * double rounding when the value is rounded again to a
                 * narrower format later.
9082             */
9083            assert(size == 2);
9084            gen_helper_fcvtx_f64_to_f32(tcg_res[pass], tcg_op, cpu_env);
9085            break;
9086        default:
9087            g_assert_not_reached();
9088        }
9089
9090        if (genfn) {
9091            genfn(tcg_res[pass], tcg_op);
9092        } else if (genenvfn) {
9093            genenvfn(tcg_res[pass], cpu_env, tcg_op);
9094        }
9095
9096        tcg_temp_free_i64(tcg_op);
9097    }
9098
9099    for (pass = 0; pass < 2; pass++) {
9100        write_vec_element_i32(s, tcg_res[pass], rd, destelt + pass, MO_32);
9101        tcg_temp_free_i32(tcg_res[pass]);
9102    }
9103    clear_vec_high(s, is_q, rd);
9104}
9105
9106/* Remaining saturating accumulating ops */
9107static void handle_2misc_satacc(DisasContext *s, bool is_scalar, bool is_u,
9108                                bool is_q, int size, int rn, int rd)
9109{
9110    bool is_double = (size == 3);
9111
9112    if (is_double) {
9113        TCGv_i64 tcg_rn = tcg_temp_new_i64();
9114        TCGv_i64 tcg_rd = tcg_temp_new_i64();
9115        int pass;
9116
9117        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
9118            read_vec_element(s, tcg_rn, rn, pass, MO_64);
9119            read_vec_element(s, tcg_rd, rd, pass, MO_64);
9120
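            /* USQADD accumulates a signed value into an unsigned Rd
             * with unsigned saturation; SUQADD accumulates an unsigned
             * value into a signed Rd with signed saturation.
             */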
9121            if (is_u) { /* USQADD */
9122                gen_helper_neon_uqadd_s64(tcg_rd, cpu_env, tcg_rn, tcg_rd);
9123            } else { /* SUQADD */
9124                gen_helper_neon_sqadd_u64(tcg_rd, cpu_env, tcg_rn, tcg_rd);
9125            }
9126            write_vec_element(s, tcg_rd, rd, pass, MO_64);
9127        }
9128        tcg_temp_free_i64(tcg_rd);
9129        tcg_temp_free_i64(tcg_rn);
9130        clear_vec_high(s, !is_scalar, rd);
9131    } else {
9132        TCGv_i32 tcg_rn = tcg_temp_new_i32();
9133        TCGv_i32 tcg_rd = tcg_temp_new_i32();
9134        int pass, maxpasses;
9135
9136        if (is_scalar) {
9137            maxpasses = 1;
9138        } else {
9139            maxpasses = is_q ? 4 : 2;
9140        }
9141
9142        for (pass = 0; pass < maxpasses; pass++) {
9143            if (is_scalar) {
9144                read_vec_element_i32(s, tcg_rn, rn, pass, size);
9145                read_vec_element_i32(s, tcg_rd, rd, pass, size);
9146            } else {
9147                read_vec_element_i32(s, tcg_rn, rn, pass, MO_32);
9148                read_vec_element_i32(s, tcg_rd, rd, pass, MO_32);
9149            }
9150
9151            if (is_u) { /* USQADD */
9152                switch (size) {
9153                case 0:
9154                    gen_helper_neon_uqadd_s8(tcg_rd, cpu_env, tcg_rn, tcg_rd);
9155                    break;
9156                case 1:
9157                    gen_helper_neon_uqadd_s16(tcg_rd, cpu_env, tcg_rn, tcg_rd);
9158                    break;
9159                case 2:
9160                    gen_helper_neon_uqadd_s32(tcg_rd, cpu_env, tcg_rn, tcg_rd);
9161                    break;
9162                default:
9163                    g_assert_not_reached();
9164                }
9165            } else { /* SUQADD */
9166                switch (size) {
9167                case 0:
9168                    gen_helper_neon_sqadd_u8(tcg_rd, cpu_env, tcg_rn, tcg_rd);
9169                    break;
9170                case 1:
9171                    gen_helper_neon_sqadd_u16(tcg_rd, cpu_env, tcg_rn, tcg_rd);
9172                    break;
9173                case 2:
9174                    gen_helper_neon_sqadd_u32(tcg_rd, cpu_env, tcg_rn, tcg_rd);
9175                    break;
9176                default:
9177                    g_assert_not_reached();
9178                }
9179            }
9180
9181            if (is_scalar) {
9182                TCGv_i64 tcg_zero = tcg_const_i64(0);
9183                write_vec_element(s, tcg_zero, rd, 0, MO_64);
9184                tcg_temp_free_i64(tcg_zero);
9185            }
9186            write_vec_element_i32(s, tcg_rd, rd, pass, MO_32);
9187        }
9188        tcg_temp_free_i32(tcg_rd);
9189        tcg_temp_free_i32(tcg_rn);
9190        clear_vec_high(s, is_q, rd);
9191    }
9192}
9193
9194/* AdvSIMD scalar two reg misc
9195 *  31 30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
9196 * +-----+---+-----------+------+-----------+--------+-----+------+------+
9197 * | 0 1 | U | 1 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
9198 * +-----+---+-----------+------+-----------+--------+-----+------+------+
9199 */
9200static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn)
9201{
9202    int rd = extract32(insn, 0, 5);
9203    int rn = extract32(insn, 5, 5);
9204    int opcode = extract32(insn, 12, 5);
9205    int size = extract32(insn, 22, 2);
9206    bool u = extract32(insn, 29, 1);
9207    bool is_fcvt = false;
9208    int rmode;
9209    TCGv_i32 tcg_rmode;
9210    TCGv_ptr tcg_fpstatus;
9211
9212    switch (opcode) {
9213    case 0x3: /* USQADD / SUQADD */
9214        if (!fp_access_check(s)) {
9215            return;
9216        }
9217        handle_2misc_satacc(s, true, u, false, size, rn, rd);
9218        return;
9219    case 0x7: /* SQABS / SQNEG */
9220        break;
9221    case 0xa: /* CMLT */
9222        if (u) {
9223            unallocated_encoding(s);
9224            return;
9225        }
9226        /* fall through */
9227    case 0x8: /* CMGT, CMGE */
9228    case 0x9: /* CMEQ, CMLE */
9229    case 0xb: /* ABS, NEG */
9230        if (size != 3) {
9231            unallocated_encoding(s);
9232            return;
9233        }
9234        break;
9235    case 0x12: /* SQXTUN */
9236        if (!u) {
9237            unallocated_encoding(s);
9238            return;
9239        }
9240        /* fall through */
9241    case 0x14: /* SQXTN, UQXTN */
9242        if (size == 3) {
9243            unallocated_encoding(s);
9244            return;
9245        }
9246        if (!fp_access_check(s)) {
9247            return;
9248        }
9249        handle_2misc_narrow(s, true, opcode, u, false, size, rn, rd);
9250        return;
9251    case 0xc ... 0xf:
9252    case 0x16 ... 0x1d:
9253    case 0x1f:
9254        /* Floating point: U, size[1] and opcode indicate operation;
9255         * size[0] indicates single or double precision.
9256         */
9257        opcode |= (extract32(size, 1, 1) << 5) | (u << 6);
9258        size = extract32(size, 0, 1) ? 3 : 2;
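        /* For example FCVTZS is opcode 0x1b with size[1] = 1 and
         * U = 0, giving 0x3b below; size[0] selects single (MO_32)
         * or double (MO_64) precision.
         */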
9259        switch (opcode) {
9260        case 0x2c: /* FCMGT (zero) */
9261        case 0x2d: /* FCMEQ (zero) */
9262        case 0x2e: /* FCMLT (zero) */
9263        case 0x6c: /* FCMGE (zero) */
9264        case 0x6d: /* FCMLE (zero) */
9265            handle_2misc_fcmp_zero(s, opcode, true, u, true, size, rn, rd);
9266            return;
9267        case 0x1d: /* SCVTF */
9268        case 0x5d: /* UCVTF */
9269        {
9270            bool is_signed = (opcode == 0x1d);
9271            if (!fp_access_check(s)) {
9272                return;
9273            }
9274            handle_simd_intfp_conv(s, rd, rn, 1, is_signed, 0, size);
9275            return;
9276        }
9277        case 0x3d: /* FRECPE */
9278        case 0x3f: /* FRECPX */
9279        case 0x7d: /* FRSQRTE */
9280            if (!fp_access_check(s)) {
9281                return;
9282            }
9283            handle_2misc_reciprocal(s, opcode, true, u, true, size, rn, rd);
9284            return;
9285        case 0x1a: /* FCVTNS */
9286        case 0x1b: /* FCVTMS */
9287        case 0x3a: /* FCVTPS */
9288        case 0x3b: /* FCVTZS */
9289        case 0x5a: /* FCVTNU */
9290        case 0x5b: /* FCVTMU */
9291        case 0x7a: /* FCVTPU */
9292        case 0x7b: /* FCVTZU */
9293            is_fcvt = true;
9294            rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
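            /* Bits 5 and 0 of the opcode give rmode 0/1/2/3, i.e.
             * FPROUNDING_TIEEVEN/POSINF/NEGINF/ZERO for the N/P/M/Z
             * variants respectively.
             */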
9295            break;
9296        case 0x1c: /* FCVTAS */
9297        case 0x5c: /* FCVTAU */
9298            /* TIEAWAY doesn't fit in the usual rounding mode encoding */
9299            is_fcvt = true;
9300            rmode = FPROUNDING_TIEAWAY;
9301            break;
9302        case 0x56: /* FCVTXN, FCVTXN2 */
9303            if (size == 2) {
9304                unallocated_encoding(s);
9305                return;
9306            }
9307            if (!fp_access_check(s)) {
9308                return;
9309            }
9310            handle_2misc_narrow(s, true, opcode, u, false, size - 1, rn, rd);
9311            return;
9312        default:
9313            unallocated_encoding(s);
9314            return;
9315        }
9316        break;
9317    default:
9318        unallocated_encoding(s);
9319        return;
9320    }
9321
9322    if (!fp_access_check(s)) {
9323        return;
9324    }
9325
9326    if (is_fcvt) {
9327        tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
9328        tcg_fpstatus = get_fpstatus_ptr(false);
9329        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
9330    } else {
9331        tcg_rmode = NULL;
9332        tcg_fpstatus = NULL;
9333    }
9334
9335    if (size == 3) {
9336        TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
9337        TCGv_i64 tcg_rd = tcg_temp_new_i64();
9338
9339        handle_2misc_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rmode, tcg_fpstatus);
9340        write_fp_dreg(s, rd, tcg_rd);
9341        tcg_temp_free_i64(tcg_rd);
9342        tcg_temp_free_i64(tcg_rn);
9343    } else {
9344        TCGv_i32 tcg_rn = tcg_temp_new_i32();
9345        TCGv_i32 tcg_rd = tcg_temp_new_i32();
9346
9347        read_vec_element_i32(s, tcg_rn, rn, 0, size);
9348
9349        switch (opcode) {
9350        case 0x7: /* SQABS, SQNEG */
9351        {
9352            NeonGenOneOpEnvFn *genfn;
9353            static NeonGenOneOpEnvFn * const fns[3][2] = {
9354                { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 },
9355                { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 },
9356                { gen_helper_neon_qabs_s32, gen_helper_neon_qneg_s32 },
9357            };
9358            genfn = fns[size][u];
9359            genfn(tcg_rd, cpu_env, tcg_rn);
9360            break;
9361        }
9362        case 0x1a: /* FCVTNS */
9363        case 0x1b: /* FCVTMS */
9364        case 0x1c: /* FCVTAS */
9365        case 0x3a: /* FCVTPS */
9366        case 0x3b: /* FCVTZS */
9367        {
9368            TCGv_i32 tcg_shift = tcg_const_i32(0);
9369            gen_helper_vfp_tosls(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
9370            tcg_temp_free_i32(tcg_shift);
9371            break;
9372        }
9373        case 0x5a: /* FCVTNU */
9374        case 0x5b: /* FCVTMU */
9375        case 0x5c: /* FCVTAU */
9376        case 0x7a: /* FCVTPU */
9377        case 0x7b: /* FCVTZU */
9378        {
9379            TCGv_i32 tcg_shift = tcg_const_i32(0);
9380            gen_helper_vfp_touls(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
9381            tcg_temp_free_i32(tcg_shift);
9382            break;
9383        }
9384        default:
9385            g_assert_not_reached();
9386        }
9387
9388        write_fp_sreg(s, rd, tcg_rd);
9389        tcg_temp_free_i32(tcg_rd);
9390        tcg_temp_free_i32(tcg_rn);
9391    }
9392
9393    if (is_fcvt) {
9394        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
9395        tcg_temp_free_i32(tcg_rmode);
9396        tcg_temp_free_ptr(tcg_fpstatus);
9397    }
9398}
9399
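    /* Per-element expanders for the accumulating shifts (SSRA/USRA):
     * shift the addend in place, then add it into the existing
     * destination. The 8 and 16 bit _i64 forms use the tcg_gen_vec_*
     * helpers, which operate on all lanes packed into one 64-bit value.
     */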
9400static void gen_ssra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
9401{
9402    tcg_gen_vec_sar8i_i64(a, a, shift);
9403    tcg_gen_vec_add8_i64(d, d, a);
9404}
9405
9406static void gen_ssra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
9407{
9408    tcg_gen_vec_sar16i_i64(a, a, shift);
9409    tcg_gen_vec_add16_i64(d, d, a);
9410}
9411
9412static void gen_ssra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
9413{
9414    tcg_gen_sari_i32(a, a, shift);
9415    tcg_gen_add_i32(d, d, a);
9416}
9417
9418static void gen_ssra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
9419{
9420    tcg_gen_sari_i64(a, a, shift);
9421    tcg_gen_add_i64(d, d, a);
9422}
9423
9424static void gen_ssra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
9425{
9426    tcg_gen_sari_vec(vece, a, a, sh);
9427    tcg_gen_add_vec(vece, d, d, a);
9428}
9429
9430static void gen_usra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
9431{
9432    tcg_gen_vec_shr8i_i64(a, a, shift);
9433    tcg_gen_vec_add8_i64(d, d, a);
9434}
9435
9436static void gen_usra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
9437{
9438    tcg_gen_vec_shr16i_i64(a, a, shift);
9439    tcg_gen_vec_add16_i64(d, d, a);
9440}
9441
9442static void gen_usra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
9443{
9444    tcg_gen_shri_i32(a, a, shift);
9445    tcg_gen_add_i32(d, d, a);
9446}
9447
9448static void gen_usra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
9449{
9450    tcg_gen_shri_i64(a, a, shift);
9451    tcg_gen_add_i64(d, d, a);
9452}
9453
9454static void gen_usra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
9455{
9456    tcg_gen_shri_vec(vece, a, a, sh);
9457    tcg_gen_add_vec(vece, d, d, a);
9458}
9459
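    /* Shift-right-and-insert: only the bit positions that the shifted
     * source can occupy are replaced in the destination, so the mask is
     * the per-lane value 0xff >> shift replicated across the word:
     * e.g. dup_const(MO_8, 0xff >> 3) == 0x1f1f1f1f1f1f1f1f.
     */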
9460static void gen_shr8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
9461{
9462    uint64_t mask = dup_const(MO_8, 0xff >> shift);
9463    TCGv_i64 t = tcg_temp_new_i64();
9464
9465    tcg_gen_shri_i64(t, a, shift);
9466    tcg_gen_andi_i64(t, t, mask);
9467    tcg_gen_andi_i64(d, d, ~mask);
9468    tcg_gen_or_i64(d, d, t);
9469    tcg_temp_free_i64(t);
9470}
9471
9472static void gen_shr16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
9473{
9474    uint64_t mask = dup_const(MO_16, 0xffff >> shift);
9475    TCGv_i64 t = tcg_temp_new_i64();
9476
9477    tcg_gen_shri_i64(t, a, shift);
9478    tcg_gen_andi_i64(t, t, mask);
9479    tcg_gen_andi_i64(d, d, ~mask);
9480    tcg_gen_or_i64(d, d, t);
9481    tcg_temp_free_i64(t);
9482}
9483
9484static void gen_shr32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
9485{
9486    tcg_gen_shri_i32(a, a, shift);
9487    tcg_gen_deposit_i32(d, d, a, 0, 32 - shift);
9488}
9489
9490static void gen_shr64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
9491{
9492    tcg_gen_shri_i64(a, a, shift);
9493    tcg_gen_deposit_i64(d, d, a, 0, 64 - shift);
9494}
9495
9496static void gen_shr_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
9497{
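        /* All-ones for the element width, computed as
         * (2 << (bits - 1)) - 1 rather than (1 << bits) - 1 so that
         * the 64-bit case does not shift by 64, which is undefined
         * behaviour in C.
         */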
9498    uint64_t mask = (2ull << ((8 << vece) - 1)) - 1;
9499    TCGv_vec t = tcg_temp_new_vec_matching(d);
9500    TCGv_vec m = tcg_temp_new_vec_matching(d);
9501
9502    tcg_gen_dupi_vec(vece, m, mask ^ (mask >> sh));
9503    tcg_gen_shri_vec(vece, t, a, sh);
9504    tcg_gen_and_vec(vece, d, d, m);
9505    tcg_gen_or_vec(vece, d, d, t);
9506
9507    tcg_temp_free_vec(t);
9508    tcg_temp_free_vec(m);
9509}
9510
9511/* SSHR[RA]/USHR[RA] - Vector shift right (optional rounding/accumulate) */
9512static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
9513                                 int immh, int immb, int opcode, int rn, int rd)
9514{
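        /* Each GVecGen2i entry pairs a scalar fallback (.fni8/.fni4)
         * with a host-vector expander (.fniv) for one element size;
         * .load_dest is set because all of these ops read the existing
         * destination, either to accumulate or to insert bits into it.
         */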
9515    static const GVecGen2i ssra_op[4] = {
9516        { .fni8 = gen_ssra8_i64,
9517          .fniv = gen_ssra_vec,
9518          .load_dest = true,
9519          .opc = INDEX_op_sari_vec,
9520          .vece = MO_8 },
9521        { .fni8 = gen_ssra16_i64,
9522          .fniv = gen_ssra_vec,
9523          .load_dest = true,
9524          .opc = INDEX_op_sari_vec,
9525          .vece = MO_16 },
9526        { .fni4 = gen_ssra32_i32,
9527          .fniv = gen_ssra_vec,
9528          .load_dest = true,
9529          .opc = INDEX_op_sari_vec,
9530          .vece = MO_32 },
9531        { .fni8 = gen_ssra64_i64,
9532          .fniv = gen_ssra_vec,
9533          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
9534          .load_dest = true,
9535          .opc = INDEX_op_sari_vec,
9536          .vece = MO_64 },
9537    };
9538    static const GVecGen2i usra_op[4] = {
9539        { .fni8 = gen_usra8_i64,
9540          .fniv = gen_usra_vec,
9541          .load_dest = true,
9542          .opc = INDEX_op_shri_vec,
9543          .vece = MO_8, },
9544        { .fni8 = gen_usra16_i64,
9545          .fniv = gen_usra_vec,
9546          .load_dest = true,
9547          .opc = INDEX_op_shri_vec,
9548          .vece = MO_16, },
9549        { .fni4 = gen_usra32_i32,
9550          .fniv = gen_usra_vec,
9551          .load_dest = true,
9552          .opc = INDEX_op_shri_vec,
9553          .vece = MO_32, },
9554        { .fni8 = gen_usra64_i64,
9555          .fniv = gen_usra_vec,
9556          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
9557          .load_dest = true,
9558          .opc = INDEX_op_shri_vec,
9559          .vece = MO_64, },
9560    };
9561    static const GVecGen2i sri_op[4] = {
9562        { .fni8 = gen_shr8_ins_i64,
9563          .fniv = gen_shr_ins_vec,
9564          .load_dest = true,
9565          .opc = INDEX_op_shri_vec,
9566          .vece = MO_8 },
9567        { .fni8 = gen_shr16_ins_i64,
9568          .fniv = gen_shr_ins_vec,
9569          .load_dest = true,
9570          .opc = INDEX_op_shri_vec,
9571          .vece = MO_16 },
9572        { .fni4 = gen_shr32_ins_i32,
9573          .fniv = gen_shr_ins_vec,
9574          .load_dest = true,
9575          .opc = INDEX_op_shri_vec,
9576          .vece = MO_32 },
9577        { .fni8 = gen_shr64_ins_i64,
9578          .fniv = gen_shr_ins_vec,
9579          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
9580          .load_dest = true,
9581          .opc = INDEX_op_shri_vec,
9582          .vece = MO_64 },
9583    };
9584
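        /* immh:immb encodes (2 * esize) - shift: e.g. for byte elements
         * (immh == 0001b) a shift of 8 is immhb == 8 and a shift of 1
         * is immhb == 15.
         */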
9585    int size = 32 - clz32(immh) - 1;
9586    int immhb = immh << 3 | immb;
9587    int shift = 2 * (8 << size) - immhb;
9588    bool accumulate = false;
9589    int dsize = is_q ? 128 : 64;
9590    int esize = 8 << size;
9591    int elements = dsize/esize;
9592    TCGMemOp memop = size | (is_u ? 0 : MO_SIGN);
9593    TCGv_i64 tcg_rn = new_tmp_a64(s);
9594    TCGv_i64 tcg_rd = new_tmp_a64(s);
9595    TCGv_i64 tcg_round;
9596    uint64_t round_const;
9597    int i;
9598
9599    if (extract32(immh, 3, 1) && !is_q) {
9600        unallocated_encoding(s);
9601        return;
9602    }
9603    tcg_debug_assert(size <= 3);
9604
9605    if (!fp_access_check(s)) {
9606        return;
9607    }
9608
9609    switch (opcode) {
9610    case 0x02: /* SSRA / USRA (accumulate) */
9611        if (is_u) {
9612            /* Shift count same as element size produces zero to add.  */
9613            if (shift == 8 << size) {
9614                goto done;
9615            }
9616            gen_gvec_op2i(s, is_q, rd, rn, shift, &usra_op[size]);
9617        } else {
9618            /* Shift count same as element size produces all sign to add.  */
9619            if (shift == 8 << size) {
9620                shift -= 1;
9621            }
9622            gen_gvec_op2i(s, is_q, rd, rn, shift, &ssra_op[size]);
9623        }
9624        return;
9625    case 0x08: /* SRI */
9626        /* Shift count same as element size is valid but does nothing.  */
9627        if (shift == 8 << size) {
9628            goto done;
9629        }
9630        gen_gvec_op2i(s, is_q, rd, rn, shift, &sri_op[size]);
9631        return;
9632
9633    case 0x00: /* SSHR / USHR */
9634        if (is_u) {
9635            if (shift == 8 << size) {
9636                /* Shift count equal to the element size produces zero.  */
9637                tcg_gen_gvec_dup8i(vec_full_reg_offset(s, rd),
9638                                   is_q ? 16 : 8, vec_full_reg_size(s), 0);
9639            } else {
9640                gen_gvec_fn2i(s, is_q, rd, rn, shift, tcg_gen_gvec_shri, size);
9641            }
9642        } else {
9643            /* Shift count equal to the element size produces all sign bits.  */
9644            if (shift == 8 << size) {
9645                shift -= 1;
9646            }
9647            gen_gvec_fn2i(s, is_q, rd, rn, shift, tcg_gen_gvec_sari, size);
9648        }
9649        return;
9650
9651    case 0x04: /* SRSHR / URSHR (rounding) */
9652        break;
9653    case 0x06: /* SRSRA / URSRA (accum + rounding) */
9654        accumulate = true;
9655        break;
9656    default:
9657        g_assert_not_reached();
9658    }
9659
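        /* The rounding forms add 1 << (shift - 1) to the element before
         * shifting, e.g. for shift == 3 the constant 4 rounds values
         * whose shifted-out bits are 0b100 or more up to the next result.
         */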
9660    round_const = 1ULL << (shift - 1);
9661    tcg_round = tcg_const_i64(round_const);
9662
9663    for (i = 0; i < elements; i++) {
9664        read_vec_element(s, tcg_rn, rn, i, memop);
9665        if (accumulate) {
9666            read_vec_element(s, tcg_rd, rd, i, memop);
9667        }
9668
9669        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
9670                                accumulate, is_u, size, shift);
9671
9672        write_vec_element(s, tcg_rd, rd, i, size);
9673    }
9674    tcg_temp_free_i64(tcg_round);
9675
9676 done:
9677    clear_vec_high(s, is_q, rd);
9678}
9679
9680static void gen_shl8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
9681{
9682    uint64_t mask = dup_const(MO_8, 0xff << shift);
9683    TCGv_i64 t = tcg_temp_new_i64();
9684
9685    tcg_gen_shli_i64(t, a, shift);
9686    tcg_gen_andi_i64(t, t, mask);
9687    tcg_gen_andi_i64(d, d, ~mask);
9688    tcg_gen_or_i64(d, d, t);
9689    tcg_temp_free_i64(t);
9690}
9691
9692static void gen_shl16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
9693{
9694    uint64_t mask = dup_const(MO_16, 0xffff << shift);
9695    TCGv_i64 t = tcg_temp_new_i64();
9696
9697    tcg_gen_shli_i64(t, a, shift);
9698    tcg_gen_andi_i64(t, t, mask);
9699    tcg_gen_andi_i64(d, d, ~mask);
9700    tcg_gen_or_i64(d, d, t);
9701    tcg_temp_free_i64(t);
9702}
9703
9704static void gen_shl32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
9705{
9706    tcg_gen_deposit_i32(d, d, a, shift, 32 - shift);
9707}
9708
9709static void gen_shl64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
9710{
9711    tcg_gen_deposit_i64(d, d, a, shift, 64 - shift);
9712}
9713
9714static void gen_shl_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
9715{
9716    uint64_t mask = (1ull << sh) - 1;
9717    TCGv_vec t = tcg_temp_new_vec_matching(d);
9718    TCGv_vec m = tcg_temp_new_vec_matching(d);
9719
9720    tcg_gen_dupi_vec(vece, m, mask);
9721    tcg_gen_shli_vec(vece, t, a, sh);
9722    tcg_gen_and_vec(vece, d, d, m);
9723    tcg_gen_or_vec(vece, d, d, t);
9724
9725    tcg_temp_free_vec(t);
9726    tcg_temp_free_vec(m);
9727}
9728
9729/* SHL/SLI - Vector shift left */
9730static void handle_vec_simd_shli(DisasContext *s, bool is_q, bool insert,
9731                                 int immh, int immb, int opcode, int rn, int rd)
9732{
9733    static const GVecGen2i shi_op[4] = {
9734        { .fni8 = gen_shl8_ins_i64,
9735          .fniv = gen_shl_ins_vec,
9736          .opc = INDEX_op_shli_vec,
9737          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
9738          .load_dest = true,
9739          .vece = MO_8 },
9740        { .fni8 = gen_shl16_ins_i64,
9741          .fniv = gen_shl_ins_vec,
9742          .opc = INDEX_op_shli_vec,
9743          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
9744          .load_dest = true,
9745          .vece = MO_16 },
9746        { .fni4 = gen_shl32_ins_i32,
9747          .fniv = gen_shl_ins_vec,
9748          .opc = INDEX_op_shli_vec,
9749          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
9750          .load_dest = true,
9751          .vece = MO_32 },
9752        { .fni8 = gen_shl64_ins_i64,
9753          .fniv = gen_shl_ins_vec,
9754          .opc = INDEX_op_shli_vec,
9755          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
9756          .load_dest = true,
9757          .vece = MO_64 },
9758    };
9759    int size = 32 - clz32(immh) - 1;
9760    int immhb = immh << 3 | immb;
9761    int shift = immhb - (8 << size);
9762
9763    if (extract32(immh, 3, 1) && !is_q) {
9764        unallocated_encoding(s);
9765        return;
9766    }
9767
9768    if (size > 3 && !is_q) {
9769        unallocated_encoding(s);
9770        return;
9771    }
9772
9773    if (!fp_access_check(s)) {
9774        return;
9775    }
9776
9777    if (insert) {
9778        gen_gvec_op2i(s, is_q, rd, rn, shift, &shi_op[size]);
9779    } else {
9780        gen_gvec_fn2i(s, is_q, rd, rn, shift, tcg_gen_gvec_shli, size);
9781    }
9782}
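
/* Illustrative only: the immh:immb decode used above, as a standalone
 * sketch.  For SHL Vd.4S, Vn.4S, #3 the immediate field is
 * immh:immb = 0100:011, so size = 2 (32-bit elements) and
 * shift = 0b0100011 - 32 = 3.  The helper name is hypothetical.
 */
static inline void ref_decode_shift_left_imm(int immh, int immb,
                                             int *size, int *shift)
{
    *size = 32 - clz32(immh) - 1;    /* highest set bit selects esize */
    *shift = ((immh << 3) | immb) - (8 << *size);
}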
9783
9784/* USHLL/SHLL - Vector shift left with widening */
9785static void handle_vec_simd_wshli(DisasContext *s, bool is_q, bool is_u,
9786                                 int immh, int immb, int opcode, int rn, int rd)
9787{
9788    int size = 32 - clz32(immh) - 1;
9789    int immhb = immh << 3 | immb;
9790    int shift = immhb - (8 << size);
9791    int dsize = 64;
9792    int esize = 8 << size;
9793    int elements = dsize / esize;
9794    TCGv_i64 tcg_rn = new_tmp_a64(s);
9795    TCGv_i64 tcg_rd = new_tmp_a64(s);
9796    int i;
9797
9798    if (size >= 3) {
9799        unallocated_encoding(s);
9800        return;
9801    }
9802
9803    if (!fp_access_check(s)) {
9804        return;
9805    }
9806
9807    /* For the LL variants the store is larger than the load, so if
9808     * rd == rn we would overwrite parts of our input.  Load the whole
9809     * input up front and extract elements with shifts in the main loop.
9810     */
9811    read_vec_element(s, tcg_rn, rn, is_q ? 1 : 0, MO_64);
9812
9813    for (i = 0; i < elements; i++) {
9814        tcg_gen_shri_i64(tcg_rd, tcg_rn, i * esize);
9815        ext_and_shift_reg(tcg_rd, tcg_rd, size | (!is_u << 2), 0);
9816        tcg_gen_shli_i64(tcg_rd, tcg_rd, shift);
9817        write_vec_element(s, tcg_rd, rd, i, size + 1);
9818    }
9819}
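
/* Illustrative only: a scalar model of one USHLL/SSHLL lane for the
 * size == 2 case handled above - the 32-bit source element is widened
 * (zero- or sign-extended according to is_u) and then shifted left into
 * a 64-bit result element.  The name is hypothetical.
 */
static inline uint64_t ref_shll_lane32(uint32_t elem, bool is_u,
                                       unsigned shift)
{
    uint64_t wide = is_u ? (uint64_t)elem : (uint64_t)(int64_t)(int32_t)elem;

    return wide << shift;    /* written back at twice the element size */
}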
9820
9821/* SHRN/RSHRN - Shift right with narrowing (and potential rounding) */
9822static void handle_vec_simd_shrn(DisasContext *s, bool is_q,
9823                                 int immh, int immb, int opcode, int rn, int rd)
9824{
9825    int immhb = immh << 3 | immb;
9826    int size = 32 - clz32(immh) - 1;
9827    int dsize = 64;
9828    int esize = 8 << size;
9829    int elements = dsize / esize;
9830    int shift = (2 * esize) - immhb;
9831    bool round = extract32(opcode, 0, 1);
9832    TCGv_i64 tcg_rn, tcg_rd, tcg_final;
9833    TCGv_i64 tcg_round;
9834    int i;
9835
9836    if (extract32(immh, 3, 1)) {
9837        unallocated_encoding(s);
9838        return;
9839    }
9840
9841    if (!fp_access_check(s)) {
9842        return;
9843    }
9844
9845    tcg_rn = tcg_temp_new_i64();
9846    tcg_rd = tcg_temp_new_i64();
9847    tcg_final = tcg_temp_new_i64();
9848    read_vec_element(s, tcg_final, rd, is_q ? 1 : 0, MO_64);
9849
9850    if (round) {
9851        uint64_t round_const = 1ULL << (shift - 1);
9852        tcg_round = tcg_const_i64(round_const);
9853    } else {
9854        tcg_round = NULL;
9855    }
9856
9857    for (i = 0; i < elements; i++) {
9858        read_vec_element(s, tcg_rn, rn, i, size + 1);
9859        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
9860                                false, true, size + 1, shift);
9861
9862        tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
9863    }
9864
9865    write_vec_element(s, tcg_final, rd, is_q ? 1 : 0, MO_64);
9870    if (round) {
9871        tcg_temp_free_i64(tcg_round);
9872    }
9873    tcg_temp_free_i64(tcg_rn);
9874    tcg_temp_free_i64(tcg_rd);
9875    tcg_temp_free_i64(tcg_final);
9876
9877    clear_vec_high(s, is_q, rd);
9878}
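
/* Illustrative only: a scalar model of one SHRN/RSHRN lane as computed
 * by handle_shri_with_rndacc above.  Rounding adds half of the last bit
 * shifted out before the shift, so e.g. ref_shrn_lane(0x12345678, 4, true)
 * is 0x1234568 where plain truncation would give 0x1234567.  The name is
 * hypothetical.
 */
static inline uint32_t ref_shrn_lane(uint64_t wide, unsigned shift, bool round)
{
    if (round) {
        wide += 1ULL << (shift - 1);    /* the tcg_round constant above */
    }
    return wide >> shift;    /* caller deposits this into the result */
}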
9879
9881/* AdvSIMD shift by immediate
9882 *  31  30   29 28         23 22  19 18  16 15    11  10 9    5 4    0
9883 * +---+---+---+-------------+------+------+--------+---+------+------+
9884 * | 0 | Q | U | 0 1 1 1 1 0 | immh | immb | opcode | 1 |  Rn  |  Rd  |
9885 * +---+---+---+-------------+------+------+--------+---+------+------+
9886 */
9887static void disas_simd_shift_imm(DisasContext *s, uint32_t insn)
9888{
9889    int rd = extract32(insn, 0, 5);
9890    int rn = extract32(insn, 5, 5);
9891    int opcode = extract32(insn, 11, 5);
9892    int immb = extract32(insn, 16, 3);
9893    int immh = extract32(insn, 19, 4);
9894    bool is_u = extract32(insn, 29, 1);
9895    bool is_q = extract32(insn, 30, 1);
9896
9897    switch (opcode) {
9898    case 0x08: /* SRI */
9899        if (!is_u) {
9900            unallocated_encoding(s);
9901            return;
9902        }
9903        /* fall through */
9904    case 0x00: /* SSHR / USHR */
9905    case 0x02: /* SSRA / USRA (accumulate) */
9906    case 0x04: /* SRSHR / URSHR (rounding) */
9907    case 0x06: /* SRSRA / URSRA (accum + rounding) */
9908        handle_vec_simd_shri(s, is_q, is_u, immh, immb, opcode, rn, rd);
9909        break;
9910    case 0x0a: /* SHL / SLI */
9911        handle_vec_simd_shli(s, is_q, is_u, immh, immb, opcode, rn, rd);
9912        break;
9913    case 0x10: /* SHRN / SQSHRUN */
9914    case 0x11: /* RSHRN / SQRSHRUN */
9915        if (is_u) {
9916            handle_vec_simd_sqshrn(s, false, is_q, false, true, immh, immb,
9917                                   opcode, rn, rd);
9918        } else {
9919            handle_vec_simd_shrn(s, is_q, immh, immb, opcode, rn, rd);
9920        }
9921        break;
9922    case 0x12: /* SQSHRN / UQSHRN */
9923    case 0x13: /* SQRSHRN / UQRSHRN */
9924        handle_vec_simd_sqshrn(s, false, is_q, is_u, is_u, immh, immb,
9925                               opcode, rn, rd);
9926        break;
9927    case 0x14: /* SSHLL / USHLL */
9928        handle_vec_simd_wshli(s, is_q, is_u, immh, immb, opcode, rn, rd);
9929        break;
9930    case 0x1c: /* SCVTF / UCVTF */
9931        handle_simd_shift_intfp_conv(s, false, is_q, is_u, immh, immb,
9932                                     opcode, rn, rd);
9933        break;
9934    case 0xc: /* SQSHLU */
9935        if (!is_u) {
9936            unallocated_encoding(s);
9937            return;
9938        }
9939        handle_simd_qshl(s, false, is_q, false, true, immh, immb, rn, rd);
9940        break;
9941    case 0xe: /* SQSHL, UQSHL */
9942        handle_simd_qshl(s, false, is_q, is_u, is_u, immh, immb, rn, rd);
9943        break;
9944    case 0x1f: /* FCVTZS/ FCVTZU */
9945        handle_simd_shift_fpint_conv(s, false, is_q, is_u, immh, immb, rn, rd);
9946        return;
9947    default:
9948        unallocated_encoding(s);
9949        return;
9950    }
9951}
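
/* Illustrative only: how the fields decoded above pack into an
 * instruction word.  SSHR v0.4s, v1.4s, #8, for example, has Q = 1,
 * U = 0, immh:immb = 0111:000 (size 2, shift = 64 - 56 = 8) and
 * opcode = 0.  The helper name is hypothetical.
 */
static inline uint32_t ref_encode_shift_imm(int q, int u, int immh, int immb,
                                            int opcode, int rn, int rd)
{
    return (q << 30) | (u << 29) | (0x1e << 23) | (immh << 19) |
           (immb << 16) | (opcode << 11) | (1 << 10) | (rn << 5) | rd;
}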
9952
9953/* Generate code to do a "long" addition or subtraction, i.e. one done in
9954 * TCGv_i64 on vector lanes twice the width specified by size.
9955 */
9956static void gen_neon_addl(int size, bool is_sub, TCGv_i64 tcg_res,
9957                          TCGv_i64 tcg_op1, TCGv_i64 tcg_op2)
9958{
9959    static NeonGenTwo64OpFn * const fns[3][2] = {
9960        { gen_helper_neon_addl_u16, gen_helper_neon_subl_u16 },
9961        { gen_helper_neon_addl_u32, gen_helper_neon_subl_u32 },
9962        { tcg_gen_add_i64, tcg_gen_sub_i64 },
9963    };
9964    NeonGenTwo64OpFn *genfn;
9965    assert(size < 3);
9966
9967    genfn = fns[size][is_sub];
9968    genfn(tcg_res, tcg_op1, tcg_op2);
9969}
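
/* Illustrative only: what the size == 0 entry above computes - the
 * 64-bit value is treated as four independent 16-bit lanes (the widened
 * byte elements) and added without carries crossing lane boundaries.
 * The name is hypothetical; QEMU's real helper lives in neon_helper.c.
 */
static inline uint64_t ref_neon_addl_u16(uint64_t a, uint64_t b)
{
    const uint64_t low15 = 0x7fff7fff7fff7fffULL;
    uint64_t sum = (a & low15) + (b & low15);    /* no inter-lane carry */

    return sum ^ ((a ^ b) & ~low15);    /* add the lane top bits mod 2 */
}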
9970
9971static void handle_3rd_widening(DisasContext *s, int is_q, int is_u, int size,
9972                                int opcode, int rd, int rn, int rm)
9973{
9974    /* 3-reg-different widening insns: 64 x 64 -> 128 */
9975    TCGv_i64 tcg_res[2];
9976    int pass, accop;
9977
9978    tcg_res[0] = tcg_temp_new_i64();
9979    tcg_res[1] = tcg_temp_new_i64();
9980
9981    /* Does this op do an adding accumulate, a subtracting accumulate,
9982     * or no accumulate at all?
9983     */
9984    switch (opcode) {
9985    case 5:
9986    case 8:
9987    case 9:
9988        accop = 1;
9989        break;
9990    case 10:
9991    case 11:
9992        accop = -1;
9993        break;
9994    default:
9995        accop = 0;
9996        break;
9997    }
9998
9999    if (accop != 0) {
10000        read_vec_element(s, tcg_res[0], rd, 0, MO_64);
10001        read_vec_element(s, tcg_res[1], rd, 1, MO_64);
10002    }
10003
10004    /* size == 2 means two 32x32->64 operations; this is worth special
10005     * casing because we can generally handle it inline.
10006     */
10007    if (size == 2) {
10008        for (pass = 0; pass < 2; pass++) {
10009            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
10010            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
10011            TCGv_i64 tcg_passres;
10012            TCGMemOp memop = MO_32 | (is_u ? 0 : MO_SIGN);
10013
10014            int elt = pass + is_q * 2;
10015
10016            read_vec_element(s, tcg_op1, rn, elt, memop);
10017            read_vec_element(s, tcg_op2, rm, elt, memop);
10018
10019            if (accop == 0) {
10020                tcg_passres = tcg_res[pass];
10021            } else {
10022                tcg_passres = tcg_temp_new_i64();
10023            }
10024
10025            switch (opcode) {
10026            case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
10027                tcg_gen_add_i64(tcg_passres, tcg_op1, tcg_op2);
10028                break;
10029            case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
10030                tcg_gen_sub_i64(tcg_passres, tcg_op1, tcg_op2);
10031                break;
10032            case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
10033            case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
10034            {
10035                TCGv_i64 tcg_tmp1 = tcg_temp_new_i64();
10036                TCGv_i64 tcg_tmp2 = tcg_temp_new_i64();
10037
10038                tcg_gen_sub_i64(tcg_tmp1, tcg_op1, tcg_op2);
10039                tcg_gen_sub_i64(tcg_tmp2, tcg_op2, tcg_op1);
10040                tcg_gen_movcond_i64(is_u ? TCG_COND_GEU : TCG_COND_GE,
10041                                    tcg_passres,
10042                                    tcg_op1, tcg_op2, tcg_tmp1, tcg_tmp2);
10043                tcg_temp_free_i64(tcg_tmp1);
10044                tcg_temp_free_i64(tcg_tmp2);
10045                break;
10046            }
10047            case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
10048            case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
10049            case 12: /* UMULL, UMULL2, SMULL, SMULL2 */
10050                tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2);
10051                break;
10052            case 9: /* SQDMLAL, SQDMLAL2 */
10053            case 11: /* SQDMLSL, SQDMLSL2 */
10054            case 13: /* SQDMULL, SQDMULL2 */
10055                tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2);
10056                gen_helper_neon_addl_saturate_s64(tcg_passres, cpu_env,
10057                                                  tcg_passres, tcg_passres);
10058                break;
10059            default:
10060                g_assert_not_reached();
10061            }
10062
10063            if (opcode == 9 || opcode == 11) {
10064                /* saturating accumulate ops */
10065                if (accop < 0) {
10066                    tcg_gen_neg_i64(tcg_passres, tcg_passres);
10067                }
10068                gen_helper_neon_addl_saturate_s64(tcg_res[pass], cpu_env,
10069                                                  tcg_res[pass], tcg_passres);
10070            } else if (accop > 0) {
10071                tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
10072            } else if (accop < 0) {
10073                tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
10074            }
10075
10076            if (accop != 0) {
10077                tcg_temp_free_i64(tcg_passres);
10078            }
10079
10080            tcg_temp_free_i64(tcg_op1);
10081            tcg_temp_free_i64(tcg_op2);
10082        }
10083    } else {
10084        /* size 0 or 1, generally helper functions */
10085        for (pass = 0; pass < 2; pass++) {
10086            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
10087            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
10088            TCGv_i64 tcg_passres;
10089            int elt = pass + is_q * 2;
10090
10091            read_vec_element_i32(s, tcg_op1, rn, elt, MO_32);
10092            read_vec_element_i32(s, tcg_op2, rm, elt, MO_32);
10093
10094            if (accop == 0) {
10095                tcg_passres = tcg_res[pass];
10096            } else {
10097                tcg_passres = tcg_temp_new_i64();
10098            }
10099
10100            switch (opcode) {
10101            case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
10102            case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
10103            {
10104                TCGv_i64 tcg_op2_64 = tcg_temp_new_i64();
10105                static NeonGenWidenFn * const widenfns[2][2] = {
10106                    { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 },
10107                    { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 },
10108                };
10109                NeonGenWidenFn *widenfn = widenfns[size][is_u];
10110
10111                widenfn(tcg_op2_64, tcg_op2);
10112                widenfn(tcg_passres, tcg_op1);
10113                gen_neon_addl(size, (opcode == 2), tcg_passres,
10114                              tcg_passres, tcg_op2_64);
10115                tcg_temp_free_i64(tcg_op2_64);
10116                break;
10117            }
10118            case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
10119            case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
10120                if (size == 0) {
10121                    if (is_u) {
10122                        gen_helper_neon_abdl_u16(tcg_passres, tcg_op1, tcg_op2);
10123                    } else {
10124                        gen_helper_neon_abdl_s16(tcg_passres, tcg_op1, tcg_op2);
10125                    }
10126                } else {
10127                    if (is_u) {
10128                        gen_helper_neon_abdl_u32(tcg_passres, tcg_op1, tcg_op2);
10129                    } else {
10130                        gen_helper_neon_abdl_s32(tcg_passres, tcg_op1, tcg_op2);
10131                    }
10132                }
10133                break;
10134            case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
10135            case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
10136            case 12: /* UMULL, UMULL2, SMULL, SMULL2 */
10137                if (size == 0) {
10138                    if (is_u) {
10139                        gen_helper_neon_mull_u8(tcg_passres, tcg_op1, tcg_op2);
10140                    } else {
10141                        gen_helper_neon_mull_s8(tcg_passres, tcg_op1, tcg_op2);
10142                    }
10143                } else {
10144                    if (is_u) {
10145                        gen_helper_neon_mull_u16(tcg_passres, tcg_op1, tcg_op2);
10146                    } else {
10147                        gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2);
10148                    }
10149                }
10150                break;
10151            case 9: /* SQDMLAL, SQDMLAL2 */
10152            case 11: /* SQDMLSL, SQDMLSL2 */
10153            case 13: /* SQDMULL, SQDMULL2 */
10154                assert(size == 1);
10155                gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2);
10156                gen_helper_neon_addl_saturate_s32(tcg_passres, cpu_env,
10157                                                  tcg_passres, tcg_passres);
10158                break;
10159            case 14: /* PMULL */
10160                assert(size == 0);
10161                gen_helper_neon_mull_p8(tcg_passres, tcg_op1, tcg_op2);
10162                break;
10163            default:
10164                g_assert_not_reached();
10165            }
10166            tcg_temp_free_i32(tcg_op1);
10167            tcg_temp_free_i32(tcg_op2);
10168
10169            if (accop != 0) {
10170                if (opcode == 9 || opcode == 11) {
10171                    /* saturating accumulate ops */
10172                    if (accop < 0) {
10173                        gen_helper_neon_negl_u32(tcg_passres, tcg_passres);
10174                    }
10175                    gen_helper_neon_addl_saturate_s32(tcg_res[pass], cpu_env,
10176                                                      tcg_res[pass],
10177                                                      tcg_passres);
10178                } else {
10179                    gen_neon_addl(size, (accop < 0), tcg_res[pass],
10180                                  tcg_res[pass], tcg_passres);
10181                }
10182                tcg_temp_free_i64(tcg_passres);
10183            }
10184        }
10185    }
10186
10187    write_vec_element(s, tcg_res[0], rd, 0, MO_64);
10188    write_vec_element(s, tcg_res[1], rd, 1, MO_64);
10189    tcg_temp_free_i64(tcg_res[0]);
10190    tcg_temp_free_i64(tcg_res[1]);
10191}
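
/* Illustrative only: the branch-free absolute-difference pattern used
 * for SABDL/UABDL in the size == 2 path above.  Both differences are
 * formed and the movcond keeps the non-negative one.  The name is
 * hypothetical.
 */
static inline uint64_t ref_abd64(uint64_t op1, uint64_t op2, bool is_u)
{
    uint64_t d1 = op1 - op2;
    uint64_t d2 = op2 - op1;
    bool ge = is_u ? op1 >= op2
                   : (int64_t)op1 >= (int64_t)op2;

    return ge ? d1 : d2;    /* mirrors the TCG_COND_GEU/GE movcond */
}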
10192
10193static void handle_3rd_wide(DisasContext *s, int is_q, int is_u, int size,
10194                            int opcode, int rd, int rn, int rm)
10195{
10196    TCGv_i64 tcg_res[2];
10197    int part = is_q ? 2 : 0;
10198    int pass;
10199
10200    for (pass = 0; pass < 2; pass++) {
10201        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
10202        TCGv_i32 tcg_op2 = tcg_temp_new_i32();
10203        TCGv_i64 tcg_op2_wide = tcg_temp_new_i64();
10204        static NeonGenWidenFn * const widenfns[3][2] = {
10205            { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 },
10206            { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 },
10207            { tcg_gen_ext_i32_i64, tcg_gen_extu_i32_i64 },
10208        };
10209        NeonGenWidenFn *widenfn = widenfns[size][is_u];
10210
10211        read_vec_element(s, tcg_op1, rn, pass, MO_64);
10212        read_vec_element_i32(s, tcg_op2, rm, part + pass, MO_32);
10213        widenfn(tcg_op2_wide, tcg_op2);
10214        tcg_temp_free_i32(tcg_op2);
10215        tcg_res[pass] = tcg_temp_new_i64();
10216        gen_neon_addl(size, (opcode == 3),
10217                      tcg_res[pass], tcg_op1, tcg_op2_wide);
10218        tcg_temp_free_i64(tcg_op1);
10219        tcg_temp_free_i64(tcg_op2_wide);
10220    }
10221
10222    for (pass = 0; pass < 2; pass++) {
10223        write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
10224        tcg_temp_free_i64(tcg_res[pass]);
10225    }
10226}
10227
10228static void do_narrow_round_high_u32(TCGv_i32 res, TCGv_i64 in)
10229{
10230    tcg_gen_addi_i64(in, in, 1U << 31);
10231    tcg_gen_extrh_i64_i32(res, in);
10232}
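
/* Illustrative only: a pure-C model of do_narrow_round_high_u32 above.
 * Adding 1 << 31 before taking the high half rounds to nearest, so an
 * input of 0x80000000 narrows to 1 where plain truncation of the high
 * 32 bits would give 0.  The name is hypothetical.
 */
static inline uint32_t ref_narrow_round_high_u32(uint64_t in)
{
    return (in + (1ULL << 31)) >> 32;
}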
10233
10234static void handle_3rd_narrowing(DisasContext *s, int is_q, int is_u, int size,
10235                                 int opcode, int rd, int rn, int rm)
10236{
10237    TCGv_i32 tcg_res[2];
10238    int part = is_q ? 2 : 0;
10239    int pass;
10240
10241    for (pass = 0; pass < 2; pass++) {
10242        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
10243        TCGv_i64 tcg_op2 = tcg_temp_new_i64();
10244        TCGv_i64 tcg_wideres = tcg_temp_new_i64();
10245        static NeonGenNarrowFn * const narrowfns[3][2] = {
10246            { gen_helper_neon_narrow_high_u8,
10247              gen_helper_neon_narrow_round_high_u8 },
10248            { gen_helper_neon_narrow_high_u16,
10249              gen_helper_neon_narrow_round_high_u16 },
10250            { tcg_gen_extrh_i64_i32, do_narrow_round_high_u32 },
10251        };
10252        NeonGenNarrowFn *gennarrow = narrowfns[size][is_u];
10253
10254        read_vec_element(s, tcg_op1, rn, pass, MO_64);
10255        read_vec_element(s, tcg_op2, rm, pass, MO_64);
10256
10257        gen_neon_addl(size, (opcode == 6), tcg_wideres, tcg_op1, tcg_op2);
10258
10259        tcg_temp_free_i64(tcg_op1);
10260        tcg_temp_free_i64(tcg_op2);
10261
10262        tcg_res[pass] = tcg_temp_new_i32();
10263        gennarrow(tcg_res[pass], tcg_wideres);
10264        tcg_temp_free_i64(tcg_wideres);
10265    }
10266
10267    for (pass = 0; pass < 2; pass++) {
10268        write_vec_element_i32(s, tcg_res[pass], rd, pass + part, MO_32);
10269        tcg_temp_free_i32(tcg_res[pass]);
10270    }
10271    clear_vec_high(s, is_q, rd);
10272}
10273
10274static void handle_pmull_64(DisasContext *s, int is_q, int rd, int rn, int rm)
10275{
10276    /* PMULL of 64 x 64 -> 128 is an odd special case because it
10277     * is the only three-reg-diff instruction which produces a
10278     * 128-bit wide result from a single operation. However, since
10279     * it's possible to calculate the two halves more or less
10280     * separately, we just use two helper calls.
10281     */
10282    TCGv_i64 tcg_op1 = tcg_temp_new_i64();
10283    TCGv_i64 tcg_op2 = tcg_temp_new_i64();
10284    TCGv_i64 tcg_res = tcg_temp_new_i64();
10285
10286    read_vec_element(s, tcg_op1, rn, is_q, MO_64);
10287    read_vec_element(s, tcg_op2, rm, is_q, MO_64);
10288    gen_helper_neon_pmull_64_lo(tcg_res, tcg_op1, tcg_op2);
10289    write_vec_element(s, tcg_res, rd, 0, MO_64);
10290    gen_helper_neon_pmull_64_hi(tcg_res, tcg_op1, tcg_op2);
10291    write_vec_element(s, tcg_res, rd, 1, MO_64);
10292
10293    tcg_temp_free_i64(tcg_op1);
10294    tcg_temp_free_i64(tcg_op2);
10295    tcg_temp_free_i64(tcg_res);
10296}
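
/* Illustrative only: the carry-less (polynomial) multiplication the two
 * helper calls above compute, written as a shift-and-xor loop.  The lo
 * helper returns bits [63:0] of the 128-bit product and the hi helper
 * bits [127:64]; this sketch models the low half.  The name is
 * hypothetical.
 */
static inline uint64_t ref_pmull64_lo(uint64_t a, uint64_t b)
{
    uint64_t result = 0;
    int i;

    for (i = 0; i < 64; i++) {
        if (b & (1ULL << i)) {
            result ^= a << i;    /* xor rather than add: no carries */
        }
    }
    return result;
}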
10297
10298/* AdvSIMD three different
10299 *   31  30  29 28       24 23  22  21 20  16 15    12 11 10 9    5 4    0
10300 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
10301 * | 0 | Q | U | 0 1 1 1 0 | size | 1 |  Rm  | opcode | 0 0 |  Rn  |  Rd  |
10302 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
10303 */
10304static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn)
10305{
10306    /* Instructions in this group fall into three basic classes
10307     * (in each case with the operation working on each element in
10308     * the input vectors):
10309     * (1) widening 64 x 64 -> 128 (with possibly Vd as an extra
10310     *     128 bit input)
10311     * (2) wide 64 x 128 -> 128
10312     * (3) narrowing 128 x 128 -> 64
10313     * Here we do initial decode, catch unallocated cases and
10314     * dispatch to separate functions for each class.
10315     */
10316    int is_q = extract32(insn, 30, 1);
10317    int is_u = extract32(insn, 29, 1);
10318    int size = extract32(insn, 22, 2);
10319    int opcode = extract32(insn, 12, 4);
10320    int rm = extract32(insn, 16, 5);
10321    int rn = extract32(insn, 5, 5);
10322    int rd = extract32(insn, 0, 5);
10323
10324    switch (opcode) {
10325    case 1: /* SADDW, SADDW2, UADDW, UADDW2 */
10326    case 3: /* SSUBW, SSUBW2, USUBW, USUBW2 */
10327        /* 64 x 128 -> 128 */
10328        if (size == 3) {
10329            unallocated_encoding(s);
10330            return;
10331        }
10332        if (!fp_access_check(s)) {
10333            return;
10334        }
10335        handle_3rd_wide(s, is_q, is_u, size, opcode, rd, rn, rm);
10336        break;
10337    case 4: /* ADDHN, ADDHN2, RADDHN, RADDHN2 */
10338    case 6: /* SUBHN, SUBHN2, RSUBHN, RSUBHN2 */
10339        /* 128 x 128 -> 64 */
10340        if (size == 3) {
10341            unallocated_encoding(s);
10342            return;
10343        }
10344        if (!fp_access_check(s)) {
10345            return;
10346        }
10347        handle_3rd_narrowing(s, is_q, is_u, size, opcode, rd, rn, rm);
10348        break;
10349    case 14: /* PMULL, PMULL2 */
10350        if (is_u || size == 1 || size == 2) {
10351            unallocated_encoding(s);
10352            return;
10353        }
10354        if (size == 3) {
10355            if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) {
10356                unallocated_encoding(s);
10357                return;
10358            }
10359            if (!fp_access_check(s)) {
10360                return;
10361            }
10362            handle_pmull_64(s, is_q, rd, rn, rm);
10363            return;
10364        }
10365        goto is_widening;
10366    case 9: /* SQDMLAL, SQDMLAL2 */
10367    case 11: /* SQDMLSL, SQDMLSL2 */
10368    case 13: /* SQDMULL, SQDMULL2 */
10369        if (is_u || size == 0) {
10370            unallocated_encoding(s);
10371            return;
10372        }
10373        /* fall through */
10374    case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
10375    case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
10376    case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
10377    case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
10378    case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
10379    case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
10380    case 12: /* SMULL, SMULL2, UMULL, UMULL2 */
10381        /* 64 x 64 -> 128 */
10382        if (size == 3) {
10383            unallocated_encoding(s);
10384            return;
10385        }
10386    is_widening:
10387        if (!fp_access_check(s)) {
10388            return;
10389        }
10390
10391        handle_3rd_widening(s, is_q, is_u, size, opcode, rd, rn, rm);
10392        break;
10393    default:
10394        /* opcode 15 not allocated */
10395        unallocated_encoding(s);
10396        break;
10397    }
10398}
10399
10400static void gen_bsl_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
10401{
10402    tcg_gen_xor_i64(rn, rn, rm);
10403    tcg_gen_and_i64(rn, rn, rd);
10404    tcg_gen_xor_i64(rd, rm, rn);
10405}
10406
10407static void gen_bit_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
10408{
10409    tcg_gen_xor_i64(rn, rn, rd);
10410    tcg_gen_and_i64(rn, rn, rm);
10411    tcg_gen_xor_i64(rd, rd, rn);
10412}
10413
10414static void gen_bif_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
10415{
10416    tcg_gen_xor_i64(rn, rn, rd);
10417    tcg_gen_andc_i64(rn, rn, rm);
10418    tcg_gen_xor_i64(rd, rd, rn);
10419}
10420
10421static void gen_bsl_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
10422{
10423    tcg_gen_xor_vec(vece, rn, rn, rm);
10424    tcg_gen_and_vec(vece, rn, rn, rd);
10425    tcg_gen_xor_vec(vece, rd, rm, rn);
10426}
10427
10428static void gen_bit_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
10429{
10430    tcg_gen_xor_vec(vece, rn, rn, rd);
10431    tcg_gen_and_vec(vece, rn, rn, rm);
10432    tcg_gen_xor_vec(vece, rd, rd, rn);
10433}
10434
10435static void gen_bif_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
10436{
10437    tcg_gen_xor_vec(vece, rn, rn, rd);
10438    tcg_gen_andc_vec(vece, rn, rn, rm);
10439    tcg_gen_xor_vec(vece, rd, rd, rn);
10440}
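
/* Illustrative only: the xor/and/xor identity behind the six helpers
 * above.  For BSL the destination is the select mask, so the result is
 * rm ^ ((rn ^ rm) & rd): rn where a mask bit is set, rm where it is
 * clear.  BIT and BIF reuse the identity with rm (or its complement)
 * as the mask.  The name is hypothetical.
 */
static inline uint64_t ref_bsl64(uint64_t rd, uint64_t rn, uint64_t rm)
{
    return rm ^ ((rn ^ rm) & rd);
}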
10441
10442/* Logic op (opcode == 3) subgroup of C3.6.16. */
10443static void disas_simd_3same_logic(DisasContext *s, uint32_t insn)
10444{
10445    static const GVecGen3 bsl_op = {
10446        .fni8 = gen_bsl_i64,
10447        .fniv = gen_bsl_vec,
10448        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
10449        .load_dest = true
10450    };
10451    static const GVecGen3 bit_op = {
10452        .fni8 = gen_bit_i64,
10453        .fniv = gen_bit_vec,
10454        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
10455        .load_dest = true
10456    };
10457    static const GVecGen3 bif_op = {
10458        .fni8 = gen_bif_i64,
10459        .fniv = gen_bif_vec,
10460        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
10461        .load_dest = true
10462    };
10463
10464    int rd = extract32(insn, 0, 5);
10465    int rn = extract32(insn, 5, 5);
10466    int rm = extract32(insn, 16, 5);
10467    int size = extract32(insn, 22, 2);
10468    bool is_u = extract32(insn, 29, 1);
10469    bool is_q = extract32(insn, 30, 1);
10470
10471    if (!fp_access_check(s)) {
10472        return;
10473    }
10474
10475    switch (size + 4 * is_u) {
10476    case 0: /* AND */
10477        gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_and, 0);
10478        return;
10479    case 1: /* BIC */
10480        gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_andc, 0);
10481        return;
10482    case 2: /* ORR */
10483        if (rn == rm) { /* MOV */
10484            gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_mov, 0);
10485        } else {
10486            gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_or, 0);
10487        }
10488        return;
10489    case 3: /* ORN */
10490        gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_orc, 0);
10491        return;
10492    case 4: /* EOR */
10493        gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_xor, 0);
10494        return;
10495
10496    case 5: /* BSL bitwise select */
10497        gen_gvec_op3(s, is_q, rd, rn, rm, &bsl_op);
10498        return;
10499    case 6: /* BIT, bitwise insert if true */
10500        gen_gvec_op3(s, is_q, rd, rn, rm, &bit_op);
10501        return;
10502    case 7: /* BIF, bitwise insert if false */
10503        gen_gvec_op3(s, is_q, rd, rn, rm, &bif_op);
10504        return;
10505
10506    default:
10507        g_assert_not_reached();
10508    }
10509}
10510
10511/* Pairwise op subgroup of C3.6.16.
10512 *
10513 * This is called directly or from disas_simd_3same_float for float pairwise
10514 * operations where the opcode and size are calculated differently.
10515 */
10516static void handle_simd_3same_pair(DisasContext *s, int is_q, int u, int opcode,
10517                                   int size, int rn, int rm, int rd)
10518{
10519    TCGv_ptr fpst;
10520    int pass;
10521
10522    if (!fp_access_check(s)) {
10523        return;
10524    }
10525
10526    /* FP ops need fpst; allocate after the access check to avoid a temp leak */
10527    if (opcode >= 0x58) {
10528        fpst = get_fpstatus_ptr(false);
10529    } else {
10530        fpst = NULL;
10531    }
10532
10533    /* These operations work on the concatenated rm:rn, with each pair of
10534     * adjacent elements being operated on to produce an element in the result.
10535     */
10536    if (size == 3) {
10537        TCGv_i64 tcg_res[2];
10538
10539        for (pass = 0; pass < 2; pass++) {
10540            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
10541            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
10542            int passreg = (pass == 0) ? rn : rm;
10543
10544            read_vec_element(s, tcg_op1, passreg, 0, MO_64);
10545            read_vec_element(s, tcg_op2, passreg, 1, MO_64);
10546            tcg_res[pass] = tcg_temp_new_i64();
10547
10548            switch (opcode) {
10549            case 0x17: /* ADDP */
10550                tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2);
10551                break;
10552            case 0x58: /* FMAXNMP */
10553                gen_helper_vfp_maxnumd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10554                break;
10555            case 0x5a: /* FADDP */
10556                gen_helper_vfp_addd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10557                break;
10558            case 0x5e: /* FMAXP */
10559                gen_helper_vfp_maxd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10560                break;
10561            case 0x78: /* FMINNMP */
10562                gen_helper_vfp_minnumd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10563                break;
10564            case 0x7e: /* FMINP */
10565                gen_helper_vfp_mind(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10566                break;
10567            default:
10568                g_assert_not_reached();
10569            }
10570
10571            tcg_temp_free_i64(tcg_op1);
10572            tcg_temp_free_i64(tcg_op2);
10573        }
10574
10575        for (pass = 0; pass < 2; pass++) {
10576            write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
10577            tcg_temp_free_i64(tcg_res[pass]);
10578        }
10579    } else {
10580        int maxpass = is_q ? 4 : 2;
10581        TCGv_i32 tcg_res[4];
10582
10583        for (pass = 0; pass < maxpass; pass++) {
10584            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
10585            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
10586            NeonGenTwoOpFn *genfn = NULL;
10587            int passreg = pass < (maxpass / 2) ? rn : rm;
10588            int passelt = (is_q && (pass & 1)) ? 2 : 0;
10589
10590            read_vec_element_i32(s, tcg_op1, passreg, passelt, MO_32);
10591            read_vec_element_i32(s, tcg_op2, passreg, passelt + 1, MO_32);
10592            tcg_res[pass] = tcg_temp_new_i32();
10593
10594            switch (opcode) {
10595            case 0x17: /* ADDP */
10596            {
10597                static NeonGenTwoOpFn * const fns[3] = {
10598                    gen_helper_neon_padd_u8,
10599                    gen_helper_neon_padd_u16,
10600                    tcg_gen_add_i32,
10601                };
10602                genfn = fns[size];
10603                break;
10604            }
10605            case 0x14: /* SMAXP, UMAXP */
10606            {
10607                static NeonGenTwoOpFn * const fns[3][2] = {
10608                    { gen_helper_neon_pmax_s8, gen_helper_neon_pmax_u8 },
10609                    { gen_helper_neon_pmax_s16, gen_helper_neon_pmax_u16 },
10610                    { tcg_gen_smax_i32, tcg_gen_umax_i32 },
10611                };
10612                genfn = fns[size][u];
10613                break;
10614            }
10615            case 0x15: /* SMINP, UMINP */
10616            {
10617                static NeonGenTwoOpFn * const fns[3][2] = {
10618                    { gen_helper_neon_pmin_s8, gen_helper_neon_pmin_u8 },
10619                    { gen_helper_neon_pmin_s16, gen_helper_neon_pmin_u16 },
10620                    { tcg_gen_smin_i32, tcg_gen_umin_i32 },
10621                };
10622                genfn = fns[size][u];
10623                break;
10624            }
10625            /* The FP operations are all on single floats (32 bit) */
10626            case 0x58: /* FMAXNMP */
10627                gen_helper_vfp_maxnums(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10628                break;
10629            case 0x5a: /* FADDP */
10630                gen_helper_vfp_adds(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10631                break;
10632            case 0x5e: /* FMAXP */
10633                gen_helper_vfp_maxs(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10634                break;
10635            case 0x78: /* FMINNMP */
10636                gen_helper_vfp_minnums(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10637                break;
10638            case 0x7e: /* FMINP */
10639                gen_helper_vfp_mins(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10640                break;
10641            default:
10642                g_assert_not_reached();
10643            }
10644
10645            /* The FP ops above were emitted directly; call the integer op now */
10646            if (genfn) {
10647                genfn(tcg_res[pass], tcg_op1, tcg_op2);
10648            }
10649
10650            tcg_temp_free_i32(tcg_op1);
10651            tcg_temp_free_i32(tcg_op2);
10652        }
10653
10654        for (pass = 0; pass < maxpass; pass++) {
10655            write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32);
10656            tcg_temp_free_i32(tcg_res[pass]);
10657        }
10658        clear_vec_high(s, is_q, rd);
10659    }
10660
10661    if (fpst) {
10662        tcg_temp_free_ptr(fpst);
10663    }
10664}
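
/* Illustrative only: the lane pattern the loops above implement, shown
 * for ADDP with 32-bit elements on a 128-bit vector.  Adjacent pairs of
 * the concatenation rm:rn are combined, so the low half of rd comes
 * from rn and the high half from rm.  The name is hypothetical.
 */
static inline void ref_addp_4s(const uint32_t rn[4], const uint32_t rm[4],
                               uint32_t rd[4])
{
    rd[0] = rn[0] + rn[1];
    rd[1] = rn[2] + rn[3];
    rd[2] = rm[0] + rm[1];
    rd[3] = rm[2] + rm[3];
}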
10665
10666/* Floating point op subgroup of C3.6.16. */
10667static void disas_simd_3same_float(DisasContext *s, uint32_t insn)
10668{
10669    /* For floating point ops, the U, size[1] and opcode bits
10670     * together indicate the operation. size[0] indicates single
10671     * or double.
10672     */
10673    int fpopcode = extract32(insn, 11, 5)
10674        | (extract32(insn, 23, 1) << 5)
10675        | (extract32(insn, 29, 1) << 6);
10676    int is_q = extract32(insn, 30, 1);
10677    int size = extract32(insn, 22, 1);
10678    int rm = extract32(insn, 16, 5);
10679    int rn = extract32(insn, 5, 5);
10680    int rd = extract32(insn, 0, 5);
10681
10682    int datasize = is_q ? 128 : 64;
10683    int esize = 32 << size;
10684    int elements = datasize / esize;
10685
10686    if (size == 1 && !is_q) {
10687        unallocated_encoding(s);
10688        return;
10689    }
10690
10691    switch (fpopcode) {
10692    case 0x58: /* FMAXNMP */
10693    case 0x5a: /* FADDP */
10694    case 0x5e: /* FMAXP */
10695    case 0x78: /* FMINNMP */
10696    case 0x7e: /* FMINP */
10697        if (size && !is_q) {
10698            unallocated_encoding(s);
10699            return;
10700        }
10701        handle_simd_3same_pair(s, is_q, 0, fpopcode, size ? MO_64 : MO_32,
10702                               rn, rm, rd);
10703        return;
10704    case 0x1b: /* FMULX */
10705    case 0x1f: /* FRECPS */
10706    case 0x3f: /* FRSQRTS */
10707    case 0x5d: /* FACGE */
10708    case 0x7d: /* FACGT */
10709    case 0x19: /* FMLA */
10710    case 0x39: /* FMLS */
10711    case 0x18: /* FMAXNM */
10712    case 0x1a: /* FADD */
10713    case 0x1c: /* FCMEQ */
10714    case 0x1e: /* FMAX */
10715    case 0x38: /* FMINNM */
10716    case 0x3a: /* FSUB */
10717    case 0x3e: /* FMIN */
10718    case 0x5b: /* FMUL */
10719    case 0x5c: /* FCMGE */
10720    case 0x5f: /* FDIV */
10721    case 0x7a: /* FABD */
10722    case 0x7c: /* FCMGT */
10723        if (!fp_access_check(s)) {
10724            return;
10725        }
10726
10727        handle_3same_float(s, size, elements, fpopcode, rd, rn, rm);
10728        return;
10729    default:
10730        unallocated_encoding(s);
10731        return;
10732    }
10733}
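
/* Illustrative only: the fpopcode composition used above.  FADDP, for
 * example, has U = 1, size[1] = 0 and opcode = 0x1a, giving
 * (1 << 6) | (0 << 5) | 0x1a = 0x5a - the case label in the pairwise
 * group.  The helper name is hypothetical.
 */
static inline int ref_fp_3same_opcode(int u, int size_hi, int opcode)
{
    return opcode | (size_hi << 5) | (u << 6);
}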
10734
10735static void gen_mla8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
10736{
10737    gen_helper_neon_mul_u8(a, a, b);
10738    gen_helper_neon_add_u8(d, d, a);
10739}
10740
10741static void gen_mla16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
10742{
10743    gen_helper_neon_mul_u16(a, a, b);
10744    gen_helper_neon_add_u16(d, d, a);
10745}
10746
10747static void gen_mla32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
10748{
10749    tcg_gen_mul_i32(a, a, b);
10750    tcg_gen_add_i32(d, d, a);
10751}
10752
10753static void gen_mla64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
10754{
10755    tcg_gen_mul_i64(a, a, b);
10756    tcg_gen_add_i64(d, d, a);
10757}
10758
10759static void gen_mla_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
10760{
10761    tcg_gen_mul_vec(vece, a, a, b);
10762    tcg_gen_add_vec(vece, d, d, a);
10763}
10764
10765static void gen_mls8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
10766{
10767    gen_helper_neon_mul_u8(a, a, b);
10768    gen_helper_neon_sub_u8(d, d, a);
10769}
10770
10771static void gen_mls16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
10772{
10773    gen_helper_neon_mul_u16(a, a, b);
10774    gen_helper_neon_sub_u16(d, d, a);
10775}
10776
10777static void gen_mls32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
10778{
10779    tcg_gen_mul_i32(a, a, b);
10780    tcg_gen_sub_i32(d, d, a);
10781}
10782
10783static void gen_mls64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
10784{
10785    tcg_gen_mul_i64(a, a, b);
10786    tcg_gen_sub_i64(d, d, a);
10787}
10788
10789static void gen_mls_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
10790{
10791    tcg_gen_mul_vec(vece, a, a, b);
10792    tcg_gen_sub_vec(vece, d, d, a);
10793}
10794
10795/* Integer op subgroup of C3.6.16. */
10796static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
10797{
10798    static const GVecGen3 cmtst_op[4] = {
10799        { .fni4 = gen_helper_neon_tst_u8,
10800          .fniv = gen_cmtst_vec,
10801          .vece = MO_8 },
10802        { .fni4 = gen_helper_neon_tst_u16,
10803          .fniv = gen_cmtst_vec,
10804          .vece = MO_16 },
10805        { .fni4 = gen_cmtst_i32,
10806          .fniv = gen_cmtst_vec,
10807          .vece = MO_32 },
10808        { .fni8 = gen_cmtst_i64,
10809          .fniv = gen_cmtst_vec,
10810          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
10811          .vece = MO_64 },
10812    };
10813    static const GVecGen3 mla_op[4] = {
10814        { .fni4 = gen_mla8_i32,
10815          .fniv = gen_mla_vec,
10816          .opc = INDEX_op_mul_vec,
10817          .load_dest = true,
10818          .vece = MO_8 },
10819        { .fni4 = gen_mla16_i32,
10820          .fniv = gen_mla_vec,
10821          .opc = INDEX_op_mul_vec,
10822          .load_dest = true,
10823          .vece = MO_16 },
10824        { .fni4 = gen_mla32_i32,
10825          .fniv = gen_mla_vec,
10826          .opc = INDEX_op_mul_vec,
10827          .load_dest = true,
10828          .vece = MO_32 },
10829        { .fni8 = gen_mla64_i64,
10830          .fniv = gen_mla_vec,
10831          .opc = INDEX_op_mul_vec,
10832          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
10833          .load_dest = true,
10834          .vece = MO_64 },
10835    };
10836    static const GVecGen3 mls_op[4] = {
10837        { .fni4 = gen_mls8_i32,
10838          .fniv = gen_mls_vec,
10839          .opc = INDEX_op_mul_vec,
10840          .load_dest = true,
10841          .vece = MO_8 },
10842        { .fni4 = gen_mls16_i32,
10843          .fniv = gen_mls_vec,
10844          .opc = INDEX_op_mul_vec,
10845          .load_dest = true,
10846          .vece = MO_16 },
10847        { .fni4 = gen_mls32_i32,
10848          .fniv = gen_mls_vec,
10849          .opc = INDEX_op_mul_vec,
10850          .load_dest = true,
10851          .vece = MO_32 },
10852        { .fni8 = gen_mls64_i64,
10853          .fniv = gen_mls_vec,
10854          .opc = INDEX_op_mul_vec,
10855          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
10856          .load_dest = true,
10857          .vece = MO_64 },
10858    };
10859
10860    int is_q = extract32(insn, 30, 1);
10861    int u = extract32(insn, 29, 1);
10862    int size = extract32(insn, 22, 2);
10863    int opcode = extract32(insn, 11, 5);
10864    int rm = extract32(insn, 16, 5);
10865    int rn = extract32(insn, 5, 5);
10866    int rd = extract32(insn, 0, 5);
10867    int pass;
10868    TCGCond cond;
10869
10870    switch (opcode) {
10871    case 0x13: /* MUL, PMUL */
10872        if (u && size != 0) {
10873            unallocated_encoding(s);
10874            return;
10875        }
10876        /* fall through */
10877    case 0x0: /* SHADD, UHADD */
10878    case 0x2: /* SRHADD, URHADD */
10879    case 0x4: /* SHSUB, UHSUB */
10880    case 0xc: /* SMAX, UMAX */
10881    case 0xd: /* SMIN, UMIN */
10882    case 0xe: /* SABD, UABD */
10883    case 0xf: /* SABA, UABA */
10884    case 0x12: /* MLA, MLS */
10885        if (size == 3) {
10886            unallocated_encoding(s);
10887            return;
10888        }
10889        break;
10890    case 0x16: /* SQDMULH, SQRDMULH */
10891        if (size == 0 || size == 3) {
10892            unallocated_encoding(s);
10893            return;
10894        }
10895        break;
10896    default:
10897        if (size == 3 && !is_q) {
10898            unallocated_encoding(s);
10899            return;
10900        }
10901        break;
10902    }
10903
10904    if (!fp_access_check(s)) {
10905        return;
10906    }
10907
10908    switch (opcode) {
10909    case 0x10: /* ADD, SUB */
10910        if (u) {
10911            gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_sub, size);
10912        } else {
10913            gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_add, size);
10914        }
10915        return;
10916    case 0x13: /* MUL, PMUL */
10917        if (!u) { /* MUL */
10918            gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_mul, size);
10919            return;
10920        }
10921        break;
10922    case 0x12: /* MLA, MLS */
10923        if (u) {
10924            gen_gvec_op3(s, is_q, rd, rn, rm, &mls_op[size]);
10925        } else {
10926            gen_gvec_op3(s, is_q, rd, rn, rm, &mla_op[size]);
10927        }
10928        return;
10929    case 0x11:
10930        if (!u) { /* CMTST */
10931            gen_gvec_op3(s, is_q, rd, rn, rm, &cmtst_op[size]);
10932            return;
10933        }
10934        /* else CMEQ */
10935        cond = TCG_COND_EQ;
10936        goto do_gvec_cmp;
10937    case 0x06: /* CMGT, CMHI */
10938        cond = u ? TCG_COND_GTU : TCG_COND_GT;
10939        goto do_gvec_cmp;
10940    case 0x07: /* CMGE, CMHS */
10941        cond = u ? TCG_COND_GEU : TCG_COND_GE;
10942    do_gvec_cmp:
10943        tcg_gen_gvec_cmp(cond, size, vec_full_reg_offset(s, rd),
10944                         vec_full_reg_offset(s, rn),
10945                         vec_full_reg_offset(s, rm),
10946                         is_q ? 16 : 8, vec_full_reg_size(s));
10947        return;
10948    }
10949
10950    if (size == 3) {
10951        assert(is_q);
10952        for (pass = 0; pass < 2; pass++) {
10953            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
10954            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
10955            TCGv_i64 tcg_res = tcg_temp_new_i64();
10956
10957            read_vec_element(s, tcg_op1, rn, pass, MO_64);
10958            read_vec_element(s, tcg_op2, rm, pass, MO_64);
10959
10960            handle_3same_64(s, opcode, u, tcg_res, tcg_op1, tcg_op2);
10961
10962            write_vec_element(s, tcg_res, rd, pass, MO_64);
10963
10964            tcg_temp_free_i64(tcg_res);
10965            tcg_temp_free_i64(tcg_op1);
10966            tcg_temp_free_i64(tcg_op2);
10967        }
10968    } else {
10969        for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
10970            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
10971            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
10972            TCGv_i32 tcg_res = tcg_temp_new_i32();
10973            NeonGenTwoOpFn *genfn = NULL;
10974            NeonGenTwoOpEnvFn *genenvfn = NULL;
10975
10976            read_vec_element_i32(s, tcg_op1, rn, pass, MO_32);
10977            read_vec_element_i32(s, tcg_op2, rm, pass, MO_32);
10978
10979            switch (opcode) {
10980            case 0x0: /* SHADD, UHADD */
10981            {
10982                static NeonGenTwoOpFn * const fns[3][2] = {
10983                    { gen_helper_neon_hadd_s8, gen_helper_neon_hadd_u8 },
10984                    { gen_helper_neon_hadd_s16, gen_helper_neon_hadd_u16 },
10985                    { gen_helper_neon_hadd_s32, gen_helper_neon_hadd_u32 },
10986                };
10987                genfn = fns[size][u];
10988                break;
10989            }
10990            case 0x1: /* SQADD, UQADD */
10991            {
10992                static NeonGenTwoOpEnvFn * const fns[3][2] = {
10993                    { gen_helper_neon_qadd_s8, gen_helper_neon_qadd_u8 },
10994                    { gen_helper_neon_qadd_s16, gen_helper_neon_qadd_u16 },
10995                    { gen_helper_neon_qadd_s32, gen_helper_neon_qadd_u32 },
10996                };
10997                genenvfn = fns[size][u];
10998                break;
10999            }
11000            case 0x2: /* SRHADD, URHADD */
11001            {
11002                static NeonGenTwoOpFn * const fns[3][2] = {
11003                    { gen_helper_neon_rhadd_s8, gen_helper_neon_rhadd_u8 },
11004                    { gen_helper_neon_rhadd_s16, gen_helper_neon_rhadd_u16 },
11005                    { gen_helper_neon_rhadd_s32, gen_helper_neon_rhadd_u32 },
11006                };
11007                genfn = fns[size][u];
11008                break;
11009            }
11010            case 0x4: /* SHSUB, UHSUB */
11011            {
11012                static NeonGenTwoOpFn * const fns[3][2] = {
11013                    { gen_helper_neon_hsub_s8, gen_helper_neon_hsub_u8 },
11014                    { gen_helper_neon_hsub_s16, gen_helper_neon_hsub_u16 },
11015                    { gen_helper_neon_hsub_s32, gen_helper_neon_hsub_u32 },
11016                };
11017                genfn = fns[size][u];
11018                break;
11019            }
11020            case 0x5: /* SQSUB, UQSUB */
11021            {
11022                static NeonGenTwoOpEnvFn * const fns[3][2] = {
11023                    { gen_helper_neon_qsub_s8, gen_helper_neon_qsub_u8 },
11024                    { gen_helper_neon_qsub_s16, gen_helper_neon_qsub_u16 },
11025                    { gen_helper_neon_qsub_s32, gen_helper_neon_qsub_u32 },
11026                };
11027                genenvfn = fns[size][u];
11028                break;
11029            }
11030            case 0x8: /* SSHL, USHL */
11031            {
11032                static NeonGenTwoOpFn * const fns[3][2] = {
11033                    { gen_helper_neon_shl_s8, gen_helper_neon_shl_u8 },
11034                    { gen_helper_neon_shl_s16, gen_helper_neon_shl_u16 },
11035                    { gen_helper_neon_shl_s32, gen_helper_neon_shl_u32 },
11036                };
11037                genfn = fns[size][u];
11038                break;
11039            }
11040            case 0x9: /* SQSHL, UQSHL */
11041            {
11042                static NeonGenTwoOpEnvFn * const fns[3][2] = {
11043                    { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 },
11044                    { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 },
11045                    { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 },
11046                };
11047                genenvfn = fns[size][u];
11048                break;
11049            }
11050            case 0xa: /* SRSHL, URSHL */
11051            {
11052                static NeonGenTwoOpFn * const fns[3][2] = {
11053                    { gen_helper_neon_rshl_s8, gen_helper_neon_rshl_u8 },
11054                    { gen_helper_neon_rshl_s16, gen_helper_neon_rshl_u16 },
11055                    { gen_helper_neon_rshl_s32, gen_helper_neon_rshl_u32 },
11056                };
11057                genfn = fns[size][u];
11058                break;
11059            }
11060            case 0xb: /* SQRSHL, UQRSHL */
11061            {
11062                static NeonGenTwoOpEnvFn * const fns[3][2] = {
11063                    { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 },
11064                    { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 },
11065                    { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 },
11066                };
11067                genenvfn = fns[size][u];
11068                break;
11069            }
11070            case 0xc: /* SMAX, UMAX */
11071            {
11072                static NeonGenTwoOpFn * const fns[3][2] = {
11073                    { gen_helper_neon_max_s8, gen_helper_neon_max_u8 },
11074                    { gen_helper_neon_max_s16, gen_helper_neon_max_u16 },
11075                    { tcg_gen_smax_i32, tcg_gen_umax_i32 },
11076                };
11077                genfn = fns[size][u];
11078                break;
11079            }
11081            case 0xd: /* SMIN, UMIN */
11082            {
11083                static NeonGenTwoOpFn * const fns[3][2] = {
11084                    { gen_helper_neon_min_s8, gen_helper_neon_min_u8 },
11085                    { gen_helper_neon_min_s16, gen_helper_neon_min_u16 },
11086                    { tcg_gen_smin_i32, tcg_gen_umin_i32 },
11087                };
11088                genfn = fns[size][u];
11089                break;
11090            }
11091            case 0xe: /* SABD, UABD */
11092            case 0xf: /* SABA, UABA */
11093            {
11094                static NeonGenTwoOpFn * const fns[3][2] = {
11095                    { gen_helper_neon_abd_s8, gen_helper_neon_abd_u8 },
11096                    { gen_helper_neon_abd_s16, gen_helper_neon_abd_u16 },
11097                    { gen_helper_neon_abd_s32, gen_helper_neon_abd_u32 },
11098                };
11099                genfn = fns[size][u];
11100                break;
11101            }
11102            case 0x13: /* MUL, PMUL */
11103                assert(u); /* PMUL */
11104                assert(size == 0);
11105                genfn = gen_helper_neon_mul_p8;
11106                break;
11107            case 0x16: /* SQDMULH, SQRDMULH */
11108            {
11109                static NeonGenTwoOpEnvFn * const fns[2][2] = {
11110                    { gen_helper_neon_qdmulh_s16, gen_helper_neon_qrdmulh_s16 },
11111                    { gen_helper_neon_qdmulh_s32, gen_helper_neon_qrdmulh_s32 },
11112                };
11113                assert(size == 1 || size == 2);
11114                genenvfn = fns[size - 1][u];
11115                break;
11116            }
11117            default:
11118                g_assert_not_reached();
11119            }
11120
11121            if (genenvfn) {
11122                genenvfn(tcg_res, cpu_env, tcg_op1, tcg_op2);
11123            } else {
11124                genfn(tcg_res, tcg_op1, tcg_op2);
11125            }
11126
11127            if (opcode == 0xf) {
11128                /* SABA, UABA: accumulating ops */
11129                static NeonGenTwoOpFn * const fns[3] = {
11130                    gen_helper_neon_add_u8,
11131                    gen_helper_neon_add_u16,
11132                    tcg_gen_add_i32,
11133                };
11134
11135                read_vec_element_i32(s, tcg_op1, rd, pass, MO_32);
11136                fns[size](tcg_res, tcg_op1, tcg_res);
11137            }
11138
11139            write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
11140
11141            tcg_temp_free_i32(tcg_res);
11142            tcg_temp_free_i32(tcg_op1);
11143            tcg_temp_free_i32(tcg_op2);
11144        }
11145    }
11146    clear_vec_high(s, is_q, rd);
11147}
11148
11149/* AdvSIMD three same
11150 *  31  30  29  28       24 23  22  21 20  16 15    11  10 9    5 4    0
11151 * +---+---+---+-----------+------+---+------+--------+---+------+------+
11152 * | 0 | Q | U | 0 1 1 1 0 | size | 1 |  Rm  | opcode | 1 |  Rn  |  Rd  |
11153 * +---+---+---+-----------+------+---+------+--------+---+------+------+
11154 */
11155static void disas_simd_three_reg_same(DisasContext *s, uint32_t insn)
11156{
11157    int opcode = extract32(insn, 11, 5);
11158
11159    switch (opcode) {
11160    case 0x3: /* logic ops */
11161        disas_simd_3same_logic(s, insn);
11162        break;
11163    case 0x17: /* ADDP */
11164    case 0x14: /* SMAXP, UMAXP */
11165    case 0x15: /* SMINP, UMINP */
11166    {
11167        /* Pairwise operations */
11168        int is_q = extract32(insn, 30, 1);
11169        int u = extract32(insn, 29, 1);
11170        int size = extract32(insn, 22, 2);
11171        int rm = extract32(insn, 16, 5);
11172        int rn = extract32(insn, 5, 5);
11173        int rd = extract32(insn, 0, 5);
11174        if (opcode == 0x17) {
11175            if (u || (size == 3 && !is_q)) {
11176                unallocated_encoding(s);
11177                return;
11178            }
11179        } else {
11180            if (size == 3) {
11181                unallocated_encoding(s);
11182                return;
11183            }
11184        }
11185        handle_simd_3same_pair(s, is_q, u, opcode, size, rn, rm, rd);
11186        break;
11187    }
11188    case 0x18 ... 0x31:
11189        /* floating point ops, sz[1] and U are part of opcode */
11190        disas_simd_3same_float(s, insn);
11191        break;
11192    default:
11193        disas_simd_3same_int(s, insn);
11194        break;
11195    }
11196}
11197
11198/*
11199 * Advanced SIMD three same (ARMv8.2 FP16 variants)
11200 *
11201 *  31  30  29  28       24 23  22 21 20  16 15 14 13    11 10  9    5 4    0
11202 * +---+---+---+-----------+---------+------+-----+--------+---+------+------+
11203 * | 0 | Q | U | 0 1 1 1 0 | a | 1 0 |  Rm  | 0 0 | opcode | 1 |  Rn  |  Rd  |
11204 * +---+---+---+-----------+---------+------+-----+--------+---+------+------+
11205 *
11206 * This includes FMULX, FCMEQ (register), FRECPS, FRSQRTS, FCMGE
11207 * (register), FACGE, FABD, FCMGT (register) and FACGT.
11208 *
11209 */
11210static void disas_simd_three_reg_same_fp16(DisasContext *s, uint32_t insn)
11211{
11212    int opcode, fpopcode;
11213    int is_q, u, a, rm, rn, rd;
11214    int datasize, elements;
11215    int pass;
11216    TCGv_ptr fpst;
11217    bool pairwise = false;
11218
11219    if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
11220        unallocated_encoding(s);
11221        return;
11222    }
11223
11224    if (!fp_access_check(s)) {
11225        return;
11226    }
11227
11228    /* For these floating point ops, the U, a and opcode bits
11229     * together indicate the operation.
11230     */
11231    opcode = extract32(insn, 11, 3);
11232    u = extract32(insn, 29, 1);
11233    a = extract32(insn, 23, 1);
11234    is_q = extract32(insn, 30, 1);
11235    rm = extract32(insn, 16, 5);
11236    rn = extract32(insn, 5, 5);
11237    rd = extract32(insn, 0, 5);
11238
11239    fpopcode = opcode | (a << 3) | (u << 4);
11240    datasize = is_q ? 128 : 64;
11241    elements = datasize / 16;
11242
11243    switch (fpopcode) {
11244    case 0x10: /* FMAXNMP */
11245    case 0x12: /* FADDP */
11246    case 0x16: /* FMAXP */
11247    case 0x18: /* FMINNMP */
11248    case 0x1e: /* FMINP */
11249        pairwise = true;
11250        break;
11251    }
11252
11253    fpst = get_fpstatus_ptr(true);
11254
11255    if (pairwise) {
11256        int maxpass = is_q ? 8 : 4;
11257        TCGv_i32 tcg_op1 = tcg_temp_new_i32();
11258        TCGv_i32 tcg_op2 = tcg_temp_new_i32();
11259        TCGv_i32 tcg_res[8];
11260
11261        for (pass = 0; pass < maxpass; pass++) {
11262            int passreg = pass < (maxpass / 2) ? rn : rm;
11263            int passelt = (pass << 1) & (maxpass - 1);
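                 /* The first maxpass / 2 passes take their operand pairs
                  * from Rn and the rest from Rm: e.g. for a Q op, pass 5
                  * reads Rm elements (2, 3), as passelt = (5 << 1) & 7 = 2.
                  */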
11264
11265            read_vec_element_i32(s, tcg_op1, passreg, passelt, MO_16);
11266            read_vec_element_i32(s, tcg_op2, passreg, passelt + 1, MO_16);
11267            tcg_res[pass] = tcg_temp_new_i32();
11268
11269            switch (fpopcode) {
11270            case 0x10: /* FMAXNMP */
11271                gen_helper_advsimd_maxnumh(tcg_res[pass], tcg_op1, tcg_op2,
11272                                           fpst);
11273                break;
11274            case 0x12: /* FADDP */
11275                gen_helper_advsimd_addh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11276                break;
11277            case 0x16: /* FMAXP */
11278                gen_helper_advsimd_maxh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11279                break;
11280            case 0x18: /* FMINNMP */
11281                gen_helper_advsimd_minnumh(tcg_res[pass], tcg_op1, tcg_op2,
11282                                           fpst);
11283                break;
11284            case 0x1e: /* FMINP */
11285                gen_helper_advsimd_minh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11286                break;
11287            default:
11288                g_assert_not_reached();
11289            }
11290        }
11291
11292        for (pass = 0; pass < maxpass; pass++) {
11293            write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_16);
11294            tcg_temp_free_i32(tcg_res[pass]);
11295        }
11296
11297        tcg_temp_free_i32(tcg_op1);
11298        tcg_temp_free_i32(tcg_op2);
11299
11300    } else {
11301        for (pass = 0; pass < elements; pass++) {
11302            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
11303            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
11304            TCGv_i32 tcg_res = tcg_temp_new_i32();
11305
11306            read_vec_element_i32(s, tcg_op1, rn, pass, MO_16);
11307            read_vec_element_i32(s, tcg_op2, rm, pass, MO_16);
11308
11309            switch (fpopcode) {
11310            case 0x0: /* FMAXNM */
11311                gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
11312                break;
11313            case 0x1: /* FMLA */
11314                read_vec_element_i32(s, tcg_res, rd, pass, MO_16);
11315                gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_res,
11316                                           fpst);
11317                break;
11318            case 0x2: /* FADD */
11319                gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
11320                break;
11321            case 0x3: /* FMULX */
11322                gen_helper_advsimd_mulxh(tcg_res, tcg_op1, tcg_op2, fpst);
11323                break;
11324            case 0x4: /* FCMEQ */
11325                gen_helper_advsimd_ceq_f16(tcg_res, tcg_op1, tcg_op2, fpst);
11326                break;
11327            case 0x6: /* FMAX */
11328                gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
11329                break;
11330            case 0x7: /* FRECPS */
11331                gen_helper_recpsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
11332                break;
11333            case 0x8: /* FMINNM */
11334                gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
11335                break;
11336            case 0x9: /* FMLS */
11337                /* As usual for ARM, separate negation for fused multiply-add */
11338                tcg_gen_xori_i32(tcg_op1, tcg_op1, 0x8000);
11339                read_vec_element_i32(s, tcg_res, rd, pass, MO_16);
11340                gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_res,
11341                                           fpst);
11342                break;
11343            case 0xa: /* FSUB */
11344                gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
11345                break;
11346            case 0xe: /* FMIN */
11347                gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
11348                break;
11349            case 0xf: /* FRSQRTS */
11350                gen_helper_rsqrtsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
11351                break;
11352            case 0x13: /* FMUL */
11353                gen_helper_advsimd_mulh(tcg_res, tcg_op1, tcg_op2, fpst);
11354                break;
11355            case 0x14: /* FCMGE */
11356                gen_helper_advsimd_cge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
11357                break;
11358            case 0x15: /* FACGE */
11359                gen_helper_advsimd_acge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
11360                break;
11361            case 0x17: /* FDIV */
11362                gen_helper_advsimd_divh(tcg_res, tcg_op1, tcg_op2, fpst);
11363                break;
11364            case 0x1a: /* FABD */
11365                gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
11366                tcg_gen_andi_i32(tcg_res, tcg_res, 0x7fff);
11367                break;
11368            case 0x1c: /* FCMGT */
11369                gen_helper_advsimd_cgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
11370                break;
11371            case 0x1d: /* FACGT */
11372                gen_helper_advsimd_acgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
11373                break;
11374            default:
11375                fprintf(stderr, "%s: insn %#04x, fpop %#2x @ %#" PRIx64 "\n",
11376                        __func__, insn, fpopcode, s->pc);
11377                g_assert_not_reached();
11378            }
11379
11380            write_vec_element_i32(s, tcg_res, rd, pass, MO_16);
11381            tcg_temp_free_i32(tcg_res);
11382            tcg_temp_free_i32(tcg_op1);
11383            tcg_temp_free_i32(tcg_op2);
11384        }
11385    }
11386
11387    tcg_temp_free_ptr(fpst);
11388
11389    clear_vec_high(s, is_q, rd);
11390}
11391
11392/* AdvSIMD three same extra
11393 *  31   30  29 28       24 23  22  21 20  16  15 14    11  10 9  5 4  0
11394 * +---+---+---+-----------+------+---+------+---+--------+---+----+----+
11395 * | 0 | Q | U | 0 1 1 1 0 | size | 0 |  Rm  | 1 | opcode | 1 | Rn | Rd |
11396 * +---+---+---+-----------+------+---+------+---+--------+---+----+----+
11397 */
11398static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn)
11399{
11400    int rd = extract32(insn, 0, 5);
11401    int rn = extract32(insn, 5, 5);
11402    int opcode = extract32(insn, 11, 4);
11403    int rm = extract32(insn, 16, 5);
11404    int size = extract32(insn, 22, 2);
11405    bool u = extract32(insn, 29, 1);
11406    bool is_q = extract32(insn, 30, 1);
11407    int feature, rot;
11408
11409    switch (u * 16 + opcode) {
11410    case 0x10: /* SQRDMLAH (vector) */
11411    case 0x11: /* SQRDMLSH (vector) */
11412        if (size != 1 && size != 2) {
11413            unallocated_encoding(s);
11414            return;
11415        }
11416        feature = ARM_FEATURE_V8_RDM;
11417        break;
11418    case 0x02: /* SDOT (vector) */
11419    case 0x12: /* UDOT (vector) */
11420        if (size != MO_32) {
11421            unallocated_encoding(s);
11422            return;
11423        }
11424        feature = ARM_FEATURE_V8_DOTPROD;
11425        break;
11426    case 0x8: /* FCMLA, #0 */
11427    case 0x9: /* FCMLA, #90 */
11428    case 0xa: /* FCMLA, #180 */
11429    case 0xb: /* FCMLA, #270 */
11430    case 0xc: /* FCADD, #90 */
11431    case 0xe: /* FCADD, #270 */
11432        if (size == 0
11433            || (size == 1 && !arm_dc_feature(s, ARM_FEATURE_V8_FP16))
11434            || (size == 3 && !is_q)) {
11435            unallocated_encoding(s);
11436            return;
11437        }
11438        feature = ARM_FEATURE_V8_FCMA;
11439        break;
11440    default:
11441        unallocated_encoding(s);
11442        return;
11443    }
11444    if (!arm_dc_feature(s, feature)) {
11445        unallocated_encoding(s);
11446        return;
11447    }
11448    if (!fp_access_check(s)) {
11449        return;
11450    }
11451
11452    switch (opcode) {
11453    case 0x0: /* SQRDMLAH (vector) */
11454        switch (size) {
11455        case 1:
11456            gen_gvec_op3_env(s, is_q, rd, rn, rm, gen_helper_gvec_qrdmlah_s16);
11457            break;
11458        case 2:
11459            gen_gvec_op3_env(s, is_q, rd, rn, rm, gen_helper_gvec_qrdmlah_s32);
11460            break;
11461        default:
11462            g_assert_not_reached();
11463        }
11464        return;
11465
11466    case 0x1: /* SQRDMLSH (vector) */
11467        switch (size) {
11468        case 1:
11469            gen_gvec_op3_env(s, is_q, rd, rn, rm, gen_helper_gvec_qrdmlsh_s16);
11470            break;
11471        case 2:
11472            gen_gvec_op3_env(s, is_q, rd, rn, rm, gen_helper_gvec_qrdmlsh_s32);
11473            break;
11474        default:
11475            g_assert_not_reached();
11476        }
11477        return;
11478
11479    case 0x2: /* SDOT / UDOT */
11480        gen_gvec_op3_ool(s, is_q, rd, rn, rm, 0,
11481                         u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b);
11482        return;
11483
11484    case 0x8: /* FCMLA, #0 */
11485    case 0x9: /* FCMLA, #90 */
11486    case 0xa: /* FCMLA, #180 */
11487    case 0xb: /* FCMLA, #270 */
11488        rot = extract32(opcode, 0, 2);
11489        switch (size) {
11490        case 1:
11491            gen_gvec_op3_fpst(s, is_q, rd, rn, rm, true, rot,
11492                              gen_helper_gvec_fcmlah);
11493            break;
11494        case 2:
11495            gen_gvec_op3_fpst(s, is_q, rd, rn, rm, false, rot,
11496                              gen_helper_gvec_fcmlas);
11497            break;
11498        case 3:
11499            gen_gvec_op3_fpst(s, is_q, rd, rn, rm, false, rot,
11500                              gen_helper_gvec_fcmlad);
11501            break;
11502        default:
11503            g_assert_not_reached();
11504        }
11505        return;
11506
11507    case 0xc: /* FCADD, #90 */
11508    case 0xe: /* FCADD, #270 */
11509        rot = extract32(opcode, 1, 1);
11510        switch (size) {
11511        case 1:
11512            gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
11513                              gen_helper_gvec_fcaddh);
11514            break;
11515        case 2:
11516            gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
11517                              gen_helper_gvec_fcadds);
11518            break;
11519        case 3:
11520            gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
11521                              gen_helper_gvec_fcaddd);
11522            break;
11523        default:
11524            g_assert_not_reached();
11525        }
11526        return;
11527
11528    default:
11529        g_assert_not_reached();
11530    }
11531}
11532
11533static void handle_2misc_widening(DisasContext *s, int opcode, bool is_q,
11534                                  int size, int rn, int rd)
11535{
11536    /* Handle 2-reg-misc ops which are widening (so each size element
11537     * in the source becomes a 2*size element in the destination).
11538     * The only instruction like this is FCVTL.
11539     */
11540    int pass;
11541
11542    if (size == 3) {
11543        /* 32 -> 64 bit fp conversion */
11544        TCGv_i64 tcg_res[2];
11545        int srcelt = is_q ? 2 : 0;
11546
11547        for (pass = 0; pass < 2; pass++) {
11548            TCGv_i32 tcg_op = tcg_temp_new_i32();
11549            tcg_res[pass] = tcg_temp_new_i64();
11550
11551            read_vec_element_i32(s, tcg_op, rn, srcelt + pass, MO_32);
11552            gen_helper_vfp_fcvtds(tcg_res[pass], tcg_op, cpu_env);
11553            tcg_temp_free_i32(tcg_op);
11554        }
11555        for (pass = 0; pass < 2; pass++) {
11556            write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
11557            tcg_temp_free_i64(tcg_res[pass]);
11558        }
11559    } else {
11560        /* 16 -> 32 bit fp conversion */
11561        int srcelt = is_q ? 4 : 0;
11562        TCGv_i32 tcg_res[4];
11563        TCGv_ptr fpst = get_fpstatus_ptr(false);
11564        TCGv_i32 ahp = get_ahp_flag();
11565
11566        for (pass = 0; pass < 4; pass++) {
11567            tcg_res[pass] = tcg_temp_new_i32();
11568
11569            read_vec_element_i32(s, tcg_res[pass], rn, srcelt + pass, MO_16);
11570            gen_helper_vfp_fcvt_f16_to_f32(tcg_res[pass], tcg_res[pass],
11571                                           fpst, ahp);
11572        }
11573        for (pass = 0; pass < 4; pass++) {
11574            write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32);
11575            tcg_temp_free_i32(tcg_res[pass]);
11576        }
11577
11578        tcg_temp_free_ptr(fpst);
11579        tcg_temp_free_i32(ahp);
11580    }
11581}
11582
11583static void handle_rev(DisasContext *s, int opcode, bool u,
11584                       bool is_q, int size, int rn, int rd)
11585{
11586    int op = (opcode << 1) | u;
11587    int opsz = op + size;
11588    int grp_size = 3 - opsz;
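         /* grp_size is log2 of the byte width of the group within which
          * elements are reversed: e.g. REV32 (op == 1) on bytes (size == 0)
          * gives grp_size == 2, i.e. byte-reverse within 32-bit groups.
          */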
11589    int dsize = is_q ? 128 : 64;
11590    int i;
11591
11592    if (opsz >= 3) {
11593        unallocated_encoding(s);
11594        return;
11595    }
11596
11597    if (!fp_access_check(s)) {
11598        return;
11599    }
11600
11601    if (size == 0) {
11602        /* Special case bytes, use bswap op on each group of elements */
11603        int groups = dsize / (8 << grp_size);
11604
11605        for (i = 0; i < groups; i++) {
11606            TCGv_i64 tcg_tmp = tcg_temp_new_i64();
11607
11608            read_vec_element(s, tcg_tmp, rn, i, grp_size);
11609            switch (grp_size) {
11610            case MO_16:
11611                tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp);
11612                break;
11613            case MO_32:
11614                tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp);
11615                break;
11616            case MO_64:
11617                tcg_gen_bswap64_i64(tcg_tmp, tcg_tmp);
11618                break;
11619            default:
11620                g_assert_not_reached();
11621            }
11622            write_vec_element(s, tcg_tmp, rd, i, grp_size);
11623            tcg_temp_free_i64(tcg_tmp);
11624        }
11625        clear_vec_high(s, is_q, rd);
11626    } else {
11627        int revmask = (1 << grp_size) - 1;
11628        int esize = 8 << size;
11629        int elements = dsize / esize;
11630        TCGv_i64 tcg_rn = tcg_temp_new_i64();
11631        TCGv_i64 tcg_rd = tcg_const_i64(0);
11632        TCGv_i64 tcg_rd_hi = tcg_const_i64(0);
11633
11634        for (i = 0; i < elements; i++) {
11635            int e_rev = (i & 0xf) ^ revmask;
11636            int off = e_rev * esize;
11637            read_vec_element(s, tcg_rn, rn, i, size);
11638            if (off >= 64) {
11639                tcg_gen_deposit_i64(tcg_rd_hi, tcg_rd_hi,
11640                                    tcg_rn, off - 64, esize);
11641            } else {
11642                tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, off, esize);
11643            }
11644        }
11645        write_vec_element(s, tcg_rd, rd, 0, MO_64);
11646        write_vec_element(s, tcg_rd_hi, rd, 1, MO_64);
11647
11648        tcg_temp_free_i64(tcg_rd_hi);
11649        tcg_temp_free_i64(tcg_rd);
11650        tcg_temp_free_i64(tcg_rn);
11651    }
11652}
11653
11654static void handle_2misc_pairwise(DisasContext *s, int opcode, bool u,
11655                                  bool is_q, int size, int rn, int rd)
11656{
11657    /* Implement the pairwise operations from 2-misc:
11658     * SADDLP, UADDLP, SADALP, UADALP.
11659     * These all add pairs of elements in the input to produce a
11660     * double-width result element in the output (possibly accumulating).
11661     */
11662    bool accum = (opcode == 0x6);
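         /* Opcode 0x2 is SADDLP/UADDLP; opcode 0x6 is the accumulating
          * SADALP/UADALP form.
          */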
11663    int maxpass = is_q ? 2 : 1;
11664    int pass;
11665    TCGv_i64 tcg_res[2];
11666
11667    if (size == 2) {
11668        /* 32 + 32 -> 64 op */
11669        TCGMemOp memop = size + (u ? 0 : MO_SIGN);
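             /* For the signed SADDLP/SADALP forms, the MO_SIGN read
              * sign-extends each 32-bit input to 64 bits before the add.
              */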
11670
11671        for (pass = 0; pass < maxpass; pass++) {
11672            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
11673            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
11674
11675            tcg_res[pass] = tcg_temp_new_i64();
11676
11677            read_vec_element(s, tcg_op1, rn, pass * 2, memop);
11678            read_vec_element(s, tcg_op2, rn, pass * 2 + 1, memop);
11679            tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2);
11680            if (accum) {
11681                read_vec_element(s, tcg_op1, rd, pass, MO_64);
11682                tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
11683            }
11684
11685            tcg_temp_free_i64(tcg_op1);
11686            tcg_temp_free_i64(tcg_op2);
11687        }
11688    } else {
11689        for (pass = 0; pass < maxpass; pass++) {
11690            TCGv_i64 tcg_op = tcg_temp_new_i64();
11691            NeonGenOneOpFn *genfn;
11692            static NeonGenOneOpFn * const fns[2][2] = {
11693                { gen_helper_neon_addlp_s8,  gen_helper_neon_addlp_u8 },
11694                { gen_helper_neon_addlp_s16,  gen_helper_neon_addlp_u16 },
11695            };
11696
11697            genfn = fns[size][u];
11698
11699            tcg_res[pass] = tcg_temp_new_i64();
11700
11701            read_vec_element(s, tcg_op, rn, pass, MO_64);
11702            genfn(tcg_res[pass], tcg_op);
11703
11704            if (accum) {
11705                read_vec_element(s, tcg_op, rd, pass, MO_64);
11706                if (size == 0) {
11707                    gen_helper_neon_addl_u16(tcg_res[pass],
11708                                             tcg_res[pass], tcg_op);
11709                } else {
11710                    gen_helper_neon_addl_u32(tcg_res[pass],
11711                                             tcg_res[pass], tcg_op);
11712                }
11713            }
11714            tcg_temp_free_i64(tcg_op);
11715        }
11716    }
11717    if (!is_q) {
11718        tcg_res[1] = tcg_const_i64(0);
11719    }
11720    for (pass = 0; pass < 2; pass++) {
11721        write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
11722        tcg_temp_free_i64(tcg_res[pass]);
11723    }
11724}
11725
11726static void handle_shll(DisasContext *s, bool is_q, int size, int rn, int rd)
11727{
11728    /* Implement SHLL and SHLL2 */
11729    int pass;
11730    int part = is_q ? 2 : 0;
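         /* SHLL2 (Q == 1) widens the high half of the source register,
          * so start reading at 32-bit element 2 rather than 0.
          */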
11731    TCGv_i64 tcg_res[2];
11732
11733    for (pass = 0; pass < 2; pass++) {
11734        static NeonGenWidenFn * const widenfns[3] = {
11735            gen_helper_neon_widen_u8,
11736            gen_helper_neon_widen_u16,
11737            tcg_gen_extu_i32_i64,
11738        };
11739        NeonGenWidenFn *widenfn = widenfns[size];
11740        TCGv_i32 tcg_op = tcg_temp_new_i32();
11741
11742        read_vec_element_i32(s, tcg_op, rn, part + pass, MO_32);
11743        tcg_res[pass] = tcg_temp_new_i64();
11744        widenfn(tcg_res[pass], tcg_op);
11745        tcg_gen_shli_i64(tcg_res[pass], tcg_res[pass], 8 << size);
11746
11747        tcg_temp_free_i32(tcg_op);
11748    }
11749
11750    for (pass = 0; pass < 2; pass++) {
11751        write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
11752        tcg_temp_free_i64(tcg_res[pass]);
11753    }
11754}
11755
11756/* AdvSIMD two reg misc
11757 *   31  30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
11758 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
11759 * | 0 | Q | U | 0 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
11760 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
11761 */
11762static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
11763{
11764    int size = extract32(insn, 22, 2);
11765    int opcode = extract32(insn, 12, 5);
11766    bool u = extract32(insn, 29, 1);
11767    bool is_q = extract32(insn, 30, 1);
11768    int rn = extract32(insn, 5, 5);
11769    int rd = extract32(insn, 0, 5);
11770    bool need_fpstatus = false;
11771    bool need_rmode = false;
11772    int rmode = -1;
11773    TCGv_i32 tcg_rmode;
11774    TCGv_ptr tcg_fpstatus;
11775
11776    switch (opcode) {
11777    case 0x0: /* REV64, REV32 */
11778    case 0x1: /* REV16 */
11779        handle_rev(s, opcode, u, is_q, size, rn, rd);
11780        return;
11781    case 0x5: /* CNT, NOT, RBIT */
11782        if (u && size == 0) {
11783            /* NOT */
11784            break;
11785        } else if (u && size == 1) {
11786            /* RBIT */
11787            break;
11788        } else if (!u && size == 0) {
11789            /* CNT */
11790            break;
11791        }
11792        unallocated_encoding(s);
11793        return;
11794    case 0x12: /* XTN, XTN2, SQXTUN, SQXTUN2 */
11795    case 0x14: /* SQXTN, SQXTN2, UQXTN, UQXTN2 */
11796        if (size == 3) {
11797            unallocated_encoding(s);
11798            return;
11799        }
11800        if (!fp_access_check(s)) {
11801            return;
11802        }
11803
11804        handle_2misc_narrow(s, false, opcode, u, is_q, size, rn, rd);
11805        return;
11806    case 0x4: /* CLS, CLZ */
11807        if (size == 3) {
11808            unallocated_encoding(s);
11809            return;
11810        }
11811        break;
11812    case 0x2: /* SADDLP, UADDLP */
11813    case 0x6: /* SADALP, UADALP */
11814        if (size == 3) {
11815            unallocated_encoding(s);
11816            return;
11817        }
11818        if (!fp_access_check(s)) {
11819            return;
11820        }
11821        handle_2misc_pairwise(s, opcode, u, is_q, size, rn, rd);
11822        return;
11823    case 0x13: /* SHLL, SHLL2 */
11824        if (u == 0 || size == 3) {
11825            unallocated_encoding(s);
11826            return;
11827        }
11828        if (!fp_access_check(s)) {
11829            return;
11830        }
11831        handle_shll(s, is_q, size, rn, rd);
11832        return;
11833    case 0xa: /* CMLT */
11834        if (u == 1) {
11835            unallocated_encoding(s);
11836            return;
11837        }
11838        /* fall through */
11839    case 0x8: /* CMGT, CMGE */
11840    case 0x9: /* CMEQ, CMLE */
11841    case 0xb: /* ABS, NEG */
11842        if (size == 3 && !is_q) {
11843            unallocated_encoding(s);
11844            return;
11845        }
11846        break;
11847    case 0x3: /* SUQADD, USQADD */
11848        if (size == 3 && !is_q) {
11849            unallocated_encoding(s);
11850            return;
11851        }
11852        if (!fp_access_check(s)) {
11853            return;
11854        }
11855        handle_2misc_satacc(s, false, u, is_q, size, rn, rd);
11856        return;
11857    case 0x7: /* SQABS, SQNEG */
11858        if (size == 3 && !is_q) {
11859            unallocated_encoding(s);
11860            return;
11861        }
11862        break;
11863    case 0xc ... 0xf:
11864    case 0x16 ... 0x1d:
11865    case 0x1f:
11866    {
11867        /* Floating point: U, size[1] and opcode indicate operation;
11868         * size[0] indicates single or double precision.
11869         */
11870        int is_double = extract32(size, 0, 1);
11871        opcode |= (extract32(size, 1, 1) << 5) | (u << 6);
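         /* e.g. FNEG is U=1 with size[1]=1 and base opcode 0xf, which
          * composes to the 0x6f case value used below.
          */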
11872        size = is_double ? 3 : 2;
11873        switch (opcode) {
11874        case 0x2f: /* FABS */
11875        case 0x6f: /* FNEG */
11876            if (size == 3 && !is_q) {
11877                unallocated_encoding(s);
11878                return;
11879            }
11880            break;
11881        case 0x1d: /* SCVTF */
11882        case 0x5d: /* UCVTF */
11883        {
11884            bool is_signed = (opcode == 0x1d);
11885            int elements = is_double ? 2 : is_q ? 4 : 2;
11886            if (is_double && !is_q) {
11887                unallocated_encoding(s);
11888                return;
11889            }
11890            if (!fp_access_check(s)) {
11891                return;
11892            }
11893            handle_simd_intfp_conv(s, rd, rn, elements, is_signed, 0, size);
11894            return;
11895        }
11896        case 0x2c: /* FCMGT (zero) */
11897        case 0x2d: /* FCMEQ (zero) */
11898        case 0x2e: /* FCMLT (zero) */
11899        case 0x6c: /* FCMGE (zero) */
11900        case 0x6d: /* FCMLE (zero) */
11901            if (size == 3 && !is_q) {
11902                unallocated_encoding(s);
11903                return;
11904            }
11905            handle_2misc_fcmp_zero(s, opcode, false, u, is_q, size, rn, rd);
11906            return;
11907        case 0x7f: /* FSQRT */
11908            if (size == 3 && !is_q) {
11909                unallocated_encoding(s);
11910                return;
11911            }
11912            break;
11913        case 0x1a: /* FCVTNS */
11914        case 0x1b: /* FCVTMS */
11915        case 0x3a: /* FCVTPS */
11916        case 0x3b: /* FCVTZS */
11917        case 0x5a: /* FCVTNU */
11918        case 0x5b: /* FCVTMU */
11919        case 0x7a: /* FCVTPU */
11920        case 0x7b: /* FCVTZU */
11921            need_fpstatus = true;
11922            need_rmode = true;
11923            rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
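             /* This recovers the FPRounding value from the opcode bits:
              * e.g. FCVTMS (0x1b) gives 0 | (1 << 1), i.e. FPROUNDING_NEGINF.
              */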
11924            if (size == 3 && !is_q) {
11925                unallocated_encoding(s);
11926                return;
11927            }
11928            break;
11929        case 0x5c: /* FCVTAU */
11930        case 0x1c: /* FCVTAS */
11931            need_fpstatus = true;
11932            need_rmode = true;
11933            rmode = FPROUNDING_TIEAWAY;
11934            if (size == 3 && !is_q) {
11935                unallocated_encoding(s);
11936                return;
11937            }
11938            break;
11939        case 0x3c: /* URECPE */
11940            if (size == 3) {
11941                unallocated_encoding(s);
11942                return;
11943            }
11944            /* fall through */
11945        case 0x3d: /* FRECPE */
11946        case 0x7d: /* FRSQRTE */
11947            if (size == 3 && !is_q) {
11948                unallocated_encoding(s);
11949                return;
11950            }
11951            if (!fp_access_check(s)) {
11952                return;
11953            }
11954            handle_2misc_reciprocal(s, opcode, false, u, is_q, size, rn, rd);
11955            return;
11956        case 0x56: /* FCVTXN, FCVTXN2 */
11957            if (size == 2) {
11958                unallocated_encoding(s);
11959                return;
11960            }
11961            /* fall through */
11962        case 0x16: /* FCVTN, FCVTN2 */
11963            /* handle_2misc_narrow does a 2*size -> size operation, but these
11964             * instructions encode the source size rather than dest size.
11965             */
11966            if (!fp_access_check(s)) {
11967                return;
11968            }
11969            handle_2misc_narrow(s, false, opcode, 0, is_q, size - 1, rn, rd);
11970            return;
11971        case 0x17: /* FCVTL, FCVTL2 */
11972            if (!fp_access_check(s)) {
11973                return;
11974            }
11975            handle_2misc_widening(s, opcode, is_q, size, rn, rd);
11976            return;
11977        case 0x18: /* FRINTN */
11978        case 0x19: /* FRINTM */
11979        case 0x38: /* FRINTP */
11980        case 0x39: /* FRINTZ */
11981            need_rmode = true;
11982            rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
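             /* Same opcode-bits-to-rounding-mode mapping as for the FCVT
              * group above.
              */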
11983            /* fall through */
11984        case 0x59: /* FRINTX */
11985        case 0x79: /* FRINTI */
11986            need_fpstatus = true;
11987            if (size == 3 && !is_q) {
11988                unallocated_encoding(s);
11989                return;
11990            }
11991            break;
11992        case 0x58: /* FRINTA */
11993            need_rmode = true;
11994            rmode = FPROUNDING_TIEAWAY;
11995            need_fpstatus = true;
11996            if (size == 3 && !is_q) {
11997                unallocated_encoding(s);
11998                return;
11999            }
12000            break;
12001        case 0x7c: /* URSQRTE */
12002            if (size == 3) {
12003                unallocated_encoding(s);
12004                return;
12005            }
12006            need_fpstatus = true;
12007            break;
12008        default:
12009            unallocated_encoding(s);
12010            return;
12011        }
12012        break;
12013    }
12014    default:
12015        unallocated_encoding(s);
12016        return;
12017    }
12018
12019    if (!fp_access_check(s)) {
12020        return;
12021    }
12022
12023    if (need_fpstatus || need_rmode) {
12024        tcg_fpstatus = get_fpstatus_ptr(false);
12025    } else {
12026        tcg_fpstatus = NULL;
12027    }
12028    if (need_rmode) {
12029        tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
12030        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
12031    } else {
12032        tcg_rmode = NULL;
12033    }
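     /* gen_helper_set_rmode installs the new rounding mode and hands back
      * the old one in tcg_rmode; the matching call at the end of this
      * function restores it.
      */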
12034
12035    switch (opcode) {
12036    case 0x5:
12037        if (u && size == 0) { /* NOT */
12038            gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_not, 0);
12039            return;
12040        }
12041        break;
12042    case 0xb:
12043        if (u) { /* NEG */
12044            gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_neg, size);
12045            return;
12046        }
12047        break;
12048    }
12049
12050    if (size == 3) {
12051        /* All 64-bit element operations can be shared with scalar 2misc */
12052        int pass;
12053
12054        /* Coverity claims (size == 3 && !is_q) has been eliminated
12055         * from all paths leading to here.
12056         */
12057        tcg_debug_assert(is_q);
12058        for (pass = 0; pass < 2; pass++) {
12059            TCGv_i64 tcg_op = tcg_temp_new_i64();
12060            TCGv_i64 tcg_res = tcg_temp_new_i64();
12061
12062            read_vec_element(s, tcg_op, rn, pass, MO_64);
12063
12064            handle_2misc_64(s, opcode, u, tcg_res, tcg_op,
12065                            tcg_rmode, tcg_fpstatus);
12066
12067            write_vec_element(s, tcg_res, rd, pass, MO_64);
12068
12069            tcg_temp_free_i64(tcg_res);
12070            tcg_temp_free_i64(tcg_op);
12071        }
12072    } else {
12073        int pass;
12074
12075        for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
12076            TCGv_i32 tcg_op = tcg_temp_new_i32();
12077            TCGv_i32 tcg_res = tcg_temp_new_i32();
12078            TCGCond cond;
12079
12080            read_vec_element_i32(s, tcg_op, rn, pass, MO_32);
12081
12082            if (size == 2) {
12083                /* Special cases for 32 bit elements */
12084                switch (opcode) {
12085                case 0xa: /* CMLT */
12086                    /* 32 bit integer comparison against zero, result is
12087                     * test ? (2^32 - 1) : 0. We implement via setcond(test)
12088                     * and negating.
12089                     */
12090                    cond = TCG_COND_LT;
12091                do_cmop:
12092                    tcg_gen_setcondi_i32(cond, tcg_res, tcg_op, 0);
12093                    tcg_gen_neg_i32(tcg_res, tcg_res);
12094                    break;
12095                case 0x8: /* CMGT, CMGE */
12096                    cond = u ? TCG_COND_GE : TCG_COND_GT;
12097                    goto do_cmop;
12098                case 0x9: /* CMEQ, CMLE */
12099                    cond = u ? TCG_COND_LE : TCG_COND_EQ;
12100                    goto do_cmop;
12101                case 0x4: /* CLS */
12102                    if (u) {
12103                        tcg_gen_clzi_i32(tcg_res, tcg_op, 32);
12104                    } else {
12105                        tcg_gen_clrsb_i32(tcg_res, tcg_op);
12106                    }
12107                    break;
12108                case 0x7: /* SQABS, SQNEG */
12109                    if (u) {
12110                        gen_helper_neon_qneg_s32(tcg_res, cpu_env, tcg_op);
12111                    } else {
12112                        gen_helper_neon_qabs_s32(tcg_res, cpu_env, tcg_op);
12113                    }
12114                    break;
12115                case 0xb: /* ABS, NEG */
12116                    if (u) {
12117                        tcg_gen_neg_i32(tcg_res, tcg_op);
12118                    } else {
12119                        TCGv_i32 tcg_zero = tcg_const_i32(0);
12120                        tcg_gen_neg_i32(tcg_res, tcg_op);
12121                        tcg_gen_movcond_i32(TCG_COND_GT, tcg_res, tcg_op,
12122                                            tcg_zero, tcg_op, tcg_res);
12123                        tcg_temp_free_i32(tcg_zero);
12124                    }
12125                    break;
12126                case 0x2f: /* FABS */
12127                    gen_helper_vfp_abss(tcg_res, tcg_op);
12128                    break;
12129                case 0x6f: /* FNEG */
12130                    gen_helper_vfp_negs(tcg_res, tcg_op);
12131                    break;
12132                case 0x7f: /* FSQRT */
12133                    gen_helper_vfp_sqrts(tcg_res, tcg_op, cpu_env);
12134                    break;
12135                case 0x1a: /* FCVTNS */
12136                case 0x1b: /* FCVTMS */
12137                case 0x1c: /* FCVTAS */
12138                case 0x3a: /* FCVTPS */
12139                case 0x3b: /* FCVTZS */
12140                {
12141                    TCGv_i32 tcg_shift = tcg_const_i32(0);
12142                    gen_helper_vfp_tosls(tcg_res, tcg_op,
12143                                         tcg_shift, tcg_fpstatus);
12144                    tcg_temp_free_i32(tcg_shift);
12145                    break;
12146                }
12147                case 0x5a: /* FCVTNU */
12148                case 0x5b: /* FCVTMU */
12149                case 0x5c: /* FCVTAU */
12150                case 0x7a: /* FCVTPU */
12151                case 0x7b: /* FCVTZU */
12152                {
12153                    TCGv_i32 tcg_shift = tcg_const_i32(0);
12154                    gen_helper_vfp_touls(tcg_res, tcg_op,
12155                                         tcg_shift, tcg_fpstatus);
12156                    tcg_temp_free_i32(tcg_shift);
12157                    break;
12158                }
12159                case 0x18: /* FRINTN */
12160                case 0x19: /* FRINTM */
12161                case 0x38: /* FRINTP */
12162                case 0x39: /* FRINTZ */
12163                case 0x58: /* FRINTA */
12164                case 0x79: /* FRINTI */
12165                    gen_helper_rints(tcg_res, tcg_op, tcg_fpstatus);
12166                    break;
12167                case 0x59: /* FRINTX */
12168                    gen_helper_rints_exact(tcg_res, tcg_op, tcg_fpstatus);
12169                    break;
12170                case 0x7c: /* URSQRTE */
12171                    gen_helper_rsqrte_u32(tcg_res, tcg_op, tcg_fpstatus);
12172                    break;
12173                default:
12174                    g_assert_not_reached();
12175                }
12176            } else {
12177                /* Use helpers for 8 and 16 bit elements */
12178                switch (opcode) {
12179                case 0x5: /* CNT, RBIT */
12180                    /* For these two insns size is part of the opcode specifier
12181                     * (handled earlier); they always operate on byte elements.
12182                     */
12183                    if (u) {
12184                        gen_helper_neon_rbit_u8(tcg_res, tcg_op);
12185                    } else {
12186                        gen_helper_neon_cnt_u8(tcg_res, tcg_op);
12187                    }
12188                    break;
12189                case 0x7: /* SQABS, SQNEG */
12190                {
12191                    NeonGenOneOpEnvFn *genfn;
12192                    static NeonGenOneOpEnvFn * const fns[2][2] = {
12193                        { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 },
12194                        { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 },
12195                    };
12196                    genfn = fns[size][u];
12197                    genfn(tcg_res, cpu_env, tcg_op);
12198                    break;
12199                }
12200                case 0x8: /* CMGT, CMGE */
12201                case 0x9: /* CMEQ, CMLE */
12202                case 0xa: /* CMLT */
12203                {
12204                    static NeonGenTwoOpFn * const fns[3][2] = {
12205                        { gen_helper_neon_cgt_s8, gen_helper_neon_cgt_s16 },
12206                        { gen_helper_neon_cge_s8, gen_helper_neon_cge_s16 },
12207                        { gen_helper_neon_ceq_u8, gen_helper_neon_ceq_u16 },
12208                    };
12209                    NeonGenTwoOpFn *genfn;
12210                    int comp;
12211                    bool reverse;
12212                    TCGv_i32 tcg_zero = tcg_const_i32(0);
12213
12214                    /* comp = index into [CMGT, CMGE, CMEQ, CMLE, CMLT] */
12215                    comp = (opcode - 0x8) * 2 + u;
12216                    /* ...but LE, LT are implemented as reverse GE, GT */
12217                    reverse = (comp > 2);
12218                    if (reverse) {
12219                        comp = 4 - comp;
12220                    }
12221                    genfn = fns[comp][size];
12222                    if (reverse) {
12223                        genfn(tcg_res, tcg_zero, tcg_op);
12224                    } else {
12225                        genfn(tcg_res, tcg_op, tcg_zero);
12226                    }
12227                    tcg_temp_free_i32(tcg_zero);
12228                    break;
12229                }
12230                case 0xb: /* ABS, NEG */
12231                    if (u) {
12232                        TCGv_i32 tcg_zero = tcg_const_i32(0);
12233                        if (size) {
12234                            gen_helper_neon_sub_u16(tcg_res, tcg_zero, tcg_op);
12235                        } else {
12236                            gen_helper_neon_sub_u8(tcg_res, tcg_zero, tcg_op);
12237                        }
12238                        tcg_temp_free_i32(tcg_zero);
12239                    } else {
12240                        if (size) {
12241                            gen_helper_neon_abs_s16(tcg_res, tcg_op);
12242                        } else {
12243                            gen_helper_neon_abs_s8(tcg_res, tcg_op);
12244                        }
12245                    }
12246                    break;
12247                case 0x4: /* CLS, CLZ */
12248                    if (u) {
12249                        if (size == 0) {
12250                            gen_helper_neon_clz_u8(tcg_res, tcg_op);
12251                        } else {
12252                            gen_helper_neon_clz_u16(tcg_res, tcg_op);
12253                        }
12254                    } else {
12255                        if (size == 0) {
12256                            gen_helper_neon_cls_s8(tcg_res, tcg_op);
12257                        } else {
12258                            gen_helper_neon_cls_s16(tcg_res, tcg_op);
12259                        }
12260                    }
12261                    break;
12262                default:
12263                    g_assert_not_reached();
12264                }
12265            }
12266
12267            write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
12268
12269            tcg_temp_free_i32(tcg_res);
12270            tcg_temp_free_i32(tcg_op);
12271        }
12272    }
12273    clear_vec_high(s, is_q, rd);
12274
12275    if (need_rmode) {
12276        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
12277        tcg_temp_free_i32(tcg_rmode);
12278    }
12279    if (need_fpstatus) {
12280        tcg_temp_free_ptr(tcg_fpstatus);
12281    }
12282}
12283
12284/* AdvSIMD [scalar] two register miscellaneous (FP16)
12285 *
12286 *   31  30  29 28  27     24  23 22 21       17 16    12 11 10 9    5 4    0
12287 * +---+---+---+---+---------+---+-------------+--------+-----+------+------+
12288 * | 0 | Q | U | S | 1 1 1 0 | a | 1 1 1 1 0 0 | opcode | 1 0 |  Rn  |  Rd  |
12289 * +---+---+---+---+---------+---+-------------+--------+-----+------+------+
12290 *   mask: 1000 1111 0111 1110 0000 1100 0000 0000 0x8f7e 0c00
12291 *   val:  0000 1110 0111 1000 0000 1000 0000 0000 0x0e78 0800
12292 *
12293 * This covers two groups, with scalar access selected by bit 28.
12294 * Several of the instructions (the float-to-integral ops) exist only
12295 * in the vector form and are unallocated in the scalar decode, where
12296 * Q is always 1.
12297 */
12298static void disas_simd_two_reg_misc_fp16(DisasContext *s, uint32_t insn)
12299{
12300    int fpop, opcode, a, u;
12301    int rn, rd;
12302    bool is_q;
12303    bool is_scalar;
12304    bool only_in_vector = false;
12305
12306    int pass;
12307    TCGv_i32 tcg_rmode = NULL;
12308    TCGv_ptr tcg_fpstatus = NULL;
12309    bool need_rmode = false;
12310    bool need_fpst = true;
12311    int rmode;
12312
12313    if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
12314        unallocated_encoding(s);
12315        return;
12316    }
12317
12318    rd = extract32(insn, 0, 5);
12319    rn = extract32(insn, 5, 5);
12320
12321    a = extract32(insn, 23, 1);
12322    u = extract32(insn, 29, 1);
12323    is_scalar = extract32(insn, 28, 1);
12324    is_q = extract32(insn, 30, 1);
12325
12326    opcode = extract32(insn, 12, 5);
12327    fpop = deposit32(opcode, 5, 1, a);
12328    fpop = deposit32(fpop, 6, 1, u);
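     /* fpop is U:a:opcode, mirroring the single/double 2-misc decode so
      * the same case values (e.g. 0x2f for FABS) apply here.
      */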
12329
12333    switch (fpop) {
12334    case 0x1d: /* SCVTF */
12335    case 0x5d: /* UCVTF */
12336    {
12337        int elements;
12338
12339        if (is_scalar) {
12340            elements = 1;
12341        } else {
12342            elements = (is_q ? 8 : 4);
12343        }
12344
12345        if (!fp_access_check(s)) {
12346            return;
12347        }
12348        handle_simd_intfp_conv(s, rd, rn, elements, !u, 0, MO_16);
12349        return;
12350    }
12352    case 0x2c: /* FCMGT (zero) */
12353    case 0x2d: /* FCMEQ (zero) */
12354    case 0x2e: /* FCMLT (zero) */
12355    case 0x6c: /* FCMGE (zero) */
12356    case 0x6d: /* FCMLE (zero) */
12357        handle_2misc_fcmp_zero(s, fpop, is_scalar, 0, is_q, MO_16, rn, rd);
12358        return;
12359    case 0x3d: /* FRECPE */
12360    case 0x3f: /* FRECPX */
12361        break;
12362    case 0x18: /* FRINTN */
12363        need_rmode = true;
12364        only_in_vector = true;
12365        rmode = FPROUNDING_TIEEVEN;
12366        break;
12367    case 0x19: /* FRINTM */
12368        need_rmode = true;
12369        only_in_vector = true;
12370        rmode = FPROUNDING_NEGINF;
12371        break;
12372    case 0x38: /* FRINTP */
12373        need_rmode = true;
12374        only_in_vector = true;
12375        rmode = FPROUNDING_POSINF;
12376        break;
12377    case 0x39: /* FRINTZ */
12378        need_rmode = true;
12379        only_in_vector = true;
12380        rmode = FPROUNDING_ZERO;
12381        break;
12382    case 0x58: /* FRINTA */
12383        need_rmode = true;
12384        only_in_vector = true;
12385        rmode = FPROUNDING_TIEAWAY;
12386        break;
12387    case 0x59: /* FRINTX */
12388    case 0x79: /* FRINTI */
12389        only_in_vector = true;
12390        /* current rounding mode */
12391        break;
12392    case 0x1a: /* FCVTNS */
12393        need_rmode = true;
12394        rmode = FPROUNDING_TIEEVEN;
12395        break;
12396    case 0x1b: /* FCVTMS */
12397        need_rmode = true;
12398        rmode = FPROUNDING_NEGINF;
12399        break;
12400    case 0x1c: /* FCVTAS */
12401        need_rmode = true;
12402        rmode = FPROUNDING_TIEAWAY;
12403        break;
12404    case 0x3a: /* FCVTPS */
12405        need_rmode = true;
12406        rmode = FPROUNDING_POSINF;
12407        break;
12408    case 0x3b: /* FCVTZS */
12409        need_rmode = true;
12410        rmode = FPROUNDING_ZERO;
12411        break;
12412    case 0x5a: /* FCVTNU */
12413        need_rmode = true;
12414        rmode = FPROUNDING_TIEEVEN;
12415        break;
12416    case 0x5b: /* FCVTMU */
12417        need_rmode = true;
12418        rmode = FPROUNDING_NEGINF;
12419        break;
12420    case 0x5c: /* FCVTAU */
12421        need_rmode = true;
12422        rmode = FPROUNDING_TIEAWAY;
12423        break;
12424    case 0x7a: /* FCVTPU */
12425        need_rmode = true;
12426        rmode = FPROUNDING_POSINF;
12427        break;
12428    case 0x7b: /* FCVTZU */
12429        need_rmode = true;
12430        rmode = FPROUNDING_ZERO;
12431        break;
12432    case 0x2f: /* FABS */
12433    case 0x6f: /* FNEG */
12434        need_fpst = false;
12435        break;
12436    case 0x7d: /* FRSQRTE */
12437    case 0x7f: /* FSQRT (vector) */
12438        break;
12439    default:
12440        fprintf(stderr, "%s: insn %#04x fpop %#2x\n", __func__, insn, fpop);
12441        g_assert_not_reached();
12442    }
12443
12445    /* Check additional constraints for the scalar encoding */
12446    if (is_scalar) {
12447        if (!is_q) {
12448            unallocated_encoding(s);
12449            return;
12450        }
12451        /* FRINTxx is only in the vector form */
12452        if (only_in_vector) {
12453            unallocated_encoding(s);
12454            return;
12455        }
12456    }
12457
12458    if (!fp_access_check(s)) {
12459        return;
12460    }
12461
12462    if (need_rmode || need_fpst) {
12463        tcg_fpstatus = get_fpstatus_ptr(true);
12464    }
12465
12466    if (need_rmode) {
12467        tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
12468        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
12469    }
12470
12471    if (is_scalar) {
12472        TCGv_i32 tcg_op = read_fp_hreg(s, rn);
12473        TCGv_i32 tcg_res = tcg_temp_new_i32();
12474
12475        switch (fpop) {
12476        case 0x1a: /* FCVTNS */
12477        case 0x1b: /* FCVTMS */
12478        case 0x1c: /* FCVTAS */
12479        case 0x3a: /* FCVTPS */
12480        case 0x3b: /* FCVTZS */
12481            gen_helper_advsimd_f16tosinth(tcg_res, tcg_op, tcg_fpstatus);
12482            break;
12483        case 0x3d: /* FRECPE */
12484            gen_helper_recpe_f16(tcg_res, tcg_op, tcg_fpstatus);
12485            break;
12486        case 0x3f: /* FRECPX */
12487            gen_helper_frecpx_f16(tcg_res, tcg_op, tcg_fpstatus);
12488            break;
12489        case 0x5a: /* FCVTNU */
12490        case 0x5b: /* FCVTMU */
12491        case 0x5c: /* FCVTAU */
12492        case 0x7a: /* FCVTPU */
12493        case 0x7b: /* FCVTZU */
12494            gen_helper_advsimd_f16touinth(tcg_res, tcg_op, tcg_fpstatus);
12495            break;
12496        case 0x6f: /* FNEG */
12497            tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
12498            break;
12499        case 0x7d: /* FRSQRTE */
12500            gen_helper_rsqrte_f16(tcg_res, tcg_op, tcg_fpstatus);
12501            break;
12502        default:
12503            g_assert_not_reached();
12504        }
12505
12506        /* limit any sign extension going on */
12507        tcg_gen_andi_i32(tcg_res, tcg_res, 0xffff);
12508        write_fp_sreg(s, rd, tcg_res);
12509
12510        tcg_temp_free_i32(tcg_res);
12511        tcg_temp_free_i32(tcg_op);
12512    } else {
12513        for (pass = 0; pass < (is_q ? 8 : 4); pass++) {
12514            TCGv_i32 tcg_op = tcg_temp_new_i32();
12515            TCGv_i32 tcg_res = tcg_temp_new_i32();
12516
12517            read_vec_element_i32(s, tcg_op, rn, pass, MO_16);
12518
12519            switch (fpop) {
12520            case 0x1a: /* FCVTNS */
12521            case 0x1b: /* FCVTMS */
12522            case 0x1c: /* FCVTAS */
12523            case 0x3a: /* FCVTPS */
12524            case 0x3b: /* FCVTZS */
12525                gen_helper_advsimd_f16tosinth(tcg_res, tcg_op, tcg_fpstatus);
12526                break;
12527            case 0x3d: /* FRECPE */
12528                gen_helper_recpe_f16(tcg_res, tcg_op, tcg_fpstatus);
12529                break;
12530            case 0x5a: /* FCVTNU */
12531            case 0x5b: /* FCVTMU */
12532            case 0x5c: /* FCVTAU */
12533            case 0x7a: /* FCVTPU */
12534            case 0x7b: /* FCVTZU */
12535                gen_helper_advsimd_f16touinth(tcg_res, tcg_op, tcg_fpstatus);
12536                break;
12537            case 0x18: /* FRINTN */
12538            case 0x19: /* FRINTM */
12539            case 0x38: /* FRINTP */
12540            case 0x39: /* FRINTZ */
12541            case 0x58: /* FRINTA */
12542            case 0x79: /* FRINTI */
12543                gen_helper_advsimd_rinth(tcg_res, tcg_op, tcg_fpstatus);
12544                break;
12545            case 0x59: /* FRINTX */
12546                gen_helper_advsimd_rinth_exact(tcg_res, tcg_op, tcg_fpstatus);
12547                break;
12548            case 0x2f: /* FABS */
12549                tcg_gen_andi_i32(tcg_res, tcg_op, 0x7fff);
12550                break;
12551            case 0x6f: /* FNEG */
12552                tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
12553                break;
12554            case 0x7d: /* FRSQRTE */
12555                gen_helper_rsqrte_f16(tcg_res, tcg_op, tcg_fpstatus);
12556                break;
12557            case 0x7f: /* FSQRT */
12558                gen_helper_sqrt_f16(tcg_res, tcg_op, tcg_fpstatus);
12559                break;
12560            default:
12561                g_assert_not_reached();
12562            }
12563
12564            write_vec_element_i32(s, tcg_res, rd, pass, MO_16);
12565
12566            tcg_temp_free_i32(tcg_res);
12567            tcg_temp_free_i32(tcg_op);
12568        }
12569
12570        clear_vec_high(s, is_q, rd);
12571    }
12572
12573    if (tcg_rmode) {
12574        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
12575        tcg_temp_free_i32(tcg_rmode);
12576    }
12577
12578    if (tcg_fpstatus) {
12579        tcg_temp_free_ptr(tcg_fpstatus);
12580    }
12581}
12582
12583/* AdvSIMD scalar x indexed element
12584 *  31 30  29 28       24 23  22 21  20  19  16 15 12  11  10 9    5 4    0
12585 * +-----+---+-----------+------+---+---+------+-----+---+---+------+------+
12586 * | 0 1 | U | 1 1 1 1 1 | size | L | M |  Rm  | opc | H | 0 |  Rn  |  Rd  |
12587 * +-----+---+-----------+------+---+---+------+-----+---+---+------+------+
12588 * AdvSIMD vector x indexed element
12589 *   31  30  29 28       24 23  22 21  20  19  16 15 12  11  10 9    5 4    0
12590 * +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+
12591 * | 0 | Q | U | 0 1 1 1 1 | size | L | M |  Rm  | opc | H | 0 |  Rn  |  Rd  |
12592 * +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+
12593 */
12594static void disas_simd_indexed(DisasContext *s, uint32_t insn)
12595{
12596    /* This encoding has two kinds of instruction:
12597     *  normal, where we perform elt x idxelt => elt for each
12598     *     element in the vector
12599     *  long, where we perform elt x idxelt and generate a result of
12600     *     double the width of the input element
12601     * The long ops have a 'part' specifier (ie come in INSN, INSN2 pairs).
12602     */
12603    bool is_scalar = extract32(insn, 28, 1);
12604    bool is_q = extract32(insn, 30, 1);
12605    bool u = extract32(insn, 29, 1);
12606    int size = extract32(insn, 22, 2);
12607    int l = extract32(insn, 21, 1);
12608    int m = extract32(insn, 20, 1);
12609    /* Note that the Rm field here is only 4 bits, not 5 as it usually is */
12610    int rm = extract32(insn, 16, 4);
12611    int opcode = extract32(insn, 12, 4);
12612    int h = extract32(insn, 11, 1);
12613    int rn = extract32(insn, 5, 5);
12614    int rd = extract32(insn, 0, 5);
12615    bool is_long = false;
12616    int is_fp = 0;
12617    bool is_fp16 = false;
12618    int index;
12619    TCGv_ptr fpst;
12620
12621    switch (16 * u + opcode) {
12622    case 0x08: /* MUL */
12623    case 0x10: /* MLA */
12624    case 0x14: /* MLS */
12625        if (is_scalar) {
12626            unallocated_encoding(s);
12627            return;
12628        }
12629        break;
12630    case 0x02: /* SMLAL, SMLAL2 */
12631    case 0x12: /* UMLAL, UMLAL2 */
12632    case 0x06: /* SMLSL, SMLSL2 */
12633    case 0x16: /* UMLSL, UMLSL2 */
12634    case 0x0a: /* SMULL, SMULL2 */
12635    case 0x1a: /* UMULL, UMULL2 */
12636        if (is_scalar) {
12637            unallocated_encoding(s);
12638            return;
12639        }
12640        is_long = true;
12641        break;
12642    case 0x03: /* SQDMLAL, SQDMLAL2 */
12643    case 0x07: /* SQDMLSL, SQDMLSL2 */
12644    case 0x0b: /* SQDMULL, SQDMULL2 */
12645        is_long = true;
12646        break;
12647    case 0x0c: /* SQDMULH */
12648    case 0x0d: /* SQRDMULH */
12649        break;
12650    case 0x01: /* FMLA */
12651    case 0x05: /* FMLS */
12652    case 0x09: /* FMUL */
12653    case 0x19: /* FMULX */
12654        is_fp = 1;
12655        break;
12656    case 0x1d: /* SQRDMLAH */
12657    case 0x1f: /* SQRDMLSH */
12658        if (!arm_dc_feature(s, ARM_FEATURE_V8_RDM)) {
12659            unallocated_encoding(s);
12660            return;
12661        }
12662        break;
12663    case 0x0e: /* SDOT */
12664    case 0x1e: /* UDOT */
12665        if (size != MO_32 || !arm_dc_feature(s, ARM_FEATURE_V8_DOTPROD)) {
12666            unallocated_encoding(s);
12667            return;
12668        }
12669        break;
12670    case 0x11: /* FCMLA #0 */
12671    case 0x13: /* FCMLA #90 */
12672    case 0x15: /* FCMLA #180 */
12673    case 0x17: /* FCMLA #270 */
12674        if (!arm_dc_feature(s, ARM_FEATURE_V8_FCMA)) {
12675            unallocated_encoding(s);
12676            return;
12677        }
12678        is_fp = 2;
12679        break;
12680    default:
12681        unallocated_encoding(s);
12682        return;
12683    }
12684
12685    switch (is_fp) {
12686    case 1: /* normal fp */
12687        /* convert insn encoded size to TCGMemOp size */
12688        switch (size) {
12689        case 0: /* half-precision */
12690            size = MO_16;
12691            is_fp16 = true;
12692            break;
12693        case MO_32: /* single precision */
12694        case MO_64: /* double precision */
12695            break;
12696        default:
12697            unallocated_encoding(s);
12698            return;
12699        }
12700        break;
12701
12702    case 2: /* complex fp */
12703        /* Each indexable element is a complex pair.  */
12704        size <<= 1;
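         /* e.g. a size field of 1 (16-bit reals) indexes as MO_32, since
          * each indexable complex element is a (real, imag) pair.
          */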
12705        switch (size) {
12706        case MO_32:
12707            if (h && !is_q) {
12708                unallocated_encoding(s);
12709                return;
12710            }
12711            is_fp16 = true;
12712            break;
12713        case MO_64:
12714            break;
12715        default:
12716            unallocated_encoding(s);
12717            return;
12718        }
12719        break;
12720
12721    default: /* integer */
12722        switch (size) {
12723        case MO_8:
12724        case MO_64:
12725            unallocated_encoding(s);
12726            return;
12727        }
12728        break;
12729    }
12730    if (is_fp16 && !arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
12731        unallocated_encoding(s);
12732        return;
12733    }
12734
12735    /* Given TCGMemOp size, adjust register and indexing: for MO_16 the
          * H:L:M bits form the element index, while for larger elements the
          * M bit instead extends Rm to the usual 5 bits.
          */
12736    switch (size) {
12737    case MO_16:
12738        index = h << 2 | l << 1 | m;
12739        break;
12740    case MO_32:
12741        index = h << 1 | l;
12742        rm |= m << 4;
12743        break;
12744    case MO_64:
12745        if (l || !is_q) {
12746            unallocated_encoding(s);
12747            return;
12748        }
12749        index = h;
12750        rm |= m << 4;
12751        break;
12752    default:
12753        g_assert_not_reached();
12754    }
12755
12756    if (!fp_access_check(s)) {
12757        return;
12758    }
12759
12760    if (is_fp) {
12761        fpst = get_fpstatus_ptr(is_fp16);
12762    } else {
12763        fpst = NULL;
12764    }
12765
12766    switch (16 * u + opcode) {
12767    case 0x0e: /* SDOT */
12768    case 0x1e: /* UDOT */
12769        gen_gvec_op3_ool(s, is_q, rd, rn, rm, index,
12770                         u ? gen_helper_gvec_udot_idx_b
12771                         : gen_helper_gvec_sdot_idx_b);
12772        return;
12773    case 0x11: /* FCMLA #0 */
12774    case 0x13: /* FCMLA #90 */
12775    case 0x15: /* FCMLA #180 */
12776    case 0x17: /* FCMLA #270 */
12777        {
12778            int rot = extract32(insn, 13, 2);
12779            int data = (index << 2) | rot;
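                 /* The helper receives the rotation and the element index
                  * packed into its immediate 'data' operand: rot (in units
                  * of 90 degrees) in bits [1:0] and the index above it,
                  * e.g. index 1 with rot #270 (rot == 3) packs to 0b111.
                  */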
12780            tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
12781                               vec_full_reg_offset(s, rn),
12782                               vec_full_reg_offset(s, rm), fpst,
12783                               is_q ? 16 : 8, vec_full_reg_size(s), data,
12784                               size == MO_64
12785                               ? gen_helper_gvec_fcmlas_idx
12786                               : gen_helper_gvec_fcmlah_idx);
12787            tcg_temp_free_ptr(fpst);
12788        }
12789        return;
12790    }
12791
12792    if (size == 3) {
12793        TCGv_i64 tcg_idx = tcg_temp_new_i64();
12794        int pass;
12795
12796        assert(is_fp && is_q && !is_long);
12797
12798        read_vec_element(s, tcg_idx, rm, index, MO_64);
12799
12800        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
12801            TCGv_i64 tcg_op = tcg_temp_new_i64();
12802            TCGv_i64 tcg_res = tcg_temp_new_i64();
12803
12804            read_vec_element(s, tcg_op, rn, pass, MO_64);
12805
12806            switch (16 * u + opcode) {
12807            case 0x05: /* FMLS */
12808                /* As usual for ARM, separate negation for fused multiply-add */
12809                gen_helper_vfp_negd(tcg_op, tcg_op);
12810                /* fall through */
12811            case 0x01: /* FMLA */
12812                read_vec_element(s, tcg_res, rd, pass, MO_64);
12813                gen_helper_vfp_muladdd(tcg_res, tcg_op, tcg_idx, tcg_res, fpst);
12814                break;
12815            case 0x09: /* FMUL */
12816                gen_helper_vfp_muld(tcg_res, tcg_op, tcg_idx, fpst);
12817                break;
12818            case 0x19: /* FMULX */
12819                gen_helper_vfp_mulxd(tcg_res, tcg_op, tcg_idx, fpst);
12820                break;
12821            default:
12822                g_assert_not_reached();
12823            }
12824
12825            write_vec_element(s, tcg_res, rd, pass, MO_64);
12826            tcg_temp_free_i64(tcg_op);
12827            tcg_temp_free_i64(tcg_res);
12828        }
12829
12830        tcg_temp_free_i64(tcg_idx);
12831        clear_vec_high(s, !is_scalar, rd);
12832    } else if (!is_long) {
12833        /* 32 bit floating point, or 16 or 32 bit integer.
12834         * For the 16 bit scalar case we use the usual Neon helpers and
12835         * rely on the fact that 0 op 0 == 0 with no side effects.
12836         */
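             /* E.g. a scalar SQDMULH at MO_16 reads a zero-extended
              * halfword, so when the paired helper also processes the
              * unused upper halfword it computes 0 op 0 == 0, leaving the
              * high half of the 32-bit result clear as required.
              */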
12837        TCGv_i32 tcg_idx = tcg_temp_new_i32();
12838        int pass, maxpasses;
12839
12840        if (is_scalar) {
12841            maxpasses = 1;
12842        } else {
12843            maxpasses = is_q ? 4 : 2;
12844        }
12845
12846        read_vec_element_i32(s, tcg_idx, rm, index, size);
12847
12848        if (size == 1 && !is_scalar) {
12849            /* The simplest way to handle the 16x16 indexed ops is to duplicate
12850             * the index into both halves of the 32 bit tcg_idx and then use
12851             * the usual Neon helpers.
12852             */
12853            tcg_gen_deposit_i32(tcg_idx, tcg_idx, tcg_idx, 16, 16);
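                 /* E.g. tcg_idx 0x0000abcd becomes 0xabcdabcd. */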
12854        }
12855
12856        for (pass = 0; pass < maxpasses; pass++) {
12857            TCGv_i32 tcg_op = tcg_temp_new_i32();
12858            TCGv_i32 tcg_res = tcg_temp_new_i32();
12859
12860            read_vec_element_i32(s, tcg_op, rn, pass, is_scalar ? size : MO_32);
12861
12862            switch (16 * u + opcode) {
12863            case 0x08: /* MUL */
12864            case 0x10: /* MLA */
12865            case 0x14: /* MLS */
12866            {
12867                static NeonGenTwoOpFn * const fns[2][2] = {
12868                    { gen_helper_neon_add_u16, gen_helper_neon_sub_u16 },
12869                    { tcg_gen_add_i32, tcg_gen_sub_i32 },
12870                };
12871                NeonGenTwoOpFn *genfn;
12872                bool is_sub = opcode == 0x4;
12873
12874                if (size == 1) {
12875                    gen_helper_neon_mul_u16(tcg_res, tcg_op, tcg_idx);
12876                } else {
12877                    tcg_gen_mul_i32(tcg_res, tcg_op, tcg_idx);
12878                }
12879                if (opcode == 0x8) {
12880                    break;
12881                }
12882                read_vec_element_i32(s, tcg_op, rd, pass, MO_32);
12883                genfn = fns[size - 1][is_sub];
12884                genfn(tcg_res, tcg_op, tcg_res);
12885                break;
12886            }
12887            case 0x05: /* FMLS */
12888            case 0x01: /* FMLA */
12889                read_vec_element_i32(s, tcg_res, rd, pass,
12890                                     is_scalar ? size : MO_32);
12891                switch (size) {
12892                case 1:
12893                    if (opcode == 0x5) {
12894                        /* As usual for ARM, separate negation for fused
12895                         * multiply-add */
12896                        tcg_gen_xori_i32(tcg_op, tcg_op, 0x80008000);
12897                    }
12898                    if (is_scalar) {
12899                        gen_helper_advsimd_muladdh(tcg_res, tcg_op, tcg_idx,
12900                                                   tcg_res, fpst);
12901                    } else {
12902                        gen_helper_advsimd_muladd2h(tcg_res, tcg_op, tcg_idx,
12903                                                    tcg_res, fpst);
12904                    }
12905                    break;
12906                case 2:
12907                    if (opcode == 0x5) {
12908                        /* As usual for ARM, separate negation for
12909                         * fused multiply-add */
12910                        tcg_gen_xori_i32(tcg_op, tcg_op, 0x80000000);
12911                    }
12912                    gen_helper_vfp_muladds(tcg_res, tcg_op, tcg_idx,
12913                                           tcg_res, fpst);
12914                    break;
12915                default:
12916                    g_assert_not_reached();
12917                }
12918                break;
12919            case 0x09: /* FMUL */
12920                switch (size) {
12921                case 1:
12922                    if (is_scalar) {
12923                        gen_helper_advsimd_mulh(tcg_res, tcg_op,
12924                                                tcg_idx, fpst);
12925                    } else {
12926                        gen_helper_advsimd_mul2h(tcg_res, tcg_op,
12927                                                 tcg_idx, fpst);
12928                    }
12929                    break;
12930                case 2:
12931                    gen_helper_vfp_muls(tcg_res, tcg_op, tcg_idx, fpst);
12932                    break;
12933                default:
12934                    g_assert_not_reached();
12935                }
12936                break;
12937            case 0x19: /* FMULX */
12938                switch (size) {
12939                case 1:
12940                    if (is_scalar) {
12941                        gen_helper_advsimd_mulxh(tcg_res, tcg_op,
12942                                                 tcg_idx, fpst);
12943                    } else {
12944                        gen_helper_advsimd_mulx2h(tcg_res, tcg_op,
12945                                                  tcg_idx, fpst);
12946                    }
12947                    break;
12948                case 2:
12949                    gen_helper_vfp_mulxs(tcg_res, tcg_op, tcg_idx, fpst);
12950                    break;
12951                default:
12952                    g_assert_not_reached();
12953                }
12954                break;
12955            case 0x0c: /* SQDMULH */
12956                if (size == 1) {
12957                    gen_helper_neon_qdmulh_s16(tcg_res, cpu_env,
12958                                               tcg_op, tcg_idx);
12959                } else {
12960                    gen_helper_neon_qdmulh_s32(tcg_res, cpu_env,
12961                                               tcg_op, tcg_idx);
12962                }
12963                break;
12964            case 0x0d: /* SQRDMULH */
12965                if (size == 1) {
12966                    gen_helper_neon_qrdmulh_s16(tcg_res, cpu_env,
12967                                                tcg_op, tcg_idx);
12968                } else {
12969                    gen_helper_neon_qrdmulh_s32(tcg_res, cpu_env,
12970                                                tcg_op, tcg_idx);
12971                }
12972                break;
12973            case 0x1d: /* SQRDMLAH */
12974                read_vec_element_i32(s, tcg_res, rd, pass,
12975                                     is_scalar ? size : MO_32);
12976                if (size == 1) {
12977                    gen_helper_neon_qrdmlah_s16(tcg_res, cpu_env,
12978                                                tcg_op, tcg_idx, tcg_res);
12979                } else {
12980                    gen_helper_neon_qrdmlah_s32(tcg_res, cpu_env,
12981                                                tcg_op, tcg_idx, tcg_res);
12982                }
12983                break;
12984            case 0x1f: /* SQRDMLSH */
12985                read_vec_element_i32(s, tcg_res, rd, pass,
12986                                     is_scalar ? size : MO_32);
12987                if (size == 1) {
12988                    gen_helper_neon_qrdmlsh_s16(tcg_res, cpu_env,
12989                                                tcg_op, tcg_idx, tcg_res);
12990                } else {
12991                    gen_helper_neon_qrdmlsh_s32(tcg_res, cpu_env,
12992                                                tcg_op, tcg_idx, tcg_res);
12993                }
12994                break;
12995            default:
12996                g_assert_not_reached();
12997            }
12998
12999            if (is_scalar) {
13000                write_fp_sreg(s, rd, tcg_res);
13001            } else {
13002                write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
13003            }
13004
13005            tcg_temp_free_i32(tcg_op);
13006            tcg_temp_free_i32(tcg_res);
13007        }
13008
13009        tcg_temp_free_i32(tcg_idx);
13010        clear_vec_high(s, is_q, rd);
13011    } else {
13012        /* long ops: 16x16->32 or 32x32->64 */
13013        TCGv_i64 tcg_res[2];
13014        int pass;
13015        bool satop = extract32(opcode, 0, 1);
13016        TCGMemOp memop = MO_32;
13017
13018        if (satop || !u) {
13019            memop |= MO_SIGN;
13020        }
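             /* Bit 0 of the opcode marks the saturating-doubling forms
              * (SQDMULL/SQDMLAL/SQDMLSL), which are always signed;
              * otherwise the U bit selects unsigned vs signed widening.
              */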
13021
13022        if (size == 2) {
13023            TCGv_i64 tcg_idx = tcg_temp_new_i64();
13024
13025            read_vec_element(s, tcg_idx, rm, index, memop);
13026
13027            for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
13028                TCGv_i64 tcg_op = tcg_temp_new_i64();
13029                TCGv_i64 tcg_passres;
13030                int passelt;
13031
13032                if (is_scalar) {
13033                    passelt = 0;
13034                } else {
13035                    passelt = pass + (is_q * 2);
13036                }
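                     /* The "2" forms (e.g. SMLAL2) multiply elements from
                      * the upper half of the source vector, hence the
                      * offset of 2 when Q is set.
                      */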
13037
13038                read_vec_element(s, tcg_op, rn, passelt, memop);
13039
13040                tcg_res[pass] = tcg_temp_new_i64();
13041
13042                if (opcode == 0xa || opcode == 0xb) {
13043                    /* Non-accumulating ops */
13044                    tcg_passres = tcg_res[pass];
13045                } else {
13046                    tcg_passres = tcg_temp_new_i64();
13047                }
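                     /* SMULL/UMULL/SQDMULL (0xa/0xb) produce the widened
                      * product directly; the accumulating forms build it
                      * in a temporary and fold it into Vd below.
                      */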
13048
13049                tcg_gen_mul_i64(tcg_passres, tcg_op, tcg_idx);
13050                tcg_temp_free_i64(tcg_op);
13051
13052                if (satop) {
13053                    /* saturating, doubling */
13054                    gen_helper_neon_addl_saturate_s64(tcg_passres, cpu_env,
13055                                                      tcg_passres, tcg_passres);
13056                }
13057
13058                if (opcode == 0xa || opcode == 0xb) {
13059                    continue;
13060                }
13061
13062                /* Accumulating op: handle accumulate step */
13063                read_vec_element(s, tcg_res[pass], rd, pass, MO_64);
13064
13065                switch (opcode) {
13066                case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
13067                    tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
13068                    break;
13069                case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
13070                    tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
13071                    break;
13072                case 0x7: /* SQDMLSL, SQDMLSL2 */
13073                    tcg_gen_neg_i64(tcg_passres, tcg_passres);
13074                    /* fall through */
13075                case 0x3: /* SQDMLAL, SQDMLAL2 */
13076                    gen_helper_neon_addl_saturate_s64(tcg_res[pass], cpu_env,
13077                                                      tcg_res[pass],
13078                                                      tcg_passres);
13079                    break;
13080                default:
13081                    g_assert_not_reached();
13082                }
13083                tcg_temp_free_i64(tcg_passres);
13084            }
13085            tcg_temp_free_i64(tcg_idx);
13086
13087            clear_vec_high(s, !is_scalar, rd);
13088        } else {
13089            TCGv_i32 tcg_idx = tcg_temp_new_i32();
13090
13091            assert(size == 1);
13092            read_vec_element_i32(s, tcg_idx, rm, index, size);
13093
13094            if (!is_scalar) {
13095                /* The simplest way to handle the 16x16 indexed ops is to
13096                 * duplicate the index into both halves of the 32 bit tcg_idx
13097                 * and then use the usual Neon helpers.
13098                 */
13099                tcg_gen_deposit_i32(tcg_idx, tcg_idx, tcg_idx, 16, 16);
13100            }
13101
13102            for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
13103                TCGv_i32 tcg_op = tcg_temp_new_i32();
13104                TCGv_i64 tcg_passres;
13105
13106                if (is_scalar) {
13107                    read_vec_element_i32(s, tcg_op, rn, pass, size);
13108                } else {
13109                    read_vec_element_i32(s, tcg_op, rn,
13110                                         pass + (is_q * 2), MO_32);
13111                }
13112
13113                tcg_res[pass] = tcg_temp_new_i64();
13114
13115                if (opcode == 0xa || opcode == 0xb) {
13116                    /* Non-accumulating ops */
13117                    tcg_passres = tcg_res[pass];
13118                } else {
13119                    tcg_passres = tcg_temp_new_i64();
13120                }
13121
13122                if (memop & MO_SIGN) {
13123                    gen_helper_neon_mull_s16(tcg_passres, tcg_op, tcg_idx);
13124                } else {
13125                    gen_helper_neon_mull_u16(tcg_passres, tcg_op, tcg_idx);
13126                }
13127                if (satop) {
13128                    gen_helper_neon_addl_saturate_s32(tcg_passres, cpu_env,
13129                                                      tcg_passres, tcg_passres);
13130                }
13131                tcg_temp_free_i32(tcg_op);
13132
13133                if (opcode == 0xa || opcode == 0xb) {
13134                    continue;
13135                }
13136
13137                /* Accumulating op: handle accumulate step */
13138                read_vec_element(s, tcg_res[pass], rd, pass, MO_64);
13139
13140                switch (opcode) {
13141                case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
13142                    gen_helper_neon_addl_u32(tcg_res[pass], tcg_res[pass],
13143                                             tcg_passres);
13144                    break;
13145                case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
13146                    gen_helper_neon_subl_u32(tcg_res[pass], tcg_res[pass],
13147                                             tcg_passres);
13148                    break;
13149                case 0x7: /* SQDMLSL, SQDMLSL2 */
13150                    gen_helper_neon_negl_u32(tcg_passres, tcg_passres);
13151                    /* fall through */
13152                case 0x3: /* SQDMLAL, SQDMLAL2 */
13153                    gen_helper_neon_addl_saturate_s32(tcg_res[pass], cpu_env,
13154                                                      tcg_res[pass],
13155                                                      tcg_passres);
13156                    break;
13157                default:
13158                    g_assert_not_reached();
13159                }
13160                tcg_temp_free_i64(tcg_passres);
13161            }
13162            tcg_temp_free_i32(tcg_idx);
13163
13164            if (is_scalar) {
13165                tcg_gen_ext32u_i64(tcg_res[0], tcg_res[0]);
13166            }
13167        }
13168
13169        if (is_scalar) {
13170            tcg_res[1] = tcg_const_i64(0);
13171        }
13172
13173        for (pass = 0; pass < 2; pass++) {
13174            write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
13175            tcg_temp_free_i64(tcg_res[pass]);
13176        }
13177    }
13178
13179    if (fpst) {
13180        tcg_temp_free_ptr(fpst);
13181    }
13182}
13183
13184/* Crypto AES
13185 *  31             24 23  22 21       17 16    12 11 10 9    5 4    0
13186 * +-----------------+------+-----------+--------+-----+------+------+
13187 * | 0 1 0 0 1 1 1 0 | size | 1 0 1 0 0 | opcode | 1 0 |  Rn  |  Rd  |
13188 * +-----------------+------+-----------+--------+-----+------+------+
13189 */
13190static void disas_crypto_aes(DisasContext *s, uint32_t insn)
13191{
13192    int size = extract32(insn, 22, 2);
13193    int opcode = extract32(insn, 12, 5);
13194    int rn = extract32(insn, 5, 5);
13195    int rd = extract32(insn, 0, 5);
13196    int decrypt;
13197    TCGv_ptr tcg_rd_ptr, tcg_rn_ptr;
13198    TCGv_i32 tcg_decrypt;
13199    CryptoThreeOpIntFn *genfn;
13200
13201    if (!arm_dc_feature(s, ARM_FEATURE_V8_AES)
13202        || size != 0) {
13203        unallocated_encoding(s);
13204        return;
13205    }
13206
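     /* AESE/AESD share one helper and AESMC/AESIMC another; the
      * direction is selected by the 'decrypt' flag passed as the
      * third operand.
      */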
13207    switch (opcode) {
13208    case 0x4: /* AESE */
13209        decrypt = 0;
13210        genfn = gen_helper_crypto_aese;
13211        break;
13212    case 0x6: /* AESMC */
13213        decrypt = 0;
13214        genfn = gen_helper_crypto_aesmc;
13215        break;
13216    case 0x5: /* AESD */
13217        decrypt = 1;
13218        genfn = gen_helper_crypto_aese;
13219        break;
13220    case 0x7: /* AESIMC */
13221        decrypt = 1;
13222        genfn = gen_helper_crypto_aesmc;
13223        break;
13224    default:
13225        unallocated_encoding(s);
13226        return;
13227    }
13228
13229    if (!fp_access_check(s)) {
13230        return;
13231    }
13232
13233    tcg_rd_ptr = vec_full_reg_ptr(s, rd);
13234    tcg_rn_ptr = vec_full_reg_ptr(s, rn);
13235    tcg_decrypt = tcg_const_i32(decrypt);
13236
13237    genfn(tcg_rd_ptr, tcg_rn_ptr, tcg_decrypt);
13238
13239    tcg_temp_free_ptr(tcg_rd_ptr);
13240    tcg_temp_free_ptr(tcg_rn_ptr);
13241    tcg_temp_free_i32(tcg_decrypt);
13242}
13243
13244/* Crypto three-reg SHA
13245 *  31             24 23  22  21 20  16  15 14    12 11 10 9    5 4    0
13246 * +-----------------+------+---+------+---+--------+-----+------+------+
13247 * | 0 1 0 1 1 1 1 0 | size | 0 |  Rm  | 0 | opcode | 0 0 |  Rn  |  Rd  |
13248 * +-----------------+------+---+------+---+--------+-----+------+------+
13249 */
13250static void disas_crypto_three_reg_sha(DisasContext *s, uint32_t insn)
13251{
13252    int size = extract32(insn, 22, 2);
13253    int opcode = extract32(insn, 12, 3);
13254    int rm = extract32(insn, 16, 5);
13255    int rn = extract32(insn, 5, 5);
13256    int rd = extract32(insn, 0, 5);
13257    CryptoThreeOpFn *genfn;
13258    TCGv_ptr tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr;
13259    int feature = ARM_FEATURE_V8_SHA256;
13260
13261    if (size != 0) {
13262        unallocated_encoding(s);
13263        return;
13264    }
13265
13266    switch (opcode) {
13267    case 0: /* SHA1C */
13268    case 1: /* SHA1P */
13269    case 2: /* SHA1M */
13270    case 3: /* SHA1SU0 */
13271        genfn = NULL;
13272        feature = ARM_FEATURE_V8_SHA1;
13273        break;
13274    case 4: /* SHA256H */
13275        genfn = gen_helper_crypto_sha256h;
13276        break;
13277    case 5: /* SHA256H2 */
13278        genfn = gen_helper_crypto_sha256h2;
13279        break;
13280    case 6: /* SHA256SU1 */
13281        genfn = gen_helper_crypto_sha256su1;
13282        break;
13283    default:
13284        unallocated_encoding(s);
13285        return;
13286    }
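     /* A NULL genfn marks the SHA1 cases, which are all dispatched
      * below through the shared sha1_3reg helper using the opcode
      * value.
      */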
13287
13288    if (!arm_dc_feature(s, feature)) {
13289        unallocated_encoding(s);
13290        return;
13291    }
13292
13293    if (!fp_access_check(s)) {
13294        return;
13295    }
13296
13297    tcg_rd_ptr = vec_full_reg_ptr(s, rd);
13298    tcg_rn_ptr = vec_full_reg_ptr(s, rn);
13299    tcg_rm_ptr = vec_full_reg_ptr(s, rm);
13300
13301    if (genfn) {
13302        genfn(tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr);
13303    } else {
13304        TCGv_i32 tcg_opcode = tcg_const_i32(opcode);
13305
13306        gen_helper_crypto_sha1_3reg(tcg_rd_ptr, tcg_rn_ptr,
13307                                    tcg_rm_ptr, tcg_opcode);
13308        tcg_temp_free_i32(tcg_opcode);
13309    }
13310
13311    tcg_temp_free_ptr(tcg_rd_ptr);
13312    tcg_temp_free_ptr(tcg_rn_ptr);
13313    tcg_temp_free_ptr(tcg_rm_ptr);
13314}
13315
13316/* Crypto two-reg SHA
13317 *  31             24 23  22 21       17 16    12 11 10 9    5 4    0
13318 * +-----------------+------+-----------+--------+-----+------+------+
13319 * | 0 1 0 1 1 1 1 0 | size | 1 0 1 0 0 | opcode | 1 0 |  Rn  |  Rd  |
13320 * +-----------------+------+-----------+--------+-----+------+------+
13321 */
13322static void disas_crypto_two_reg_sha(DisasContext *s, uint32_t insn)
13323{
13324    int size = extract32(insn, 22, 2);
13325    int opcode = extract32(insn, 12, 5);
13326    int rn = extract32(insn, 5, 5);
13327    int rd = extract32(insn, 0, 5);
13328    CryptoTwoOpFn *genfn;
13329    int feature;
13330    TCGv_ptr tcg_rd_ptr, tcg_rn_ptr;
13331
13332    if (size != 0) {
13333        unallocated_encoding(s);
13334        return;
13335    }
13336
13337    switch (opcode) {
13338    case 0: /* SHA1H */
13339        feature = ARM_FEATURE_V8_SHA1;
13340        genfn = gen_helper_crypto_sha1h;
13341        break;
13342    case 1: /* SHA1SU1 */
13343        feature = ARM_FEATURE_V8_SHA1;
13344        genfn = gen_helper_crypto_sha1su1;
13345        break;
13346    case 2: /* SHA256SU0 */
13347        feature = ARM_FEATURE_V8_SHA256;
13348        genfn = gen_helper_crypto_sha256su0;
13349        break;
13350    default:
13351        unallocated_encoding(s);
13352        return;
13353    }
13354
13355    if (!arm_dc_feature(s, feature)) {
13356        unallocated_encoding(s);
13357        return;
13358    }
13359
13360    if (!fp_access_check(s)) {
13361        return;
13362    }
13363
13364    tcg_rd_ptr = vec_full_reg_ptr(s, rd);
13365    tcg_rn_ptr = vec_full_reg_ptr(s, rn);
13366
13367    genfn(tcg_rd_ptr, tcg_rn_ptr);
13368
13369    tcg_temp_free_ptr(tcg_rd_ptr);
13370    tcg_temp_free_ptr(tcg_rn_ptr);
13371}
13372
13373/* Crypto three-reg SHA512
13374 *  31                   21 20  16 15  14  13 12  11  10  9    5 4    0
13375 * +-----------------------+------+---+---+-----+--------+------+------+
13376 * | 1 1 0 0 1 1 1 0 0 1 1 |  Rm  | 1 | O | 0 0 | opcode |  Rn  |  Rd  |
13377 * +-----------------------+------+---+---+-----+--------+------+------+
13378 */
13379static void disas_crypto_three_reg_sha512(DisasContext *s, uint32_t insn)
13380{
13381    int opcode = extract32(insn, 10, 2);
13382    int o = extract32(insn, 14, 1);
13383    int rm = extract32(insn, 16, 5);
13384    int rn = extract32(insn, 5, 5);
13385    int rd = extract32(insn, 0, 5);
13386    int feature;
13387    CryptoThreeOpFn *genfn;
13388
13389    if (o == 0) {
13390        switch (opcode) {
13391        case 0: /* SHA512H */
13392            feature = ARM_FEATURE_V8_SHA512;
13393            genfn = gen_helper_crypto_sha512h;
13394            break;
13395        case 1: /* SHA512H2 */
13396            feature = ARM_FEATURE_V8_SHA512;
13397            genfn = gen_helper_crypto_sha512h2;
13398            break;
13399        case 2: /* SHA512SU1 */
13400            feature = ARM_FEATURE_V8_SHA512;
13401            genfn = gen_helper_crypto_sha512su1;
13402            break;
13403        case 3: /* RAX1 */
13404            feature = ARM_FEATURE_V8_SHA3;
13405            genfn = NULL;
13406            break;
13407        }
13408    } else {
13409        switch (opcode) {
13410        case 0: /* SM3PARTW1 */
13411            feature = ARM_FEATURE_V8_SM3;
13412            genfn = gen_helper_crypto_sm3partw1;
13413            break;
13414        case 1: /* SM3PARTW2 */
13415            feature = ARM_FEATURE_V8_SM3;
13416            genfn = gen_helper_crypto_sm3partw2;
13417            break;
13418        case 2: /* SM4EKEY */
13419            feature = ARM_FEATURE_V8_SM4;
13420            genfn = gen_helper_crypto_sm4ekey;
13421            break;
13422        default:
13423            unallocated_encoding(s);
13424            return;
13425        }
13426    }
13427
13428    if (!arm_dc_feature(s, feature)) {
13429        unallocated_encoding(s);
13430        return;
13431    }
13432
13433    if (!fp_access_check(s)) {
13434        return;
13435    }
13436
13437    if (genfn) {
13438        TCGv_ptr tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr;
13439
13440        tcg_rd_ptr = vec_full_reg_ptr(s, rd);
13441        tcg_rn_ptr = vec_full_reg_ptr(s, rn);
13442        tcg_rm_ptr = vec_full_reg_ptr(s, rm);
13443
13444        genfn(tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr);
13445
13446        tcg_temp_free_ptr(tcg_rd_ptr);
13447        tcg_temp_free_ptr(tcg_rn_ptr);
13448        tcg_temp_free_ptr(tcg_rm_ptr);
13449    } else {
13450        TCGv_i64 tcg_op1, tcg_op2, tcg_res[2];
13451        int pass;
13452
13453        tcg_op1 = tcg_temp_new_i64();
13454        tcg_op2 = tcg_temp_new_i64();
13455        tcg_res[0] = tcg_temp_new_i64();
13456        tcg_res[1] = tcg_temp_new_i64();
13457
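         /* RAX1 has no out-of-line helper; it is open-coded here as
          * Vd.2D = Vn.2D ^ ROL(Vm.2D, 1), one 64-bit lane per pass.
          */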
13458        for (pass = 0; pass < 2; pass++) {
13459            read_vec_element(s, tcg_op1, rn, pass, MO_64);
13460            read_vec_element(s, tcg_op2, rm, pass, MO_64);
13461
13462            tcg_gen_rotli_i64(tcg_res[pass], tcg_op2, 1);
13463            tcg_gen_xor_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
13464        }
13465        write_vec_element(s, tcg_res[0], rd, 0, MO_64);
13466        write_vec_element(s, tcg_res[1], rd, 1, MO_64);
13467
13468        tcg_temp_free_i64(tcg_op1);
13469        tcg_temp_free_i64(tcg_op2);
13470        tcg_temp_free_i64(tcg_res[0]);
13471        tcg_temp_free_i64(tcg_res[1]);
13472    }
13473}
13474
13475/* Crypto two-reg SHA512
13476 *  31                                     12  11  10  9    5 4    0
13477 * +-----------------------------------------+--------+------+------+
13478 * | 1 1 0 0 1 1 1 0 1 1 0 0 0 0 0 0 1 0 0 0 | opcode |  Rn  |  Rd  |
13479 * +-----------------------------------------+--------+------+------+
13480 */
13481static void disas_crypto_two_reg_sha512(DisasContext *s, uint32_t insn)
13482{
13483    int opcode = extract32(insn, 10, 2);
13484    int rn = extract32(insn, 5, 5);
13485    int rd = extract32(insn, 0, 5);
13486    TCGv_ptr tcg_rd_ptr, tcg_rn_ptr;
13487    int feature;
13488    CryptoTwoOpFn *genfn;
13489
13490    switch (opcode) {
13491    case 0: /* SHA512SU0 */
13492        feature = ARM_FEATURE_V8_SHA512;
13493        genfn = gen_helper_crypto_sha512su0;
13494        break;
13495    case 1: /* SM4E */
13496        feature = ARM_FEATURE_V8_SM4;
13497        genfn = gen_helper_crypto_sm4e;
13498        break;
13499    default:
13500        unallocated_encoding(s);
13501        return;
13502    }
13503
13504    if (!arm_dc_feature(s, feature)) {
13505        unallocated_encoding(s);
13506        return;
13507    }
13508
13509    if (!fp_access_check(s)) {
13510        return;
13511    }
13512
13513    tcg_rd_ptr = vec_full_reg_ptr(s, rd);
13514    tcg_rn_ptr = vec_full_reg_ptr(s, rn);
13515
13516    genfn(tcg_rd_ptr, tcg_rn_ptr);
13517
13518    tcg_temp_free_ptr(tcg_rd_ptr);
13519    tcg_temp_free_ptr(tcg_rn_ptr);
13520}
13521
13522/* Crypto four-register
13523 *  31               23 22 21 20  16 15  14  10 9    5 4    0
13524 * +-------------------+-----+------+---+------+------+------+
13525 * | 1 1 0 0 1 1 1 0 0 | Op0 |  Rm  | 0 |  Ra  |  Rn  |  Rd  |
13526 * +-------------------+-----+------+---+------+------+------+
13527 */
13528static void disas_crypto_four_reg(DisasContext *s, uint32_t insn)
13529{
13530    int op0 = extract32(insn, 21, 2);
13531    int rm = extract32(insn, 16, 5);
13532    int ra = extract32(insn, 10, 5);
13533    int rn = extract32(insn, 5, 5);
13534    int rd = extract32(insn, 0, 5);
13535    int feature;
13536
13537    switch (op0) {
13538    case 0: /* EOR3 */
13539    case 1: /* BCAX */
13540        feature = ARM_FEATURE_V8_SHA3;
13541        break;
13542    case 2: /* SM3SS1 */
13543        feature = ARM_FEATURE_V8_SM3;
13544        break;
13545    default:
13546        unallocated_encoding(s);
13547        return;
13548    }
13549
13550    if (!arm_dc_feature(s, feature)) {
13551        unallocated_encoding(s);
13552        return;
13553    }
13554
13555    if (!fp_access_check(s)) {
13556        return;
13557    }
13558
13559    if (op0 < 2) {
13560        TCGv_i64 tcg_op1, tcg_op2, tcg_op3, tcg_res[2];
13561        int pass;
13562
13563        tcg_op1 = tcg_temp_new_i64();
13564        tcg_op2 = tcg_temp_new_i64();
13565        tcg_op3 = tcg_temp_new_i64();
13566        tcg_res[0] = tcg_temp_new_i64();
13567        tcg_res[1] = tcg_temp_new_i64();
13568
13569        for (pass = 0; pass < 2; pass++) {
13570            read_vec_element(s, tcg_op1, rn, pass, MO_64);
13571            read_vec_element(s, tcg_op2, rm, pass, MO_64);
13572            read_vec_element(s, tcg_op3, ra, pass, MO_64);
13573
13574            if (op0 == 0) {
13575                /* EOR3 */
13576                tcg_gen_xor_i64(tcg_res[pass], tcg_op2, tcg_op3);
13577            } else {
13578                /* BCAX */
13579                tcg_gen_andc_i64(tcg_res[pass], tcg_op2, tcg_op3);
13580            }
13581            tcg_gen_xor_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
13582        }
13583        write_vec_element(s, tcg_res[0], rd, 0, MO_64);
13584        write_vec_element(s, tcg_res[1], rd, 1, MO_64);
13585
13586        tcg_temp_free_i64(tcg_op1);
13587        tcg_temp_free_i64(tcg_op2);
13588        tcg_temp_free_i64(tcg_op3);
13589        tcg_temp_free_i64(tcg_res[0]);
13590        tcg_temp_free_i64(tcg_res[1]);
13591    } else {
13592        TCGv_i32 tcg_op1, tcg_op2, tcg_op3, tcg_res, tcg_zero;
13593
13594        tcg_op1 = tcg_temp_new_i32();
13595        tcg_op2 = tcg_temp_new_i32();
13596        tcg_op3 = tcg_temp_new_i32();
13597        tcg_res = tcg_temp_new_i32();
13598        tcg_zero = tcg_const_i32(0);
13599
13600        read_vec_element_i32(s, tcg_op1, rn, 3, MO_32);
13601        read_vec_element_i32(s, tcg_op2, rm, 3, MO_32);
13602        read_vec_element_i32(s, tcg_op3, ra, 3, MO_32);
13603
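         /* SM3SS1 works on lane 3 only: the rotate-right by 20 below
          * is a rotate-left by 12 and the final rotate-right by 25 is
          * a rotate-left by 7; lanes 0-2 of Vd are then zeroed.
          */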
13604        tcg_gen_rotri_i32(tcg_res, tcg_op1, 20);
13605        tcg_gen_add_i32(tcg_res, tcg_res, tcg_op2);
13606        tcg_gen_add_i32(tcg_res, tcg_res, tcg_op3);
13607        tcg_gen_rotri_i32(tcg_res, tcg_res, 25);
13608
13609        write_vec_element_i32(s, tcg_zero, rd, 0, MO_32);
13610        write_vec_element_i32(s, tcg_zero, rd, 1, MO_32);
13611        write_vec_element_i32(s, tcg_zero, rd, 2, MO_32);
13612        write_vec_element_i32(s, tcg_res, rd, 3, MO_32);
13613
13614        tcg_temp_free_i32(tcg_op1);
13615        tcg_temp_free_i32(tcg_op2);
13616        tcg_temp_free_i32(tcg_op3);
13617        tcg_temp_free_i32(tcg_res);
13618        tcg_temp_free_i32(tcg_zero);
13619    }
13620}
13621
13622/* Crypto XAR
13623 *  31                   21 20  16 15    10 9    5 4    0
13624 * +-----------------------+------+--------+------+------+
13625 * | 1 1 0 0 1 1 1 0 1 0 0 |  Rm  |  imm6  |  Rn  |  Rd  |
13626 * +-----------------------+------+--------+------+------+
13627 */
13628static void disas_crypto_xar(DisasContext *s, uint32_t insn)
13629{
13630    int rm = extract32(insn, 16, 5);
13631    int imm6 = extract32(insn, 10, 6);
13632    int rn = extract32(insn, 5, 5);
13633    int rd = extract32(insn, 0, 5);
13634    TCGv_i64 tcg_op1, tcg_op2, tcg_res[2];
13635    int pass;
13636
13637    if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA3)) {
13638        unallocated_encoding(s);
13639        return;
13640    }
13641
13642    if (!fp_access_check(s)) {
13643        return;
13644    }
13645
13646    tcg_op1 = tcg_temp_new_i64();
13647    tcg_op2 = tcg_temp_new_i64();
13648    tcg_res[0] = tcg_temp_new_i64();
13649    tcg_res[1] = tcg_temp_new_i64();
13650
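     /* XAR computes Vd.2D = ROR(Vn.2D ^ Vm.2D, imm6), one 64-bit
      * lane per pass.
      */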
13651    for (pass = 0; pass < 2; pass++) {
13652        read_vec_element(s, tcg_op1, rn, pass, MO_64);
13653        read_vec_element(s, tcg_op2, rm, pass, MO_64);
13654
13655        tcg_gen_xor_i64(tcg_res[pass], tcg_op1, tcg_op2);
13656        tcg_gen_rotri_i64(tcg_res[pass], tcg_res[pass], imm6);
13657    }
13658    write_vec_element(s, tcg_res[0], rd, 0, MO_64);
13659    write_vec_element(s, tcg_res[1], rd, 1, MO_64);
13660
13661    tcg_temp_free_i64(tcg_op1);
13662    tcg_temp_free_i64(tcg_op2);
13663    tcg_temp_free_i64(tcg_res[0]);
13664    tcg_temp_free_i64(tcg_res[1]);
13665}
13666
13667/* Crypto three-reg imm2
13668 *  31                   21 20  16 15  14 13 12  11  10  9    5 4    0
13669 * +-----------------------+------+-----+------+--------+------+------+
13670 * | 1 1 0 0 1 1 1 0 0 1 0 |  Rm  | 1 0 | imm2 | opcode |  Rn  |  Rd  |
13671 * +-----------------------+------+-----+------+--------+------+------+
13672 */
13673static void disas_crypto_three_reg_imm2(DisasContext *s, uint32_t insn)
13674{
13675    int opcode = extract32(insn, 10, 2);
13676    int imm2 = extract32(insn, 12, 2);
13677    int rm = extract32(insn, 16, 5);
13678    int rn = extract32(insn, 5, 5);
13679    int rd = extract32(insn, 0, 5);
13680    TCGv_ptr tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr;
13681    TCGv_i32 tcg_imm2, tcg_opcode;
13682
13683    if (!arm_dc_feature(s, ARM_FEATURE_V8_SM3)) {
13684        unallocated_encoding(s);
13685        return;
13686    }
13687
13688    if (!fp_access_check(s)) {
13689        return;
13690    }
13691
13692    tcg_rd_ptr = vec_full_reg_ptr(s, rd);
13693    tcg_rn_ptr = vec_full_reg_ptr(s, rn);
13694    tcg_rm_ptr = vec_full_reg_ptr(s, rm);
13695    tcg_imm2   = tcg_const_i32(imm2);
13696    tcg_opcode = tcg_const_i32(opcode);
13697
13698    gen_helper_crypto_sm3tt(tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr, tcg_imm2,
13699                            tcg_opcode);
13700
13701    tcg_temp_free_ptr(tcg_rd_ptr);
13702    tcg_temp_free_ptr(tcg_rn_ptr);
13703    tcg_temp_free_ptr(tcg_rm_ptr);
13704    tcg_temp_free_i32(tcg_imm2);
13705    tcg_temp_free_i32(tcg_opcode);
13706}
13707
13708/* C3.6 Data processing - SIMD, inc Crypto
13709 *
13710 * As the decode gets a little complex we are using a table based
13711 * approach for this part of the decode.
13712 */
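     /* An instruction matches an entry when (insn & mask) == pattern and
      * the first match wins, so more specific patterns must be listed
      * before broader ones they overlap with (see the mod_imm/shift_imm
      * note below).
      */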
13713static const AArch64DecodeTable data_proc_simd[] = {
13714    /* pattern  ,  mask     ,  fn                        */
13715    { 0x0e200400, 0x9f200400, disas_simd_three_reg_same },
13716    { 0x0e008400, 0x9f208400, disas_simd_three_reg_same_extra },
13717    { 0x0e200000, 0x9f200c00, disas_simd_three_reg_diff },
13718    { 0x0e200800, 0x9f3e0c00, disas_simd_two_reg_misc },
13719    { 0x0e300800, 0x9f3e0c00, disas_simd_across_lanes },
13720    { 0x0e000400, 0x9fe08400, disas_simd_copy },
13721    { 0x0f000000, 0x9f000400, disas_simd_indexed }, /* vector indexed */
13722    /* simd_mod_imm decode is a subset of simd_shift_imm, so must precede it */
13723    { 0x0f000400, 0x9ff80400, disas_simd_mod_imm },
13724    { 0x0f000400, 0x9f800400, disas_simd_shift_imm },
13725    { 0x0e000000, 0xbf208c00, disas_simd_tb },
13726    { 0x0e000800, 0xbf208c00, disas_simd_zip_trn },
13727    { 0x2e000000, 0xbf208400, disas_simd_ext },
13728    { 0x5e200400, 0xdf200400, disas_simd_scalar_three_reg_same },
13729    { 0x5e008400, 0xdf208400, disas_simd_scalar_three_reg_same_extra },
13730    { 0x5e200000, 0xdf200c00, disas_simd_scalar_three_reg_diff },
13731    { 0x5e200800, 0xdf3e0c00, disas_simd_scalar_two_reg_misc },
13732    { 0x5e300800, 0xdf3e0c00, disas_simd_scalar_pairwise },
13733    { 0x5e000400, 0xdfe08400, disas_simd_scalar_copy },
13734    { 0x5f000000, 0xdf000400, disas_simd_indexed }, /* scalar indexed */
13735    { 0x5f000400, 0xdf800400, disas_simd_scalar_shift_imm },
13736    { 0x4e280800, 0xff3e0c00, disas_crypto_aes },
13737    { 0x5e000000, 0xff208c00, disas_crypto_three_reg_sha },
13738    { 0x5e280800, 0xff3e0c00, disas_crypto_two_reg_sha },
13739    { 0xce608000, 0xffe0b000, disas_crypto_three_reg_sha512 },
13740    { 0xcec08000, 0xfffff000, disas_crypto_two_reg_sha512 },
13741    { 0xce000000, 0xff808000, disas_crypto_four_reg },
13742    { 0xce800000, 0xffe00000, disas_crypto_xar },
13743    { 0xce408000, 0xffe0c000, disas_crypto_three_reg_imm2 },
13744    { 0x0e400400, 0x9f60c400, disas_simd_three_reg_same_fp16 },
13745    { 0x0e780800, 0x8f7e0c00, disas_simd_two_reg_misc_fp16 },
13746    { 0x5e400400, 0xdf60c400, disas_simd_scalar_three_reg_same_fp16 },
13747    { 0x00000000, 0x00000000, NULL }
13748};
13749
13750static void disas_data_proc_simd(DisasContext *s, uint32_t insn)
13751{
13752    /* Note that this is called with all non-FP cases from
13753     * table C3-6 so it must UNDEF for entries not specifically
13754     * allocated to instructions in that table.
13755     */
13756    AArch64DecodeFn *fn = lookup_disas_fn(&data_proc_simd[0], insn);
13757    if (fn) {
13758        fn(s, insn);
13759    } else {
13760        unallocated_encoding(s);
13761    }
13762}
13763
13764/* C3.6 Data processing - SIMD and floating point */
13765static void disas_data_proc_simd_fp(DisasContext *s, uint32_t insn)
13766{
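     /* Scalar floating point ops live in the space with bit 28 set
      * and bit 30 clear; everything else here is SIMD, including
      * crypto.
      */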
13767    if (extract32(insn, 28, 1) == 1 && extract32(insn, 30, 1) == 0) {
13768        disas_data_proc_fp(s, insn);
13769    } else {
13770        /* SIMD, including crypto */
13771        disas_data_proc_simd(s, insn);
13772    }
13773}
13774
13775/* C3.1 A64 instruction index by encoding */
13776static void disas_a64_insn(CPUARMState *env, DisasContext *s)
13777{
13778    uint32_t insn;
13779
13780    insn = arm_ldl_code(env, s->pc, s->sctlr_b);
13781    s->insn = insn;
13782    s->pc += 4;
13783
13784    s->fp_access_checked = false;
13785
13786    switch (extract32(insn, 25, 4)) {
13787    case 0x0: case 0x1: case 0x3: /* UNALLOCATED */
13788        unallocated_encoding(s);
13789        break;
13790    case 0x2:
13791        if (!arm_dc_feature(s, ARM_FEATURE_SVE) || !disas_sve(s, insn)) {
13792            unallocated_encoding(s);
13793        }
13794        break;
13795    case 0x8: case 0x9: /* Data processing - immediate */
13796        disas_data_proc_imm(s, insn);
13797        break;
13798    case 0xa: case 0xb: /* Branch, exception generation and system insns */
13799        disas_b_exc_sys(s, insn);
13800        break;
13801    case 0x4:
13802    case 0x6:
13803    case 0xc:
13804    case 0xe:      /* Loads and stores */
13805        disas_ldst(s, insn);
13806        break;
13807    case 0x5:
13808    case 0xd:      /* Data processing - register */
13809        disas_data_proc_reg(s, insn);
13810        break;
13811    case 0x7:
13812    case 0xf:      /* Data processing - SIMD and floating point */
13813        disas_data_proc_simd_fp(s, insn);
13814        break;
13815    default:
13816        assert(FALSE); /* all 16 cases should be handled above */
13817        break;
13818    }
13819
13820    /* if we allocated any temporaries, free them here */
13821    free_tmp_a64(s);
13822}
13823
13824static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
13825                                          CPUState *cpu)
13826{
13827    DisasContext *dc = container_of(dcbase, DisasContext, base);
13828    CPUARMState *env = cpu->env_ptr;
13829    ARMCPU *arm_cpu = arm_env_get_cpu(env);
13830    int bound;
13831
13832    dc->pc = dc->base.pc_first;
13833    dc->condjmp = 0;
13834
13835    dc->aarch64 = 1;
13836    /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
13837     * there is no secure EL1, so we route exceptions to EL3.
13838     */
13839    dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
13840                               !arm_el_is_aa64(env, 3);
13841    dc->thumb = 0;
13842    dc->sctlr_b = 0;
13843    dc->be_data = ARM_TBFLAG_BE_DATA(dc->base.tb->flags) ? MO_BE : MO_LE;
13844    dc->condexec_mask = 0;
13845    dc->condexec_cond = 0;
13846    dc->mmu_idx = core_to_arm_mmu_idx(env, ARM_TBFLAG_MMUIDX(dc->base.tb->flags));
13847    dc->tbi0 = ARM_TBFLAG_TBI0(dc->base.tb->flags);
13848    dc->tbi1 = ARM_TBFLAG_TBI1(dc->base.tb->flags);
13849    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
13850#if !defined(CONFIG_USER_ONLY)
13851    dc->user = (dc->current_el == 0);
13852#endif
13853    dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(dc->base.tb->flags);
13854    dc->sve_excp_el = ARM_TBFLAG_SVEEXC_EL(dc->base.tb->flags);
13855    dc->sve_len = (ARM_TBFLAG_ZCR_LEN(dc->base.tb->flags) + 1) * 16;
13856    dc->vec_len = 0;
13857    dc->vec_stride = 0;
13858    dc->cp_regs = arm_cpu->cp_regs;
13859    dc->features = env->features;
13860
13861    /* Single step state. The code-generation logic here is:
13862     *  SS_ACTIVE == 0:
13863     *   generate code with no special handling for single-stepping (except
13864     *   that anything that can make us go to SS_ACTIVE == 1 must end the TB;
13865     *   this happens anyway because those changes are all system register or
13866     *   PSTATE writes).
13867     *  SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
13868     *   emit code for one insn
13869     *   emit code to clear PSTATE.SS
13870     *   emit code to generate software step exception for completed step
13871     *   end TB (as usual for having generated an exception)
13872     *  SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
13873     *   emit code to generate a software step exception
13874     *   end the TB
13875     */
13876    dc->ss_active = ARM_TBFLAG_SS_ACTIVE(dc->base.tb->flags);
13877    dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(dc->base.tb->flags);
13878    dc->is_ldex = false;
13879    dc->ss_same_el = (arm_debug_target_el(env) == dc->current_el);
13880
13881    /* Bound the number of insns to execute to those left on the page.  */
13882    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
13883
13884    /* If architectural single step active, limit to 1.  */
13885    if (dc->ss_active) {
13886        bound = 1;
13887    }
13888    dc->base.max_insns = MIN(dc->base.max_insns, bound);
13889
13890    init_tmp_a64_array(dc);
13891}
13892
13893static void aarch64_tr_tb_start(DisasContextBase *db, CPUState *cpu)
13894{
13895    tcg_clear_temp_count();
13896}
13897
13898static void aarch64_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
13899{
13900    DisasContext *dc = container_of(dcbase, DisasContext, base);
13901
13902    tcg_gen_insn_start(dc->pc, 0, 0);
13903    dc->insn_start = tcg_last_op();
13904}
13905
13906static bool aarch64_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
13907                                        const CPUBreakpoint *bp)
13908{
13909    DisasContext *dc = container_of(dcbase, DisasContext, base);
13910
13911    if (bp->flags & BP_CPU) {
13912        gen_a64_set_pc_im(dc->pc);
13913        gen_helper_check_breakpoints(cpu_env);
13914        /* End the TB early; it likely won't be executed */
13915        dc->base.is_jmp = DISAS_TOO_MANY;
13916    } else {
13917        gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
13918        /* The address covered by the breakpoint must be
13919           included in [tb->pc, tb->pc + tb->size) in order
13920           for it to be properly cleared -- thus we
13921           increment the PC here so that the logic setting
13922           tb->size below does the right thing.  */
13923        dc->pc += 4;
13924        dc->base.is_jmp = DISAS_NORETURN;
13925    }
13926
13927    return true;
13928}
13929
13930static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
13931{
13932    DisasContext *dc = container_of(dcbase, DisasContext, base);
13933    CPUARMState *env = cpu->env_ptr;
13934
13935    if (dc->ss_active && !dc->pstate_ss) {
13936        /* Singlestep state is Active-pending.
13937         * If we're in this state at the start of a TB then either
13938         *  a) we just took an exception to an EL which is being debugged
13939         *     and this is the first insn in the exception handler
13940         *  b) debug exceptions were masked and we just unmasked them
13941         *     without changing EL (eg by clearing PSTATE.D)
13942         * In either case we're going to take a swstep exception in the
13943         * "did not step an insn" case, and so the syndrome ISV and EX
13944         * bits should be zero.
13945         */
13946        assert(dc->base.num_insns == 1);
13947        gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
13948                      default_exception_el(dc));
13949        dc->base.is_jmp = DISAS_NORETURN;
13950    } else {
13951        disas_a64_insn(env, dc);
13952    }
13953
13954    dc->base.pc_next = dc->pc;
13955    translator_loop_temp_check(&dc->base);
13956}
13957
13958static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
13959{
13960    DisasContext *dc = container_of(dcbase, DisasContext, base);
13961
13962    if (unlikely(dc->base.singlestep_enabled || dc->ss_active)) {
13963        /* Note that this means single stepping WFI doesn't halt the CPU.
13964         * For conditional branch insns this is harmless unreachable code as
13965         * gen_goto_tb() has already handled emitting the debug exception
13966         * (and thus a tb-jump is not possible when singlestepping).
13967         */
13968        switch (dc->base.is_jmp) {
13969        default:
13970            gen_a64_set_pc_im(dc->pc);
13971            /* fall through */
13972        case DISAS_EXIT:
13973        case DISAS_JUMP:
13974            if (dc->base.singlestep_enabled) {
13975                gen_exception_internal(EXCP_DEBUG);
13976            } else {
13977                gen_step_complete_exception(dc);
13978            }
13979            break;
13980        case DISAS_NORETURN:
13981            break;
13982        }
13983    } else {
13984        switch (dc->base.is_jmp) {
13985        case DISAS_NEXT:
13986        case DISAS_TOO_MANY:
13987            gen_goto_tb(dc, 1, dc->pc);
13988            break;
13989        default:
13990        case DISAS_UPDATE:
13991            gen_a64_set_pc_im(dc->pc);
13992            /* fall through */
13993        case DISAS_EXIT:
13994            tcg_gen_exit_tb(NULL, 0);
13995            break;
13996        case DISAS_JUMP:
13997            tcg_gen_lookup_and_goto_ptr();
13998            break;
13999        case DISAS_NORETURN:
14000        case DISAS_SWI:
14001            break;
14002        case DISAS_WFE:
14003            gen_a64_set_pc_im(dc->pc);
14004            gen_helper_wfe(cpu_env);
14005            break;
14006        case DISAS_YIELD:
14007            gen_a64_set_pc_im(dc->pc);
14008            gen_helper_yield(cpu_env);
14009            break;
14010        case DISAS_WFI:
14011        {
14012            /* This is a special case because we don't want to just halt the CPU
14013             * if trying to debug across a WFI.
14014             */
14015            TCGv_i32 tmp = tcg_const_i32(4);
14016
14017            gen_a64_set_pc_im(dc->pc);
14018            gen_helper_wfi(cpu_env, tmp);
14019            tcg_temp_free_i32(tmp);
14020            /* The helper doesn't necessarily throw an exception, but we
14021             * must go back to the main loop to check for interrupts anyway.
14022             */
14023            tcg_gen_exit_tb(NULL, 0);
14024            break;
14025        }
14026        }
14027    }
14028
14029    /* Functions above can change dc->pc, so re-align db->pc_next */
14030    dc->base.pc_next = dc->pc;
14031}
14032
14033static void aarch64_tr_disas_log(const DisasContextBase *dcbase,
14034                                 CPUState *cpu)
14035{
14036    DisasContext *dc = container_of(dcbase, DisasContext, base);
14037
14038    qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
14039    log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size);
14040}
14041
14042const TranslatorOps aarch64_translator_ops = {
14043    .init_disas_context = aarch64_tr_init_disas_context,
14044    .tb_start           = aarch64_tr_tb_start,
14045    .insn_start         = aarch64_tr_insn_start,
14046    .breakpoint_check   = aarch64_tr_breakpoint_check,
14047    .translate_insn     = aarch64_tr_translate_insn,
14048    .tb_stop            = aarch64_tr_tb_stop,
14049    .disas_log          = aarch64_tr_disas_log,
14050};
14051