1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19#include "qemu/osdep.h"
20
21#include "cpu.h"
22#include "exec/exec-all.h"
23#include "tcg-op.h"
24#include "tcg-op-gvec.h"
25#include "qemu/log.h"
26#include "arm_ldst.h"
27#include "translate.h"
28#include "internals.h"
29#include "qemu/host-utils.h"
30
31#include "exec/semihost.h"
32#include "exec/gen-icount.h"
33
34#include "exec/helper-proto.h"
35#include "exec/helper-gen.h"
36#include "exec/log.h"
37
38#include "trace-tcg.h"
39#include "translate-a64.h"
40
/* TCG globals backing the AArch64 CPU state accessed by generated code. */
static TCGv_i64 cpu_X[32];  /* general-purpose registers x0..x30 plus sp */
static TCGv_i64 cpu_pc;     /* program counter */


/* High 64 bits of the most recent load-exclusive data. */
static TCGv_i64 cpu_exclusive_high;

/* Register names used when registering cpu_X[] as TCG globals. */
static const char *regnames[] = {
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
    "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
    "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23",
    "x24", "x25", "x26", "x27", "x28", "x29", "lr", "sp"
};

/* Shift operand types for data-processing (shifted register) insns. */
enum a64_shift_type {
    A64_SHIFT_TYPE_LSL = 0,
    A64_SHIFT_TYPE_LSR = 1,
    A64_SHIFT_TYPE_ASR = 2,
    A64_SHIFT_TYPE_ROR = 3
};
60
61
62
63
/* Signature of a per-instruction-group decode/translate function. */
typedef void AArch64DecodeFn(DisasContext *s, uint32_t insn);

/*
 * One entry of a decode table: an instruction matches when
 * (insn & mask) == pattern, and is then handled by disas_fn.
 */
typedef struct AArch64DecodeTable {
    uint32_t pattern;
    uint32_t mask;
    AArch64DecodeFn *disas_fn;
} AArch64DecodeTable;

/* Function signatures for the various flavours of generator helpers. */
typedef void NeonGenOneOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32);
typedef void NeonGenTwoOpFn(TCGv_i32, TCGv_i32, TCGv_i32);
typedef void NeonGenTwoOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32);
typedef void NeonGenTwo64OpFn(TCGv_i64, TCGv_i64, TCGv_i64);
typedef void NeonGenTwo64OpEnvFn(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64);
typedef void NeonGenNarrowFn(TCGv_i32, TCGv_i64);
typedef void NeonGenNarrowEnvFn(TCGv_i32, TCGv_ptr, TCGv_i64);
typedef void NeonGenWidenFn(TCGv_i64, TCGv_i32);
typedef void NeonGenTwoSingleOPFn(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
typedef void NeonGenTwoDoubleOPFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_ptr);
typedef void NeonGenOneOpFn(TCGv_i64, TCGv_i64);
typedef void CryptoTwoOpFn(TCGv_ptr, TCGv_ptr);
typedef void CryptoThreeOpIntFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
typedef void CryptoThreeOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, TCGMemOp);
88
89
/*
 * Register the AArch64 CPU state fields (pc, x0..sp, exclusive_high)
 * as TCG globals so that generated code can reference them directly.
 * Called once during translator initialization.
 */
void a64_translate_init(void)
{
    int i;

    cpu_pc = tcg_global_mem_new_i64(cpu_env,
                                    offsetof(CPUARMState, pc),
                                    "pc");
    for (i = 0; i < 32; i++) {
        cpu_X[i] = tcg_global_mem_new_i64(cpu_env,
                                          offsetof(CPUARMState, xregs[i]),
                                          regnames[i]);
    }

    cpu_exclusive_high = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_high), "exclusive_high");
}
106
/*
 * Return the core mmu index to use for an "unprivileged" (user-mode)
 * load or store: the EL0 counterpart of the current EL1 regime, or the
 * current regime unchanged when there is no distinct user variant.
 */
static inline int get_a64_user_mem_index(DisasContext *s)
{
    ARMMMUIdx useridx;

    switch (s->mmu_idx) {
    case ARMMMUIdx_S12NSE1:
        useridx = ARMMMUIdx_S12NSE0;
        break;
    case ARMMMUIdx_S1SE1:
        useridx = ARMMMUIdx_S1SE0;
        break;
    case ARMMMUIdx_S2NS:
        /* Stage-2 regime: code is never executed from here. */
        g_assert_not_reached();
        /* fallthrough (unreachable) */
    default:
        useridx = s->mmu_idx;
        break;
    }
    return arm_to_core_mmu_idx(useridx);
}
129
/*
 * Dump AArch64 CPU state for debugging: PC, X registers, PSTATE flags,
 * and (when CPU_DUMP_FPU is requested) either the SVE Z/P registers or
 * the plain 128-bit Q registers.
 */
void aarch64_cpu_dump_state(CPUState *cs, FILE *f,
                            fprintf_function cpu_fprintf, int flags)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t psr = pstate_read(env);
    int i;
    int el = arm_current_el(env);
    const char *ns_status;

    cpu_fprintf(f, " PC=%016" PRIx64 " ", env->pc);
    for (i = 0; i < 32; i++) {
        if (i == 31) {
            cpu_fprintf(f, " SP=%016" PRIx64 "\n", env->xregs[i]);
        } else {
            /* Three registers per line of output. */
            cpu_fprintf(f, "X%02d=%016" PRIx64 "%s", i, env->xregs[i],
                        (i + 2) % 3 ? " " : "\n");
        }
    }

    /* Report secure state only when EL3 exists and we are below it. */
    if (arm_feature(env, ARM_FEATURE_EL3) && el != 3) {
        ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
    } else {
        ns_status = "";
    }
    cpu_fprintf(f, "PSTATE=%08x %c%c%c%c %sEL%d%c",
                psr,
                psr & PSTATE_N ? 'N' : '-',
                psr & PSTATE_Z ? 'Z' : '-',
                psr & PSTATE_C ? 'C' : '-',
                psr & PSTATE_V ? 'V' : '-',
                ns_status,
                el,
                psr & PSTATE_SP ? 'h' : 't');

    if (!(flags & CPU_DUMP_FPU)) {
        cpu_fprintf(f, "\n");
        return;
    }
    cpu_fprintf(f, " FPCR=%08x FPSR=%08x\n",
                vfp_get_fpcr(env), vfp_get_fpsr(env));

    if (arm_feature(env, ARM_FEATURE_SVE)) {
        /* NOTE(review): reads the EL1 ZCR length field — confirm this is
         * the intended view for all ELs. */
        int j, zcr_len = env->vfp.zcr_el[1] & 0xf;

        /* Predicate registers P0..P15 followed by FFR. */
        for (i = 0; i <= FFR_PRED_NUM; i++) {
            bool eol;
            if (i == FFR_PRED_NUM) {
                cpu_fprintf(f, "FFR=");
                /* FFR always finishes a line. */
                eol = true;
            } else {
                cpu_fprintf(f, "P%02d=", i);
                /* Predicates per output line depends on vector length. */
                switch (zcr_len) {
                case 0:
                    eol = i % 8 == 7;
                    break;
                case 1:
                    eol = i % 6 == 5;
                    break;
                case 2:
                case 3:
                    eol = i % 3 == 2;
                    break;
                default:
                    /* Larger vectors: one predicate per line. */
                    eol = true;
                    break;
                }
            }
            for (j = zcr_len / 4; j >= 0; j--) {
                int digits;
                /* Print full 16 hex digits except for a partial top word. */
                if (j * 4 + 4 <= zcr_len + 1) {
                    digits = 16;
                } else {
                    digits = (zcr_len % 4 + 1) * 4;
                }
                cpu_fprintf(f, "%0*" PRIx64 "%s", digits,
                            env->vfp.pregs[i].p[j],
                            j ? ":" : eol ? "\n" : " ");
            }
        }

        /* Vector registers Z0..Z31, layout chosen by vector length. */
        for (i = 0; i < 32; i++) {
            if (zcr_len == 0) {
                cpu_fprintf(f, "Z%02d=%016" PRIx64 ":%016" PRIx64 "%s",
                            i, env->vfp.zregs[i].d[1],
                            env->vfp.zregs[i].d[0], i & 1 ? "\n" : " ");
            } else if (zcr_len == 1) {
                cpu_fprintf(f, "Z%02d=%016" PRIx64 ":%016" PRIx64
                            ":%016" PRIx64 ":%016" PRIx64 "\n",
                            i, env->vfp.zregs[i].d[3], env->vfp.zregs[i].d[2],
                            env->vfp.zregs[i].d[1], env->vfp.zregs[i].d[0]);
            } else {
                /* Two 64-bit chunks per line, high quadword first. */
                for (j = zcr_len; j >= 0; j--) {
                    bool odd = (zcr_len - j) % 2 != 0;
                    if (j == zcr_len) {
                        cpu_fprintf(f, "Z%02d[%x-%x]=", i, j, j - 1);
                    } else if (!odd) {
                        if (j > 0) {
                            cpu_fprintf(f, " [%x-%x]=", j, j - 1);
                        } else {
                            cpu_fprintf(f, " [%x]=", j);
                        }
                    }
                    cpu_fprintf(f, "%016" PRIx64 ":%016" PRIx64 "%s",
                                env->vfp.zregs[i].d[j * 2 + 1],
                                env->vfp.zregs[i].d[j * 2],
                                odd || j == 0 ? "\n" : ":");
                }
            }
        }
    } else {
        /* No SVE: dump the 128-bit Q registers, two per line. */
        for (i = 0; i < 32; i++) {
            uint64_t *q = aa64_vfp_qreg(env, i);
            cpu_fprintf(f, "Q%02d=%016" PRIx64 ":%016" PRIx64 "%s",
                        i, q[1], q[0], (i & 1 ? "\n" : " "));
        }
    }
}
250
/* Emit code to set the PC to a known immediate value. */
void gen_a64_set_pc_im(uint64_t val)
{
    tcg_gen_movi_i64(cpu_pc, val);
}
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
/*
 * Emit code to set the PC from a register value, applying the
 * "top byte ignore" (TBI) address cleaning selected by the s->tbi0 /
 * s->tbi1 flags.  For EL0/EL1, bit 55 of the address selects between
 * the TTBR0 (tbi0) and TTBR1 (tbi1) regimes; when only one regime has
 * TBI enabled the top byte is conditionally cleaned at runtime.
 */
static void gen_a64_set_pc(DisasContext *s, TCGv_i64 src)
{
    if (s->current_el <= 1) {
        if (s->tbi0 && s->tbi1) {
            TCGv_i64 tmp_reg = tcg_temp_new_i64();

            /* Both regimes use TBI: sign-extend from bit 55 so the top
             * byte copies bit 55 regardless of which half is in use. */
            tcg_gen_shli_i64(tmp_reg, src, 8);
            tcg_gen_sari_i64(cpu_pc, tmp_reg, 8);
            tcg_temp_free_i64(tmp_reg);
        } else if (!s->tbi0 && !s->tbi1) {
            /* Neither regime uses TBI: copy the address unchanged. */
            tcg_gen_mov_i64(cpu_pc, src);
        } else {
            /* Only one regime uses TBI: select at runtime on bit 55. */
            TCGv_i64 tcg_tmpval = tcg_temp_new_i64();
            TCGv_i64 tcg_bit55 = tcg_temp_new_i64();
            TCGv_i64 tcg_zero = tcg_const_i64(0);

            tcg_gen_andi_i64(tcg_bit55, src, (1ull << 55));

            if (s->tbi0) {
                /* tbi0 only: clear the top byte when bit 55 == 0. */
                tcg_gen_andi_i64(tcg_tmpval, src,
                                 0x00FFFFFFFFFFFFFFull);
                tcg_gen_movcond_i64(TCG_COND_EQ, cpu_pc, tcg_bit55, tcg_zero,
                                    tcg_tmpval, src);
            } else {
                /* tbi1 only: set the top byte when bit 55 == 1. */
                tcg_gen_ori_i64(tcg_tmpval, src,
                                0xFF00000000000000ull);
                tcg_gen_movcond_i64(TCG_COND_NE, cpu_pc, tcg_bit55, tcg_zero,
                                    tcg_tmpval, src);
            }
            tcg_temp_free_i64(tcg_zero);
            tcg_temp_free_i64(tcg_bit55);
            tcg_temp_free_i64(tcg_tmpval);
        }
    } else {
        if (s->tbi0) {
            /* EL2/EL3 with TBI: unconditionally clear the top byte. */
            tcg_gen_andi_i64(cpu_pc, src, 0x00FFFFFFFFFFFFFFull);
        } else {
            tcg_gen_mov_i64(cpu_pc, src);
        }
    }
}
325
/* A 64-bit condition test: "cond holds iff (value cond 0)". */
typedef struct DisasCompare64 {
    TCGCond cond;
    TCGv_i64 value;
} DisasCompare64;

/*
 * Build a 64-bit comparison for ARM condition code cc by widening the
 * 32-bit test produced by arm_test_cc().  Caller must release it with
 * a64_free_cc().
 */
static void a64_test_cc(DisasCompare64 *c64, int cc)
{
    DisasCompare c32;

    arm_test_cc(&c32, cc);

    c64->cond = c32.cond;
    c64->value = tcg_temp_new_i64();
    /* Sign-extend the 32-bit comparison value to 64 bits. */
    tcg_gen_ext_i32_i64(c64->value, c32.value);

    arm_free_cc(&c32);
}

/* Release the temporary allocated by a64_test_cc(). */
static void a64_free_cc(DisasCompare64 *c64)
{
    tcg_temp_free_i64(c64->value);
}
350
/* Emit a call raising a QEMU-internal exception (not architectural). */
static void gen_exception_internal(int excp)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);

    assert(excp_is_internal(excp));
    gen_helper_exception_internal(cpu_env, tcg_excp);
    tcg_temp_free_i32(tcg_excp);
}

/* Emit a call raising an architectural exception with syndrome info. */
static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);
    TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
    TCGv_i32 tcg_el = tcg_const_i32(target_el);

    gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
                                       tcg_syn, tcg_el);
    tcg_temp_free_i32(tcg_el);
    tcg_temp_free_i32(tcg_syn);
    tcg_temp_free_i32(tcg_excp);
}

/*
 * Raise an internal exception from within an instruction; "offset" is
 * subtracted from s->pc to point at the faulting instruction.  Ends
 * the translation block (DISAS_NORETURN).
 */
static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
{
    gen_a64_set_pc_im(s->pc - offset);
    gen_exception_internal(excp);
    s->base.is_jmp = DISAS_NORETURN;
}

/* As above, but for an architectural exception with syndrome/target EL. */
static void gen_exception_insn(DisasContext *s, int offset, int excp,
                               uint32_t syndrome, uint32_t target_el)
{
    gen_a64_set_pc_im(s->pc - offset);
    gen_exception(excp, syndrome, target_el);
    s->base.is_jmp = DISAS_NORETURN;
}

/* Raise a breakpoint (BKPT/BRK) exception with the given syndrome. */
static void gen_exception_bkpt_insn(DisasContext *s, int offset,
                                    uint32_t syndrome)
{
    TCGv_i32 tcg_syn;

    gen_a64_set_pc_im(s->pc - offset);
    tcg_syn = tcg_const_i32(syndrome);
    gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
    tcg_temp_free_i32(tcg_syn);
    s->base.is_jmp = DISAS_NORETURN;
}
399
/*
 * Advance the single-step state machine: clear the cached and the
 * architectural PSTATE.SS bit after an instruction has executed.
 */
static void gen_ss_advance(DisasContext *s)
{
    if (s->ss_active) {
        s->pstate_ss = 0;
        gen_helper_clear_pstate_ss(cpu_env);
    }
}

/*
 * Raise the software-step debug exception that completes a
 * single step.  Ends the translation block.
 */
static void gen_step_complete_exception(DisasContext *s)
{
    gen_ss_advance(s);
    gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
                  default_exception_el(s));
    s->base.is_jmp = DISAS_NORETURN;
}
427
/*
 * Decide whether a direct goto_tb chain to "dest" is permitted:
 * not when single-stepping (gdb or architectural), not for the last
 * instruction before an I/O boundary, and (system mode) only within
 * the same guest page as the current TB.
 */
static inline bool use_goto_tb(DisasContext *s, int n, uint64_t dest)
{
    if (s->base.singlestep_enabled || s->ss_active ||
        (tb_cflags(s->base.tb) & CF_LAST_IO)) {
        return false;
    }

#ifndef CONFIG_USER_ONLY
    /* Direct chaining must stay within the current guest page. */
    if ((s->base.tb->pc & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) {
        return false;
    }
#endif

    return true;
}
447
/*
 * Emit a jump to guest address "dest", using direct TB chaining when
 * allowed, otherwise falling back to a lookup-and-jump or to the
 * appropriate debug exception when single-stepping.
 */
static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest)
{
    TranslationBlock *tb;

    tb = s->base.tb;
    if (use_goto_tb(s, n, dest)) {
        tcg_gen_goto_tb(n);
        gen_a64_set_pc_im(dest);
        tcg_gen_exit_tb(tb, n);
        s->base.is_jmp = DISAS_NORETURN;
    } else {
        gen_a64_set_pc_im(dest);
        if (s->ss_active) {
            /* Architectural single step: take the step exception. */
            gen_step_complete_exception(s);
        } else if (s->base.singlestep_enabled) {
            /* gdbstub single step: report EXCP_DEBUG. */
            gen_exception_internal(EXCP_DEBUG);
        } else {
            tcg_gen_lookup_and_goto_ptr();
            s->base.is_jmp = DISAS_NORETURN;
        }
    }
}
470
/* Raise an UNDEFINED exception for an unallocated instruction encoding. */
void unallocated_encoding(DisasContext *s)
{
    /* Offset 4: PC has already been advanced past this instruction. */
    gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
}
477
/* Reset the per-instruction pool of i64 temporaries. */
static void init_tmp_a64_array(DisasContext *s)
{
#ifdef CONFIG_DEBUG_TCG
    /* Poison stale entries so use-after-free is caught in debug builds. */
    memset(s->tmp_a64, 0, sizeof(s->tmp_a64));
#endif
    s->tmp_a64_count = 0;
}

/* Free every temporary handed out since the last reset. */
static void free_tmp_a64(DisasContext *s)
{
    int i;
    for (i = 0; i < s->tmp_a64_count; i++) {
        tcg_temp_free_i64(s->tmp_a64[i]);
    }
    init_tmp_a64_array(s);
}

/*
 * Allocate a new i64 temporary tracked in the per-instruction pool;
 * it is released automatically by free_tmp_a64().
 */
TCGv_i64 new_tmp_a64(DisasContext *s)
{
    assert(s->tmp_a64_count < TMP_A64_MAX);
    return s->tmp_a64[s->tmp_a64_count++] = tcg_temp_new_i64();
}

/* Allocate a pooled temporary preloaded with zero. */
TCGv_i64 new_tmp_a64_zero(DisasContext *s)
{
    TCGv_i64 t = new_tmp_a64(s);
    tcg_gen_movi_i64(t, 0);
    return t;
}
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523TCGv_i64 cpu_reg(DisasContext *s, int reg)
524{
525 if (reg == 31) {
526 return new_tmp_a64_zero(s);
527 } else {
528 return cpu_X[reg];
529 }
530}
531
532
/* Return register Xn, with register 31 meaning SP (not XZR). */
TCGv_i64 cpu_reg_sp(DisasContext *s, int reg)
{
    return cpu_X[reg];
}
537
538
539
540
541
542TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf)
543{
544 TCGv_i64 v = new_tmp_a64(s);
545 if (reg != 31) {
546 if (sf) {
547 tcg_gen_mov_i64(v, cpu_X[reg]);
548 } else {
549 tcg_gen_ext32u_i64(v, cpu_X[reg]);
550 }
551 } else {
552 tcg_gen_movi_i64(v, 0);
553 }
554 return v;
555}
556
557TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf)
558{
559 TCGv_i64 v = new_tmp_a64(s);
560 if (sf) {
561 tcg_gen_mov_i64(v, cpu_X[reg]);
562 } else {
563 tcg_gen_ext32u_i64(v, cpu_X[reg]);
564 }
565 return v;
566}
567
568
569
570
571
572
/* CPUARMState offset of the low element (given size) of FP register regno. */
static inline int fp_reg_offset(DisasContext *s, int regno, TCGMemOp size)
{
    return vec_reg_offset(s, regno, 0, size);
}

/* CPUARMState offset of the high 64 bits of a 128-bit FP register. */
static inline int fp_reg_hi_offset(DisasContext *s, int regno)
{
    return vec_reg_offset(s, regno, 1, MO_64);
}
583
584
585
586
587
588
589
/*
 * Load the D (64-bit) view of FP register reg into a new i64 temp.
 * Caller is responsible for freeing the returned temporary.
 */
static TCGv_i64 read_fp_dreg(DisasContext *s, int reg)
{
    TCGv_i64 v = tcg_temp_new_i64();

    tcg_gen_ld_i64(v, cpu_env, fp_reg_offset(s, reg, MO_64));
    return v;
}

/* Load the S (32-bit) view of FP register reg into a new i32 temp. */
static TCGv_i32 read_fp_sreg(DisasContext *s, int reg)
{
    TCGv_i32 v = tcg_temp_new_i32();

    tcg_gen_ld_i32(v, cpu_env, fp_reg_offset(s, reg, MO_32));
    return v;
}

/* Load the H (16-bit) view, zero-extended, into a new i32 temp. */
static TCGv_i32 read_fp_hreg(DisasContext *s, int reg)
{
    TCGv_i32 v = tcg_temp_new_i32();

    tcg_gen_ld16u_i32(v, cpu_env, fp_reg_offset(s, reg, MO_16));
    return v;
}
613
614
615
616
/*
 * Zero the parts of vector register rd beyond the written portion:
 * bits [127:64] when only 64 bits were written (!is_q), and everything
 * past 16 bytes when the vector register is longer (SVE).
 */
static void clear_vec_high(DisasContext *s, bool is_q, int rd)
{
    unsigned ofs = fp_reg_offset(s, rd, MO_64);
    unsigned vsz = vec_full_reg_size(s);

    if (!is_q) {
        TCGv_i64 tcg_zero = tcg_const_i64(0);
        tcg_gen_st_i64(tcg_zero, cpu_env, ofs + 8);
        tcg_temp_free_i64(tcg_zero);
    }
    if (vsz > 16) {
        /* Zero-fill the tail of an SVE-sized register. */
        tcg_gen_gvec_dup8i(ofs + 16, vsz - 16, vsz - 16, 0);
    }
}
631
/*
 * Write v to the D view of FP register reg, zeroing the rest of the
 * vector register as required by the AArch64 scalar-FP write semantics.
 */
void write_fp_dreg(DisasContext *s, int reg, TCGv_i64 v)
{
    unsigned ofs = fp_reg_offset(s, reg, MO_64);

    tcg_gen_st_i64(v, cpu_env, ofs);
    clear_vec_high(s, false, reg);
}

/* Write a 32-bit value to FP register reg, zero-extending through 64
 * bits and clearing the rest of the vector register. */
static void write_fp_sreg(DisasContext *s, int reg, TCGv_i32 v)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp, v);
    write_fp_dreg(s, reg, tmp);
    tcg_temp_free_i64(tmp);
}
648
649TCGv_ptr get_fpstatus_ptr(bool is_f16)
650{
651 TCGv_ptr statusptr = tcg_temp_new_ptr();
652 int offset;
653
654
655
656
657
658
659 if (is_f16) {
660 offset = offsetof(CPUARMState, vfp.fp_status_f16);
661 } else {
662 offset = offsetof(CPUARMState, vfp.fp_status);
663 }
664 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
665 return statusptr;
666}
667
668
/* Expand a 2-operand AdvSIMD vector op via a gvec expander. */
static void gen_gvec_fn2(DisasContext *s, bool is_q, int rd, int rn,
                         GVecGen2Fn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            is_q ? 16 : 8, vec_full_reg_size(s));
}

/* Expand a 2-operand + immediate AdvSIMD vector op via a gvec expander. */
static void gen_gvec_fn2i(DisasContext *s, bool is_q, int rd, int rn,
                          int64_t imm, GVecGen2iFn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            imm, is_q ? 16 : 8, vec_full_reg_size(s));
}

/* Expand a 3-operand AdvSIMD vector op via a gvec expander. */
static void gen_gvec_fn3(DisasContext *s, bool is_q, int rd, int rn, int rm,
                         GVecGen3Fn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            vec_full_reg_offset(s, rm), is_q ? 16 : 8, vec_full_reg_size(s));
}

/* Expand a 2-operand + immediate op using a GVecGen2i descriptor. */
static void gen_gvec_op2i(DisasContext *s, bool is_q, int rd,
                          int rn, int64_t imm, const GVecGen2i *gvec_op)
{
    tcg_gen_gvec_2i(vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
                    is_q ? 16 : 8, vec_full_reg_size(s), imm, gvec_op);
}

/* Expand a 3-operand op using a GVecGen3 descriptor. */
static void gen_gvec_op3(DisasContext *s, bool is_q, int rd,
                         int rn, int rm, const GVecGen3 *gvec_op)
{
    tcg_gen_gvec_3(vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
                   vec_full_reg_offset(s, rm), is_q ? 16 : 8,
                   vec_full_reg_size(s), gvec_op);
}

/* Expand a 3-operand op via an out-of-line helper, passing "data". */
static void gen_gvec_op3_ool(DisasContext *s, bool is_q, int rd,
                             int rn, int rm, int data, gen_helper_gvec_3 *fn)
{
    tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm),
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
}

/* Expand a 3-operand out-of-line helper that also receives cpu_env. */
static void gen_gvec_op3_env(DisasContext *s, bool is_q, int rd,
                             int rn, int rm, gen_helper_gvec_3_ptr *fn)
{
    tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm), cpu_env,
                       is_q ? 16 : 8, vec_full_reg_size(s), 0, fn);
}

/* Expand a 3-operand out-of-line helper that receives an fpstatus
 * pointer (half- or single/double-precision as selected by is_fp16). */
static void gen_gvec_op3_fpst(DisasContext *s, bool is_q, int rd, int rn,
                              int rm, bool is_fp16, int data,
                              gen_helper_gvec_3_ptr *fn)
{
    TCGv_ptr fpst = get_fpstatus_ptr(is_fp16);
    tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm), fpst,
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
    tcg_temp_free_ptr(fpst);
}
749
750
751
752
/*
 * Set NZ flags from a 64-bit result: NF gets the high half, ZF is
 * nonzero iff the result is nonzero (low OR high half).
 */
static inline void gen_set_NZ64(TCGv_i64 result)
{
    tcg_gen_extr_i64_i32(cpu_ZF, cpu_NF, result);
    tcg_gen_or_i32(cpu_ZF, cpu_ZF, cpu_NF);
}

/* Set flags for a logical operation: N and Z from the result, C=V=0. */
static inline void gen_logic_CC(int sf, TCGv_i64 result)
{
    if (sf) {
        gen_set_NZ64(result);
    } else {
        tcg_gen_extrl_i64_i32(cpu_ZF, result);
        tcg_gen_mov_i32(cpu_NF, cpu_ZF);
    }
    tcg_gen_movi_i32(cpu_CF, 0);
    tcg_gen_movi_i32(cpu_VF, 0);
}
771
772
/* dest = t0 + t1, also setting the NZCV flags (32- or 64-bit). */
static void gen_add_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        TCGv_i64 result, flag, tmp;
        result = tcg_temp_new_i64();
        flag = tcg_temp_new_i64();
        tmp = tcg_temp_new_i64();

        /* Double-width add: "flag" receives the carry out. */
        tcg_gen_movi_i64(tmp, 0);
        tcg_gen_add2_i64(result, flag, t0, tmp, t1, tmp);

        tcg_gen_extrl_i64_i32(cpu_CF, flag);

        gen_set_NZ64(result);

        /* V = (result ^ t0) & ~(t0 ^ t1): overflow iff operands agree
         * in sign but the result does not. */
        tcg_gen_xor_i64(flag, result, t0);
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_andc_i64(flag, flag, tmp);
        tcg_temp_free_i64(tmp);
        tcg_gen_extrh_i64_i32(cpu_VF, flag);

        tcg_gen_mov_i64(dest, result);
        tcg_temp_free_i64(result);
        tcg_temp_free_i64(flag);
    } else {
        /* 32-bit: compute directly in the 32-bit flag variables. */
        TCGv_i32 t0_32 = tcg_temp_new_i32();
        TCGv_i32 t1_32 = tcg_temp_new_i32();
        TCGv_i32 tmp = tcg_temp_new_i32();

        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_extrl_i64_i32(t0_32, t0);
        tcg_gen_extrl_i64_i32(t1_32, t1);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, t1_32, tmp);
        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
        /* Result is the (zero-extended) 32-bit sum held in NF. */
        tcg_gen_extu_i32_i64(dest, cpu_NF);

        tcg_temp_free_i32(tmp);
        tcg_temp_free_i32(t0_32);
        tcg_temp_free_i32(t1_32);
    }
}
818
819
/* dest = t0 - t1, also setting the NZCV flags (32- or 64-bit). */
static void gen_sub_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        TCGv_i64 result, flag, tmp;

        result = tcg_temp_new_i64();
        flag = tcg_temp_new_i64();
        tcg_gen_sub_i64(result, t0, t1);

        gen_set_NZ64(result);

        /* C is set when there is no borrow, i.e. t0 >= t1 unsigned. */
        tcg_gen_setcond_i64(TCG_COND_GEU, flag, t0, t1);
        tcg_gen_extrl_i64_i32(cpu_CF, flag);

        /* V = (result ^ t0) & (t0 ^ t1): overflow iff operands differ
         * in sign and the result's sign differs from t0's. */
        tcg_gen_xor_i64(flag, result, t0);
        tmp = tcg_temp_new_i64();
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_and_i64(flag, flag, tmp);
        tcg_temp_free_i64(tmp);
        tcg_gen_extrh_i64_i32(cpu_VF, flag);
        tcg_gen_mov_i64(dest, result);
        tcg_temp_free_i64(flag);
        tcg_temp_free_i64(result);
    } else {
        /* 32-bit: compute directly in the 32-bit flag variables. */
        TCGv_i32 t0_32 = tcg_temp_new_i32();
        TCGv_i32 t1_32 = tcg_temp_new_i32();
        TCGv_i32 tmp;

        tcg_gen_extrl_i64_i32(t0_32, t0);
        tcg_gen_extrl_i64_i32(t1_32, t1);
        tcg_gen_sub_i32(cpu_NF, t0_32, t1_32);
        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0_32, t1_32);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_temp_free_i32(t0_32);
        tcg_temp_free_i32(t1_32);
        tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
        tcg_temp_free_i32(tmp);
        tcg_gen_extu_i32_i64(dest, cpu_NF);
    }
}
865
866
/* dest = t0 + t1 + CF, without setting flags; truncated when !sf. */
static void gen_adc(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    TCGv_i64 flag = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(flag, cpu_CF);
    tcg_gen_add_i64(dest, t0, t1);
    tcg_gen_add_i64(dest, dest, flag);
    tcg_temp_free_i64(flag);

    if (!sf) {
        /* 32-bit result: zero-extend into the 64-bit destination. */
        tcg_gen_ext32u_i64(dest, dest);
    }
}
879
880
/* dest = t0 + t1 + CF, also setting the NZCV flags. */
static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        TCGv_i64 result, cf_64, vf_64, tmp;
        result = tcg_temp_new_i64();
        cf_64 = tcg_temp_new_i64();
        vf_64 = tcg_temp_new_i64();
        tmp = tcg_const_i64(0);

        /* Two chained double-width adds: t0 + CF, then + t1; cf_64
         * accumulates the carry out of each step. */
        tcg_gen_extu_i32_i64(cf_64, cpu_CF);
        tcg_gen_add2_i64(result, cf_64, t0, tmp, cf_64, tmp);
        tcg_gen_add2_i64(result, cf_64, result, cf_64, t1, tmp);
        tcg_gen_extrl_i64_i32(cpu_CF, cf_64);
        gen_set_NZ64(result);

        /* V = (result ^ t0) & ~(t0 ^ t1). */
        tcg_gen_xor_i64(vf_64, result, t0);
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_andc_i64(vf_64, vf_64, tmp);
        tcg_gen_extrh_i64_i32(cpu_VF, vf_64);

        tcg_gen_mov_i64(dest, result);

        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(vf_64);
        tcg_temp_free_i64(cf_64);
        tcg_temp_free_i64(result);
    } else {
        /* 32-bit: same structure using the 32-bit flag variables. */
        TCGv_i32 t0_32, t1_32, tmp;
        t0_32 = tcg_temp_new_i32();
        t1_32 = tcg_temp_new_i32();
        tmp = tcg_const_i32(0);

        tcg_gen_extrl_i64_i32(t0_32, t0);
        tcg_gen_extrl_i64_i32(t1_32, t1);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1_32, tmp);

        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
        tcg_gen_extu_i32_i64(dest, cpu_NF);

        tcg_temp_free_i32(tmp);
        tcg_temp_free_i32(t1_32);
        tcg_temp_free_i32(t0_32);
    }
}
929
930
931
932
933
934
935
936
/*
 * Store a GPR to memory with an explicit mmu index, optionally
 * recording ISS (instruction syndrome) information for a possible
 * data abort.  size is log2 of the access width (0..3).
 */
static void do_gpr_st_memidx(DisasContext *s, TCGv_i64 source,
                             TCGv_i64 tcg_addr, int size, int memidx,
                             bool iss_valid,
                             unsigned int iss_srt,
                             bool iss_sf, bool iss_ar)
{
    g_assert(size <= 3);
    tcg_gen_qemu_st_i64(source, tcg_addr, memidx, s->be_data + size);

    if (iss_valid) {
        uint32_t syn;

        /* Pre-compute the syndrome reported if this access faults. */
        syn = syn_data_abort_with_iss(0,
                                      size,
                                      false,
                                      iss_srt,
                                      iss_sf,
                                      iss_ar,
                                      0, 0, 0, 0, 0, false);
        disas_set_insn_syndrome(s, syn);
    }
}

/* As do_gpr_st_memidx, using the current translation mmu index. */
static void do_gpr_st(DisasContext *s, TCGv_i64 source,
                      TCGv_i64 tcg_addr, int size,
                      bool iss_valid,
                      unsigned int iss_srt,
                      bool iss_sf, bool iss_ar)
{
    do_gpr_st_memidx(s, source, tcg_addr, size, get_mem_index(s),
                     iss_valid, iss_srt, iss_sf, iss_ar);
}
969
970
971
972
/*
 * Load into a GPR from memory with an explicit mmu index, with optional
 * sign extension and optional ISS recording.  size is log2 of the
 * access width (0..3); "extend" zeroes bits 63:32 of a signed narrow
 * load (result destined for a W register).
 */
static void do_gpr_ld_memidx(DisasContext *s,
                             TCGv_i64 dest, TCGv_i64 tcg_addr,
                             int size, bool is_signed,
                             bool extend, int memidx,
                             bool iss_valid, unsigned int iss_srt,
                             bool iss_sf, bool iss_ar)
{
    TCGMemOp memop = s->be_data + size;

    g_assert(size <= 3);

    if (is_signed) {
        memop += MO_SIGN;
    }

    tcg_gen_qemu_ld_i64(dest, tcg_addr, memidx, memop);

    if (extend && is_signed) {
        /* Only narrow loads can be both signed and extended. */
        g_assert(size < 3);
        tcg_gen_ext32u_i64(dest, dest);
    }

    if (iss_valid) {
        uint32_t syn;

        /* Pre-compute the syndrome reported if this access faults. */
        syn = syn_data_abort_with_iss(0,
                                      size,
                                      is_signed,
                                      iss_srt,
                                      iss_sf,
                                      iss_ar,
                                      0, 0, 0, 0, 0, false);
        disas_set_insn_syndrome(s, syn);
    }
}

/* As do_gpr_ld_memidx, using the current translation mmu index. */
static void do_gpr_ld(DisasContext *s,
                      TCGv_i64 dest, TCGv_i64 tcg_addr,
                      int size, bool is_signed, bool extend,
                      bool iss_valid, unsigned int iss_srt,
                      bool iss_sf, bool iss_ar)
{
    do_gpr_ld_memidx(s, dest, tcg_addr, size, is_signed, extend,
                     get_mem_index(s),
                     iss_valid, iss_srt, iss_sf, iss_ar);
}
1019
1020
1021
1022
/*
 * Store an FP register to memory.  size is log2 of the width; a
 * 128-bit store (size == 4) is split into two 64-bit accesses whose
 * order depends on target endianness.
 */
static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, int size)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_ld_i64(tmp, cpu_env, fp_reg_offset(s, srcidx, MO_64));
    if (size < 4) {
        tcg_gen_qemu_st_i64(tmp, tcg_addr, get_mem_index(s),
                            s->be_data + size);
    } else {
        bool be = s->be_data == MO_BE;
        TCGv_i64 tcg_hiaddr = tcg_temp_new_i64();

        tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
        /* Low half first for LE, high half first for BE. */
        tcg_gen_qemu_st_i64(tmp, be ? tcg_hiaddr : tcg_addr, get_mem_index(s),
                            s->be_data | MO_Q);
        tcg_gen_ld_i64(tmp, cpu_env, fp_reg_hi_offset(s, srcidx));
        tcg_gen_qemu_st_i64(tmp, be ? tcg_addr : tcg_hiaddr, get_mem_index(s),
                            s->be_data | MO_Q);
        tcg_temp_free_i64(tcg_hiaddr);
    }

    tcg_temp_free_i64(tmp);
}
1046
1047
1048
1049
/*
 * Load an FP register from memory.  size is log2 of the width; a
 * 128-bit load is split into two 64-bit accesses.  The unwritten
 * high half is zeroed, and the vector tail cleared.
 */
static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size)
{
    TCGv_i64 tmplo = tcg_temp_new_i64();
    TCGv_i64 tmphi;

    if (size < 4) {
        TCGMemOp memop = s->be_data + size;
        /* Narrow load: the high 64 bits of the register become zero. */
        tmphi = tcg_const_i64(0);
        tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), memop);
    } else {
        bool be = s->be_data == MO_BE;
        TCGv_i64 tcg_hiaddr;

        tmphi = tcg_temp_new_i64();
        tcg_hiaddr = tcg_temp_new_i64();

        tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
        /* Low half first for LE, high half first for BE. */
        tcg_gen_qemu_ld_i64(tmplo, be ? tcg_hiaddr : tcg_addr, get_mem_index(s),
                            s->be_data | MO_Q);
        tcg_gen_qemu_ld_i64(tmphi, be ? tcg_addr : tcg_hiaddr, get_mem_index(s),
                            s->be_data | MO_Q);
        tcg_temp_free_i64(tcg_hiaddr);
    }

    tcg_gen_st_i64(tmplo, cpu_env, fp_reg_offset(s, destidx, MO_64));
    tcg_gen_st_i64(tmphi, cpu_env, fp_reg_hi_offset(s, destidx));

    tcg_temp_free_i64(tmplo);
    tcg_temp_free_i64(tmphi);

    clear_vec_high(s, true, destidx);
}
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
/*
 * Read one element of a vector register into an i64, with the width
 * and signedness given by memop (MO_8..MO_64, optionally | MO_SIGN).
 */
static void read_vec_element(DisasContext *s, TCGv_i64 tcg_dest, int srcidx,
                             int element, TCGMemOp memop)
{
    int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_ld8u_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_ld16u_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_32:
        tcg_gen_ld32u_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_8|MO_SIGN:
        tcg_gen_ld8s_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16|MO_SIGN:
        tcg_gen_ld16s_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_32|MO_SIGN:
        tcg_gen_ld32s_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_64:
    case MO_64|MO_SIGN:
        /* Full width: sign flag is irrelevant. */
        tcg_gen_ld_i64(tcg_dest, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}

/* As read_vec_element, but into an i32 (element width <= 32 bits). */
static void read_vec_element_i32(DisasContext *s, TCGv_i32 tcg_dest, int srcidx,
                                 int element, TCGMemOp memop)
{
    int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_ld8u_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_ld16u_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_8|MO_SIGN:
        tcg_gen_ld8s_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16|MO_SIGN:
        tcg_gen_ld16s_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_32:
    case MO_32|MO_SIGN:
        /* Full width: sign flag is irrelevant. */
        tcg_gen_ld_i32(tcg_dest, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}
1154
1155
/* Write an i64 into one element of a vector register (width = memop). */
static void write_vec_element(DisasContext *s, TCGv_i64 tcg_src, int destidx,
                              int element, TCGMemOp memop)
{
    int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_st8_i64(tcg_src, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_st16_i64(tcg_src, cpu_env, vect_off);
        break;
    case MO_32:
        tcg_gen_st32_i64(tcg_src, cpu_env, vect_off);
        break;
    case MO_64:
        tcg_gen_st_i64(tcg_src, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}

/* As write_vec_element, but from an i32 (element width <= 32 bits). */
static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src,
                                  int destidx, int element, TCGMemOp memop)
{
    int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_st8_i32(tcg_src, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_st16_i32(tcg_src, cpu_env, vect_off);
        break;
    case MO_32:
        tcg_gen_st_i32(tcg_src, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}
1196
1197
/* Store a single vector register element to memory. */
static void do_vec_st(DisasContext *s, int srcidx, int element,
                      TCGv_i64 tcg_addr, int size)
{
    TCGMemOp memop = s->be_data + size;
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();

    read_vec_element(s, tcg_tmp, srcidx, element, size);
    tcg_gen_qemu_st_i64(tcg_tmp, tcg_addr, get_mem_index(s), memop);

    tcg_temp_free_i64(tcg_tmp);
}

/* Load a single vector register element from memory. */
static void do_vec_ld(DisasContext *s, int destidx, int element,
                      TCGv_i64 tcg_addr, int size)
{
    TCGMemOp memop = s->be_data + size;
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();

    tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr, get_mem_index(s), memop);
    write_vec_element(s, tcg_tmp, destidx, element, size);

    tcg_temp_free_i64(tcg_tmp);
}
1222
1223
1224
1225
1226
1227
1228
1229
/*
 * Check whether FP/AdvSIMD access is enabled; if not, generate the
 * trap to s->fp_excp_el and return false.  Must be called at most
 * once per instruction (asserted via fp_access_checked).
 */
static inline bool fp_access_check(DisasContext *s)
{
    assert(!s->fp_access_checked);
    s->fp_access_checked = true;

    if (!s->fp_excp_el) {
        return true;
    }

    gen_exception_insn(s, 4, EXCP_UDEF, syn_fp_access_trap(1, 0xe, false),
                       s->fp_excp_el);
    return false;
}

/*
 * Check whether SVE access is enabled; if not, generate the SVE
 * access trap and return false.  Also performs the FP access check.
 */
bool sve_access_check(DisasContext *s)
{
    if (s->sve_excp_el) {
        gen_exception_insn(s, 4, EXCP_UDEF, syn_sve_access_trap(),
                           s->sve_excp_el);
        return false;
    }
    return fp_access_check(s);
}
1256
1257
1258
1259
1260
1261
1262static void ext_and_shift_reg(TCGv_i64 tcg_out, TCGv_i64 tcg_in,
1263 int option, unsigned int shift)
1264{
1265 int extsize = extract32(option, 0, 2);
1266 bool is_signed = extract32(option, 2, 1);
1267
1268 if (is_signed) {
1269 switch (extsize) {
1270 case 0:
1271 tcg_gen_ext8s_i64(tcg_out, tcg_in);
1272 break;
1273 case 1:
1274 tcg_gen_ext16s_i64(tcg_out, tcg_in);
1275 break;
1276 case 2:
1277 tcg_gen_ext32s_i64(tcg_out, tcg_in);
1278 break;
1279 case 3:
1280 tcg_gen_mov_i64(tcg_out, tcg_in);
1281 break;
1282 }
1283 } else {
1284 switch (extsize) {
1285 case 0:
1286 tcg_gen_ext8u_i64(tcg_out, tcg_in);
1287 break;
1288 case 1:
1289 tcg_gen_ext16u_i64(tcg_out, tcg_in);
1290 break;
1291 case 2:
1292 tcg_gen_ext32u_i64(tcg_out, tcg_in);
1293 break;
1294 case 3:
1295 tcg_gen_mov_i64(tcg_out, tcg_in);
1296 break;
1297 }
1298 }
1299
1300 if (shift) {
1301 tcg_gen_shli_i64(tcg_out, tcg_out, shift);
1302 }
1303}
1304
/*
 * Placeholder for SP alignment checking: no check is generated.
 * NOTE(review): the architecture permits trapping misaligned SP use;
 * this hook exists so call sites are already in place if the check
 * is ever implemented — confirm intended semantics before relying
 * on alignment faults here.
 */
static inline void gen_check_sp_alignment(DisasContext *s)
{
}
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329static inline AArch64DecodeFn *lookup_disas_fn(const AArch64DecodeTable *table,
1330 uint32_t insn)
1331{
1332 const AArch64DecodeTable *tptr = table;
1333
1334 while (tptr->mask) {
1335 if ((insn & tptr->mask) == tptr->pattern) {
1336 return tptr->disas_fn;
1337 }
1338 tptr++;
1339 }
1340 return NULL;
1341}
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
/*
 * Unconditional branch (immediate): B / BL.
 * The 26-bit signed offset is in units of 4 bytes; bit 31 selects the
 * link form, which writes the return address to x30.
 */
static void disas_uncond_b_imm(DisasContext *s, uint32_t insn)
{
    /* s->pc has already been advanced, hence the -4 adjustment. */
    uint64_t addr = s->pc + sextract32(insn, 0, 26) * 4 - 4;

    if (insn & (1U << 31)) {
        /* BL: store the return address in the link register (x30). */
        tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
    }

    gen_goto_tb(s, 0, addr);
}
1369
1370
1371
1372
1373
1374
1375
/*
 * Compare and branch (immediate): CBZ / CBNZ.
 * sf selects 32/64-bit comparison; op selects branch-if-nonzero.
 */
static void disas_comp_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int sf, op, rt;
    uint64_t addr;
    TCGLabel *label_match;
    TCGv_i64 tcg_cmp;

    sf = extract32(insn, 31, 1);
    op = extract32(insn, 24, 1); /* 0: CBZ; 1: CBNZ */
    rt = extract32(insn, 0, 5);
    addr = s->pc + sextract32(insn, 5, 19) * 4 - 4;

    tcg_cmp = read_cpu_reg(s, rt, sf);
    label_match = gen_new_label();

    tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
                        tcg_cmp, 0, label_match);

    /* Fall through to the next instruction when the test fails. */
    gen_goto_tb(s, 0, s->pc);
    gen_set_label(label_match);
    gen_goto_tb(s, 1, addr);
}
1398
1399
1400
1401
1402
1403
1404
/* Test and branch (immediate): TBZ / TBNZ.
 * Branches when bit 'bit_pos' (b5:b40 in the encoding) of Rt is
 * zero (TBZ, op == 0) or one (TBNZ, op == 1).
 */
static void disas_test_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int bit_pos, op, rt;
    uint64_t addr;
    TCGLabel *label_match;
    TCGv_i64 tcg_cmp;

    /* bit 31 supplies bit 5 of the bit number; bits [23:19] the rest */
    bit_pos = (extract32(insn, 31, 1) << 5) | extract32(insn, 19, 5);
    op = extract32(insn, 24, 1); /* 0: TBZ; 1: TBNZ */
    addr = s->pc + sextract32(insn, 5, 14) * 4 - 4;
    rt = extract32(insn, 0, 5);

    tcg_cmp = tcg_temp_new_i64();
    tcg_gen_andi_i64(tcg_cmp, cpu_reg(s, rt), (1ULL << bit_pos));
    label_match = gen_new_label();
    tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
                        tcg_cmp, 0, label_match);
    tcg_temp_free_i64(tcg_cmp);
    gen_goto_tb(s, 0, s->pc);      /* not taken: fall through */
    gen_set_label(label_match);
    gen_goto_tb(s, 1, addr);       /* taken: branch target */
}
1427
1428
1429
1430
1431
1432
1433
/* Conditional branch (immediate): B.cond.
 * imm19 is a signed word offset; cond is a standard ARM condition
 * code in bits [3:0].  Bit 4 and bit 24 must be zero.
 */
static void disas_cond_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int cond;
    uint64_t addr;

    if ((insn & (1 << 4)) || (insn & (1 << 24))) {
        unallocated_encoding(s);
        return;
    }
    addr = s->pc + sextract32(insn, 5, 19) * 4 - 4;
    cond = extract32(insn, 0, 4);

    if (cond < 0x0e) {
        /* genuinely conditional branches */
        TCGLabel *label_match = gen_new_label();
        arm_gen_test_cc(cond, label_match);
        gen_goto_tb(s, 0, s->pc);
        gen_set_label(label_match);
        gen_goto_tb(s, 1, addr);
    } else {
        /* 0xe and 0xf are both "always" conditions */
        gen_goto_tb(s, 0, addr);
    }
}
1458
1459
/* HINT instruction group (crn == 2 within the system insn space).
 * The hint is selected by crm:op2; unallocated hints behave as NOP.
 */
static void handle_hint(DisasContext *s, uint32_t insn,
                        unsigned int op1, unsigned int op2, unsigned int crm)
{
    unsigned int selector = crm << 3 | op2;

    if (op1 != 3) {
        unallocated_encoding(s);
        return;
    }

    switch (selector) {
    case 0: /* NOP */
        return;
    case 3: /* WFI */
        s->base.is_jmp = DISAS_WFI;
        return;
        /* When running in MTTCG (CF_PARALLEL) we don't generate jumps
         * to the yield and WFE helpers, as it won't affect the
         * scheduling of other vCPUs; in that mode they are plain NOPs.
         */
    case 1: /* YIELD */
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            s->base.is_jmp = DISAS_YIELD;
        }
        return;
    case 2: /* WFE */
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            s->base.is_jmp = DISAS_WFE;
        }
        return;
    case 4: /* SEV */
    case 5: /* SEVL */
        /* we treat all as NOP at least for now */
        return;
    default:
        /* unallocated hint: behaves as NOP */
        return;
    }
}
1500
/* CLREX: clear the local exclusive monitor by setting the recorded
 * exclusive address to -1, which can never match a real address.
 */
static void gen_clrex(DisasContext *s, uint32_t insn)
{
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
1505
1506
/* CLREX, DSB, DMB, ISB (crn == 3 within the system insn space) */
static void handle_sync(DisasContext *s, uint32_t insn,
                        unsigned int op1, unsigned int op2, unsigned int crm)
{
    TCGBar bar;

    if (op1 != 3) {
        unallocated_encoding(s);
        return;
    }

    switch (op2) {
    case 2: /* CLREX */
        gen_clrex(s, insn);
        return;
    case 4: /* DSB */
    case 5: /* DMB */
        /* crm's low bits select the barrier subtype; map load/store
         * variants onto the corresponding TCG memory-ordering flags.
         */
        switch (crm & 3) {
        case 1: /* load barrier: order loads before later loads/stores */
            bar = TCG_BAR_SC | TCG_MO_LD_LD | TCG_MO_LD_ST;
            break;
        case 2: /* store barrier: order stores before later stores */
            bar = TCG_BAR_SC | TCG_MO_ST_ST;
            break;
        default: /* full barrier */
            bar = TCG_BAR_SC | TCG_MO_ALL;
            break;
        }
        tcg_gen_mb(bar);
        return;
    case 6: /* ISB */
        /* End the TB here: a chained jump back to the next insn forces
         * retranslation, so newly-written code and pending interrupts
         * are picked up after the barrier.
         */
        gen_goto_tb(s, 0, s->pc);
        return;
    default:
        unallocated_encoding(s);
        return;
    }
}
1548
1549
/* MSR (immediate) - move immediate to processor state field.
 * op (op1:op2) selects the pstate field; crm is the immediate value.
 * The actual pstate update is done by the msr_i_pstate helper.
 */
static void handle_msr_i(DisasContext *s, uint32_t insn,
                         unsigned int op1, unsigned int op2, unsigned int crm)
{
    int op = op1 << 3 | op2;
    switch (op) {
    case 0x05: /* SPSel: not accessible from EL0 */
        if (s->current_el == 0) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x1e: /* DAIFSet */
    case 0x1f: /* DAIFClear */
    {
        TCGv_i32 tcg_imm = tcg_const_i32(crm);
        TCGv_i32 tcg_op = tcg_const_i32(op);
        /* the helper may raise an exception, so sync pc first */
        gen_a64_set_pc_im(s->pc - 4);
        gen_helper_msr_i_pstate(cpu_env, tcg_op, tcg_imm);
        tcg_temp_free_i32(tcg_imm);
        tcg_temp_free_i32(tcg_op);
        /* For DAIFClear, exit the cpu loop to re-evaluate pending IRQs.  */
        gen_a64_set_pc_im(s->pc);
        s->base.is_jmp = (op == 0x1f ? DISAS_EXIT : DISAS_JUMP);
        break;
    }
    default:
        unallocated_encoding(s);
        return;
    }
}
1580
/* Pack the split flag representation (cpu_NF/ZF/CF/VF) into NZCV
 * layout (bits 31..28) and zero-extend the result into tcg_rt.
 * NF and VF keep their flag in bit 31; ZF is a value that is zero
 * iff Z is set; CF is 0 or 1.
 */
static void gen_get_nzcv(TCGv_i64 tcg_rt)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    TCGv_i32 nzcv = tcg_temp_new_i32();

    /* build bit 31, N */
    tcg_gen_andi_i32(nzcv, cpu_NF, (1U << 31));
    /* build bit 30, Z: set iff the stored ZF value is zero */
    tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_ZF, 0);
    tcg_gen_deposit_i32(nzcv, nzcv, tmp, 30, 1);
    /* build bit 29, C */
    tcg_gen_deposit_i32(nzcv, nzcv, cpu_CF, 29, 1);
    /* build bit 28, V: move VF's bit 31 down */
    tcg_gen_shri_i32(tmp, cpu_VF, 31);
    tcg_gen_deposit_i32(nzcv, nzcv, tmp, 28, 1);
    /* zero-extend into the 64-bit destination */
    tcg_gen_extu_i32_i64(tcg_rt, nzcv);

    tcg_temp_free_i32(nzcv);
    tcg_temp_free_i32(tmp);
}
1602
/* Unpack an NZCV value (bits 31..28 of tcg_rt) into the split flag
 * representation: NF/VF hold their flag in bit 31, ZF is zero iff
 * Z is set, CF is 0 or 1.
 */
static void gen_set_nzcv(TCGv_i64 tcg_rt)

{
    TCGv_i32 nzcv = tcg_temp_new_i32();

    /* take NZCV from the low 32 bits of the guest value */
    tcg_gen_extrl_i64_i32(nzcv, tcg_rt);

    /* bit 31, N: already in the right position for cpu_NF */
    tcg_gen_andi_i32(cpu_NF, nzcv, (1U << 31));
    /* bit 30, Z: cpu_ZF must be zero iff Z was set */
    tcg_gen_andi_i32(cpu_ZF, nzcv, (1 << 30));
    tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_ZF, cpu_ZF, 0);
    /* bit 29, C: shift down to a 0/1 value */
    tcg_gen_andi_i32(cpu_CF, nzcv, (1 << 29));
    tcg_gen_shri_i32(cpu_CF, cpu_CF, 29);
    /* bit 28, V: shift up from bit 28 to bit 31 */
    tcg_gen_andi_i32(cpu_VF, nzcv, (1 << 28));
    tcg_gen_shli_i32(cpu_VF, cpu_VF, 3);
    tcg_temp_free_i32(nzcv);
}
1624
1625
1626
1627
1628
1629
1630
1631
/* MRS - move from system register
 * MSR (register) - move to system register
 * SYS / SYSL
 * These are all essentially the same insn in 'read' and 'write'
 * versions, dispatched via the cpregs hashtable using the encoded
 * op0/op1/op2/crn/crm coordinates.
 */
static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
                       unsigned int op0, unsigned int op1, unsigned int op2,
                       unsigned int crn, unsigned int crm, unsigned int rt)
{
    const ARMCPRegInfo *ri;
    TCGv_i64 tcg_rt;

    ri = get_arm_cp_reginfo(s->cp_regs,
                            ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP,
                                               crn, crm, op0, op1, op2));

    if (!ri) {
        /* Unknown register; this might be a guest error or a QEMU
         * unimplemented feature, so log it before UNDEFing.
         */
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch64 "
                      "system register op0:%d op1:%d crn:%d crm:%d op2:%d\n",
                      isread ? "read" : "write", op0, op1, crn, crm, op2);
        unallocated_encoding(s);
        return;
    }

    /* Check static (translation-time) access permissions */
    if (!cp_access_ok(s->current_el, ri, isread)) {
        unallocated_encoding(s);
        return;
    }

    if (ri->accessfn) {
        /* Emit code to perform further access permission checks at
         * runtime; this may result in an exception, so sync pc and
         * pass the syndrome information the exception would need.
         */
        TCGv_ptr tmpptr;
        TCGv_i32 tcg_syn, tcg_isread;
        uint32_t syndrome;

        gen_a64_set_pc_im(s->pc - 4);
        tmpptr = tcg_const_ptr(ri);
        syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
        tcg_syn = tcg_const_i32(syndrome);
        tcg_isread = tcg_const_i32(isread);
        gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn, tcg_isread);
        tcg_temp_free_ptr(tmpptr);
        tcg_temp_free_i32(tcg_syn);
        tcg_temp_free_i32(tcg_isread);
    }

    /* Handle special cases first (registers emulated inline rather
     * than via a field load/store or helper call).
     */
    switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
    case ARM_CP_NOP:
        return;
    case ARM_CP_NZCV:
        tcg_rt = cpu_reg(s, rt);
        if (isread) {
            gen_get_nzcv(tcg_rt);
        } else {
            gen_set_nzcv(tcg_rt);
        }
        return;
    case ARM_CP_CURRENTEL:
        /* The current EL is constant for a given TB, so the value can
         * be generated as an immediate (EL field lives in bits [3:2]).
         */
        tcg_rt = cpu_reg(s, rt);
        tcg_gen_movi_i64(tcg_rt, s->current_el << 2);
        return;
    case ARM_CP_DC_ZVA:
        /* DC ZVA: zero the block of memory that rt points into */
        tcg_rt = cpu_reg(s, rt);
        gen_helper_dc_zva(cpu_env, tcg_rt);
        return;
    default:
        break;
    }
    if ((ri->type & ARM_CP_FPU) && !fp_access_check(s)) {
        return;
    } else if ((ri->type & ARM_CP_SVE) && !sve_access_check(s)) {
        return;
    }

    if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
        gen_io_start();
    }

    tcg_rt = cpu_reg(s, rt);

    if (isread) {
        if (ri->type & ARM_CP_CONST) {
            tcg_gen_movi_i64(tcg_rt, ri->resetvalue);
        } else if (ri->readfn) {
            TCGv_ptr tmpptr;
            tmpptr = tcg_const_ptr(ri);
            gen_helper_get_cp_reg64(tcg_rt, cpu_env, tmpptr);
            tcg_temp_free_ptr(tmpptr);
        } else {
            /* no readfn: read straight from the CPUState field */
            tcg_gen_ld_i64(tcg_rt, cpu_env, ri->fieldoffset);
        }
    } else {
        if (ri->type & ARM_CP_CONST) {
            /* If not forbidden by access permissions, treat as WI */
            return;
        } else if (ri->writefn) {
            TCGv_ptr tmpptr;
            tmpptr = tcg_const_ptr(ri);
            gen_helper_set_cp_reg64(cpu_env, tmpptr, tcg_rt);
            tcg_temp_free_ptr(tmpptr);
        } else {
            tcg_gen_st_i64(tcg_rt, cpu_env, ri->fieldoffset);
        }
    }

    if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
        /* I/O operations must end the TB here (whether read or write) */
        gen_io_end();
        s->base.is_jmp = DISAS_UPDATE;
    } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
        /* We default to ending the TB on a system register write,
         * but allow this to be suppressed by the register definition
         * (ARM_CP_SUPPRESS_TB_END) when the write cannot affect
         * translation.
         */
        s->base.is_jmp = DISAS_UPDATE;
    }
}
1755
1756
1757
1758
1759
1760
1761
1762static void disas_system(DisasContext *s, uint32_t insn)
1763{
1764 unsigned int l, op0, op1, crn, crm, op2, rt;
1765 l = extract32(insn, 21, 1);
1766 op0 = extract32(insn, 19, 2);
1767 op1 = extract32(insn, 16, 3);
1768 crn = extract32(insn, 12, 4);
1769 crm = extract32(insn, 8, 4);
1770 op2 = extract32(insn, 5, 3);
1771 rt = extract32(insn, 0, 5);
1772
1773 if (op0 == 0) {
1774 if (l || rt != 31) {
1775 unallocated_encoding(s);
1776 return;
1777 }
1778 switch (crn) {
1779 case 2:
1780 handle_hint(s, insn, op1, op2, crm);
1781 break;
1782 case 3:
1783 handle_sync(s, insn, op1, op2, crm);
1784 break;
1785 case 4:
1786 handle_msr_i(s, insn, op1, op2, crm);
1787 break;
1788 default:
1789 unallocated_encoding(s);
1790 break;
1791 }
1792 return;
1793 }
1794 handle_sys(s, insn, l, op0, op1, op2, crn, crm, rt);
1795}
1796
1797
1798
1799
1800
1801
1802
1803
/* Exception generation
 *
 *  31             24 23 21 20                     5 4   2 1  0
 * +-----------------+-----+------------------------+-----+----+
 * | 1 1 0 1 0 1 0 0 | opc |          imm16         | op2 | LL |
 * +-----------------+-----+------------------------+-----+----+
 */
static void disas_exc(DisasContext *s, uint32_t insn)
{
    int opc = extract32(insn, 21, 3);
    int op2_ll = extract32(insn, 0, 5);
    int imm16 = extract32(insn, 5, 16);
    TCGv_i32 tmp;

    switch (opc) {
    case 0:
        /* SVC / HVC / SMC, selected by op2_ll.  For all of these we
         * advance the single-step state machine before raising the
         * exception, so single-stepping a system-call insn behaves
         * correctly.
         */
        switch (op2_ll) {
        case 1:                                                     /* SVC */
            gen_ss_advance(s);
            gen_exception_insn(s, 0, EXCP_SWI, syn_aa64_svc(imm16),
                               default_exception_el(s));
            break;
        case 2:                                                     /* HVC */
            if (s->current_el == 0) {
                unallocated_encoding(s);
                break;
            }
            /* The pre-HVC helper handles cases where HVC gets trapped
             * as an undefined insn by runtime configuration; sync pc
             * first since it may raise an exception.
             */
            gen_a64_set_pc_im(s->pc - 4);
            gen_helper_pre_hvc(cpu_env);
            gen_ss_advance(s);
            gen_exception_insn(s, 0, EXCP_HVC, syn_aa64_hvc(imm16), 2);
            break;
        case 3:                                                     /* SMC */
            if (s->current_el == 0) {
                unallocated_encoding(s);
                break;
            }
            gen_a64_set_pc_im(s->pc - 4);
            tmp = tcg_const_i32(syn_aa64_smc(imm16));
            gen_helper_pre_smc(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            gen_ss_advance(s);
            gen_exception_insn(s, 0, EXCP_SMC, syn_aa64_smc(imm16), 3);
            break;
        default:
            unallocated_encoding(s);
            break;
        }
        break;
    case 1:
        if (op2_ll != 0) {
            unallocated_encoding(s);
            break;
        }
        /* BRK */
        gen_exception_bkpt_insn(s, 4, syn_aa64_bkpt(imm16));
        break;
    case 2:
        if (op2_ll != 0) {
            unallocated_encoding(s);
            break;
        }
        /* HLT: external halting debug is not implemented, so this
         * UNDEFs - except that "HLT 0xf000" is the A64 semihosting
         * syscall instruction when semihosting is enabled.
         */
        if (semihosting_enabled() && imm16 == 0xf000) {
#ifndef CONFIG_USER_ONLY
            /* In system mode, don't allow EL0 access to semihosting,
             * for some semblance of security (and consistency with
             * the 32-bit semihosting behaviour).
             */
            if (s->current_el == 0) {
                unsupported_encoding(s, insn);
                break;
            }
#endif
            gen_exception_internal_insn(s, 0, EXCP_SEMIHOST);
        } else {
            unsupported_encoding(s, insn);
        }
        break;
    case 5:
        if (op2_ll < 1 || op2_ll > 3) {
            unallocated_encoding(s);
            break;
        }
        /* DCPS1..DCPS3: debug state entry, not implemented */
        unsupported_encoding(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
1902
1903
1904
1905
1906
1907
1908
/* Unconditional branch (register)
 *  31           25 24   21 20   16 15   10 9    5 4     0
 * +---------------+-------+-------+-------+------+-------+
 * | 1 1 0 1 0 1 1 |  opc  |  op2  |  op3  |  Rn  |  op4  |
 * +---------------+-------+-------+-------+------+-------+
 */
static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
{
    unsigned int opc, op2, op3, rn, op4;

    opc = extract32(insn, 21, 4);
    op2 = extract32(insn, 16, 5);
    op3 = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    op4 = extract32(insn, 0, 5);

    if (op4 != 0x0 || op3 != 0x0 || op2 != 0x1f) {
        unallocated_encoding(s);
        return;
    }

    switch (opc) {
    case 0: /* BR */
    case 1: /* BLR */
    case 2: /* RET */
        gen_a64_set_pc(s, cpu_reg(s, rn));
        /* BLR also needs to load return address */
        if (opc == 1) {
            tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
        }
        break;
    case 4: /* ERET */
        if (s->current_el == 0) {
            unallocated_encoding(s);
            return;
        }
        if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
            gen_io_start();
        }
        gen_helper_exception_return(cpu_env);
        if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
            gen_io_end();
        }
        /* Must exit loop to check un-masked IRQs */
        s->base.is_jmp = DISAS_EXIT;
        return;
    case 5: /* DRPS */
        if (rn != 0x1f) {
            unallocated_encoding(s);
        } else {
            unsupported_encoding(s, insn);
        }
        return;
    default:
        unallocated_encoding(s);
        return;
    }

    s->base.is_jmp = DISAS_JUMP;
}
1963
1964
1965static void disas_b_exc_sys(DisasContext *s, uint32_t insn)
1966{
1967 switch (extract32(insn, 25, 7)) {
1968 case 0x0a: case 0x0b:
1969 case 0x4a: case 0x4b:
1970 disas_uncond_b_imm(s, insn);
1971 break;
1972 case 0x1a: case 0x5a:
1973 disas_comp_b_imm(s, insn);
1974 break;
1975 case 0x1b: case 0x5b:
1976 disas_test_b_imm(s, insn);
1977 break;
1978 case 0x2a:
1979 disas_cond_b_imm(s, insn);
1980 break;
1981 case 0x6a:
1982 if (insn & (1 << 24)) {
1983 disas_system(s, insn);
1984 } else {
1985 disas_exc(s, insn);
1986 }
1987 break;
1988 case 0x6b:
1989 disas_uncond_b_reg(s, insn);
1990 break;
1991 default:
1992 unallocated_encoding(s);
1993 break;
1994 }
1995}
1996
1997
1998
1999
2000
2001
2002
2003
2004
2005
2006
2007
/* Load-exclusive (LDXR/LDAXR/LDXP/LDAXP): load from [addr] into rt
 * (and rt2 for a pair), recording the address and the loaded data in
 * cpu_exclusive_addr / cpu_exclusive_val (/ cpu_exclusive_high) so a
 * later store-exclusive can verify them.  size is log2 of the access
 * size in bytes.
 */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv_i64 addr, int size, bool is_pair)
{
    int idx = get_mem_index(s);
    TCGMemOp memop = s->be_data;

    g_assert(size <= 3);
    if (is_pair) {
        g_assert(size >= 2);
        if (size == 2) {
            /* Two 32-bit halves: do a single aligned 64-bit load and
             * split it, so the pair is single-copy atomic.
             */
            memop |= MO_64 | MO_ALIGN;
            tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx, memop);
            if (s->be_data == MO_LE) {
                tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 0, 32);
                tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 32, 32);
            } else {
                tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 32, 32);
                tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 0, 32);
            }
        } else {
            /* 64-bit pair: two separate 64-bit loads, with the whole
             * quadword required to be 16-byte aligned (MO_ALIGN_16 on
             * the first access only).
             */
            memop |= MO_64;
            tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx,
                                memop | MO_ALIGN_16);

            TCGv_i64 addr2 = tcg_temp_new_i64();
            tcg_gen_addi_i64(addr2, addr, 8);
            tcg_gen_qemu_ld_i64(cpu_exclusive_high, addr2, idx, memop);
            tcg_temp_free_i64(addr2);

            tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val);
            tcg_gen_mov_i64(cpu_reg(s, rt2), cpu_exclusive_high);
        }
    } else {
        memop |= size | MO_ALIGN;
        tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx, memop);
        tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val);
    }
    /* Arm the monitor: record the address last, after the load(s)
     * have succeeded without faulting.
     */
    tcg_gen_mov_i64(cpu_exclusive_addr, addr);
}
2050
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i64 addr, int size, int is_pair)
{
    /* STXR/STLXR/STXP/STLXP.  Emitted code implements:
     *
     *   if (env->exclusive_addr == addr && env->exclusive_val == [addr]
     *       && (!is_pair || env->exclusive_high == [addr + datasize])) {
     *       [addr] = {Rt};
     *       if (is_pair) {
     *           [addr + datasize] = {Rt2};
     *       }
     *       Rd = 0;   // success
     *   } else {
     *       Rd = 1;   // failure
     *   }
     *   env->exclusive_addr = -1;  // monitor is always cleared
     *
     * The value comparison is folded into an atomic cmpxchg against
     * the recorded exclusive value(s).
     */
    TCGLabel *fail_label = gen_new_label();
    TCGLabel *done_label = gen_new_label();
    TCGv_i64 tmp;

    /* Address mismatch: monitor was lost (or never armed) */
    tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);

    tmp = tcg_temp_new_i64();
    if (is_pair) {
        if (size == 2) {
            /* 32-bit pair: concatenate into one 64-bit cmpxchg */
            if (s->be_data == MO_LE) {
                tcg_gen_concat32_i64(tmp, cpu_reg(s, rt), cpu_reg(s, rt2));
            } else {
                tcg_gen_concat32_i64(tmp, cpu_reg(s, rt2), cpu_reg(s, rt));
            }
            tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr,
                                       cpu_exclusive_val, tmp,
                                       get_mem_index(s),
                                       MO_64 | MO_ALIGN | s->be_data);
            tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
        } else if (s->be_data == MO_LE) {
            /* 64-bit pair: 16-byte cmpxchg via helper */
            if (tb_cflags(s->base.tb) & CF_PARALLEL) {
                gen_helper_paired_cmpxchg64_le_parallel(tmp, cpu_env,
                                                        cpu_exclusive_addr,
                                                        cpu_reg(s, rt),
                                                        cpu_reg(s, rt2));
            } else {
                gen_helper_paired_cmpxchg64_le(tmp, cpu_env, cpu_exclusive_addr,
                                               cpu_reg(s, rt), cpu_reg(s, rt2));
            }
        } else {
            if (tb_cflags(s->base.tb) & CF_PARALLEL) {
                gen_helper_paired_cmpxchg64_be_parallel(tmp, cpu_env,
                                                        cpu_exclusive_addr,
                                                        cpu_reg(s, rt),
                                                        cpu_reg(s, rt2));
            } else {
                gen_helper_paired_cmpxchg64_be(tmp, cpu_env, cpu_exclusive_addr,
                                               cpu_reg(s, rt), cpu_reg(s, rt2));
            }
        }
    } else {
        tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr, cpu_exclusive_val,
                                   cpu_reg(s, rt), get_mem_index(s),
                                   size | MO_ALIGN | s->be_data);
        /* tmp holds the old memory value; 0 in Rd means success */
        tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
    }
    tcg_gen_mov_i64(cpu_reg(s, rd), tmp);
    tcg_temp_free_i64(tmp);
    tcg_gen_br(done_label);

    gen_set_label(fail_label);
    tcg_gen_movi_i64(cpu_reg(s, rd), 1);
    gen_set_label(done_label);
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
2121
/* CAS family (single register): atomically compare [Rn] with Rs and,
 * if equal, store Rt; in either case Rs receives the old memory value.
 */
static void gen_compare_and_swap(DisasContext *s, int rs, int rt,
                                 int rn, int size)
{
    TCGv_i64 tcg_rs = cpu_reg(s, rs);
    TCGv_i64 tcg_rt = cpu_reg(s, rt);
    int memidx = get_mem_index(s);
    TCGv_i64 addr = cpu_reg_sp(s, rn);

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    /* cmpxchg writes the old memory value back into tcg_rs */
    tcg_gen_atomic_cmpxchg_i64(tcg_rs, addr, tcg_rs, tcg_rt, memidx,
                               size | MO_ALIGN | s->be_data);
}
2136
2137static void gen_compare_and_swap_pair(DisasContext *s, int rs, int rt,
2138 int rn, int size)
2139{
2140 TCGv_i64 s1 = cpu_reg(s, rs);
2141 TCGv_i64 s2 = cpu_reg(s, rs + 1);
2142 TCGv_i64 t1 = cpu_reg(s, rt);
2143 TCGv_i64 t2 = cpu_reg(s, rt + 1);
2144 TCGv_i64 addr = cpu_reg_sp(s, rn);
2145 int memidx = get_mem_index(s);
2146
2147 if (rn == 31) {
2148 gen_check_sp_alignment(s);
2149 }
2150
2151 if (size == 2) {
2152 TCGv_i64 cmp = tcg_temp_new_i64();
2153 TCGv_i64 val = tcg_temp_new_i64();
2154
2155 if (s->be_data == MO_LE) {
2156 tcg_gen_concat32_i64(val, t1, t2);
2157 tcg_gen_concat32_i64(cmp, s1, s2);
2158 } else {
2159 tcg_gen_concat32_i64(val, t2, t1);
2160 tcg_gen_concat32_i64(cmp, s2, s1);
2161 }
2162
2163 tcg_gen_atomic_cmpxchg_i64(cmp, addr, cmp, val, memidx,
2164 MO_64 | MO_ALIGN | s->be_data);
2165 tcg_temp_free_i64(val);
2166
2167 if (s->be_data == MO_LE) {
2168 tcg_gen_extr32_i64(s1, s2, cmp);
2169 } else {
2170 tcg_gen_extr32_i64(s2, s1, cmp);
2171 }
2172 tcg_temp_free_i64(cmp);
2173 } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
2174 TCGv_i32 tcg_rs = tcg_const_i32(rs);
2175
2176 if (s->be_data == MO_LE) {
2177 gen_helper_casp_le_parallel(cpu_env, tcg_rs, addr, t1, t2);
2178 } else {
2179 gen_helper_casp_be_parallel(cpu_env, tcg_rs, addr, t1, t2);
2180 }
2181 tcg_temp_free_i32(tcg_rs);
2182 } else {
2183 TCGv_i64 d1 = tcg_temp_new_i64();
2184 TCGv_i64 d2 = tcg_temp_new_i64();
2185 TCGv_i64 a2 = tcg_temp_new_i64();
2186 TCGv_i64 c1 = tcg_temp_new_i64();
2187 TCGv_i64 c2 = tcg_temp_new_i64();
2188 TCGv_i64 zero = tcg_const_i64(0);
2189
2190
2191 tcg_gen_qemu_ld_i64(d1, addr, memidx,
2192 MO_64 | MO_ALIGN_16 | s->be_data);
2193 tcg_gen_addi_i64(a2, addr, 8);
2194 tcg_gen_qemu_ld_i64(d2, addr, memidx, MO_64 | s->be_data);
2195
2196
2197 tcg_gen_setcond_i64(TCG_COND_EQ, c1, d1, s1);
2198 tcg_gen_setcond_i64(TCG_COND_EQ, c2, d2, s2);
2199 tcg_gen_and_i64(c2, c2, c1);
2200
2201
2202 tcg_gen_movcond_i64(TCG_COND_NE, c1, c2, zero, t1, d1);
2203 tcg_gen_movcond_i64(TCG_COND_NE, c2, c2, zero, t2, d2);
2204 tcg_gen_qemu_st_i64(c1, addr, memidx, MO_64 | s->be_data);
2205 tcg_gen_qemu_st_i64(c2, a2, memidx, MO_64 | s->be_data);
2206 tcg_temp_free_i64(a2);
2207 tcg_temp_free_i64(c1);
2208 tcg_temp_free_i64(c2);
2209 tcg_temp_free_i64(zero);
2210
2211
2212 tcg_gen_mov_i64(s1, d1);
2213 tcg_gen_mov_i64(s2, d2);
2214 tcg_temp_free_i64(d1);
2215 tcg_temp_free_i64(d2);
2216 }
2217}
2218
2219
2220
2221
/* Compute the ISS.SF (sixty-four) flag for a load/store single
 * register: true when the transferred register is accessed at 64-bit
 * width.  For sign-extending loads the destination width is encoded
 * in opc bit 0 (clear -> 64-bit Xt, set -> 32-bit Wt); otherwise only
 * a size-3 (doubleword) access uses a 64-bit register.
 */
static bool disas_ldst_compute_iss_sf(int size, bool is_signed, int opc)
{
    if (is_signed) {
        /* opc<0> == 0 means the destination is the 64-bit Xt */
        return (opc & 1) == 0;
    }
    /* unsigned: 64-bit register only for a doubleword access */
    return size == 3;
}
2234
2235
2236
2237
2238
2239
2240
2241
2242
2243
2244
2245
2246
2247
/* Load/store exclusive
 *
 *  31 30 29         24  23  22   21  20  16  15  14   10 9    5 4    0
 * +-----+-------------+----+---+----+------+----+-------+------+------+
 * | sz  | 0 0 1 0 0 0 | o2 | L | o1 |  Rs  | o0 |  Rt2  |  Rn  | Rt   |
 * +-----+-------------+----+---+----+------+----+-------+------+------+
 *
 *  sz: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64 bit
 *   L: 0 -> store, 1 -> load
 *  o2: 0 -> exclusive, 1 -> not
 *  o1: 0 -> single register, 1 -> register pair
 *  o0: 1 -> load-acquire/store-release
 *
 * The switch below is on o2:L:o1:o0.
 */
static void disas_ldst_excl(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rt2 = extract32(insn, 10, 5);
    int rs = extract32(insn, 16, 5);
    int is_lasr = extract32(insn, 15, 1);
    int o2_L_o1_o0 = extract32(insn, 21, 3) * 2 | is_lasr;
    int size = extract32(insn, 30, 2);
    TCGv_i64 tcg_addr;

    switch (o2_L_o1_o0) {
    case 0x0: /* STXR */
    case 0x1: /* STLXR */
        if (rn == 31) {
            gen_check_sp_alignment(s);
        }
        if (is_lasr) {
            /* store-release: barrier before the store */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
        }
        tcg_addr = read_cpu_reg_sp(s, rn, 1);
        gen_store_exclusive(s, rs, rt, rt2, tcg_addr, size, false);
        return;

    case 0x4: /* LDXR */
    case 0x5: /* LDAXR */
        if (rn == 31) {
            gen_check_sp_alignment(s);
        }
        tcg_addr = read_cpu_reg_sp(s, rn, 1);
        s->is_ldex = true;
        gen_load_exclusive(s, rt, rt2, tcg_addr, size, false);
        if (is_lasr) {
            /* load-acquire: barrier after the load */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
        }
        return;

    case 0x9: /* STLR */
        /* Generate ISS for non-exclusive accesses including LASR.  */
        if (rn == 31) {
            gen_check_sp_alignment(s);
        }
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
        tcg_addr = read_cpu_reg_sp(s, rn, 1);
        do_gpr_st(s, cpu_reg(s, rt), tcg_addr, size, true, rt,
                  disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
        return;

    case 0xd: /* LDAR */
        /* Generate ISS for non-exclusive accesses including LASR.  */
        if (rn == 31) {
            gen_check_sp_alignment(s);
        }
        tcg_addr = read_cpu_reg_sp(s, rn, 1);
        do_gpr_ld(s, cpu_reg(s, rt), tcg_addr, size, false, false, true, rt,
                  disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
        return;

    case 0x2: case 0x3: /* CASP / STXP */
        if (size & 2) { /* STXP / STLXP */
            if (rn == 31) {
                gen_check_sp_alignment(s);
            }
            if (is_lasr) {
                tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
            }
            tcg_addr = read_cpu_reg_sp(s, rn, 1);
            gen_store_exclusive(s, rs, rt, rt2, tcg_addr, size, true);
            return;
        }
        if (rt2 == 31
            && ((rt | rs) & 1) == 0
            && arm_dc_feature(s, ARM_FEATURE_V8_ATOMICS)) {
            /* CASP / CASPL */
            gen_compare_and_swap_pair(s, rs, rt, rn, size | 2);
            return;
        }
        break;

    case 0x6: case 0x7: /* CASPA / LDXP */
        if (size & 2) { /* LDXP / LDAXP */
            if (rn == 31) {
                gen_check_sp_alignment(s);
            }
            tcg_addr = read_cpu_reg_sp(s, rn, 1);
            s->is_ldex = true;
            gen_load_exclusive(s, rt, rt2, tcg_addr, size, true);
            if (is_lasr) {
                tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
            }
            return;
        }
        if (rt2 == 31
            && ((rt | rs) & 1) == 0
            && arm_dc_feature(s, ARM_FEATURE_V8_ATOMICS)) {
            /* CASPA / CASPAL */
            gen_compare_and_swap_pair(s, rs, rt, rn, size | 2);
            return;
        }
        break;

    case 0xa: /* CAS */
    case 0xb: /* CASL */
    case 0xe: /* CASA */
    case 0xf: /* CASAL */
        if (rt2 == 31 && arm_dc_feature(s, ARM_FEATURE_V8_ATOMICS)) {
            gen_compare_and_swap(s, rs, rt, rn, size);
            return;
        }
        break;
    }
    unallocated_encoding(s);
}
2362
2363
2364
2365
2366
2367
2368
2369
2370
2371
2372
2373
2374
2375
/*
 * Load register (literal)
 *
 *  31 30 29   27  26 25 24 23                5 4     0
 * +-----+-------+---+-----+-------------------+-------+
 * | opc | 0 1 1 | V | 0 0 |     imm19         |  Rt   |
 * +-----+-------+---+-----+-------------------+-------+
 *
 * V: 1 -> vector (simd/fp)
 * opc (non-vector): 00 -> 32 bit, 01 -> 64 bit,
 *                   10 -> 32 bit signed, 11 -> prefetch
 * opc (vector): 00 -> 32 bit, 01 -> 64 bit, 10 -> 128 bit (11 unallocated)
 */
static void disas_ld_lit(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int64_t imm = sextract32(insn, 5, 19) << 2;
    bool is_vector = extract32(insn, 26, 1);
    int opc = extract32(insn, 30, 2);
    bool is_signed = false;
    int size = 2;
    TCGv_i64 tcg_rt, tcg_addr;

    if (is_vector) {
        if (opc == 3) {
            unallocated_encoding(s);
            return;
        }
        size = 2 + opc;
        if (!fp_access_check(s)) {
            return;
        }
    } else {
        if (opc == 3) {
            /* PRFM (literal) : prefetch - implemented as a no-op */
            return;
        }
        size = 2 + extract32(opc, 0, 1);
        is_signed = extract32(opc, 1, 1);
    }

    tcg_rt = cpu_reg(s, rt);

    /* the offset is relative to this insn's own address (pc - 4) */
    tcg_addr = tcg_const_i64((s->pc - 4) + imm);
    if (is_vector) {
        do_fp_ld(s, rt, tcg_addr, size);
    } else {
        /* Only unsigned 32bit loads target 32bit registers.  */
        bool iss_sf = opc != 0;

        do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, false,
                  true, rt, iss_sf, false);
    }
    tcg_temp_free_i64(tcg_addr);
}
2418
2419
2420
2421
2422
2423
2424
2425
2426
2427
2428
2429
2430
2431
2432
2433
2434
2435
2436
2437
2438
2439
2440
2441
2442
2443
2444
2445
2446
/*
 * Load/store pair: LDP/STP (GPR and SIMD&FP), LDPSW,
 * LDNP/STNP (non-temporal hint variants).
 *
 *  31 30 29   27  26  25 24   23  22 21   15 14   10 9    5 4    0
 * +-----+-------+---+---+-------+---+-----------------------------+
 * | opc | 1 0 1 | V | 0 | index | L |  imm7 |  Rt2  |  Rn  | Rt   |
 * +-----+-------+---+---+-------+---+-------+-------+------+------+
 *
 * opc: LDP/STP/LDNP/STNP        00 -> 32 bit, 10 -> 64 bit
 *      LDPSW                    01
 *      LDP/STP/LDNP/STNP (SIMD) 00 -> 32 bit, 01 -> 64 bit, 10 -> 128 bit
 *   V: 0 -> GPR, 1 -> Vector
 * index: 00 -> signed offset with non-temporal hint, 01 -> post-index,
 *        10 -> signed offset, 11 -> pre-index
 *   L: 0 -> Store 1 -> Load
 * imm7: signed offset, scaled by the access size
 */
static void disas_ldst_pair(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rt2 = extract32(insn, 10, 5);
    uint64_t offset = sextract64(insn, 15, 7);
    int index = extract32(insn, 23, 2);
    bool is_vector = extract32(insn, 26, 1);
    bool is_load = extract32(insn, 22, 1);
    int opc = extract32(insn, 30, 2);

    bool is_signed = false;
    bool postindex = false;
    bool wback = false;

    TCGv_i64 tcg_addr;
    int size;

    if (opc == 3) {
        unallocated_encoding(s);
        return;
    }

    if (is_vector) {
        size = 2 + opc;
    } else {
        size = 2 + extract32(opc, 1, 1);
        is_signed = extract32(opc, 0, 1);
        if (!is_load && is_signed) {
            /* there is no signed store pair */
            unallocated_encoding(s);
            return;
        }
    }

    switch (index) {
    case 1: /* post-index */
        postindex = true;
        wback = true;
        break;
    case 0:
        /* signed offset with "non-temporal hint".  Since we don't
         * emulate caches we treat it like a normal signed-offset
         * access, except that LDPSW has no non-temporal variant.
         */
        if (is_signed) {
            unallocated_encoding(s);
            return;
        }
        postindex = false;
        break;
    case 2: /* signed offset, no writeback */
        postindex = false;
        break;
    case 3: /* pre-index */
        postindex = false;
        wback = true;
        break;
    }

    if (is_vector && !fp_access_check(s)) {
        return;
    }

    /* imm7 is scaled by the access size */
    offset <<= size;

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    tcg_addr = read_cpu_reg_sp(s, rn, 1);

    if (!postindex) {
        tcg_gen_addi_i64(tcg_addr, tcg_addr, offset);
    }

    if (is_vector) {
        if (is_load) {
            do_fp_ld(s, rt, tcg_addr, size);
        } else {
            do_fp_st(s, rt, tcg_addr, size);
        }
        tcg_gen_addi_i64(tcg_addr, tcg_addr, 1 << size);
        if (is_load) {
            do_fp_ld(s, rt2, tcg_addr, size);
        } else {
            do_fp_st(s, rt2, tcg_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        TCGv_i64 tcg_rt2 = cpu_reg(s, rt2);

        if (is_load) {
            TCGv_i64 tmp = tcg_temp_new_i64();

            /* Do not modify tcg_rt before recognizing any exception
             * from the second load: the first value goes through a
             * temp so Rt stays intact if the Rt2 load faults.
             * (Pair accesses are not ISS-valid: iss fields false.)
             */
            do_gpr_ld(s, tmp, tcg_addr, size, is_signed, false,
                      false, 0, false, false);
            tcg_gen_addi_i64(tcg_addr, tcg_addr, 1 << size);
            do_gpr_ld(s, tcg_rt2, tcg_addr, size, is_signed, false,
                      false, 0, false, false);

            tcg_gen_mov_i64(tcg_rt, tmp);
            tcg_temp_free_i64(tmp);
        } else {
            do_gpr_st(s, tcg_rt, tcg_addr, size,
                      false, 0, false, false);
            tcg_gen_addi_i64(tcg_addr, tcg_addr, 1 << size);
            do_gpr_st(s, tcg_rt2, tcg_addr, size,
                      false, 0, false, false);
        }
    }

    if (wback) {
        /* tcg_addr currently points at the second element; rewind to
         * the base (+ offset for post-index) before writing back.
         */
        if (postindex) {
            tcg_gen_addi_i64(tcg_addr, tcg_addr, offset - (1 << size));
        } else {
            tcg_gen_subi_i64(tcg_addr, tcg_addr, 1 << size);
        }
        tcg_gen_mov_i64(cpu_reg_sp(s, rn), tcg_addr);
    }
}
2572
2573
2574
2575
2576
2577
2578
2579
2580
2581
2582
2583
2584
2585
2586
2587
2588
/*
 * Load/store (immediate post-indexed / pre-indexed / unscaled immediate)
 *
 * 31 30 29   27  26 25 24 23 22 21  20    12 11 10 9    5 4    0
 * +----+-------+---+-----+-----+---+--------+-----+------+------+
 * |size| 1 1 1 | V | 0 0 | opc | 0 |  imm9  | idx |  Rn  |  Rt  |
 * +----+-------+---+-----+-----+---+--------+-----+------+------+
 *
 * idx = 01 -> post-indexed, 11 -> pre-indexed,
 *       00 -> unscaled imm. (no writeback), 10 -> unprivileged
 * V = 0 -> non-vector
 * size: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64 bit
 * opc: 00 -> store, 01 -> load unsigned,
 *      10 -> load signed to 64 bit, 11 -> load signed to 32 bit
 */
static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
                                int opc,
                                int size,
                                int rt,
                                bool is_vector)
{
    int rn = extract32(insn, 5, 5);
    int imm9 = sextract32(insn, 12, 9);
    int idx = extract32(insn, 10, 2);
    bool is_signed = false;
    bool is_store = false;
    bool is_extended = false;
    bool is_unpriv = (idx == 2);
    bool iss_valid = !is_vector;
    bool post_index;
    bool writeback;

    TCGv_i64 tcg_addr;

    if (is_vector) {
        /* vector size is opc<1>:size<1:0>; > 4 (128-bit) is invalid */
        size |= (opc & 2) << 1;
        if (size > 4 || is_unpriv) {
            unallocated_encoding(s);
            return;
        }
        is_store = ((opc & 1) == 0);
        if (!fp_access_check(s)) {
            return;
        }
    } else {
        if (size == 3 && opc == 2) {
            /* PRFM - prefetch: treated as a no-op, but the
             * unprivileged form is unallocated.
             */
            if (is_unpriv) {
                unallocated_encoding(s);
                return;
            }
            return;
        }
        if (opc == 3 && size > 1) {
            unallocated_encoding(s);
            return;
        }
        is_store = (opc == 0);
        is_signed = extract32(opc, 1, 1);
        is_extended = (size < 3) && extract32(opc, 0, 1);
    }

    switch (idx) {
    case 0: /* unscaled immediate */
    case 2: /* unprivileged */
        post_index = false;
        writeback = false;
        break;
    case 1: /* post-index */
        post_index = true;
        writeback = true;
        break;
    case 3: /* pre-index */
        post_index = false;
        writeback = true;
        break;
    default:
        g_assert_not_reached();
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    tcg_addr = read_cpu_reg_sp(s, rn, 1);

    if (!post_index) {
        tcg_gen_addi_i64(tcg_addr, tcg_addr, imm9);
    }

    if (is_vector) {
        if (is_store) {
            do_fp_st(s, rt, tcg_addr, size);
        } else {
            do_fp_ld(s, rt, tcg_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        /* LDTR/STTR (unprivileged) use the EL0 translation regime */
        int memidx = is_unpriv ? get_a64_user_mem_index(s) : get_mem_index(s);
        bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);

        if (is_store) {
            do_gpr_st_memidx(s, tcg_rt, tcg_addr, size, memidx,
                             iss_valid, rt, iss_sf, false);
        } else {
            do_gpr_ld_memidx(s, tcg_rt, tcg_addr, size,
                             is_signed, is_extended, memidx,
                             iss_valid, rt, iss_sf, false);
        }
    }

    if (writeback) {
        TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
        if (post_index) {
            tcg_gen_addi_i64(tcg_addr, tcg_addr, imm9);
        }
        tcg_gen_mov_i64(tcg_rn, tcg_addr);
    }
}
2692
2693
2694
2695
2696
2697
2698
2699
2700
2701
2702
2703
2704
2705
2706
2707
2708
2709
2710
2711
2712
2713
/*
 * Load/store (register offset)
 *
 * 31 30 29   27  26 25 24 23 22 21  20  16 15 13 12 11 10 9  5 4  0
 * +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+
 * |size| 1 1 1 | V | 0 0 | opc | 1 |  Rm  | opt | S| 1 0 | Rn | Rt |
 * +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+
 *
 * For non-vector:
 *   size: 00 -> byte, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64 bit
 *   opc: 00 -> store, 01 -> load unsigned,
 *        10 -> load signed 64, 11 -> load signed 32
 * For vector:
 *   size is opc<1>:size<1:0>, so 100 -> 128 bit; 110/111 unallocated
 *   opc<0>: 0 -> store, 1 -> load
 * V: 1 -> vector/simd
 * opt: extend encoding for the offset register Rm
 * S: 1 -> scale the offset by the access size
 */
static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn,
                                   int opc,
                                   int size,
                                   int rt,
                                   bool is_vector)
{
    int rn = extract32(insn, 5, 5);
    int shift = extract32(insn, 12, 1);
    int rm = extract32(insn, 16, 5);
    int opt = extract32(insn, 13, 3);
    bool is_signed = false;
    bool is_store = false;
    bool is_extended = false;

    TCGv_i64 tcg_rm;
    TCGv_i64 tcg_addr;

    if (extract32(opt, 1, 1) == 0) {
        /* only extend options with opt<1> set are valid */
        unallocated_encoding(s);
        return;
    }

    if (is_vector) {
        size |= (opc & 2) << 1;
        if (size > 4) {
            unallocated_encoding(s);
            return;
        }
        is_store = !extract32(opc, 0, 1);
        if (!fp_access_check(s)) {
            return;
        }
    } else {
        if (size == 3 && opc == 2) {
            /* PRFM - prefetch: treated as a no-op */
            return;
        }
        if (opc == 3 && size > 1) {
            unallocated_encoding(s);
            return;
        }
        is_store = (opc == 0);
        is_signed = extract32(opc, 1, 1);
        is_extended = (size < 3) && extract32(opc, 0, 1);
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    tcg_addr = read_cpu_reg_sp(s, rn, 1);

    /* extend/shift Rm per opt, scaled by size when S == 1 */
    tcg_rm = read_cpu_reg(s, rm, 1);
    ext_and_shift_reg(tcg_rm, tcg_rm, opt, shift ? size : 0);

    tcg_gen_add_i64(tcg_addr, tcg_addr, tcg_rm);

    if (is_vector) {
        if (is_store) {
            do_fp_st(s, rt, tcg_addr, size);
        } else {
            do_fp_ld(s, rt, tcg_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
        if (is_store) {
            do_gpr_st(s, tcg_rt, tcg_addr, size,
                      true, rt, iss_sf, false);
        } else {
            do_gpr_ld(s, tcg_rt, tcg_addr, size,
                      is_signed, is_extended,
                      true, rt, iss_sf, false);
        }
    }
}
2789
2790
2791
2792
2793
2794
2795
2796
2797
2798
2799
2800
2801
2802
2803
2804
2805
2806
/*
 * Load/store (unsigned immediate)
 *
 * 31 30 29   27  26 25 24 23 22 21        10 9     5 4    0
 * +----+-------+---+-----+-----+------------+-------+------+
 * |size| 1 1 1 | V | 0 1 | opc |   imm12    |  Rn   |  Rt  |
 * +----+-------+---+-----+-----+------------+-------+------+
 *
 * For non-vector:
 *   size: 00 -> byte, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64 bit
 *   opc: 00 -> store, 01 -> load unsigned,
 *        10 -> load signed 64, 11 -> load signed 32
 * For vector:
 *   size is opc<1>:size<1:0>, so 100 -> 128 bit; 110/111 unallocated
 *   opc<0>: 0 -> store, 1 -> load
 * imm12: unsigned offset, scaled by the access size
 */
static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn,
                                        int opc,
                                        int size,
                                        int rt,
                                        bool is_vector)
{
    int rn = extract32(insn, 5, 5);
    unsigned int imm12 = extract32(insn, 10, 12);
    unsigned int offset;

    TCGv_i64 tcg_addr;

    bool is_store;
    bool is_signed = false;
    bool is_extended = false;

    if (is_vector) {
        size |= (opc & 2) << 1;
        if (size > 4) {
            unallocated_encoding(s);
            return;
        }
        is_store = !extract32(opc, 0, 1);
        if (!fp_access_check(s)) {
            return;
        }
    } else {
        if (size == 3 && opc == 2) {
            /* PRFM - prefetch: treated as a no-op */
            return;
        }
        if (opc == 3 && size > 1) {
            unallocated_encoding(s);
            return;
        }
        is_store = (opc == 0);
        is_signed = extract32(opc, 1, 1);
        is_extended = (size < 3) && extract32(opc, 0, 1);
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    tcg_addr = read_cpu_reg_sp(s, rn, 1);
    /* imm12 is scaled by the access size */
    offset = imm12 << size;
    tcg_gen_addi_i64(tcg_addr, tcg_addr, offset);

    if (is_vector) {
        if (is_store) {
            do_fp_st(s, rt, tcg_addr, size);
        } else {
            do_fp_ld(s, rt, tcg_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
        if (is_store) {
            do_gpr_st(s, tcg_rt, tcg_addr, size,
                      true, rt, iss_sf, false);
        } else {
            do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, is_extended,
                      true, rt, iss_sf, false);
        }
    }
}
2872
2873
2874
2875
2876
2877
2878
2879
2880
2881
2882
2883
2884
2885
2886
/*
 * Atomic memory operations (ARMv8.1 atomics): LDADD, LDCLR, LDEOR,
 * LDSET, LDSMAX, LDSMIN, LDUMAX, LDUMIN and SWP.  Rs supplies the
 * operand, Rt receives the previous memory contents.
 */
static void disas_ldst_atomic(DisasContext *s, uint32_t insn,
                              int size, int rt, bool is_vector)
{
    int rs = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int o3_opc = extract32(insn, 12, 4);
    int feature = ARM_FEATURE_V8_ATOMICS;
    TCGv_i64 tcg_rn, tcg_rs;
    AtomicThreeOpFn *fn;

    if (is_vector) {
        unallocated_encoding(s);
        return;
    }
    /* Note: case labels are octal, to match the o3:opc bit pattern */
    switch (o3_opc) {
    case 000: /* LDADD */
        fn = tcg_gen_atomic_fetch_add_i64;
        break;
    case 001: /* LDCLR */
        fn = tcg_gen_atomic_fetch_and_i64;
        break;
    case 002: /* LDEOR */
        fn = tcg_gen_atomic_fetch_xor_i64;
        break;
    case 003: /* LDSET */
        fn = tcg_gen_atomic_fetch_or_i64;
        break;
    case 004: /* LDSMAX */
        fn = tcg_gen_atomic_fetch_smax_i64;
        break;
    case 005: /* LDSMIN */
        fn = tcg_gen_atomic_fetch_smin_i64;
        break;
    case 006: /* LDUMAX */
        fn = tcg_gen_atomic_fetch_umax_i64;
        break;
    case 007: /* LDUMIN */
        fn = tcg_gen_atomic_fetch_umin_i64;
        break;
    case 010: /* SWP */
        fn = tcg_gen_atomic_xchg_i64;
        break;
    default:
        unallocated_encoding(s);
        return;
    }
    if (!arm_dc_feature(s, feature)) {
        unallocated_encoding(s);
        return;
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    tcg_rn = cpu_reg_sp(s, rn);
    tcg_rs = read_cpu_reg(s, rs, true);

    if (o3_opc == 1) { /* LDCLR: bit-clear == AND with inverted operand */
        tcg_gen_not_i64(tcg_rs, tcg_rs);
    }

    /* The tcg atomic primitives are all full barriers, so we can
     * ignore the Acquire and Release bits of this instruction (which
     * are therefore never extracted above).
     */
    fn(cpu_reg(s, rt), tcg_rn, tcg_rs, get_mem_index(s),
       s->be_data | size | MO_ALIGN);
}
2954
2955
2956static void disas_ldst_reg(DisasContext *s, uint32_t insn)
2957{
2958 int rt = extract32(insn, 0, 5);
2959 int opc = extract32(insn, 22, 2);
2960 bool is_vector = extract32(insn, 26, 1);
2961 int size = extract32(insn, 30, 2);
2962
2963 switch (extract32(insn, 24, 2)) {
2964 case 0:
2965 if (extract32(insn, 21, 1) == 0) {
2966
2967
2968
2969
2970 disas_ldst_reg_imm9(s, insn, opc, size, rt, is_vector);
2971 return;
2972 }
2973 switch (extract32(insn, 10, 2)) {
2974 case 0:
2975 disas_ldst_atomic(s, insn, size, rt, is_vector);
2976 return;
2977 case 2:
2978 disas_ldst_reg_roffset(s, insn, opc, size, rt, is_vector);
2979 return;
2980 }
2981 break;
2982 case 1:
2983 disas_ldst_reg_unsigned_imm(s, insn, opc, size, rt, is_vector);
2984 return;
2985 }
2986 unallocated_encoding(s);
2987}
2988
2989
2990
2991
2992
2993
2994
2995
2996
2997
2998
2999
3000
3001
3002
3003
3004
3005
3006
/*
 * AdvSIMD load/store multiple structures (LD1-LD4/ST1-ST4 and their
 * post-indexed forms): transfer 'selem'-element structures to/from
 * 'rpt' runs of consecutive vector registers.
 */
static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int size = extract32(insn, 10, 2);
    int opcode = extract32(insn, 12, 4);
    bool is_store = !extract32(insn, 22, 1);
    bool is_postidx = extract32(insn, 23, 1);
    bool is_q = extract32(insn, 30, 1);
    TCGv_i64 tcg_addr, tcg_rn;

    int ebytes = 1 << size;                         /* bytes per element */
    int elements = (is_q ? 128 : 64) / (8 << size); /* elements per vector */
    int rpt;    /* num register runs to iterate */
    int selem;  /* structure elements */
    int r;

    if (extract32(insn, 31, 1) || extract32(insn, 21, 1)) {
        unallocated_encoding(s);
        return;
    }

    /* Convert the opcode field into rpt (number of register runs) and
     * selem (elements per structure).
     */
    switch (opcode) {
    case 0x0: /* LD4/ST4 */
        rpt = 1;
        selem = 4;
        break;
    case 0x2: /* LD1/ST1 (4 registers) */
        rpt = 4;
        selem = 1;
        break;
    case 0x4: /* LD3/ST3 */
        rpt = 1;
        selem = 3;
        break;
    case 0x6: /* LD1/ST1 (3 registers) */
        rpt = 3;
        selem = 1;
        break;
    case 0x7: /* LD1/ST1 (1 register) */
        rpt = 1;
        selem = 1;
        break;
    case 0x8: /* LD2/ST2 */
        rpt = 1;
        selem = 2;
        break;
    case 0xa: /* LD1/ST1 (2 registers) */
        rpt = 2;
        selem = 1;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (size == 3 && !is_q && selem != 1) {
        /* reserved: .1D only exists for the single-register LD1/ST1 */
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    /* Work on a copy of the base address so writeback (below) sees the
     * original register value.
     */
    tcg_rn = cpu_reg_sp(s, rn);
    tcg_addr = tcg_temp_new_i64();
    tcg_gen_mov_i64(tcg_addr, tcg_rn);

    for (r = 0; r < rpt; r++) {
        int e;
        for (e = 0; e < elements; e++) {
            int tt = (rt + r) % 32;
            int xs;
            for (xs = 0; xs < selem; xs++) {
                if (is_store) {
                    do_vec_st(s, tt, e, tcg_addr, size);
                } else {
                    do_vec_ld(s, tt, e, tcg_addr, size);

                    /* For non-quad operations, setting a slice of the low
                     * 64 bits of the register clears the high 64 bits (in
                     * the ARM ARM pseudocode this is implicit in the fact
                     * that 'rval' is a 64 bit wide variable).  We optimize
                     * by noticing that we only need to do the clearing the
                     * first time we touch a given register.
                     */
                    if (e == 0 && (r == 0 || xs == selem - 1)) {
                        clear_vec_high(s, is_q, tt);
                    }
                }
                tcg_gen_addi_i64(tcg_addr, tcg_addr, ebytes);
                tt = (tt + 1) % 32;
            }
        }
    }

    if (is_postidx) {
        int rm = extract32(insn, 16, 5);
        if (rm == 31) {
            /* rm==31 means post-index by the total transfer size, which
             * is exactly what tcg_addr has accumulated.
             */
            tcg_gen_mov_i64(tcg_rn, tcg_addr);
        } else {
            tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
        }
    }
    tcg_temp_free_i64(tcg_addr);
}
3121
3122
3123
3124
3125
3126
3127
3128
3129
3130
3131
3132
3133
3134
3135
3136
3137
3138
3139
3140
3141
3142
3143
/*
 * AdvSIMD load/store single structure (LD1-LD4/ST1-ST4 to one element
 * of the vector), including the load-and-replicate LD1R-LD4R forms,
 * with optional post-index writeback.
 */
static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int size = extract32(insn, 10, 2);
    int S = extract32(insn, 12, 1);
    int opc = extract32(insn, 13, 3);
    int R = extract32(insn, 21, 1);
    int is_load = extract32(insn, 22, 1);
    int is_postidx = extract32(insn, 23, 1);
    int is_q = extract32(insn, 30, 1);

    int scale = extract32(opc, 1, 2);          /* log2(element size) */
    int selem = (extract32(opc, 0, 1) << 1 | R) + 1;
    bool replicate = false;
    /* The widest form packs the element index as Q:S:size; narrower
     * element sizes use fewer of the low bits (shifted out below).
     */
    int index = is_q << 3 | S << 2 | size;
    int ebytes, xs;
    TCGv_i64 tcg_addr, tcg_rn;

    switch (scale) {
    case 3:
        /* LDxR: load and replicate; stores and S != 0 are reserved */
        if (!is_load || S) {
            unallocated_encoding(s);
            return;
        }
        scale = size;
        replicate = true;
        break;
    case 0:
        /* byte element: index is Q:S:size */
        break;
    case 1:
        /* halfword element: size<0> must be zero, index is Q:S:size<1> */
        if (extract32(size, 0, 1)) {
            unallocated_encoding(s);
            return;
        }
        index >>= 1;
        break;
    case 2:
        /* word or doubleword element, selected by size<0> */
        if (extract32(size, 1, 1)) {
            unallocated_encoding(s);
            return;
        }
        if (!extract32(size, 0, 1)) {
            /* word: index is Q:S */
            index >>= 2;
        } else {
            if (S) {
                unallocated_encoding(s);
                return;
            }
            /* doubleword: index is Q only */
            index >>= 3;
            scale = 3;
        }
        break;
    default:
        g_assert_not_reached();
    }

    if (!fp_access_check(s)) {
        return;
    }

    ebytes = 1 << scale;

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    /* Use a copy of the base so the writeback below still sees the
     * original register contents.
     */
    tcg_rn = cpu_reg_sp(s, rn);
    tcg_addr = tcg_temp_new_i64();
    tcg_gen_mov_i64(tcg_addr, tcg_rn);

    for (xs = 0; xs < selem; xs++) {
        if (replicate) {
            /* Load and replicate to all elements: load one element and
             * smear it across the 64-bit lane with a multiply.
             */
            uint64_t mulconst;
            TCGv_i64 tcg_tmp = tcg_temp_new_i64();

            tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr,
                                get_mem_index(s), s->be_data + scale);
            switch (scale) {
            case 0:
                mulconst = 0x0101010101010101ULL;
                break;
            case 1:
                mulconst = 0x0001000100010001ULL;
                break;
            case 2:
                mulconst = 0x0000000100000001ULL;
                break;
            case 3:
                /* 64-bit element already fills the lane */
                mulconst = 0;
                break;
            default:
                g_assert_not_reached();
            }
            if (mulconst) {
                tcg_gen_muli_i64(tcg_tmp, tcg_tmp, mulconst);
            }
            write_vec_element(s, tcg_tmp, rt, 0, MO_64);
            if (is_q) {
                write_vec_element(s, tcg_tmp, rt, 1, MO_64);
            }
            tcg_temp_free_i64(tcg_tmp);
            clear_vec_high(s, is_q, rt);
        } else {
            /* Load/store a single element into/from the given index */
            if (is_load) {
                do_vec_ld(s, rt, index, tcg_addr, scale);
            } else {
                do_vec_st(s, rt, index, tcg_addr, scale);
            }
        }
        tcg_gen_addi_i64(tcg_addr, tcg_addr, ebytes);
        rt = (rt + 1) % 32;
    }

    if (is_postidx) {
        int rm = extract32(insn, 16, 5);
        if (rm == 31) {
            /* rm==31: post-index by the total transfer size */
            tcg_gen_mov_i64(tcg_rn, tcg_addr);
        } else {
            tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
        }
    }
    tcg_temp_free_i64(tcg_addr);
}
3270
3271
3272static void disas_ldst(DisasContext *s, uint32_t insn)
3273{
3274 switch (extract32(insn, 24, 6)) {
3275 case 0x08:
3276 disas_ldst_excl(s, insn);
3277 break;
3278 case 0x18: case 0x1c:
3279 disas_ld_lit(s, insn);
3280 break;
3281 case 0x28: case 0x29:
3282 case 0x2c: case 0x2d:
3283 disas_ldst_pair(s, insn);
3284 break;
3285 case 0x38: case 0x39:
3286 case 0x3c: case 0x3d:
3287 disas_ldst_reg(s, insn);
3288 break;
3289 case 0x0c:
3290 disas_ldst_multiple_struct(s, insn);
3291 break;
3292 case 0x0d:
3293 disas_ldst_single_struct(s, insn);
3294 break;
3295 default:
3296 unallocated_encoding(s);
3297 break;
3298 }
3299}
3300
3301
3302
3303
3304
3305
3306
3307static void disas_pc_rel_adr(DisasContext *s, uint32_t insn)
3308{
3309 unsigned int page, rd;
3310 uint64_t base;
3311 uint64_t offset;
3312
3313 page = extract32(insn, 31, 1);
3314
3315 offset = sextract64(insn, 5, 19);
3316 offset = offset << 2 | extract32(insn, 29, 2);
3317 rd = extract32(insn, 0, 5);
3318 base = s->pc - 4;
3319
3320 if (page) {
3321
3322 base &= ~0xfff;
3323 offset <<= 12;
3324 }
3325
3326 tcg_gen_movi_i64(cpu_reg(s, rd), base + offset);
3327}
3328
3329
3330
3331
3332
3333
3334
3335
3336
3337
3338
3339
3340
3341
/*
 * Add/subtract (immediate): ADD(S)/SUB(S) with a 12-bit unsigned
 * immediate optionally shifted left by 12.  Non-flag-setting forms may
 * use SP as source and destination.
 */
static void disas_add_sub_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    uint64_t imm = extract32(insn, 10, 12);
    int shift = extract32(insn, 22, 2);
    bool setflags = extract32(insn, 29, 1);
    bool sub_op = extract32(insn, 30, 1);
    bool is_64bit = extract32(insn, 31, 1);

    TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
    /* flag-setting forms write a GPR, not SP */
    TCGv_i64 tcg_rd = setflags ? cpu_reg(s, rd) : cpu_reg_sp(s, rd);
    TCGv_i64 tcg_result;

    switch (shift) {
    case 0x0:
        break;
    case 0x1: /* LSL #12 variant */
        imm <<= 12;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    tcg_result = tcg_temp_new_i64();
    if (!setflags) {
        if (sub_op) {
            tcg_gen_subi_i64(tcg_result, tcg_rn, imm);
        } else {
            tcg_gen_addi_i64(tcg_result, tcg_rn, imm);
        }
    } else {
        /* flag-setting path needs the immediate in a TCG value */
        TCGv_i64 tcg_imm = tcg_const_i64(imm);
        if (sub_op) {
            gen_sub_CC(is_64bit, tcg_result, tcg_rn, tcg_imm);
        } else {
            gen_add_CC(is_64bit, tcg_result, tcg_rn, tcg_imm);
        }
        tcg_temp_free_i64(tcg_imm);
    }

    if (is_64bit) {
        tcg_gen_mov_i64(tcg_rd, tcg_result);
    } else {
        /* 32-bit form: zero-extend into the 64-bit register */
        tcg_gen_ext32u_i64(tcg_rd, tcg_result);
    }

    tcg_temp_free_i64(tcg_result);
}
3392
3393
3394
3395
3396
/* Replicate the low @e bits of @mask across the full 64-bit width.
 * @e must be non-zero; callers pass a power of two between 1 and 64.
 */
static uint64_t bitfield_replicate(uint64_t mask, unsigned int e)
{
    assert(e != 0);
    for (; e < 64; e *= 2) {
        mask |= mask << e;
    }
    return mask;
}
3406
3407
/* Return a mask with the low @length bits set; valid for 1..64.
 * Implemented as a right shift of all-ones so that length == 64
 * works without shifting by the full type width.
 */
static inline uint64_t bitmask64(unsigned int length)
{
    assert(length - 1 < 64); /* i.e. 1 <= length <= 64 */
    return ~(uint64_t)0 >> (64 - length);
}
3413
3414
3415
3416
3417
3418
/* Decode the logical-immediate bitmask encoding used by the logical
 * (immediate) instructions: expand immn:imms:immr into a 64-bit mask.
 * Returns true on success, false if the encoding is reserved.
 */
bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn,
                            unsigned int imms, unsigned int immr)
{
    uint64_t mask;
    unsigned e, levels, s, r;
    int len;

    assert(immn < 2 && imms < 64 && immr < 64);

    /* The bit patterns we create here are 64 bit patterns which
     * are vectors of identical elements of size e = 2, 4, 8, 16, 32 or
     * 64 bits each.  Each element contains the same value: a run
     * of between 1 and e-1 non-zero bits, rotated within the
     * element by between 0 and e-1 bits.
     *
     * The element size and run length are encoded into immn (1 bit)
     * and imms (6 bits) as follows:
     * 64 bit elements: immn = 1, imms = <run length - 1>
     * 32 bit elements: immn = 0, imms = 0 : <run length - 1>
     * 16 bit elements: immn = 0, imms = 10 : <run length - 1>
     *  8 bit elements: immn = 0, imms = 110 : <run length - 1>
     *  4 bit elements: immn = 0, imms = 1110 : <run length - 1>
     *  2 bit elements: immn = 0, imms = 11110 : <run length - 1>
     * immn = 0, imms = 11111x is the only combination not covered;
     * it is reserved.  A run length field of all-ones is also reserved.
     * In all cases the rotation is by immr % e.
     */

    /* First determine the element size: the index of the highest set
     * bit in immn:NOT(imms) gives log2(e).
     */
    len = 31 - clz32((immn << 6) | (~imms & 0x3f));
    if (len < 1) {
        /* This is the reserved immn == 0, imms == 11111x case */
        return false;
    }
    e = 1 << len;

    levels = e - 1;
    s = imms & levels;  /* run length - 1, within the element */
    r = immr & levels;  /* rotation, within the element */

    if (s == levels) {
        /* <run length - 1> mustn't be all-ones */
        return false;
    }

    /* Create the value of one element: s+1 set bits rotated
     * by r within the element (which is e bits wide)...
     */
    mask = bitmask64(s + 1);
    if (r) {
        mask = (mask >> r) | (mask << (e - r));
        mask &= bitmask64(e);
    }
    /* ...then replicate the element over the whole 64 bit value */
    mask = bitfield_replicate(mask, e);
    *result = mask;
    return true;
}
3479
3480
3481
3482
3483
3484
3485
/*
 * Logical (immediate): AND, ORR, EOR, ANDS with a bitmask immediate
 * decoded by logic_imm_decode_wmask().
 */
static void disas_logic_imm(DisasContext *s, uint32_t insn)
{
    unsigned int sf, opc, is_n, immr, imms, rn, rd;
    TCGv_i64 tcg_rd, tcg_rn;
    uint64_t wmask;
    bool is_and = false;

    sf = extract32(insn, 31, 1);
    opc = extract32(insn, 29, 2);
    is_n = extract32(insn, 22, 1);
    immr = extract32(insn, 16, 6);
    imms = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    if (!sf && is_n) {
        /* 64-bit-only immediate patterns are reserved in 32-bit forms */
        unallocated_encoding(s);
        return;
    }

    if (opc == 0x3) { /* ANDS writes a GPR, never SP */
        tcg_rd = cpu_reg(s, rd);
    } else {          /* AND/ORR/EOR may target SP */
        tcg_rd = cpu_reg_sp(s, rd);
    }
    tcg_rn = cpu_reg(s, rn);

    if (!logic_imm_decode_wmask(&wmask, is_n, imms, immr)) {
        /* reserved value in the immediate field */
        unallocated_encoding(s);
        return;
    }

    if (!sf) {
        wmask &= 0xffffffff;
    }

    switch (opc) {
    case 0x3: /* ANDS */
    case 0x0: /* AND */
        tcg_gen_andi_i64(tcg_rd, tcg_rn, wmask);
        is_and = true;
        break;
    case 0x1: /* ORR */
        tcg_gen_ori_i64(tcg_rd, tcg_rn, wmask);
        break;
    case 0x2: /* EOR */
        tcg_gen_xori_i64(tcg_rd, tcg_rn, wmask);
        break;
    default:
        assert(FALSE); /* opc is a 2-bit field; all values handled */
        break;
    }

    if (!sf && !is_and) {
        /* For 32-bit ORR/EOR we must zero-extend the result; an AND
         * with a 32-bit mask already leaves the high bits clear.
         */
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }

    if (opc == 3) { /* ANDS: set NZCV from the result */
        gen_logic_CC(sf, tcg_rd);
    }
}
3551
3552
3553
3554
3555
3556
3557
3558
3559
3560
3561
3562
3563
/*
 * Move wide (immediate): MOVN, MOVZ, MOVK — place a 16-bit immediate
 * at one of four halfword positions, inverting (MOVN), zeroing the
 * rest (MOVZ), or keeping the other halfwords (MOVK).
 */
static void disas_movw_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    uint64_t imm = extract32(insn, 5, 16);
    int sf = extract32(insn, 31, 1);
    int opc = extract32(insn, 29, 2);
    int pos = extract32(insn, 21, 2) << 4;  /* halfword position in bits */
    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_imm;

    if (!sf && (pos >= 32)) {
        /* 32-bit forms only allow the low two halfword positions */
        unallocated_encoding(s);
        return;
    }

    switch (opc) {
    case 0: /* MOVN */
    case 2: /* MOVZ */
        imm <<= pos;
        if (opc == 0) {
            imm = ~imm;
        }
        if (!sf) {
            imm &= 0xffffffffu;
        }
        tcg_gen_movi_i64(tcg_rd, imm);
        break;
    case 3: /* MOVK: insert the halfword, preserving the rest of Rd */
        tcg_imm = tcg_const_i64(imm);
        tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_imm, pos, 16);
        tcg_temp_free_i64(tcg_imm);
        if (!sf) {
            tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
        }
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
3604
3605
3606
3607
3608
3609
3610
/*
 * Bitfield: SBFM (opc 0), BFM (opc 1), UBFM (opc 2) — the generalised
 * bitfield move underlying SBFX/UBFX/BFI/LSL(imm)/ASR(imm) etc.
 */
static void disas_bitfield(DisasContext *s, uint32_t insn)
{
    unsigned int sf, n, opc, ri, si, rn, rd, bitsize, pos, len;
    TCGv_i64 tcg_rd, tcg_tmp;

    sf = extract32(insn, 31, 1);
    opc = extract32(insn, 29, 2);
    n = extract32(insn, 22, 1);
    ri = extract32(insn, 16, 6);  /* rotate amount (immr) */
    si = extract32(insn, 10, 6);  /* source bit position (imms) */
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);
    bitsize = sf ? 64 : 32;

    if (sf != n || ri >= bitsize || si >= bitsize || opc > 2) {
        unallocated_encoding(s);
        return;
    }

    tcg_rd = cpu_reg(s, rd);

    /* Suppress the zero-extend for !sf.  Since RI and SI are constrained
     * to be smaller than bitsize, we'll never reference data outside the
     * low 32-bits anyway.
     */
    tcg_tmp = read_cpu_reg(s, rn, 1);

    /* Recognize simple(r) extractions.  */
    if (si >= ri) {
        /* Wd<s-r:0> = Wn<s:r> */
        len = (si - ri) + 1;
        if (opc == 0) { /* SBFM: ASR / SBFX / SXTB/H/W */
            tcg_gen_sextract_i64(tcg_rd, tcg_tmp, ri, len);
            goto done;
        } else if (opc == 2) { /* UBFM: UBFX / LSR / UXTB/H */
            tcg_gen_extract_i64(tcg_rd, tcg_tmp, ri, len);
            return;
        }
        /* opc == 1, BFXIL fall through to deposit */
        tcg_gen_extract_i64(tcg_tmp, tcg_tmp, ri, len);
        pos = 0;
    } else {
        /* Handle the ri > si case with a deposit
         * Wd<32+s-r,32-r> = Wn<s:0>
         */
        len = si + 1;
        pos = (bitsize - ri) & (bitsize - 1);
    }

    if (opc == 0 && len < ri) {
        /* SBFM: sign extend the destination field from len to fill
         * the balance of the word.  Let the deposit below insert all
         * of those sign bits.
         */
        tcg_gen_sextract_i64(tcg_tmp, tcg_tmp, 0, len);
        len = ri;
    }

    if (opc == 1) { /* BFM, BFXIL: insert into existing Rd */
        tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, pos, len);
    } else {
        /* SBFM or UBFM: We start with zero, and we haven't modified
         * any bits outside bitsize, therefore the zero-extension
         * below is unneeded.
         */
        tcg_gen_deposit_z_i64(tcg_rd, tcg_tmp, pos, len);
        return;
    }

 done:
    if (!sf) { /* zero extend final result */
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
}
3682
3683
3684
3685
3686
3687
3688
/*
 * Extract: EXTR — extract a register-width value from the
 * concatenation Rn:Rm starting at bit position imm.  EXTR with
 * rn == rm is the canonical encoding of ROR (immediate).
 */
static void disas_extract(DisasContext *s, uint32_t insn)
{
    unsigned int sf, n, rm, imm, rn, rd, bitsize, op21, op0;

    sf = extract32(insn, 31, 1);
    n = extract32(insn, 22, 1);
    rm = extract32(insn, 16, 5);
    imm = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);
    op21 = extract32(insn, 29, 2);
    op0 = extract32(insn, 21, 1);
    bitsize = sf ? 64 : 32;

    if (sf != n || op21 || op0 || imm >= bitsize) {
        unallocated_encoding(s);
    } else {
        TCGv_i64 tcg_rd, tcg_rm, tcg_rn;

        tcg_rd = cpu_reg(s, rd);

        if (unlikely(imm == 0)) {
            /* imm == 0: the result is simply Rm (tcg shift ops are
             * undefined for a zero shift of the full register width,
             * so special-case it).
             */
            if (sf) {
                tcg_gen_mov_i64(tcg_rd, cpu_reg(s, rm));
            } else {
                tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, rm));
            }
        } else if (rm == rn) { /* ROR (immediate) */
            tcg_rm = cpu_reg(s, rm);
            if (sf) {
                tcg_gen_rotri_i64(tcg_rd, tcg_rm, imm);
            } else {
                TCGv_i32 tmp = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(tmp, tcg_rm);
                tcg_gen_rotri_i32(tmp, tmp, imm);
                tcg_gen_extu_i32_i64(tcg_rd, tmp);
                tcg_temp_free_i32(tmp);
            }
        } else {
            /* general case: Rd = Rn:Rm >> imm, register width wide */
            tcg_rm = read_cpu_reg(s, rm, sf);
            tcg_rn = read_cpu_reg(s, rn, sf);
            tcg_gen_shri_i64(tcg_rm, tcg_rm, imm);
            tcg_gen_shli_i64(tcg_rn, tcg_rn, bitsize - imm);
            tcg_gen_or_i64(tcg_rd, tcg_rm, tcg_rn);
            if (!sf) {
                tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
            }
        }
    }
}
3742
3743
3744static void disas_data_proc_imm(DisasContext *s, uint32_t insn)
3745{
3746 switch (extract32(insn, 23, 6)) {
3747 case 0x20: case 0x21:
3748 disas_pc_rel_adr(s, insn);
3749 break;
3750 case 0x22: case 0x23:
3751 disas_add_sub_imm(s, insn);
3752 break;
3753 case 0x24:
3754 disas_logic_imm(s, insn);
3755 break;
3756 case 0x25:
3757 disas_movw_imm(s, insn);
3758 break;
3759 case 0x26:
3760 disas_bitfield(s, insn);
3761 break;
3762 case 0x27:
3763 disas_extract(s, insn);
3764 break;
3765 default:
3766 unallocated_encoding(s);
3767 break;
3768 }
3769}
3770
3771
3772
3773
3774
3775
/* Shift a TCGv src by TCGv shift_amount, put result in dst.
 * The caller must ensure the shift amount is already in range for the
 * operand width (0..31 or 0..63); no masking is done here.
 */
static void shift_reg(TCGv_i64 dst, TCGv_i64 src, int sf,
                      enum a64_shift_type shift_type, TCGv_i64 shift_amount)
{
    switch (shift_type) {
    case A64_SHIFT_TYPE_LSL:
        tcg_gen_shl_i64(dst, src, shift_amount);
        break;
    case A64_SHIFT_TYPE_LSR:
        tcg_gen_shr_i64(dst, src, shift_amount);
        break;
    case A64_SHIFT_TYPE_ASR:
        if (!sf) {
            /* 32-bit ASR: sign-extend first so the 64-bit arithmetic
             * shift propagates bit 31.
             */
            tcg_gen_ext32s_i64(dst, src);
        }
        tcg_gen_sar_i64(dst, sf ? src : dst, shift_amount);
        break;
    case A64_SHIFT_TYPE_ROR:
        if (sf) {
            tcg_gen_rotr_i64(dst, src, shift_amount);
        } else {
            /* 32-bit ROR must rotate within the low 32 bits */
            TCGv_i32 t0, t1;
            t0 = tcg_temp_new_i32();
            t1 = tcg_temp_new_i32();
            tcg_gen_extrl_i64_i32(t0, src);
            tcg_gen_extrl_i64_i32(t1, shift_amount);
            tcg_gen_rotr_i32(t0, t0, t1);
            tcg_gen_extu_i32_i64(dst, t0);
            tcg_temp_free_i32(t0);
            tcg_temp_free_i32(t1);
        }
        break;
    default:
        assert(FALSE); /* all shift types should be handled */
        break;
    }

    if (!sf) { /* zero extend final result */
        tcg_gen_ext32u_i64(dst, dst);
    }
}
3816
3817
3818
3819
3820
3821static void shift_reg_imm(TCGv_i64 dst, TCGv_i64 src, int sf,
3822 enum a64_shift_type shift_type, unsigned int shift_i)
3823{
3824 assert(shift_i < (sf ? 64 : 32));
3825
3826 if (shift_i == 0) {
3827 tcg_gen_mov_i64(dst, src);
3828 } else {
3829 TCGv_i64 shift_const;
3830
3831 shift_const = tcg_const_i64(shift_i);
3832 shift_reg(dst, src, sf, shift_type, shift_const);
3833 tcg_temp_free_i64(shift_const);
3834 }
3835}
3836
3837
3838
3839
3840
3841
3842
/*
 * Logical (shifted register): AND, BIC, ORR, ORN, EOR, EON, ANDS,
 * BICS — second operand optionally shifted; 'invert' selects the
 * operand-inverting (N) forms.
 */
static void disas_logic_reg(DisasContext *s, uint32_t insn)
{
    TCGv_i64 tcg_rd, tcg_rn, tcg_rm;
    unsigned int sf, opc, shift_type, invert, rm, shift_amount, rn, rd;

    sf = extract32(insn, 31, 1);
    opc = extract32(insn, 29, 2);
    shift_type = extract32(insn, 22, 2);
    invert = extract32(insn, 21, 1);
    rm = extract32(insn, 16, 5);
    shift_amount = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    if (!sf && (shift_amount & (1 << 5))) {
        /* shift amounts >= 32 are reserved for 32-bit forms */
        unallocated_encoding(s);
        return;
    }

    tcg_rd = cpu_reg(s, rd);

    if (opc == 1 && shift_amount == 0 && shift_type == 0 && rn == 31) {
        /* Unshifted ORR and ORN with WZR/XZR is the standard encoding for
         * register-register MOV and MVN, so it is worth special casing.
         */
        tcg_rm = cpu_reg(s, rm);
        if (invert) { /* MVN */
            tcg_gen_not_i64(tcg_rd, tcg_rm);
            if (!sf) {
                tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
            }
        } else {      /* MOV */
            if (sf) {
                tcg_gen_mov_i64(tcg_rd, tcg_rm);
            } else {
                tcg_gen_ext32u_i64(tcg_rd, tcg_rm);
            }
        }
        return;
    }

    tcg_rm = read_cpu_reg(s, rm, sf);

    if (shift_amount) {
        shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, shift_amount);
    }

    tcg_rn = cpu_reg(s, rn);

    switch (opc | (invert << 2)) {
    case 0: /* AND */
    case 3: /* ANDS */
        tcg_gen_and_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 1: /* ORR */
        tcg_gen_or_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 2: /* EOR */
        tcg_gen_xor_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 4: /* BIC */
    case 7: /* BICS */
        tcg_gen_andc_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 5: /* ORN */
        tcg_gen_orc_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 6: /* EON */
        tcg_gen_eqv_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    default:
        assert(FALSE); /* 3-bit combination; all values handled */
        break;
    }

    if (!sf) {
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }

    if (opc == 3) { /* ANDS/BICS set flags from the result */
        gen_logic_CC(sf, tcg_rd);
    }
}
3926
3927
3928
3929
3930
3931
3932
3933
3934
3935
3936
3937
3938
3939
3940
3941
3942
3943
/*
 * Add/subtract (extended register): ADD(S)/SUB(S) where the second
 * operand is sign- or zero-extended from a sub-width value and then
 * shifted left by 0-4 (imm3).  Rn (and Rd for non-flag-setting forms)
 * may be SP.
 */
static void disas_add_sub_ext_reg(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int imm3 = extract32(insn, 10, 3);
    int option = extract32(insn, 13, 3);
    int rm = extract32(insn, 16, 5);
    bool setflags = extract32(insn, 29, 1);
    bool sub_op = extract32(insn, 30, 1);
    bool sf = extract32(insn, 31, 1);

    TCGv_i64 tcg_rm, tcg_rn;
    TCGv_i64 tcg_rd;
    TCGv_i64 tcg_result;

    if (imm3 > 4) {
        /* shift amounts of 5-7 are reserved */
        unallocated_encoding(s);
        return;
    }

    /* non-flag setting ops may use SP as destination */
    if (!setflags) {
        tcg_rd = cpu_reg_sp(s, rd);
    } else {
        tcg_rd = cpu_reg(s, rd);
    }
    tcg_rn = read_cpu_reg_sp(s, rn, sf);

    tcg_rm = read_cpu_reg(s, rm, sf);
    ext_and_shift_reg(tcg_rm, tcg_rm, option, imm3);

    tcg_result = tcg_temp_new_i64();

    if (!setflags) {
        if (sub_op) {
            tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
        } else {
            tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
        }
    } else {
        if (sub_op) {
            gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
        } else {
            gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
        }
    }

    if (sf) {
        tcg_gen_mov_i64(tcg_rd, tcg_result);
    } else {
        /* 32-bit form: zero-extend into the 64-bit register */
        tcg_gen_ext32u_i64(tcg_rd, tcg_result);
    }

    tcg_temp_free_i64(tcg_result);
}
3999
4000
4001
4002
4003
4004
4005
4006
4007
4008
4009
4010
4011
4012
4013
/*
 * Add/subtract (shifted register): ADD(S)/SUB(S) where the second
 * operand is shifted by an immediate amount.  ROR (shift_type 3) and
 * 32-bit shifts >= 32 are reserved.
 */
static void disas_add_sub_reg(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int imm6 = extract32(insn, 10, 6);
    int rm = extract32(insn, 16, 5);
    int shift_type = extract32(insn, 22, 2);
    bool setflags = extract32(insn, 29, 1);
    bool sub_op = extract32(insn, 30, 1);
    bool sf = extract32(insn, 31, 1);

    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_rn, tcg_rm;
    TCGv_i64 tcg_result;

    if ((shift_type == 3) || (!sf && (imm6 > 31))) {
        unallocated_encoding(s);
        return;
    }

    tcg_rn = read_cpu_reg(s, rn, sf);
    tcg_rm = read_cpu_reg(s, rm, sf);

    shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, imm6);

    tcg_result = tcg_temp_new_i64();

    if (!setflags) {
        if (sub_op) {
            tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
        } else {
            tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
        }
    } else {
        if (sub_op) {
            gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
        } else {
            gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
        }
    }

    if (sf) {
        tcg_gen_mov_i64(tcg_rd, tcg_result);
    } else {
        /* 32-bit form: zero-extend into the 64-bit register */
        tcg_gen_ext32u_i64(tcg_rd, tcg_result);
    }

    tcg_temp_free_i64(tcg_result);
}
4063
4064
4065
4066
4067
4068
4069
4070
/*
 * Data-processing (3 source): MADD, MSUB, SMADDL, SMSUBL, SMULH,
 * UMADDL, UMSUBL, UMULH.
 */
static void disas_data_proc_3src(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int ra = extract32(insn, 10, 5);
    int rm = extract32(insn, 16, 5);
    /* op_id packs sf:op54 (bits 29-31), op31 (bits 21-23) and o0
     * (bit 15) into one value for the switch below.
     */
    int op_id = (extract32(insn, 29, 3) << 4) |
        (extract32(insn, 21, 3) << 1) |
        extract32(insn, 15, 1);
    bool sf = extract32(insn, 31, 1);
    bool is_sub = extract32(op_id, 0, 1);
    bool is_high = extract32(op_id, 2, 1);
    bool is_signed = false;
    TCGv_i64 tcg_op1;
    TCGv_i64 tcg_op2;
    TCGv_i64 tcg_tmp;

    /* Note that op_id includes sf, so the signed widening ops only
     * match in their 64-bit (0x4x) forms.
     */
    switch (op_id) {
    case 0x42: /* SMADDL */
    case 0x43: /* SMSUBL */
    case 0x44: /* SMULH */
        is_signed = true;
        break;
    case 0x0:  /* MADD (32bit) */
    case 0x1:  /* MSUB (32bit) */
    case 0x40: /* MADD (64bit) */
    case 0x41: /* MSUB (64bit) */
    case 0x4a: /* UMADDL */
    case 0x4b: /* UMSUBL */
    case 0x4c: /* UMULH */
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (is_high) {
        /* SMULH/UMULH: write the high half of the 128-bit product */
        TCGv_i64 low_bits = tcg_temp_new_i64(); /* low bits discarded */
        TCGv_i64 tcg_rd = cpu_reg(s, rd);
        TCGv_i64 tcg_rn = cpu_reg(s, rn);
        TCGv_i64 tcg_rm = cpu_reg(s, rm);

        if (is_signed) {
            tcg_gen_muls2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
        } else {
            tcg_gen_mulu2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
        }

        tcg_temp_free_i64(low_bits);
        return;
    }

    tcg_op1 = tcg_temp_new_i64();
    tcg_op2 = tcg_temp_new_i64();
    tcg_tmp = tcg_temp_new_i64();

    if (op_id < 0x42) {
        /* MADD/MSUB: full-width operands */
        tcg_gen_mov_i64(tcg_op1, cpu_reg(s, rn));
        tcg_gen_mov_i64(tcg_op2, cpu_reg(s, rm));
    } else {
        /* widening multiplies: extend the 32-bit operands */
        if (is_signed) {
            tcg_gen_ext32s_i64(tcg_op1, cpu_reg(s, rn));
            tcg_gen_ext32s_i64(tcg_op2, cpu_reg(s, rm));
        } else {
            tcg_gen_ext32u_i64(tcg_op1, cpu_reg(s, rn));
            tcg_gen_ext32u_i64(tcg_op2, cpu_reg(s, rm));
        }
    }

    if (ra == 31 && !is_sub) {
        /* Special-case MADD with rA == XZR; it is the standard MUL alias */
        tcg_gen_mul_i64(cpu_reg(s, rd), tcg_op1, tcg_op2);
    } else {
        tcg_gen_mul_i64(tcg_tmp, tcg_op1, tcg_op2);
        if (is_sub) {
            tcg_gen_sub_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
        } else {
            tcg_gen_add_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
        }
    }

    if (!sf) {
        tcg_gen_ext32u_i64(cpu_reg(s, rd), cpu_reg(s, rd));
    }

    tcg_temp_free_i64(tcg_op1);
    tcg_temp_free_i64(tcg_op2);
    tcg_temp_free_i64(tcg_tmp);
}
4161
4162
4163
4164
4165
4166
4167
4168
4169
/*
 * Add/subtract (with carry): ADC, ADCS, SBC, SBCS.  SBC is implemented
 * as ADC with the second operand inverted.
 */
static void disas_adc_sbc(DisasContext *s, uint32_t insn)
{
    unsigned int sf, op, setflags, rm, rn, rd;
    TCGv_i64 tcg_y, tcg_rn, tcg_rd;

    if (extract32(insn, 10, 6) != 0) {
        unallocated_encoding(s);
        return;
    }

    sf = extract32(insn, 31, 1);
    op = extract32(insn, 30, 1);
    setflags = extract32(insn, 29, 1);
    rm = extract32(insn, 16, 5);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    tcg_rd = cpu_reg(s, rd);
    tcg_rn = cpu_reg(s, rn);

    if (op) {
        /* SBC(S): invert Rm, then add with carry */
        tcg_y = new_tmp_a64(s);
        tcg_gen_not_i64(tcg_y, cpu_reg(s, rm));
    } else {
        tcg_y = cpu_reg(s, rm);
    }

    if (setflags) {
        gen_adc_CC(sf, tcg_rd, tcg_rn, tcg_y);
    } else {
        gen_adc(sf, tcg_rd, tcg_rn, tcg_y);
    }
}
4203
4204
4205
4206
4207
4208
4209
4210
/*
 * Conditional compare (immediate / register): CCMP, CCMN.  If the
 * condition holds, perform the comparison and set NZCV from it;
 * otherwise force the flags to the literal nzcv field.
 */
static void disas_cc(DisasContext *s, uint32_t insn)
{
    unsigned int sf, op, y, cond, rn, nzcv, is_imm;
    TCGv_i32 tcg_t0, tcg_t1, tcg_t2;
    TCGv_i64 tcg_tmp, tcg_y, tcg_rn;
    DisasCompare c;

    if (!extract32(insn, 29, 1)) {
        unallocated_encoding(s);
        return;
    }
    if (insn & (1 << 10 | 1 << 4)) {
        unallocated_encoding(s);
        return;
    }
    sf = extract32(insn, 31, 1);
    op = extract32(insn, 30, 1);
    is_imm = extract32(insn, 11, 1);
    y = extract32(insn, 16, 5); /* y = rm (reg) or imm5 (imm) */
    cond = extract32(insn, 12, 4);
    rn = extract32(insn, 5, 5);
    nzcv = extract32(insn, 0, 4);

    /* Set T0 = !COND.  */
    tcg_t0 = tcg_temp_new_i32();
    arm_test_cc(&c, cond);
    tcg_gen_setcondi_i32(tcg_invert_cond(c.cond), tcg_t0, c.value, 0);
    arm_free_cc(&c);

    /* Load the arguments for the new comparison.  */
    if (is_imm) {
        tcg_y = new_tmp_a64(s);
        tcg_gen_movi_i64(tcg_y, y);
    } else {
        tcg_y = cpu_reg(s, y);
    }
    tcg_rn = cpu_reg(s, rn);

    /* Set the flags for the new comparison.  */
    tcg_tmp = tcg_temp_new_i64();
    if (op) {
        gen_sub_CC(sf, tcg_tmp, tcg_rn, tcg_y);
    } else {
        gen_add_CC(sf, tcg_tmp, tcg_rn, tcg_y);
    }
    tcg_temp_free_i64(tcg_tmp);

    /* If COND was false, force the flags to #nzcv.  Compute two masks
     * to help with this: T1 = (COND ? 0 : -1), T2 = (COND ? -1 : 0).
     * For tcg hosts that support ANDC, we can make do with just T1.
     * In either case, allow the tcg optimizer to delete any unused mask.
     */
    tcg_t1 = tcg_temp_new_i32();
    tcg_t2 = tcg_temp_new_i32();
    tcg_gen_neg_i32(tcg_t1, tcg_t0);
    tcg_gen_subi_i32(tcg_t2, tcg_t0, 1);

    if (nzcv & 8) { /* N */
        tcg_gen_or_i32(cpu_NF, cpu_NF, tcg_t1);
    } else {
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_NF, cpu_NF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_NF, cpu_NF, tcg_t2);
        }
    }
    if (nzcv & 4) { /* Z */
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_ZF, cpu_ZF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_ZF, cpu_ZF, tcg_t2);
        }
    } else {
        tcg_gen_or_i32(cpu_ZF, cpu_ZF, tcg_t0);
    }
    if (nzcv & 2) { /* C */
        tcg_gen_or_i32(cpu_CF, cpu_CF, tcg_t0);
    } else {
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_CF, cpu_CF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_CF, cpu_CF, tcg_t2);
        }
    }
    if (nzcv & 1) { /* V */
        tcg_gen_or_i32(cpu_VF, cpu_VF, tcg_t1);
    } else {
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_VF, cpu_VF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_VF, cpu_VF, tcg_t2);
        }
    }
    tcg_temp_free_i32(tcg_t0);
    tcg_temp_free_i32(tcg_t1);
    tcg_temp_free_i32(tcg_t2);
}
4308
4309
4310
4311
4312
4313
4314
/*
 * Conditional select: CSEL, CSINC (else_inc), CSINV (else_inv),
 * CSNEG (both) — Rd = cond ? Rn : op(Rm).
 */
static void disas_cond_select(DisasContext *s, uint32_t insn)
{
    unsigned int sf, else_inv, rm, cond, else_inc, rn, rd;
    TCGv_i64 tcg_rd, zero;
    DisasCompare64 c;

    if (extract32(insn, 29, 1) || extract32(insn, 11, 1)) {
        /* S == 1 or op2<1> == 1 */
        unallocated_encoding(s);
        return;
    }
    sf = extract32(insn, 31, 1);
    else_inv = extract32(insn, 30, 1);
    rm = extract32(insn, 16, 5);
    cond = extract32(insn, 12, 4);
    else_inc = extract32(insn, 10, 1);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    tcg_rd = cpu_reg(s, rd);

    a64_test_cc(&c, cond);
    zero = tcg_const_i64(0);

    if (rn == 31 && rm == 31 && (else_inc ^ else_inv)) {
        /* CSET (CSINC with ZR operands) and CSETM (CSINV): materialize
         * the condition as 0/1 and optionally negate to 0/-1.
         */
        tcg_gen_setcond_i64(tcg_invert_cond(c.cond), tcg_rd, c.value, zero);
        if (else_inv) {
            tcg_gen_neg_i64(tcg_rd, tcg_rd);
        }
    } else {
        TCGv_i64 t_true = cpu_reg(s, rn);
        TCGv_i64 t_false = read_cpu_reg(s, rm, 1);
        /* apply the else-operation to the false operand up front */
        if (else_inv && else_inc) {
            tcg_gen_neg_i64(t_false, t_false);
        } else if (else_inv) {
            tcg_gen_not_i64(t_false, t_false);
        } else if (else_inc) {
            tcg_gen_addi_i64(t_false, t_false, 1);
        }
        tcg_gen_movcond_i64(c.cond, tcg_rd, c.value, zero, t_true, t_false);
    }

    tcg_temp_free_i64(zero);
    a64_free_cc(&c);

    if (!sf) {
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
}
4365
4366static void handle_clz(DisasContext *s, unsigned int sf,
4367 unsigned int rn, unsigned int rd)
4368{
4369 TCGv_i64 tcg_rd, tcg_rn;
4370 tcg_rd = cpu_reg(s, rd);
4371 tcg_rn = cpu_reg(s, rn);
4372
4373 if (sf) {
4374 tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64);
4375 } else {
4376 TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
4377 tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
4378 tcg_gen_clzi_i32(tcg_tmp32, tcg_tmp32, 32);
4379 tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
4380 tcg_temp_free_i32(tcg_tmp32);
4381 }
4382}
4383
4384static void handle_cls(DisasContext *s, unsigned int sf,
4385 unsigned int rn, unsigned int rd)
4386{
4387 TCGv_i64 tcg_rd, tcg_rn;
4388 tcg_rd = cpu_reg(s, rd);
4389 tcg_rn = cpu_reg(s, rn);
4390
4391 if (sf) {
4392 tcg_gen_clrsb_i64(tcg_rd, tcg_rn);
4393 } else {
4394 TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
4395 tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
4396 tcg_gen_clrsb_i32(tcg_tmp32, tcg_tmp32);
4397 tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
4398 tcg_temp_free_i32(tcg_tmp32);
4399 }
4400}
4401
4402static void handle_rbit(DisasContext *s, unsigned int sf,
4403 unsigned int rn, unsigned int rd)
4404{
4405 TCGv_i64 tcg_rd, tcg_rn;
4406 tcg_rd = cpu_reg(s, rd);
4407 tcg_rn = cpu_reg(s, rn);
4408
4409 if (sf) {
4410 gen_helper_rbit64(tcg_rd, tcg_rn);
4411 } else {
4412 TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
4413 tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
4414 gen_helper_rbit(tcg_tmp32, tcg_tmp32);
4415 tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
4416 tcg_temp_free_i32(tcg_tmp32);
4417 }
4418}
4419
4420
4421static void handle_rev64(DisasContext *s, unsigned int sf,
4422 unsigned int rn, unsigned int rd)
4423{
4424 if (!sf) {
4425 unallocated_encoding(s);
4426 return;
4427 }
4428 tcg_gen_bswap64_i64(cpu_reg(s, rd), cpu_reg(s, rn));
4429}
4430
4431
4432
4433
/* REV32: byte-reverse each 32-bit word of the operand independently. */
static void handle_rev32(DisasContext *s, unsigned int sf,
                         unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd = cpu_reg(s, rd);

    if (sf) {
        TCGv_i64 tcg_tmp = tcg_temp_new_i64();
        TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);

        /* bswap the low word into rd, the high word into tmp, then
         * reassemble: concat32 places rd in bits [31:0], tmp in [63:32].
         */
        tcg_gen_ext32u_i64(tcg_tmp, tcg_rn);
        tcg_gen_bswap32_i64(tcg_rd, tcg_tmp);
        tcg_gen_shri_i64(tcg_tmp, tcg_rn, 32);
        tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp);
        tcg_gen_concat32_i64(tcg_rd, tcg_rd, tcg_tmp);

        tcg_temp_free_i64(tcg_tmp);
    } else {
        /* 32-bit form: a single word swap; result is zero-extended. */
        tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, rn));
        tcg_gen_bswap32_i64(tcg_rd, tcg_rd);
    }
}
4456
4457
/* REV16: byte-swap each 16-bit halfword of the operand independently. */
static void handle_rev16(DisasContext *s, unsigned int sf,
                         unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();
    TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
    TCGv_i64 mask = tcg_const_i64(sf ? 0x00ff00ff00ff00ffull : 0x00ff00ff);

    /* Swap adjacent bytes via masked shifts:
     * rd = (rn & mask) << 8 | (rn >> 8) & mask
     * The mask also drops the high 32 bits for the 32-bit form.
     */
    tcg_gen_shri_i64(tcg_tmp, tcg_rn, 8);
    tcg_gen_and_i64(tcg_rd, tcg_rn, mask);
    tcg_gen_and_i64(tcg_tmp, tcg_tmp, mask);
    tcg_gen_shli_i64(tcg_rd, tcg_rd, 8);
    tcg_gen_or_i64(tcg_rd, tcg_rd, tcg_tmp);

    tcg_temp_free_i64(mask);
    tcg_temp_free_i64(tcg_tmp);
}
4475
4476
4477
4478
4479
4480
4481
4482static void disas_data_proc_1src(DisasContext *s, uint32_t insn)
4483{
4484 unsigned int sf, opcode, rn, rd;
4485
4486 if (extract32(insn, 29, 1) || extract32(insn, 16, 5)) {
4487 unallocated_encoding(s);
4488 return;
4489 }
4490
4491 sf = extract32(insn, 31, 1);
4492 opcode = extract32(insn, 10, 6);
4493 rn = extract32(insn, 5, 5);
4494 rd = extract32(insn, 0, 5);
4495
4496 switch (opcode) {
4497 case 0:
4498 handle_rbit(s, sf, rn, rd);
4499 break;
4500 case 1:
4501 handle_rev16(s, sf, rn, rd);
4502 break;
4503 case 2:
4504 handle_rev32(s, sf, rn, rd);
4505 break;
4506 case 3:
4507 handle_rev64(s, sf, rn, rd);
4508 break;
4509 case 4:
4510 handle_clz(s, sf, rn, rd);
4511 break;
4512 case 5:
4513 handle_cls(s, sf, rn, rd);
4514 break;
4515 }
4516}
4517
4518static void handle_div(DisasContext *s, bool is_signed, unsigned int sf,
4519 unsigned int rm, unsigned int rn, unsigned int rd)
4520{
4521 TCGv_i64 tcg_n, tcg_m, tcg_rd;
4522 tcg_rd = cpu_reg(s, rd);
4523
4524 if (!sf && is_signed) {
4525 tcg_n = new_tmp_a64(s);
4526 tcg_m = new_tmp_a64(s);
4527 tcg_gen_ext32s_i64(tcg_n, cpu_reg(s, rn));
4528 tcg_gen_ext32s_i64(tcg_m, cpu_reg(s, rm));
4529 } else {
4530 tcg_n = read_cpu_reg(s, rn, sf);
4531 tcg_m = read_cpu_reg(s, rm, sf);
4532 }
4533
4534 if (is_signed) {
4535 gen_helper_sdiv64(tcg_rd, tcg_n, tcg_m);
4536 } else {
4537 gen_helper_udiv64(tcg_rd, tcg_n, tcg_m);
4538 }
4539
4540 if (!sf) {
4541 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
4542 }
4543}
4544
4545
4546static void handle_shift_reg(DisasContext *s,
4547 enum a64_shift_type shift_type, unsigned int sf,
4548 unsigned int rm, unsigned int rn, unsigned int rd)
4549{
4550 TCGv_i64 tcg_shift = tcg_temp_new_i64();
4551 TCGv_i64 tcg_rd = cpu_reg(s, rd);
4552 TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
4553
4554 tcg_gen_andi_i64(tcg_shift, cpu_reg(s, rm), sf ? 63 : 31);
4555 shift_reg(tcg_rd, tcg_rn, sf, shift_type, tcg_shift);
4556 tcg_temp_free_i64(tcg_shift);
4557}
4558
4559
4560static void handle_crc32(DisasContext *s,
4561 unsigned int sf, unsigned int sz, bool crc32c,
4562 unsigned int rm, unsigned int rn, unsigned int rd)
4563{
4564 TCGv_i64 tcg_acc, tcg_val;
4565 TCGv_i32 tcg_bytes;
4566
4567 if (!arm_dc_feature(s, ARM_FEATURE_CRC)
4568 || (sf == 1 && sz != 3)
4569 || (sf == 0 && sz == 3)) {
4570 unallocated_encoding(s);
4571 return;
4572 }
4573
4574 if (sz == 3) {
4575 tcg_val = cpu_reg(s, rm);
4576 } else {
4577 uint64_t mask;
4578 switch (sz) {
4579 case 0:
4580 mask = 0xFF;
4581 break;
4582 case 1:
4583 mask = 0xFFFF;
4584 break;
4585 case 2:
4586 mask = 0xFFFFFFFF;
4587 break;
4588 default:
4589 g_assert_not_reached();
4590 }
4591 tcg_val = new_tmp_a64(s);
4592 tcg_gen_andi_i64(tcg_val, cpu_reg(s, rm), mask);
4593 }
4594
4595 tcg_acc = cpu_reg(s, rn);
4596 tcg_bytes = tcg_const_i32(1 << sz);
4597
4598 if (crc32c) {
4599 gen_helper_crc32c_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes);
4600 } else {
4601 gen_helper_crc32_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes);
4602 }
4603
4604 tcg_temp_free_i32(tcg_bytes);
4605}
4606
4607
4608
4609
4610
4611
4612
/* Data-processing (2 source): UDIV/SDIV, variable shifts, CRC32. */
static void disas_data_proc_2src(DisasContext *s, uint32_t insn)
{
    unsigned int sf, rm, opcode, rn, rd;
    sf = extract32(insn, 31, 1);
    rm = extract32(insn, 16, 5);
    opcode = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    /* The S bit (bit 29) must be zero for this class. */
    if (extract32(insn, 29, 1)) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 2: /* UDIV */
        handle_div(s, false, sf, rm, rn, rd);
        break;
    case 3: /* SDIV */
        handle_div(s, true, sf, rm, rn, rd);
        break;
    case 8: /* LSLV */
        handle_shift_reg(s, A64_SHIFT_TYPE_LSL, sf, rm, rn, rd);
        break;
    case 9: /* LSRV */
        handle_shift_reg(s, A64_SHIFT_TYPE_LSR, sf, rm, rn, rd);
        break;
    case 10: /* ASRV */
        handle_shift_reg(s, A64_SHIFT_TYPE_ASR, sf, rm, rn, rd);
        break;
    case 11: /* RORV */
        handle_shift_reg(s, A64_SHIFT_TYPE_ROR, sf, rm, rn, rd);
        break;
    case 16:
    case 17:
    case 18:
    case 19:
    case 20:
    case 21:
    case 22:
    case 23: /* CRC32 / CRC32C, size in opcode[1:0], C variant in opcode[2] */
    {
        int sz = extract32(opcode, 0, 2);
        bool crc32c = extract32(opcode, 2, 1);
        handle_crc32(s, sf, sz, crc32c, rm, rn, rd);
        break;
    }
    default:
        unallocated_encoding(s);
        break;
    }
}
4665
4666
/* Top-level decode for the data-processing (register) group,
 * dispatching on insn[28:24].
 */
static void disas_data_proc_reg(DisasContext *s, uint32_t insn)
{
    switch (extract32(insn, 24, 5)) {
    case 0x0a: /* Logical (shifted register) */
        disas_logic_reg(s, insn);
        break;
    case 0x0b: /* Add/subtract */
        if (insn & (1 << 21)) { /* (extended register) */
            disas_add_sub_ext_reg(s, insn);
        } else { /* (shifted register) */
            disas_add_sub_reg(s, insn);
        }
        break;
    case 0x1b: /* Data-processing (3 source) */
        disas_data_proc_3src(s, insn);
        break;
    case 0x1a:
        switch (extract32(insn, 21, 3)) {
        case 0x0: /* Add/subtract (with carry) */
            disas_adc_sbc(s, insn);
            break;
        case 0x2: /* Conditional compare */
            disas_cc(s, insn);
            break;
        case 0x4: /* Conditional select */
            disas_cond_select(s, insn);
            break;
        case 0x6: /* Data-processing */
            if (insn & (1 << 30)) { /* (1 source) */
                disas_data_proc_1src(s, insn);
            } else {            /* (2 source) */
                disas_data_proc_2src(s, insn);
            }
            break;
        default:
            unallocated_encoding(s);
            break;
        }
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
4711
/* Generate an FP comparison of Vn against Vm (or against zero when
 * cmp_with_zero) for the given element size, and set NZCV from the
 * result. signal_all_nans selects the signalling (FCMPE) variant.
 */
static void handle_fp_compare(DisasContext *s, int size,
                              unsigned int rn, unsigned int rm,
                              bool cmp_with_zero, bool signal_all_nans)
{
    TCGv_i64 tcg_flags = tcg_temp_new_i64();
    TCGv_ptr fpst = get_fpstatus_ptr(size == MO_16);

    if (size == MO_64) {
        TCGv_i64 tcg_vn, tcg_vm;

        tcg_vn = read_fp_dreg(s, rn);
        if (cmp_with_zero) {
            tcg_vm = tcg_const_i64(0);
        } else {
            tcg_vm = read_fp_dreg(s, rm);
        }
        if (signal_all_nans) {
            gen_helper_vfp_cmped_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
        } else {
            gen_helper_vfp_cmpd_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
        }
        tcg_temp_free_i64(tcg_vn);
        tcg_temp_free_i64(tcg_vm);
    } else {
        /* Single- and half-precision share the 32-bit path; the helper
         * is chosen by size below.
         */
        TCGv_i32 tcg_vn = tcg_temp_new_i32();
        TCGv_i32 tcg_vm = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_vn, rn, 0, size);
        if (cmp_with_zero) {
            tcg_gen_movi_i32(tcg_vm, 0);
        } else {
            read_vec_element_i32(s, tcg_vm, rm, 0, size);
        }

        switch (size) {
        case MO_32:
            if (signal_all_nans) {
                gen_helper_vfp_cmpes_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
            } else {
                gen_helper_vfp_cmps_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
            }
            break;
        case MO_16:
            if (signal_all_nans) {
                gen_helper_vfp_cmpeh_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
            } else {
                gen_helper_vfp_cmph_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
            }
            break;
        default:
            g_assert_not_reached();
        }

        tcg_temp_free_i32(tcg_vn);
        tcg_temp_free_i32(tcg_vm);
    }

    tcg_temp_free_ptr(fpst);

    gen_set_nzcv(tcg_flags);

    tcg_temp_free_i64(tcg_flags);
}
4775
4776
4777
4778
4779
4780
4781
/* Floating point compare (FCMP/FCMPE, optionally against zero). */
static void disas_fp_compare(DisasContext *s, uint32_t insn)
{
    unsigned int mos, type, rm, op, rn, opc, op2r;
    int size;

    mos = extract32(insn, 29, 3);
    type = extract32(insn, 22, 2);
    rm = extract32(insn, 16, 5);
    op = extract32(insn, 14, 2);
    rn = extract32(insn, 5, 5);
    opc = extract32(insn, 3, 2);
    op2r = extract32(insn, 0, 3);

    /* mos, op and op2r are all must-be-zero fields. */
    if (mos || op || op2r) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0:
        size = MO_32;
        break;
    case 1:
        size = MO_64;
        break;
    case 3:
        size = MO_16;
        if (arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
            break;
        }
        /* fallthrough: half-precision without FP16 is unallocated */
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* opc bit 0: compare with zero; opc bit 1: signalling (FCMPE). */
    handle_fp_compare(s, size, rn, rm, opc & 1, opc & 2);
}
4824
4825
4826
4827
4828
4829
4830
/* Floating point conditional compare (FCCMP/FCCMPE): if the condition
 * holds, compare Vn with Vm; otherwise set NZCV to the immediate.
 */
static void disas_fp_ccomp(DisasContext *s, uint32_t insn)
{
    unsigned int mos, type, rm, cond, rn, op, nzcv;
    TCGv_i64 tcg_flags;
    TCGLabel *label_continue = NULL;
    int size;

    mos = extract32(insn, 29, 3);
    type = extract32(insn, 22, 2);
    rm = extract32(insn, 16, 5);
    cond = extract32(insn, 12, 4);
    rn = extract32(insn, 5, 5);
    op = extract32(insn, 4, 1);
    nzcv = extract32(insn, 0, 4);

    if (mos) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0:
        size = MO_32;
        break;
    case 1:
        size = MO_64;
        break;
    case 3:
        size = MO_16;
        if (arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
            break;
        }
        /* fallthrough: half-precision without FP16 is unallocated */
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* cond 0xe/0xf are "always": skip the conditional branch and the
     * immediate-NZCV path entirely.
     */
    if (cond < 0x0e) {
        TCGLabel *label_match = gen_new_label();
        label_continue = gen_new_label();
        arm_gen_test_cc(cond, label_match);
        /* Condition failed: load the immediate nzcv into the flags. */
        tcg_flags = tcg_const_i64(nzcv << 28);
        gen_set_nzcv(tcg_flags);
        tcg_temp_free_i64(tcg_flags);
        tcg_gen_br(label_continue);
        gen_set_label(label_match);
    }

    handle_fp_compare(s, size, rn, rm, false, op);

    if (cond < 0x0e) {
        gen_set_label(label_continue);
    }
}
4891
4892
4893
4894
4895
4896
4897
/* Floating point conditional select (FCSEL):
 * Vd = cond holds ? Vn : Vm.
 */
static void disas_fp_csel(DisasContext *s, uint32_t insn)
{
    unsigned int mos, type, rm, cond, rn, rd;
    TCGv_i64 t_true, t_false, t_zero;
    DisasCompare64 c;
    TCGMemOp sz;

    mos = extract32(insn, 29, 3);
    type = extract32(insn, 22, 2);
    rm = extract32(insn, 16, 5);
    cond = extract32(insn, 12, 4);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    if (mos) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0:
        sz = MO_32;
        break;
    case 1:
        sz = MO_64;
        break;
    case 3:
        sz = MO_16;
        if (arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
            break;
        }
        /* fallthrough: half-precision without FP16 is unallocated */
    default:
        unallocated_encoding(s);
        return;
    }

    /* Zero-extend sreg & hreg inputs to 64 bits now. */
    if (!fp_access_check(s)) {
        return;
    }

    t_true = tcg_temp_new_i64();
    t_false = tcg_temp_new_i64();
    read_vec_element(s, t_true, rn, 0, sz);
    read_vec_element(s, t_false, rm, 0, sz);

    /* Select via a 64-bit movcond on the materialized condition value. */
    a64_test_cc(&c, cond);
    t_zero = tcg_const_i64(0);
    tcg_gen_movcond_i64(c.cond, t_true, c.value, t_zero, t_true, t_false);
    tcg_temp_free_i64(t_zero);
    tcg_temp_free_i64(t_false);
    a64_free_cc(&c);

    /* Note that sregs & hregs write back zeros to the high bits,
       and we've already done the zero-extension. */
    write_fp_dreg(s, rd, t_true);
    tcg_temp_free_i64(t_true);
}
4957
4958
/* Floating-point data-processing (1 source) - half precision */
static void handle_fp_1src_half(DisasContext *s, int opcode, int rd, int rn)
{
    TCGv_ptr fpst = NULL;
    TCGv_i32 tcg_op = read_fp_hreg(s, rn);
    TCGv_i32 tcg_res = tcg_temp_new_i32();

    switch (opcode) {
    case 0x0: /* FMOV */
        tcg_gen_mov_i32(tcg_res, tcg_op);
        break;
    case 0x1: /* FABS: clear the f16 sign bit */
        tcg_gen_andi_i32(tcg_res, tcg_op, 0x7fff);
        break;
    case 0x2: /* FNEG: flip the f16 sign bit */
        tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
        break;
    case 0x3: /* FSQRT */
        fpst = get_fpstatus_ptr(true);
        gen_helper_sqrt_f16(tcg_res, tcg_op, fpst);
        break;
    case 0x8: /* FRINTN */
    case 0x9: /* FRINTP */
    case 0xa: /* FRINTM */
    case 0xb: /* FRINTZ */
    case 0xc: /* FRINTA */
    {
        /* Round with an explicit rounding mode: install it, round,
         * then restore the previous mode (set_rmode swaps in place).
         */
        TCGv_i32 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(opcode & 7));
        fpst = get_fpstatus_ptr(true);

        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
        gen_helper_advsimd_rinth(tcg_res, tcg_op, fpst);

        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
        tcg_temp_free_i32(tcg_rmode);
        break;
    }
    case 0xe: /* FRINTX: exact, may raise Inexact */
        fpst = get_fpstatus_ptr(true);
        gen_helper_advsimd_rinth_exact(tcg_res, tcg_op, fpst);
        break;
    case 0xf: /* FRINTI: use the current rounding mode */
        fpst = get_fpstatus_ptr(true);
        gen_helper_advsimd_rinth(tcg_res, tcg_op, fpst);
        break;
    default:
        abort();
    }

    write_fp_sreg(s, rd, tcg_res);

    if (fpst) {
        tcg_temp_free_ptr(fpst);
    }
    tcg_temp_free_i32(tcg_op);
    tcg_temp_free_i32(tcg_res);
}
5015
5016
/* Floating-point data-processing (1 source) - single precision */
static void handle_fp_1src_single(DisasContext *s, int opcode, int rd, int rn)
{
    TCGv_ptr fpst;
    TCGv_i32 tcg_op;
    TCGv_i32 tcg_res;

    fpst = get_fpstatus_ptr(false);
    tcg_op = read_fp_sreg(s, rn);
    tcg_res = tcg_temp_new_i32();

    switch (opcode) {
    case 0x0: /* FMOV */
        tcg_gen_mov_i32(tcg_res, tcg_op);
        break;
    case 0x1: /* FABS */
        gen_helper_vfp_abss(tcg_res, tcg_op);
        break;
    case 0x2: /* FNEG */
        gen_helper_vfp_negs(tcg_res, tcg_op);
        break;
    case 0x3: /* FSQRT */
        gen_helper_vfp_sqrts(tcg_res, tcg_op, cpu_env);
        break;
    case 0x8: /* FRINTN */
    case 0x9: /* FRINTP */
    case 0xa: /* FRINTM */
    case 0xb: /* FRINTZ */
    case 0xc: /* FRINTA */
    {
        /* Install the requested rounding mode, round, then restore
         * the previous mode (set_rmode swaps old/new in place).
         */
        TCGv_i32 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(opcode & 7));

        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
        gen_helper_rints(tcg_res, tcg_op, fpst);

        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
        tcg_temp_free_i32(tcg_rmode);
        break;
    }
    case 0xe: /* FRINTX: exact, may raise Inexact */
        gen_helper_rints_exact(tcg_res, tcg_op, fpst);
        break;
    case 0xf: /* FRINTI: use the current rounding mode */
        gen_helper_rints(tcg_res, tcg_op, fpst);
        break;
    default:
        abort();
    }

    write_fp_sreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tcg_op);
    tcg_temp_free_i32(tcg_res);
}
5071
5072
/* Floating-point data-processing (1 source) - double precision */
static void handle_fp_1src_double(DisasContext *s, int opcode, int rd, int rn)
{
    TCGv_ptr fpst;
    TCGv_i64 tcg_op;
    TCGv_i64 tcg_res;

    /* FMOV is handled separately as a simple vector move. */
    switch (opcode) {
    case 0x0: /* FMOV */
        gen_gvec_fn2(s, false, rd, rn, tcg_gen_gvec_mov, 0);
        return;
    }

    fpst = get_fpstatus_ptr(false);
    tcg_op = read_fp_dreg(s, rn);
    tcg_res = tcg_temp_new_i64();

    switch (opcode) {
    case 0x1: /* FABS */
        gen_helper_vfp_absd(tcg_res, tcg_op);
        break;
    case 0x2: /* FNEG */
        gen_helper_vfp_negd(tcg_res, tcg_op);
        break;
    case 0x3: /* FSQRT */
        gen_helper_vfp_sqrtd(tcg_res, tcg_op, cpu_env);
        break;
    case 0x8: /* FRINTN */
    case 0x9: /* FRINTP */
    case 0xa: /* FRINTM */
    case 0xb: /* FRINTZ */
    case 0xc: /* FRINTA */
    {
        /* Install the requested rounding mode, round, then restore
         * the previous mode (set_rmode swaps old/new in place).
         */
        TCGv_i32 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(opcode & 7));

        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
        gen_helper_rintd(tcg_res, tcg_op, fpst);

        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
        tcg_temp_free_i32(tcg_rmode);
        break;
    }
    case 0xe: /* FRINTX: exact, may raise Inexact */
        gen_helper_rintd_exact(tcg_res, tcg_op, fpst);
        break;
    case 0xf: /* FRINTI: use the current rounding mode */
        gen_helper_rintd(tcg_res, tcg_op, fpst);
        break;
    default:
        abort();
    }

    write_fp_dreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(tcg_op);
    tcg_temp_free_i64(tcg_res);
}
5130
/* FCVT: convert between precisions. ntype is the source type and
 * dtype the destination type (0 = single, 1 = double, 3 = half).
 */
static void handle_fp_fcvt(DisasContext *s, int opcode,
                           int rd, int rn, int dtype, int ntype)
{
    switch (ntype) {
    case 0x0:
    {
        /* Source is single precision. */
        TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
        if (dtype == 1) {
            /* Single to double */
            TCGv_i64 tcg_rd = tcg_temp_new_i64();
            gen_helper_vfp_fcvtds(tcg_rd, tcg_rn, cpu_env);
            write_fp_dreg(s, rd, tcg_rd);
            tcg_temp_free_i64(tcg_rd);
        } else {
            /* Single to half */
            TCGv_i32 tcg_rd = tcg_temp_new_i32();
            TCGv_i32 ahp = get_ahp_flag();
            TCGv_ptr fpst = get_fpstatus_ptr(false);

            gen_helper_vfp_fcvt_f32_to_f16(tcg_rd, tcg_rn, fpst, ahp);
            /* write_fp_sreg is OK here because top half of tcg_rd is zero */
            write_fp_sreg(s, rd, tcg_rd);
            tcg_temp_free_i32(tcg_rd);
            tcg_temp_free_i32(ahp);
            tcg_temp_free_ptr(fpst);
        }
        tcg_temp_free_i32(tcg_rn);
        break;
    }
    case 0x1:
    {
        /* Source is double precision. */
        TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
        TCGv_i32 tcg_rd = tcg_temp_new_i32();
        if (dtype == 0) {
            /* Double to single */
            gen_helper_vfp_fcvtsd(tcg_rd, tcg_rn, cpu_env);
        } else {
            /* Double to half */
            TCGv_ptr fpst = get_fpstatus_ptr(false);
            TCGv_i32 ahp = get_ahp_flag();

            gen_helper_vfp_fcvt_f64_to_f16(tcg_rd, tcg_rn, fpst, ahp);

            tcg_temp_free_ptr(fpst);
            tcg_temp_free_i32(ahp);
        }
        write_fp_sreg(s, rd, tcg_rd);
        tcg_temp_free_i32(tcg_rd);
        tcg_temp_free_i64(tcg_rn);
        break;
    }
    case 0x3:
    {
        /* Source is half precision; only the low 16 bits are valid. */
        TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
        TCGv_ptr tcg_fpst = get_fpstatus_ptr(false);
        TCGv_i32 tcg_ahp = get_ahp_flag();
        tcg_gen_ext16u_i32(tcg_rn, tcg_rn);
        if (dtype == 0) {
            /* Half to single */
            TCGv_i32 tcg_rd = tcg_temp_new_i32();
            gen_helper_vfp_fcvt_f16_to_f32(tcg_rd, tcg_rn, tcg_fpst, tcg_ahp);
            write_fp_sreg(s, rd, tcg_rd);
            tcg_temp_free_ptr(tcg_fpst);
            tcg_temp_free_i32(tcg_ahp);
            tcg_temp_free_i32(tcg_rd);
        } else {
            /* Half to double */
            TCGv_i64 tcg_rd = tcg_temp_new_i64();
            gen_helper_vfp_fcvt_f16_to_f64(tcg_rd, tcg_rn, tcg_fpst, tcg_ahp);
            write_fp_dreg(s, rd, tcg_rd);
            tcg_temp_free_i64(tcg_rd);
        }
        tcg_temp_free_i32(tcg_rn);
        break;
    }
    default:
        abort();
    }
}
5209
5210
5211
5212
5213
5214
5215
/* Floating point data-processing (1 source): dispatch by opcode and
 * operand type (0 = single, 1 = double, 3 = half).
 */
static void disas_fp_1src(DisasContext *s, uint32_t insn)
{
    int type = extract32(insn, 22, 2);
    int opcode = extract32(insn, 15, 6);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    switch (opcode) {
    case 0x4: case 0x5: case 0x7:
    {
        /* FCVT between half, single and double precision */
        int dtype = extract32(opcode, 0, 2);
        /* Converting a type to itself (or type 2) is unallocated. */
        if (type == 2 || dtype == type) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }

        handle_fp_fcvt(s, opcode, rd, rn, dtype, type);
        break;
    }
    case 0x0 ... 0x3:
    case 0x8 ... 0xc:
    case 0xe ... 0xf:
        /* FMOV, FABS, FNEG, FSQRT and the FRINT* family */
        switch (type) {
        case 0:
            if (!fp_access_check(s)) {
                return;
            }

            handle_fp_1src_single(s, opcode, rd, rn);
            break;
        case 1:
            if (!fp_access_check(s)) {
                return;
            }

            handle_fp_1src_double(s, opcode, rd, rn);
            break;
        case 3:
            if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
                unallocated_encoding(s);
                return;
            }

            if (!fp_access_check(s)) {
                return;
            }

            handle_fp_1src_half(s, opcode, rd, rn);
            break;
        default:
            unallocated_encoding(s);
        }
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
5279
5280
/* Floating-point data-processing (2 source) - single precision */
static void handle_fp_2src_single(DisasContext *s, int opcode,
                                  int rd, int rn, int rm)
{
    TCGv_i32 tcg_op1;
    TCGv_i32 tcg_op2;
    TCGv_i32 tcg_res;
    TCGv_ptr fpst;

    tcg_res = tcg_temp_new_i32();
    fpst = get_fpstatus_ptr(false);
    tcg_op1 = read_fp_sreg(s, rn);
    tcg_op2 = read_fp_sreg(s, rm);

    switch (opcode) {
    case 0x0: /* FMUL */
        gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x1: /* FDIV */
        gen_helper_vfp_divs(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x2: /* FADD */
        gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x3: /* FSUB */
        gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x4: /* FMAX */
        gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x5: /* FMIN */
        gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x6: /* FMAXNM */
        gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x7: /* FMINNM */
        gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x8: /* FNMUL: multiply then negate the product */
        gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
        gen_helper_vfp_negs(tcg_res, tcg_res);
        break;
    }

    write_fp_sreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tcg_op1);
    tcg_temp_free_i32(tcg_op2);
    tcg_temp_free_i32(tcg_res);
}
5332
5333
/* Floating-point data-processing (2 source) - double precision */
static void handle_fp_2src_double(DisasContext *s, int opcode,
                                  int rd, int rn, int rm)
{
    TCGv_i64 tcg_op1;
    TCGv_i64 tcg_op2;
    TCGv_i64 tcg_res;
    TCGv_ptr fpst;

    tcg_res = tcg_temp_new_i64();
    fpst = get_fpstatus_ptr(false);
    tcg_op1 = read_fp_dreg(s, rn);
    tcg_op2 = read_fp_dreg(s, rm);

    switch (opcode) {
    case 0x0: /* FMUL */
        gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x1: /* FDIV */
        gen_helper_vfp_divd(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x2: /* FADD */
        gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x3: /* FSUB */
        gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x4: /* FMAX */
        gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x5: /* FMIN */
        gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x6: /* FMAXNM */
        gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x7: /* FMINNM */
        gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x8: /* FNMUL: multiply then negate the product */
        gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
        gen_helper_vfp_negd(tcg_res, tcg_res);
        break;
    }

    write_fp_dreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(tcg_op1);
    tcg_temp_free_i64(tcg_op2);
    tcg_temp_free_i64(tcg_res);
}
5385
5386
/* Floating-point data-processing (2 source) - half precision */
static void handle_fp_2src_half(DisasContext *s, int opcode,
                                int rd, int rn, int rm)
{
    TCGv_i32 tcg_op1;
    TCGv_i32 tcg_op2;
    TCGv_i32 tcg_res;
    TCGv_ptr fpst;

    tcg_res = tcg_temp_new_i32();
    fpst = get_fpstatus_ptr(true);
    tcg_op1 = read_fp_hreg(s, rn);
    tcg_op2 = read_fp_hreg(s, rm);

    switch (opcode) {
    case 0x0: /* FMUL */
        gen_helper_advsimd_mulh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x1: /* FDIV */
        gen_helper_advsimd_divh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x2: /* FADD */
        gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x3: /* FSUB */
        gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x4: /* FMAX */
        gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x5: /* FMIN */
        gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x6: /* FMAXNM */
        gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x7: /* FMINNM */
        gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x8: /* FNMUL: multiply then flip the f16 sign bit */
        gen_helper_advsimd_mulh(tcg_res, tcg_op1, tcg_op2, fpst);
        tcg_gen_xori_i32(tcg_res, tcg_res, 0x8000);
        break;
    default:
        g_assert_not_reached();
    }

    write_fp_sreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tcg_op1);
    tcg_temp_free_i32(tcg_op2);
    tcg_temp_free_i32(tcg_res);
}
5440
5441
5442
5443
5444
5445
5446
/* Floating point data-processing (2 source): dispatch by operand type
 * (0 = single, 1 = double, 3 = half).
 */
static void disas_fp_2src(DisasContext *s, uint32_t insn)
{
    int type = extract32(insn, 22, 2);
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rm = extract32(insn, 16, 5);
    int opcode = extract32(insn, 12, 4);

    /* Only opcodes 0..8 (FMUL..FNMUL) are allocated. */
    if (opcode > 8) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0:
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_2src_single(s, opcode, rd, rn, rm);
        break;
    case 1:
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_2src_double(s, opcode, rd, rn, rm);
        break;
    case 3:
        if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_2src_half(s, opcode, rd, rn, rm);
        break;
    default:
        unallocated_encoding(s);
    }
}
5487
5488
/* Floating-point data-processing (3 source) - single precision.
 * o1 negates the addend (ra), o0 != o1 negates the product operand,
 * selecting FMADD/FMSUB/FNMADD/FNMSUB from a single fused muladd.
 */
static void handle_fp_3src_single(DisasContext *s, bool o0, bool o1,
                                  int rd, int rn, int rm, int ra)
{
    TCGv_i32 tcg_op1, tcg_op2, tcg_op3;
    TCGv_i32 tcg_res = tcg_temp_new_i32();
    TCGv_ptr fpst = get_fpstatus_ptr(false);

    tcg_op1 = read_fp_sreg(s, rn);
    tcg_op2 = read_fp_sreg(s, rm);
    tcg_op3 = read_fp_sreg(s, ra);

    /* The negations are applied to the inputs before the fused
     * multiply-add, so NaN propagation follows the muladd helper.
     */
    if (o1 == true) {
        gen_helper_vfp_negs(tcg_op3, tcg_op3);
    }

    if (o0 != o1) {
        gen_helper_vfp_negs(tcg_op1, tcg_op1);
    }

    gen_helper_vfp_muladds(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);

    write_fp_sreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tcg_op1);
    tcg_temp_free_i32(tcg_op2);
    tcg_temp_free_i32(tcg_op3);
    tcg_temp_free_i32(tcg_res);
}
5525
5526
/* Floating-point data-processing (3 source) - double precision.
 * o1 negates the addend (ra), o0 != o1 negates the product operand,
 * selecting FMADD/FMSUB/FNMADD/FNMSUB from a single fused muladd.
 */
static void handle_fp_3src_double(DisasContext *s, bool o0, bool o1,
                                  int rd, int rn, int rm, int ra)
{
    TCGv_i64 tcg_op1, tcg_op2, tcg_op3;
    TCGv_i64 tcg_res = tcg_temp_new_i64();
    TCGv_ptr fpst = get_fpstatus_ptr(false);

    tcg_op1 = read_fp_dreg(s, rn);
    tcg_op2 = read_fp_dreg(s, rm);
    tcg_op3 = read_fp_dreg(s, ra);

    /* The negations are applied to the inputs before the fused
     * multiply-add, so NaN propagation follows the muladd helper.
     */
    if (o1 == true) {
        gen_helper_vfp_negd(tcg_op3, tcg_op3);
    }

    if (o0 != o1) {
        gen_helper_vfp_negd(tcg_op1, tcg_op1);
    }

    gen_helper_vfp_muladdd(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);

    write_fp_dreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(tcg_op1);
    tcg_temp_free_i64(tcg_op2);
    tcg_temp_free_i64(tcg_op3);
    tcg_temp_free_i64(tcg_res);
}
5563
5564
/* Floating-point data-processing (3 source) - half precision.
 * o1 negates the addend (ra), o0 != o1 negates the product operand,
 * selecting FMADD/FMSUB/FNMADD/FNMSUB from a single fused muladd.
 */
static void handle_fp_3src_half(DisasContext *s, bool o0, bool o1,
                                int rd, int rn, int rm, int ra)
{
    TCGv_i32 tcg_op1, tcg_op2, tcg_op3;
    TCGv_i32 tcg_res = tcg_temp_new_i32();
    TCGv_ptr fpst = get_fpstatus_ptr(true);

    tcg_op1 = read_fp_hreg(s, rn);
    tcg_op2 = read_fp_hreg(s, rm);
    tcg_op3 = read_fp_hreg(s, ra);

    /* f16 negation is an explicit sign-bit flip; it is applied to the
     * inputs before the fused multiply-add.
     */
    if (o1 == true) {
        tcg_gen_xori_i32(tcg_op3, tcg_op3, 0x8000);
    }

    if (o0 != o1) {
        tcg_gen_xori_i32(tcg_op1, tcg_op1, 0x8000);
    }

    gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);

    write_fp_sreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tcg_op1);
    tcg_temp_free_i32(tcg_op2);
    tcg_temp_free_i32(tcg_op3);
    tcg_temp_free_i32(tcg_res);
}
5601
5602
5603
5604
5605
5606
5607
/* Floating point data-processing (3 source): FMADD/FMSUB/FNMADD/FNMSUB,
 * dispatched by operand type (0 = single, 1 = double, 3 = half).
 */
static void disas_fp_3src(DisasContext *s, uint32_t insn)
{
    int type = extract32(insn, 22, 2);
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int ra = extract32(insn, 10, 5);
    int rm = extract32(insn, 16, 5);
    bool o0 = extract32(insn, 15, 1);
    bool o1 = extract32(insn, 21, 1);

    switch (type) {
    case 0:
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_3src_single(s, o0, o1, rd, rn, rm, ra);
        break;
    case 1:
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_3src_double(s, o0, o1, rd, rn, rm, ra);
        break;
    case 3:
        if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_3src_half(s, o0, o1, rd, rn, rm, ra);
        break;
    default:
        unallocated_encoding(s);
    }
}
5645
5646
5647
5648
5649
/* Expand an 8-bit VFP immediate (abcdefgh) to a full IEEE value of the
 * requested size: sign = a, exponent = NOT(b):replicate(b):cd,
 * fraction = efgh:zeros (the Arm ARM VFPExpandImm() pseudocode).
 */
uint64_t vfp_expand_imm(int size, uint8_t imm8)
{
    uint64_t imm;

    switch (size) {
    case MO_64:
        /* Build the top 16 bits, then shift into place. */
        imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
            (extract32(imm8, 6, 1) ? 0x3fc0 : 0x4000) |
            extract32(imm8, 0, 6);
        imm <<= 48;
        break;
    case MO_32:
        imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
            (extract32(imm8, 6, 1) ? 0x3e00 : 0x4000) |
            (extract32(imm8, 0, 6) << 3);
        imm <<= 16;
        break;
    case MO_16:
        imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
            (extract32(imm8, 6, 1) ? 0x3000 : 0x4000) |
            (extract32(imm8, 0, 6) << 6);
        break;
    default:
        g_assert_not_reached();
    }
    return imm;
}
5677
5678
5679
5680
5681
5682
5683
/* Floating point immediate: FMOV (scalar, immediate). The 8-bit
 * immediate is expanded to the target precision and written to Vd.
 */
static void disas_fp_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int imm8 = extract32(insn, 13, 8);
    int type = extract32(insn, 22, 2);
    uint64_t imm;
    TCGv_i64 tcg_res;
    TCGMemOp sz;

    switch (type) {
    case 0:
        sz = MO_32;
        break;
    case 1:
        sz = MO_64;
        break;
    case 3:
        sz = MO_16;
        if (arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
            break;
        }
        /* fallthrough: half-precision without FP16 is unallocated */
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    imm = vfp_expand_imm(sz, imm8);

    /* write_fp_dreg zeroes the high parts of the vector register. */
    tcg_res = tcg_const_i64(imm);
    write_fp_dreg(s, rd, tcg_res);
    tcg_temp_free_i64(tcg_res);
}
5721
5722
5723
5724
5725
5726
/* Handle fp <-> fixed point conversions. Callers also use this for
 * fp <-> integer conversions by passing scale == 64, in which case the
 * shift amount (64 - scale) is zero and rmode supplies the rounding mode.
 *   itof: integer-to-float if true, float-to-integer otherwise
 *   sf:   64-bit general register if set, 32-bit otherwise
 *   type: 0 = single, 1 = double, 3 = half precision
 */
static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode,
                           bool itof, int rmode, int scale, int sf, int type)
{
    bool is_signed = !(opcode & 1);
    TCGv_ptr tcg_fpstatus;
    TCGv_i32 tcg_shift, tcg_single;
    TCGv_i64 tcg_double;

    /* half-precision (type == 3) uses the FP16 flavour of fp_status */
    tcg_fpstatus = get_fpstatus_ptr(type == 3);

    tcg_shift = tcg_const_i32(64 - scale);

    if (itof) {
        /* [SU]CVTF: integer to float */
        TCGv_i64 tcg_int = cpu_reg(s, rn);
        if (!sf) {
            /* 32-bit source: widen to 64 bits first */
            TCGv_i64 tcg_extend = new_tmp_a64(s);

            if (is_signed) {
                tcg_gen_ext32s_i64(tcg_extend, tcg_int);
            } else {
                tcg_gen_ext32u_i64(tcg_extend, tcg_int);
            }

            tcg_int = tcg_extend;
        }

        switch (type) {
        case 1: /* double */
            tcg_double = tcg_temp_new_i64();
            if (is_signed) {
                gen_helper_vfp_sqtod(tcg_double, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_uqtod(tcg_double, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            }
            write_fp_dreg(s, rd, tcg_double);
            tcg_temp_free_i64(tcg_double);
            break;

        case 0: /* single */
            tcg_single = tcg_temp_new_i32();
            if (is_signed) {
                gen_helper_vfp_sqtos(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_uqtos(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            }
            write_fp_sreg(s, rd, tcg_single);
            tcg_temp_free_i32(tcg_single);
            break;

        case 3: /* half precision */
            tcg_single = tcg_temp_new_i32();
            if (is_signed) {
                gen_helper_vfp_sqtoh(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_uqtoh(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            }
            write_fp_sreg(s, rd, tcg_single);
            tcg_temp_free_i32(tcg_single);
            break;

        default:
            g_assert_not_reached();
        }
    } else {
        /* FCVT*: float to integer */
        TCGv_i64 tcg_int = cpu_reg(s, rd);
        TCGv_i32 tcg_rmode;

        if (extract32(opcode, 2, 1)) {
            /* There are too many rounding modes to all fit into rmode,
             * so FCVTA[US] is a special case.
             */
            rmode = FPROUNDING_TIEAWAY;
        }

        tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));

        /* temporarily install the requested rounding mode */
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);

        switch (type) {
        case 1: /* double */
            tcg_double = read_fp_dreg(s, rn);
            if (is_signed) {
                if (!sf) {
                    gen_helper_vfp_tosld(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_tosqd(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                }
            } else {
                if (!sf) {
                    gen_helper_vfp_tould(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touqd(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                }
            }
            if (!sf) {
                /* 32-bit result: clear the top half of Xd */
                tcg_gen_ext32u_i64(tcg_int, tcg_int);
            }
            tcg_temp_free_i64(tcg_double);
            break;

        case 0: /* single */
            tcg_single = read_fp_sreg(s, rn);
            if (sf) {
                if (is_signed) {
                    gen_helper_vfp_tosqs(tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touqs(tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
            } else {
                TCGv_i32 tcg_dest = tcg_temp_new_i32();
                if (is_signed) {
                    gen_helper_vfp_tosls(tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touls(tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
                tcg_gen_extu_i32_i64(tcg_int, tcg_dest);
                tcg_temp_free_i32(tcg_dest);
            }
            tcg_temp_free_i32(tcg_single);
            break;

        case 3: /* half precision */
            tcg_single = read_fp_sreg(s, rn);
            if (sf) {
                if (is_signed) {
                    gen_helper_vfp_tosqh(tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touqh(tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
            } else {
                TCGv_i32 tcg_dest = tcg_temp_new_i32();
                if (is_signed) {
                    gen_helper_vfp_toslh(tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_toulh(tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
                tcg_gen_extu_i32_i64(tcg_int, tcg_dest);
                tcg_temp_free_i32(tcg_dest);
            }
            tcg_temp_free_i32(tcg_single);
            break;

        default:
            g_assert_not_reached();
        }

        /* restore the previous rounding mode */
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
        tcg_temp_free_i32(tcg_rmode);
    }

    tcg_temp_free_ptr(tcg_fpstatus);
    tcg_temp_free_i32(tcg_shift);
}
5898
5899
5900
5901
5902
5903
5904
5905static void disas_fp_fixed_conv(DisasContext *s, uint32_t insn)
5906{
5907 int rd = extract32(insn, 0, 5);
5908 int rn = extract32(insn, 5, 5);
5909 int scale = extract32(insn, 10, 6);
5910 int opcode = extract32(insn, 16, 3);
5911 int rmode = extract32(insn, 19, 2);
5912 int type = extract32(insn, 22, 2);
5913 bool sbit = extract32(insn, 29, 1);
5914 bool sf = extract32(insn, 31, 1);
5915 bool itof;
5916
5917 if (sbit || (!sf && scale < 32)) {
5918 unallocated_encoding(s);
5919 return;
5920 }
5921
5922 switch (type) {
5923 case 0:
5924 case 1:
5925 break;
5926 case 3:
5927 if (arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
5928 break;
5929 }
5930
5931 default:
5932 unallocated_encoding(s);
5933 return;
5934 }
5935
5936 switch ((rmode << 3) | opcode) {
5937 case 0x2:
5938 case 0x3:
5939 itof = true;
5940 break;
5941 case 0x18:
5942 case 0x19:
5943 itof = false;
5944 break;
5945 default:
5946 unallocated_encoding(s);
5947 return;
5948 }
5949
5950 if (!fp_access_check(s)) {
5951 return;
5952 }
5953
5954 handle_fpfpcvt(s, rd, rn, opcode, itof, FPROUNDING_ZERO, scale, sf, type);
5955}
5956
static void handle_fmov(DisasContext *s, int rd, int rn, int type, bool itof)
{
    /* FMOV: gpr to or from fp reg (type 0: 32 bit, type 1: 64 bit,
     * type 2: 64 bit to top half of quad, type 3: 16 bit),
     * without conversion.
     */
    if (itof) {
        TCGv_i64 tcg_rn = cpu_reg(s, rn);
        TCGv_i64 tmp;

        switch (type) {
        case 0:
            /* 32 bit */
            tmp = tcg_temp_new_i64();
            tcg_gen_ext32u_i64(tmp, tcg_rn);
            write_fp_dreg(s, rd, tmp);
            tcg_temp_free_i64(tmp);
            break;
        case 1:
            /* 64 bit */
            write_fp_dreg(s, rd, tcg_rn);
            break;
        case 2:
            /* 64 bit to top half of quad */
            tcg_gen_st_i64(tcg_rn, cpu_env, fp_reg_hi_offset(s, rd));
            clear_vec_high(s, true, rd);
            break;
        case 3:
            /* 16 bit */
            tmp = tcg_temp_new_i64();
            tcg_gen_ext16u_i64(tmp, tcg_rn);
            write_fp_dreg(s, rd, tmp);
            tcg_temp_free_i64(tmp);
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        TCGv_i64 tcg_rd = cpu_reg(s, rd);

        switch (type) {
        case 0:
            /* 32 bit */
            tcg_gen_ld32u_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_32));
            break;
        case 1:
            /* 64 bit */
            tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_64));
            break;
        case 2:
            /* 64 bits from top half of quad */
            tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_hi_offset(s, rn));
            break;
        case 3:
            /* 16 bit */
            tcg_gen_ld16u_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_16));
            break;
        default:
            g_assert_not_reached();
        }
    }
}
6019
6020
6021
6022
6023
6024
6025
/* Floating point <-> integer conversions: FMOV (register forms) plus
 * the [SU]CVTF / FCVT* family via handle_fpfpcvt with scale == 64.
 */
static void disas_fp_int_conv(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 16, 3);
    int rmode = extract32(insn, 19, 2);
    int type = extract32(insn, 22, 2);
    bool sbit = extract32(insn, 29, 1);
    bool sf = extract32(insn, 31, 1);

    if (sbit) {
        unallocated_encoding(s);
        return;
    }

    if (opcode > 5) {
        /* FMOV */
        bool itof = opcode & 1;

        if (rmode >= 2) {
            unallocated_encoding(s);
            return;
        }

        switch (sf << 3 | type << 1 | rmode) {
        case 0x0: /* 32 bit */
        case 0xa: /* 64 bit */
        case 0xd: /* 64 bit to top half of quad */
            break;
        case 0x6: /* 16-bit float, 32-bit int */
        case 0xe: /* 16-bit float, 64-bit int */
            if (arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
                break;
            }
            /* fallthru */
        default:
            /* all other sf/type/rmode combinations are invalid */
            unallocated_encoding(s);
            return;
        }

        if (!fp_access_check(s)) {
            return;
        }
        handle_fmov(s, rd, rn, type, itof);
    } else {
        /* actual FP conversions */
        bool itof = extract32(opcode, 1, 1);

        if (rmode != 0 && opcode > 1) {
            unallocated_encoding(s);
            return;
        }
        switch (type) {
        case 0: /* single */
        case 1: /* double */
            break;
        case 3: /* half precision, needs FP16 */
            if (arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
                break;
            }
            /* fallthru */
        default:
            unallocated_encoding(s);
            return;
        }

        if (!fp_access_check(s)) {
            return;
        }
        handle_fpfpcvt(s, rd, rn, opcode, itof, rmode, 64, sf, type);
    }
}
6099
6100
6101
6102
6103
6104
6105
/* Top-level decode for the floating point data-processing group */
static void disas_data_proc_fp(DisasContext *s, uint32_t insn)
{
    if (extract32(insn, 24, 1)) {
        /* Floating point data-processing (3 source) */
        disas_fp_3src(s, insn);
    } else if (extract32(insn, 21, 1) == 0) {
        /* Floating point to fixed point conversions */
        disas_fp_fixed_conv(s, insn);
    } else {
        switch (extract32(insn, 10, 2)) {
        case 1:
            /* Floating point conditional compare */
            disas_fp_ccomp(s, insn);
            break;
        case 2:
            /* Floating point data-processing (2 source) */
            disas_fp_2src(s, insn);
            break;
        case 3:
            /* Floating point conditional select */
            disas_fp_csel(s, insn);
            break;
        case 0:
            /* decode on the position of the lowest set bit of [15:12] */
            switch (ctz32(extract32(insn, 12, 4))) {
            case 0: /* [15:12] == xxx1 */
                /* Floating point immediate */
                disas_fp_imm(s, insn);
                break;
            case 1: /* [15:12] == xx10 */
                /* Floating point compare */
                disas_fp_compare(s, insn);
                break;
            case 2: /* [15:12] == x100 */
                /* Floating point data-processing (1 source) */
                disas_fp_1src(s, insn);
                break;
            case 3: /* [15:12] == 1000 */
                unallocated_encoding(s);
                break;
            default: /* [15:12] == 0000: ctz32(0) == 32 */
                /* Floating point <-> integer conversions */
                disas_fp_int_conv(s, insn);
                break;
            }
            break;
        }
    }
}
6154
6155static void do_ext64(DisasContext *s, TCGv_i64 tcg_left, TCGv_i64 tcg_right,
6156 int pos)
6157{
6158
6159
6160
6161
6162
6163
6164 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
6165 assert(pos > 0 && pos < 64);
6166
6167 tcg_gen_shri_i64(tcg_right, tcg_right, pos);
6168 tcg_gen_shli_i64(tcg_tmp, tcg_left, 64 - pos);
6169 tcg_gen_or_i64(tcg_right, tcg_right, tcg_tmp);
6170
6171 tcg_temp_free_i64(tcg_tmp);
6172}
6173
6174
6175
6176
6177
6178
6179
/* EXT: extract a byte-aligned span from the concatenation of Vm:Vn */
static void disas_simd_ext(DisasContext *s, uint32_t insn)
{
    int is_q = extract32(insn, 30, 1);
    int op2 = extract32(insn, 22, 2);
    int imm4 = extract32(insn, 11, 4);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    int pos = imm4 << 3; /* starting bit position = byte index * 8 */
    TCGv_i64 tcg_resl, tcg_resh;

    /* imm4<3> must be zero for the 64-bit variant */
    if (op2 != 0 || (!is_q && extract32(imm4, 3, 1))) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_resh = tcg_temp_new_i64();
    tcg_resl = tcg_temp_new_i64();

    /* Vd gets bits starting at pos bits into Vm:Vn. This is
     * either extracting 128 bits from a 128:128 concatenation, or
     * extracting 64 bits from a 64:64 concatenation.
     */
    if (!is_q) {
        read_vec_element(s, tcg_resl, rn, 0, MO_64);
        if (pos != 0) {
            read_vec_element(s, tcg_resh, rm, 0, MO_64);
            do_ext64(s, tcg_resh, tcg_resl, pos);
        }
        /* high half of the result is always zero for the 64-bit form */
        tcg_gen_movi_i64(tcg_resh, 0);
    } else {
        TCGv_i64 tcg_hh;
        typedef struct {
            int reg;
            int elt;
        } EltPosns;
        /* the four 64-bit source slices, in ascending significance */
        EltPosns eltposns[] = { {rn, 0}, {rn, 1}, {rm, 0}, {rm, 1} };
        EltPosns *elt = eltposns;

        if (pos >= 64) {
            /* result starts in Vn's high half; skip one slice */
            elt++;
            pos -= 64;
        }

        read_vec_element(s, tcg_resl, elt->reg, elt->elt, MO_64);
        elt++;
        read_vec_element(s, tcg_resh, elt->reg, elt->elt, MO_64);
        elt++;
        if (pos != 0) {
            /* not slice-aligned: shift in bits from the next slice up */
            do_ext64(s, tcg_resh, tcg_resl, pos);
            tcg_hh = tcg_temp_new_i64();
            read_vec_element(s, tcg_hh, elt->reg, elt->elt, MO_64);
            do_ext64(s, tcg_hh, tcg_resh, pos);
            tcg_temp_free_i64(tcg_hh);
        }
    }

    write_vec_element(s, tcg_resl, rd, 0, MO_64);
    tcg_temp_free_i64(tcg_resl);
    write_vec_element(s, tcg_resh, rd, 1, MO_64);
    tcg_temp_free_i64(tcg_resh);
}
6246
6247
6248
6249
6250
6251
6252
/* TBL/TBX: table-based byte permute using up to four table registers */
static void disas_simd_tb(DisasContext *s, uint32_t insn)
{
    int op2 = extract32(insn, 22, 2);
    int is_q = extract32(insn, 30, 1);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    int is_tblx = extract32(insn, 12, 1);
    int len = extract32(insn, 13, 2); /* number of table registers - 1 */
    TCGv_i64 tcg_resl, tcg_resh, tcg_idx;
    TCGv_i32 tcg_regno, tcg_numregs;

    if (op2 != 0) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* This does a table lookup: for every byte element in the index
     * value, we index into a table formed from up to four vector
     * registers, and then the output is the result of the lookups.
     * Our helper function does the lookup operation for a single
     * 64 bit part of the value.
     */
    tcg_resl = tcg_temp_new_i64();
    tcg_resh = tcg_temp_new_i64();

    /* TBX starts from the existing destination value, TBL from zero */
    if (is_tblx) {
        read_vec_element(s, tcg_resl, rd, 0, MO_64);
    } else {
        tcg_gen_movi_i64(tcg_resl, 0);
    }
    if (is_tblx && is_q) {
        read_vec_element(s, tcg_resh, rd, 1, MO_64);
    } else {
        tcg_gen_movi_i64(tcg_resh, 0);
    }

    tcg_idx = tcg_temp_new_i64();
    tcg_regno = tcg_const_i32(rn);
    tcg_numregs = tcg_const_i32(len + 1);
    read_vec_element(s, tcg_idx, rm, 0, MO_64);
    gen_helper_simd_tbl(tcg_resl, cpu_env, tcg_resl, tcg_idx,
                        tcg_regno, tcg_numregs);
    if (is_q) {
        read_vec_element(s, tcg_idx, rm, 1, MO_64);
        gen_helper_simd_tbl(tcg_resh, cpu_env, tcg_resh, tcg_idx,
                            tcg_regno, tcg_numregs);
    }
    tcg_temp_free_i64(tcg_idx);
    tcg_temp_free_i32(tcg_regno);
    tcg_temp_free_i32(tcg_numregs);

    write_vec_element(s, tcg_resl, rd, 0, MO_64);
    tcg_temp_free_i64(tcg_resl);
    write_vec_element(s, tcg_resh, rd, 1, MO_64);
    tcg_temp_free_i64(tcg_resh);
}
6314
6315
6316
6317
6318
6319
6320
/* ZIP/UZP/TRN: element interleave / de-interleave / transpose */
static void disas_simd_zip_trn(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    /* opc field bits [13:12] select UZP (1) / TRN (2) / ZIP (3);
     * bit [14] selects the "1" vs "2" variant of the insn.
     */
    int opcode = extract32(insn, 12, 2);
    bool part = extract32(insn, 14, 1);
    bool is_q = extract32(insn, 30, 1);
    int esize = 8 << size;
    int i, ofs;
    int datasize = is_q ? 128 : 64;
    int elements = datasize / esize;
    TCGv_i64 tcg_res, tcg_resl, tcg_resh;

    /* opcode 0 is unallocated; 64-bit elements need the 128-bit form */
    if (opcode == 0 || (size == 3 && !is_q)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_resl = tcg_const_i64(0);
    tcg_resh = tcg_const_i64(0);
    tcg_res = tcg_temp_new_i64();

    /* For each result element, pick the appropriate source element
     * from Vn or Vm, then OR it into place in the low/high result word.
     */
    for (i = 0; i < elements; i++) {
        switch (opcode) {
        case 1: /* UZP1/2 */
        {
            int midpoint = elements / 2;
            if (i < midpoint) {
                read_vec_element(s, tcg_res, rn, 2 * i + part, size);
            } else {
                read_vec_element(s, tcg_res, rm,
                                 2 * (i - midpoint) + part, size);
            }
            break;
        }
        case 2: /* TRN1/2 */
            if (i & 1) {
                read_vec_element(s, tcg_res, rm, (i & ~1) + part, size);
            } else {
                read_vec_element(s, tcg_res, rn, (i & ~1) + part, size);
            }
            break;
        case 3: /* ZIP1/2 */
        {
            int base = part * elements / 2;
            if (i & 1) {
                read_vec_element(s, tcg_res, rm, base + (i >> 1), size);
            } else {
                read_vec_element(s, tcg_res, rn, base + (i >> 1), size);
            }
            break;
        }
        default:
            g_assert_not_reached();
        }

        ofs = i * esize;
        if (ofs < 64) {
            tcg_gen_shli_i64(tcg_res, tcg_res, ofs);
            tcg_gen_or_i64(tcg_resl, tcg_resl, tcg_res);
        } else {
            tcg_gen_shli_i64(tcg_res, tcg_res, ofs - 64);
            tcg_gen_or_i64(tcg_resh, tcg_resh, tcg_res);
        }
    }

    tcg_temp_free_i64(tcg_res);

    write_vec_element(s, tcg_resl, rd, 0, MO_64);
    tcg_temp_free_i64(tcg_resl);
    write_vec_element(s, tcg_resh, rd, 1, MO_64);
    tcg_temp_free_i64(tcg_resh);
}
6403
6404
6405
6406
6407
6408
6409
6410
6411
6412
6413
/* Recursively reduce the elements of Vn selected by the bitmap 'vmap'
 * using the FP op selected by fpopcode. 'size' is the width in bits of
 * the slice being reduced and 'esize' the element width; each level
 * splits vmap into low/high halves and combines the two sub-results,
 * so the pairwise combination order (relevant for NaN propagation)
 * is fixed. The caller frees the returned TCGv_i32.
 */
static TCGv_i32 do_reduction_op(DisasContext *s, int fpopcode, int rn,
                                int esize, int size, int vmap, TCGv_ptr fpst)
{
    if (esize == size) {
        int element;
        TCGMemOp msize = esize == 16 ? MO_16 : MO_32;
        TCGv_i32 tcg_elem;

        /* Base case: a single element left, just load and return it */
        assert(ctpop8(vmap) == 1);
        element = ctz32(vmap);
        assert(element < 8);

        tcg_elem = tcg_temp_new_i32();
        read_vec_element_i32(s, tcg_elem, rn, element, msize);
        return tcg_elem;
    } else {
        int bits = size / 2;
        int shift = ctpop8(vmap) / 2;
        int vmap_lo = (vmap >> shift) & vmap;
        int vmap_hi = (vmap & ~vmap_lo);
        TCGv_i32 tcg_hi, tcg_lo, tcg_res;

        tcg_hi = do_reduction_op(s, fpopcode, rn, esize, bits, vmap_hi, fpst);
        tcg_lo = do_reduction_op(s, fpopcode, rn, esize, bits, vmap_lo, fpst);
        tcg_res = tcg_temp_new_i32();

        switch (fpopcode) {
        case 0x0c: /* fmaxnmv half-precision */
            gen_helper_advsimd_maxnumh(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x0f: /* fmaxv half-precision */
            gen_helper_advsimd_maxh(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x1c: /* fminnmv half-precision */
            gen_helper_advsimd_minnumh(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x1f: /* fminv half-precision */
            gen_helper_advsimd_minh(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x2c: /* fmaxnmv */
            gen_helper_vfp_maxnums(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x2f: /* fmaxv */
            gen_helper_vfp_maxs(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x3c: /* fminnmv */
            gen_helper_vfp_minnums(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x3f: /* fminv */
            gen_helper_vfp_mins(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        default:
            g_assert_not_reached();
        }

        tcg_temp_free_i32(tcg_hi);
        tcg_temp_free_i32(tcg_lo);
        return tcg_res;
    }
}
6475
6476
6477
6478
6479
6480
6481
/* AdvSIMD across lanes: reduce all elements of Vn to a single scalar
 * result in Vd (ADDV, [SU]ADDLV, [SU]MAXV, [SU]MINV, FMAX*V, FMIN*V).
 */
static void disas_simd_across_lanes(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    bool is_q = extract32(insn, 30, 1);
    bool is_u = extract32(insn, 29, 1);
    bool is_fp = false;
    bool is_min = false;
    int esize;
    int elements;
    int i;
    TCGv_i64 tcg_res, tcg_elt;

    switch (opcode) {
    case 0x1b: /* ADDV */
        if (is_u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x3: /* SADDLV, UADDLV */
    case 0xa: /* SMAXV, UMAXV */
    case 0x1a: /* SMINV, UMINV */
        if (size == 3 || (size == 2 && !is_q)) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0xc: /* FMAXNMV, FMINNMV */
    case 0xf: /* FMAXV, FMINV */
        /* Bit 1 of the size field selects min vs max; the element size
         * is half-precision (when !is_u and FP16 is available) or
         * single precision (which requires is_u, is_q and size<0> clear).
         */
        is_min = extract32(size, 1, 1);
        is_fp = true;
        if (!is_u && arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
            size = 1;
        } else if (!is_u || !is_q || extract32(size, 0, 1)) {
            unallocated_encoding(s);
            return;
        } else {
            size = 2;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    esize = 8 << size;
    elements = (is_q ? 128 : 64) / esize;

    tcg_res = tcg_temp_new_i64();
    tcg_elt = tcg_temp_new_i64();

    /* These instructions operate across all lanes of a vector
     * to produce a single result. A 64-bit intermediate is always
     * sufficient: for [US]ADDLV the max element size is 32 bits and
     * the result is 64 bits; for the other ops the intermediate is
     * the element size, at most 32 bits. For simplicity the integer
     * ops work at 64 bits and truncate at the end. The FP ops use
     * a recursive reduction so NaN propagation follows the
     * architectural combination order.
     */
    if (!is_fp) {
        read_vec_element(s, tcg_res, rn, 0, size | (is_u ? 0 : MO_SIGN));

        for (i = 1; i < elements; i++) {
            read_vec_element(s, tcg_elt, rn, i, size | (is_u ? 0 : MO_SIGN));

            switch (opcode) {
            case 0x03: /* SADDLV / UADDLV */
            case 0x1b: /* ADDV */
                tcg_gen_add_i64(tcg_res, tcg_res, tcg_elt);
                break;
            case 0x0a: /* SMAXV / UMAXV */
                if (is_u) {
                    tcg_gen_umax_i64(tcg_res, tcg_res, tcg_elt);
                } else {
                    tcg_gen_smax_i64(tcg_res, tcg_res, tcg_elt);
                }
                break;
            case 0x1a: /* SMINV / UMINV */
                if (is_u) {
                    tcg_gen_umin_i64(tcg_res, tcg_res, tcg_elt);
                } else {
                    tcg_gen_smin_i64(tcg_res, tcg_res, tcg_elt);
                }
                break;
            default:
                g_assert_not_reached();
            }

        }
    } else {
        /* Floating point reduction: delegate to the recursive helper
         * with a bitmap covering all the elements.
         */
        TCGv_ptr fpst = get_fpstatus_ptr(size == MO_16);
        int fpopcode = opcode | is_min << 4 | is_u << 5;
        int vmap = (1 << elements) - 1;
        TCGv_i32 tcg_res32 = do_reduction_op(s, fpopcode, rn, esize,
                                             (is_q ? 128 : 64), vmap, fpst);
        tcg_gen_extu_i32_i64(tcg_res, tcg_res32);
        tcg_temp_free_i32(tcg_res32);
        tcg_temp_free_ptr(fpst);
    }

    tcg_temp_free_i64(tcg_elt);

    /* Now truncate the result to the width required for the output */
    if (opcode == 0x03) {
        /* SADDLV, UADDLV: result is 2 * esize */
        size++;
    }

    switch (size) {
    case 0:
        tcg_gen_ext8u_i64(tcg_res, tcg_res);
        break;
    case 1:
        tcg_gen_ext16u_i64(tcg_res, tcg_res);
        break;
    case 2:
        tcg_gen_ext32u_i64(tcg_res, tcg_res);
        break;
    case 3:
        break;
    default:
        g_assert_not_reached();
    }

    write_fp_dreg(s, rd, tcg_res);
    tcg_temp_free_i64(tcg_res);
}
6630
6631
6632
6633
6634
6635
6636
6637
6638
6639
6640static void handle_simd_dupe(DisasContext *s, int is_q, int rd, int rn,
6641 int imm5)
6642{
6643 int size = ctz32(imm5);
6644 int index = imm5 >> (size + 1);
6645
6646 if (size > 3 || (size == 3 && !is_q)) {
6647 unallocated_encoding(s);
6648 return;
6649 }
6650
6651 if (!fp_access_check(s)) {
6652 return;
6653 }
6654
6655 tcg_gen_gvec_dup_mem(size, vec_full_reg_offset(s, rd),
6656 vec_reg_offset(s, rn, index, size),
6657 is_q ? 16 : 8, vec_full_reg_size(s));
6658}
6659
6660
6661
6662
6663
6664
6665
6666static void handle_simd_dupes(DisasContext *s, int rd, int rn,
6667 int imm5)
6668{
6669 int size = ctz32(imm5);
6670 int index;
6671 TCGv_i64 tmp;
6672
6673 if (size > 3) {
6674 unallocated_encoding(s);
6675 return;
6676 }
6677
6678 if (!fp_access_check(s)) {
6679 return;
6680 }
6681
6682 index = imm5 >> (size + 1);
6683
6684
6685
6686
6687 tmp = tcg_temp_new_i64();
6688 read_vec_element(s, tmp, rn, index, size);
6689 write_fp_dreg(s, rd, tmp);
6690 tcg_temp_free_i64(tmp);
6691}
6692
6693
6694
6695
6696
6697
6698
6699
6700
6701
6702static void handle_simd_dupg(DisasContext *s, int is_q, int rd, int rn,
6703 int imm5)
6704{
6705 int size = ctz32(imm5);
6706 uint32_t dofs, oprsz, maxsz;
6707
6708 if (size > 3 || ((size == 3) && !is_q)) {
6709 unallocated_encoding(s);
6710 return;
6711 }
6712
6713 if (!fp_access_check(s)) {
6714 return;
6715 }
6716
6717 dofs = vec_full_reg_offset(s, rd);
6718 oprsz = is_q ? 16 : 8;
6719 maxsz = vec_full_reg_size(s);
6720
6721 tcg_gen_gvec_dup_i64(size, dofs, oprsz, maxsz, cpu_reg(s, rn));
6722}
6723
6724
6725
6726
6727
6728
6729
6730
6731
6732
6733
6734static void handle_simd_inse(DisasContext *s, int rd, int rn,
6735 int imm4, int imm5)
6736{
6737 int size = ctz32(imm5);
6738 int src_index, dst_index;
6739 TCGv_i64 tmp;
6740
6741 if (size > 3) {
6742 unallocated_encoding(s);
6743 return;
6744 }
6745
6746 if (!fp_access_check(s)) {
6747 return;
6748 }
6749
6750 dst_index = extract32(imm5, 1+size, 5);
6751 src_index = extract32(imm4, size, 4);
6752
6753 tmp = tcg_temp_new_i64();
6754
6755 read_vec_element(s, tmp, rn, src_index, size);
6756 write_vec_element(s, tmp, rd, dst_index, size);
6757
6758 tcg_temp_free_i64(tmp);
6759}
6760
6761
6762
6763
6764
6765
6766
6767
6768
6769
6770
6771
6772static void handle_simd_insg(DisasContext *s, int rd, int rn, int imm5)
6773{
6774 int size = ctz32(imm5);
6775 int idx;
6776
6777 if (size > 3) {
6778 unallocated_encoding(s);
6779 return;
6780 }
6781
6782 if (!fp_access_check(s)) {
6783 return;
6784 }
6785
6786 idx = extract32(imm5, 1 + size, 4 - size);
6787 write_vec_element(s, cpu_reg(s, rn), rd, idx, size);
6788}
6789
6790
6791
6792
6793
6794
6795
6796
6797
6798
6799
6800
6801
6802static void handle_simd_umov_smov(DisasContext *s, int is_q, int is_signed,
6803 int rn, int rd, int imm5)
6804{
6805 int size = ctz32(imm5);
6806 int element;
6807 TCGv_i64 tcg_rd;
6808
6809
6810 if (is_signed) {
6811 if (size > 2 || (size == 2 && !is_q)) {
6812 unallocated_encoding(s);
6813 return;
6814 }
6815 } else {
6816 if (size > 3
6817 || (size < 3 && is_q)
6818 || (size == 3 && !is_q)) {
6819 unallocated_encoding(s);
6820 return;
6821 }
6822 }
6823
6824 if (!fp_access_check(s)) {
6825 return;
6826 }
6827
6828 element = extract32(imm5, 1+size, 4);
6829
6830 tcg_rd = cpu_reg(s, rd);
6831 read_vec_element(s, tcg_rd, rn, element, size | (is_signed ? MO_SIGN : 0));
6832 if (is_signed && !is_q) {
6833 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
6834 }
6835}
6836
6837
6838
6839
6840
6841
6842
6843static void disas_simd_copy(DisasContext *s, uint32_t insn)
6844{
6845 int rd = extract32(insn, 0, 5);
6846 int rn = extract32(insn, 5, 5);
6847 int imm4 = extract32(insn, 11, 4);
6848 int op = extract32(insn, 29, 1);
6849 int is_q = extract32(insn, 30, 1);
6850 int imm5 = extract32(insn, 16, 5);
6851
6852 if (op) {
6853 if (is_q) {
6854
6855 handle_simd_inse(s, rd, rn, imm4, imm5);
6856 } else {
6857 unallocated_encoding(s);
6858 }
6859 } else {
6860 switch (imm4) {
6861 case 0:
6862
6863 handle_simd_dupe(s, is_q, rd, rn, imm5);
6864 break;
6865 case 1:
6866
6867 handle_simd_dupg(s, is_q, rd, rn, imm5);
6868 break;
6869 case 3:
6870 if (is_q) {
6871
6872 handle_simd_insg(s, rd, rn, imm5);
6873 } else {
6874 unallocated_encoding(s);
6875 }
6876 break;
6877 case 5:
6878 case 7:
6879
6880 handle_simd_umov_smov(s, is_q, (imm4 == 5), rn, rd, imm5);
6881 break;
6882 default:
6883 unallocated_encoding(s);
6884 break;
6885 }
6886 }
6887}
6888
6889
6890
6891
6892
6893
6894
6895
6896
6897
6898
6899
6900
6901
6902
/* AdvSIMD modified immediate: MOVI/MVNI, ORR/BIC (vector, immediate)
 * and FMOV (vector, immediate) including the FP16 form (o2 set).
 */
static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int cmode = extract32(insn, 12, 4);
    int cmode_3_1 = extract32(cmode, 1, 3);
    int cmode_0 = extract32(cmode, 0, 1);
    int o2 = extract32(insn, 11, 1);
    uint64_t abcdefgh = extract32(insn, 5, 5) | (extract32(insn, 16, 3) << 5);
    bool is_neg = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);
    uint64_t imm = 0;

    if (o2 != 0 || ((cmode == 0xf) && is_neg && !is_q)) {
        /* Check for FMOV (vector, immediate) - half-precision */
        if (!(arm_dc_feature(s, ARM_FEATURE_V8_FP16) && o2 && cmode == 0xf)) {
            unallocated_encoding(s);
            return;
        }
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* Expand the 8-bit immediate per the cmode encoding */
    switch (cmode_3_1) {
    case 0: /* 32-bit lanes, imm8 at bits <7:0> */
    case 1: /* 32-bit lanes, imm8 at bits <15:8> */
    case 2: /* 32-bit lanes, imm8 at bits <23:16> */
    case 3: /* 32-bit lanes, imm8 at bits <31:24> */
    {
        int shift = cmode_3_1 * 8;
        imm = bitfield_replicate(abcdefgh << shift, 32);
        break;
    }
    case 4: /* 16-bit lanes, imm8 at bits <7:0> */
    case 5: /* 16-bit lanes, imm8 at bits <15:8> */
    {
        int shift = (cmode_3_1 & 0x1) * 8;
        imm = bitfield_replicate(abcdefgh << shift, 16);
        break;
    }
    case 6:
        /* shift-ones (MSL) forms, 32-bit lanes */
        if (cmode_0) {
            imm = (abcdefgh << 16) | 0xffff;
        } else {
            imm = (abcdefgh << 8) | 0xff;
        }
        imm = bitfield_replicate(imm, 32);
        break;
    case 7:
        if (!cmode_0 && !is_neg) {
            /* 8-bit lanes, imm8 replicated into every byte */
            imm = bitfield_replicate(abcdefgh, 8);
        } else if (!cmode_0 && is_neg) {
            /* 64-bit MOVI: each imm8 bit expands to a byte of ones */
            int i;
            imm = 0;
            for (i = 0; i < 8; i++) {
                if ((abcdefgh) & (1 << i)) {
                    imm |= 0xffULL << (i * 8);
                }
            }
        } else if (cmode_0) {
            if (is_neg) {
                /* FMOV (vector, immediate) - double precision */
                imm = (abcdefgh & 0x3f) << 48;
                if (abcdefgh & 0x80) {
                    imm |= 0x8000000000000000ULL;
                }
                if (abcdefgh & 0x40) {
                    imm |= 0x3fc0000000000000ULL;
                } else {
                    imm |= 0x4000000000000000ULL;
                }
            } else {
                if (o2) {
                    /* FMOV (vector, immediate) - half precision */
                    imm = vfp_expand_imm(MO_16, abcdefgh);
                    /* now duplicate across the lanes */
                    imm = bitfield_replicate(imm, 16);
                } else {
                    /* FMOV (vector, immediate) - single precision */
                    imm = (abcdefgh & 0x3f) << 19;
                    if (abcdefgh & 0x80) {
                        imm |= 0x80000000;
                    }
                    if (abcdefgh & 0x40) {
                        imm |= 0x3e000000;
                    } else {
                        imm |= 0x40000000;
                    }
                    imm |= (imm << 32);
                }
            }
        }
        break;
    default:
        fprintf(stderr, "%s: cmode_3_1: %x\n", __func__, cmode_3_1);
        g_assert_not_reached();
    }

    /* MVNI and 64-bit BIC invert the expanded immediate */
    if (cmode_3_1 != 7 && is_neg) {
        imm = ~imm;
    }

    if (!((cmode & 0x9) == 0x1 || (cmode & 0xd) == 0x9)) {
        /* MOVI or MVNI, with MSL also FMOV */
        tcg_gen_gvec_dup64i(vec_full_reg_offset(s, rd), is_q ? 16 : 8,
                            vec_full_reg_size(s), imm);
    } else {
        /* ORR or BIC, with BIC negation to AND handled above */
        if (is_neg) {
            gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_andi, MO_64);
        } else {
            gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_ori, MO_64);
        }
    }
}
7020
7021
7022
7023
7024
7025
7026
7027static void disas_simd_scalar_copy(DisasContext *s, uint32_t insn)
7028{
7029 int rd = extract32(insn, 0, 5);
7030 int rn = extract32(insn, 5, 5);
7031 int imm4 = extract32(insn, 11, 4);
7032 int imm5 = extract32(insn, 16, 5);
7033 int op = extract32(insn, 29, 1);
7034
7035 if (op != 0 || imm4 != 0) {
7036 unallocated_encoding(s);
7037 return;
7038 }
7039
7040
7041 handle_simd_dupes(s, rd, rn, imm5);
7042}
7043
7044
7045
7046
7047
7048
7049
/* AdvSIMD scalar pairwise: combine the two elements of Vn with one
 * operation and write the scalar result to Vd.
 */
static void disas_simd_scalar_pairwise(DisasContext *s, uint32_t insn)
{
    int u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    TCGv_ptr fpst;

    /* For some ops (the FP ones), size[1] is part of the encoding.
     * For ADDP strictly it is not but size[1] is always 1 for valid
     * encodings.
     */
    opcode |= (extract32(size, 1, 1) << 5);

    switch (opcode) {
    case 0x3b: /* ADDP */
        if (u || size != 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }

        /* integer ADDP needs no fp_status */
        fpst = NULL;
        break;
    case 0xc: /* FMAXNMP */
    case 0xd: /* FADDP */
    case 0xf: /* FMAXP */
    case 0x2c: /* FMINNMP */
    case 0x2f: /* FMINP */
        /* FP op; !u selects the FP16 form, otherwise size[0] picks
         * single vs double precision.
         */
        if (!u) {
            if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
                unallocated_encoding(s);
                return;
            } else {
                size = MO_16;
            }
        } else {
            size = extract32(size, 0, 1) ? MO_64 : MO_32;
        }

        if (!fp_access_check(s)) {
            return;
        }

        fpst = get_fpstatus_ptr(size == MO_16);
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (size == MO_64) {
        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
        TCGv_i64 tcg_op2 = tcg_temp_new_i64();
        TCGv_i64 tcg_res = tcg_temp_new_i64();

        read_vec_element(s, tcg_op1, rn, 0, MO_64);
        read_vec_element(s, tcg_op2, rn, 1, MO_64);

        switch (opcode) {
        case 0x3b: /* ADDP */
            tcg_gen_add_i64(tcg_res, tcg_op1, tcg_op2);
            break;
        case 0xc: /* FMAXNMP */
            gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0xd: /* FADDP */
            gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0xf: /* FMAXP */
            gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0x2c: /* FMINNMP */
            gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0x2f: /* FMINP */
            gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        default:
            g_assert_not_reached();
        }

        write_fp_dreg(s, rd, tcg_res);

        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2);
        tcg_temp_free_i64(tcg_res);
    } else {
        TCGv_i32 tcg_op1 = tcg_temp_new_i32();
        TCGv_i32 tcg_op2 = tcg_temp_new_i32();
        TCGv_i32 tcg_res = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_op1, rn, 0, size);
        read_vec_element_i32(s, tcg_op2, rn, 1, size);

        if (size == MO_16) {
            switch (opcode) {
            case 0xc: /* FMAXNMP */
                gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xd: /* FADDP */
                gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xf: /* FMAXP */
                gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x2c: /* FMINNMP */
                gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x2f: /* FMINP */
                gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            switch (opcode) {
            case 0xc: /* FMAXNMP */
                gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xd: /* FADDP */
                gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xf: /* FMAXP */
                gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x2c: /* FMINNMP */
                gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x2f: /* FMINP */
                gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }
        }

        write_fp_sreg(s, rd, tcg_res);

        tcg_temp_free_i32(tcg_op1);
        tcg_temp_free_i32(tcg_op2);
        tcg_temp_free_i32(tcg_res);
    }

    if (fpst) {
        tcg_temp_free_ptr(fpst);
    }
}
7202
7203
7204
7205
7206
7207
7208
/* Shift a TCGv_i64 right, optionally rounding and/or accumulating;
 * common code shared by the scalar and vector shift-right insns.
 * tcg_rnd is NULL for no rounding, otherwise holds 1 << (shift - 1).
 * Note: clobbers tcg_src.
 */
static void handle_shri_with_rndacc(TCGv_i64 tcg_res, TCGv_i64 tcg_src,
                                    TCGv_i64 tcg_rnd, bool accumulate,
                                    bool is_u, int size, int shift)
{
    bool extended_result = false;
    bool round = tcg_rnd != NULL;
    int ext_lshift = 0;
    TCGv_i64 tcg_src_hi;

    if (round && size == 3) {
        /* adding the rounding constant to a 64-bit value can carry out,
         * so keep a 128-bit intermediate (tcg_src_hi:tcg_src)
         */
        extended_result = true;
        ext_lshift = 64 - shift;
        tcg_src_hi = tcg_temp_new_i64();
    } else if (shift == 64) {
        if (!accumulate && is_u) {
            /* result is zero */
            tcg_gen_movi_i64(tcg_res, 0);
            return;
        }
    }

    /* Deal with the rounding step */
    if (round) {
        if (extended_result) {
            TCGv_i64 tcg_zero = tcg_const_i64(0);
            if (!is_u) {
                /* take care of sign extending tcg_src */
                tcg_gen_sari_i64(tcg_src_hi, tcg_src, 63);
                tcg_gen_add2_i64(tcg_src, tcg_src_hi,
                                 tcg_src, tcg_src_hi,
                                 tcg_rnd, tcg_zero);
            } else {
                tcg_gen_add2_i64(tcg_src, tcg_src_hi,
                                 tcg_src, tcg_zero,
                                 tcg_rnd, tcg_zero);
            }
            tcg_temp_free_i64(tcg_zero);
        } else {
            tcg_gen_add_i64(tcg_src, tcg_src, tcg_rnd);
        }
    }

    /* Now do the shift right */
    if (round && extended_result) {
        /* extended case, >64 bit precision required */
        if (ext_lshift == 0) {
            /* special case, only high bits matter */
            tcg_gen_mov_i64(tcg_src, tcg_src_hi);
        } else {
            tcg_gen_shri_i64(tcg_src, tcg_src, shift);
            tcg_gen_shli_i64(tcg_src_hi, tcg_src_hi, ext_lshift);
            tcg_gen_or_i64(tcg_src, tcg_src, tcg_src_hi);
        }
    } else {
        if (is_u) {
            if (shift == 64) {
                /* essentially shifting in 64 zeros */
                tcg_gen_movi_i64(tcg_src, 0);
            } else {
                tcg_gen_shri_i64(tcg_src, tcg_src, shift);
            }
        } else {
            if (shift == 64) {
                /* effectively extending the sign-bit */
                tcg_gen_sari_i64(tcg_src, tcg_src, 63);
            } else {
                tcg_gen_sari_i64(tcg_src, tcg_src, shift);
            }
        }
    }

    if (accumulate) {
        tcg_gen_add_i64(tcg_res, tcg_res, tcg_src);
    } else {
        tcg_gen_mov_i64(tcg_res, tcg_src);
    }

    if (extended_result) {
        tcg_temp_free_i64(tcg_src_hi);
    }
}
7290
7291
/* SSHR[RA]/USHR[RA]/SRSHR[RA]/URSHR[RA]/SRI - Scalar shift right */
static void handle_scalar_simd_shri(DisasContext *s,
                                    bool is_u, int immh, int immb,
                                    int opcode, int rn, int rd)
{
    const int size = 3; /* scalar shift right only exists at 64 bits */
    int immhb = immh << 3 | immb;
    int shift = 2 * (8 << size) - immhb;
    bool accumulate = false;
    bool round = false;
    bool insert = false;
    TCGv_i64 tcg_rn;
    TCGv_i64 tcg_rd;
    TCGv_i64 tcg_round;

    /* immh<3> must be set for the 64-bit element size */
    if (!extract32(immh, 3, 1)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    switch (opcode) {
    case 0x02: /* SSRA / USRA (accumulate) */
        accumulate = true;
        break;
    case 0x04: /* SRSHR / URSHR (rounding) */
        round = true;
        break;
    case 0x06: /* SRSRA / URSRA (accum + rounding) */
        accumulate = round = true;
        break;
    case 0x08: /* SRI */
        insert = true;
        break;
    }

    if (round) {
        uint64_t round_const = 1ULL << (shift - 1);
        tcg_round = tcg_const_i64(round_const);
    } else {
        tcg_round = NULL;
    }

    tcg_rn = read_fp_dreg(s, rn);
    /* accumulate and insert both need the old destination value */
    tcg_rd = (accumulate || insert) ? read_fp_dreg(s, rd) : tcg_temp_new_i64();

    if (insert) {
        /* shift count same as element size is valid but does nothing;
         * special case to avoid potential shift by 64.
         */
        int esize = 8 << size;
        if (shift != esize) {
            tcg_gen_shri_i64(tcg_rn, tcg_rn, shift);
            tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, 0, esize - shift);
        }
    } else {
        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
                                accumulate, is_u, size, shift);
    }

    write_fp_dreg(s, rd, tcg_rd);

    tcg_temp_free_i64(tcg_rn);
    tcg_temp_free_i64(tcg_rd);
    if (round) {
        tcg_temp_free_i64(tcg_round);
    }
}
7362
7363
7364static void handle_scalar_simd_shli(DisasContext *s, bool insert,
7365 int immh, int immb, int opcode,
7366 int rn, int rd)
7367{
7368 int size = 32 - clz32(immh) - 1;
7369 int immhb = immh << 3 | immb;
7370 int shift = immhb - (8 << size);
7371 TCGv_i64 tcg_rn = new_tmp_a64(s);
7372 TCGv_i64 tcg_rd = new_tmp_a64(s);
7373
7374 if (!extract32(immh, 3, 1)) {
7375 unallocated_encoding(s);
7376 return;
7377 }
7378
7379 if (!fp_access_check(s)) {
7380 return;
7381 }
7382
7383 tcg_rn = read_fp_dreg(s, rn);
7384 tcg_rd = insert ? read_fp_dreg(s, rd) : tcg_temp_new_i64();
7385
7386 if (insert) {
7387 tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, shift, 64 - shift);
7388 } else {
7389 tcg_gen_shli_i64(tcg_rd, tcg_rn, shift);
7390 }
7391
7392 write_fp_dreg(s, rd, tcg_rd);
7393
7394 tcg_temp_free_i64(tcg_rn);
7395 tcg_temp_free_i64(tcg_rd);
7396}
7397
7398
7399
/*
 * Saturating shift right narrow (scalar or vector forms), with
 * optional rounding (opcode bit 0) and signed/unsigned shift and
 * narrow variants.  Elements are read at double the destination
 * width, shifted down, saturating-narrowed, and packed into the
 * low 64 bits of Rd (!is_q) or the high 64 bits (is_q).
 */
static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q,
                                   bool is_u_shift, bool is_u_narrow,
                                   int immh, int immb, int opcode,
                                   int rn, int rd)
{
    int immhb = immh << 3 | immb;
    int size = 32 - clz32(immh) - 1;   /* log2 size of the *narrowed* element */
    int esize = 8 << size;             /* destination element size in bits */
    int shift = (2 * esize) - immhb;
    int elements = is_scalar ? 1 : (64 / esize);
    bool round = extract32(opcode, 0, 1);
    /* Source elements are double-width; sign-extend unless the shift
     * itself is unsigned.
     */
    TCGMemOp ldop = (size + 1) | (is_u_shift ? 0 : MO_SIGN);
    TCGv_i64 tcg_rn, tcg_rd, tcg_round;
    TCGv_i32 tcg_rd_narrowed;
    TCGv_i64 tcg_final;

    /* Narrowing helpers, indexed by destination size; the second
     * column handles the signed-input/unsigned-result saturation.
     */
    static NeonGenNarrowEnvFn * const signed_narrow_fns[4][2] = {
        { gen_helper_neon_narrow_sat_s8,
          gen_helper_neon_unarrow_sat8 },
        { gen_helper_neon_narrow_sat_s16,
          gen_helper_neon_unarrow_sat16 },
        { gen_helper_neon_narrow_sat_s32,
          gen_helper_neon_unarrow_sat32 },
        { NULL, NULL },
    };
    static NeonGenNarrowEnvFn * const unsigned_narrow_fns[4] = {
        gen_helper_neon_narrow_sat_u8,
        gen_helper_neon_narrow_sat_u16,
        gen_helper_neon_narrow_sat_u32,
        NULL
    };
    NeonGenNarrowEnvFn *narrowfn;

    int i;

    assert(size < 4);

    /* immh<3> == 1 would mean a 64-bit destination element, which
     * cannot be narrowed to.
     */
    if (extract32(immh, 3, 1)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (is_u_shift) {
        narrowfn = unsigned_narrow_fns[size];
    } else {
        narrowfn = signed_narrow_fns[size][is_u_narrow ? 1 : 0];
    }

    tcg_rn = tcg_temp_new_i64();
    tcg_rd = tcg_temp_new_i64();
    tcg_rd_narrowed = tcg_temp_new_i32();
    tcg_final = tcg_const_i64(0);

    if (round) {
        uint64_t round_const = 1ULL << (shift - 1);
        tcg_round = tcg_const_i64(round_const);
    } else {
        tcg_round = NULL;
    }

    for (i = 0; i < elements; i++) {
        read_vec_element(s, tcg_rn, rn, i, ldop);
        /* Shift (with optional rounding, no accumulation) at the
         * double-width source size, then saturating-narrow and pack
         * the result into its slot in tcg_final.
         */
        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
                                false, is_u_shift, size+1, shift);
        narrowfn(tcg_rd_narrowed, cpu_env, tcg_rd);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_rd_narrowed);
        tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
    }

    /* Base form writes the low 64 bits; the is_q ("second part")
     * form writes the high 64 bits, leaving the low half intact.
     */
    if (!is_q) {
        write_vec_element(s, tcg_final, rd, 0, MO_64);
    } else {
        write_vec_element(s, tcg_final, rd, 1, MO_64);
    }

    if (round) {
        tcg_temp_free_i64(tcg_round);
    }
    tcg_temp_free_i64(tcg_rn);
    tcg_temp_free_i64(tcg_rd);
    tcg_temp_free_i32(tcg_rd_narrowed);
    tcg_temp_free_i64(tcg_final);

    clear_vec_high(s, is_q, rd);
}
7489
7490
/*
 * Saturating shift left by immediate, scalar and vector forms.
 * src_unsigned/dst_unsigned select the signed, unsigned, and
 * signed-input/unsigned-result variants (the unsigned-input/
 * signed-result combination does not exist: NULL table slots).
 */
static void handle_simd_qshl(DisasContext *s, bool scalar, bool is_q,
                             bool src_unsigned, bool dst_unsigned,
                             int immh, int immb, int rn, int rd)
{
    int immhb = immh << 3 | immb;
    int size = 32 - clz32(immh) - 1;
    int shift = immhb - (8 << size);
    int pass;

    assert(immh != 0);
    assert(!(scalar && is_q));

    if (!scalar) {
        if (!is_q && extract32(immh, 3, 1)) {
            unallocated_encoding(s);
            return;
        }

        /* The narrow-element helpers operate on all lanes of a packed
         * 32-bit value at once (the vector loop below reads MO_32 per
         * pass), so replicate the shift count into every 8- or 16-bit
         * lane of the shift operand.
         */
        switch (size) {
        case 0:
            shift |= shift << 8;
            /* fall through */
        case 1:
            shift |= shift << 16;
            break;
        case 2:
        case 3:
            break;
        default:
            g_assert_not_reached();
        }
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (size == 3) {
        /* 64-bit elements: one or two full-width passes */
        TCGv_i64 tcg_shift = tcg_const_i64(shift);
        static NeonGenTwo64OpEnvFn * const fns[2][2] = {
            { gen_helper_neon_qshl_s64, gen_helper_neon_qshlu_s64 },
            { NULL, gen_helper_neon_qshl_u64 },
        };
        NeonGenTwo64OpEnvFn *genfn = fns[src_unsigned][dst_unsigned];
        int maxpass = is_q ? 2 : 1;

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);
            genfn(tcg_op, cpu_env, tcg_op, tcg_shift);
            write_vec_element(s, tcg_op, rd, pass, MO_64);

            tcg_temp_free_i64(tcg_op);
        }
        tcg_temp_free_i64(tcg_shift);
        clear_vec_high(s, is_q, rd);
    } else {
        TCGv_i32 tcg_shift = tcg_const_i32(shift);
        static NeonGenTwoOpEnvFn * const fns[2][2][3] = {
            {
                { gen_helper_neon_qshl_s8,
                  gen_helper_neon_qshl_s16,
                  gen_helper_neon_qshl_s32 },
                { gen_helper_neon_qshlu_s8,
                  gen_helper_neon_qshlu_s16,
                  gen_helper_neon_qshlu_s32 }
            }, {
                { NULL, NULL, NULL },
                { gen_helper_neon_qshl_u8,
                  gen_helper_neon_qshl_u16,
                  gen_helper_neon_qshl_u32 }
            }
        };
        NeonGenTwoOpEnvFn *genfn = fns[src_unsigned][dst_unsigned][size];
        /* Scalar reads a single element at its natural size; the
         * vector form processes a packed 32-bit word per pass.
         */
        TCGMemOp memop = scalar ? size : MO_32;
        int maxpass = scalar ? 1 : is_q ? 4 : 2;

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op, rn, pass, memop);
            genfn(tcg_op, cpu_env, tcg_op, tcg_shift);
            if (scalar) {
                /* Zero the unused high bits before writing the S reg */
                switch (size) {
                case 0:
                    tcg_gen_ext8u_i32(tcg_op, tcg_op);
                    break;
                case 1:
                    tcg_gen_ext16u_i32(tcg_op, tcg_op);
                    break;
                case 2:
                    break;
                default:
                    g_assert_not_reached();
                }
                write_fp_sreg(s, rd, tcg_op);
            } else {
                write_vec_element_i32(s, tcg_op, rd, pass, MO_32);
            }

            tcg_temp_free_i32(tcg_op);
        }
        tcg_temp_free_i32(tcg_shift);

        if (!scalar) {
            clear_vec_high(s, is_q, rd);
        }
    }
}
7605
7606
/*
 * Convert integer elements to floating point, optionally treating the
 * input as fixed-point with 'fracbits' fractional bits.  Handles 16-,
 * 32- and 64-bit element sizes; elements == 1 means a scalar result.
 */
static void handle_simd_intfp_conv(DisasContext *s, int rd, int rn,
                                   int elements, int is_signed,
                                   int fracbits, int size)
{
    TCGv_ptr tcg_fpst = get_fpstatus_ptr(size == MO_16);
    TCGv_i32 tcg_shift = NULL;

    TCGMemOp mop = size | (is_signed ? MO_SIGN : 0);
    int pass;

    /* The 64-bit helpers always take a shift operand; the narrower
     * sizes only need it for the fixed-point (fracbits != 0) forms.
     */
    if (fracbits || size == MO_64) {
        tcg_shift = tcg_const_i32(fracbits);
    }

    if (size == MO_64) {
        TCGv_i64 tcg_int64 = tcg_temp_new_i64();
        TCGv_i64 tcg_double = tcg_temp_new_i64();

        for (pass = 0; pass < elements; pass++) {
            read_vec_element(s, tcg_int64, rn, pass, mop);

            if (is_signed) {
                gen_helper_vfp_sqtod(tcg_double, tcg_int64,
                                     tcg_shift, tcg_fpst);
            } else {
                gen_helper_vfp_uqtod(tcg_double, tcg_int64,
                                     tcg_shift, tcg_fpst);
            }
            if (elements == 1) {
                /* Scalar: write_fp_dreg also zeroes the high bits */
                write_fp_dreg(s, rd, tcg_double);
            } else {
                write_vec_element(s, tcg_double, rd, pass, MO_64);
            }
        }

        tcg_temp_free_i64(tcg_int64);
        tcg_temp_free_i64(tcg_double);

    } else {
        TCGv_i32 tcg_int32 = tcg_temp_new_i32();
        TCGv_i32 tcg_float = tcg_temp_new_i32();

        for (pass = 0; pass < elements; pass++) {
            read_vec_element_i32(s, tcg_int32, rn, pass, mop);

            switch (size) {
            case MO_32:
                if (fracbits) {
                    if (is_signed) {
                        gen_helper_vfp_sltos(tcg_float, tcg_int32,
                                             tcg_shift, tcg_fpst);
                    } else {
                        gen_helper_vfp_ultos(tcg_float, tcg_int32,
                                             tcg_shift, tcg_fpst);
                    }
                } else {
                    if (is_signed) {
                        gen_helper_vfp_sitos(tcg_float, tcg_int32, tcg_fpst);
                    } else {
                        gen_helper_vfp_uitos(tcg_float, tcg_int32, tcg_fpst);
                    }
                }
                break;
            case MO_16:
                if (fracbits) {
                    if (is_signed) {
                        gen_helper_vfp_sltoh(tcg_float, tcg_int32,
                                             tcg_shift, tcg_fpst);
                    } else {
                        gen_helper_vfp_ultoh(tcg_float, tcg_int32,
                                             tcg_shift, tcg_fpst);
                    }
                } else {
                    if (is_signed) {
                        gen_helper_vfp_sitoh(tcg_float, tcg_int32, tcg_fpst);
                    } else {
                        gen_helper_vfp_uitoh(tcg_float, tcg_int32, tcg_fpst);
                    }
                }
                break;
            default:
                g_assert_not_reached();
            }

            if (elements == 1) {
                write_fp_sreg(s, rd, tcg_float);
            } else {
                write_vec_element_i32(s, tcg_float, rd, pass, size);
            }
        }

        tcg_temp_free_i32(tcg_int32);
        tcg_temp_free_i32(tcg_float);
    }

    tcg_temp_free_ptr(tcg_fpst);
    if (tcg_shift) {
        tcg_temp_free_i32(tcg_shift);
    }

    /* (elements << size) == 16 iff this was a full 128-bit operation */
    clear_vec_high(s, elements << size == 16, rd);
}
7709
7710
7711static void handle_simd_shift_intfp_conv(DisasContext *s, bool is_scalar,
7712 bool is_q, bool is_u,
7713 int immh, int immb, int opcode,
7714 int rn, int rd)
7715{
7716 int size, elements, fracbits;
7717 int immhb = immh << 3 | immb;
7718
7719 if (immh & 8) {
7720 size = MO_64;
7721 if (!is_scalar && !is_q) {
7722 unallocated_encoding(s);
7723 return;
7724 }
7725 } else if (immh & 4) {
7726 size = MO_32;
7727 } else if (immh & 2) {
7728 size = MO_16;
7729 if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
7730 unallocated_encoding(s);
7731 return;
7732 }
7733 } else {
7734
7735 g_assert(immh == 1);
7736 unallocated_encoding(s);
7737 return;
7738 }
7739
7740 if (is_scalar) {
7741 elements = 1;
7742 } else {
7743 elements = (8 << is_q) >> size;
7744 }
7745 fracbits = (16 << size) - immhb;
7746
7747 if (!fp_access_check(s)) {
7748 return;
7749 }
7750
7751 handle_simd_intfp_conv(s, rd, rn, elements, !is_u, fracbits, size);
7752}
7753
7754
/*
 * Convert floating point elements to fixed-point integers with
 * 'fracbits' fractional bits, rounding toward zero.  The element
 * size is taken from the top set bit of immh, as for the inverse
 * (int->fp) conversion group.
 */
static void handle_simd_shift_fpint_conv(DisasContext *s, bool is_scalar,
                                         bool is_q, bool is_u,
                                         int immh, int immb, int rn, int rd)
{
    int immhb = immh << 3 | immb;
    int pass, size, fracbits;
    TCGv_ptr tcg_fpstatus;
    TCGv_i32 tcg_rmode, tcg_shift;

    if (immh & 0x8) {
        size = MO_64;
        /* 64-bit elements in the vector form require Q = 1 */
        if (!is_scalar && !is_q) {
            unallocated_encoding(s);
            return;
        }
    } else if (immh & 0x4) {
        size = MO_32;
    } else if (immh & 0x2) {
        size = MO_16;
        if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
            unallocated_encoding(s);
            return;
        }
    } else {
        /* immh == 0 never reaches here; immh == 1 is reserved */
        assert(immh == 1);
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    assert(!(is_scalar && is_q));

    /* Switch the FP rounding mode to round-to-zero for the
     * conversion; set_rmode leaves the previous mode in tcg_rmode
     * so the second call at the end restores it.
     */
    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(FPROUNDING_ZERO));
    tcg_fpstatus = get_fpstatus_ptr(size == MO_16);
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
    fracbits = (16 << size) - immhb;
    tcg_shift = tcg_const_i32(fracbits);

    if (size == MO_64) {
        int maxpass = is_scalar ? 1 : 2;

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);
            if (is_u) {
                gen_helper_vfp_touqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_tosqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
            }
            write_vec_element(s, tcg_op, rd, pass, MO_64);
            tcg_temp_free_i64(tcg_op);
        }
        clear_vec_high(s, is_q, rd);
    } else {
        void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
        int maxpass = is_scalar ? 1 : ((8 << is_q) >> size);

        switch (size) {
        case MO_16:
            if (is_u) {
                fn = gen_helper_vfp_touhh;
            } else {
                fn = gen_helper_vfp_toshh;
            }
            break;
        case MO_32:
            if (is_u) {
                fn = gen_helper_vfp_touls;
            } else {
                fn = gen_helper_vfp_tosls;
            }
            break;
        default:
            g_assert_not_reached();
        }

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op, rn, pass, size);
            fn(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
            if (is_scalar) {
                write_fp_sreg(s, rd, tcg_op);
            } else {
                write_vec_element_i32(s, tcg_op, rd, pass, size);
            }
            tcg_temp_free_i32(tcg_op);
        }
        if (!is_scalar) {
            clear_vec_high(s, is_q, rd);
        }
    }

    tcg_temp_free_ptr(tcg_fpstatus);
    tcg_temp_free_i32(tcg_shift);
    /* Restore the rounding mode saved above */
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
    tcg_temp_free_i32(tcg_rmode);
}
7858
7859
7860
7861
7862
7863
7864
7865
7866
/*
 * AdvSIMD scalar shift by immediate: decode the opcode field and
 * dispatch to the appropriate handler.  immh == 0 in this encoding
 * space belongs to a different instruction group, so it is rejected
 * up front.
 */
static void disas_simd_scalar_shift_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 5);
    int immb = extract32(insn, 16, 3);
    int immh = extract32(insn, 19, 4);
    bool is_u = extract32(insn, 29, 1);

    if (immh == 0) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0x08: /* shift-right-and-insert: only the U=1 form exists */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x00: /* plain shift right */
    case 0x02: /* shift right and accumulate */
    case 0x04: /* rounding shift right */
    case 0x06: /* rounding shift right and accumulate */
        handle_scalar_simd_shri(s, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x0a: /* shift left (U=1: shift-left-and-insert) */
        handle_scalar_simd_shli(s, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x1c: /* fixed-point int -> fp convert */
        handle_simd_shift_intfp_conv(s, true, false, is_u, immh, immb,
                                     opcode, rn, rd);
        break;
    case 0x10: /* signed-input, unsigned-result saturating narrow:
                * only valid with U=1 (0x11 adds rounding)
                */
    case 0x11:
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        handle_vec_simd_sqshrn(s, true, false, false, true,
                               immh, immb, opcode, rn, rd);
        break;
    case 0x12: /* saturating shift right narrow, signed/unsigned per U
                * (0x13 adds rounding)
                */
    case 0x13:
        handle_vec_simd_sqshrn(s, true, false, is_u, is_u,
                               immh, immb, opcode, rn, rd);
        break;
    case 0xc: /* signed-input, unsigned-result saturating shift left:
               * only valid with U=1
               */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        handle_simd_qshl(s, true, false, false, true, immh, immb, rn, rd);
        break;
    case 0xe: /* saturating shift left, signed/unsigned per U */
        handle_simd_qshl(s, true, false, is_u, is_u, immh, immb, rn, rd);
        break;
    case 0x1f: /* fixed-point fp -> int convert (round to zero) */
        handle_simd_shift_fpint_conv(s, true, false, is_u, immh, immb, rn, rd);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
7933
7934
7935
7936
7937
7938
7939
/*
 * AdvSIMD scalar three-register different-size group.  Only the
 * signed saturating doubling multiply forms (opcode 0x9: accumulate,
 * 0xb: subtract, 0xd: plain) are valid, and only for 16- and 32-bit
 * source elements (sizes 1 and 2).
 */
static void disas_simd_scalar_three_reg_diff(DisasContext *s, uint32_t insn)
{
    bool is_u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 4);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    if (is_u) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0x9:
    case 0xb:
    case 0xd:
        if (size == 0 || size == 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (size == 2) {
        /* 32-bit sources, 64-bit result */
        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
        TCGv_i64 tcg_op2 = tcg_temp_new_i64();
        TCGv_i64 tcg_res = tcg_temp_new_i64();

        read_vec_element(s, tcg_op1, rn, 0, MO_32 | MO_SIGN);
        read_vec_element(s, tcg_op2, rm, 0, MO_32 | MO_SIGN);

        /* Double the product by saturating-adding it to itself */
        tcg_gen_mul_i64(tcg_res, tcg_op1, tcg_op2);
        gen_helper_neon_addl_saturate_s64(tcg_res, cpu_env, tcg_res, tcg_res);

        switch (opcode) {
        case 0xd: /* plain doubling multiply */
            break;
        case 0xb: /* subtract variant: negate the product ... */
            tcg_gen_neg_i64(tcg_res, tcg_res);
            /* fall through */
        case 0x9: /* ... then saturating-accumulate into Rd */
            read_vec_element(s, tcg_op1, rd, 0, MO_64);
            gen_helper_neon_addl_saturate_s64(tcg_res, cpu_env,
                                              tcg_res, tcg_op1);
            break;
        default:
            g_assert_not_reached();
        }

        write_fp_dreg(s, rd, tcg_res);

        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2);
        tcg_temp_free_i64(tcg_res);
    } else {
        /* size == 1: 16-bit sources, 32-bit result */
        TCGv_i32 tcg_op1 = read_fp_hreg(s, rn);
        TCGv_i32 tcg_op2 = read_fp_hreg(s, rm);
        TCGv_i64 tcg_res = tcg_temp_new_i64();

        gen_helper_neon_mull_s16(tcg_res, tcg_op1, tcg_op2);
        gen_helper_neon_addl_saturate_s32(tcg_res, cpu_env, tcg_res, tcg_res);

        switch (opcode) {
        case 0xd: /* plain doubling multiply */
            break;
        case 0xb: /* subtract variant: negate the product ... */
            gen_helper_neon_negl_u32(tcg_res, tcg_res);
            /* fall through */
        case 0x9: /* ... then saturating-accumulate into Rd */
        {
            TCGv_i64 tcg_op3 = tcg_temp_new_i64();
            read_vec_element(s, tcg_op3, rd, 0, MO_32);
            gen_helper_neon_addl_saturate_s32(tcg_res, cpu_env,
                                              tcg_res, tcg_op3);
            tcg_temp_free_i64(tcg_op3);
            break;
        }
        default:
            g_assert_not_reached();
        }

        /* Zero-extend the 32-bit result into the D register */
        tcg_gen_ext32u_i64(tcg_res, tcg_res);
        write_fp_dreg(s, rd, tcg_res);

        tcg_temp_free_i32(tcg_op1);
        tcg_temp_free_i32(tcg_op2);
        tcg_temp_free_i64(tcg_res);
    }
}
8038
8039
8040static void gen_cmtst_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
8041{
8042 tcg_gen_and_i32(d, a, b);
8043 tcg_gen_setcondi_i32(TCG_COND_NE, d, d, 0);
8044 tcg_gen_neg_i32(d, d);
8045}
8046
8047static void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
8048{
8049 tcg_gen_and_i64(d, a, b);
8050 tcg_gen_setcondi_i64(TCG_COND_NE, d, d, 0);
8051 tcg_gen_neg_i64(d, d);
8052}
8053
/* Vector CMTST: per element, d = (a & b) != 0 ? all-ones : 0.
 * Note that 'a' is clobbered here (reused as a zero scratch operand).
 */
static void gen_cmtst_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_and_vec(vece, d, a, b);
    tcg_gen_dupi_vec(vece, a, 0);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, d, d, a);
}
8060
/*
 * Emit code for a three-register-same operation on a single 64-bit
 * element.  'u' selects the unsigned variant of each opcode where one
 * exists.  Opcodes not listed below must not be passed in.
 */
static void handle_3same_64(DisasContext *s, int opcode, bool u,
                            TCGv_i64 tcg_rd, TCGv_i64 tcg_rn, TCGv_i64 tcg_rm)
{
    /* The compare opcodes (0x6, 0x7, and the u form of 0x11) all
     * funnel through the do_cmop label with a suitable TCG condition:
     * setcond produces 0/1, which the negate turns into the
     * all-zeroes/all-ones result mask.
     */
    TCGCond cond;

    switch (opcode) {
    case 0x1: /* saturating add */
        if (u) {
            gen_helper_neon_qadd_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qadd_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        }
        break;
    case 0x5: /* saturating subtract */
        if (u) {
            gen_helper_neon_qsub_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qsub_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        }
        break;
    case 0x6: /* compare greater-than (signed or unsigned) */
        cond = u ? TCG_COND_GTU : TCG_COND_GT;
    do_cmop:
        tcg_gen_setcond_i64(cond, tcg_rd, tcg_rn, tcg_rm);
        tcg_gen_neg_i64(tcg_rd, tcg_rd);
        break;
    case 0x7: /* compare greater-or-equal (signed or unsigned) */
        cond = u ? TCG_COND_GEU : TCG_COND_GE;
        goto do_cmop;
    case 0x11: /* u: compare equal; otherwise test bits (CMTST) */
        if (u) {
            cond = TCG_COND_EQ;
            goto do_cmop;
        }
        gen_cmtst_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 0x8: /* shift by register */
        if (u) {
            gen_helper_neon_shl_u64(tcg_rd, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_shl_s64(tcg_rd, tcg_rn, tcg_rm);
        }
        break;
    case 0x9: /* saturating shift by register */
        if (u) {
            gen_helper_neon_qshl_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qshl_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        }
        break;
    case 0xa: /* rounding shift by register */
        if (u) {
            gen_helper_neon_rshl_u64(tcg_rd, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_rshl_s64(tcg_rd, tcg_rn, tcg_rm);
        }
        break;
    case 0xb: /* saturating rounding shift by register */
        if (u) {
            gen_helper_neon_qrshl_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qrshl_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        }
        break;
    case 0x10: /* u: subtract; otherwise add */
        if (u) {
            tcg_gen_sub_i64(tcg_rd, tcg_rn, tcg_rm);
        } else {
            tcg_gen_add_i64(tcg_rd, tcg_rn, tcg_rm);
        }
        break;
    default:
        g_assert_not_reached();
    }
}
8144
8145
8146
8147
8148
/*
 * Floating-point three-register-same operations.  'size' selects
 * double (non-zero) vs single precision; 'fpopcode' selects the
 * operation; elements == 1 means a scalar result written to the low
 * part of Rd with the rest zeroed.
 */
static void handle_3same_float(DisasContext *s, int size, int elements,
                               int fpopcode, int rd, int rn, int rm)
{
    int pass;
    TCGv_ptr fpst = get_fpstatus_ptr(false);

    for (pass = 0; pass < elements; pass++) {
        if (size) {
            /* Double precision: one 64-bit element per pass */
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
            TCGv_i64 tcg_res = tcg_temp_new_i64();

            read_vec_element(s, tcg_op1, rn, pass, MO_64);
            read_vec_element(s, tcg_op2, rm, pass, MO_64);

            switch (fpopcode) {
            case 0x39: /* fused multiply-subtract: negate op1 ... */
                gen_helper_vfp_negd(tcg_op1, tcg_op1);
                /* fall through */
            case 0x19: /* ... fused multiply-add into Rd */
                read_vec_element(s, tcg_res, rd, pass, MO_64);
                gen_helper_vfp_muladdd(tcg_res, tcg_op1, tcg_op2,
                                       tcg_res, fpst);
                break;
            case 0x18: /* maxnum */
                gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1a: /* add */
                gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1b: /* mulx */
                gen_helper_vfp_mulxd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1c: /* compare equal */
                gen_helper_neon_ceq_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1e: /* max */
                gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1f: /* reciprocal step */
                gen_helper_recpsf_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x38: /* minnum */
                gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3a: /* subtract */
                gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3e: /* min */
                gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3f: /* reciprocal sqrt step */
                gen_helper_rsqrtsf_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5b: /* multiply */
                gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5c: /* compare greater-or-equal */
                gen_helper_neon_cge_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5d: /* absolute compare greater-or-equal */
                gen_helper_neon_acge_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5f: /* divide */
                gen_helper_vfp_divd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7a: /* absolute difference */
                gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
                gen_helper_vfp_absd(tcg_res, tcg_res);
                break;
            case 0x7c: /* compare greater-than */
                gen_helper_neon_cgt_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7d: /* absolute compare greater-than */
                gen_helper_neon_acgt_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            write_vec_element(s, tcg_res, rd, pass, MO_64);

            tcg_temp_free_i64(tcg_res);
            tcg_temp_free_i64(tcg_op1);
            tcg_temp_free_i64(tcg_op2);
        } else {
            /* Single precision: one 32-bit element per pass */
            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op1, rn, pass, MO_32);
            read_vec_element_i32(s, tcg_op2, rm, pass, MO_32);

            switch (fpopcode) {
            case 0x39: /* fused multiply-subtract: negate op1 ... */
                gen_helper_vfp_negs(tcg_op1, tcg_op1);
                /* fall through */
            case 0x19: /* ... fused multiply-add into Rd */
                read_vec_element_i32(s, tcg_res, rd, pass, MO_32);
                gen_helper_vfp_muladds(tcg_res, tcg_op1, tcg_op2,
                                       tcg_res, fpst);
                break;
            case 0x1a: /* add */
                gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1b: /* mulx */
                gen_helper_vfp_mulxs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1c: /* compare equal */
                gen_helper_neon_ceq_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1e: /* max */
                gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1f: /* reciprocal step */
                gen_helper_recpsf_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x18: /* maxnum */
                gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x38: /* minnum */
                gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3a: /* subtract */
                gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3e: /* min */
                gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3f: /* reciprocal sqrt step */
                gen_helper_rsqrtsf_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5b: /* multiply */
                gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5c: /* compare greater-or-equal */
                gen_helper_neon_cge_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5d: /* absolute compare greater-or-equal */
                gen_helper_neon_acge_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5f: /* divide */
                gen_helper_vfp_divs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7a: /* absolute difference */
                gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
                gen_helper_vfp_abss(tcg_res, tcg_res);
                break;
            case 0x7c: /* compare greater-than */
                gen_helper_neon_cgt_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7d: /* absolute compare greater-than */
                gen_helper_neon_acgt_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            if (elements == 1) {
                /* Scalar: write the 32-bit result zero-extended to
                 * the full 64-bit low half of the destination.
                 */
                TCGv_i64 tcg_tmp = tcg_temp_new_i64();

                tcg_gen_extu_i32_i64(tcg_tmp, tcg_res);
                write_vec_element(s, tcg_tmp, rd, pass, MO_64);
                tcg_temp_free_i64(tcg_tmp);
            } else {
                write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
            }

            tcg_temp_free_i32(tcg_res);
            tcg_temp_free_i32(tcg_op1);
            tcg_temp_free_i32(tcg_op2);
        }
    }

    tcg_temp_free_ptr(fpst);

    /* Clear the high half unless we wrote more than 64 bits */
    clear_vec_high(s, elements * (size ? 8 : 4) > 8, rd);
}
8332
8333
8334
8335
8336
8337
8338
/*
 * AdvSIMD scalar three-register same.  Opcodes >= 0x18 form the
 * floating-point subgroup (dispatched through handle_3same_float);
 * the rest are integer ops handled either via the 64-bit path
 * (handle_3same_64) or per-size 32-bit helpers.
 */
static void disas_simd_scalar_three_reg_same(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 5);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    bool u = extract32(insn, 29, 1);
    TCGv_i64 tcg_rd;

    if (opcode >= 0x18) {
        /* Floating point: fold size<1> and U into the opcode so the
         * switch below matches handle_3same_float's fpopcode space;
         * size<0> then selects single vs double precision.
         */
        int fpopcode = opcode | (extract32(size, 1, 1) << 5) | (u << 6);
        switch (fpopcode) {
        case 0x1b: /* mulx */
        case 0x1f: /* reciprocal step */
        case 0x3f: /* reciprocal sqrt step */
        case 0x5d: /* absolute compare ge */
        case 0x7d: /* absolute compare gt */
        case 0x1c: /* compare eq */
        case 0x5c: /* compare ge */
        case 0x7c: /* compare gt */
        case 0x7a: /* absolute difference */
            break;
        default:
            unallocated_encoding(s);
            return;
        }

        if (!fp_access_check(s)) {
            return;
        }

        handle_3same_float(s, extract32(size, 0, 1), 1, fpopcode, rd, rn, rm);
        return;
    }

    switch (opcode) {
    case 0x1: /* saturating add: any size */
    case 0x5: /* saturating sub: any size */
    case 0x9: /* saturating shift: any size */
    case 0xb: /* saturating rounding shift: any size */
        break;
    case 0x8:  /* shift by register */
    case 0xa:  /* rounding shift by register */
    case 0x6:  /* compare gt */
    case 0x7:  /* compare ge */
    case 0x11: /* compare eq / test bits */
    case 0x10: /* add / sub */
        /* These only exist for the 64-bit element size */
        if (size != 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x16: /* saturating doubling multiply high (sizes 1, 2 only) */
        if (size != 1 && size != 2) {
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rd = tcg_temp_new_i64();

    if (size == 3) {
        TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
        TCGv_i64 tcg_rm = read_fp_dreg(s, rm);

        handle_3same_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rm);
        tcg_temp_free_i64(tcg_rn);
        tcg_temp_free_i64(tcg_rm);
    } else {
        /* Narrower sizes go through per-size Neon env helpers on
         * 32-bit values; the result is zero-extended into tcg_rd
         * before being written back as a D register.
         */
        NeonGenTwoOpEnvFn *genenvfn;
        TCGv_i32 tcg_rn = tcg_temp_new_i32();
        TCGv_i32 tcg_rm = tcg_temp_new_i32();
        TCGv_i32 tcg_rd32 = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_rn, rn, 0, size);
        read_vec_element_i32(s, tcg_rm, rm, 0, size);

        switch (opcode) {
        case 0x1: /* saturating add */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qadd_s8, gen_helper_neon_qadd_u8 },
                { gen_helper_neon_qadd_s16, gen_helper_neon_qadd_u16 },
                { gen_helper_neon_qadd_s32, gen_helper_neon_qadd_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x5: /* saturating sub */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qsub_s8, gen_helper_neon_qsub_u8 },
                { gen_helper_neon_qsub_s16, gen_helper_neon_qsub_u16 },
                { gen_helper_neon_qsub_s32, gen_helper_neon_qsub_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x9: /* saturating shift */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 },
                { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 },
                { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0xb: /* saturating rounding shift */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 },
                { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 },
                { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x16: /* saturating doubling multiply high (u: rounding) */
        {
            static NeonGenTwoOpEnvFn * const fns[2][2] = {
                { gen_helper_neon_qdmulh_s16, gen_helper_neon_qrdmulh_s16 },
                { gen_helper_neon_qdmulh_s32, gen_helper_neon_qrdmulh_s32 },
            };
            assert(size == 1 || size == 2);
            genenvfn = fns[size - 1][u];
            break;
        }
        default:
            g_assert_not_reached();
        }

        genenvfn(tcg_rd32, cpu_env, tcg_rn, tcg_rm);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_rd32);
        tcg_temp_free_i32(tcg_rd32);
        tcg_temp_free_i32(tcg_rn);
        tcg_temp_free_i32(tcg_rm);
    }

    write_fp_dreg(s, rd, tcg_rd);

    tcg_temp_free_i64(tcg_rd);
}
8498
8499
8500
8501
8502
8503
8504
8505
8506
8507static void disas_simd_scalar_three_reg_same_fp16(DisasContext *s,
8508 uint32_t insn)
8509{
8510 int rd = extract32(insn, 0, 5);
8511 int rn = extract32(insn, 5, 5);
8512 int opcode = extract32(insn, 11, 3);
8513 int rm = extract32(insn, 16, 5);
8514 bool u = extract32(insn, 29, 1);
8515 bool a = extract32(insn, 23, 1);
8516 int fpopcode = opcode | (a << 3) | (u << 4);
8517 TCGv_ptr fpst;
8518 TCGv_i32 tcg_op1;
8519 TCGv_i32 tcg_op2;
8520 TCGv_i32 tcg_res;
8521
8522 switch (fpopcode) {
8523 case 0x03:
8524 case 0x04:
8525 case 0x07:
8526 case 0x0f:
8527 case 0x14:
8528 case 0x15:
8529 case 0x1a:
8530 case 0x1c:
8531 case 0x1d:
8532 break;
8533 default:
8534 unallocated_encoding(s);
8535 return;
8536 }
8537
8538 if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
8539 unallocated_encoding(s);
8540 }
8541
8542 if (!fp_access_check(s)) {
8543 return;
8544 }
8545
8546 fpst = get_fpstatus_ptr(true);
8547
8548 tcg_op1 = read_fp_hreg(s, rn);
8549 tcg_op2 = read_fp_hreg(s, rm);
8550 tcg_res = tcg_temp_new_i32();
8551
8552 switch (fpopcode) {
8553 case 0x03:
8554 gen_helper_advsimd_mulxh(tcg_res, tcg_op1, tcg_op2, fpst);
8555 break;
8556 case 0x04:
8557 gen_helper_advsimd_ceq_f16(tcg_res, tcg_op1, tcg_op2, fpst);
8558 break;
8559 case 0x07:
8560 gen_helper_recpsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
8561 break;
8562 case 0x0f:
8563 gen_helper_rsqrtsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
8564 break;
8565 case 0x14:
8566 gen_helper_advsimd_cge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
8567 break;
8568 case 0x15:
8569 gen_helper_advsimd_acge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
8570 break;
8571 case 0x1a:
8572 gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
8573 tcg_gen_andi_i32(tcg_res, tcg_res, 0x7fff);
8574 break;
8575 case 0x1c:
8576 gen_helper_advsimd_cgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
8577 break;
8578 case 0x1d:
8579 gen_helper_advsimd_acgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
8580 break;
8581 default:
8582 g_assert_not_reached();
8583 }
8584
8585 write_fp_sreg(s, rd, tcg_res);
8586
8587
8588 tcg_temp_free_i32(tcg_res);
8589 tcg_temp_free_i32(tcg_op1);
8590 tcg_temp_free_i32(tcg_op2);
8591 tcg_temp_free_ptr(fpst);
8592}
8593
8594
8595
8596
8597
8598
8599
/*
 * AdvSIMD scalar three-register same extra: the saturating rounding
 * doubling multiply accumulate/subtract ops (opcodes 0x0/0x1 with
 * U = 1), sizes 1 and 2 only.  Requires the RDM extension.
 */
static void disas_simd_scalar_three_reg_same_extra(DisasContext *s,
                                                   uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 4);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    bool u = extract32(insn, 29, 1);
    TCGv_i32 ele1, ele2, ele3;
    TCGv_i64 res;
    int feature;

    switch (u * 16 + opcode) {
    case 0x10: /* accumulate form */
    case 0x11: /* subtract form */
        if (size != 1 && size != 2) {
            unallocated_encoding(s);
            return;
        }
        feature = ARM_FEATURE_V8_RDM;
        break;
    default:
        unallocated_encoding(s);
        return;
    }
    if (!arm_dc_feature(s, feature)) {
        unallocated_encoding(s);
        return;
    }
    if (!fp_access_check(s)) {
        return;
    }

    /* The helpers take three 32-bit operands (multiplicands plus the
     * accumulator) regardless of element size.
     */
    ele1 = tcg_temp_new_i32();
    ele2 = tcg_temp_new_i32();
    ele3 = tcg_temp_new_i32();

    read_vec_element_i32(s, ele1, rn, 0, size);
    read_vec_element_i32(s, ele2, rm, 0, size);
    read_vec_element_i32(s, ele3, rd, 0, size);

    switch (opcode) {
    case 0x0: /* accumulate */
        if (size == 1) {
            gen_helper_neon_qrdmlah_s16(ele3, cpu_env, ele1, ele2, ele3);
        } else {
            gen_helper_neon_qrdmlah_s32(ele3, cpu_env, ele1, ele2, ele3);
        }
        break;
    case 0x1: /* subtract */
        if (size == 1) {
            gen_helper_neon_qrdmlsh_s16(ele3, cpu_env, ele1, ele2, ele3);
        } else {
            gen_helper_neon_qrdmlsh_s32(ele3, cpu_env, ele1, ele2, ele3);
        }
        break;
    default:
        g_assert_not_reached();
    }
    tcg_temp_free_i32(ele1);
    tcg_temp_free_i32(ele2);

    /* Zero-extend the result into the full D register */
    res = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(res, ele3);
    tcg_temp_free_i32(ele3);

    write_fp_dreg(s, rd, res);
    tcg_temp_free_i64(res);
}
8676
/* Handle 64->64 operations shared between the scalar and vector
 * two-reg-misc groups: both the size==3 integer ops and the
 * double-precision FP ops. For the FP conversion/rounding cases the
 * caller supplies tcg_rmode/tcg_fpstatus; they may be NULL otherwise.
 */
static void handle_2misc_64(DisasContext *s, int opcode, bool u,
                            TCGv_i64 tcg_rd, TCGv_i64 tcg_rn,
                            TCGv_i32 tcg_rmode, TCGv_ptr tcg_fpstatus)
{
    TCGCond cond;

    switch (opcode) {
    case 0x4: /* CLS (u=0), CLZ (u=1) */
        if (u) {
            tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64);
        } else {
            tcg_gen_clrsb_i64(tcg_rd, tcg_rn);
        }
        break;
    case 0x5: /* NOT */
        /* This opcode value is shared with other narrower insns;
         * presumably the caller has already enforced that size == 3
         * reaches here only for NOT -- confirm against the decoders.
         */
        tcg_gen_not_i64(tcg_rd, tcg_rn);
        break;
    case 0x7: /* SQABS (u=0), SQNEG (u=1) */
        if (u) {
            gen_helper_neon_qneg_s64(tcg_rd, cpu_env, tcg_rn);
        } else {
            gen_helper_neon_qabs_s64(tcg_rd, cpu_env, tcg_rn);
        }
        break;
    case 0xa: /* CMLT (zero) */
        /* 64-bit integer comparison against zero: setcond produces
         * 0 or 1, which the negate turns into 0 or all-ones.
         */
        cond = TCG_COND_LT;
    do_cmop:
        tcg_gen_setcondi_i64(cond, tcg_rd, tcg_rn, 0);
        tcg_gen_neg_i64(tcg_rd, tcg_rd);
        break;
    case 0x8: /* CMGT (u=0), CMGE (u=1), against zero */
        cond = u ? TCG_COND_GE : TCG_COND_GT;
        goto do_cmop;
    case 0x9: /* CMEQ (u=0), CMLE (u=1), against zero */
        cond = u ? TCG_COND_LE : TCG_COND_EQ;
        goto do_cmop;
    case 0xb: /* ABS (u=0), NEG (u=1) */
        if (u) {
            tcg_gen_neg_i64(tcg_rd, tcg_rn);
        } else {
            /* abs: negate, then select the original if it was > 0 */
            TCGv_i64 tcg_zero = tcg_const_i64(0);
            tcg_gen_neg_i64(tcg_rd, tcg_rn);
            tcg_gen_movcond_i64(TCG_COND_GT, tcg_rd, tcg_rn, tcg_zero,
                                tcg_rn, tcg_rd);
            tcg_temp_free_i64(tcg_zero);
        }
        break;
    case 0x2f: /* FABS */
        gen_helper_vfp_absd(tcg_rd, tcg_rn);
        break;
    case 0x6f: /* FNEG */
        gen_helper_vfp_negd(tcg_rd, tcg_rn);
        break;
    case 0x7f: /* FSQRT */
        gen_helper_vfp_sqrtd(tcg_rd, tcg_rn, cpu_env);
        break;
    case 0x1a: /* FCVTNS */
    case 0x1b: /* FCVTMS */
    case 0x1c: /* FCVTAS */
    case 0x3a: /* FCVTPS */
    case 0x3b: /* FCVTZS */
    {
        /* signed conversion; rounding mode was set up by the caller */
        TCGv_i32 tcg_shift = tcg_const_i32(0);
        gen_helper_vfp_tosqd(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
        tcg_temp_free_i32(tcg_shift);
        break;
    }
    case 0x5a: /* FCVTNU */
    case 0x5b: /* FCVTMU */
    case 0x5c: /* FCVTAU */
    case 0x7a: /* FCVTPU */
    case 0x7b: /* FCVTZU */
    {
        /* unsigned conversion; rounding mode was set up by the caller */
        TCGv_i32 tcg_shift = tcg_const_i32(0);
        gen_helper_vfp_touqd(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
        tcg_temp_free_i32(tcg_shift);
        break;
    }
    case 0x18: /* FRINTN */
    case 0x19: /* FRINTM */
    case 0x38: /* FRINTP */
    case 0x39: /* FRINTZ */
    case 0x58: /* FRINTA */
    case 0x79: /* FRINTI */
        gen_helper_rintd(tcg_rd, tcg_rn, tcg_fpstatus);
        break;
    case 0x59: /* FRINTX: round with Inexact exception reported */
        gen_helper_rintd_exact(tcg_rd, tcg_rn, tcg_fpstatus);
        break;
    default:
        g_assert_not_reached();
    }
}
8783
/* Floating-point compare against zero (FCMGT/FCMGE/FCMEQ/FCMLE/FCMLT)
 * for the scalar and vector two-reg-misc groups, in half, single and
 * double precision. The LT/LE forms are implemented by swapping the
 * operands of the GT/GE helpers. is_u is unused here: U has already
 * been folded into opcode.
 */
static void handle_2misc_fcmp_zero(DisasContext *s, int opcode,
                                   bool is_scalar, bool is_u, bool is_q,
                                   int size, int rn, int rd)
{
    bool is_double = (size == MO_64);
    TCGv_ptr fpst;

    if (!fp_access_check(s)) {
        return;
    }

    /* size == MO_16 selects the FP16 status flags */
    fpst = get_fpstatus_ptr(size == MO_16);

    if (is_double) {
        TCGv_i64 tcg_op = tcg_temp_new_i64();
        TCGv_i64 tcg_zero = tcg_const_i64(0);
        TCGv_i64 tcg_res = tcg_temp_new_i64();
        NeonGenTwoDoubleOPFn *genfn;
        bool swap = false;
        int pass;

        switch (opcode) {
        case 0x2e: /* FCMLT (zero): 0 > x */
            swap = true;
            /* fall through */
        case 0x2c: /* FCMGT (zero) */
            genfn = gen_helper_neon_cgt_f64;
            break;
        case 0x2d: /* FCMEQ (zero) */
            genfn = gen_helper_neon_ceq_f64;
            break;
        case 0x6d: /* FCMLE (zero): 0 >= x */
            swap = true;
            /* fall through */
        case 0x6c: /* FCMGE (zero) */
            genfn = gen_helper_neon_cge_f64;
            break;
        default:
            g_assert_not_reached();
        }

        /* The scalar form operates on the low element only. */
        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
            read_vec_element(s, tcg_op, rn, pass, MO_64);
            if (swap) {
                genfn(tcg_res, tcg_zero, tcg_op, fpst);
            } else {
                genfn(tcg_res, tcg_op, tcg_zero, fpst);
            }
            write_vec_element(s, tcg_res, rd, pass, MO_64);
        }
        tcg_temp_free_i64(tcg_res);
        tcg_temp_free_i64(tcg_zero);
        tcg_temp_free_i64(tcg_op);

        clear_vec_high(s, !is_scalar, rd);
    } else {
        TCGv_i32 tcg_op = tcg_temp_new_i32();
        TCGv_i32 tcg_zero = tcg_const_i32(0);
        TCGv_i32 tcg_res = tcg_temp_new_i32();
        NeonGenTwoSingleOPFn *genfn;
        bool swap = false;
        int pass, maxpasses;

        if (size == MO_16) {
            switch (opcode) {
            case 0x2e: /* FCMLT (zero) */
                swap = true;
                /* fall through */
            case 0x2c: /* FCMGT (zero) */
                genfn = gen_helper_advsimd_cgt_f16;
                break;
            case 0x2d: /* FCMEQ (zero) */
                genfn = gen_helper_advsimd_ceq_f16;
                break;
            case 0x6d: /* FCMLE (zero) */
                swap = true;
                /* fall through */
            case 0x6c: /* FCMGE (zero) */
                genfn = gen_helper_advsimd_cge_f16;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            switch (opcode) {
            case 0x2e: /* FCMLT (zero) */
                swap = true;
                /* fall through */
            case 0x2c: /* FCMGT (zero) */
                genfn = gen_helper_neon_cgt_f32;
                break;
            case 0x2d: /* FCMEQ (zero) */
                genfn = gen_helper_neon_ceq_f32;
                break;
            case 0x6d: /* FCMLE (zero) */
                swap = true;
                /* fall through */
            case 0x6c: /* FCMGE (zero) */
                genfn = gen_helper_neon_cge_f32;
                break;
            default:
                g_assert_not_reached();
            }
        }

        if (is_scalar) {
            maxpasses = 1;
        } else {
            /* 64- or 128-bit vector, divided into size-wide elements */
            int vector_size = 8 << is_q;
            maxpasses = vector_size >> size;
        }

        for (pass = 0; pass < maxpasses; pass++) {
            read_vec_element_i32(s, tcg_op, rn, pass, size);
            if (swap) {
                genfn(tcg_res, tcg_zero, tcg_op, fpst);
            } else {
                genfn(tcg_res, tcg_op, tcg_zero, fpst);
            }
            if (is_scalar) {
                write_fp_sreg(s, rd, tcg_res);
            } else {
                write_vec_element_i32(s, tcg_res, rd, pass, size);
            }
        }
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_zero);
        tcg_temp_free_i32(tcg_op);
        if (!is_scalar) {
            clear_vec_high(s, is_q, rd);
        }
    }

    tcg_temp_free_ptr(fpst);
}
8919
/* Reciprocal / reciprocal-sqrt estimate ops from the two-reg-misc
 * groups: URECPE, FRECPE, FRECPX, FRSQRTE, shared between the scalar
 * and vector forms. is_u is unused here: U is already part of opcode.
 */
static void handle_2misc_reciprocal(DisasContext *s, int opcode,
                                    bool is_scalar, bool is_u, bool is_q,
                                    int size, int rn, int rd)
{
    bool is_double = (size == 3);
    TCGv_ptr fpst = get_fpstatus_ptr(false);

    if (is_double) {
        TCGv_i64 tcg_op = tcg_temp_new_i64();
        TCGv_i64 tcg_res = tcg_temp_new_i64();
        int pass;

        /* Scalar form operates on the low element only. */
        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
            read_vec_element(s, tcg_op, rn, pass, MO_64);
            switch (opcode) {
            case 0x3d: /* FRECPE */
                gen_helper_recpe_f64(tcg_res, tcg_op, fpst);
                break;
            case 0x3f: /* FRECPX */
                gen_helper_frecpx_f64(tcg_res, tcg_op, fpst);
                break;
            case 0x7d: /* FRSQRTE */
                gen_helper_rsqrte_f64(tcg_res, tcg_op, fpst);
                break;
            default:
                g_assert_not_reached();
            }
            write_vec_element(s, tcg_res, rd, pass, MO_64);
        }
        tcg_temp_free_i64(tcg_res);
        tcg_temp_free_i64(tcg_op);
        clear_vec_high(s, !is_scalar, rd);
    } else {
        TCGv_i32 tcg_op = tcg_temp_new_i32();
        TCGv_i32 tcg_res = tcg_temp_new_i32();
        int pass, maxpasses;

        if (is_scalar) {
            maxpasses = 1;
        } else {
            maxpasses = is_q ? 4 : 2;
        }

        for (pass = 0; pass < maxpasses; pass++) {
            read_vec_element_i32(s, tcg_op, rn, pass, MO_32);

            switch (opcode) {
            case 0x3c: /* URECPE */
                gen_helper_recpe_u32(tcg_res, tcg_op, fpst);
                break;
            case 0x3d: /* FRECPE */
                gen_helper_recpe_f32(tcg_res, tcg_op, fpst);
                break;
            case 0x3f: /* FRECPX */
                gen_helper_frecpx_f32(tcg_res, tcg_op, fpst);
                break;
            case 0x7d: /* FRSQRTE */
                gen_helper_rsqrte_f32(tcg_res, tcg_op, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            if (is_scalar) {
                write_fp_sreg(s, rd, tcg_res);
            } else {
                write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
            }
        }
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_op);
        if (!is_scalar) {
            clear_vec_high(s, is_q, rd);
        }
    }
    tcg_temp_free_ptr(fpst);
}
8997
/* Narrowing two-reg-misc ops (XTN, SQXTUN, SQXTN/UQXTN, FCVTN,
 * FCVTXN), shared between the scalar and vector forms. Each 2*size
 * source element becomes a size-wide destination element; the results
 * go to the low half of Vd, or the high half for the "2" variants
 * (is_q set).
 */
static void handle_2misc_narrow(DisasContext *s, bool scalar,
                                int opcode, bool u, bool is_q,
                                int size, int rn, int rd)
{
    int pass;
    TCGv_i32 tcg_res[2];
    int destelt = is_q ? 2 : 0;
    int passes = scalar ? 1 : 2;

    if (scalar) {
        /* Scalar produces one result; zero the second slot so the
         * write loop below clears the rest of the destination.
         */
        tcg_res[1] = tcg_const_i32(0);
    }

    for (pass = 0; pass < passes; pass++) {
        TCGv_i64 tcg_op = tcg_temp_new_i64();
        NeonGenNarrowFn *genfn = NULL;
        NeonGenNarrowEnvFn *genenvfn = NULL;

        if (scalar) {
            read_vec_element(s, tcg_op, rn, pass, size + 1);
        } else {
            read_vec_element(s, tcg_op, rn, pass, MO_64);
        }
        tcg_res[pass] = tcg_temp_new_i32();

        switch (opcode) {
        case 0x12: /* XTN (u=0), SQXTUN (u=1) */
        {
            static NeonGenNarrowFn * const xtnfns[3] = {
                gen_helper_neon_narrow_u8,
                gen_helper_neon_narrow_u16,
                tcg_gen_extrl_i64_i32,
            };
            static NeonGenNarrowEnvFn * const sqxtunfns[3] = {
                gen_helper_neon_unarrow_sat8,
                gen_helper_neon_unarrow_sat16,
                gen_helper_neon_unarrow_sat32,
            };
            if (u) {
                genenvfn = sqxtunfns[size];
            } else {
                genfn = xtnfns[size];
            }
            break;
        }
        case 0x14: /* SQXTN (u=0), UQXTN (u=1) */
        {
            static NeonGenNarrowEnvFn * const fns[3][2] = {
                { gen_helper_neon_narrow_sat_s8,
                  gen_helper_neon_narrow_sat_u8 },
                { gen_helper_neon_narrow_sat_s16,
                  gen_helper_neon_narrow_sat_u16 },
                { gen_helper_neon_narrow_sat_s32,
                  gen_helper_neon_narrow_sat_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x16: /* FCVTN, FCVTN2 */
            /* 64-bit to 32-bit, or 2x 32-bit to 2x 16-bit, float */
            if (size == 2) {
                gen_helper_vfp_fcvtsd(tcg_res[pass], tcg_op, cpu_env);
            } else {
                /* Split the 64-bit source into two singles, convert
                 * each to half precision, and pack the two f16 results.
                 */
                TCGv_i32 tcg_lo = tcg_temp_new_i32();
                TCGv_i32 tcg_hi = tcg_temp_new_i32();
                TCGv_ptr fpst = get_fpstatus_ptr(false);
                TCGv_i32 ahp = get_ahp_flag();

                tcg_gen_extr_i64_i32(tcg_lo, tcg_hi, tcg_op);
                gen_helper_vfp_fcvt_f32_to_f16(tcg_lo, tcg_lo, fpst, ahp);
                gen_helper_vfp_fcvt_f32_to_f16(tcg_hi, tcg_hi, fpst, ahp);
                tcg_gen_deposit_i32(tcg_res[pass], tcg_lo, tcg_hi, 16, 16);
                tcg_temp_free_i32(tcg_lo);
                tcg_temp_free_i32(tcg_hi);
                tcg_temp_free_ptr(fpst);
                tcg_temp_free_i32(ahp);
            }
            break;
        case 0x56: /* FCVTXN, FCVTXN2 */
            /* 64-bit to 32-bit float conversion with the
             * "round to odd" semantics of the dedicated helper.
             */
            assert(size == 2);
            gen_helper_fcvtx_f64_to_f32(tcg_res[pass], tcg_op, cpu_env);
            break;
        default:
            g_assert_not_reached();
        }

        if (genfn) {
            genfn(tcg_res[pass], tcg_op);
        } else if (genenvfn) {
            genenvfn(tcg_res[pass], cpu_env, tcg_op);
        }

        tcg_temp_free_i64(tcg_op);
    }

    /* Write both 32-bit result slots (the unused one is zero for the
     * scalar case), then clear the high half of a 128-bit Vd.
     */
    for (pass = 0; pass < 2; pass++) {
        write_vec_element_i32(s, tcg_res[pass], rd, destelt + pass, MO_32);
        tcg_temp_free_i32(tcg_res[pass]);
    }
    clear_vec_high(s, is_q, rd);
}
9105
9106
/* Saturating accumulate ops SUQADD (is_u == false) and USQADD
 * (is_u == true) from the two-reg-misc groups, shared between the
 * scalar and vector forms. Note the deliberately "crossed" helper
 * names: USQADD adds a signed value into an unsigned accumulator
 * (neon_uqadd_s*), SUQADD an unsigned value into a signed one
 * (neon_sqadd_u*).
 */
static void handle_2misc_satacc(DisasContext *s, bool is_scalar, bool is_u,
                                bool is_q, int size, int rn, int rd)
{
    bool is_double = (size == 3);

    if (is_double) {
        TCGv_i64 tcg_rn = tcg_temp_new_i64();
        TCGv_i64 tcg_rd = tcg_temp_new_i64();
        int pass;

        /* Scalar form operates on the low element only. */
        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
            read_vec_element(s, tcg_rn, rn, pass, MO_64);
            read_vec_element(s, tcg_rd, rd, pass, MO_64);

            if (is_u) { /* USQADD */
                gen_helper_neon_uqadd_s64(tcg_rd, cpu_env, tcg_rn, tcg_rd);
            } else { /* SUQADD */
                gen_helper_neon_sqadd_u64(tcg_rd, cpu_env, tcg_rn, tcg_rd);
            }
            write_vec_element(s, tcg_rd, rd, pass, MO_64);
        }
        tcg_temp_free_i64(tcg_rd);
        tcg_temp_free_i64(tcg_rn);
        clear_vec_high(s, !is_scalar, rd);
    } else {
        TCGv_i32 tcg_rn = tcg_temp_new_i32();
        TCGv_i32 tcg_rd = tcg_temp_new_i32();
        int pass, maxpasses;

        if (is_scalar) {
            maxpasses = 1;
        } else {
            maxpasses = is_q ? 4 : 2;
        }

        for (pass = 0; pass < maxpasses; pass++) {
            if (is_scalar) {
                /* Scalar reads just one size-wide element. */
                read_vec_element_i32(s, tcg_rn, rn, pass, size);
                read_vec_element_i32(s, tcg_rd, rd, pass, size);
            } else {
                read_vec_element_i32(s, tcg_rn, rn, pass, MO_32);
                read_vec_element_i32(s, tcg_rd, rd, pass, MO_32);
            }

            if (is_u) { /* USQADD */
                switch (size) {
                case 0:
                    gen_helper_neon_uqadd_s8(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                case 1:
                    gen_helper_neon_uqadd_s16(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                case 2:
                    gen_helper_neon_uqadd_s32(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                default:
                    g_assert_not_reached();
                }
            } else { /* SUQADD */
                switch (size) {
                case 0:
                    gen_helper_neon_sqadd_u8(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                case 1:
                    gen_helper_neon_sqadd_u16(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                case 2:
                    gen_helper_neon_sqadd_u32(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                default:
                    g_assert_not_reached();
                }
            }

            if (is_scalar) {
                /* Zero the whole low 64 bits first; the 32-bit result
                 * written below then lands in the low element.
                 */
                TCGv_i64 tcg_zero = tcg_const_i64(0);
                write_vec_element(s, tcg_zero, rd, 0, MO_64);
                tcg_temp_free_i64(tcg_zero);
            }
            write_vec_element_i32(s, tcg_rd, rd, pass, MO_32);
        }
        tcg_temp_free_i32(tcg_rd);
        tcg_temp_free_i32(tcg_rn);
        clear_vec_high(s, is_q, rd);
    }
}
9193
9194
9195
9196
9197
9198
9199
/* AdvSIMD scalar two reg misc.
 * Fields (from the extract32 calls below):
 *   Rd[4:0], Rn[9:5], opcode[16:12], size[23:22], U[29].
 */
static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 12, 5);
    int size = extract32(insn, 22, 2);
    bool u = extract32(insn, 29, 1);
    bool is_fcvt = false;
    int rmode;
    TCGv_i32 tcg_rmode;
    TCGv_ptr tcg_fpstatus;

    switch (opcode) {
    case 0x3: /* USQADD / SUQADD */
        if (!fp_access_check(s)) {
            return;
        }
        handle_2misc_satacc(s, true, u, false, size, rn, rd);
        return;
    case 0x7: /* SQABS / SQNEG */
        break;
    case 0xa: /* CMLT (zero): no unsigned form */
        if (u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x8: /* CMGT, CMGE (zero) */
    case 0x9: /* CMEQ, CMLE (zero) */
    case 0xb: /* ABS, NEG */
        /* Scalar integer compare/abs/neg only exist at 64 bits. */
        if (size != 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x12: /* SQXTUN: only the U form is allocated */
        if (!u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x14: /* SQXTN, UQXTN */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_2misc_narrow(s, true, opcode, u, false, size, rn, rd);
        return;
    case 0xc ... 0xf:
    case 0x16 ... 0x1d:
    case 0x1f:
        /* Floating point: U, size[1] and opcode fully determine the
         * operation; fold them into opcode. size[0] then only
         * selects single (0) vs double (1) precision.
         */
        opcode |= (extract32(size, 1, 1) << 5) | (u << 6);
        size = extract32(size, 0, 1) ? 3 : 2;
        switch (opcode) {
        case 0x2c: /* FCMGT (zero) */
        case 0x2d: /* FCMEQ (zero) */
        case 0x2e: /* FCMLT (zero) */
        case 0x6c: /* FCMGE (zero) */
        case 0x6d: /* FCMLE (zero) */
            handle_2misc_fcmp_zero(s, opcode, true, u, true, size, rn, rd);
            return;
        case 0x1d: /* SCVTF */
        case 0x5d: /* UCVTF */
        {
            bool is_signed = (opcode == 0x1d);
            if (!fp_access_check(s)) {
                return;
            }
            handle_simd_intfp_conv(s, rd, rn, 1, is_signed, 0, size);
            return;
        }
        case 0x3d: /* FRECPE */
        case 0x3f: /* FRECPX */
        case 0x7d: /* FRSQRTE */
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_reciprocal(s, opcode, true, u, true, size, rn, rd);
            return;
        case 0x1a: /* FCVTNS */
        case 0x1b: /* FCVTMS */
        case 0x3a: /* FCVTPS */
        case 0x3b: /* FCVTZS */
        case 0x5a: /* FCVTNU */
        case 0x5b: /* FCVTMU */
        case 0x7a: /* FCVTPU */
        case 0x7b: /* FCVTZU */
            is_fcvt = true;
            /* rounding-mode index passed to arm_rmode_to_sf() below */
            rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
            break;
        case 0x1c: /* FCVTAS */
        case 0x5c: /* FCVTAU */
            /* TIEAWAY doesn't fit in the usual rounding mode encoding */
            is_fcvt = true;
            rmode = FPROUNDING_TIEAWAY;
            break;
        case 0x56: /* FCVTXN, FCVTXN2 */
            if (size == 2) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            /* size is 3 here, so pass size - 1 == 2 to the narrower */
            handle_2misc_narrow(s, true, opcode, u, false, size - 1, rn, rd);
            return;
        default:
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (is_fcvt) {
        /* Install the requested rounding mode for the conversion. */
        tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
        tcg_fpstatus = get_fpstatus_ptr(false);
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
    } else {
        tcg_rmode = NULL;
        tcg_fpstatus = NULL;
    }

    if (size == 3) {
        TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
        TCGv_i64 tcg_rd = tcg_temp_new_i64();

        handle_2misc_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rmode, tcg_fpstatus);
        write_fp_dreg(s, rd, tcg_rd);
        tcg_temp_free_i64(tcg_rd);
        tcg_temp_free_i64(tcg_rn);
    } else {
        TCGv_i32 tcg_rn = tcg_temp_new_i32();
        TCGv_i32 tcg_rd = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_rn, rn, 0, size);

        switch (opcode) {
        case 0x7: /* SQABS (u=0), SQNEG (u=1) */
        {
            NeonGenOneOpEnvFn *genfn;
            static NeonGenOneOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 },
                { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 },
                { gen_helper_neon_qabs_s32, gen_helper_neon_qneg_s32 },
            };
            genfn = fns[size][u];
            genfn(tcg_rd, cpu_env, tcg_rn);
            break;
        }
        case 0x1a: /* FCVTNS */
        case 0x1b: /* FCVTMS */
        case 0x1c: /* FCVTAS */
        case 0x3a: /* FCVTPS */
        case 0x3b: /* FCVTZS */
        {
            /* single precision, signed conversion */
            TCGv_i32 tcg_shift = tcg_const_i32(0);
            gen_helper_vfp_tosls(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
            tcg_temp_free_i32(tcg_shift);
            break;
        }
        case 0x5a: /* FCVTNU */
        case 0x5b: /* FCVTMU */
        case 0x5c: /* FCVTAU */
        case 0x7a: /* FCVTPU */
        case 0x7b: /* FCVTZU */
        {
            /* single precision, unsigned conversion */
            TCGv_i32 tcg_shift = tcg_const_i32(0);
            gen_helper_vfp_touls(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
            tcg_temp_free_i32(tcg_shift);
            break;
        }
        default:
            g_assert_not_reached();
        }

        write_fp_sreg(s, rd, tcg_rd);
        tcg_temp_free_i32(tcg_rd);
        tcg_temp_free_i32(tcg_rn);
    }

    if (is_fcvt) {
        /* Restore the previous rounding mode. */
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
        tcg_temp_free_i32(tcg_rmode);
        tcg_temp_free_ptr(tcg_fpstatus);
    }
}
9399
/* SSRA/USRA expanders: shift the source right (arithmetic for SSRA,
 * logical for USRA) and accumulate into the destination. The i64
 * variants operate on several packed lanes at once; all of them
 * clobber the input 'a'.
 */

/* SSRA, 8-bit lanes packed in an i64 */
static void gen_ssra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_sar8i_i64(a, a, shift);
    tcg_gen_vec_add8_i64(d, d, a);
}

/* SSRA, 16-bit lanes packed in an i64 */
static void gen_ssra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_sar16i_i64(a, a, shift);
    tcg_gen_vec_add16_i64(d, d, a);
}

/* SSRA, one 32-bit lane */
static void gen_ssra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_sari_i32(a, a, shift);
    tcg_gen_add_i32(d, d, a);
}

/* SSRA, one 64-bit lane */
static void gen_ssra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_sari_i64(a, a, shift);
    tcg_gen_add_i64(d, d, a);
}

/* SSRA, host vector form */
static void gen_ssra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    tcg_gen_sari_vec(vece, a, a, sh);
    tcg_gen_add_vec(vece, d, d, a);
}

/* USRA, 8-bit lanes packed in an i64 */
static void gen_usra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_shr8i_i64(a, a, shift);
    tcg_gen_vec_add8_i64(d, d, a);
}

/* USRA, 16-bit lanes packed in an i64 */
static void gen_usra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_shr16i_i64(a, a, shift);
    tcg_gen_vec_add16_i64(d, d, a);
}

/* USRA, one 32-bit lane */
static void gen_usra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_shri_i32(a, a, shift);
    tcg_gen_add_i32(d, d, a);
}

/* USRA, one 64-bit lane */
static void gen_usra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_shri_i64(a, a, shift);
    tcg_gen_add_i64(d, d, a);
}

/* USRA, host vector form */
static void gen_usra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    tcg_gen_shri_vec(vece, a, a, sh);
    tcg_gen_add_vec(vece, d, d, a);
}
9459
/* SRI expanders: shift right and insert. The low (lane_bits - shift)
 * bits of each destination lane are replaced by the shifted source;
 * the top 'shift' bits of each lane are preserved.
 */

/* SRI, 8-bit lanes packed in an i64 */
static void gen_shr8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    /* mask covers the lane bits that receive the shifted-in value */
    uint64_t mask = dup_const(MO_8, 0xff >> shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shri_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

/* SRI, 16-bit lanes packed in an i64 */
static void gen_shr16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_16, 0xffff >> shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shri_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

/* SRI, one 32-bit lane; clobbers 'a'.
 * NOTE: relies on shift < 32 so the deposit width is valid (callers
 * special-case shift == element size).
 */
static void gen_shr32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_shri_i32(a, a, shift);
    tcg_gen_deposit_i32(d, d, a, 0, 32 - shift);
}

/* SRI, one 64-bit lane; clobbers 'a'. Relies on shift < 64, as above. */
static void gen_shr64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_shri_i64(a, a, shift);
    tcg_gen_deposit_i64(d, d, a, 0, 64 - shift);
}

/* SRI, host vector form */
static void gen_shr_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    /* mask is all bits of one lane; mask ^ (mask >> sh) is the set of
     * destination bits to preserve (the top sh bits of each lane).
     */
    uint64_t mask = (2ull << ((8 << vece) - 1)) - 1;
    TCGv_vec t = tcg_temp_new_vec_matching(d);
    TCGv_vec m = tcg_temp_new_vec_matching(d);

    tcg_gen_dupi_vec(vece, m, mask ^ (mask >> sh));
    tcg_gen_shri_vec(vece, t, a, sh);
    tcg_gen_and_vec(vece, d, d, m);
    tcg_gen_or_vec(vece, d, d, t);

    tcg_temp_free_vec(t);
    tcg_temp_free_vec(m);
}
9510
9511
/* SSHR/USHR, SSRA/USRA, SRSHR/URSHR, SRSRA/URSRA, SRI -
 * vector shift right by immediate (with optional rounding,
 * accumulation or insert).
 */
static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
                                 int immh, int immb, int opcode, int rn, int rd)
{
    /* gvec expander tables for SSRA, USRA and SRI, per element size */
    static const GVecGen2i ssra_op[4] = {
        { .fni8 = gen_ssra8_i64,
          .fniv = gen_ssra_vec,
          .load_dest = true,
          .opc = INDEX_op_sari_vec,
          .vece = MO_8 },
        { .fni8 = gen_ssra16_i64,
          .fniv = gen_ssra_vec,
          .load_dest = true,
          .opc = INDEX_op_sari_vec,
          .vece = MO_16 },
        { .fni4 = gen_ssra32_i32,
          .fniv = gen_ssra_vec,
          .load_dest = true,
          .opc = INDEX_op_sari_vec,
          .vece = MO_32 },
        { .fni8 = gen_ssra64_i64,
          .fniv = gen_ssra_vec,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .load_dest = true,
          .opc = INDEX_op_sari_vec,
          .vece = MO_64 },
    };
    static const GVecGen2i usra_op[4] = {
        { .fni8 = gen_usra8_i64,
          .fniv = gen_usra_vec,
          .load_dest = true,
          .opc = INDEX_op_shri_vec,
          .vece = MO_8, },
        { .fni8 = gen_usra16_i64,
          .fniv = gen_usra_vec,
          .load_dest = true,
          .opc = INDEX_op_shri_vec,
          .vece = MO_16, },
        { .fni4 = gen_usra32_i32,
          .fniv = gen_usra_vec,
          .load_dest = true,
          .opc = INDEX_op_shri_vec,
          .vece = MO_32, },
        { .fni8 = gen_usra64_i64,
          .fniv = gen_usra_vec,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .load_dest = true,
          .opc = INDEX_op_shri_vec,
          .vece = MO_64, },
    };
    static const GVecGen2i sri_op[4] = {
        { .fni8 = gen_shr8_ins_i64,
          .fniv = gen_shr_ins_vec,
          .load_dest = true,
          .opc = INDEX_op_shri_vec,
          .vece = MO_8 },
        { .fni8 = gen_shr16_ins_i64,
          .fniv = gen_shr_ins_vec,
          .load_dest = true,
          .opc = INDEX_op_shri_vec,
          .vece = MO_16 },
        { .fni4 = gen_shr32_ins_i32,
          .fniv = gen_shr_ins_vec,
          .load_dest = true,
          .opc = INDEX_op_shri_vec,
          .vece = MO_32 },
        { .fni8 = gen_shr64_ins_i64,
          .fniv = gen_shr_ins_vec,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .load_dest = true,
          .opc = INDEX_op_shri_vec,
          .vece = MO_64 },
    };

    int size = 32 - clz32(immh) - 1;
    int immhb = immh << 3 | immb;
    int shift = 2 * (8 << size) - immhb;  /* shift count from immh:immb */
    bool accumulate = false;
    int dsize = is_q ? 128 : 64;
    int esize = 8 << size;
    int elements = dsize/esize;
    TCGMemOp memop = size | (is_u ? 0 : MO_SIGN);
    TCGv_i64 tcg_rn = new_tmp_a64(s);
    TCGv_i64 tcg_rd = new_tmp_a64(s);
    TCGv_i64 tcg_round;
    uint64_t round_const;
    int i;

    /* immh bit 3 set means 64-bit elements: only valid with Q=1 */
    if (extract32(immh, 3, 1) && !is_q) {
        unallocated_encoding(s);
        return;
    }
    tcg_debug_assert(size <= 3);

    if (!fp_access_check(s)) {
        return;
    }

    switch (opcode) {
    case 0x02: /* SSRA / USRA (accumulate) */
        if (is_u) {
            /* Shift count same as element size produces zero to add. */
            if (shift == 8 << size) {
                goto done;
            }
            gen_gvec_op2i(s, is_q, rd, rn, shift, &usra_op[size]);
        } else {
            /* Shift count same as element size produces all sign. */
            if (shift == 8 << size) {
                shift -= 1;
            }
            gen_gvec_op2i(s, is_q, rd, rn, shift, &ssra_op[size]);
        }
        return;
    case 0x08: /* SRI */
        /* Shift count same as element size is valid but does nothing. */
        if (shift == 8 << size) {
            goto done;
        }
        gen_gvec_op2i(s, is_q, rd, rn, shift, &sri_op[size]);
        return;

    case 0x00: /* SSHR / USHR */
        if (is_u) {
            if (shift == 8 << size) {
                /* Shift count the same size as the element size
                 * produces zero: just clear the destination.
                 */
                tcg_gen_gvec_dup8i(vec_full_reg_offset(s, rd),
                                   is_q ? 16 : 8, vec_full_reg_size(s), 0);
            } else {
                gen_gvec_fn2i(s, is_q, rd, rn, shift, tcg_gen_gvec_shri, size);
            }
        } else {
            /* Shift count same as element size produces all sign. */
            if (shift == 8 << size) {
                shift -= 1;
            }
            gen_gvec_fn2i(s, is_q, rd, rn, shift, tcg_gen_gvec_sari, size);
        }
        return;

    case 0x04: /* SRSHR / URSHR (rounding) */
        break;
    case 0x06: /* SRSRA / URSRA (accum + rounding) */
        accumulate = true;
        break;
    default:
        g_assert_not_reached();
    }

    /* Rounding variants go element-by-element through the helper. */
    round_const = 1ULL << (shift - 1);
    tcg_round = tcg_const_i64(round_const);

    for (i = 0; i < elements; i++) {
        read_vec_element(s, tcg_rn, rn, i, memop);
        if (accumulate) {
            read_vec_element(s, tcg_rd, rd, i, memop);
        }

        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
                                accumulate, is_u, size, shift);

        write_vec_element(s, tcg_rd, rd, i, size);
    }
    tcg_temp_free_i64(tcg_round);

 done:
    clear_vec_high(s, is_q, rd);
}
9679
/* SLI expanders: shift left and insert. The high lane bits are
 * replaced by the shifted source; the low 'shift' bits of each
 * destination lane are preserved.
 */

/* SLI, 8-bit lanes packed in an i64 */
static void gen_shl8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    /* mask covers the lane bits that receive the shifted value */
    uint64_t mask = dup_const(MO_8, 0xff << shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

/* SLI, 16-bit lanes packed in an i64 */
static void gen_shl16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_16, 0xffff << shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

/* SLI, one 32-bit lane. NOTE: relies on shift < 32 for a valid
 * deposit width.
 */
static void gen_shl32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_deposit_i32(d, d, a, shift, 32 - shift);
}

/* SLI, one 64-bit lane. Relies on shift < 64, as above. */
static void gen_shl64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_deposit_i64(d, d, a, shift, 64 - shift);
}

/* SLI, host vector form */
static void gen_shl_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    /* (1 << sh) - 1 is the set of low destination bits to preserve */
    uint64_t mask = (1ull << sh) - 1;
    TCGv_vec t = tcg_temp_new_vec_matching(d);
    TCGv_vec m = tcg_temp_new_vec_matching(d);

    tcg_gen_dupi_vec(vece, m, mask);
    tcg_gen_shli_vec(vece, t, a, sh);
    tcg_gen_and_vec(vece, d, d, m);
    tcg_gen_or_vec(vece, d, d, t);

    tcg_temp_free_vec(t);
    tcg_temp_free_vec(m);
}
9728
9729
/* SHL (insert == false) / SLI (insert == true) - vector shift left
 * by immediate.
 */
static void handle_vec_simd_shli(DisasContext *s, bool is_q, bool insert,
                                 int immh, int immb, int opcode, int rn, int rd)
{
    /* gvec expanders for SLI, per element size */
    static const GVecGen2i shi_op[4] = {
        { .fni8 = gen_shl8_ins_i64,
          .fniv = gen_shl_ins_vec,
          .opc = INDEX_op_shli_vec,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .load_dest = true,
          .vece = MO_8 },
        { .fni8 = gen_shl16_ins_i64,
          .fniv = gen_shl_ins_vec,
          .opc = INDEX_op_shli_vec,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .load_dest = true,
          .vece = MO_16 },
        { .fni4 = gen_shl32_ins_i32,
          .fniv = gen_shl_ins_vec,
          .opc = INDEX_op_shli_vec,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .load_dest = true,
          .vece = MO_32 },
        { .fni8 = gen_shl64_ins_i64,
          .fniv = gen_shl_ins_vec,
          .opc = INDEX_op_shli_vec,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .load_dest = true,
          .vece = MO_64 },
    };
    int size = 32 - clz32(immh) - 1;
    int immhb = immh << 3 | immb;
    int shift = immhb - (8 << size);

    /* immh bit 3 set means 64-bit elements: only valid with Q=1 */
    if (extract32(immh, 3, 1) && !is_q) {
        unallocated_encoding(s);
        return;
    }

    /* NOTE(review): with a 4-bit immh, size can never exceed 3, so
     * this check looks unreachable - confirm against the decode paths.
     */
    if (size > 3 && !is_q) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (insert) {
        gen_gvec_op2i(s, is_q, rd, rn, shift, &shi_op[size]);
    } else {
        gen_gvec_fn2i(s, is_q, rd, rn, shift, tcg_gen_gvec_shli, size);
    }
}
9783
9784
9785static void handle_vec_simd_wshli(DisasContext *s, bool is_q, bool is_u,
9786 int immh, int immb, int opcode, int rn, int rd)
9787{
9788 int size = 32 - clz32(immh) - 1;
9789 int immhb = immh << 3 | immb;
9790 int shift = immhb - (8 << size);
9791 int dsize = 64;
9792 int esize = 8 << size;
9793 int elements = dsize/esize;
9794 TCGv_i64 tcg_rn = new_tmp_a64(s);
9795 TCGv_i64 tcg_rd = new_tmp_a64(s);
9796 int i;
9797
9798 if (size >= 3) {
9799 unallocated_encoding(s);
9800 return;
9801 }
9802
9803 if (!fp_access_check(s)) {
9804 return;
9805 }
9806
9807
9808
9809
9810
9811 read_vec_element(s, tcg_rn, rn, is_q ? 1 : 0, MO_64);
9812
9813 for (i = 0; i < elements; i++) {
9814 tcg_gen_shri_i64(tcg_rd, tcg_rn, i * esize);
9815 ext_and_shift_reg(tcg_rd, tcg_rd, size | (!is_u << 2), 0);
9816 tcg_gen_shli_i64(tcg_rd, tcg_rd, shift);
9817 write_vec_element(s, tcg_rd, rd, i, size + 1);
9818 }
9819}
9820
9821
/* SHRN (opcode bit 0 clear) / RSHRN (set) - vector shift right with
 * narrowing (and potential rounding). Results go to the low half of
 * Vd, or the high half for the "2" variants (is_q).
 */
static void handle_vec_simd_shrn(DisasContext *s, bool is_q,
                                 int immh, int immb, int opcode, int rn, int rd)
{
    int immhb = immh << 3 | immb;
    int size = 32 - clz32(immh) - 1;
    int dsize = 64;
    int esize = 8 << size;
    int elements = dsize/esize;
    int shift = (2 * esize) - immhb;
    bool round = extract32(opcode, 0, 1);
    TCGv_i64 tcg_rn, tcg_rd, tcg_final;
    TCGv_i64 tcg_round;
    int i;

    /* immh bit 3 would mean 64-bit source elements: not narrowable */
    if (extract32(immh, 3, 1)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rn = tcg_temp_new_i64();
    tcg_rd = tcg_temp_new_i64();
    tcg_final = tcg_temp_new_i64();
    /* Accumulate results in tcg_final, starting from the current
     * contents of the destination half we are going to write.
     */
    read_vec_element(s, tcg_final, rd, is_q ? 1 : 0, MO_64);

    if (round) {
        uint64_t round_const = 1ULL << (shift - 1);
        tcg_round = tcg_const_i64(round_const);
    } else {
        tcg_round = NULL;
    }

    for (i = 0; i < elements; i++) {
        read_vec_element(s, tcg_rn, rn, i, size+1);
        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
                                false, true, size+1, shift);

        tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
    }

    /* Write only the half of Vd this insn targets. */
    if (!is_q) {
        write_vec_element(s, tcg_final, rd, 0, MO_64);
    } else {
        write_vec_element(s, tcg_final, rd, 1, MO_64);
    }
    if (round) {
        tcg_temp_free_i64(tcg_round);
    }
    tcg_temp_free_i64(tcg_rn);
    tcg_temp_free_i64(tcg_rd);
    tcg_temp_free_i64(tcg_final);

    clear_vec_high(s, is_q, rd);
}
9879
9880
9881
9882
9883
9884
9885
9886
/* AdvSIMD shift by immediate.
 * Fields (from the extract32 calls below):
 *   Rd[4:0], Rn[9:5], opcode[15:11], immb[18:16], immh[22:19],
 *   U[29], Q[30].
 */
static void disas_simd_shift_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 5);
    int immb = extract32(insn, 16, 3);
    int immh = extract32(insn, 19, 4);
    bool is_u = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);

    switch (opcode) {
    case 0x08: /* SRI: only the U form is allocated */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x00: /* SSHR / USHR */
    case 0x02: /* SSRA / USRA (accumulate) */
    case 0x04: /* SRSHR / URSHR (rounding) */
    case 0x06: /* SRSRA / URSRA (accum + rounding) */
        handle_vec_simd_shri(s, is_q, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x0a: /* SHL / SLI */
        handle_vec_simd_shli(s, is_q, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x10: /* SHRN / SQSHRUN */
    case 0x11: /* RSHRN / SQRSHRUN */
        if (is_u) {
            handle_vec_simd_sqshrn(s, false, is_q, false, true, immh, immb,
                                   opcode, rn, rd);
        } else {
            handle_vec_simd_shrn(s, is_q, immh, immb, opcode, rn, rd);
        }
        break;
    case 0x12: /* SQSHRN / UQSHRN */
    case 0x13: /* SQRSHRN / UQRSHRN */
        handle_vec_simd_sqshrn(s, false, is_q, is_u, is_u, immh, immb,
                               opcode, rn, rd);
        break;
    case 0x14: /* SSHLL / USHLL */
        handle_vec_simd_wshli(s, is_q, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x1c: /* SCVTF / UCVTF (fixed point) */
        handle_simd_shift_intfp_conv(s, false, is_q, is_u, immh, immb,
                                     opcode, rn, rd);
        break;
    case 0xc: /* SQSHLU: only the U form is allocated */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        handle_simd_qshl(s, false, is_q, false, true, immh, immb, rn, rd);
        break;
    case 0xe: /* SQSHL / UQSHL */
        handle_simd_qshl(s, false, is_q, is_u, is_u, immh, immb, rn, rd);
        break;
    case 0x1f: /* FCVTZS / FCVTZU (fixed point) */
        handle_simd_shift_fpint_conv(s, false, is_q, is_u, immh, immb, rn, rd);
        return;
    default:
        unallocated_encoding(s);
        return;
    }
}
9952
9953
9954
9955
9956static void gen_neon_addl(int size, bool is_sub, TCGv_i64 tcg_res,
9957 TCGv_i64 tcg_op1, TCGv_i64 tcg_op2)
9958{
9959 static NeonGenTwo64OpFn * const fns[3][2] = {
9960 { gen_helper_neon_addl_u16, gen_helper_neon_subl_u16 },
9961 { gen_helper_neon_addl_u32, gen_helper_neon_subl_u32 },
9962 { tcg_gen_add_i64, tcg_gen_sub_i64 },
9963 };
9964 NeonGenTwo64OpFn *genfn;
9965 assert(size < 3);
9966
9967 genfn = fns[size][is_sub];
9968 genfn(tcg_res, tcg_op1, tcg_op2);
9969}
9970
/* Three-reg-different widening group: ops producing 64-bit-wide
 * elements from pairs of narrower source elements, some of which
 * accumulate into the existing destination value.
 */
static void handle_3rd_widening(DisasContext *s, int is_q, int is_u, int size,
                                int opcode, int rd, int rn, int rm)
{
    /* The two 64-bit halves of the 128-bit result */
    TCGv_i64 tcg_res[2];
    int pass, accop;

    tcg_res[0] = tcg_temp_new_i64();
    tcg_res[1] = tcg_temp_new_i64();

    /* Does this op accumulate into Vd?  +1: adding accumulate,
     * -1: subtracting accumulate, 0: plain (destructive) write.
     */
    switch (opcode) {
    case 5:
    case 8:
    case 9:
        accop = 1;
        break;
    case 10:
    case 11:
        accop = -1;
        break;
    default:
        accop = 0;
        break;
    }

    if (accop != 0) {
        read_vec_element(s, tcg_res[0], rd, 0, MO_64);
        read_vec_element(s, tcg_res[1], rd, 1, MO_64);
    }

    /* size == 2 means 32x32 -> 64 ops, done directly in 64-bit
     * arithmetic; smaller sizes read 32-bit source elements and use
     * the Neon widening helpers.
     */
    if (size == 2) {
        for (pass = 0; pass < 2; pass++) {
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
            TCGv_i64 tcg_passres;
            TCGMemOp memop = MO_32 | (is_u ? 0 : MO_SIGN);

            /* The "2" forms (is_q) read from the high half of the source */
            int elt = pass + is_q * 2;

            read_vec_element(s, tcg_op1, rn, elt, memop);
            read_vec_element(s, tcg_op2, rm, elt, memop);

            if (accop == 0) {
                tcg_passres = tcg_res[pass];
            } else {
                /* Compute into a scratch first; accumulate afterwards */
                tcg_passres = tcg_temp_new_i64();
            }

            switch (opcode) {
            case 0:
                tcg_gen_add_i64(tcg_passres, tcg_op1, tcg_op2);
                break;
            case 2:
                tcg_gen_sub_i64(tcg_passres, tcg_op1, tcg_op2);
                break;
            case 5:
            case 7:
            {
                /* Absolute difference: compute both (op1-op2) and
                 * (op2-op1) and select the non-negative one.
                 */
                TCGv_i64 tcg_tmp1 = tcg_temp_new_i64();
                TCGv_i64 tcg_tmp2 = tcg_temp_new_i64();

                tcg_gen_sub_i64(tcg_tmp1, tcg_op1, tcg_op2);
                tcg_gen_sub_i64(tcg_tmp2, tcg_op2, tcg_op1);
                tcg_gen_movcond_i64(is_u ? TCG_COND_GEU : TCG_COND_GE,
                                    tcg_passres,
                                    tcg_op1, tcg_op2, tcg_tmp1, tcg_tmp2);
                tcg_temp_free_i64(tcg_tmp1);
                tcg_temp_free_i64(tcg_tmp2);
                break;
            }
            case 8:
            case 10:
            case 12:
                tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2);
                break;
            case 9:
            case 11:
            case 13:
                /* Saturating doubling multiply: multiply then
                 * saturating self-add (the doubling step).
                 */
                tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2);
                gen_helper_neon_addl_saturate_s64(tcg_passres, cpu_env,
                                                  tcg_passres, tcg_passres);
                break;
            default:
                g_assert_not_reached();
            }

            if (opcode == 9 || opcode == 11) {
                /* Saturating accumulate: negate first if subtracting */
                if (accop < 0) {
                    tcg_gen_neg_i64(tcg_passres, tcg_passres);
                }
                gen_helper_neon_addl_saturate_s64(tcg_res[pass], cpu_env,
                                                  tcg_res[pass], tcg_passres);
            } else if (accop > 0) {
                tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
            } else if (accop < 0) {
                tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
            }

            if (accop != 0) {
                tcg_temp_free_i64(tcg_passres);
            }

            tcg_temp_free_i64(tcg_op1);
            tcg_temp_free_i64(tcg_op2);
        }
    } else {
        /* size 0 or 1: 32-bit source elements, widening helpers */
        for (pass = 0; pass < 2; pass++) {
            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
            TCGv_i64 tcg_passres;
            int elt = pass + is_q * 2;

            read_vec_element_i32(s, tcg_op1, rn, elt, MO_32);
            read_vec_element_i32(s, tcg_op2, rm, elt, MO_32);

            if (accop == 0) {
                tcg_passres = tcg_res[pass];
            } else {
                tcg_passres = tcg_temp_new_i64();
            }

            switch (opcode) {
            case 0:
            case 2:
            {
                TCGv_i64 tcg_op2_64 = tcg_temp_new_i64();
                static NeonGenWidenFn * const widenfns[2][2] = {
                    { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 },
                    { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 },
                };
                NeonGenWidenFn *widenfn = widenfns[size][is_u];

                /* Widen both operands, then add or subtract */
                widenfn(tcg_op2_64, tcg_op2);
                widenfn(tcg_passres, tcg_op1);
                gen_neon_addl(size, (opcode == 2), tcg_passres,
                              tcg_passres, tcg_op2_64);
                tcg_temp_free_i64(tcg_op2_64);
                break;
            }
            case 5:
            case 7:
                /* Widening absolute difference via helpers */
                if (size == 0) {
                    if (is_u) {
                        gen_helper_neon_abdl_u16(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_abdl_s16(tcg_passres, tcg_op1, tcg_op2);
                    }
                } else {
                    if (is_u) {
                        gen_helper_neon_abdl_u32(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_abdl_s32(tcg_passres, tcg_op1, tcg_op2);
                    }
                }
                break;
            case 8:
            case 10:
            case 12:
                /* Widening multiply via helpers */
                if (size == 0) {
                    if (is_u) {
                        gen_helper_neon_mull_u8(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_mull_s8(tcg_passres, tcg_op1, tcg_op2);
                    }
                } else {
                    if (is_u) {
                        gen_helper_neon_mull_u16(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2);
                    }
                }
                break;
            case 9:
            case 11:
            case 13:
                /* Saturating doubling multiply: only valid for size 1
                 * here (size 0 is rejected by the caller's decode).
                 */
                assert(size == 1);
                gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2);
                gen_helper_neon_addl_saturate_s32(tcg_passres, cpu_env,
                                                  tcg_passres, tcg_passres);
                break;
            case 14:
                /* Polynomial multiply: 8-bit only */
                assert(size == 0);
                gen_helper_neon_mull_p8(tcg_passres, tcg_op1, tcg_op2);
                break;
            default:
                g_assert_not_reached();
            }
            tcg_temp_free_i32(tcg_op1);
            tcg_temp_free_i32(tcg_op2);

            if (accop != 0) {
                if (opcode == 9 || opcode == 11) {
                    /* Saturating accumulate; negate first if subtracting */
                    if (accop < 0) {
                        gen_helper_neon_negl_u32(tcg_passres, tcg_passres);
                    }
                    gen_helper_neon_addl_saturate_s32(tcg_res[pass], cpu_env,
                                                      tcg_res[pass],
                                                      tcg_passres);
                } else {
                    gen_neon_addl(size, (accop < 0), tcg_res[pass],
                                  tcg_res[pass], tcg_passres);
                }
                tcg_temp_free_i64(tcg_passres);
            }
        }
    }

    write_vec_element(s, tcg_res[0], rd, 0, MO_64);
    write_vec_element(s, tcg_res[1], rd, 1, MO_64);
    tcg_temp_free_i64(tcg_res[0]);
    tcg_temp_free_i64(tcg_res[1]);
}
10192
/* Three-reg-different "wide" group: the first operand is already wide
 * (64-bit elements of Vn); the second operand's elements are widened
 * from Vm (low or high half per is_q) before the add/subtract.
 */
static void handle_3rd_wide(DisasContext *s, int is_q, int is_u, int size,
                            int opcode, int rd, int rn, int rm)
{
    TCGv_i64 tcg_res[2];
    int part = is_q ? 2 : 0;   /* the "2" forms read Vm's high half */
    int pass;

    for (pass = 0; pass < 2; pass++) {
        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
        TCGv_i32 tcg_op2 = tcg_temp_new_i32();
        TCGv_i64 tcg_op2_wide = tcg_temp_new_i64();
        static NeonGenWidenFn * const widenfns[3][2] = {
            { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 },
            { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 },
            { tcg_gen_ext_i32_i64, tcg_gen_extu_i32_i64 },
        };
        NeonGenWidenFn *widenfn = widenfns[size][is_u];

        read_vec_element(s, tcg_op1, rn, pass, MO_64);
        read_vec_element_i32(s, tcg_op2, rm, part + pass, MO_32);
        widenfn(tcg_op2_wide, tcg_op2);
        tcg_temp_free_i32(tcg_op2);
        tcg_res[pass] = tcg_temp_new_i64();
        /* opcode distinguishes add from subtract here */
        gen_neon_addl(size, (opcode == 3),
                      tcg_res[pass], tcg_op1, tcg_op2_wide);
        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2_wide);
    }

    /* Write back only after both passes: rd may alias rn or rm */
    for (pass = 0; pass < 2; pass++) {
        write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
        tcg_temp_free_i64(tcg_res[pass]);
    }
}
10227
/* Narrow a 64-bit value to its high 32 bits with rounding: add the
 * half-ulp (1 << 31) before taking the top half.  Note this clobbers
 * the input temporary 'in'.
 */
static void do_narrow_round_high_u32(TCGv_i32 res, TCGv_i64 in)
{
    tcg_gen_addi_i64(in, in, 1U << 31);
    tcg_gen_extrh_i64_i32(res, in);
}
10233
/* Three-reg-different narrowing group: 64-bit elements of Vn and Vm
 * are added or subtracted, then the high half of each wide result is
 * taken (optionally rounded) and written to half of Vd; is_q selects
 * which half (the "2" forms).
 */
static void handle_3rd_narrowing(DisasContext *s, int is_q, int is_u, int size,
                                 int opcode, int rd, int rn, int rm)
{
    TCGv_i32 tcg_res[2];
    int part = is_q ? 2 : 0;   /* the "2" forms write Vd's high half */
    int pass;

    for (pass = 0; pass < 2; pass++) {
        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
        TCGv_i64 tcg_op2 = tcg_temp_new_i64();
        TCGv_i64 tcg_wideres = tcg_temp_new_i64();
        /* Indexed by [size][round]: is_u here selects the rounding form */
        static NeonGenNarrowFn * const narrowfns[3][2] = {
            { gen_helper_neon_narrow_high_u8,
              gen_helper_neon_narrow_round_high_u8 },
            { gen_helper_neon_narrow_high_u16,
              gen_helper_neon_narrow_round_high_u16 },
            { tcg_gen_extrh_i64_i32, do_narrow_round_high_u32 },
        };
        NeonGenNarrowFn *gennarrow = narrowfns[size][is_u];

        read_vec_element(s, tcg_op1, rn, pass, MO_64);
        read_vec_element(s, tcg_op2, rm, pass, MO_64);

        /* opcode distinguishes add from subtract here */
        gen_neon_addl(size, (opcode == 6), tcg_wideres, tcg_op1, tcg_op2);

        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2);

        tcg_res[pass] = tcg_temp_new_i32();
        gennarrow(tcg_res[pass], tcg_wideres);
        tcg_temp_free_i64(tcg_wideres);
    }

    /* Write back only after both passes: rd may alias rn or rm */
    for (pass = 0; pass < 2; pass++) {
        write_vec_element_i32(s, tcg_res[pass], rd, pass + part, MO_32);
        tcg_temp_free_i32(tcg_res[pass]);
    }
    clear_vec_high(s, is_q, rd);
}
10273
/* 64 x 64 -> 128 polynomial (carry-less) multiply, PMULL/PMULL2.
 * is_q selects whether the low or high 64-bit element of each source
 * register is used; the two helper calls produce the low and high
 * halves of the 128-bit product.
 */
static void handle_pmull_64(DisasContext *s, int is_q, int rd, int rn, int rm)
{
    TCGv_i64 tcg_op1 = tcg_temp_new_i64();
    TCGv_i64 tcg_op2 = tcg_temp_new_i64();
    TCGv_i64 tcg_res = tcg_temp_new_i64();

    read_vec_element(s, tcg_op1, rn, is_q, MO_64);
    read_vec_element(s, tcg_op2, rm, is_q, MO_64);
    gen_helper_neon_pmull_64_lo(tcg_res, tcg_op1, tcg_op2);
    write_vec_element(s, tcg_res, rd, 0, MO_64);
    gen_helper_neon_pmull_64_hi(tcg_res, tcg_op1, tcg_op2);
    write_vec_element(s, tcg_res, rd, 1, MO_64);

    tcg_temp_free_i64(tcg_op1);
    tcg_temp_free_i64(tcg_op2);
    tcg_temp_free_i64(tcg_res);
}
10297
10298
10299
10300
10301
10302
10303
/* AdvSIMD three registers of different sizes: top-level decode.
 * Splits the opcode space into the "wide" (one wide operand),
 * "narrowing" (wide sources, narrow result) and "widening" (narrow
 * sources, wide result) subgroups, plus the PMULL special case.
 */
static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn)
{
    int is_q = extract32(insn, 30, 1);
    int is_u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 4);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    switch (opcode) {
    case 1: /* SADDW / UADDW */
    case 3: /* SSUBW / USUBW */
        /* 64 x 128 -> 128: size 3 is reserved */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_3rd_wide(s, is_q, is_u, size, opcode, rd, rn, rm);
        break;
    case 4: /* ADDHN / RADDHN */
    case 6: /* SUBHN / RSUBHN */
        /* 128 x 128 -> 64: size 3 is reserved */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_3rd_narrowing(s, is_q, is_u, size, opcode, rd, rn, rm);
        break;
    case 14: /* PMULL, PMULL2 */
        if (is_u || size == 1 || size == 2) {
            unallocated_encoding(s);
            return;
        }
        if (size == 3) {
            /* 64-bit PMULL needs the crypto PMULL extension */
            if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_pmull_64(s, is_q, rd, rn, rm);
            return;
        }
        goto is_widening;
    case 9:  /* SQDMLAL */
    case 11: /* SQDMLSL */
    case 13: /* SQDMULL */
        /* Saturating doubling ops: signed only, no byte-sized form */
        if (is_u || size == 0) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0:  /* SADDL / UADDL */
    case 2:  /* SSUBL / USUBL */
    case 5:  /* SABAL / UABAL */
    case 7:  /* SABDL / UABDL */
    case 8:  /* SMLAL / UMLAL */
    case 10: /* SMLSL / UMLSL */
    case 12: /* SMULL / UMULL */
        /* 64 x 64 -> 128: size 3 is reserved */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
    is_widening:
        if (!fp_access_check(s)) {
            return;
        }

        handle_3rd_widening(s, is_q, is_u, size, opcode, rd, rn, rm);
        break;
    default:
        /* opcode 15 (and any other value) is reserved */
        unallocated_encoding(s);
        break;
    }
}
10399
/* Bitwise select (BSL): rd = (rd & rn) | (~rd & rm), i.e. rd is the
 * selector mask.  Implemented with the xor-and-xor trick; note this
 * clobbers rn (it is a scratch temporary in the gvec expansion).
 */
static void gen_bsl_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
{
    tcg_gen_xor_i64(rn, rn, rm);
    tcg_gen_and_i64(rn, rn, rd);
    tcg_gen_xor_i64(rd, rm, rn);
}
10406
/* Bitwise insert if true (BIT): rd = (rm & rn) | (~rm & rd), i.e.
 * insert rn bits into rd wherever rm is set.  Clobbers rn.
 */
static void gen_bit_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
{
    tcg_gen_xor_i64(rn, rn, rd);
    tcg_gen_and_i64(rn, rn, rm);
    tcg_gen_xor_i64(rd, rd, rn);
}
10413
/* Bitwise insert if false (BIF): rd = (~rm & rn) | (rm & rd), i.e.
 * insert rn bits into rd wherever rm is clear.  Clobbers rn.
 */
static void gen_bif_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
{
    tcg_gen_xor_i64(rn, rn, rd);
    tcg_gen_andc_i64(rn, rn, rm);
    tcg_gen_xor_i64(rd, rd, rn);
}
10420
/* Vector form of BSL; same xor-select trick as gen_bsl_i64.
 * Clobbers rn.
 */
static void gen_bsl_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
{
    tcg_gen_xor_vec(vece, rn, rn, rm);
    tcg_gen_and_vec(vece, rn, rn, rd);
    tcg_gen_xor_vec(vece, rd, rm, rn);
}
10427
/* Vector form of BIT; same xor-select trick as gen_bit_i64.
 * Clobbers rn.
 */
static void gen_bit_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
{
    tcg_gen_xor_vec(vece, rn, rn, rd);
    tcg_gen_and_vec(vece, rn, rn, rm);
    tcg_gen_xor_vec(vece, rd, rd, rn);
}
10434
/* Vector form of BIF; same xor-select trick as gen_bif_i64.
 * Clobbers rn.
 */
static void gen_bif_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
{
    tcg_gen_xor_vec(vece, rn, rn, rd);
    tcg_gen_andc_vec(vece, rn, rn, rm);
    tcg_gen_xor_vec(vece, rd, rd, rn);
}
10441
10442
/* AdvSIMD three-same logical operations.  The op is selected by
 * combining the size and U fields (size + 4 * is_u), since size has
 * no element-width meaning for these bitwise ops.
 */
static void disas_simd_3same_logic(DisasContext *s, uint32_t insn)
{
    static const GVecGen3 bsl_op = {
        .fni8 = gen_bsl_i64,
        .fniv = gen_bsl_vec,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
        .load_dest = true
    };
    static const GVecGen3 bit_op = {
        .fni8 = gen_bit_i64,
        .fniv = gen_bit_vec,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
        .load_dest = true
    };
    static const GVecGen3 bif_op = {
        .fni8 = gen_bif_i64,
        .fniv = gen_bif_vec,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
        .load_dest = true
    };

    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    bool is_u = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);

    if (!fp_access_check(s)) {
        return;
    }

    switch (size + 4 * is_u) {
    case 0: /* AND */
        gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_and, 0);
        return;
    case 1: /* BIC */
        gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_andc, 0);
        return;
    case 2: /* ORR; with rn == rm this is the MOV alias */
        if (rn == rm) {
            gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_mov, 0);
        } else {
            gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_or, 0);
        }
        return;
    case 3: /* ORN */
        gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_orc, 0);
        return;
    case 4: /* EOR */
        gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_xor, 0);
        return;

    case 5: /* BSL */
        gen_gvec_op3(s, is_q, rd, rn, rm, &bsl_op);
        return;
    case 6: /* BIT */
        gen_gvec_op3(s, is_q, rd, rn, rm, &bit_op);
        return;
    case 7: /* BIF */
        gen_gvec_op3(s, is_q, rd, rn, rm, &bif_op);
        return;

    default:
        /* All eight combinations are decoded above */
        g_assert_not_reached();
    }
}
10510
10511
10512
10513
10514
10515
10516static void handle_simd_3same_pair(DisasContext *s, int is_q, int u, int opcode,
10517 int size, int rn, int rm, int rd)
10518{
10519 TCGv_ptr fpst;
10520 int pass;
10521
10522
10523 if (opcode >= 0x58) {
10524 fpst = get_fpstatus_ptr(false);
10525 } else {
10526 fpst = NULL;
10527 }
10528
10529 if (!fp_access_check(s)) {
10530 return;
10531 }
10532
10533
10534
10535
10536 if (size == 3) {
10537 TCGv_i64 tcg_res[2];
10538
10539 for (pass = 0; pass < 2; pass++) {
10540 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
10541 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
10542 int passreg = (pass == 0) ? rn : rm;
10543
10544 read_vec_element(s, tcg_op1, passreg, 0, MO_64);
10545 read_vec_element(s, tcg_op2, passreg, 1, MO_64);
10546 tcg_res[pass] = tcg_temp_new_i64();
10547
10548 switch (opcode) {
10549 case 0x17:
10550 tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2);
10551 break;
10552 case 0x58:
10553 gen_helper_vfp_maxnumd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10554 break;
10555 case 0x5a:
10556 gen_helper_vfp_addd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10557 break;
10558 case 0x5e:
10559 gen_helper_vfp_maxd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10560 break;
10561 case 0x78:
10562 gen_helper_vfp_minnumd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10563 break;
10564 case 0x7e:
10565 gen_helper_vfp_mind(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10566 break;
10567 default:
10568 g_assert_not_reached();
10569 }
10570
10571 tcg_temp_free_i64(tcg_op1);
10572 tcg_temp_free_i64(tcg_op2);
10573 }
10574
10575 for (pass = 0; pass < 2; pass++) {
10576 write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
10577 tcg_temp_free_i64(tcg_res[pass]);
10578 }
10579 } else {
10580 int maxpass = is_q ? 4 : 2;
10581 TCGv_i32 tcg_res[4];
10582
10583 for (pass = 0; pass < maxpass; pass++) {
10584 TCGv_i32 tcg_op1 = tcg_temp_new_i32();
10585 TCGv_i32 tcg_op2 = tcg_temp_new_i32();
10586 NeonGenTwoOpFn *genfn = NULL;
10587 int passreg = pass < (maxpass / 2) ? rn : rm;
10588 int passelt = (is_q && (pass & 1)) ? 2 : 0;
10589
10590 read_vec_element_i32(s, tcg_op1, passreg, passelt, MO_32);
10591 read_vec_element_i32(s, tcg_op2, passreg, passelt + 1, MO_32);
10592 tcg_res[pass] = tcg_temp_new_i32();
10593
10594 switch (opcode) {
10595 case 0x17:
10596 {
10597 static NeonGenTwoOpFn * const fns[3] = {
10598 gen_helper_neon_padd_u8,
10599 gen_helper_neon_padd_u16,
10600 tcg_gen_add_i32,
10601 };
10602 genfn = fns[size];
10603 break;
10604 }
10605 case 0x14:
10606 {
10607 static NeonGenTwoOpFn * const fns[3][2] = {
10608 { gen_helper_neon_pmax_s8, gen_helper_neon_pmax_u8 },
10609 { gen_helper_neon_pmax_s16, gen_helper_neon_pmax_u16 },
10610 { tcg_gen_smax_i32, tcg_gen_umax_i32 },
10611 };
10612 genfn = fns[size][u];
10613 break;
10614 }
10615 case 0x15:
10616 {
10617 static NeonGenTwoOpFn * const fns[3][2] = {
10618 { gen_helper_neon_pmin_s8, gen_helper_neon_pmin_u8 },
10619 { gen_helper_neon_pmin_s16, gen_helper_neon_pmin_u16 },
10620 { tcg_gen_smin_i32, tcg_gen_umin_i32 },
10621 };
10622 genfn = fns[size][u];
10623 break;
10624 }
10625
10626 case 0x58:
10627 gen_helper_vfp_maxnums(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10628 break;
10629 case 0x5a:
10630 gen_helper_vfp_adds(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10631 break;
10632 case 0x5e:
10633 gen_helper_vfp_maxs(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10634 break;
10635 case 0x78:
10636 gen_helper_vfp_minnums(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10637 break;
10638 case 0x7e:
10639 gen_helper_vfp_mins(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10640 break;
10641 default:
10642 g_assert_not_reached();
10643 }
10644
10645
10646 if (genfn) {
10647 genfn(tcg_res[pass], tcg_op1, tcg_op2);
10648 }
10649
10650 tcg_temp_free_i32(tcg_op1);
10651 tcg_temp_free_i32(tcg_op2);
10652 }
10653
10654 for (pass = 0; pass < maxpass; pass++) {
10655 write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32);
10656 tcg_temp_free_i32(tcg_res[pass]);
10657 }
10658 clear_vec_high(s, is_q, rd);
10659 }
10660
10661 if (fpst) {
10662 tcg_temp_free_ptr(fpst);
10663 }
10664}
10665
10666
/* AdvSIMD three-same floating point operations: decode and dispatch.
 * The effective opcode combines the opcode field with bits 23 and 29
 * so all FP three-same variants fall into one flat switch.
 */
static void disas_simd_3same_float(DisasContext *s, uint32_t insn)
{
    int fpopcode = extract32(insn, 11, 5)
        | (extract32(insn, 23, 1) << 5)
        | (extract32(insn, 29, 1) << 6);
    int is_q = extract32(insn, 30, 1);
    /* size here is the single 'sz' bit: 0 = single, 1 = double */
    int size = extract32(insn, 22, 1);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    int datasize = is_q ? 128 : 64;
    int esize = 32 << size;
    int elements = datasize / esize;

    /* Double-precision requires the full 128-bit form */
    if (size == 1 && !is_q) {
        unallocated_encoding(s);
        return;
    }

    switch (fpopcode) {
    case 0x58: /* FMAXNMP */
    case 0x5a: /* FADDP */
    case 0x5e: /* FMAXP */
    case 0x78: /* FMINNMP */
    case 0x7e: /* FMINP */
        if (size && !is_q) {
            unallocated_encoding(s);
            return;
        }
        /* Pairwise ops; fp_access_check is done inside the handler */
        handle_simd_3same_pair(s, is_q, 0, fpopcode, size ? MO_64 : MO_32,
                               rn, rm, rd);
        return;
    case 0x1b:
    case 0x1f:
    case 0x3f:
    case 0x5d:
    case 0x7d:
    case 0x19:
    case 0x39:
    case 0x18:
    case 0x1a:
    case 0x1c:
    case 0x1e:
    case 0x38:
    case 0x3a:
    case 0x3e:
    case 0x5b:
    case 0x5c:
    case 0x5f:
    case 0x7a:
    case 0x7c:
        /* Elementwise three-same float ops, handled generically */
        if (!fp_access_check(s)) {
            return;
        }

        handle_3same_float(s, size, elements, fpopcode, rd, rn, rm);
        return;
    default:
        unallocated_encoding(s);
        return;
    }
}
10734
/* Multiply-accumulate, 8-bit lanes in an i32: d += a * b.
 * Clobbers a (a scratch temp in the gvec expansion).
 */
static void gen_mla8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u8(a, a, b);
    gen_helper_neon_add_u8(d, d, a);
}
10740
/* Multiply-accumulate, 16-bit lanes in an i32: d += a * b.  Clobbers a. */
static void gen_mla16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u16(a, a, b);
    gen_helper_neon_add_u16(d, d, a);
}
10746
/* Multiply-accumulate, 32-bit: d += a * b.  Clobbers a. */
static void gen_mla32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_mul_i32(a, a, b);
    tcg_gen_add_i32(d, d, a);
}
10752
/* Multiply-accumulate, 64-bit: d += a * b.  Clobbers a. */
static void gen_mla64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_mul_i64(a, a, b);
    tcg_gen_add_i64(d, d, a);
}
10758
/* Vector multiply-accumulate: d += a * b.  Clobbers a. */
static void gen_mla_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_mul_vec(vece, a, a, b);
    tcg_gen_add_vec(vece, d, d, a);
}
10764
/* Multiply-subtract, 8-bit lanes in an i32: d -= a * b.  Clobbers a. */
static void gen_mls8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u8(a, a, b);
    gen_helper_neon_sub_u8(d, d, a);
}
10770
/* Multiply-subtract, 16-bit lanes in an i32: d -= a * b.  Clobbers a. */
static void gen_mls16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u16(a, a, b);
    gen_helper_neon_sub_u16(d, d, a);
}
10776
/* Multiply-subtract, 32-bit: d -= a * b.  Clobbers a. */
static void gen_mls32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_mul_i32(a, a, b);
    tcg_gen_sub_i32(d, d, a);
}
10782
/* Multiply-subtract, 64-bit: d -= a * b.  Clobbers a. */
static void gen_mls64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_mul_i64(a, a, b);
    tcg_gen_sub_i64(d, d, a);
}
10788
/* Vector multiply-subtract: d -= a * b.  Clobbers a. */
static void gen_mls_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_mul_vec(vece, a, a, b);
    tcg_gen_sub_vec(vece, d, d, a);
}
10794
10795
/* AdvSIMD three-same integer operations.  Ops with gvec expansions
 * are handled first; the remainder fall through to a per-pass loop
 * (64-bit elements via handle_3same_64, smaller sizes via 32-bit
 * temps and the Neon helpers).
 */
static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
{
    /* CMTST expansion, one entry per element size */
    static const GVecGen3 cmtst_op[4] = {
        { .fni4 = gen_helper_neon_tst_u8,
          .fniv = gen_cmtst_vec,
          .vece = MO_8 },
        { .fni4 = gen_helper_neon_tst_u16,
          .fniv = gen_cmtst_vec,
          .vece = MO_16 },
        { .fni4 = gen_cmtst_i32,
          .fniv = gen_cmtst_vec,
          .vece = MO_32 },
        { .fni8 = gen_cmtst_i64,
          .fniv = gen_cmtst_vec,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };
    /* MLA expansion; load_dest because the destination accumulates */
    static const GVecGen3 mla_op[4] = {
        { .fni4 = gen_mla8_i32,
          .fniv = gen_mla_vec,
          .opc = INDEX_op_mul_vec,
          .load_dest = true,
          .vece = MO_8 },
        { .fni4 = gen_mla16_i32,
          .fniv = gen_mla_vec,
          .opc = INDEX_op_mul_vec,
          .load_dest = true,
          .vece = MO_16 },
        { .fni4 = gen_mla32_i32,
          .fniv = gen_mla_vec,
          .opc = INDEX_op_mul_vec,
          .load_dest = true,
          .vece = MO_32 },
        { .fni8 = gen_mla64_i64,
          .fniv = gen_mla_vec,
          .opc = INDEX_op_mul_vec,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .load_dest = true,
          .vece = MO_64 },
    };
    /* MLS expansion; as mla_op but subtracting */
    static const GVecGen3 mls_op[4] = {
        { .fni4 = gen_mls8_i32,
          .fniv = gen_mls_vec,
          .opc = INDEX_op_mul_vec,
          .load_dest = true,
          .vece = MO_8 },
        { .fni4 = gen_mls16_i32,
          .fniv = gen_mls_vec,
          .opc = INDEX_op_mul_vec,
          .load_dest = true,
          .vece = MO_16 },
        { .fni4 = gen_mls32_i32,
          .fniv = gen_mls_vec,
          .opc = INDEX_op_mul_vec,
          .load_dest = true,
          .vece = MO_32 },
        { .fni8 = gen_mls64_i64,
          .fniv = gen_mls_vec,
          .opc = INDEX_op_mul_vec,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .load_dest = true,
          .vece = MO_64 },
    };

    int is_q = extract32(insn, 30, 1);
    int u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 11, 5);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    int pass;
    TCGCond cond;

    /* First pass: reject the reserved size encodings */
    switch (opcode) {
    case 0x13: /* MUL, PMUL */
        if (u && size != 0) {
            /* PMUL is byte-only */
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x0:  /* SHADD, UHADD */
    case 0x2:  /* SRHADD, URHADD */
    case 0x4:  /* SHSUB, UHSUB */
    case 0xc:  /* SMAX, UMAX */
    case 0xd:  /* SMIN, UMIN */
    case 0xe:  /* SABD, UABD */
    case 0xf:  /* SABA, UABA */
    case 0x12: /* MLA, MLS */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x16: /* SQDMULH, SQRDMULH: 16- and 32-bit only */
        if (size == 0 || size == 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        /* Everything else supports size 3 only in the 128-bit form */
        if (size == 3 && !is_q) {
            unallocated_encoding(s);
            return;
        }
        break;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* Second pass: ops with gvec expansions are emitted and return */
    switch (opcode) {
    case 0x10: /* ADD, SUB */
        if (u) {
            gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_sub, size);
        } else {
            gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_add, size);
        }
        return;
    case 0x13: /* MUL (PMUL, u set, falls to the helper loop below) */
        if (!u) {
            gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_mul, size);
            return;
        }
        break;
    case 0x12: /* MLA, MLS */
        if (u) {
            gen_gvec_op3(s, is_q, rd, rn, rm, &mls_op[size]);
        } else {
            gen_gvec_op3(s, is_q, rd, rn, rm, &mla_op[size]);
        }
        return;
    case 0x11: /* CMTST (!u) / CMEQ (u) */
        if (!u) {
            gen_gvec_op3(s, is_q, rd, rn, rm, &cmtst_op[size]);
            return;
        }
        /* CMEQ */
        cond = TCG_COND_EQ;
        goto do_gvec_cmp;
    case 0x06: /* CMGT, CMHI */
        cond = u ? TCG_COND_GTU : TCG_COND_GT;
        goto do_gvec_cmp;
    case 0x07: /* CMGE, CMHS */
        cond = u ? TCG_COND_GEU : TCG_COND_GE;
    do_gvec_cmp:
        tcg_gen_gvec_cmp(cond, size, vec_full_reg_offset(s, rd),
                         vec_full_reg_offset(s, rn),
                         vec_full_reg_offset(s, rm),
                         is_q ? 16 : 8, vec_full_reg_size(s));
        return;
    }

    if (size == 3) {
        /* Decode above guarantees this only happens with is_q set */
        assert(is_q);
        for (pass = 0; pass < 2; pass++) {
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
            TCGv_i64 tcg_res = tcg_temp_new_i64();

            read_vec_element(s, tcg_op1, rn, pass, MO_64);
            read_vec_element(s, tcg_op2, rm, pass, MO_64);

            handle_3same_64(s, opcode, u, tcg_res, tcg_op1, tcg_op2);

            write_vec_element(s, tcg_res, rd, pass, MO_64);

            tcg_temp_free_i64(tcg_res);
            tcg_temp_free_i64(tcg_op1);
            tcg_temp_free_i64(tcg_op2);
        }
    } else {
        for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();
            /* Each case below sets exactly one of these: a plain
             * two-op function or one that also takes cpu_env.
             */
            NeonGenTwoOpFn *genfn = NULL;
            NeonGenTwoOpEnvFn *genenvfn = NULL;

            read_vec_element_i32(s, tcg_op1, rn, pass, MO_32);
            read_vec_element_i32(s, tcg_op2, rm, pass, MO_32);

            switch (opcode) {
            case 0x0: /* SHADD, UHADD */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_hadd_s8, gen_helper_neon_hadd_u8 },
                    { gen_helper_neon_hadd_s16, gen_helper_neon_hadd_u16 },
                    { gen_helper_neon_hadd_s32, gen_helper_neon_hadd_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0x1: /* SQADD, UQADD */
            {
                static NeonGenTwoOpEnvFn * const fns[3][2] = {
                    { gen_helper_neon_qadd_s8, gen_helper_neon_qadd_u8 },
                    { gen_helper_neon_qadd_s16, gen_helper_neon_qadd_u16 },
                    { gen_helper_neon_qadd_s32, gen_helper_neon_qadd_u32 },
                };
                genenvfn = fns[size][u];
                break;
            }
            case 0x2: /* SRHADD, URHADD */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_rhadd_s8, gen_helper_neon_rhadd_u8 },
                    { gen_helper_neon_rhadd_s16, gen_helper_neon_rhadd_u16 },
                    { gen_helper_neon_rhadd_s32, gen_helper_neon_rhadd_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0x4: /* SHSUB, UHSUB */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_hsub_s8, gen_helper_neon_hsub_u8 },
                    { gen_helper_neon_hsub_s16, gen_helper_neon_hsub_u16 },
                    { gen_helper_neon_hsub_s32, gen_helper_neon_hsub_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0x5: /* SQSUB, UQSUB */
            {
                static NeonGenTwoOpEnvFn * const fns[3][2] = {
                    { gen_helper_neon_qsub_s8, gen_helper_neon_qsub_u8 },
                    { gen_helper_neon_qsub_s16, gen_helper_neon_qsub_u16 },
                    { gen_helper_neon_qsub_s32, gen_helper_neon_qsub_u32 },
                };
                genenvfn = fns[size][u];
                break;
            }
            case 0x8: /* SSHL, USHL */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_shl_s8, gen_helper_neon_shl_u8 },
                    { gen_helper_neon_shl_s16, gen_helper_neon_shl_u16 },
                    { gen_helper_neon_shl_s32, gen_helper_neon_shl_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0x9: /* SQSHL, UQSHL */
            {
                static NeonGenTwoOpEnvFn * const fns[3][2] = {
                    { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 },
                    { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 },
                    { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 },
                };
                genenvfn = fns[size][u];
                break;
            }
            case 0xa: /* SRSHL, URSHL */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_rshl_s8, gen_helper_neon_rshl_u8 },
                    { gen_helper_neon_rshl_s16, gen_helper_neon_rshl_u16 },
                    { gen_helper_neon_rshl_s32, gen_helper_neon_rshl_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0xb: /* SQRSHL, UQRSHL */
            {
                static NeonGenTwoOpEnvFn * const fns[3][2] = {
                    { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 },
                    { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 },
                    { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 },
                };
                genenvfn = fns[size][u];
                break;
            }
            case 0xc: /* SMAX, UMAX */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_max_s8, gen_helper_neon_max_u8 },
                    { gen_helper_neon_max_s16, gen_helper_neon_max_u16 },
                    { tcg_gen_smax_i32, tcg_gen_umax_i32 },
                };
                genfn = fns[size][u];
                break;
            }

            case 0xd: /* SMIN, UMIN */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_min_s8, gen_helper_neon_min_u8 },
                    { gen_helper_neon_min_s16, gen_helper_neon_min_u16 },
                    { tcg_gen_smin_i32, tcg_gen_umin_i32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0xe: /* SABD, UABD */
            case 0xf: /* SABA, UABA (accumulate step handled below) */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_abd_s8, gen_helper_neon_abd_u8 },
                    { gen_helper_neon_abd_s16, gen_helper_neon_abd_u16 },
                    { gen_helper_neon_abd_s32, gen_helper_neon_abd_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0x13: /* PMUL (MUL was emitted via gvec above) */
                assert(u);
                assert(size == 0);
                genfn = gen_helper_neon_mul_p8;
                break;
            case 0x16: /* SQDMULH, SQRDMULH */
            {
                static NeonGenTwoOpEnvFn * const fns[2][2] = {
                    { gen_helper_neon_qdmulh_s16, gen_helper_neon_qrdmulh_s16 },
                    { gen_helper_neon_qdmulh_s32, gen_helper_neon_qrdmulh_s32 },
                };
                assert(size == 1 || size == 2);
                genenvfn = fns[size - 1][u];
                break;
            }
            default:
                g_assert_not_reached();
            }

            if (genenvfn) {
                genenvfn(tcg_res, cpu_env, tcg_op1, tcg_op2);
            } else {
                genfn(tcg_res, tcg_op1, tcg_op2);
            }

            if (opcode == 0xf) {
                /* SABA/UABA: accumulate the absolute difference into rd */
                static NeonGenTwoOpFn * const fns[3] = {
                    gen_helper_neon_add_u8,
                    gen_helper_neon_add_u16,
                    tcg_gen_add_i32,
                };

                read_vec_element_i32(s, tcg_op1, rd, pass, MO_32);
                fns[size](tcg_res, tcg_op1, tcg_res);
            }

            write_vec_element_i32(s, tcg_res, rd, pass, MO_32);

            tcg_temp_free_i32(tcg_res);
            tcg_temp_free_i32(tcg_op1);
            tcg_temp_free_i32(tcg_op2);
        }
    }
    clear_vec_high(s, is_q, rd);
}
11148
11149
11150
11151
11152
11153
11154
11155static void disas_simd_three_reg_same(DisasContext *s, uint32_t insn)
11156{
11157 int opcode = extract32(insn, 11, 5);
11158
11159 switch (opcode) {
11160 case 0x3:
11161 disas_simd_3same_logic(s, insn);
11162 break;
11163 case 0x17:
11164 case 0x14:
11165 case 0x15:
11166 {
11167
11168 int is_q = extract32(insn, 30, 1);
11169 int u = extract32(insn, 29, 1);
11170 int size = extract32(insn, 22, 2);
11171 int rm = extract32(insn, 16, 5);
11172 int rn = extract32(insn, 5, 5);
11173 int rd = extract32(insn, 0, 5);
11174 if (opcode == 0x17) {
11175 if (u || (size == 3 && !is_q)) {
11176 unallocated_encoding(s);
11177 return;
11178 }
11179 } else {
11180 if (size == 3) {
11181 unallocated_encoding(s);
11182 return;
11183 }
11184 }
11185 handle_simd_3same_pair(s, is_q, u, opcode, size, rn, rm, rd);
11186 break;
11187 }
11188 case 0x18 ... 0x31:
11189
11190 disas_simd_3same_float(s, insn);
11191 break;
11192 default:
11193 disas_simd_3same_int(s, insn);
11194 break;
11195 }
11196}
11197
11198
11199
11200
11201
11202
11203
11204
11205
11206
11207
11208
11209
/* AdvSIMD three same (ARMv8.2 FP16 variants)
 *
 *  31  30  29  28       24 23  22 21 20  16 15 14 13    11 10  9    5 4    0
 * +---+---+---+-----------+---------+------+-----+--------+---+------+------+
 * | 0 | Q | U | 0 1 1 1 0 | a | 1 0 |  Rm  | 0 0 | opcode | 1 |  Rn  |  Rd  |
 * +---+---+---+-----------+---------+------+-----+--------+---+------+------+
 *
 * Per-element half-precision operations (the helper names below give
 * the instruction each fpopcode maps to).
 */
static void disas_simd_three_reg_same_fp16(DisasContext *s, uint32_t insn)
{
    int opcode, fpopcode;
    int is_q, u, a, rm, rn, rd;
    int datasize, elements;
    int pass;
    TCGv_ptr fpst;
    bool pairwise = false;

    if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* For these floating point ops, the U, a and opcode bits
     * together indicate the operation.
     */
    opcode = extract32(insn, 11, 3);
    u = extract32(insn, 29, 1);
    a = extract32(insn, 23, 1);
    is_q = extract32(insn, 30, 1);
    rm = extract32(insn, 16, 5);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    fpopcode = opcode | (a << 3) | (u << 4);
    datasize = is_q ? 128 : 64;
    elements = datasize / 16;

    switch (fpopcode) {
    case 0x10: /* FMAXNMP */
    case 0x12: /* FADDP */
    case 0x16: /* FMAXP */
    case 0x18: /* FMINNMP */
    case 0x1e: /* FMINP */
        pairwise = true;
        break;
    }

    fpst = get_fpstatus_ptr(true);

    if (pairwise) {
        /* Pairwise ops: first half of the passes reads Rn, second Rm */
        int maxpass = is_q ? 8 : 4;
        TCGv_i32 tcg_op1 = tcg_temp_new_i32();
        TCGv_i32 tcg_op2 = tcg_temp_new_i32();
        TCGv_i32 tcg_res[8];

        for (pass = 0; pass < maxpass; pass++) {
            int passreg = pass < (maxpass / 2) ? rn : rm;
            int passelt = (pass << 1) & (maxpass - 1);

            read_vec_element_i32(s, tcg_op1, passreg, passelt, MO_16);
            read_vec_element_i32(s, tcg_op2, passreg, passelt + 1, MO_16);
            tcg_res[pass] = tcg_temp_new_i32();

            switch (fpopcode) {
            case 0x10: /* FMAXNMP */
                gen_helper_advsimd_maxnumh(tcg_res[pass], tcg_op1, tcg_op2,
                                           fpst);
                break;
            case 0x12: /* FADDP */
                gen_helper_advsimd_addh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x16: /* FMAXP */
                gen_helper_advsimd_maxh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x18: /* FMINNMP */
                gen_helper_advsimd_minnumh(tcg_res[pass], tcg_op1, tcg_op2,
                                           fpst);
                break;
            case 0x1e: /* FMINP */
                gen_helper_advsimd_minh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }
        }

        /* Results are written back only after all inputs are consumed,
         * since Rd may overlap Rn/Rm.
         */
        for (pass = 0; pass < maxpass; pass++) {
            write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_16);
            tcg_temp_free_i32(tcg_res[pass]);
        }

        tcg_temp_free_i32(tcg_op1);
        tcg_temp_free_i32(tcg_op2);

    } else {
        for (pass = 0; pass < elements; pass++) {
            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op1, rn, pass, MO_16);
            read_vec_element_i32(s, tcg_op2, rm, pass, MO_16);

            switch (fpopcode) {
            case 0x0: /* FMAXNM */
                gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1: /* FMLA */
                read_vec_element_i32(s, tcg_res, rd, pass, MO_16);
                gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_res,
                                           fpst);
                break;
            case 0x2: /* FADD */
                gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3: /* FMULX */
                gen_helper_advsimd_mulxh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x4: /* FCMEQ */
                gen_helper_advsimd_ceq_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x6: /* FMAX */
                gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7: /* FRECPS */
                gen_helper_recpsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x8: /* FMINNM */
                gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x9: /* FMLS */
                /* Negate the first multiplicand (flip the f16 sign bit),
                 * then fuse into a multiply-add.
                 */
                tcg_gen_xori_i32(tcg_op1, tcg_op1, 0x8000);
                read_vec_element_i32(s, tcg_res, rd, pass, MO_16);
                gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_res,
                                           fpst);
                break;
            case 0xa: /* FSUB */
                gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xe: /* FMIN */
                gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xf: /* FRSQRTS */
                gen_helper_rsqrtsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x13: /* FMUL */
                gen_helper_advsimd_mulh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x14: /* FCMGE */
                gen_helper_advsimd_cge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x15: /* FACGE */
                gen_helper_advsimd_acge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x17: /* FDIV */
                gen_helper_advsimd_divh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1a: /* FABD */
                gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
                /* absolute value: clear the f16 sign bit */
                tcg_gen_andi_i32(tcg_res, tcg_res, 0x7fff);
                break;
            case 0x1c: /* FCMGT */
                gen_helper_advsimd_cgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1d: /* FACGT */
                gen_helper_advsimd_acgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                fprintf(stderr, "%s: insn %#04x, fpop %#2x @ %#" PRIx64 "\n",
                        __func__, insn, fpopcode, s->pc);
                g_assert_not_reached();
            }

            write_vec_element_i32(s, tcg_res, rd, pass, MO_16);
            tcg_temp_free_i32(tcg_res);
            tcg_temp_free_i32(tcg_op1);
            tcg_temp_free_i32(tcg_op2);
        }
    }

    tcg_temp_free_ptr(fpst);

    clear_vec_high(s, is_q, rd);
}
11391
11392
11393
11394
11395
11396
11397
/* AdvSIMD three same extra
 *
 *  31   30  29 28       24 23  22  21 20  16 15 14    11  10 9  5 4  0
 * +---+---+---+-----------+------+---+------+---+--------+---+----+----+
 * | 0 | Q | U | 0 1 1 1 0 | size | 0 |  Rm  | 1 | opcode | 1 | Rn | Rd |
 * +---+---+---+-----------+------+---+------+---+--------+---+----+----+
 */
static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 4);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    bool u = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);
    int feature, rot;

    /* First pass: validate the encoding and pick the CPU feature that
     * gates it; U is folded into the switch value.
     */
    switch (u * 16 + opcode) {
    case 0x10: /* SQRDMLAH (vector) */
    case 0x11: /* SQRDMLSH (vector) */
        if (size != 1 && size != 2) {
            unallocated_encoding(s);
            return;
        }
        feature = ARM_FEATURE_V8_RDM;
        break;
    case 0x02: /* SDOT (vector) */
    case 0x12: /* UDOT (vector) */
        if (size != MO_32) {
            unallocated_encoding(s);
            return;
        }
        feature = ARM_FEATURE_V8_DOTPROD;
        break;
    case 0x8: /* FCMLA, #0 */
    case 0x9: /* FCMLA, #90 */
    case 0xa: /* FCMLA, #180 */
    case 0xb: /* FCMLA, #270 */
    case 0xc: /* FCADD, #90 */
    case 0xe: /* FCADD, #270 */
        /* size 1 (fp16) additionally needs the FP16 extension;
         * size 3 (fp64) needs the Q form.
         */
        if (size == 0
            || (size == 1 && !arm_dc_feature(s, ARM_FEATURE_V8_FP16))
            || (size == 3 && !is_q)) {
            unallocated_encoding(s);
            return;
        }
        feature = ARM_FEATURE_V8_FCMA;
        break;
    default:
        unallocated_encoding(s);
        return;
    }
    if (!arm_dc_feature(s, feature)) {
        unallocated_encoding(s);
        return;
    }
    if (!fp_access_check(s)) {
        return;
    }

    /* Second pass: emit the operation */
    switch (opcode) {
    case 0x0: /* SQRDMLAH (vector) */
        switch (size) {
        case 1:
            gen_gvec_op3_env(s, is_q, rd, rn, rm, gen_helper_gvec_qrdmlah_s16);
            break;
        case 2:
            gen_gvec_op3_env(s, is_q, rd, rn, rm, gen_helper_gvec_qrdmlah_s32);
            break;
        default:
            g_assert_not_reached();
        }
        return;

    case 0x1: /* SQRDMLSH (vector) */
        switch (size) {
        case 1:
            gen_gvec_op3_env(s, is_q, rd, rn, rm, gen_helper_gvec_qrdmlsh_s16);
            break;
        case 2:
            gen_gvec_op3_env(s, is_q, rd, rn, rm, gen_helper_gvec_qrdmlsh_s32);
            break;
        default:
            g_assert_not_reached();
        }
        return;

    case 0x2: /* SDOT / UDOT */
        gen_gvec_op3_ool(s, is_q, rd, rn, rm, 0,
                         u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b);
        return;

    case 0x8: /* FCMLA, #0 */
    case 0x9: /* FCMLA, #90 */
    case 0xa: /* FCMLA, #180 */
    case 0xb: /* FCMLA, #270 */
        rot = extract32(opcode, 0, 2);
        switch (size) {
        case 1:
            gen_gvec_op3_fpst(s, is_q, rd, rn, rm, true, rot,
                              gen_helper_gvec_fcmlah);
            break;
        case 2:
            gen_gvec_op3_fpst(s, is_q, rd, rn, rm, false, rot,
                              gen_helper_gvec_fcmlas);
            break;
        case 3:
            gen_gvec_op3_fpst(s, is_q, rd, rn, rm, false, rot,
                              gen_helper_gvec_fcmlad);
            break;
        default:
            g_assert_not_reached();
        }
        return;

    case 0xc: /* FCADD, #90 */
    case 0xe: /* FCADD, #270 */
        rot = extract32(opcode, 1, 1);
        switch (size) {
        case 1:
            gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
                              gen_helper_gvec_fcaddh);
            break;
        case 2:
            gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
                              gen_helper_gvec_fcadds);
            break;
        case 3:
            gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
                              gen_helper_gvec_fcaddd);
            break;
        default:
            g_assert_not_reached();
        }
        return;

    default:
        g_assert_not_reached();
    }
}
11532
11533static void handle_2misc_widening(DisasContext *s, int opcode, bool is_q,
11534 int size, int rn, int rd)
11535{
11536
11537
11538
11539
11540 int pass;
11541
11542 if (size == 3) {
11543
11544 TCGv_i64 tcg_res[2];
11545 int srcelt = is_q ? 2 : 0;
11546
11547 for (pass = 0; pass < 2; pass++) {
11548 TCGv_i32 tcg_op = tcg_temp_new_i32();
11549 tcg_res[pass] = tcg_temp_new_i64();
11550
11551 read_vec_element_i32(s, tcg_op, rn, srcelt + pass, MO_32);
11552 gen_helper_vfp_fcvtds(tcg_res[pass], tcg_op, cpu_env);
11553 tcg_temp_free_i32(tcg_op);
11554 }
11555 for (pass = 0; pass < 2; pass++) {
11556 write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
11557 tcg_temp_free_i64(tcg_res[pass]);
11558 }
11559 } else {
11560
11561 int srcelt = is_q ? 4 : 0;
11562 TCGv_i32 tcg_res[4];
11563 TCGv_ptr fpst = get_fpstatus_ptr(false);
11564 TCGv_i32 ahp = get_ahp_flag();
11565
11566 for (pass = 0; pass < 4; pass++) {
11567 tcg_res[pass] = tcg_temp_new_i32();
11568
11569 read_vec_element_i32(s, tcg_res[pass], rn, srcelt + pass, MO_16);
11570 gen_helper_vfp_fcvt_f16_to_f32(tcg_res[pass], tcg_res[pass],
11571 fpst, ahp);
11572 }
11573 for (pass = 0; pass < 4; pass++) {
11574 write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32);
11575 tcg_temp_free_i32(tcg_res[pass]);
11576 }
11577
11578 tcg_temp_free_ptr(fpst);
11579 tcg_temp_free_i32(ahp);
11580 }
11581}
11582
/* Handle the REV group (REV64/REV32/REV16): reverse the order of the
 * size-sized elements within each containing block.  The container
 * size is derived from op+size; revmask below is the set of
 * element-index bits that get flipped inside each container.
 */
static void handle_rev(DisasContext *s, int opcode, bool u,
                       bool is_q, int size, int rn, int rd)
{
    int op = (opcode << 1) | u;
    int opsz = op + size;
    int grp_size = 3 - opsz; /* log2 of the container size in bytes */
    int dsize = is_q ? 128 : 64;
    int i;

    if (opsz >= 3) {
        /* element size must be strictly smaller than the container */
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (size == 0) {
        /* Byte-sized elements: each container is simply byte-swapped */
        int groups = dsize / (8 << grp_size);

        for (i = 0; i < groups; i++) {
            TCGv_i64 tcg_tmp = tcg_temp_new_i64();

            read_vec_element(s, tcg_tmp, rn, i, grp_size);
            switch (grp_size) {
            case MO_16:
                tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp);
                break;
            case MO_32:
                tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp);
                break;
            case MO_64:
                tcg_gen_bswap64_i64(tcg_tmp, tcg_tmp);
                break;
            default:
                g_assert_not_reached();
            }
            write_vec_element(s, tcg_tmp, rd, i, grp_size);
            tcg_temp_free_i64(tcg_tmp);
        }
        clear_vec_high(s, is_q, rd);
    } else {
        /* Larger elements: build the result by depositing each source
         * element at its reversed position within a 128-bit pair of
         * accumulators, then write both halves at the end.
         */
        int revmask = (1 << grp_size) - 1;
        int esize = 8 << size;
        int elements = dsize / esize;
        TCGv_i64 tcg_rn = tcg_temp_new_i64();
        TCGv_i64 tcg_rd = tcg_const_i64(0);
        TCGv_i64 tcg_rd_hi = tcg_const_i64(0);

        for (i = 0; i < elements; i++) {
            /* XORing the index with revmask reverses the element order
             * within each container.
             */
            int e_rev = (i & 0xf) ^ revmask;
            int off = e_rev * esize;
            read_vec_element(s, tcg_rn, rn, i, size);
            if (off >= 64) {
                tcg_gen_deposit_i64(tcg_rd_hi, tcg_rd_hi,
                                    tcg_rn, off - 64, esize);
            } else {
                tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, off, esize);
            }
        }
        write_vec_element(s, tcg_rd, rd, 0, MO_64);
        write_vec_element(s, tcg_rd_hi, rd, 1, MO_64);

        tcg_temp_free_i64(tcg_rd_hi);
        tcg_temp_free_i64(tcg_rd);
        tcg_temp_free_i64(tcg_rn);
    }
}
11653
/* Pairwise add-long ops: each pair of adjacent elements of Vn is added
 * to produce one double-width result element.  Opcode 0x6 selects the
 * accumulating forms (result is also added into Vd); otherwise the
 * plain add-long forms.  The U bit selects unsigned vs signed inputs.
 */
static void handle_2misc_pairwise(DisasContext *s, int opcode, bool u,
                                  bool is_q, int size, int rn, int rd)
{
    /* opcode 0x6 is the accumulate-into-Vd variant */
    bool accum = (opcode == 0x6);
    int maxpass = is_q ? 2 : 1;
    int pass;
    TCGv_i64 tcg_res[2];

    if (size == 2) {
        /* 32 + 32 -> 64: do the add at 64 bits, extending the inputs
         * as signed unless U is set.
         */
        TCGMemOp memop = size + (u ? 0 : MO_SIGN);

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();

            tcg_res[pass] = tcg_temp_new_i64();

            read_vec_element(s, tcg_op1, rn, pass * 2, memop);
            read_vec_element(s, tcg_op2, rn, pass * 2 + 1, memop);
            tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2);
            if (accum) {
                read_vec_element(s, tcg_op1, rd, pass, MO_64);
                tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
            }

            tcg_temp_free_i64(tcg_op1);
            tcg_temp_free_i64(tcg_op2);
        }
    } else {
        /* 8- and 16-bit elements: helpers do the pairwise add-long on a
         * whole 64-bit lane at a time.
         */
        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();
            NeonGenOneOpFn *genfn;
            static NeonGenOneOpFn * const fns[2][2] = {
                { gen_helper_neon_addlp_s8, gen_helper_neon_addlp_u8 },
                { gen_helper_neon_addlp_s16, gen_helper_neon_addlp_u16 },
            };

            genfn = fns[size][u];

            tcg_res[pass] = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);
            genfn(tcg_res[pass], tcg_op);

            if (accum) {
                read_vec_element(s, tcg_op, rd, pass, MO_64);
                if (size == 0) {
                    gen_helper_neon_addl_u16(tcg_res[pass],
                                             tcg_res[pass], tcg_op);
                } else {
                    gen_helper_neon_addl_u32(tcg_res[pass],
                                             tcg_res[pass], tcg_op);
                }
            }
            tcg_temp_free_i64(tcg_op);
        }
    }
    if (!is_q) {
        /* 64-bit form: the high half of Vd is zeroed */
        tcg_res[1] = tcg_const_i64(0);
    }
    for (pass = 0; pass < 2; pass++) {
        write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
        tcg_temp_free_i64(tcg_res[pass]);
    }
}
11725
11726static void handle_shll(DisasContext *s, bool is_q, int size, int rn, int rd)
11727{
11728
11729 int pass;
11730 int part = is_q ? 2 : 0;
11731 TCGv_i64 tcg_res[2];
11732
11733 for (pass = 0; pass < 2; pass++) {
11734 static NeonGenWidenFn * const widenfns[3] = {
11735 gen_helper_neon_widen_u8,
11736 gen_helper_neon_widen_u16,
11737 tcg_gen_extu_i32_i64,
11738 };
11739 NeonGenWidenFn *widenfn = widenfns[size];
11740 TCGv_i32 tcg_op = tcg_temp_new_i32();
11741
11742 read_vec_element_i32(s, tcg_op, rn, part + pass, MO_32);
11743 tcg_res[pass] = tcg_temp_new_i64();
11744 widenfn(tcg_res[pass], tcg_op);
11745 tcg_gen_shli_i64(tcg_res[pass], tcg_res[pass], 8 << size);
11746
11747 tcg_temp_free_i32(tcg_op);
11748 }
11749
11750 for (pass = 0; pass < 2; pass++) {
11751 write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
11752 tcg_temp_free_i64(tcg_res[pass]);
11753 }
11754}
11755
11756
11757
11758
11759
11760
11761
/* AdvSIMD two reg misc
 *   31  30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
 * | 0 | Q | U | 0 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
 */
static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
{
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    bool u = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    bool need_fpstatus = false;
    bool need_rmode = false;
    int rmode = -1;
    TCGv_i32 tcg_rmode;
    TCGv_ptr tcg_fpstatus;

    switch (opcode) {
    case 0x0: /* REV64, REV32 */
    case 0x1: /* REV16 */
        handle_rev(s, opcode, u, is_q, size, rn, rd);
        return;
    case 0x5: /* CNT, NOT, RBIT */
        if (u && size == 0) {
            /* NOT */
            break;
        } else if (u && size == 1) {
            /* RBIT */
            break;
        } else if (!u && size == 0) {
            /* CNT */
            break;
        }
        unallocated_encoding(s);
        return;
    case 0x12: /* XTN, XTN2, SQXTUN, SQXTUN2 */
    case 0x14: /* SQXTN, SQXTN2, UQXTN, UQXTN2 */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        /* narrowing ops are handled by a shared helper */
        handle_2misc_narrow(s, false, opcode, u, is_q, size, rn, rd);
        return;
    case 0x4: /* CLS, CLZ */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x2: /* SADDLP, UADDLP */
    case 0x6: /* SADALP, UADALP */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_2misc_pairwise(s, opcode, u, is_q, size, rn, rd);
        return;
    case 0x13: /* SHLL, SHLL2 */
        if (u == 0 || size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_shll(s, is_q, size, rn, rd);
        return;
    case 0xa: /* CMLT */
        if (u == 1) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x8: /* CMGT, CMGE */
    case 0x9: /* CMEQ, CMLE */
    case 0xb: /* ABS, NEG */
        if (size == 3 && !is_q) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x3: /* SUQADD, USQADD */
        if (size == 3 && !is_q) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_2misc_satacc(s, false, u, is_q, size, rn, rd);
        return;
    case 0x7: /* SQABS, SQNEG */
        if (size == 3 && !is_q) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0xc ... 0xf:
    case 0x16 ... 0x1d:
    case 0x1f:
    {
        /* Floating point: U, size[1] and opcode together indicate the
         * operation; size[0] indicates single or double precision.
         */
        int is_double = extract32(size, 0, 1);
        opcode |= (extract32(size, 1, 1) << 5) | (u << 6);
        size = is_double ? 3 : 2;
        switch (opcode) {
        case 0x2f: /* FABS */
        case 0x6f: /* FNEG */
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x1d: /* SCVTF */
        case 0x5d: /* UCVTF */
        {
            bool is_signed = (opcode == 0x1d) ? true : false;
            int elements = is_double ? 2 : is_q ? 4 : 2;
            if (is_double && !is_q) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_simd_intfp_conv(s, rd, rn, elements, is_signed, 0, size);
            return;
        }
        case 0x2c: /* FCMGT (zero) */
        case 0x2d: /* FCMEQ (zero) */
        case 0x2e: /* FCMLT (zero) */
        case 0x6c: /* FCMGE (zero) */
        case 0x6d: /* FCMLE (zero) */
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            handle_2misc_fcmp_zero(s, opcode, false, u, is_q, size, rn, rd);
            return;
        case 0x7f: /* FSQRT */
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x1a: /* FCVTNS */
        case 0x1b: /* FCVTMS */
        case 0x3a: /* FCVTPS */
        case 0x3b: /* FCVTZS */
        case 0x5a: /* FCVTNU */
        case 0x5b: /* FCVTMU */
        case 0x7a: /* FCVTPU */
        case 0x7b: /* FCVTZU */
            need_fpstatus = true;
            need_rmode = true;
            rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x5c: /* FCVTAU */
        case 0x1c: /* FCVTAS */
            need_fpstatus = true;
            need_rmode = true;
            rmode = FPROUNDING_TIEAWAY;
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x3c: /* URECPE */
            if (size == 3) {
                unallocated_encoding(s);
                return;
            }
            /* fall through */
        case 0x3d: /* FRECPE */
        case 0x7d: /* FRSQRTE */
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_reciprocal(s, opcode, false, u, is_q, size, rn, rd);
            return;
        case 0x56: /* FCVTXN, FCVTXN2 */
            if (size == 2) {
                unallocated_encoding(s);
                return;
            }
            /* fall through */
        case 0x16: /* FCVTN, FCVTN2 */
            /* handle_2misc_narrow does a 2*size -> size operation, but
             * these instructions encode the source size rather than
             * the destination size, hence "size - 1" below.
             */
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_narrow(s, false, opcode, 0, is_q, size - 1, rn, rd);
            return;
        case 0x17: /* FCVTL, FCVTL2 */
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_widening(s, opcode, is_q, size, rn, rd);
            return;
        case 0x18: /* FRINTN */
        case 0x19: /* FRINTM */
        case 0x38: /* FRINTP */
        case 0x39: /* FRINTZ */
            need_rmode = true;
            rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
            /* fall through */
        case 0x59: /* FRINTX */
        case 0x79: /* FRINTI */
            need_fpstatus = true;
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x58: /* FRINTA */
            need_rmode = true;
            rmode = FPROUNDING_TIEAWAY;
            need_fpstatus = true;
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x7c: /* URSQRTE */
            if (size == 3) {
                unallocated_encoding(s);
                return;
            }
            need_fpstatus = true;
            break;
        default:
            unallocated_encoding(s);
            return;
        }
        break;
    }
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (need_fpstatus || need_rmode) {
        tcg_fpstatus = get_fpstatus_ptr(false);
    } else {
        tcg_fpstatus = NULL;
    }
    if (need_rmode) {
        /* Temporarily switch the FP rounding mode; restored at the end */
        tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
    } else {
        tcg_rmode = NULL;
    }

    switch (opcode) {
    case 0x5:
        if (u && size == 0) { /* NOT: whole-vector gvec op */
            gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_not, 0);
            return;
        }
        break;
    case 0xb:
        if (u) { /* NEG: whole-vector gvec op */
            gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_neg, size);
            return;
        }
        break;
    }

    if (size == 3) {
        /* All 64-bit element operations can be shared with scalar 2misc */
        int pass;

        /* The size == 3 checks above ensured Q is set here, so the full
         * 128-bit register (both passes) is always processed.
         */
        tcg_debug_assert(is_q);
        for (pass = 0; pass < 2; pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();
            TCGv_i64 tcg_res = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);

            handle_2misc_64(s, opcode, u, tcg_res, tcg_op,
                            tcg_rmode, tcg_fpstatus);

            write_vec_element(s, tcg_res, rd, pass, MO_64);

            tcg_temp_free_i64(tcg_res);
            tcg_temp_free_i64(tcg_op);
        }
    } else {
        int pass;

        for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();
            TCGCond cond;

            read_vec_element_i32(s, tcg_op, rn, pass, MO_32);

            if (size == 2) {
                /* Special cases for 32 bit elements */
                switch (opcode) {
                case 0xa: /* CMLT */
                    /* 32 bit integer comparison against zero: result is
                     * test ? -1 : 0, produced via setcond then negate.
                     */
                    cond = TCG_COND_LT;
                do_cmop:
                    tcg_gen_setcondi_i32(cond, tcg_res, tcg_op, 0);
                    tcg_gen_neg_i32(tcg_res, tcg_res);
                    break;
                case 0x8: /* CMGT, CMGE */
                    cond = u ? TCG_COND_GE : TCG_COND_GT;
                    goto do_cmop;
                case 0x9: /* CMEQ, CMLE */
                    cond = u ? TCG_COND_LE : TCG_COND_EQ;
                    goto do_cmop;
                case 0x4: /* CLS, CLZ */
                    if (u) {
                        tcg_gen_clzi_i32(tcg_res, tcg_op, 32);
                    } else {
                        tcg_gen_clrsb_i32(tcg_res, tcg_op);
                    }
                    break;
                case 0x7: /* SQABS, SQNEG */
                    if (u) {
                        gen_helper_neon_qneg_s32(tcg_res, cpu_env, tcg_op);
                    } else {
                        gen_helper_neon_qabs_s32(tcg_res, cpu_env, tcg_op);
                    }
                    break;
                case 0xb: /* ABS, NEG */
                    if (u) {
                        tcg_gen_neg_i32(tcg_res, tcg_op);
                    } else {
                        /* ABS: negate, then keep the original if it was
                         * already positive.
                         */
                        TCGv_i32 tcg_zero = tcg_const_i32(0);
                        tcg_gen_neg_i32(tcg_res, tcg_op);
                        tcg_gen_movcond_i32(TCG_COND_GT, tcg_res, tcg_op,
                                            tcg_zero, tcg_op, tcg_res);
                        tcg_temp_free_i32(tcg_zero);
                    }
                    break;
                case 0x2f: /* FABS */
                    gen_helper_vfp_abss(tcg_res, tcg_op);
                    break;
                case 0x6f: /* FNEG */
                    gen_helper_vfp_negs(tcg_res, tcg_op);
                    break;
                case 0x7f: /* FSQRT */
                    gen_helper_vfp_sqrts(tcg_res, tcg_op, cpu_env);
                    break;
                case 0x1a: /* FCVTNS */
                case 0x1b: /* FCVTMS */
                case 0x1c: /* FCVTAS */
                case 0x3a: /* FCVTPS */
                case 0x3b: /* FCVTZS */
                {
                    /* rounding mode was set above; shift of zero */
                    TCGv_i32 tcg_shift = tcg_const_i32(0);
                    gen_helper_vfp_tosls(tcg_res, tcg_op,
                                         tcg_shift, tcg_fpstatus);
                    tcg_temp_free_i32(tcg_shift);
                    break;
                }
                case 0x5a: /* FCVTNU */
                case 0x5b: /* FCVTMU */
                case 0x5c: /* FCVTAU */
                case 0x7a: /* FCVTPU */
                case 0x7b: /* FCVTZU */
                {
                    TCGv_i32 tcg_shift = tcg_const_i32(0);
                    gen_helper_vfp_touls(tcg_res, tcg_op,
                                         tcg_shift, tcg_fpstatus);
                    tcg_temp_free_i32(tcg_shift);
                    break;
                }
                case 0x18: /* FRINTN */
                case 0x19: /* FRINTM */
                case 0x38: /* FRINTP */
                case 0x39: /* FRINTZ */
                case 0x58: /* FRINTA */
                case 0x79: /* FRINTI */
                    gen_helper_rints(tcg_res, tcg_op, tcg_fpstatus);
                    break;
                case 0x59: /* FRINTX */
                    gen_helper_rints_exact(tcg_res, tcg_op, tcg_fpstatus);
                    break;
                case 0x7c: /* URSQRTE */
                    gen_helper_rsqrte_u32(tcg_res, tcg_op, tcg_fpstatus);
                    break;
                default:
                    g_assert_not_reached();
                }
            } else {
                /* Use helpers for 8 and 16 bit elements */
                switch (opcode) {
                case 0x5: /* CNT, RBIT */
                    /* NOT (u, size 0) already returned above via gvec, so
                     * here u distinguishes RBIT (size 1) from CNT (size 0).
                     */
                    if (u) {
                        gen_helper_neon_rbit_u8(tcg_res, tcg_op);
                    } else {
                        gen_helper_neon_cnt_u8(tcg_res, tcg_op);
                    }
                    break;
                case 0x7: /* SQABS, SQNEG */
                {
                    NeonGenOneOpEnvFn *genfn;
                    static NeonGenOneOpEnvFn * const fns[2][2] = {
                        { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 },
                        { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 },
                    };
                    genfn = fns[size][u];
                    genfn(tcg_res, cpu_env, tcg_op);
                    break;
                }
                case 0x8: /* CMGT, CMGE */
                case 0x9: /* CMEQ, CMLE */
                case 0xa: /* CMLT */
                {
                    static NeonGenTwoOpFn * const fns[3][2] = {
                        { gen_helper_neon_cgt_s8, gen_helper_neon_cgt_s16 },
                        { gen_helper_neon_cge_s8, gen_helper_neon_cge_s16 },
                        { gen_helper_neon_ceq_u8, gen_helper_neon_ceq_u16 },
                    };
                    NeonGenTwoOpFn *genfn;
                    int comp;
                    bool reverse;
                    TCGv_i32 tcg_zero = tcg_const_i32(0);

                    /* comp = 0 CMGT, 1 CMGE, 2 CMEQ, 3 CMLE, 4 CMLT */
                    comp = (opcode - 0x8) * 2 + u;
                    /* CMLE and CMLT reuse CMGE/CMGT with swapped operands */
                    reverse = (comp > 2);
                    if (reverse) {
                        comp = 4 - comp;
                    }
                    genfn = fns[comp][size];
                    if (reverse) {
                        genfn(tcg_res, tcg_zero, tcg_op);
                    } else {
                        genfn(tcg_res, tcg_op, tcg_zero);
                    }
                    tcg_temp_free_i32(tcg_zero);
                    break;
                }
                case 0xb: /* ABS, NEG */
                    if (u) {
                        /* NEG implemented as 0 - op */
                        TCGv_i32 tcg_zero = tcg_const_i32(0);
                        if (size) {
                            gen_helper_neon_sub_u16(tcg_res, tcg_zero, tcg_op);
                        } else {
                            gen_helper_neon_sub_u8(tcg_res, tcg_zero, tcg_op);
                        }
                        tcg_temp_free_i32(tcg_zero);
                    } else {
                        if (size) {
                            gen_helper_neon_abs_s16(tcg_res, tcg_op);
                        } else {
                            gen_helper_neon_abs_s8(tcg_res, tcg_op);
                        }
                    }
                    break;
                case 0x4: /* CLS, CLZ */
                    if (u) {
                        if (size == 0) {
                            gen_helper_neon_clz_u8(tcg_res, tcg_op);
                        } else {
                            gen_helper_neon_clz_u16(tcg_res, tcg_op);
                        }
                    } else {
                        if (size == 0) {
                            gen_helper_neon_cls_s8(tcg_res, tcg_op);
                        } else {
                            gen_helper_neon_cls_s16(tcg_res, tcg_op);
                        }
                    }
                    break;
                default:
                    g_assert_not_reached();
                }
            }

            write_vec_element_i32(s, tcg_res, rd, pass, MO_32);

            tcg_temp_free_i32(tcg_res);
            tcg_temp_free_i32(tcg_op);
        }
    }
    clear_vec_high(s, is_q, rd);

    if (need_rmode) {
        /* restore the caller's rounding mode */
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
        tcg_temp_free_i32(tcg_rmode);
    }
    if (need_fpstatus) {
        tcg_temp_free_ptr(tcg_fpstatus);
    }
}
12283
12284
12285
12286
12287
12288
12289
12290
12291
12292
12293
12294
12295
12296
12297
12298static void disas_simd_two_reg_misc_fp16(DisasContext *s, uint32_t insn)
12299{
12300 int fpop, opcode, a, u;
12301 int rn, rd;
12302 bool is_q;
12303 bool is_scalar;
12304 bool only_in_vector = false;
12305
12306 int pass;
12307 TCGv_i32 tcg_rmode = NULL;
12308 TCGv_ptr tcg_fpstatus = NULL;
12309 bool need_rmode = false;
12310 bool need_fpst = true;
12311 int rmode;
12312
12313 if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
12314 unallocated_encoding(s);
12315 return;
12316 }
12317
12318 rd = extract32(insn, 0, 5);
12319 rn = extract32(insn, 5, 5);
12320
12321 a = extract32(insn, 23, 1);
12322 u = extract32(insn, 29, 1);
12323 is_scalar = extract32(insn, 28, 1);
12324 is_q = extract32(insn, 30, 1);
12325
12326 opcode = extract32(insn, 12, 5);
12327 fpop = deposit32(opcode, 5, 1, a);
12328 fpop = deposit32(fpop, 6, 1, u);
12329
12330 rd = extract32(insn, 0, 5);
12331 rn = extract32(insn, 5, 5);
12332
12333 switch (fpop) {
12334 case 0x1d:
12335 case 0x5d:
12336 {
12337 int elements;
12338
12339 if (is_scalar) {
12340 elements = 1;
12341 } else {
12342 elements = (is_q ? 8 : 4);
12343 }
12344
12345 if (!fp_access_check(s)) {
12346 return;
12347 }
12348 handle_simd_intfp_conv(s, rd, rn, elements, !u, 0, MO_16);
12349 return;
12350 }
12351 break;
12352 case 0x2c:
12353 case 0x2d:
12354 case 0x2e:
12355 case 0x6c:
12356 case 0x6d:
12357 handle_2misc_fcmp_zero(s, fpop, is_scalar, 0, is_q, MO_16, rn, rd);
12358 return;
12359 case 0x3d:
12360 case 0x3f:
12361 break;
12362 case 0x18:
12363 need_rmode = true;
12364 only_in_vector = true;
12365 rmode = FPROUNDING_TIEEVEN;
12366 break;
12367 case 0x19:
12368 need_rmode = true;
12369 only_in_vector = true;
12370 rmode = FPROUNDING_NEGINF;
12371 break;
12372 case 0x38:
12373 need_rmode = true;
12374 only_in_vector = true;
12375 rmode = FPROUNDING_POSINF;
12376 break;
12377 case 0x39:
12378 need_rmode = true;
12379 only_in_vector = true;
12380 rmode = FPROUNDING_ZERO;
12381 break;
12382 case 0x58:
12383 need_rmode = true;
12384 only_in_vector = true;
12385 rmode = FPROUNDING_TIEAWAY;
12386 break;
12387 case 0x59:
12388 case 0x79:
12389 only_in_vector = true;
12390
12391 break;
12392 case 0x1a:
12393 need_rmode = true;
12394 rmode = FPROUNDING_TIEEVEN;
12395 break;
12396 case 0x1b:
12397 need_rmode = true;
12398 rmode = FPROUNDING_NEGINF;
12399 break;
12400 case 0x1c:
12401 need_rmode = true;
12402 rmode = FPROUNDING_TIEAWAY;
12403 break;
12404 case 0x3a:
12405 need_rmode = true;
12406 rmode = FPROUNDING_POSINF;
12407 break;
12408 case 0x3b:
12409 need_rmode = true;
12410 rmode = FPROUNDING_ZERO;
12411 break;
12412 case 0x5a:
12413 need_rmode = true;
12414 rmode = FPROUNDING_TIEEVEN;
12415 break;
12416 case 0x5b:
12417 need_rmode = true;
12418 rmode = FPROUNDING_NEGINF;
12419 break;
12420 case 0x5c:
12421 need_rmode = true;
12422 rmode = FPROUNDING_TIEAWAY;
12423 break;
12424 case 0x7a:
12425 need_rmode = true;
12426 rmode = FPROUNDING_POSINF;
12427 break;
12428 case 0x7b:
12429 need_rmode = true;
12430 rmode = FPROUNDING_ZERO;
12431 break;
12432 case 0x2f:
12433 case 0x6f:
12434 need_fpst = false;
12435 break;
12436 case 0x7d:
12437 case 0x7f:
12438 break;
12439 default:
12440 fprintf(stderr, "%s: insn %#04x fpop %#2x\n", __func__, insn, fpop);
12441 g_assert_not_reached();
12442 }
12443
12444
12445
12446 if (is_scalar) {
12447 if (!is_q) {
12448 unallocated_encoding(s);
12449 return;
12450 }
12451
12452 if (only_in_vector) {
12453 unallocated_encoding(s);
12454 return;
12455 }
12456 }
12457
12458 if (!fp_access_check(s)) {
12459 return;
12460 }
12461
12462 if (need_rmode || need_fpst) {
12463 tcg_fpstatus = get_fpstatus_ptr(true);
12464 }
12465
12466 if (need_rmode) {
12467 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
12468 gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
12469 }
12470
12471 if (is_scalar) {
12472 TCGv_i32 tcg_op = read_fp_hreg(s, rn);
12473 TCGv_i32 tcg_res = tcg_temp_new_i32();
12474
12475 switch (fpop) {
12476 case 0x1a:
12477 case 0x1b:
12478 case 0x1c:
12479 case 0x3a:
12480 case 0x3b:
12481 gen_helper_advsimd_f16tosinth(tcg_res, tcg_op, tcg_fpstatus);
12482 break;
12483 case 0x3d:
12484 gen_helper_recpe_f16(tcg_res, tcg_op, tcg_fpstatus);
12485 break;
12486 case 0x3f:
12487 gen_helper_frecpx_f16(tcg_res, tcg_op, tcg_fpstatus);
12488 break;
12489 case 0x5a:
12490 case 0x5b:
12491 case 0x5c:
12492 case 0x7a:
12493 case 0x7b:
12494 gen_helper_advsimd_f16touinth(tcg_res, tcg_op, tcg_fpstatus);
12495 break;
12496 case 0x6f:
12497 tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
12498 break;
12499 case 0x7d:
12500 gen_helper_rsqrte_f16(tcg_res, tcg_op, tcg_fpstatus);
12501 break;
12502 default:
12503 g_assert_not_reached();
12504 }
12505
12506
12507 tcg_gen_andi_i32(tcg_res, tcg_res, 0xffff);
12508 write_fp_sreg(s, rd, tcg_res);
12509
12510 tcg_temp_free_i32(tcg_res);
12511 tcg_temp_free_i32(tcg_op);
12512 } else {
12513 for (pass = 0; pass < (is_q ? 8 : 4); pass++) {
12514 TCGv_i32 tcg_op = tcg_temp_new_i32();
12515 TCGv_i32 tcg_res = tcg_temp_new_i32();
12516
12517 read_vec_element_i32(s, tcg_op, rn, pass, MO_16);
12518
12519 switch (fpop) {
12520 case 0x1a:
12521 case 0x1b:
12522 case 0x1c:
12523 case 0x3a:
12524 case 0x3b:
12525 gen_helper_advsimd_f16tosinth(tcg_res, tcg_op, tcg_fpstatus);
12526 break;
12527 case 0x3d:
12528 gen_helper_recpe_f16(tcg_res, tcg_op, tcg_fpstatus);
12529 break;
12530 case 0x5a:
12531 case 0x5b:
12532 case 0x5c:
12533 case 0x7a:
12534 case 0x7b:
12535 gen_helper_advsimd_f16touinth(tcg_res, tcg_op, tcg_fpstatus);
12536 break;
12537 case 0x18:
12538 case 0x19:
12539 case 0x38:
12540 case 0x39:
12541 case 0x58:
12542 case 0x79:
12543 gen_helper_advsimd_rinth(tcg_res, tcg_op, tcg_fpstatus);
12544 break;
12545 case 0x59:
12546 gen_helper_advsimd_rinth_exact(tcg_res, tcg_op, tcg_fpstatus);
12547 break;
12548 case 0x2f:
12549 tcg_gen_andi_i32(tcg_res, tcg_op, 0x7fff);
12550 break;
12551 case 0x6f:
12552 tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
12553 break;
12554 case 0x7d:
12555 gen_helper_rsqrte_f16(tcg_res, tcg_op, tcg_fpstatus);
12556 break;
12557 case 0x7f:
12558 gen_helper_sqrt_f16(tcg_res, tcg_op, tcg_fpstatus);
12559 break;
12560 default:
12561 g_assert_not_reached();
12562 }
12563
12564 write_vec_element_i32(s, tcg_res, rd, pass, MO_16);
12565
12566 tcg_temp_free_i32(tcg_res);
12567 tcg_temp_free_i32(tcg_op);
12568 }
12569
12570 clear_vec_high(s, is_q, rd);
12571 }
12572
12573 if (tcg_rmode) {
12574 gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
12575 tcg_temp_free_i32(tcg_rmode);
12576 }
12577
12578 if (tcg_fpstatus) {
12579 tcg_temp_free_ptr(tcg_fpstatus);
12580 }
12581}
12582
12583
12584
12585
12586
12587
12588
12589
12590
12591
12592
12593
/* AdvSIMD vector x indexed element, and its scalar form.
 *  31   30  29 28       24 23  22 21  20  19  16 15 12  11  10 9    5 4    0
 * +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+
 * | 0 | Q | U | 0 1 1 1 1 | size | L | M |  Rm  | opc | H | 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+
 * Bit 28 set selects the scalar encoding (see the 0x5f000000 entry in
 * data_proc_simd[]); the index into Vm is assembled from H:L:M depending
 * on element size.
 */
static void disas_simd_indexed(DisasContext *s, uint32_t insn)
{
    bool is_scalar = extract32(insn, 28, 1);
    bool is_q = extract32(insn, 30, 1);
    bool u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int l = extract32(insn, 21, 1);
    int m = extract32(insn, 20, 1);
    /* Note that the Rm field here is only 4 bits, not 5 as it usually is;
     * for some element sizes the M bit becomes Rm bit 4 below.
     */
    int rm = extract32(insn, 16, 4);
    int opcode = extract32(insn, 12, 4);
    int h = extract32(insn, 11, 1);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    bool is_long = false;
    int is_fp = 0;          /* 0 = integer op, 1 = normal fp, 2 = complex fp */
    bool is_fp16 = false;
    int index;
    TCGv_ptr fpst;

    switch (16 * u + opcode) {
    case 0x08: /* MUL */
    case 0x10: /* MLA */
    case 0x14: /* MLS */
        if (is_scalar) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x02: /* SMLAL, SMLAL2 */
    case 0x12: /* UMLAL, UMLAL2 */
    case 0x06: /* SMLSL, SMLSL2 */
    case 0x16: /* UMLSL, UMLSL2 */
    case 0x0a: /* SMULL, SMULL2 */
    case 0x1a: /* UMULL, UMULL2 */
        if (is_scalar) {
            unallocated_encoding(s);
            return;
        }
        is_long = true;
        break;
    case 0x03: /* SQDMLAL, SQDMLAL2 */
    case 0x07: /* SQDMLSL, SQDMLSL2 */
    case 0x0b: /* SQDMULL, SQDMULL2 */
        is_long = true;
        break;
    case 0x0c: /* SQDMULH */
    case 0x0d: /* SQRDMULH */
        break;
    case 0x01: /* FMLA */
    case 0x05: /* FMLS */
    case 0x09: /* FMUL */
    case 0x19: /* FMULX */
        is_fp = 1;
        break;
    case 0x1d: /* SQRDMLAH */
    case 0x1f: /* SQRDMLSH */
        if (!arm_dc_feature(s, ARM_FEATURE_V8_RDM)) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x0e: /* SDOT */
    case 0x1e: /* UDOT */
        if (size != MO_32 || !arm_dc_feature(s, ARM_FEATURE_V8_DOTPROD)) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x11: /* FCMLA #0 */
    case 0x13: /* FCMLA #90 */
    case 0x15: /* FCMLA #180 */
    case 0x17: /* FCMLA #270 */
        if (!arm_dc_feature(s, ARM_FEATURE_V8_FCMA)) {
            unallocated_encoding(s);
            return;
        }
        is_fp = 2;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    switch (is_fp) {
    case 1: /* normal fp */
        /* convert insn-encoded size to TCGMemOp size */
        switch (size) {
        case 0: /* half-precision */
            size = MO_16;
            is_fp16 = true;
            break;
        case MO_32: /* single precision */
        case MO_64: /* double precision */
            break;
        default:
            unallocated_encoding(s);
            return;
        }
        break;

    case 2: /* complex fp */
        /* Each indexable element is a complex pair, so double the size.  */
        size <<= 1;
        switch (size) {
        case MO_32:
            if (h && !is_q) {
                unallocated_encoding(s);
                return;
            }
            is_fp16 = true;
            break;
        case MO_64:
            break;
        default:
            unallocated_encoding(s);
            return;
        }
        break;

    default: /* integer */
        switch (size) {
        case MO_8:
        case MO_64:
            unallocated_encoding(s);
            return;
        }
        break;
    }
    if (is_fp16 && !arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
        unallocated_encoding(s);
        return;
    }

    /* Given TCGMemOp size, assemble the element index from H:L:M and
     * widen Rm with the M bit where the index needs fewer bits.
     */
    switch (size) {
    case MO_16:
        index = h << 2 | l << 1 | m;
        break;
    case MO_32:
        index = h << 1 | l;
        rm |= m << 4;
        break;
    case MO_64:
        if (l || !is_q) {
            unallocated_encoding(s);
            return;
        }
        index = h;
        rm |= m << 4;
        break;
    default:
        g_assert_not_reached();
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (is_fp) {
        fpst = get_fpstatus_ptr(is_fp16);
    } else {
        fpst = NULL;
    }

    /* Ops handled entirely by gvec expansions return early here.  */
    switch (16 * u + opcode) {
    case 0x0e: /* SDOT */
    case 0x1e: /* UDOT */
        gen_gvec_op3_ool(s, is_q, rd, rn, rm, index,
                         u ? gen_helper_gvec_udot_idx_b
                         : gen_helper_gvec_sdot_idx_b);
        return;
    case 0x11: /* FCMLA #0 */
    case 0x13: /* FCMLA #90 */
    case 0x15: /* FCMLA #180 */
    case 0x17: /* FCMLA #270 */
        {
            /* insn[14:13] encodes the rotation; pack it with the index
             * into the helper's immediate operand.
             */
            int rot = extract32(insn, 13, 2);
            int data = (index << 2) | rot;
            tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
                               vec_full_reg_offset(s, rn),
                               vec_full_reg_offset(s, rm), fpst,
                               is_q ? 16 : 8, vec_full_reg_size(s), data,
                               size == MO_64
                               ? gen_helper_gvec_fcmlas_idx
                               : gen_helper_gvec_fcmlah_idx);
            tcg_temp_free_ptr(fpst);
        }
        return;
    }

    if (size == 3) {
        /* 64x64 -> 64 fp ops (the only 64-bit ops that reach here) */
        TCGv_i64 tcg_idx = tcg_temp_new_i64();
        int pass;

        assert(is_fp && is_q && !is_long);

        read_vec_element(s, tcg_idx, rm, index, MO_64);

        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();
            TCGv_i64 tcg_res = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);

            switch (16 * u + opcode) {
            case 0x05: /* FMLS */
                /* As usual for ARM, separate negation for fused multiply-add */
                gen_helper_vfp_negd(tcg_op, tcg_op);
                /* fall through */
            case 0x01: /* FMLA */
                read_vec_element(s, tcg_res, rd, pass, MO_64);
                gen_helper_vfp_muladdd(tcg_res, tcg_op, tcg_idx, tcg_res, fpst);
                break;
            case 0x09: /* FMUL */
                gen_helper_vfp_muld(tcg_res, tcg_op, tcg_idx, fpst);
                break;
            case 0x19: /* FMULX */
                gen_helper_vfp_mulxd(tcg_res, tcg_op, tcg_idx, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            write_vec_element(s, tcg_res, rd, pass, MO_64);
            tcg_temp_free_i64(tcg_op);
            tcg_temp_free_i64(tcg_res);
        }

        tcg_temp_free_i64(tcg_idx);
        clear_vec_high(s, !is_scalar, rd);
    } else if (!is_long) {
        /* 32 bit floating point, or 16 or 32 bit integer.
         * For the 16 bit scalar case we use the usual Neon helpers and
         * rely on the fact that 0 op 0 == 0 with no side effects.
         */
        TCGv_i32 tcg_idx = tcg_temp_new_i32();
        int pass, maxpasses;

        if (is_scalar) {
            maxpasses = 1;
        } else {
            maxpasses = is_q ? 4 : 2;
        }

        read_vec_element_i32(s, tcg_idx, rm, index, size);

        if (size == 1 && !is_scalar) {
            /* The simplest way to handle the 16x16 indexed ops is to
             * duplicate the index into both halves of the 32 bit tcg_idx
             * and then use the usual Neon helpers.
             */
            tcg_gen_deposit_i32(tcg_idx, tcg_idx, tcg_idx, 16, 16);
        }

        for (pass = 0; pass < maxpasses; pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op, rn, pass, is_scalar ? size : MO_32);

            switch (16 * u + opcode) {
            case 0x08: /* MUL */
            case 0x10: /* MLA */
            case 0x14: /* MLS */
            {
                static NeonGenTwoOpFn * const fns[2][2] = {
                    { gen_helper_neon_add_u16, gen_helper_neon_sub_u16 },
                    { tcg_gen_add_i32, tcg_gen_sub_i32 },
                };
                NeonGenTwoOpFn *genfn;
                bool is_sub = opcode == 0x4; /* MLS subtracts the product */

                if (size == 1) {
                    gen_helper_neon_mul_u16(tcg_res, tcg_op, tcg_idx);
                } else {
                    tcg_gen_mul_i32(tcg_res, tcg_op, tcg_idx);
                }
                if (opcode == 0x8) {
                    /* MUL: no accumulate step */
                    break;
                }
                read_vec_element_i32(s, tcg_op, rd, pass, MO_32);
                genfn = fns[size - 1][is_sub];
                genfn(tcg_res, tcg_op, tcg_res);
                break;
            }
            case 0x05: /* FMLS */
            case 0x01: /* FMLA */
                read_vec_element_i32(s, tcg_res, rd, pass,
                                     is_scalar ? size : MO_32);
                switch (size) {
                case 1:
                    if (opcode == 0x5) {
                        /* As usual for ARM, separate negation for fused
                         * multiply-add (both halves of the packed pair)
                         */
                        tcg_gen_xori_i32(tcg_op, tcg_op, 0x80008000);
                    }
                    if (is_scalar) {
                        gen_helper_advsimd_muladdh(tcg_res, tcg_op, tcg_idx,
                                                   tcg_res, fpst);
                    } else {
                        gen_helper_advsimd_muladd2h(tcg_res, tcg_op, tcg_idx,
                                                    tcg_res, fpst);
                    }
                    break;
                case 2:
                    if (opcode == 0x5) {
                        /* As usual for ARM, separate negation for
                         * fused multiply-add
                         */
                        tcg_gen_xori_i32(tcg_op, tcg_op, 0x80000000);
                    }
                    gen_helper_vfp_muladds(tcg_res, tcg_op, tcg_idx,
                                           tcg_res, fpst);
                    break;
                default:
                    g_assert_not_reached();
                }
                break;
            case 0x09: /* FMUL */
                switch (size) {
                case 1:
                    if (is_scalar) {
                        gen_helper_advsimd_mulh(tcg_res, tcg_op,
                                                tcg_idx, fpst);
                    } else {
                        gen_helper_advsimd_mul2h(tcg_res, tcg_op,
                                                 tcg_idx, fpst);
                    }
                    break;
                case 2:
                    gen_helper_vfp_muls(tcg_res, tcg_op, tcg_idx, fpst);
                    break;
                default:
                    g_assert_not_reached();
                }
                break;
            case 0x19: /* FMULX */
                switch (size) {
                case 1:
                    if (is_scalar) {
                        gen_helper_advsimd_mulxh(tcg_res, tcg_op,
                                                 tcg_idx, fpst);
                    } else {
                        gen_helper_advsimd_mulx2h(tcg_res, tcg_op,
                                                  tcg_idx, fpst);
                    }
                    break;
                case 2:
                    gen_helper_vfp_mulxs(tcg_res, tcg_op, tcg_idx, fpst);
                    break;
                default:
                    g_assert_not_reached();
                }
                break;
            case 0x0c: /* SQDMULH */
                if (size == 1) {
                    gen_helper_neon_qdmulh_s16(tcg_res, cpu_env,
                                               tcg_op, tcg_idx);
                } else {
                    gen_helper_neon_qdmulh_s32(tcg_res, cpu_env,
                                               tcg_op, tcg_idx);
                }
                break;
            case 0x0d: /* SQRDMULH */
                if (size == 1) {
                    gen_helper_neon_qrdmulh_s16(tcg_res, cpu_env,
                                                tcg_op, tcg_idx);
                } else {
                    gen_helper_neon_qrdmulh_s32(tcg_res, cpu_env,
                                                tcg_op, tcg_idx);
                }
                break;
            case 0x1d: /* SQRDMLAH */
                read_vec_element_i32(s, tcg_res, rd, pass,
                                     is_scalar ? size : MO_32);
                if (size == 1) {
                    gen_helper_neon_qrdmlah_s16(tcg_res, cpu_env,
                                                tcg_op, tcg_idx, tcg_res);
                } else {
                    gen_helper_neon_qrdmlah_s32(tcg_res, cpu_env,
                                                tcg_op, tcg_idx, tcg_res);
                }
                break;
            case 0x1f: /* SQRDMLSH */
                read_vec_element_i32(s, tcg_res, rd, pass,
                                     is_scalar ? size : MO_32);
                if (size == 1) {
                    gen_helper_neon_qrdmlsh_s16(tcg_res, cpu_env,
                                                tcg_op, tcg_idx, tcg_res);
                } else {
                    gen_helper_neon_qrdmlsh_s32(tcg_res, cpu_env,
                                                tcg_op, tcg_idx, tcg_res);
                }
                break;
            default:
                g_assert_not_reached();
            }

            if (is_scalar) {
                write_fp_sreg(s, rd, tcg_res);
            } else {
                write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
            }

            tcg_temp_free_i32(tcg_op);
            tcg_temp_free_i32(tcg_res);
        }

        tcg_temp_free_i32(tcg_idx);
        clear_vec_high(s, is_q, rd);
    } else {
        /* long ops: 16x16->32 or 32x32->64 */
        TCGv_i64 tcg_res[2];
        int pass;
        bool satop = extract32(opcode, 0, 1);
        TCGMemOp memop = MO_32;

        if (satop || !u) {
            /* saturating or signed ops sign-extend their inputs */
            memop |= MO_SIGN;
        }

        if (size == 2) {
            /* 32x32 -> 64 */
            TCGv_i64 tcg_idx = tcg_temp_new_i64();

            read_vec_element(s, tcg_idx, rm, index, memop);

            for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
                TCGv_i64 tcg_op = tcg_temp_new_i64();
                TCGv_i64 tcg_passres;
                int passelt;

                if (is_scalar) {
                    passelt = 0;
                } else {
                    /* the "2" forms read the high half of Rn */
                    passelt = pass + (is_q * 2);
                }

                read_vec_element(s, tcg_op, rn, passelt, memop);

                tcg_res[pass] = tcg_temp_new_i64();

                if (opcode == 0xa || opcode == 0xb) {
                    /* Non-accumulating ops (i.e. xMULL, SQDMULL):
                     * the multiply result goes straight to the result reg.
                     */
                    tcg_passres = tcg_res[pass];
                } else {
                    tcg_passres = tcg_temp_new_i64();
                }

                tcg_gen_mul_i64(tcg_passres, tcg_op, tcg_idx);
                tcg_temp_free_i64(tcg_op);

                if (satop) {
                    /* saturating, doubling */
                    gen_helper_neon_addl_saturate_s64(tcg_passres, cpu_env,
                                                      tcg_passres, tcg_passres);
                }

                if (opcode == 0xa || opcode == 0xb) {
                    continue;
                }

                /* Accumulating op: handle accumulate step */
                read_vec_element(s, tcg_res[pass], rd, pass, MO_64);

                switch (opcode) {
                case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
                    tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
                    break;
                case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
                    tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
                    break;
                case 0x7: /* SQDMLSL, SQDMLSL2 */
                    tcg_gen_neg_i64(tcg_passres, tcg_passres);
                    /* fall through */
                case 0x3: /* SQDMLAL, SQDMLAL2 */
                    gen_helper_neon_addl_saturate_s64(tcg_res[pass], cpu_env,
                                                      tcg_res[pass],
                                                      tcg_passres);
                    break;
                default:
                    g_assert_not_reached();
                }
                tcg_temp_free_i64(tcg_passres);
            }
            tcg_temp_free_i64(tcg_idx);

            clear_vec_high(s, !is_scalar, rd);
        } else {
            /* 16x16 -> 32 */
            TCGv_i32 tcg_idx = tcg_temp_new_i32();

            assert(size == 1);
            read_vec_element_i32(s, tcg_idx, rm, index, size);

            if (!is_scalar) {
                /* The simplest way to handle the 16x16 indexed ops is to
                 * duplicate the index into both halves of the 32 bit tcg_idx
                 * and then use the usual Neon helpers.
                 */
                tcg_gen_deposit_i32(tcg_idx, tcg_idx, tcg_idx, 16, 16);
            }

            for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
                TCGv_i32 tcg_op = tcg_temp_new_i32();
                TCGv_i64 tcg_passres;

                if (is_scalar) {
                    read_vec_element_i32(s, tcg_op, rn, pass, size);
                } else {
                    read_vec_element_i32(s, tcg_op, rn,
                                         pass + (is_q * 2), MO_32);
                }

                tcg_res[pass] = tcg_temp_new_i64();

                if (opcode == 0xa || opcode == 0xb) {
                    /* Non-accumulating ops (i.e. xMULL, SQDMULL) */
                    tcg_passres = tcg_res[pass];
                } else {
                    tcg_passres = tcg_temp_new_i64();
                }

                if (memop & MO_SIGN) {
                    gen_helper_neon_mull_s16(tcg_passres, tcg_op, tcg_idx);
                } else {
                    gen_helper_neon_mull_u16(tcg_passres, tcg_op, tcg_idx);
                }
                if (satop) {
                    gen_helper_neon_addl_saturate_s32(tcg_passres, cpu_env,
                                                      tcg_passres, tcg_passres);
                }
                tcg_temp_free_i32(tcg_op);

                if (opcode == 0xa || opcode == 0xb) {
                    continue;
                }

                /* Accumulating op: handle accumulate step */
                read_vec_element(s, tcg_res[pass], rd, pass, MO_64);

                switch (opcode) {
                case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
                    gen_helper_neon_addl_u32(tcg_res[pass], tcg_res[pass],
                                             tcg_passres);
                    break;
                case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
                    gen_helper_neon_subl_u32(tcg_res[pass], tcg_res[pass],
                                             tcg_passres);
                    break;
                case 0x7: /* SQDMLSL, SQDMLSL2 */
                    gen_helper_neon_negl_u32(tcg_passres, tcg_passres);
                    /* fall through */
                case 0x3: /* SQDMLAL, SQDMLAL2 */
                    gen_helper_neon_addl_saturate_s32(tcg_res[pass], cpu_env,
                                                      tcg_res[pass],
                                                      tcg_passres);
                    break;
                default:
                    g_assert_not_reached();
                }
                tcg_temp_free_i64(tcg_passres);
            }
            tcg_temp_free_i32(tcg_idx);

            if (is_scalar) {
                /* scalar result occupies only the low 32 bits */
                tcg_gen_ext32u_i64(tcg_res[0], tcg_res[0]);
            }
        }

        if (is_scalar) {
            tcg_res[1] = tcg_const_i64(0);
        }

        for (pass = 0; pass < 2; pass++) {
            write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
            tcg_temp_free_i64(tcg_res[pass]);
        }
    }

    if (fpst) {
        tcg_temp_free_ptr(fpst);
    }
}
13183
13184
13185
13186
13187
13188
13189
13190static void disas_crypto_aes(DisasContext *s, uint32_t insn)
13191{
13192 int size = extract32(insn, 22, 2);
13193 int opcode = extract32(insn, 12, 5);
13194 int rn = extract32(insn, 5, 5);
13195 int rd = extract32(insn, 0, 5);
13196 int decrypt;
13197 TCGv_ptr tcg_rd_ptr, tcg_rn_ptr;
13198 TCGv_i32 tcg_decrypt;
13199 CryptoThreeOpIntFn *genfn;
13200
13201 if (!arm_dc_feature(s, ARM_FEATURE_V8_AES)
13202 || size != 0) {
13203 unallocated_encoding(s);
13204 return;
13205 }
13206
13207 switch (opcode) {
13208 case 0x4:
13209 decrypt = 0;
13210 genfn = gen_helper_crypto_aese;
13211 break;
13212 case 0x6:
13213 decrypt = 0;
13214 genfn = gen_helper_crypto_aesmc;
13215 break;
13216 case 0x5:
13217 decrypt = 1;
13218 genfn = gen_helper_crypto_aese;
13219 break;
13220 case 0x7:
13221 decrypt = 1;
13222 genfn = gen_helper_crypto_aesmc;
13223 break;
13224 default:
13225 unallocated_encoding(s);
13226 return;
13227 }
13228
13229 if (!fp_access_check(s)) {
13230 return;
13231 }
13232
13233 tcg_rd_ptr = vec_full_reg_ptr(s, rd);
13234 tcg_rn_ptr = vec_full_reg_ptr(s, rn);
13235 tcg_decrypt = tcg_const_i32(decrypt);
13236
13237 genfn(tcg_rd_ptr, tcg_rn_ptr, tcg_decrypt);
13238
13239 tcg_temp_free_ptr(tcg_rd_ptr);
13240 tcg_temp_free_ptr(tcg_rn_ptr);
13241 tcg_temp_free_i32(tcg_decrypt);
13242}
13243
13244
13245
13246
13247
13248
13249
13250static void disas_crypto_three_reg_sha(DisasContext *s, uint32_t insn)
13251{
13252 int size = extract32(insn, 22, 2);
13253 int opcode = extract32(insn, 12, 3);
13254 int rm = extract32(insn, 16, 5);
13255 int rn = extract32(insn, 5, 5);
13256 int rd = extract32(insn, 0, 5);
13257 CryptoThreeOpFn *genfn;
13258 TCGv_ptr tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr;
13259 int feature = ARM_FEATURE_V8_SHA256;
13260
13261 if (size != 0) {
13262 unallocated_encoding(s);
13263 return;
13264 }
13265
13266 switch (opcode) {
13267 case 0:
13268 case 1:
13269 case 2:
13270 case 3:
13271 genfn = NULL;
13272 feature = ARM_FEATURE_V8_SHA1;
13273 break;
13274 case 4:
13275 genfn = gen_helper_crypto_sha256h;
13276 break;
13277 case 5:
13278 genfn = gen_helper_crypto_sha256h2;
13279 break;
13280 case 6:
13281 genfn = gen_helper_crypto_sha256su1;
13282 break;
13283 default:
13284 unallocated_encoding(s);
13285 return;
13286 }
13287
13288 if (!arm_dc_feature(s, feature)) {
13289 unallocated_encoding(s);
13290 return;
13291 }
13292
13293 if (!fp_access_check(s)) {
13294 return;
13295 }
13296
13297 tcg_rd_ptr = vec_full_reg_ptr(s, rd);
13298 tcg_rn_ptr = vec_full_reg_ptr(s, rn);
13299 tcg_rm_ptr = vec_full_reg_ptr(s, rm);
13300
13301 if (genfn) {
13302 genfn(tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr);
13303 } else {
13304 TCGv_i32 tcg_opcode = tcg_const_i32(opcode);
13305
13306 gen_helper_crypto_sha1_3reg(tcg_rd_ptr, tcg_rn_ptr,
13307 tcg_rm_ptr, tcg_opcode);
13308 tcg_temp_free_i32(tcg_opcode);
13309 }
13310
13311 tcg_temp_free_ptr(tcg_rd_ptr);
13312 tcg_temp_free_ptr(tcg_rn_ptr);
13313 tcg_temp_free_ptr(tcg_rm_ptr);
13314}
13315
13316
13317
13318
13319
13320
13321
13322static void disas_crypto_two_reg_sha(DisasContext *s, uint32_t insn)
13323{
13324 int size = extract32(insn, 22, 2);
13325 int opcode = extract32(insn, 12, 5);
13326 int rn = extract32(insn, 5, 5);
13327 int rd = extract32(insn, 0, 5);
13328 CryptoTwoOpFn *genfn;
13329 int feature;
13330 TCGv_ptr tcg_rd_ptr, tcg_rn_ptr;
13331
13332 if (size != 0) {
13333 unallocated_encoding(s);
13334 return;
13335 }
13336
13337 switch (opcode) {
13338 case 0:
13339 feature = ARM_FEATURE_V8_SHA1;
13340 genfn = gen_helper_crypto_sha1h;
13341 break;
13342 case 1:
13343 feature = ARM_FEATURE_V8_SHA1;
13344 genfn = gen_helper_crypto_sha1su1;
13345 break;
13346 case 2:
13347 feature = ARM_FEATURE_V8_SHA256;
13348 genfn = gen_helper_crypto_sha256su0;
13349 break;
13350 default:
13351 unallocated_encoding(s);
13352 return;
13353 }
13354
13355 if (!arm_dc_feature(s, feature)) {
13356 unallocated_encoding(s);
13357 return;
13358 }
13359
13360 if (!fp_access_check(s)) {
13361 return;
13362 }
13363
13364 tcg_rd_ptr = vec_full_reg_ptr(s, rd);
13365 tcg_rn_ptr = vec_full_reg_ptr(s, rn);
13366
13367 genfn(tcg_rd_ptr, tcg_rn_ptr);
13368
13369 tcg_temp_free_ptr(tcg_rd_ptr);
13370 tcg_temp_free_ptr(tcg_rn_ptr);
13371}
13372
13373
13374
13375
13376
13377
13378
/* Crypto three-reg SHA512 group.
 * O == 0: SHA512H, SHA512H2, SHA512SU1, RAX1 (opcodes 0-3)
 * O == 1: SM3PARTW1, SM3PARTW2, SM4EKEY (opcodes 0-2)
 * Each entry selects its feature bit; RAX1 has no helper and is
 * expanded inline with TCG ops below (genfn == NULL).
 */
static void disas_crypto_three_reg_sha512(DisasContext *s, uint32_t insn)
{
    int opcode = extract32(insn, 10, 2);
    int o = extract32(insn, 14, 1);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    int feature;
    CryptoThreeOpFn *genfn;

    if (o == 0) {
        /* opcode is only 2 bits wide, so this switch is exhaustive */
        switch (opcode) {
        case 0: /* SHA512H */
            feature = ARM_FEATURE_V8_SHA512;
            genfn = gen_helper_crypto_sha512h;
            break;
        case 1: /* SHA512H2 */
            feature = ARM_FEATURE_V8_SHA512;
            genfn = gen_helper_crypto_sha512h2;
            break;
        case 2: /* SHA512SU1 */
            feature = ARM_FEATURE_V8_SHA512;
            genfn = gen_helper_crypto_sha512su1;
            break;
        case 3: /* RAX1 */
            feature = ARM_FEATURE_V8_SHA3;
            genfn = NULL; /* expanded inline below */
            break;
        }
    } else {
        switch (opcode) {
        case 0: /* SM3PARTW1 */
            feature = ARM_FEATURE_V8_SM3;
            genfn = gen_helper_crypto_sm3partw1;
            break;
        case 1: /* SM3PARTW2 */
            feature = ARM_FEATURE_V8_SM3;
            genfn = gen_helper_crypto_sm3partw2;
            break;
        case 2: /* SM4EKEY */
            feature = ARM_FEATURE_V8_SM4;
            genfn = gen_helper_crypto_sm4ekey;
            break;
        default:
            unallocated_encoding(s);
            return;
        }
    }

    if (!arm_dc_feature(s, feature)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (genfn) {
        TCGv_ptr tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr;

        tcg_rd_ptr = vec_full_reg_ptr(s, rd);
        tcg_rn_ptr = vec_full_reg_ptr(s, rn);
        tcg_rm_ptr = vec_full_reg_ptr(s, rm);

        genfn(tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr);

        tcg_temp_free_ptr(tcg_rd_ptr);
        tcg_temp_free_ptr(tcg_rn_ptr);
        tcg_temp_free_ptr(tcg_rm_ptr);
    } else {
        /* RAX1: Vd.2d = Vn.2d ^ ROL(Vm.2d, 1), one 64-bit lane per pass */
        TCGv_i64 tcg_op1, tcg_op2, tcg_res[2];
        int pass;

        tcg_op1 = tcg_temp_new_i64();
        tcg_op2 = tcg_temp_new_i64();
        tcg_res[0] = tcg_temp_new_i64();
        tcg_res[1] = tcg_temp_new_i64();

        for (pass = 0; pass < 2; pass++) {
            read_vec_element(s, tcg_op1, rn, pass, MO_64);
            read_vec_element(s, tcg_op2, rm, pass, MO_64);

            tcg_gen_rotli_i64(tcg_res[pass], tcg_op2, 1);
            tcg_gen_xor_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
        }
        write_vec_element(s, tcg_res[0], rd, 0, MO_64);
        write_vec_element(s, tcg_res[1], rd, 1, MO_64);

        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2);
        tcg_temp_free_i64(tcg_res[0]);
        tcg_temp_free_i64(tcg_res[1]);
    }
}
13474
13475
13476
13477
13478
13479
13480
13481static void disas_crypto_two_reg_sha512(DisasContext *s, uint32_t insn)
13482{
13483 int opcode = extract32(insn, 10, 2);
13484 int rn = extract32(insn, 5, 5);
13485 int rd = extract32(insn, 0, 5);
13486 TCGv_ptr tcg_rd_ptr, tcg_rn_ptr;
13487 int feature;
13488 CryptoTwoOpFn *genfn;
13489
13490 switch (opcode) {
13491 case 0:
13492 feature = ARM_FEATURE_V8_SHA512;
13493 genfn = gen_helper_crypto_sha512su0;
13494 break;
13495 case 1:
13496 feature = ARM_FEATURE_V8_SM4;
13497 genfn = gen_helper_crypto_sm4e;
13498 break;
13499 default:
13500 unallocated_encoding(s);
13501 return;
13502 }
13503
13504 if (!arm_dc_feature(s, feature)) {
13505 unallocated_encoding(s);
13506 return;
13507 }
13508
13509 if (!fp_access_check(s)) {
13510 return;
13511 }
13512
13513 tcg_rd_ptr = vec_full_reg_ptr(s, rd);
13514 tcg_rn_ptr = vec_full_reg_ptr(s, rn);
13515
13516 genfn(tcg_rd_ptr, tcg_rn_ptr);
13517
13518 tcg_temp_free_ptr(tcg_rd_ptr);
13519 tcg_temp_free_ptr(tcg_rn_ptr);
13520}
13521
13522
13523
13524
13525
13526
13527
/* Crypto four-register group:
 * op0 == 0: EOR3  (SHA3 feature)
 * op0 == 1: BCAX  (SHA3 feature)
 * op0 == 2: SM3SS1 (SM3 feature)
 */
static void disas_crypto_four_reg(DisasContext *s, uint32_t insn)
{
    int op0 = extract32(insn, 21, 2);
    int rm = extract32(insn, 16, 5);
    int ra = extract32(insn, 10, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    int feature;

    switch (op0) {
    case 0: /* EOR3 */
    case 1: /* BCAX */
        feature = ARM_FEATURE_V8_SHA3;
        break;
    case 2: /* SM3SS1 */
        feature = ARM_FEATURE_V8_SM3;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!arm_dc_feature(s, feature)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (op0 < 2) {
        /* EOR3 / BCAX: full 128-bit result, one 64-bit lane per pass */
        TCGv_i64 tcg_op1, tcg_op2, tcg_op3, tcg_res[2];
        int pass;

        tcg_op1 = tcg_temp_new_i64();
        tcg_op2 = tcg_temp_new_i64();
        tcg_op3 = tcg_temp_new_i64();
        tcg_res[0] = tcg_temp_new_i64();
        tcg_res[1] = tcg_temp_new_i64();

        for (pass = 0; pass < 2; pass++) {
            read_vec_element(s, tcg_op1, rn, pass, MO_64);
            read_vec_element(s, tcg_op2, rm, pass, MO_64);
            read_vec_element(s, tcg_op3, ra, pass, MO_64);

            if (op0 == 0) {
                /* EOR3: Vd = Vn ^ Vm ^ Va */
                tcg_gen_xor_i64(tcg_res[pass], tcg_op2, tcg_op3);
            } else {
                /* BCAX: Vd = Vn ^ (Vm & ~Va) */
                tcg_gen_andc_i64(tcg_res[pass], tcg_op2, tcg_op3);
            }
            tcg_gen_xor_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
        }
        write_vec_element(s, tcg_res[0], rd, 0, MO_64);
        write_vec_element(s, tcg_res[1], rd, 1, MO_64);

        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2);
        tcg_temp_free_i64(tcg_op3);
        tcg_temp_free_i64(tcg_res[0]);
        tcg_temp_free_i64(tcg_res[1]);
    } else {
        /* SM3SS1: operates on the top 32-bit elements only;
         * result lanes 0-2 are written as zero.
         */
        TCGv_i32 tcg_op1, tcg_op2, tcg_op3, tcg_res, tcg_zero;

        tcg_op1 = tcg_temp_new_i32();
        tcg_op2 = tcg_temp_new_i32();
        tcg_op3 = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        tcg_zero = tcg_const_i32(0);

        read_vec_element_i32(s, tcg_op1, rn, 3, MO_32);
        read_vec_element_i32(s, tcg_op2, rm, 3, MO_32);
        read_vec_element_i32(s, tcg_op3, ra, 3, MO_32);

        /* ROL(op1, 12) + op2 + op3, then ROL by 7 (rotri 20/25 == rotli 12/7) */
        tcg_gen_rotri_i32(tcg_res, tcg_op1, 20);
        tcg_gen_add_i32(tcg_res, tcg_res, tcg_op2);
        tcg_gen_add_i32(tcg_res, tcg_res, tcg_op3);
        tcg_gen_rotri_i32(tcg_res, tcg_res, 25);

        write_vec_element_i32(s, tcg_zero, rd, 0, MO_32);
        write_vec_element_i32(s, tcg_zero, rd, 1, MO_32);
        write_vec_element_i32(s, tcg_zero, rd, 2, MO_32);
        write_vec_element_i32(s, tcg_res, rd, 3, MO_32);

        tcg_temp_free_i32(tcg_op1);
        tcg_temp_free_i32(tcg_op2);
        tcg_temp_free_i32(tcg_op3);
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_zero);
    }
}
13621
13622
13623
13624
13625
13626
13627
13628static void disas_crypto_xar(DisasContext *s, uint32_t insn)
13629{
13630 int rm = extract32(insn, 16, 5);
13631 int imm6 = extract32(insn, 10, 6);
13632 int rn = extract32(insn, 5, 5);
13633 int rd = extract32(insn, 0, 5);
13634 TCGv_i64 tcg_op1, tcg_op2, tcg_res[2];
13635 int pass;
13636
13637 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA3)) {
13638 unallocated_encoding(s);
13639 return;
13640 }
13641
13642 if (!fp_access_check(s)) {
13643 return;
13644 }
13645
13646 tcg_op1 = tcg_temp_new_i64();
13647 tcg_op2 = tcg_temp_new_i64();
13648 tcg_res[0] = tcg_temp_new_i64();
13649 tcg_res[1] = tcg_temp_new_i64();
13650
13651 for (pass = 0; pass < 2; pass++) {
13652 read_vec_element(s, tcg_op1, rn, pass, MO_64);
13653 read_vec_element(s, tcg_op2, rm, pass, MO_64);
13654
13655 tcg_gen_xor_i64(tcg_res[pass], tcg_op1, tcg_op2);
13656 tcg_gen_rotri_i64(tcg_res[pass], tcg_res[pass], imm6);
13657 }
13658 write_vec_element(s, tcg_res[0], rd, 0, MO_64);
13659 write_vec_element(s, tcg_res[1], rd, 1, MO_64);
13660
13661 tcg_temp_free_i64(tcg_op1);
13662 tcg_temp_free_i64(tcg_op2);
13663 tcg_temp_free_i64(tcg_res[0]);
13664 tcg_temp_free_i64(tcg_res[1]);
13665}
13666
13667
13668
13669
13670
13671
13672
13673static void disas_crypto_three_reg_imm2(DisasContext *s, uint32_t insn)
13674{
13675 int opcode = extract32(insn, 10, 2);
13676 int imm2 = extract32(insn, 12, 2);
13677 int rm = extract32(insn, 16, 5);
13678 int rn = extract32(insn, 5, 5);
13679 int rd = extract32(insn, 0, 5);
13680 TCGv_ptr tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr;
13681 TCGv_i32 tcg_imm2, tcg_opcode;
13682
13683 if (!arm_dc_feature(s, ARM_FEATURE_V8_SM3)) {
13684 unallocated_encoding(s);
13685 return;
13686 }
13687
13688 if (!fp_access_check(s)) {
13689 return;
13690 }
13691
13692 tcg_rd_ptr = vec_full_reg_ptr(s, rd);
13693 tcg_rn_ptr = vec_full_reg_ptr(s, rn);
13694 tcg_rm_ptr = vec_full_reg_ptr(s, rm);
13695 tcg_imm2 = tcg_const_i32(imm2);
13696 tcg_opcode = tcg_const_i32(opcode);
13697
13698 gen_helper_crypto_sm3tt(tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr, tcg_imm2,
13699 tcg_opcode);
13700
13701 tcg_temp_free_ptr(tcg_rd_ptr);
13702 tcg_temp_free_ptr(tcg_rn_ptr);
13703 tcg_temp_free_ptr(tcg_rm_ptr);
13704 tcg_temp_free_i32(tcg_imm2);
13705 tcg_temp_free_i32(tcg_opcode);
13706}
13707
13708
13709
13710
13711
13712
/* Decode table for the AdvSIMD and crypto groups; entries are tried in
 * order by lookup_disas_fn, so more specific patterns must precede any
 * overlapping, more general ones.
 */
static const AArch64DecodeTable data_proc_simd[] = {
    /* pattern  ,  mask     ,  fn                        */
    { 0x0e200400, 0x9f200400, disas_simd_three_reg_same },
    { 0x0e008400, 0x9f208400, disas_simd_three_reg_same_extra },
    { 0x0e200000, 0x9f200c00, disas_simd_three_reg_diff },
    { 0x0e200800, 0x9f3e0c00, disas_simd_two_reg_misc },
    { 0x0e300800, 0x9f3e0c00, disas_simd_across_lanes },
    { 0x0e000400, 0x9fe08400, disas_simd_copy },
    { 0x0f000000, 0x9f000400, disas_simd_indexed }, /* vector indexed */
    /* simd_mod_imm decode is a subset of simd_shift_imm, so must precede it */
    { 0x0f000400, 0x9ff80400, disas_simd_mod_imm },
    { 0x0f000400, 0x9f800400, disas_simd_shift_imm },
    { 0x0e000000, 0xbf208c00, disas_simd_tb },
    { 0x0e000800, 0xbf208c00, disas_simd_zip_trn },
    { 0x2e000000, 0xbf208400, disas_simd_ext },
    { 0x5e200400, 0xdf200400, disas_simd_scalar_three_reg_same },
    { 0x5e008400, 0xdf208400, disas_simd_scalar_three_reg_same_extra },
    { 0x5e200000, 0xdf200c00, disas_simd_scalar_three_reg_diff },
    { 0x5e200800, 0xdf3e0c00, disas_simd_scalar_two_reg_misc },
    { 0x5e300800, 0xdf3e0c00, disas_simd_scalar_pairwise },
    { 0x5e000400, 0xdfe08400, disas_simd_scalar_copy },
    { 0x5f000000, 0xdf000400, disas_simd_indexed }, /* scalar indexed */
    { 0x5f000400, 0xdf800400, disas_simd_scalar_shift_imm },
    { 0x4e280800, 0xff3e0c00, disas_crypto_aes },
    { 0x5e000000, 0xff208c00, disas_crypto_three_reg_sha },
    { 0x5e280800, 0xff3e0c00, disas_crypto_two_reg_sha },
    { 0xce608000, 0xffe0b000, disas_crypto_three_reg_sha512 },
    { 0xcec08000, 0xfffff000, disas_crypto_two_reg_sha512 },
    { 0xce000000, 0xff808000, disas_crypto_four_reg },
    { 0xce800000, 0xffe00000, disas_crypto_xar },
    { 0xce408000, 0xffe0c000, disas_crypto_three_reg_imm2 },
    { 0x0e400400, 0x9f60c400, disas_simd_three_reg_same_fp16 },
    { 0x0e780800, 0x8f7e0c00, disas_simd_two_reg_misc_fp16 },
    { 0x5e400400, 0xdf60c400, disas_simd_scalar_three_reg_same_fp16 },
    { 0x00000000, 0x00000000, NULL } /* table terminator */
};
13749
13750static void disas_data_proc_simd(DisasContext *s, uint32_t insn)
13751{
13752
13753
13754
13755
13756 AArch64DecodeFn *fn = lookup_disas_fn(&data_proc_simd[0], insn);
13757 if (fn) {
13758 fn(s, insn);
13759 } else {
13760 unallocated_encoding(s);
13761 }
13762}
13763
13764
13765static void disas_data_proc_simd_fp(DisasContext *s, uint32_t insn)
13766{
13767 if (extract32(insn, 28, 1) == 1 && extract32(insn, 30, 1) == 0) {
13768 disas_data_proc_fp(s, insn);
13769 } else {
13770
13771 disas_data_proc_simd(s, insn);
13772 }
13773}
13774
13775
13776static void disas_a64_insn(CPUARMState *env, DisasContext *s)
13777{
13778 uint32_t insn;
13779
13780 insn = arm_ldl_code(env, s->pc, s->sctlr_b);
13781 s->insn = insn;
13782 s->pc += 4;
13783
13784 s->fp_access_checked = false;
13785
13786 switch (extract32(insn, 25, 4)) {
13787 case 0x0: case 0x1: case 0x3:
13788 unallocated_encoding(s);
13789 break;
13790 case 0x2:
13791 if (!arm_dc_feature(s, ARM_FEATURE_SVE) || !disas_sve(s, insn)) {
13792 unallocated_encoding(s);
13793 }
13794 break;
13795 case 0x8: case 0x9:
13796 disas_data_proc_imm(s, insn);
13797 break;
13798 case 0xa: case 0xb:
13799 disas_b_exc_sys(s, insn);
13800 break;
13801 case 0x4:
13802 case 0x6:
13803 case 0xc:
13804 case 0xe:
13805 disas_ldst(s, insn);
13806 break;
13807 case 0x5:
13808 case 0xd:
13809 disas_data_proc_reg(s, insn);
13810 break;
13811 case 0x7:
13812 case 0xf:
13813 disas_data_proc_simd_fp(s, insn);
13814 break;
13815 default:
13816 assert(FALSE);
13817 break;
13818 }
13819
13820
13821 free_tmp_a64(s);
13822}
13823
13824static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
13825 CPUState *cpu)
13826{
13827 DisasContext *dc = container_of(dcbase, DisasContext, base);
13828 CPUARMState *env = cpu->env_ptr;
13829 ARMCPU *arm_cpu = arm_env_get_cpu(env);
13830 int bound;
13831
13832 dc->pc = dc->base.pc_first;
13833 dc->condjmp = 0;
13834
13835 dc->aarch64 = 1;
13836
13837
13838
13839 dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
13840 !arm_el_is_aa64(env, 3);
13841 dc->thumb = 0;
13842 dc->sctlr_b = 0;
13843 dc->be_data = ARM_TBFLAG_BE_DATA(dc->base.tb->flags) ? MO_BE : MO_LE;
13844 dc->condexec_mask = 0;
13845 dc->condexec_cond = 0;
13846 dc->mmu_idx = core_to_arm_mmu_idx(env, ARM_TBFLAG_MMUIDX(dc->base.tb->flags));
13847 dc->tbi0 = ARM_TBFLAG_TBI0(dc->base.tb->flags);
13848 dc->tbi1 = ARM_TBFLAG_TBI1(dc->base.tb->flags);
13849 dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
13850#if !defined(CONFIG_USER_ONLY)
13851 dc->user = (dc->current_el == 0);
13852#endif
13853 dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(dc->base.tb->flags);
13854 dc->sve_excp_el = ARM_TBFLAG_SVEEXC_EL(dc->base.tb->flags);
13855 dc->sve_len = (ARM_TBFLAG_ZCR_LEN(dc->base.tb->flags) + 1) * 16;
13856 dc->vec_len = 0;
13857 dc->vec_stride = 0;
13858 dc->cp_regs = arm_cpu->cp_regs;
13859 dc->features = env->features;
13860
13861
13862
13863
13864
13865
13866
13867
13868
13869
13870
13871
13872
13873
13874
13875
13876 dc->ss_active = ARM_TBFLAG_SS_ACTIVE(dc->base.tb->flags);
13877 dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(dc->base.tb->flags);
13878 dc->is_ldex = false;
13879 dc->ss_same_el = (arm_debug_target_el(env) == dc->current_el);
13880
13881
13882 bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
13883
13884
13885 if (dc->ss_active) {
13886 bound = 1;
13887 }
13888 dc->base.max_insns = MIN(dc->base.max_insns, bound);
13889
13890 init_tmp_a64_array(dc);
13891}
13892
/* Per-TB start hook: reset the TCG temporary leak counter so that
 * translator_loop_temp_check() can detect temps leaked by this TB.
 */
static void aarch64_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
    tcg_clear_temp_count();
}
13897
13898static void aarch64_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
13899{
13900 DisasContext *dc = container_of(dcbase, DisasContext, base);
13901
13902 tcg_gen_insn_start(dc->pc, 0, 0);
13903 dc->insn_start = tcg_last_op();
13904}
13905
13906static bool aarch64_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
13907 const CPUBreakpoint *bp)
13908{
13909 DisasContext *dc = container_of(dcbase, DisasContext, base);
13910
13911 if (bp->flags & BP_CPU) {
13912 gen_a64_set_pc_im(dc->pc);
13913 gen_helper_check_breakpoints(cpu_env);
13914
13915 dc->base.is_jmp = DISAS_TOO_MANY;
13916 } else {
13917 gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
13918
13919
13920
13921
13922
13923 dc->pc += 4;
13924 dc->base.is_jmp = DISAS_NORETURN;
13925 }
13926
13927 return true;
13928}
13929
13930static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
13931{
13932 DisasContext *dc = container_of(dcbase, DisasContext, base);
13933 CPUARMState *env = cpu->env_ptr;
13934
13935 if (dc->ss_active && !dc->pstate_ss) {
13936
13937
13938
13939
13940
13941
13942
13943
13944
13945
13946 assert(dc->base.num_insns == 1);
13947 gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
13948 default_exception_el(dc));
13949 dc->base.is_jmp = DISAS_NORETURN;
13950 } else {
13951 disas_a64_insn(env, dc);
13952 }
13953
13954 dc->base.pc_next = dc->pc;
13955 translator_loop_temp_check(&dc->base);
13956}
13957
/* TB-stop hook: emit the code that ends the translation block,
 * according to how translation stopped (dc->base.is_jmp).
 * NOTE: both switches below use deliberate case fall-through.
 */
static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (unlikely(dc->base.singlestep_enabled || dc->ss_active)) {
        /* Single-stepping (gdbstub or architectural SS): every TB end
         * must raise a debug/step exception instead of chaining TBs.
         */
        switch (dc->base.is_jmp) {
        default:
            /* PC not yet written back for this is_jmp kind; do it now. */
            gen_a64_set_pc_im(dc->pc);
            /* fall through */
        case DISAS_EXIT:
        case DISAS_JUMP:
            if (dc->base.singlestep_enabled) {
                gen_exception_internal(EXCP_DEBUG);
            } else {
                gen_step_complete_exception(dc);
            }
            break;
        case DISAS_NORETURN:
            /* Exception already generated; nothing more to emit. */
            break;
        }
    } else {
        switch (dc->base.is_jmp) {
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
            /* Straight-line end of TB: chain to the next TB if possible. */
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_UPDATE:
            /* CPU state (other than PC) was changed; write back the PC
             * and return to the main loop.
             */
            gen_a64_set_pc_im(dc->pc);
            /* fall through */
        case DISAS_EXIT:
            tcg_gen_exit_tb(NULL, 0);
            break;
        case DISAS_JUMP:
            /* PC already updated by the jump; try a direct TB lookup. */
            tcg_gen_lookup_and_goto_ptr();
            break;
        case DISAS_NORETURN:
        case DISAS_SWI:
            break;
        case DISAS_WFE:
            gen_a64_set_pc_im(dc->pc);
            gen_helper_wfe(cpu_env);
            break;
        case DISAS_YIELD:
            gen_a64_set_pc_im(dc->pc);
            gen_helper_yield(cpu_env);
            break;
        case DISAS_WFI:
            {
                /* The helper takes the number of bytes to step past the
                 * WFI insn on wakeup; 4 for a fixed-width A64 insn.
                 */
                TCGv_i32 tmp = tcg_const_i32(4);

                gen_a64_set_pc_im(dc->pc);
                gen_helper_wfi(cpu_env, tmp);
                tcg_temp_free_i32(tmp);
                /* The helper doesn't necessarily throw an exception, so
                 * we must exit the TB explicitly to recheck interrupts.
                 */
                tcg_gen_exit_tb(NULL, 0);
                break;
            }
        }
    }

    /* Report back how far translation got. */
    dc->base.pc_next = dc->pc;
}
14032
14033static void aarch64_tr_disas_log(const DisasContextBase *dcbase,
14034 CPUState *cpu)
14035{
14036 DisasContext *dc = container_of(dcbase, DisasContext, base);
14037
14038 qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
14039 log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size);
14040}
14041
/* Hook table plugged into the generic translator loop for AArch64. */
const TranslatorOps aarch64_translator_ops = {
    .init_disas_context = aarch64_tr_init_disas_context,
    .tb_start           = aarch64_tr_tb_start,
    .insn_start         = aarch64_tr_insn_start,
    .breakpoint_check   = aarch64_tr_breakpoint_check,
    .translate_insn     = aarch64_tr_translate_insn,
    .tb_stop            = aarch64_tr_tb_stop,
    .disas_log          = aarch64_tr_disas_log,
};
14051