1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19#include "qemu/osdep.h"
20
21#include "cpu.h"
22#include "exec/exec-all.h"
23#include "tcg-op.h"
24#include "tcg-op-gvec.h"
25#include "qemu/log.h"
26#include "arm_ldst.h"
27#include "translate.h"
28#include "internals.h"
29#include "qemu/host-utils.h"
30
31#include "exec/semihost.h"
32#include "exec/gen-icount.h"
33
34#include "exec/helper-proto.h"
35#include "exec/helper-gen.h"
36#include "exec/log.h"
37
38#include "trace-tcg.h"
39#include "translate-a64.h"
40#include "qemu/atomic128.h"
41
/*
 * TCG globals mirroring the AArch64 architectural state in CPUARMState.
 * They are allocated once by a64_translate_init() below.
 */
static TCGv_i64 cpu_X[32];   /* general purpose registers; index 31 is SP */
static TCGv_i64 cpu_pc;

/* TCG global for CPUARMState.exclusive_high (exclusive-monitor data). */
static TCGv_i64 cpu_exclusive_high;

/* Debug names for the cpu_X[] globals; x30 is "lr", x31 is "sp". */
static const char *regnames[] = {
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
    "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
    "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23",
    "x24", "x25", "x26", "x27", "x28", "x29", "lr", "sp"
};
54
/* Operand shift types; the values match the A64 instruction encoding. */
enum a64_shift_type {
    A64_SHIFT_TYPE_LSL = 0,
    A64_SHIFT_TYPE_LSR = 1,
    A64_SHIFT_TYPE_ASR = 2,
    A64_SHIFT_TYPE_ROR = 3
};
61
62
63
64
/* Signature of a per-group instruction decode function. */
typedef void AArch64DecodeFn(DisasContext *s, uint32_t insn);

/* Table entry for lookup_disas_fn(): an insn matches when
 * (insn & mask) == pattern.  A zero mask terminates the table. */
typedef struct AArch64DecodeTable {
    uint32_t pattern;
    uint32_t mask;
    AArch64DecodeFn *disas_fn;
} AArch64DecodeTable;
72
73
/* Function prototypes for the generator helpers used by the Neon /
 * crypto / atomic decode paths in this file. */
typedef void NeonGenOneOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32);
typedef void NeonGenTwoOpFn(TCGv_i32, TCGv_i32, TCGv_i32);
typedef void NeonGenTwoOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32);
typedef void NeonGenTwo64OpFn(TCGv_i64, TCGv_i64, TCGv_i64);
typedef void NeonGenTwo64OpEnvFn(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64);
typedef void NeonGenNarrowFn(TCGv_i32, TCGv_i64);
typedef void NeonGenNarrowEnvFn(TCGv_i32, TCGv_ptr, TCGv_i64);
typedef void NeonGenWidenFn(TCGv_i64, TCGv_i32);
typedef void NeonGenTwoSingleOPFn(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
typedef void NeonGenTwoDoubleOPFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_ptr);
typedef void NeonGenOneOpFn(TCGv_i64, TCGv_i64);
typedef void CryptoTwoOpFn(TCGv_ptr, TCGv_ptr);
typedef void CryptoThreeOpIntFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
typedef void CryptoThreeOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, TCGMemOp);
89
90
/* Register the AArch64 TCG globals declared above; called once at start-up. */
void a64_translate_init(void)
{
    int i;

    cpu_pc = tcg_global_mem_new_i64(cpu_env,
                                    offsetof(CPUARMState, pc),
                                    "pc");
    for (i = 0; i < 32; i++) {
        cpu_X[i] = tcg_global_mem_new_i64(cpu_env,
                                          offsetof(CPUARMState, xregs[i]),
                                          regnames[i]);
    }

    cpu_exclusive_high = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_high), "exclusive_high");
}
107
/*
 * Return the core MMU index to use for an "unprivileged" access
 * (e.g. LDTR/STTR): map the current EL1 regime to its EL0 equivalent,
 * otherwise keep the current translation regime unchanged.
 */
static inline int get_a64_user_mem_index(DisasContext *s)
{
    ARMMMUIdx useridx;

    switch (s->mmu_idx) {
    case ARMMMUIdx_S12NSE1:
        useridx = ARMMMUIdx_S12NSE0;
        break;
    case ARMMMUIdx_S1SE1:
        useridx = ARMMMUIdx_S1SE0;
        break;
    case ARMMMUIdx_S2NS:
        /* Stage 2 translation is never the current regime for code here. */
        g_assert_not_reached();
    default:
        useridx = s->mmu_idx;
        break;
    }
    return arm_to_core_mmu_idx(useridx);
}
130
/*
 * Dump CPU state to @f for debugging: PC, X registers, PSTATE, then
 * (when CPU_DUMP_FPU is set) FPCR/FPSR and either the SVE Z/P registers
 * or the AdvSIMD Q registers.
 */
void aarch64_cpu_dump_state(CPUState *cs, FILE *f,
                            fprintf_function cpu_fprintf, int flags)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t psr = pstate_read(env);
    int i;
    int el = arm_current_el(env);
    const char *ns_status;

    cpu_fprintf(f, " PC=%016" PRIx64 " ", env->pc);
    for (i = 0; i < 32; i++) {
        if (i == 31) {
            cpu_fprintf(f, " SP=%016" PRIx64 "\n", env->xregs[i]);
        } else {
            /* three registers per output line */
            cpu_fprintf(f, "X%02d=%016" PRIx64 "%s", i, env->xregs[i],
                        (i + 2) % 3 ? " " : "\n");
        }
    }

    /* Only show the NS/S annotation when EL3 exists and we are below it. */
    if (arm_feature(env, ARM_FEATURE_EL3) && el != 3) {
        ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
    } else {
        ns_status = "";
    }
    cpu_fprintf(f, "PSTATE=%08x %c%c%c%c %sEL%d%c",
                psr,
                psr & PSTATE_N ? 'N' : '-',
                psr & PSTATE_Z ? 'Z' : '-',
                psr & PSTATE_C ? 'C' : '-',
                psr & PSTATE_V ? 'V' : '-',
                ns_status,
                el,
                psr & PSTATE_SP ? 'h' : 't');

    if (!(flags & CPU_DUMP_FPU)) {
        cpu_fprintf(f, "\n");
        return;
    }
    if (fp_exception_el(env, el) != 0) {
        cpu_fprintf(f, "    FPU disabled\n");
        return;
    }
    cpu_fprintf(f, "     FPCR=%08x FPSR=%08x\n",
                vfp_get_fpcr(env), vfp_get_fpsr(env));

    if (cpu_isar_feature(aa64_sve, cpu) && sve_exception_el(env, el) == 0) {
        int j, zcr_len = sve_zcr_len_for_el(env, el);

        /* Predicate registers P0..P15 plus FFR. */
        for (i = 0; i <= FFR_PRED_NUM; i++) {
            bool eol;
            if (i == FFR_PRED_NUM) {
                cpu_fprintf(f, "FFR=");
                /* FFR is the last entry; always finish the line. */
                eol = true;
            } else {
                cpu_fprintf(f, "P%02d=", i);
                /* Choose how many predicates fit on one line for this
                 * vector length. */
                switch (zcr_len) {
                case 0:
                    eol = i % 8 == 7;
                    break;
                case 1:
                    eol = i % 6 == 5;
                    break;
                case 2:
                case 3:
                    eol = i % 3 == 2;
                    break;
                default:
                    /* wider vectors: one predicate per line */
                    eol = true;
                    break;
                }
            }
            for (j = zcr_len / 4; j >= 0; j--) {
                int digits;
                if (j * 4 + 4 <= zcr_len + 1) {
                    digits = 16;
                } else {
                    digits = (zcr_len % 4 + 1) * 4;
                }
                cpu_fprintf(f, "%0*" PRIx64 "%s", digits,
                            env->vfp.pregs[i].p[j],
                            j ? ":" : eol ? "\n" : " ");
            }
        }

        /* Vector registers Z0..Z31, widest-to-narrowest layout by length. */
        for (i = 0; i < 32; i++) {
            if (zcr_len == 0) {
                cpu_fprintf(f, "Z%02d=%016" PRIx64 ":%016" PRIx64 "%s",
                            i, env->vfp.zregs[i].d[1],
                            env->vfp.zregs[i].d[0], i & 1 ? "\n" : " ");
            } else if (zcr_len == 1) {
                cpu_fprintf(f, "Z%02d=%016" PRIx64 ":%016" PRIx64
                            ":%016" PRIx64 ":%016" PRIx64 "\n",
                            i, env->vfp.zregs[i].d[3], env->vfp.zregs[i].d[2],
                            env->vfp.zregs[i].d[1], env->vfp.zregs[i].d[0]);
            } else {
                for (j = zcr_len; j >= 0; j--) {
                    bool odd = (zcr_len - j) % 2 != 0;
                    if (j == zcr_len) {
                        cpu_fprintf(f, "Z%02d[%x-%x]=", i, j, j - 1);
                    } else if (!odd) {
                        if (j > 0) {
                            cpu_fprintf(f, "   [%x-%x]=", j, j - 1);
                        } else {
                            cpu_fprintf(f, "     [%x]=", j);
                        }
                    }
                    cpu_fprintf(f, "%016" PRIx64 ":%016" PRIx64 "%s",
                                env->vfp.zregs[i].d[j * 2 + 1],
                                env->vfp.zregs[i].d[j * 2],
                                odd || j == 0 ? "\n" : ":");
                }
            }
        }
    } else {
        /* No SVE (or trapped): dump the 128-bit Q registers instead. */
        for (i = 0; i < 32; i++) {
            uint64_t *q = aa64_vfp_qreg(env, i);
            cpu_fprintf(f, "Q%02d=%016" PRIx64 ":%016" PRIx64 "%s",
                        i, q[1], q[0], (i & 1 ? "\n" : " "));
        }
    }
}
255
/* Emit code setting PC to the compile-time constant @val. */
void gen_a64_set_pc_im(uint64_t val)
{
    tcg_gen_movi_i64(cpu_pc, val);
}
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
/*
 * Emit code setting PC from @src, applying the Top Byte Ignore (TBI)
 * rules in force for the current translation regime:
 *  - EL0/EL1: TBI0 and TBI1 may apply independently, selected by
 *    address bit 55; an ignored top byte is replaced by a sign
 *    extension of bit 55.
 *  - EL2/EL3: a single TBI bit applies and the top byte is zeroed.
 */
static void gen_a64_set_pc(DisasContext *s, TCGv_i64 src)
{
    if (s->current_el <= 1) {
        if (s->tbi0 && s->tbi1) {
            TCGv_i64 tmp_reg = tcg_temp_new_i64();
            /* Both halves tag-ignored: sign-extend from bit 55
             * via a shift-left/arithmetic-shift-right pair. */
            tcg_gen_shli_i64(tmp_reg, src, 8);
            tcg_gen_sari_i64(cpu_pc, tmp_reg, 8);
            tcg_temp_free_i64(tmp_reg);
        } else if (!s->tbi0 && !s->tbi1) {
            /* No tag to ignore: copy the address unchanged. */
            tcg_gen_mov_i64(cpu_pc, src);
        } else {
            /* Only one of TBI0/TBI1 set: select at runtime on bit 55. */
            TCGv_i64 tcg_tmpval = tcg_temp_new_i64();
            TCGv_i64 tcg_bit55 = tcg_temp_new_i64();
            TCGv_i64 tcg_zero = tcg_const_i64(0);

            tcg_gen_andi_i64(tcg_bit55, src, (1ull << 55));

            if (s->tbi0) {
                /* tbi0 only: clear top byte when bit 55 is clear. */
                tcg_gen_andi_i64(tcg_tmpval, src,
                                 0x00FFFFFFFFFFFFFFull);
                tcg_gen_movcond_i64(TCG_COND_EQ, cpu_pc, tcg_bit55, tcg_zero,
                                    tcg_tmpval, src);
            } else {
                /* tbi1 only: set top byte when bit 55 is set. */
                tcg_gen_ori_i64(tcg_tmpval, src,
                                0xFF00000000000000ull);
                tcg_gen_movcond_i64(TCG_COND_NE, cpu_pc, tcg_bit55, tcg_zero,
                                    tcg_tmpval, src);
            }
            tcg_temp_free_i64(tcg_zero);
            tcg_temp_free_i64(tcg_bit55);
            tcg_temp_free_i64(tcg_tmpval);
        }
    } else {
        if (s->tbi0) {
            /* EL2/EL3 with TBI: force the top byte to zero. */
            tcg_gen_andi_i64(cpu_pc, src, 0x00FFFFFFFFFFFFFFull);
        } else {
            tcg_gen_mov_i64(cpu_pc, src);
        }
    }
}
330
/* 64-bit analogue of DisasCompare: condition plus the value to test. */
typedef struct DisasCompare64 {
    TCGCond cond;
    TCGv_i64 value;
} DisasCompare64;
335
/*
 * Build a 64-bit comparison for condition code @cc by widening the
 * 32-bit result of arm_test_cc().  Caller must release it with
 * a64_free_cc().
 */
static void a64_test_cc(DisasCompare64 *c64, int cc)
{
    DisasCompare c32;

    arm_test_cc(&c32, cc);

    /* Sign-extend the 32-bit value; the condition itself carries over. */
    c64->cond = c32.cond;
    c64->value = tcg_temp_new_i64();
    tcg_gen_ext_i32_i64(c64->value, c32.value);

    arm_free_cc(&c32);
}
350
/* Release the temporary allocated by a64_test_cc(). */
static void a64_free_cc(DisasCompare64 *c64)
{
    tcg_temp_free_i64(c64->value);
}
355
/* Emit code raising a QEMU-internal exception @excp (not architectural). */
static void gen_exception_internal(int excp)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);

    assert(excp_is_internal(excp));
    gen_helper_exception_internal(cpu_env, tcg_excp);
    tcg_temp_free_i32(tcg_excp);
}
364
/* Emit code raising exception @excp with @syndrome, taken to @target_el. */
static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);
    TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
    TCGv_i32 tcg_el = tcg_const_i32(target_el);

    gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
                                       tcg_syn, tcg_el);
    tcg_temp_free_i32(tcg_el);
    tcg_temp_free_i32(tcg_syn);
    tcg_temp_free_i32(tcg_excp);
}
377
/* Raise an internal exception at insn address s->pc - offset and
 * terminate the translation block. */
static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
{
    gen_a64_set_pc_im(s->pc - offset);
    gen_exception_internal(excp);
    s->base.is_jmp = DISAS_NORETURN;
}
384
/* Raise an architectural exception at insn address s->pc - offset and
 * terminate the translation block. */
static void gen_exception_insn(DisasContext *s, int offset, int excp,
                               uint32_t syndrome, uint32_t target_el)
{
    gen_a64_set_pc_im(s->pc - offset);
    gen_exception(excp, syndrome, target_el);
    s->base.is_jmp = DISAS_NORETURN;
}
392
/* Raise a BKPT exception (syndrome @syndrome) at s->pc - offset and
 * terminate the translation block. */
static void gen_exception_bkpt_insn(DisasContext *s, int offset,
                                    uint32_t syndrome)
{
    TCGv_i32 tcg_syn;

    gen_a64_set_pc_im(s->pc - offset);
    tcg_syn = tcg_const_i32(syndrome);
    gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
    tcg_temp_free_i32(tcg_syn);
    s->base.is_jmp = DISAS_NORETURN;
}
404
/*
 * Software-step state machine: if we are in an active-not-pending state,
 * clear PSTATE.SS (both the cached translator copy and the CPU state) so
 * the next instruction will trigger a step exception.
 */
static void gen_ss_advance(DisasContext *s)
{
    if (s->ss_active) {
        s->pstate_ss = 0;
        gen_helper_clear_pstate_ss(cpu_env);
    }
}
415
/*
 * Generate the software-step "step completed" exception for the just
 * executed instruction, advancing the step state machine first.  The
 * caller must have already set the PC for the *next* instruction.
 */
static void gen_step_complete_exception(DisasContext *s)
{
    gen_ss_advance(s);
    gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
                  default_exception_el(s));
    s->base.is_jmp = DISAS_NORETURN;
}
432
/*
 * Decide whether a direct TB-to-TB jump to @dest is permitted: not while
 * single-stepping (gdb or architectural) or replaying the last I/O insn,
 * and (in system mode) only within the same guest page as this TB.
 */
static inline bool use_goto_tb(DisasContext *s, int n, uint64_t dest)
{
    if (s->base.singlestep_enabled || s->ss_active ||
        (tb_cflags(s->base.tb) & CF_LAST_IO)) {
        return false;
    }

#ifndef CONFIG_USER_ONLY
    /* Chaining across pages would bypass per-page protection checks. */
    if ((s->base.tb->pc & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) {
        return false;
    }
#endif

    return true;
}
452
/*
 * Emit a jump to @dest, using TB chaining slot @n when allowed, else
 * falling back to setting the PC and exiting via the appropriate
 * single-step exception or the indirect TB lookup.
 */
static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest)
{
    TranslationBlock *tb;

    tb = s->base.tb;
    if (use_goto_tb(s, n, dest)) {
        /* goto_tb must precede the PC store so the jump can be patched. */
        tcg_gen_goto_tb(n);
        gen_a64_set_pc_im(dest);
        tcg_gen_exit_tb(tb, n);
        s->base.is_jmp = DISAS_NORETURN;
    } else {
        gen_a64_set_pc_im(dest);
        if (s->ss_active) {
            gen_step_complete_exception(s);
        } else if (s->base.singlestep_enabled) {
            gen_exception_internal(EXCP_DEBUG);
        } else {
            tcg_gen_lookup_and_goto_ptr();
            s->base.is_jmp = DISAS_NORETURN;
        }
    }
}
475
/* Raise the UNDEFINED exception for an unallocated instruction encoding.
 * The offset of 4 rewinds the PC to the offending instruction. */
void unallocated_encoding(DisasContext *s)
{
    gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
}
482
/* Reset the per-instruction temporary pool used by new_tmp_a64(). */
static void init_tmp_a64_array(DisasContext *s)
{
#ifdef CONFIG_DEBUG_TCG
    /* Poison stale entries so use-after-free is caught in debug builds. */
    memset(s->tmp_a64, 0, sizeof(s->tmp_a64));
#endif
    s->tmp_a64_count = 0;
}
490
491static void free_tmp_a64(DisasContext *s)
492{
493 int i;
494 for (i = 0; i < s->tmp_a64_count; i++) {
495 tcg_temp_free_i64(s->tmp_a64[i]);
496 }
497 init_tmp_a64_array(s);
498}
499
/* Allocate a tracked i64 temporary, released later by free_tmp_a64(). */
TCGv_i64 new_tmp_a64(DisasContext *s)
{
    assert(s->tmp_a64_count < TMP_A64_MAX);
    return s->tmp_a64[s->tmp_a64_count++] = tcg_temp_new_i64();
}
505
/* Allocate a tracked i64 temporary initialised to zero. */
TCGv_i64 new_tmp_a64_zero(DisasContext *s)
{
    TCGv_i64 t = new_tmp_a64(s);
    tcg_gen_movi_i64(t, 0);
    return t;
}
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528TCGv_i64 cpu_reg(DisasContext *s, int reg)
529{
530 if (reg == 31) {
531 return new_tmp_a64_zero(s);
532 } else {
533 return cpu_X[reg];
534 }
535}
536
537
/* Return the TCG value for @reg in a context where register 31 means SP. */
TCGv_i64 cpu_reg_sp(DisasContext *s, int reg)
{
    return cpu_X[reg];
}
542
543
544
545
546
547TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf)
548{
549 TCGv_i64 v = new_tmp_a64(s);
550 if (reg != 31) {
551 if (sf) {
552 tcg_gen_mov_i64(v, cpu_X[reg]);
553 } else {
554 tcg_gen_ext32u_i64(v, cpu_X[reg]);
555 }
556 } else {
557 tcg_gen_movi_i64(v, 0);
558 }
559 return v;
560}
561
562TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf)
563{
564 TCGv_i64 v = new_tmp_a64(s);
565 if (sf) {
566 tcg_gen_mov_i64(v, cpu_X[reg]);
567 } else {
568 tcg_gen_ext32u_i64(v, cpu_X[reg]);
569 }
570 return v;
571}
572
573
574
575
576
577
/* CPUARMState offset of the least significant element of FP register
 * @regno for an access of width @size. */
static inline int fp_reg_offset(DisasContext *s, int regno, TCGMemOp size)
{
    return vec_reg_offset(s, regno, 0, size);
}
582
583
/* CPUARMState offset of the high (second) 64-bit half of FP register
 * @regno. */
static inline int fp_reg_hi_offset(DisasContext *s, int regno)
{
    return vec_reg_offset(s, regno, 1, MO_64);
}
588
589
590
591
592
593
594
/* Load the D (low 64-bit) view of FP register @reg into a new i64 temp;
 * caller frees. */
static TCGv_i64 read_fp_dreg(DisasContext *s, int reg)
{
    TCGv_i64 v = tcg_temp_new_i64();

    tcg_gen_ld_i64(v, cpu_env, fp_reg_offset(s, reg, MO_64));
    return v;
}
602
/* Load the S (low 32-bit) view of FP register @reg into a new i32 temp;
 * caller frees. */
static TCGv_i32 read_fp_sreg(DisasContext *s, int reg)
{
    TCGv_i32 v = tcg_temp_new_i32();

    tcg_gen_ld_i32(v, cpu_env, fp_reg_offset(s, reg, MO_32));
    return v;
}
610
/* Load the H (low 16-bit) view of FP register @reg, zero-extended into a
 * new i32 temp; caller frees. */
static TCGv_i32 read_fp_hreg(DisasContext *s, int reg)
{
    TCGv_i32 v = tcg_temp_new_i32();

    tcg_gen_ld16u_i32(v, cpu_env, fp_reg_offset(s, reg, MO_16));
    return v;
}
618
619
620
621
/*
 * Zero the bits of vector register @rd above the written part: bits
 * [127:64] when the write was 64-bit (!is_q), and everything beyond
 * 128 bits when the vector length (SVE) is larger.
 */
static void clear_vec_high(DisasContext *s, bool is_q, int rd)
{
    unsigned ofs = fp_reg_offset(s, rd, MO_64);
    unsigned vsz = vec_full_reg_size(s);

    if (!is_q) {
        TCGv_i64 tcg_zero = tcg_const_i64(0);
        tcg_gen_st_i64(tcg_zero, cpu_env, ofs + 8);
        tcg_temp_free_i64(tcg_zero);
    }
    if (vsz > 16) {
        tcg_gen_gvec_dup8i(ofs + 16, vsz - 16, vsz - 16, 0);
    }
}
636
/* Store @v to the D view of FP register @reg, zeroing the upper bits. */
void write_fp_dreg(DisasContext *s, int reg, TCGv_i64 v)
{
    unsigned ofs = fp_reg_offset(s, reg, MO_64);

    tcg_gen_st_i64(v, cpu_env, ofs);
    clear_vec_high(s, false, reg);
}
644
/* Store 32-bit @v to FP register @reg, zero-extending and clearing the
 * rest of the vector register. */
static void write_fp_sreg(DisasContext *s, int reg, TCGv_i32 v)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp, v);
    write_fp_dreg(s, reg, tmp);
    tcg_temp_free_i64(tmp);
}
653
654TCGv_ptr get_fpstatus_ptr(bool is_f16)
655{
656 TCGv_ptr statusptr = tcg_temp_new_ptr();
657 int offset;
658
659
660
661
662
663
664 if (is_f16) {
665 offset = offsetof(CPUARMState, vfp.fp_status_f16);
666 } else {
667 offset = offsetof(CPUARMState, vfp.fp_status);
668 }
669 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
670 return statusptr;
671}
672
673
/* Expand a 2-operand gvec expander over registers rd, rn. */
static void gen_gvec_fn2(DisasContext *s, bool is_q, int rd, int rn,
                         GVecGen2Fn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            is_q ? 16 : 8, vec_full_reg_size(s));
}
680
681
682
683
/* Expand a 2-operand + immediate gvec expander over registers rd, rn. */
static void gen_gvec_fn2i(DisasContext *s, bool is_q, int rd, int rn,
                          int64_t imm, GVecGen2iFn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            imm, is_q ? 16 : 8, vec_full_reg_size(s));
}
690
691
/* Expand a 3-operand gvec expander over registers rd, rn, rm. */
static void gen_gvec_fn3(DisasContext *s, bool is_q, int rd, int rn, int rm,
                         GVecGen3Fn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            vec_full_reg_offset(s, rm), is_q ? 16 : 8, vec_full_reg_size(s));
}
698
699
700
701
/* Expand a 2-operand + immediate operation via a GVecGen2i descriptor. */
static void gen_gvec_op2i(DisasContext *s, bool is_q, int rd,
                          int rn, int64_t imm, const GVecGen2i *gvec_op)
{
    tcg_gen_gvec_2i(vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
                    is_q ? 16 : 8, vec_full_reg_size(s), imm, gvec_op);
}
708
709
/* Expand a 3-operand operation via a GVecGen3 descriptor. */
static void gen_gvec_op3(DisasContext *s, bool is_q, int rd,
                         int rn, int rm, const GVecGen3 *gvec_op)
{
    tcg_gen_gvec_3(vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
                   vec_full_reg_offset(s, rm), is_q ? 16 : 8,
                   vec_full_reg_size(s), gvec_op);
}
717
718
/* Expand a 3-operand operation via an out-of-line helper @fn with
 * extra data word @data. */
static void gen_gvec_op3_ool(DisasContext *s, bool is_q, int rd,
                             int rn, int rm, int data, gen_helper_gvec_3 *fn)
{
    tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm),
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
}
727
728
729
730
/* Expand a 3-operand out-of-line helper that also takes cpu_env. */
static void gen_gvec_op3_env(DisasContext *s, bool is_q, int rd,
                             int rn, int rm, gen_helper_gvec_3_ptr *fn)
{
    tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm), cpu_env,
                       is_q ? 16 : 8, vec_full_reg_size(s), 0, fn);
}
739
740
741
742
/* Expand a 3-operand out-of-line helper that takes an fpstatus pointer
 * (selecting the FP16 status block when @is_fp16). */
static void gen_gvec_op3_fpst(DisasContext *s, bool is_q, int rd, int rn,
                              int rm, bool is_fp16, int data,
                              gen_helper_gvec_3_ptr *fn)
{
    TCGv_ptr fpst = get_fpstatus_ptr(is_fp16);
    tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm), fpst,
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
    tcg_temp_free_ptr(fpst);
}
754
755
756
757
/*
 * Set the NF and ZF flags from a 64-bit @result.  NF holds the high word
 * (bit 31 of NF is bit 63 of the result); ZF is the OR of both halves so
 * it is zero iff the whole 64-bit result is zero.
 */
static inline void gen_set_NZ64(TCGv_i64 result)
{
    tcg_gen_extr_i64_i32(cpu_ZF, cpu_NF, result);
    tcg_gen_or_i32(cpu_ZF, cpu_ZF, cpu_NF);
}
763
764
/* Set NZCV from a logical-operation @result: C and V are always clear. */
static inline void gen_logic_CC(int sf, TCGv_i64 result)
{
    if (sf) {
        gen_set_NZ64(result);
    } else {
        /* 32-bit: NF and ZF are both the low word of the result. */
        tcg_gen_extrl_i64_i32(cpu_ZF, result);
        tcg_gen_mov_i32(cpu_NF, cpu_ZF);
    }
    tcg_gen_movi_i32(cpu_CF, 0);
    tcg_gen_movi_i32(cpu_VF, 0);
}
776
777
/* dest = t0 + t1, also setting the NZCV flags (ADDS semantics). */
static void gen_add_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        TCGv_i64 result, flag, tmp;
        result = tcg_temp_new_i64();
        flag = tcg_temp_new_i64();
        tmp = tcg_temp_new_i64();

        /* Double-word add: the high output word is the carry out. */
        tcg_gen_movi_i64(tmp, 0);
        tcg_gen_add2_i64(result, flag, t0, tmp, t1, tmp);

        tcg_gen_extrl_i64_i32(cpu_CF, flag);

        gen_set_NZ64(result);

        /* V = (result ^ t0) & ~(t0 ^ t1): overflow iff the operands agree
         * in sign but the result differs. */
        tcg_gen_xor_i64(flag, result, t0);
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_andc_i64(flag, flag, tmp);
        tcg_temp_free_i64(tmp);
        tcg_gen_extrh_i64_i32(cpu_VF, flag);

        tcg_gen_mov_i64(dest, result);
        tcg_temp_free_i64(result);
        tcg_temp_free_i64(flag);
    } else {
        /* 32-bit: compute directly into the flag globals. */
        TCGv_i32 t0_32 = tcg_temp_new_i32();
        TCGv_i32 t1_32 = tcg_temp_new_i32();
        TCGv_i32 tmp = tcg_temp_new_i32();

        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_extrl_i64_i32(t0_32, t0);
        tcg_gen_extrl_i64_i32(t1_32, t1);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, t1_32, tmp);
        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
        /* dest gets the zero-extended 32-bit result. */
        tcg_gen_extu_i32_i64(dest, cpu_NF);

        tcg_temp_free_i32(tmp);
        tcg_temp_free_i32(t0_32);
        tcg_temp_free_i32(t1_32);
    }
}
823
824
/* dest = t0 - t1, also setting the NZCV flags (SUBS semantics). */
static void gen_sub_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        /* 64 bit arithmetic */
        TCGv_i64 result, flag, tmp;

        result = tcg_temp_new_i64();
        flag = tcg_temp_new_i64();
        tcg_gen_sub_i64(result, t0, t1);

        gen_set_NZ64(result);

        /* ARM carry for subtraction is NOT borrow: C set when t0 >= t1. */
        tcg_gen_setcond_i64(TCG_COND_GEU, flag, t0, t1);
        tcg_gen_extrl_i64_i32(cpu_CF, flag);

        /* V = (result ^ t0) & (t0 ^ t1): overflow iff the operands differ
         * in sign and the result's sign differs from t0. */
        tcg_gen_xor_i64(flag, result, t0);
        tmp = tcg_temp_new_i64();
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_and_i64(flag, flag, tmp);
        tcg_temp_free_i64(tmp);
        tcg_gen_extrh_i64_i32(cpu_VF, flag);
        tcg_gen_mov_i64(dest, result);
        tcg_temp_free_i64(flag);
        tcg_temp_free_i64(result);
    } else {
        /* 32 bit arithmetic */
        TCGv_i32 t0_32 = tcg_temp_new_i32();
        TCGv_i32 t1_32 = tcg_temp_new_i32();
        TCGv_i32 tmp;

        tcg_gen_extrl_i64_i32(t0_32, t0);
        tcg_gen_extrl_i64_i32(t1_32, t1);
        tcg_gen_sub_i32(cpu_NF, t0_32, t1_32);
        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0_32, t1_32);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_temp_free_i32(t0_32);
        tcg_temp_free_i32(t1_32);
        tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
        tcg_temp_free_i32(tmp);
        tcg_gen_extu_i32_i64(dest, cpu_NF);
    }
}
870
871
/* dest = t0 + t1 + CF, without touching the flags (ADC semantics). */
static void gen_adc(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    TCGv_i64 flag = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(flag, cpu_CF);
    tcg_gen_add_i64(dest, t0, t1);
    tcg_gen_add_i64(dest, dest, flag);
    tcg_temp_free_i64(flag);

    if (!sf) {
        /* 32-bit form: truncate to the W-register view. */
        tcg_gen_ext32u_i64(dest, dest);
    }
}
884
885
/* dest = t0 + t1 + CF, also setting the NZCV flags (ADCS semantics). */
static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        TCGv_i64 result, cf_64, vf_64, tmp;
        result = tcg_temp_new_i64();
        cf_64 = tcg_temp_new_i64();
        vf_64 = tcg_temp_new_i64();
        tmp = tcg_const_i64(0);

        /* Two chained double-word adds accumulate the carry in cf_64. */
        tcg_gen_extu_i32_i64(cf_64, cpu_CF);
        tcg_gen_add2_i64(result, cf_64, t0, tmp, cf_64, tmp);
        tcg_gen_add2_i64(result, cf_64, result, cf_64, t1, tmp);
        tcg_gen_extrl_i64_i32(cpu_CF, cf_64);
        gen_set_NZ64(result);

        /* V = (result ^ t0) & ~(t0 ^ t1), as for gen_add_CC. */
        tcg_gen_xor_i64(vf_64, result, t0);
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_andc_i64(vf_64, vf_64, tmp);
        tcg_gen_extrh_i64_i32(cpu_VF, vf_64);

        tcg_gen_mov_i64(dest, result);

        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(vf_64);
        tcg_temp_free_i64(cf_64);
        tcg_temp_free_i64(result);
    } else {
        TCGv_i32 t0_32, t1_32, tmp;
        t0_32 = tcg_temp_new_i32();
        t1_32 = tcg_temp_new_i32();
        tmp = tcg_const_i32(0);

        tcg_gen_extrl_i64_i32(t0_32, t0);
        tcg_gen_extrl_i64_i32(t1_32, t1);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1_32, tmp);

        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
        tcg_gen_extu_i32_i64(dest, cpu_NF);

        tcg_temp_free_i32(tmp);
        tcg_temp_free_i32(t1_32);
        tcg_temp_free_i32(t0_32);
    }
}
934
935
936
937
938
939
940
941
/*
 * Store a general-purpose value to memory through MMU index @memidx.
 * @size is log2 of the access size (0..3).  When @iss_valid, record an
 * instruction-specific syndrome describing the access for data aborts.
 */
static void do_gpr_st_memidx(DisasContext *s, TCGv_i64 source,
                             TCGv_i64 tcg_addr, int size, int memidx,
                             bool iss_valid,
                             unsigned int iss_srt,
                             bool iss_sf, bool iss_ar)
{
    g_assert(size <= 3);
    tcg_gen_qemu_st_i64(source, tcg_addr, memidx, s->be_data + size);

    if (iss_valid) {
        uint32_t syn;

        syn = syn_data_abort_with_iss(0,
                                      size,
                                      false,
                                      iss_srt,
                                      iss_sf,
                                      iss_ar,
                                      0, 0, 0, 0, 0, false);
        disas_set_insn_syndrome(s, syn);
    }
}
964
/* As do_gpr_st_memidx(), using the current privilege's MMU index. */
static void do_gpr_st(DisasContext *s, TCGv_i64 source,
                      TCGv_i64 tcg_addr, int size,
                      bool iss_valid,
                      unsigned int iss_srt,
                      bool iss_sf, bool iss_ar)
{
    do_gpr_st_memidx(s, source, tcg_addr, size, get_mem_index(s),
                     iss_valid, iss_srt, iss_sf, iss_ar);
}
974
975
976
977
/*
 * Load a general-purpose value from memory through MMU index @memidx.
 * @size is log2 of the access size; @is_signed sign-extends the loaded
 * value; @extend then re-zero-extends to the 32-bit (W-register) view.
 * When @iss_valid, record the instruction-specific syndrome.
 */
static void do_gpr_ld_memidx(DisasContext *s,
                             TCGv_i64 dest, TCGv_i64 tcg_addr,
                             int size, bool is_signed,
                             bool extend, int memidx,
                             bool iss_valid, unsigned int iss_srt,
                             bool iss_sf, bool iss_ar)
{
    TCGMemOp memop = s->be_data + size;

    g_assert(size <= 3);

    if (is_signed) {
        memop += MO_SIGN;
    }

    tcg_gen_qemu_ld_i64(dest, tcg_addr, memidx, memop);

    if (extend && is_signed) {
        /* Signed sub-64-bit value destined for a W register: keep the
         * 32-bit sign extension but clear the upper 32 bits. */
        g_assert(size < 3);
        tcg_gen_ext32u_i64(dest, dest);
    }

    if (iss_valid) {
        uint32_t syn;

        syn = syn_data_abort_with_iss(0,
                                      size,
                                      is_signed,
                                      iss_srt,
                                      iss_sf,
                                      iss_ar,
                                      0, 0, 0, 0, 0, false);
        disas_set_insn_syndrome(s, syn);
    }
}
1013
/* As do_gpr_ld_memidx(), using the current privilege's MMU index. */
static void do_gpr_ld(DisasContext *s,
                      TCGv_i64 dest, TCGv_i64 tcg_addr,
                      int size, bool is_signed, bool extend,
                      bool iss_valid, unsigned int iss_srt,
                      bool iss_sf, bool iss_ar)
{
    do_gpr_ld_memidx(s, dest, tcg_addr, size, is_signed, extend,
                     get_mem_index(s),
                     iss_valid, iss_srt, iss_sf, iss_ar);
}
1024
1025
1026
1027
/*
 * Store from FP register @srcidx to memory.  @size is log2 of the access
 * size; size == 4 means a 128-bit store done as two 64-bit accesses whose
 * order depends on target endianness.
 */
static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, int size)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_ld_i64(tmp, cpu_env, fp_reg_offset(s, srcidx, MO_64));
    if (size < 4) {
        tcg_gen_qemu_st_i64(tmp, tcg_addr, get_mem_index(s),
                            s->be_data + size);
    } else {
        bool be = s->be_data == MO_BE;
        TCGv_i64 tcg_hiaddr = tcg_temp_new_i64();

        tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
        /* Low 64 bits go to the address holding the LS part for this
         * endianness, high 64 bits to the other. */
        tcg_gen_qemu_st_i64(tmp, be ? tcg_hiaddr : tcg_addr, get_mem_index(s),
                            s->be_data | MO_Q);
        tcg_gen_ld_i64(tmp, cpu_env, fp_reg_hi_offset(s, srcidx));
        tcg_gen_qemu_st_i64(tmp, be ? tcg_addr : tcg_hiaddr, get_mem_index(s),
                            s->be_data | MO_Q);
        tcg_temp_free_i64(tcg_hiaddr);
    }

    tcg_temp_free_i64(tmp);
}
1051
1052
1053
1054
/*
 * Load into FP register @destidx from memory.  @size is log2 of the
 * access size; sub-128-bit loads zero the high half, and the rest of
 * the vector register is cleared afterwards.
 */
static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size)
{
    TCGv_i64 tmplo = tcg_temp_new_i64();
    TCGv_i64 tmphi;

    if (size < 4) {
        TCGMemOp memop = s->be_data + size;
        tmphi = tcg_const_i64(0);
        tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), memop);
    } else {
        bool be = s->be_data == MO_BE;
        TCGv_i64 tcg_hiaddr;

        tmphi = tcg_temp_new_i64();
        tcg_hiaddr = tcg_temp_new_i64();

        tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
        /* Halves are swapped for big-endian, mirroring do_fp_st(). */
        tcg_gen_qemu_ld_i64(tmplo, be ? tcg_hiaddr : tcg_addr, get_mem_index(s),
                            s->be_data | MO_Q);
        tcg_gen_qemu_ld_i64(tmphi, be ? tcg_addr : tcg_hiaddr, get_mem_index(s),
                            s->be_data | MO_Q);
        tcg_temp_free_i64(tcg_hiaddr);
    }

    tcg_gen_st_i64(tmplo, cpu_env, fp_reg_offset(s, destidx, MO_64));
    tcg_gen_st_i64(tmphi, cpu_env, fp_reg_hi_offset(s, destidx));

    tcg_temp_free_i64(tmplo);
    tcg_temp_free_i64(tmphi);

    clear_vec_high(s, true, destidx);
}
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
/*
 * Read element @element of vector register @srcidx into @tcg_dest.
 * @memop gives the element size and whether to sign-extend.
 */
static void read_vec_element(DisasContext *s, TCGv_i64 tcg_dest, int srcidx,
                             int element, TCGMemOp memop)
{
    int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_ld8u_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_ld16u_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_32:
        tcg_gen_ld32u_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_8|MO_SIGN:
        tcg_gen_ld8s_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16|MO_SIGN:
        tcg_gen_ld16s_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_32|MO_SIGN:
        tcg_gen_ld32s_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_64:
    case MO_64|MO_SIGN:
        /* Full width: sign flag is irrelevant. */
        tcg_gen_ld_i64(tcg_dest, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}
1133
/* 32-bit destination variant of read_vec_element(); max element size is
 * MO_32. */
static void read_vec_element_i32(DisasContext *s, TCGv_i32 tcg_dest, int srcidx,
                                 int element, TCGMemOp memop)
{
    int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_ld8u_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_ld16u_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_8|MO_SIGN:
        tcg_gen_ld8s_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16|MO_SIGN:
        tcg_gen_ld16s_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_32:
    case MO_32|MO_SIGN:
        /* Full width: sign flag is irrelevant. */
        tcg_gen_ld_i32(tcg_dest, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}
1159
1160
/* Write @tcg_src to element @element of vector register @destidx;
 * @memop gives the element size. */
static void write_vec_element(DisasContext *s, TCGv_i64 tcg_src, int destidx,
                              int element, TCGMemOp memop)
{
    int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_st8_i64(tcg_src, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_st16_i64(tcg_src, cpu_env, vect_off);
        break;
    case MO_32:
        tcg_gen_st32_i64(tcg_src, cpu_env, vect_off);
        break;
    case MO_64:
        tcg_gen_st_i64(tcg_src, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}
1182
/* 32-bit source variant of write_vec_element(); max element size MO_32. */
static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src,
                                  int destidx, int element, TCGMemOp memop)
{
    int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_st8_i32(tcg_src, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_st16_i32(tcg_src, cpu_env, vect_off);
        break;
    case MO_32:
        tcg_gen_st_i32(tcg_src, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}
1201
1202
/* Store element @element (log2 size @size) of vector register @srcidx to
 * memory with explicit endianness @endian. */
static void do_vec_st(DisasContext *s, int srcidx, int element,
                      TCGv_i64 tcg_addr, int size, TCGMemOp endian)
{
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();

    read_vec_element(s, tcg_tmp, srcidx, element, size);
    tcg_gen_qemu_st_i64(tcg_tmp, tcg_addr, get_mem_index(s), endian | size);

    tcg_temp_free_i64(tcg_tmp);
}
1213
1214
/* Load element @element (log2 size @size) of vector register @destidx
 * from memory with explicit endianness @endian. */
static void do_vec_ld(DisasContext *s, int destidx, int element,
                      TCGv_i64 tcg_addr, int size, TCGMemOp endian)
{
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();

    tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr, get_mem_index(s), endian | size);
    write_vec_element(s, tcg_tmp, destidx, element, size);

    tcg_temp_free_i64(tcg_tmp);
}
1225
1226
1227
1228
1229
1230
1231
1232
/*
 * Check that FP/SIMD access is allowed; if not, emit the FP access trap
 * to s->fp_excp_el and return false.  Must be called at most once per
 * instruction (asserted) and before any FP state is touched.
 */
static inline bool fp_access_check(DisasContext *s)
{
    assert(!s->fp_access_checked);
    s->fp_access_checked = true;

    if (!s->fp_excp_el) {
        return true;
    }

    gen_exception_insn(s, 4, EXCP_UDEF, syn_fp_access_trap(1, 0xe, false),
                       s->fp_excp_el);
    return false;
}
1246
1247
1248
1249
/*
 * Check that SVE access is allowed; the SVE trap takes priority over the
 * plain FP trap, which is checked afterwards.
 */
bool sve_access_check(DisasContext *s)
{
    if (s->sve_excp_el) {
        gen_exception_insn(s, 4, EXCP_UDEF, syn_sve_access_trap(),
                           s->sve_excp_el);
        return false;
    }
    return fp_access_check(s);
}
1259
1260
1261
1262
1263
1264
1265static void ext_and_shift_reg(TCGv_i64 tcg_out, TCGv_i64 tcg_in,
1266 int option, unsigned int shift)
1267{
1268 int extsize = extract32(option, 0, 2);
1269 bool is_signed = extract32(option, 2, 1);
1270
1271 if (is_signed) {
1272 switch (extsize) {
1273 case 0:
1274 tcg_gen_ext8s_i64(tcg_out, tcg_in);
1275 break;
1276 case 1:
1277 tcg_gen_ext16s_i64(tcg_out, tcg_in);
1278 break;
1279 case 2:
1280 tcg_gen_ext32s_i64(tcg_out, tcg_in);
1281 break;
1282 case 3:
1283 tcg_gen_mov_i64(tcg_out, tcg_in);
1284 break;
1285 }
1286 } else {
1287 switch (extsize) {
1288 case 0:
1289 tcg_gen_ext8u_i64(tcg_out, tcg_in);
1290 break;
1291 case 1:
1292 tcg_gen_ext16u_i64(tcg_out, tcg_in);
1293 break;
1294 case 2:
1295 tcg_gen_ext32u_i64(tcg_out, tcg_in);
1296 break;
1297 case 3:
1298 tcg_gen_mov_i64(tcg_out, tcg_in);
1299 break;
1300 }
1301 }
1302
1303 if (shift) {
1304 tcg_gen_shli_i64(tcg_out, tcg_out, shift);
1305 }
1306}
1307
static inline void gen_check_sp_alignment(DisasContext *s)
{
    /*
     * Deliberately a no-op: SP alignment checking is not emitted here.
     * NOTE(review): callers invoke this wherever the architecture would
     * permit an SP-alignment check; presumably emulating the SCTLR_ELx.SA
     * controls was judged not worth the cost — confirm against upstream
     * before adding checks.
     */
}
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332static inline AArch64DecodeFn *lookup_disas_fn(const AArch64DecodeTable *table,
1333 uint32_t insn)
1334{
1335 const AArch64DecodeTable *tptr = table;
1336
1337 while (tptr->mask) {
1338 if ((insn & tptr->mask) == tptr->pattern) {
1339 return tptr->disas_fn;
1340 }
1341 tptr++;
1342 }
1343 return NULL;
1344}
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
/*
 * Unconditional branch (immediate): B / BL.
 * s->pc already points past this insn, hence the "- 4" when forming the
 * branch target from the 26-bit signed word offset.
 */
static void disas_uncond_b_imm(DisasContext *s, uint32_t insn)
{
    uint64_t addr = s->pc + sextract32(insn, 0, 26) * 4 - 4;

    if (insn & (1U << 31)) {
        /* BL: write the return address (next insn) to X30. */
        tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
    }

    gen_goto_tb(s, 0, addr);
}
1372
1373
1374
1375
1376
1377
1378
/*
 * Compare and branch (immediate): CBZ / CBNZ.
 * Branch to a 19-bit signed word offset when Rt is (non-)zero.
 */
static void disas_comp_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int sf, op, rt;
    uint64_t addr;
    TCGLabel *label_match;
    TCGv_i64 tcg_cmp;

    sf = extract32(insn, 31, 1);
    op = extract32(insn, 24, 1);     /* 0: CBZ; 1: CBNZ */
    rt = extract32(insn, 0, 5);
    addr = s->pc + sextract32(insn, 5, 19) * 4 - 4;

    tcg_cmp = read_cpu_reg(s, rt, sf);
    label_match = gen_new_label();

    tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
                        tcg_cmp, 0, label_match);

    /* Fall-through path first, then the taken-branch path. */
    gen_goto_tb(s, 0, s->pc);
    gen_set_label(label_match);
    gen_goto_tb(s, 1, addr);
}
1401
1402
1403
1404
1405
1406
1407
/* Test and branch (immediate): TBZ and TBNZ */
static void disas_test_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int bit_pos, op, rt;
    uint64_t addr;
    TCGLabel *label_match;
    TCGv_i64 tcg_cmp;

    /* Tested bit number is b5:b40 — bit 31 supplies bit 5 of the index */
    bit_pos = (extract32(insn, 31, 1) << 5) | extract32(insn, 19, 5);
    op = extract32(insn, 24, 1); /* 0: TBZ (branch if bit clear), 1: TBNZ */
    addr = s->pc + sextract32(insn, 5, 14) * 4 - 4;
    rt = extract32(insn, 0, 5);

    tcg_cmp = tcg_temp_new_i64();
    /* Isolate the tested bit; branch on it being zero/non-zero */
    tcg_gen_andi_i64(tcg_cmp, cpu_reg(s, rt), (1ULL << bit_pos));
    label_match = gen_new_label();
    tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
                        tcg_cmp, 0, label_match);
    tcg_temp_free_i64(tcg_cmp);
    /* Not-taken path continues at the next insn */
    gen_goto_tb(s, 0, s->pc);
    gen_set_label(label_match);
    gen_goto_tb(s, 1, addr);
}
1430
1431
1432
1433
1434
1435
1436
/* Conditional branch (immediate): B.cond */
static void disas_cond_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int cond;
    uint64_t addr;

    /* Bits 4 and 24 must be zero in this encoding */
    if ((insn & (1 << 4)) || (insn & (1 << 24))) {
        unallocated_encoding(s);
        return;
    }
    addr = s->pc + sextract32(insn, 5, 19) * 4 - 4;
    cond = extract32(insn, 0, 4);

    if (cond < 0x0e) {
        /* genuinely conditional branches */
        TCGLabel *label_match = gen_new_label();
        arm_gen_test_cc(cond, label_match);
        gen_goto_tb(s, 0, s->pc);
        gen_set_label(label_match);
        gen_goto_tb(s, 1, addr);
    } else {
        /* 0xe and 0xf are both "always" conditions */
        gen_goto_tb(s, 0, addr);
    }
}
1461
1462
/* HINT instruction group, including various allocated HINTs */
static void handle_hint(DisasContext *s, uint32_t insn,
                        unsigned int op1, unsigned int op2, unsigned int crm)
{
    unsigned int selector = crm << 3 | op2;

    if (op1 != 3) {
        unallocated_encoding(s);
        return;
    }

    switch (selector) {
    case 0: /* NOP */
        return;
    case 3: /* WFI */
        s->base.is_jmp = DISAS_WFI;
        return;
        /* When running in MTTCG we don't generate jumps to the yield and
         * WFE helpers as it won't affect the scheduling of other vCPUs.
         * If we wanted to more completely model WFE/SEV so we don't busy
         * spin unnecessarily we would need to do something more involved.
         */
    case 1: /* YIELD */
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            s->base.is_jmp = DISAS_YIELD;
        }
        return;
    case 2: /* WFE */
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            s->base.is_jmp = DISAS_WFE;
        }
        return;
    case 4: /* SEV */
    case 5: /* SEVL */
        /* we treat all as NOP at least for now */
        return;
    default:
        /* default specified as NOP equivalent */
        return;
    }
}
1503
/* CLREX: clear the local exclusive monitor by invalidating the
 * recorded exclusive address (-1 can never match a real address).
 */
static void gen_clrex(DisasContext *s, uint32_t insn)
{
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
1508
1509
/* CLREX, DSB, DMB, ISB */
static void handle_sync(DisasContext *s, uint32_t insn,
                        unsigned int op1, unsigned int op2, unsigned int crm)
{
    TCGBar bar;

    if (op1 != 3) {
        unallocated_encoding(s);
        return;
    }

    switch (op2) {
    case 2: /* CLREX */
        gen_clrex(s, insn);
        return;
    case 4: /* DSB */
    case 5: /* DMB */
        /* crm bits [1:0] select the barrier domain/type */
        switch (crm & 3) {
        case 1: /* MBReqTypes_Reads */
            bar = TCG_BAR_SC | TCG_MO_LD_LD | TCG_MO_LD_ST;
            break;
        case 2: /* MBReqTypes_Writes */
            bar = TCG_BAR_SC | TCG_MO_ST_ST;
            break;
        default: /* MBReqTypes_All */
            bar = TCG_BAR_SC | TCG_MO_ALL;
            break;
        }
        tcg_gen_mb(bar);
        return;
    case 6: /* ISB */
        /* We need to break the TB after this insn to execute
         * self-modifying code correctly and also to take
         * any pending interrupts immediately.
         */
        gen_goto_tb(s, 0, s->pc);
        return;
    default:
        unallocated_encoding(s);
        return;
    }
}
1551
1552
/* MSR (immediate) - move immediate to processor state field */
static void handle_msr_i(DisasContext *s, uint32_t insn,
                         unsigned int op1, unsigned int op2, unsigned int crm)
{
    int op = op1 << 3 | op2;
    switch (op) {
    case 0x05: /* SPSel */
        if (s->current_el == 0) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x1e: /* DAIFSet */
    case 0x1f: /* DAIFClear */
    {
        TCGv_i32 tcg_imm = tcg_const_i32(crm);
        TCGv_i32 tcg_op = tcg_const_i32(op);
        gen_a64_set_pc_im(s->pc - 4);
        gen_helper_msr_i_pstate(cpu_env, tcg_op, tcg_imm);
        tcg_temp_free_i32(tcg_imm);
        tcg_temp_free_i32(tcg_op);
        /* For DAIFClear, exit the cpu loop to re-evaluate pending IRQs */
        gen_a64_set_pc_im(s->pc);
        s->base.is_jmp = (op == 0x1f ? DISAS_EXIT : DISAS_JUMP);
        break;
    }
    default:
        unallocated_encoding(s);
        return;
    }
}
1583
/* Pack the current NZCV flag state into bits [31:28] of tcg_rt,
 * translating from QEMU's split-out flag representation.
 */
static void gen_get_nzcv(TCGv_i64 tcg_rt)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    TCGv_i32 nzcv = tcg_temp_new_i32();

    /* build bit 31, N: cpu_NF holds N in its sign bit */
    tcg_gen_andi_i32(nzcv, cpu_NF, (1U << 31));
    /* build bit 30, Z: cpu_ZF is zero iff Z is set */
    tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_ZF, 0);
    tcg_gen_deposit_i32(nzcv, nzcv, tmp, 30, 1);
    /* build bit 29, C: cpu_CF holds C in bit 0 */
    tcg_gen_deposit_i32(nzcv, nzcv, cpu_CF, 29, 1);
    /* build bit 28, V: cpu_VF holds V in its sign bit */
    tcg_gen_shri_i32(tmp, cpu_VF, 31);
    tcg_gen_deposit_i32(nzcv, nzcv, tmp, 28, 1);
    /* generate result: zero-extended into the 64-bit destination */
    tcg_gen_extu_i32_i64(tcg_rt, nzcv);

    tcg_temp_free_i32(nzcv);
    tcg_temp_free_i32(tmp);
}
1605
/* Unpack bits [31:28] of tcg_rt into QEMU's split-out NZCV flag
 * variables (the inverse of gen_get_nzcv).
 */
static void gen_set_nzcv(TCGv_i64 tcg_rt)

{
    TCGv_i32 nzcv = tcg_temp_new_i32();

    /* take NZCV from the low 32 bits of the register */
    tcg_gen_extrl_i64_i32(nzcv, tcg_rt);

    /* bit 31, N: stored directly in cpu_NF's sign bit */
    tcg_gen_andi_i32(cpu_NF, nzcv, (1U << 31));
    /* bit 30, Z: cpu_ZF must be zero iff Z is set */
    tcg_gen_andi_i32(cpu_ZF, nzcv, (1 << 30));
    tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_ZF, cpu_ZF, 0);
    /* bit 29, C: shifted down to bit 0 of cpu_CF */
    tcg_gen_andi_i32(cpu_CF, nzcv, (1 << 29));
    tcg_gen_shri_i32(cpu_CF, cpu_CF, 29);
    /* bit 28, V: shifted up into cpu_VF's sign bit */
    tcg_gen_andi_i32(cpu_VF, nzcv, (1 << 28));
    tcg_gen_shli_i32(cpu_VF, cpu_VF, 3);
    tcg_temp_free_i32(nzcv);
}
1627
1628
1629
1630
1631
1632
1633
1634
/* MRS - move from system register
 * MSR (register) - move to system register
 * SYS
 * SYSL
 * These are all essentially the same insn in 'read' and 'write'
 * versions, with varying op0 fields.
 */
static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
                       unsigned int op0, unsigned int op1, unsigned int op2,
                       unsigned int crn, unsigned int crm, unsigned int rt)
{
    const ARMCPRegInfo *ri;
    TCGv_i64 tcg_rt;

    ri = get_arm_cp_reginfo(s->cp_regs,
                            ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP,
                                               crn, crm, op0, op1, op2));

    if (!ri) {
        /* Unknown register; this might be a guest error or a QEMU
         * unimplemented feature.
         */
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch64 "
                      "system register op0:%d op1:%d crn:%d crm:%d op2:%d\n",
                      isread ? "read" : "write", op0, op1, crn, crm, op2);
        unallocated_encoding(s);
        return;
    }

    /* Check access permissions */
    if (!cp_access_ok(s->current_el, ri, isread)) {
        unallocated_encoding(s);
        return;
    }

    if (ri->accessfn) {
        /* Emit code to perform further access permission checks at
         * runtime; this may result in an exception.
         */
        TCGv_ptr tmpptr;
        TCGv_i32 tcg_syn, tcg_isread;
        uint32_t syndrome;

        gen_a64_set_pc_im(s->pc - 4);
        tmpptr = tcg_const_ptr(ri);
        syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
        tcg_syn = tcg_const_i32(syndrome);
        tcg_isread = tcg_const_i32(isread);
        gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn, tcg_isread);
        tcg_temp_free_ptr(tmpptr);
        tcg_temp_free_i32(tcg_syn);
        tcg_temp_free_i32(tcg_isread);
    }

    /* Handle special cases first */
    switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
    case ARM_CP_NOP:
        return;
    case ARM_CP_NZCV:
        tcg_rt = cpu_reg(s, rt);
        if (isread) {
            gen_get_nzcv(tcg_rt);
        } else {
            gen_set_nzcv(tcg_rt);
        }
        return;
    case ARM_CP_CURRENTEL:
        /* Reads as the current EL value from pstate, which is
         * guaranteed constant for the lifetime of this TB
         * (s->current_el is fixed at translation time).
         */
        tcg_rt = cpu_reg(s, rt);
        tcg_gen_movi_i64(tcg_rt, s->current_el << 2);
        return;
    case ARM_CP_DC_ZVA:
        /* Writes clear the aligned block of memory which rt points into */
        tcg_rt = cpu_reg(s, rt);
        gen_helper_dc_zva(cpu_env, tcg_rt);
        return;
    default:
        break;
    }
    if ((ri->type & ARM_CP_FPU) && !fp_access_check(s)) {
        return;
    } else if ((ri->type & ARM_CP_SVE) && !sve_access_check(s)) {
        return;
    }

    if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
        gen_io_start();
    }

    tcg_rt = cpu_reg(s, rt);

    if (isread) {
        if (ri->type & ARM_CP_CONST) {
            tcg_gen_movi_i64(tcg_rt, ri->resetvalue);
        } else if (ri->readfn) {
            TCGv_ptr tmpptr;
            tmpptr = tcg_const_ptr(ri);
            gen_helper_get_cp_reg64(tcg_rt, cpu_env, tmpptr);
            tcg_temp_free_ptr(tmpptr);
        } else {
            tcg_gen_ld_i64(tcg_rt, cpu_env, ri->fieldoffset);
        }
    } else {
        if (ri->type & ARM_CP_CONST) {
            /* If not forbidden by access permissions, treat as WI */
            return;
        } else if (ri->writefn) {
            TCGv_ptr tmpptr;
            tmpptr = tcg_const_ptr(ri);
            gen_helper_set_cp_reg64(cpu_env, tmpptr, tcg_rt);
            tcg_temp_free_ptr(tmpptr);
        } else {
            tcg_gen_st_i64(tcg_rt, cpu_env, ri->fieldoffset);
        }
    }

    if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
        /* I/O operations must end the TB here (whether read or write) */
        gen_io_end();
        s->base.is_jmp = DISAS_UPDATE;
    } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
        /* We default to ending the TB on a coprocessor register write,
         * but allow this to be suppressed by the register definition
         * (usually only necessary to work around guest bugs).
         */
        s->base.is_jmp = DISAS_UPDATE;
    }
}
1758
1759
1760
1761
1762
1763
1764
/* System instruction group: dispatch HINT/barrier/MSR-immediate
 * encodings (op0 == 0) or fall through to the generic system
 * register handler.  Fields per the A64 "System" encoding:
 * L (bit 21), op0, op1, CRn, CRm, op2, Rt.
 */
static void disas_system(DisasContext *s, uint32_t insn)
{
    unsigned int l, op0, op1, crn, crm, op2, rt;
    l = extract32(insn, 21, 1);
    op0 = extract32(insn, 19, 2);
    op1 = extract32(insn, 16, 3);
    crn = extract32(insn, 12, 4);
    crm = extract32(insn, 8, 4);
    op2 = extract32(insn, 5, 3);
    rt = extract32(insn, 0, 5);

    if (op0 == 0) {
        /* These are all a NOP-alike group; Rt must be 31 and L clear */
        if (l || rt != 31) {
            unallocated_encoding(s);
            return;
        }
        switch (crn) {
        case 2: /* HINT (including allocated hints like NOP, YIELD, etc) */
            handle_hint(s, insn, op1, op2, crm);
            break;
        case 3: /* CLREX, DSB, DMB, ISB */
            handle_sync(s, insn, op1, op2, crm);
            break;
        case 4: /* MSR (immediate) */
            handle_msr_i(s, insn, op1, op2, crm);
            break;
        default:
            unallocated_encoding(s);
            break;
        }
        return;
    }
    handle_sys(s, insn, l, op0, op1, op2, crn, crm, rt);
}
1799
1800
1801
1802
1803
1804
1805
1806
/* Exception generation: SVC, HVC, SMC, BRK, HLT, DCPSn.
 * Fields: opc (bits [23:21]), imm16 (bits [20:5]), op2:LL (bits [4:0]).
 */
static void disas_exc(DisasContext *s, uint32_t insn)
{
    int opc = extract32(insn, 21, 3);
    int op2_ll = extract32(insn, 0, 5);
    int imm16 = extract32(insn, 5, 16);
    TCGv_i32 tmp;

    switch (opc) {
    case 0:
        /* For SVC, HVC and SMC we advance the single-step state
         * machine before taking the exception (gen_ss_advance), so
         * that single-stepping a system-call insn behaves correctly.
         */
        switch (op2_ll) {
        case 1: /* SVC */
            gen_ss_advance(s);
            gen_exception_insn(s, 0, EXCP_SWI, syn_aa64_svc(imm16),
                               default_exception_el(s));
            break;
        case 2: /* HVC */
            if (s->current_el == 0) {
                unallocated_encoding(s);
                break;
            }
            /* The pre-HVC helper handles cases when HVC gets trapped
             * as an undefined insn by runtime configuration.
             */
            gen_a64_set_pc_im(s->pc - 4);
            gen_helper_pre_hvc(cpu_env);
            gen_ss_advance(s);
            gen_exception_insn(s, 0, EXCP_HVC, syn_aa64_hvc(imm16), 2);
            break;
        case 3: /* SMC */
            if (s->current_el == 0) {
                unallocated_encoding(s);
                break;
            }
            gen_a64_set_pc_im(s->pc - 4);
            tmp = tcg_const_i32(syn_aa64_smc(imm16));
            gen_helper_pre_smc(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            gen_ss_advance(s);
            gen_exception_insn(s, 0, EXCP_SMC, syn_aa64_smc(imm16), 3);
            break;
        default:
            unallocated_encoding(s);
            break;
        }
        break;
    case 1:
        if (op2_ll != 0) {
            unallocated_encoding(s);
            break;
        }
        /* BRK */
        gen_exception_bkpt_insn(s, 4, syn_aa64_bkpt(imm16));
        break;
    case 2:
        if (op2_ll != 0) {
            unallocated_encoding(s);
            break;
        }
        /* HLT. This has two purposes.
         * Architecturally, it is an external halting debug instruction;
         * since external debug is not implemented here it UNDEFs.
         * Secondly, "HLT 0xf000" is the A64 semihosting syscall
         * instruction, handled below when semihosting is enabled.
         */
        if (semihosting_enabled() && imm16 == 0xf000) {
#ifndef CONFIG_USER_ONLY
            /* In system mode, don't allow userspace access to
             * semihosting, to provide some semblance of security.
             */
            if (s->current_el == 0) {
                unsupported_encoding(s, insn);
                break;
            }
#endif
            gen_exception_internal_insn(s, 0, EXCP_SEMIHOST);
        } else {
            unsupported_encoding(s, insn);
        }
        break;
    case 5:
        if (op2_ll < 1 || op2_ll > 3) {
            unallocated_encoding(s);
            break;
        }
        /* DCPS1, DCPS2, DCPS3 */
        unsupported_encoding(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
1905
1906
1907
1908
1909
1910
1911
/* Unconditional branch (register): BR, BLR, RET, ERET, DRPS.
 * Fields: opc (bits [24:21]), op2, op3, Rn, op4; op2/op3/op4 must
 * take their single allocated values.
 */
static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
{
    unsigned int opc, op2, op3, rn, op4;

    opc = extract32(insn, 21, 4);
    op2 = extract32(insn, 16, 5);
    op3 = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    op4 = extract32(insn, 0, 5);

    if (op4 != 0x0 || op3 != 0x0 || op2 != 0x1f) {
        unallocated_encoding(s);
        return;
    }

    switch (opc) {
    case 0: /* BR */
    case 1: /* BLR */
    case 2: /* RET */
        gen_a64_set_pc(s, cpu_reg(s, rn));
        /* BLR also needs to load the return address into x30 */
        if (opc == 1) {
            tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
        }
        break;
    case 4: /* ERET */
        if (s->current_el == 0) {
            unallocated_encoding(s);
            return;
        }
        if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
            gen_io_start();
        }
        gen_helper_exception_return(cpu_env);
        if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
            gen_io_end();
        }
        /* Must exit the cpu loop to check un-masked IRQs */
        s->base.is_jmp = DISAS_EXIT;
        return;
    case 5: /* DRPS */
        if (rn != 0x1f) {
            unallocated_encoding(s);
        } else {
            unsupported_encoding(s, insn);
        }
        return;
    default:
        unallocated_encoding(s);
        return;
    }

    s->base.is_jmp = DISAS_JUMP;
}
1966
1967
/* Branches, exception generating and system instructions:
 * dispatch on bits [31:25] of the instruction.
 */
static void disas_b_exc_sys(DisasContext *s, uint32_t insn)
{
    switch (extract32(insn, 25, 7)) {
    case 0x0a: case 0x0b:
    case 0x4a: case 0x4b: /* Unconditional branch (immediate) */
        disas_uncond_b_imm(s, insn);
        break;
    case 0x1a: case 0x5a: /* Compare & branch (immediate) */
        disas_comp_b_imm(s, insn);
        break;
    case 0x1b: case 0x5b: /* Test & branch (immediate) */
        disas_test_b_imm(s, insn);
        break;
    case 0x2a: /* Conditional branch (immediate) */
        disas_cond_b_imm(s, insn);
        break;
    case 0x6a: /* Exception generation / System */
        if (insn & (1 << 24)) {
            disas_system(s, insn);
        } else {
            disas_exc(s, insn);
        }
        break;
    case 0x6b: /* Unconditional branch (register) */
        disas_uncond_b_reg(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
1999
2000
2001
2002
2003
2004
2005
2006
2007
2008
2009
2010
/* Load exclusive: generate the load and record the loaded value(s) in
 * cpu_exclusive_val/high and the address in cpu_exclusive_addr, for a
 * later store-exclusive to compare against.
 */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv_i64 addr, int size, bool is_pair)
{
    int idx = get_mem_index(s);
    TCGMemOp memop = s->be_data;

    g_assert(size <= 3);
    if (is_pair) {
        g_assert(size >= 2);
        if (size == 2) {
            /* The pair must be single-copy atomic for the doubleword:
             * do one 64-bit load and split it into the two registers.
             */
            memop |= MO_64 | MO_ALIGN;
            tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx, memop);
            if (s->be_data == MO_LE) {
                tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 0, 32);
                tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 32, 32);
            } else {
                tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 32, 32);
                tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 0, 32);
            }
        } else {
            /* The pair must be single-copy atomic for *each* doubleword,
             * not the entire quadword, however it must be quadword aligned.
             */
            memop |= MO_64;
            tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx,
                                memop | MO_ALIGN_16);

            TCGv_i64 addr2 = tcg_temp_new_i64();
            tcg_gen_addi_i64(addr2, addr, 8);
            tcg_gen_qemu_ld_i64(cpu_exclusive_high, addr2, idx, memop);
            tcg_temp_free_i64(addr2);

            tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val);
            tcg_gen_mov_i64(cpu_reg(s, rt2), cpu_exclusive_high);
        }
    } else {
        memop |= size | MO_ALIGN;
        tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx, memop);
        tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val);
    }
    /* Remember the address so store-exclusive can match against it */
    tcg_gen_mov_i64(cpu_exclusive_addr, addr);
}
2053
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i64 addr, int size, int is_pair)
{
    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]
     *     && (!is_pair || env->exclusive_high == [addr + datasize])) {
     *     [addr] = {Rt};
     *     if (is_pair) {
     *         [addr + datasize] = {Rt2};
     *     }
     *     {Rd} = 0;
     * } else {
     *     {Rd} = 1;
     * }
     * env->exclusive_addr = -1;
     */
    TCGLabel *fail_label = gen_new_label();
    TCGLabel *done_label = gen_new_label();
    TCGv_i64 tmp;

    tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);

    tmp = tcg_temp_new_i64();
    if (is_pair) {
        if (size == 2) {
            /* 32-bit pair: fold into one 64-bit cmpxchg */
            if (s->be_data == MO_LE) {
                tcg_gen_concat32_i64(tmp, cpu_reg(s, rt), cpu_reg(s, rt2));
            } else {
                tcg_gen_concat32_i64(tmp, cpu_reg(s, rt2), cpu_reg(s, rt));
            }
            tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr,
                                       cpu_exclusive_val, tmp,
                                       get_mem_index(s),
                                       MO_64 | MO_ALIGN | s->be_data);
            tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
        } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
            /* 64-bit pair in parallel context needs a 128-bit cmpxchg */
            if (!HAVE_CMPXCHG128) {
                gen_helper_exit_atomic(cpu_env);
                s->base.is_jmp = DISAS_NORETURN;
            } else if (s->be_data == MO_LE) {
                gen_helper_paired_cmpxchg64_le_parallel(tmp, cpu_env,
                                                        cpu_exclusive_addr,
                                                        cpu_reg(s, rt),
                                                        cpu_reg(s, rt2));
            } else {
                gen_helper_paired_cmpxchg64_be_parallel(tmp, cpu_env,
                                                        cpu_exclusive_addr,
                                                        cpu_reg(s, rt),
                                                        cpu_reg(s, rt2));
            }
        } else if (s->be_data == MO_LE) {
            gen_helper_paired_cmpxchg64_le(tmp, cpu_env, cpu_exclusive_addr,
                                           cpu_reg(s, rt), cpu_reg(s, rt2));
        } else {
            gen_helper_paired_cmpxchg64_be(tmp, cpu_env, cpu_exclusive_addr,
                                           cpu_reg(s, rt), cpu_reg(s, rt2));
        }
    } else {
        tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr, cpu_exclusive_val,
                                   cpu_reg(s, rt), get_mem_index(s),
                                   size | MO_ALIGN | s->be_data);
        tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
    }
    /* tmp holds 0 on success, 1 on failure: that becomes the status reg */
    tcg_gen_mov_i64(cpu_reg(s, rd), tmp);
    tcg_temp_free_i64(tmp);
    tcg_gen_br(done_label);

    gen_set_label(fail_label);
    tcg_gen_movi_i64(cpu_reg(s, rd), 1);
    gen_set_label(done_label);
    /* In all cases the exclusive monitor is cleared */
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
2125
/* CAS: single-register atomic compare-and-swap. Compares Rs with
 * memory at [Rn]; if equal stores Rt. The old memory value is
 * written back into Rs in either case.
 */
static void gen_compare_and_swap(DisasContext *s, int rs, int rt,
                                 int rn, int size)
{
    TCGv_i64 tcg_rs = cpu_reg(s, rs);
    TCGv_i64 tcg_rt = cpu_reg(s, rt);
    int memidx = get_mem_index(s);
    TCGv_i64 addr = cpu_reg_sp(s, rn);

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    tcg_gen_atomic_cmpxchg_i64(tcg_rs, addr, tcg_rs, tcg_rt, memidx,
                               size | MO_ALIGN | s->be_data);
}
2140
2141static void gen_compare_and_swap_pair(DisasContext *s, int rs, int rt,
2142 int rn, int size)
2143{
2144 TCGv_i64 s1 = cpu_reg(s, rs);
2145 TCGv_i64 s2 = cpu_reg(s, rs + 1);
2146 TCGv_i64 t1 = cpu_reg(s, rt);
2147 TCGv_i64 t2 = cpu_reg(s, rt + 1);
2148 TCGv_i64 addr = cpu_reg_sp(s, rn);
2149 int memidx = get_mem_index(s);
2150
2151 if (rn == 31) {
2152 gen_check_sp_alignment(s);
2153 }
2154
2155 if (size == 2) {
2156 TCGv_i64 cmp = tcg_temp_new_i64();
2157 TCGv_i64 val = tcg_temp_new_i64();
2158
2159 if (s->be_data == MO_LE) {
2160 tcg_gen_concat32_i64(val, t1, t2);
2161 tcg_gen_concat32_i64(cmp, s1, s2);
2162 } else {
2163 tcg_gen_concat32_i64(val, t2, t1);
2164 tcg_gen_concat32_i64(cmp, s2, s1);
2165 }
2166
2167 tcg_gen_atomic_cmpxchg_i64(cmp, addr, cmp, val, memidx,
2168 MO_64 | MO_ALIGN | s->be_data);
2169 tcg_temp_free_i64(val);
2170
2171 if (s->be_data == MO_LE) {
2172 tcg_gen_extr32_i64(s1, s2, cmp);
2173 } else {
2174 tcg_gen_extr32_i64(s2, s1, cmp);
2175 }
2176 tcg_temp_free_i64(cmp);
2177 } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
2178 if (HAVE_CMPXCHG128) {
2179 TCGv_i32 tcg_rs = tcg_const_i32(rs);
2180 if (s->be_data == MO_LE) {
2181 gen_helper_casp_le_parallel(cpu_env, tcg_rs, addr, t1, t2);
2182 } else {
2183 gen_helper_casp_be_parallel(cpu_env, tcg_rs, addr, t1, t2);
2184 }
2185 tcg_temp_free_i32(tcg_rs);
2186 } else {
2187 gen_helper_exit_atomic(cpu_env);
2188 s->base.is_jmp = DISAS_NORETURN;
2189 }
2190 } else {
2191 TCGv_i64 d1 = tcg_temp_new_i64();
2192 TCGv_i64 d2 = tcg_temp_new_i64();
2193 TCGv_i64 a2 = tcg_temp_new_i64();
2194 TCGv_i64 c1 = tcg_temp_new_i64();
2195 TCGv_i64 c2 = tcg_temp_new_i64();
2196 TCGv_i64 zero = tcg_const_i64(0);
2197
2198
2199 tcg_gen_qemu_ld_i64(d1, addr, memidx,
2200 MO_64 | MO_ALIGN_16 | s->be_data);
2201 tcg_gen_addi_i64(a2, addr, 8);
2202 tcg_gen_qemu_ld_i64(d2, addr, memidx, MO_64 | s->be_data);
2203
2204
2205 tcg_gen_setcond_i64(TCG_COND_EQ, c1, d1, s1);
2206 tcg_gen_setcond_i64(TCG_COND_EQ, c2, d2, s2);
2207 tcg_gen_and_i64(c2, c2, c1);
2208
2209
2210 tcg_gen_movcond_i64(TCG_COND_NE, c1, c2, zero, t1, d1);
2211 tcg_gen_movcond_i64(TCG_COND_NE, c2, c2, zero, t2, d2);
2212 tcg_gen_qemu_st_i64(c1, addr, memidx, MO_64 | s->be_data);
2213 tcg_gen_qemu_st_i64(c2, a2, memidx, MO_64 | s->be_data);
2214 tcg_temp_free_i64(a2);
2215 tcg_temp_free_i64(c1);
2216 tcg_temp_free_i64(c2);
2217 tcg_temp_free_i64(zero);
2218
2219
2220 tcg_gen_mov_i64(s1, d1);
2221 tcg_gen_mov_i64(s2, d2);
2222 tcg_temp_free_i64(d1);
2223 tcg_temp_free_i64(d2);
2224 }
2225}
2226
2227
2228
2229
/*
 * Compute the ISS.SF field for a load/store syndrome: true when the
 * transferred general-purpose register is treated as 64 bits wide.
 */
static bool disas_ldst_compute_iss_sf(int size, bool is_signed, int opc)
{
    int regsize;

    if (is_signed) {
        /* Sign-extending loads: opc bit 0 selects the 32-bit form */
        regsize = (opc & 1) ? 32 : 64;
    } else {
        /* Zero-extending: only a doubleword access targets a 64-bit reg */
        regsize = (size == 3) ? 64 : 32;
    }
    return regsize == 64;
}
2242
2243
2244
2245
2246
2247
2248
2249
2250
2251
2252
2253
2254
2255
/* Load/store exclusive group: store/load exclusive (pair), store-release,
 * load-acquire, and the atomic CAS/CASP encodings. The dispatch key
 * o2_L_o1_o0 packs bits [23:21] of the insn with the o0/"lasr" bit 15.
 */
static void disas_ldst_excl(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rt2 = extract32(insn, 10, 5);
    int rs = extract32(insn, 16, 5);
    int is_lasr = extract32(insn, 15, 1);
    int o2_L_o1_o0 = extract32(insn, 21, 3) * 2 | is_lasr;
    int size = extract32(insn, 30, 2);
    TCGv_i64 tcg_addr;

    switch (o2_L_o1_o0) {
    case 0x0: /* STXR */
    case 0x1: /* STLXR */
        if (rn == 31) {
            gen_check_sp_alignment(s);
        }
        if (is_lasr) {
            /* Release semantics: barrier before the store */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
        }
        tcg_addr = read_cpu_reg_sp(s, rn, 1);
        gen_store_exclusive(s, rs, rt, rt2, tcg_addr, size, false);
        return;

    case 0x4: /* LDXR */
    case 0x5: /* LDAXR */
        if (rn == 31) {
            gen_check_sp_alignment(s);
        }
        tcg_addr = read_cpu_reg_sp(s, rn, 1);
        s->is_ldex = true;
        gen_load_exclusive(s, rt, rt2, tcg_addr, size, false);
        if (is_lasr) {
            /* Acquire semantics: barrier after the load */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
        }
        return;

    case 0x9: /* STLR */
        /* Generate ISS for non-exclusive accesses including LASR.  */
        if (rn == 31) {
            gen_check_sp_alignment(s);
        }
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
        tcg_addr = read_cpu_reg_sp(s, rn, 1);
        do_gpr_st(s, cpu_reg(s, rt), tcg_addr, size, true, rt,
                  disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
        return;

    case 0xd: /* LDAR */
        /* Generate ISS for non-exclusive accesses including LASR.  */
        if (rn == 31) {
            gen_check_sp_alignment(s);
        }
        tcg_addr = read_cpu_reg_sp(s, rn, 1);
        do_gpr_ld(s, cpu_reg(s, rt), tcg_addr, size, false, false, true, rt,
                  disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
        return;

    case 0x2: case 0x3: /* CASP / STXP */
        if (size & 2) { /* STXP / STLXP */
            if (rn == 31) {
                gen_check_sp_alignment(s);
            }
            if (is_lasr) {
                tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
            }
            tcg_addr = read_cpu_reg_sp(s, rn, 1);
            gen_store_exclusive(s, rs, rt, rt2, tcg_addr, size, true);
            return;
        }
        if (rt2 == 31
            && ((rt | rs) & 1) == 0
            && dc_isar_feature(aa64_atomics, s)) {
            /* CASP / CASPL */
            gen_compare_and_swap_pair(s, rs, rt, rn, size | 2);
            return;
        }
        break;

    case 0x6: case 0x7: /* CASPA / LDXP */
        if (size & 2) { /* LDXP / LDAXP */
            if (rn == 31) {
                gen_check_sp_alignment(s);
            }
            tcg_addr = read_cpu_reg_sp(s, rn, 1);
            s->is_ldex = true;
            gen_load_exclusive(s, rt, rt2, tcg_addr, size, true);
            if (is_lasr) {
                tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
            }
            return;
        }
        if (rt2 == 31
            && ((rt | rs) & 1) == 0
            && dc_isar_feature(aa64_atomics, s)) {
            /* CASPA / CASPAL */
            gen_compare_and_swap_pair(s, rs, rt, rn, size | 2);
            return;
        }
        break;

    case 0xa: /* CAS */
    case 0xb: /* CASL */
    case 0xe: /* CASA */
    case 0xf: /* CASAL */
        if (rt2 == 31 && dc_isar_feature(aa64_atomics, s)) {
            gen_compare_and_swap(s, rs, rt, rn, size);
            return;
        }
        break;
    }
    unallocated_encoding(s);
}
2370
2371
2372
2373
2374
2375
2376
2377
2378
2379
2380
2381
2382
2383
/* Load register (literal): PC-relative load of a word, doubleword,
 * signed word, or SIMD/FP register; opc==3 in the integer form is the
 * PRFM (literal) prefetch, treated as a no-op.
 */
static void disas_ld_lit(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int64_t imm = sextract32(insn, 5, 19) << 2;
    bool is_vector = extract32(insn, 26, 1);
    int opc = extract32(insn, 30, 2);
    bool is_signed = false;
    int size = 2;
    TCGv_i64 tcg_rt, tcg_addr;

    if (is_vector) {
        if (opc == 3) {
            unallocated_encoding(s);
            return;
        }
        size = 2 + opc;
        if (!fp_access_check(s)) {
            return;
        }
    } else {
        if (opc == 3) {
            /* PRFM (literal) : prefetch is a no-op here */
            return;
        }
        size = 2 + extract32(opc, 0, 1);
        is_signed = extract32(opc, 1, 1);
    }

    tcg_rt = cpu_reg(s, rt);

    /* Address is PC-relative; s->pc is past the insn, hence -4 */
    tcg_addr = tcg_const_i64((s->pc - 4) + imm);
    if (is_vector) {
        do_fp_ld(s, rt, tcg_addr, size);
    } else {
        /* Only unsigned 32bit loads target 32bit registers.  */
        bool iss_sf = opc != 0;

        do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, false,
                  true, rt, iss_sf, false);
    }
    tcg_temp_free_i64(tcg_addr);
}
2426
2427
2428
2429
2430
2431
2432
2433
2434
2435
2436
2437
2438
2439
2440
2441
2442
2443
2444
2445
2446
2447
2448
2449
2450
2451
2452
2453
2454
/* Load/store pair (all forms): LDP/STP/LDPSW/LDNP/STNP, integer and
 * SIMD&FP, with post-index, signed-offset, and pre-index addressing
 * selected by the 'index' field (bits [24:23]).
 */
static void disas_ldst_pair(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rt2 = extract32(insn, 10, 5);
    uint64_t offset = sextract64(insn, 15, 7);
    int index = extract32(insn, 23, 2);
    bool is_vector = extract32(insn, 26, 1);
    bool is_load = extract32(insn, 22, 1);
    int opc = extract32(insn, 30, 2);

    bool is_signed = false;
    bool postindex = false;
    bool wback = false;

    TCGv_i64 tcg_addr;
    int size;

    if (opc == 3) {
        unallocated_encoding(s);
        return;
    }

    if (is_vector) {
        size = 2 + opc;
    } else {
        size = 2 + extract32(opc, 1, 1);
        is_signed = extract32(opc, 0, 1);
        /* Sign-extension (LDPSW) only exists as a load */
        if (!is_load && is_signed) {
            unallocated_encoding(s);
            return;
        }
    }

    switch (index) {
    case 1: /* post-index */
        postindex = true;
        wback = true;
        break;
    case 0:
        /* signed offset with "non-temporal" hint. Since we don't emulate
         * caches we don't care about hints to the cache system about
         * data access patterns, and handle this identically to plain
         * signed offset.
         */
        if (is_signed) {
            /* There is no non-temporal-hint version of LDPSW */
            unallocated_encoding(s);
            return;
        }
        postindex = false;
        break;
    case 2: /* signed offset, rn not updated */
        postindex = false;
        break;
    case 3: /* pre-index */
        postindex = false;
        wback = true;
        break;
    }

    if (is_vector && !fp_access_check(s)) {
        return;
    }

    /* imm7 is scaled by the access size */
    offset <<= size;

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    tcg_addr = read_cpu_reg_sp(s, rn, 1);

    if (!postindex) {
        tcg_gen_addi_i64(tcg_addr, tcg_addr, offset);
    }

    if (is_vector) {
        if (is_load) {
            do_fp_ld(s, rt, tcg_addr, size);
        } else {
            do_fp_st(s, rt, tcg_addr, size);
        }
        tcg_gen_addi_i64(tcg_addr, tcg_addr, 1 << size);
        if (is_load) {
            do_fp_ld(s, rt2, tcg_addr, size);
        } else {
            do_fp_st(s, rt2, tcg_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        TCGv_i64 tcg_rt2 = cpu_reg(s, rt2);

        if (is_load) {
            TCGv_i64 tmp = tcg_temp_new_i64();

            /* Do not modify tcg_rt before recognizing any exception
             * from the second load: the first loaded value is staged
             * in tmp so a fault on the second load leaves rt intact.
             */
            do_gpr_ld(s, tmp, tcg_addr, size, is_signed, false,
                      false, 0, false, false);
            tcg_gen_addi_i64(tcg_addr, tcg_addr, 1 << size);
            do_gpr_ld(s, tcg_rt2, tcg_addr, size, is_signed, false,
                      false, 0, false, false);

            tcg_gen_mov_i64(tcg_rt, tmp);
            tcg_temp_free_i64(tmp);
        } else {
            do_gpr_st(s, tcg_rt, tcg_addr, size,
                      false, 0, false, false);
            tcg_gen_addi_i64(tcg_addr, tcg_addr, 1 << size);
            do_gpr_st(s, tcg_rt2, tcg_addr, size,
                      false, 0, false, false);
        }
    }

    if (wback) {
        /* tcg_addr currently points at the second element; rebase to
         * the writeback address.
         */
        if (postindex) {
            tcg_gen_addi_i64(tcg_addr, tcg_addr, offset - (1 << size));
        } else {
            tcg_gen_subi_i64(tcg_addr, tcg_addr, 1 << size);
        }
        tcg_gen_mov_i64(cpu_reg_sp(s, rn), tcg_addr);
    }
}
2580
2581
2582
2583
2584
2585
2586
2587
2588
2589
2590
2591
2592
2593
2594
2595
2596
/* Load/store register (immediate 9-bit): unscaled immediate,
 * post-indexed, unprivileged (LDTR/STTR), and pre-indexed forms,
 * selected by idx (bits [11:10]).
 */
static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
                                int opc,
                                int size,
                                int rt,
                                bool is_vector)
{
    int rn = extract32(insn, 5, 5);
    int imm9 = sextract32(insn, 12, 9);
    int idx = extract32(insn, 10, 2);
    bool is_signed = false;
    bool is_store = false;
    bool is_extended = false;
    bool is_unpriv = (idx == 2);
    bool iss_valid = !is_vector;
    bool post_index;
    bool writeback;

    TCGv_i64 tcg_addr;

    if (is_vector) {
        size |= (opc & 2) << 1;
        if (size > 4 || is_unpriv) {
            unallocated_encoding(s);
            return;
        }
        is_store = ((opc & 1) == 0);
        if (!fp_access_check(s)) {
            return;
        }
    } else {
        if (size == 3 && opc == 2) {
            /* PRFM - prefetch: a no-op, but the unprivileged form
             * is unallocated.
             */
            if (is_unpriv) {
                unallocated_encoding(s);
                return;
            }
            return;
        }
        if (opc == 3 && size > 1) {
            unallocated_encoding(s);
            return;
        }
        is_store = (opc == 0);
        is_signed = extract32(opc, 1, 1);
        is_extended = (size < 3) && extract32(opc, 0, 1);
    }

    switch (idx) {
    case 0: /* unscaled immediate */
    case 2: /* unprivileged */
        post_index = false;
        writeback = false;
        break;
    case 1: /* post-index */
        post_index = true;
        writeback = true;
        break;
    case 3: /* pre-index */
        post_index = false;
        writeback = true;
        break;
    default:
        g_assert_not_reached();
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    tcg_addr = read_cpu_reg_sp(s, rn, 1);

    if (!post_index) {
        tcg_gen_addi_i64(tcg_addr, tcg_addr, imm9);
    }

    if (is_vector) {
        if (is_store) {
            do_fp_st(s, rt, tcg_addr, size);
        } else {
            do_fp_ld(s, rt, tcg_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        /* Unprivileged forms use the EL0 memory index */
        int memidx = is_unpriv ? get_a64_user_mem_index(s) : get_mem_index(s);
        bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);

        if (is_store) {
            do_gpr_st_memidx(s, tcg_rt, tcg_addr, size, memidx,
                             iss_valid, rt, iss_sf, false);
        } else {
            do_gpr_ld_memidx(s, tcg_rt, tcg_addr, size,
                             is_signed, is_extended, memidx,
                             iss_valid, rt, iss_sf, false);
        }
    }

    if (writeback) {
        TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
        if (post_index) {
            tcg_gen_addi_i64(tcg_addr, tcg_addr, imm9);
        }
        tcg_gen_mov_i64(tcg_rn, tcg_addr);
    }
}
2700
2701
2702
2703
2704
2705
2706
2707
2708
2709
2710
2711
2712
2713
2714
2715
2716
2717
2718
2719
2720
2721
/* Load/store register (register offset): base register plus an
 * optionally extended and shifted index register.
 */
static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn,
                                   int opc,
                                   int size,
                                   int rt,
                                   bool is_vector)
{
    int rn = extract32(insn, 5, 5);
    int shift = extract32(insn, 12, 1);
    int rm = extract32(insn, 16, 5);
    int opt = extract32(insn, 13, 3);
    bool is_signed = false;
    bool is_store = false;
    bool is_extended = false;

    TCGv_i64 tcg_rm;
    TCGv_i64 tcg_addr;

    /* option field bit 1 must be set (valid extend types only) */
    if (extract32(opt, 1, 1) == 0) {
        unallocated_encoding(s);
        return;
    }

    if (is_vector) {
        size |= (opc & 2) << 1;
        if (size > 4) {
            unallocated_encoding(s);
            return;
        }
        is_store = !extract32(opc, 0, 1);
        if (!fp_access_check(s)) {
            return;
        }
    } else {
        if (size == 3 && opc == 2) {
            /* PRFM - prefetch: treated as a no-op */
            return;
        }
        if (opc == 3 && size > 1) {
            unallocated_encoding(s);
            return;
        }
        is_store = (opc == 0);
        is_signed = extract32(opc, 1, 1);
        is_extended = (size < 3) && extract32(opc, 0, 1);
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    tcg_addr = read_cpu_reg_sp(s, rn, 1);

    /* Extend the offset register and scale it by the access size
     * when the shift bit is set.
     */
    tcg_rm = read_cpu_reg(s, rm, 1);
    ext_and_shift_reg(tcg_rm, tcg_rm, opt, shift ? size : 0);

    tcg_gen_add_i64(tcg_addr, tcg_addr, tcg_rm);

    if (is_vector) {
        if (is_store) {
            do_fp_st(s, rt, tcg_addr, size);
        } else {
            do_fp_ld(s, rt, tcg_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
        if (is_store) {
            do_gpr_st(s, tcg_rt, tcg_addr, size,
                      true, rt, iss_sf, false);
        } else {
            do_gpr_ld(s, tcg_rt, tcg_addr, size,
                      is_signed, is_extended,
                      true, rt, iss_sf, false);
        }
    }
}
2797
2798
2799
2800
2801
2802
2803
2804
2805
2806
2807
2808
2809
2810
2811
2812
2813
2814
/* Load/store register (unsigned immediate): base register plus a
 * 12-bit immediate scaled by the access size.
 */
static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn,
                                        int opc,
                                        int size,
                                        int rt,
                                        bool is_vector)
{
    int rn = extract32(insn, 5, 5);
    unsigned int imm12 = extract32(insn, 10, 12);
    unsigned int offset;

    TCGv_i64 tcg_addr;

    bool is_store;
    bool is_signed = false;
    bool is_extended = false;

    if (is_vector) {
        size |= (opc & 2) << 1;
        if (size > 4) {
            unallocated_encoding(s);
            return;
        }
        is_store = !extract32(opc, 0, 1);
        if (!fp_access_check(s)) {
            return;
        }
    } else {
        if (size == 3 && opc == 2) {
            /* PRFM - prefetch: treated as a no-op */
            return;
        }
        if (opc == 3 && size > 1) {
            unallocated_encoding(s);
            return;
        }
        is_store = (opc == 0);
        is_signed = extract32(opc, 1, 1);
        is_extended = (size < 3) && extract32(opc, 0, 1);
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    tcg_addr = read_cpu_reg_sp(s, rn, 1);
    /* imm12 is scaled by the access size */
    offset = imm12 << size;
    tcg_gen_addi_i64(tcg_addr, tcg_addr, offset);

    if (is_vector) {
        if (is_store) {
            do_fp_st(s, rt, tcg_addr, size);
        } else {
            do_fp_ld(s, rt, tcg_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
        if (is_store) {
            do_gpr_st(s, tcg_rt, tcg_addr, size,
                      true, rt, iss_sf, false);
        } else {
            do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, is_extended,
                      true, rt, iss_sf, false);
        }
    }
}
2880
2881
2882
2883
2884
2885
2886
2887
2888
2889
2890
2891
2892
2893
2894
/* Atomic memory operations (LDADD, LDCLR, ..., SWP)
 *
 * Rt: result register (old memory value)
 * Rn: base address or SP
 * Rs: source operand for the operation (e.g. the addend for LDADD)
 */
static void disas_ldst_atomic(DisasContext *s, uint32_t insn,
                              int size, int rt, bool is_vector)
{
    int rs = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int o3_opc = extract32(insn, 12, 4);
    TCGv_i64 tcg_rn, tcg_rs;
    AtomicThreeOpFn *fn;

    if (is_vector || !dc_isar_feature(aa64_atomics, s)) {
        unallocated_encoding(s);
        return;
    }
    /* NB: the case labels are octal, mirroring the o3:opc field split */
    switch (o3_opc) {
    case 000: /* LDADD */
        fn = tcg_gen_atomic_fetch_add_i64;
        break;
    case 001: /* LDCLR */
        fn = tcg_gen_atomic_fetch_and_i64;
        break;
    case 002: /* LDEOR */
        fn = tcg_gen_atomic_fetch_xor_i64;
        break;
    case 003: /* LDSET */
        fn = tcg_gen_atomic_fetch_or_i64;
        break;
    case 004: /* LDSMAX */
        fn = tcg_gen_atomic_fetch_smax_i64;
        break;
    case 005: /* LDSMIN */
        fn = tcg_gen_atomic_fetch_smin_i64;
        break;
    case 006: /* LDUMAX */
        fn = tcg_gen_atomic_fetch_umax_i64;
        break;
    case 007: /* LDUMIN */
        fn = tcg_gen_atomic_fetch_umin_i64;
        break;
    case 010: /* SWP */
        fn = tcg_gen_atomic_xchg_i64;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    tcg_rn = cpu_reg_sp(s, rn);
    tcg_rs = read_cpu_reg(s, rs, true);

    if (o3_opc == 1) { /* LDCLR: complement Rs so the AND clears its bits */
        tcg_gen_not_i64(tcg_rs, tcg_rs);
    }

    /* The tcg atomic primitives are all full barriers.  Therefore we
     * can ignore the Acquire and Release bits of this instruction.
     */
    fn(cpu_reg(s, rt), tcg_rn, tcg_rs, get_mem_index(s),
       s->be_data | size | MO_ALIGN);
}
2957
2958
2959static void disas_ldst_reg(DisasContext *s, uint32_t insn)
2960{
2961 int rt = extract32(insn, 0, 5);
2962 int opc = extract32(insn, 22, 2);
2963 bool is_vector = extract32(insn, 26, 1);
2964 int size = extract32(insn, 30, 2);
2965
2966 switch (extract32(insn, 24, 2)) {
2967 case 0:
2968 if (extract32(insn, 21, 1) == 0) {
2969
2970
2971
2972
2973 disas_ldst_reg_imm9(s, insn, opc, size, rt, is_vector);
2974 return;
2975 }
2976 switch (extract32(insn, 10, 2)) {
2977 case 0:
2978 disas_ldst_atomic(s, insn, size, rt, is_vector);
2979 return;
2980 case 2:
2981 disas_ldst_reg_roffset(s, insn, opc, size, rt, is_vector);
2982 return;
2983 }
2984 break;
2985 case 1:
2986 disas_ldst_reg_unsigned_imm(s, insn, opc, size, rt, is_vector);
2987 return;
2988 }
2989 unallocated_encoding(s);
2990}
2991
2992
2993
2994
2995
2996
2997
2998
2999
3000
3001
3002
3003
3004
3005
3006
3007
3008
3009
/* AdvSIMD load/store multiple structures (LD1-LD4/ST1-ST4)
 *
 * Transfers rpt * selem * elements vector elements between memory and
 * consecutive vector registers; the post-indexed variant (bit 23 set)
 * writes the updated address (or adds Rm) back to Rn.
 *
 * Rt: first (or only) SIMD&FP register to transfer
 * Rn: base address or SP
 */
static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int size = extract32(insn, 10, 2);
    int opcode = extract32(insn, 12, 4);
    bool is_store = !extract32(insn, 22, 1);
    bool is_postidx = extract32(insn, 23, 1);
    bool is_q = extract32(insn, 30, 1);
    TCGv_i64 tcg_addr, tcg_rn, tcg_ebytes;
    TCGMemOp endian = s->be_data;

    int ebytes;   /* bytes per element */
    int elements; /* elements per (partial) vector */
    int rpt;      /* number of iterated register groups */
    int selem;    /* structure elements per access */
    int r;

    if (extract32(insn, 31, 1) || extract32(insn, 21, 1)) {
        unallocated_encoding(s);
        return;
    }

    /* Decode opcode into (rpt, selem) */
    switch (opcode) {
    case 0x0: /* LD4/ST4 (4 registers) */
        rpt = 1;
        selem = 4;
        break;
    case 0x2: /* LD1/ST1 (4 registers) */
        rpt = 4;
        selem = 1;
        break;
    case 0x4: /* LD3/ST3 */
        rpt = 1;
        selem = 3;
        break;
    case 0x6: /* LD1/ST1 (3 registers) */
        rpt = 3;
        selem = 1;
        break;
    case 0x7: /* LD1/ST1 (1 register) */
        rpt = 1;
        selem = 1;
        break;
    case 0x8: /* LD2/ST2 */
        rpt = 1;
        selem = 2;
        break;
    case 0xa: /* LD1/ST1 (2 registers) */
        rpt = 2;
        selem = 1;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (size == 3 && !is_q && selem != 1) {
        /* reserved: 64-bit element, non-quad, multi-structure */
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    /* For our purposes, bytes are always little-endian.  */
    if (size == 0) {
        endian = MO_LE;
    }

    /* Consecutive little-endian elements from a single register
     * can be promoted to a larger little-endian operation.
     */
    if (selem == 1 && endian == MO_LE) {
        size = 3;
    }
    ebytes = 1 << size;
    elements = (is_q ? 16 : 8) / ebytes;

    tcg_rn = cpu_reg_sp(s, rn);
    tcg_addr = tcg_temp_new_i64();
    tcg_gen_mov_i64(tcg_addr, tcg_rn);
    tcg_ebytes = tcg_const_i64(ebytes);

    for (r = 0; r < rpt; r++) {
        int e;
        for (e = 0; e < elements; e++) {
            int xs;
            for (xs = 0; xs < selem; xs++) {
                int tt = (rt + r + xs) % 32;
                if (is_store) {
                    do_vec_st(s, tt, e, tcg_addr, size, endian);
                } else {
                    do_vec_ld(s, tt, e, tcg_addr, size, endian);
                }
                tcg_gen_add_i64(tcg_addr, tcg_addr, tcg_ebytes);
            }
        }
    }

    if (!is_store) {
        /* For non-quad operations, setting a slice of the low
         * 64 bits of the register clears the high 64 bits (in
         * the ARM ARM pseudocode this is implicit in the fact
         * that 'rval' is a 64 bit wide variable).
         * For quad operations, we might still need to zero the
         * high bits of SVE.
         */
        for (r = 0; r < rpt * selem; r++) {
            int tt = (rt + r) % 32;
            clear_vec_high(s, is_q, tt);
        }
    }

    if (is_postidx) {
        int rm = extract32(insn, 16, 5);
        if (rm == 31) {
            /* post-index by the total transfer size */
            tcg_gen_mov_i64(tcg_rn, tcg_addr);
        } else {
            tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
        }
    }
    tcg_temp_free_i64(tcg_ebytes);
    tcg_temp_free_i64(tcg_addr);
}
3142
3143
3144
3145
3146
3147
3148
3149
3150
3151
3152
3153
3154
3155
3156
3157
3158
3159
3160
3161
3162
3163
3164
/* AdvSIMD load/store single structure
 *
 * Transfers a single element between memory and one element of each of
 * selem consecutive vector registers, or (for the "load and replicate"
 * forms, LD1R etc) replicates the loaded value across every element of
 * the destination register(s).
 */
static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int size = extract32(insn, 10, 2);
    int S = extract32(insn, 12, 1);
    int opc = extract32(insn, 13, 3);
    int R = extract32(insn, 21, 1);
    int is_load = extract32(insn, 22, 1);
    int is_postidx = extract32(insn, 23, 1);
    int is_q = extract32(insn, 30, 1);

    int scale = extract32(opc, 1, 2);  /* log2 of element size */
    int selem = (extract32(opc, 0, 1) << 1 | R) + 1;
    bool replicate = false;
    int index = is_q << 3 | S << 2 | size;  /* raw index bits; shifted below */
    int ebytes, xs;
    TCGv_i64 tcg_addr, tcg_rn, tcg_ebytes;

    switch (scale) {
    case 3:
        /* load and replicate (LD1R etc) */
        if (!is_load || S) {
            unallocated_encoding(s);
            return;
        }
        scale = size;
        replicate = true;
        break;
    case 0:
        /* byte element: index uses all of Q:S:size */
        break;
    case 1:
        /* halfword element: size<0> must be zero */
        if (extract32(size, 0, 1)) {
            unallocated_encoding(s);
            return;
        }
        index >>= 1;
        break;
    case 2:
        /* word or (when size<0> is set) doubleword element */
        if (extract32(size, 1, 1)) {
            unallocated_encoding(s);
            return;
        }
        if (!extract32(size, 0, 1)) {
            /* word */
            index >>= 2;
        } else {
            if (S) {
                unallocated_encoding(s);
                return;
            }
            /* doubleword */
            index >>= 3;
            scale = 3;
        }
        break;
    default:
        g_assert_not_reached();
    }

    if (!fp_access_check(s)) {
        return;
    }

    ebytes = 1 << scale;

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    tcg_rn = cpu_reg_sp(s, rn);
    tcg_addr = tcg_temp_new_i64();
    tcg_gen_mov_i64(tcg_addr, tcg_rn);
    tcg_ebytes = tcg_const_i64(ebytes);

    for (xs = 0; xs < selem; xs++) {
        if (replicate) {
            /* Load and replicate to all elements */
            TCGv_i64 tcg_tmp = tcg_temp_new_i64();

            tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr,
                                get_mem_index(s), s->be_data + scale);
            tcg_gen_gvec_dup_i64(scale, vec_full_reg_offset(s, rt),
                                 (is_q + 1) * 8, vec_full_reg_size(s),
                                 tcg_tmp);
            tcg_temp_free_i64(tcg_tmp);
        } else {
            /* Load/store one element per register */
            if (is_load) {
                do_vec_ld(s, rt, index, tcg_addr, scale, s->be_data);
            } else {
                do_vec_st(s, rt, index, tcg_addr, scale, s->be_data);
            }
        }
        tcg_gen_add_i64(tcg_addr, tcg_addr, tcg_ebytes);
        rt = (rt + 1) % 32;
    }

    if (is_postidx) {
        int rm = extract32(insn, 16, 5);
        if (rm == 31) {
            tcg_gen_mov_i64(tcg_rn, tcg_addr);
        } else {
            tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
        }
    }
    tcg_temp_free_i64(tcg_ebytes);
    tcg_temp_free_i64(tcg_addr);
}
3271
3272
3273static void disas_ldst(DisasContext *s, uint32_t insn)
3274{
3275 switch (extract32(insn, 24, 6)) {
3276 case 0x08:
3277 disas_ldst_excl(s, insn);
3278 break;
3279 case 0x18: case 0x1c:
3280 disas_ld_lit(s, insn);
3281 break;
3282 case 0x28: case 0x29:
3283 case 0x2c: case 0x2d:
3284 disas_ldst_pair(s, insn);
3285 break;
3286 case 0x38: case 0x39:
3287 case 0x3c: case 0x3d:
3288 disas_ldst_reg(s, insn);
3289 break;
3290 case 0x0c:
3291 disas_ldst_multiple_struct(s, insn);
3292 break;
3293 case 0x0d:
3294 disas_ldst_single_struct(s, insn);
3295 break;
3296 default:
3297 unallocated_encoding(s);
3298 break;
3299 }
3300}
3301
3302
3303
3304
3305
3306
3307
3308static void disas_pc_rel_adr(DisasContext *s, uint32_t insn)
3309{
3310 unsigned int page, rd;
3311 uint64_t base;
3312 uint64_t offset;
3313
3314 page = extract32(insn, 31, 1);
3315
3316 offset = sextract64(insn, 5, 19);
3317 offset = offset << 2 | extract32(insn, 29, 2);
3318 rd = extract32(insn, 0, 5);
3319 base = s->pc - 4;
3320
3321 if (page) {
3322
3323 base &= ~0xfff;
3324 offset <<= 12;
3325 }
3326
3327 tcg_gen_movi_i64(cpu_reg(s, rd), base + offset);
3328}
3329
3330
3331
3332
3333
3334
3335
3336
3337
3338
3339
3340
3341
3342
/*
 * Add/subtract (immediate)
 *
 *    sf: 0 -> 32bit, 1 -> 64bit
 *    op: 0 -> add  , 1 -> sub
 *     S: 1 -> set flags
 * shift: 00 -> LSL imm by 0, 01 -> LSL imm by 12; others reserved
 */
static void disas_add_sub_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    uint64_t imm = extract32(insn, 10, 12);
    int shift = extract32(insn, 22, 2);
    bool setflags = extract32(insn, 29, 1);
    bool sub_op = extract32(insn, 30, 1);
    bool is_64bit = extract32(insn, 31, 1);

    /* Rn may be SP; Rd may be SP only for the non-flag-setting forms */
    TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
    TCGv_i64 tcg_rd = setflags ? cpu_reg(s, rd) : cpu_reg_sp(s, rd);
    TCGv_i64 tcg_result;

    switch (shift) {
    case 0x0:
        break;
    case 0x1:
        imm <<= 12;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    tcg_result = tcg_temp_new_i64();
    if (!setflags) {
        if (sub_op) {
            tcg_gen_subi_i64(tcg_result, tcg_rn, imm);
        } else {
            tcg_gen_addi_i64(tcg_result, tcg_rn, imm);
        }
    } else {
        TCGv_i64 tcg_imm = tcg_const_i64(imm);
        if (sub_op) {
            gen_sub_CC(is_64bit, tcg_result, tcg_rn, tcg_imm);
        } else {
            gen_add_CC(is_64bit, tcg_result, tcg_rn, tcg_imm);
        }
        tcg_temp_free_i64(tcg_imm);
    }

    if (is_64bit) {
        tcg_gen_mov_i64(tcg_rd, tcg_result);
    } else {
        /* 32-bit form: zero-extend the result into Rd */
        tcg_gen_ext32u_i64(tcg_rd, tcg_result);
    }

    tcg_temp_free_i64(tcg_result);
}
3393
3394
3395
3396
3397
/* Replicate the e-bit pattern in the low bits of mask across all 64 bits.
 * e must be a power of two no larger than 64.
 */
static uint64_t bitfield_replicate(uint64_t mask, unsigned int e)
{
    unsigned int stride;

    assert(e != 0);
    for (stride = e; stride < 64; stride *= 2) {
        mask |= mask << stride;
    }
    return mask;
}
3407
3408
/* Return a value with the bottom `length` bits set (1 <= length <= 64) */
static inline uint64_t bitmask64(unsigned int length)
{
    assert(length > 0 && length <= 64);
    /* Avoid shifting a 64-bit one by 64 for the full-width case */
    return length == 64 ? ~0ULL : (1ULL << length) - 1;
}
3414
3415
3416
3417
3418
3419
/* Simplified variant of the pseudocode DecodeBitMasks(): decode the
 * (immn, imms, immr) fields of a logical-immediate instruction into
 * the 64-bit "wmask" pattern.  Returns false when the encoding is one
 * of the reserved (invalid) combinations, which must UNDEF.
 */
bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn,
                            unsigned int imms, unsigned int immr)
{
    uint64_t mask;
    unsigned e, levels, s, r;
    int len;

    assert(immn < 2 && imms < 64 && immr < 64);

    /* The bit patterns we create here are 64 bit patterns which
     * are vectors of identical elements of size e = 2, 4, 8, 16,
     * 32 or 64 bits each.  Each element contains the same value: a
     * run of between 1 and e-1 non-zero bits, rotated within the
     * element by between 0 and e-1 bits.
     *
     * The element size and run length are encoded into immn (1 bit)
     * and imms (6 bits) as follows:
     * 64 bit elements: immn = 1, imms = <length of run - 1>
     * 32 bit elements: immn = 0, imms = 0 : <length of run - 1>
     * 16 bit elements: immn = 0, imms = 10 : <length of run - 1>
     * 8 bit elements:  immn = 0, imms = 110 : <length of run - 1>
     * 4 bit elements:  immn = 0, imms = 1110 : <length of run - 1>
     * 2 bit elements:  immn = 0, imms = 11110 : <length of run - 1>
     * Notice that immn = 0, imms = 11111x is the only combination
     * not covered by one of the above options; this is reserved.
     * Further, a <length of run - 1> of all-ones is reserved.
     *
     * In all cases the rotation is by immr % e (and immr is 6 bits).
     */

    /* First determine the element size (highest set bit of immn:NOT(imms)) */
    len = 31 - clz32((immn << 6) | (~imms & 0x3f));
    if (len < 1) {
        /* This is the immn == 0, imms == 0x11111x reserved case */
        return false;
    }
    e = 1 << len;

    levels = e - 1;
    s = imms & levels;
    r = immr & levels;

    if (s == levels) {
        /* <length of run - 1> of all-ones is reserved */
        return false;
    }

    /* Create the value of one element: s + 1 set bits, rotated right
     * by r within the e-bit element...
     */
    mask = bitmask64(s + 1);
    if (r) {
        mask = (mask >> r) | (mask << (e - r));
        mask &= bitmask64(e);
    }
    /* ...then replicate the element over the full 64 bits */
    mask = bitfield_replicate(mask, e);
    *result = mask;
    return true;
}
3480
3481
3482
3483
3484
3485
3486
/* Logical (immediate): AND, ORR, EOR, ANDS
 *
 * sf selects 32/64-bit; opc selects the operation; N:immr:imms encode
 * the bitmask immediate (see logic_imm_decode_wmask).
 */
static void disas_logic_imm(DisasContext *s, uint32_t insn)
{
    unsigned int sf, opc, is_n, immr, imms, rn, rd;
    TCGv_i64 tcg_rd, tcg_rn;
    uint64_t wmask;
    bool is_and = false;

    sf = extract32(insn, 31, 1);
    opc = extract32(insn, 29, 2);
    is_n = extract32(insn, 22, 1);
    immr = extract32(insn, 16, 6);
    imms = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    if (!sf && is_n) {
        /* N == 1 is reserved for the 32-bit variants */
        unallocated_encoding(s);
        return;
    }

    if (opc == 0x3) { /* ANDS: Rd cannot be SP */
        tcg_rd = cpu_reg(s, rd);
    } else {
        tcg_rd = cpu_reg_sp(s, rd);
    }
    tcg_rn = cpu_reg(s, rn);

    if (!logic_imm_decode_wmask(&wmask, is_n, imms, immr)) {
        /* some immediate field values are reserved */
        unallocated_encoding(s);
        return;
    }

    if (!sf) {
        wmask &= 0xffffffff;
    }

    switch (opc) {
    case 0x3: /* ANDS */
    case 0x0: /* AND */
        tcg_gen_andi_i64(tcg_rd, tcg_rn, wmask);
        is_and = true;
        break;
    case 0x1: /* ORR */
        tcg_gen_ori_i64(tcg_rd, tcg_rn, wmask);
        break;
    case 0x2: /* EOR */
        tcg_gen_xori_i64(tcg_rd, tcg_rn, wmask);
        break;
    default:
        assert(FALSE); /* must handle all above */
        break;
    }

    if (!sf && !is_and) {
        /* zero extend final result; we know we can skip this for AND
         * since the immediate had the high 32 bits clear.
         */
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }

    if (opc == 3) { /* ANDS also sets the flags */
        gen_logic_CC(sf, tcg_rd);
    }
}
3552
3553
3554
3555
3556
3557
3558
3559
3560
3561
3562
3563
3564
/*
 * Move wide (immediate): MOVN, MOVZ, MOVK
 *
 *  sf: 0 -> 32 bit, 1 -> 64 bit
 * opc: 00 -> N (inverted), 10 -> Z (zeroing), 11 -> K (keep)
 *  hw: shift/16 (0, 16; and also 32, 48 for 64-bit)
 */
static void disas_movw_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    uint64_t imm = extract32(insn, 5, 16);
    int sf = extract32(insn, 31, 1);
    int opc = extract32(insn, 29, 2);
    int pos = extract32(insn, 21, 2) << 4;
    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_imm;

    if (!sf && (pos >= 32)) {
        /* shifts of 32/48 are reserved for the 32-bit variants */
        unallocated_encoding(s);
        return;
    }

    switch (opc) {
    case 0: /* MOVN */
    case 2: /* MOVZ */
        imm <<= pos;
        if (opc == 0) {
            imm = ~imm;
        }
        if (!sf) {
            imm &= 0xffffffffu;
        }
        tcg_gen_movi_i64(tcg_rd, imm);
        break;
    case 3: /* MOVK: insert 16 bits, keeping the rest of Rd */
        tcg_imm = tcg_const_i64(imm);
        tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_imm, pos, 16);
        tcg_temp_free_i64(tcg_imm);
        if (!sf) {
            tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
        }
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
3605
3606
3607
3608
3609
3610
3611
/* Bitfield: SBFM, BFM, UBFM (and their aliases such as ASR, LSL,
 * LSR, SXT*, UXT*, BFI, BFXIL, SBFX, UBFX)
 */
static void disas_bitfield(DisasContext *s, uint32_t insn)
{
    unsigned int sf, n, opc, ri, si, rn, rd, bitsize, pos, len;
    TCGv_i64 tcg_rd, tcg_tmp;

    sf = extract32(insn, 31, 1);
    opc = extract32(insn, 29, 2);
    n = extract32(insn, 22, 1);
    ri = extract32(insn, 16, 6);  /* immr: rotate/right-shift amount */
    si = extract32(insn, 10, 6);  /* imms: source field MSB */
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);
    bitsize = sf ? 64 : 32;

    if (sf != n || ri >= bitsize || si >= bitsize || opc > 2) {
        unallocated_encoding(s);
        return;
    }

    tcg_rd = cpu_reg(s, rd);

    /* Suppress the zero-extend for !sf.  Since RI and SI are constrained
     * to be smaller than bitsize, we'll never reference data outside the
     * low 32-bits anyway.
     */
    tcg_tmp = read_cpu_reg(s, rn, 1);

    /* Recognize simple(r) extractions.  */
    if (si >= ri) {
        /* Wd<s-r:0> = Wn<s:r> */
        len = (si - ri) + 1;
        if (opc == 0) { /* SBFM: ASR, SBFX, SXTB, SXTH, SXTW */
            tcg_gen_sextract_i64(tcg_rd, tcg_tmp, ri, len);
            goto done;
        } else if (opc == 2) { /* UBFM: UBFX, LSR, UXTB, UXTH */
            tcg_gen_extract_i64(tcg_rd, tcg_tmp, ri, len);
            return;
        }
        /* opc == 1 (BFXIL): fall through to the deposit below */
        tcg_gen_extract_i64(tcg_tmp, tcg_tmp, ri, len);
        pos = 0;
    } else {
        /* Handle the ri > si case with a deposit
         * Wd<32+s-r,32-r> = Wn<s:0>
         */
        len = si + 1;
        pos = (bitsize - ri) & (bitsize - 1);
    }

    if (opc == 0 && len < ri) {
        /* SBFM: sign extend the destination field from len to fill
         * the balance of the word.  Let the deposit below insert all
         * of those sign bits.
         */
        tcg_gen_sextract_i64(tcg_tmp, tcg_tmp, 0, len);
        len = ri;
    }

    if (opc == 1) { /* BFM, BFXIL: merge into existing Rd */
        tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, pos, len);
    } else {
        /* SBFM or UBFM: We start with zero, and we haven't modified
         * any bits outside bitsize, therefore the zero-extension
         * below is unneeded.
         */
        tcg_gen_deposit_z_i64(tcg_rd, tcg_tmp, pos, len);
        return;
    }

 done:
    if (!sf) { /* zero extend final result */
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
}
3683
3684
3685
3686
3687
3688
3689
/* Extract (EXTR; the Rm == Rn form is the ROR-immediate alias) */
static void disas_extract(DisasContext *s, uint32_t insn)
{
    unsigned int sf, n, rm, imm, rn, rd, bitsize, op21, op0;

    sf = extract32(insn, 31, 1);
    n = extract32(insn, 22, 1);
    rm = extract32(insn, 16, 5);
    imm = extract32(insn, 10, 6);  /* LSB position to extract from */
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);
    op21 = extract32(insn, 29, 2);
    op0 = extract32(insn, 21, 1);
    bitsize = sf ? 64 : 32;

    if (sf != n || op21 || op0 || imm >= bitsize) {
        unallocated_encoding(s);
    } else {
        TCGv_i64 tcg_rd, tcg_rm, tcg_rn;

        tcg_rd = cpu_reg(s, rd);

        if (unlikely(imm == 0)) {
            /* tcg shl_i32/shl_i64 is undefined for 32/64 bit shifts,
             * so an extract from bit 0 is a special case.
             */
            if (sf) {
                tcg_gen_mov_i64(tcg_rd, cpu_reg(s, rm));
            } else {
                tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, rm));
            }
        } else if (rm == rn) { /* ROR */
            tcg_rm = cpu_reg(s, rm);
            if (sf) {
                tcg_gen_rotri_i64(tcg_rd, tcg_rm, imm);
            } else {
                TCGv_i32 tmp = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(tmp, tcg_rm);
                tcg_gen_rotri_i32(tmp, tmp, imm);
                tcg_gen_extu_i32_i64(tcg_rd, tmp);
                tcg_temp_free_i32(tmp);
            }
        } else {
            /* general case: Rd = Rn:Rm >> imm */
            tcg_rm = read_cpu_reg(s, rm, sf);
            tcg_rn = read_cpu_reg(s, rn, sf);
            tcg_gen_shri_i64(tcg_rm, tcg_rm, imm);
            tcg_gen_shli_i64(tcg_rn, tcg_rn, bitsize - imm);
            tcg_gen_or_i64(tcg_rd, tcg_rm, tcg_rn);
            if (!sf) {
                tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
            }
        }
    }
}
3743
3744
3745static void disas_data_proc_imm(DisasContext *s, uint32_t insn)
3746{
3747 switch (extract32(insn, 23, 6)) {
3748 case 0x20: case 0x21:
3749 disas_pc_rel_adr(s, insn);
3750 break;
3751 case 0x22: case 0x23:
3752 disas_add_sub_imm(s, insn);
3753 break;
3754 case 0x24:
3755 disas_logic_imm(s, insn);
3756 break;
3757 case 0x25:
3758 disas_movw_imm(s, insn);
3759 break;
3760 case 0x26:
3761 disas_bitfield(s, insn);
3762 break;
3763 case 0x27:
3764 disas_extract(s, insn);
3765 break;
3766 default:
3767 unallocated_encoding(s);
3768 break;
3769 }
3770}
3771
3772
3773
3774
3775
3776
/* Shift a TCGv src by TCGv shift_amount, put result in dst.
 * The shift amount must be in range (i.e. 0..31 or 0..63); it is the
 * caller's responsibility to ensure that (the relevant instructions
 * UNDEF on out-of-range immediates).
 */
static void shift_reg(TCGv_i64 dst, TCGv_i64 src, int sf,
                      enum a64_shift_type shift_type, TCGv_i64 shift_amount)
{
    switch (shift_type) {
    case A64_SHIFT_TYPE_LSL:
        tcg_gen_shl_i64(dst, src, shift_amount);
        break;
    case A64_SHIFT_TYPE_LSR:
        tcg_gen_shr_i64(dst, src, shift_amount);
        break;
    case A64_SHIFT_TYPE_ASR:
        if (!sf) {
            /* 32-bit ASR: sign-extend first so the shift brings in sign bits */
            tcg_gen_ext32s_i64(dst, src);
        }
        tcg_gen_sar_i64(dst, sf ? src : dst, shift_amount);
        break;
    case A64_SHIFT_TYPE_ROR:
        if (sf) {
            tcg_gen_rotr_i64(dst, src, shift_amount);
        } else {
            /* 32-bit ROR must rotate within the low 32 bits only */
            TCGv_i32 t0, t1;
            t0 = tcg_temp_new_i32();
            t1 = tcg_temp_new_i32();
            tcg_gen_extrl_i64_i32(t0, src);
            tcg_gen_extrl_i64_i32(t1, shift_amount);
            tcg_gen_rotr_i32(t0, t0, t1);
            tcg_gen_extu_i32_i64(dst, t0);
            tcg_temp_free_i32(t0);
            tcg_temp_free_i32(t1);
        }
        break;
    default:
        assert(FALSE); /* all shift types should be handled */
        break;
    }

    if (!sf) { /* zero extend final result */
        tcg_gen_ext32u_i64(dst, dst);
    }
}
3817
3818
3819
3820
3821
3822static void shift_reg_imm(TCGv_i64 dst, TCGv_i64 src, int sf,
3823 enum a64_shift_type shift_type, unsigned int shift_i)
3824{
3825 assert(shift_i < (sf ? 64 : 32));
3826
3827 if (shift_i == 0) {
3828 tcg_gen_mov_i64(dst, src);
3829 } else {
3830 TCGv_i64 shift_const;
3831
3832 shift_const = tcg_const_i64(shift_i);
3833 shift_reg(dst, src, sf, shift_type, shift_const);
3834 tcg_temp_free_i64(shift_const);
3835 }
3836}
3837
3838
3839
3840
3841
3842
3843
/* Logical (shifted register): AND, BIC, ORR, ORN, EOR, EON, ANDS, BICS
 *
 * sf selects 32/64-bit, opc the operation, N (invert) the inverted
 * variants; Rm is optionally shifted by imm6 with shift type 'shift'.
 */
static void disas_logic_reg(DisasContext *s, uint32_t insn)
{
    TCGv_i64 tcg_rd, tcg_rn, tcg_rm;
    unsigned int sf, opc, shift_type, invert, rm, shift_amount, rn, rd;

    sf = extract32(insn, 31, 1);
    opc = extract32(insn, 29, 2);
    shift_type = extract32(insn, 22, 2);
    invert = extract32(insn, 21, 1);
    rm = extract32(insn, 16, 5);
    shift_amount = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    if (!sf && (shift_amount & (1 << 5))) {
        /* shift amounts >= 32 are reserved for the 32-bit variants */
        unallocated_encoding(s);
        return;
    }

    tcg_rd = cpu_reg(s, rd);

    if (opc == 1 && shift_amount == 0 && shift_type == 0 && rn == 31) {
        /* Unshifted ORR and ORN with WZR/XZR is the standard encoding for
         * register-register MOV and MVN, so it is worth special casing.
         */
        tcg_rm = cpu_reg(s, rm);
        if (invert) { /* MVN */
            tcg_gen_not_i64(tcg_rd, tcg_rm);
            if (!sf) {
                tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
            }
        } else { /* MOV */
            if (sf) {
                tcg_gen_mov_i64(tcg_rd, tcg_rm);
            } else {
                tcg_gen_ext32u_i64(tcg_rd, tcg_rm);
            }
        }
        return;
    }

    tcg_rm = read_cpu_reg(s, rm, sf);

    if (shift_amount) {
        shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, shift_amount);
    }

    tcg_rn = cpu_reg(s, rn);

    switch (opc | (invert << 2)) {
    case 0: /* AND */
    case 3: /* ANDS */
        tcg_gen_and_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 1: /* ORR */
        tcg_gen_or_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 2: /* EOR */
        tcg_gen_xor_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 4: /* BIC */
    case 7: /* BICS */
        tcg_gen_andc_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 5: /* ORN */
        tcg_gen_orc_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 6: /* EON */
        tcg_gen_eqv_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    default:
        assert(FALSE); /* all combinations covered above */
        break;
    }

    if (!sf) {
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }

    if (opc == 3) { /* ANDS/BICS set the flags */
        gen_logic_CC(sf, tcg_rd);
    }
}
3927
3928
3929
3930
3931
3932
3933
3934
3935
3936
3937
3938
3939
3940
3941
3942
3943
3944
/*
 * Add/subtract (extended register)
 *
 *     sf: 0 -> 32bit, 1 -> 64bit
 *     op: 0 -> add  , 1 -> sub
 *      S: 1 -> set flags
 * option: extension type applied to Rm
 *   imm3: optional left shift (0-4) applied after extension
 *
 * Rd = Rn + LSL(extend(Rm), imm3)
 */
static void disas_add_sub_ext_reg(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int imm3 = extract32(insn, 10, 3);
    int option = extract32(insn, 13, 3);
    int rm = extract32(insn, 16, 5);
    bool setflags = extract32(insn, 29, 1);
    bool sub_op = extract32(insn, 30, 1);
    bool sf = extract32(insn, 31, 1);

    TCGv_i64 tcg_rm, tcg_rn;
    TCGv_i64 tcg_rd;
    TCGv_i64 tcg_result;

    if (imm3 > 4) {
        /* shift amounts 5..7 are reserved */
        unallocated_encoding(s);
        return;
    }

    /* non-flag setting ops may use SP as Rd */
    if (!setflags) {
        tcg_rd = cpu_reg_sp(s, rd);
    } else {
        tcg_rd = cpu_reg(s, rd);
    }
    tcg_rn = read_cpu_reg_sp(s, rn, sf);

    tcg_rm = read_cpu_reg(s, rm, sf);
    ext_and_shift_reg(tcg_rm, tcg_rm, option, imm3);

    tcg_result = tcg_temp_new_i64();

    if (!setflags) {
        if (sub_op) {
            tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
        } else {
            tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
        }
    } else {
        if (sub_op) {
            gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
        } else {
            gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
        }
    }

    if (sf) {
        tcg_gen_mov_i64(tcg_rd, tcg_result);
    } else {
        /* 32-bit form: zero-extend result into Rd */
        tcg_gen_ext32u_i64(tcg_rd, tcg_result);
    }

    tcg_temp_free_i64(tcg_result);
}
4000
4001
4002
4003
4004
4005
4006
4007
4008
4009
4010
4011
4012
4013
4014
/*
 * Add/subtract (shifted register)
 *
 *    sf: 0 -> 32bit, 1 -> 64bit
 *    op: 0 -> add  , 1 -> sub
 *     S: 1 -> set flags
 * shift: 00 -> LSL, 01 -> LSR, 10 -> ASR, 11 -> RESERVED
 *  imm6: shift amount applied to Rm before the add/sub
 */
static void disas_add_sub_reg(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int imm6 = extract32(insn, 10, 6);
    int rm = extract32(insn, 16, 5);
    int shift_type = extract32(insn, 22, 2);
    bool setflags = extract32(insn, 29, 1);
    bool sub_op = extract32(insn, 30, 1);
    bool sf = extract32(insn, 31, 1);

    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_rn, tcg_rm;
    TCGv_i64 tcg_result;

    if ((shift_type == 3) || (!sf && (imm6 > 31))) {
        /* ROR and 32-bit shifts >= 32 are reserved */
        unallocated_encoding(s);
        return;
    }

    tcg_rn = read_cpu_reg(s, rn, sf);
    tcg_rm = read_cpu_reg(s, rm, sf);

    shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, imm6);

    tcg_result = tcg_temp_new_i64();

    if (!setflags) {
        if (sub_op) {
            tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
        } else {
            tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
        }
    } else {
        if (sub_op) {
            gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
        } else {
            gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
        }
    }

    if (sf) {
        tcg_gen_mov_i64(tcg_rd, tcg_result);
    } else {
        /* 32-bit form: zero-extend result into Rd */
        tcg_gen_ext32u_i64(tcg_rd, tcg_result);
    }

    tcg_temp_free_i64(tcg_result);
}
4064
4065
4066
4067
4068
4069
4070
4071
4072static void disas_data_proc_3src(DisasContext *s, uint32_t insn)
4073{
4074 int rd = extract32(insn, 0, 5);
4075 int rn = extract32(insn, 5, 5);
4076 int ra = extract32(insn, 10, 5);
4077 int rm = extract32(insn, 16, 5);
4078 int op_id = (extract32(insn, 29, 3) << 4) |
4079 (extract32(insn, 21, 3) << 1) |
4080 extract32(insn, 15, 1);
4081 bool sf = extract32(insn, 31, 1);
4082 bool is_sub = extract32(op_id, 0, 1);
4083 bool is_high = extract32(op_id, 2, 1);
4084 bool is_signed = false;
4085 TCGv_i64 tcg_op1;
4086 TCGv_i64 tcg_op2;
4087 TCGv_i64 tcg_tmp;
4088
4089
4090 switch (op_id) {
4091 case 0x42:
4092 case 0x43:
4093 case 0x44:
4094 is_signed = true;
4095 break;
4096 case 0x0:
4097 case 0x1:
4098 case 0x40:
4099 case 0x41:
4100 case 0x4a:
4101 case 0x4b:
4102 case 0x4c:
4103 break;
4104 default:
4105 unallocated_encoding(s);
4106 return;
4107 }
4108
4109 if (is_high) {
4110 TCGv_i64 low_bits = tcg_temp_new_i64();
4111 TCGv_i64 tcg_rd = cpu_reg(s, rd);
4112 TCGv_i64 tcg_rn = cpu_reg(s, rn);
4113 TCGv_i64 tcg_rm = cpu_reg(s, rm);
4114
4115 if (is_signed) {
4116 tcg_gen_muls2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
4117 } else {
4118 tcg_gen_mulu2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
4119 }
4120
4121 tcg_temp_free_i64(low_bits);
4122 return;
4123 }
4124
4125 tcg_op1 = tcg_temp_new_i64();
4126 tcg_op2 = tcg_temp_new_i64();
4127 tcg_tmp = tcg_temp_new_i64();
4128
4129 if (op_id < 0x42) {
4130 tcg_gen_mov_i64(tcg_op1, cpu_reg(s, rn));
4131 tcg_gen_mov_i64(tcg_op2, cpu_reg(s, rm));
4132 } else {
4133 if (is_signed) {
4134 tcg_gen_ext32s_i64(tcg_op1, cpu_reg(s, rn));
4135 tcg_gen_ext32s_i64(tcg_op2, cpu_reg(s, rm));
4136 } else {
4137 tcg_gen_ext32u_i64(tcg_op1, cpu_reg(s, rn));
4138 tcg_gen_ext32u_i64(tcg_op2, cpu_reg(s, rm));
4139 }
4140 }
4141
4142 if (ra == 31 && !is_sub) {
4143
4144 tcg_gen_mul_i64(cpu_reg(s, rd), tcg_op1, tcg_op2);
4145 } else {
4146 tcg_gen_mul_i64(tcg_tmp, tcg_op1, tcg_op2);
4147 if (is_sub) {
4148 tcg_gen_sub_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
4149 } else {
4150 tcg_gen_add_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
4151 }
4152 }
4153
4154 if (!sf) {
4155 tcg_gen_ext32u_i64(cpu_reg(s, rd), cpu_reg(s, rd));
4156 }
4157
4158 tcg_temp_free_i64(tcg_op1);
4159 tcg_temp_free_i64(tcg_op2);
4160 tcg_temp_free_i64(tcg_tmp);
4161}
4162
4163
4164
4165
4166
4167
4168
4169
4170
/* Add/subtract (with carry): ADC, ADCS, SBC, SBCS
 *
 * sf selects 32/64-bit; op selects add (0) or subtract (1);
 * S selects the flag-setting variants.
 */
static void disas_adc_sbc(DisasContext *s, uint32_t insn)
{
    unsigned int sf, op, setflags, rm, rn, rd;
    TCGv_i64 tcg_y, tcg_rn, tcg_rd;

    if (extract32(insn, 10, 6) != 0) {
        /* bits [15:10] must be zero */
        unallocated_encoding(s);
        return;
    }

    sf = extract32(insn, 31, 1);
    op = extract32(insn, 30, 1);
    setflags = extract32(insn, 29, 1);
    rm = extract32(insn, 16, 5);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    tcg_rd = cpu_reg(s, rd);
    tcg_rn = cpu_reg(s, rn);

    if (op) {
        /* SBC: subtract is add with the second operand inverted
         * (Rn + ~Rm + carry)
         */
        tcg_y = new_tmp_a64(s);
        tcg_gen_not_i64(tcg_y, cpu_reg(s, rm));
    } else {
        tcg_y = cpu_reg(s, rm);
    }

    if (setflags) {
        gen_adc_CC(sf, tcg_rd, tcg_rn, tcg_y);
    } else {
        gen_adc(sf, tcg_rd, tcg_rn, tcg_y);
    }
}
4204
4205
4206
4207
4208
4209
4210
4211
/* Conditional compare (immediate / register): CCMN, CCMP
 *
 * If the condition passes, perform the comparison and set the flags
 * normally; otherwise force the flags to the literal #nzcv value.
 */
static void disas_cc(DisasContext *s, uint32_t insn)
{
    unsigned int sf, op, y, cond, rn, nzcv, is_imm;
    TCGv_i32 tcg_t0, tcg_t1, tcg_t2;
    TCGv_i64 tcg_tmp, tcg_y, tcg_rn;
    DisasCompare c;

    if (!extract32(insn, 29, 1)) {
        /* S must be 1 */
        unallocated_encoding(s);
        return;
    }
    if (insn & (1 << 10 | 1 << 4)) {
        /* o2 and o3 must be 0 */
        unallocated_encoding(s);
        return;
    }
    sf = extract32(insn, 31, 1);
    op = extract32(insn, 30, 1);
    is_imm = extract32(insn, 11, 1);
    y = extract32(insn, 16, 5); /* y = rm (reg) or imm5 (imm) */
    cond = extract32(insn, 12, 4);
    rn = extract32(insn, 5, 5);
    nzcv = extract32(insn, 0, 4);

    /* Set T0 = !COND.  */
    tcg_t0 = tcg_temp_new_i32();
    arm_test_cc(&c, cond);
    tcg_gen_setcondi_i32(tcg_invert_cond(c.cond), tcg_t0, c.value, 0);
    arm_free_cc(&c);

    /* Load the arguments for the new comparison.  */
    if (is_imm) {
        tcg_y = new_tmp_a64(s);
        tcg_gen_movi_i64(tcg_y, y);
    } else {
        tcg_y = cpu_reg(s, y);
    }
    tcg_rn = cpu_reg(s, rn);

    /* Set the flags for the new comparison.  */
    tcg_tmp = tcg_temp_new_i64();
    if (op) {
        gen_sub_CC(sf, tcg_tmp, tcg_rn, tcg_y);
    } else {
        gen_add_CC(sf, tcg_tmp, tcg_rn, tcg_y);
    }
    tcg_temp_free_i64(tcg_tmp);

    /* If COND was false, force the flags to #nzcv.  Compute two masks
     * to help with this: T1 = (COND ? 0 : -1), T2 = (COND ? -1 : 0).
     * For tcg hosts that support ANDC, we can make do with just T1.
     * In either case, allow the tcg optimizer to delete any unused mask.
     */
    tcg_t1 = tcg_temp_new_i32();
    tcg_t2 = tcg_temp_new_i32();
    tcg_gen_neg_i32(tcg_t1, tcg_t0);
    tcg_gen_subi_i32(tcg_t2, tcg_t0, 1);

    if (nzcv & 8) { /* N */
        tcg_gen_or_i32(cpu_NF, cpu_NF, tcg_t1);
    } else {
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_NF, cpu_NF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_NF, cpu_NF, tcg_t2);
        }
    }
    if (nzcv & 4) { /* Z */
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_ZF, cpu_ZF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_ZF, cpu_ZF, tcg_t2);
        }
    } else {
        tcg_gen_or_i32(cpu_ZF, cpu_ZF, tcg_t0);
    }
    if (nzcv & 2) { /* C */
        tcg_gen_or_i32(cpu_CF, cpu_CF, tcg_t0);
    } else {
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_CF, cpu_CF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_CF, cpu_CF, tcg_t2);
        }
    }
    if (nzcv & 1) { /* V */
        tcg_gen_or_i32(cpu_VF, cpu_VF, tcg_t1);
    } else {
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_VF, cpu_VF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_VF, cpu_VF, tcg_t2);
        }
    }
    tcg_temp_free_i32(tcg_t0);
    tcg_temp_free_i32(tcg_t1);
    tcg_temp_free_i32(tcg_t2);
}
4309
4310
4311
4312
4313
4314
4315
/* Conditional select: CSEL, CSINC, CSINV, CSNEG
 * (and the CSET/CSETM aliases when Rn == Rm == ZR)
 */
static void disas_cond_select(DisasContext *s, uint32_t insn)
{
    unsigned int sf, else_inv, rm, cond, else_inc, rn, rd;
    TCGv_i64 tcg_rd, zero;
    DisasCompare64 c;

    if (extract32(insn, 29, 1) || extract32(insn, 11, 1)) {
        /* S == 1 or op2<1> == 1 */
        unallocated_encoding(s);
        return;
    }
    sf = extract32(insn, 31, 1);
    else_inv = extract32(insn, 30, 1);
    rm = extract32(insn, 16, 5);
    cond = extract32(insn, 12, 4);
    else_inc = extract32(insn, 10, 1);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    tcg_rd = cpu_reg(s, rd);

    a64_test_cc(&c, cond);
    zero = tcg_const_i64(0);

    if (rn == 31 && rm == 31 && (else_inc ^ else_inv)) {
        /* CSET & CSETM.  */
        tcg_gen_setcond_i64(tcg_invert_cond(c.cond), tcg_rd, c.value, zero);
        if (else_inv) {
            tcg_gen_neg_i64(tcg_rd, tcg_rd);
        }
    } else {
        TCGv_i64 t_true = cpu_reg(s, rn);
        TCGv_i64 t_false = read_cpu_reg(s, rm, 1);
        if (else_inv && else_inc) {
            tcg_gen_neg_i64(t_false, t_false); /* CSNEG */
        } else if (else_inv) {
            tcg_gen_not_i64(t_false, t_false); /* CSINV */
        } else if (else_inc) {
            tcg_gen_addi_i64(t_false, t_false, 1); /* CSINC */
        }
        tcg_gen_movcond_i64(c.cond, tcg_rd, c.value, zero, t_true, t_false);
    }

    tcg_temp_free_i64(zero);
    a64_free_cc(&c);

    if (!sf) {
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
}
4366
/* CLZ: count leading zero bits of Rn, at 32 or 64 bit width per sf */
static void handle_clz(DisasContext *s, unsigned int sf,
                       unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd, tcg_rn;
    tcg_rd = cpu_reg(s, rd);
    tcg_rn = cpu_reg(s, rn);

    if (sf) {
        tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64); /* 64 for an all-zero input */
    } else {
        /* 32-bit op: count in 32 bits, then zero-extend into Rd */
        TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
        tcg_gen_clzi_i32(tcg_tmp32, tcg_tmp32, 32);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
        tcg_temp_free_i32(tcg_tmp32);
    }
}
4384
/* CLS: count leading sign bits of Rn (excluding the sign bit itself) */
static void handle_cls(DisasContext *s, unsigned int sf,
                       unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd, tcg_rn;
    tcg_rd = cpu_reg(s, rd);
    tcg_rn = cpu_reg(s, rn);

    if (sf) {
        tcg_gen_clrsb_i64(tcg_rd, tcg_rn);
    } else {
        /* 32-bit op: count in 32 bits, then zero-extend into Rd */
        TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
        tcg_gen_clrsb_i32(tcg_tmp32, tcg_tmp32);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
        tcg_temp_free_i32(tcg_tmp32);
    }
}
4402
/* RBIT: reverse the bit order of Rn into Rd (done out of line
 * in a helper for both widths).
 */
static void handle_rbit(DisasContext *s, unsigned int sf,
                        unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd, tcg_rn;
    tcg_rd = cpu_reg(s, rd);
    tcg_rn = cpu_reg(s, rn);

    if (sf) {
        gen_helper_rbit64(tcg_rd, tcg_rn);
    } else {
        /* 32-bit: reverse the low half, then zero-extend into Xd */
        TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
        gen_helper_rbit(tcg_tmp32, tcg_tmp32);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
        tcg_temp_free_i32(tcg_tmp32);
    }
}
4420
4421
/* REV with sf==1: byte-reverse the whole 64-bit value.
 * The sf==0 encoding of this opcode is unallocated.
 */
static void handle_rev64(DisasContext *s, unsigned int sf,
                         unsigned int rn, unsigned int rd)
{
    if (!sf) {
        unallocated_encoding(s);
        return;
    }
    tcg_gen_bswap64_i64(cpu_reg(s, rd), cpu_reg(s, rn));
}
4431
4432
4433
4434
/* REV with sf==0 (REV32 alias with sf==1): byte-reverse each
 * 32-bit word of Rn independently; the words themselves keep
 * their positions.
 */
static void handle_rev32(DisasContext *s, unsigned int sf,
                         unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd = cpu_reg(s, rd);

    if (sf) {
        TCGv_i64 tcg_tmp = tcg_temp_new_i64();
        TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);

        /* bswap the low word into rd, the high word into tmp,
         * then reassemble: rd = low32(rd) | tmp << 32.
         */
        tcg_gen_ext32u_i64(tcg_tmp, tcg_rn);
        tcg_gen_bswap32_i64(tcg_rd, tcg_tmp);
        tcg_gen_shri_i64(tcg_tmp, tcg_rn, 32);
        tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp);
        tcg_gen_concat32_i64(tcg_rd, tcg_rd, tcg_tmp);

        tcg_temp_free_i64(tcg_tmp);
    } else {
        /* 32-bit REV: bswap the low word, result zero-extended in Xd */
        tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, rn));
        tcg_gen_bswap32_i64(tcg_rd, tcg_rd);
    }
}
4457
4458
/* REV16: byte-swap within each 16-bit halfword of Rn.
 * Done branchlessly for all halfwords at once:
 *   rd = ((rn & 0x00ff..) << 8) | ((rn >> 8) & 0x00ff..)
 */
static void handle_rev16(DisasContext *s, unsigned int sf,
                         unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();
    TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
    /* mask of the low byte of every halfword, sized by sf */
    TCGv_i64 mask = tcg_const_i64(sf ? 0x00ff00ff00ff00ffull : 0x00ff00ff);

    tcg_gen_shri_i64(tcg_tmp, tcg_rn, 8);
    tcg_gen_and_i64(tcg_rd, tcg_rn, mask);
    tcg_gen_and_i64(tcg_tmp, tcg_tmp, mask);
    tcg_gen_shli_i64(tcg_rd, tcg_rd, 8);
    tcg_gen_or_i64(tcg_rd, tcg_rd, tcg_tmp);

    tcg_temp_free_i64(mask);
    tcg_temp_free_i64(tcg_tmp);
}
4476
4477
4478
4479
4480
4481
4482
4483static void disas_data_proc_1src(DisasContext *s, uint32_t insn)
4484{
4485 unsigned int sf, opcode, rn, rd;
4486
4487 if (extract32(insn, 29, 1) || extract32(insn, 16, 5)) {
4488 unallocated_encoding(s);
4489 return;
4490 }
4491
4492 sf = extract32(insn, 31, 1);
4493 opcode = extract32(insn, 10, 6);
4494 rn = extract32(insn, 5, 5);
4495 rd = extract32(insn, 0, 5);
4496
4497 switch (opcode) {
4498 case 0:
4499 handle_rbit(s, sf, rn, rd);
4500 break;
4501 case 1:
4502 handle_rev16(s, sf, rn, rd);
4503 break;
4504 case 2:
4505 handle_rev32(s, sf, rn, rd);
4506 break;
4507 case 3:
4508 handle_rev64(s, sf, rn, rd);
4509 break;
4510 case 4:
4511 handle_clz(s, sf, rn, rd);
4512 break;
4513 case 5:
4514 handle_cls(s, sf, rn, rd);
4515 break;
4516 }
4517}
4518
/* UDIV / SDIV.  Both widths are implemented with the 64-bit divide
 * helpers; the 32-bit signed case must sign-extend the operands first
 * so that the 64-bit quotient equals the 32-bit one.
 */
static void handle_div(DisasContext *s, bool is_signed, unsigned int sf,
                       unsigned int rm, unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_n, tcg_m, tcg_rd;
    tcg_rd = cpu_reg(s, rd);

    if (!sf && is_signed) {
        /* 32-bit SDIV: sign-extend inputs into fresh temps */
        tcg_n = new_tmp_a64(s);
        tcg_m = new_tmp_a64(s);
        tcg_gen_ext32s_i64(tcg_n, cpu_reg(s, rn));
        tcg_gen_ext32s_i64(tcg_m, cpu_reg(s, rm));
    } else {
        /* read_cpu_reg zero-extends for !sf, which is what UDIV wants */
        tcg_n = read_cpu_reg(s, rn, sf);
        tcg_m = read_cpu_reg(s, rm, sf);
    }

    /* The helpers implement the A64 divide semantics (e.g. for a
     * zero divisor) out of line.
     */
    if (is_signed) {
        gen_helper_sdiv64(tcg_rd, tcg_n, tcg_m);
    } else {
        gen_helper_udiv64(tcg_rd, tcg_n, tcg_m);
    }

    if (!sf) {
        /* 32-bit form: zero-extend the result into Xd */
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
}
4545
4546
/* LSLV, LSRV, ASRV, RORV: shift Rn by a register amount.
 * The architecture takes the shift count modulo the register width,
 * hence the mask with 63 or 31.
 */
static void handle_shift_reg(DisasContext *s,
                             enum a64_shift_type shift_type, unsigned int sf,
                             unsigned int rm, unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_shift = tcg_temp_new_i64();
    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);

    tcg_gen_andi_i64(tcg_shift, cpu_reg(s, rm), sf ? 63 : 31);
    shift_reg(tcg_rd, tcg_rn, sf, shift_type, tcg_shift);
    tcg_temp_free_i64(tcg_shift);
}
4559
4560
/* CRC32 / CRC32C.  sz encodes the source operand width
 * (0=byte, 1=halfword, 2=word, 3=doubleword); the doubleword form
 * is only valid with sf == 1 and vice versa.
 */
static void handle_crc32(DisasContext *s,
                         unsigned int sf, unsigned int sz, bool crc32c,
                         unsigned int rm, unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_acc, tcg_val;
    TCGv_i32 tcg_bytes;

    if (!dc_isar_feature(aa64_crc32, s)
        || (sf == 1 && sz != 3)
        || (sf == 0 && sz == 3)) {
        unallocated_encoding(s);
        return;
    }

    if (sz == 3) {
        tcg_val = cpu_reg(s, rm);
    } else {
        /* narrow sources: mask Rm down to the operand width */
        uint64_t mask;
        switch (sz) {
        case 0:
            mask = 0xFF;
            break;
        case 1:
            mask = 0xFFFF;
            break;
        case 2:
            mask = 0xFFFFFFFF;
            break;
        default:
            g_assert_not_reached();
        }
        tcg_val = new_tmp_a64(s);
        tcg_gen_andi_i64(tcg_val, cpu_reg(s, rm), mask);
    }

    tcg_acc = cpu_reg(s, rn);
    /* the helpers take the operand size in bytes */
    tcg_bytes = tcg_const_i32(1 << sz);

    if (crc32c) {
        gen_helper_crc32c_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes);
    } else {
        gen_helper_crc32_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes);
    }

    tcg_temp_free_i32(tcg_bytes);
}
4607
4608
4609
4610
4611
4612
4613
4614static void disas_data_proc_2src(DisasContext *s, uint32_t insn)
4615{
4616 unsigned int sf, rm, opcode, rn, rd;
4617 sf = extract32(insn, 31, 1);
4618 rm = extract32(insn, 16, 5);
4619 opcode = extract32(insn, 10, 6);
4620 rn = extract32(insn, 5, 5);
4621 rd = extract32(insn, 0, 5);
4622
4623 if (extract32(insn, 29, 1)) {
4624 unallocated_encoding(s);
4625 return;
4626 }
4627
4628 switch (opcode) {
4629 case 2:
4630 handle_div(s, false, sf, rm, rn, rd);
4631 break;
4632 case 3:
4633 handle_div(s, true, sf, rm, rn, rd);
4634 break;
4635 case 8:
4636 handle_shift_reg(s, A64_SHIFT_TYPE_LSL, sf, rm, rn, rd);
4637 break;
4638 case 9:
4639 handle_shift_reg(s, A64_SHIFT_TYPE_LSR, sf, rm, rn, rd);
4640 break;
4641 case 10:
4642 handle_shift_reg(s, A64_SHIFT_TYPE_ASR, sf, rm, rn, rd);
4643 break;
4644 case 11:
4645 handle_shift_reg(s, A64_SHIFT_TYPE_ROR, sf, rm, rn, rd);
4646 break;
4647 case 16:
4648 case 17:
4649 case 18:
4650 case 19:
4651 case 20:
4652 case 21:
4653 case 22:
4654 case 23:
4655 {
4656 int sz = extract32(opcode, 0, 2);
4657 bool crc32c = extract32(opcode, 2, 1);
4658 handle_crc32(s, sf, sz, crc32c, rm, rn, rd);
4659 break;
4660 }
4661 default:
4662 unallocated_encoding(s);
4663 break;
4664 }
4665}
4666
4667
4668static void disas_data_proc_reg(DisasContext *s, uint32_t insn)
4669{
4670 switch (extract32(insn, 24, 5)) {
4671 case 0x0a:
4672 disas_logic_reg(s, insn);
4673 break;
4674 case 0x0b:
4675 if (insn & (1 << 21)) {
4676 disas_add_sub_ext_reg(s, insn);
4677 } else {
4678 disas_add_sub_reg(s, insn);
4679 }
4680 break;
4681 case 0x1b:
4682 disas_data_proc_3src(s, insn);
4683 break;
4684 case 0x1a:
4685 switch (extract32(insn, 21, 3)) {
4686 case 0x0:
4687 disas_adc_sbc(s, insn);
4688 break;
4689 case 0x2:
4690 disas_cc(s, insn);
4691 break;
4692 case 0x4:
4693 disas_cond_select(s, insn);
4694 break;
4695 case 0x6:
4696 if (insn & (1 << 30)) {
4697 disas_data_proc_1src(s, insn);
4698 } else {
4699 disas_data_proc_2src(s, insn);
4700 }
4701 break;
4702 default:
4703 unallocated_encoding(s);
4704 break;
4705 }
4706 break;
4707 default:
4708 unallocated_encoding(s);
4709 break;
4710 }
4711}
4712
/* Emit a floating-point comparison of Vn with Vm (or with +0.0 when
 * cmp_with_zero) and copy the resulting flag value into NZCV.
 * size is one of MO_16/MO_32/MO_64; signal_all_nans selects the
 * signalling (FCMPE-style) helper variants.
 */
static void handle_fp_compare(DisasContext *s, int size,
                              unsigned int rn, unsigned int rm,
                              bool cmp_with_zero, bool signal_all_nans)
{
    TCGv_i64 tcg_flags = tcg_temp_new_i64();
    TCGv_ptr fpst = get_fpstatus_ptr(size == MO_16);

    if (size == MO_64) {
        TCGv_i64 tcg_vn, tcg_vm;

        tcg_vn = read_fp_dreg(s, rn);
        if (cmp_with_zero) {
            tcg_vm = tcg_const_i64(0);
        } else {
            tcg_vm = read_fp_dreg(s, rm);
        }
        if (signal_all_nans) {
            gen_helper_vfp_cmped_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
        } else {
            gen_helper_vfp_cmpd_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
        }
        tcg_temp_free_i64(tcg_vn);
        tcg_temp_free_i64(tcg_vm);
    } else {
        /* single and half precision share the 32-bit path */
        TCGv_i32 tcg_vn = tcg_temp_new_i32();
        TCGv_i32 tcg_vm = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_vn, rn, 0, size);
        if (cmp_with_zero) {
            tcg_gen_movi_i32(tcg_vm, 0);
        } else {
            read_vec_element_i32(s, tcg_vm, rm, 0, size);
        }

        switch (size) {
        case MO_32:
            if (signal_all_nans) {
                gen_helper_vfp_cmpes_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
            } else {
                gen_helper_vfp_cmps_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
            }
            break;
        case MO_16:
            if (signal_all_nans) {
                gen_helper_vfp_cmpeh_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
            } else {
                gen_helper_vfp_cmph_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
            }
            break;
        default:
            g_assert_not_reached();
        }

        tcg_temp_free_i32(tcg_vn);
        tcg_temp_free_i32(tcg_vm);
    }

    tcg_temp_free_ptr(fpst);

    gen_set_nzcv(tcg_flags);

    tcg_temp_free_i64(tcg_flags);
}
4776
4777
4778
4779
4780
4781
4782
/* Floating point compare: FCMP / FCMPE, optionally against zero.
 * opc<0> selects compare-with-zero, opc<1> selects the signalling
 * (FCMPE) form.
 */
static void disas_fp_compare(DisasContext *s, uint32_t insn)
{
    unsigned int mos, type, rm, op, rn, opc, op2r;
    int size;

    mos = extract32(insn, 29, 3);
    type = extract32(insn, 22, 2);
    rm = extract32(insn, 16, 5);
    op = extract32(insn, 14, 2);
    rn = extract32(insn, 5, 5);
    opc = extract32(insn, 3, 2);
    op2r = extract32(insn, 0, 3);

    if (mos || op || op2r) {
        /* all of these fields must be zero */
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0:
        size = MO_32;
        break;
    case 1:
        size = MO_64;
        break;
    case 3:
        size = MO_16;
        if (dc_isar_feature(aa64_fp16, s)) {
            break;
        }
        /* fallthrough: without FP16 support, type 3 is unallocated */
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    handle_fp_compare(s, size, rn, rm, opc & 1, opc & 2);
}
4825
4826
4827
4828
4829
4830
4831
/* Floating point conditional compare: FCCMP / FCCMPE.
 * If cond holds, do the FP compare and set NZCV from it; otherwise
 * NZCV is set to the immediate nzcv field.  op selects the
 * signalling (FCCMPE) form.
 */
static void disas_fp_ccomp(DisasContext *s, uint32_t insn)
{
    unsigned int mos, type, rm, cond, rn, op, nzcv;
    TCGv_i64 tcg_flags;
    TCGLabel *label_continue = NULL;
    int size;

    mos = extract32(insn, 29, 3);
    type = extract32(insn, 22, 2);
    rm = extract32(insn, 16, 5);
    cond = extract32(insn, 12, 4);
    rn = extract32(insn, 5, 5);
    op = extract32(insn, 4, 1);
    nzcv = extract32(insn, 0, 4);

    if (mos) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0:
        size = MO_32;
        break;
    case 1:
        size = MO_64;
        break;
    case 3:
        size = MO_16;
        if (dc_isar_feature(aa64_fp16, s)) {
            break;
        }
        /* fallthrough: without FP16 support, type 3 is unallocated */
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (cond < 0x0e) {
        /* not always: branch over the "condition failed" path, which
         * just loads the immediate nzcv into the flags.
         */
        TCGLabel *label_match = gen_new_label();
        label_continue = gen_new_label();
        arm_gen_test_cc(cond, label_match);

        tcg_flags = tcg_const_i64(nzcv << 28);
        gen_set_nzcv(tcg_flags);
        tcg_temp_free_i64(tcg_flags);
        tcg_gen_br(label_continue);
        gen_set_label(label_match);
    }

    handle_fp_compare(s, size, rn, rm, false, op);

    if (cond < 0x0e) {
        gen_set_label(label_continue);
    }
}
4892
4893
4894
4895
4896
4897
4898
/* Floating point conditional select: FCSEL.
 * Vd = cond ? Vn : Vm, for half, single or double precision.
 */
static void disas_fp_csel(DisasContext *s, uint32_t insn)
{
    unsigned int mos, type, rm, cond, rn, rd;
    TCGv_i64 t_true, t_false, t_zero;
    DisasCompare64 c;
    TCGMemOp sz;

    mos = extract32(insn, 29, 3);
    type = extract32(insn, 22, 2);
    rm = extract32(insn, 16, 5);
    cond = extract32(insn, 12, 4);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    if (mos) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0:
        sz = MO_32;
        break;
    case 1:
        sz = MO_64;
        break;
    case 3:
        sz = MO_16;
        if (dc_isar_feature(aa64_fp16, s)) {
            break;
        }
        /* fallthrough: without FP16 support, type 3 is unallocated */
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* Zero-extend sz-sized values into 64-bit temps and select. */
    t_true = tcg_temp_new_i64();
    t_false = tcg_temp_new_i64();
    read_vec_element(s, t_true, rn, 0, sz);
    read_vec_element(s, t_false, rm, 0, sz);

    a64_test_cc(&c, cond);
    t_zero = tcg_const_i64(0);
    tcg_gen_movcond_i64(c.cond, t_true, c.value, t_zero, t_true, t_false);
    tcg_temp_free_i64(t_zero);
    tcg_temp_free_i64(t_false);
    a64_free_cc(&c);

    /* Writing the full dreg is OK for the narrower sizes too, since
     * the high bits of the selected value are already zero.
     */
    write_fp_dreg(s, rd, t_true);
    tcg_temp_free_i64(t_true);
}
4958
4959
/* Floating-point data-processing (1 source) - half precision */
static void handle_fp_1src_half(DisasContext *s, int opcode, int rd, int rn)
{
    TCGv_ptr fpst = NULL;
    TCGv_i32 tcg_op = read_fp_hreg(s, rn);
    TCGv_i32 tcg_res = tcg_temp_new_i32();

    switch (opcode) {
    case 0x0: /* FMOV */
        tcg_gen_mov_i32(tcg_res, tcg_op);
        break;
    case 0x1: /* FABS: clear the f16 sign bit */
        tcg_gen_andi_i32(tcg_res, tcg_op, 0x7fff);
        break;
    case 0x2: /* FNEG: flip the f16 sign bit */
        tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
        break;
    case 0x3: /* FSQRT */
        fpst = get_fpstatus_ptr(true);
        gen_helper_sqrt_f16(tcg_res, tcg_op, fpst);
        break;
    case 0x8: /* FRINTN */
    case 0x9: /* FRINTP */
    case 0xa: /* FRINTM */
    case 0xb: /* FRINTZ */
    case 0xc: /* FRINTA */
    {
        /* opcode<2:0> encodes the rounding mode for these FRINT forms */
        TCGv_i32 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(opcode & 7));
        fpst = get_fpstatus_ptr(true);

        /* set_rmode hands back the previous mode in tcg_rmode, so the
         * second call below restores it after the rounding op.
         */
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
        gen_helper_advsimd_rinth(tcg_res, tcg_op, fpst);

        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
        tcg_temp_free_i32(tcg_rmode);
        break;
    }
    case 0xe: /* FRINTX: like FRINTI but signals inexact */
        fpst = get_fpstatus_ptr(true);
        gen_helper_advsimd_rinth_exact(tcg_res, tcg_op, fpst);
        break;
    case 0xf: /* FRINTI: round using the current FPCR rounding mode */
        fpst = get_fpstatus_ptr(true);
        gen_helper_advsimd_rinth(tcg_res, tcg_op, fpst);
        break;
    default:
        abort();
    }

    write_fp_sreg(s, rd, tcg_res);

    if (fpst) {
        tcg_temp_free_ptr(fpst);
    }
    tcg_temp_free_i32(tcg_op);
    tcg_temp_free_i32(tcg_res);
}
5016
5017
/* Floating-point data-processing (1 source) - single precision */
static void handle_fp_1src_single(DisasContext *s, int opcode, int rd, int rn)
{
    TCGv_ptr fpst;
    TCGv_i32 tcg_op;
    TCGv_i32 tcg_res;

    fpst = get_fpstatus_ptr(false);
    tcg_op = read_fp_sreg(s, rn);
    tcg_res = tcg_temp_new_i32();

    switch (opcode) {
    case 0x0: /* FMOV */
        tcg_gen_mov_i32(tcg_res, tcg_op);
        break;
    case 0x1: /* FABS */
        gen_helper_vfp_abss(tcg_res, tcg_op);
        break;
    case 0x2: /* FNEG */
        gen_helper_vfp_negs(tcg_res, tcg_op);
        break;
    case 0x3: /* FSQRT */
        gen_helper_vfp_sqrts(tcg_res, tcg_op, cpu_env);
        break;
    case 0x8: /* FRINTN */
    case 0x9: /* FRINTP */
    case 0xa: /* FRINTM */
    case 0xb: /* FRINTZ */
    case 0xc: /* FRINTA */
    {
        /* opcode<2:0> encodes the rounding mode; set_rmode returns the
         * previous mode so the second call restores it afterwards.
         */
        TCGv_i32 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(opcode & 7));

        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
        gen_helper_rints(tcg_res, tcg_op, fpst);

        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
        tcg_temp_free_i32(tcg_rmode);
        break;
    }
    case 0xe: /* FRINTX: like FRINTI but signals inexact */
        gen_helper_rints_exact(tcg_res, tcg_op, fpst);
        break;
    case 0xf: /* FRINTI: round using the current FPCR rounding mode */
        gen_helper_rints(tcg_res, tcg_op, fpst);
        break;
    default:
        abort();
    }

    write_fp_sreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tcg_op);
    tcg_temp_free_i32(tcg_res);
}
5072
5073
/* Floating-point data-processing (1 source) - double precision */
static void handle_fp_1src_double(DisasContext *s, int opcode, int rd, int rn)
{
    TCGv_ptr fpst;
    TCGv_i64 tcg_op;
    TCGv_i64 tcg_res;

    switch (opcode) {
    case 0x0:
        /* FMOV: handled as a gvec register move (which also clears
         * the high bits of the destination vector register).
         */
        gen_gvec_fn2(s, false, rd, rn, tcg_gen_gvec_mov, 0);
        return;
    }

    fpst = get_fpstatus_ptr(false);
    tcg_op = read_fp_dreg(s, rn);
    tcg_res = tcg_temp_new_i64();

    switch (opcode) {
    case 0x1: /* FABS */
        gen_helper_vfp_absd(tcg_res, tcg_op);
        break;
    case 0x2: /* FNEG */
        gen_helper_vfp_negd(tcg_res, tcg_op);
        break;
    case 0x3: /* FSQRT */
        gen_helper_vfp_sqrtd(tcg_res, tcg_op, cpu_env);
        break;
    case 0x8: /* FRINTN */
    case 0x9: /* FRINTP */
    case 0xa: /* FRINTM */
    case 0xb: /* FRINTZ */
    case 0xc: /* FRINTA */
    {
        /* opcode<2:0> encodes the rounding mode; set_rmode returns the
         * previous mode so the second call restores it afterwards.
         */
        TCGv_i32 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(opcode & 7));

        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
        gen_helper_rintd(tcg_res, tcg_op, fpst);

        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
        tcg_temp_free_i32(tcg_rmode);
        break;
    }
    case 0xe: /* FRINTX: like FRINTI but signals inexact */
        gen_helper_rintd_exact(tcg_res, tcg_op, fpst);
        break;
    case 0xf: /* FRINTI: round using the current FPCR rounding mode */
        gen_helper_rintd(tcg_res, tcg_op, fpst);
        break;
    default:
        abort();
    }

    write_fp_dreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(tcg_op);
    tcg_temp_free_i64(tcg_res);
}
5131
/* FCVT between half, single and double precision.
 * ntype is the source type, dtype the destination type
 * (0 = single, 1 = double, 3 = half); ntype == dtype is rejected
 * by the caller.
 */
static void handle_fp_fcvt(DisasContext *s, int opcode,
                           int rd, int rn, int dtype, int ntype)
{
    switch (ntype) {
    case 0x0:
    {
        /* source is single precision */
        TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
        if (dtype == 1) {
            /* single -> double */
            TCGv_i64 tcg_rd = tcg_temp_new_i64();
            gen_helper_vfp_fcvtds(tcg_rd, tcg_rn, cpu_env);
            write_fp_dreg(s, rd, tcg_rd);
            tcg_temp_free_i64(tcg_rd);
        } else {
            /* single -> half, honouring the FPCR AHP (alt half) bit */
            TCGv_i32 tcg_rd = tcg_temp_new_i32();
            TCGv_i32 ahp = get_ahp_flag();
            TCGv_ptr fpst = get_fpstatus_ptr(false);

            gen_helper_vfp_fcvt_f32_to_f16(tcg_rd, tcg_rn, fpst, ahp);
            /* write_fp_sreg zeroes the high bits of the dreg */
            write_fp_sreg(s, rd, tcg_rd);
            tcg_temp_free_i32(tcg_rd);
            tcg_temp_free_i32(ahp);
            tcg_temp_free_ptr(fpst);
        }
        tcg_temp_free_i32(tcg_rn);
        break;
    }
    case 0x1:
    {
        /* source is double precision */
        TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
        TCGv_i32 tcg_rd = tcg_temp_new_i32();
        if (dtype == 0) {
            /* double -> single */
            gen_helper_vfp_fcvtsd(tcg_rd, tcg_rn, cpu_env);
        } else {
            /* double -> half, honouring the FPCR AHP bit */
            TCGv_ptr fpst = get_fpstatus_ptr(false);
            TCGv_i32 ahp = get_ahp_flag();

            gen_helper_vfp_fcvt_f64_to_f16(tcg_rd, tcg_rn, fpst, ahp);

            tcg_temp_free_ptr(fpst);
            tcg_temp_free_i32(ahp);
        }
        write_fp_sreg(s, rd, tcg_rd);
        tcg_temp_free_i32(tcg_rd);
        tcg_temp_free_i64(tcg_rn);
        break;
    }
    case 0x3:
    {
        /* source is half precision: mask to 16 bits first */
        TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
        TCGv_ptr tcg_fpst = get_fpstatus_ptr(false);
        TCGv_i32 tcg_ahp = get_ahp_flag();
        tcg_gen_ext16u_i32(tcg_rn, tcg_rn);
        if (dtype == 0) {
            /* half -> single */
            TCGv_i32 tcg_rd = tcg_temp_new_i32();
            gen_helper_vfp_fcvt_f16_to_f32(tcg_rd, tcg_rn, tcg_fpst, tcg_ahp);
            write_fp_sreg(s, rd, tcg_rd);
            tcg_temp_free_ptr(tcg_fpst);
            tcg_temp_free_i32(tcg_ahp);
            tcg_temp_free_i32(tcg_rd);
        } else {
            /* half -> double */
            TCGv_i64 tcg_rd = tcg_temp_new_i64();
            gen_helper_vfp_fcvt_f16_to_f64(tcg_rd, tcg_rn, tcg_fpst, tcg_ahp);
            write_fp_dreg(s, rd, tcg_rd);
            tcg_temp_free_i64(tcg_rd);
        }
        tcg_temp_free_i32(tcg_rn);
        break;
    }
    default:
        abort();
    }
}
5210
5211
5212
5213
5214
5215
5216
/* Floating point data-processing (1 source): FMOV, FABS, FNEG,
 * FSQRT, FCVT and the FRINT family.  Dispatch on opcode, then on the
 * fp type (0 = single, 1 = double, 3 = half).
 */
static void disas_fp_1src(DisasContext *s, uint32_t insn)
{
    int type = extract32(insn, 22, 2);
    int opcode = extract32(insn, 15, 6);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    switch (opcode) {
    case 0x4: case 0x5: case 0x7:
    {
        /* FCVT between half, single and double precision.
         * opcode<1:0> is the destination type; converting a type to
         * itself (dtype == type), or from the reserved type 2, is
         * unallocated.
         */
        int dtype = extract32(opcode, 0, 2);
        if (type == 2 || dtype == type) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }

        handle_fp_fcvt(s, opcode, rd, rn, dtype, type);
        break;
    }
    case 0x0 ... 0x3:
    case 0x8 ... 0xc:
    case 0xe ... 0xf:
        /* 32-to-32 bit and 64-to-64 bit data processing */
        switch (type) {
        case 0:
            if (!fp_access_check(s)) {
                return;
            }

            handle_fp_1src_single(s, opcode, rd, rn);
            break;
        case 1:
            if (!fp_access_check(s)) {
                return;
            }

            handle_fp_1src_double(s, opcode, rd, rn);
            break;
        case 3:
            /* half precision requires FEAT_FP16 */
            if (!dc_isar_feature(aa64_fp16, s)) {
                unallocated_encoding(s);
                return;
            }

            if (!fp_access_check(s)) {
                return;
            }

            handle_fp_1src_half(s, opcode, rd, rn);
            break;
        default:
            unallocated_encoding(s);
        }
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
5280
5281
5282static void handle_fp_2src_single(DisasContext *s, int opcode,
5283 int rd, int rn, int rm)
5284{
5285 TCGv_i32 tcg_op1;
5286 TCGv_i32 tcg_op2;
5287 TCGv_i32 tcg_res;
5288 TCGv_ptr fpst;
5289
5290 tcg_res = tcg_temp_new_i32();
5291 fpst = get_fpstatus_ptr(false);
5292 tcg_op1 = read_fp_sreg(s, rn);
5293 tcg_op2 = read_fp_sreg(s, rm);
5294
5295 switch (opcode) {
5296 case 0x0:
5297 gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
5298 break;
5299 case 0x1:
5300 gen_helper_vfp_divs(tcg_res, tcg_op1, tcg_op2, fpst);
5301 break;
5302 case 0x2:
5303 gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
5304 break;
5305 case 0x3:
5306 gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
5307 break;
5308 case 0x4:
5309 gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
5310 break;
5311 case 0x5:
5312 gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
5313 break;
5314 case 0x6:
5315 gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
5316 break;
5317 case 0x7:
5318 gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
5319 break;
5320 case 0x8:
5321 gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
5322 gen_helper_vfp_negs(tcg_res, tcg_res);
5323 break;
5324 }
5325
5326 write_fp_sreg(s, rd, tcg_res);
5327
5328 tcg_temp_free_ptr(fpst);
5329 tcg_temp_free_i32(tcg_op1);
5330 tcg_temp_free_i32(tcg_op2);
5331 tcg_temp_free_i32(tcg_res);
5332}
5333
5334
5335static void handle_fp_2src_double(DisasContext *s, int opcode,
5336 int rd, int rn, int rm)
5337{
5338 TCGv_i64 tcg_op1;
5339 TCGv_i64 tcg_op2;
5340 TCGv_i64 tcg_res;
5341 TCGv_ptr fpst;
5342
5343 tcg_res = tcg_temp_new_i64();
5344 fpst = get_fpstatus_ptr(false);
5345 tcg_op1 = read_fp_dreg(s, rn);
5346 tcg_op2 = read_fp_dreg(s, rm);
5347
5348 switch (opcode) {
5349 case 0x0:
5350 gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
5351 break;
5352 case 0x1:
5353 gen_helper_vfp_divd(tcg_res, tcg_op1, tcg_op2, fpst);
5354 break;
5355 case 0x2:
5356 gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
5357 break;
5358 case 0x3:
5359 gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
5360 break;
5361 case 0x4:
5362 gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
5363 break;
5364 case 0x5:
5365 gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
5366 break;
5367 case 0x6:
5368 gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
5369 break;
5370 case 0x7:
5371 gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
5372 break;
5373 case 0x8:
5374 gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
5375 gen_helper_vfp_negd(tcg_res, tcg_res);
5376 break;
5377 }
5378
5379 write_fp_dreg(s, rd, tcg_res);
5380
5381 tcg_temp_free_ptr(fpst);
5382 tcg_temp_free_i64(tcg_op1);
5383 tcg_temp_free_i64(tcg_op2);
5384 tcg_temp_free_i64(tcg_res);
5385}
5386
5387
/* Floating-point data-processing (2 source) - half precision */
static void handle_fp_2src_half(DisasContext *s, int opcode,
                                int rd, int rn, int rm)
{
    TCGv_i32 tcg_op1;
    TCGv_i32 tcg_op2;
    TCGv_i32 tcg_res;
    TCGv_ptr fpst;

    tcg_res = tcg_temp_new_i32();
    fpst = get_fpstatus_ptr(true);
    tcg_op1 = read_fp_hreg(s, rn);
    tcg_op2 = read_fp_hreg(s, rm);

    switch (opcode) {
    case 0x0: /* FMUL */
        gen_helper_advsimd_mulh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x1: /* FDIV */
        gen_helper_advsimd_divh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x2: /* FADD */
        gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x3: /* FSUB */
        gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x4: /* FMAX */
        gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x5: /* FMIN */
        gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x6: /* FMAXNM */
        gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x7: /* FMINNM */
        gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x8: /* FNMUL: multiply, then flip the f16 sign bit */
        gen_helper_advsimd_mulh(tcg_res, tcg_op1, tcg_op2, fpst);
        tcg_gen_xori_i32(tcg_res, tcg_res, 0x8000);
        break;
    default:
        g_assert_not_reached();
    }

    write_fp_sreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tcg_op1);
    tcg_temp_free_i32(tcg_op2);
    tcg_temp_free_i32(tcg_res);
}
5441
5442
5443
5444
5445
5446
5447
5448static void disas_fp_2src(DisasContext *s, uint32_t insn)
5449{
5450 int type = extract32(insn, 22, 2);
5451 int rd = extract32(insn, 0, 5);
5452 int rn = extract32(insn, 5, 5);
5453 int rm = extract32(insn, 16, 5);
5454 int opcode = extract32(insn, 12, 4);
5455
5456 if (opcode > 8) {
5457 unallocated_encoding(s);
5458 return;
5459 }
5460
5461 switch (type) {
5462 case 0:
5463 if (!fp_access_check(s)) {
5464 return;
5465 }
5466 handle_fp_2src_single(s, opcode, rd, rn, rm);
5467 break;
5468 case 1:
5469 if (!fp_access_check(s)) {
5470 return;
5471 }
5472 handle_fp_2src_double(s, opcode, rd, rn, rm);
5473 break;
5474 case 3:
5475 if (!dc_isar_feature(aa64_fp16, s)) {
5476 unallocated_encoding(s);
5477 return;
5478 }
5479 if (!fp_access_check(s)) {
5480 return;
5481 }
5482 handle_fp_2src_half(s, opcode, rd, rn, rm);
5483 break;
5484 default:
5485 unallocated_encoding(s);
5486 }
5487}
5488
5489
/* Floating-point data-processing (3 source) - single precision:
 * FMADD, FMSUB, FNMADD, FNMSUB.
 */
static void handle_fp_3src_single(DisasContext *s, bool o0, bool o1,
                                  int rd, int rn, int rm, int ra)
{
    TCGv_i32 tcg_op1, tcg_op2, tcg_op3;
    TCGv_i32 tcg_res = tcg_temp_new_i32();
    TCGv_ptr fpst = get_fpstatus_ptr(false);

    tcg_op1 = read_fp_sreg(s, rn);
    tcg_op2 = read_fp_sreg(s, rm);
    tcg_op3 = read_fp_sreg(s, ra);

    /* These are fused multiply-add: there must be no rounding between
     * the multiply and the add, so everything funnels into one
     * muladd helper and the o0/o1 variants are expressed as input
     * negations.  Doing the negations as separate steps first is
     * correct: an input NaN comes out with its sign flipped when the
     * instruction negates that input.
     *
     *  o1 negates the addend (FMSUB/FNMADD);
     *  o0 != o1 negates the product (FNMADD/FNMSUB).
     */
    if (o1 == true) {
        gen_helper_vfp_negs(tcg_op3, tcg_op3);
    }

    if (o0 != o1) {
        gen_helper_vfp_negs(tcg_op1, tcg_op1);
    }

    gen_helper_vfp_muladds(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);

    write_fp_sreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tcg_op1);
    tcg_temp_free_i32(tcg_op2);
    tcg_temp_free_i32(tcg_op3);
    tcg_temp_free_i32(tcg_res);
}
5526
5527
/* Floating-point data-processing (3 source) - double precision:
 * FMADD, FMSUB, FNMADD, FNMSUB.
 */
static void handle_fp_3src_double(DisasContext *s, bool o0, bool o1,
                                  int rd, int rn, int rm, int ra)
{
    TCGv_i64 tcg_op1, tcg_op2, tcg_op3;
    TCGv_i64 tcg_res = tcg_temp_new_i64();
    TCGv_ptr fpst = get_fpstatus_ptr(false);

    tcg_op1 = read_fp_dreg(s, rn);
    tcg_op2 = read_fp_dreg(s, rm);
    tcg_op3 = read_fp_dreg(s, ra);

    /* Fused multiply-add with no intermediate rounding; the o0/o1
     * instruction variants become input negations (see the single-
     * precision version for the full rationale):
     *  o1 negates the addend, o0 != o1 negates the product.
     */
    if (o1 == true) {
        gen_helper_vfp_negd(tcg_op3, tcg_op3);
    }

    if (o0 != o1) {
        gen_helper_vfp_negd(tcg_op1, tcg_op1);
    }

    gen_helper_vfp_muladdd(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);

    write_fp_dreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(tcg_op1);
    tcg_temp_free_i64(tcg_op2);
    tcg_temp_free_i64(tcg_op3);
    tcg_temp_free_i64(tcg_res);
}
5564
5565
/* Floating-point data-processing (3 source) - half precision:
 * FMADD, FMSUB, FNMADD, FNMSUB.
 */
static void handle_fp_3src_half(DisasContext *s, bool o0, bool o1,
                                int rd, int rn, int rm, int ra)
{
    TCGv_i32 tcg_op1, tcg_op2, tcg_op3;
    TCGv_i32 tcg_res = tcg_temp_new_i32();
    TCGv_ptr fpst = get_fpstatus_ptr(true);

    tcg_op1 = read_fp_hreg(s, rn);
    tcg_op2 = read_fp_hreg(s, rm);
    tcg_op3 = read_fp_hreg(s, ra);

    /* Fused multiply-add with no intermediate rounding; the o0/o1
     * instruction variants become input negations done by flipping
     * the f16 sign bit (see the single-precision version for the
     * full rationale):
     *  o1 negates the addend, o0 != o1 negates the product.
     */
    if (o1 == true) {
        tcg_gen_xori_i32(tcg_op3, tcg_op3, 0x8000);
    }

    if (o0 != o1) {
        tcg_gen_xori_i32(tcg_op1, tcg_op1, 0x8000);
    }

    gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);

    write_fp_sreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tcg_op1);
    tcg_temp_free_i32(tcg_op2);
    tcg_temp_free_i32(tcg_op3);
    tcg_temp_free_i32(tcg_res);
}
5602
5603
5604
5605
5606
5607
5608
5609static void disas_fp_3src(DisasContext *s, uint32_t insn)
5610{
5611 int type = extract32(insn, 22, 2);
5612 int rd = extract32(insn, 0, 5);
5613 int rn = extract32(insn, 5, 5);
5614 int ra = extract32(insn, 10, 5);
5615 int rm = extract32(insn, 16, 5);
5616 bool o0 = extract32(insn, 15, 1);
5617 bool o1 = extract32(insn, 21, 1);
5618
5619 switch (type) {
5620 case 0:
5621 if (!fp_access_check(s)) {
5622 return;
5623 }
5624 handle_fp_3src_single(s, o0, o1, rd, rn, rm, ra);
5625 break;
5626 case 1:
5627 if (!fp_access_check(s)) {
5628 return;
5629 }
5630 handle_fp_3src_double(s, o0, o1, rd, rn, rm, ra);
5631 break;
5632 case 3:
5633 if (!dc_isar_feature(aa64_fp16, s)) {
5634 unallocated_encoding(s);
5635 return;
5636 }
5637 if (!fp_access_check(s)) {
5638 return;
5639 }
5640 handle_fp_3src_half(s, o0, o1, rd, rn, rm, ra);
5641 break;
5642 default:
5643 unallocated_encoding(s);
5644 }
5645}
5646
5647
5648
5649
5650
5651uint64_t vfp_expand_imm(int size, uint8_t imm8)
5652{
5653 uint64_t imm;
5654
5655 switch (size) {
5656 case MO_64:
5657 imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
5658 (extract32(imm8, 6, 1) ? 0x3fc0 : 0x4000) |
5659 extract32(imm8, 0, 6);
5660 imm <<= 48;
5661 break;
5662 case MO_32:
5663 imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
5664 (extract32(imm8, 6, 1) ? 0x3e00 : 0x4000) |
5665 (extract32(imm8, 0, 6) << 3);
5666 imm <<= 16;
5667 break;
5668 case MO_16:
5669 imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
5670 (extract32(imm8, 6, 1) ? 0x3000 : 0x4000) |
5671 (extract32(imm8, 0, 6) << 6);
5672 break;
5673 default:
5674 g_assert_not_reached();
5675 }
5676 return imm;
5677}
5678
5679
5680
5681
5682
5683
5684
/* Floating point immediate: FMOV (scalar, immediate). */
static void disas_fp_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int imm8 = extract32(insn, 13, 8);
    int type = extract32(insn, 22, 2);
    uint64_t imm;
    TCGv_i64 tcg_res;
    TCGMemOp sz;

    switch (type) {
    case 0:
        sz = MO_32;
        break;
    case 1:
        sz = MO_64;
        break;
    case 3:
        sz = MO_16;
        if (dc_isar_feature(aa64_fp16, s)) {
            break;
        }
        /* fallthrough: without FP16 support, type 3 is unallocated */
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    imm = vfp_expand_imm(sz, imm8);

    /* Writing the full dreg is fine for all sizes: the expanded
     * immediate's bits above sz are zero.
     */
    tcg_res = tcg_const_i64(imm);
    write_fp_dreg(s, rd, tcg_res);
    tcg_temp_free_i64(tcg_res);
}
5722
5723
5724
5725
5726
5727
/* Convert between scalar FP and integer/fixed-point general registers.
 *
 * @itof:  true for int -> fp ([SU]CVTF), false for fp -> int (FCVT*)
 * @rmode: FPROUNDING_* rounding mode for the fp -> int direction;
 *         forced to TIEAWAY when opcode<2> is set (the FCVTA[SU] case)
 * @scale: fixed-point scale; the helpers shift by (64 - scale), so
 *         scale == 64 gives the plain integer conversions
 * @sf:    nonzero for a 64-bit general register, zero for 32-bit
 * @type:  0 = single, 1 = double, 3 = half precision
 */
static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode,
                           bool itof, int rmode, int scale, int sf, int type)
{
    /* even opcode values are the signed flavours */
    bool is_signed = !(opcode & 1);
    TCGv_ptr tcg_fpstatus;
    TCGv_i32 tcg_shift, tcg_single;
    TCGv_i64 tcg_double;

    /* half precision (type == 3) uses the separate FP16 fpstatus */
    tcg_fpstatus = get_fpstatus_ptr(type == 3);

    tcg_shift = tcg_const_i32(64 - scale);

    if (itof) {
        TCGv_i64 tcg_int = cpu_reg(s, rn);
        if (!sf) {
            /* 32-bit source: extend to 64 bits before converting */
            TCGv_i64 tcg_extend = new_tmp_a64(s);

            if (is_signed) {
                tcg_gen_ext32s_i64(tcg_extend, tcg_int);
            } else {
                tcg_gen_ext32u_i64(tcg_extend, tcg_int);
            }

            tcg_int = tcg_extend;
        }

        switch (type) {
        case 1: /* float64 */
            tcg_double = tcg_temp_new_i64();
            if (is_signed) {
                gen_helper_vfp_sqtod(tcg_double, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_uqtod(tcg_double, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            }
            write_fp_dreg(s, rd, tcg_double);
            tcg_temp_free_i64(tcg_double);
            break;

        case 0: /* float32 */
            tcg_single = tcg_temp_new_i32();
            if (is_signed) {
                gen_helper_vfp_sqtos(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_uqtos(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            }
            write_fp_sreg(s, rd, tcg_single);
            tcg_temp_free_i32(tcg_single);
            break;

        case 3: /* float16 */
            tcg_single = tcg_temp_new_i32();
            if (is_signed) {
                gen_helper_vfp_sqtoh(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_uqtoh(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            }
            write_fp_sreg(s, rd, tcg_single);
            tcg_temp_free_i32(tcg_single);
            break;

        default:
            g_assert_not_reached();
        }
    } else {
        TCGv_i64 tcg_int = cpu_reg(s, rd);
        TCGv_i32 tcg_rmode;

        if (extract32(opcode, 2, 1)) {
            /* There are too many rounding modes to all fit into rmode,
             * so the FCVTA[SU] (tie-away) case is flagged via opcode<2>.
             */
            rmode = FPROUNDING_TIEAWAY;
        }

        tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));

        /* Swap in the requested rounding mode; set_rmode leaves the
         * previous mode in tcg_rmode so it can be restored below.
         */
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);

        switch (type) {
        case 1: /* float64 */
            tcg_double = read_fp_dreg(s, rn);
            if (is_signed) {
                if (!sf) {
                    gen_helper_vfp_tosld(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_tosqd(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                }
            } else {
                if (!sf) {
                    gen_helper_vfp_tould(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touqd(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                }
            }
            if (!sf) {
                /* 32-bit destination: clear the high half of Xd */
                tcg_gen_ext32u_i64(tcg_int, tcg_int);
            }
            tcg_temp_free_i64(tcg_double);
            break;

        case 0: /* float32 */
            tcg_single = read_fp_sreg(s, rn);
            if (sf) {
                if (is_signed) {
                    gen_helper_vfp_tosqs(tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touqs(tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
            } else {
                /* 32-bit helper result, zero-extended into Xd */
                TCGv_i32 tcg_dest = tcg_temp_new_i32();
                if (is_signed) {
                    gen_helper_vfp_tosls(tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touls(tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
                tcg_gen_extu_i32_i64(tcg_int, tcg_dest);
                tcg_temp_free_i32(tcg_dest);
            }
            tcg_temp_free_i32(tcg_single);
            break;

        case 3: /* float16 */
            tcg_single = read_fp_sreg(s, rn);
            if (sf) {
                if (is_signed) {
                    gen_helper_vfp_tosqh(tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touqh(tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
            } else {
                TCGv_i32 tcg_dest = tcg_temp_new_i32();
                if (is_signed) {
                    gen_helper_vfp_toslh(tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_toulh(tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
                tcg_gen_extu_i32_i64(tcg_int, tcg_dest);
                tcg_temp_free_i32(tcg_dest);
            }
            tcg_temp_free_i32(tcg_single);
            break;

        default:
            g_assert_not_reached();
        }

        /* restore the original rounding mode */
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
        tcg_temp_free_i32(tcg_rmode);
    }

    tcg_temp_free_ptr(tcg_fpstatus);
    tcg_temp_free_i32(tcg_shift);
}
5899
5900
5901
5902
5903
5904
5905
5906static void disas_fp_fixed_conv(DisasContext *s, uint32_t insn)
5907{
5908 int rd = extract32(insn, 0, 5);
5909 int rn = extract32(insn, 5, 5);
5910 int scale = extract32(insn, 10, 6);
5911 int opcode = extract32(insn, 16, 3);
5912 int rmode = extract32(insn, 19, 2);
5913 int type = extract32(insn, 22, 2);
5914 bool sbit = extract32(insn, 29, 1);
5915 bool sf = extract32(insn, 31, 1);
5916 bool itof;
5917
5918 if (sbit || (!sf && scale < 32)) {
5919 unallocated_encoding(s);
5920 return;
5921 }
5922
5923 switch (type) {
5924 case 0:
5925 case 1:
5926 break;
5927 case 3:
5928 if (dc_isar_feature(aa64_fp16, s)) {
5929 break;
5930 }
5931
5932 default:
5933 unallocated_encoding(s);
5934 return;
5935 }
5936
5937 switch ((rmode << 3) | opcode) {
5938 case 0x2:
5939 case 0x3:
5940 itof = true;
5941 break;
5942 case 0x18:
5943 case 0x19:
5944 itof = false;
5945 break;
5946 default:
5947 unallocated_encoding(s);
5948 return;
5949 }
5950
5951 if (!fp_access_check(s)) {
5952 return;
5953 }
5954
5955 handle_fpfpcvt(s, rd, rn, opcode, itof, FPROUNDING_ZERO, scale, sf, type);
5956}
5957
/* FMOV (general): a raw bit transfer between a general purpose register
 * and a SIMD&FP register, with no value conversion.
 * type 0: 32 bits, type 1: 64 bits, type 2: the top 64 bits of the
 * 128-bit vector register, type 3: 16 bits.
 */
static void handle_fmov(DisasContext *s, int rd, int rn, int type, bool itof)
{
    if (itof) {
        /* general register -> FP register */
        TCGv_i64 tcg_rn = cpu_reg(s, rn);
        TCGv_i64 tmp;

        switch (type) {
        case 0:
            /* 32 bit: zero-extend Wn into the low half of Vd */
            tmp = tcg_temp_new_i64();
            tcg_gen_ext32u_i64(tmp, tcg_rn);
            write_fp_dreg(s, rd, tmp);
            tcg_temp_free_i64(tmp);
            break;
        case 1:
            /* 64 bit: Xn into Vd.D[0] (write_fp_dreg clears the rest) */
            write_fp_dreg(s, rd, tcg_rn);
            break;
        case 2:
            /* 64 bits into the top half of the 128-bit register */
            tcg_gen_st_i64(tcg_rn, cpu_env, fp_reg_hi_offset(s, rd));
            clear_vec_high(s, true, rd);
            break;
        case 3:
            /* 16 bit: zero-extend the half-precision payload */
            tmp = tcg_temp_new_i64();
            tcg_gen_ext16u_i64(tmp, tcg_rn);
            write_fp_dreg(s, rd, tmp);
            tcg_temp_free_i64(tmp);
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        /* FP register -> general register, zero-extended loads */
        TCGv_i64 tcg_rd = cpu_reg(s, rd);

        switch (type) {
        case 0:
            /* 32 bit */
            tcg_gen_ld32u_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_32));
            break;
        case 1:
            /* 64 bit */
            tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_64));
            break;
        case 2:
            /* 64 bits from the top half of the 128-bit register */
            tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_hi_offset(s, rn));
            break;
        case 3:
            /* 16 bit */
            tcg_gen_ld16u_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_16));
            break;
        default:
            g_assert_not_reached();
        }
    }
}
6020
6021
6022
6023
6024
6025
6026
/* Floating point <-> integer conversions (FMOV general, [SU]CVTF,
 * FCVT[NPMZA][SU]). Field layout, per the extract32 calls below:
 * Rd[4:0], Rn[9:5], opcode[18:16], rmode[20:19], type[23:22],
 * S[29], sf[31].
 */
static void disas_fp_int_conv(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 16, 3);
    int rmode = extract32(insn, 19, 2);
    int type = extract32(insn, 22, 2);
    bool sbit = extract32(insn, 29, 1);
    bool sf = extract32(insn, 31, 1);

    if (sbit) {
        unallocated_encoding(s);
        return;
    }

    if (opcode > 5) {
        /* FMOV (general): opcode 6 = to FP reg, 7 = from FP reg */
        bool itof = opcode & 1;

        if (rmode >= 2) {
            unallocated_encoding(s);
            return;
        }

        /* Only certain sf/type/rmode combinations are valid */
        switch (sf << 3 | type << 1 | rmode) {
        case 0x0: /* 32 bit */
        case 0xa: /* 64 bit */
        case 0xd: /* 64 bit to top half of quad */
            break;
        case 0x6: /* 16-bit to/from a W register */
        case 0xe: /* 16-bit to/from an X register */
            if (dc_isar_feature(aa64_fp16, s)) {
                break;
            }
            /* fall through */
        default:
            /* all other sf/type/rmode combinations are invalid */
            unallocated_encoding(s);
            return;
        }

        if (!fp_access_check(s)) {
            return;
        }
        handle_fmov(s, rd, rn, type, itof);
    } else {
        /* actual FP <-> integer conversions */
        bool itof = extract32(opcode, 1, 1);

        /* only opcodes 0/1 may carry a nonzero rounding mode here */
        if (rmode != 0 && opcode > 1) {
            unallocated_encoding(s);
            return;
        }
        switch (type) {
        case 0: /* single */
        case 1: /* double */
            break;
        case 3: /* half precision: requires FEAT_FP16 */
            if (dc_isar_feature(aa64_fp16, s)) {
                break;
            }
            /* fall through */
        default:
            unallocated_encoding(s);
            return;
        }

        if (!fp_access_check(s)) {
            return;
        }
        /* scale == 64 selects the plain integer conversion */
        handle_fpfpcvt(s, rd, rn, opcode, itof, rmode, 64, sf, type);
    }
}
6100
6101
6102
6103
6104
6105
6106
6107static void disas_data_proc_fp(DisasContext *s, uint32_t insn)
6108{
6109 if (extract32(insn, 24, 1)) {
6110
6111 disas_fp_3src(s, insn);
6112 } else if (extract32(insn, 21, 1) == 0) {
6113
6114 disas_fp_fixed_conv(s, insn);
6115 } else {
6116 switch (extract32(insn, 10, 2)) {
6117 case 1:
6118
6119 disas_fp_ccomp(s, insn);
6120 break;
6121 case 2:
6122
6123 disas_fp_2src(s, insn);
6124 break;
6125 case 3:
6126
6127 disas_fp_csel(s, insn);
6128 break;
6129 case 0:
6130 switch (ctz32(extract32(insn, 12, 4))) {
6131 case 0:
6132
6133 disas_fp_imm(s, insn);
6134 break;
6135 case 1:
6136
6137 disas_fp_compare(s, insn);
6138 break;
6139 case 2:
6140
6141 disas_fp_1src(s, insn);
6142 break;
6143 case 3:
6144 unallocated_encoding(s);
6145 break;
6146 default:
6147
6148 disas_fp_int_conv(s, insn);
6149 break;
6150 }
6151 break;
6152 }
6153 }
6154}
6155
6156static void do_ext64(DisasContext *s, TCGv_i64 tcg_left, TCGv_i64 tcg_right,
6157 int pos)
6158{
6159
6160
6161
6162
6163
6164
6165 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
6166 assert(pos > 0 && pos < 64);
6167
6168 tcg_gen_shri_i64(tcg_right, tcg_right, pos);
6169 tcg_gen_shli_i64(tcg_tmp, tcg_left, 64 - pos);
6170 tcg_gen_or_i64(tcg_right, tcg_right, tcg_tmp);
6171
6172 tcg_temp_free_i64(tcg_tmp);
6173}
6174
6175
6176
6177
6178
6179
6180
/* EXT: extract a vector from a pair of source vectors, starting at a
 * byte offset imm4 within the concatenation Vm:Vn.
 */
static void disas_simd_ext(DisasContext *s, uint32_t insn)
{
    int is_q = extract32(insn, 30, 1);
    int op2 = extract32(insn, 22, 2);
    int imm4 = extract32(insn, 11, 4);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    int pos = imm4 << 3; /* extraction start position, in bits */
    TCGv_i64 tcg_resl, tcg_resh;

    /* op2 must be zero; the 64-bit form also requires imm4<3> clear */
    if (op2 != 0 || (!is_q && extract32(imm4, 3, 1))) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_resh = tcg_temp_new_i64();
    tcg_resl = tcg_temp_new_i64();

    /* Assemble the low and high 64-bit halves of the result from the
     * appropriate 64-bit elements of Vn and Vm, shifted via do_ext64.
     */
    if (!is_q) {
        read_vec_element(s, tcg_resl, rn, 0, MO_64);
        if (pos != 0) {
            read_vec_element(s, tcg_resh, rm, 0, MO_64);
            do_ext64(s, tcg_resh, tcg_resl, pos);
        }
        /* the high half of a 64-bit result is always zero */
        tcg_gen_movi_i64(tcg_resh, 0);
    } else {
        TCGv_i64 tcg_hh;
        typedef struct {
            int reg;
            int elt;
        } EltPosns;
        EltPosns eltposns[] = { {rn, 0}, {rn, 1}, {rm, 0}, {rm, 1} };
        EltPosns *elt = eltposns;

        /* an offset of 64+ bits just advances the element window */
        if (pos >= 64) {
            elt++;
            pos -= 64;
        }

        read_vec_element(s, tcg_resl, elt->reg, elt->elt, MO_64);
        elt++;
        read_vec_element(s, tcg_resh, elt->reg, elt->elt, MO_64);
        elt++;
        if (pos != 0) {
            /* non-aligned case: fold in bits from the next element up */
            do_ext64(s, tcg_resh, tcg_resl, pos);
            tcg_hh = tcg_temp_new_i64();
            read_vec_element(s, tcg_hh, elt->reg, elt->elt, MO_64);
            do_ext64(s, tcg_hh, tcg_resh, pos);
            tcg_temp_free_i64(tcg_hh);
        }
    }

    write_vec_element(s, tcg_resl, rd, 0, MO_64);
    tcg_temp_free_i64(tcg_resl);
    write_vec_element(s, tcg_resh, rd, 1, MO_64);
    tcg_temp_free_i64(tcg_resh);
}
6247
6248
6249
6250
6251
6252
6253
/* TBL/TBX: table lookup. The byte elements of Vm index into a table of
 * (len + 1) consecutive vector registers starting at Vn. TBX seeds the
 * result with the existing contents of Vd, TBL with zero; out-of-range
 * index handling is done by the simd_tbl helper.
 */
static void disas_simd_tb(DisasContext *s, uint32_t insn)
{
    int op2 = extract32(insn, 22, 2);
    int is_q = extract32(insn, 30, 1);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    int is_tblx = extract32(insn, 12, 1);
    int len = extract32(insn, 13, 2);
    TCGv_i64 tcg_resl, tcg_resh, tcg_idx;
    TCGv_i32 tcg_regno, tcg_numregs;

    if (op2 != 0) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* The helper performs the lookup for one 64-bit chunk of indices
     * at a time, reading the table registers from env, so the work
     * here is just seeding the result and invoking it per half.
     */
    tcg_resl = tcg_temp_new_i64();
    tcg_resh = tcg_temp_new_i64();

    if (is_tblx) {
        read_vec_element(s, tcg_resl, rd, 0, MO_64);
    } else {
        tcg_gen_movi_i64(tcg_resl, 0);
    }
    if (is_tblx && is_q) {
        read_vec_element(s, tcg_resh, rd, 1, MO_64);
    } else {
        tcg_gen_movi_i64(tcg_resh, 0);
    }

    tcg_idx = tcg_temp_new_i64();
    tcg_regno = tcg_const_i32(rn);
    tcg_numregs = tcg_const_i32(len + 1);
    read_vec_element(s, tcg_idx, rm, 0, MO_64);
    gen_helper_simd_tbl(tcg_resl, cpu_env, tcg_resl, tcg_idx,
                        tcg_regno, tcg_numregs);
    if (is_q) {
        /* 128-bit form: a second lookup for the high half */
        read_vec_element(s, tcg_idx, rm, 1, MO_64);
        gen_helper_simd_tbl(tcg_resh, cpu_env, tcg_resh, tcg_idx,
                            tcg_regno, tcg_numregs);
    }
    tcg_temp_free_i64(tcg_idx);
    tcg_temp_free_i32(tcg_regno);
    tcg_temp_free_i32(tcg_numregs);

    write_vec_element(s, tcg_resl, rd, 0, MO_64);
    tcg_temp_free_i64(tcg_resl);
    write_vec_element(s, tcg_resh, rd, 1, MO_64);
    tcg_temp_free_i64(tcg_resh);
}
6315
6316
6317
6318
6319
6320
6321
/* ZIP/UZP/TRN vector permute group.
 * opcode 1: UZP1/UZP2, 2: TRN1/TRN2, 3: ZIP1/ZIP2; 'part' selects the
 * 1 (even/low) versus 2 (odd/high) variant. opcode 0 is unallocated.
 */
static void disas_simd_zip_trn(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    /* opcode is bits [13:12]; bit 14 ('part') picks variant 1 vs 2 */
    int opcode = extract32(insn, 12, 2);
    bool part = extract32(insn, 14, 1);
    bool is_q = extract32(insn, 30, 1);
    int esize = 8 << size;
    int i, ofs;
    int datasize = is_q ? 128 : 64;
    int elements = datasize / esize;
    TCGv_i64 tcg_res, tcg_resl, tcg_resh;

    /* 64-bit elements are only valid in the 128-bit form */
    if (opcode == 0 || (size == 3 && !is_q)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_resl = tcg_const_i64(0);
    tcg_resh = tcg_const_i64(0);
    tcg_res = tcg_temp_new_i64();

    /* Pick each result element from Vn or Vm per the permute rule,
     * then OR it into the low or high 64-bit accumulator.
     */
    for (i = 0; i < elements; i++) {
        switch (opcode) {
        case 1: /* UZP1/2 */
        {
            /* first half from the even/odd elements of Vn, second
             * half likewise from Vm
             */
            int midpoint = elements / 2;
            if (i < midpoint) {
                read_vec_element(s, tcg_res, rn, 2 * i + part, size);
            } else {
                read_vec_element(s, tcg_res, rm,
                                 2 * (i - midpoint) + part, size);
            }
            break;
        }
        case 2: /* TRN1/2 */
            if (i & 1) {
                read_vec_element(s, tcg_res, rm, (i & ~1) + part, size);
            } else {
                read_vec_element(s, tcg_res, rn, (i & ~1) + part, size);
            }
            break;
        case 3: /* ZIP1/2 */
        {
            /* interleave the low or high halves of Vn and Vm */
            int base = part * elements / 2;
            if (i & 1) {
                read_vec_element(s, tcg_res, rm, base + (i >> 1), size);
            } else {
                read_vec_element(s, tcg_res, rn, base + (i >> 1), size);
            }
            break;
        }
        default:
            g_assert_not_reached();
        }

        ofs = i * esize;
        if (ofs < 64) {
            tcg_gen_shli_i64(tcg_res, tcg_res, ofs);
            tcg_gen_or_i64(tcg_resl, tcg_resl, tcg_res);
        } else {
            tcg_gen_shli_i64(tcg_res, tcg_res, ofs - 64);
            tcg_gen_or_i64(tcg_resh, tcg_resh, tcg_res);
        }
    }

    tcg_temp_free_i64(tcg_res);

    write_vec_element(s, tcg_resl, rd, 0, MO_64);
    tcg_temp_free_i64(tcg_resl);
    write_vec_element(s, tcg_resh, rd, 1, MO_64);
    tcg_temp_free_i64(tcg_resh);
}
6404
6405
6406
6407
6408
6409
6410
6411
6412
6413
6414
/* Recursive pairwise FP min/max reduction.
 *
 * 'vmap' is a bitmap of the elements (width 'esize' bits) still active
 * in the current 'size'-bit wide slice of Vn. The recursion halves
 * 'size' and splits the bitmap until one element remains, combining
 * the halves with the operation selected by 'fpopcode'.
 * Returns a newly allocated TCGv_i32 holding the result; the caller
 * is responsible for freeing it.
 */
static TCGv_i32 do_reduction_op(DisasContext *s, int fpopcode, int rn,
                                int esize, int size, int vmap, TCGv_ptr fpst)
{
    if (esize == size) {
        /* base case: exactly one element left in the map */
        int element;
        TCGMemOp msize = esize == 16 ? MO_16 : MO_32;
        TCGv_i32 tcg_elem;

        /* there must be exactly one element position set */
        assert(ctpop8(vmap) == 1);
        element = ctz32(vmap);
        assert(element < 8);

        tcg_elem = tcg_temp_new_i32();
        read_vec_element_i32(s, tcg_elem, rn, element, msize);
        return tcg_elem;
    } else {
        /* split the bitmap into its low and high halves and recurse */
        int bits = size / 2;
        int shift = ctpop8(vmap) / 2;
        int vmap_lo = (vmap >> shift) & vmap;
        int vmap_hi = (vmap & ~vmap_lo);
        TCGv_i32 tcg_hi, tcg_lo, tcg_res;

        tcg_hi = do_reduction_op(s, fpopcode, rn, esize, bits, vmap_hi, fpst);
        tcg_lo = do_reduction_op(s, fpopcode, rn, esize, bits, vmap_lo, fpst);
        tcg_res = tcg_temp_new_i32();

        switch (fpopcode) {
        case 0x0c: /* fmaxnmv half-precision */
            gen_helper_advsimd_maxnumh(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x0f: /* fmaxv half-precision */
            gen_helper_advsimd_maxh(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x1c: /* fminnmv half-precision */
            gen_helper_advsimd_minnumh(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x1f: /* fminv half-precision */
            gen_helper_advsimd_minh(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x2c: /* fmaxnmv */
            gen_helper_vfp_maxnums(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x2f: /* fmaxv */
            gen_helper_vfp_maxs(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x3c: /* fminnmv */
            gen_helper_vfp_minnums(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x3f: /* fminv */
            gen_helper_vfp_mins(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        default:
            g_assert_not_reached();
        }

        tcg_temp_free_i32(tcg_hi);
        tcg_temp_free_i32(tcg_lo);
        return tcg_res;
    }
}
6476
6477
6478
6479
6480
6481
6482
/* AdvSIMD across lanes: ADDV, [SU]ADDLV, [SU]MAXV, [SU]MINV and the
 * FP forms FMAXNMV/FMAXV/FMINNMV/FMINV. Reduces all elements of Vn
 * into a single scalar written to Vd.
 */
static void disas_simd_across_lanes(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    bool is_q = extract32(insn, 30, 1);
    bool is_u = extract32(insn, 29, 1);
    bool is_fp = false;
    bool is_min = false;
    int esize;
    int elements;
    int i;
    TCGv_i64 tcg_res, tcg_elt;

    switch (opcode) {
    case 0x1b: /* ADDV */
        if (is_u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x3: /* SADDLV, UADDLV */
    case 0xa: /* SMAXV, UMAXV */
    case 0x1a: /* SMINV, UMINV */
        /* no 64-bit elements; 32-bit elements need the 128-bit form */
        if (size == 3 || (size == 2 && !is_q)) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0xc: /* FMAXNMV, FMINNMV */
    case 0xf: /* FMAXV, FMINV */
        /* size<1> selects min vs max; u == 0 is the half-precision
         * form (needs FEAT_FP16), otherwise single precision with
         * size<0> clear and Q set.
         */
        is_min = extract32(size, 1, 1);
        is_fp = true;
        if (!is_u && dc_isar_feature(aa64_fp16, s)) {
            size = 1;
        } else if (!is_u || !is_q || extract32(size, 0, 1)) {
            unallocated_encoding(s);
            return;
        } else {
            size = 2;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    esize = 8 << size;
    elements = (is_q ? 128 : 64) / esize;

    tcg_res = tcg_temp_new_i64();
    tcg_elt = tcg_temp_new_i64();

    /* A 64-bit accumulator is wide enough for all the integer cases:
     * element size is at most 32 bits (size == 3 was rejected above)
     * and there are at most 16 elements, so even the widening add
     * reduction cannot overflow 64 bits.
     */
    if (!is_fp) {
        /* integer reductions, done inline element by element */
        read_vec_element(s, tcg_res, rn, 0, size | (is_u ? 0 : MO_SIGN));

        for (i = 1; i < elements; i++) {
            read_vec_element(s, tcg_elt, rn, i, size | (is_u ? 0 : MO_SIGN));

            switch (opcode) {
            case 0x03: /* SADDLV / UADDLV */
            case 0x1b: /* ADDV */
                tcg_gen_add_i64(tcg_res, tcg_res, tcg_elt);
                break;
            case 0x0a: /* SMAXV / UMAXV */
                if (is_u) {
                    tcg_gen_umax_i64(tcg_res, tcg_res, tcg_elt);
                } else {
                    tcg_gen_smax_i64(tcg_res, tcg_res, tcg_elt);
                }
                break;
            case 0x1a: /* SMINV / UMINV */
                if (is_u) {
                    tcg_gen_umin_i64(tcg_res, tcg_res, tcg_elt);
                } else {
                    tcg_gen_smin_i64(tcg_res, tcg_res, tcg_elt);
                }
                break;
            default:
                g_assert_not_reached();
            }

        }
    } else {
        /* FP reductions use a recursive pairwise reduction over a
         * bitmap of all the active element positions.
         */
        TCGv_ptr fpst = get_fpstatus_ptr(size == MO_16);
        int fpopcode = opcode | is_min << 4 | is_u << 5;
        int vmap = (1 << elements) - 1;
        TCGv_i32 tcg_res32 = do_reduction_op(s, fpopcode, rn, esize,
                                             (is_q ? 128 : 64), vmap, fpst);
        tcg_gen_extu_i32_i64(tcg_res, tcg_res32);
        tcg_temp_free_i32(tcg_res32);
        tcg_temp_free_ptr(fpst);
    }

    tcg_temp_free_i64(tcg_elt);

    /* SADDLV/UADDLV produce a result twice the element width */
    if (opcode == 0x03) {
        size++;
    }

    /* zero-extend the scalar to its destination width */
    switch (size) {
    case 0:
        tcg_gen_ext8u_i64(tcg_res, tcg_res);
        break;
    case 1:
        tcg_gen_ext16u_i64(tcg_res, tcg_res);
        break;
    case 2:
        tcg_gen_ext32u_i64(tcg_res, tcg_res);
        break;
    case 3:
        break;
    default:
        g_assert_not_reached();
    }

    write_fp_dreg(s, rd, tcg_res);
    tcg_temp_free_i64(tcg_res);
}
6631
6632
6633
6634
6635
6636
6637
6638
6639
6640
6641static void handle_simd_dupe(DisasContext *s, int is_q, int rd, int rn,
6642 int imm5)
6643{
6644 int size = ctz32(imm5);
6645 int index = imm5 >> (size + 1);
6646
6647 if (size > 3 || (size == 3 && !is_q)) {
6648 unallocated_encoding(s);
6649 return;
6650 }
6651
6652 if (!fp_access_check(s)) {
6653 return;
6654 }
6655
6656 tcg_gen_gvec_dup_mem(size, vec_full_reg_offset(s, rd),
6657 vec_reg_offset(s, rn, index, size),
6658 is_q ? 16 : 8, vec_full_reg_size(s));
6659}
6660
6661
6662
6663
6664
6665
6666
6667static void handle_simd_dupes(DisasContext *s, int rd, int rn,
6668 int imm5)
6669{
6670 int size = ctz32(imm5);
6671 int index;
6672 TCGv_i64 tmp;
6673
6674 if (size > 3) {
6675 unallocated_encoding(s);
6676 return;
6677 }
6678
6679 if (!fp_access_check(s)) {
6680 return;
6681 }
6682
6683 index = imm5 >> (size + 1);
6684
6685
6686
6687
6688 tmp = tcg_temp_new_i64();
6689 read_vec_element(s, tmp, rn, index, size);
6690 write_fp_dreg(s, rd, tmp);
6691 tcg_temp_free_i64(tmp);
6692}
6693
6694
6695
6696
6697
6698
6699
6700
6701
6702
6703static void handle_simd_dupg(DisasContext *s, int is_q, int rd, int rn,
6704 int imm5)
6705{
6706 int size = ctz32(imm5);
6707 uint32_t dofs, oprsz, maxsz;
6708
6709 if (size > 3 || ((size == 3) && !is_q)) {
6710 unallocated_encoding(s);
6711 return;
6712 }
6713
6714 if (!fp_access_check(s)) {
6715 return;
6716 }
6717
6718 dofs = vec_full_reg_offset(s, rd);
6719 oprsz = is_q ? 16 : 8;
6720 maxsz = vec_full_reg_size(s);
6721
6722 tcg_gen_gvec_dup_i64(size, dofs, oprsz, maxsz, cpu_reg(s, rn));
6723}
6724
6725
6726
6727
6728
6729
6730
6731
6732
6733
6734
6735static void handle_simd_inse(DisasContext *s, int rd, int rn,
6736 int imm4, int imm5)
6737{
6738 int size = ctz32(imm5);
6739 int src_index, dst_index;
6740 TCGv_i64 tmp;
6741
6742 if (size > 3) {
6743 unallocated_encoding(s);
6744 return;
6745 }
6746
6747 if (!fp_access_check(s)) {
6748 return;
6749 }
6750
6751 dst_index = extract32(imm5, 1+size, 5);
6752 src_index = extract32(imm4, size, 4);
6753
6754 tmp = tcg_temp_new_i64();
6755
6756 read_vec_element(s, tmp, rn, src_index, size);
6757 write_vec_element(s, tmp, rd, dst_index, size);
6758
6759 tcg_temp_free_i64(tmp);
6760}
6761
6762
6763
6764
6765
6766
6767
6768
6769
6770
6771
6772
6773static void handle_simd_insg(DisasContext *s, int rd, int rn, int imm5)
6774{
6775 int size = ctz32(imm5);
6776 int idx;
6777
6778 if (size > 3) {
6779 unallocated_encoding(s);
6780 return;
6781 }
6782
6783 if (!fp_access_check(s)) {
6784 return;
6785 }
6786
6787 idx = extract32(imm5, 1 + size, 4 - size);
6788 write_vec_element(s, cpu_reg(s, rn), rd, idx, size);
6789}
6790
6791
6792
6793
6794
6795
6796
6797
6798
6799
6800
6801
6802
6803static void handle_simd_umov_smov(DisasContext *s, int is_q, int is_signed,
6804 int rn, int rd, int imm5)
6805{
6806 int size = ctz32(imm5);
6807 int element;
6808 TCGv_i64 tcg_rd;
6809
6810
6811 if (is_signed) {
6812 if (size > 2 || (size == 2 && !is_q)) {
6813 unallocated_encoding(s);
6814 return;
6815 }
6816 } else {
6817 if (size > 3
6818 || (size < 3 && is_q)
6819 || (size == 3 && !is_q)) {
6820 unallocated_encoding(s);
6821 return;
6822 }
6823 }
6824
6825 if (!fp_access_check(s)) {
6826 return;
6827 }
6828
6829 element = extract32(imm5, 1+size, 4);
6830
6831 tcg_rd = cpu_reg(s, rd);
6832 read_vec_element(s, tcg_rd, rn, element, size | (is_signed ? MO_SIGN : 0));
6833 if (is_signed && !is_q) {
6834 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
6835 }
6836}
6837
6838
6839
6840
6841
6842
6843
6844static void disas_simd_copy(DisasContext *s, uint32_t insn)
6845{
6846 int rd = extract32(insn, 0, 5);
6847 int rn = extract32(insn, 5, 5);
6848 int imm4 = extract32(insn, 11, 4);
6849 int op = extract32(insn, 29, 1);
6850 int is_q = extract32(insn, 30, 1);
6851 int imm5 = extract32(insn, 16, 5);
6852
6853 if (op) {
6854 if (is_q) {
6855
6856 handle_simd_inse(s, rd, rn, imm4, imm5);
6857 } else {
6858 unallocated_encoding(s);
6859 }
6860 } else {
6861 switch (imm4) {
6862 case 0:
6863
6864 handle_simd_dupe(s, is_q, rd, rn, imm5);
6865 break;
6866 case 1:
6867
6868 handle_simd_dupg(s, is_q, rd, rn, imm5);
6869 break;
6870 case 3:
6871 if (is_q) {
6872
6873 handle_simd_insg(s, rd, rn, imm5);
6874 } else {
6875 unallocated_encoding(s);
6876 }
6877 break;
6878 case 5:
6879 case 7:
6880
6881 handle_simd_umov_smov(s, is_q, (imm4 == 5), rn, rd, imm5);
6882 break;
6883 default:
6884 unallocated_encoding(s);
6885 break;
6886 }
6887 }
6888}
6889
6890
6891
6892
6893
6894
6895
6896
6897
6898
6899
6900
6901
6902
6903
/* AdvSIMD modified immediate group: MOVI, MVNI, ORR (vector, imm),
 * BIC (vector, imm) and FMOV (vector, immediate). The 8-bit immediate
 * abcdefgh is expanded into a 64-bit pattern per cmode/op.
 */
static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int cmode = extract32(insn, 12, 4);
    int cmode_3_1 = extract32(cmode, 1, 3);
    int cmode_0 = extract32(cmode, 0, 1);
    int o2 = extract32(insn, 11, 1);
    uint64_t abcdefgh = extract32(insn, 5, 5) | (extract32(insn, 16, 3) << 5);
    bool is_neg = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);
    uint64_t imm = 0;

    if (o2 != 0 || ((cmode == 0xf) && is_neg && !is_q)) {
        /* The only valid encoding with o2 set is the half-precision
         * FMOV (vector, immediate), which needs FEAT_FP16.
         */
        if (!(dc_isar_feature(aa64_fp16, s) && o2 && cmode == 0xf)) {
            unallocated_encoding(s);
            return;
        }
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* Expand the immediate; compare AdvSIMDExpandImm() in the Arm ARM */
    switch (cmode_3_1) {
    case 0: /* imm8 in byte 0 of each 32-bit lane */
    case 1: /* imm8 in byte 1 of each 32-bit lane */
    case 2: /* imm8 in byte 2 of each 32-bit lane */
    case 3: /* imm8 in byte 3 of each 32-bit lane */
    {
        int shift = cmode_3_1 * 8;
        imm = bitfield_replicate(abcdefgh << shift, 32);
        break;
    }
    case 4: /* imm8 in byte 0 of each 16-bit lane */
    case 5: /* imm8 in byte 1 of each 16-bit lane */
    {
        int shift = (cmode_3_1 & 0x1) * 8;
        imm = bitfield_replicate(abcdefgh << shift, 16);
        break;
    }
    case 6:
        /* "shifting ones" forms, 32-bit lanes */
        if (cmode_0) {
            imm = (abcdefgh << 16) | 0xffff;
        } else {
            imm = (abcdefgh << 8) | 0xff;
        }
        imm = bitfield_replicate(imm, 32);
        break;
    case 7:
        if (!cmode_0 && !is_neg) {
            /* imm8 replicated into every byte */
            imm = bitfield_replicate(abcdefgh, 8);
        } else if (!cmode_0 && is_neg) {
            /* 64-bit MOVI: each bit of imm8 expands to a full byte */
            int i;
            imm = 0;
            for (i = 0; i < 8; i++) {
                if ((abcdefgh) & (1 << i)) {
                    imm |= 0xffULL << (i * 8);
                }
            }
        } else if (cmode_0) {
            if (is_neg) {
                /* FMOV (vector, immediate) - double precision */
                imm = (abcdefgh & 0x3f) << 48;
                if (abcdefgh & 0x80) {
                    imm |= 0x8000000000000000ULL;
                }
                if (abcdefgh & 0x40) {
                    imm |= 0x3fc0000000000000ULL;
                } else {
                    imm |= 0x4000000000000000ULL;
                }
            } else {
                if (o2) {
                    /* FMOV (vector, immediate) - half precision */
                    imm = vfp_expand_imm(MO_16, abcdefgh);
                    /* replicate across all 16-bit lanes */
                    imm = bitfield_replicate(imm, 16);
                } else {
                    /* FMOV (vector, immediate) - single precision */
                    imm = (abcdefgh & 0x3f) << 19;
                    if (abcdefgh & 0x80) {
                        imm |= 0x80000000;
                    }
                    if (abcdefgh & 0x40) {
                        imm |= 0x3e000000;
                    } else {
                        imm |= 0x40000000;
                    }
                    imm |= (imm << 32);
                }
            }
        }
        break;
    default:
        /* cmode_3_1 is a 3-bit field, so this is unreachable */
        fprintf(stderr, "%s: cmode_3_1: %x\n", __func__, cmode_3_1);
        g_assert_not_reached();
    }

    /* MVNI/BIC forms invert the expanded immediate */
    if (cmode_3_1 != 7 && is_neg) {
        imm = ~imm;
    }

    if (!((cmode & 0x9) == 0x1 || (cmode & 0xd) == 0x9)) {
        /* MOVI or MVNI: overwrite the whole destination */
        tcg_gen_gvec_dup64i(vec_full_reg_offset(s, rd), is_q ? 16 : 8,
                            vec_full_reg_size(s), imm);
    } else {
        /* ORR or BIC: combine with the existing destination (BIC is
         * an AND with the inverted immediate computed above)
         */
        if (is_neg) {
            gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_andi, MO_64);
        } else {
            gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_ori, MO_64);
        }
    }
}
7021
7022
7023
7024
7025
7026
7027
7028static void disas_simd_scalar_copy(DisasContext *s, uint32_t insn)
7029{
7030 int rd = extract32(insn, 0, 5);
7031 int rn = extract32(insn, 5, 5);
7032 int imm4 = extract32(insn, 11, 4);
7033 int imm5 = extract32(insn, 16, 5);
7034 int op = extract32(insn, 29, 1);
7035
7036 if (op != 0 || imm4 != 0) {
7037 unallocated_encoding(s);
7038 return;
7039 }
7040
7041
7042 handle_simd_dupes(s, rd, rn, imm5);
7043}
7044
7045
7046
7047
7048
7049
7050
/* AdvSIMD scalar pairwise: ADDP, FMAXNMP, FADDP, FMAXP, FMINNMP and
 * FMINP - reduce the bottom two elements of Vn into a scalar in Vd.
 */
static void disas_simd_scalar_pairwise(DisasContext *s, uint32_t insn)
{
    int u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    TCGv_ptr fpst;

    /* Fold size<1> into bit 5 of the opcode so a single switch can
     * discriminate the operations below.
     */
    opcode |= (extract32(size, 1, 1) << 5);

    switch (opcode) {
    case 0x3b: /* ADDP: integer, 64-bit elements only */
        if (u || size != 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }

        /* integer op: no fpstatus needed */
        fpst = NULL;
        break;
    case 0xc: /* FMAXNMP */
    case 0xd: /* FADDP */
    case 0xf: /* FMAXP */
    case 0x2c: /* FMINNMP */
    case 0x2f: /* FMINP */
        /* FP ops: u == 0 is the half-precision form (needs FEAT_FP16),
         * otherwise size<0> picks single vs double precision.
         */
        if (!u) {
            if (!dc_isar_feature(aa64_fp16, s)) {
                unallocated_encoding(s);
                return;
            } else {
                size = MO_16;
            }
        } else {
            size = extract32(size, 0, 1) ? MO_64 : MO_32;
        }

        if (!fp_access_check(s)) {
            return;
        }

        fpst = get_fpstatus_ptr(size == MO_16);
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (size == MO_64) {
        /* 64-bit elements: operate on Vn.D[0] and Vn.D[1] */
        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
        TCGv_i64 tcg_op2 = tcg_temp_new_i64();
        TCGv_i64 tcg_res = tcg_temp_new_i64();

        read_vec_element(s, tcg_op1, rn, 0, MO_64);
        read_vec_element(s, tcg_op2, rn, 1, MO_64);

        switch (opcode) {
        case 0x3b: /* ADDP */
            tcg_gen_add_i64(tcg_res, tcg_op1, tcg_op2);
            break;
        case 0xc: /* FMAXNMP */
            gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0xd: /* FADDP */
            gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0xf: /* FMAXP */
            gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0x2c: /* FMINNMP */
            gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0x2f: /* FMINP */
            gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        default:
            g_assert_not_reached();
        }

        write_fp_dreg(s, rd, tcg_res);

        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2);
        tcg_temp_free_i64(tcg_res);
    } else {
        /* 16- or 32-bit elements: operate on the bottom two lanes */
        TCGv_i32 tcg_op1 = tcg_temp_new_i32();
        TCGv_i32 tcg_op2 = tcg_temp_new_i32();
        TCGv_i32 tcg_res = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_op1, rn, 0, size);
        read_vec_element_i32(s, tcg_op2, rn, 1, size);

        if (size == MO_16) {
            switch (opcode) {
            case 0xc: /* FMAXNMP */
                gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xd: /* FADDP */
                gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xf: /* FMAXP */
                gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x2c: /* FMINNMP */
                gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x2f: /* FMINP */
                gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            switch (opcode) {
            case 0xc: /* FMAXNMP */
                gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xd: /* FADDP */
                gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xf: /* FMAXP */
                gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x2c: /* FMINNMP */
                gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x2f: /* FMINP */
                gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }
        }

        write_fp_sreg(s, rd, tcg_res);

        tcg_temp_free_i32(tcg_op1);
        tcg_temp_free_i32(tcg_op2);
        tcg_temp_free_i32(tcg_res);
    }

    if (fpst) {
        tcg_temp_free_ptr(fpst);
    }
}
7203
7204
7205
7206
7207
7208
7209
/* Shift one 64-bit input right, with optional rounding and/or
 * accumulation, as used by the [SU]SHR/[SU]SRA/[SU]RSHR/[SU]RSRA
 * style operations.
 *
 * @tcg_rnd:    rounding constant (1 << (shift - 1)), or NULL when no
 *              rounding is wanted
 * @accumulate: add the shifted value into tcg_res rather than move it
 * @is_u:       unsigned (logical) rather than signed (arithmetic) shift
 * @size:       log2 element size in bytes; size == 3 with rounding
 *              needs a 65-bit intermediate, kept in tcg_src_hi
 * @shift:      shift amount (1 .. element size in bits)
 */
static void handle_shri_with_rndacc(TCGv_i64 tcg_res, TCGv_i64 tcg_src,
                                    TCGv_i64 tcg_rnd, bool accumulate,
                                    bool is_u, int size, int shift)
{
    bool extended_result = false;
    bool round = tcg_rnd != NULL;
    int ext_lshift = 0;
    TCGv_i64 tcg_src_hi;

    if (round && size == 3) {
        /* adding the 64-bit rounding constant may carry out, so keep
         * the 65th bit in a separate high-part temp
         */
        extended_result = true;
        ext_lshift = 64 - shift;
        tcg_src_hi = tcg_temp_new_i64();
    } else if (shift == 64) {
        if (!accumulate && is_u) {
            /* result is zero: no need to do anything else */
            tcg_gen_movi_i64(tcg_res, 0);
            return;
        }
    }

    /* deal with the rounding step */
    if (round) {
        if (extended_result) {
            TCGv_i64 tcg_zero = tcg_const_i64(0);
            if (!is_u) {
                /* take care of sign-extending tcg_src into the
                 * high part before the 128-bit add
                 */
                tcg_gen_sari_i64(tcg_src_hi, tcg_src, 63);
                tcg_gen_add2_i64(tcg_src, tcg_src_hi,
                                 tcg_src, tcg_src_hi,
                                 tcg_rnd, tcg_zero);
            } else {
                tcg_gen_add2_i64(tcg_src, tcg_src_hi,
                                 tcg_src, tcg_zero,
                                 tcg_rnd, tcg_zero);
            }
            tcg_temp_free_i64(tcg_zero);
        } else {
            tcg_gen_add_i64(tcg_src, tcg_src, tcg_rnd);
        }
    }

    /* now do the shift right */
    if (round && extended_result) {
        /* extended case: > 64 bits of precision required */
        if (ext_lshift == 0) {
            /* special case: shift of 64, only the high bits matter */
            tcg_gen_mov_i64(tcg_src, tcg_src_hi);
        } else {
            tcg_gen_shri_i64(tcg_src, tcg_src, shift);
            tcg_gen_shli_i64(tcg_src_hi, tcg_src_hi, ext_lshift);
            tcg_gen_or_i64(tcg_src, tcg_src, tcg_src_hi);
        }
    } else {
        if (is_u) {
            if (shift == 64) {
                /* essentially shifting in 64 zeros */
                tcg_gen_movi_i64(tcg_src, 0);
            } else {
                tcg_gen_shri_i64(tcg_src, tcg_src, shift);
            }
        } else {
            if (shift == 64) {
                /* effectively extending the sign-bit */
                tcg_gen_sari_i64(tcg_src, tcg_src, 63);
            } else {
                tcg_gen_sari_i64(tcg_src, tcg_src, shift);
            }
        }
    }

    if (accumulate) {
        tcg_gen_add_i64(tcg_res, tcg_res, tcg_src);
    } else {
        tcg_gen_mov_i64(tcg_res, tcg_src);
    }

    if (extended_result) {
        tcg_temp_free_i64(tcg_src_hi);
    }
}
7291
7292
/* Scalar shift right (immediate): SSHR/USHR, SSRA/USRA, SRSHR/URSHR,
 * SRSRA/URSRA and SRI. Only the 64-bit element form exists (immh<3>
 * must be set).
 */
static void handle_scalar_simd_shri(DisasContext *s,
                                    bool is_u, int immh, int immb,
                                    int opcode, int rn, int rd)
{
    const int size = 3;
    int immhb = immh << 3 | immb;
    int shift = 2 * (8 << size) - immhb; /* i.e. 128 - immhb */
    bool accumulate = false;
    bool round = false;
    bool insert = false;
    TCGv_i64 tcg_rn;
    TCGv_i64 tcg_rd;
    TCGv_i64 tcg_round;

    if (!extract32(immh, 3, 1)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    switch (opcode) {
    case 0x02: /* SSRA / USRA (accumulate) */
        accumulate = true;
        break;
    case 0x04: /* SRSHR / URSHR (rounding) */
        round = true;
        break;
    case 0x06: /* SRSRA / URSRA (accumulate + rounding) */
        accumulate = round = true;
        break;
    case 0x08: /* SRI */
        insert = true;
        break;
    }
    /* any other opcode is a plain SSHR/USHR (no flags set) */

    if (round) {
        uint64_t round_const = 1ULL << (shift - 1);
        tcg_round = tcg_const_i64(round_const);
    } else {
        tcg_round = NULL;
    }

    tcg_rn = read_fp_dreg(s, rn);
    /* SRI and the accumulating forms need the old value of Rd */
    tcg_rd = (accumulate || insert) ? read_fp_dreg(s, rd) : tcg_temp_new_i64();

    if (insert) {
        /* SRI with shift == element size is valid but a no-op; the
         * special case also avoids a shift/deposit of width 0.
         */
        int esize = 8 << size;
        if (shift != esize) {
            tcg_gen_shri_i64(tcg_rn, tcg_rn, shift);
            tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, 0, esize - shift);
        }
    } else {
        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
                                accumulate, is_u, size, shift);
    }

    write_fp_dreg(s, rd, tcg_rd);

    tcg_temp_free_i64(tcg_rn);
    tcg_temp_free_i64(tcg_rd);
    if (round) {
        tcg_temp_free_i64(tcg_round);
    }
}
7363
7364
7365static void handle_scalar_simd_shli(DisasContext *s, bool insert,
7366 int immh, int immb, int opcode,
7367 int rn, int rd)
7368{
7369 int size = 32 - clz32(immh) - 1;
7370 int immhb = immh << 3 | immb;
7371 int shift = immhb - (8 << size);
7372 TCGv_i64 tcg_rn = new_tmp_a64(s);
7373 TCGv_i64 tcg_rd = new_tmp_a64(s);
7374
7375 if (!extract32(immh, 3, 1)) {
7376 unallocated_encoding(s);
7377 return;
7378 }
7379
7380 if (!fp_access_check(s)) {
7381 return;
7382 }
7383
7384 tcg_rn = read_fp_dreg(s, rn);
7385 tcg_rd = insert ? read_fp_dreg(s, rd) : tcg_temp_new_i64();
7386
7387 if (insert) {
7388 tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, shift, 64 - shift);
7389 } else {
7390 tcg_gen_shli_i64(tcg_rd, tcg_rn, shift);
7391 }
7392
7393 write_fp_dreg(s, rd, tcg_rd);
7394
7395 tcg_temp_free_i64(tcg_rn);
7396 tcg_temp_free_i64(tcg_rd);
7397}
7398
7399
7400
/*
 * Saturating shift-right-and-narrow: shift each source element right
 * (optionally rounding, per opcode bit 0), saturate to half the width
 * and write the packed result to the low (is_q == 0) or high half of Rd.
 * is_u_shift selects unsigned shift; is_u_narrow selects signed-to-
 * unsigned saturation for the signed-shift variants.
 */
static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q,
                                   bool is_u_shift, bool is_u_narrow,
                                   int immh, int immb, int opcode,
                                   int rn, int rd)
{
    int immhb = immh << 3 | immb;
    int size = 32 - clz32(immh) - 1;
    int esize = 8 << size;
    int shift = (2 * esize) - immhb;
    int elements = is_scalar ? 1 : (64 / esize);
    bool round = extract32(opcode, 0, 1);
    TCGMemOp ldop = (size + 1) | (is_u_shift ? 0 : MO_SIGN);
    TCGv_i64 tcg_rn, tcg_rd, tcg_round;
    TCGv_i32 tcg_rd_narrowed;
    TCGv_i64 tcg_final;

    /* indexed by [size][unarrow]: narrow-with-saturate helpers */
    static NeonGenNarrowEnvFn * const signed_narrow_fns[4][2] = {
        { gen_helper_neon_narrow_sat_s8,
          gen_helper_neon_unarrow_sat8 },
        { gen_helper_neon_narrow_sat_s16,
          gen_helper_neon_unarrow_sat16 },
        { gen_helper_neon_narrow_sat_s32,
          gen_helper_neon_unarrow_sat32 },
        { NULL, NULL },
    };
    static NeonGenNarrowEnvFn * const unsigned_narrow_fns[4] = {
        gen_helper_neon_narrow_sat_u8,
        gen_helper_neon_narrow_sat_u16,
        gen_helper_neon_narrow_sat_u32,
        NULL
    };
    NeonGenNarrowEnvFn *narrowfn;

    int i;

    assert(size < 4);

    if (extract32(immh, 3, 1)) {
        /* immh<3> set would mean 64-bit source elements: reserved */
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (is_u_shift) {
        narrowfn = unsigned_narrow_fns[size];
    } else {
        narrowfn = signed_narrow_fns[size][is_u_narrow ? 1 : 0];
    }

    tcg_rn = tcg_temp_new_i64();
    tcg_rd = tcg_temp_new_i64();
    tcg_rd_narrowed = tcg_temp_new_i32();
    tcg_final = tcg_const_i64(0);

    if (round) {
        uint64_t round_const = 1ULL << (shift - 1);
        tcg_round = tcg_const_i64(round_const);
    } else {
        tcg_round = NULL;
    }

    for (i = 0; i < elements; i++) {
        read_vec_element(s, tcg_rn, rn, i, ldop);
        /* note: shift is by the *source* (wide) element size */
        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
                                false, is_u_shift, size+1, shift);
        narrowfn(tcg_rd_narrowed, cpu_env, tcg_rd);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_rd_narrowed);
        tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
    }

    if (!is_q) {
        /* write packed result to the low 64 bits of Rd */
        write_vec_element(s, tcg_final, rd, 0, MO_64);
    } else {
        /* "2" variant: write to the high 64 bits of Rd */
        write_vec_element(s, tcg_final, rd, 1, MO_64);
    }

    if (round) {
        tcg_temp_free_i64(tcg_round);
    }
    tcg_temp_free_i64(tcg_rn);
    tcg_temp_free_i64(tcg_rd);
    tcg_temp_free_i32(tcg_rd_narrowed);
    tcg_temp_free_i64(tcg_final);

    clear_vec_high(s, is_q, rd);
}
7490
7491
/*
 * Saturating shift left by immediate (scalar and vector forms),
 * dispatching to the Neon qshl/qshlu helpers.  src_unsigned/dst_unsigned
 * select the signedness combination; src_unsigned && !dst_unsigned is
 * not a valid combination (NULL entries in the tables below).
 */
static void handle_simd_qshl(DisasContext *s, bool scalar, bool is_q,
                             bool src_unsigned, bool dst_unsigned,
                             int immh, int immb, int rn, int rd)
{
    int immhb = immh << 3 | immb;
    int size = 32 - clz32(immh) - 1;
    int shift = immhb - (8 << size);
    int pass;

    assert(immh != 0);
    assert(!(scalar && is_q));

    if (!scalar) {
        if (!is_q && extract32(immh, 3, 1)) {
            /* 64-bit elements require the Q form */
            unallocated_encoding(s);
            return;
        }

        /* Since we use the variable-width helpers below, which operate
         * on a full 32-bit lane at a time, replicate the shift count
         * into each 8/16-bit element of the 32-bit value.
         */
        switch (size) {
        case 0:
            shift |= shift << 8;
            /* fall through */
        case 1:
            shift |= shift << 16;
            break;
        case 2:
        case 3:
            break;
        default:
            g_assert_not_reached();
        }
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (size == 3) {
        TCGv_i64 tcg_shift = tcg_const_i64(shift);
        /* indexed by [src_unsigned][dst_unsigned] */
        static NeonGenTwo64OpEnvFn * const fns[2][2] = {
            { gen_helper_neon_qshl_s64, gen_helper_neon_qshlu_s64 },
            { NULL, gen_helper_neon_qshl_u64 },
        };
        NeonGenTwo64OpEnvFn *genfn = fns[src_unsigned][dst_unsigned];
        int maxpass = is_q ? 2 : 1;

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);
            genfn(tcg_op, cpu_env, tcg_op, tcg_shift);
            write_vec_element(s, tcg_op, rd, pass, MO_64);

            tcg_temp_free_i64(tcg_op);
        }
        tcg_temp_free_i64(tcg_shift);
        clear_vec_high(s, is_q, rd);
    } else {
        TCGv_i32 tcg_shift = tcg_const_i32(shift);
        /* indexed by [src_unsigned][dst_unsigned][size] */
        static NeonGenTwoOpEnvFn * const fns[2][2][3] = {
            {
                { gen_helper_neon_qshl_s8,
                  gen_helper_neon_qshl_s16,
                  gen_helper_neon_qshl_s32 },
                { gen_helper_neon_qshlu_s8,
                  gen_helper_neon_qshlu_s16,
                  gen_helper_neon_qshlu_s32 }
            }, {
                { NULL, NULL, NULL },
                { gen_helper_neon_qshl_u8,
                  gen_helper_neon_qshl_u16,
                  gen_helper_neon_qshl_u32 }
            }
        };
        NeonGenTwoOpEnvFn *genfn = fns[src_unsigned][dst_unsigned][size];
        TCGMemOp memop = scalar ? size : MO_32;
        int maxpass = scalar ? 1 : is_q ? 4 : 2;

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op, rn, pass, memop);
            genfn(tcg_op, cpu_env, tcg_op, tcg_shift);
            if (scalar) {
                /* scalar result: zero bits above the element size */
                switch (size) {
                case 0:
                    tcg_gen_ext8u_i32(tcg_op, tcg_op);
                    break;
                case 1:
                    tcg_gen_ext16u_i32(tcg_op, tcg_op);
                    break;
                case 2:
                    break;
                default:
                    g_assert_not_reached();
                }
                write_fp_sreg(s, rd, tcg_op);
            } else {
                write_vec_element_i32(s, tcg_op, rd, pass, MO_32);
            }

            tcg_temp_free_i32(tcg_op);
        }
        tcg_temp_free_i32(tcg_shift);

        if (!scalar) {
            clear_vec_high(s, is_q, rd);
        }
    }
}
7606
7607
/*
 * Common code for integer-to-FP conversion (scalar when elements == 1,
 * else vector), with an optional fixed-point fraction (fracbits).
 * size is the element size (MO_16/MO_32/MO_64); is_signed selects
 * signed vs unsigned source integers.
 */
static void handle_simd_intfp_conv(DisasContext *s, int rd, int rn,
                                   int elements, int is_signed,
                                   int fracbits, int size)
{
    TCGv_ptr tcg_fpst = get_fpstatus_ptr(size == MO_16);
    TCGv_i32 tcg_shift = NULL;

    TCGMemOp mop = size | (is_signed ? MO_SIGN : 0);
    int pass;

    /* the 64-bit helpers always take a shift argument; for the others
     * it is only needed for the fixed-point (fracbits != 0) variants
     */
    if (fracbits || size == MO_64) {
        tcg_shift = tcg_const_i32(fracbits);
    }

    if (size == MO_64) {
        TCGv_i64 tcg_int64 = tcg_temp_new_i64();
        TCGv_i64 tcg_double = tcg_temp_new_i64();

        for (pass = 0; pass < elements; pass++) {
            read_vec_element(s, tcg_int64, rn, pass, mop);

            if (is_signed) {
                gen_helper_vfp_sqtod(tcg_double, tcg_int64,
                                     tcg_shift, tcg_fpst);
            } else {
                gen_helper_vfp_uqtod(tcg_double, tcg_int64,
                                     tcg_shift, tcg_fpst);
            }
            if (elements == 1) {
                /* scalar: write via fp reg to zero the high bits */
                write_fp_dreg(s, rd, tcg_double);
            } else {
                write_vec_element(s, tcg_double, rd, pass, MO_64);
            }
        }

        tcg_temp_free_i64(tcg_int64);
        tcg_temp_free_i64(tcg_double);

    } else {
        TCGv_i32 tcg_int32 = tcg_temp_new_i32();
        TCGv_i32 tcg_float = tcg_temp_new_i32();

        for (pass = 0; pass < elements; pass++) {
            read_vec_element_i32(s, tcg_int32, rn, pass, mop);

            switch (size) {
            case MO_32:
                if (fracbits) {
                    if (is_signed) {
                        gen_helper_vfp_sltos(tcg_float, tcg_int32,
                                             tcg_shift, tcg_fpst);
                    } else {
                        gen_helper_vfp_ultos(tcg_float, tcg_int32,
                                             tcg_shift, tcg_fpst);
                    }
                } else {
                    if (is_signed) {
                        gen_helper_vfp_sitos(tcg_float, tcg_int32, tcg_fpst);
                    } else {
                        gen_helper_vfp_uitos(tcg_float, tcg_int32, tcg_fpst);
                    }
                }
                break;
            case MO_16:
                if (fracbits) {
                    if (is_signed) {
                        gen_helper_vfp_sltoh(tcg_float, tcg_int32,
                                             tcg_shift, tcg_fpst);
                    } else {
                        gen_helper_vfp_ultoh(tcg_float, tcg_int32,
                                             tcg_shift, tcg_fpst);
                    }
                } else {
                    if (is_signed) {
                        gen_helper_vfp_sitoh(tcg_float, tcg_int32, tcg_fpst);
                    } else {
                        gen_helper_vfp_uitoh(tcg_float, tcg_int32, tcg_fpst);
                    }
                }
                break;
            default:
                g_assert_not_reached();
            }

            if (elements == 1) {
                write_fp_sreg(s, rd, tcg_float);
            } else {
                write_vec_element_i32(s, tcg_float, rd, pass, size);
            }
        }

        tcg_temp_free_i32(tcg_int32);
        tcg_temp_free_i32(tcg_float);
    }

    tcg_temp_free_ptr(tcg_fpst);
    if (tcg_shift) {
        tcg_temp_free_i32(tcg_shift);
    }

    /* (elements << size) == 16 iff a full 128-bit vector was written */
    clear_vec_high(s, elements << size == 16, rd);
}
7710
7711
7712static void handle_simd_shift_intfp_conv(DisasContext *s, bool is_scalar,
7713 bool is_q, bool is_u,
7714 int immh, int immb, int opcode,
7715 int rn, int rd)
7716{
7717 int size, elements, fracbits;
7718 int immhb = immh << 3 | immb;
7719
7720 if (immh & 8) {
7721 size = MO_64;
7722 if (!is_scalar && !is_q) {
7723 unallocated_encoding(s);
7724 return;
7725 }
7726 } else if (immh & 4) {
7727 size = MO_32;
7728 } else if (immh & 2) {
7729 size = MO_16;
7730 if (!dc_isar_feature(aa64_fp16, s)) {
7731 unallocated_encoding(s);
7732 return;
7733 }
7734 } else {
7735
7736 g_assert(immh == 1);
7737 unallocated_encoding(s);
7738 return;
7739 }
7740
7741 if (is_scalar) {
7742 elements = 1;
7743 } else {
7744 elements = (8 << is_q) >> size;
7745 }
7746 fracbits = (16 << size) - immhb;
7747
7748 if (!fp_access_check(s)) {
7749 return;
7750 }
7751
7752 handle_simd_intfp_conv(s, rd, rn, elements, !is_u, fracbits, size);
7753}
7754
7755
7756static void handle_simd_shift_fpint_conv(DisasContext *s, bool is_scalar,
7757 bool is_q, bool is_u,
7758 int immh, int immb, int rn, int rd)
7759{
7760 int immhb = immh << 3 | immb;
7761 int pass, size, fracbits;
7762 TCGv_ptr tcg_fpstatus;
7763 TCGv_i32 tcg_rmode, tcg_shift;
7764
7765 if (immh & 0x8) {
7766 size = MO_64;
7767 if (!is_scalar && !is_q) {
7768 unallocated_encoding(s);
7769 return;
7770 }
7771 } else if (immh & 0x4) {
7772 size = MO_32;
7773 } else if (immh & 0x2) {
7774 size = MO_16;
7775 if (!dc_isar_feature(aa64_fp16, s)) {
7776 unallocated_encoding(s);
7777 return;
7778 }
7779 } else {
7780
7781 assert(immh == 1);
7782 unallocated_encoding(s);
7783 return;
7784 }
7785
7786 if (!fp_access_check(s)) {
7787 return;
7788 }
7789
7790 assert(!(is_scalar && is_q));
7791
7792 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(FPROUNDING_ZERO));
7793 tcg_fpstatus = get_fpstatus_ptr(size == MO_16);
7794 gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
7795 fracbits = (16 << size) - immhb;
7796 tcg_shift = tcg_const_i32(fracbits);
7797
7798 if (size == MO_64) {
7799 int maxpass = is_scalar ? 1 : 2;
7800
7801 for (pass = 0; pass < maxpass; pass++) {
7802 TCGv_i64 tcg_op = tcg_temp_new_i64();
7803
7804 read_vec_element(s, tcg_op, rn, pass, MO_64);
7805 if (is_u) {
7806 gen_helper_vfp_touqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
7807 } else {
7808 gen_helper_vfp_tosqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
7809 }
7810 write_vec_element(s, tcg_op, rd, pass, MO_64);
7811 tcg_temp_free_i64(tcg_op);
7812 }
7813 clear_vec_high(s, is_q, rd);
7814 } else {
7815 void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
7816 int maxpass = is_scalar ? 1 : ((8 << is_q) >> size);
7817
7818 switch (size) {
7819 case MO_16:
7820 if (is_u) {
7821 fn = gen_helper_vfp_touhh;
7822 } else {
7823 fn = gen_helper_vfp_toshh;
7824 }
7825 break;
7826 case MO_32:
7827 if (is_u) {
7828 fn = gen_helper_vfp_touls;
7829 } else {
7830 fn = gen_helper_vfp_tosls;
7831 }
7832 break;
7833 default:
7834 g_assert_not_reached();
7835 }
7836
7837 for (pass = 0; pass < maxpass; pass++) {
7838 TCGv_i32 tcg_op = tcg_temp_new_i32();
7839
7840 read_vec_element_i32(s, tcg_op, rn, pass, size);
7841 fn(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
7842 if (is_scalar) {
7843 write_fp_sreg(s, rd, tcg_op);
7844 } else {
7845 write_vec_element_i32(s, tcg_op, rd, pass, size);
7846 }
7847 tcg_temp_free_i32(tcg_op);
7848 }
7849 if (!is_scalar) {
7850 clear_vec_high(s, is_q, rd);
7851 }
7852 }
7853
7854 tcg_temp_free_ptr(tcg_fpstatus);
7855 tcg_temp_free_i32(tcg_shift);
7856 gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
7857 tcg_temp_free_i32(tcg_rmode);
7858}
7859
7860
7861
7862
7863
7864
7865
7866
7867
/* AdvSIMD scalar shift by immediate: decode the opcode field and
 * dispatch to the appropriate handler.  immh == 0 is in a different
 * decode group and is rejected here.
 */
static void disas_simd_scalar_shift_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 5);
    int immb = extract32(insn, 16, 3);
    int immh = extract32(insn, 19, 4);
    bool is_u = extract32(insn, 29, 1);

    if (immh == 0) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0x08: /* shift right and insert: U form only */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x00: /* shift right */
    case 0x02: /* shift right and accumulate */
    case 0x04: /* rounding shift right */
    case 0x06: /* rounding shift right and accumulate */
        handle_scalar_simd_shri(s, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x0a: /* shift left / shift left and insert */
        handle_scalar_simd_shli(s, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x1c: /* fixed-point to FP conversion */
        handle_simd_shift_intfp_conv(s, true, false, is_u, immh, immb,
                                     opcode, rn, rd);
        break;
    case 0x10: /* saturating shift-right-narrow, unsigned-result forms */
    case 0x11:
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        handle_vec_simd_sqshrn(s, true, false, false, true,
                               immh, immb, opcode, rn, rd);
        break;
    case 0x12: /* saturating shift-right-narrow */
    case 0x13:
        handle_vec_simd_sqshrn(s, true, false, is_u, is_u,
                               immh, immb, opcode, rn, rd);
        break;
    case 0xc: /* signed-source, unsigned-result saturating shift left */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        handle_simd_qshl(s, true, false, false, true, immh, immb, rn, rd);
        break;
    case 0xe: /* saturating shift left */
        handle_simd_qshl(s, true, false, is_u, is_u, immh, immb, rn, rd);
        break;
    case 0x1f: /* FP to fixed-point conversion */
        handle_simd_shift_fpint_conv(s, true, false, is_u, immh, immb, rn, rd);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
7934
7935
7936
7937
7938
7939
7940
/* AdvSIMD scalar three different: widening multiply ops where the
 * result is double the width of the sources (opcode 0xd multiply,
 * 0x9 multiply-accumulate, 0xb multiply-subtract), all with
 * doubling saturation.  Only signed (U == 0) forms exist.
 */
static void disas_simd_scalar_three_reg_diff(DisasContext *s, uint32_t insn)
{
    bool is_u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 4);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    if (is_u) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0x9: /* widening multiply-accumulate */
    case 0xb: /* widening multiply-subtract */
    case 0xd: /* widening multiply */
        /* only 16-bit (size 1) and 32-bit (size 2) sources are valid */
        if (size == 0 || size == 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (size == 2) {
        /* 32-bit sources, 64-bit result */
        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
        TCGv_i64 tcg_op2 = tcg_temp_new_i64();
        TCGv_i64 tcg_res = tcg_temp_new_i64();

        read_vec_element(s, tcg_op1, rn, 0, MO_32 | MO_SIGN);
        read_vec_element(s, tcg_op2, rm, 0, MO_32 | MO_SIGN);

        tcg_gen_mul_i64(tcg_res, tcg_op1, tcg_op2);
        /* doubling via saturating res + res */
        gen_helper_neon_addl_saturate_s64(tcg_res, cpu_env, tcg_res, tcg_res);

        switch (opcode) {
        case 0xd:
            break;
        case 0xb:
            tcg_gen_neg_i64(tcg_res, tcg_res);
            /* fall through: accumulate the (negated) product */
        case 0x9:
            read_vec_element(s, tcg_op1, rd, 0, MO_64);
            gen_helper_neon_addl_saturate_s64(tcg_res, cpu_env,
                                              tcg_res, tcg_op1);
            break;
        default:
            g_assert_not_reached();
        }

        write_fp_dreg(s, rd, tcg_res);

        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2);
        tcg_temp_free_i64(tcg_res);
    } else {
        /* 16-bit sources, 32-bit result */
        TCGv_i32 tcg_op1 = read_fp_hreg(s, rn);
        TCGv_i32 tcg_op2 = read_fp_hreg(s, rm);
        TCGv_i64 tcg_res = tcg_temp_new_i64();

        gen_helper_neon_mull_s16(tcg_res, tcg_op1, tcg_op2);
        gen_helper_neon_addl_saturate_s32(tcg_res, cpu_env, tcg_res, tcg_res);

        switch (opcode) {
        case 0xd:
            break;
        case 0xb:
            gen_helper_neon_negl_u32(tcg_res, tcg_res);
            /* fall through: accumulate the (negated) product */
        case 0x9:
        {
            TCGv_i64 tcg_op3 = tcg_temp_new_i64();
            read_vec_element(s, tcg_op3, rd, 0, MO_32);
            gen_helper_neon_addl_saturate_s32(tcg_res, cpu_env,
                                              tcg_res, tcg_op3);
            tcg_temp_free_i64(tcg_op3);
            break;
        }
        default:
            g_assert_not_reached();
        }

        /* scalar result: zero the bits above the 32-bit element */
        tcg_gen_ext32u_i64(tcg_res, tcg_res);
        write_fp_dreg(s, rd, tcg_res);

        tcg_temp_free_i32(tcg_op1);
        tcg_temp_free_i32(tcg_op2);
        tcg_temp_free_i64(tcg_res);
    }
}
8039
static void handle_3same_64(DisasContext *s, int opcode, bool u,
                            TCGv_i64 tcg_rd, TCGv_i64 tcg_rn, TCGv_i64 tcg_rm)
{
    /* Handle 64x64->64 opcodes which are shared between the scalar
     * and vector three-register-same groups: saturating add/sub,
     * compares, shifts by register, and plain add/sub.  u selects
     * the unsigned variant of each opcode.
     */
    TCGCond cond;

    switch (opcode) {
    case 0x1: /* saturating add */
        if (u) {
            gen_helper_neon_qadd_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qadd_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        }
        break;
    case 0x5: /* saturating subtract */
        if (u) {
            gen_helper_neon_qsub_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qsub_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        }
        break;
    case 0x6: /* compare greater-than */
        /* 64-bit integer comparison: set all-ones on true, all-zeroes
         * on false, via setcond (0/1) followed by negation.
         */
        cond = u ? TCG_COND_GTU : TCG_COND_GT;
    do_cmop:
        tcg_gen_setcond_i64(cond, tcg_rd, tcg_rn, tcg_rm);
        tcg_gen_neg_i64(tcg_rd, tcg_rd);
        break;
    case 0x7: /* compare greater-or-equal */
        cond = u ? TCG_COND_GEU : TCG_COND_GE;
        goto do_cmop;
    case 0x11: /* compare equal (u) / test bits (!u) */
        if (u) {
            cond = TCG_COND_EQ;
            goto do_cmop;
        }
        gen_cmtst_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 0x8: /* shift by register */
        if (u) {
            gen_helper_neon_shl_u64(tcg_rd, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_shl_s64(tcg_rd, tcg_rn, tcg_rm);
        }
        break;
    case 0x9: /* saturating shift by register */
        if (u) {
            gen_helper_neon_qshl_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qshl_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        }
        break;
    case 0xa: /* rounding shift by register */
        if (u) {
            gen_helper_neon_rshl_u64(tcg_rd, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_rshl_s64(tcg_rd, tcg_rn, tcg_rm);
        }
        break;
    case 0xb: /* saturating rounding shift by register */
        if (u) {
            gen_helper_neon_qrshl_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qrshl_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        }
        break;
    case 0x10: /* add (!u) / subtract (u) */
        if (u) {
            tcg_gen_sub_i64(tcg_rd, tcg_rn, tcg_rm);
        } else {
            tcg_gen_add_i64(tcg_rd, tcg_rn, tcg_rm);
        }
        break;
    default:
        g_assert_not_reached();
    }
}
8123
8124
8125
8126
8127
/* Handle the floating-point three-same operations (single and double
 * precision; size != 0 selects double).  fpopcode is the combined
 * opcode/size<1>/U field; the scalar callers pass elements == 1.
 */
static void handle_3same_float(DisasContext *s, int size, int elements,
                               int fpopcode, int rd, int rn, int rm)
{
    int pass;
    TCGv_ptr fpst = get_fpstatus_ptr(false);

    for (pass = 0; pass < elements; pass++) {
        if (size) {
            /* Double-precision elements */
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
            TCGv_i64 tcg_res = tcg_temp_new_i64();

            read_vec_element(s, tcg_op1, rn, pass, MO_64);
            read_vec_element(s, tcg_op2, rm, pass, MO_64);

            switch (fpopcode) {
            case 0x39: /* fused multiply-subtract */
                /* implemented as FMLA with one operand negated */
                gen_helper_vfp_negd(tcg_op1, tcg_op1);
                /* fall through */
            case 0x19: /* fused multiply-accumulate */
                read_vec_element(s, tcg_res, rd, pass, MO_64);
                gen_helper_vfp_muladdd(tcg_res, tcg_op1, tcg_op2,
                                       tcg_res, fpst);
                break;
            case 0x18: /* max-number */
                gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1a: /* add */
                gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1b: /* extended multiply */
                gen_helper_vfp_mulxd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1c: /* compare equal */
                gen_helper_neon_ceq_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1e: /* max */
                gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1f: /* reciprocal step */
                gen_helper_recpsf_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x38: /* min-number */
                gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3a: /* subtract */
                gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3e: /* min */
                gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3f: /* reciprocal sqrt step */
                gen_helper_rsqrtsf_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5b: /* multiply */
                gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5c: /* compare greater-or-equal */
                gen_helper_neon_cge_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5d: /* absolute compare greater-or-equal */
                gen_helper_neon_acge_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5f: /* divide */
                gen_helper_vfp_divd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7a: /* absolute difference: |a - b| */
                gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
                gen_helper_vfp_absd(tcg_res, tcg_res);
                break;
            case 0x7c: /* compare greater-than */
                gen_helper_neon_cgt_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7d: /* absolute compare greater-than */
                gen_helper_neon_acgt_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            write_vec_element(s, tcg_res, rd, pass, MO_64);

            tcg_temp_free_i64(tcg_res);
            tcg_temp_free_i64(tcg_op1);
            tcg_temp_free_i64(tcg_op2);
        } else {
            /* Single-precision elements */
            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op1, rn, pass, MO_32);
            read_vec_element_i32(s, tcg_op2, rm, pass, MO_32);

            switch (fpopcode) {
            case 0x39: /* fused multiply-subtract */
                /* implemented as FMLA with one operand negated */
                gen_helper_vfp_negs(tcg_op1, tcg_op1);
                /* fall through */
            case 0x19: /* fused multiply-accumulate */
                read_vec_element_i32(s, tcg_res, rd, pass, MO_32);
                gen_helper_vfp_muladds(tcg_res, tcg_op1, tcg_op2,
                                       tcg_res, fpst);
                break;
            case 0x1a: /* add */
                gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1b: /* extended multiply */
                gen_helper_vfp_mulxs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1c: /* compare equal */
                gen_helper_neon_ceq_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1e: /* max */
                gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1f: /* reciprocal step */
                gen_helper_recpsf_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x18: /* max-number */
                gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x38: /* min-number */
                gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3a: /* subtract */
                gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3e: /* min */
                gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3f: /* reciprocal sqrt step */
                gen_helper_rsqrtsf_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5b: /* multiply */
                gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5c: /* compare greater-or-equal */
                gen_helper_neon_cge_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5d: /* absolute compare greater-or-equal */
                gen_helper_neon_acge_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5f: /* divide */
                gen_helper_vfp_divs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7a: /* absolute difference: |a - b| */
                gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
                gen_helper_vfp_abss(tcg_res, tcg_res);
                break;
            case 0x7c: /* compare greater-than */
                gen_helper_neon_cgt_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7d: /* absolute compare greater-than */
                gen_helper_neon_acgt_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            if (elements == 1) {
                /* scalar single: write via 64-bit element to zero
                 * the high bits of the register
                 */
                TCGv_i64 tcg_tmp = tcg_temp_new_i64();

                tcg_gen_extu_i32_i64(tcg_tmp, tcg_res);
                write_vec_element(s, tcg_tmp, rd, pass, MO_64);
                tcg_temp_free_i64(tcg_tmp);
            } else {
                write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
            }

            tcg_temp_free_i32(tcg_res);
            tcg_temp_free_i32(tcg_op1);
            tcg_temp_free_i32(tcg_op2);
        }
    }

    tcg_temp_free_ptr(fpst);

    /* clear the high half unless a full 128 bits were written */
    clear_vec_high(s, elements * (size ? 8 : 4) > 8, rd);
}
8311
8312
8313
8314
8315
8316
8317
/* AdvSIMD scalar three same: decode and dispatch.  Opcodes >= 0x18
 * are the floating-point group (handled via handle_3same_float with
 * elements == 1); the rest are integer ops, either 64-bit (via
 * handle_3same_64) or narrower via the Neon helpers.
 */
static void disas_simd_scalar_three_reg_same(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 5);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    bool u = extract32(insn, 29, 1);
    TCGv_i64 tcg_rd;

    if (opcode >= 0x18) {
        /* Floating point: U, size<1> and opcode indicate operation */
        int fpopcode = opcode | (extract32(size, 1, 1) << 5) | (u << 6);
        switch (fpopcode) {
        case 0x1b: /* extended multiply */
        case 0x1f: /* reciprocal step */
        case 0x3f: /* reciprocal sqrt step */
        case 0x5d: /* absolute compare >= */
        case 0x7d: /* absolute compare > */
        case 0x1c: /* compare == */
        case 0x5c: /* compare >= */
        case 0x7c: /* compare > */
        case 0x7a: /* absolute difference */
            break;
        default:
            unallocated_encoding(s);
            return;
        }

        if (!fp_access_check(s)) {
            return;
        }

        /* size<0> selects double (1) vs single (0) precision */
        handle_3same_float(s, extract32(size, 0, 1), 1, fpopcode, rd, rn, rm);
        return;
    }

    switch (opcode) {
    case 0x1: /* saturating add */
    case 0x5: /* saturating subtract */
    case 0x9: /* saturating shift by register */
    case 0xb: /* saturating rounding shift by register */
        break;
    case 0x8: /* shift by register */
    case 0xa: /* rounding shift by register */
    case 0x6: /* compare > */
    case 0x7: /* compare >= */
    case 0x11: /* compare == / test bits */
    case 0x10: /* add / subtract */
        /* these are only valid for 64-bit scalars */
        if (size != 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x16: /* saturating doubling multiply high (opt. rounding) */
        if (size != 1 && size != 2) {
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rd = tcg_temp_new_i64();

    if (size == 3) {
        TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
        TCGv_i64 tcg_rm = read_fp_dreg(s, rm);

        handle_3same_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rm);
        tcg_temp_free_i64(tcg_rn);
        tcg_temp_free_i64(tcg_rm);
    } else {
        /* Do a single operation on the lowest element in the vector.
         * We use the standard Neon helpers here, which expect the
         * element in the low bits of a 32-bit value; unused bits of
         * the inputs are zero per read_vec_element_i32.
         */
        NeonGenTwoOpEnvFn *genenvfn;
        TCGv_i32 tcg_rn = tcg_temp_new_i32();
        TCGv_i32 tcg_rm = tcg_temp_new_i32();
        TCGv_i32 tcg_rd32 = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_rn, rn, 0, size);
        read_vec_element_i32(s, tcg_rm, rm, 0, size);

        switch (opcode) {
        case 0x1: /* saturating add */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qadd_s8, gen_helper_neon_qadd_u8 },
                { gen_helper_neon_qadd_s16, gen_helper_neon_qadd_u16 },
                { gen_helper_neon_qadd_s32, gen_helper_neon_qadd_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x5: /* saturating subtract */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qsub_s8, gen_helper_neon_qsub_u8 },
                { gen_helper_neon_qsub_s16, gen_helper_neon_qsub_u16 },
                { gen_helper_neon_qsub_s32, gen_helper_neon_qsub_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x9: /* saturating shift by register */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 },
                { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 },
                { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0xb: /* saturating rounding shift by register */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 },
                { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 },
                { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x16: /* saturating doubling multiply high; u = rounding */
        {
            static NeonGenTwoOpEnvFn * const fns[2][2] = {
                { gen_helper_neon_qdmulh_s16, gen_helper_neon_qrdmulh_s16 },
                { gen_helper_neon_qdmulh_s32, gen_helper_neon_qrdmulh_s32 },
            };
            assert(size == 1 || size == 2);
            genenvfn = fns[size - 1][u];
            break;
        }
        default:
            g_assert_not_reached();
        }

        genenvfn(tcg_rd32, cpu_env, tcg_rn, tcg_rm);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_rd32);
        tcg_temp_free_i32(tcg_rd32);
        tcg_temp_free_i32(tcg_rn);
        tcg_temp_free_i32(tcg_rm);
    }

    /* write via fp dreg to zero the high bits of the register */
    write_fp_dreg(s, rd, tcg_rd);

    tcg_temp_free_i64(tcg_rd);
}
8477
8478
8479
8480
8481
8482
8483
8484
8485
8486static void disas_simd_scalar_three_reg_same_fp16(DisasContext *s,
8487 uint32_t insn)
8488{
8489 int rd = extract32(insn, 0, 5);
8490 int rn = extract32(insn, 5, 5);
8491 int opcode = extract32(insn, 11, 3);
8492 int rm = extract32(insn, 16, 5);
8493 bool u = extract32(insn, 29, 1);
8494 bool a = extract32(insn, 23, 1);
8495 int fpopcode = opcode | (a << 3) | (u << 4);
8496 TCGv_ptr fpst;
8497 TCGv_i32 tcg_op1;
8498 TCGv_i32 tcg_op2;
8499 TCGv_i32 tcg_res;
8500
8501 switch (fpopcode) {
8502 case 0x03:
8503 case 0x04:
8504 case 0x07:
8505 case 0x0f:
8506 case 0x14:
8507 case 0x15:
8508 case 0x1a:
8509 case 0x1c:
8510 case 0x1d:
8511 break;
8512 default:
8513 unallocated_encoding(s);
8514 return;
8515 }
8516
8517 if (!dc_isar_feature(aa64_fp16, s)) {
8518 unallocated_encoding(s);
8519 }
8520
8521 if (!fp_access_check(s)) {
8522 return;
8523 }
8524
8525 fpst = get_fpstatus_ptr(true);
8526
8527 tcg_op1 = read_fp_hreg(s, rn);
8528 tcg_op2 = read_fp_hreg(s, rm);
8529 tcg_res = tcg_temp_new_i32();
8530
8531 switch (fpopcode) {
8532 case 0x03:
8533 gen_helper_advsimd_mulxh(tcg_res, tcg_op1, tcg_op2, fpst);
8534 break;
8535 case 0x04:
8536 gen_helper_advsimd_ceq_f16(tcg_res, tcg_op1, tcg_op2, fpst);
8537 break;
8538 case 0x07:
8539 gen_helper_recpsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
8540 break;
8541 case 0x0f:
8542 gen_helper_rsqrtsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
8543 break;
8544 case 0x14:
8545 gen_helper_advsimd_cge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
8546 break;
8547 case 0x15:
8548 gen_helper_advsimd_acge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
8549 break;
8550 case 0x1a:
8551 gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
8552 tcg_gen_andi_i32(tcg_res, tcg_res, 0x7fff);
8553 break;
8554 case 0x1c:
8555 gen_helper_advsimd_cgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
8556 break;
8557 case 0x1d:
8558 gen_helper_advsimd_acgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
8559 break;
8560 default:
8561 g_assert_not_reached();
8562 }
8563
8564 write_fp_sreg(s, rd, tcg_res);
8565
8566
8567 tcg_temp_free_i32(tcg_res);
8568 tcg_temp_free_i32(tcg_op1);
8569 tcg_temp_free_i32(tcg_op2);
8570 tcg_temp_free_ptr(fpst);
8571}
8572
8573
8574
8575
8576
8577
8578
/* AdvSIMD scalar three same extra: the saturating rounding doubling
 * multiply accumulate/subtract ops (U == 1, opcode 0x0/0x1), which
 * require the RDM (ARMv8.1) extension.
 */
static void disas_simd_scalar_three_reg_same_extra(DisasContext *s,
                                                   uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 4);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    bool u = extract32(insn, 29, 1);
    TCGv_i32 ele1, ele2, ele3;
    TCGv_i64 res;
    bool feature;

    switch (u * 16 + opcode) {
    case 0x10: /* multiply-accumulate (u == 1, opcode 0x0) */
    case 0x11: /* multiply-subtract (u == 1, opcode 0x1) */
        /* only 16-bit and 32-bit elements are valid */
        if (size != 1 && size != 2) {
            unallocated_encoding(s);
            return;
        }
        feature = dc_isar_feature(aa64_rdm, s);
        break;
    default:
        unallocated_encoding(s);
        return;
    }
    if (!feature) {
        unallocated_encoding(s);
        return;
    }
    if (!fp_access_check(s)) {
        return;
    }

    /* Operate on the lowest element only: ele1 = Rn, ele2 = Rm and
     * ele3 = Rd (the accumulator), with the helper writing the result
     * back into ele3.
     */
    ele1 = tcg_temp_new_i32();
    ele2 = tcg_temp_new_i32();
    ele3 = tcg_temp_new_i32();

    read_vec_element_i32(s, ele1, rn, 0, size);
    read_vec_element_i32(s, ele2, rm, 0, size);
    read_vec_element_i32(s, ele3, rd, 0, size);

    switch (opcode) {
    case 0x0: /* multiply-accumulate */
        if (size == 1) {
            gen_helper_neon_qrdmlah_s16(ele3, cpu_env, ele1, ele2, ele3);
        } else {
            gen_helper_neon_qrdmlah_s32(ele3, cpu_env, ele1, ele2, ele3);
        }
        break;
    case 0x1: /* multiply-subtract */
        if (size == 1) {
            gen_helper_neon_qrdmlsh_s16(ele3, cpu_env, ele1, ele2, ele3);
        } else {
            gen_helper_neon_qrdmlsh_s32(ele3, cpu_env, ele1, ele2, ele3);
        }
        break;
    default:
        g_assert_not_reached();
    }
    tcg_temp_free_i32(ele1);
    tcg_temp_free_i32(ele2);

    /* write via fp dreg to zero the high bits of the register */
    res = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(res, ele3);
    tcg_temp_free_i32(ele3);

    write_fp_dreg(s, rd, res);
    tcg_temp_free_i64(res);
}
8655
static void handle_2misc_64(DisasContext *s, int opcode, bool u,
                            TCGv_i64 tcg_rd, TCGv_i64 tcg_rn,
                            TCGv_i32 tcg_rmode, TCGv_ptr tcg_fpstatus)
{
    /* Perform a single 64-bit-element 2-misc operation, shared between
     * the scalar and vector forms.  For the FCVT* cases the caller has
     * already installed the required rounding mode into tcg_fpstatus
     * (via gen_helper_set_rmode); tcg_rmode itself is not consumed here.
     */
    TCGCond cond;

    switch (opcode) {
    case 0x4: /* CLS, CLZ */
        if (u) {
            tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64);
        } else {
            tcg_gen_clrsb_i64(tcg_rd, tcg_rn);
        }
        break;
    case 0x5: /* NOT */
        /* This opcode slot is shared with RBIT/CNT at other sizes;
         * NOTE(review): only the NOT form is assumed to reach here with
         * a 64-bit element size -- confirm against the callers.
         */
        tcg_gen_not_i64(tcg_rd, tcg_rn);
        break;
    case 0x7: /* SQABS, SQNEG */
        if (u) {
            gen_helper_neon_qneg_s64(tcg_rd, cpu_env, tcg_rn);
        } else {
            gen_helper_neon_qabs_s64(tcg_rd, cpu_env, tcg_rn);
        }
        break;
    case 0xa: /* CMLT #0 */
        /* 64 bit integer comparison against zero: the setcond produces
         * 0 or 1, which the negation turns into the required
         * all-zeros/all-ones mask.
         */
        cond = TCG_COND_LT;
    do_cmop:
        tcg_gen_setcondi_i64(cond, tcg_rd, tcg_rn, 0);
        tcg_gen_neg_i64(tcg_rd, tcg_rd);
        break;
    case 0x8: /* CMGT #0, CMGE #0 */
        cond = u ? TCG_COND_GE : TCG_COND_GT;
        goto do_cmop;
    case 0x9: /* CMEQ #0, CMLE #0 */
        cond = u ? TCG_COND_LE : TCG_COND_EQ;
        goto do_cmop;
    case 0xb: /* ABS, NEG */
        if (u) {
            tcg_gen_neg_i64(tcg_rd, tcg_rn);
        } else {
            /* abs: select rn if rn > 0, else -rn */
            TCGv_i64 tcg_zero = tcg_const_i64(0);
            tcg_gen_neg_i64(tcg_rd, tcg_rn);
            tcg_gen_movcond_i64(TCG_COND_GT, tcg_rd, tcg_rn, tcg_zero,
                                tcg_rn, tcg_rd);
            tcg_temp_free_i64(tcg_zero);
        }
        break;
    case 0x2f: /* FABS */
        gen_helper_vfp_absd(tcg_rd, tcg_rn);
        break;
    case 0x6f: /* FNEG */
        gen_helper_vfp_negd(tcg_rd, tcg_rn);
        break;
    case 0x7f: /* FSQRT */
        gen_helper_vfp_sqrtd(tcg_rd, tcg_rn, cpu_env);
        break;
    case 0x1a: /* FCVTNS */
    case 0x1b: /* FCVTMS */
    case 0x1c: /* FCVTAS */
    case 0x3a: /* FCVTPS */
    case 0x3b: /* FCVTZS */
    {
        /* double -> signed 64-bit; shift of 0, rounding mode from fpst */
        TCGv_i32 tcg_shift = tcg_const_i32(0);
        gen_helper_vfp_tosqd(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
        tcg_temp_free_i32(tcg_shift);
        break;
    }
    case 0x5a: /* FCVTNU */
    case 0x5b: /* FCVTMU */
    case 0x5c: /* FCVTAU */
    case 0x7a: /* FCVTPU */
    case 0x7b: /* FCVTZU */
    {
        /* double -> unsigned 64-bit */
        TCGv_i32 tcg_shift = tcg_const_i32(0);
        gen_helper_vfp_touqd(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
        tcg_temp_free_i32(tcg_shift);
        break;
    }
    case 0x18: /* FRINTN */
    case 0x19: /* FRINTM */
    case 0x38: /* FRINTP */
    case 0x39: /* FRINTZ */
    case 0x58: /* FRINTA */
    case 0x79: /* FRINTI */
        gen_helper_rintd(tcg_rd, tcg_rn, tcg_fpstatus);
        break;
    case 0x59: /* FRINTX: like FRINTI but raises Inexact */
        gen_helper_rintd_exact(tcg_rd, tcg_rn, tcg_fpstatus);
        break;
    default:
        g_assert_not_reached();
    }
}
8762
/* Floating-point compare against zero (FCMGT/FCMGE/FCMEQ/FCMLE/FCMLT #0.0),
 * shared by the scalar and vector 2-misc groups.  The "less than" forms
 * are implemented by swapping the operands of the corresponding
 * greater-than comparison against zero.
 */
static void handle_2misc_fcmp_zero(DisasContext *s, int opcode,
                                   bool is_scalar, bool is_u, bool is_q,
                                   int size, int rn, int rd)
{
    bool is_double = (size == MO_64);
    TCGv_ptr fpst;

    if (!fp_access_check(s)) {
        return;
    }

    fpst = get_fpstatus_ptr(size == MO_16);

    if (is_double) {
        TCGv_i64 tcg_op = tcg_temp_new_i64();
        TCGv_i64 tcg_zero = tcg_const_i64(0);
        TCGv_i64 tcg_res = tcg_temp_new_i64();
        NeonGenTwoDoubleOPFn *genfn;
        bool swap = false;
        int pass;

        switch (opcode) {
        case 0x2e: /* FCMLT (zero) */
            swap = true;
            /* fall through */
        case 0x2c: /* FCMGT (zero) */
            genfn = gen_helper_neon_cgt_f64;
            break;
        case 0x2d: /* FCMEQ (zero) */
            genfn = gen_helper_neon_ceq_f64;
            break;
        case 0x6d: /* FCMLE (zero) */
            swap = true;
            /* fall through */
        case 0x6c: /* FCMGE (zero) */
            genfn = gen_helper_neon_cge_f64;
            break;
        default:
            g_assert_not_reached();
        }

        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
            read_vec_element(s, tcg_op, rn, pass, MO_64);
            if (swap) {
                genfn(tcg_res, tcg_zero, tcg_op, fpst);
            } else {
                genfn(tcg_res, tcg_op, tcg_zero, fpst);
            }
            write_vec_element(s, tcg_res, rd, pass, MO_64);
        }
        tcg_temp_free_i64(tcg_res);
        tcg_temp_free_i64(tcg_zero);
        tcg_temp_free_i64(tcg_op);

        clear_vec_high(s, !is_scalar, rd);
    } else {
        TCGv_i32 tcg_op = tcg_temp_new_i32();
        TCGv_i32 tcg_zero = tcg_const_i32(0);
        TCGv_i32 tcg_res = tcg_temp_new_i32();
        NeonGenTwoSingleOPFn *genfn;
        bool swap = false;
        int pass, maxpasses;

        if (size == MO_16) {
            switch (opcode) {
            case 0x2e: /* FCMLT (zero) */
                swap = true;
                /* fall through */
            case 0x2c: /* FCMGT (zero) */
                genfn = gen_helper_advsimd_cgt_f16;
                break;
            case 0x2d: /* FCMEQ (zero) */
                genfn = gen_helper_advsimd_ceq_f16;
                break;
            case 0x6d: /* FCMLE (zero) */
                swap = true;
                /* fall through */
            case 0x6c: /* FCMGE (zero) */
                genfn = gen_helper_advsimd_cge_f16;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            switch (opcode) {
            case 0x2e: /* FCMLT (zero) */
                swap = true;
                /* fall through */
            case 0x2c: /* FCMGT (zero) */
                genfn = gen_helper_neon_cgt_f32;
                break;
            case 0x2d: /* FCMEQ (zero) */
                genfn = gen_helper_neon_ceq_f32;
                break;
            case 0x6d: /* FCMLE (zero) */
                swap = true;
                /* fall through */
            case 0x6c: /* FCMGE (zero) */
                genfn = gen_helper_neon_cge_f32;
                break;
            default:
                g_assert_not_reached();
            }
        }

        if (is_scalar) {
            maxpasses = 1;
        } else {
            int vector_size = 8 << is_q;
            maxpasses = vector_size >> size;
        }

        for (pass = 0; pass < maxpasses; pass++) {
            read_vec_element_i32(s, tcg_op, rn, pass, size);
            if (swap) {
                genfn(tcg_res, tcg_zero, tcg_op, fpst);
            } else {
                genfn(tcg_res, tcg_op, tcg_zero, fpst);
            }
            if (is_scalar) {
                write_fp_sreg(s, rd, tcg_res);
            } else {
                write_vec_element_i32(s, tcg_res, rd, pass, size);
            }
        }
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_zero);
        tcg_temp_free_i32(tcg_op);
        if (!is_scalar) {
            clear_vec_high(s, is_q, rd);
        }
    }

    tcg_temp_free_ptr(fpst);
}
8898
/* Reciprocal-estimate family (URECPE, FRECPE, FRECPX, FRSQRTE) for the
 * scalar and vector 2-misc groups; size 3 means double-precision elements.
 */
static void handle_2misc_reciprocal(DisasContext *s, int opcode,
                                    bool is_scalar, bool is_u, bool is_q,
                                    int size, int rn, int rd)
{
    bool is_double = (size == 3);
    TCGv_ptr fpst = get_fpstatus_ptr(false);

    if (is_double) {
        TCGv_i64 tcg_op = tcg_temp_new_i64();
        TCGv_i64 tcg_res = tcg_temp_new_i64();
        int pass;

        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
            read_vec_element(s, tcg_op, rn, pass, MO_64);
            switch (opcode) {
            case 0x3d: /* FRECPE */
                gen_helper_recpe_f64(tcg_res, tcg_op, fpst);
                break;
            case 0x3f: /* FRECPX */
                gen_helper_frecpx_f64(tcg_res, tcg_op, fpst);
                break;
            case 0x7d: /* FRSQRTE */
                gen_helper_rsqrte_f64(tcg_res, tcg_op, fpst);
                break;
            default:
                g_assert_not_reached();
            }
            write_vec_element(s, tcg_res, rd, pass, MO_64);
        }
        tcg_temp_free_i64(tcg_res);
        tcg_temp_free_i64(tcg_op);
        clear_vec_high(s, !is_scalar, rd);
    } else {
        TCGv_i32 tcg_op = tcg_temp_new_i32();
        TCGv_i32 tcg_res = tcg_temp_new_i32();
        int pass, maxpasses;

        if (is_scalar) {
            maxpasses = 1;
        } else {
            maxpasses = is_q ? 4 : 2;
        }

        for (pass = 0; pass < maxpasses; pass++) {
            read_vec_element_i32(s, tcg_op, rn, pass, MO_32);

            switch (opcode) {
            case 0x3c: /* URECPE */
                gen_helper_recpe_u32(tcg_res, tcg_op, fpst);
                break;
            case 0x3d: /* FRECPE */
                gen_helper_recpe_f32(tcg_res, tcg_op, fpst);
                break;
            case 0x3f: /* FRECPX */
                gen_helper_frecpx_f32(tcg_res, tcg_op, fpst);
                break;
            case 0x7d: /* FRSQRTE */
                gen_helper_rsqrte_f32(tcg_res, tcg_op, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            if (is_scalar) {
                write_fp_sreg(s, rd, tcg_res);
            } else {
                write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
            }
        }
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_op);
        if (!is_scalar) {
            clear_vec_high(s, is_q, rd);
        }
    }
    tcg_temp_free_ptr(fpst);
}
8976
static void handle_2misc_narrow(DisasContext *s, bool scalar,
                                int opcode, bool u, bool is_q,
                                int size, int rn, int rd)
{
    /* Handle 2-misc narrowing operations: each (2 * size)-bit source
     * element becomes a size-bit destination element.  With is_q set the
     * results go into the high half of Rd (the "2" narrow-high forms);
     * otherwise into the low half.
     */
    int pass;
    TCGv_i32 tcg_res[2];
    int destelt = is_q ? 2 : 0;
    int passes = scalar ? 1 : 2;

    if (scalar) {
        /* Scalar forms produce only one result; zero the other half */
        tcg_res[1] = tcg_const_i32(0);
    }

    for (pass = 0; pass < passes; pass++) {
        TCGv_i64 tcg_op = tcg_temp_new_i64();
        NeonGenNarrowFn *genfn = NULL;
        NeonGenNarrowEnvFn *genenvfn = NULL;

        if (scalar) {
            read_vec_element(s, tcg_op, rn, pass, size + 1);
        } else {
            read_vec_element(s, tcg_op, rn, pass, MO_64);
        }
        tcg_res[pass] = tcg_temp_new_i32();

        switch (opcode) {
        case 0x12: /* XTN, SQXTUN */
        {
            static NeonGenNarrowFn * const xtnfns[3] = {
                gen_helper_neon_narrow_u8,
                gen_helper_neon_narrow_u16,
                tcg_gen_extrl_i64_i32,
            };
            static NeonGenNarrowEnvFn * const sqxtunfns[3] = {
                gen_helper_neon_unarrow_sat8,
                gen_helper_neon_unarrow_sat16,
                gen_helper_neon_unarrow_sat32,
            };
            if (u) {
                genenvfn = sqxtunfns[size];
            } else {
                genfn = xtnfns[size];
            }
            break;
        }
        case 0x14: /* SQXTN, UQXTN */
        {
            static NeonGenNarrowEnvFn * const fns[3][2] = {
                { gen_helper_neon_narrow_sat_s8,
                  gen_helper_neon_narrow_sat_u8 },
                { gen_helper_neon_narrow_sat_s16,
                  gen_helper_neon_narrow_sat_u16 },
                { gen_helper_neon_narrow_sat_s32,
                  gen_helper_neon_narrow_sat_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x16: /* FCVTN */
            /* size 2 narrows double to single; otherwise convert a pair
             * of singles packed in the i64 to two half-precision results.
             */
            if (size == 2) {
                gen_helper_vfp_fcvtsd(tcg_res[pass], tcg_op, cpu_env);
            } else {
                TCGv_i32 tcg_lo = tcg_temp_new_i32();
                TCGv_i32 tcg_hi = tcg_temp_new_i32();
                TCGv_ptr fpst = get_fpstatus_ptr(false);
                TCGv_i32 ahp = get_ahp_flag();

                tcg_gen_extr_i64_i32(tcg_lo, tcg_hi, tcg_op);
                gen_helper_vfp_fcvt_f32_to_f16(tcg_lo, tcg_lo, fpst, ahp);
                gen_helper_vfp_fcvt_f32_to_f16(tcg_hi, tcg_hi, fpst, ahp);
                tcg_gen_deposit_i32(tcg_res[pass], tcg_lo, tcg_hi, 16, 16);
                tcg_temp_free_i32(tcg_lo);
                tcg_temp_free_i32(tcg_hi);
                tcg_temp_free_ptr(fpst);
                tcg_temp_free_i32(ahp);
            }
            break;
        case 0x56: /* FCVTXN */
            /* double to single, using the round-to-odd semantics of the
             * dedicated helper (only valid for the double source size).
             */
            assert(size == 2);
            gen_helper_fcvtx_f64_to_f32(tcg_res[pass], tcg_op, cpu_env);
            break;
        default:
            g_assert_not_reached();
        }

        if (genfn) {
            genfn(tcg_res[pass], tcg_op);
        } else if (genenvfn) {
            genenvfn(tcg_res[pass], cpu_env, tcg_op);
        }

        tcg_temp_free_i64(tcg_op);
    }

    for (pass = 0; pass < 2; pass++) {
        write_vec_element_i32(s, tcg_res[pass], rd, destelt + pass, MO_32);
        tcg_temp_free_i32(tcg_res[pass]);
    }
    clear_vec_high(s, is_q, rd);
}
9084
9085
/* SUQADD / USQADD: saturating accumulate of the source into Rd.
 * NOTE(review): the helper naming is inverted relative to the insn name --
 * is_u (USQADD) selects the neon_uqadd_s* helpers and !is_u (SUQADD) the
 * neon_sqadd_u* helpers; confirm against the helper definitions.
 */
static void handle_2misc_satacc(DisasContext *s, bool is_scalar, bool is_u,
                                bool is_q, int size, int rn, int rd)
{
    bool is_double = (size == 3);

    if (is_double) {
        TCGv_i64 tcg_rn = tcg_temp_new_i64();
        TCGv_i64 tcg_rd = tcg_temp_new_i64();
        int pass;

        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
            read_vec_element(s, tcg_rn, rn, pass, MO_64);
            read_vec_element(s, tcg_rd, rd, pass, MO_64);

            if (is_u) { /* USQADD */
                gen_helper_neon_uqadd_s64(tcg_rd, cpu_env, tcg_rn, tcg_rd);
            } else { /* SUQADD */
                gen_helper_neon_sqadd_u64(tcg_rd, cpu_env, tcg_rn, tcg_rd);
            }
            write_vec_element(s, tcg_rd, rd, pass, MO_64);
        }
        tcg_temp_free_i64(tcg_rd);
        tcg_temp_free_i64(tcg_rn);
        clear_vec_high(s, !is_scalar, rd);
    } else {
        TCGv_i32 tcg_rn = tcg_temp_new_i32();
        TCGv_i32 tcg_rd = tcg_temp_new_i32();
        int pass, maxpasses;

        if (is_scalar) {
            maxpasses = 1;
        } else {
            maxpasses = is_q ? 4 : 2;
        }

        for (pass = 0; pass < maxpasses; pass++) {
            if (is_scalar) {
                /* Scalar reads use the real element size ... */
                read_vec_element_i32(s, tcg_rn, rn, pass, size);
                read_vec_element_i32(s, tcg_rd, rd, pass, size);
            } else {
                /* ... vector passes always work on 32-bit chunks */
                read_vec_element_i32(s, tcg_rn, rn, pass, MO_32);
                read_vec_element_i32(s, tcg_rd, rd, pass, MO_32);
            }

            if (is_u) { /* USQADD */
                switch (size) {
                case 0:
                    gen_helper_neon_uqadd_s8(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                case 1:
                    gen_helper_neon_uqadd_s16(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                case 2:
                    gen_helper_neon_uqadd_s32(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                default:
                    g_assert_not_reached();
                }
            } else { /* SUQADD */
                switch (size) {
                case 0:
                    gen_helper_neon_sqadd_u8(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                case 1:
                    gen_helper_neon_sqadd_u16(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                case 2:
                    gen_helper_neon_sqadd_u32(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                default:
                    g_assert_not_reached();
                }
            }

            if (is_scalar) {
                /* Zero the whole destination first, then deposit the
                 * 32-bit result below; clears the unused element bits.
                 */
                TCGv_i64 tcg_zero = tcg_const_i64(0);
                write_vec_element(s, tcg_zero, rd, 0, MO_64);
                tcg_temp_free_i64(tcg_zero);
            }
            write_vec_element_i32(s, tcg_rd, rd, pass, MO_32);
        }
        tcg_temp_free_i32(tcg_rd);
        tcg_temp_free_i32(tcg_rn);
        clear_vec_high(s, is_q, rd);
    }
}
9172
9173
9174
9175
9176
9177
9178
/* AdvSIMD scalar two-reg misc.
 * Decoded fields (per the extract32 calls): Rd[4:0], Rn[9:5],
 * opcode[16:12], size[23:22], U[29].
 */
static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 12, 5);
    int size = extract32(insn, 22, 2);
    bool u = extract32(insn, 29, 1);
    bool is_fcvt = false;
    int rmode;
    TCGv_i32 tcg_rmode;
    TCGv_ptr tcg_fpstatus;

    switch (opcode) {
    case 0x3: /* USQADD / SUQADD */
        if (!fp_access_check(s)) {
            return;
        }
        handle_2misc_satacc(s, true, u, false, size, rn, rd);
        return;
    case 0x7: /* SQABS / SQNEG: handled in the size switch below */
        break;
    case 0xa: /* CMLT #0 (no unsigned form) */
        if (u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x8: /* CMGT, CMGE */
    case 0x9: /* CMEQ, CMLE */
    case 0xb: /* ABS, NEG */
        /* Scalar compare/abs/neg are 64-bit only */
        if (size != 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x12: /* SQXTUN */
        if (!u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x14: /* SQXTN, UQXTN */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_2misc_narrow(s, true, opcode, u, false, size, rn, rd);
        return;
    case 0xc ... 0xf:
    case 0x16 ... 0x1d:
    case 0x1f:
        /* Floating point: U and size<1> fold into the opcode so that a
         * single switch covers the whole FP group; size<0> selects
         * single vs double precision.
         */
        opcode |= (extract32(size, 1, 1) << 5) | (u << 6);
        size = extract32(size, 0, 1) ? 3 : 2;
        switch (opcode) {
        case 0x2c: /* FCMGT (zero) */
        case 0x2d: /* FCMEQ (zero) */
        case 0x2e: /* FCMLT (zero) */
        case 0x6c: /* FCMGE (zero) */
        case 0x6d: /* FCMLE (zero) */
            handle_2misc_fcmp_zero(s, opcode, true, u, true, size, rn, rd);
            return;
        case 0x1d: /* SCVTF */
        case 0x5d: /* UCVTF */
        {
            bool is_signed = (opcode == 0x1d);
            if (!fp_access_check(s)) {
                return;
            }
            handle_simd_intfp_conv(s, rd, rn, 1, is_signed, 0, size);
            return;
        }
        case 0x3d: /* FRECPE */
        case 0x3f: /* FRECPX */
        case 0x7d: /* FRSQRTE */
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_reciprocal(s, opcode, true, u, true, size, rn, rd);
            return;
        case 0x1a: /* FCVTNS */
        case 0x1b: /* FCVTMS */
        case 0x3a: /* FCVTPS */
        case 0x3b: /* FCVTZS */
        case 0x5a: /* FCVTNU */
        case 0x5b: /* FCVTMU */
        case 0x7a: /* FCVTPU */
        case 0x7b: /* FCVTZU */
            is_fcvt = true;
            /* FP rounding mode is encoded in opcode<5> and opcode<0> */
            rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
            break;
        case 0x1c: /* FCVTAS */
        case 0x5c: /* FCVTAU */
            /* TIEAWAY rounding does not fit the encoding scheme above */
            is_fcvt = true;
            rmode = FPROUNDING_TIEAWAY;
            break;
        case 0x56: /* FCVTXN */
            if (size == 2) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_narrow(s, true, opcode, u, false, size - 1, rn, rd);
            return;
        default:
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (is_fcvt) {
        /* Install the required rounding mode; restored at the end */
        tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
        tcg_fpstatus = get_fpstatus_ptr(false);
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
    } else {
        tcg_rmode = NULL;
        tcg_fpstatus = NULL;
    }

    if (size == 3) {
        /* All 64-bit element operations share one code path */
        TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
        TCGv_i64 tcg_rd = tcg_temp_new_i64();

        handle_2misc_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rmode, tcg_fpstatus);
        write_fp_dreg(s, rd, tcg_rd);
        tcg_temp_free_i64(tcg_rd);
        tcg_temp_free_i64(tcg_rn);
    } else {
        TCGv_i32 tcg_rn = tcg_temp_new_i32();
        TCGv_i32 tcg_rd = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_rn, rn, 0, size);

        switch (opcode) {
        case 0x7: /* SQABS, SQNEG */
        {
            NeonGenOneOpEnvFn *genfn;
            static NeonGenOneOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 },
                { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 },
                { gen_helper_neon_qabs_s32, gen_helper_neon_qneg_s32 },
            };
            genfn = fns[size][u];
            genfn(tcg_rd, cpu_env, tcg_rn);
            break;
        }
        case 0x1a: /* FCVTNS */
        case 0x1b: /* FCVTMS */
        case 0x1c: /* FCVTAS */
        case 0x3a: /* FCVTPS */
        case 0x3b: /* FCVTZS */
        {
            /* single -> signed 32-bit; rounding mode already installed */
            TCGv_i32 tcg_shift = tcg_const_i32(0);
            gen_helper_vfp_tosls(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
            tcg_temp_free_i32(tcg_shift);
            break;
        }
        case 0x5a: /* FCVTNU */
        case 0x5b: /* FCVTMU */
        case 0x5c: /* FCVTAU */
        case 0x7a: /* FCVTPU */
        case 0x7b: /* FCVTZU */
        {
            /* single -> unsigned 32-bit */
            TCGv_i32 tcg_shift = tcg_const_i32(0);
            gen_helper_vfp_touls(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
            tcg_temp_free_i32(tcg_shift);
            break;
        }
        default:
            g_assert_not_reached();
        }

        write_fp_sreg(s, rd, tcg_rd);
        tcg_temp_free_i32(tcg_rd);
        tcg_temp_free_i32(tcg_rn);
    }

    if (is_fcvt) {
        /* Restore the previous rounding mode (set_rmode swaps) */
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
        tcg_temp_free_i32(tcg_rmode);
        tcg_temp_free_ptr(tcg_fpstatus);
    }
}
9378
9379
/* SSHR/USHR, SSRA/USRA, SRSHR/URSHR, SRSRA/URSRA, SRI -
 * vector shift right by immediate, with optional rounding/accumulate.
 */
static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
                                 int immh, int immb, int opcode, int rn, int rd)
{
    int size = 32 - clz32(immh) - 1;
    int immhb = immh << 3 | immb;
    int shift = 2 * (8 << size) - immhb; /* shift amount, 1..esize */
    bool accumulate = false;
    int dsize = is_q ? 128 : 64;
    int esize = 8 << size;
    int elements = dsize/esize;
    TCGMemOp memop = size | (is_u ? 0 : MO_SIGN);
    TCGv_i64 tcg_rn = new_tmp_a64(s);
    TCGv_i64 tcg_rd = new_tmp_a64(s);
    TCGv_i64 tcg_round;
    uint64_t round_const;
    int i;

    if (extract32(immh, 3, 1) && !is_q) {
        unallocated_encoding(s);
        return;
    }
    tcg_debug_assert(size <= 3);

    if (!fp_access_check(s)) {
        return;
    }

    switch (opcode) {
    case 0x02: /* SSRA / USRA (accumulate) */
        if (is_u) {
            /* Shift count same as element size produces zero to add */
            if (shift == 8 << size) {
                goto done;
            }
            gen_gvec_op2i(s, is_q, rd, rn, shift, &usra_op[size]);
        } else {
            /* Clamp to the element width minus one: a shift by the full
             * element size still adds the sign bits, and an i64 shift by
             * 64 would be undefined.
             */
            if (shift == 8 << size) {
                shift -= 1;
            }
            gen_gvec_op2i(s, is_q, rd, rn, shift, &ssra_op[size]);
        }
        return;
    case 0x08: /* SRI */
        /* Shift count same as element size is valid but does nothing */
        if (shift == 8 << size) {
            goto done;
        }
        gen_gvec_op2i(s, is_q, rd, rn, shift, &sri_op[size]);
        return;

    case 0x00: /* SSHR / USHR */
        if (is_u) {
            if (shift == 8 << size) {
                /* Shift count the same as element size produces zero */
                tcg_gen_gvec_dup8i(vec_full_reg_offset(s, rd),
                                   is_q ? 16 : 8, vec_full_reg_size(s), 0);
            } else {
                gen_gvec_fn2i(s, is_q, rd, rn, shift, tcg_gen_gvec_shri, size);
            }
        } else {
            /* Clamp to esize - 1: full-size shift would be all sign bits */
            if (shift == 8 << size) {
                shift -= 1;
            }
            gen_gvec_fn2i(s, is_q, rd, rn, shift, tcg_gen_gvec_sari, size);
        }
        return;

    case 0x04: /* SRSHR / URSHR (rounding) */
        break;
    case 0x06: /* SRSRA / URSRA (accum + rounding) */
        accumulate = true;
        break;
    default:
        g_assert_not_reached();
    }

    /* Rounding forms: add 2^(shift-1) before shifting, element by element */
    round_const = 1ULL << (shift - 1);
    tcg_round = tcg_const_i64(round_const);

    for (i = 0; i < elements; i++) {
        read_vec_element(s, tcg_rn, rn, i, memop);
        if (accumulate) {
            read_vec_element(s, tcg_rd, rd, i, memop);
        }

        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
                                accumulate, is_u, size, shift);

        write_vec_element(s, tcg_rd, rd, i, size);
    }
    tcg_temp_free_i64(tcg_round);

 done:
    clear_vec_high(s, is_q, rd);
}
9477
9478
9479static void handle_vec_simd_shli(DisasContext *s, bool is_q, bool insert,
9480 int immh, int immb, int opcode, int rn, int rd)
9481{
9482 int size = 32 - clz32(immh) - 1;
9483 int immhb = immh << 3 | immb;
9484 int shift = immhb - (8 << size);
9485
9486
9487 assert(size >= 0 && size <= 3);
9488
9489 if (extract32(immh, 3, 1) && !is_q) {
9490 unallocated_encoding(s);
9491 return;
9492 }
9493
9494 if (!fp_access_check(s)) {
9495 return;
9496 }
9497
9498 if (insert) {
9499 gen_gvec_op2i(s, is_q, rd, rn, shift, &sli_op[size]);
9500 } else {
9501 gen_gvec_fn2i(s, is_q, rd, rn, shift, tcg_gen_gvec_shli, size);
9502 }
9503}
9504
9505
/* SSHLL / USHLL - Vector shift left long: widen each element to 2*esize
 * and shift left by the immediate.  With is_q set the source elements
 * come from the upper half of Rn (the "2" forms).
 */
static void handle_vec_simd_wshli(DisasContext *s, bool is_q, bool is_u,
                                 int immh, int immb, int opcode, int rn, int rd)
{
    int size = 32 - clz32(immh) - 1;
    int immhb = immh << 3 | immb;
    int shift = immhb - (8 << size);
    int dsize = 64;
    int esize = 8 << size;
    int elements = dsize/esize;
    TCGv_i64 tcg_rn = new_tmp_a64(s);
    TCGv_i64 tcg_rd = new_tmp_a64(s);
    int i;

    if (size >= 3) {
        /* immh<3> set is reserved for this group */
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* For the LL variants the store is larger than the load,
     * so if rd == rn we would overwrite parts of our input.
     * So load everything right at the start with one single load,
     * and then shift elements out of the loaded copy in the loop.
     */
    read_vec_element(s, tcg_rn, rn, is_q ? 1 : 0, MO_64);

    for (i = 0; i < elements; i++) {
        tcg_gen_shri_i64(tcg_rd, tcg_rn, i * esize);
        /* Sign- or zero-extend the element: ext_and_shift_reg takes the
         * element size in the low bits plus a signedness bit above them
         * (NOTE(review): encoding inferred from usage -- confirm against
         * ext_and_shift_reg).
         */
        ext_and_shift_reg(tcg_rd, tcg_rd, size | (!is_u << 2), 0);
        tcg_gen_shli_i64(tcg_rd, tcg_rd, shift);
        write_vec_element(s, tcg_rd, rd, i, size + 1);
    }
}
9541
9542
/* SHRN / RSHRN - Shift right (optionally rounding) and narrow.
 * Results go to the low half of Rd, or the high half for the "2" forms
 * (is_q), in which case the other half is preserved via tcg_final.
 */
static void handle_vec_simd_shrn(DisasContext *s, bool is_q,
                                 int immh, int immb, int opcode, int rn, int rd)
{
    int immhb = immh << 3 | immb;
    int size = 32 - clz32(immh) - 1;
    int dsize = 64;
    int esize = 8 << size;
    int elements = dsize/esize;
    int shift = (2 * esize) - immhb;
    bool round = extract32(opcode, 0, 1); /* opcode<0> selects RSHRN */
    TCGv_i64 tcg_rn, tcg_rd, tcg_final;
    TCGv_i64 tcg_round;
    int i;

    if (extract32(immh, 3, 1)) {
        /* immh<3> set would mean a 64-bit narrow result: reserved */
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rn = tcg_temp_new_i64();
    tcg_rd = tcg_temp_new_i64();
    tcg_final = tcg_temp_new_i64();
    /* Preload the half of Rd we are not writing so it is preserved */
    read_vec_element(s, tcg_final, rd, is_q ? 1 : 0, MO_64);

    if (round) {
        uint64_t round_const = 1ULL << (shift - 1);
        tcg_round = tcg_const_i64(round_const);
    } else {
        tcg_round = NULL;
    }

    for (i = 0; i < elements; i++) {
        /* Read the wide (2*esize) element, shift, deposit the narrow
         * result into its slot of the 64-bit accumulator.
         */
        read_vec_element(s, tcg_rn, rn, i, size+1);
        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
                                false, true, size+1, shift);

        tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
    }

    if (!is_q) {
        write_vec_element(s, tcg_final, rd, 0, MO_64);
    } else {
        write_vec_element(s, tcg_final, rd, 1, MO_64);
    }
    if (round) {
        tcg_temp_free_i64(tcg_round);
    }
    tcg_temp_free_i64(tcg_rn);
    tcg_temp_free_i64(tcg_rd);
    tcg_temp_free_i64(tcg_final);

    clear_vec_high(s, is_q, rd);
}
9600
9601
9602
9603
9604
9605
9606
9607
/* AdvSIMD shift by immediate.
 * Decoded fields (per the extract32 calls): Rd[4:0], Rn[9:5],
 * opcode[15:11], immb[18:16], immh[22:19], U[29], Q[30].
 */
static void disas_simd_shift_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 5);
    int immb = extract32(insn, 16, 3);
    int immh = extract32(insn, 19, 4);
    bool is_u = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);

    switch (opcode) {
    case 0x08: /* SRI (U=1 only) */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x00: /* SSHR / USHR */
    case 0x02: /* SSRA / USRA */
    case 0x04: /* SRSHR / URSHR */
    case 0x06: /* SRSRA / URSRA */
        handle_vec_simd_shri(s, is_q, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x0a: /* SHL / SLI */
        handle_vec_simd_shli(s, is_q, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x10: /* SHRN / SQSHRUN */
    case 0x11: /* RSHRN / SQRSHRUN */
        if (is_u) {
            handle_vec_simd_sqshrn(s, false, is_q, false, true, immh, immb,
                                   opcode, rn, rd);
        } else {
            handle_vec_simd_shrn(s, is_q, immh, immb, opcode, rn, rd);
        }
        break;
    case 0x12: /* SQSHRN / UQSHRN */
    case 0x13: /* SQRSHRN / UQRSHRN */
        handle_vec_simd_sqshrn(s, false, is_q, is_u, is_u, immh, immb,
                               opcode, rn, rd);
        break;
    case 0x14: /* SSHLL / USHLL */
        handle_vec_simd_wshli(s, is_q, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x1c: /* SCVTF / UCVTF (fixed-point) */
        handle_simd_shift_intfp_conv(s, false, is_q, is_u, immh, immb,
                                     opcode, rn, rd);
        break;
    case 0xc: /* SQSHLU (U=1 only) */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        handle_simd_qshl(s, false, is_q, false, true, immh, immb, rn, rd);
        break;
    case 0xe: /* SQSHL / UQSHL */
        handle_simd_qshl(s, false, is_q, is_u, is_u, immh, immb, rn, rd);
        break;
    case 0x1f: /* FCVTZS / FCVTZU (fixed-point) */
        handle_simd_shift_fpint_conv(s, false, is_q, is_u, immh, immb, rn, rd);
        return;
    default:
        unallocated_encoding(s);
        return;
    }
}
9673
9674
9675
9676
9677static void gen_neon_addl(int size, bool is_sub, TCGv_i64 tcg_res,
9678 TCGv_i64 tcg_op1, TCGv_i64 tcg_op2)
9679{
9680 static NeonGenTwo64OpFn * const fns[3][2] = {
9681 { gen_helper_neon_addl_u16, gen_helper_neon_subl_u16 },
9682 { gen_helper_neon_addl_u32, gen_helper_neon_subl_u32 },
9683 { tcg_gen_add_i64, tcg_gen_sub_i64 },
9684 };
9685 NeonGenTwo64OpFn *genfn;
9686 assert(size < 3);
9687
9688 genfn = fns[size][is_sub];
9689 genfn(tcg_res, tcg_op1, tcg_op2);
9690}
9691
static void handle_3rd_widening(DisasContext *s, int is_q, int is_u, int size,
                                int opcode, int rd, int rn, int rm)
{
    /* 3-reg-different widening insns: 64 x 64 -> 128.
     * The destination is built in two 64-bit halves in tcg_res[].
     */
    TCGv_i64 tcg_res[2];
    int pass, accop;

    tcg_res[0] = tcg_temp_new_i64();
    tcg_res[1] = tcg_temp_new_i64();

    /* Does this op do an adding accumulate (+1), a subtracting
     * accumulate (-1), or no accumulate at all (0)?
     */
    switch (opcode) {
    case 5:  /* SABAL, SABAL2, UABAL, UABAL2 */
    case 8:  /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
    case 9:  /* SQDMLAL, SQDMLAL2 */
        accop = 1;
        break;
    case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
    case 11: /* SQDMLSL, SQDMLSL2 */
        accop = -1;
        break;
    default:
        accop = 0;
        break;
    }

    if (accop != 0) {
        read_vec_element(s, tcg_res[0], rd, 0, MO_64);
        read_vec_element(s, tcg_res[1], rd, 1, MO_64);
    }

    /* size == 2 means two 32x32->64 operations, which we can do inline
     * with 64-bit TCG ops; smaller sizes go through the Neon helpers
     * in the else branch below.
     */
    if (size == 2) {
        for (pass = 0; pass < 2; pass++) {
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
            TCGv_i64 tcg_passres;
            TCGMemOp memop = MO_32 | (is_u ? 0 : MO_SIGN);

            /* "2" forms (is_q) take their inputs from the upper half */
            int elt = pass + is_q * 2;

            read_vec_element(s, tcg_op1, rn, elt, memop);
            read_vec_element(s, tcg_op2, rm, elt, memop);

            if (accop == 0) {
                tcg_passres = tcg_res[pass];
            } else {
                tcg_passres = tcg_temp_new_i64();
            }

            switch (opcode) {
            case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
                tcg_gen_add_i64(tcg_passres, tcg_op1, tcg_op2);
                break;
            case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
                tcg_gen_sub_i64(tcg_passres, tcg_op1, tcg_op2);
                break;
            case 5: /* SABAL, UABAL */
            case 7: /* SABDL, UABDL */
            {
                /* abs difference via movcond on both subtraction orders */
                TCGv_i64 tcg_tmp1 = tcg_temp_new_i64();
                TCGv_i64 tcg_tmp2 = tcg_temp_new_i64();

                tcg_gen_sub_i64(tcg_tmp1, tcg_op1, tcg_op2);
                tcg_gen_sub_i64(tcg_tmp2, tcg_op2, tcg_op1);
                tcg_gen_movcond_i64(is_u ? TCG_COND_GEU : TCG_COND_GE,
                                    tcg_passres,
                                    tcg_op1, tcg_op2, tcg_tmp1, tcg_tmp2);
                tcg_temp_free_i64(tcg_tmp1);
                tcg_temp_free_i64(tcg_tmp2);
                break;
            }
            case 8:  /* SMLAL, UMLAL */
            case 10: /* SMLSL, UMLSL */
            case 12: /* SMULL, UMULL */
                tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2);
                break;
            case 9:  /* SQDMLAL */
            case 11: /* SQDMLSL */
            case 13: /* SQDMULL */
                /* multiply, then saturating double (x + x) */
                tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2);
                gen_helper_neon_addl_saturate_s64(tcg_passres, cpu_env,
                                                  tcg_passres, tcg_passres);
                break;
            default:
                g_assert_not_reached();
            }

            if (opcode == 9 || opcode == 11) {
                /* saturating accumulate ops */
                if (accop < 0) {
                    tcg_gen_neg_i64(tcg_passres, tcg_passres);
                }
                gen_helper_neon_addl_saturate_s64(tcg_res[pass], cpu_env,
                                                  tcg_res[pass], tcg_passres);
            } else if (accop > 0) {
                tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
            } else if (accop < 0) {
                tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
            }

            if (accop != 0) {
                tcg_temp_free_i64(tcg_passres);
            }

            tcg_temp_free_i64(tcg_op1);
            tcg_temp_free_i64(tcg_op2);
        }
    } else {
        /* size 0 or 1: use the Neon helpers on 32-bit chunks */
        for (pass = 0; pass < 2; pass++) {
            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
            TCGv_i64 tcg_passres;
            int elt = pass + is_q * 2;

            read_vec_element_i32(s, tcg_op1, rn, elt, MO_32);
            read_vec_element_i32(s, tcg_op2, rm, elt, MO_32);

            if (accop == 0) {
                tcg_passres = tcg_res[pass];
            } else {
                tcg_passres = tcg_temp_new_i64();
            }

            switch (opcode) {
            case 0: /* SADDL, UADDL */
            case 2: /* SSUBL, USUBL */
            {
                TCGv_i64 tcg_op2_64 = tcg_temp_new_i64();
                static NeonGenWidenFn * const widenfns[2][2] = {
                    { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 },
                    { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 },
                };
                NeonGenWidenFn *widenfn = widenfns[size][is_u];

                widenfn(tcg_op2_64, tcg_op2);
                widenfn(tcg_passres, tcg_op1);
                gen_neon_addl(size, (opcode == 2), tcg_passres,
                              tcg_passres, tcg_op2_64);
                tcg_temp_free_i64(tcg_op2_64);
                break;
            }
            case 5: /* SABAL, UABAL */
            case 7: /* SABDL, UABDL */
                if (size == 0) {
                    if (is_u) {
                        gen_helper_neon_abdl_u16(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_abdl_s16(tcg_passres, tcg_op1, tcg_op2);
                    }
                } else {
                    if (is_u) {
                        gen_helper_neon_abdl_u32(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_abdl_s32(tcg_passres, tcg_op1, tcg_op2);
                    }
                }
                break;
            case 8:  /* SMLAL, UMLAL */
            case 10: /* SMLSL, UMLSL */
            case 12: /* SMULL, UMULL */
                if (size == 0) {
                    if (is_u) {
                        gen_helper_neon_mull_u8(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_mull_s8(tcg_passres, tcg_op1, tcg_op2);
                    }
                } else {
                    if (is_u) {
                        gen_helper_neon_mull_u16(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2);
                    }
                }
                break;
            case 9:  /* SQDMLAL */
            case 11: /* SQDMLSL */
            case 13: /* SQDMULL */
                /* only the 16-bit element size reaches here */
                assert(size == 1);
                gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2);
                gen_helper_neon_addl_saturate_s32(tcg_passres, cpu_env,
                                                  tcg_passres, tcg_passres);
                break;
            case 14: /* PMULL (8-bit polynomial only) */
                assert(size == 0);
                gen_helper_neon_mull_p8(tcg_passres, tcg_op1, tcg_op2);
                break;
            default:
                g_assert_not_reached();
            }
            tcg_temp_free_i32(tcg_op1);
            tcg_temp_free_i32(tcg_op2);

            if (accop != 0) {
                if (opcode == 9 || opcode == 11) {
                    /* saturating accumulate ops */
                    if (accop < 0) {
                        gen_helper_neon_negl_u32(tcg_passres, tcg_passres);
                    }
                    gen_helper_neon_addl_saturate_s32(tcg_res[pass], cpu_env,
                                                      tcg_res[pass],
                                                      tcg_passres);
                } else {
                    gen_neon_addl(size, (accop < 0), tcg_res[pass],
                                  tcg_res[pass], tcg_passres);
                }
                tcg_temp_free_i64(tcg_passres);
            }
        }
    }

    write_vec_element(s, tcg_res[0], rd, 0, MO_64);
    write_vec_element(s, tcg_res[1], rd, 1, MO_64);
    tcg_temp_free_i64(tcg_res[0]);
    tcg_temp_free_i64(tcg_res[1]);
}
9913
/* 3-reg-different "wide" insns (SADDW/UADDW/SSUBW/USUBW and their "2"
 * forms): a 2*esize Rn operand combined with a widened esize Rm operand.
 * opcode == 3 selects subtraction (see the gen_neon_addl call).
 */
static void handle_3rd_wide(DisasContext *s, int is_q, int is_u, int size,
                            int opcode, int rd, int rn, int rm)
{
    TCGv_i64 tcg_res[2];
    int part = is_q ? 2 : 0; /* "2" forms read Rm's upper half */
    int pass;

    for (pass = 0; pass < 2; pass++) {
        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
        TCGv_i32 tcg_op2 = tcg_temp_new_i32();
        TCGv_i64 tcg_op2_wide = tcg_temp_new_i64();
        static NeonGenWidenFn * const widenfns[3][2] = {
            { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 },
            { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 },
            { tcg_gen_ext_i32_i64, tcg_gen_extu_i32_i64 },
        };
        NeonGenWidenFn *widenfn = widenfns[size][is_u];

        read_vec_element(s, tcg_op1, rn, pass, MO_64);
        read_vec_element_i32(s, tcg_op2, rm, part + pass, MO_32);
        widenfn(tcg_op2_wide, tcg_op2);
        tcg_temp_free_i32(tcg_op2);
        tcg_res[pass] = tcg_temp_new_i64();
        gen_neon_addl(size, (opcode == 3),
                      tcg_res[pass], tcg_op1, tcg_op2_wide);
        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2_wide);
    }

    /* Results are written after both passes so rd may alias rn/rm */
    for (pass = 0; pass < 2; pass++) {
        write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
        tcg_temp_free_i64(tcg_res[pass]);
    }
}
9948
/* Narrow a 64-bit value to its high 32 bits with rounding: add half of
 * the least significant kept bit (1 << 31) before extracting the top
 * half.  NOTE: this clobbers the input temporary 'in'.
 */
static void do_narrow_round_high_u32(TCGv_i32 res, TCGv_i64 in)
{
    tcg_gen_addi_i64(in, in, 1U << 31);
    tcg_gen_extrh_i64_i32(res, in);
}
9954
9955static void handle_3rd_narrowing(DisasContext *s, int is_q, int is_u, int size,
9956 int opcode, int rd, int rn, int rm)
9957{
9958 TCGv_i32 tcg_res[2];
9959 int part = is_q ? 2 : 0;
9960 int pass;
9961
9962 for (pass = 0; pass < 2; pass++) {
9963 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
9964 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
9965 TCGv_i64 tcg_wideres = tcg_temp_new_i64();
9966 static NeonGenNarrowFn * const narrowfns[3][2] = {
9967 { gen_helper_neon_narrow_high_u8,
9968 gen_helper_neon_narrow_round_high_u8 },
9969 { gen_helper_neon_narrow_high_u16,
9970 gen_helper_neon_narrow_round_high_u16 },
9971 { tcg_gen_extrh_i64_i32, do_narrow_round_high_u32 },
9972 };
9973 NeonGenNarrowFn *gennarrow = narrowfns[size][is_u];
9974
9975 read_vec_element(s, tcg_op1, rn, pass, MO_64);
9976 read_vec_element(s, tcg_op2, rm, pass, MO_64);
9977
9978 gen_neon_addl(size, (opcode == 6), tcg_wideres, tcg_op1, tcg_op2);
9979
9980 tcg_temp_free_i64(tcg_op1);
9981 tcg_temp_free_i64(tcg_op2);
9982
9983 tcg_res[pass] = tcg_temp_new_i32();
9984 gennarrow(tcg_res[pass], tcg_wideres);
9985 tcg_temp_free_i64(tcg_wideres);
9986 }
9987
9988 for (pass = 0; pass < 2; pass++) {
9989 write_vec_element_i32(s, tcg_res[pass], rd, pass + part, MO_32);
9990 tcg_temp_free_i32(tcg_res[pass]);
9991 }
9992 clear_vec_high(s, is_q, rd);
9993}
9994
9995static void handle_pmull_64(DisasContext *s, int is_q, int rd, int rn, int rm)
9996{
9997
9998
9999
10000
10001
10002
10003 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
10004 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
10005 TCGv_i64 tcg_res = tcg_temp_new_i64();
10006
10007 read_vec_element(s, tcg_op1, rn, is_q, MO_64);
10008 read_vec_element(s, tcg_op2, rm, is_q, MO_64);
10009 gen_helper_neon_pmull_64_lo(tcg_res, tcg_op1, tcg_op2);
10010 write_vec_element(s, tcg_res, rd, 0, MO_64);
10011 gen_helper_neon_pmull_64_hi(tcg_res, tcg_op1, tcg_op2);
10012 write_vec_element(s, tcg_res, rd, 1, MO_64);
10013
10014 tcg_temp_free_i64(tcg_op1);
10015 tcg_temp_free_i64(tcg_op2);
10016 tcg_temp_free_i64(tcg_res);
10017}
10018
10019
10020
10021
10022
10023
10024
/* AdvSIMD "three registers, different widths" group.  Decoded fields:
 * Q (bit 30), U (bit 29), size (bits 23:22), Rm, opcode (bits 15:12),
 * Rn, Rd.  Dispatches to the wide / narrowing / widening handlers
 * according to opcode.
 */
static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn)
{
    int is_q = extract32(insn, 30, 1);
    int is_u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 4);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    switch (opcode) {
    case 1: /* SADDW, SADDW2, UADDW, UADDW2 */
    case 3: /* SSUBW, SSUBW2, USUBW, USUBW2 */
        /* 64 x 128 -> 128 */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_3rd_wide(s, is_q, is_u, size, opcode, rd, rn, rm);
        break;
    case 4: /* ADDHN, ADDHN2, RADDHN, RADDHN2 */
    case 6: /* SUBHN, SUBHN2, RSUBHN, RSUBHN2 */
        /* 128 x 128 -> 64 */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_3rd_narrowing(s, is_q, is_u, size, opcode, rd, rn, rm);
        break;
    case 14: /* PMULL, PMULL2 */
        if (is_u || size == 1 || size == 2) {
            unallocated_encoding(s);
            return;
        }
        if (size == 3) {
            /* 64x64 -> 128 form requires the PMULL feature */
            if (!dc_isar_feature(aa64_pmull, s)) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_pmull_64(s, is_q, rd, rn, rm);
            return;
        }
        goto is_widening;
    case 9: /* SQDMLAL, SQDMLAL2 */
    case 11: /* SQDMLSL, SQDMLSL2 */
    case 13: /* SQDMULL, SQDMULL2 */
        if (is_u || size == 0) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
    case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
    case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
    case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
    case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
    case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
    case 12: /* SMULL, SMULL2, UMULL, UMULL2 */
        /* 64 x 64 -> 128 */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
    is_widening:
        if (!fp_access_check(s)) {
            return;
        }

        handle_3rd_widening(s, is_q, is_u, size, opcode, rd, rn, rm);
        break;
    default:
        /* opcode 15 not allocated */
        unallocated_encoding(s);
        break;
    }
}
10120
10121
10122static void disas_simd_3same_logic(DisasContext *s, uint32_t insn)
10123{
10124 int rd = extract32(insn, 0, 5);
10125 int rn = extract32(insn, 5, 5);
10126 int rm = extract32(insn, 16, 5);
10127 int size = extract32(insn, 22, 2);
10128 bool is_u = extract32(insn, 29, 1);
10129 bool is_q = extract32(insn, 30, 1);
10130
10131 if (!fp_access_check(s)) {
10132 return;
10133 }
10134
10135 switch (size + 4 * is_u) {
10136 case 0:
10137 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_and, 0);
10138 return;
10139 case 1:
10140 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_andc, 0);
10141 return;
10142 case 2:
10143 if (rn == rm) {
10144 gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_mov, 0);
10145 } else {
10146 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_or, 0);
10147 }
10148 return;
10149 case 3:
10150 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_orc, 0);
10151 return;
10152 case 4:
10153 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_xor, 0);
10154 return;
10155
10156 case 5:
10157 gen_gvec_op3(s, is_q, rd, rn, rm, &bsl_op);
10158 return;
10159 case 6:
10160 gen_gvec_op3(s, is_q, rd, rn, rm, &bit_op);
10161 return;
10162 case 7:
10163 gen_gvec_op3(s, is_q, rd, rn, rm, &bif_op);
10164 return;
10165
10166 default:
10167 g_assert_not_reached();
10168 }
10169}
10170
10171
10172
10173
10174
10175
10176static void handle_simd_3same_pair(DisasContext *s, int is_q, int u, int opcode,
10177 int size, int rn, int rm, int rd)
10178{
10179 TCGv_ptr fpst;
10180 int pass;
10181
10182
10183 if (opcode >= 0x58) {
10184 fpst = get_fpstatus_ptr(false);
10185 } else {
10186 fpst = NULL;
10187 }
10188
10189 if (!fp_access_check(s)) {
10190 return;
10191 }
10192
10193
10194
10195
10196 if (size == 3) {
10197 TCGv_i64 tcg_res[2];
10198
10199 for (pass = 0; pass < 2; pass++) {
10200 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
10201 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
10202 int passreg = (pass == 0) ? rn : rm;
10203
10204 read_vec_element(s, tcg_op1, passreg, 0, MO_64);
10205 read_vec_element(s, tcg_op2, passreg, 1, MO_64);
10206 tcg_res[pass] = tcg_temp_new_i64();
10207
10208 switch (opcode) {
10209 case 0x17:
10210 tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2);
10211 break;
10212 case 0x58:
10213 gen_helper_vfp_maxnumd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10214 break;
10215 case 0x5a:
10216 gen_helper_vfp_addd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10217 break;
10218 case 0x5e:
10219 gen_helper_vfp_maxd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10220 break;
10221 case 0x78:
10222 gen_helper_vfp_minnumd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10223 break;
10224 case 0x7e:
10225 gen_helper_vfp_mind(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10226 break;
10227 default:
10228 g_assert_not_reached();
10229 }
10230
10231 tcg_temp_free_i64(tcg_op1);
10232 tcg_temp_free_i64(tcg_op2);
10233 }
10234
10235 for (pass = 0; pass < 2; pass++) {
10236 write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
10237 tcg_temp_free_i64(tcg_res[pass]);
10238 }
10239 } else {
10240 int maxpass = is_q ? 4 : 2;
10241 TCGv_i32 tcg_res[4];
10242
10243 for (pass = 0; pass < maxpass; pass++) {
10244 TCGv_i32 tcg_op1 = tcg_temp_new_i32();
10245 TCGv_i32 tcg_op2 = tcg_temp_new_i32();
10246 NeonGenTwoOpFn *genfn = NULL;
10247 int passreg = pass < (maxpass / 2) ? rn : rm;
10248 int passelt = (is_q && (pass & 1)) ? 2 : 0;
10249
10250 read_vec_element_i32(s, tcg_op1, passreg, passelt, MO_32);
10251 read_vec_element_i32(s, tcg_op2, passreg, passelt + 1, MO_32);
10252 tcg_res[pass] = tcg_temp_new_i32();
10253
10254 switch (opcode) {
10255 case 0x17:
10256 {
10257 static NeonGenTwoOpFn * const fns[3] = {
10258 gen_helper_neon_padd_u8,
10259 gen_helper_neon_padd_u16,
10260 tcg_gen_add_i32,
10261 };
10262 genfn = fns[size];
10263 break;
10264 }
10265 case 0x14:
10266 {
10267 static NeonGenTwoOpFn * const fns[3][2] = {
10268 { gen_helper_neon_pmax_s8, gen_helper_neon_pmax_u8 },
10269 { gen_helper_neon_pmax_s16, gen_helper_neon_pmax_u16 },
10270 { tcg_gen_smax_i32, tcg_gen_umax_i32 },
10271 };
10272 genfn = fns[size][u];
10273 break;
10274 }
10275 case 0x15:
10276 {
10277 static NeonGenTwoOpFn * const fns[3][2] = {
10278 { gen_helper_neon_pmin_s8, gen_helper_neon_pmin_u8 },
10279 { gen_helper_neon_pmin_s16, gen_helper_neon_pmin_u16 },
10280 { tcg_gen_smin_i32, tcg_gen_umin_i32 },
10281 };
10282 genfn = fns[size][u];
10283 break;
10284 }
10285
10286 case 0x58:
10287 gen_helper_vfp_maxnums(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10288 break;
10289 case 0x5a:
10290 gen_helper_vfp_adds(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10291 break;
10292 case 0x5e:
10293 gen_helper_vfp_maxs(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10294 break;
10295 case 0x78:
10296 gen_helper_vfp_minnums(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10297 break;
10298 case 0x7e:
10299 gen_helper_vfp_mins(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10300 break;
10301 default:
10302 g_assert_not_reached();
10303 }
10304
10305
10306 if (genfn) {
10307 genfn(tcg_res[pass], tcg_op1, tcg_op2);
10308 }
10309
10310 tcg_temp_free_i32(tcg_op1);
10311 tcg_temp_free_i32(tcg_op2);
10312 }
10313
10314 for (pass = 0; pass < maxpass; pass++) {
10315 write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32);
10316 tcg_temp_free_i32(tcg_res[pass]);
10317 }
10318 clear_vec_high(s, is_q, rd);
10319 }
10320
10321 if (fpst) {
10322 tcg_temp_free_ptr(fpst);
10323 }
10324}
10325
10326
/* Floating point op subgroup of AdvSIMD three-reg-same. */
static void disas_simd_3same_float(DisasContext *s, uint32_t insn)
{
    /* For the FP ops, the U bit, size[1] and the opcode field together
     * indicate the operation; size[0] selects single vs double
     * precision.
     */
    int fpopcode = extract32(insn, 11, 5)
        | (extract32(insn, 23, 1) << 5)
        | (extract32(insn, 29, 1) << 6);
    int is_q = extract32(insn, 30, 1);
    int size = extract32(insn, 22, 1);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    int datasize = is_q ? 128 : 64;
    int esize = 32 << size;
    int elements = datasize / esize;

    if (size == 1 && !is_q) {
        /* double-precision elements require the 128-bit (Q) form */
        unallocated_encoding(s);
        return;
    }

    switch (fpopcode) {
    case 0x58: /* FMAXNMP */
    case 0x5a: /* FADDP */
    case 0x5e: /* FMAXP */
    case 0x78: /* FMINNMP */
    case 0x7e: /* FMINP */
        if (size && !is_q) {
            unallocated_encoding(s);
            return;
        }
        /* NOTE: fp_access_check is done inside handle_simd_3same_pair */
        handle_simd_3same_pair(s, is_q, 0, fpopcode, size ? MO_64 : MO_32,
                               rn, rm, rd);
        return;
    case 0x1b: /* FMULX */
    case 0x1f: /* FRECPS */
    case 0x3f: /* FRSQRTS */
    case 0x5d: /* FACGE */
    case 0x7d: /* FACGT */
    case 0x19: /* FMLA */
    case 0x39: /* FMLS */
    case 0x18: /* FMAXNM */
    case 0x1a: /* FADD */
    case 0x1c: /* FCMEQ */
    case 0x1e: /* FMAX */
    case 0x38: /* FMINNM */
    case 0x3a: /* FSUB */
    case 0x3e: /* FMIN */
    case 0x5b: /* FMUL */
    case 0x5c: /* FCMGE */
    case 0x5f: /* FDIV */
    case 0x7a: /* FABD */
    case 0x7c: /* FCMGT */
        if (!fp_access_check(s)) {
            return;
        }

        handle_3same_float(s, size, elements, fpopcode, rd, rn, rm);
        return;
    default:
        unallocated_encoding(s);
        return;
    }
}
10394
10395
/* Integer op subgroup of AdvSIMD three-reg-same. */
static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
{
    int is_q = extract32(insn, 30, 1);
    int u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 11, 5);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    int pass;
    TCGCond cond;

    /* First pass: reject unallocated size/opcode combinations */
    switch (opcode) {
    case 0x13: /* MUL, PMUL */
        if (u && size != 0) {
            /* PMUL only exists for the byte element size */
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x0: /* SHADD, UHADD */
    case 0x2: /* SRHADD, URHADD */
    case 0x4: /* SHSUB, UHSUB */
    case 0xc: /* SMAX, UMAX */
    case 0xd: /* SMIN, UMIN */
    case 0xe: /* SABD, UABD */
    case 0xf: /* SABA, UABA */
    case 0x12: /* MLA, MLS */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x16: /* SQDMULH, SQRDMULH */
        if (size == 0 || size == 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        if (size == 3 && !is_q) {
            /* 64-bit element forms only exist as 128-bit vectors */
            unallocated_encoding(s);
            return;
        }
        break;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* Ops with whole-vector (gvec) implementations */
    switch (opcode) {
    case 0x10: /* ADD, SUB */
        if (u) {
            gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_sub, size);
        } else {
            gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_add, size);
        }
        return;
    case 0x13: /* MUL, PMUL */
        if (!u) { /* MUL */
            gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_mul, size);
            return;
        }
        /* PMUL falls through to the per-element path below */
        break;
    case 0x12: /* MLA, MLS */
        if (u) {
            gen_gvec_op3(s, is_q, rd, rn, rm, &mls_op[size]);
        } else {
            gen_gvec_op3(s, is_q, rd, rn, rm, &mla_op[size]);
        }
        return;
    case 0x11: /* CMTST, CMEQ */
        if (!u) { /* CMTST */
            gen_gvec_op3(s, is_q, rd, rn, rm, &cmtst_op[size]);
            return;
        }
        /* else CMEQ */
        cond = TCG_COND_EQ;
        goto do_gvec_cmp;
    case 0x06: /* CMGT, CMHI */
        cond = u ? TCG_COND_GTU : TCG_COND_GT;
        goto do_gvec_cmp;
    case 0x07: /* CMGE, CMHS */
        cond = u ? TCG_COND_GEU : TCG_COND_GE;
    do_gvec_cmp:
        tcg_gen_gvec_cmp(cond, size, vec_full_reg_offset(s, rd),
                         vec_full_reg_offset(s, rn),
                         vec_full_reg_offset(s, rm),
                         is_q ? 16 : 8, vec_full_reg_size(s));
        return;
    }

    if (size == 3) {
        /* 64-bit element ops, handled one element at a time */
        assert(is_q);
        for (pass = 0; pass < 2; pass++) {
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
            TCGv_i64 tcg_res = tcg_temp_new_i64();

            read_vec_element(s, tcg_op1, rn, pass, MO_64);
            read_vec_element(s, tcg_op2, rm, pass, MO_64);

            handle_3same_64(s, opcode, u, tcg_res, tcg_op1, tcg_op2);

            write_vec_element(s, tcg_res, rd, pass, MO_64);

            tcg_temp_free_i64(tcg_res);
            tcg_temp_free_i64(tcg_op1);
            tcg_temp_free_i64(tcg_op2);
        }
    } else {
        /* 8/16/32-bit elements, processed one 32-bit lane at a time.
         * genfn is a plain two-operand generator; genenvfn additionally
         * takes cpu_env (saturating ops that may set QC).
         */
        for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();
            NeonGenTwoOpFn *genfn = NULL;
            NeonGenTwoOpEnvFn *genenvfn = NULL;

            read_vec_element_i32(s, tcg_op1, rn, pass, MO_32);
            read_vec_element_i32(s, tcg_op2, rm, pass, MO_32);

            switch (opcode) {
            case 0x0: /* SHADD, UHADD */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_hadd_s8, gen_helper_neon_hadd_u8 },
                    { gen_helper_neon_hadd_s16, gen_helper_neon_hadd_u16 },
                    { gen_helper_neon_hadd_s32, gen_helper_neon_hadd_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0x1: /* SQADD, UQADD */
            {
                static NeonGenTwoOpEnvFn * const fns[3][2] = {
                    { gen_helper_neon_qadd_s8, gen_helper_neon_qadd_u8 },
                    { gen_helper_neon_qadd_s16, gen_helper_neon_qadd_u16 },
                    { gen_helper_neon_qadd_s32, gen_helper_neon_qadd_u32 },
                };
                genenvfn = fns[size][u];
                break;
            }
            case 0x2: /* SRHADD, URHADD */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_rhadd_s8, gen_helper_neon_rhadd_u8 },
                    { gen_helper_neon_rhadd_s16, gen_helper_neon_rhadd_u16 },
                    { gen_helper_neon_rhadd_s32, gen_helper_neon_rhadd_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0x4: /* SHSUB, UHSUB */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_hsub_s8, gen_helper_neon_hsub_u8 },
                    { gen_helper_neon_hsub_s16, gen_helper_neon_hsub_u16 },
                    { gen_helper_neon_hsub_s32, gen_helper_neon_hsub_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0x5: /* SQSUB, UQSUB */
            {
                static NeonGenTwoOpEnvFn * const fns[3][2] = {
                    { gen_helper_neon_qsub_s8, gen_helper_neon_qsub_u8 },
                    { gen_helper_neon_qsub_s16, gen_helper_neon_qsub_u16 },
                    { gen_helper_neon_qsub_s32, gen_helper_neon_qsub_u32 },
                };
                genenvfn = fns[size][u];
                break;
            }
            case 0x8: /* SSHL, USHL */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_shl_s8, gen_helper_neon_shl_u8 },
                    { gen_helper_neon_shl_s16, gen_helper_neon_shl_u16 },
                    { gen_helper_neon_shl_s32, gen_helper_neon_shl_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0x9: /* SQSHL, UQSHL */
            {
                static NeonGenTwoOpEnvFn * const fns[3][2] = {
                    { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 },
                    { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 },
                    { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 },
                };
                genenvfn = fns[size][u];
                break;
            }
            case 0xa: /* SRSHL, URSHL */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_rshl_s8, gen_helper_neon_rshl_u8 },
                    { gen_helper_neon_rshl_s16, gen_helper_neon_rshl_u16 },
                    { gen_helper_neon_rshl_s32, gen_helper_neon_rshl_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0xb: /* SQRSHL, UQRSHL */
            {
                static NeonGenTwoOpEnvFn * const fns[3][2] = {
                    { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 },
                    { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 },
                    { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 },
                };
                genenvfn = fns[size][u];
                break;
            }
            case 0xc: /* SMAX, UMAX */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_max_s8, gen_helper_neon_max_u8 },
                    { gen_helper_neon_max_s16, gen_helper_neon_max_u16 },
                    { tcg_gen_smax_i32, tcg_gen_umax_i32 },
                };
                genfn = fns[size][u];
                break;
            }

            case 0xd: /* SMIN, UMIN */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_min_s8, gen_helper_neon_min_u8 },
                    { gen_helper_neon_min_s16, gen_helper_neon_min_u16 },
                    { tcg_gen_smin_i32, tcg_gen_umin_i32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0xe: /* SABD, UABD */
            case 0xf: /* SABA, UABA */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_abd_s8, gen_helper_neon_abd_u8 },
                    { gen_helper_neon_abd_s16, gen_helper_neon_abd_u16 },
                    { gen_helper_neon_abd_s32, gen_helper_neon_abd_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0x13: /* PMUL (MUL handled by gvec path above) */
                assert(u);
                assert(size == 0);
                genfn = gen_helper_neon_mul_p8;
                break;
            case 0x16: /* SQDMULH, SQRDMULH */
            {
                static NeonGenTwoOpEnvFn * const fns[2][2] = {
                    { gen_helper_neon_qdmulh_s16, gen_helper_neon_qrdmulh_s16 },
                    { gen_helper_neon_qdmulh_s32, gen_helper_neon_qrdmulh_s32 },
                };
                assert(size == 1 || size == 2);
                genenvfn = fns[size - 1][u];
                break;
            }
            default:
                g_assert_not_reached();
            }

            if (genenvfn) {
                genenvfn(tcg_res, cpu_env, tcg_op1, tcg_op2);
            } else {
                genfn(tcg_res, tcg_op1, tcg_op2);
            }

            if (opcode == 0xf) {
                /* SABA, UABA: accumulate the absolute difference into rd */
                static NeonGenTwoOpFn * const fns[3] = {
                    gen_helper_neon_add_u8,
                    gen_helper_neon_add_u16,
                    tcg_gen_add_i32,
                };

                read_vec_element_i32(s, tcg_op1, rd, pass, MO_32);
                fns[size](tcg_res, tcg_op1, tcg_res);
            }

            write_vec_element_i32(s, tcg_res, rd, pass, MO_32);

            tcg_temp_free_i32(tcg_res);
            tcg_temp_free_i32(tcg_op1);
            tcg_temp_free_i32(tcg_op2);
        }
    }
    clear_vec_high(s, is_q, rd);
}
10686
10687
10688
10689
10690
10691
10692
10693static void disas_simd_three_reg_same(DisasContext *s, uint32_t insn)
10694{
10695 int opcode = extract32(insn, 11, 5);
10696
10697 switch (opcode) {
10698 case 0x3:
10699 disas_simd_3same_logic(s, insn);
10700 break;
10701 case 0x17:
10702 case 0x14:
10703 case 0x15:
10704 {
10705
10706 int is_q = extract32(insn, 30, 1);
10707 int u = extract32(insn, 29, 1);
10708 int size = extract32(insn, 22, 2);
10709 int rm = extract32(insn, 16, 5);
10710 int rn = extract32(insn, 5, 5);
10711 int rd = extract32(insn, 0, 5);
10712 if (opcode == 0x17) {
10713 if (u || (size == 3 && !is_q)) {
10714 unallocated_encoding(s);
10715 return;
10716 }
10717 } else {
10718 if (size == 3) {
10719 unallocated_encoding(s);
10720 return;
10721 }
10722 }
10723 handle_simd_3same_pair(s, is_q, u, opcode, size, rn, rm, rd);
10724 break;
10725 }
10726 case 0x18 ... 0x31:
10727
10728 disas_simd_3same_float(s, insn);
10729 break;
10730 default:
10731 disas_simd_3same_int(s, insn);
10732 break;
10733 }
10734}
10735
10736
10737
10738
10739
10740
10741
10742
10743
10744
10745
10746
10747
/* AdvSIMD three-reg-same with half-precision (fp16) elements.
 * Requires the fp16 feature; rejected as unallocated otherwise.
 */
static void disas_simd_three_reg_same_fp16(DisasContext *s, uint32_t insn)
{
    int opcode, fpopcode;
    int is_q, u, a, rm, rn, rd;
    int datasize, elements;
    int pass;
    TCGv_ptr fpst;
    bool pairwise = false;

    if (!dc_isar_feature(aa64_fp16, s)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* For these fp ops, the U, a and opcode bits together indicate
     * the operation.
     */
    opcode = extract32(insn, 11, 3);
    u = extract32(insn, 29, 1);
    a = extract32(insn, 23, 1);
    is_q = extract32(insn, 30, 1);
    rm = extract32(insn, 16, 5);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    fpopcode = opcode | (a << 3) | (u << 4);
    datasize = is_q ? 128 : 64;
    elements = datasize / 16;

    switch (fpopcode) {
    case 0x10: /* FMAXNMP */
    case 0x12: /* FADDP */
    case 0x16: /* FMAXP */
    case 0x18: /* FMINNMP */
    case 0x1e: /* FMINP */
        pairwise = true;
        break;
    }

    /* true: use the half-precision float-status */
    fpst = get_fpstatus_ptr(true);

    if (pairwise) {
        /* Pairwise: operate on adjacent element pairs of rm:rn */
        int maxpass = is_q ? 8 : 4;
        TCGv_i32 tcg_op1 = tcg_temp_new_i32();
        TCGv_i32 tcg_op2 = tcg_temp_new_i32();
        TCGv_i32 tcg_res[8];

        for (pass = 0; pass < maxpass; pass++) {
            /* first half of passes reads rn, second half reads rm */
            int passreg = pass < (maxpass / 2) ? rn : rm;
            int passelt = (pass << 1) & (maxpass - 1);

            read_vec_element_i32(s, tcg_op1, passreg, passelt, MO_16);
            read_vec_element_i32(s, tcg_op2, passreg, passelt + 1, MO_16);
            tcg_res[pass] = tcg_temp_new_i32();

            switch (fpopcode) {
            case 0x10: /* FMAXNMP */
                gen_helper_advsimd_maxnumh(tcg_res[pass], tcg_op1, tcg_op2,
                                           fpst);
                break;
            case 0x12: /* FADDP */
                gen_helper_advsimd_addh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x16: /* FMAXP */
                gen_helper_advsimd_maxh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x18: /* FMINNMP */
                gen_helper_advsimd_minnumh(tcg_res[pass], tcg_op1, tcg_op2,
                                           fpst);
                break;
            case 0x1e: /* FMINP */
                gen_helper_advsimd_minh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }
        }

        /* Write back after all results are computed: rd may overlap
         * rn or rm.
         */
        for (pass = 0; pass < maxpass; pass++) {
            write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_16);
            tcg_temp_free_i32(tcg_res[pass]);
        }

        tcg_temp_free_i32(tcg_op1);
        tcg_temp_free_i32(tcg_op2);

    } else {
        for (pass = 0; pass < elements; pass++) {
            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op1, rn, pass, MO_16);
            read_vec_element_i32(s, tcg_op2, rm, pass, MO_16);

            switch (fpopcode) {
            case 0x0: /* FMAXNM */
                gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1: /* FMLA */
                read_vec_element_i32(s, tcg_res, rd, pass, MO_16);
                gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_res,
                                           fpst);
                break;
            case 0x2: /* FADD */
                gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3: /* FMULX */
                gen_helper_advsimd_mulxh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x4: /* FCMEQ */
                gen_helper_advsimd_ceq_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x6: /* FMAX */
                gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7: /* FRECPS */
                gen_helper_recpsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x8: /* FMINNM */
                gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x9: /* FMLS */
                /* negate the first operand (flip fp16 sign bit) and
                 * then do a fused multiply-add
                 */
                tcg_gen_xori_i32(tcg_op1, tcg_op1, 0x8000);
                read_vec_element_i32(s, tcg_res, rd, pass, MO_16);
                gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_res,
                                           fpst);
                break;
            case 0xa: /* FSUB */
                gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xe: /* FMIN */
                gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xf: /* FRSQRTS */
                gen_helper_rsqrtsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x13: /* FMUL */
                gen_helper_advsimd_mulh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x14: /* FCMGE */
                gen_helper_advsimd_cge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x15: /* FACGE */
                gen_helper_advsimd_acge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x17: /* FDIV */
                gen_helper_advsimd_divh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1a: /* FABD */
                /* subtract then clear the fp16 sign bit */
                gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
                tcg_gen_andi_i32(tcg_res, tcg_res, 0x7fff);
                break;
            case 0x1c: /* FCMGT */
                gen_helper_advsimd_cgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1d: /* FACGT */
                gen_helper_advsimd_acgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                fprintf(stderr, "%s: insn %#04x, fpop %#2x @ %#" PRIx64 "\n",
                        __func__, insn, fpopcode, s->pc);
                g_assert_not_reached();
            }

            write_vec_element_i32(s, tcg_res, rd, pass, MO_16);
            tcg_temp_free_i32(tcg_res);
            tcg_temp_free_i32(tcg_op1);
            tcg_temp_free_i32(tcg_op2);
        }
    }

    tcg_temp_free_ptr(fpst);

    clear_vec_high(s, is_q, rd);
}
10929
10930
10931
10932
10933
10934
10935
/* AdvSIMD three-reg-same extra: SQRDMLAH/SQRDMLSH (RDM feature),
 * SDOT/UDOT (dot-product feature), FCMLA/FCADD (FCMA feature).
 */
static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 4);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    bool u = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);
    bool feature;
    int rot;

    /* First: validate size and determine the required feature */
    switch (u * 16 + opcode) {
    case 0x10: /* SQRDMLAH (vector) */
    case 0x11: /* SQRDMLSH (vector) */
        if (size != 1 && size != 2) {
            unallocated_encoding(s);
            return;
        }
        feature = dc_isar_feature(aa64_rdm, s);
        break;
    case 0x02: /* SDOT (vector) */
    case 0x12: /* UDOT (vector) */
        if (size != MO_32) {
            unallocated_encoding(s);
            return;
        }
        feature = dc_isar_feature(aa64_dp, s);
        break;
    case 0x18: /* FCMLA, #0 */
    case 0x19: /* FCMLA, #90 */
    case 0x1a: /* FCMLA, #180 */
    case 0x1b: /* FCMLA, #270 */
    case 0x1c: /* FCADD, #90 */
    case 0x1e: /* FCADD, #270 */
        /* size 1 (fp16) needs the fp16 feature; size 3 needs Q */
        if (size == 0
            || (size == 1 && !dc_isar_feature(aa64_fp16, s))
            || (size == 3 && !is_q)) {
            unallocated_encoding(s);
            return;
        }
        feature = dc_isar_feature(aa64_fcma, s);
        break;
    default:
        unallocated_encoding(s);
        return;
    }
    if (!feature) {
        unallocated_encoding(s);
        return;
    }
    if (!fp_access_check(s)) {
        return;
    }

    switch (opcode) {
    case 0x0: /* SQRDMLAH (vector) */
        switch (size) {
        case 1:
            gen_gvec_op3_env(s, is_q, rd, rn, rm, gen_helper_gvec_qrdmlah_s16);
            break;
        case 2:
            gen_gvec_op3_env(s, is_q, rd, rn, rm, gen_helper_gvec_qrdmlah_s32);
            break;
        default:
            g_assert_not_reached();
        }
        return;

    case 0x1: /* SQRDMLSH (vector) */
        switch (size) {
        case 1:
            gen_gvec_op3_env(s, is_q, rd, rn, rm, gen_helper_gvec_qrdmlsh_s16);
            break;
        case 2:
            gen_gvec_op3_env(s, is_q, rd, rn, rm, gen_helper_gvec_qrdmlsh_s32);
            break;
        default:
            g_assert_not_reached();
        }
        return;

    case 0x2: /* SDOT / UDOT */
        gen_gvec_op3_ool(s, is_q, rd, rn, rm, 0,
                         u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b);
        return;

    case 0x8: /* FCMLA, #0 */
    case 0x9: /* FCMLA, #90 */
    case 0xa: /* FCMLA, #180 */
    case 0xb: /* FCMLA, #270 */
        rot = extract32(opcode, 0, 2);
        switch (size) {
        case 1:
            gen_gvec_op3_fpst(s, is_q, rd, rn, rm, true, rot,
                              gen_helper_gvec_fcmlah);
            break;
        case 2:
            gen_gvec_op3_fpst(s, is_q, rd, rn, rm, false, rot,
                              gen_helper_gvec_fcmlas);
            break;
        case 3:
            gen_gvec_op3_fpst(s, is_q, rd, rn, rm, false, rot,
                              gen_helper_gvec_fcmlad);
            break;
        default:
            g_assert_not_reached();
        }
        return;

    case 0xc: /* FCADD, #90 */
    case 0xe: /* FCADD, #270 */
        rot = extract32(opcode, 1, 1);
        switch (size) {
        case 1:
            gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
                              gen_helper_gvec_fcaddh);
            break;
        case 2:
            gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
                              gen_helper_gvec_fcadds);
            break;
        case 3:
            gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
                              gen_helper_gvec_fcaddd);
            break;
        default:
            g_assert_not_reached();
        }
        return;

    default:
        g_assert_not_reached();
    }
}
11071
11072static void handle_2misc_widening(DisasContext *s, int opcode, bool is_q,
11073 int size, int rn, int rd)
11074{
11075
11076
11077
11078
11079 int pass;
11080
11081 if (size == 3) {
11082
11083 TCGv_i64 tcg_res[2];
11084 int srcelt = is_q ? 2 : 0;
11085
11086 for (pass = 0; pass < 2; pass++) {
11087 TCGv_i32 tcg_op = tcg_temp_new_i32();
11088 tcg_res[pass] = tcg_temp_new_i64();
11089
11090 read_vec_element_i32(s, tcg_op, rn, srcelt + pass, MO_32);
11091 gen_helper_vfp_fcvtds(tcg_res[pass], tcg_op, cpu_env);
11092 tcg_temp_free_i32(tcg_op);
11093 }
11094 for (pass = 0; pass < 2; pass++) {
11095 write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
11096 tcg_temp_free_i64(tcg_res[pass]);
11097 }
11098 } else {
11099
11100 int srcelt = is_q ? 4 : 0;
11101 TCGv_i32 tcg_res[4];
11102 TCGv_ptr fpst = get_fpstatus_ptr(false);
11103 TCGv_i32 ahp = get_ahp_flag();
11104
11105 for (pass = 0; pass < 4; pass++) {
11106 tcg_res[pass] = tcg_temp_new_i32();
11107
11108 read_vec_element_i32(s, tcg_res[pass], rn, srcelt + pass, MO_16);
11109 gen_helper_vfp_fcvt_f16_to_f32(tcg_res[pass], tcg_res[pass],
11110 fpst, ahp);
11111 }
11112 for (pass = 0; pass < 4; pass++) {
11113 write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32);
11114 tcg_temp_free_i32(tcg_res[pass]);
11115 }
11116
11117 tcg_temp_free_ptr(fpst);
11118 tcg_temp_free_i32(ahp);
11119 }
11120}
11121
/* Element-reverse ops (REV family): reverse the order of the size-sized
 * elements within each group of (8 << grp_size) bytes.  (opcode, u)
 * together with size determines the group size; combinations where the
 * group would not be larger than the element are unallocated.
 */
static void handle_rev(DisasContext *s, int opcode, bool u,
                       bool is_q, int size, int rn, int rd)
{
    int op = (opcode << 1) | u;
    int opsz = op + size;
    int grp_size = 3 - opsz;
    int dsize = is_q ? 128 : 64;
    int i;

    if (opsz >= 3) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (size == 0) {
        /* Special case: byte elements — a bswap of each group does the
         * whole reversal in one TCG op per group.
         */
        int groups = dsize / (8 << grp_size);

        for (i = 0; i < groups; i++) {
            TCGv_i64 tcg_tmp = tcg_temp_new_i64();

            read_vec_element(s, tcg_tmp, rn, i, grp_size);
            switch (grp_size) {
            case MO_16:
                tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp);
                break;
            case MO_32:
                tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp);
                break;
            case MO_64:
                tcg_gen_bswap64_i64(tcg_tmp, tcg_tmp);
                break;
            default:
                g_assert_not_reached();
            }
            write_vec_element(s, tcg_tmp, rd, i, grp_size);
            tcg_temp_free_i64(tcg_tmp);
        }
        clear_vec_high(s, is_q, rd);
    } else {
        /* General case: move each element to its reversed position by
         * depositing it into a zeroed 128-bit accumulator (tcg_rd holds
         * the low 64 bits, tcg_rd_hi the high 64), then write back.
         */
        int revmask = (1 << grp_size) - 1;   /* index XOR mask within a group */
        int esize = 8 << size;
        int elements = dsize / esize;
        TCGv_i64 tcg_rn = tcg_temp_new_i64();
        TCGv_i64 tcg_rd = tcg_const_i64(0);
        TCGv_i64 tcg_rd_hi = tcg_const_i64(0);

        for (i = 0; i < elements; i++) {
            /* destination element index: flip the low grp_size bits */
            int e_rev = (i & 0xf) ^ revmask;
            int off = e_rev * esize;
            read_vec_element(s, tcg_rn, rn, i, size);
            if (off >= 64) {
                tcg_gen_deposit_i64(tcg_rd_hi, tcg_rd_hi,
                                    tcg_rn, off - 64, esize);
            } else {
                tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, off, esize);
            }
        }
        write_vec_element(s, tcg_rd, rd, 0, MO_64);
        write_vec_element(s, tcg_rd_hi, rd, 1, MO_64);

        tcg_temp_free_i64(tcg_rd_hi);
        tcg_temp_free_i64(tcg_rd);
        tcg_temp_free_i64(tcg_rn);
    }
}
11192
/* Pairwise add operations from the 2-misc group:
 * SADDLP, UADDLP (opcode 0x2) and the accumulating
 * SADALP, UADALP (opcode 0x6).
 * Each pair of adjacent source elements is added to produce one
 * double-width result element, optionally accumulated into Rd.
 */
static void handle_2misc_pairwise(DisasContext *s, int opcode, bool u,
                                  bool is_q, int size, int rn, int rd)
{
    /* opcode 0x6 is the accumulating form (SADALP/UADALP) */
    bool accum = (opcode == 0x6);
    int maxpass = is_q ? 2 : 1;
    int pass;
    TCGv_i64 tcg_res[2];

    if (size == 2) {
        /* 32 + 32 -> 64 bit op: widen via the memop sign flag,
         * u selects unsigned (no MO_SIGN) vs signed.
         */
        TCGMemOp memop = size + (u ? 0 : MO_SIGN);

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();

            tcg_res[pass] = tcg_temp_new_i64();

            /* Read the adjacent pair (sign- or zero-extended to 64 bit) */
            read_vec_element(s, tcg_op1, rn, pass * 2, memop);
            read_vec_element(s, tcg_op2, rn, pass * 2 + 1, memop);
            tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2);
            if (accum) {
                /* SADALP/UADALP: add in the existing Rd element */
                read_vec_element(s, tcg_op1, rd, pass, MO_64);
                tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
            }

            tcg_temp_free_i64(tcg_op1);
            tcg_temp_free_i64(tcg_op2);
        }
    } else {
        /* 8- and 16-bit elements: use the Neon addlp helpers, which
         * do all the pairwise adds within one 64-bit lane at once.
         */
        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();
            NeonGenOneOpFn *genfn;
            /* indexed by [size][u]: signed vs unsigned widening add */
            static NeonGenOneOpFn * const fns[2][2] = {
                { gen_helper_neon_addlp_s8,  gen_helper_neon_addlp_u8 },
                { gen_helper_neon_addlp_s16, gen_helper_neon_addlp_u16 },
            };

            genfn = fns[size][u];

            tcg_res[pass] = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);
            genfn(tcg_res[pass], tcg_op);

            if (accum) {
                /* Accumulate into Rd lane-wise; no carry between the
                 * packed sub-elements, so plain lane-adds suffice.
                 */
                read_vec_element(s, tcg_op, rd, pass, MO_64);
                if (size == 0) {
                    gen_helper_neon_addl_u16(tcg_res[pass],
                                             tcg_res[pass], tcg_op);
                } else {
                    gen_helper_neon_addl_u32(tcg_res[pass],
                                             tcg_res[pass], tcg_op);
                }
            }
            tcg_temp_free_i64(tcg_op);
        }
    }
    if (!is_q) {
        /* 64-bit variant: upper half of Rd is zeroed */
        tcg_res[1] = tcg_const_i64(0);
    }
    for (pass = 0; pass < 2; pass++) {
        write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
        tcg_temp_free_i64(tcg_res[pass]);
    }
}
11264
11265static void handle_shll(DisasContext *s, bool is_q, int size, int rn, int rd)
11266{
11267
11268 int pass;
11269 int part = is_q ? 2 : 0;
11270 TCGv_i64 tcg_res[2];
11271
11272 for (pass = 0; pass < 2; pass++) {
11273 static NeonGenWidenFn * const widenfns[3] = {
11274 gen_helper_neon_widen_u8,
11275 gen_helper_neon_widen_u16,
11276 tcg_gen_extu_i32_i64,
11277 };
11278 NeonGenWidenFn *widenfn = widenfns[size];
11279 TCGv_i32 tcg_op = tcg_temp_new_i32();
11280
11281 read_vec_element_i32(s, tcg_op, rn, part + pass, MO_32);
11282 tcg_res[pass] = tcg_temp_new_i64();
11283 widenfn(tcg_res[pass], tcg_op);
11284 tcg_gen_shli_i64(tcg_res[pass], tcg_res[pass], 8 << size);
11285
11286 tcg_temp_free_i32(tcg_op);
11287 }
11288
11289 for (pass = 0; pass < 2; pass++) {
11290 write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
11291 tcg_temp_free_i64(tcg_res[pass]);
11292 }
11293}
11294
11295
11296
11297
11298
11299
11300
/* AdvSIMD two register miscellaneous: vector 2-reg-misc group
 * (REV*, CNT/NOT/RBIT, narrow/widen, pairwise adds, compares vs zero,
 * FP conversions and roundings, etc).
 * Fields used: Q = insn[30], U = insn[29], size = insn[23:22],
 * opcode = insn[16:12], Rn = insn[9:5], Rd = insn[4:0].
 */
static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
{
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    bool u = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    bool need_fpstatus = false;
    bool need_rmode = false;
    int rmode = -1;
    TCGv_i32 tcg_rmode;
    TCGv_ptr tcg_fpstatus;

    switch (opcode) {
    case 0x0: /* REV64, REV32 */
    case 0x1: /* REV16 */
        handle_rev(s, opcode, u, is_q, size, rn, rd);
        return;
    case 0x5: /* CNT, NOT, RBIT */
        if (u && size == 0) {
            /* NOT */
            break;
        } else if (u && size == 1) {
            /* RBIT */
            break;
        } else if (!u && size == 0) {
            /* CNT */
            break;
        }
        unallocated_encoding(s);
        return;
    case 0x12: /* XTN, XTN2, SQXTUN, SQXTUN2 */
    case 0x14: /* SQXTN, SQXTN2, UQXTN, UQXTN2 */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }

        handle_2misc_narrow(s, false, opcode, u, is_q, size, rn, rd);
        return;
    case 0x4: /* CLS, CLZ */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x2: /* SADDLP, UADDLP */
    case 0x6: /* SADALP, UADALP */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_2misc_pairwise(s, opcode, u, is_q, size, rn, rd);
        return;
    case 0x13: /* SHLL, SHLL2 */
        if (u == 0 || size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_shll(s, is_q, size, rn, rd);
        return;
    case 0xa: /* CMLT (zero) */
        if (u == 1) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x8: /* CMGT, CMGE (zero) */
    case 0x9: /* CMEQ, CMLE (zero) */
    case 0xb: /* ABS, NEG */
        if (size == 3 && !is_q) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x3: /* SUQADD, USQADD */
        if (size == 3 && !is_q) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_2misc_satacc(s, false, u, is_q, size, rn, rd);
        return;
    case 0x7: /* SQABS, SQNEG */
        if (size == 3 && !is_q) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0xc ... 0xf:
    case 0x16 ... 0x1d:
    case 0x1f:
    {
        /* Floating point: U, size[1] and opcode fully indicate the
         * operation; size[0] indicates single or double precision.
         * Fold U and size[1] into opcode for the inner decode.
         */
        int is_double = extract32(size, 0, 1);
        opcode |= (extract32(size, 1, 1) << 5) | (u << 6);
        size = is_double ? 3 : 2;
        switch (opcode) {
        case 0x2f: /* FABS */
        case 0x6f: /* FNEG */
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x1d: /* SCVTF */
        case 0x5d: /* UCVTF */
        {
            bool is_signed = (opcode == 0x1d) ? true : false;
            int elements = is_double ? 2 : is_q ? 4 : 2;
            if (is_double && !is_q) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_simd_intfp_conv(s, rd, rn, elements, is_signed, 0, size);
            return;
        }
        case 0x2c: /* FCMGT (zero) */
        case 0x2d: /* FCMEQ (zero) */
        case 0x2e: /* FCMLT (zero) */
        case 0x6c: /* FCMGE (zero) */
        case 0x6d: /* FCMLE (zero) */
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            handle_2misc_fcmp_zero(s, opcode, false, u, is_q, size, rn, rd);
            return;
        case 0x7f: /* FSQRT */
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x1a: /* FCVTNS */
        case 0x1b: /* FCVTMS */
        case 0x3a: /* FCVTPS */
        case 0x3b: /* FCVTZS */
        case 0x5a: /* FCVTNU */
        case 0x5b: /* FCVTMU */
        case 0x7a: /* FCVTPU */
        case 0x7b: /* FCVTZU */
            need_fpstatus = true;
            need_rmode = true;
            /* Rounding mode is encoded in opcode bits 5 and 0 */
            rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x5c: /* FCVTAU */
        case 0x1c: /* FCVTAS */
            need_fpstatus = true;
            need_rmode = true;
            rmode = FPROUNDING_TIEAWAY;
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x3c: /* URECPE */
            if (size == 3) {
                unallocated_encoding(s);
                return;
            }
            /* fall through */
        case 0x3d: /* FRECPE */
        case 0x7d: /* FRSQRTE */
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_reciprocal(s, opcode, false, u, is_q, size, rn, rd);
            return;
        case 0x56: /* FCVTXN, FCVTXN2 */
            if (size == 2) {
                unallocated_encoding(s);
                return;
            }
            /* fall through */
        case 0x16: /* FCVTN, FCVTN2 */
            /* handle_2misc_narrow does a 2*size -> size operation, but
             * these instructions encode the source size rather than the
             * dest size, hence the size - 1 below.
             */
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_narrow(s, false, opcode, 0, is_q, size - 1, rn, rd);
            return;
        case 0x17: /* FCVTL, FCVTL2 */
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_widening(s, opcode, is_q, size, rn, rd);
            return;
        case 0x18: /* FRINTN */
        case 0x19: /* FRINTM */
        case 0x38: /* FRINTP */
        case 0x39: /* FRINTZ */
            need_rmode = true;
            rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
            /* fall through */
        case 0x59: /* FRINTX */
        case 0x79: /* FRINTI */
            need_fpstatus = true;
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x58: /* FRINTA */
            need_rmode = true;
            rmode = FPROUNDING_TIEAWAY;
            need_fpstatus = true;
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x7c: /* URSQRTE */
            if (size == 3) {
                unallocated_encoding(s);
                return;
            }
            need_fpstatus = true;
            break;
        default:
            unallocated_encoding(s);
            return;
        }
        break;
    }
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (need_fpstatus || need_rmode) {
        tcg_fpstatus = get_fpstatus_ptr(false);
    } else {
        tcg_fpstatus = NULL;
    }
    if (need_rmode) {
        /* Temporarily switch the FP rounding mode; restored at the end */
        tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
    } else {
        tcg_rmode = NULL;
    }

    /* Ops with whole-vector gvec expansions are emitted here directly */
    switch (opcode) {
    case 0x5:
        if (u && size == 0) { /* NOT */
            gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_not, 0);
            return;
        }
        break;
    case 0xb:
        if (u) { /* NEG */
            gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_neg, size);
            return;
        }
        break;
    }

    if (size == 3) {
        /* All 64-bit element operations can be shared with scalar 2misc */
        int pass;

        /* Only the 128-bit form reaches here with 64-bit elements:
         * the 64-bit-wide variants were rejected as unallocated above.
         */
        tcg_debug_assert(is_q);
        for (pass = 0; pass < 2; pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();
            TCGv_i64 tcg_res = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);

            handle_2misc_64(s, opcode, u, tcg_res, tcg_op,
                            tcg_rmode, tcg_fpstatus);

            write_vec_element(s, tcg_res, rd, pass, MO_64);

            tcg_temp_free_i64(tcg_res);
            tcg_temp_free_i64(tcg_op);
        }
    } else {
        int pass;

        for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();
            TCGCond cond;

            read_vec_element_i32(s, tcg_op, rn, pass, MO_32);

            if (size == 2) {
                /* Special cases for 32 bit elements */
                switch (opcode) {
                case 0xa: /* CMLT */
                    /* 32 bit integer comparison against zero:
                     * result is test ? -1 : 0, implemented via
                     * setcond followed by neg.
                     */
                    cond = TCG_COND_LT;
                do_cmop:
                    tcg_gen_setcondi_i32(cond, tcg_res, tcg_op, 0);
                    tcg_gen_neg_i32(tcg_res, tcg_res);
                    break;
                case 0x8: /* CMGT, CMGE */
                    cond = u ? TCG_COND_GE : TCG_COND_GT;
                    goto do_cmop;
                case 0x9: /* CMEQ, CMLE */
                    cond = u ? TCG_COND_LE : TCG_COND_EQ;
                    goto do_cmop;
                case 0x4: /* CLS, CLZ */
                    if (u) {
                        tcg_gen_clzi_i32(tcg_res, tcg_op, 32);
                    } else {
                        tcg_gen_clrsb_i32(tcg_res, tcg_op);
                    }
                    break;
                case 0x7: /* SQABS, SQNEG */
                    if (u) {
                        gen_helper_neon_qneg_s32(tcg_res, cpu_env, tcg_op);
                    } else {
                        gen_helper_neon_qabs_s32(tcg_res, cpu_env, tcg_op);
                    }
                    break;
                case 0xb: /* ABS, NEG */
                    if (u) {
                        tcg_gen_neg_i32(tcg_res, tcg_op);
                    } else {
                        /* abs = op > 0 ? op : -op, via movcond */
                        TCGv_i32 tcg_zero = tcg_const_i32(0);
                        tcg_gen_neg_i32(tcg_res, tcg_op);
                        tcg_gen_movcond_i32(TCG_COND_GT, tcg_res, tcg_op,
                                            tcg_zero, tcg_op, tcg_res);
                        tcg_temp_free_i32(tcg_zero);
                    }
                    break;
                case 0x2f: /* FABS */
                    gen_helper_vfp_abss(tcg_res, tcg_op);
                    break;
                case 0x6f: /* FNEG */
                    gen_helper_vfp_negs(tcg_res, tcg_op);
                    break;
                case 0x7f: /* FSQRT */
                    gen_helper_vfp_sqrts(tcg_res, tcg_op, cpu_env);
                    break;
                case 0x1a: /* FCVTNS */
                case 0x1b: /* FCVTMS */
                case 0x1c: /* FCVTAS */
                case 0x3a: /* FCVTPS */
                case 0x3b: /* FCVTZS */
                {
                    TCGv_i32 tcg_shift = tcg_const_i32(0);
                    gen_helper_vfp_tosls(tcg_res, tcg_op,
                                         tcg_shift, tcg_fpstatus);
                    tcg_temp_free_i32(tcg_shift);
                    break;
                }
                case 0x5a: /* FCVTNU */
                case 0x5b: /* FCVTMU */
                case 0x5c: /* FCVTAU */
                case 0x7a: /* FCVTPU */
                case 0x7b: /* FCVTZU */
                {
                    TCGv_i32 tcg_shift = tcg_const_i32(0);
                    gen_helper_vfp_touls(tcg_res, tcg_op,
                                         tcg_shift, tcg_fpstatus);
                    tcg_temp_free_i32(tcg_shift);
                    break;
                }
                case 0x18: /* FRINTN */
                case 0x19: /* FRINTM */
                case 0x38: /* FRINTP */
                case 0x39: /* FRINTZ */
                case 0x58: /* FRINTA */
                case 0x79: /* FRINTI */
                    gen_helper_rints(tcg_res, tcg_op, tcg_fpstatus);
                    break;
                case 0x59: /* FRINTX */
                    gen_helper_rints_exact(tcg_res, tcg_op, tcg_fpstatus);
                    break;
                case 0x7c: /* URSQRTE */
                    gen_helper_rsqrte_u32(tcg_res, tcg_op, tcg_fpstatus);
                    break;
                default:
                    g_assert_not_reached();
                }
            } else {
                /* Use helpers for 8 and 16 bit elements */
                switch (opcode) {
                case 0x5: /* CNT, RBIT */
                    /* NOT (u && size == 0) was handled by the gvec
                     * expansion above, so here u selects RBIT vs CNT.
                     */
                    if (u) {
                        gen_helper_neon_rbit_u8(tcg_res, tcg_op);
                    } else {
                        gen_helper_neon_cnt_u8(tcg_res, tcg_op);
                    }
                    break;
                case 0x7: /* SQABS, SQNEG */
                {
                    NeonGenOneOpEnvFn *genfn;
                    static NeonGenOneOpEnvFn * const fns[2][2] = {
                        { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 },
                        { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 },
                    };
                    genfn = fns[size][u];
                    genfn(tcg_res, cpu_env, tcg_op);
                    break;
                }
                case 0x8: /* CMGT, CMGE */
                case 0x9: /* CMEQ, CMLE */
                case 0xa: /* CMLT */
                {
                    static NeonGenTwoOpFn * const fns[3][2] = {
                        { gen_helper_neon_cgt_s8, gen_helper_neon_cgt_s16 },
                        { gen_helper_neon_cge_s8, gen_helper_neon_cge_s16 },
                        { gen_helper_neon_ceq_u8, gen_helper_neon_ceq_u16 },
                    };
                    NeonGenTwoOpFn *genfn;
                    int comp;
                    bool reverse;
                    TCGv_i32 tcg_zero = tcg_const_i32(0);

                    /* comp = index into [CMGT, CMGE, CMEQ, CMLE, CMLT] */
                    comp = (opcode - 0x8) * 2 + u;
                    /* ...but LE, LT are implemented as reverse GE, GT */
                    reverse = (comp > 2);
                    if (reverse) {
                        comp = 4 - comp;
                    }
                    genfn = fns[comp][size];
                    if (reverse) {
                        genfn(tcg_res, tcg_zero, tcg_op);
                    } else {
                        genfn(tcg_res, tcg_op, tcg_zero);
                    }
                    tcg_temp_free_i32(tcg_zero);
                    break;
                }
                case 0xb: /* ABS, NEG */
                    if (u) {
                        TCGv_i32 tcg_zero = tcg_const_i32(0);
                        if (size) {
                            gen_helper_neon_sub_u16(tcg_res, tcg_zero, tcg_op);
                        } else {
                            gen_helper_neon_sub_u8(tcg_res, tcg_zero, tcg_op);
                        }
                        tcg_temp_free_i32(tcg_zero);
                    } else {
                        if (size) {
                            gen_helper_neon_abs_s16(tcg_res, tcg_op);
                        } else {
                            gen_helper_neon_abs_s8(tcg_res, tcg_op);
                        }
                    }
                    break;
                case 0x4: /* CLS, CLZ */
                    if (u) {
                        if (size == 0) {
                            gen_helper_neon_clz_u8(tcg_res, tcg_op);
                        } else {
                            gen_helper_neon_clz_u16(tcg_res, tcg_op);
                        }
                    } else {
                        if (size == 0) {
                            gen_helper_neon_cls_s8(tcg_res, tcg_op);
                        } else {
                            gen_helper_neon_cls_s16(tcg_res, tcg_op);
                        }
                    }
                    break;
                default:
                    g_assert_not_reached();
                }
            }

            write_vec_element_i32(s, tcg_res, rd, pass, MO_32);

            tcg_temp_free_i32(tcg_res);
            tcg_temp_free_i32(tcg_op);
        }
    }
    clear_vec_high(s, is_q, rd);

    if (need_rmode) {
        /* Restore the caller's rounding mode */
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
        tcg_temp_free_i32(tcg_rmode);
    }
    if (need_fpstatus) {
        tcg_temp_free_ptr(tcg_fpstatus);
    }
}
11822
11823
11824
11825
11826
11827
11828
11829
11830
11831
11832
11833
11834
11835
11836
/* AdvSIMD [scalar] two register miscellaneous (FP16).
 * Decodes the half-precision 2-reg-misc group; a (insn[23]) and
 * U (insn[29]) are folded into opcode to form the combined 'fpop'
 * used for the decode below.  Requires the FP16 extension.
 */
static void disas_simd_two_reg_misc_fp16(DisasContext *s, uint32_t insn)
{
    int fpop, opcode, a, u;
    int rn, rd;
    bool is_q;
    bool is_scalar;
    bool only_in_vector = false;

    int pass;
    TCGv_i32 tcg_rmode = NULL;
    TCGv_ptr tcg_fpstatus = NULL;
    bool need_rmode = false;
    bool need_fpst = true;
    int rmode;

    if (!dc_isar_feature(aa64_fp16, s)) {
        unallocated_encoding(s);
        return;
    }

    rd = extract32(insn, 0, 5);
    rn = extract32(insn, 5, 5);

    a = extract32(insn, 23, 1);
    u = extract32(insn, 29, 1);
    is_scalar = extract32(insn, 28, 1);
    is_q = extract32(insn, 30, 1);

    opcode = extract32(insn, 12, 5);
    fpop = deposit32(opcode, 5, 1, a);
    fpop = deposit32(fpop, 6, 1, u);

    /* NOTE(review): rd/rn were already extracted above; this repeat is
     * redundant but harmless.
     */
    rd = extract32(insn, 0, 5);
    rn = extract32(insn, 5, 5);

    switch (fpop) {
    case 0x1d: /* SCVTF */
    case 0x5d: /* UCVTF */
    {
        int elements;

        if (is_scalar) {
            elements = 1;
        } else {
            elements = (is_q ? 8 : 4);
        }

        if (!fp_access_check(s)) {
            return;
        }
        handle_simd_intfp_conv(s, rd, rn, elements, !u, 0, MO_16);
        return;
    }
    break;
    case 0x2c: /* FCMGT (zero) */
    case 0x2d: /* FCMEQ (zero) */
    case 0x2e: /* FCMLT (zero) */
    case 0x6c: /* FCMGE (zero) */
    case 0x6d: /* FCMLE (zero) */
        handle_2misc_fcmp_zero(s, fpop, is_scalar, 0, is_q, MO_16, rn, rd);
        return;
    case 0x3d: /* FRECPE */
    case 0x3f: /* FRECPX */
        break;
    case 0x18: /* FRINTN */
        need_rmode = true;
        only_in_vector = true;
        rmode = FPROUNDING_TIEEVEN;
        break;
    case 0x19: /* FRINTM */
        need_rmode = true;
        only_in_vector = true;
        rmode = FPROUNDING_NEGINF;
        break;
    case 0x38: /* FRINTP */
        need_rmode = true;
        only_in_vector = true;
        rmode = FPROUNDING_POSINF;
        break;
    case 0x39: /* FRINTZ */
        need_rmode = true;
        only_in_vector = true;
        rmode = FPROUNDING_ZERO;
        break;
    case 0x58: /* FRINTA */
        need_rmode = true;
        only_in_vector = true;
        rmode = FPROUNDING_TIEAWAY;
        break;
    case 0x59: /* FRINTX */
    case 0x79: /* FRINTI */
        only_in_vector = true;
        /* current rounding mode */
        break;
    case 0x1a: /* FCVTNS */
        need_rmode = true;
        rmode = FPROUNDING_TIEEVEN;
        break;
    case 0x1b: /* FCVTMS */
        need_rmode = true;
        rmode = FPROUNDING_NEGINF;
        break;
    case 0x1c: /* FCVTAS */
        need_rmode = true;
        rmode = FPROUNDING_TIEAWAY;
        break;
    case 0x3a: /* FCVTPS */
        need_rmode = true;
        rmode = FPROUNDING_POSINF;
        break;
    case 0x3b: /* FCVTZS */
        need_rmode = true;
        rmode = FPROUNDING_ZERO;
        break;
    case 0x5a: /* FCVTNU */
        need_rmode = true;
        rmode = FPROUNDING_TIEEVEN;
        break;
    case 0x5b: /* FCVTMU */
        need_rmode = true;
        rmode = FPROUNDING_NEGINF;
        break;
    case 0x5c: /* FCVTAU */
        need_rmode = true;
        rmode = FPROUNDING_TIEAWAY;
        break;
    case 0x7a: /* FCVTPU */
        need_rmode = true;
        rmode = FPROUNDING_POSINF;
        break;
    case 0x7b: /* FCVTZU */
        need_rmode = true;
        rmode = FPROUNDING_ZERO;
        break;
    case 0x2f: /* FABS */
    case 0x6f: /* FNEG */
        /* pure sign-bit manipulation: no fpstatus needed */
        need_fpst = false;
        break;
    case 0x7d: /* FRSQRTE */
    case 0x7f: /* FSQRT (vector) */
        break;
    default:
        fprintf(stderr, "%s: insn %#04x fpop %#2x\n", __func__, insn, fpop);
        g_assert_not_reached();
    }

    /* Check additional constraints for the scalar encoding */
    if (is_scalar) {
        if (!is_q) {
            unallocated_encoding(s);
            return;
        }
        /* FRINTxx is only in the vector form */
        if (only_in_vector) {
            unallocated_encoding(s);
            return;
        }
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (need_rmode || need_fpst) {
        tcg_fpstatus = get_fpstatus_ptr(true);
    }

    if (need_rmode) {
        /* Temporarily switch rounding mode; restored at the end */
        tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
    }

    if (is_scalar) {
        TCGv_i32 tcg_op = read_fp_hreg(s, rn);
        TCGv_i32 tcg_res = tcg_temp_new_i32();

        switch (fpop) {
        case 0x1a: /* FCVTNS */
        case 0x1b: /* FCVTMS */
        case 0x1c: /* FCVTAS */
        case 0x3a: /* FCVTPS */
        case 0x3b: /* FCVTZS */
            gen_helper_advsimd_f16tosinth(tcg_res, tcg_op, tcg_fpstatus);
            break;
        case 0x3d: /* FRECPE */
            gen_helper_recpe_f16(tcg_res, tcg_op, tcg_fpstatus);
            break;
        case 0x3f: /* FRECPX */
            gen_helper_frecpx_f16(tcg_res, tcg_op, tcg_fpstatus);
            break;
        case 0x5a: /* FCVTNU */
        case 0x5b: /* FCVTMU */
        case 0x5c: /* FCVTAU */
        case 0x7a: /* FCVTPU */
        case 0x7b: /* FCVTZU */
            gen_helper_advsimd_f16touinth(tcg_res, tcg_op, tcg_fpstatus);
            break;
        case 0x6f: /* FNEG */
            tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
            break;
        case 0x7d: /* FRSQRTE */
            gen_helper_rsqrte_f16(tcg_res, tcg_op, tcg_fpstatus);
            break;
        default:
            g_assert_not_reached();
        }

        /* limit any sign extension going on */
        tcg_gen_andi_i32(tcg_res, tcg_res, 0xffff);
        write_fp_sreg(s, rd, tcg_res);

        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_op);
    } else {
        for (pass = 0; pass < (is_q ? 8 : 4); pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op, rn, pass, MO_16);

            switch (fpop) {
            case 0x1a: /* FCVTNS */
            case 0x1b: /* FCVTMS */
            case 0x1c: /* FCVTAS */
            case 0x3a: /* FCVTPS */
            case 0x3b: /* FCVTZS */
                gen_helper_advsimd_f16tosinth(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x3d: /* FRECPE */
                gen_helper_recpe_f16(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x5a: /* FCVTNU */
            case 0x5b: /* FCVTMU */
            case 0x5c: /* FCVTAU */
            case 0x7a: /* FCVTPU */
            case 0x7b: /* FCVTZU */
                gen_helper_advsimd_f16touinth(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x18: /* FRINTN */
            case 0x19: /* FRINTM */
            case 0x38: /* FRINTP */
            case 0x39: /* FRINTZ */
            case 0x58: /* FRINTA */
            case 0x79: /* FRINTI */
                gen_helper_advsimd_rinth(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x59: /* FRINTX */
                gen_helper_advsimd_rinth_exact(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x2f: /* FABS */
                tcg_gen_andi_i32(tcg_res, tcg_op, 0x7fff);
                break;
            case 0x6f: /* FNEG */
                tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
                break;
            case 0x7d: /* FRSQRTE */
                gen_helper_rsqrte_f16(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x7f: /* FSQRT */
                gen_helper_sqrt_f16(tcg_res, tcg_op, tcg_fpstatus);
                break;
            default:
                g_assert_not_reached();
            }

            write_vec_element_i32(s, tcg_res, rd, pass, MO_16);

            tcg_temp_free_i32(tcg_res);
            tcg_temp_free_i32(tcg_op);
        }

        clear_vec_high(s, is_q, rd);
    }

    if (tcg_rmode) {
        /* Restore the caller's rounding mode */
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
        tcg_temp_free_i32(tcg_rmode);
    }

    if (tcg_fpstatus) {
        tcg_temp_free_ptr(tcg_fpstatus);
    }
}
12121
12122
12123
12124
12125
12126
12127
12128
12129
12130
12131
12132
12133static void disas_simd_indexed(DisasContext *s, uint32_t insn)
12134{
12135
12136
12137
12138
12139
12140
12141
12142 bool is_scalar = extract32(insn, 28, 1);
12143 bool is_q = extract32(insn, 30, 1);
12144 bool u = extract32(insn, 29, 1);
12145 int size = extract32(insn, 22, 2);
12146 int l = extract32(insn, 21, 1);
12147 int m = extract32(insn, 20, 1);
12148
12149 int rm = extract32(insn, 16, 4);
12150 int opcode = extract32(insn, 12, 4);
12151 int h = extract32(insn, 11, 1);
12152 int rn = extract32(insn, 5, 5);
12153 int rd = extract32(insn, 0, 5);
12154 bool is_long = false;
12155 int is_fp = 0;
12156 bool is_fp16 = false;
12157 int index;
12158 TCGv_ptr fpst;
12159
12160 switch (16 * u + opcode) {
12161 case 0x08:
12162 case 0x10:
12163 case 0x14:
12164 if (is_scalar) {
12165 unallocated_encoding(s);
12166 return;
12167 }
12168 break;
12169 case 0x02:
12170 case 0x12:
12171 case 0x06:
12172 case 0x16:
12173 case 0x0a:
12174 case 0x1a:
12175 if (is_scalar) {
12176 unallocated_encoding(s);
12177 return;
12178 }
12179 is_long = true;
12180 break;
12181 case 0x03:
12182 case 0x07:
12183 case 0x0b:
12184 is_long = true;
12185 break;
12186 case 0x0c:
12187 case 0x0d:
12188 break;
12189 case 0x01:
12190 case 0x05:
12191 case 0x09:
12192 case 0x19:
12193 is_fp = 1;
12194 break;
12195 case 0x1d:
12196 case 0x1f:
12197 if (!dc_isar_feature(aa64_rdm, s)) {
12198 unallocated_encoding(s);
12199 return;
12200 }
12201 break;
12202 case 0x0e:
12203 case 0x1e:
12204 if (size != MO_32 || !dc_isar_feature(aa64_dp, s)) {
12205 unallocated_encoding(s);
12206 return;
12207 }
12208 break;
12209 case 0x11:
12210 case 0x13:
12211 case 0x15:
12212 case 0x17:
12213 if (!dc_isar_feature(aa64_fcma, s)) {
12214 unallocated_encoding(s);
12215 return;
12216 }
12217 is_fp = 2;
12218 break;
12219 default:
12220 unallocated_encoding(s);
12221 return;
12222 }
12223
12224 switch (is_fp) {
12225 case 1:
12226
12227 switch (size) {
12228 case 0:
12229 size = MO_16;
12230 is_fp16 = true;
12231 break;
12232 case MO_32:
12233 case MO_64:
12234 break;
12235 default:
12236 unallocated_encoding(s);
12237 return;
12238 }
12239 break;
12240
12241 case 2:
12242
12243 size <<= 1;
12244 switch (size) {
12245 case MO_32:
12246 if (h && !is_q) {
12247 unallocated_encoding(s);
12248 return;
12249 }
12250 is_fp16 = true;
12251 break;
12252 case MO_64:
12253 break;
12254 default:
12255 unallocated_encoding(s);
12256 return;
12257 }
12258 break;
12259
12260 default:
12261 switch (size) {
12262 case MO_8:
12263 case MO_64:
12264 unallocated_encoding(s);
12265 return;
12266 }
12267 break;
12268 }
12269 if (is_fp16 && !dc_isar_feature(aa64_fp16, s)) {
12270 unallocated_encoding(s);
12271 return;
12272 }
12273
12274
12275 switch (size) {
12276 case MO_16:
12277 index = h << 2 | l << 1 | m;
12278 break;
12279 case MO_32:
12280 index = h << 1 | l;
12281 rm |= m << 4;
12282 break;
12283 case MO_64:
12284 if (l || !is_q) {
12285 unallocated_encoding(s);
12286 return;
12287 }
12288 index = h;
12289 rm |= m << 4;
12290 break;
12291 default:
12292 g_assert_not_reached();
12293 }
12294
12295 if (!fp_access_check(s)) {
12296 return;
12297 }
12298
12299 if (is_fp) {
12300 fpst = get_fpstatus_ptr(is_fp16);
12301 } else {
12302 fpst = NULL;
12303 }
12304
12305 switch (16 * u + opcode) {
12306 case 0x0e:
12307 case 0x1e:
12308 gen_gvec_op3_ool(s, is_q, rd, rn, rm, index,
12309 u ? gen_helper_gvec_udot_idx_b
12310 : gen_helper_gvec_sdot_idx_b);
12311 return;
12312 case 0x11:
12313 case 0x13:
12314 case 0x15:
12315 case 0x17:
12316 {
12317 int rot = extract32(insn, 13, 2);
12318 int data = (index << 2) | rot;
12319 tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
12320 vec_full_reg_offset(s, rn),
12321 vec_full_reg_offset(s, rm), fpst,
12322 is_q ? 16 : 8, vec_full_reg_size(s), data,
12323 size == MO_64
12324 ? gen_helper_gvec_fcmlas_idx
12325 : gen_helper_gvec_fcmlah_idx);
12326 tcg_temp_free_ptr(fpst);
12327 }
12328 return;
12329 }
12330
12331 if (size == 3) {
12332 TCGv_i64 tcg_idx = tcg_temp_new_i64();
12333 int pass;
12334
12335 assert(is_fp && is_q && !is_long);
12336
12337 read_vec_element(s, tcg_idx, rm, index, MO_64);
12338
12339 for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
12340 TCGv_i64 tcg_op = tcg_temp_new_i64();
12341 TCGv_i64 tcg_res = tcg_temp_new_i64();
12342
12343 read_vec_element(s, tcg_op, rn, pass, MO_64);
12344
12345 switch (16 * u + opcode) {
12346 case 0x05:
12347
12348 gen_helper_vfp_negd(tcg_op, tcg_op);
12349
12350 case 0x01:
12351 read_vec_element(s, tcg_res, rd, pass, MO_64);
12352 gen_helper_vfp_muladdd(tcg_res, tcg_op, tcg_idx, tcg_res, fpst);
12353 break;
12354 case 0x09:
12355 gen_helper_vfp_muld(tcg_res, tcg_op, tcg_idx, fpst);
12356 break;
12357 case 0x19:
12358 gen_helper_vfp_mulxd(tcg_res, tcg_op, tcg_idx, fpst);
12359 break;
12360 default:
12361 g_assert_not_reached();
12362 }
12363
12364 write_vec_element(s, tcg_res, rd, pass, MO_64);
12365 tcg_temp_free_i64(tcg_op);
12366 tcg_temp_free_i64(tcg_res);
12367 }
12368
12369 tcg_temp_free_i64(tcg_idx);
12370 clear_vec_high(s, !is_scalar, rd);
12371 } else if (!is_long) {
12372
12373
12374
12375
12376 TCGv_i32 tcg_idx = tcg_temp_new_i32();
12377 int pass, maxpasses;
12378
12379 if (is_scalar) {
12380 maxpasses = 1;
12381 } else {
12382 maxpasses = is_q ? 4 : 2;
12383 }
12384
12385 read_vec_element_i32(s, tcg_idx, rm, index, size);
12386
12387 if (size == 1 && !is_scalar) {
12388
12389
12390
12391
12392 tcg_gen_deposit_i32(tcg_idx, tcg_idx, tcg_idx, 16, 16);
12393 }
12394
12395 for (pass = 0; pass < maxpasses; pass++) {
12396 TCGv_i32 tcg_op = tcg_temp_new_i32();
12397 TCGv_i32 tcg_res = tcg_temp_new_i32();
12398
12399 read_vec_element_i32(s, tcg_op, rn, pass, is_scalar ? size : MO_32);
12400
12401 switch (16 * u + opcode) {
12402 case 0x08:
12403 case 0x10:
12404 case 0x14:
12405 {
12406 static NeonGenTwoOpFn * const fns[2][2] = {
12407 { gen_helper_neon_add_u16, gen_helper_neon_sub_u16 },
12408 { tcg_gen_add_i32, tcg_gen_sub_i32 },
12409 };
12410 NeonGenTwoOpFn *genfn;
12411 bool is_sub = opcode == 0x4;
12412
12413 if (size == 1) {
12414 gen_helper_neon_mul_u16(tcg_res, tcg_op, tcg_idx);
12415 } else {
12416 tcg_gen_mul_i32(tcg_res, tcg_op, tcg_idx);
12417 }
12418 if (opcode == 0x8) {
12419 break;
12420 }
12421 read_vec_element_i32(s, tcg_op, rd, pass, MO_32);
12422 genfn = fns[size - 1][is_sub];
12423 genfn(tcg_res, tcg_op, tcg_res);
12424 break;
12425 }
12426 case 0x05:
12427 case 0x01:
12428 read_vec_element_i32(s, tcg_res, rd, pass,
12429 is_scalar ? size : MO_32);
12430 switch (size) {
12431 case 1:
12432 if (opcode == 0x5) {
12433
12434
12435 tcg_gen_xori_i32(tcg_op, tcg_op, 0x80008000);
12436 }
12437 if (is_scalar) {
12438 gen_helper_advsimd_muladdh(tcg_res, tcg_op, tcg_idx,
12439 tcg_res, fpst);
12440 } else {
12441 gen_helper_advsimd_muladd2h(tcg_res, tcg_op, tcg_idx,
12442 tcg_res, fpst);
12443 }
12444 break;
12445 case 2:
12446 if (opcode == 0x5) {
12447
12448
12449 tcg_gen_xori_i32(tcg_op, tcg_op, 0x80000000);
12450 }
12451 gen_helper_vfp_muladds(tcg_res, tcg_op, tcg_idx,
12452 tcg_res, fpst);
12453 break;
12454 default:
12455 g_assert_not_reached();
12456 }
12457 break;
12458 case 0x09:
12459 switch (size) {
12460 case 1:
12461 if (is_scalar) {
12462 gen_helper_advsimd_mulh(tcg_res, tcg_op,
12463 tcg_idx, fpst);
12464 } else {
12465 gen_helper_advsimd_mul2h(tcg_res, tcg_op,
12466 tcg_idx, fpst);
12467 }
12468 break;
12469 case 2:
12470 gen_helper_vfp_muls(tcg_res, tcg_op, tcg_idx, fpst);
12471 break;
12472 default:
12473 g_assert_not_reached();
12474 }
12475 break;
12476 case 0x19:
12477 switch (size) {
12478 case 1:
12479 if (is_scalar) {
12480 gen_helper_advsimd_mulxh(tcg_res, tcg_op,
12481 tcg_idx, fpst);
12482 } else {
12483 gen_helper_advsimd_mulx2h(tcg_res, tcg_op,
12484 tcg_idx, fpst);
12485 }
12486 break;
12487 case 2:
12488 gen_helper_vfp_mulxs(tcg_res, tcg_op, tcg_idx, fpst);
12489 break;
12490 default:
12491 g_assert_not_reached();
12492 }
12493 break;
12494 case 0x0c:
12495 if (size == 1) {
12496 gen_helper_neon_qdmulh_s16(tcg_res, cpu_env,
12497 tcg_op, tcg_idx);
12498 } else {
12499 gen_helper_neon_qdmulh_s32(tcg_res, cpu_env,
12500 tcg_op, tcg_idx);
12501 }
12502 break;
12503 case 0x0d:
12504 if (size == 1) {
12505 gen_helper_neon_qrdmulh_s16(tcg_res, cpu_env,
12506 tcg_op, tcg_idx);
12507 } else {
12508 gen_helper_neon_qrdmulh_s32(tcg_res, cpu_env,
12509 tcg_op, tcg_idx);
12510 }
12511 break;
12512 case 0x1d:
12513 read_vec_element_i32(s, tcg_res, rd, pass,
12514 is_scalar ? size : MO_32);
12515 if (size == 1) {
12516 gen_helper_neon_qrdmlah_s16(tcg_res, cpu_env,
12517 tcg_op, tcg_idx, tcg_res);
12518 } else {
12519 gen_helper_neon_qrdmlah_s32(tcg_res, cpu_env,
12520 tcg_op, tcg_idx, tcg_res);
12521 }
12522 break;
12523 case 0x1f:
12524 read_vec_element_i32(s, tcg_res, rd, pass,
12525 is_scalar ? size : MO_32);
12526 if (size == 1) {
12527 gen_helper_neon_qrdmlsh_s16(tcg_res, cpu_env,
12528 tcg_op, tcg_idx, tcg_res);
12529 } else {
12530 gen_helper_neon_qrdmlsh_s32(tcg_res, cpu_env,
12531 tcg_op, tcg_idx, tcg_res);
12532 }
12533 break;
12534 default:
12535 g_assert_not_reached();
12536 }
12537
12538 if (is_scalar) {
12539 write_fp_sreg(s, rd, tcg_res);
12540 } else {
12541 write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
12542 }
12543
12544 tcg_temp_free_i32(tcg_op);
12545 tcg_temp_free_i32(tcg_res);
12546 }
12547
12548 tcg_temp_free_i32(tcg_idx);
12549 clear_vec_high(s, is_q, rd);
12550 } else {
12551
12552 TCGv_i64 tcg_res[2];
12553 int pass;
12554 bool satop = extract32(opcode, 0, 1);
12555 TCGMemOp memop = MO_32;
12556
12557 if (satop || !u) {
12558 memop |= MO_SIGN;
12559 }
12560
12561 if (size == 2) {
12562 TCGv_i64 tcg_idx = tcg_temp_new_i64();
12563
12564 read_vec_element(s, tcg_idx, rm, index, memop);
12565
12566 for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
12567 TCGv_i64 tcg_op = tcg_temp_new_i64();
12568 TCGv_i64 tcg_passres;
12569 int passelt;
12570
12571 if (is_scalar) {
12572 passelt = 0;
12573 } else {
12574 passelt = pass + (is_q * 2);
12575 }
12576
12577 read_vec_element(s, tcg_op, rn, passelt, memop);
12578
12579 tcg_res[pass] = tcg_temp_new_i64();
12580
12581 if (opcode == 0xa || opcode == 0xb) {
12582
12583 tcg_passres = tcg_res[pass];
12584 } else {
12585 tcg_passres = tcg_temp_new_i64();
12586 }
12587
12588 tcg_gen_mul_i64(tcg_passres, tcg_op, tcg_idx);
12589 tcg_temp_free_i64(tcg_op);
12590
12591 if (satop) {
12592
12593 gen_helper_neon_addl_saturate_s64(tcg_passres, cpu_env,
12594 tcg_passres, tcg_passres);
12595 }
12596
12597 if (opcode == 0xa || opcode == 0xb) {
12598 continue;
12599 }
12600
12601
12602 read_vec_element(s, tcg_res[pass], rd, pass, MO_64);
12603
12604 switch (opcode) {
12605 case 0x2:
12606 tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
12607 break;
12608 case 0x6:
12609 tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
12610 break;
12611 case 0x7:
12612 tcg_gen_neg_i64(tcg_passres, tcg_passres);
12613
12614 case 0x3:
12615 gen_helper_neon_addl_saturate_s64(tcg_res[pass], cpu_env,
12616 tcg_res[pass],
12617 tcg_passres);
12618 break;
12619 default:
12620 g_assert_not_reached();
12621 }
12622 tcg_temp_free_i64(tcg_passres);
12623 }
12624 tcg_temp_free_i64(tcg_idx);
12625
12626 clear_vec_high(s, !is_scalar, rd);
12627 } else {
12628 TCGv_i32 tcg_idx = tcg_temp_new_i32();
12629
12630 assert(size == 1);
12631 read_vec_element_i32(s, tcg_idx, rm, index, size);
12632
12633 if (!is_scalar) {
12634
12635
12636
12637
12638 tcg_gen_deposit_i32(tcg_idx, tcg_idx, tcg_idx, 16, 16);
12639 }
12640
12641 for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
12642 TCGv_i32 tcg_op = tcg_temp_new_i32();
12643 TCGv_i64 tcg_passres;
12644
12645 if (is_scalar) {
12646 read_vec_element_i32(s, tcg_op, rn, pass, size);
12647 } else {
12648 read_vec_element_i32(s, tcg_op, rn,
12649 pass + (is_q * 2), MO_32);
12650 }
12651
12652 tcg_res[pass] = tcg_temp_new_i64();
12653
12654 if (opcode == 0xa || opcode == 0xb) {
12655
12656 tcg_passres = tcg_res[pass];
12657 } else {
12658 tcg_passres = tcg_temp_new_i64();
12659 }
12660
12661 if (memop & MO_SIGN) {
12662 gen_helper_neon_mull_s16(tcg_passres, tcg_op, tcg_idx);
12663 } else {
12664 gen_helper_neon_mull_u16(tcg_passres, tcg_op, tcg_idx);
12665 }
12666 if (satop) {
12667 gen_helper_neon_addl_saturate_s32(tcg_passres, cpu_env,
12668 tcg_passres, tcg_passres);
12669 }
12670 tcg_temp_free_i32(tcg_op);
12671
12672 if (opcode == 0xa || opcode == 0xb) {
12673 continue;
12674 }
12675
12676
12677 read_vec_element(s, tcg_res[pass], rd, pass, MO_64);
12678
12679 switch (opcode) {
12680 case 0x2:
12681 gen_helper_neon_addl_u32(tcg_res[pass], tcg_res[pass],
12682 tcg_passres);
12683 break;
12684 case 0x6:
12685 gen_helper_neon_subl_u32(tcg_res[pass], tcg_res[pass],
12686 tcg_passres);
12687 break;
12688 case 0x7:
12689 gen_helper_neon_negl_u32(tcg_passres, tcg_passres);
12690
12691 case 0x3:
12692 gen_helper_neon_addl_saturate_s32(tcg_res[pass], cpu_env,
12693 tcg_res[pass],
12694 tcg_passres);
12695 break;
12696 default:
12697 g_assert_not_reached();
12698 }
12699 tcg_temp_free_i64(tcg_passres);
12700 }
12701 tcg_temp_free_i32(tcg_idx);
12702
12703 if (is_scalar) {
12704 tcg_gen_ext32u_i64(tcg_res[0], tcg_res[0]);
12705 }
12706 }
12707
12708 if (is_scalar) {
12709 tcg_res[1] = tcg_const_i64(0);
12710 }
12711
12712 for (pass = 0; pass < 2; pass++) {
12713 write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
12714 tcg_temp_free_i64(tcg_res[pass]);
12715 }
12716 }
12717
12718 if (fpst) {
12719 tcg_temp_free_ptr(fpst);
12720 }
12721}
12722
12723
12724
12725
12726
12727
12728
12729static void disas_crypto_aes(DisasContext *s, uint32_t insn)
12730{
12731 int size = extract32(insn, 22, 2);
12732 int opcode = extract32(insn, 12, 5);
12733 int rn = extract32(insn, 5, 5);
12734 int rd = extract32(insn, 0, 5);
12735 int decrypt;
12736 TCGv_ptr tcg_rd_ptr, tcg_rn_ptr;
12737 TCGv_i32 tcg_decrypt;
12738 CryptoThreeOpIntFn *genfn;
12739
12740 if (!dc_isar_feature(aa64_aes, s) || size != 0) {
12741 unallocated_encoding(s);
12742 return;
12743 }
12744
12745 switch (opcode) {
12746 case 0x4:
12747 decrypt = 0;
12748 genfn = gen_helper_crypto_aese;
12749 break;
12750 case 0x6:
12751 decrypt = 0;
12752 genfn = gen_helper_crypto_aesmc;
12753 break;
12754 case 0x5:
12755 decrypt = 1;
12756 genfn = gen_helper_crypto_aese;
12757 break;
12758 case 0x7:
12759 decrypt = 1;
12760 genfn = gen_helper_crypto_aesmc;
12761 break;
12762 default:
12763 unallocated_encoding(s);
12764 return;
12765 }
12766
12767 if (!fp_access_check(s)) {
12768 return;
12769 }
12770
12771 tcg_rd_ptr = vec_full_reg_ptr(s, rd);
12772 tcg_rn_ptr = vec_full_reg_ptr(s, rn);
12773 tcg_decrypt = tcg_const_i32(decrypt);
12774
12775 genfn(tcg_rd_ptr, tcg_rn_ptr, tcg_decrypt);
12776
12777 tcg_temp_free_ptr(tcg_rd_ptr);
12778 tcg_temp_free_ptr(tcg_rn_ptr);
12779 tcg_temp_free_i32(tcg_decrypt);
12780}
12781
12782
12783
12784
12785
12786
12787
/* Crypto three-register SHA group:
 * SHA1C, SHA1P, SHA1M, SHA1SU0 (opcodes 0-3, FEAT_SHA1),
 * SHA256H, SHA256H2, SHA256SU1 (opcodes 4-6, FEAT_SHA256).
 * size must be 0; Rn/Rm are sources, Rd is source and destination.
 */
static void disas_crypto_three_reg_sha(DisasContext *s, uint32_t insn)
{
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 3);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    CryptoThreeOpFn *genfn;
    TCGv_ptr tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr;
    bool feature;

    if (size != 0) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0: /* SHA1C */
    case 1: /* SHA1P */
    case 2: /* SHA1M */
    case 3: /* SHA1SU0 */
        /* All four SHA1 ops share one helper which takes the opcode as
         * an extra argument; a NULL genfn signals that path below.
         */
        genfn = NULL;
        feature = dc_isar_feature(aa64_sha1, s);
        break;
    case 4: /* SHA256H */
        genfn = gen_helper_crypto_sha256h;
        feature = dc_isar_feature(aa64_sha256, s);
        break;
    case 5: /* SHA256H2 */
        genfn = gen_helper_crypto_sha256h2;
        feature = dc_isar_feature(aa64_sha256, s);
        break;
    case 6: /* SHA256SU1 */
        genfn = gen_helper_crypto_sha256su1;
        feature = dc_isar_feature(aa64_sha256, s);
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!feature) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rd_ptr = vec_full_reg_ptr(s, rd);
    tcg_rn_ptr = vec_full_reg_ptr(s, rn);
    tcg_rm_ptr = vec_full_reg_ptr(s, rm);

    if (genfn) {
        genfn(tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr);
    } else {
        /* Shared SHA1 helper dispatches internally on the opcode.  */
        TCGv_i32 tcg_opcode = tcg_const_i32(opcode);

        gen_helper_crypto_sha1_3reg(tcg_rd_ptr, tcg_rn_ptr,
                                    tcg_rm_ptr, tcg_opcode);
        tcg_temp_free_i32(tcg_opcode);
    }

    tcg_temp_free_ptr(tcg_rd_ptr);
    tcg_temp_free_ptr(tcg_rn_ptr);
    tcg_temp_free_ptr(tcg_rm_ptr);
}
12856
12857
12858
12859
12860
12861
12862
12863static void disas_crypto_two_reg_sha(DisasContext *s, uint32_t insn)
12864{
12865 int size = extract32(insn, 22, 2);
12866 int opcode = extract32(insn, 12, 5);
12867 int rn = extract32(insn, 5, 5);
12868 int rd = extract32(insn, 0, 5);
12869 CryptoTwoOpFn *genfn;
12870 bool feature;
12871 TCGv_ptr tcg_rd_ptr, tcg_rn_ptr;
12872
12873 if (size != 0) {
12874 unallocated_encoding(s);
12875 return;
12876 }
12877
12878 switch (opcode) {
12879 case 0:
12880 feature = dc_isar_feature(aa64_sha1, s);
12881 genfn = gen_helper_crypto_sha1h;
12882 break;
12883 case 1:
12884 feature = dc_isar_feature(aa64_sha1, s);
12885 genfn = gen_helper_crypto_sha1su1;
12886 break;
12887 case 2:
12888 feature = dc_isar_feature(aa64_sha256, s);
12889 genfn = gen_helper_crypto_sha256su0;
12890 break;
12891 default:
12892 unallocated_encoding(s);
12893 return;
12894 }
12895
12896 if (!feature) {
12897 unallocated_encoding(s);
12898 return;
12899 }
12900
12901 if (!fp_access_check(s)) {
12902 return;
12903 }
12904
12905 tcg_rd_ptr = vec_full_reg_ptr(s, rd);
12906 tcg_rn_ptr = vec_full_reg_ptr(s, rn);
12907
12908 genfn(tcg_rd_ptr, tcg_rn_ptr);
12909
12910 tcg_temp_free_ptr(tcg_rd_ptr);
12911 tcg_temp_free_ptr(tcg_rn_ptr);
12912}
12913
12914
12915
12916
12917
12918
12919
/* Crypto three-register SHA512 group:
 *  o == 0: opcode 0 SHA512H, 1 SHA512H2, 2 SHA512SU1 (FEAT_SHA512),
 *          3 RAX1 (FEAT_SHA3)
 *  o == 1: opcode 0 SM3PARTW1, 1 SM3PARTW2 (FEAT_SM3), 2 SM4EKEY (FEAT_SM4)
 */
static void disas_crypto_three_reg_sha512(DisasContext *s, uint32_t insn)
{
    int opcode = extract32(insn, 10, 2);
    int o = extract32(insn, 14, 1);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    bool feature;
    CryptoThreeOpFn *genfn;

    if (o == 0) {
        switch (opcode) {
        case 0: /* SHA512H */
            feature = dc_isar_feature(aa64_sha512, s);
            genfn = gen_helper_crypto_sha512h;
            break;
        case 1: /* SHA512H2 */
            feature = dc_isar_feature(aa64_sha512, s);
            genfn = gen_helper_crypto_sha512h2;
            break;
        case 2: /* SHA512SU1 */
            feature = dc_isar_feature(aa64_sha512, s);
            genfn = gen_helper_crypto_sha512su1;
            break;
        case 3: /* RAX1 */
            /* RAX1 has no helper; NULL genfn selects the inline
             * expansion below.
             */
            feature = dc_isar_feature(aa64_sha3, s);
            genfn = NULL;
            break;
        }
        /* opcode is a 2-bit field, so the switch above is exhaustive
         * and feature/genfn are always set on this path.
         */
    } else {
        switch (opcode) {
        case 0: /* SM3PARTW1 */
            feature = dc_isar_feature(aa64_sm3, s);
            genfn = gen_helper_crypto_sm3partw1;
            break;
        case 1: /* SM3PARTW2 */
            feature = dc_isar_feature(aa64_sm3, s);
            genfn = gen_helper_crypto_sm3partw2;
            break;
        case 2: /* SM4EKEY */
            feature = dc_isar_feature(aa64_sm4, s);
            genfn = gen_helper_crypto_sm4ekey;
            break;
        default:
            unallocated_encoding(s);
            return;
        }
    }

    if (!feature) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (genfn) {
        TCGv_ptr tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr;

        tcg_rd_ptr = vec_full_reg_ptr(s, rd);
        tcg_rn_ptr = vec_full_reg_ptr(s, rn);
        tcg_rm_ptr = vec_full_reg_ptr(s, rm);

        genfn(tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr);

        tcg_temp_free_ptr(tcg_rd_ptr);
        tcg_temp_free_ptr(tcg_rn_ptr);
        tcg_temp_free_ptr(tcg_rm_ptr);
    } else {
        /* RAX1: Vd = Vn ^ ROL(Vm, 1), per 64-bit lane.  Both lane
         * results are computed into temporaries before writing back,
         * since Rd may overlap Rn/Rm.
         */
        TCGv_i64 tcg_op1, tcg_op2, tcg_res[2];
        int pass;

        tcg_op1 = tcg_temp_new_i64();
        tcg_op2 = tcg_temp_new_i64();
        tcg_res[0] = tcg_temp_new_i64();
        tcg_res[1] = tcg_temp_new_i64();

        for (pass = 0; pass < 2; pass++) {
            read_vec_element(s, tcg_op1, rn, pass, MO_64);
            read_vec_element(s, tcg_op2, rm, pass, MO_64);

            tcg_gen_rotli_i64(tcg_res[pass], tcg_op2, 1);
            tcg_gen_xor_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
        }
        write_vec_element(s, tcg_res[0], rd, 0, MO_64);
        write_vec_element(s, tcg_res[1], rd, 1, MO_64);

        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2);
        tcg_temp_free_i64(tcg_res[0]);
        tcg_temp_free_i64(tcg_res[1]);
    }
}
13015
13016
13017
13018
13019
13020
13021
13022static void disas_crypto_two_reg_sha512(DisasContext *s, uint32_t insn)
13023{
13024 int opcode = extract32(insn, 10, 2);
13025 int rn = extract32(insn, 5, 5);
13026 int rd = extract32(insn, 0, 5);
13027 TCGv_ptr tcg_rd_ptr, tcg_rn_ptr;
13028 bool feature;
13029 CryptoTwoOpFn *genfn;
13030
13031 switch (opcode) {
13032 case 0:
13033 feature = dc_isar_feature(aa64_sha512, s);
13034 genfn = gen_helper_crypto_sha512su0;
13035 break;
13036 case 1:
13037 feature = dc_isar_feature(aa64_sm4, s);
13038 genfn = gen_helper_crypto_sm4e;
13039 break;
13040 default:
13041 unallocated_encoding(s);
13042 return;
13043 }
13044
13045 if (!feature) {
13046 unallocated_encoding(s);
13047 return;
13048 }
13049
13050 if (!fp_access_check(s)) {
13051 return;
13052 }
13053
13054 tcg_rd_ptr = vec_full_reg_ptr(s, rd);
13055 tcg_rn_ptr = vec_full_reg_ptr(s, rn);
13056
13057 genfn(tcg_rd_ptr, tcg_rn_ptr);
13058
13059 tcg_temp_free_ptr(tcg_rd_ptr);
13060 tcg_temp_free_ptr(tcg_rn_ptr);
13061}
13062
13063
13064
13065
13066
13067
13068
/* Crypto four-register group:
 *  op0 == 0: EOR3   Vd = Vn ^ Vm ^ Va        (FEAT_SHA3)
 *  op0 == 1: BCAX   Vd = Vn ^ (Vm & ~Va)     (FEAT_SHA3)
 *  op0 == 2: SM3SS1                          (FEAT_SM3)
 */
static void disas_crypto_four_reg(DisasContext *s, uint32_t insn)
{
    int op0 = extract32(insn, 21, 2);
    int rm = extract32(insn, 16, 5);
    int ra = extract32(insn, 10, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    bool feature;

    switch (op0) {
    case 0: /* EOR3 */
    case 1: /* BCAX */
        feature = dc_isar_feature(aa64_sha3, s);
        break;
    case 2: /* SM3SS1 */
        feature = dc_isar_feature(aa64_sm3, s);
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!feature) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (op0 < 2) {
        /* EOR3/BCAX: full 128-bit vector ops done as two 64-bit lanes.
         * Results are buffered in temporaries before writing back so
         * that Rd may overlap any of the source registers.
         */
        TCGv_i64 tcg_op1, tcg_op2, tcg_op3, tcg_res[2];
        int pass;

        tcg_op1 = tcg_temp_new_i64();
        tcg_op2 = tcg_temp_new_i64();
        tcg_op3 = tcg_temp_new_i64();
        tcg_res[0] = tcg_temp_new_i64();
        tcg_res[1] = tcg_temp_new_i64();

        for (pass = 0; pass < 2; pass++) {
            read_vec_element(s, tcg_op1, rn, pass, MO_64);
            read_vec_element(s, tcg_op2, rm, pass, MO_64);
            read_vec_element(s, tcg_op3, ra, pass, MO_64);

            if (op0 == 0) {
                /* EOR3: Vm ^ Va ... */
                tcg_gen_xor_i64(tcg_res[pass], tcg_op2, tcg_op3);
            } else {
                /* BCAX: Vm & ~Va ... */
                tcg_gen_andc_i64(tcg_res[pass], tcg_op2, tcg_op3);
            }
            /* ... then XOR in Vn */
            tcg_gen_xor_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
        }
        write_vec_element(s, tcg_res[0], rd, 0, MO_64);
        write_vec_element(s, tcg_res[1], rd, 1, MO_64);

        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2);
        tcg_temp_free_i64(tcg_op3);
        tcg_temp_free_i64(tcg_res[0]);
        tcg_temp_free_i64(tcg_res[1]);
    } else {
        /* SM3SS1: operates only on 32-bit element 3 of each source;
         * elements 0-2 of Rd are zeroed.
         */
        TCGv_i32 tcg_op1, tcg_op2, tcg_op3, tcg_res, tcg_zero;

        tcg_op1 = tcg_temp_new_i32();
        tcg_op2 = tcg_temp_new_i32();
        tcg_op3 = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        tcg_zero = tcg_const_i32(0);

        read_vec_element_i32(s, tcg_op1, rn, 3, MO_32);
        read_vec_element_i32(s, tcg_op2, rm, 3, MO_32);
        read_vec_element_i32(s, tcg_op3, ra, 3, MO_32);

        /* ROL(ROL(n, 12) + m + a, 7), expressed via rotate-right:
         * rotri 20 == rotli 12, rotri 25 == rotli 7.
         */
        tcg_gen_rotri_i32(tcg_res, tcg_op1, 20);
        tcg_gen_add_i32(tcg_res, tcg_res, tcg_op2);
        tcg_gen_add_i32(tcg_res, tcg_res, tcg_op3);
        tcg_gen_rotri_i32(tcg_res, tcg_res, 25);

        write_vec_element_i32(s, tcg_zero, rd, 0, MO_32);
        write_vec_element_i32(s, tcg_zero, rd, 1, MO_32);
        write_vec_element_i32(s, tcg_zero, rd, 2, MO_32);
        write_vec_element_i32(s, tcg_res, rd, 3, MO_32);

        tcg_temp_free_i32(tcg_op1);
        tcg_temp_free_i32(tcg_op2);
        tcg_temp_free_i32(tcg_op3);
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_zero);
    }
}
13162
13163
13164
13165
13166
13167
13168
13169static void disas_crypto_xar(DisasContext *s, uint32_t insn)
13170{
13171 int rm = extract32(insn, 16, 5);
13172 int imm6 = extract32(insn, 10, 6);
13173 int rn = extract32(insn, 5, 5);
13174 int rd = extract32(insn, 0, 5);
13175 TCGv_i64 tcg_op1, tcg_op2, tcg_res[2];
13176 int pass;
13177
13178 if (!dc_isar_feature(aa64_sha3, s)) {
13179 unallocated_encoding(s);
13180 return;
13181 }
13182
13183 if (!fp_access_check(s)) {
13184 return;
13185 }
13186
13187 tcg_op1 = tcg_temp_new_i64();
13188 tcg_op2 = tcg_temp_new_i64();
13189 tcg_res[0] = tcg_temp_new_i64();
13190 tcg_res[1] = tcg_temp_new_i64();
13191
13192 for (pass = 0; pass < 2; pass++) {
13193 read_vec_element(s, tcg_op1, rn, pass, MO_64);
13194 read_vec_element(s, tcg_op2, rm, pass, MO_64);
13195
13196 tcg_gen_xor_i64(tcg_res[pass], tcg_op1, tcg_op2);
13197 tcg_gen_rotri_i64(tcg_res[pass], tcg_res[pass], imm6);
13198 }
13199 write_vec_element(s, tcg_res[0], rd, 0, MO_64);
13200 write_vec_element(s, tcg_res[1], rd, 1, MO_64);
13201
13202 tcg_temp_free_i64(tcg_op1);
13203 tcg_temp_free_i64(tcg_op2);
13204 tcg_temp_free_i64(tcg_res[0]);
13205 tcg_temp_free_i64(tcg_res[1]);
13206}
13207
13208
13209
13210
13211
13212
13213
13214static void disas_crypto_three_reg_imm2(DisasContext *s, uint32_t insn)
13215{
13216 int opcode = extract32(insn, 10, 2);
13217 int imm2 = extract32(insn, 12, 2);
13218 int rm = extract32(insn, 16, 5);
13219 int rn = extract32(insn, 5, 5);
13220 int rd = extract32(insn, 0, 5);
13221 TCGv_ptr tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr;
13222 TCGv_i32 tcg_imm2, tcg_opcode;
13223
13224 if (!dc_isar_feature(aa64_sm3, s)) {
13225 unallocated_encoding(s);
13226 return;
13227 }
13228
13229 if (!fp_access_check(s)) {
13230 return;
13231 }
13232
13233 tcg_rd_ptr = vec_full_reg_ptr(s, rd);
13234 tcg_rn_ptr = vec_full_reg_ptr(s, rn);
13235 tcg_rm_ptr = vec_full_reg_ptr(s, rm);
13236 tcg_imm2 = tcg_const_i32(imm2);
13237 tcg_opcode = tcg_const_i32(opcode);
13238
13239 gen_helper_crypto_sm3tt(tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr, tcg_imm2,
13240 tcg_opcode);
13241
13242 tcg_temp_free_ptr(tcg_rd_ptr);
13243 tcg_temp_free_ptr(tcg_rn_ptr);
13244 tcg_temp_free_ptr(tcg_rm_ptr);
13245 tcg_temp_free_i32(tcg_imm2);
13246 tcg_temp_free_i32(tcg_opcode);
13247}
13248
13249
13250
13251
13252
13253
/* C3.6 Data processing - SIMD, inc Crypto.
 *
 * The decode is table-driven: the first entry whose (insn & mask) ==
 * pattern wins, so ordering matters where patterns overlap.
 */
static const AArch64DecodeTable data_proc_simd[] = {
    /* pattern  ,  mask     ,  fn */
    { 0x0e200400, 0x9f200400, disas_simd_three_reg_same },
    { 0x0e008400, 0x9f208400, disas_simd_three_reg_same_extra },
    { 0x0e200000, 0x9f200c00, disas_simd_three_reg_diff },
    { 0x0e200800, 0x9f3e0c00, disas_simd_two_reg_misc },
    { 0x0e300800, 0x9f3e0c00, disas_simd_across_lanes },
    { 0x0e000400, 0x9fe08400, disas_simd_copy },
    { 0x0f000000, 0x9f000400, disas_simd_indexed }, /* vector indexed */
    /* simd_mod_imm matches a subset of simd_shift_imm (stricter mask),
     * so it must precede it in the table.
     */
    { 0x0f000400, 0x9ff80400, disas_simd_mod_imm },
    { 0x0f000400, 0x9f800400, disas_simd_shift_imm },
    { 0x0e000000, 0xbf208c00, disas_simd_tb },
    { 0x0e000800, 0xbf208c00, disas_simd_zip_trn },
    { 0x2e000000, 0xbf208400, disas_simd_ext },
    { 0x5e200400, 0xdf200400, disas_simd_scalar_three_reg_same },
    { 0x5e008400, 0xdf208400, disas_simd_scalar_three_reg_same_extra },
    { 0x5e200000, 0xdf200c00, disas_simd_scalar_three_reg_diff },
    { 0x5e200800, 0xdf3e0c00, disas_simd_scalar_two_reg_misc },
    { 0x5e300800, 0xdf3e0c00, disas_simd_scalar_pairwise },
    { 0x5e000400, 0xdfe08400, disas_simd_scalar_copy },
    { 0x5f000000, 0xdf000400, disas_simd_indexed }, /* scalar indexed */
    { 0x5f000400, 0xdf800400, disas_simd_scalar_shift_imm },
    { 0x4e280800, 0xff3e0c00, disas_crypto_aes },
    { 0x5e000000, 0xff208c00, disas_crypto_three_reg_sha },
    { 0x5e280800, 0xff3e0c00, disas_crypto_two_reg_sha },
    { 0xce608000, 0xffe0b000, disas_crypto_three_reg_sha512 },
    { 0xcec08000, 0xfffff000, disas_crypto_two_reg_sha512 },
    { 0xce000000, 0xff808000, disas_crypto_four_reg },
    { 0xce800000, 0xffe00000, disas_crypto_xar },
    { 0xce408000, 0xffe0c000, disas_crypto_three_reg_imm2 },
    { 0x0e400400, 0x9f60c400, disas_simd_three_reg_same_fp16 },
    { 0x0e780800, 0x8f7e0c00, disas_simd_two_reg_misc_fp16 },
    { 0x5e400400, 0xdf60c400, disas_simd_scalar_three_reg_same_fp16 },
    { 0x00000000, 0x00000000, NULL } /* table terminator */
};
13290
13291static void disas_data_proc_simd(DisasContext *s, uint32_t insn)
13292{
13293
13294
13295
13296
13297 AArch64DecodeFn *fn = lookup_disas_fn(&data_proc_simd[0], insn);
13298 if (fn) {
13299 fn(s, insn);
13300 } else {
13301 unallocated_encoding(s);
13302 }
13303}
13304
13305
13306static void disas_data_proc_simd_fp(DisasContext *s, uint32_t insn)
13307{
13308 if (extract32(insn, 28, 1) == 1 && extract32(insn, 30, 1) == 0) {
13309 disas_data_proc_fp(s, insn);
13310 } else {
13311
13312 disas_data_proc_simd(s, insn);
13313 }
13314}
13315
13316
13317static void disas_a64_insn(CPUARMState *env, DisasContext *s)
13318{
13319 uint32_t insn;
13320
13321 insn = arm_ldl_code(env, s->pc, s->sctlr_b);
13322 s->insn = insn;
13323 s->pc += 4;
13324
13325 s->fp_access_checked = false;
13326
13327 switch (extract32(insn, 25, 4)) {
13328 case 0x0: case 0x1: case 0x3:
13329 unallocated_encoding(s);
13330 break;
13331 case 0x2:
13332 if (!dc_isar_feature(aa64_sve, s) || !disas_sve(s, insn)) {
13333 unallocated_encoding(s);
13334 }
13335 break;
13336 case 0x8: case 0x9:
13337 disas_data_proc_imm(s, insn);
13338 break;
13339 case 0xa: case 0xb:
13340 disas_b_exc_sys(s, insn);
13341 break;
13342 case 0x4:
13343 case 0x6:
13344 case 0xc:
13345 case 0xe:
13346 disas_ldst(s, insn);
13347 break;
13348 case 0x5:
13349 case 0xd:
13350 disas_data_proc_reg(s, insn);
13351 break;
13352 case 0x7:
13353 case 0xf:
13354 disas_data_proc_simd_fp(s, insn);
13355 break;
13356 default:
13357 assert(FALSE);
13358 break;
13359 }
13360
13361
13362 free_tmp_a64(s);
13363}
13364
/* Initialise the DisasContext for a new TB from the CPU state and the
 * flags baked into the TB.
 */
static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
                                          CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;
    ARMCPU *arm_cpu = arm_env_get_cpu(env);
    int bound;

    dc->isar = &arm_cpu->isar;
    dc->pc = dc->base.pc_first;
    dc->condjmp = 0;

    dc->aarch64 = 1;
    /* EL3 present but running AArch32 means Secure exceptions are
     * routed to EL3 rather than taken in-place.
     */
    dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
                               !arm_el_is_aa64(env, 3);
    /* Thumb/condexec state is AArch32-only; clear it here.  */
    dc->thumb = 0;
    dc->sctlr_b = 0;
    dc->be_data = ARM_TBFLAG_BE_DATA(dc->base.tb->flags) ? MO_BE : MO_LE;
    dc->condexec_mask = 0;
    dc->condexec_cond = 0;
    dc->mmu_idx = core_to_arm_mmu_idx(env, ARM_TBFLAG_MMUIDX(dc->base.tb->flags));
    dc->tbi0 = ARM_TBFLAG_TBI0(dc->base.tb->flags);
    dc->tbi1 = ARM_TBFLAG_TBI1(dc->base.tb->flags);
    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
    dc->user = (dc->current_el == 0);
#endif
    dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(dc->base.tb->flags);
    dc->sve_excp_el = ARM_TBFLAG_SVEEXC_EL(dc->base.tb->flags);
    dc->sve_len = (ARM_TBFLAG_ZCR_LEN(dc->base.tb->flags) + 1) * 16;
    dc->vec_len = 0;
    dc->vec_stride = 0;
    dc->cp_regs = arm_cpu->cp_regs;
    dc->features = env->features;

    /* Architectural single-step state from the TB flags.  When
     * ss_active is set and PSTATE.SS is clear, the next insn must
     * raise a software step exception instead of executing (see
     * aarch64_tr_translate_insn).
     */
    dc->ss_active = ARM_TBFLAG_SS_ACTIVE(dc->base.tb->flags);
    dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(dc->base.tb->flags);
    dc->is_ldex = false;
    dc->ss_same_el = (arm_debug_target_el(env) == dc->current_el);

    /* Bound the number of insns to execute to those left on the page.  */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;

    /* If architectural single step is active, limit the TB to one insn.  */
    if (dc->ss_active) {
        bound = 1;
    }
    dc->base.max_insns = MIN(dc->base.max_insns, bound);

    init_tmp_a64_array(dc);
}
13434
/* Per-TB start hook: no additional setup is needed for AArch64.  */
static void aarch64_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}
13438
/* Per-insn start hook: emit the insn_start op carrying this insn's PC
 * (the two extra words are always zero here), and remember the op so
 * its arguments can be updated later if needed.
 */
static void aarch64_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(dc->pc, 0, 0);
    dc->insn_start = tcg_last_op();
}
13446
/* Called by the translator loop when a breakpoint matches this PC.
 * Returns true to indicate the breakpoint was handled here.
 */
static bool aarch64_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
                                        const CPUBreakpoint *bp)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (bp->flags & BP_CPU) {
        /* CPU (architectural) breakpoints are re-checked at run time
         * by a helper, which may raise the debug exception itself;
         * end the TB after it.
         */
        gen_a64_set_pc_im(dc->pc);
        gen_helper_check_breakpoints(cpu_env);
        dc->base.is_jmp = DISAS_TOO_MANY;
    } else {
        /* GDB-style breakpoint: raise EXCP_DEBUG unconditionally.  */
        gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
        /* Advance PC so that the TB is non-empty: pc_next (set from
         * dc->pc in translate_insn) determines tb->size, and the
         * breakpoint address must fall inside the TB — presumably so
         * the breakpoint can be correctly invalidated/cleared later;
         * TODO confirm against the generic translator loop.
         */
        dc->pc += 4;
        dc->base.is_jmp = DISAS_NORETURN;
    }

    return true;
}
13470
/* Translate one instruction, or generate the pending software-step
 * exception if single-step state is Active-pending.
 */
static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;

    if (dc->ss_active && !dc->pstate_ss) {
        /* Single-step is Active-pending: instead of executing, the
         * insn must take a software step exception.  This can only be
         * the first insn of the TB, because init_disas_context forces
         * max_insns to 1 whenever ss_active is set.
         */
        assert(dc->base.num_insns == 1);
        gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
                      default_exception_el(dc));
        dc->base.is_jmp = DISAS_NORETURN;
    } else {
        disas_a64_insn(env, dc);
    }

    /* disas_a64_insn advanced dc->pc; mirror it into the generic base.  */
    dc->base.pc_next = dc->pc;
    translator_loop_temp_check(&dc->base);
}
13498
/* Emit the end-of-TB code: chain to the next TB, raise a debug/step
 * exception, or exit to the main loop, depending on how the block
 * ended (dc->base.is_jmp) and whether single-stepping is in effect.
 */
static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (unlikely(dc->base.singlestep_enabled || dc->ss_active)) {
        /* Debug or architectural single step: the TB must end in an
         * exception rather than a chained jump.
         */
        switch (dc->base.is_jmp) {
        default:
            /* PC not yet written back for this case; do it now.  */
            gen_a64_set_pc_im(dc->pc);
            /* fall through */
        case DISAS_EXIT:
        case DISAS_JUMP:
            if (dc->base.singlestep_enabled) {
                /* gdbstub single step: internal debug exception */
                gen_exception_internal(EXCP_DEBUG);
            } else {
                /* architectural single step */
                gen_step_complete_exception(dc);
            }
            break;
        case DISAS_NORETURN:
            /* An exception was already generated; nothing to emit.  */
            break;
        }
    } else {
        switch (dc->base.is_jmp) {
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
            /* Straight-line end of TB: chain to the next one.  */
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_UPDATE:
            /* CPU state changed; write back PC ...  */
            gen_a64_set_pc_im(dc->pc);
            /* fall through */
        case DISAS_EXIT:
            /* ... and return to the main loop.  */
            tcg_gen_exit_tb(NULL, 0);
            break;
        case DISAS_JUMP:
            tcg_gen_lookup_and_goto_ptr();
            break;
        case DISAS_NORETURN:
        case DISAS_SWI:
            break;
        case DISAS_WFE:
            gen_a64_set_pc_im(dc->pc);
            gen_helper_wfe(cpu_env);
            break;
        case DISAS_YIELD:
            gen_a64_set_pc_im(dc->pc);
            gen_helper_yield(cpu_env);
            break;
        case DISAS_WFI:
        {
            /* The argument is the insn length; presumably used by the
             * helper for syndrome/PC bookkeeping — confirm in
             * helper_wfi.
             */
            TCGv_i32 tmp = tcg_const_i32(4);

            gen_a64_set_pc_im(dc->pc);
            gen_helper_wfi(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            /* The helper doesn't necessarily throw an exception, but
             * we must return to the main loop to check for interrupts
             * anyway.
             */
            tcg_gen_exit_tb(NULL, 0);
            break;
        }
        }
    }

    /* Code above may have changed dc->pc; re-sync the generic base.  */
    dc->base.pc_next = dc->pc;
}
13573
13574static void aarch64_tr_disas_log(const DisasContextBase *dcbase,
13575 CPUState *cpu)
13576{
13577 DisasContext *dc = container_of(dcbase, DisasContext, base);
13578
13579 qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
13580 log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size);
13581}
13582
/* Hook table consumed by the generic translator loop for AArch64.  */
const TranslatorOps aarch64_translator_ops = {
    .init_disas_context = aarch64_tr_init_disas_context,
    .tb_start           = aarch64_tr_tb_start,
    .insn_start         = aarch64_tr_insn_start,
    .breakpoint_check   = aarch64_tr_breakpoint_check,
    .translate_insn     = aarch64_tr_translate_insn,
    .tb_stop            = aarch64_tr_tb_stop,
    .disas_log          = aarch64_tr_disas_log,
};
13592