1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19#include "qemu/osdep.h"
20
21#include "cpu.h"
22#include "tcg-op.h"
23#include "qemu/log.h"
24#include "arm_ldst.h"
25#include "translate.h"
26#include "internals.h"
27#include "qemu/host-utils.h"
28
29#include "exec/semihost.h"
30#include "exec/gen-icount.h"
31
32#include "exec/helper-proto.h"
33#include "exec/helper-gen.h"
34#include "exec/log.h"
35
36#include "trace-tcg.h"
37
/* TCG globals mirroring the AArch64 CPU state: the 31 general-purpose
 * registers plus SP (xregs[31]) and the PC.
 */
static TCGv_i64 cpu_X[32];
static TCGv_i64 cpu_pc;

/* Load/store exclusive handling: high half of a 128-bit exclusive value. */
static TCGv_i64 cpu_exclusive_high;

/* Register names as shown in debug output; x30 is the link register. */
static const char *regnames[] = {
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
    "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
    "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23",
    "x24", "x25", "x26", "x27", "x28", "x29", "lr", "sp"
};

/* Shift types for shifted-register operands, matching the A64
 * instruction encoding of the shift field.
 */
enum a64_shift_type {
    A64_SHIFT_TYPE_LSL = 0,
    A64_SHIFT_TYPE_LSR = 1,
    A64_SHIFT_TYPE_ASR = 2,
    A64_SHIFT_TYPE_ROR = 3
};
57
58
59
60
/* Table-driven decoder: a decode function handles one instruction group. */
typedef void AArch64DecodeFn(DisasContext *s, uint32_t insn);

/* An entry matches when (insn & mask) == pattern; see lookup_disas_fn(). */
typedef struct AArch64DecodeTable {
    uint32_t pattern;
    uint32_t mask;
    AArch64DecodeFn *disas_fn;
} AArch64DecodeTable;

/* Function prototypes for generating Neon/crypto helper calls of the
 * various operand shapes (with/without CPU env pointer, 32/64 bit,
 * narrowing/widening, fpstatus-taking).
 */
typedef void NeonGenOneOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32);
typedef void NeonGenTwoOpFn(TCGv_i32, TCGv_i32, TCGv_i32);
typedef void NeonGenTwoOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32);
typedef void NeonGenTwo64OpFn(TCGv_i64, TCGv_i64, TCGv_i64);
typedef void NeonGenTwo64OpEnvFn(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64);
typedef void NeonGenNarrowFn(TCGv_i32, TCGv_i64);
typedef void NeonGenNarrowEnvFn(TCGv_i32, TCGv_ptr, TCGv_i64);
typedef void NeonGenWidenFn(TCGv_i64, TCGv_i32);
typedef void NeonGenTwoSingleOPFn(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
typedef void NeonGenTwoDoubleOPFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_ptr);
typedef void NeonGenOneOpFn(TCGv_i64, TCGv_i64);
typedef void CryptoTwoOpEnvFn(TCGv_ptr, TCGv_i32, TCGv_i32);
typedef void CryptoThreeOpEnvFn(TCGv_ptr, TCGv_i32, TCGv_i32, TCGv_i32);
83
84
/* Initialize the AArch64 TCG globals (pc, x0..x30/sp, exclusive_high).
 * Called once at translator start-up.
 */
void a64_translate_init(void)
{
    int i;

    cpu_pc = tcg_global_mem_new_i64(cpu_env,
                                    offsetof(CPUARMState, pc),
                                    "pc");
    for (i = 0; i < 32; i++) {
        cpu_X[i] = tcg_global_mem_new_i64(cpu_env,
                                          offsetof(CPUARMState, xregs[i]),
                                          regnames[i]);
    }

    cpu_exclusive_high = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_high), "exclusive_high");
}
101
/* Return the MMU index to use for unprivileged (LDTR/STTR style)
 * accesses: map the current EL1 regime onto its EL0 equivalent.
 * There is no unprivileged variant of the Stage-2 regime, hence the
 * assertion; all other regimes use the normal index unchanged.
 */
static inline ARMMMUIdx get_a64_user_mem_index(DisasContext *s)
{
    switch (s->mmu_idx) {
    case ARMMMUIdx_S12NSE1:
        return ARMMMUIdx_S12NSE0;
    case ARMMMUIdx_S1SE1:
        return ARMMMUIdx_S1SE0;
    case ARMMMUIdx_S2NS:
        g_assert_not_reached();
    default:
        return s->mmu_idx;
    }
}
118
/* Dump the AArch64 CPU state (PC, SP, X registers, PSTATE and,
 * if CPU_DUMP_FPU is set in flags, the Q registers and FPCR/FPSR)
 * to the given stream for debugging.
 */
void aarch64_cpu_dump_state(CPUState *cs, FILE *f,
                            fprintf_function cpu_fprintf, int flags)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t psr = pstate_read(env);
    int i;
    int el = arm_current_el(env);
    const char *ns_status;

    cpu_fprintf(f, "PC=%016"PRIx64"  SP=%016"PRIx64"\n",
            env->pc, env->xregs[31]);
    /* Four X registers per output line. */
    for (i = 0; i < 31; i++) {
        cpu_fprintf(f, "X%02d=%016"PRIx64, i, env->xregs[i]);
        if ((i % 4) == 3) {
            cpu_fprintf(f, "\n");
        } else {
            cpu_fprintf(f, " ");
        }
    }

    /* Only show the NS/S distinction when EL3 exists and we are not
     * already at EL3 (where SCR_EL3.NS does not apply).
     */
    if (arm_feature(env, ARM_FEATURE_EL3) && el != 3) {
        ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
    } else {
        ns_status = "";
    }

    /* 'h' = using SP_ELx, 't' = using SP_EL0 (handler/thread stack). */
    cpu_fprintf(f, "\nPSTATE=%08x %c%c%c%c %sEL%d%c\n",
                psr,
                psr & PSTATE_N ? 'N' : '-',
                psr & PSTATE_Z ? 'Z' : '-',
                psr & PSTATE_C ? 'C' : '-',
                psr & PSTATE_V ? 'V' : '-',
                ns_status,
                el,
                psr & PSTATE_SP ? 'h' : 't');

    if (flags & CPU_DUMP_FPU) {
        int numvfpregs = 32;
        /* Each 128-bit Q register occupies two consecutive 64-bit
         * vfp.regs[] slots (low half first); print two per line.
         */
        for (i = 0; i < numvfpregs; i += 2) {
            uint64_t vlo = float64_val(env->vfp.regs[i * 2]);
            uint64_t vhi = float64_val(env->vfp.regs[(i * 2) + 1]);
            cpu_fprintf(f, "q%02d=%016" PRIx64 ":%016" PRIx64 " ",
                        i, vhi, vlo);
            vlo = float64_val(env->vfp.regs[(i + 1) * 2]);
            vhi = float64_val(env->vfp.regs[((i + 1) * 2) + 1]);
            cpu_fprintf(f, "q%02d=%016" PRIx64 ":%016" PRIx64 "\n",
                        i + 1, vhi, vlo);
        }
        cpu_fprintf(f, "FPCR: %08x  FPSR: %08x\n",
                    vfp_get_fpcr(env), vfp_get_fpsr(env));
    }
}
172
/* Emit code setting the guest PC to an immediate value. */
void gen_a64_set_pc_im(uint64_t val)
{
    tcg_gen_movi_i64(cpu_pc, val);
}
177
/* A condition-code test lifted to 64 bits: "condition holds" is
 * (value `cond` 0).
 */
typedef struct DisasCompare64 {
    TCGCond cond;
    TCGv_i64 value;
} DisasCompare64;
182
183static void a64_test_cc(DisasCompare64 *c64, int cc)
184{
185 DisasCompare c32;
186
187 arm_test_cc(&c32, cc);
188
189
190
191 c64->cond = c32.cond;
192 c64->value = tcg_temp_new_i64();
193 tcg_gen_ext_i32_i64(c64->value, c32.value);
194
195 arm_free_cc(&c32);
196}
197
/* Release the temporary allocated by a64_test_cc(). */
static void a64_free_cc(DisasCompare64 *c64)
{
    tcg_temp_free_i64(c64->value);
}
202
203static void gen_exception_internal(int excp)
204{
205 TCGv_i32 tcg_excp = tcg_const_i32(excp);
206
207 assert(excp_is_internal(excp));
208 gen_helper_exception_internal(cpu_env, tcg_excp);
209 tcg_temp_free_i32(tcg_excp);
210}
211
/* Emit a call raising an architectural exception with the given
 * syndrome (ESR) value, routed to target_el.
 */
static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);
    TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
    TCGv_i32 tcg_el = tcg_const_i32(target_el);

    gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
                                       tcg_syn, tcg_el);
    tcg_temp_free_i32(tcg_el);
    tcg_temp_free_i32(tcg_syn);
    tcg_temp_free_i32(tcg_excp);
}
224
/* Raise an internal exception from within an instruction: rewind the
 * PC by 'offset' bytes (s->pc has already advanced past the insn) and
 * end the translation block.
 */
static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
{
    gen_a64_set_pc_im(s->pc - offset);
    gen_exception_internal(excp);
    s->is_jmp = DISAS_EXC;
}
231
/* Raise an architectural exception from within an instruction:
 * rewind the PC by 'offset' bytes and end the translation block.
 */
static void gen_exception_insn(DisasContext *s, int offset, int excp,
                               uint32_t syndrome, uint32_t target_el)
{
    gen_a64_set_pc_im(s->pc - offset);
    gen_exception(excp, syndrome, target_el);
    s->is_jmp = DISAS_EXC;
}
239
/* Advance the software single-step state machine: if single-step is
 * active and PSTATE.SS is set, clear it (both the cached translator
 * copy and the real CPU state) so the *next* insn will step-fault.
 */
static void gen_ss_advance(DisasContext *s)
{
    if (s->ss_active) {
        s->pstate_ss = 0;
        gen_helper_clear_pstate_ss(cpu_env);
    }
}
250
/* Generate the software-step exception that is taken when an insn
 * completes (or a branch is taken) while single-stepping is active.
 * The step state machine is advanced first so that on return from
 * the exception handler stepping resumes correctly.
 */
static void gen_step_complete_exception(DisasContext *s)
{
    gen_ss_advance(s);
    gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
                  default_exception_el(s));
    s->is_jmp = DISAS_EXC;
}
267
/* Decide whether a direct TB-to-TB link (goto_tb) may be used for a
 * branch to 'dest'.
 */
static inline bool use_goto_tb(DisasContext *s, int n, uint64_t dest)
{
    /* No direct linking when single-stepping (gdbstub or architectural
     * software step) or when deterministic I/O requires ending the TB.
     */
    if (s->singlestep_enabled || s->ss_active || (s->tb->cflags & CF_LAST_IO)) {
        return false;
    }

    /* Only link TBs within the same guest page, so that page
     * invalidation can unlink them.
     */
    if ((s->tb->pc & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) {
        return false;
    }

    return true;
}
284
/* Emit a branch to 'dest', using a chained goto_tb (slot n) when
 * allowed, otherwise an explicit PC update plus TB exit (raising the
 * appropriate debug exception when single-stepping).
 */
static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest)
{
    TranslationBlock *tb;

    tb = s->tb;
    if (use_goto_tb(s, n, dest)) {
        tcg_gen_goto_tb(n);
        gen_a64_set_pc_im(dest);
        /* exit_tb argument encodes the TB pointer plus the slot index */
        tcg_gen_exit_tb((intptr_t)tb + n);
        s->is_jmp = DISAS_TB_JUMP;
    } else {
        gen_a64_set_pc_im(dest);
        if (s->ss_active) {
            /* architectural single step: report step completion */
            gen_step_complete_exception(s);
        } else if (s->singlestep_enabled) {
            /* gdbstub single step */
            gen_exception_internal(EXCP_DEBUG);
        } else {
            tcg_gen_exit_tb(0);
            s->is_jmp = DISAS_TB_JUMP;
        }
    }
}
307
/* Record syndrome information for a possible data abort on this
 * instruction by patching it into the insn_start op's third argument.
 * Only the bits that fit in the insn-start word are kept.
 */
static void disas_set_insn_syndrome(DisasContext *s, uint32_t syn)
{
    syn &= ARM_INSN_START_WORD2_MASK;
    syn >>= ARM_INSN_START_WORD2_SHIFT;

    /* Each insn may record at most one syndrome; index 0 means
     * "already consumed".
     */
    assert(s->insn_start_idx != 0);
    tcg_set_insn_param(s->insn_start_idx, 2, syn);
    s->insn_start_idx = 0;
}
321
/* Emit an UNDEF exception for an unallocated (reserved) encoding;
 * offset 4 rewinds the PC to the faulting instruction.
 */
static void unallocated_encoding(DisasContext *s)
{
    gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
}
328
/* Log-and-UNDEF for encodings which are architecturally valid but not
 * implemented by this translator.
 *
 * Note: no semicolon after "while (0)" — the macro must expand to a
 * single statement so "if (x) unsupported_encoding(s, insn); else ..."
 * parses correctly; the original trailing semicolon broke that.
 */
#define unsupported_encoding(s, insn)                                    \
    do {                                                                 \
        qemu_log_mask(LOG_UNIMP,                                         \
                      "%s:%d: unsupported instruction encoding 0x%08x "  \
                      "at pc=%016" PRIx64 "\n",                          \
                      __FILE__, __LINE__, insn, s->pc - 4);              \
        unallocated_encoding(s);                                         \
    } while (0)
337
/* Reset the per-instruction temp tracking array.  Under CONFIG_DEBUG_TCG
 * the entries are also poisoned so stale use is caught.
 */
static void init_tmp_a64_array(DisasContext *s)
{
#ifdef CONFIG_DEBUG_TCG
    int i;
    for (i = 0; i < ARRAY_SIZE(s->tmp_a64); i++) {
        TCGV_UNUSED_I64(s->tmp_a64[i]);
    }
#endif
    s->tmp_a64_count = 0;
}
348
349static void free_tmp_a64(DisasContext *s)
350{
351 int i;
352 for (i = 0; i < s->tmp_a64_count; i++) {
353 tcg_temp_free_i64(s->tmp_a64[i]);
354 }
355 init_tmp_a64_array(s);
356}
357
358static TCGv_i64 new_tmp_a64(DisasContext *s)
359{
360 assert(s->tmp_a64_count < TMP_A64_MAX);
361 return s->tmp_a64[s->tmp_a64_count++] = tcg_temp_new_i64();
362}
363
/* Allocate an auto-freed temp preloaded with zero (used for XZR/WZR). */
static TCGv_i64 new_tmp_a64_zero(DisasContext *s)
{
    TCGv_i64 t = new_tmp_a64(s);
    tcg_gen_movi_i64(t, 0);
    return t;
}
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386static TCGv_i64 cpu_reg(DisasContext *s, int reg)
387{
388 if (reg == 31) {
389 return new_tmp_a64_zero(s);
390 } else {
391 return cpu_X[reg];
392 }
393}
394
395
/* Register access in the view where register 31 is SP (not XZR). */
static TCGv_i64 cpu_reg_sp(DisasContext *s, int reg)
{
    return cpu_X[reg];
}
400
401
402
403
404
405static TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf)
406{
407 TCGv_i64 v = new_tmp_a64(s);
408 if (reg != 31) {
409 if (sf) {
410 tcg_gen_mov_i64(v, cpu_X[reg]);
411 } else {
412 tcg_gen_ext32u_i64(v, cpu_X[reg]);
413 }
414 } else {
415 tcg_gen_movi_i64(v, 0);
416 }
417 return v;
418}
419
420static TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf)
421{
422 TCGv_i64 v = new_tmp_a64(s);
423 if (sf) {
424 tcg_gen_mov_i64(v, cpu_X[reg]);
425 } else {
426 tcg_gen_ext32u_i64(v, cpu_X[reg]);
427 }
428 return v;
429}
430
431
432
433
434
435
436
437
/* Debug-build check that fp_access_check() was called (and passed)
 * before any FP/SIMD register is touched for this instruction.
 */
static inline void assert_fp_access_checked(DisasContext *s)
{
#ifdef CONFIG_DEBUG_TCG
    if (unlikely(!s->fp_access_checked || s->fp_excp_el)) {
        fprintf(stderr, "target-arm: FP access check missing for "
                "instruction 0x%08x\n", s->insn);
        abort();
    }
#endif
}
448
449
450
451
452
/* Return the CPUARMState byte offset of element 'element' (of width
 * 2^size bytes) within vector register 'regno'.
 */
static inline int vec_reg_offset(DisasContext *s, int regno,
                                 int element, TCGMemOp size)
{
    int offs = offsetof(CPUARMState, vfp.regs[regno * 2]);
#ifdef HOST_WORDS_BIGENDIAN
    /* vfp.regs[2n] is still the low 64 bits and vfp.regs[2n+1] the
     * high 64 bits of the 128-bit vector even on big-endian hosts, so
     * compute the offset as if the 128 bits were fully big-endian and
     * then XOR with 8 to swap the two 64-bit halves.
     */
    offs += (16 - ((element + 1) * (1 << size)));
    offs ^= 8;
#else
    offs += element * (1 << size);
#endif
    assert_fp_access_checked(s);
    return offs;
}
472
473
474
475
476
477
/* Return the CPUARMState offset of the low 2^size bytes of FP
 * register 'regno' (Bn/Hn/Sn/Dn views of Vn).
 */
static inline int fp_reg_offset(DisasContext *s, int regno, TCGMemOp size)
{
    int offs = offsetof(CPUARMState, vfp.regs[regno * 2]);
#ifdef HOST_WORDS_BIGENDIAN
    /* sub-64-bit values sit at the far end of the 64-bit slot */
    offs += (8 - (1 << size));
#endif
    assert_fp_access_checked(s);
    return offs;
}
487
488
/* Offset of the high 64 bits of vector register 'regno'. */
static inline int fp_reg_hi_offset(DisasContext *s, int regno)
{
    assert_fp_access_checked(s);
    return offsetof(CPUARMState, vfp.regs[regno * 2 + 1]);
}
494
495
496
497
498
499
500
/* Load the Dn view of FP register 'reg' into a new i64 temp
 * (caller frees).
 */
static TCGv_i64 read_fp_dreg(DisasContext *s, int reg)
{
    TCGv_i64 v = tcg_temp_new_i64();

    tcg_gen_ld_i64(v, cpu_env, fp_reg_offset(s, reg, MO_64));
    return v;
}
508
/* Load the Sn view of FP register 'reg' into a new i32 temp
 * (caller frees).
 */
static TCGv_i32 read_fp_sreg(DisasContext *s, int reg)
{
    TCGv_i32 v = tcg_temp_new_i32();

    tcg_gen_ld_i32(v, cpu_env, fp_reg_offset(s, reg, MO_32));
    return v;
}
516
/* Store v to the Dn view of FP register 'reg', zeroing the high
 * 64 bits (A64 writes to a D register clear the rest of the vector).
 */
static void write_fp_dreg(DisasContext *s, int reg, TCGv_i64 v)
{
    TCGv_i64 tcg_zero = tcg_const_i64(0);

    tcg_gen_st_i64(v, cpu_env, fp_reg_offset(s, reg, MO_64));
    tcg_gen_st_i64(tcg_zero, cpu_env, fp_reg_hi_offset(s, reg));
    tcg_temp_free_i64(tcg_zero);
}
525
/* Store v to the Sn view of FP register 'reg'; the zero-extension to
 * 64 bits plus write_fp_dreg() clears all the upper vector bits.
 */
static void write_fp_sreg(DisasContext *s, int reg, TCGv_i32 v)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp, v);
    write_fp_dreg(s, reg, tmp);
    tcg_temp_free_i64(tmp);
}
534
/* Return a new ptr temp pointing at the softfloat fp_status inside
 * CPUARMState (caller frees).  A64 always uses the "normal" FP status,
 * never the standard-FPSCR one some A32 Neon ops use.
 */
static TCGv_ptr get_fpstatus_ptr(void)
{
    TCGv_ptr statusptr = tcg_temp_new_ptr();
    int offset;

    offset = offsetof(CPUARMState, vfp.fp_status);
    tcg_gen_addi_ptr(statusptr, cpu_env, offset);
    return statusptr;
}
548
549
550
551
/* Set the cached N and Z flags from a 64-bit result: NF gets the high
 * word (sign bit in bit 31), ZF is zero iff both halves are zero.
 */
static inline void gen_set_NZ64(TCGv_i64 result)
{
    tcg_gen_extr_i64_i32(cpu_ZF, cpu_NF, result);
    tcg_gen_or_i32(cpu_ZF, cpu_ZF, cpu_NF);
}
557
558
559static inline void gen_logic_CC(int sf, TCGv_i64 result)
560{
561 if (sf) {
562 gen_set_NZ64(result);
563 } else {
564 tcg_gen_extrl_i64_i32(cpu_ZF, result);
565 tcg_gen_mov_i32(cpu_NF, cpu_ZF);
566 }
567 tcg_gen_movi_i32(cpu_CF, 0);
568 tcg_gen_movi_i32(cpu_VF, 0);
569}
570
571
/* dest = t0 + t1; also compute and cache the NZCV flags.
 * sf selects 64-bit (nonzero) vs 32-bit operation.
 */
static void gen_add_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        TCGv_i64 result, flag, tmp;
        result = tcg_temp_new_i64();
        flag = tcg_temp_new_i64();
        tmp = tcg_temp_new_i64();

        /* 128-bit add: the high word of the double-width result is
         * the carry out.
         */
        tcg_gen_movi_i64(tmp, 0);
        tcg_gen_add2_i64(result, flag, t0, tmp, t1, tmp);

        tcg_gen_extrl_i64_i32(cpu_CF, flag);

        gen_set_NZ64(result);

        /* V = (result ^ t0) & ~(t0 ^ t1), i.e. overflow iff the
         * operands had the same sign and the result differs.
         */
        tcg_gen_xor_i64(flag, result, t0);
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_andc_i64(flag, flag, tmp);
        tcg_temp_free_i64(tmp);
        tcg_gen_extrh_i64_i32(cpu_VF, flag);

        tcg_gen_mov_i64(dest, result);
        tcg_temp_free_i64(result);
        tcg_temp_free_i64(flag);
    } else {
        /* 32-bit: do everything in i32 and widen the result at the end */
        TCGv_i32 t0_32 = tcg_temp_new_i32();
        TCGv_i32 t1_32 = tcg_temp_new_i32();
        TCGv_i32 tmp = tcg_temp_new_i32();

        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_extrl_i64_i32(t0_32, t0);
        tcg_gen_extrl_i64_i32(t1_32, t1);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, t1_32, tmp);
        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
        /* dest is the zero-extended 32-bit result (held in NF) */
        tcg_gen_extu_i32_i64(dest, cpu_NF);

        tcg_temp_free_i32(tmp);
        tcg_temp_free_i32(t0_32);
        tcg_temp_free_i32(t1_32);
    }
}
617
618
/* dest = t0 - t1; also compute and cache the NZCV flags.
 * C is the ARM "no borrow" convention: set when t0 >= t1 unsigned.
 */
static void gen_sub_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        /* 64 bit arithmetic */
        TCGv_i64 result, flag, tmp;

        result = tcg_temp_new_i64();
        flag = tcg_temp_new_i64();
        tcg_gen_sub_i64(result, t0, t1);

        gen_set_NZ64(result);

        tcg_gen_setcond_i64(TCG_COND_GEU, flag, t0, t1);
        tcg_gen_extrl_i64_i32(cpu_CF, flag);

        /* V = (result ^ t0) & (t0 ^ t1): overflow iff operands had
         * different signs and the result's sign differs from t0's.
         */
        tcg_gen_xor_i64(flag, result, t0);
        tmp = tcg_temp_new_i64();
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_and_i64(flag, flag, tmp);
        tcg_temp_free_i64(tmp);
        tcg_gen_extrh_i64_i32(cpu_VF, flag);
        tcg_gen_mov_i64(dest, result);
        tcg_temp_free_i64(flag);
        tcg_temp_free_i64(result);
    } else {
        /* 32 bit arithmetic */
        TCGv_i32 t0_32 = tcg_temp_new_i32();
        TCGv_i32 t1_32 = tcg_temp_new_i32();
        TCGv_i32 tmp;

        tcg_gen_extrl_i64_i32(t0_32, t0);
        tcg_gen_extrl_i64_i32(t1_32, t1);
        tcg_gen_sub_i32(cpu_NF, t0_32, t1_32);
        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0_32, t1_32);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_temp_free_i32(t0_32);
        tcg_temp_free_i32(t1_32);
        tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
        tcg_temp_free_i32(tmp);
        tcg_gen_extu_i32_i64(dest, cpu_NF);
    }
}
664
665
/* dest = t0 + t1 + CF, without updating flags.
 * With sf clear the result is truncated/zero-extended to 32 bits.
 */
static void gen_adc(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    TCGv_i64 flag = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(flag, cpu_CF);
    tcg_gen_add_i64(dest, t0, t1);
    tcg_gen_add_i64(dest, dest, flag);
    tcg_temp_free_i64(flag);

    if (!sf) {
        tcg_gen_ext32u_i64(dest, dest);
    }
}
678
679
/* dest = t0 + t1 + CF; also compute and cache the NZCV flags. */
static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        TCGv_i64 result, cf_64, vf_64, tmp;
        result = tcg_temp_new_i64();
        cf_64 = tcg_temp_new_i64();
        vf_64 = tcg_temp_new_i64();
        tmp = tcg_const_i64(0);

        /* Two chained double-width adds accumulate the carry-out in
         * cf_64: first t0 + CF, then + t1.
         */
        tcg_gen_extu_i32_i64(cf_64, cpu_CF);
        tcg_gen_add2_i64(result, cf_64, t0, tmp, cf_64, tmp);
        tcg_gen_add2_i64(result, cf_64, result, cf_64, t1, tmp);
        tcg_gen_extrl_i64_i32(cpu_CF, cf_64);
        gen_set_NZ64(result);

        /* V = (result ^ t0) & ~(t0 ^ t1), sign bit extracted below */
        tcg_gen_xor_i64(vf_64, result, t0);
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_andc_i64(vf_64, vf_64, tmp);
        tcg_gen_extrh_i64_i32(cpu_VF, vf_64);

        tcg_gen_mov_i64(dest, result);

        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(vf_64);
        tcg_temp_free_i64(cf_64);
        tcg_temp_free_i64(result);
    } else {
        TCGv_i32 t0_32, t1_32, tmp;
        t0_32 = tcg_temp_new_i32();
        t1_32 = tcg_temp_new_i32();
        tmp = tcg_const_i32(0);

        tcg_gen_extrl_i64_i32(t0_32, t0);
        tcg_gen_extrl_i64_i32(t1_32, t1);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1_32, tmp);

        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
        tcg_gen_extu_i32_i64(dest, cpu_NF);

        tcg_temp_free_i32(tmp);
        tcg_temp_free_i32(t1_32);
        tcg_temp_free_i32(t0_32);
    }
}
728
729
730
731
732
733
734
735
/* Store a general-purpose register value to memory with an explicit
 * MMU index.  size is log2 of the access size (0..3).  If iss_valid,
 * record ISS syndrome data so a data abort can report the register
 * (iss_srt), sign flag (iss_sf) and acquire/release flag (iss_ar).
 */
static void do_gpr_st_memidx(DisasContext *s, TCGv_i64 source,
                             TCGv_i64 tcg_addr, int size, int memidx,
                             bool iss_valid,
                             unsigned int iss_srt,
                             bool iss_sf, bool iss_ar)
{
    g_assert(size <= 3);
    tcg_gen_qemu_st_i64(source, tcg_addr, memidx, s->be_data + size);

    if (iss_valid) {
        uint32_t syn;

        syn = syn_data_abort_with_iss(0,
                                      size,
                                      false,
                                      iss_srt,
                                      iss_sf,
                                      iss_ar,
                                      0, 0, 0, 0, 0, false);
        disas_set_insn_syndrome(s, syn);
    }
}
758
/* Store a GPR to memory using the current translation MMU index. */
static void do_gpr_st(DisasContext *s, TCGv_i64 source,
                      TCGv_i64 tcg_addr, int size,
                      bool iss_valid,
                      unsigned int iss_srt,
                      bool iss_sf, bool iss_ar)
{
    do_gpr_st_memidx(s, source, tcg_addr, size, get_mem_index(s),
                     iss_valid, iss_srt, iss_sf, iss_ar);
}
768
769
770
771
/* Load from memory into a general-purpose register with an explicit
 * MMU index.  is_signed selects a sign-extending load; 'extend'
 * requests the 32-bit register write semantics, so a sub-64-bit
 * sign-extended value is then zero-extended from bit 31.
 */
static void do_gpr_ld_memidx(DisasContext *s,
                             TCGv_i64 dest, TCGv_i64 tcg_addr,
                             int size, bool is_signed,
                             bool extend, int memidx,
                             bool iss_valid, unsigned int iss_srt,
                             bool iss_sf, bool iss_ar)
{
    TCGMemOp memop = s->be_data + size;

    g_assert(size <= 3);

    if (is_signed) {
        memop += MO_SIGN;
    }

    tcg_gen_qemu_ld_i64(dest, tcg_addr, memidx, memop);

    if (extend && is_signed) {
        g_assert(size < 3);
        tcg_gen_ext32u_i64(dest, dest);
    }

    if (iss_valid) {
        uint32_t syn;

        syn = syn_data_abort_with_iss(0,
                                      size,
                                      is_signed,
                                      iss_srt,
                                      iss_sf,
                                      iss_ar,
                                      0, 0, 0, 0, 0, false);
        disas_set_insn_syndrome(s, syn);
    }
}
807
/* Load into a GPR using the current translation MMU index. */
static void do_gpr_ld(DisasContext *s,
                      TCGv_i64 dest, TCGv_i64 tcg_addr,
                      int size, bool is_signed, bool extend,
                      bool iss_valid, unsigned int iss_srt,
                      bool iss_sf, bool iss_ar)
{
    do_gpr_ld_memidx(s, dest, tcg_addr, size, is_signed, extend,
                     get_mem_index(s),
                     iss_valid, iss_srt, iss_sf, iss_ar);
}
818
819
820
821
/* Store from FP register srcidx to memory; size is log2 bytes, where
 * size == 4 means a 128-bit store done as two 64-bit accesses in the
 * correct order for the target endianness.
 */
static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, int size)
{
    /* This writes the bottom N bits of a 128 bit wide vector to memory */
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_ld_i64(tmp, cpu_env, fp_reg_offset(s, srcidx, MO_64));
    if (size < 4) {
        tcg_gen_qemu_st_i64(tmp, tcg_addr, get_mem_index(s),
                            s->be_data + size);
    } else {
        bool be = s->be_data == MO_BE;
        TCGv_i64 tcg_hiaddr = tcg_temp_new_i64();

        tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
        /* low half goes to the low address (high address if BE) */
        tcg_gen_qemu_st_i64(tmp, be ? tcg_hiaddr : tcg_addr, get_mem_index(s),
                            s->be_data | MO_Q);
        tcg_gen_ld_i64(tmp, cpu_env, fp_reg_hi_offset(s, srcidx));
        tcg_gen_qemu_st_i64(tmp, be ? tcg_addr : tcg_hiaddr, get_mem_index(s),
                            s->be_data | MO_Q);
        tcg_temp_free_i64(tcg_hiaddr);
    }

    tcg_temp_free_i64(tmp);
}
845
846
847
848
/* Load from memory into FP register destidx; sub-128-bit loads zero
 * the high 64 bits (tmphi starts at 0 in that path).
 */
static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size)
{
    /* This always zero-extends and writes to a full 128 bit wide vector */
    TCGv_i64 tmplo = tcg_temp_new_i64();
    TCGv_i64 tmphi;

    if (size < 4) {
        TCGMemOp memop = s->be_data + size;
        tmphi = tcg_const_i64(0);
        tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), memop);
    } else {
        bool be = s->be_data == MO_BE;
        TCGv_i64 tcg_hiaddr;

        tmphi = tcg_temp_new_i64();
        tcg_hiaddr = tcg_temp_new_i64();

        tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
        /* low half comes from the low address (high address if BE) */
        tcg_gen_qemu_ld_i64(tmplo, be ? tcg_hiaddr : tcg_addr, get_mem_index(s),
                            s->be_data | MO_Q);
        tcg_gen_qemu_ld_i64(tmphi, be ? tcg_addr : tcg_hiaddr, get_mem_index(s),
                            s->be_data | MO_Q);
        tcg_temp_free_i64(tcg_hiaddr);
    }

    tcg_gen_st_i64(tmplo, cpu_env, fp_reg_offset(s, destidx, MO_64));
    tcg_gen_st_i64(tmphi, cpu_env, fp_reg_hi_offset(s, destidx));

    tcg_temp_free_i64(tmplo);
    tcg_temp_free_i64(tmphi);
}
880
881
882
883
884
885
886
887
888
889
890
891
892
893
/* Read one element of vector register srcidx into an i64, with the
 * extension (zero or sign, per MO_SIGN in memop) selecting the load op.
 */
static void read_vec_element(DisasContext *s, TCGv_i64 tcg_dest, int srcidx,
                             int element, TCGMemOp memop)
{
    int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_ld8u_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_ld16u_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_32:
        tcg_gen_ld32u_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_8|MO_SIGN:
        tcg_gen_ld8s_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16|MO_SIGN:
        tcg_gen_ld16s_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_32|MO_SIGN:
        tcg_gen_ld32s_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_64:
    case MO_64|MO_SIGN:
        tcg_gen_ld_i64(tcg_dest, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}
925
/* As read_vec_element(), but into a 32-bit destination (so max MO_32). */
static void read_vec_element_i32(DisasContext *s, TCGv_i32 tcg_dest, int srcidx,
                                 int element, TCGMemOp memop)
{
    int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_ld8u_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_ld16u_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_8|MO_SIGN:
        tcg_gen_ld8s_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16|MO_SIGN:
        tcg_gen_ld16s_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_32:
    case MO_32|MO_SIGN:
        tcg_gen_ld_i32(tcg_dest, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}
951
952
/* Write the low 2^size bytes of an i64 to one element of vector
 * register destidx; other elements are untouched.
 */
static void write_vec_element(DisasContext *s, TCGv_i64 tcg_src, int destidx,
                              int element, TCGMemOp memop)
{
    int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_st8_i64(tcg_src, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_st16_i64(tcg_src, cpu_env, vect_off);
        break;
    case MO_32:
        tcg_gen_st32_i64(tcg_src, cpu_env, vect_off);
        break;
    case MO_64:
        tcg_gen_st_i64(tcg_src, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}
974
/* As write_vec_element(), but from a 32-bit source (so max MO_32). */
static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src,
                                  int destidx, int element, TCGMemOp memop)
{
    int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_st8_i32(tcg_src, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_st16_i32(tcg_src, cpu_env, vect_off);
        break;
    case MO_32:
        tcg_gen_st_i32(tcg_src, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}
993
994
995
996
/* Zero the upper 64 bits of vector register rd (element 1 of MO_64),
 * as required after writes that only produce a 64-bit result.
 */
static void clear_vec_high(DisasContext *s, int rd)
{
    TCGv_i64 tcg_zero = tcg_const_i64(0);

    write_vec_element(s, tcg_zero, rd, 1, MO_64);
    tcg_temp_free_i64(tcg_zero);
}
1004
1005
/* Store a single vector element (2^size bytes) to memory. */
static void do_vec_st(DisasContext *s, int srcidx, int element,
                      TCGv_i64 tcg_addr, int size)
{
    TCGMemOp memop = s->be_data + size;
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();

    read_vec_element(s, tcg_tmp, srcidx, element, size);
    tcg_gen_qemu_st_i64(tcg_tmp, tcg_addr, get_mem_index(s), memop);

    tcg_temp_free_i64(tcg_tmp);
}
1017
1018
/* Load a single vector element (2^size bytes) from memory. */
static void do_vec_ld(DisasContext *s, int destidx, int element,
                      TCGv_i64 tcg_addr, int size)
{
    TCGMemOp memop = s->be_data + size;
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();

    tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr, get_mem_index(s), memop);
    write_vec_element(s, tcg_tmp, destidx, element, size);

    tcg_temp_free_i64(tcg_tmp);
}
1030
1031
1032
1033
1034
1035
1036
1037
/* Check whether FP/SIMD access is currently enabled.  Returns true if
 * the instruction may proceed; otherwise generates the FP access trap
 * to s->fp_excp_el and returns false.  Must be called exactly once per
 * FP instruction (see assert_fp_access_checked).
 */
static inline bool fp_access_check(DisasContext *s)
{
    assert(!s->fp_access_checked);
    s->fp_access_checked = true;

    if (!s->fp_excp_el) {
        return true;
    }

    gen_exception_insn(s, 4, EXCP_UDEF, syn_fp_access_trap(1, 0xe, false),
                       s->fp_excp_el);
    return false;
}
1051
1052
1053
1054
1055
1056
1057static void ext_and_shift_reg(TCGv_i64 tcg_out, TCGv_i64 tcg_in,
1058 int option, unsigned int shift)
1059{
1060 int extsize = extract32(option, 0, 2);
1061 bool is_signed = extract32(option, 2, 1);
1062
1063 if (is_signed) {
1064 switch (extsize) {
1065 case 0:
1066 tcg_gen_ext8s_i64(tcg_out, tcg_in);
1067 break;
1068 case 1:
1069 tcg_gen_ext16s_i64(tcg_out, tcg_in);
1070 break;
1071 case 2:
1072 tcg_gen_ext32s_i64(tcg_out, tcg_in);
1073 break;
1074 case 3:
1075 tcg_gen_mov_i64(tcg_out, tcg_in);
1076 break;
1077 }
1078 } else {
1079 switch (extsize) {
1080 case 0:
1081 tcg_gen_ext8u_i64(tcg_out, tcg_in);
1082 break;
1083 case 1:
1084 tcg_gen_ext16u_i64(tcg_out, tcg_in);
1085 break;
1086 case 2:
1087 tcg_gen_ext32u_i64(tcg_out, tcg_in);
1088 break;
1089 case 3:
1090 tcg_gen_mov_i64(tcg_out, tcg_in);
1091 break;
1092 }
1093 }
1094
1095 if (shift) {
1096 tcg_gen_shli_i64(tcg_out, tcg_out, shift);
1097 }
1098}
1099
/* SP alignment checking hook.  The architecture mandates an alignment
 * fault for misaligned SP-relative accesses when SCTLR.SA is set;
 * this implementation deliberately emits nothing (the check is not
 * emulated), but call sites mark every place the check would go so it
 * can be implemented here later.
 */
static inline void gen_check_sp_alignment(DisasContext *s)
{
}
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124static inline AArch64DecodeFn *lookup_disas_fn(const AArch64DecodeTable *table,
1125 uint32_t insn)
1126{
1127 const AArch64DecodeTable *tptr = table;
1128
1129 while (tptr->mask) {
1130 if ((insn & tptr->mask) == tptr->pattern) {
1131 return tptr->disas_fn;
1132 }
1133 tptr++;
1134 }
1135 return NULL;
1136}
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
/* Unconditional branch (immediate): B / BL.
 *  31  30..26 25..0
 *  op  0 0101 imm26      (op set = BL, link in X30)
 */
static void disas_uncond_b_imm(DisasContext *s, uint32_t insn)
{
    /* s->pc has already advanced past this insn, hence the -4 */
    uint64_t addr = s->pc + sextract32(insn, 0, 26) * 4 - 4;

    if (insn & (1U << 31)) {
        /* BL: record the return address in the link register */
        tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
    }

    gen_goto_tb(s, 0, addr);
}
1162
1163
1164
1165
1166
1167
1168
/* Compare and branch (immediate): CBZ / CBNZ.
 *  sf (bit 31) selects 32/64-bit compare, op (bit 24) selects NZ/Z.
 */
static void disas_comp_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int sf, op, rt;
    uint64_t addr;
    TCGLabel *label_match;
    TCGv_i64 tcg_cmp;

    sf = extract32(insn, 31, 1);
    op = extract32(insn, 24, 1); /* 0: CBZ; 1: CBNZ */
    rt = extract32(insn, 0, 5);
    addr = s->pc + sextract32(insn, 5, 19) * 4 - 4;

    tcg_cmp = read_cpu_reg(s, rt, sf);
    label_match = gen_new_label();

    tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
                        tcg_cmp, 0, label_match);

    /* fall-through path first, then the taken-branch target */
    gen_goto_tb(s, 0, s->pc);
    gen_set_label(label_match);
    gen_goto_tb(s, 1, addr);
}
1191
1192
1193
1194
1195
1196
1197
/* Test bit and branch (immediate): TBZ / TBNZ.
 *  bit position is b5:b40 (bits 31 and 23:19), op (bit 24) selects NZ/Z.
 */
static void disas_test_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int bit_pos, op, rt;
    uint64_t addr;
    TCGLabel *label_match;
    TCGv_i64 tcg_cmp;

    bit_pos = (extract32(insn, 31, 1) << 5) | extract32(insn, 19, 5);
    op = extract32(insn, 24, 1); /* 0: TBZ; 1: TBNZ */
    addr = s->pc + sextract32(insn, 5, 14) * 4 - 4;
    rt = extract32(insn, 0, 5);

    tcg_cmp = tcg_temp_new_i64();
    tcg_gen_andi_i64(tcg_cmp, cpu_reg(s, rt), (1ULL << bit_pos));
    label_match = gen_new_label();
    tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
                        tcg_cmp, 0, label_match);
    tcg_temp_free_i64(tcg_cmp);
    gen_goto_tb(s, 0, s->pc);
    gen_set_label(label_match);
    gen_goto_tb(s, 1, addr);
}
1220
1221
1222
1223
1224
1225
1226
/* Conditional branch (immediate): B.cond.
 *  Bits 4 and 24 must be zero; cond 0xe/0xf always branch.
 */
static void disas_cond_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int cond;
    uint64_t addr;

    if ((insn & (1 << 4)) || (insn & (1 << 24))) {
        unallocated_encoding(s);
        return;
    }
    addr = s->pc + sextract32(insn, 5, 19) * 4 - 4;
    cond = extract32(insn, 0, 4);

    if (cond < 0x0e) {
        /* genuinely conditional branches */
        TCGLabel *label_match = gen_new_label();
        arm_gen_test_cc(cond, label_match);
        gen_goto_tb(s, 0, s->pc);
        gen_set_label(label_match);
        gen_goto_tb(s, 1, addr);
    } else {
        /* 0xe and 0xf are both "always" conditions */
        gen_goto_tb(s, 0, addr);
    }
}
1251
1252
1253static void handle_hint(DisasContext *s, uint32_t insn,
1254 unsigned int op1, unsigned int op2, unsigned int crm)
1255{
1256 unsigned int selector = crm << 3 | op2;
1257
1258 if (op1 != 3) {
1259 unallocated_encoding(s);
1260 return;
1261 }
1262
1263 switch (selector) {
1264 case 0:
1265 return;
1266 case 3:
1267 s->is_jmp = DISAS_WFI;
1268 return;
1269 case 1:
1270 s->is_jmp = DISAS_YIELD;
1271 return;
1272 case 2:
1273 s->is_jmp = DISAS_WFE;
1274 return;
1275 case 4:
1276 gen_helper_sev(cpu_env);
1277 return;
1278 case 5:
1279 gen_helper_sevl(cpu_env);
1280 return;
1281 default:
1282
1283 return;
1284 }
1285}
1286
1287static void gen_clrex(DisasContext *s, uint32_t insn)
1288{
1289 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
1290 gen_helper_sev(cpu_env);
1291}
1292
1293
/* CLREX, DSB, DMB, ISB from the barrier group. */
static void handle_sync(DisasContext *s, uint32_t insn,
                        unsigned int op1, unsigned int op2, unsigned int crm)
{
    if (op1 != 3) {
        unallocated_encoding(s);
        return;
    }

    switch (op2) {
    case 2: /* CLREX */
        gen_clrex(s, insn);
        return;
    case 4: /* DSB */
    case 5: /* DMB */
        /* We don't emulate caches, so barriers are no-ops */
        return;
    case 6: /* ISB */
        /* We need to break the TB after this insn to execute
         * self-modifying code correctly and also to take
         * any pending interrupts immediately.
         */
        s->is_jmp = DISAS_UPDATE;
        return;
    default:
        unallocated_encoding(s);
        return;
    }
}
1322
1323
/* MSR (immediate) to PSTATE fields: SPSel, DAIFSet, DAIFClear. */
static void handle_msr_i(DisasContext *s, uint32_t insn,
                         unsigned int op1, unsigned int op2, unsigned int crm)
{
    int op = op1 << 3 | op2;
    switch (op) {
    case 0x05: /* SPSel */
        if (s->current_el == 0) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x1e: /* DAIFSet */
    case 0x1f: /* DAIFClear */
    {
        TCGv_i32 tcg_imm = tcg_const_i32(crm);
        TCGv_i32 tcg_op = tcg_const_i32(op);
        gen_a64_set_pc_im(s->pc - 4);
        gen_helper_msr_i_pstate(cpu_env, tcg_op, tcg_imm);
        tcg_temp_free_i32(tcg_imm);
        tcg_temp_free_i32(tcg_op);
        /* PSTATE changed: end the TB so the new state takes effect */
        s->is_jmp = DISAS_UPDATE;
        break;
    }
    default:
        unallocated_encoding(s);
        return;
    }
}
1352
/* Assemble the NZCV pseudo-register value (N=bit31, Z=bit30, C=bit29,
 * V=bit28) from the cached flag variables into tcg_rt.
 */
static void gen_get_nzcv(TCGv_i64 tcg_rt)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    TCGv_i32 nzcv = tcg_temp_new_i32();

    /* N: stored in the sign bit of cpu_NF already */
    tcg_gen_andi_i32(nzcv, cpu_NF, (1U << 31));
    /* Z: set iff cpu_ZF == 0 */
    tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_ZF, 0);
    tcg_gen_deposit_i32(nzcv, nzcv, tmp, 30, 1);
    /* C: cpu_CF is already 0 or 1 */
    tcg_gen_deposit_i32(nzcv, nzcv, cpu_CF, 29, 1);
    /* V: stored in the sign bit of cpu_VF */
    tcg_gen_shri_i32(tmp, cpu_VF, 31);
    tcg_gen_deposit_i32(nzcv, nzcv, tmp, 28, 1);

    tcg_gen_extu_i32_i64(tcg_rt, nzcv);

    tcg_temp_free_i32(nzcv);
    tcg_temp_free_i32(tmp);
}
1374
/* Scatter an NZCV pseudo-register value from tcg_rt back into the
 * cached flag variables (inverse of gen_get_nzcv).
 */
static void gen_set_nzcv(TCGv_i64 tcg_rt)
{
    TCGv_i32 nzcv = tcg_temp_new_i32();

    /* take NZCV from bits 31..28 of the low word */
    tcg_gen_extrl_i64_i32(nzcv, tcg_rt);

    /* N: keep the sign bit */
    tcg_gen_andi_i32(cpu_NF, nzcv, (1U << 31));
    /* Z: cpu_ZF must be zero iff Z was set, hence the inverted setcond */
    tcg_gen_andi_i32(cpu_ZF, nzcv, (1 << 30));
    tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_ZF, cpu_ZF, 0);
    /* C: normalize to 0/1 */
    tcg_gen_andi_i32(cpu_CF, nzcv, (1 << 29));
    tcg_gen_shri_i32(cpu_CF, cpu_CF, 29);
    /* V: move bit 28 up to the sign bit */
    tcg_gen_andi_i32(cpu_VF, nzcv, (1 << 28));
    tcg_gen_shli_i32(cpu_VF, cpu_VF, 3);
    tcg_temp_free_i32(nzcv);
}
1396
1397
1398
1399
1400
1401
1402
1403
/* MRS - move from system register
 * MSR (register) - move to system register
 * SYS
 * SYSL
 * These are all essentially the same insn in 'read' and 'write'
 * versions, with varying op0 fields.  The encoding fields
 * (op0/op1/op2/crn/crm/rt) have already been extracted by the caller.
 */
static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
                       unsigned int op0, unsigned int op1, unsigned int op2,
                       unsigned int crn, unsigned int crm, unsigned int rt)
{
    const ARMCPRegInfo *ri;
    TCGv_i64 tcg_rt;

    ri = get_arm_cp_reginfo(s->cp_regs,
                            ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP,
                                               crn, crm, op0, op1, op2));

    if (!ri) {
        /* Unknown register; this might be a guest error or a QEMU
         * unimplemented feature, so log it before UNDEFing.
         */
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch64 "
                      "system register op0:%d op1:%d crn:%d crm:%d op2:%d\n",
                      isread ? "read" : "write", op0, op1, crn, crm, op2);
        unallocated_encoding(s);
        return;
    }

    /* Check access permissions that are statically known at translate time */
    if (!cp_access_ok(s->current_el, ri, isread)) {
        unallocated_encoding(s);
        return;
    }

    if (ri->accessfn) {
        /* Emit code to perform further access permission checks at
         * runtime; this may result in an exception.
         */
        TCGv_ptr tmpptr;
        TCGv_i32 tcg_syn, tcg_isread;
        uint32_t syndrome;

        gen_a64_set_pc_im(s->pc - 4);
        tmpptr = tcg_const_ptr(ri);
        syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
        tcg_syn = tcg_const_i32(syndrome);
        tcg_isread = tcg_const_i32(isread);
        gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn, tcg_isread);
        tcg_temp_free_ptr(tmpptr);
        tcg_temp_free_i32(tcg_syn);
        tcg_temp_free_i32(tcg_isread);
    }

    /* Handle special cases first */
    switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
    case ARM_CP_NOP:
        return;
    case ARM_CP_NZCV:
        tcg_rt = cpu_reg(s, rt);
        if (isread) {
            gen_get_nzcv(tcg_rt);
        } else {
            gen_set_nzcv(tcg_rt);
        }
        return;
    case ARM_CP_CURRENTEL:
        /* CurrentEL: the EL is known at translate time, so the value
         * (EL shifted into bits [3:2]) can be emitted as a constant.
         */
        tcg_rt = cpu_reg(s, rt);
        tcg_gen_movi_i64(tcg_rt, s->current_el << 2);
        return;
    case ARM_CP_DC_ZVA:
        /* DC ZVA: writes zero the block of memory that rt points into */
        tcg_rt = cpu_reg(s, rt);
        gen_helper_dc_zva(cpu_env, tcg_rt);
        return;
    default:
        break;
    }

    if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
        gen_io_start();
    }

    tcg_rt = cpu_reg(s, rt);

    if (isread) {
        if (ri->type & ARM_CP_CONST) {
            tcg_gen_movi_i64(tcg_rt, ri->resetvalue);
        } else if (ri->readfn) {
            TCGv_ptr tmpptr;
            tmpptr = tcg_const_ptr(ri);
            gen_helper_get_cp_reg64(tcg_rt, cpu_env, tmpptr);
            tcg_temp_free_ptr(tmpptr);
        } else {
            tcg_gen_ld_i64(tcg_rt, cpu_env, ri->fieldoffset);
        }
    } else {
        if (ri->type & ARM_CP_CONST) {
            /* If not forbidden by access permissions, treat as WI (write
             * ignored).
             */
            return;
        } else if (ri->writefn) {
            TCGv_ptr tmpptr;
            tmpptr = tcg_const_ptr(ri);
            gen_helper_set_cp_reg64(cpu_env, tmpptr, tcg_rt);
            tcg_temp_free_ptr(tmpptr);
        } else {
            tcg_gen_st_i64(tcg_rt, cpu_env, ri->fieldoffset);
        }
    }

    if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
        /* I/O operations must end the TB here (access may change state) */
        gen_io_end();
        s->is_jmp = DISAS_UPDATE;
    } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
        /* We default to ending the TB on a system register write,
         * since its effects may change subsequent translation; the
         * register definition can suppress this where safe.
         */
        s->is_jmp = DISAS_UPDATE;
    }
}
1522
1523
1524
1525
1526
1527
1528
1529static void disas_system(DisasContext *s, uint32_t insn)
1530{
1531 unsigned int l, op0, op1, crn, crm, op2, rt;
1532 l = extract32(insn, 21, 1);
1533 op0 = extract32(insn, 19, 2);
1534 op1 = extract32(insn, 16, 3);
1535 crn = extract32(insn, 12, 4);
1536 crm = extract32(insn, 8, 4);
1537 op2 = extract32(insn, 5, 3);
1538 rt = extract32(insn, 0, 5);
1539
1540 if (op0 == 0) {
1541 if (l || rt != 31) {
1542 unallocated_encoding(s);
1543 return;
1544 }
1545 switch (crn) {
1546 case 2:
1547 handle_hint(s, insn, op1, op2, crm);
1548 break;
1549 case 3:
1550 handle_sync(s, insn, op1, op2, crm);
1551 break;
1552 case 4:
1553 handle_msr_i(s, insn, op1, op2, crm);
1554 break;
1555 default:
1556 unallocated_encoding(s);
1557 break;
1558 }
1559 return;
1560 }
1561 handle_sys(s, insn, l, op0, op1, op2, crn, crm, rt);
1562}
1563
1564
1565
1566
1567
1568
1569
1570
/* Exception generation
 *
 *  31             24 23 21 20                     5 4   2 1  0
 * +-----------------+-----+------------------------+-----+----+
 * | 1 1 0 1 0 1 0 0 | opc |          imm16         | op2 | LL |
 * +-----------------------+------------------------+----------+
 */
static void disas_exc(DisasContext *s, uint32_t insn)
{
    int opc = extract32(insn, 21, 3);
    int op2_ll = extract32(insn, 0, 5);
    int imm16 = extract32(insn, 5, 16);
    TCGv_i32 tmp;

    switch (opc) {
    case 0:
        /* For SVC, HVC and SMC we advance the single-step state
         * machine before taking the exception, so that single-stepping
         * a system call instruction works correctly.
         */
        switch (op2_ll) {
        case 1: /* SVC */
            gen_ss_advance(s);
            gen_exception_insn(s, 0, EXCP_SWI, syn_aa64_svc(imm16),
                               default_exception_el(s));
            break;
        case 2: /* HVC */
            if (s->current_el == 0) {
                unallocated_encoding(s);
                break;
            }
            /* The pre-HVC helper handles cases where HVC gets trapped
             * as an undefined insn by runtime configuration.
             */
            gen_a64_set_pc_im(s->pc - 4);
            gen_helper_pre_hvc(cpu_env);
            gen_ss_advance(s);
            gen_exception_insn(s, 0, EXCP_HVC, syn_aa64_hvc(imm16), 2);
            break;
        case 3: /* SMC */
            if (s->current_el == 0) {
                unallocated_encoding(s);
                break;
            }
            gen_a64_set_pc_im(s->pc - 4);
            tmp = tcg_const_i32(syn_aa64_smc(imm16));
            gen_helper_pre_smc(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            gen_ss_advance(s);
            gen_exception_insn(s, 0, EXCP_SMC, syn_aa64_smc(imm16), 3);
            break;
        default:
            unallocated_encoding(s);
            break;
        }
        break;
    case 1: /* BRK */
        if (op2_ll != 0) {
            unallocated_encoding(s);
            break;
        }
        /* BRK #imm16 raises a breakpoint exception */
        gen_exception_insn(s, 4, EXCP_BKPT, syn_aa64_bkpt(imm16),
                           default_exception_el(s));
        break;
    case 2: /* HLT */
        if (op2_ll != 0) {
            unallocated_encoding(s);
            break;
        }
        /* HLT is an external-debug halting instruction, which we do not
         * implement, except that "HLT 0xf000" is also the A64
         * semihosting syscall instruction when semihosting is enabled;
         * otherwise it falls through to unsupported_encoding().
         */
        if (semihosting_enabled() && imm16 == 0xf000) {
#ifndef CONFIG_USER_ONLY
            /* In system mode, don't allow EL0 access to semihosting,
             * to provide some semblance of security.
             */
            if (s->current_el == 0) {
                unsupported_encoding(s, insn);
                break;
            }
#endif
            gen_exception_internal_insn(s, 0, EXCP_SEMIHOST);
        } else {
            unsupported_encoding(s, insn);
        }
        break;
    case 5:
        if (op2_ll < 1 || op2_ll > 3) {
            unallocated_encoding(s);
            break;
        }
        /* DCPS1, DCPS2, DCPS3 */
        unsupported_encoding(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
1670
1671
1672
1673
1674
1675
1676
1677static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
1678{
1679 unsigned int opc, op2, op3, rn, op4;
1680
1681 opc = extract32(insn, 21, 4);
1682 op2 = extract32(insn, 16, 5);
1683 op3 = extract32(insn, 10, 6);
1684 rn = extract32(insn, 5, 5);
1685 op4 = extract32(insn, 0, 5);
1686
1687 if (op4 != 0x0 || op3 != 0x0 || op2 != 0x1f) {
1688 unallocated_encoding(s);
1689 return;
1690 }
1691
1692 switch (opc) {
1693 case 0:
1694 case 2:
1695 tcg_gen_mov_i64(cpu_pc, cpu_reg(s, rn));
1696 break;
1697 case 1:
1698 tcg_gen_mov_i64(cpu_pc, cpu_reg(s, rn));
1699 tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
1700 break;
1701 case 4:
1702 if (s->current_el == 0) {
1703 unallocated_encoding(s);
1704 return;
1705 }
1706 gen_helper_exception_return(cpu_env);
1707 s->is_jmp = DISAS_JUMP;
1708 return;
1709 case 5:
1710 if (rn != 0x1f) {
1711 unallocated_encoding(s);
1712 } else {
1713 unsupported_encoding(s, insn);
1714 }
1715 return;
1716 default:
1717 unallocated_encoding(s);
1718 return;
1719 }
1720
1721 s->is_jmp = DISAS_JUMP;
1722}
1723
1724
1725static void disas_b_exc_sys(DisasContext *s, uint32_t insn)
1726{
1727 switch (extract32(insn, 25, 7)) {
1728 case 0x0a: case 0x0b:
1729 case 0x4a: case 0x4b:
1730 disas_uncond_b_imm(s, insn);
1731 break;
1732 case 0x1a: case 0x5a:
1733 disas_comp_b_imm(s, insn);
1734 break;
1735 case 0x1b: case 0x5b:
1736 disas_test_b_imm(s, insn);
1737 break;
1738 case 0x2a:
1739 disas_cond_b_imm(s, insn);
1740 break;
1741 case 0x6a:
1742 if (insn & (1 << 24)) {
1743 disas_system(s, insn);
1744 } else {
1745 disas_exc(s, insn);
1746 }
1747 break;
1748 case 0x6b:
1749 disas_uncond_b_reg(s, insn);
1750 break;
1751 default:
1752 unallocated_encoding(s);
1753 break;
1754 }
1755}
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
/* Emit a load-exclusive: load into Rt (and Rt2 for a pair), and latch
 * the address and the loaded value(s) into cpu_exclusive_addr,
 * cpu_exclusive_val and (for pairs) cpu_exclusive_high, so that a
 * subsequent store-exclusive can check for intervening changes.
 */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv_i64 addr, int size, bool is_pair)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    /* combine access size with the target endianness flag */
    TCGMemOp memop = s->be_data + size;

    g_assert(size <= 3);
    tcg_gen_qemu_ld_i64(tmp, addr, get_mem_index(s), memop);

    if (is_pair) {
        TCGv_i64 addr2 = tcg_temp_new_i64();
        TCGv_i64 hitmp = tcg_temp_new_i64();

        /* pairs are only valid for 32/64-bit element sizes */
        g_assert(size >= 2);
        /* the second element lives 1 << size bytes beyond the first */
        tcg_gen_addi_i64(addr2, addr, 1 << size);
        tcg_gen_qemu_ld_i64(hitmp, addr2, get_mem_index(s), memop);
        tcg_temp_free_i64(addr2);
        tcg_gen_mov_i64(cpu_exclusive_high, hitmp);
        tcg_gen_mov_i64(cpu_reg(s, rt2), hitmp);
        tcg_temp_free_i64(hitmp);
    }

    tcg_gen_mov_i64(cpu_exclusive_val, tmp);
    tcg_gen_mov_i64(cpu_reg(s, rt), tmp);

    tcg_temp_free_i64(tmp);
    tcg_gen_mov_i64(cpu_exclusive_addr, addr);
}
1796
1797#ifdef CONFIG_USER_ONLY
/* User-only store-exclusive: defer the compare-and-store to the
 * EXCP_STREX handler outside the translator, packing the access size,
 * pair flag and register numbers into cpu_exclusive_info.
 */
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i64 addr, int size, int is_pair)
{
    tcg_gen_mov_i64(cpu_exclusive_test, addr);
    /* layout: size[1:0] | is_pair[2] | rd[8:4] | rt[13:9] | rt2[18:14] */
    tcg_gen_movi_i32(cpu_exclusive_info,
                     size | is_pair << 2 | (rd << 4) | (rt << 9) | (rt2 << 14));
    gen_exception_internal_insn(s, 4, EXCP_STREX);
}
1806#else
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i64 inaddr, int size, int is_pair)
{
    /* Emits code equivalent to:
     *  if (env->exclusive_addr == addr && env->exclusive_val == [addr]
     *      && (!is_pair || env->exclusive_high == [addr + datasize])) {
     *      [addr] = {Rt};
     *      if (is_pair) {
     *          [addr + datasize] = {Rt2};
     *      }
     *      {Rd} = 0;
     *  } else {
     *      {Rd} = 1;
     *  }
     *  env->exclusive_addr = -1;
     */
    TCGLabel *fail_label = gen_new_label();
    TCGLabel *done_label = gen_new_label();
    /* local temp: must survive the brcond branches below */
    TCGv_i64 addr = tcg_temp_local_new_i64();
    TCGv_i64 tmp;

    /* Copy input into a local temp so it is not affected by
     * a cancelling STREX.
     */
    tcg_gen_mov_i64(addr, inaddr);
    tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);

    /* re-load and compare against the value seen by the load-exclusive */
    tmp = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(tmp, addr, get_mem_index(s), s->be_data + size);
    tcg_gen_brcond_i64(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
    tcg_temp_free_i64(tmp);

    if (is_pair) {
        TCGv_i64 addrhi = tcg_temp_new_i64();
        TCGv_i64 tmphi = tcg_temp_new_i64();

        tcg_gen_addi_i64(addrhi, addr, 1 << size);
        tcg_gen_qemu_ld_i64(tmphi, addrhi, get_mem_index(s),
                            s->be_data + size);
        tcg_gen_brcond_i64(TCG_COND_NE, tmphi, cpu_exclusive_high, fail_label);

        tcg_temp_free_i64(tmphi);
        tcg_temp_free_i64(addrhi);
    }

    /* We seem to still have the exclusive monitor, so do the store */
    tcg_gen_qemu_st_i64(cpu_reg(s, rt), addr, get_mem_index(s),
                        s->be_data + size);
    if (is_pair) {
        TCGv_i64 addrhi = tcg_temp_new_i64();

        tcg_gen_addi_i64(addrhi, addr, 1 << size);
        tcg_gen_qemu_st_i64(cpu_reg(s, rt2), addrhi,
                            get_mem_index(s), s->be_data + size);
        tcg_temp_free_i64(addrhi);
    }

    tcg_temp_free_i64(addr);

    /* success: Rd = 0; failure: Rd = 1; either way clear the monitor */
    tcg_gen_movi_i64(cpu_reg(s, rd), 0);
    tcg_gen_br(done_label);
    gen_set_label(fail_label);
    tcg_gen_movi_i64(cpu_reg(s, rd), 1);
    gen_set_label(done_label);
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);

    gen_helper_sev(cpu_env);
}
1874#endif
1875
1876
1877
1878
/* Compute the ISS.SF bit for a load/store syndrome: true when the
 * register being transferred is treated as 64 bits wide.
 * Sign-extending loads use opc bit 0 to pick 32 vs 64-bit regsize;
 * all other accesses are 64-bit only for size == 3 (doubleword).
 */
static bool disas_ldst_compute_iss_sf(int size, bool is_signed, int opc)
{
    if (is_signed) {
        /* opc<0> set -> 32-bit destination, clear -> 64-bit */
        return (opc & 1) == 0;
    }
    return size == 3;
}
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904
/* Load/store exclusive
 *
 *  31 30 29         24  23  22   21  20  16  15  14   10 9    5 4    0
 * +-----+-------------+----+---+----+------+----+-------+------+------+
 * | sz  | 0 0 1 0 0 0 | o2 | L | o1 |  Rs  | o0 |  Rt2  |  Rn  |  Rt  |
 * +-----+-------------+----+---+----+------+----+-------+------+------+
 *
 *  sz: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64 bit
 *   L: 0 -> store, 1 -> load
 *  o2: 0 -> exclusive, 1 -> not
 *  o1: 0 -> single register, 1 -> register pair
 *  o0: 1 -> load-acquire/store-release, 0 -> not
 */
static void disas_ldst_excl(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rt2 = extract32(insn, 10, 5);
    int is_lasr = extract32(insn, 15, 1);
    int rs = extract32(insn, 16, 5);
    int is_pair = extract32(insn, 21, 1);
    int is_store = !extract32(insn, 22, 1);
    int is_excl = !extract32(insn, 23, 1);
    int size = extract32(insn, 30, 2);
    TCGv_i64 tcg_addr;

    /* reject: non-exclusive without acquire/release or pair semantics,
     * non-exclusive pairs, and pairs narrower than 32 bits
     */
    if ((!is_excl && !is_pair && !is_lasr) ||
        (!is_excl && is_pair) ||
        (is_pair && size < 2)) {
        unallocated_encoding(s);
        return;
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    tcg_addr = read_cpu_reg_sp(s, rn, 1);

    /* Note that since TCG is single threaded load-acquire/store-release
     * semantics require no extra if (is_lasr) { ... } handling.
     */
    if (is_excl) {
        if (!is_store) {
            s->is_ldex = true;
            gen_load_exclusive(s, rt, rt2, tcg_addr, size, is_pair);
        } else {
            gen_store_exclusive(s, rs, rt, rt2, tcg_addr, size, is_pair);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        bool iss_sf = disas_ldst_compute_iss_sf(size, false, 0);

        /* Generate ISS for the non-exclusive acquire/release accesses */
        if (is_store) {
            do_gpr_st(s, tcg_rt, tcg_addr, size,
                      true, rt, iss_sf, is_lasr);
        } else {
            do_gpr_ld(s, tcg_rt, tcg_addr, size, false, false,
                      true, rt, iss_sf, is_lasr);
        }
    }
}
1955
1956
1957
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968
1969static void disas_ld_lit(DisasContext *s, uint32_t insn)
1970{
1971 int rt = extract32(insn, 0, 5);
1972 int64_t imm = sextract32(insn, 5, 19) << 2;
1973 bool is_vector = extract32(insn, 26, 1);
1974 int opc = extract32(insn, 30, 2);
1975 bool is_signed = false;
1976 int size = 2;
1977 TCGv_i64 tcg_rt, tcg_addr;
1978
1979 if (is_vector) {
1980 if (opc == 3) {
1981 unallocated_encoding(s);
1982 return;
1983 }
1984 size = 2 + opc;
1985 if (!fp_access_check(s)) {
1986 return;
1987 }
1988 } else {
1989 if (opc == 3) {
1990
1991 return;
1992 }
1993 size = 2 + extract32(opc, 0, 1);
1994 is_signed = extract32(opc, 1, 1);
1995 }
1996
1997 tcg_rt = cpu_reg(s, rt);
1998
1999 tcg_addr = tcg_const_i64((s->pc - 4) + imm);
2000 if (is_vector) {
2001 do_fp_ld(s, rt, tcg_addr, size);
2002 } else {
2003
2004 bool iss_sf = opc == 0 ? 32 : 64;
2005
2006 do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, false,
2007 true, rt, iss_sf, false);
2008 }
2009 tcg_temp_free_i64(tcg_addr);
2010}
2011
2012
2013
2014
2015
2016
2017
2018
2019
2020
2021
2022
2023
2024
2025
2026
2027
2028
2029
2030
2031
2032
2033
2034
2035
2036
2037
2038
2039
/* Load/store pair (all forms)
 *
 * 31 30 29   27  26  25 24   23  22 21   15 14   10 9    5 4    0
 * +-----+-------+---+-------+---+-----------+-------+------+------+
 * | opc | 1 0 1 | V | index | L |   imm7    |  Rt2  |  Rn  |  Rt  |
 * +-----+-------+---+-------+---+-----------+-------+------+------+
 *
 * opc: LDP/STP/LDNP/STNP        00 -> 32 bit, 10 -> 64 bit
 *      LDPSW                    01
 *      LDP/STP/LDNP/STNP (SIMD) 00 -> 32 bit, 01 -> 64 bit, 10 -> 128 bit
 *   V: 0 -> GPR, 1 -> Vector
 * idx: 00 -> signed offset with non-temporal hint, 01 -> post-index,
 *      10 -> signed offset, 11 -> pre-index with writeback
 *   L: 0 -> Store, 1 -> Load
 *
 * Rt, Rt2 = GPR or SIMD registers to be stored
 * Rn = base address register (SP for 31)
 * imm7 = signed offset (scaled by the access size)
 */
static void disas_ldst_pair(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rt2 = extract32(insn, 10, 5);
    uint64_t offset = sextract64(insn, 15, 7);
    int index = extract32(insn, 23, 2);
    bool is_vector = extract32(insn, 26, 1);
    bool is_load = extract32(insn, 22, 1);
    int opc = extract32(insn, 30, 2);

    bool is_signed = false;
    bool postindex = false;
    bool wback = false;

    TCGv_i64 tcg_addr;
    int size;

    if (opc == 3) {
        unallocated_encoding(s);
        return;
    }

    if (is_vector) {
        size = 2 + opc;
    } else {
        size = 2 + extract32(opc, 1, 1);
        is_signed = extract32(opc, 0, 1);
        /* LDPSW exists, but there is no store-pair signed variant */
        if (!is_load && is_signed) {
            unallocated_encoding(s);
            return;
        }
    }

    switch (index) {
    case 1: /* post-index */
        postindex = true;
        wback = true;
        break;
    case 0:
        /* signed offset with "non-temporal" hint. Since we don't emulate
         * caches we don't care about hints to the cache system about
         * data access patterns, and handle this identically to plain
         * signed offset.
         */
        if (is_signed) {
            /* There is no non-temporal-hint version of LDPSW */
            unallocated_encoding(s);
            return;
        }
        postindex = false;
        break;
    case 2: /* signed offset, rn not updated */
        postindex = false;
        break;
    case 3: /* pre-index */
        postindex = false;
        wback = true;
        break;
    }

    if (is_vector && !fp_access_check(s)) {
        return;
    }

    /* imm7 is scaled by the access size */
    offset <<= size;

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    tcg_addr = read_cpu_reg_sp(s, rn, 1);

    if (!postindex) {
        tcg_gen_addi_i64(tcg_addr, tcg_addr, offset);
    }

    /* transfer the first register of the pair */
    if (is_vector) {
        if (is_load) {
            do_fp_ld(s, rt, tcg_addr, size);
        } else {
            do_fp_st(s, rt, tcg_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        if (is_load) {
            do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, false,
                      false, 0, false, false);
        } else {
            do_gpr_st(s, tcg_rt, tcg_addr, size,
                      false, 0, false, false);
        }
    }
    /* ...then the second, 1 << size bytes further on */
    tcg_gen_addi_i64(tcg_addr, tcg_addr, 1 << size);
    if (is_vector) {
        if (is_load) {
            do_fp_ld(s, rt2, tcg_addr, size);
        } else {
            do_fp_st(s, rt2, tcg_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt2 = cpu_reg(s, rt2);
        if (is_load) {
            do_gpr_ld(s, tcg_rt2, tcg_addr, size, is_signed, false,
                      false, 0, false, false);
        } else {
            do_gpr_st(s, tcg_rt2, tcg_addr, size,
                      false, 0, false, false);
        }
    }

    if (wback) {
        /* undo the advance past the second element, then apply the
         * post-index offset if any, before writing back to Rn
         */
        if (postindex) {
            tcg_gen_addi_i64(tcg_addr, tcg_addr, offset - (1 << size));
        } else {
            tcg_gen_subi_i64(tcg_addr, tcg_addr, 1 << size);
        }
        tcg_gen_mov_i64(cpu_reg_sp(s, rn), tcg_addr);
    }
}
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176
2177static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn)
2178{
2179 int rt = extract32(insn, 0, 5);
2180 int rn = extract32(insn, 5, 5);
2181 int imm9 = sextract32(insn, 12, 9);
2182 int opc = extract32(insn, 22, 2);
2183 int size = extract32(insn, 30, 2);
2184 int idx = extract32(insn, 10, 2);
2185 bool is_signed = false;
2186 bool is_store = false;
2187 bool is_extended = false;
2188 bool is_unpriv = (idx == 2);
2189 bool is_vector = extract32(insn, 26, 1);
2190 bool iss_valid = !is_vector;
2191 bool post_index;
2192 bool writeback;
2193
2194 TCGv_i64 tcg_addr;
2195
2196 if (is_vector) {
2197 size |= (opc & 2) << 1;
2198 if (size > 4 || is_unpriv) {
2199 unallocated_encoding(s);
2200 return;
2201 }
2202 is_store = ((opc & 1) == 0);
2203 if (!fp_access_check(s)) {
2204 return;
2205 }
2206 } else {
2207 if (size == 3 && opc == 2) {
2208
2209 if (is_unpriv) {
2210 unallocated_encoding(s);
2211 return;
2212 }
2213 return;
2214 }
2215 if (opc == 3 && size > 1) {
2216 unallocated_encoding(s);
2217 return;
2218 }
2219 is_store = (opc == 0);
2220 is_signed = opc & (1<<1);
2221 is_extended = (size < 3) && (opc & 1);
2222 }
2223
2224 switch (idx) {
2225 case 0:
2226 case 2:
2227 post_index = false;
2228 writeback = false;
2229 break;
2230 case 1:
2231 post_index = true;
2232 writeback = true;
2233 break;
2234 case 3:
2235 post_index = false;
2236 writeback = true;
2237 break;
2238 }
2239
2240 if (rn == 31) {
2241 gen_check_sp_alignment(s);
2242 }
2243 tcg_addr = read_cpu_reg_sp(s, rn, 1);
2244
2245 if (!post_index) {
2246 tcg_gen_addi_i64(tcg_addr, tcg_addr, imm9);
2247 }
2248
2249 if (is_vector) {
2250 if (is_store) {
2251 do_fp_st(s, rt, tcg_addr, size);
2252 } else {
2253 do_fp_ld(s, rt, tcg_addr, size);
2254 }
2255 } else {
2256 TCGv_i64 tcg_rt = cpu_reg(s, rt);
2257 int memidx = is_unpriv ? get_a64_user_mem_index(s) : get_mem_index(s);
2258 bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
2259
2260 if (is_store) {
2261 do_gpr_st_memidx(s, tcg_rt, tcg_addr, size, memidx,
2262 iss_valid, rt, iss_sf, false);
2263 } else {
2264 do_gpr_ld_memidx(s, tcg_rt, tcg_addr, size,
2265 is_signed, is_extended, memidx,
2266 iss_valid, rt, iss_sf, false);
2267 }
2268 }
2269
2270 if (writeback) {
2271 TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
2272 if (post_index) {
2273 tcg_gen_addi_i64(tcg_addr, tcg_addr, imm9);
2274 }
2275 tcg_gen_mov_i64(tcg_rn, tcg_addr);
2276 }
2277}
2278
2279
2280
2281
2282
2283
2284
2285
2286
2287
2288
2289
2290
2291
2292
2293
2294
2295
2296
2297
2298
2299
/* Load/store register (register offset)
 *
 * 31 30 29   27  26 25 24 23 22 21  20  16 15 13 12 11 10 9  5 4  0
 * +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+
 * |size| 1 1 1 | V | 0 0 | opc | 1 |  Rm  | opt | S| 1 0 | Rn | Rt |
 * +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+
 *
 * For non-vector:
 *   size: 00 -> byte, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64 bit
 *   opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
 * For vector:
 *   size is opc<1>:size<1:0> so 100 -> 128 bit; 110 and 111 unallocated
 *   opc<0>: 0 -> store, 1 -> load
 * V: 1 -> vector/simd
 * opt: extend encoding (see ext_and_shift_reg)
 * S: if S=1 then scale the offset by the access size
 * Rt: register to transfer into/out of
 * Rn: base address register (SP for 31)
 * Rm: offset register
 */
static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int shift = extract32(insn, 12, 1);
    int rm = extract32(insn, 16, 5);
    int opc = extract32(insn, 22, 2);
    int opt = extract32(insn, 13, 3);
    int size = extract32(insn, 30, 2);
    bool is_signed = false;
    bool is_store = false;
    bool is_extended = false;
    bool is_vector = extract32(insn, 26, 1);

    TCGv_i64 tcg_rm;
    TCGv_i64 tcg_addr;

    /* option<1> must be set for a valid extend type */
    if (extract32(opt, 1, 1) == 0) {
        unallocated_encoding(s);
        return;
    }

    if (is_vector) {
        size |= (opc & 2) << 1;
        if (size > 4) {
            unallocated_encoding(s);
            return;
        }
        is_store = !extract32(opc, 0, 1);
        if (!fp_access_check(s)) {
            return;
        }
    } else {
        if (size == 3 && opc == 2) {
            /* PRFM - prefetch: modelled as a no-op */
            return;
        }
        if (opc == 3 && size > 1) {
            unallocated_encoding(s);
            return;
        }
        is_store = (opc == 0);
        is_signed = extract32(opc, 1, 1);
        is_extended = (size < 3) && extract32(opc, 0, 1);
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    tcg_addr = read_cpu_reg_sp(s, rn, 1);

    /* extend/shift the offset register per opt and S, then add it in */
    tcg_rm = read_cpu_reg(s, rm, 1);
    ext_and_shift_reg(tcg_rm, tcg_rm, opt, shift ? size : 0);

    tcg_gen_add_i64(tcg_addr, tcg_addr, tcg_rm);

    if (is_vector) {
        if (is_store) {
            do_fp_st(s, rt, tcg_addr, size);
        } else {
            do_fp_ld(s, rt, tcg_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
        if (is_store) {
            do_gpr_st(s, tcg_rt, tcg_addr, size,
                      true, rt, iss_sf, false);
        } else {
            do_gpr_ld(s, tcg_rt, tcg_addr, size,
                      is_signed, is_extended,
                      true, rt, iss_sf, false);
        }
    }
}
2375
2376
2377
2378
2379
2380
2381
2382
2383
2384
2385
2386
2387
2388
2389
2390
2391
2392
/* Load/store register (unsigned immediate)
 *
 * 31 30 29   27  26 25 24 23 22 21        10 9     5 4    0
 * +----+-------+---+-----+-----+------------+-------+------+
 * |size| 1 1 1 | V | 0 1 | opc |   imm12    |  Rn   |  Rt  |
 * +----+-------+---+-----+-----+------------+-------+------+
 *
 * For non-vector:
 *   size: 00 -> byte, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64 bit
 *   opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
 * For vector:
 *   size is opc<1>:size<1:0> so 100 -> 128 bit; 110 and 111 unallocated
 *   opc<0>: 0 -> store, 1 -> load
 * Rn: base address register (SP for 31)
 * Rt: target register
 */
static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    unsigned int imm12 = extract32(insn, 10, 12);
    bool is_vector = extract32(insn, 26, 1);
    int size = extract32(insn, 30, 2);
    int opc = extract32(insn, 22, 2);
    unsigned int offset;

    TCGv_i64 tcg_addr;

    bool is_store;
    bool is_signed = false;
    bool is_extended = false;

    if (is_vector) {
        size |= (opc & 2) << 1;
        if (size > 4) {
            unallocated_encoding(s);
            return;
        }
        is_store = !extract32(opc, 0, 1);
        if (!fp_access_check(s)) {
            return;
        }
    } else {
        if (size == 3 && opc == 2) {
            /* PRFM - prefetch: modelled as a no-op */
            return;
        }
        if (opc == 3 && size > 1) {
            unallocated_encoding(s);
            return;
        }
        is_store = (opc == 0);
        is_signed = extract32(opc, 1, 1);
        is_extended = (size < 3) && extract32(opc, 0, 1);
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    tcg_addr = read_cpu_reg_sp(s, rn, 1);
    /* the unsigned immediate is scaled by the access size */
    offset = imm12 << size;
    tcg_gen_addi_i64(tcg_addr, tcg_addr, offset);

    if (is_vector) {
        if (is_store) {
            do_fp_st(s, rt, tcg_addr, size);
        } else {
            do_fp_ld(s, rt, tcg_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
        if (is_store) {
            do_gpr_st(s, tcg_rt, tcg_addr, size,
                      true, rt, iss_sf, false);
        } else {
            do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, is_extended,
                      true, rt, iss_sf, false);
        }
    }
}
2458
2459
2460static void disas_ldst_reg(DisasContext *s, uint32_t insn)
2461{
2462 switch (extract32(insn, 24, 2)) {
2463 case 0:
2464 if (extract32(insn, 21, 1) == 1 && extract32(insn, 10, 2) == 2) {
2465 disas_ldst_reg_roffset(s, insn);
2466 } else {
2467
2468
2469
2470
2471 disas_ldst_reg_imm9(s, insn);
2472 }
2473 break;
2474 case 1:
2475 disas_ldst_reg_unsigned_imm(s, insn);
2476 break;
2477 default:
2478 unallocated_encoding(s);
2479 break;
2480 }
2481}
2482
2483
2484
2485
2486
2487
2488
2489
2490
2491
2492
2493
2494
2495
2496
2497
2498
2499
2500
/* AdvSIMD load/store multiple structures
 *
 *  31  30  29           23 22  21         16 15    12 11  10 9    5 4    0
 * +---+---+---------------+---+-------------+--------+------+------+------+
 * | 0 | Q | 0 0 1 1 0 0 0 | L | 0 0 0 0 0 0 | opcode | size |  Rn  |  Rt  |
 * +---+---+---------------+---+-------------+--------+------+------+------+
 *
 * (the post-indexed form additionally carries Rm in bits [20:16])
 *
 * Rt: first (or only) SIMD&FP register to be transferred
 * Rn: base address or SP
 * Rm (post-index only): post-index register (when !31) or size dependent #imm
 */
static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int size = extract32(insn, 10, 2);
    int opcode = extract32(insn, 12, 4);
    bool is_store = !extract32(insn, 22, 1);
    bool is_postidx = extract32(insn, 23, 1);
    bool is_q = extract32(insn, 30, 1);
    TCGv_i64 tcg_addr, tcg_rn;

    int ebytes = 1 << size;
    int elements = (is_q ? 128 : 64) / (8 << size);
    int rpt;    /* num iterations */
    int selem;  /* structure elements */
    int r;

    if (extract32(insn, 31, 1) || extract32(insn, 21, 1)) {
        unallocated_encoding(s);
        return;
    }

    /* From the shared decode logic */
    switch (opcode) {
    case 0x0: /* LD4/ST4 */
        rpt = 1;
        selem = 4;
        break;
    case 0x2: /* LD1/ST1 (4 registers) */
        rpt = 4;
        selem = 1;
        break;
    case 0x4: /* LD3/ST3 */
        rpt = 1;
        selem = 3;
        break;
    case 0x6: /* LD1/ST1 (3 registers) */
        rpt = 3;
        selem = 1;
        break;
    case 0x7: /* LD1/ST1 (1 register) */
        rpt = 1;
        selem = 1;
        break;
    case 0x8: /* LD2/ST2 */
        rpt = 1;
        selem = 2;
        break;
    case 0xa: /* LD1/ST1 (2 registers) */
        rpt = 2;
        selem = 1;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (size == 3 && !is_q && selem != 1) {
        /* reserved */
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    tcg_rn = cpu_reg_sp(s, rn);
    tcg_addr = tcg_temp_new_i64();
    tcg_gen_mov_i64(tcg_addr, tcg_rn);

    for (r = 0; r < rpt; r++) {
        int e;
        for (e = 0; e < elements; e++) {
            int tt = (rt + r) % 32;
            int xs;
            for (xs = 0; xs < selem; xs++) {
                if (is_store) {
                    do_vec_st(s, tt, e, tcg_addr, size);
                } else {
                    do_vec_ld(s, tt, e, tcg_addr, size);

                    /* For non-quad operations, setting a slice of the low
                     * 64 bits of the register clears the high 64 bits (in
                     * the ARM ARM pseudocode this is implicit in the fact
                     * that 'rval' is a 64 bit wide variable).
                     * For quad operations, we might still need to zero the
                     * high bits of SVE.  We optimize by noticing that we only
                     * need to do this the first time we touch a register.
                     */
                    if (!is_q && e == 0 && (r == 0 || xs == selem - 1)) {
                        clear_vec_high(s, tt);
                    }
                }
                tcg_gen_addi_i64(tcg_addr, tcg_addr, ebytes);
                tt = (tt + 1) % 32;
            }
        }
    }

    if (is_postidx) {
        int rm = extract32(insn, 16, 5);
        if (rm == 31) {
            /* Rm == 31 means an immediate post-index by the transfer size */
            tcg_gen_mov_i64(tcg_rn, tcg_addr);
        } else {
            tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
        }
    }
    tcg_temp_free_i64(tcg_addr);
}
2614
2615
2616
2617
2618
2619
2620
2621
2622
2623
2624
2625
2626
2627
2628
2629
2630
2631
2632
2633
2634
2635
2636
/* AdvSIMD load/store single structure
 *
 *  31  30  29           23 22 21 20       16 15 13 12  11 10 9    5 4    0
 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
 * | 0 | Q | 0 0 1 1 0 1 0 | L R | 0 0 0 0 0 | opc | S | size |  Rn  |  Rt  |
 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
 *
 * (the post-indexed form additionally carries Rm in bits [20:16])
 *
 * Rt: first (or only) SIMD&FP register to be transferred
 * Rn: base address or SP
 * Rm (post-index only): post-index register (when !31) or size dependent #imm
 * index = encoded in Q:S:size dependent on the element size
 */
static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int size = extract32(insn, 10, 2);
    int S = extract32(insn, 12, 1);
    int opc = extract32(insn, 13, 3);
    int R = extract32(insn, 21, 1);
    int is_load = extract32(insn, 22, 1);
    int is_postidx = extract32(insn, 23, 1);
    int is_q = extract32(insn, 30, 1);

    int scale = extract32(opc, 1, 2);
    int selem = (extract32(opc, 0, 1) << 1 | R) + 1;
    bool replicate = false;
    int index = is_q << 3 | S << 2 | size;
    int ebytes, xs;
    TCGv_i64 tcg_addr, tcg_rn;

    switch (scale) {
    case 3:
        /* load and replicate (LD1R etc): stores and S != 0 are invalid */
        if (!is_load || S) {
            unallocated_encoding(s);
            return;
        }
        scale = size;
        replicate = true;
        break;
    case 0:
        /* byte element: index is Q:S:size as-is */
        break;
    case 1:
        /* halfword element: size<0> must be 0, index drops that bit */
        if (extract32(size, 0, 1)) {
            unallocated_encoding(s);
            return;
        }
        index >>= 1;
        break;
    case 2:
        /* word (size == 00) or doubleword (size == 01) element */
        if (extract32(size, 1, 1)) {
            unallocated_encoding(s);
            return;
        }
        if (!extract32(size, 0, 1)) {
            index >>= 2;
        } else {
            if (S) {
                unallocated_encoding(s);
                return;
            }
            index >>= 3;
            scale = 3;
        }
        break;
    default:
        g_assert_not_reached();
    }

    if (!fp_access_check(s)) {
        return;
    }

    ebytes = 1 << scale;

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    tcg_rn = cpu_reg_sp(s, rn);
    tcg_addr = tcg_temp_new_i64();
    tcg_gen_mov_i64(tcg_addr, tcg_rn);

    for (xs = 0; xs < selem; xs++) {
        if (replicate) {
            /* Load and replicate to all elements */
            uint64_t mulconst;
            TCGv_i64 tcg_tmp = tcg_temp_new_i64();

            tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr,
                                get_mem_index(s), s->be_data + scale);
            /* multiply by a constant that duplicates the element
             * across all lanes of a 64-bit value
             */
            switch (scale) {
            case 0:
                mulconst = 0x0101010101010101ULL;
                break;
            case 1:
                mulconst = 0x0001000100010001ULL;
                break;
            case 2:
                mulconst = 0x0000000100000001ULL;
                break;
            case 3:
                /* 64-bit element: already fills the value */
                mulconst = 0;
                break;
            default:
                g_assert_not_reached();
            }
            if (mulconst) {
                tcg_gen_muli_i64(tcg_tmp, tcg_tmp, mulconst);
            }
            write_vec_element(s, tcg_tmp, rt, 0, MO_64);
            if (is_q) {
                write_vec_element(s, tcg_tmp, rt, 1, MO_64);
            } else {
                clear_vec_high(s, rt);
            }
            tcg_temp_free_i64(tcg_tmp);
        } else {
            /* Load/store one element per register */
            if (is_load) {
                do_vec_ld(s, rt, index, tcg_addr, s->be_data + scale);
            } else {
                do_vec_st(s, rt, index, tcg_addr, s->be_data + scale);
            }
        }
        tcg_gen_addi_i64(tcg_addr, tcg_addr, ebytes);
        rt = (rt + 1) % 32;
    }

    if (is_postidx) {
        int rm = extract32(insn, 16, 5);
        if (rm == 31) {
            /* Rm == 31 means an immediate post-index by the transfer size */
            tcg_gen_mov_i64(tcg_rn, tcg_addr);
        } else {
            tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
        }
    }
    tcg_temp_free_i64(tcg_addr);
}
2764
2765
2766static void disas_ldst(DisasContext *s, uint32_t insn)
2767{
2768 switch (extract32(insn, 24, 6)) {
2769 case 0x08:
2770 disas_ldst_excl(s, insn);
2771 break;
2772 case 0x18: case 0x1c:
2773 disas_ld_lit(s, insn);
2774 break;
2775 case 0x28: case 0x29:
2776 case 0x2c: case 0x2d:
2777 disas_ldst_pair(s, insn);
2778 break;
2779 case 0x38: case 0x39:
2780 case 0x3c: case 0x3d:
2781 disas_ldst_reg(s, insn);
2782 break;
2783 case 0x0c:
2784 disas_ldst_multiple_struct(s, insn);
2785 break;
2786 case 0x0d:
2787 disas_ldst_single_struct(s, insn);
2788 break;
2789 default:
2790 unallocated_encoding(s);
2791 break;
2792 }
2793}
2794
2795
2796
2797
2798
2799
2800
2801static void disas_pc_rel_adr(DisasContext *s, uint32_t insn)
2802{
2803 unsigned int page, rd;
2804 uint64_t base;
2805 uint64_t offset;
2806
2807 page = extract32(insn, 31, 1);
2808
2809 offset = sextract64(insn, 5, 19);
2810 offset = offset << 2 | extract32(insn, 29, 2);
2811 rd = extract32(insn, 0, 5);
2812 base = s->pc - 4;
2813
2814 if (page) {
2815
2816 base &= ~0xfff;
2817 offset <<= 12;
2818 }
2819
2820 tcg_gen_movi_i64(cpu_reg(s, rd), base + offset);
2821}
2822
2823
2824
2825
2826
2827
2828
2829
2830
2831
2832
2833
2834
2835
/*
 * Add/subtract (immediate)
 *
 *  31 30 29 28       24 23 22 21         10 9   5 4   0
 * +--+--+--+-----------+-----+-------------+-----+-----+
 * |sf|op| S| 1 0 0 0 1 |shift|    imm12    |  Rn | Rd  |
 * +--+--+--+-----------+-----+-------------+-----+-----+
 *
 *    sf: 0 -> 32bit, 1 -> 64bit
 *    op: 0 -> add  , 1 -> sub
 *     S: 1 -> set flags
 * shift: 00 -> LSL imm by 0, 01 -> LSL imm by 12
 */
static void disas_add_sub_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    uint64_t imm = extract32(insn, 10, 12);
    int shift = extract32(insn, 22, 2);
    bool setflags = extract32(insn, 29, 1);
    bool sub_op = extract32(insn, 30, 1);
    bool is_64bit = extract32(insn, 31, 1);

    /* Rn is SP for 31; Rd is SP only in the non-flag-setting forms */
    TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
    TCGv_i64 tcg_rd = setflags ? cpu_reg(s, rd) : cpu_reg_sp(s, rd);
    TCGv_i64 tcg_result;

    switch (shift) {
    case 0x0:
        break;
    case 0x1:
        imm <<= 12;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    tcg_result = tcg_temp_new_i64();
    if (!setflags) {
        if (sub_op) {
            tcg_gen_subi_i64(tcg_result, tcg_rn, imm);
        } else {
            tcg_gen_addi_i64(tcg_result, tcg_rn, imm);
        }
    } else {
        /* flag-setting forms go via the shared CC helpers */
        TCGv_i64 tcg_imm = tcg_const_i64(imm);
        if (sub_op) {
            gen_sub_CC(is_64bit, tcg_result, tcg_rn, tcg_imm);
        } else {
            gen_add_CC(is_64bit, tcg_result, tcg_rn, tcg_imm);
        }
        tcg_temp_free_i64(tcg_imm);
    }

    /* 32-bit results are zero-extended into the 64-bit register */
    if (is_64bit) {
        tcg_gen_mov_i64(tcg_rd, tcg_result);
    } else {
        tcg_gen_ext32u_i64(tcg_rd, tcg_result);
    }

    tcg_temp_free_i64(tcg_result);
}
2886
2887
2888
2889
2890
/* Replicate an element of width e bits (held in the low bits of mask)
 * across the whole 64-bit value by repeated doubling.
 * e must be a non-zero power of two no larger than 64.
 */
static uint64_t bitfield_replicate(uint64_t mask, unsigned int e)
{
    unsigned int width;

    assert(e != 0);
    for (width = e; width < 64; width *= 2) {
        mask |= mask << width;
    }
    return mask;
}
2900
2901
/* Return a value with the bottom `length` bits set (length in 1..64) */
static inline uint64_t bitmask64(unsigned int length)
{
    assert(length > 0 && length <= 64);
    return length == 64 ? ~0ULL : (1ULL << length) - 1;
}
2907
2908
2909
2910
2911
2912
/* Simplified variant of pseudocode DecodeBitMasks() for the case where we
 * only require the wmask. Returns false if the imms/immr/immn are a reserved
 * value (ie should cause a guest UNDEF exception), and true if they are
 * valid, in which case the decoded bit pattern is written to result.
 */
static bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn,
                                   unsigned int imms, unsigned int immr)
{
    uint64_t mask;
    unsigned e, levels, s, r;
    int len;

    assert(immn < 2 && imms < 64 && immr < 64);

    /* The bit patterns we create here are 64 bit patterns which
     * are vectors of identical elements of size e = 2, 4, 8, 16, 32 or
     * 64 bits each. Each element contains the same value: a run
     * of between 1 and e-1 non-zero bits, rotated within the
     * element by between 0 and e-1 bits.
     *
     * The element size and run length are encoded into immn (1 bit)
     * and imms (6 bits) as follows:
     * 64 bit elements: immn = 1, imms = <length of run - 1>
     * 32 bit elements: immn = 0, imms = 0 : <length of run - 1>
     * 16 bit elements: immn = 0, imms = 10 : <length of run - 1>
     *  8 bit elements: immn = 0, imms = 110 : <length of run - 1>
     *  4 bit elements: immn = 0, imms = 1110 : <length of run - 1>
     *  2 bit elements: immn = 0, imms = 11110 : <length of run - 1>
     * Notice that immn = 0, imms = 11111x is the only combination
     * which is a reserved encoding.
     *
     * In all cases the rotation is by immr % e (and immr is 6 bits).
     */

    /* First determine the element size: the index of the highest set
     * bit in immn:NOT(imms).
     */
    len = 31 - clz32((immn << 6) | (~imms & 0x3f));
    if (len < 1) {
        /* This is the immn == 0, imms == 0x11111x reserved case */
        return false;
    }
    e = 1 << len;

    levels = e - 1;
    s = imms & levels;
    r = immr & levels;

    if (s == levels) {
        /* <length of run - 1> mustn't be all-ones: reserved */
        return false;
    }

    /* Create the value of one element: s+1 set bits rotated
     * by r within the element (which is e bits wide)...
     */
    mask = bitmask64(s + 1);
    if (r) {
        mask = (mask >> r) | (mask << (e - r));
        mask &= bitmask64(e);
    }
    /* ...then replicate the element over the whole 64 bit value */
    mask = bitfield_replicate(mask, e);
    *result = mask;
    return true;
}
2973
2974
2975
2976
2977
2978
2979
/* Logical (immediate)
 *   31  30 29 28         23 22  21  16 15  10 9    5 4    0
 * +----+-----+-------------+---+------+------+------+------+
 * | sf | opc | 1 0 0 1 0 0 | N | immr | imms |  Rn  |  Rd  |
 * +----+-----+-------------+---+------+------+------+------+
 *
 * opc: 00 -> AND, 01 -> ORR, 10 -> EOR, 11 -> ANDS
 */
static void disas_logic_imm(DisasContext *s, uint32_t insn)
{
    unsigned int sf, opc, is_n, immr, imms, rn, rd;
    TCGv_i64 tcg_rd, tcg_rn;
    uint64_t wmask;
    bool is_and = false;

    sf = extract32(insn, 31, 1);
    opc = extract32(insn, 29, 2);
    is_n = extract32(insn, 22, 1);
    immr = extract32(insn, 16, 6);
    imms = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    /* N == 1 is only valid for the 64-bit forms */
    if (!sf && is_n) {
        unallocated_encoding(s);
        return;
    }

    /* ANDS writes to the zero register for rd == 31; the others to SP */
    if (opc == 0x3) { /* ANDS */
        tcg_rd = cpu_reg(s, rd);
    } else {
        tcg_rd = cpu_reg_sp(s, rd);
    }
    tcg_rn = cpu_reg(s, rn);

    if (!logic_imm_decode_wmask(&wmask, is_n, imms, immr)) {
        /* some immediate field values are reserved */
        unallocated_encoding(s);
        return;
    }

    if (!sf) {
        wmask &= 0xffffffff;
    }

    switch (opc) {
    case 0x3: /* ANDS */
    case 0x0: /* AND */
        tcg_gen_andi_i64(tcg_rd, tcg_rn, wmask);
        is_and = true;
        break;
    case 0x1: /* ORR */
        tcg_gen_ori_i64(tcg_rd, tcg_rn, wmask);
        break;
    case 0x2: /* EOR */
        tcg_gen_xori_i64(tcg_rd, tcg_rn, wmask);
        break;
    default:
        assert(FALSE); /* must handle all above */
        break;
    }

    if (!sf && !is_and) {
        /* zero extend final result; we know we can skip this for AND
         * since the immediate had the high 32 bits clear
         */
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }

    if (opc == 3) { /* ANDS */
        gen_logic_CC(sf, tcg_rd);
    }
}
3045
3046
3047
3048
3049
3050
3051
3052
3053
3054
3055
3056
3057
/* Move wide (immediate): MOVN, MOVZ, MOVK
 *   31 30 29 28         23 22 21 20             5 4    0
 * +--+-----+-------------+-----+----------------+------+
 * |sf| opc | 1 0 0 1 0 1 |  hw |      imm16     |  Rd  |
 * +--+-----+-------------+-----+----------------+------+
 *
 * sf: 0 -> 32 bit, 1 -> 64 bit
 * opc: 00 -> MOVN, 10 -> MOVZ, 11 -> MOVK (01 reserved)
 * hw: shift amount / 16 (so pos is 0, 16, 32 or 48)
 */
static void disas_movw_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    uint64_t imm = extract32(insn, 5, 16);
    int sf = extract32(insn, 31, 1);
    int opc = extract32(insn, 29, 2);
    int pos = extract32(insn, 21, 2) << 4;
    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_imm;

    if (!sf && (pos >= 32)) {
        /* shifts of 32 and 48 exist only for the 64-bit forms */
        unallocated_encoding(s);
        return;
    }

    switch (opc) {
    case 0: /* MOVN */
    case 2: /* MOVZ */
        imm <<= pos;
        if (opc == 0) {
            /* MOVN writes the bitwise inverse of the shifted immediate */
            imm = ~imm;
        }
        if (!sf) {
            imm &= 0xffffffffu;
        }
        tcg_gen_movi_i64(tcg_rd, imm);
        break;
    case 3: /* MOVK: insert the 16-bit field, keeping the other bits */
        tcg_imm = tcg_const_i64(imm);
        tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_imm, pos, 16);
        tcg_temp_free_i64(tcg_imm);
        if (!sf) {
            tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
        }
        break;
    default:
        /* opc == 1 is a reserved encoding */
        unallocated_encoding(s);
        break;
    }
}
3098
3099
3100
3101
3102
3103
3104
/* Bitfield: SBFM (opc 0), BFM (opc 1), UBFM (opc 2)
 *   31  30 29 28         23 22  21  16 15  10 9    5 4    0
 * +----+-----+-------------+---+------+------+------+------+
 * | sf | opc | 1 0 0 1 1 0 | N | immr | imms |  Rn  |  Rd  |
 * +----+-----+-------------+---+------+------+------+------+
 */
static void disas_bitfield(DisasContext *s, uint32_t insn)
{
    unsigned int sf, n, opc, ri, si, rn, rd, bitsize, pos, len;
    TCGv_i64 tcg_rd, tcg_tmp;

    sf = extract32(insn, 31, 1);
    opc = extract32(insn, 29, 2);
    n = extract32(insn, 22, 1);
    ri = extract32(insn, 16, 6);    /* immr: rotate / shift amount */
    si = extract32(insn, 10, 6);    /* imms: top bit of the field */
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);
    bitsize = sf ? 64 : 32;

    if (sf != n || ri >= bitsize || si >= bitsize || opc > 2) {
        unallocated_encoding(s);
        return;
    }

    tcg_rd = cpu_reg(s, rd);

    /* Read the source without forcing a zero-extend for !sf: since
     * ri and si are constrained below bitsize we never reference data
     * outside the low 32 bits anyway.
     */
    tcg_tmp = read_cpu_reg(s, rn, 1);

    /* Recognize simple(r) extractions before doing a general deposit. */
    if (opc == 0) { /* SBFM */
        if (ri == 0) {
            if (si == 7) { /* SXTB */
                tcg_gen_ext8s_i64(tcg_rd, tcg_tmp);
                goto done;
            } else if (si == 15) { /* SXTH */
                tcg_gen_ext16s_i64(tcg_rd, tcg_tmp);
                goto done;
            } else if (si == 31) { /* SXTW */
                tcg_gen_ext32s_i64(tcg_rd, tcg_tmp);
                goto done;
            }
        }
        if (si == 63 || (si == 31 && ri <= si)) { /* ASR alias */
            if (si == 31) {
                /* 32-bit ASR: sign-extend first so the 64-bit shift
                 * sees the 32-bit sign bit
                 */
                tcg_gen_ext32s_i64(tcg_tmp, tcg_tmp);
            }
            tcg_gen_sari_i64(tcg_rd, tcg_tmp, ri);
            goto done;
        }
    } else if (opc == 2) { /* UBFM */
        if (ri == 0) { /* UBFX: the AND already clears the high bits */
            tcg_gen_andi_i64(tcg_rd, tcg_tmp, bitmask64(si + 1));
            return;
        }
        if (si == 63 || (si == 31 && ri <= si)) { /* LSR alias */
            if (si == 31) {
                tcg_gen_ext32u_i64(tcg_tmp, tcg_tmp);
            }
            tcg_gen_shri_i64(tcg_rd, tcg_tmp, ri);
            return;
        }
        if (si + 1 == ri && si != bitsize - 1) { /* LSL alias */
            int shift = bitsize - 1 - si;
            tcg_gen_shli_i64(tcg_rd, tcg_tmp, shift);
            goto done;
        }
    }

    if (opc != 1) { /* SBFM/UBFM start from a zeroed destination */
        tcg_gen_movi_i64(tcg_rd, 0);
    }

    /* General case: move the selected field with a deposit */
    if (si >= ri) {
        /* Wd<s-r:0> = Wn<s:r> */
        tcg_gen_shri_i64(tcg_tmp, tcg_tmp, ri);
        pos = 0;
        len = (si - ri) + 1;
    } else {
        /* Wd<bitsize-1-r+s:bitsize-r> = Wn<s:0> */
        pos = bitsize - ri;
        len = si + 1;
    }

    tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, pos, len);

    if (opc == 0) { /* SBFM: sign-extend the deposited field */
        tcg_gen_shli_i64(tcg_rd, tcg_rd, 64 - (pos + len));
        tcg_gen_sari_i64(tcg_rd, tcg_rd, 64 - (pos + len));
    }

 done:
    if (!sf) { /* zero extend final result */
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
}
3199
3200
3201
3202
3203
3204
3205
/* Extract (EXTR; the Rn == Rm form is the ROR-immediate alias)
 *   31  30  29 28         23 22   21  20  16 15    10 9    5 4    0
 * +----+------+-------------+---+----+------+--------+------+------+
 * | sf | op21 | 1 0 0 1 1 1 | N | o0 |  Rm  |  imms  |  Rn  |  Rd  |
 * +----+------+-------------+---+----+------+--------+------+------+
 */
static void disas_extract(DisasContext *s, uint32_t insn)
{
    unsigned int sf, n, rm, imm, rn, rd, bitsize, op21, op0;

    sf = extract32(insn, 31, 1);
    n = extract32(insn, 22, 1);
    rm = extract32(insn, 16, 5);
    imm = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);
    op21 = extract32(insn, 29, 2);
    op0 = extract32(insn, 21, 1);
    bitsize = sf ? 64 : 32;

    if (sf != n || op21 || op0 || imm >= bitsize) {
        unallocated_encoding(s);
    } else {
        TCGv_i64 tcg_rd, tcg_rm, tcg_rn;

        tcg_rd = cpu_reg(s, rd);

        if (unlikely(imm == 0)) {
            /* An extract from bit 0 is a plain copy of Rm; handling it
             * separately also avoids generating a shift by the full
             * operand width below.
             */
            if (sf) {
                tcg_gen_mov_i64(tcg_rd, cpu_reg(s, rm));
            } else {
                tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, rm));
            }
        } else if (rm == rn) { /* ROR (immediate) alias */
            tcg_rm = cpu_reg(s, rm);
            if (sf) {
                tcg_gen_rotri_i64(tcg_rd, tcg_rm, imm);
            } else {
                /* 32-bit rotate done at 32-bit width, then widened */
                TCGv_i32 tmp = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(tmp, tcg_rm);
                tcg_gen_rotri_i32(tmp, tmp, imm);
                tcg_gen_extu_i32_i64(tcg_rd, tmp);
                tcg_temp_free_i32(tmp);
            }
        } else {
            /* EXTR: high bits of Rm (from bit imm up) in the low part,
             * low bits of Rn shifted above them.
             */
            tcg_rm = read_cpu_reg(s, rm, sf);
            tcg_rn = read_cpu_reg(s, rn, sf);
            tcg_gen_shri_i64(tcg_rm, tcg_rm, imm);
            tcg_gen_shli_i64(tcg_rn, tcg_rn, bitsize - imm);
            tcg_gen_or_i64(tcg_rd, tcg_rm, tcg_rn);
            if (!sf) {
                tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
            }
        }
    }
}
3259
3260
3261static void disas_data_proc_imm(DisasContext *s, uint32_t insn)
3262{
3263 switch (extract32(insn, 23, 6)) {
3264 case 0x20: case 0x21:
3265 disas_pc_rel_adr(s, insn);
3266 break;
3267 case 0x22: case 0x23:
3268 disas_add_sub_imm(s, insn);
3269 break;
3270 case 0x24:
3271 disas_logic_imm(s, insn);
3272 break;
3273 case 0x25:
3274 disas_movw_imm(s, insn);
3275 break;
3276 case 0x26:
3277 disas_bitfield(s, insn);
3278 break;
3279 case 0x27:
3280 disas_extract(s, insn);
3281 break;
3282 default:
3283 unallocated_encoding(s);
3284 break;
3285 }
3286}
3287
3288
3289
3290
3291
3292
3293static void shift_reg(TCGv_i64 dst, TCGv_i64 src, int sf,
3294 enum a64_shift_type shift_type, TCGv_i64 shift_amount)
3295{
3296 switch (shift_type) {
3297 case A64_SHIFT_TYPE_LSL:
3298 tcg_gen_shl_i64(dst, src, shift_amount);
3299 break;
3300 case A64_SHIFT_TYPE_LSR:
3301 tcg_gen_shr_i64(dst, src, shift_amount);
3302 break;
3303 case A64_SHIFT_TYPE_ASR:
3304 if (!sf) {
3305 tcg_gen_ext32s_i64(dst, src);
3306 }
3307 tcg_gen_sar_i64(dst, sf ? src : dst, shift_amount);
3308 break;
3309 case A64_SHIFT_TYPE_ROR:
3310 if (sf) {
3311 tcg_gen_rotr_i64(dst, src, shift_amount);
3312 } else {
3313 TCGv_i32 t0, t1;
3314 t0 = tcg_temp_new_i32();
3315 t1 = tcg_temp_new_i32();
3316 tcg_gen_extrl_i64_i32(t0, src);
3317 tcg_gen_extrl_i64_i32(t1, shift_amount);
3318 tcg_gen_rotr_i32(t0, t0, t1);
3319 tcg_gen_extu_i32_i64(dst, t0);
3320 tcg_temp_free_i32(t0);
3321 tcg_temp_free_i32(t1);
3322 }
3323 break;
3324 default:
3325 assert(FALSE);
3326 break;
3327 }
3328
3329 if (!sf) {
3330 tcg_gen_ext32u_i64(dst, dst);
3331 }
3332}
3333
3334
3335
3336
3337
3338static void shift_reg_imm(TCGv_i64 dst, TCGv_i64 src, int sf,
3339 enum a64_shift_type shift_type, unsigned int shift_i)
3340{
3341 assert(shift_i < (sf ? 64 : 32));
3342
3343 if (shift_i == 0) {
3344 tcg_gen_mov_i64(dst, src);
3345 } else {
3346 TCGv_i64 shift_const;
3347
3348 shift_const = tcg_const_i64(shift_i);
3349 shift_reg(dst, src, sf, shift_type, shift_const);
3350 tcg_temp_free_i64(shift_const);
3351 }
3352}
3353
3354
3355
3356
3357
3358
3359
3360static void disas_logic_reg(DisasContext *s, uint32_t insn)
3361{
3362 TCGv_i64 tcg_rd, tcg_rn, tcg_rm;
3363 unsigned int sf, opc, shift_type, invert, rm, shift_amount, rn, rd;
3364
3365 sf = extract32(insn, 31, 1);
3366 opc = extract32(insn, 29, 2);
3367 shift_type = extract32(insn, 22, 2);
3368 invert = extract32(insn, 21, 1);
3369 rm = extract32(insn, 16, 5);
3370 shift_amount = extract32(insn, 10, 6);
3371 rn = extract32(insn, 5, 5);
3372 rd = extract32(insn, 0, 5);
3373
3374 if (!sf && (shift_amount & (1 << 5))) {
3375 unallocated_encoding(s);
3376 return;
3377 }
3378
3379 tcg_rd = cpu_reg(s, rd);
3380
3381 if (opc == 1 && shift_amount == 0 && shift_type == 0 && rn == 31) {
3382
3383
3384
3385 tcg_rm = cpu_reg(s, rm);
3386 if (invert) {
3387 tcg_gen_not_i64(tcg_rd, tcg_rm);
3388 if (!sf) {
3389 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
3390 }
3391 } else {
3392 if (sf) {
3393 tcg_gen_mov_i64(tcg_rd, tcg_rm);
3394 } else {
3395 tcg_gen_ext32u_i64(tcg_rd, tcg_rm);
3396 }
3397 }
3398 return;
3399 }
3400
3401 tcg_rm = read_cpu_reg(s, rm, sf);
3402
3403 if (shift_amount) {
3404 shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, shift_amount);
3405 }
3406
3407 tcg_rn = cpu_reg(s, rn);
3408
3409 switch (opc | (invert << 2)) {
3410 case 0:
3411 case 3:
3412 tcg_gen_and_i64(tcg_rd, tcg_rn, tcg_rm);
3413 break;
3414 case 1:
3415 tcg_gen_or_i64(tcg_rd, tcg_rn, tcg_rm);
3416 break;
3417 case 2:
3418 tcg_gen_xor_i64(tcg_rd, tcg_rn, tcg_rm);
3419 break;
3420 case 4:
3421 case 7:
3422 tcg_gen_andc_i64(tcg_rd, tcg_rn, tcg_rm);
3423 break;
3424 case 5:
3425 tcg_gen_orc_i64(tcg_rd, tcg_rn, tcg_rm);
3426 break;
3427 case 6:
3428 tcg_gen_eqv_i64(tcg_rd, tcg_rn, tcg_rm);
3429 break;
3430 default:
3431 assert(FALSE);
3432 break;
3433 }
3434
3435 if (!sf) {
3436 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
3437 }
3438
3439 if (opc == 3) {
3440 gen_logic_CC(sf, tcg_rd);
3441 }
3442}
3443
3444
3445
3446
3447
3448
3449
3450
3451
3452
3453
3454
3455
3456
3457
3458
3459
3460
/* Add/subtract (extended register)
 *
 *  sf: 0 -> 32bit, 1 -> 64bit
 *  sub_op: 0 -> add, 1 -> sub
 *  setflags: 1 -> set NZCV (ADDS/SUBS)
 *  option: extension type applied to Rm
 *  imm3: left shift applied to the extended Rm (0..4 only)
 *
 *  Rd = Rn +/- LSL(extend(Rm), imm3)
 */
static void disas_add_sub_ext_reg(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int imm3 = extract32(insn, 10, 3);
    int option = extract32(insn, 13, 3);
    int rm = extract32(insn, 16, 5);
    bool setflags = extract32(insn, 29, 1);
    bool sub_op = extract32(insn, 30, 1);
    bool sf = extract32(insn, 31, 1);

    TCGv_i64 tcg_rm, tcg_rn;
    TCGv_i64 tcg_rd;
    TCGv_i64 tcg_result;

    if (imm3 > 4) {
        /* shift amounts 5..7 are reserved */
        unallocated_encoding(s);
        return;
    }

    /* The non-flag-setting forms may write SP when Rd == 31 */
    if (!setflags) {
        tcg_rd = cpu_reg_sp(s, rd);
    } else {
        tcg_rd = cpu_reg(s, rd);
    }
    tcg_rn = read_cpu_reg_sp(s, rn, sf);

    tcg_rm = read_cpu_reg(s, rm, sf);
    ext_and_shift_reg(tcg_rm, tcg_rm, option, imm3);

    tcg_result = tcg_temp_new_i64();

    if (!setflags) {
        if (sub_op) {
            tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
        } else {
            tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
        }
    } else {
        /* flag-setting variants compute NZCV as a side effect */
        if (sub_op) {
            gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
        } else {
            gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
        }
    }

    if (sf) {
        tcg_gen_mov_i64(tcg_rd, tcg_result);
    } else {
        /* 32-bit result is written back zero-extended */
        tcg_gen_ext32u_i64(tcg_rd, tcg_result);
    }

    tcg_temp_free_i64(tcg_result);
}
3516
3517
3518
3519
3520
3521
3522
3523
3524
3525
3526
3527
3528
3529
3530
3531static void disas_add_sub_reg(DisasContext *s, uint32_t insn)
3532{
3533 int rd = extract32(insn, 0, 5);
3534 int rn = extract32(insn, 5, 5);
3535 int imm6 = extract32(insn, 10, 6);
3536 int rm = extract32(insn, 16, 5);
3537 int shift_type = extract32(insn, 22, 2);
3538 bool setflags = extract32(insn, 29, 1);
3539 bool sub_op = extract32(insn, 30, 1);
3540 bool sf = extract32(insn, 31, 1);
3541
3542 TCGv_i64 tcg_rd = cpu_reg(s, rd);
3543 TCGv_i64 tcg_rn, tcg_rm;
3544 TCGv_i64 tcg_result;
3545
3546 if ((shift_type == 3) || (!sf && (imm6 > 31))) {
3547 unallocated_encoding(s);
3548 return;
3549 }
3550
3551 tcg_rn = read_cpu_reg(s, rn, sf);
3552 tcg_rm = read_cpu_reg(s, rm, sf);
3553
3554 shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, imm6);
3555
3556 tcg_result = tcg_temp_new_i64();
3557
3558 if (!setflags) {
3559 if (sub_op) {
3560 tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
3561 } else {
3562 tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
3563 }
3564 } else {
3565 if (sub_op) {
3566 gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
3567 } else {
3568 gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
3569 }
3570 }
3571
3572 if (sf) {
3573 tcg_gen_mov_i64(tcg_rd, tcg_result);
3574 } else {
3575 tcg_gen_ext32u_i64(tcg_rd, tcg_result);
3576 }
3577
3578 tcg_temp_free_i64(tcg_result);
3579}
3580
3581
3582
3583
3584
3585
3586
3587
3588
/* Data-processing (3 source): MADD/MSUB, SMADDL/SMSUBL/SMULH,
 * UMADDL/UMSUBL/UMULH.
 */
static void disas_data_proc_3src(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int ra = extract32(insn, 10, 5);
    int rm = extract32(insn, 16, 5);
    int op_id = (extract32(insn, 29, 3) << 4) |
        (extract32(insn, 21, 3) << 1) |
        extract32(insn, 15, 1);
    bool sf = extract32(insn, 31, 1);
    bool is_sub = extract32(op_id, 0, 1);
    bool is_high = extract32(op_id, 2, 1);
    bool is_signed = false;
    TCGv_i64 tcg_op1;
    TCGv_i64 tcg_op2;
    TCGv_i64 tcg_tmp;

    /* Note that op_id is sf:op54:op31:o0, so it includes the
     * 32/64-bit size flag; that makes each case below unique.
     */
    switch (op_id) {
    case 0x42: /* SMADDL */
    case 0x43: /* SMSUBL */
    case 0x44: /* SMULH */
        is_signed = true;
        break;
    case 0x0:  /* MADD (32bit) */
    case 0x1:  /* MSUB (32bit) */
    case 0x40: /* MADD (64bit) */
    case 0x41: /* MSUB (64bit) */
    case 0x4a: /* UMADDL */
    case 0x4b: /* UMSUBL */
    case 0x4c: /* UMULH */
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (is_high) {
        /* SMULH/UMULH: keep only the high half of the 128-bit product */
        TCGv_i64 low_bits = tcg_temp_new_i64(); /* low bits discarded */
        TCGv_i64 tcg_rd = cpu_reg(s, rd);
        TCGv_i64 tcg_rn = cpu_reg(s, rn);
        TCGv_i64 tcg_rm = cpu_reg(s, rm);

        if (is_signed) {
            tcg_gen_muls2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
        } else {
            tcg_gen_mulu2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
        }

        tcg_temp_free_i64(low_bits);
        return;
    }

    tcg_op1 = tcg_temp_new_i64();
    tcg_op2 = tcg_temp_new_i64();
    tcg_tmp = tcg_temp_new_i64();

    if (op_id < 0x42) {
        /* MADD/MSUB: operands used at full register width */
        tcg_gen_mov_i64(tcg_op1, cpu_reg(s, rn));
        tcg_gen_mov_i64(tcg_op2, cpu_reg(s, rm));
    } else {
        /* widening multiplies take 32-bit source operands */
        if (is_signed) {
            tcg_gen_ext32s_i64(tcg_op1, cpu_reg(s, rn));
            tcg_gen_ext32s_i64(tcg_op2, cpu_reg(s, rm));
        } else {
            tcg_gen_ext32u_i64(tcg_op1, cpu_reg(s, rn));
            tcg_gen_ext32u_i64(tcg_op2, cpu_reg(s, rm));
        }
    }

    if (ra == 31 && !is_sub) {
        /* Special-case MADD with rA == XZR; it is the standard MUL alias */
        tcg_gen_mul_i64(cpu_reg(s, rd), tcg_op1, tcg_op2);
    } else {
        tcg_gen_mul_i64(tcg_tmp, tcg_op1, tcg_op2);
        if (is_sub) {
            tcg_gen_sub_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
        } else {
            tcg_gen_add_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
        }
    }

    if (!sf) {
        /* 32-bit forms write back a zero-extended result */
        tcg_gen_ext32u_i64(cpu_reg(s, rd), cpu_reg(s, rd));
    }

    tcg_temp_free_i64(tcg_op1);
    tcg_temp_free_i64(tcg_op2);
    tcg_temp_free_i64(tcg_tmp);
}
3679
3680
3681
3682
3683
3684
3685
3686
3687
3688static void disas_adc_sbc(DisasContext *s, uint32_t insn)
3689{
3690 unsigned int sf, op, setflags, rm, rn, rd;
3691 TCGv_i64 tcg_y, tcg_rn, tcg_rd;
3692
3693 if (extract32(insn, 10, 6) != 0) {
3694 unallocated_encoding(s);
3695 return;
3696 }
3697
3698 sf = extract32(insn, 31, 1);
3699 op = extract32(insn, 30, 1);
3700 setflags = extract32(insn, 29, 1);
3701 rm = extract32(insn, 16, 5);
3702 rn = extract32(insn, 5, 5);
3703 rd = extract32(insn, 0, 5);
3704
3705 tcg_rd = cpu_reg(s, rd);
3706 tcg_rn = cpu_reg(s, rn);
3707
3708 if (op) {
3709 tcg_y = new_tmp_a64(s);
3710 tcg_gen_not_i64(tcg_y, cpu_reg(s, rm));
3711 } else {
3712 tcg_y = cpu_reg(s, rm);
3713 }
3714
3715 if (setflags) {
3716 gen_adc_CC(sf, tcg_rd, tcg_rn, tcg_y);
3717 } else {
3718 gen_adc(sf, tcg_rd, tcg_rn, tcg_y);
3719 }
3720}
3721
3722
3723
3724
3725
3726
3727
3728
/* Conditional compare (immediate / register): CCMN, CCMP.
 * If the condition holds, perform the compare and set NZCV from it;
 * otherwise load NZCV from the literal nzcv field.
 */
static void disas_cc(DisasContext *s, uint32_t insn)
{
    unsigned int sf, op, y, cond, rn, nzcv, is_imm;
    TCGv_i32 tcg_t0, tcg_t1, tcg_t2;
    TCGv_i64 tcg_tmp, tcg_y, tcg_rn;
    DisasCompare c;

    if (!extract32(insn, 29, 1)) {
        /* S bit must be set */
        unallocated_encoding(s);
        return;
    }
    if (insn & (1 << 10 | 1 << 4)) {
        /* the o2 and o3 bits must be clear */
        unallocated_encoding(s);
        return;
    }
    sf = extract32(insn, 31, 1);
    op = extract32(insn, 30, 1);
    is_imm = extract32(insn, 11, 1);
    y = extract32(insn, 16, 5);     /* Rm (register) or imm5 (immediate) */
    cond = extract32(insn, 12, 4);
    rn = extract32(insn, 5, 5);
    nzcv = extract32(insn, 0, 4);

    /* Set T0 = !COND (1 when the condition fails, else 0).  */
    tcg_t0 = tcg_temp_new_i32();
    arm_test_cc(&c, cond);
    tcg_gen_setcondi_i32(tcg_invert_cond(c.cond), tcg_t0, c.value, 0);
    arm_free_cc(&c);

    /* Load the arguments for the new comparison.  */
    if (is_imm) {
        tcg_y = new_tmp_a64(s);
        tcg_gen_movi_i64(tcg_y, y);
    } else {
        tcg_y = cpu_reg(s, y);
    }
    tcg_rn = cpu_reg(s, rn);

    /* Set the flags for the new comparison (result value discarded).  */
    tcg_tmp = tcg_temp_new_i64();
    if (op) {
        gen_sub_CC(sf, tcg_tmp, tcg_rn, tcg_y);
    } else {
        gen_add_CC(sf, tcg_tmp, tcg_rn, tcg_y);
    }
    tcg_temp_free_i64(tcg_tmp);

    /* If COND was false, force the flags to #nzcv.  Compute two masks
     * to help with this: T1 = (COND ? 0 : -1), T2 = (COND ? -1 : 0).
     * For TCG hosts that support ANDC, we can make do with just T1.
     * In either case, allow the compiler to optimize the AND/OR.
     */
    tcg_t1 = tcg_temp_new_i32();
    tcg_t2 = tcg_temp_new_i32();
    tcg_gen_neg_i32(tcg_t1, tcg_t0);
    tcg_gen_subi_i32(tcg_t2, tcg_t0, 1);

    if (nzcv & 8) { /* N bit of #nzcv */
        tcg_gen_or_i32(cpu_NF, cpu_NF, tcg_t1);
    } else {
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_NF, cpu_NF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_NF, cpu_NF, tcg_t2);
        }
    }
    if (nzcv & 4) { /* Z bit of #nzcv */
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_ZF, cpu_ZF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_ZF, cpu_ZF, tcg_t2);
        }
    } else {
        tcg_gen_or_i32(cpu_ZF, cpu_ZF, tcg_t0);
    }
    if (nzcv & 2) { /* C bit of #nzcv */
        tcg_gen_or_i32(cpu_CF, cpu_CF, tcg_t0);
    } else {
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_CF, cpu_CF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_CF, cpu_CF, tcg_t2);
        }
    }
    if (nzcv & 1) { /* V bit of #nzcv */
        tcg_gen_or_i32(cpu_VF, cpu_VF, tcg_t1);
    } else {
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_VF, cpu_VF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_VF, cpu_VF, tcg_t2);
        }
    }
    tcg_temp_free_i32(tcg_t0);
    tcg_temp_free_i32(tcg_t1);
    tcg_temp_free_i32(tcg_t2);
}
3826
3827
3828
3829
3830
3831
3832
/* Conditional select: CSEL, CSINC, CSINV, CSNEG (and the CSET/CSETM
 * aliases).  else_inv selects inversion, else_inc increment of the
 * "condition false" operand.
 */
static void disas_cond_select(DisasContext *s, uint32_t insn)
{
    unsigned int sf, else_inv, rm, cond, else_inc, rn, rd;
    TCGv_i64 tcg_rd, zero;
    DisasCompare64 c;

    if (extract32(insn, 29, 1) || extract32(insn, 11, 1)) {
        /* S == 1 or op2<1> == 1 are reserved */
        unallocated_encoding(s);
        return;
    }
    sf = extract32(insn, 31, 1);
    else_inv = extract32(insn, 30, 1);
    rm = extract32(insn, 16, 5);
    cond = extract32(insn, 12, 4);
    else_inc = extract32(insn, 10, 1);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    tcg_rd = cpu_reg(s, rd);

    a64_test_cc(&c, cond);
    zero = tcg_const_i64(0);

    if (rn == 31 && rm == 31 && (else_inc ^ else_inv)) {
        /* CSET & CSETM: both operands are ZR, so the result is purely
         * a function of the condition (0/1, negated for CSETM).
         */
        tcg_gen_setcond_i64(tcg_invert_cond(c.cond), tcg_rd, c.value, zero);
        if (else_inv) {
            tcg_gen_neg_i64(tcg_rd, tcg_rd);
        }
    } else {
        TCGv_i64 t_true = cpu_reg(s, rn);
        TCGv_i64 t_false = read_cpu_reg(s, rm, 1);
        /* apply the CSNEG/CSINV/CSINC transform to the false operand */
        if (else_inv && else_inc) {
            tcg_gen_neg_i64(t_false, t_false);
        } else if (else_inv) {
            tcg_gen_not_i64(t_false, t_false);
        } else if (else_inc) {
            tcg_gen_addi_i64(t_false, t_false, 1);
        }
        tcg_gen_movcond_i64(c.cond, tcg_rd, c.value, zero, t_true, t_false);
    }

    tcg_temp_free_i64(zero);
    a64_free_cc(&c);

    if (!sf) { /* zero extend final result */
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
}
3883
3884static void handle_clz(DisasContext *s, unsigned int sf,
3885 unsigned int rn, unsigned int rd)
3886{
3887 TCGv_i64 tcg_rd, tcg_rn;
3888 tcg_rd = cpu_reg(s, rd);
3889 tcg_rn = cpu_reg(s, rn);
3890
3891 if (sf) {
3892 gen_helper_clz64(tcg_rd, tcg_rn);
3893 } else {
3894 TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
3895 tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
3896 gen_helper_clz(tcg_tmp32, tcg_tmp32);
3897 tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
3898 tcg_temp_free_i32(tcg_tmp32);
3899 }
3900}
3901
3902static void handle_cls(DisasContext *s, unsigned int sf,
3903 unsigned int rn, unsigned int rd)
3904{
3905 TCGv_i64 tcg_rd, tcg_rn;
3906 tcg_rd = cpu_reg(s, rd);
3907 tcg_rn = cpu_reg(s, rn);
3908
3909 if (sf) {
3910 gen_helper_cls64(tcg_rd, tcg_rn);
3911 } else {
3912 TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
3913 tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
3914 gen_helper_cls32(tcg_tmp32, tcg_tmp32);
3915 tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
3916 tcg_temp_free_i32(tcg_tmp32);
3917 }
3918}
3919
3920static void handle_rbit(DisasContext *s, unsigned int sf,
3921 unsigned int rn, unsigned int rd)
3922{
3923 TCGv_i64 tcg_rd, tcg_rn;
3924 tcg_rd = cpu_reg(s, rd);
3925 tcg_rn = cpu_reg(s, rn);
3926
3927 if (sf) {
3928 gen_helper_rbit64(tcg_rd, tcg_rn);
3929 } else {
3930 TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
3931 tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
3932 gen_helper_rbit(tcg_tmp32, tcg_tmp32);
3933 tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
3934 tcg_temp_free_i32(tcg_tmp32);
3935 }
3936}
3937
3938
3939static void handle_rev64(DisasContext *s, unsigned int sf,
3940 unsigned int rn, unsigned int rd)
3941{
3942 if (!sf) {
3943 unallocated_encoding(s);
3944 return;
3945 }
3946 tcg_gen_bswap64_i64(cpu_reg(s, rd), cpu_reg(s, rn));
3947}
3948
3949
3950
3951
/* REV with sf == 0, REV32 with sf == 1: byte-reverse each 32-bit word
 * of the source independently.
 */
static void handle_rev32(DisasContext *s, unsigned int sf,
                         unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd = cpu_reg(s, rd);

    if (sf) {
        TCGv_i64 tcg_tmp = tcg_temp_new_i64();
        TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);

        /* bswap32_i64 requires zero high word */
        tcg_gen_ext32u_i64(tcg_tmp, tcg_rn);
        tcg_gen_bswap32_i64(tcg_rd, tcg_tmp);
        tcg_gen_shri_i64(tcg_tmp, tcg_rn, 32);
        tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp);
        /* reassemble: swapped low word stays low, swapped high stays high */
        tcg_gen_concat32_i64(tcg_rd, tcg_rd, tcg_tmp);

        tcg_temp_free_i64(tcg_tmp);
    } else {
        tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, rn));
        tcg_gen_bswap32_i64(tcg_rd, tcg_rd);
    }
}
3974
3975
/* REV16: byte-reverse each 16-bit halfword of the source independently
 * (two halfwords in the 32-bit form, four in the 64-bit form).
 */
static void handle_rev16(DisasContext *s, unsigned int sf,
                         unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();
    TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);

    /* halfword 0 */
    tcg_gen_andi_i64(tcg_tmp, tcg_rn, 0xffff);
    tcg_gen_bswap16_i64(tcg_rd, tcg_tmp);

    /* halfword 1 */
    tcg_gen_shri_i64(tcg_tmp, tcg_rn, 16);
    tcg_gen_andi_i64(tcg_tmp, tcg_tmp, 0xffff);
    tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp);
    tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, 16, 16);

    if (sf) {
        /* halfwords 2 and 3 exist only in the 64-bit form */
        tcg_gen_shri_i64(tcg_tmp, tcg_rn, 32);
        tcg_gen_andi_i64(tcg_tmp, tcg_tmp, 0xffff);
        tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp);
        tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, 32, 16);

        tcg_gen_shri_i64(tcg_tmp, tcg_rn, 48);
        tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp);
        tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, 48, 16);
    }

    tcg_temp_free_i64(tcg_tmp);
}
4004
4005
4006
4007
4008
4009
4010
4011static void disas_data_proc_1src(DisasContext *s, uint32_t insn)
4012{
4013 unsigned int sf, opcode, rn, rd;
4014
4015 if (extract32(insn, 29, 1) || extract32(insn, 16, 5)) {
4016 unallocated_encoding(s);
4017 return;
4018 }
4019
4020 sf = extract32(insn, 31, 1);
4021 opcode = extract32(insn, 10, 6);
4022 rn = extract32(insn, 5, 5);
4023 rd = extract32(insn, 0, 5);
4024
4025 switch (opcode) {
4026 case 0:
4027 handle_rbit(s, sf, rn, rd);
4028 break;
4029 case 1:
4030 handle_rev16(s, sf, rn, rd);
4031 break;
4032 case 2:
4033 handle_rev32(s, sf, rn, rd);
4034 break;
4035 case 3:
4036 handle_rev64(s, sf, rn, rd);
4037 break;
4038 case 4:
4039 handle_clz(s, sf, rn, rd);
4040 break;
4041 case 5:
4042 handle_cls(s, sf, rn, rd);
4043 break;
4044 }
4045}
4046
4047static void handle_div(DisasContext *s, bool is_signed, unsigned int sf,
4048 unsigned int rm, unsigned int rn, unsigned int rd)
4049{
4050 TCGv_i64 tcg_n, tcg_m, tcg_rd;
4051 tcg_rd = cpu_reg(s, rd);
4052
4053 if (!sf && is_signed) {
4054 tcg_n = new_tmp_a64(s);
4055 tcg_m = new_tmp_a64(s);
4056 tcg_gen_ext32s_i64(tcg_n, cpu_reg(s, rn));
4057 tcg_gen_ext32s_i64(tcg_m, cpu_reg(s, rm));
4058 } else {
4059 tcg_n = read_cpu_reg(s, rn, sf);
4060 tcg_m = read_cpu_reg(s, rm, sf);
4061 }
4062
4063 if (is_signed) {
4064 gen_helper_sdiv64(tcg_rd, tcg_n, tcg_m);
4065 } else {
4066 gen_helper_udiv64(tcg_rd, tcg_n, tcg_m);
4067 }
4068
4069 if (!sf) {
4070 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
4071 }
4072}
4073
4074
4075static void handle_shift_reg(DisasContext *s,
4076 enum a64_shift_type shift_type, unsigned int sf,
4077 unsigned int rm, unsigned int rn, unsigned int rd)
4078{
4079 TCGv_i64 tcg_shift = tcg_temp_new_i64();
4080 TCGv_i64 tcg_rd = cpu_reg(s, rd);
4081 TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
4082
4083 tcg_gen_andi_i64(tcg_shift, cpu_reg(s, rm), sf ? 63 : 31);
4084 shift_reg(tcg_rd, tcg_rn, sf, shift_type, tcg_shift);
4085 tcg_temp_free_i64(tcg_shift);
4086}
4087
4088
4089static void handle_crc32(DisasContext *s,
4090 unsigned int sf, unsigned int sz, bool crc32c,
4091 unsigned int rm, unsigned int rn, unsigned int rd)
4092{
4093 TCGv_i64 tcg_acc, tcg_val;
4094 TCGv_i32 tcg_bytes;
4095
4096 if (!arm_dc_feature(s, ARM_FEATURE_CRC)
4097 || (sf == 1 && sz != 3)
4098 || (sf == 0 && sz == 3)) {
4099 unallocated_encoding(s);
4100 return;
4101 }
4102
4103 if (sz == 3) {
4104 tcg_val = cpu_reg(s, rm);
4105 } else {
4106 uint64_t mask;
4107 switch (sz) {
4108 case 0:
4109 mask = 0xFF;
4110 break;
4111 case 1:
4112 mask = 0xFFFF;
4113 break;
4114 case 2:
4115 mask = 0xFFFFFFFF;
4116 break;
4117 default:
4118 g_assert_not_reached();
4119 }
4120 tcg_val = new_tmp_a64(s);
4121 tcg_gen_andi_i64(tcg_val, cpu_reg(s, rm), mask);
4122 }
4123
4124 tcg_acc = cpu_reg(s, rn);
4125 tcg_bytes = tcg_const_i32(1 << sz);
4126
4127 if (crc32c) {
4128 gen_helper_crc32c_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes);
4129 } else {
4130 gen_helper_crc32_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes);
4131 }
4132
4133 tcg_temp_free_i32(tcg_bytes);
4134}
4135
4136
4137
4138
4139
4140
4141
4142static void disas_data_proc_2src(DisasContext *s, uint32_t insn)
4143{
4144 unsigned int sf, rm, opcode, rn, rd;
4145 sf = extract32(insn, 31, 1);
4146 rm = extract32(insn, 16, 5);
4147 opcode = extract32(insn, 10, 6);
4148 rn = extract32(insn, 5, 5);
4149 rd = extract32(insn, 0, 5);
4150
4151 if (extract32(insn, 29, 1)) {
4152 unallocated_encoding(s);
4153 return;
4154 }
4155
4156 switch (opcode) {
4157 case 2:
4158 handle_div(s, false, sf, rm, rn, rd);
4159 break;
4160 case 3:
4161 handle_div(s, true, sf, rm, rn, rd);
4162 break;
4163 case 8:
4164 handle_shift_reg(s, A64_SHIFT_TYPE_LSL, sf, rm, rn, rd);
4165 break;
4166 case 9:
4167 handle_shift_reg(s, A64_SHIFT_TYPE_LSR, sf, rm, rn, rd);
4168 break;
4169 case 10:
4170 handle_shift_reg(s, A64_SHIFT_TYPE_ASR, sf, rm, rn, rd);
4171 break;
4172 case 11:
4173 handle_shift_reg(s, A64_SHIFT_TYPE_ROR, sf, rm, rn, rd);
4174 break;
4175 case 16:
4176 case 17:
4177 case 18:
4178 case 19:
4179 case 20:
4180 case 21:
4181 case 22:
4182 case 23:
4183 {
4184 int sz = extract32(opcode, 0, 2);
4185 bool crc32c = extract32(opcode, 2, 1);
4186 handle_crc32(s, sf, sz, crc32c, rm, rn, rd);
4187 break;
4188 }
4189 default:
4190 unallocated_encoding(s);
4191 break;
4192 }
4193}
4194
4195
4196static void disas_data_proc_reg(DisasContext *s, uint32_t insn)
4197{
4198 switch (extract32(insn, 24, 5)) {
4199 case 0x0a:
4200 disas_logic_reg(s, insn);
4201 break;
4202 case 0x0b:
4203 if (insn & (1 << 21)) {
4204 disas_add_sub_ext_reg(s, insn);
4205 } else {
4206 disas_add_sub_reg(s, insn);
4207 }
4208 break;
4209 case 0x1b:
4210 disas_data_proc_3src(s, insn);
4211 break;
4212 case 0x1a:
4213 switch (extract32(insn, 21, 3)) {
4214 case 0x0:
4215 disas_adc_sbc(s, insn);
4216 break;
4217 case 0x2:
4218 disas_cc(s, insn);
4219 break;
4220 case 0x4:
4221 disas_cond_select(s, insn);
4222 break;
4223 case 0x6:
4224 if (insn & (1 << 30)) {
4225 disas_data_proc_1src(s, insn);
4226 } else {
4227 disas_data_proc_2src(s, insn);
4228 }
4229 break;
4230 default:
4231 unallocated_encoding(s);
4232 break;
4233 }
4234 break;
4235 default:
4236 unallocated_encoding(s);
4237 break;
4238 }
4239}
4240
/* Emit the FP compare shared by FCMP/FCMPE and FCCMP/FCCMPE: compare
 * Vn against Vm (or against +0.0 when cmp_with_zero), then load the
 * resulting flag word into NZCV.  signal_all_nans selects the
 * signalling ("E") compare helper.
 */
static void handle_fp_compare(DisasContext *s, bool is_double,
                              unsigned int rn, unsigned int rm,
                              bool cmp_with_zero, bool signal_all_nans)
{
    TCGv_i64 tcg_flags = tcg_temp_new_i64();
    TCGv_ptr fpst = get_fpstatus_ptr();

    if (is_double) {
        TCGv_i64 tcg_vn, tcg_vm;

        tcg_vn = read_fp_dreg(s, rn);
        if (cmp_with_zero) {
            tcg_vm = tcg_const_i64(0);
        } else {
            tcg_vm = read_fp_dreg(s, rm);
        }
        if (signal_all_nans) {
            gen_helper_vfp_cmped_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
        } else {
            gen_helper_vfp_cmpd_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
        }
        tcg_temp_free_i64(tcg_vn);
        tcg_temp_free_i64(tcg_vm);
    } else {
        TCGv_i32 tcg_vn, tcg_vm;

        tcg_vn = read_fp_sreg(s, rn);
        if (cmp_with_zero) {
            tcg_vm = tcg_const_i32(0);
        } else {
            tcg_vm = read_fp_sreg(s, rm);
        }
        if (signal_all_nans) {
            gen_helper_vfp_cmpes_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
        } else {
            gen_helper_vfp_cmps_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
        }
        tcg_temp_free_i32(tcg_vn);
        tcg_temp_free_i32(tcg_vm);
    }

    tcg_temp_free_ptr(fpst);

    /* publish the helper's flag word to the emulated NZCV */
    gen_set_nzcv(tcg_flags);

    tcg_temp_free_i64(tcg_flags);
}
4288
4289
4290
4291
4292
4293
4294
4295static void disas_fp_compare(DisasContext *s, uint32_t insn)
4296{
4297 unsigned int mos, type, rm, op, rn, opc, op2r;
4298
4299 mos = extract32(insn, 29, 3);
4300 type = extract32(insn, 22, 2);
4301 rm = extract32(insn, 16, 5);
4302 op = extract32(insn, 14, 2);
4303 rn = extract32(insn, 5, 5);
4304 opc = extract32(insn, 3, 2);
4305 op2r = extract32(insn, 0, 3);
4306
4307 if (mos || op || op2r || type > 1) {
4308 unallocated_encoding(s);
4309 return;
4310 }
4311
4312 if (!fp_access_check(s)) {
4313 return;
4314 }
4315
4316 handle_fp_compare(s, type, rn, rm, opc & 1, opc & 2);
4317}
4318
4319
4320
4321
4322
4323
4324
/* Floating point conditional compare: FCCMP/FCCMPE.  If the condition
 * holds, compare Vn with Vm and set NZCV from the result; otherwise
 * set NZCV directly from the nzcv immediate.
 */
static void disas_fp_ccomp(DisasContext *s, uint32_t insn)
{
    unsigned int mos, type, rm, cond, rn, op, nzcv;
    TCGv_i64 tcg_flags;
    TCGLabel *label_continue = NULL;

    mos = extract32(insn, 29, 3);
    type = extract32(insn, 22, 2);  /* 0: single, 1: double */
    rm = extract32(insn, 16, 5);
    cond = extract32(insn, 12, 4);
    rn = extract32(insn, 5, 5);
    op = extract32(insn, 4, 1);     /* 1 selects the signalling FCCMPE */
    nzcv = extract32(insn, 0, 4);

    if (mos || type > 1) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (cond < 0x0e) { /* conditions 0xe/0xf are "always": no branch needed */
        TCGLabel *label_match = gen_new_label();
        label_continue = gen_new_label();
        arm_gen_test_cc(cond, label_match);
        /* condition failed: load NZCV from the immediate and skip
         * the compare
         */
        tcg_flags = tcg_const_i64(nzcv << 28);
        gen_set_nzcv(tcg_flags);
        tcg_temp_free_i64(tcg_flags);
        tcg_gen_br(label_continue);
        gen_set_label(label_match);
    }

    handle_fp_compare(s, type, rn, rm, false, op);

    if (cond < 0x0e) {
        gen_set_label(label_continue);
    }
}
4366
4367
4368
4369
4370
4371
4372
/* Floating point conditional select: FCSEL.  */
static void disas_fp_csel(DisasContext *s, uint32_t insn)
{
    unsigned int mos, type, rm, cond, rn, rd;
    TCGv_i64 t_true, t_false, t_zero;
    DisasCompare64 c;

    mos = extract32(insn, 29, 3);
    type = extract32(insn, 22, 2);  /* 0: single, 1: double */
    rm = extract32(insn, 16, 5);
    cond = extract32(insn, 12, 4);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    if (mos || type > 1) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* Read both operands into 64-bit temps; the MO_32 reads leave the
     * single-precision values in the low half.
     */
    t_true = tcg_temp_new_i64();
    t_false = tcg_temp_new_i64();
    read_vec_element(s, t_true, rn, 0, type ? MO_64 : MO_32);
    read_vec_element(s, t_false, rm, 0, type ? MO_64 : MO_32);

    a64_test_cc(&c, cond);
    t_zero = tcg_const_i64(0);
    tcg_gen_movcond_i64(c.cond, t_true, c.value, t_zero, t_true, t_false);
    tcg_temp_free_i64(t_zero);
    tcg_temp_free_i64(t_false);
    a64_free_cc(&c);

    /* The full dreg write is used for both sizes; for the single case
     * this appears to rely on the MO_32 element reads above having
     * left the high bits zero -- TODO confirm against read_vec_element.
     */
    write_fp_dreg(s, rd, t_true);
    tcg_temp_free_i64(t_true);
}
4413
4414
/* Floating-point data-processing (1 source) - single precision */
static void handle_fp_1src_single(DisasContext *s, int opcode, int rd, int rn)
{
    TCGv_ptr fpst;
    TCGv_i32 tcg_op;
    TCGv_i32 tcg_res;

    fpst = get_fpstatus_ptr();
    tcg_op = read_fp_sreg(s, rn);
    tcg_res = tcg_temp_new_i32();

    switch (opcode) {
    case 0x0: /* FMOV */
        tcg_gen_mov_i32(tcg_res, tcg_op);
        break;
    case 0x1: /* FABS */
        gen_helper_vfp_abss(tcg_res, tcg_op);
        break;
    case 0x2: /* FNEG */
        gen_helper_vfp_negs(tcg_res, tcg_op);
        break;
    case 0x3: /* FSQRT */
        gen_helper_vfp_sqrts(tcg_res, tcg_op, cpu_env);
        break;
    case 0x8: /* FRINTN */
    case 0x9: /* FRINTP */
    case 0xa: /* FRINTM */
    case 0xb: /* FRINTZ */
    case 0xc: /* FRINTA */
    {
        /* Round-to-integral with an explicit rounding mode taken from
         * the opcode low bits: temporarily install it, round, then
         * restore the previous mode (set_rmode swaps old/new in place).
         */
        TCGv_i32 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(opcode & 7));

        gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
        gen_helper_rints(tcg_res, tcg_op, fpst);

        gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
        tcg_temp_free_i32(tcg_rmode);
        break;
    }
    case 0xe: /* FRINTX: round with Inexact reported */
        gen_helper_rints_exact(tcg_res, tcg_op, fpst);
        break;
    case 0xf: /* FRINTI: round using the current FPCR mode */
        gen_helper_rints(tcg_res, tcg_op, fpst);
        break;
    default:
        abort();
    }

    write_fp_sreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tcg_op);
    tcg_temp_free_i32(tcg_res);
}
4469
4470
/* Floating-point data-processing (1 source) - double precision */
static void handle_fp_1src_double(DisasContext *s, int opcode, int rd, int rn)
{
    TCGv_ptr fpst;
    TCGv_i64 tcg_op;
    TCGv_i64 tcg_res;

    fpst = get_fpstatus_ptr();
    tcg_op = read_fp_dreg(s, rn);
    tcg_res = tcg_temp_new_i64();

    switch (opcode) {
    case 0x0: /* FMOV */
        tcg_gen_mov_i64(tcg_res, tcg_op);
        break;
    case 0x1: /* FABS */
        gen_helper_vfp_absd(tcg_res, tcg_op);
        break;
    case 0x2: /* FNEG */
        gen_helper_vfp_negd(tcg_res, tcg_op);
        break;
    case 0x3: /* FSQRT */
        gen_helper_vfp_sqrtd(tcg_res, tcg_op, cpu_env);
        break;
    case 0x8: /* FRINTN */
    case 0x9: /* FRINTP */
    case 0xa: /* FRINTM */
    case 0xb: /* FRINTZ */
    case 0xc: /* FRINTA */
    {
        /* Round-to-integral with an explicit rounding mode taken from
         * the opcode low bits: temporarily install it, round, then
         * restore the previous mode (set_rmode swaps old/new in place).
         */
        TCGv_i32 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(opcode & 7));

        gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
        gen_helper_rintd(tcg_res, tcg_op, fpst);

        gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
        tcg_temp_free_i32(tcg_rmode);
        break;
    }
    case 0xe: /* FRINTX: round with Inexact reported */
        gen_helper_rintd_exact(tcg_res, tcg_op, fpst);
        break;
    case 0xf: /* FRINTI: round using the current FPCR mode */
        gen_helper_rintd(tcg_res, tcg_op, fpst);
        break;
    default:
        abort();
    }

    write_fp_dreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(tcg_op);
    tcg_temp_free_i64(tcg_res);
}
4525
/* FCVT between floating-point precisions.
 * ntype is the source precision, dtype the destination:
 * 0 = single, 1 = double, 3 = half (as the helper names show).
 * ntype == dtype is rejected by the caller.
 */
static void handle_fp_fcvt(DisasContext *s, int opcode,
                           int rd, int rn, int dtype, int ntype)
{
    switch (ntype) {
    case 0x0:
    {
        /* Single-precision source */
        TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
        if (dtype == 1) {
            /* Single to double */
            TCGv_i64 tcg_rd = tcg_temp_new_i64();
            gen_helper_vfp_fcvtds(tcg_rd, tcg_rn, cpu_env);
            write_fp_dreg(s, rd, tcg_rd);
            tcg_temp_free_i64(tcg_rd);
        } else {
            /* Single to half */
            TCGv_i32 tcg_rd = tcg_temp_new_i32();
            gen_helper_vfp_fcvt_f32_to_f16(tcg_rd, tcg_rn, cpu_env);
            /* write_fp_sreg is OK here because the top bits are zero */
            write_fp_sreg(s, rd, tcg_rd);
            tcg_temp_free_i32(tcg_rd);
        }
        tcg_temp_free_i32(tcg_rn);
        break;
    }
    case 0x1:
    {
        /* Double-precision source */
        TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
        TCGv_i32 tcg_rd = tcg_temp_new_i32();
        if (dtype == 0) {
            /* Double to single */
            gen_helper_vfp_fcvtsd(tcg_rd, tcg_rn, cpu_env);
        } else {
            /* Double to half */
            gen_helper_vfp_fcvt_f64_to_f16(tcg_rd, tcg_rn, cpu_env);
            /* write_fp_sreg is OK here because the top bits are zero */
        }
        write_fp_sreg(s, rd, tcg_rd);
        tcg_temp_free_i32(tcg_rd);
        tcg_temp_free_i64(tcg_rn);
        break;
    }
    case 0x3:
    {
        /* Half-precision source: only the low 16 bits are significant */
        TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
        tcg_gen_ext16u_i32(tcg_rn, tcg_rn);
        if (dtype == 0) {
            /* Half to single */
            TCGv_i32 tcg_rd = tcg_temp_new_i32();
            gen_helper_vfp_fcvt_f16_to_f32(tcg_rd, tcg_rn, cpu_env);
            write_fp_sreg(s, rd, tcg_rd);
            tcg_temp_free_i32(tcg_rd);
        } else {
            /* Half to double */
            TCGv_i64 tcg_rd = tcg_temp_new_i64();
            gen_helper_vfp_fcvt_f16_to_f64(tcg_rd, tcg_rn, cpu_env);
            write_fp_dreg(s, rd, tcg_rd);
            tcg_temp_free_i64(tcg_rd);
        }
        tcg_temp_free_i32(tcg_rn);
        break;
    }
    default:
        abort();
    }
}
4591
4592
4593
4594
4595
4596
4597
/* Floating point data-processing (1 source)
 * Decodes the type (precision) and opcode fields and dispatches
 * to the FCVT or single/double 1-source handlers.
 */
static void disas_fp_1src(DisasContext *s, uint32_t insn)
{
    int type = extract32(insn, 22, 2);
    int opcode = extract32(insn, 15, 6);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    switch (opcode) {
    case 0x4: case 0x5: case 0x7:
    {
        /* FCVT between half, single and double precision.
         * Destination precision is in opcode bits [1:0]; converting
         * to the same precision (dtype == type) is unallocated,
         * as is type == 2 (there is no 2-encoded source precision).
         */
        int dtype = extract32(opcode, 0, 2);
        if (type == 2 || dtype == type) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }

        handle_fp_fcvt(s, opcode, rd, rn, dtype, type);
        break;
    }
    case 0x0 ... 0x3:
    case 0x8 ... 0xc:
    case 0xe ... 0xf:
        /* 32-to-32 and 64-to-64 ops (FMOV/FABS/FNEG/FSQRT/FRINT*) */
        switch (type) {
        case 0:
            if (!fp_access_check(s)) {
                return;
            }

            handle_fp_1src_single(s, opcode, rd, rn);
            break;
        case 1:
            if (!fp_access_check(s)) {
                return;
            }

            handle_fp_1src_double(s, opcode, rd, rn);
            break;
        default:
            unallocated_encoding(s);
        }
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
4649
4650
/* Floating-point data-processing (2 source) - single precision */
static void handle_fp_2src_single(DisasContext *s, int opcode,
                                  int rd, int rn, int rm)
{
    TCGv_i32 tcg_op1;
    TCGv_i32 tcg_op2;
    TCGv_i32 tcg_res;
    TCGv_ptr fpst;

    tcg_res = tcg_temp_new_i32();
    fpst = get_fpstatus_ptr();
    tcg_op1 = read_fp_sreg(s, rn);
    tcg_op2 = read_fp_sreg(s, rm);

    switch (opcode) {
    case 0x0: /* FMUL */
        gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x1: /* FDIV */
        gen_helper_vfp_divs(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x2: /* FADD */
        gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x3: /* FSUB */
        gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x4: /* FMAX */
        gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x5: /* FMIN */
        gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x6: /* FMAXNM */
        gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x7: /* FMINNM */
        gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x8: /* FNMUL: multiply then negate the product */
        gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
        gen_helper_vfp_negs(tcg_res, tcg_res);
        break;
    }

    write_fp_sreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tcg_op1);
    tcg_temp_free_i32(tcg_op2);
    tcg_temp_free_i32(tcg_res);
}
4702
4703
/* Floating-point data-processing (2 source) - double precision */
static void handle_fp_2src_double(DisasContext *s, int opcode,
                                  int rd, int rn, int rm)
{
    TCGv_i64 tcg_op1;
    TCGv_i64 tcg_op2;
    TCGv_i64 tcg_res;
    TCGv_ptr fpst;

    tcg_res = tcg_temp_new_i64();
    fpst = get_fpstatus_ptr();
    tcg_op1 = read_fp_dreg(s, rn);
    tcg_op2 = read_fp_dreg(s, rm);

    switch (opcode) {
    case 0x0: /* FMUL */
        gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x1: /* FDIV */
        gen_helper_vfp_divd(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x2: /* FADD */
        gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x3: /* FSUB */
        gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x4: /* FMAX */
        gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x5: /* FMIN */
        gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x6: /* FMAXNM */
        gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x7: /* FMINNM */
        gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x8: /* FNMUL: multiply then negate the product */
        gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
        gen_helper_vfp_negd(tcg_res, tcg_res);
        break;
    }

    write_fp_dreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(tcg_op1);
    tcg_temp_free_i64(tcg_op2);
    tcg_temp_free_i64(tcg_res);
}
4755
4756
4757
4758
4759
4760
4761
4762static void disas_fp_2src(DisasContext *s, uint32_t insn)
4763{
4764 int type = extract32(insn, 22, 2);
4765 int rd = extract32(insn, 0, 5);
4766 int rn = extract32(insn, 5, 5);
4767 int rm = extract32(insn, 16, 5);
4768 int opcode = extract32(insn, 12, 4);
4769
4770 if (opcode > 8) {
4771 unallocated_encoding(s);
4772 return;
4773 }
4774
4775 switch (type) {
4776 case 0:
4777 if (!fp_access_check(s)) {
4778 return;
4779 }
4780 handle_fp_2src_single(s, opcode, rd, rn, rm);
4781 break;
4782 case 1:
4783 if (!fp_access_check(s)) {
4784 return;
4785 }
4786 handle_fp_2src_double(s, opcode, rd, rn, rm);
4787 break;
4788 default:
4789 unallocated_encoding(s);
4790 }
4791}
4792
4793
/* Floating-point data-processing (3 source) - single precision */
static void handle_fp_3src_single(DisasContext *s, bool o0, bool o1,
                                  int rd, int rn, int rm, int ra)
{
    TCGv_i32 tcg_op1, tcg_op2, tcg_op3;
    TCGv_i32 tcg_res = tcg_temp_new_i32();
    TCGv_ptr fpst = get_fpstatus_ptr();

    tcg_op1 = read_fp_sreg(s, rn);
    tcg_op2 = read_fp_sreg(s, rm);
    tcg_op3 = read_fp_sreg(s, ra);

    /* These are fused multiply-add ops, implemented by negating the
     * relevant inputs and issuing a single fused muladd:
     *   o1 negates the addend (ra)
     *   o0 != o1 negates one multiplicand (rn)
     * Doing the negations as separate steps ahead of the muladd is
     * fine: an input NaN comes out with its sign bit flipped if it
     * is a negated input, which is the required behaviour.
     */
    if (o1 == true) {
        gen_helper_vfp_negs(tcg_op3, tcg_op3);
    }

    if (o0 != o1) {
        gen_helper_vfp_negs(tcg_op1, tcg_op1);
    }

    gen_helper_vfp_muladds(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);

    write_fp_sreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tcg_op1);
    tcg_temp_free_i32(tcg_op2);
    tcg_temp_free_i32(tcg_op3);
    tcg_temp_free_i32(tcg_res);
}
4830
4831
/* Floating-point data-processing (3 source) - double precision */
static void handle_fp_3src_double(DisasContext *s, bool o0, bool o1,
                                  int rd, int rn, int rm, int ra)
{
    TCGv_i64 tcg_op1, tcg_op2, tcg_op3;
    TCGv_i64 tcg_res = tcg_temp_new_i64();
    TCGv_ptr fpst = get_fpstatus_ptr();

    tcg_op1 = read_fp_dreg(s, rn);
    tcg_op2 = read_fp_dreg(s, rm);
    tcg_op3 = read_fp_dreg(s, ra);

    /* These are fused multiply-add ops, implemented by negating the
     * relevant inputs and issuing a single fused muladd:
     *   o1 negates the addend (ra)
     *   o0 != o1 negates one multiplicand (rn)
     * Doing the negations as separate steps ahead of the muladd is
     * fine: an input NaN comes out with its sign bit flipped if it
     * is a negated input, which is the required behaviour.
     */
    if (o1 == true) {
        gen_helper_vfp_negd(tcg_op3, tcg_op3);
    }

    if (o0 != o1) {
        gen_helper_vfp_negd(tcg_op1, tcg_op1);
    }

    gen_helper_vfp_muladdd(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);

    write_fp_dreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(tcg_op1);
    tcg_temp_free_i64(tcg_op2);
    tcg_temp_free_i64(tcg_op3);
    tcg_temp_free_i64(tcg_res);
}
4868
4869
4870
4871
4872
4873
4874
4875static void disas_fp_3src(DisasContext *s, uint32_t insn)
4876{
4877 int type = extract32(insn, 22, 2);
4878 int rd = extract32(insn, 0, 5);
4879 int rn = extract32(insn, 5, 5);
4880 int ra = extract32(insn, 10, 5);
4881 int rm = extract32(insn, 16, 5);
4882 bool o0 = extract32(insn, 15, 1);
4883 bool o1 = extract32(insn, 21, 1);
4884
4885 switch (type) {
4886 case 0:
4887 if (!fp_access_check(s)) {
4888 return;
4889 }
4890 handle_fp_3src_single(s, o0, o1, rd, rn, rm, ra);
4891 break;
4892 case 1:
4893 if (!fp_access_check(s)) {
4894 return;
4895 }
4896 handle_fp_3src_double(s, o0, o1, rd, rn, rm, ra);
4897 break;
4898 default:
4899 unallocated_encoding(s);
4900 }
4901}
4902
4903
4904
4905
4906
4907
4908
/* Floating point immediate (FMOV Sd/Dd, #imm) */
static void disas_fp_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int imm8 = extract32(insn, 13, 8);
    int is_double = extract32(insn, 22, 2);
    uint64_t imm;
    TCGv_i64 tcg_res;

    if (is_double > 1) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* Expand imm8 (sign, inverted-exponent bit, 6 mantissa bits) into
     * the top 16 bits of the target format, then shift into position;
     * the low mantissa bits of the result are all zero.  This matches
     * the VFPExpandImm() pseudocode pattern.
     */
    if (is_double) {
        imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
            (extract32(imm8, 6, 1) ? 0x3fc0 : 0x4000) |
            extract32(imm8, 0, 6);
        imm <<= 48;
    } else {
        imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
            (extract32(imm8, 6, 1) ? 0x3e00 : 0x4000) |
            (extract32(imm8, 0, 6) << 3);
        imm <<= 16;
    }

    /* single-precision pattern already sits in the low 32 bits,
     * so writing the D register is correct for both sizes
     */
    tcg_res = tcg_const_i64(imm);
    write_fp_dreg(s, rd, tcg_res);
    tcg_temp_free_i64(tcg_res);
}
4947
4948
4949
4950
4951
4952
/* Shared code for converting between FP and (possibly fixed-point)
 * integer values.
 *   itof:  true for int -> float, false for float -> int
 *   scale: fixed-point fbits are encoded as 64 - scale, so
 *          tcg_shift below is the fractional-bit count
 *   sf:    64-bit (1) or 32-bit (0) general register
 *   type:  1 = double precision, 0 = single precision
 */
static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode,
                           bool itof, int rmode, int scale, int sf, int type)
{
    bool is_signed = !(opcode & 1);
    bool is_double = type;
    TCGv_ptr tcg_fpstatus;
    TCGv_i32 tcg_shift;

    tcg_fpstatus = get_fpstatus_ptr();

    tcg_shift = tcg_const_i32(64 - scale);

    if (itof) {
        TCGv_i64 tcg_int = cpu_reg(s, rn);
        if (!sf) {
            /* 32-bit source: widen to 64 bits first */
            TCGv_i64 tcg_extend = new_tmp_a64(s);

            if (is_signed) {
                tcg_gen_ext32s_i64(tcg_extend, tcg_int);
            } else {
                tcg_gen_ext32u_i64(tcg_extend, tcg_int);
            }

            tcg_int = tcg_extend;
        }

        if (is_double) {
            TCGv_i64 tcg_double = tcg_temp_new_i64();
            if (is_signed) {
                gen_helper_vfp_sqtod(tcg_double, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_uqtod(tcg_double, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            }
            write_fp_dreg(s, rd, tcg_double);
            tcg_temp_free_i64(tcg_double);
        } else {
            TCGv_i32 tcg_single = tcg_temp_new_i32();
            if (is_signed) {
                gen_helper_vfp_sqtos(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_uqtos(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            }
            write_fp_sreg(s, rd, tcg_single);
            tcg_temp_free_i32(tcg_single);
        }
    } else {
        TCGv_i64 tcg_int = cpu_reg(s, rd);
        TCGv_i32 tcg_rmode;

        if (extract32(opcode, 2, 1)) {
            /* opcode bit 2 set: round-to-nearest-with-ties-to-away
             * overrides the requested rounding mode
             */
            rmode = FPROUNDING_TIEAWAY;
        }

        tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));

        /* install conversion rounding mode (set_rmode swaps old/new) */
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);

        if (is_double) {
            TCGv_i64 tcg_double = read_fp_dreg(s, rn);
            if (is_signed) {
                if (!sf) {
                    gen_helper_vfp_tosld(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_tosqd(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                }
            } else {
                if (!sf) {
                    gen_helper_vfp_tould(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touqd(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                }
            }
            tcg_temp_free_i64(tcg_double);
        } else {
            TCGv_i32 tcg_single = read_fp_sreg(s, rn);
            if (sf) {
                if (is_signed) {
                    gen_helper_vfp_tosqs(tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touqs(tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
            } else {
                /* 32-bit result: convert into a temp, then widen */
                TCGv_i32 tcg_dest = tcg_temp_new_i32();
                if (is_signed) {
                    gen_helper_vfp_tosls(tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touls(tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
                tcg_gen_extu_i32_i64(tcg_int, tcg_dest);
                tcg_temp_free_i32(tcg_dest);
            }
            tcg_temp_free_i32(tcg_single);
        }

        /* restore the previous rounding mode */
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
        tcg_temp_free_i32(tcg_rmode);

        /* W-register results are zero-extended into the X register */
        if (!sf) {
            tcg_gen_ext32u_i64(tcg_int, tcg_int);
        }
    }

    tcg_temp_free_ptr(tcg_fpstatus);
    tcg_temp_free_i32(tcg_shift);
}
5073
5074
5075
5076
5077
5078
5079
/* Floating point <-> fixed point conversions (SCVTF/UCVTF/FCVTZS/FCVTZU
 * with fractional bits).  The fbits count is 64 - scale (computed in
 * handle_fpfpcvt); rounding is always toward zero here.
 */
static void disas_fp_fixed_conv(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int scale = extract32(insn, 10, 6);
    int opcode = extract32(insn, 16, 3);
    int rmode = extract32(insn, 19, 2);
    int type = extract32(insn, 22, 2);
    bool sbit = extract32(insn, 29, 1);
    bool sf = extract32(insn, 31, 1);
    bool itof;

    /* S must be 0; only single/double precision; for 32-bit registers
     * scale < 32 would mean more than 32 fractional bits, which is
     * unallocated
     */
    if (sbit || (type > 1)
        || (!sf && scale < 32)) {
        unallocated_encoding(s);
        return;
    }

    /* only the int->float (rmode=0, opc 2/3) and float->int
     * (rmode=3, opc 0/1) combinations are allocated
     */
    switch ((rmode << 3) | opcode) {
    case 0x2: /* SCVTF */
    case 0x3: /* UCVTF */
        itof = true;
        break;
    case 0x18: /* FCVTZS */
    case 0x19: /* FCVTZU */
        itof = false;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    handle_fpfpcvt(s, rd, rn, opcode, itof, FPROUNDING_ZERO, scale, sf, type);
}
5118
/* FMOV between a general register and an FP register, without
 * conversion: a raw bit copy.  type selects the width/slice:
 *   0: 32 bit (W <-> low half of S/D register slot)
 *   1: 64 bit (X <-> D)
 *   2: 64 bit (X <-> high 64 bits of the 128-bit vector register)
 */
static void handle_fmov(DisasContext *s, int rd, int rn, int type, bool itof)
{
    if (itof) {
        /* general register -> FP register */
        TCGv_i64 tcg_rn = cpu_reg(s, rn);

        switch (type) {
        case 0:
        {
            /* 32 bit: zero-extend W into the low 64 bits and clear
             * the high 64 bits of the vector register
             */
            TCGv_i64 tmp = tcg_temp_new_i64();
            tcg_gen_ext32u_i64(tmp, tcg_rn);
            tcg_gen_st_i64(tmp, cpu_env, fp_reg_offset(s, rd, MO_64));
            tcg_gen_movi_i64(tmp, 0);
            tcg_gen_st_i64(tmp, cpu_env, fp_reg_hi_offset(s, rd));
            tcg_temp_free_i64(tmp);
            break;
        }
        case 1:
        {
            /* 64 bit: store X to the low half, zero the high half */
            TCGv_i64 tmp = tcg_const_i64(0);
            tcg_gen_st_i64(tcg_rn, cpu_env, fp_reg_offset(s, rd, MO_64));
            tcg_gen_st_i64(tmp, cpu_env, fp_reg_hi_offset(s, rd));
            tcg_temp_free_i64(tmp);
            break;
        }
        case 2:
            /* 64 bit to top half of quad: low half left untouched */
            tcg_gen_st_i64(tcg_rn, cpu_env, fp_reg_hi_offset(s, rd));
            break;
        }
    } else {
        /* FP register -> general register */
        TCGv_i64 tcg_rd = cpu_reg(s, rd);

        switch (type) {
        case 0:
            /* 32 bit */
            tcg_gen_ld32u_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_32));
            break;
        case 1:
            /* 64 bit */
            tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_64));
            break;
        case 2:
            /* 64 bit from top half of quad */
            tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_hi_offset(s, rn));
            break;
        }
    }
}
5173
5174
5175
5176
5177
5178
5179
5180static void disas_fp_int_conv(DisasContext *s, uint32_t insn)
5181{
5182 int rd = extract32(insn, 0, 5);
5183 int rn = extract32(insn, 5, 5);
5184 int opcode = extract32(insn, 16, 3);
5185 int rmode = extract32(insn, 19, 2);
5186 int type = extract32(insn, 22, 2);
5187 bool sbit = extract32(insn, 29, 1);
5188 bool sf = extract32(insn, 31, 1);
5189
5190 if (sbit) {
5191 unallocated_encoding(s);
5192 return;
5193 }
5194
5195 if (opcode > 5) {
5196
5197 bool itof = opcode & 1;
5198
5199 if (rmode >= 2) {
5200 unallocated_encoding(s);
5201 return;
5202 }
5203
5204 switch (sf << 3 | type << 1 | rmode) {
5205 case 0x0:
5206 case 0xa:
5207 case 0xd:
5208 break;
5209 default:
5210
5211 unallocated_encoding(s);
5212 break;
5213 }
5214
5215 if (!fp_access_check(s)) {
5216 return;
5217 }
5218 handle_fmov(s, rd, rn, type, itof);
5219 } else {
5220
5221 bool itof = extract32(opcode, 1, 1);
5222
5223 if (type > 1 || (rmode != 0 && opcode > 1)) {
5224 unallocated_encoding(s);
5225 return;
5226 }
5227
5228 if (!fp_access_check(s)) {
5229 return;
5230 }
5231 handle_fpfpcvt(s, rd, rn, opcode, itof, rmode, 64, sf, type);
5232 }
5233}
5234
5235
5236
5237
5238
5239
5240
/* Top-level decode for the floating point data-processing group:
 * route to the per-subgroup decoders based on bits 24, 21 and 11:10,
 * then on the position of the lowest set bit of bits 15:12.
 */
static void disas_data_proc_fp(DisasContext *s, uint32_t insn)
{
    if (extract32(insn, 24, 1)) {
        /* Floating point data-processing (3 source) */
        disas_fp_3src(s, insn);
    } else if (extract32(insn, 21, 1) == 0) {
        /* Floating point to fixed point conversions */
        disas_fp_fixed_conv(s, insn);
    } else {
        switch (extract32(insn, 10, 2)) {
        case 1:
            /* Floating point conditional compare */
            disas_fp_ccomp(s, insn);
            break;
        case 2:
            /* Floating point data-processing (2 source) */
            disas_fp_2src(s, insn);
            break;
        case 3:
            /* Floating point conditional select */
            disas_fp_csel(s, insn);
            break;
        case 0:
            switch (ctz32(extract32(insn, 12, 4))) {
            case 0: /* [15:12] == xxx1 */
                /* Floating point immediate */
                disas_fp_imm(s, insn);
                break;
            case 1: /* [15:12] == xx10 */
                /* Floating point compare */
                disas_fp_compare(s, insn);
                break;
            case 2: /* [15:12] == x100 */
                /* Floating point data-processing (1 source) */
                disas_fp_1src(s, insn);
                break;
            case 3: /* [15:12] == 1000 */
                unallocated_encoding(s);
                break;
            default: /* [15:12] == 0000, ctz32 returns 32 */
                /* Floating point <-> integer conversions */
                disas_fp_int_conv(s, insn);
                break;
            }
            break;
        }
    }
}
5289
static void do_ext64(DisasContext *s, TCGv_i64 tcg_left, TCGv_i64 tcg_right,
                     int pos)
{
    /* Extract 64 bits from the middle of two concatenated 64-bit
     * vector register slices left:right.  The extracted bits start
     * at 'pos' bits into the right (least significant) side.
     * The result is returned in tcg_right; tcg_left is not trashed.
     */
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();
    assert(pos > 0 && pos < 64);

    tcg_gen_shri_i64(tcg_right, tcg_right, pos);
    tcg_gen_shli_i64(tcg_tmp, tcg_left, 64 - pos);
    tcg_gen_or_i64(tcg_right, tcg_right, tcg_tmp);

    tcg_temp_free_i64(tcg_tmp);
}
5308
5309
5310
5311
5312
5313
5314
/* AdvSIMD EXT: extract a vector from the concatenation Vm:Vn,
 * starting imm4 bytes into Vn.
 */
static void disas_simd_ext(DisasContext *s, uint32_t insn)
{
    int is_q = extract32(insn, 30, 1);
    int op2 = extract32(insn, 22, 2);
    int imm4 = extract32(insn, 11, 4);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    int pos = imm4 << 3; /* byte index converted to a bit shift */
    TCGv_i64 tcg_resl, tcg_resh;

    /* op2 must be zero; for 64-bit vectors the index must be < 8 */
    if (op2 != 0 || (!is_q && extract32(imm4, 3, 1))) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_resh = tcg_temp_new_i64();
    tcg_resl = tcg_temp_new_i64();

    /* Build the result in tcg_resh:tcg_resl using do_ext64 on
     * successive 64-bit slices; the reads below happen before the
     * destination is written, so rd may equal rn or rm.
     */
    if (!is_q) {
        /* 64-bit vector: one slice from rn:rm, high half zeroed */
        read_vec_element(s, tcg_resl, rn, 0, MO_64);
        if (pos != 0) {
            read_vec_element(s, tcg_resh, rm, 0, MO_64);
            do_ext64(s, tcg_resh, tcg_resl, pos);
        }
        tcg_gen_movi_i64(tcg_resh, 0);
    } else {
        TCGv_i64 tcg_hh;
        typedef struct {
            int reg;
            int elt;
        } EltPosns;
        EltPosns eltposns[] = { {rn, 0}, {rn, 1}, {rm, 0}, {rm, 1} };
        EltPosns *elt = eltposns;

        /* index >= 8 bytes: skip the first source slice entirely */
        if (pos >= 64) {
            elt++;
            pos -= 64;
        }

        read_vec_element(s, tcg_resl, elt->reg, elt->elt, MO_64);
        elt++;
        read_vec_element(s, tcg_resh, elt->reg, elt->elt, MO_64);
        elt++;
        if (pos != 0) {
            do_ext64(s, tcg_resh, tcg_resl, pos);
            tcg_hh = tcg_temp_new_i64();
            read_vec_element(s, tcg_hh, elt->reg, elt->elt, MO_64);
            do_ext64(s, tcg_hh, tcg_resh, pos);
            tcg_temp_free_i64(tcg_hh);
        }
    }

    write_vec_element(s, tcg_resl, rd, 0, MO_64);
    tcg_temp_free_i64(tcg_resl);
    write_vec_element(s, tcg_resh, rd, 1, MO_64);
    tcg_temp_free_i64(tcg_resh);
}
5381
5382
5383
5384
5385
5386
5387
/* AdvSIMD TBL/TBX: table lookup through len+1 consecutive vector
 * registers starting at rn, indexed by the bytes of rm.  TBX
 * (is_tblx) leaves out-of-range destination bytes unchanged; TBL
 * zeroes them.
 */
static void disas_simd_tb(DisasContext *s, uint32_t insn)
{
    int op2 = extract32(insn, 22, 2);
    int is_q = extract32(insn, 30, 1);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    int is_tblx = extract32(insn, 12, 1);
    int len = extract32(insn, 13, 2);
    TCGv_i64 tcg_resl, tcg_resh, tcg_idx;
    TCGv_i32 tcg_regno, tcg_numregs;

    if (op2 != 0) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* The helper does the actual byte-wise lookup; here we just
     * seed the result with the old destination contents (TBX) or
     * zeroes (TBL), and feed it each 64-bit index slice in turn.
     */
    tcg_resl = tcg_temp_new_i64();
    tcg_resh = tcg_temp_new_i64();

    if (is_tblx) {
        read_vec_element(s, tcg_resl, rd, 0, MO_64);
    } else {
        tcg_gen_movi_i64(tcg_resl, 0);
    }
    if (is_tblx && is_q) {
        read_vec_element(s, tcg_resh, rd, 1, MO_64);
    } else {
        tcg_gen_movi_i64(tcg_resh, 0);
    }

    tcg_idx = tcg_temp_new_i64();
    tcg_regno = tcg_const_i32(rn);
    tcg_numregs = tcg_const_i32(len + 1);
    read_vec_element(s, tcg_idx, rm, 0, MO_64);
    gen_helper_simd_tbl(tcg_resl, cpu_env, tcg_resl, tcg_idx,
                        tcg_regno, tcg_numregs);
    if (is_q) {
        read_vec_element(s, tcg_idx, rm, 1, MO_64);
        gen_helper_simd_tbl(tcg_resh, cpu_env, tcg_resh, tcg_idx,
                            tcg_regno, tcg_numregs);
    }
    tcg_temp_free_i64(tcg_idx);
    tcg_temp_free_i32(tcg_regno);
    tcg_temp_free_i32(tcg_numregs);

    write_vec_element(s, tcg_resl, rd, 0, MO_64);
    tcg_temp_free_i64(tcg_resl);
    write_vec_element(s, tcg_resh, rd, 1, MO_64);
    tcg_temp_free_i64(tcg_resh);
}
5449
5450
5451
5452
5453
5454
5455
/* AdvSIMD ZIP/UZP/TRN: element permutes across two source vectors.
 * opcode 1 = UZP (unzip), 2 = TRN (transpose), 3 = ZIP (interleave);
 * 'part' selects the 1 (odd/high) or 2 (even/low) variant.
 */
static void disas_simd_zip_trn(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 2);
    bool part = extract32(insn, 14, 1);
    bool is_q = extract32(insn, 30, 1);
    int esize = 8 << size;
    int i, ofs;
    int datasize = is_q ? 128 : 64;
    int elements = datasize / esize;
    TCGv_i64 tcg_res, tcg_resl, tcg_resh;

    /* opcode 0 is unallocated, and 64-bit elements need a Q vector */
    if (opcode == 0 || (size == 3 && !is_q)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* accumulate the permuted result in tcg_resh:tcg_resl so that
     * rd may alias rn/rm
     */
    tcg_resl = tcg_const_i64(0);
    tcg_resh = tcg_const_i64(0);
    tcg_res = tcg_temp_new_i64();

    for (i = 0; i < elements; i++) {
        /* pick the source element that lands in result slot i */
        switch (opcode) {
        case 1: /* UZP1/2 */
        {
            int midpoint = elements / 2;
            if (i < midpoint) {
                read_vec_element(s, tcg_res, rn, 2 * i + part, size);
            } else {
                read_vec_element(s, tcg_res, rm,
                                 2 * (i - midpoint) + part, size);
            }
            break;
        }
        case 2: /* TRN1/2 */
            if (i & 1) {
                read_vec_element(s, tcg_res, rm, (i & ~1) + part, size);
            } else {
                read_vec_element(s, tcg_res, rn, (i & ~1) + part, size);
            }
            break;
        case 3: /* ZIP1/2 */
        {
            int base = part * elements / 2;
            if (i & 1) {
                read_vec_element(s, tcg_res, rm, base + (i >> 1), size);
            } else {
                read_vec_element(s, tcg_res, rn, base + (i >> 1), size);
            }
            break;
        }
        default:
            g_assert_not_reached();
        }

        /* OR the element into the right half of the accumulator */
        ofs = i * esize;
        if (ofs < 64) {
            tcg_gen_shli_i64(tcg_res, tcg_res, ofs);
            tcg_gen_or_i64(tcg_resl, tcg_resl, tcg_res);
        } else {
            tcg_gen_shli_i64(tcg_res, tcg_res, ofs - 64);
            tcg_gen_or_i64(tcg_resh, tcg_resh, tcg_res);
        }
    }

    tcg_temp_free_i64(tcg_res);

    write_vec_element(s, tcg_resl, rd, 0, MO_64);
    tcg_temp_free_i64(tcg_resl);
    write_vec_element(s, tcg_resh, rd, 1, MO_64);
    tcg_temp_free_i64(tcg_resh);
}
5538
5539static void do_minmaxop(DisasContext *s, TCGv_i32 tcg_elt1, TCGv_i32 tcg_elt2,
5540 int opc, bool is_min, TCGv_ptr fpst)
5541{
5542
5543
5544
5545
5546 if (opc == 0xc) {
5547 if (is_min) {
5548 gen_helper_vfp_minnums(tcg_elt1, tcg_elt1, tcg_elt2, fpst);
5549 } else {
5550 gen_helper_vfp_maxnums(tcg_elt1, tcg_elt1, tcg_elt2, fpst);
5551 }
5552 } else {
5553 assert(opc == 0xf);
5554 if (is_min) {
5555 gen_helper_vfp_mins(tcg_elt1, tcg_elt1, tcg_elt2, fpst);
5556 } else {
5557 gen_helper_vfp_maxs(tcg_elt1, tcg_elt1, tcg_elt2, fpst);
5558 }
5559 }
5560}
5561
5562
5563
5564
5565
5566
5567
5568static void disas_simd_across_lanes(DisasContext *s, uint32_t insn)
5569{
5570 int rd = extract32(insn, 0, 5);
5571 int rn = extract32(insn, 5, 5);
5572 int size = extract32(insn, 22, 2);
5573 int opcode = extract32(insn, 12, 5);
5574 bool is_q = extract32(insn, 30, 1);
5575 bool is_u = extract32(insn, 29, 1);
5576 bool is_fp = false;
5577 bool is_min = false;
5578 int esize;
5579 int elements;
5580 int i;
5581 TCGv_i64 tcg_res, tcg_elt;
5582
5583 switch (opcode) {
5584 case 0x1b:
5585 if (is_u) {
5586 unallocated_encoding(s);
5587 return;
5588 }
5589
5590 case 0x3:
5591 case 0xa:
5592 case 0x1a:
5593 if (size == 3 || (size == 2 && !is_q)) {
5594 unallocated_encoding(s);
5595 return;
5596 }
5597 break;
5598 case 0xc:
5599 case 0xf:
5600 if (!is_u || !is_q || extract32(size, 0, 1)) {
5601 unallocated_encoding(s);
5602 return;
5603 }
5604
5605
5606
5607 is_min = extract32(size, 1, 1);
5608 is_fp = true;
5609 size = 2;
5610 break;
5611 default:
5612 unallocated_encoding(s);
5613 return;
5614 }
5615
5616 if (!fp_access_check(s)) {
5617 return;
5618 }
5619
5620 esize = 8 << size;
5621 elements = (is_q ? 128 : 64) / esize;
5622
5623 tcg_res = tcg_temp_new_i64();
5624 tcg_elt = tcg_temp_new_i64();
5625
5626
5627
5628
5629
5630
5631
5632
5633
5634
5635
5636
5637
5638 if (!is_fp) {
5639 read_vec_element(s, tcg_res, rn, 0, size | (is_u ? 0 : MO_SIGN));
5640
5641 for (i = 1; i < elements; i++) {
5642 read_vec_element(s, tcg_elt, rn, i, size | (is_u ? 0 : MO_SIGN));
5643
5644 switch (opcode) {
5645 case 0x03:
5646 case 0x1b:
5647 tcg_gen_add_i64(tcg_res, tcg_res, tcg_elt);
5648 break;
5649 case 0x0a:
5650 tcg_gen_movcond_i64(is_u ? TCG_COND_GEU : TCG_COND_GE,
5651 tcg_res,
5652 tcg_res, tcg_elt, tcg_res, tcg_elt);
5653 break;
5654 case 0x1a:
5655 tcg_gen_movcond_i64(is_u ? TCG_COND_LEU : TCG_COND_LE,
5656 tcg_res,
5657 tcg_res, tcg_elt, tcg_res, tcg_elt);
5658 break;
5659 break;
5660 default:
5661 g_assert_not_reached();
5662 }
5663
5664 }
5665 } else {
5666
5667
5668
5669
5670 TCGv_i32 tcg_elt1 = tcg_temp_new_i32();
5671 TCGv_i32 tcg_elt2 = tcg_temp_new_i32();
5672 TCGv_i32 tcg_elt3 = tcg_temp_new_i32();
5673 TCGv_ptr fpst = get_fpstatus_ptr();
5674
5675 assert(esize == 32);
5676 assert(elements == 4);
5677
5678 read_vec_element(s, tcg_elt, rn, 0, MO_32);
5679 tcg_gen_extrl_i64_i32(tcg_elt1, tcg_elt);
5680 read_vec_element(s, tcg_elt, rn, 1, MO_32);
5681 tcg_gen_extrl_i64_i32(tcg_elt2, tcg_elt);
5682
5683 do_minmaxop(s, tcg_elt1, tcg_elt2, opcode, is_min, fpst);
5684
5685 read_vec_element(s, tcg_elt, rn, 2, MO_32);
5686 tcg_gen_extrl_i64_i32(tcg_elt2, tcg_elt);
5687 read_vec_element(s, tcg_elt, rn, 3, MO_32);
5688 tcg_gen_extrl_i64_i32(tcg_elt3, tcg_elt);
5689
5690 do_minmaxop(s, tcg_elt2, tcg_elt3, opcode, is_min, fpst);
5691
5692 do_minmaxop(s, tcg_elt1, tcg_elt2, opcode, is_min, fpst);
5693
5694 tcg_gen_extu_i32_i64(tcg_res, tcg_elt1);
5695 tcg_temp_free_i32(tcg_elt1);
5696 tcg_temp_free_i32(tcg_elt2);
5697 tcg_temp_free_i32(tcg_elt3);
5698 tcg_temp_free_ptr(fpst);
5699 }
5700
5701 tcg_temp_free_i64(tcg_elt);
5702
5703
5704 if (opcode == 0x03) {
5705
5706 size++;
5707 }
5708
5709 switch (size) {
5710 case 0:
5711 tcg_gen_ext8u_i64(tcg_res, tcg_res);
5712 break;
5713 case 1:
5714 tcg_gen_ext16u_i64(tcg_res, tcg_res);
5715 break;
5716 case 2:
5717 tcg_gen_ext32u_i64(tcg_res, tcg_res);
5718 break;
5719 case 3:
5720 break;
5721 default:
5722 g_assert_not_reached();
5723 }
5724
5725 write_fp_dreg(s, rd, tcg_res);
5726 tcg_temp_free_i64(tcg_res);
5727}
5728
5729
5730
5731
5732
5733
5734
5735
5736
5737
/* DUP (element, vector): replicate one element of Vn across all
 * lanes of Vd.  The element size is encoded by the position of the
 * lowest set bit in imm5; the index by the bits above it.
 */
static void handle_simd_dupe(DisasContext *s, int is_q, int rd, int rn,
                             int imm5)
{
    int size = ctz32(imm5);
    int esize = 8 << size;
    int elements = (is_q ? 128 : 64) / esize;
    int index, i;
    TCGv_i64 tmp;

    /* imm5 == 0 gives size 32 (> 3); 64-bit elements need a Q vector */
    if (size > 3 || (size == 3 && !is_q)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    index = imm5 >> (size + 1);

    tmp = tcg_temp_new_i64();
    read_vec_element(s, tmp, rn, index, size);

    for (i = 0; i < elements; i++) {
        write_vec_element(s, tmp, rd, i, size);
    }

    if (!is_q) {
        clear_vec_high(s, rd);
    }

    tcg_temp_free_i64(tmp);
}
5771
5772
5773
5774
5775
5776
5777
/* DUP (element, scalar): copy one element of Vn into the low part of
 * Vd, zeroing the rest (via write_fp_dreg).
 */
static void handle_simd_dupes(DisasContext *s, int rd, int rn,
                              int imm5)
{
    int size = ctz32(imm5);
    int index;
    TCGv_i64 tmp;

    /* imm5 == 0 gives size 32 (> 3), which is unallocated */
    if (size > 3) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    index = imm5 >> (size + 1);

    /* The element is read zero-extended into a 64-bit temp, so the
     * D-register write below clears everything above the element.
     */
    tmp = tcg_temp_new_i64();
    read_vec_element(s, tmp, rn, index, size);
    write_fp_dreg(s, rd, tmp);
    tcg_temp_free_i64(tmp);
}
5804
5805
5806
5807
5808
5809
5810
5811
5812
5813
/* DUP (general): replicate a general-purpose register across all
 * lanes of Vd.  write_vec_element truncates to the element size.
 */
static void handle_simd_dupg(DisasContext *s, int is_q, int rd, int rn,
                             int imm5)
{
    int size = ctz32(imm5);
    int esize = 8 << size;
    int elements = (is_q ? 128 : 64)/esize;
    int i = 0;

    /* imm5 == 0 gives size 32 (> 3); 64-bit elements need a Q vector */
    if (size > 3 || ((size == 3) && !is_q)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    for (i = 0; i < elements; i++) {
        write_vec_element(s, cpu_reg(s, rn), rd, i, size);
    }
    if (!is_q) {
        clear_vec_high(s, rd);
    }
}
5838
5839
5840
5841
5842
5843
5844
5845
5846
5847
5848
/* INS (element): copy one element of Vn into one element of Vd,
 * leaving the other lanes of Vd unchanged.  The element size comes
 * from the lowest set bit of imm5; the destination index from the
 * imm5 bits above it, the source index from imm4.
 */
static void handle_simd_inse(DisasContext *s, int rd, int rn,
                             int imm4, int imm5)
{
    int size = ctz32(imm5);
    int src_index, dst_index;
    TCGv_i64 tmp;

    /* imm5 == 0 gives size 32 (> 3), which is unallocated */
    if (size > 3) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    dst_index = extract32(imm5, 1+size, 5);
    src_index = extract32(imm4, size, 4);

    tmp = tcg_temp_new_i64();

    read_vec_element(s, tmp, rn, src_index, size);
    write_vec_element(s, tmp, rd, dst_index, size);

    tcg_temp_free_i64(tmp);
}
5875
5876
5877
5878
5879
5880
5881
5882
5883
5884
5885
5886
5887static void handle_simd_insg(DisasContext *s, int rd, int rn, int imm5)
5888{
5889 int size = ctz32(imm5);
5890 int idx;
5891
5892 if (size > 3) {
5893 unallocated_encoding(s);
5894 return;
5895 }
5896
5897 if (!fp_access_check(s)) {
5898 return;
5899 }
5900
5901 idx = extract32(imm5, 1 + size, 4 - size);
5902 write_vec_element(s, cpu_reg(s, rn), rd, idx, size);
5903}
5904
5905
5906
5907
5908
5909
5910
5911
5912
5913
5914
5915
5916
5917static void handle_simd_umov_smov(DisasContext *s, int is_q, int is_signed,
5918 int rn, int rd, int imm5)
5919{
5920 int size = ctz32(imm5);
5921 int element;
5922 TCGv_i64 tcg_rd;
5923
5924
5925 if (is_signed) {
5926 if (size > 2 || (size == 2 && !is_q)) {
5927 unallocated_encoding(s);
5928 return;
5929 }
5930 } else {
5931 if (size > 3
5932 || (size < 3 && is_q)
5933 || (size == 3 && !is_q)) {
5934 unallocated_encoding(s);
5935 return;
5936 }
5937 }
5938
5939 if (!fp_access_check(s)) {
5940 return;
5941 }
5942
5943 element = extract32(imm5, 1+size, 4);
5944
5945 tcg_rd = cpu_reg(s, rd);
5946 read_vec_element(s, tcg_rd, rn, element, size | (is_signed ? MO_SIGN : 0));
5947 if (is_signed && !is_q) {
5948 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
5949 }
5950}
5951
5952
5953
5954
5955
5956
5957
5958static void disas_simd_copy(DisasContext *s, uint32_t insn)
5959{
5960 int rd = extract32(insn, 0, 5);
5961 int rn = extract32(insn, 5, 5);
5962 int imm4 = extract32(insn, 11, 4);
5963 int op = extract32(insn, 29, 1);
5964 int is_q = extract32(insn, 30, 1);
5965 int imm5 = extract32(insn, 16, 5);
5966
5967 if (op) {
5968 if (is_q) {
5969
5970 handle_simd_inse(s, rd, rn, imm4, imm5);
5971 } else {
5972 unallocated_encoding(s);
5973 }
5974 } else {
5975 switch (imm4) {
5976 case 0:
5977
5978 handle_simd_dupe(s, is_q, rd, rn, imm5);
5979 break;
5980 case 1:
5981
5982 handle_simd_dupg(s, is_q, rd, rn, imm5);
5983 break;
5984 case 3:
5985 if (is_q) {
5986
5987 handle_simd_insg(s, rd, rn, imm5);
5988 } else {
5989 unallocated_encoding(s);
5990 }
5991 break;
5992 case 5:
5993 case 7:
5994
5995 handle_simd_umov_smov(s, is_q, (imm4 == 5), rn, rd, imm5);
5996 break;
5997 default:
5998 unallocated_encoding(s);
5999 break;
6000 }
6001 }
6002}
6003
6004
6005
6006
6007
6008
6009
6010
6011
6012
6013
6014
6015
/* AdvSIMD modified immediate
 *
 * Fields: Rd [4:0], defgh [9:5], o2 [11], cmode [15:12], abc [18:16],
 *         op [29], Q [30].
 *
 * Depending on cmode/op this is MOVI, MVNI, ORR, BIC or FMOV
 * (vector, immediate): the 8-bit abcdefgh value is expanded per
 * the architectural AdvSIMDExpandImm() pseudocode and then either
 * moved, ORed or ANDed into Vd.
 */
static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int cmode = extract32(insn, 12, 4);
    int cmode_3_1 = extract32(cmode, 1, 3);
    int cmode_0 = extract32(cmode, 0, 1);
    int o2 = extract32(insn, 11, 1);
    uint64_t abcdefgh = extract32(insn, 5, 5) | (extract32(insn, 16, 3) << 5);
    bool is_neg = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);
    uint64_t imm = 0;
    TCGv_i64 tcg_rd, tcg_imm;
    int i;

    if (o2 != 0 || ((cmode == 0xf) && is_neg && !is_q)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* See AdvSIMDExpandImm() in ARM ARM */
    switch (cmode_3_1) {
    case 0: /* Replicate(Zeros(24):imm8, 2) */
    case 1: /* Replicate(Zeros(16):imm8:Zeros(8), 2) */
    case 2: /* Replicate(Zeros(8):imm8:Zeros(16), 2) */
    case 3: /* Replicate(imm8:Zeros(24), 2) */
    {
        int shift = cmode_3_1 * 8;
        imm = bitfield_replicate(abcdefgh << shift, 32);
        break;
    }
    case 4: /* Replicate(Zeros(8):imm8, 4) */
    case 5: /* Replicate(imm8:Zeros(8), 4) */
    {
        int shift = (cmode_3_1 & 0x1) * 8;
        imm = bitfield_replicate(abcdefgh << shift, 16);
        break;
    }
    case 6:
        if (cmode_0) {
            /* Replicate(Zeros(8):imm8:Ones(16), 2) */
            imm = (abcdefgh << 16) | 0xffff;
        } else {
            /* Replicate(Zeros(16):imm8:Ones(8), 2) */
            imm = (abcdefgh << 8) | 0xff;
        }
        imm = bitfield_replicate(imm, 32);
        break;
    case 7:
        if (!cmode_0 && !is_neg) {
            /* Replicate imm8 into each byte */
            imm = bitfield_replicate(abcdefgh, 8);
        } else if (!cmode_0 && is_neg) {
            /* Each bit a..h selects all-ones or all-zeros for one byte */
            int i;
            imm = 0;
            for (i = 0; i < 8; i++) {
                if ((abcdefgh) & (1 << i)) {
                    imm |= 0xffULL << (i * 8);
                }
            }
        } else if (cmode_0) {
            if (is_neg) {
                /* FMOV (vector, immediate) - double-precision pattern:
                 * sign, inverted-exponent-bit replication, fraction.
                 */
                imm = (abcdefgh & 0x3f) << 48;
                if (abcdefgh & 0x80) {
                    imm |= 0x8000000000000000ULL;
                }
                if (abcdefgh & 0x40) {
                    imm |= 0x3fc0000000000000ULL;
                } else {
                    imm |= 0x4000000000000000ULL;
                }
            } else {
                /* FMOV (vector, immediate) - single-precision pattern,
                 * replicated into both 32-bit halves.
                 */
                imm = (abcdefgh & 0x3f) << 19;
                if (abcdefgh & 0x80) {
                    imm |= 0x80000000;
                }
                if (abcdefgh & 0x40) {
                    imm |= 0x3e000000;
                } else {
                    imm |= 0x40000000;
                }
                imm |= (imm << 32);
            }
        }
        break;
    }

    if (cmode_3_1 != 7 && is_neg) {
        imm = ~imm;
    }

    tcg_imm = tcg_const_i64(imm);
    tcg_rd = new_tmp_a64(s);

    /* Operate on the vector 64 bits at a time */
    for (i = 0; i < 2; i++) {
        int foffs = i ? fp_reg_hi_offset(s, rd) : fp_reg_offset(s, rd, MO_64);

        if (i == 1 && !is_q) {
            /* non-quad ops clear the high half of the register */
            tcg_gen_movi_i64(tcg_rd, 0);
        } else if ((cmode & 0x9) == 0x1 || (cmode & 0xd) == 0x9) {
            tcg_gen_ld_i64(tcg_rd, cpu_env, foffs);
            if (is_neg) {
                /* AND (BIC) */
                tcg_gen_and_i64(tcg_rd, tcg_rd, tcg_imm);
            } else {
                /* ORR */
                tcg_gen_or_i64(tcg_rd, tcg_rd, tcg_imm);
            }
        } else {
            /* MOVI */
            tcg_gen_mov_i64(tcg_rd, tcg_imm);
        }
        tcg_gen_st_i64(tcg_rd, cpu_env, foffs);
    }

    tcg_temp_free_i64(tcg_imm);
}
6136
6137
6138
6139
6140
6141
6142
6143static void disas_simd_scalar_copy(DisasContext *s, uint32_t insn)
6144{
6145 int rd = extract32(insn, 0, 5);
6146 int rn = extract32(insn, 5, 5);
6147 int imm4 = extract32(insn, 11, 4);
6148 int imm5 = extract32(insn, 16, 5);
6149 int op = extract32(insn, 29, 1);
6150
6151 if (op != 0 || imm4 != 0) {
6152 unallocated_encoding(s);
6153 return;
6154 }
6155
6156
6157 handle_simd_dupes(s, rd, rn, imm5);
6158}
6159
6160
6161
6162
6163
6164
6165
/* AdvSIMD scalar pairwise
 *
 * Fields: Rd [4:0], Rn [9:5], opcode [16:12], size [23:22], U [29].
 * Reduces the two elements of Vn into the scalar Vd (ADDP or one of
 * the FP pairwise max/min/add ops).
 */
static void disas_simd_scalar_pairwise(DisasContext *s, uint32_t insn)
{
    int u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    TCGv_ptr fpst;

    /* For some ops (the FP ones), size[1] is part of the encoding.
     * For ADDP strictly it is not but size[1] is always 1 for valid
     * encodings.
     */
    opcode |= (extract32(size, 1, 1) << 5);

    switch (opcode) {
    case 0x3b: /* ADDP */
        if (u || size != 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        /* Integer op: no fp status needed */
        TCGV_UNUSED_PTR(fpst);
        break;
    case 0xc: /* FMAXNMP */
    case 0xd: /* FADDP */
    case 0xf: /* FMAXP */
    case 0x2c: /* FMINNMP */
    case 0x2f: /* FMINP */
        /* FP op, size[0] is 32 or 64 bit */
        if (!u) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        size = extract32(size, 0, 1) ? 3 : 2;
        fpst = get_fpstatus_ptr();
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (size == 3) {
        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
        TCGv_i64 tcg_op2 = tcg_temp_new_i64();
        TCGv_i64 tcg_res = tcg_temp_new_i64();

        read_vec_element(s, tcg_op1, rn, 0, MO_64);
        read_vec_element(s, tcg_op2, rn, 1, MO_64);

        switch (opcode) {
        case 0x3b: /* ADDP */
            tcg_gen_add_i64(tcg_res, tcg_op1, tcg_op2);
            break;
        case 0xc: /* FMAXNMP */
            gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0xd: /* FADDP */
            gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0xf: /* FMAXP */
            gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0x2c: /* FMINNMP */
            gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0x2f: /* FMINP */
            gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        default:
            g_assert_not_reached();
        }

        write_fp_dreg(s, rd, tcg_res);

        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2);
        tcg_temp_free_i64(tcg_res);
    } else {
        TCGv_i32 tcg_op1 = tcg_temp_new_i32();
        TCGv_i32 tcg_op2 = tcg_temp_new_i32();
        TCGv_i32 tcg_res = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_op1, rn, 0, MO_32);
        read_vec_element_i32(s, tcg_op2, rn, 1, MO_32);

        switch (opcode) {
        case 0xc: /* FMAXNMP */
            gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0xd: /* FADDP */
            gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0xf: /* FMAXP */
            gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0x2c: /* FMINNMP */
            gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0x2f: /* FMINP */
            gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        default:
            g_assert_not_reached();
        }

        write_fp_sreg(s, rd, tcg_res);

        tcg_temp_free_i32(tcg_op1);
        tcg_temp_free_i32(tcg_op2);
        tcg_temp_free_i32(tcg_res);
    }

    if (!TCGV_IS_UNUSED_PTR(fpst)) {
        tcg_temp_free_ptr(fpst);
    }
}
6290
6291
6292
6293
6294
6295
6296
/*
 * Common 64-bit shift-right with optional rounding and/or accumulate,
 * used by both the scalar and vector shift-immediate insns.
 * tcg_rnd holds the rounding constant when rounding is requested,
 * otherwise it must be the "unused" sentinel; shift may be up to 64.
 */
static void handle_shri_with_rndacc(TCGv_i64 tcg_res, TCGv_i64 tcg_src,
                                    TCGv_i64 tcg_rnd, bool accumulate,
                                    bool is_u, int size, int shift)
{
    bool extended_result = false;
    bool round = !TCGV_IS_UNUSED_I64(tcg_rnd);
    int ext_lshift = 0;
    TCGv_i64 tcg_src_hi;

    if (round && size == 3) {
        /* Rounding a 64-bit value may carry into bit 64, so we need
         * double-width (128-bit) intermediate precision.
         */
        extended_result = true;
        ext_lshift = 64 - shift;
        tcg_src_hi = tcg_temp_new_i64();
    } else if (shift == 64) {
        if (!accumulate && is_u) {
            /* result is zero */
            tcg_gen_movi_i64(tcg_res, 0);
            return;
        }
    }

    /* Deal with the rounding step */
    if (round) {
        if (extended_result) {
            TCGv_i64 tcg_zero = tcg_const_i64(0);
            if (!is_u) {
                /* take care of sign extending tcg_src into the high half */
                tcg_gen_sari_i64(tcg_src_hi, tcg_src, 63);
                tcg_gen_add2_i64(tcg_src, tcg_src_hi,
                                 tcg_src, tcg_src_hi,
                                 tcg_rnd, tcg_zero);
            } else {
                tcg_gen_add2_i64(tcg_src, tcg_src_hi,
                                 tcg_src, tcg_zero,
                                 tcg_rnd, tcg_zero);
            }
            tcg_temp_free_i64(tcg_zero);
        } else {
            tcg_gen_add_i64(tcg_src, tcg_src, tcg_rnd);
        }
    }

    /* Now do the shift right */
    if (round && extended_result) {
        /* extended case, >64 bit precision required */
        if (ext_lshift == 0) {
            /* special case, only high bits matter */
            tcg_gen_mov_i64(tcg_src, tcg_src_hi);
        } else {
            tcg_gen_shri_i64(tcg_src, tcg_src, shift);
            tcg_gen_shli_i64(tcg_src_hi, tcg_src_hi, ext_lshift);
            tcg_gen_or_i64(tcg_src, tcg_src, tcg_src_hi);
        }
    } else {
        if (is_u) {
            if (shift == 64) {
                /* essentially shifting in 64 zeros */
                tcg_gen_movi_i64(tcg_src, 0);
            } else {
                tcg_gen_shri_i64(tcg_src, tcg_src, shift);
            }
        } else {
            if (shift == 64) {
                /* effectively extending the sign-bit */
                tcg_gen_sari_i64(tcg_src, tcg_src, 63);
            } else {
                tcg_gen_sari_i64(tcg_src, tcg_src, shift);
            }
        }
    }

    if (accumulate) {
        tcg_gen_add_i64(tcg_res, tcg_res, tcg_src);
    } else {
        tcg_gen_mov_i64(tcg_res, tcg_src);
    }

    if (extended_result) {
        tcg_temp_free_i64(tcg_src_hi);
    }
}
6378
6379
6380static void handle_shli_with_ins(TCGv_i64 tcg_res, TCGv_i64 tcg_src,
6381 bool insert, int shift)
6382{
6383 if (insert) {
6384 tcg_gen_deposit_i64(tcg_res, tcg_res, tcg_src, shift, 64 - shift);
6385 } else {
6386 tcg_gen_shli_i64(tcg_res, tcg_src, shift);
6387 }
6388}
6389
6390
6391static void handle_shri_with_ins(TCGv_i64 tcg_res, TCGv_i64 tcg_src,
6392 int size, int shift)
6393{
6394 int esize = 8 << size;
6395
6396
6397
6398
6399 if (shift != esize) {
6400 tcg_gen_shri_i64(tcg_src, tcg_src, shift);
6401 tcg_gen_deposit_i64(tcg_res, tcg_res, tcg_src, 0, esize - shift);
6402 }
6403}
6404
6405
/* SHR/SRA - Scalar shift right (optional rounding/accumulate) */
static void handle_scalar_simd_shri(DisasContext *s,
                                    bool is_u, int immh, int immb,
                                    int opcode, int rn, int rd)
{
    const int size = 3;   /* scalar shift right is 64-bit only */
    int immhb = immh << 3 | immb;
    int shift = 2 * (8 << size) - immhb;
    bool accumulate = false;
    bool round = false;
    bool insert = false;
    TCGv_i64 tcg_rn;
    TCGv_i64 tcg_rd;
    TCGv_i64 tcg_round;

    if (!extract32(immh, 3, 1)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    switch (opcode) {
    case 0x02: /* SSRA / USRA (accumulate) */
        accumulate = true;
        break;
    case 0x04: /* SRSHR / URSHR (rounding) */
        round = true;
        break;
    case 0x06: /* SRSRA / URSRA (accum + rounding) */
        accumulate = round = true;
        break;
    case 0x08: /* SRI */
        insert = true;
        break;
    }
    /* any other opcode (SSHR/USHR) needs no flags set */

    if (round) {
        uint64_t round_const = 1ULL << (shift - 1);
        tcg_round = tcg_const_i64(round_const);
    } else {
        TCGV_UNUSED_I64(tcg_round);
    }

    tcg_rn = read_fp_dreg(s, rn);
    /* accumulate/insert ops need the old value of Rd as an input */
    tcg_rd = (accumulate || insert) ? read_fp_dreg(s, rd) : tcg_temp_new_i64();

    if (insert) {
        handle_shri_with_ins(tcg_rd, tcg_rn, size, shift);
    } else {
        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
                                accumulate, is_u, size, shift);
    }

    write_fp_dreg(s, rd, tcg_rd);

    tcg_temp_free_i64(tcg_rn);
    tcg_temp_free_i64(tcg_rd);
    if (round) {
        tcg_temp_free_i64(tcg_round);
    }
}
6469
6470
6471static void handle_scalar_simd_shli(DisasContext *s, bool insert,
6472 int immh, int immb, int opcode,
6473 int rn, int rd)
6474{
6475 int size = 32 - clz32(immh) - 1;
6476 int immhb = immh << 3 | immb;
6477 int shift = immhb - (8 << size);
6478 TCGv_i64 tcg_rn = new_tmp_a64(s);
6479 TCGv_i64 tcg_rd = new_tmp_a64(s);
6480
6481 if (!extract32(immh, 3, 1)) {
6482 unallocated_encoding(s);
6483 return;
6484 }
6485
6486 if (!fp_access_check(s)) {
6487 return;
6488 }
6489
6490 tcg_rn = read_fp_dreg(s, rn);
6491 tcg_rd = insert ? read_fp_dreg(s, rd) : tcg_temp_new_i64();
6492
6493 handle_shli_with_ins(tcg_rd, tcg_rn, insert, shift);
6494
6495 write_fp_dreg(s, rd, tcg_rd);
6496
6497 tcg_temp_free_i64(tcg_rn);
6498 tcg_temp_free_i64(tcg_rd);
6499}
6500
6501
6502
/* SQSHRN/SQSHRUN - Saturating (signed/unsigned) shift right with
 * (signed/unsigned) narrowing */
static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q,
                                   bool is_u_shift, bool is_u_narrow,
                                   int immh, int immb, int opcode,
                                   int rn, int rd)
{
    int immhb = immh << 3 | immb;
    int size = 32 - clz32(immh) - 1;
    int esize = 8 << size;
    int shift = (2 * esize) - immhb;
    int elements = is_scalar ? 1 : (64 / esize);
    bool round = extract32(opcode, 0, 1);
    TCGMemOp ldop = (size + 1) | (is_u_shift ? 0 : MO_SIGN);
    TCGv_i64 tcg_rn, tcg_rd, tcg_round;
    TCGv_i32 tcg_rd_narrowed;
    TCGv_i64 tcg_final;

    /* Narrowing helper tables indexed by destination element size;
     * index 3 (64-bit destination) has no valid narrowing op.
     */
    static NeonGenNarrowEnvFn * const signed_narrow_fns[4][2] = {
        { gen_helper_neon_narrow_sat_s8,
          gen_helper_neon_unarrow_sat8 },
        { gen_helper_neon_narrow_sat_s16,
          gen_helper_neon_unarrow_sat16 },
        { gen_helper_neon_narrow_sat_s32,
          gen_helper_neon_unarrow_sat32 },
        { NULL, NULL },
    };
    static NeonGenNarrowEnvFn * const unsigned_narrow_fns[4] = {
        gen_helper_neon_narrow_sat_u8,
        gen_helper_neon_narrow_sat_u16,
        gen_helper_neon_narrow_sat_u32,
        NULL
    };
    NeonGenNarrowEnvFn *narrowfn;

    int i;

    assert(size < 4);

    /* immh[3] set would mean 64-bit elements, which cannot be
     * narrowed (the tables above have NULL there): reserved.
     */
    if (extract32(immh, 3, 1)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (is_u_shift) {
        narrowfn = unsigned_narrow_fns[size];
    } else {
        narrowfn = signed_narrow_fns[size][is_u_narrow ? 1 : 0];
    }

    tcg_rn = tcg_temp_new_i64();
    tcg_rd = tcg_temp_new_i64();
    tcg_rd_narrowed = tcg_temp_new_i32();
    tcg_final = tcg_const_i64(0);

    if (round) {
        uint64_t round_const = 1ULL << (shift - 1);
        tcg_round = tcg_const_i64(round_const);
    } else {
        TCGV_UNUSED_I64(tcg_round);
    }

    /* Shift each (wider, size+1) source element, saturate-narrow it,
     * and accumulate the results into a single 64-bit output word.
     */
    for (i = 0; i < elements; i++) {
        read_vec_element(s, tcg_rn, rn, i, ldop);
        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
                                false, is_u_shift, size+1, shift);
        narrowfn(tcg_rd_narrowed, cpu_env, tcg_rd);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_rd_narrowed);
        tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
    }

    if (!is_q) {
        /* non-Q writes the low half and clears the high half */
        clear_vec_high(s, rd);
        write_vec_element(s, tcg_final, rd, 0, MO_64);
    } else {
        /* Q variant writes the high half ("narrow high" form) */
        write_vec_element(s, tcg_final, rd, 1, MO_64);
    }

    if (round) {
        tcg_temp_free_i64(tcg_round);
    }
    tcg_temp_free_i64(tcg_rn);
    tcg_temp_free_i64(tcg_rd);
    tcg_temp_free_i32(tcg_rd_narrowed);
    tcg_temp_free_i64(tcg_final);
    return;
}
6592
6593
/* SQSHLU, UQSHL, SQSHL: saturating left shifts, common to the scalar
 * and vector shift-by-immediate groups.
 */
static void handle_simd_qshl(DisasContext *s, bool scalar, bool is_q,
                             bool src_unsigned, bool dst_unsigned,
                             int immh, int immb, int rn, int rd)
{
    int immhb = immh << 3 | immb;
    int size = 32 - clz32(immh) - 1;
    int shift = immhb - (8 << size);
    int pass;

    assert(immh != 0);
    assert(!(scalar && is_q));

    if (!scalar) {
        if (!is_q && extract32(immh, 3, 1)) {
            unallocated_encoding(s);
            return;
        }

        /* Since we use the variable-shift helpers below we must
         * replicate the shift count into each element of the
         * tcg_shift value.
         */
        switch (size) {
        case 0:
            shift |= shift << 8;
            /* fall through */
        case 1:
            shift |= shift << 16;
            break;
        case 2:
        case 3:
            break;
        default:
            g_assert_not_reached();
        }
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (size == 3) {
        TCGv_i64 tcg_shift = tcg_const_i64(shift);
        static NeonGenTwo64OpEnvFn * const fns[2][2] = {
            { gen_helper_neon_qshl_s64, gen_helper_neon_qshlu_s64 },
            { NULL, gen_helper_neon_qshl_u64 },
        };
        NeonGenTwo64OpEnvFn *genfn = fns[src_unsigned][dst_unsigned];
        int maxpass = is_q ? 2 : 1;

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);
            genfn(tcg_op, cpu_env, tcg_op, tcg_shift);
            write_vec_element(s, tcg_op, rd, pass, MO_64);

            tcg_temp_free_i64(tcg_op);
        }
        tcg_temp_free_i64(tcg_shift);

        if (!is_q) {
            clear_vec_high(s, rd);
        }
    } else {
        TCGv_i32 tcg_shift = tcg_const_i32(shift);
        /* indexed by [src_unsigned][dst_unsigned][size]; the
         * unsigned-source/signed-destination combination is invalid.
         */
        static NeonGenTwoOpEnvFn * const fns[2][2][3] = {
            {
                { gen_helper_neon_qshl_s8,
                  gen_helper_neon_qshl_s16,
                  gen_helper_neon_qshl_s32 },
                { gen_helper_neon_qshlu_s8,
                  gen_helper_neon_qshlu_s16,
                  gen_helper_neon_qshlu_s32 }
            }, {
                { NULL, NULL, NULL },
                { gen_helper_neon_qshl_u8,
                  gen_helper_neon_qshl_u16,
                  gen_helper_neon_qshl_u32 }
            }
        };
        NeonGenTwoOpEnvFn *genfn = fns[src_unsigned][dst_unsigned][size];
        TCGMemOp memop = scalar ? size : MO_32;
        int maxpass = scalar ? 1 : is_q ? 4 : 2;

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op, rn, pass, memop);
            genfn(tcg_op, cpu_env, tcg_op, tcg_shift);
            if (scalar) {
                switch (size) {
                case 0:
                    tcg_gen_ext8u_i32(tcg_op, tcg_op);
                    break;
                case 1:
                    tcg_gen_ext16u_i32(tcg_op, tcg_op);
                    break;
                case 2:
                    break;
                default:
                    g_assert_not_reached();
                }
                write_fp_sreg(s, rd, tcg_op);
            } else {
                write_vec_element_i32(s, tcg_op, rd, pass, MO_32);
            }

            tcg_temp_free_i32(tcg_op);
        }
        tcg_temp_free_i32(tcg_shift);

        if (!is_q && !scalar) {
            clear_vec_high(s, rd);
        }
    }
}
6711
6712
/* Common vector code for handling integer to FP conversion
 * (SCVTF/UCVTF); fracbits gives the fixed-point fraction bit count
 * passed to the conversion helpers.
 */
static void handle_simd_intfp_conv(DisasContext *s, int rd, int rn,
                                   int elements, int is_signed,
                                   int fracbits, int size)
{
    bool is_double = size == 3 ? true : false;
    TCGv_ptr tcg_fpst = get_fpstatus_ptr();
    TCGv_i32 tcg_shift = tcg_const_i32(fracbits);
    TCGv_i64 tcg_int = tcg_temp_new_i64();
    TCGMemOp mop = size | (is_signed ? MO_SIGN : 0);
    int pass;

    for (pass = 0; pass < elements; pass++) {
        read_vec_element(s, tcg_int, rn, pass, mop);

        if (is_double) {
            TCGv_i64 tcg_double = tcg_temp_new_i64();
            if (is_signed) {
                gen_helper_vfp_sqtod(tcg_double, tcg_int,
                                     tcg_shift, tcg_fpst);
            } else {
                gen_helper_vfp_uqtod(tcg_double, tcg_int,
                                     tcg_shift, tcg_fpst);
            }
            if (elements == 1) {
                /* scalar form: also zeroes the high bits of Vd */
                write_fp_dreg(s, rd, tcg_double);
            } else {
                write_vec_element(s, tcg_double, rd, pass, MO_64);
            }
            tcg_temp_free_i64(tcg_double);
        } else {
            TCGv_i32 tcg_single = tcg_temp_new_i32();
            if (is_signed) {
                gen_helper_vfp_sqtos(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpst);
            } else {
                gen_helper_vfp_uqtos(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpst);
            }
            if (elements == 1) {
                write_fp_sreg(s, rd, tcg_single);
            } else {
                write_vec_element_i32(s, tcg_single, rd, pass, MO_32);
            }
            tcg_temp_free_i32(tcg_single);
        }
    }

    /* 2 x 32-bit elements only fill the low half of the vector */
    if (!is_double && elements == 2) {
        clear_vec_high(s, rd);
    }

    tcg_temp_free_i64(tcg_int);
    tcg_temp_free_ptr(tcg_fpst);
    tcg_temp_free_i32(tcg_shift);
}
6768
6769
/* UCVTF/SCVTF - Integer to FP conversion with fixed-point fracbits,
 * for both the scalar and vector shift-immediate encodings.
 */
static void handle_simd_shift_intfp_conv(DisasContext *s, bool is_scalar,
                                         bool is_q, bool is_u,
                                         int immh, int immb, int opcode,
                                         int rn, int rd)
{
    bool is_double = extract32(immh, 3, 1);
    int size = is_double ? MO_64 : MO_32;
    int elements;
    int immhb = immh << 3 | immb;
    int fracbits = (is_double ? 128 : 64) - immhb;

    /* immh[3:2] == 0 means an 8/16-bit element size: reserved here */
    if (!extract32(immh, 2, 2)) {
        unallocated_encoding(s);
        return;
    }

    if (is_scalar) {
        elements = 1;
    } else {
        elements = is_double ? 2 : is_q ? 4 : 2;
        /* 2 x 64-bit only fits in a Q register */
        if (is_double && !is_q) {
            unallocated_encoding(s);
            return;
        }
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* immh == 0 invalid encodings are handled by the caller */
    g_assert(immh);

    handle_simd_intfp_conv(s, rd, rn, elements, !is_u, fracbits, size);
}
6805
6806
/* FCVTZS, FCVTZU - FP to fixed-point conversion, for both the scalar
 * and vector shift-immediate encodings; always rounds towards zero.
 */
static void handle_simd_shift_fpint_conv(DisasContext *s, bool is_scalar,
                                         bool is_q, bool is_u,
                                         int immh, int immb, int rn, int rd)
{
    bool is_double = extract32(immh, 3, 1);
    int immhb = immh << 3 | immb;
    int fracbits = (is_double ? 128 : 64) - immhb;
    int pass;
    TCGv_ptr tcg_fpstatus;
    TCGv_i32 tcg_rmode, tcg_shift;

    /* immh[3:2] == 0 means an 8/16-bit element size: reserved here */
    if (!extract32(immh, 2, 2)) {
        unallocated_encoding(s);
        return;
    }

    /* 2 x 64-bit only fits in a Q register */
    if (!is_scalar && !is_q && is_double) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    assert(!(is_scalar && is_q));

    /* Force round-towards-zero for the duration of the conversion */
    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(FPROUNDING_ZERO));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
    tcg_fpstatus = get_fpstatus_ptr();
    tcg_shift = tcg_const_i32(fracbits);

    if (is_double) {
        int maxpass = is_scalar ? 1 : 2;

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);
            if (is_u) {
                gen_helper_vfp_touqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_tosqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
            }
            write_vec_element(s, tcg_op, rd, pass, MO_64);
            tcg_temp_free_i64(tcg_op);
        }
        if (!is_q) {
            clear_vec_high(s, rd);
        }
    } else {
        int maxpass = is_scalar ? 1 : is_q ? 4 : 2;
        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op, rn, pass, MO_32);
            if (is_u) {
                gen_helper_vfp_touls(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_tosls(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
            }
            if (is_scalar) {
                write_fp_sreg(s, rd, tcg_op);
            } else {
                write_vec_element_i32(s, tcg_op, rd, pass, MO_32);
            }
            tcg_temp_free_i32(tcg_op);
        }
        if (!is_q && !is_scalar) {
            clear_vec_high(s, rd);
        }
    }

    tcg_temp_free_ptr(tcg_fpstatus);
    tcg_temp_free_i32(tcg_shift);
    /* restore the previous rounding mode (saved in tcg_rmode above) */
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
    tcg_temp_free_i32(tcg_rmode);
}
6885
6886
6887
6888
6889
6890
6891
6892
6893
/* AdvSIMD scalar shift by immediate
 *
 * Fields: Rd [4:0], Rn [9:5], opcode [15:11], immb [18:16],
 *         immh [22:19], U [29].
 * Dispatches the scalar shift-by-immediate group to the shared
 * handlers above.
 */
static void disas_simd_scalar_shift_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 5);
    int immb = extract32(insn, 16, 3);
    int immh = extract32(insn, 19, 4);
    bool is_u = extract32(insn, 29, 1);

    /* immh == 0 encodings belong to a different group entirely */
    if (immh == 0) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0x08: /* SRI */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x00: /* SSHR / USHR */
    case 0x02: /* SSRA / USRA */
    case 0x04: /* SRSHR / URSHR */
    case 0x06: /* SRSRA / URSRA */
        handle_scalar_simd_shri(s, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x0a: /* SHL / SLI */
        handle_scalar_simd_shli(s, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x1c: /* SCVTF / UCVTF */
        handle_simd_shift_intfp_conv(s, true, false, is_u, immh, immb,
                                     opcode, rn, rd);
        break;
    case 0x10: /* SQSHRUN */
    case 0x11: /* SQRSHRUN */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        handle_vec_simd_sqshrn(s, true, false, false, true,
                               immh, immb, opcode, rn, rd);
        break;
    case 0x12: /* SQSHRN / UQSHRN */
    case 0x13: /* SQRSHRN / UQRSHRN */
        handle_vec_simd_sqshrn(s, true, false, is_u, is_u,
                               immh, immb, opcode, rn, rd);
        break;
    case 0xc: /* SQSHLU */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        handle_simd_qshl(s, true, false, false, true, immh, immb, rn, rd);
        break;
    case 0xe: /* SQSHL / UQSHL */
        handle_simd_qshl(s, true, false, is_u, is_u, immh, immb, rn, rd);
        break;
    case 0x1f: /* FCVTZS / FCVTZU */
        handle_simd_shift_fpint_conv(s, true, false, is_u, immh, immb, rn, rd);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
6960
6961
6962
6963
6964
6965
6966
/* AdvSIMD scalar three different
 *
 * Fields: Rd [4:0], Rn [9:5], opcode [15:12], Rm [20:16],
 *         size [23:22], U [29].
 * Covers SQDMLAL, SQDMLSL and SQDMULL (scalar): widening saturating
 * doubling multiply, optionally accumulated into Rd.
 */
static void disas_simd_scalar_three_reg_diff(DisasContext *s, uint32_t insn)
{
    bool is_u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 4);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    if (is_u) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0x9: /* SQDMLAL */
    case 0xb: /* SQDMLSL */
    case 0xd: /* SQDMULL */
        /* only 16->32 and 32->64 widenings are valid */
        if (size == 0 || size == 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (size == 2) {
        /* 32 x 32 -> 64 */
        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
        TCGv_i64 tcg_op2 = tcg_temp_new_i64();
        TCGv_i64 tcg_res = tcg_temp_new_i64();

        read_vec_element(s, tcg_op1, rn, 0, MO_32 | MO_SIGN);
        read_vec_element(s, tcg_op2, rm, 0, MO_32 | MO_SIGN);

        tcg_gen_mul_i64(tcg_res, tcg_op1, tcg_op2);
        /* saturating doubling of the product */
        gen_helper_neon_addl_saturate_s64(tcg_res, cpu_env, tcg_res, tcg_res);

        switch (opcode) {
        case 0xd: /* SQDMULL */
            break;
        case 0xb: /* SQDMLSL: subtract by negating then accumulating */
            tcg_gen_neg_i64(tcg_res, tcg_res);
            /* fall through */
        case 0x9: /* SQDMLAL */
            read_vec_element(s, tcg_op1, rd, 0, MO_64);
            gen_helper_neon_addl_saturate_s64(tcg_res, cpu_env,
                                              tcg_res, tcg_op1);
            break;
        default:
            g_assert_not_reached();
        }

        write_fp_dreg(s, rd, tcg_res);

        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2);
        tcg_temp_free_i64(tcg_res);
    } else {
        /* 16 x 16 -> 32 */
        TCGv_i32 tcg_op1 = tcg_temp_new_i32();
        TCGv_i32 tcg_op2 = tcg_temp_new_i32();
        TCGv_i64 tcg_res = tcg_temp_new_i64();

        read_vec_element_i32(s, tcg_op1, rn, 0, MO_16);
        read_vec_element_i32(s, tcg_op2, rm, 0, MO_16);

        gen_helper_neon_mull_s16(tcg_res, tcg_op1, tcg_op2);
        /* saturating doubling of the product */
        gen_helper_neon_addl_saturate_s32(tcg_res, cpu_env, tcg_res, tcg_res);

        switch (opcode) {
        case 0xd: /* SQDMULL */
            break;
        case 0xb: /* SQDMLSL: subtract by negating then accumulating */
            gen_helper_neon_negl_u32(tcg_res, tcg_res);
            /* fall through */
        case 0x9: /* SQDMLAL */
        {
            TCGv_i64 tcg_op3 = tcg_temp_new_i64();
            read_vec_element(s, tcg_op3, rd, 0, MO_32);
            gen_helper_neon_addl_saturate_s32(tcg_res, cpu_env,
                                              tcg_res, tcg_op3);
            tcg_temp_free_i64(tcg_op3);
            break;
        }
        default:
            g_assert_not_reached();
        }

        tcg_gen_ext32u_i64(tcg_res, tcg_res);
        write_fp_dreg(s, rd, tcg_res);

        tcg_temp_free_i32(tcg_op1);
        tcg_temp_free_i32(tcg_op2);
        tcg_temp_free_i64(tcg_res);
    }
}
7068
static void handle_3same_64(DisasContext *s, int opcode, bool u,
                            TCGv_i64 tcg_rd, TCGv_i64 tcg_rn, TCGv_i64 tcg_rm)
{
    /* Handle 64x64->64 opcodes which are shared between the scalar
     * and vector three-same groups; 'u' is the U bit selecting the
     * unsigned/alternate form of each opcode.
     */
    TCGCond cond;

    switch (opcode) {
    case 0x1: /* SQADD / UQADD */
        if (u) {
            gen_helper_neon_qadd_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qadd_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        }
        break;
    case 0x5: /* SQSUB / UQSUB */
        if (u) {
            gen_helper_neon_qsub_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qsub_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        }
        break;
    case 0x6: /* CMGT / CMHI */
        /* 64-bit integer comparison: set all bits of the result if
         * the condition holds (setcond gives 0/1, then negate to
         * 0/-1).
         */
        cond = u ? TCG_COND_GTU : TCG_COND_GT;
    do_cmop:
        tcg_gen_setcond_i64(cond, tcg_rd, tcg_rn, tcg_rm);
        tcg_gen_neg_i64(tcg_rd, tcg_rd);
        break;
    case 0x7: /* CMGE / CMHS */
        cond = u ? TCG_COND_GEU : TCG_COND_GE;
        goto do_cmop;
    case 0x11: /* CMTST / CMEQ */
        if (u) {
            cond = TCG_COND_EQ;
            goto do_cmop;
        }
        /* CMTST: test for any common set bits */
        tcg_gen_and_i64(tcg_rd, tcg_rn, tcg_rm);
        tcg_gen_setcondi_i64(TCG_COND_NE, tcg_rd, tcg_rd, 0);
        tcg_gen_neg_i64(tcg_rd, tcg_rd);
        break;
    case 0x8: /* SSHL / USHL */
        if (u) {
            gen_helper_neon_shl_u64(tcg_rd, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_shl_s64(tcg_rd, tcg_rn, tcg_rm);
        }
        break;
    case 0x9: /* SQSHL / UQSHL */
        if (u) {
            gen_helper_neon_qshl_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qshl_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        }
        break;
    case 0xa: /* SRSHL / URSHL */
        if (u) {
            gen_helper_neon_rshl_u64(tcg_rd, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_rshl_s64(tcg_rd, tcg_rn, tcg_rm);
        }
        break;
    case 0xb: /* SQRSHL / UQRSHL */
        if (u) {
            gen_helper_neon_qrshl_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qrshl_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        }
        break;
    case 0x10: /* ADD / SUB */
        if (u) {
            tcg_gen_sub_i64(tcg_rd, tcg_rn, tcg_rm);
        } else {
            tcg_gen_add_i64(tcg_rd, tcg_rn, tcg_rm);
        }
        break;
    default:
        g_assert_not_reached();
    }
}
7156
7157
7158
7159
7160
7161static void handle_3same_float(DisasContext *s, int size, int elements,
7162 int fpopcode, int rd, int rn, int rm)
7163{
7164 int pass;
7165 TCGv_ptr fpst = get_fpstatus_ptr();
7166
7167 for (pass = 0; pass < elements; pass++) {
7168 if (size) {
7169
7170 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
7171 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
7172 TCGv_i64 tcg_res = tcg_temp_new_i64();
7173
7174 read_vec_element(s, tcg_op1, rn, pass, MO_64);
7175 read_vec_element(s, tcg_op2, rm, pass, MO_64);
7176
7177 switch (fpopcode) {
7178 case 0x39:
7179
7180 gen_helper_vfp_negd(tcg_op1, tcg_op1);
7181
7182 case 0x19:
7183 read_vec_element(s, tcg_res, rd, pass, MO_64);
7184 gen_helper_vfp_muladdd(tcg_res, tcg_op1, tcg_op2,
7185 tcg_res, fpst);
7186 break;
7187 case 0x18:
7188 gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
7189 break;
7190 case 0x1a:
7191 gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
7192 break;
7193 case 0x1b:
7194 gen_helper_vfp_mulxd(tcg_res, tcg_op1, tcg_op2, fpst);
7195 break;
7196 case 0x1c:
7197 gen_helper_neon_ceq_f64(tcg_res, tcg_op1, tcg_op2, fpst);
7198 break;
7199 case 0x1e:
7200 gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
7201 break;
7202 case 0x1f:
7203 gen_helper_recpsf_f64(tcg_res, tcg_op1, tcg_op2, fpst);
7204 break;
7205 case 0x38:
7206 gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
7207 break;
7208 case 0x3a:
7209 gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
7210 break;
7211 case 0x3e:
7212 gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
7213 break;
7214 case 0x3f:
7215 gen_helper_rsqrtsf_f64(tcg_res, tcg_op1, tcg_op2, fpst);
7216 break;
7217 case 0x5b:
7218 gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
7219 break;
7220 case 0x5c:
7221 gen_helper_neon_cge_f64(tcg_res, tcg_op1, tcg_op2, fpst);
7222 break;
7223 case 0x5d:
7224 gen_helper_neon_acge_f64(tcg_res, tcg_op1, tcg_op2, fpst);
7225 break;
7226 case 0x5f:
7227 gen_helper_vfp_divd(tcg_res, tcg_op1, tcg_op2, fpst);
7228 break;
7229 case 0x7a:
7230 gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
7231 gen_helper_vfp_absd(tcg_res, tcg_res);
7232 break;
7233 case 0x7c:
7234 gen_helper_neon_cgt_f64(tcg_res, tcg_op1, tcg_op2, fpst);
7235 break;
7236 case 0x7d:
7237 gen_helper_neon_acgt_f64(tcg_res, tcg_op1, tcg_op2, fpst);
7238 break;
7239 default:
7240 g_assert_not_reached();
7241 }
7242
7243 write_vec_element(s, tcg_res, rd, pass, MO_64);
7244
7245 tcg_temp_free_i64(tcg_res);
7246 tcg_temp_free_i64(tcg_op1);
7247 tcg_temp_free_i64(tcg_op2);
7248 } else {
7249
7250 TCGv_i32 tcg_op1 = tcg_temp_new_i32();
7251 TCGv_i32 tcg_op2 = tcg_temp_new_i32();
7252 TCGv_i32 tcg_res = tcg_temp_new_i32();
7253
7254 read_vec_element_i32(s, tcg_op1, rn, pass, MO_32);
7255 read_vec_element_i32(s, tcg_op2, rm, pass, MO_32);
7256
7257 switch (fpopcode) {
7258 case 0x39:
7259
7260 gen_helper_vfp_negs(tcg_op1, tcg_op1);
7261
7262 case 0x19:
7263 read_vec_element_i32(s, tcg_res, rd, pass, MO_32);
7264 gen_helper_vfp_muladds(tcg_res, tcg_op1, tcg_op2,
7265 tcg_res, fpst);
7266 break;
7267 case 0x1a:
7268 gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
7269 break;
7270 case 0x1b:
7271 gen_helper_vfp_mulxs(tcg_res, tcg_op1, tcg_op2, fpst);
7272 break;
7273 case 0x1c:
7274 gen_helper_neon_ceq_f32(tcg_res, tcg_op1, tcg_op2, fpst);
7275 break;
7276 case 0x1e:
7277 gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
7278 break;
7279 case 0x1f:
7280 gen_helper_recpsf_f32(tcg_res, tcg_op1, tcg_op2, fpst);
7281 break;
7282 case 0x18:
7283 gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
7284 break;
7285 case 0x38:
7286 gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
7287 break;
7288 case 0x3a:
7289 gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
7290 break;
7291 case 0x3e:
7292 gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
7293 break;
7294 case 0x3f:
7295 gen_helper_rsqrtsf_f32(tcg_res, tcg_op1, tcg_op2, fpst);
7296 break;
7297 case 0x5b:
7298 gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
7299 break;
7300 case 0x5c:
7301 gen_helper_neon_cge_f32(tcg_res, tcg_op1, tcg_op2, fpst);
7302 break;
7303 case 0x5d:
7304 gen_helper_neon_acge_f32(tcg_res, tcg_op1, tcg_op2, fpst);
7305 break;
7306 case 0x5f:
7307 gen_helper_vfp_divs(tcg_res, tcg_op1, tcg_op2, fpst);
7308 break;
7309 case 0x7a:
7310 gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
7311 gen_helper_vfp_abss(tcg_res, tcg_res);
7312 break;
7313 case 0x7c:
7314 gen_helper_neon_cgt_f32(tcg_res, tcg_op1, tcg_op2, fpst);
7315 break;
7316 case 0x7d:
7317 gen_helper_neon_acgt_f32(tcg_res, tcg_op1, tcg_op2, fpst);
7318 break;
7319 default:
7320 g_assert_not_reached();
7321 }
7322
7323 if (elements == 1) {
7324
7325 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
7326
7327 tcg_gen_extu_i32_i64(tcg_tmp, tcg_res);
7328 write_vec_element(s, tcg_tmp, rd, pass, MO_64);
7329 tcg_temp_free_i64(tcg_tmp);
7330 } else {
7331 write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
7332 }
7333
7334 tcg_temp_free_i32(tcg_res);
7335 tcg_temp_free_i32(tcg_op1);
7336 tcg_temp_free_i32(tcg_op2);
7337 }
7338 }
7339
7340 tcg_temp_free_ptr(fpst);
7341
7342 if ((elements << size) < 4) {
7343
7344 clear_vec_high(s, rd);
7345 }
7346}
7347
7348
7349
7350
7351
7352
7353
/* AdvSIMD scalar three same: two scalar source registers, one scalar
 * destination. Field layout (per the extract32 calls below):
 * Rd[4:0], Rn[9:5], opcode[15:11], Rm[20:16], size[23:22], U[29].
 */
static void disas_simd_scalar_three_reg_same(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 5);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    bool u = extract32(insn, 29, 1);
    TCGv_i64 tcg_rd;

    if (opcode >= 0x18) {
        /* Floating point: U, size[1] and opcode indicate operation */
        int fpopcode = opcode | (extract32(size, 1, 1) << 5) | (u << 6);
        switch (fpopcode) {
        case 0x1b: /* FMULX */
        case 0x1f: /* FRECPS */
        case 0x3f: /* FRSQRTS */
        case 0x5d: /* FACGE */
        case 0x7d: /* FACGT */
        case 0x1c: /* FCMEQ */
        case 0x5c: /* FCMGE */
        case 0x7c: /* FCMGT */
        case 0x7a: /* FABD */
            break;
        default:
            unallocated_encoding(s);
            return;
        }

        if (!fp_access_check(s)) {
            return;
        }

        /* size[0] selects single vs double precision */
        handle_3same_float(s, extract32(size, 0, 1), 1, fpopcode, rd, rn, rm);
        return;
    }

    switch (opcode) {
    case 0x1: /* SQADD, UQADD */
    case 0x5: /* SQSUB, UQSUB */
    case 0x9: /* SQSHL, UQSHL */
    case 0xb: /* SQRSHL, UQRSHL */
        break;
    case 0x8: /* SSHL, USHL */
    case 0xa: /* SRSHL, URSHL */
    case 0x6: /* CMGT, CMHI */
    case 0x7: /* CMGE, CMHS */
    case 0x11: /* CMTST, CMEQ */
    case 0x10: /* ADD, SUB */
        /* These scalar ops only exist for the 64-bit element size */
        if (size != 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x16: /* SQDMULH, SQRDMULH */
        if (size != 1 && size != 2) {
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rd = tcg_temp_new_i64();

    if (size == 3) {
        TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
        TCGv_i64 tcg_rm = read_fp_dreg(s, rm);

        handle_3same_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rm);
        tcg_temp_free_i64(tcg_rn);
        tcg_temp_free_i64(tcg_rm);
    } else {
        /* Do a single operation on the lowest element in the vector.
         * We use the standard Neon helpers and rely on 0 OP 0 == 0 with
         * no side effects for all these operations.
         * NOTE(review): the 8/16-bit helpers appear to operate on packed
         * lanes of an i32, so the upper lanes do redundant work here —
         * harmless since only the low element is written back.
         */
        NeonGenTwoOpEnvFn *genenvfn;
        TCGv_i32 tcg_rn = tcg_temp_new_i32();
        TCGv_i32 tcg_rm = tcg_temp_new_i32();
        TCGv_i32 tcg_rd32 = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_rn, rn, 0, size);
        read_vec_element_i32(s, tcg_rm, rm, 0, size);

        switch (opcode) {
        case 0x1: /* SQADD, UQADD */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qadd_s8, gen_helper_neon_qadd_u8 },
                { gen_helper_neon_qadd_s16, gen_helper_neon_qadd_u16 },
                { gen_helper_neon_qadd_s32, gen_helper_neon_qadd_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x5: /* SQSUB, UQSUB */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qsub_s8, gen_helper_neon_qsub_u8 },
                { gen_helper_neon_qsub_s16, gen_helper_neon_qsub_u16 },
                { gen_helper_neon_qsub_s32, gen_helper_neon_qsub_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x9: /* SQSHL, UQSHL */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 },
                { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 },
                { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0xb: /* SQRSHL, UQRSHL */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 },
                { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 },
                { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x16: /* SQDMULH, SQRDMULH */
        {
            static NeonGenTwoOpEnvFn * const fns[2][2] = {
                { gen_helper_neon_qdmulh_s16, gen_helper_neon_qrdmulh_s16 },
                { gen_helper_neon_qdmulh_s32, gen_helper_neon_qrdmulh_s32 },
            };
            /* only 16- and 32-bit element sizes exist for these */
            assert(size == 1 || size == 2);
            genenvfn = fns[size - 1][u];
            break;
        }
        default:
            g_assert_not_reached();
        }

        genenvfn(tcg_rd32, cpu_env, tcg_rn, tcg_rm);
        /* Zero-extend the 32-bit result into the 64-bit destination */
        tcg_gen_extu_i32_i64(tcg_rd, tcg_rd32);
        tcg_temp_free_i32(tcg_rd32);
        tcg_temp_free_i32(tcg_rn);
        tcg_temp_free_i32(tcg_rm);
    }

    write_fp_dreg(s, rd, tcg_rd);

    tcg_temp_free_i64(tcg_rd);
}
7513
static void handle_2misc_64(DisasContext *s, int opcode, bool u,
                            TCGv_i64 tcg_rd, TCGv_i64 tcg_rn,
                            TCGv_i32 tcg_rmode, TCGv_ptr tcg_fpstatus)
{
    /* Handle 64->64 opcodes which are shared between the scalar and
     * vector 2-misc groups. The caller is responsible for having set
     * up the FP rounding mode (tcg_rmode) and the fpstatus pointer
     * where the FP conversion/rounding cases below need them.
     */
    TCGCond cond;

    switch (opcode) {
    case 0x4: /* CLS, CLZ */
        if (u) {
            gen_helper_clz64(tcg_rd, tcg_rn);
        } else {
            gen_helper_cls64(tcg_rd, tcg_rn);
        }
        break;
    case 0x5: /* NOT */
        /* NOTE(review): this opcode value is presumably shared with
         * other encodings (e.g. CNT/RBIT) that are decoded before we
         * get here; at this point only bitwise NOT remains — confirm
         * against the callers.
         */
        tcg_gen_not_i64(tcg_rd, tcg_rn);
        break;
    case 0x7: /* SQABS, SQNEG (saturating abs/negate) */
        if (u) {
            gen_helper_neon_qneg_s64(tcg_rd, cpu_env, tcg_rn);
        } else {
            gen_helper_neon_qabs_s64(tcg_rd, cpu_env, tcg_rn);
        }
        break;
    case 0xa: /* CMLT (compare < 0) */
        /* 64-bit integer comparison against zero: the result must be
         * all-ones when true, so generate the 0/1 setcond result and
         * negate it.
         */
        cond = TCG_COND_LT;
    do_cmop:
        tcg_gen_setcondi_i64(cond, tcg_rd, tcg_rn, 0);
        tcg_gen_neg_i64(tcg_rd, tcg_rd);
        break;
    case 0x8: /* CMGT, CMGE */
        cond = u ? TCG_COND_GE : TCG_COND_GT;
        goto do_cmop;
    case 0x9: /* CMEQ, CMLE */
        cond = u ? TCG_COND_LE : TCG_COND_EQ;
        goto do_cmop;
    case 0xb: /* ABS, NEG */
        if (u) {
            tcg_gen_neg_i64(tcg_rd, tcg_rn);
        } else {
            /* abs: negate, then select the original value if it was > 0 */
            TCGv_i64 tcg_zero = tcg_const_i64(0);
            tcg_gen_neg_i64(tcg_rd, tcg_rn);
            tcg_gen_movcond_i64(TCG_COND_GT, tcg_rd, tcg_rn, tcg_zero,
                                tcg_rn, tcg_rd);
            tcg_temp_free_i64(tcg_zero);
        }
        break;
    case 0x2f: /* FABS */
        gen_helper_vfp_absd(tcg_rd, tcg_rn);
        break;
    case 0x6f: /* FNEG */
        gen_helper_vfp_negd(tcg_rd, tcg_rn);
        break;
    case 0x7f: /* FSQRT */
        gen_helper_vfp_sqrtd(tcg_rd, tcg_rn, cpu_env);
        break;
    case 0x1a: /* FCVTNS */
    case 0x1b: /* FCVTMS */
    case 0x1c: /* FCVTAS */
    case 0x3a: /* FCVTPS */
    case 0x3b: /* FCVTZS */
    {
        /* Signed FP->int conversion. The rounding mode that
         * distinguishes these variants was installed by the caller,
         * so the fixed-point shift here is always 0.
         */
        TCGv_i32 tcg_shift = tcg_const_i32(0);
        gen_helper_vfp_tosqd(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
        tcg_temp_free_i32(tcg_shift);
        break;
    }
    case 0x5a: /* FCVTNU */
    case 0x5b: /* FCVTMU */
    case 0x5c: /* FCVTAU */
    case 0x7a: /* FCVTPU */
    case 0x7b: /* FCVTZU */
    {
        /* Unsigned FP->int conversion; rounding mode set by caller */
        TCGv_i32 tcg_shift = tcg_const_i32(0);
        gen_helper_vfp_touqd(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
        tcg_temp_free_i32(tcg_shift);
        break;
    }
    case 0x18: /* FRINTN */
    case 0x19: /* FRINTM */
    case 0x38: /* FRINTP */
    case 0x39: /* FRINTZ */
    case 0x58: /* FRINTA */
    case 0x79: /* FRINTI */
        /* Round to integral; variant selected via the caller's rmode */
        gen_helper_rintd(tcg_rd, tcg_rn, tcg_fpstatus);
        break;
    case 0x59: /* FRINTX: round, signalling inexact */
        gen_helper_rintd_exact(tcg_rd, tcg_rn, tcg_fpstatus);
        break;
    default:
        g_assert_not_reached();
    }
}
7620
/* Floating point compare against zero (FCMGT/FCMGE/FCMEQ/FCMLE/FCMLT
 * zero variants): compare each element (or the single scalar) with 0.0
 * and write an all-ones/all-zeroes mask per element.
 */
static void handle_2misc_fcmp_zero(DisasContext *s, int opcode,
                                   bool is_scalar, bool is_u, bool is_q,
                                   int size, int rn, int rd)
{
    bool is_double = (size == 3);
    TCGv_ptr fpst;

    if (!fp_access_check(s)) {
        return;
    }

    fpst = get_fpstatus_ptr();

    if (is_double) {
        TCGv_i64 tcg_op = tcg_temp_new_i64();
        TCGv_i64 tcg_zero = tcg_const_i64(0);
        TCGv_i64 tcg_res = tcg_temp_new_i64();
        NeonGenTwoDoubleOPFn *genfn;
        bool swap = false;
        int pass;

        switch (opcode) {
        case 0x2e: /* FCMLT (zero) */
            /* "x < 0" is implemented as "0 > x": swap the operands of
             * the greater-than helper.
             */
            swap = true;
            /* fall through */
        case 0x2c: /* FCMGT (zero) */
            genfn = gen_helper_neon_cgt_f64;
            break;
        case 0x2d: /* FCMEQ (zero) */
            genfn = gen_helper_neon_ceq_f64;
            break;
        case 0x6d: /* FCMLE (zero): "x <= 0" as "0 >= x" */
            swap = true;
            /* fall through */
        case 0x6c: /* FCMGE (zero) */
            genfn = gen_helper_neon_cge_f64;
            break;
        default:
            g_assert_not_reached();
        }

        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
            read_vec_element(s, tcg_op, rn, pass, MO_64);
            if (swap) {
                genfn(tcg_res, tcg_zero, tcg_op, fpst);
            } else {
                genfn(tcg_res, tcg_op, tcg_zero, fpst);
            }
            write_vec_element(s, tcg_res, rd, pass, MO_64);
        }
        if (is_scalar) {
            /* scalar writes only element 0; zero the upper half */
            clear_vec_high(s, rd);
        }

        tcg_temp_free_i64(tcg_res);
        tcg_temp_free_i64(tcg_zero);
        tcg_temp_free_i64(tcg_op);
    } else {
        TCGv_i32 tcg_op = tcg_temp_new_i32();
        TCGv_i32 tcg_zero = tcg_const_i32(0);
        TCGv_i32 tcg_res = tcg_temp_new_i32();
        NeonGenTwoSingleOPFn *genfn;
        bool swap = false;
        int pass, maxpasses;

        switch (opcode) {
        case 0x2e: /* FCMLT (zero) */
            swap = true;
            /* fall through */
        case 0x2c: /* FCMGT (zero) */
            genfn = gen_helper_neon_cgt_f32;
            break;
        case 0x2d: /* FCMEQ (zero) */
            genfn = gen_helper_neon_ceq_f32;
            break;
        case 0x6d: /* FCMLE (zero) */
            swap = true;
            /* fall through */
        case 0x6c: /* FCMGE (zero) */
            genfn = gen_helper_neon_cge_f32;
            break;
        default:
            g_assert_not_reached();
        }

        if (is_scalar) {
            maxpasses = 1;
        } else {
            maxpasses = is_q ? 4 : 2;
        }

        for (pass = 0; pass < maxpasses; pass++) {
            read_vec_element_i32(s, tcg_op, rn, pass, MO_32);
            if (swap) {
                genfn(tcg_res, tcg_zero, tcg_op, fpst);
            } else {
                genfn(tcg_res, tcg_op, tcg_zero, fpst);
            }
            if (is_scalar) {
                write_fp_sreg(s, rd, tcg_res);
            } else {
                write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
            }
        }
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_zero);
        tcg_temp_free_i32(tcg_op);
        if (!is_q && !is_scalar) {
            /* 64-bit vector op: zero the high half of the register */
            clear_vec_high(s, rd);
        }
    }

    tcg_temp_free_ptr(fpst);
}
7735
/* Reciprocal / reciprocal-sqrt estimate and reciprocal exponent ops
 * (FRECPE, FRECPX, FRSQRTE, URECPE). The caller has already done
 * fp_access_check.
 */
static void handle_2misc_reciprocal(DisasContext *s, int opcode,
                                    bool is_scalar, bool is_u, bool is_q,
                                    int size, int rn, int rd)
{
    bool is_double = (size == 3);
    TCGv_ptr fpst = get_fpstatus_ptr();

    if (is_double) {
        TCGv_i64 tcg_op = tcg_temp_new_i64();
        TCGv_i64 tcg_res = tcg_temp_new_i64();
        int pass;

        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
            read_vec_element(s, tcg_op, rn, pass, MO_64);
            switch (opcode) {
            case 0x3d: /* FRECPE */
                gen_helper_recpe_f64(tcg_res, tcg_op, fpst);
                break;
            case 0x3f: /* FRECPX */
                gen_helper_frecpx_f64(tcg_res, tcg_op, fpst);
                break;
            case 0x7d: /* FRSQRTE */
                gen_helper_rsqrte_f64(tcg_res, tcg_op, fpst);
                break;
            default:
                g_assert_not_reached();
            }
            write_vec_element(s, tcg_res, rd, pass, MO_64);
        }
        if (is_scalar) {
            /* scalar only writes element 0; clear the upper half */
            clear_vec_high(s, rd);
        }

        tcg_temp_free_i64(tcg_res);
        tcg_temp_free_i64(tcg_op);
    } else {
        TCGv_i32 tcg_op = tcg_temp_new_i32();
        TCGv_i32 tcg_res = tcg_temp_new_i32();
        int pass, maxpasses;

        if (is_scalar) {
            maxpasses = 1;
        } else {
            maxpasses = is_q ? 4 : 2;
        }

        for (pass = 0; pass < maxpasses; pass++) {
            read_vec_element_i32(s, tcg_op, rn, pass, MO_32);

            switch (opcode) {
            case 0x3c: /* URECPE (unsigned integer reciprocal estimate) */
                gen_helper_recpe_u32(tcg_res, tcg_op, fpst);
                break;
            case 0x3d: /* FRECPE */
                gen_helper_recpe_f32(tcg_res, tcg_op, fpst);
                break;
            case 0x3f: /* FRECPX */
                gen_helper_frecpx_f32(tcg_res, tcg_op, fpst);
                break;
            case 0x7d: /* FRSQRTE */
                gen_helper_rsqrte_f32(tcg_res, tcg_op, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            if (is_scalar) {
                write_fp_sreg(s, rd, tcg_res);
            } else {
                write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
            }
        }
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_op);
        if (!is_q && !is_scalar) {
            clear_vec_high(s, rd);
        }
    }
    tcg_temp_free_ptr(fpst);
}
7816
static void handle_2misc_narrow(DisasContext *s, bool scalar,
                                int opcode, bool u, bool is_q,
                                int size, int rn, int rd)
{
    /* Handle 2-misc ops which are narrowing: each 2*size source element
     * becomes a size destination element. Results are computed into
     * tcg_res[] first and written out at the end, so rd == rn is safe.
     */
    int pass;
    TCGv_i32 tcg_res[2];
    int destelt = is_q ? 2 : 0;  /* "2" variants write the high half */
    int passes = scalar ? 1 : 2;

    if (scalar) {
        /* Scalar: force the unused upper result to zero */
        tcg_res[1] = tcg_const_i32(0);
    }

    for (pass = 0; pass < passes; pass++) {
        TCGv_i64 tcg_op = tcg_temp_new_i64();
        NeonGenNarrowFn *genfn = NULL;
        NeonGenNarrowEnvFn *genenvfn = NULL;

        if (scalar) {
            read_vec_element(s, tcg_op, rn, pass, size + 1);
        } else {
            read_vec_element(s, tcg_op, rn, pass, MO_64);
        }
        tcg_res[pass] = tcg_temp_new_i32();

        switch (opcode) {
        case 0x12: /* XTN, SQXTUN */
        {
            static NeonGenNarrowFn * const xtnfns[3] = {
                gen_helper_neon_narrow_u8,
                gen_helper_neon_narrow_u16,
                tcg_gen_extrl_i64_i32,
            };
            static NeonGenNarrowEnvFn * const sqxtunfns[3] = {
                gen_helper_neon_unarrow_sat8,
                gen_helper_neon_unarrow_sat16,
                gen_helper_neon_unarrow_sat32,
            };
            if (u) {
                genenvfn = sqxtunfns[size];
            } else {
                genfn = xtnfns[size];
            }
            break;
        }
        case 0x14: /* SQXTN, UQXTN */
        {
            static NeonGenNarrowEnvFn * const fns[3][2] = {
                { gen_helper_neon_narrow_sat_s8,
                  gen_helper_neon_narrow_sat_u8 },
                { gen_helper_neon_narrow_sat_s16,
                  gen_helper_neon_narrow_sat_u16 },
                { gen_helper_neon_narrow_sat_s32,
                  gen_helper_neon_narrow_sat_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x16: /* FCVTN */
            /* 64 -> 32 bit, or two 32 -> two 16 bit float conversions */
            if (size == 2) {
                gen_helper_vfp_fcvtsd(tcg_res[pass], tcg_op, cpu_env);
            } else {
                /* Convert each 32-bit half separately and repack */
                TCGv_i32 tcg_lo = tcg_temp_new_i32();
                TCGv_i32 tcg_hi = tcg_temp_new_i32();
                tcg_gen_extr_i64_i32(tcg_lo, tcg_hi, tcg_op);
                gen_helper_vfp_fcvt_f32_to_f16(tcg_lo, tcg_lo, cpu_env);
                gen_helper_vfp_fcvt_f32_to_f16(tcg_hi, tcg_hi, cpu_env);
                tcg_gen_deposit_i32(tcg_res[pass], tcg_lo, tcg_hi, 16, 16);
                tcg_temp_free_i32(tcg_lo);
                tcg_temp_free_i32(tcg_hi);
            }
            break;
        case 0x56: /* FCVTXN */
            /* 64 -> 32 bit float conversion with a dedicated helper
             * (distinct rounding behavior from plain fcvtsd); only
             * defined for the double-precision source size.
             */
            assert(size == 2);
            gen_helper_fcvtx_f64_to_f32(tcg_res[pass], tcg_op, cpu_env);
            break;
        default:
            g_assert_not_reached();
        }

        if (genfn) {
            genfn(tcg_res[pass], tcg_op);
        } else if (genenvfn) {
            genenvfn(tcg_res[pass], cpu_env, tcg_op);
        }

        tcg_temp_free_i64(tcg_op);
    }

    /* Write both halves: in the scalar case tcg_res[1] is the zero
     * constant set up above, clearing element 1 of the destination.
     */
    for (pass = 0; pass < 2; pass++) {
        write_vec_element_i32(s, tcg_res[pass], rd, destelt + pass, MO_32);
        tcg_temp_free_i32(tcg_res[pass]);
    }
    if (!is_q) {
        clear_vec_high(s, rd);
    }
}
7921
7922
/* Saturating accumulate of mixed signedness (USQADD / SUQADD):
 * rd = saturate(rd + rn) where one operand is treated as signed and
 * the other as unsigned (selected by is_u).
 */
static void handle_2misc_satacc(DisasContext *s, bool is_scalar, bool is_u,
                                bool is_q, int size, int rn, int rd)
{
    bool is_double = (size == 3);

    if (is_double) {
        TCGv_i64 tcg_rn = tcg_temp_new_i64();
        TCGv_i64 tcg_rd = tcg_temp_new_i64();
        int pass;

        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
            read_vec_element(s, tcg_rn, rn, pass, MO_64);
            read_vec_element(s, tcg_rd, rd, pass, MO_64);

            if (is_u) { /* USQADD */
                gen_helper_neon_uqadd_s64(tcg_rd, cpu_env, tcg_rn, tcg_rd);
            } else { /* SUQADD */
                gen_helper_neon_sqadd_u64(tcg_rd, cpu_env, tcg_rn, tcg_rd);
            }
            write_vec_element(s, tcg_rd, rd, pass, MO_64);
        }
        if (is_scalar) {
            clear_vec_high(s, rd);
        }

        tcg_temp_free_i64(tcg_rd);
        tcg_temp_free_i64(tcg_rn);
    } else {
        TCGv_i32 tcg_rn = tcg_temp_new_i32();
        TCGv_i32 tcg_rd = tcg_temp_new_i32();
        int pass, maxpasses;

        if (is_scalar) {
            maxpasses = 1;
        } else {
            maxpasses = is_q ? 4 : 2;
        }

        for (pass = 0; pass < maxpasses; pass++) {
            if (is_scalar) {
                /* scalar: read at the actual element size */
                read_vec_element_i32(s, tcg_rn, rn, pass, size);
                read_vec_element_i32(s, tcg_rd, rd, pass, size);
            } else {
                read_vec_element_i32(s, tcg_rn, rn, pass, MO_32);
                read_vec_element_i32(s, tcg_rd, rd, pass, MO_32);
            }

            if (is_u) { /* USQADD */
                switch (size) {
                case 0:
                    gen_helper_neon_uqadd_s8(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                case 1:
                    gen_helper_neon_uqadd_s16(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                case 2:
                    gen_helper_neon_uqadd_s32(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                default:
                    g_assert_not_reached();
                }
            } else { /* SUQADD */
                switch (size) {
                case 0:
                    gen_helper_neon_sqadd_u8(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                case 1:
                    gen_helper_neon_sqadd_u16(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                case 2:
                    gen_helper_neon_sqadd_u32(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                default:
                    g_assert_not_reached();
                }
            }

            if (is_scalar) {
                /* Zero the whole low 64 bits first so the narrow write
                 * below leaves the rest of the register cleared.
                 */
                TCGv_i64 tcg_zero = tcg_const_i64(0);
                write_vec_element(s, tcg_zero, rd, 0, MO_64);
                tcg_temp_free_i64(tcg_zero);
            }
            write_vec_element_i32(s, tcg_rd, rd, pass, MO_32);
        }

        if (!is_q) {
            clear_vec_high(s, rd);
        }

        tcg_temp_free_i32(tcg_rd);
        tcg_temp_free_i32(tcg_rn);
    }
}
8016
8017
8018
8019
8020
8021
8022
/* AdvSIMD scalar two reg misc
 *  31 30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
 * +-----+---+-----------+------+-----------+--------+-----+------+------+
 * | 0 1 | U | 1 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +-----+---+-----------+------+-----------+--------+-----+------+------+
 */
static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 12, 5);
    int size = extract32(insn, 22, 2);
    bool u = extract32(insn, 29, 1);
    bool is_fcvt = false;
    int rmode;
    TCGv_i32 tcg_rmode;
    TCGv_ptr tcg_fpstatus;

    switch (opcode) {
    case 0x3: /* USQADD / SUQADD */
        if (!fp_access_check(s)) {
            return;
        }
        handle_2misc_satacc(s, true, u, false, size, rn, rd);
        return;
    case 0x7: /* SQABS / SQNEG */
        break;
    case 0xa: /* CMLT */
        if (u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x8: /* CMGT, CMGE */
    case 0x9: /* CMEQ, CMLE */
    case 0xb: /* ABS, NEG */
        /* the scalar compare/abs/neg forms only exist for 64 bit */
        if (size != 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x12: /* SQXTUN */
        if (!u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x14: /* SQXTN, UQXTN */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_2misc_narrow(s, true, opcode, u, false, size, rn, rd);
        return;
    case 0xc ... 0xf:
    case 0x16 ... 0x1d:
    case 0x1f:
        /* Floating point: U, size[1] and opcode indicate operation;
         * size[0] indicates single or double precision.
         */
        opcode |= (extract32(size, 1, 1) << 5) | (u << 6);
        size = extract32(size, 0, 1) ? 3 : 2;
        switch (opcode) {
        case 0x2c: /* FCMGT (zero) */
        case 0x2d: /* FCMEQ (zero) */
        case 0x2e: /* FCMLT (zero) */
        case 0x6c: /* FCMGE (zero) */
        case 0x6d: /* FCMLE (zero) */
            handle_2misc_fcmp_zero(s, opcode, true, u, true, size, rn, rd);
            return;
        case 0x1d: /* SCVTF */
        case 0x5d: /* UCVTF */
        {
            bool is_signed = (opcode == 0x1d);
            if (!fp_access_check(s)) {
                return;
            }
            handle_simd_intfp_conv(s, rd, rn, 1, is_signed, 0, size);
            return;
        }
        case 0x3d: /* FRECPE */
        case 0x3f: /* FRECPX */
        case 0x7d: /* FRSQRTE */
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_reciprocal(s, opcode, true, u, true, size, rn, rd);
            return;
        case 0x1a: /* FCVTNS */
        case 0x1b: /* FCVTMS */
        case 0x3a: /* FCVTPS */
        case 0x3b: /* FCVTZS */
        case 0x5a: /* FCVTNU */
        case 0x5b: /* FCVTMU */
        case 0x7a: /* FCVTPU */
        case 0x7b: /* FCVTZU */
            is_fcvt = true;
            /* rounding mode is encoded in opcode bits 5 and 0 */
            rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
            break;
        case 0x1c: /* FCVTAS */
        case 0x5c: /* FCVTAU */
            /* the "round to nearest with ties to away" variants */
            is_fcvt = true;
            rmode = FPROUNDING_TIEAWAY;
            break;
        case 0x56: /* FCVTXN */
            if (size == 2) {
                /* only double -> single is valid */
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_narrow(s, true, opcode, u, false, size - 1, rn, rd);
            return;
        default:
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (is_fcvt) {
        /* Install the conversion's rounding mode; set_rmode leaves the
         * previous mode in tcg_rmode so it can be restored below.
         */
        tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
        tcg_fpstatus = get_fpstatus_ptr();
    } else {
        TCGV_UNUSED_I32(tcg_rmode);
        TCGV_UNUSED_PTR(tcg_fpstatus);
    }

    if (size == 3) {
        TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
        TCGv_i64 tcg_rd = tcg_temp_new_i64();

        handle_2misc_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rmode, tcg_fpstatus);
        write_fp_dreg(s, rd, tcg_rd);
        tcg_temp_free_i64(tcg_rd);
        tcg_temp_free_i64(tcg_rn);
    } else {
        TCGv_i32 tcg_rn = tcg_temp_new_i32();
        TCGv_i32 tcg_rd = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_rn, rn, 0, size);

        switch (opcode) {
        case 0x7: /* SQABS, SQNEG */
        {
            NeonGenOneOpEnvFn *genfn;
            static NeonGenOneOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 },
                { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 },
                { gen_helper_neon_qabs_s32, gen_helper_neon_qneg_s32 },
            };
            genfn = fns[size][u];
            genfn(tcg_rd, cpu_env, tcg_rn);
            break;
        }
        case 0x1a: /* FCVTNS */
        case 0x1b: /* FCVTMS */
        case 0x1c: /* FCVTAS */
        case 0x3a: /* FCVTPS */
        case 0x3b: /* FCVTZS */
        {
            /* signed FP->int; rounding mode already installed above */
            TCGv_i32 tcg_shift = tcg_const_i32(0);
            gen_helper_vfp_tosls(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
            tcg_temp_free_i32(tcg_shift);
            break;
        }
        case 0x5a: /* FCVTNU */
        case 0x5b: /* FCVTMU */
        case 0x5c: /* FCVTAU */
        case 0x7a: /* FCVTPU */
        case 0x7b: /* FCVTZU */
        {
            TCGv_i32 tcg_shift = tcg_const_i32(0);
            gen_helper_vfp_touls(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
            tcg_temp_free_i32(tcg_shift);
            break;
        }
        default:
            g_assert_not_reached();
        }

        write_fp_sreg(s, rd, tcg_rd);
        tcg_temp_free_i32(tcg_rd);
        tcg_temp_free_i32(tcg_rn);
    }

    if (is_fcvt) {
        /* restore the rounding mode saved by the first set_rmode call */
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
        tcg_temp_free_i32(tcg_rmode);
        tcg_temp_free_ptr(tcg_fpstatus);
    }
}
8222
8223
/* SSHR[RA]/USHR[RA]/SRSHR[RA]/URSHR[RA]/SRI - Vector shift right by
 * immediate, with optional rounding, accumulation or insertion.
 */
static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
                                 int immh, int immb, int opcode, int rn, int rd)
{
    int size = 32 - clz32(immh) - 1;
    int immhb = immh << 3 | immb;
    /* immediate encodes (2 * esize - shift), so recover the shift */
    int shift = 2 * (8 << size) - immhb;
    bool accumulate = false;
    bool round = false;
    bool insert = false;
    int dsize = is_q ? 128 : 64;
    int esize = 8 << size;
    int elements = dsize/esize;
    TCGMemOp memop = size | (is_u ? 0 : MO_SIGN);
    TCGv_i64 tcg_rn = new_tmp_a64(s);
    TCGv_i64 tcg_rd = new_tmp_a64(s);
    TCGv_i64 tcg_round;
    int i;

    if (extract32(immh, 3, 1) && !is_q) {
        /* 64-bit elements are only valid for the 128-bit form */
        unallocated_encoding(s);
        return;
    }

    if (size > 3 && !is_q) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    switch (opcode) {
    case 0x02: /* SSRA / USRA (accumulate) */
        accumulate = true;
        break;
    case 0x04: /* SRSHR / URSHR (rounding) */
        round = true;
        break;
    case 0x06: /* SRSRA / URSRA (accumulate + rounding) */
        accumulate = round = true;
        break;
    case 0x08: /* SRI (insert) */
        insert = true;
        break;
    /* default (0x00): plain SSHR / USHR */
    }

    if (round) {
        uint64_t round_const = 1ULL << (shift - 1);
        tcg_round = tcg_const_i64(round_const);
    } else {
        TCGV_UNUSED_I64(tcg_round);
    }

    for (i = 0; i < elements; i++) {
        read_vec_element(s, tcg_rn, rn, i, memop);
        if (accumulate || insert) {
            /* accumulate/insert variants also consume the old rd value */
            read_vec_element(s, tcg_rd, rd, i, memop);
        }

        if (insert) {
            handle_shri_with_ins(tcg_rd, tcg_rn, size, shift);
        } else {
            handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
                                    accumulate, is_u, size, shift);
        }

        write_vec_element(s, tcg_rd, rd, i, size);
    }

    if (!is_q) {
        clear_vec_high(s, rd);
    }

    if (round) {
        tcg_temp_free_i64(tcg_round);
    }
}
8302
8303
8304static void handle_vec_simd_shli(DisasContext *s, bool is_q, bool insert,
8305 int immh, int immb, int opcode, int rn, int rd)
8306{
8307 int size = 32 - clz32(immh) - 1;
8308 int immhb = immh << 3 | immb;
8309 int shift = immhb - (8 << size);
8310 int dsize = is_q ? 128 : 64;
8311 int esize = 8 << size;
8312 int elements = dsize/esize;
8313 TCGv_i64 tcg_rn = new_tmp_a64(s);
8314 TCGv_i64 tcg_rd = new_tmp_a64(s);
8315 int i;
8316
8317 if (extract32(immh, 3, 1) && !is_q) {
8318 unallocated_encoding(s);
8319 return;
8320 }
8321
8322 if (size > 3 && !is_q) {
8323 unallocated_encoding(s);
8324 return;
8325 }
8326
8327 if (!fp_access_check(s)) {
8328 return;
8329 }
8330
8331 for (i = 0; i < elements; i++) {
8332 read_vec_element(s, tcg_rn, rn, i, size);
8333 if (insert) {
8334 read_vec_element(s, tcg_rd, rd, i, size);
8335 }
8336
8337 handle_shli_with_ins(tcg_rd, tcg_rn, insert, shift);
8338
8339 write_vec_element(s, tcg_rd, rd, i, size);
8340 }
8341
8342 if (!is_q) {
8343 clear_vec_high(s, rd);
8344 }
8345}
8346
8347
/* SSHLL/USHLL - Vector shift left with widening */
static void handle_vec_simd_wshli(DisasContext *s, bool is_q, bool is_u,
                                 int immh, int immb, int opcode, int rn, int rd)
{
    int size = 32 - clz32(immh) - 1;
    int immhb = immh << 3 | immb;
    int shift = immhb - (8 << size);
    int dsize = 64;
    int esize = 8 << size;
    int elements = dsize/esize;
    TCGv_i64 tcg_rn = new_tmp_a64(s);
    TCGv_i64 tcg_rd = new_tmp_a64(s);
    int i;

    if (size >= 3) {
        /* widening from 64-bit elements does not exist */
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* For the LL variants the store is larger than the load,
     * so if rd == rn we would overwrite parts of our input.
     * So load everything right now and use shifts in the main loop.
     * The "2" variants (is_q) widen from the high 64 bits of Rn.
     */
    read_vec_element(s, tcg_rn, rn, is_q ? 1 : 0, MO_64);

    for (i = 0; i < elements; i++) {
        /* extract element i, sign/zero-extend it, then shift */
        tcg_gen_shri_i64(tcg_rd, tcg_rn, i * esize);
        ext_and_shift_reg(tcg_rd, tcg_rd, size | (!is_u << 2), 0);
        tcg_gen_shli_i64(tcg_rd, tcg_rd, shift);
        write_vec_element(s, tcg_rd, rd, i, size + 1);
    }
}
8383
8384
/* SHRN/RSHRN - Shift right with narrowing (and potential rounding) */
static void handle_vec_simd_shrn(DisasContext *s, bool is_q,
                                 int immh, int immb, int opcode, int rn, int rd)
{
    int immhb = immh << 3 | immb;
    int size = 32 - clz32(immh) - 1;
    int dsize = 64;
    int esize = 8 << size;
    int elements = dsize/esize;
    int shift = (2 * esize) - immhb;
    bool round = extract32(opcode, 0, 1); /* opcode bit 0: RSHRN */
    TCGv_i64 tcg_rn, tcg_rd, tcg_final;
    TCGv_i64 tcg_round;
    int i;

    if (extract32(immh, 3, 1)) {
        /* narrowing from 64-bit destination elements does not exist */
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rn = tcg_temp_new_i64();
    tcg_rd = tcg_temp_new_i64();
    tcg_final = tcg_temp_new_i64();
    /* Accumulate results into tcg_final; start from the current Rd
     * half so the untouched half of a "2" variant is preserved.
     */
    read_vec_element(s, tcg_final, rd, is_q ? 1 : 0, MO_64);

    if (round) {
        uint64_t round_const = 1ULL << (shift - 1);
        tcg_round = tcg_const_i64(round_const);
    } else {
        TCGV_UNUSED_I64(tcg_round);
    }

    for (i = 0; i < elements; i++) {
        /* read the wide (2*esize) element, shift, deposit narrowed bits */
        read_vec_element(s, tcg_rn, rn, i, size+1);
        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
                                false, true, size+1, shift);

        tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
    }

    if (!is_q) {
        /* write to the low half and zero the high half */
        clear_vec_high(s, rd);
        write_vec_element(s, tcg_final, rd, 0, MO_64);
    } else {
        /* "2" variant: write the narrowed results to the high half */
        write_vec_element(s, tcg_final, rd, 1, MO_64);
    }

    if (round) {
        tcg_temp_free_i64(tcg_round);
    }
    tcg_temp_free_i64(tcg_rn);
    tcg_temp_free_i64(tcg_rd);
    tcg_temp_free_i64(tcg_final);
    return;
}
8443
8444
8445
8446
8447
8448
8449
8450
/* AdvSIMD shift by immediate
 *  31  30   29 28         23 22  19 18 16 15    11  10 9    5 4    0
 * +---+---+---+-------------+------+------+--------+---+------+------+
 * | 0 | Q | U | 0 1 1 1 1 0 | immh | immb | opcode | 1 |  Rn  |  Rd  |
 * +---+---+---+-------------+------+------+--------+---+------+------+
 */
static void disas_simd_shift_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 5);
    int immb = extract32(insn, 16, 3);
    int immh = extract32(insn, 19, 4);
    bool is_u = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);

    switch (opcode) {
    case 0x08: /* SRI */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x00: /* SSHR / USHR */
    case 0x02: /* SSRA / USRA */
    case 0x04: /* SRSHR / URSHR */
    case 0x06: /* SRSRA / URSRA */
        handle_vec_simd_shri(s, is_q, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x0a: /* SHL / SLI */
        handle_vec_simd_shli(s, is_q, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x10: /* SHRN */
    case 0x11: /* RSHRN / SQRSHRUN */
        if (is_u) {
            /* unsigned forms: saturating unsigned narrowing shifts */
            handle_vec_simd_sqshrn(s, false, is_q, false, true, immh, immb,
                                   opcode, rn, rd);
        } else {
            handle_vec_simd_shrn(s, is_q, immh, immb, opcode, rn, rd);
        }
        break;
    case 0x12: /* SQSHRN / UQSHRN */
    case 0x13: /* SQRSHRN / UQRSHRN */
        handle_vec_simd_sqshrn(s, false, is_q, is_u, is_u, immh, immb,
                               opcode, rn, rd);
        break;
    case 0x14: /* SSHLL / USHLL */
        handle_vec_simd_wshli(s, is_q, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x1c: /* SCVTF / UCVTF (fixed-point) */
        handle_simd_shift_intfp_conv(s, false, is_q, is_u, immh, immb,
                                     opcode, rn, rd);
        break;
    case 0xc: /* SQSHLU */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        handle_simd_qshl(s, false, is_q, false, true, immh, immb, rn, rd);
        break;
    case 0xe: /* SQSHL / UQSHL */
        handle_simd_qshl(s, false, is_q, is_u, is_u, immh, immb, rn, rd);
        break;
    case 0x1f: /* FCVTZS / FCVTZU (fixed-point) */
        handle_simd_shift_fpint_conv(s, false, is_q, is_u, immh, immb, rn, rd);
        return;
    default:
        unallocated_encoding(s);
        return;
    }
}
8516
8517
8518
8519
8520static void gen_neon_addl(int size, bool is_sub, TCGv_i64 tcg_res,
8521 TCGv_i64 tcg_op1, TCGv_i64 tcg_op2)
8522{
8523 static NeonGenTwo64OpFn * const fns[3][2] = {
8524 { gen_helper_neon_addl_u16, gen_helper_neon_subl_u16 },
8525 { gen_helper_neon_addl_u32, gen_helper_neon_subl_u32 },
8526 { tcg_gen_add_i64, tcg_gen_sub_i64 },
8527 };
8528 NeonGenTwo64OpFn *genfn;
8529 assert(size < 3);
8530
8531 genfn = fns[size][is_sub];
8532 genfn(tcg_res, tcg_op1, tcg_op2);
8533}
8534
/* Handle the 3-reg-different "widening" group of insns: the operation
 * produces double-width results from narrower inputs, so the
 * destination is always a full 128-bit pair of 64-bit elements.
 */
static void handle_3rd_widening(DisasContext *s, int is_q, int is_u, int size,
                                int opcode, int rd, int rn, int rm)
{
    /* The two 64-bit result elements making up the 128-bit Vd */
    TCGv_i64 tcg_res[2];
    int pass, accop;

    tcg_res[0] = tcg_temp_new_i64();
    tcg_res[1] = tcg_temp_new_i64();

    /* Does this op do an adding accumulate (accop == 1), a subtracting
     * accumulate (accop == -1), or no accumulate at all (accop == 0)?
     */
    switch (opcode) {
    case 5:  /* absolute-difference accumulate */
    case 8:  /* multiply-add-long */
    case 9:  /* saturating doubling multiply-add-long */
        accop = 1;
        break;
    case 10: /* multiply-subtract-long */
    case 11: /* saturating doubling multiply-subtract-long */
        accop = -1;
        break;
    default:
        accop = 0;
        break;
    }

    if (accop != 0) {
        /* Accumulating ops start from the existing destination value */
        read_vec_element(s, tcg_res[0], rd, 0, MO_64);
        read_vec_element(s, tcg_res[1], rd, 1, MO_64);
    }

    /* size == 2 means two 32x32->64 operations; this is worth special
     * casing because we can generally handle it inline without needing
     * the Neon helpers.
     */
    if (size == 2) {
        for (pass = 0; pass < 2; pass++) {
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
            TCGv_i64 tcg_passres;
            TCGMemOp memop = MO_32 | (is_u ? 0 : MO_SIGN);

            /* The "2" variants (is_q set) read the high half of Vn/Vm */
            int elt = pass + is_q * 2;

            read_vec_element(s, tcg_op1, rn, elt, memop);
            read_vec_element(s, tcg_op2, rm, elt, memop);

            if (accop == 0) {
                /* No accumulate: compute straight into the result temp */
                tcg_passres = tcg_res[pass];
            } else {
                tcg_passres = tcg_temp_new_i64();
            }

            switch (opcode) {
            case 0: /* widening add */
                tcg_gen_add_i64(tcg_passres, tcg_op1, tcg_op2);
                break;
            case 2: /* widening subtract */
                tcg_gen_sub_i64(tcg_passres, tcg_op1, tcg_op2);
                break;
            case 5: /* absolute difference (accumulating) */
            case 7: /* absolute difference */
            {
                /* abd(a, b) via movcond: pick a-b or b-a depending on
                 * which operand is larger (signed or unsigned compare).
                 */
                TCGv_i64 tcg_tmp1 = tcg_temp_new_i64();
                TCGv_i64 tcg_tmp2 = tcg_temp_new_i64();

                tcg_gen_sub_i64(tcg_tmp1, tcg_op1, tcg_op2);
                tcg_gen_sub_i64(tcg_tmp2, tcg_op2, tcg_op1);
                tcg_gen_movcond_i64(is_u ? TCG_COND_GEU : TCG_COND_GE,
                                    tcg_passres,
                                    tcg_op1, tcg_op2, tcg_tmp1, tcg_tmp2);
                tcg_temp_free_i64(tcg_tmp1);
                tcg_temp_free_i64(tcg_tmp2);
                break;
            }
            case 8:  /* multiply-add-long */
            case 10: /* multiply-subtract-long */
            case 12: /* multiply-long */
                tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2);
                break;
            case 9:  /* saturating doubling multiply-add-long */
            case 11: /* saturating doubling multiply-subtract-long */
            case 13: /* saturating doubling multiply-long */
                tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2);
                /* Doubling is done as a saturating add of the product
                 * to itself, so saturation is detected correctly.
                 */
                gen_helper_neon_addl_saturate_s64(tcg_passres, cpu_env,
                                                  tcg_passres, tcg_passres);
                break;
            default:
                g_assert_not_reached();
            }

            if (opcode == 9 || opcode == 11) {
                /* saturating accumulate ops: negate-then-saturating-add
                 * implements the subtracting accumulate.
                 */
                if (accop < 0) {
                    tcg_gen_neg_i64(tcg_passres, tcg_passres);
                }
                gen_helper_neon_addl_saturate_s64(tcg_res[pass], cpu_env,
                                                  tcg_res[pass], tcg_passres);
            } else if (accop > 0) {
                tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
            } else if (accop < 0) {
                tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
            }

            if (accop != 0) {
                /* tcg_passres only needs freeing when it is not an
                 * alias of tcg_res[pass] (i.e. in the accumulate case).
                 */
                tcg_temp_free_i64(tcg_passres);
            }

            tcg_temp_free_i64(tcg_op1);
            tcg_temp_free_i64(tcg_op2);
        }
    } else {
        /* size 0 or 1: use the Neon helpers on 32-bit source elements */
        for (pass = 0; pass < 2; pass++) {
            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
            TCGv_i64 tcg_passres;
            int elt = pass + is_q * 2;

            read_vec_element_i32(s, tcg_op1, rn, elt, MO_32);
            read_vec_element_i32(s, tcg_op2, rm, elt, MO_32);

            if (accop == 0) {
                tcg_passres = tcg_res[pass];
            } else {
                tcg_passres = tcg_temp_new_i64();
            }

            switch (opcode) {
            case 0: /* widening add */
            case 2: /* widening subtract */
            {
                TCGv_i64 tcg_op2_64 = tcg_temp_new_i64();
                static NeonGenWidenFn * const widenfns[2][2] = {
                    { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 },
                    { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 },
                };
                NeonGenWidenFn *widenfn = widenfns[size][is_u];

                /* Widen both inputs, then add/sub at the wider size */
                widenfn(tcg_op2_64, tcg_op2);
                widenfn(tcg_passres, tcg_op1);
                gen_neon_addl(size, (opcode == 2), tcg_passres,
                              tcg_passres, tcg_op2_64);
                tcg_temp_free_i64(tcg_op2_64);
                break;
            }
            case 5: /* absolute-difference (accumulating) */
            case 7: /* absolute-difference long */
                if (size == 0) {
                    if (is_u) {
                        gen_helper_neon_abdl_u16(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_abdl_s16(tcg_passres, tcg_op1, tcg_op2);
                    }
                } else {
                    if (is_u) {
                        gen_helper_neon_abdl_u32(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_abdl_s32(tcg_passres, tcg_op1, tcg_op2);
                    }
                }
                break;
            case 8:  /* multiply-add-long */
            case 10: /* multiply-subtract-long */
            case 12: /* multiply-long */
                if (size == 0) {
                    if (is_u) {
                        gen_helper_neon_mull_u8(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_mull_s8(tcg_passres, tcg_op1, tcg_op2);
                    }
                } else {
                    if (is_u) {
                        gen_helper_neon_mull_u16(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2);
                    }
                }
                break;
            case 9:  /* saturating doubling multiply-add-long */
            case 11: /* saturating doubling multiply-subtract-long */
            case 13: /* saturating doubling multiply-long */
                /* only 16x16->32 is valid for the saturating doubling ops */
                assert(size == 1);
                gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2);
                /* doubling via saturating add of product to itself */
                gen_helper_neon_addl_saturate_s32(tcg_passres, cpu_env,
                                                  tcg_passres, tcg_passres);
                break;
            case 14: /* polynomial multiply long (8-bit only) */
                assert(size == 0);
                gen_helper_neon_mull_p8(tcg_passres, tcg_op1, tcg_op2);
                break;
            default:
                g_assert_not_reached();
            }
            tcg_temp_free_i32(tcg_op1);
            tcg_temp_free_i32(tcg_op2);

            if (accop != 0) {
                if (opcode == 9 || opcode == 11) {
                    /* saturating accumulate ops */
                    if (accop < 0) {
                        gen_helper_neon_negl_u32(tcg_passres, tcg_passres);
                    }
                    gen_helper_neon_addl_saturate_s32(tcg_res[pass], cpu_env,
                                                      tcg_res[pass],
                                                      tcg_passres);
                } else {
                    gen_neon_addl(size, (accop < 0), tcg_res[pass],
                                  tcg_res[pass], tcg_passres);
                }
                tcg_temp_free_i64(tcg_passres);
            }
        }
    }

    write_vec_element(s, tcg_res[0], rd, 0, MO_64);
    write_vec_element(s, tcg_res[1], rd, 1, MO_64);
    tcg_temp_free_i64(tcg_res[0]);
    tcg_temp_free_i64(tcg_res[1]);
}
8756
/* Handle the 3-reg-different "wide" ops: a 64-bit per-element first
 * operand combined with a widened narrower second operand, producing
 * 64-bit elements (128 bits total).
 */
static void handle_3rd_wide(DisasContext *s, int is_q, int is_u, int size,
                            int opcode, int rd, int rn, int rm)
{
    TCGv_i64 tcg_res[2];
    /* The "2" variants (is_q set) take Vm's narrow inputs from the
     * high half of the register.
     */
    int part = is_q ? 2 : 0;
    int pass;

    for (pass = 0; pass < 2; pass++) {
        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
        TCGv_i32 tcg_op2 = tcg_temp_new_i32();
        TCGv_i64 tcg_op2_wide = tcg_temp_new_i64();
        static NeonGenWidenFn * const widenfns[3][2] = {
            { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 },
            { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 },
            { tcg_gen_ext_i32_i64, tcg_gen_extu_i32_i64 },
        };
        NeonGenWidenFn *widenfn = widenfns[size][is_u];

        read_vec_element(s, tcg_op1, rn, pass, MO_64);
        read_vec_element_i32(s, tcg_op2, rm, part + pass, MO_32);
        widenfn(tcg_op2_wide, tcg_op2);
        tcg_temp_free_i32(tcg_op2);
        tcg_res[pass] = tcg_temp_new_i64();
        /* opcode 3 is the subtracting variant, opcode 1 the adding one */
        gen_neon_addl(size, (opcode == 3),
                      tcg_res[pass], tcg_op1, tcg_op2_wide);
        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2_wide);
    }

    /* Write results only after both passes have read their inputs,
     * in case rd overlaps rn/rm.
     */
    for (pass = 0; pass < 2; pass++) {
        write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
        tcg_temp_free_i64(tcg_res[pass]);
    }
}
8791
/* Rounding-narrow a 64-bit value to its high 32 bits: add the rounding
 * constant (1 << 31) and extract the top half.  Note that this
 * clobbers the input 'in'.
 */
static void do_narrow_round_high_u32(TCGv_i32 res, TCGv_i64 in)
{
    tcg_gen_addi_i64(in, in, 1U << 31);
    tcg_gen_extrh_i64_i32(res, in);
}
8797
/* Handle the 3-reg-different "narrowing" ops: add or subtract 64-bit
 * elements, then narrow the result to 32 bits by taking the high half
 * (with or without rounding), writing into the low or high half of Vd.
 */
static void handle_3rd_narrowing(DisasContext *s, int is_q, int is_u, int size,
                                 int opcode, int rd, int rn, int rm)
{
    TCGv_i32 tcg_res[2];
    /* is_q selects the "2" variants, which write the high half of Vd */
    int part = is_q ? 2 : 0;
    int pass;

    for (pass = 0; pass < 2; pass++) {
        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
        TCGv_i64 tcg_op2 = tcg_temp_new_i64();
        TCGv_i64 tcg_wideres = tcg_temp_new_i64();
        /* [size]: element size; [is_u]: 0 = truncating, 1 = rounding */
        static NeonGenNarrowFn * const narrowfns[3][2] = {
            { gen_helper_neon_narrow_high_u8,
              gen_helper_neon_narrow_round_high_u8 },
            { gen_helper_neon_narrow_high_u16,
              gen_helper_neon_narrow_round_high_u16 },
            { tcg_gen_extrh_i64_i32, do_narrow_round_high_u32 },
        };
        NeonGenNarrowFn *gennarrow = narrowfns[size][is_u];

        read_vec_element(s, tcg_op1, rn, pass, MO_64);
        read_vec_element(s, tcg_op2, rm, pass, MO_64);

        /* opcode 6 is the subtracting variant, opcode 4 the adding one */
        gen_neon_addl(size, (opcode == 6), tcg_wideres, tcg_op1, tcg_op2);

        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2);

        tcg_res[pass] = tcg_temp_new_i32();
        gennarrow(tcg_res[pass], tcg_wideres);
        tcg_temp_free_i64(tcg_wideres);
    }

    for (pass = 0; pass < 2; pass++) {
        write_vec_element_i32(s, tcg_res[pass], rd, pass + part, MO_32);
        tcg_temp_free_i32(tcg_res[pass]);
    }
    if (!is_q) {
        /* Non-Q writes to the low half must zero the high half of Vd */
        clear_vec_high(s, rd);
    }
}
8839
/* 64 x 64 -> 128 polynomial multiply.  The helpers compute the low and
 * high 64 bits of the product separately; is_q selects whether the
 * source elements come from the low or high half of Vn/Vm.
 */
static void handle_pmull_64(DisasContext *s, int is_q, int rd, int rn, int rm)
{
    TCGv_i64 tcg_op1 = tcg_temp_new_i64();
    TCGv_i64 tcg_op2 = tcg_temp_new_i64();
    TCGv_i64 tcg_res = tcg_temp_new_i64();

    read_vec_element(s, tcg_op1, rn, is_q, MO_64);
    read_vec_element(s, tcg_op2, rm, is_q, MO_64);
    /* Writing the low half first is safe even if rd == rn/rm because
     * both operands were already read into temporaries above.
     */
    gen_helper_neon_pmull_64_lo(tcg_res, tcg_op1, tcg_op2);
    write_vec_element(s, tcg_res, rd, 0, MO_64);
    gen_helper_neon_pmull_64_hi(tcg_res, tcg_op1, tcg_op2);
    write_vec_element(s, tcg_res, rd, 1, MO_64);

    tcg_temp_free_i64(tcg_op1);
    tcg_temp_free_i64(tcg_op2);
    tcg_temp_free_i64(tcg_res);
}
8863
8864
8865
8866
8867
8868
8869
static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn)
{
    /* AdvSIMD three registers of different sizes.
     *   31  30  29 28       24 23  22  21 20  16 15    12 11 10 9    5 4    0
     * +---+---+---+-----------+------+---+------+--------+-----+------+------+
     * | 0 | Q | U | 0 1 1 1 0 | size | 1 |  Rm  | opcode | 0 0 |  Rn  |  Rd  |
     * +---+---+---+-----------+------+---+------+--------+-----+------+------+
     * Decode the fields, check for unallocated encodings, then dispatch
     * to the wide/narrowing/widening handlers.
     */
    int is_q = extract32(insn, 30, 1);
    int is_u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 4);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    switch (opcode) {
    case 1: /* wide add */
    case 3: /* wide subtract */
        /* 64 x 128 -> 128 */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_3rd_wide(s, is_q, is_u, size, opcode, rd, rn, rm);
        break;
    case 4: /* add-high-narrow */
    case 6: /* sub-high-narrow */
        /* 128 x 128 -> 64 */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_3rd_narrowing(s, is_q, is_u, size, opcode, rd, rn, rm);
        break;
    case 14: /* polynomial multiply long: only sizes 0 and 3 exist */
        if (is_u || size == 1 || size == 2) {
            unallocated_encoding(s);
            return;
        }
        if (size == 3) {
            /* 64x64->128 PMULL needs the optional crypto extension */
            if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_pmull_64(s, is_q, rd, rn, rm);
            return;
        }
        goto is_widening;
    case 9:  /* saturating doubling multiply-add-long */
    case 11: /* saturating doubling multiply-subtract-long */
    case 13: /* saturating doubling multiply-long */
        if (is_u || size == 0) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0:  /* widening add */
    case 2:  /* widening subtract */
    case 5:  /* absolute-difference accumulate long */
    case 7:  /* absolute-difference long */
    case 8:  /* multiply-add-long */
    case 10: /* multiply-subtract-long */
    case 12: /* multiply-long */
        /* 64 x 64 -> 128 */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
    is_widening:
        if (!fp_access_check(s)) {
            return;
        }

        handle_3rd_widening(s, is_q, is_u, size, opcode, rd, rn, rm);
        break;
    default:
        /* opcode 15 not allocated */
        unallocated_encoding(s);
        break;
    }
}
8965
8966
/* AdvSIMD three-same logic ops.  With is_u clear, size selects
 * AND/ANDC/OR/ORC; with is_u set, size selects EOR or one of the
 * bitwise-select variants, which also read the destination.
 * All operate on full 64-bit chunks regardless of element size.
 */
static void disas_simd_3same_logic(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    bool is_u = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);
    TCGv_i64 tcg_op1, tcg_op2, tcg_res[2];
    int pass;

    if (!fp_access_check(s)) {
        return;
    }

    tcg_op1 = tcg_temp_new_i64();
    tcg_op2 = tcg_temp_new_i64();
    tcg_res[0] = tcg_temp_new_i64();
    tcg_res[1] = tcg_temp_new_i64();

    for (pass = 0; pass < (is_q ? 2 : 1); pass++) {
        read_vec_element(s, tcg_op1, rn, pass, MO_64);
        read_vec_element(s, tcg_op2, rm, pass, MO_64);

        if (!is_u) {
            switch (size) {
            case 0: /* AND */
                tcg_gen_and_i64(tcg_res[pass], tcg_op1, tcg_op2);
                break;
            case 1: /* AND NOT */
                tcg_gen_andc_i64(tcg_res[pass], tcg_op1, tcg_op2);
                break;
            case 2: /* OR */
                tcg_gen_or_i64(tcg_res[pass], tcg_op1, tcg_op2);
                break;
            case 3: /* OR NOT */
                tcg_gen_orc_i64(tcg_res[pass], tcg_op1, tcg_op2);
                break;
            }
        } else {
            if (size != 0) {
                /* the bitwise-select ops need the destination loaded
                 * to operate on
                 */
                read_vec_element(s, tcg_res[pass], rd, pass, MO_64);
            }

            switch (size) {
            case 0: /* EOR */
                tcg_gen_xor_i64(tcg_res[pass], tcg_op1, tcg_op2);
                break;
            case 1: /* select: rd holds the mask */
                tcg_gen_xor_i64(tcg_op1, tcg_op1, tcg_op2);
                tcg_gen_and_i64(tcg_op1, tcg_op1, tcg_res[pass]);
                tcg_gen_xor_i64(tcg_res[pass], tcg_op2, tcg_op1);
                break;
            case 2: /* insert-if-true: rm holds the mask */
                tcg_gen_xor_i64(tcg_op1, tcg_op1, tcg_res[pass]);
                tcg_gen_and_i64(tcg_op1, tcg_op1, tcg_op2);
                tcg_gen_xor_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
                break;
            case 3: /* insert-if-false: rm holds the inverted mask */
                tcg_gen_xor_i64(tcg_op1, tcg_op1, tcg_res[pass]);
                tcg_gen_andc_i64(tcg_op1, tcg_op1, tcg_op2);
                tcg_gen_xor_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
                break;
            }
        }
    }

    write_vec_element(s, tcg_res[0], rd, 0, MO_64);
    if (!is_q) {
        /* Non-Q ops zero the high half of the destination */
        tcg_gen_movi_i64(tcg_res[1], 0);
    }
    write_vec_element(s, tcg_res[1], rd, 1, MO_64);

    tcg_temp_free_i64(tcg_op1);
    tcg_temp_free_i64(tcg_op2);
    tcg_temp_free_i64(tcg_res[0]);
    tcg_temp_free_i64(tcg_res[1]);
}
9046
9047
9048static void gen_max_s32(TCGv_i32 res, TCGv_i32 op1, TCGv_i32 op2)
9049{
9050 tcg_gen_movcond_i32(TCG_COND_GE, res, op1, op2, op1, op2);
9051}
9052
9053static void gen_max_u32(TCGv_i32 res, TCGv_i32 op1, TCGv_i32 op2)
9054{
9055 tcg_gen_movcond_i32(TCG_COND_GEU, res, op1, op2, op1, op2);
9056}
9057
9058static void gen_min_s32(TCGv_i32 res, TCGv_i32 op1, TCGv_i32 op2)
9059{
9060 tcg_gen_movcond_i32(TCG_COND_LE, res, op1, op2, op1, op2);
9061}
9062
9063static void gen_min_u32(TCGv_i32 res, TCGv_i32 op1, TCGv_i32 op2)
9064{
9065 tcg_gen_movcond_i32(TCG_COND_LEU, res, op1, op2, op1, op2);
9066}
9067
9068
9069
9070
9071
9072
9073static void handle_simd_3same_pair(DisasContext *s, int is_q, int u, int opcode,
9074 int size, int rn, int rm, int rd)
9075{
9076 TCGv_ptr fpst;
9077 int pass;
9078
9079
9080 if (opcode >= 0x58) {
9081 fpst = get_fpstatus_ptr();
9082 } else {
9083 TCGV_UNUSED_PTR(fpst);
9084 }
9085
9086 if (!fp_access_check(s)) {
9087 return;
9088 }
9089
9090
9091
9092
9093 if (size == 3) {
9094 TCGv_i64 tcg_res[2];
9095
9096 for (pass = 0; pass < 2; pass++) {
9097 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
9098 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
9099 int passreg = (pass == 0) ? rn : rm;
9100
9101 read_vec_element(s, tcg_op1, passreg, 0, MO_64);
9102 read_vec_element(s, tcg_op2, passreg, 1, MO_64);
9103 tcg_res[pass] = tcg_temp_new_i64();
9104
9105 switch (opcode) {
9106 case 0x17:
9107 tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2);
9108 break;
9109 case 0x58:
9110 gen_helper_vfp_maxnumd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
9111 break;
9112 case 0x5a:
9113 gen_helper_vfp_addd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
9114 break;
9115 case 0x5e:
9116 gen_helper_vfp_maxd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
9117 break;
9118 case 0x78:
9119 gen_helper_vfp_minnumd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
9120 break;
9121 case 0x7e:
9122 gen_helper_vfp_mind(tcg_res[pass], tcg_op1, tcg_op2, fpst);
9123 break;
9124 default:
9125 g_assert_not_reached();
9126 }
9127
9128 tcg_temp_free_i64(tcg_op1);
9129 tcg_temp_free_i64(tcg_op2);
9130 }
9131
9132 for (pass = 0; pass < 2; pass++) {
9133 write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
9134 tcg_temp_free_i64(tcg_res[pass]);
9135 }
9136 } else {
9137 int maxpass = is_q ? 4 : 2;
9138 TCGv_i32 tcg_res[4];
9139
9140 for (pass = 0; pass < maxpass; pass++) {
9141 TCGv_i32 tcg_op1 = tcg_temp_new_i32();
9142 TCGv_i32 tcg_op2 = tcg_temp_new_i32();
9143 NeonGenTwoOpFn *genfn = NULL;
9144 int passreg = pass < (maxpass / 2) ? rn : rm;
9145 int passelt = (is_q && (pass & 1)) ? 2 : 0;
9146
9147 read_vec_element_i32(s, tcg_op1, passreg, passelt, MO_32);
9148 read_vec_element_i32(s, tcg_op2, passreg, passelt + 1, MO_32);
9149 tcg_res[pass] = tcg_temp_new_i32();
9150
9151 switch (opcode) {
9152 case 0x17:
9153 {
9154 static NeonGenTwoOpFn * const fns[3] = {
9155 gen_helper_neon_padd_u8,
9156 gen_helper_neon_padd_u16,
9157 tcg_gen_add_i32,
9158 };
9159 genfn = fns[size];
9160 break;
9161 }
9162 case 0x14:
9163 {
9164 static NeonGenTwoOpFn * const fns[3][2] = {
9165 { gen_helper_neon_pmax_s8, gen_helper_neon_pmax_u8 },
9166 { gen_helper_neon_pmax_s16, gen_helper_neon_pmax_u16 },
9167 { gen_max_s32, gen_max_u32 },
9168 };
9169 genfn = fns[size][u];
9170 break;
9171 }
9172 case 0x15:
9173 {
9174 static NeonGenTwoOpFn * const fns[3][2] = {
9175 { gen_helper_neon_pmin_s8, gen_helper_neon_pmin_u8 },
9176 { gen_helper_neon_pmin_s16, gen_helper_neon_pmin_u16 },
9177 { gen_min_s32, gen_min_u32 },
9178 };
9179 genfn = fns[size][u];
9180 break;
9181 }
9182
9183 case 0x58:
9184 gen_helper_vfp_maxnums(tcg_res[pass], tcg_op1, tcg_op2, fpst);
9185 break;
9186 case 0x5a:
9187 gen_helper_vfp_adds(tcg_res[pass], tcg_op1, tcg_op2, fpst);
9188 break;
9189 case 0x5e:
9190 gen_helper_vfp_maxs(tcg_res[pass], tcg_op1, tcg_op2, fpst);
9191 break;
9192 case 0x78:
9193 gen_helper_vfp_minnums(tcg_res[pass], tcg_op1, tcg_op2, fpst);
9194 break;
9195 case 0x7e:
9196 gen_helper_vfp_mins(tcg_res[pass], tcg_op1, tcg_op2, fpst);
9197 break;
9198 default:
9199 g_assert_not_reached();
9200 }
9201
9202
9203 if (genfn) {
9204 genfn(tcg_res[pass], tcg_op1, tcg_op2);
9205 }
9206
9207 tcg_temp_free_i32(tcg_op1);
9208 tcg_temp_free_i32(tcg_op2);
9209 }
9210
9211 for (pass = 0; pass < maxpass; pass++) {
9212 write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32);
9213 tcg_temp_free_i32(tcg_res[pass]);
9214 }
9215 if (!is_q) {
9216 clear_vec_high(s, rd);
9217 }
9218 }
9219
9220 if (!TCGV_IS_UNUSED_PTR(fpst)) {
9221 tcg_temp_free_ptr(fpst);
9222 }
9223}
9224
9225
/* Floating point op subgroup of C3.6.16 (three-same).
 * The opcode is expanded with the U bit (29) and the "size hi" bit (23)
 * to make a 7-bit fpopcode; bit 22 alone selects single vs double.
 */
static void disas_simd_3same_float(DisasContext *s, uint32_t insn)
{
    int fpopcode = extract32(insn, 11, 5)
        | (extract32(insn, 23, 1) << 5)
        | (extract32(insn, 29, 1) << 6);
    int is_q = extract32(insn, 30, 1);
    int size = extract32(insn, 22, 1);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    int datasize = is_q ? 128 : 64;
    int esize = 32 << size;
    int elements = datasize / esize;

    if (size == 1 && !is_q) {
        /* double-precision ops only exist in the 128-bit form */
        unallocated_encoding(s);
        return;
    }

    switch (fpopcode) {
    /* Pairwise FP ops: handled by the shared pairwise helper.
     * fp_access_check is done inside handle_simd_3same_pair.
     */
    case 0x58: /* FP pairwise max-number */
    case 0x5a: /* FP pairwise add */
    case 0x5e: /* FP pairwise max */
    case 0x78: /* FP pairwise min-number */
    case 0x7e: /* FP pairwise min */
        if (size && !is_q) {
            unallocated_encoding(s);
            return;
        }
        handle_simd_3same_pair(s, is_q, 0, fpopcode, size ? MO_64 : MO_32,
                               rn, rm, rd);
        return;
    /* Element-wise FP ops (multiply, add/sub, compares, recip steps,
     * min/max, divide, abs-difference etc.)
     */
    case 0x1b:
    case 0x1f:
    case 0x3f:
    case 0x5d:
    case 0x7d:
    case 0x19:
    case 0x39:
    case 0x18:
    case 0x1a:
    case 0x1c:
    case 0x1e:
    case 0x38:
    case 0x3a:
    case 0x3e:
    case 0x5b:
    case 0x5c:
    case 0x5f:
    case 0x7a:
    case 0x7c:
        if (!fp_access_check(s)) {
            return;
        }

        handle_3same_float(s, size, elements, fpopcode, rd, rn, rm);
        return;
    default:
        unallocated_encoding(s);
        return;
    }
}
9293
9294
/* Integer op subgroup of C3.6.16 (AdvSIMD three registers same size).
 * Performs the unallocated-encoding checks first, then emits either
 * inline 64-bit ops (size == 3, 128-bit form only) or per-32-bit-chunk
 * ops via the Neon helper tables.
 */
static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
{
    int is_q = extract32(insn, 30, 1);
    int u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 11, 5);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    int pass;

    switch (opcode) {
    case 0x13: /* MUL, PMUL */
        if (u && size != 0) {
            /* PMUL only exists for byte elements */
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x0:  /* halving add */
    case 0x2:  /* rounding halving add */
    case 0x4:  /* halving subtract */
    case 0xc:  /* max */
    case 0xd:  /* min */
    case 0xe:  /* absolute difference */
    case 0xf:  /* absolute difference accumulate */
    case 0x12: /* MLA, MLS */
        /* these ops have no 64-bit element form */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x16: /* SQDMULH, SQRDMULH: 16 and 32 bit elements only */
        if (size == 0 || size == 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        /* remaining ops: 64-bit elements only valid in the Q form */
        if (size == 3 && !is_q) {
            unallocated_encoding(s);
            return;
        }
        break;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (size == 3) {
        /* the decode checks above guarantee Q here */
        assert(is_q);
        for (pass = 0; pass < 2; pass++) {
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
            TCGv_i64 tcg_res = tcg_temp_new_i64();

            read_vec_element(s, tcg_op1, rn, pass, MO_64);
            read_vec_element(s, tcg_op2, rm, pass, MO_64);

            handle_3same_64(s, opcode, u, tcg_res, tcg_op1, tcg_op2);

            write_vec_element(s, tcg_res, rd, pass, MO_64);

            tcg_temp_free_i64(tcg_res);
            tcg_temp_free_i64(tcg_op1);
            tcg_temp_free_i64(tcg_op2);
        }
    } else {
        /* one pass per 32-bit chunk of the vector */
        for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();
            NeonGenTwoOpFn *genfn = NULL;
            /* ops that can saturate take cpu_env as well */
            NeonGenTwoOpEnvFn *genenvfn = NULL;

            read_vec_element_i32(s, tcg_op1, rn, pass, MO_32);
            read_vec_element_i32(s, tcg_op2, rm, pass, MO_32);

            /* Select the helper for this [size][u] combination */
            switch (opcode) {
            case 0x0: /* halving add */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_hadd_s8, gen_helper_neon_hadd_u8 },
                    { gen_helper_neon_hadd_s16, gen_helper_neon_hadd_u16 },
                    { gen_helper_neon_hadd_s32, gen_helper_neon_hadd_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0x1: /* saturating add */
            {
                static NeonGenTwoOpEnvFn * const fns[3][2] = {
                    { gen_helper_neon_qadd_s8, gen_helper_neon_qadd_u8 },
                    { gen_helper_neon_qadd_s16, gen_helper_neon_qadd_u16 },
                    { gen_helper_neon_qadd_s32, gen_helper_neon_qadd_u32 },
                };
                genenvfn = fns[size][u];
                break;
            }
            case 0x2: /* rounding halving add */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_rhadd_s8, gen_helper_neon_rhadd_u8 },
                    { gen_helper_neon_rhadd_s16, gen_helper_neon_rhadd_u16 },
                    { gen_helper_neon_rhadd_s32, gen_helper_neon_rhadd_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0x4: /* halving subtract */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_hsub_s8, gen_helper_neon_hsub_u8 },
                    { gen_helper_neon_hsub_s16, gen_helper_neon_hsub_u16 },
                    { gen_helper_neon_hsub_s32, gen_helper_neon_hsub_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0x5: /* saturating subtract */
            {
                static NeonGenTwoOpEnvFn * const fns[3][2] = {
                    { gen_helper_neon_qsub_s8, gen_helper_neon_qsub_u8 },
                    { gen_helper_neon_qsub_s16, gen_helper_neon_qsub_u16 },
                    { gen_helper_neon_qsub_s32, gen_helper_neon_qsub_u32 },
                };
                genenvfn = fns[size][u];
                break;
            }
            case 0x6: /* compare greater-than */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_cgt_s8, gen_helper_neon_cgt_u8 },
                    { gen_helper_neon_cgt_s16, gen_helper_neon_cgt_u16 },
                    { gen_helper_neon_cgt_s32, gen_helper_neon_cgt_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0x7: /* compare greater-or-equal */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_cge_s8, gen_helper_neon_cge_u8 },
                    { gen_helper_neon_cge_s16, gen_helper_neon_cge_u16 },
                    { gen_helper_neon_cge_s32, gen_helper_neon_cge_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0x8: /* shift by register */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_shl_s8, gen_helper_neon_shl_u8 },
                    { gen_helper_neon_shl_s16, gen_helper_neon_shl_u16 },
                    { gen_helper_neon_shl_s32, gen_helper_neon_shl_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0x9: /* saturating shift by register */
            {
                static NeonGenTwoOpEnvFn * const fns[3][2] = {
                    { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 },
                    { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 },
                    { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 },
                };
                genenvfn = fns[size][u];
                break;
            }
            case 0xa: /* rounding shift by register */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_rshl_s8, gen_helper_neon_rshl_u8 },
                    { gen_helper_neon_rshl_s16, gen_helper_neon_rshl_u16 },
                    { gen_helper_neon_rshl_s32, gen_helper_neon_rshl_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0xb: /* saturating rounding shift by register */
            {
                static NeonGenTwoOpEnvFn * const fns[3][2] = {
                    { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 },
                    { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 },
                    { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 },
                };
                genenvfn = fns[size][u];
                break;
            }
            case 0xc: /* max */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_max_s8, gen_helper_neon_max_u8 },
                    { gen_helper_neon_max_s16, gen_helper_neon_max_u16 },
                    { gen_max_s32, gen_max_u32 },
                };
                genfn = fns[size][u];
                break;
            }

            case 0xd: /* min */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_min_s8, gen_helper_neon_min_u8 },
                    { gen_helper_neon_min_s16, gen_helper_neon_min_u16 },
                    { gen_min_s32, gen_min_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0xe: /* absolute difference */
            case 0xf: /* absolute difference accumulate */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_abd_s8, gen_helper_neon_abd_u8 },
                    { gen_helper_neon_abd_s16, gen_helper_neon_abd_u16 },
                    { gen_helper_neon_abd_s32, gen_helper_neon_abd_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0x10: /* add (u == 0) or subtract (u == 1) */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_add_u8, gen_helper_neon_sub_u8 },
                    { gen_helper_neon_add_u16, gen_helper_neon_sub_u16 },
                    { tcg_gen_add_i32, tcg_gen_sub_i32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0x11: /* test-bits (u == 0) or compare-equal (u == 1) */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_tst_u8, gen_helper_neon_ceq_u8 },
                    { gen_helper_neon_tst_u16, gen_helper_neon_ceq_u16 },
                    { gen_helper_neon_tst_u32, gen_helper_neon_ceq_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0x13: /* MUL, PMUL */
                if (u) {
                    /* PMUL: the decode check above restricted this to size 0 */
                    assert(size == 0);
                    genfn = gen_helper_neon_mul_p8;
                    break;
                }
                /* fall through : MUL shares the table below */
            case 0x12: /* MLA, MLS (multiply step; accumulate done below) */
            {
                static NeonGenTwoOpFn * const fns[3] = {
                    gen_helper_neon_mul_u8,
                    gen_helper_neon_mul_u16,
                    tcg_gen_mul_i32,
                };
                genfn = fns[size];
                break;
            }
            case 0x16: /* saturating (rounding) doubling multiply high */
            {
                static NeonGenTwoOpEnvFn * const fns[2][2] = {
                    { gen_helper_neon_qdmulh_s16, gen_helper_neon_qrdmulh_s16 },
                    { gen_helper_neon_qdmulh_s32, gen_helper_neon_qrdmulh_s32 },
                };
                assert(size == 1 || size == 2);
                genenvfn = fns[size - 1][u];
                break;
            }
            default:
                g_assert_not_reached();
            }

            if (genenvfn) {
                genenvfn(tcg_res, cpu_env, tcg_op1, tcg_op2);
            } else {
                genfn(tcg_res, tcg_op1, tcg_op2);
            }

            if (opcode == 0xf || opcode == 0x12) {
                /* accumulating ops: add or subtract the per-element
                 * result into the existing destination value
                 */
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_add_u8, gen_helper_neon_sub_u8 },
                    { gen_helper_neon_add_u16, gen_helper_neon_sub_u16 },
                    { tcg_gen_add_i32, tcg_gen_sub_i32 },
                };
                bool is_sub = (opcode == 0x12 && u);

                genfn = fns[size][is_sub];
                read_vec_element_i32(s, tcg_op1, rd, pass, MO_32);
                genfn(tcg_res, tcg_op1, tcg_res);
            }

            write_vec_element_i32(s, tcg_res, rd, pass, MO_32);

            tcg_temp_free_i32(tcg_res);
            tcg_temp_free_i32(tcg_op1);
            tcg_temp_free_i32(tcg_op2);
        }
    }

    if (!is_q) {
        /* Non-Q ops zero the high half of the destination */
        clear_vec_high(s, rd);
    }
}
9600
9601
9602
9603
9604
9605
9606
/* C3.6.16 AdvSIMD three registers of the same size: top-level dispatch
 * on the 5-bit opcode to the logic / pairwise / float / integer
 * subgroups.
 */
static void disas_simd_three_reg_same(DisasContext *s, uint32_t insn)
{
    int opcode = extract32(insn, 11, 5);

    switch (opcode) {
    case 0x3: /* logic ops */
        disas_simd_3same_logic(s, insn);
        break;
    case 0x17: /* pairwise add */
    case 0x14: /* pairwise max */
    case 0x15: /* pairwise min */
    {
        /* Pairwise operations: decoded and checked here, then handed
         * to the shared pairwise handler (which does fp_access_check).
         */
        int is_q = extract32(insn, 30, 1);
        int u = extract32(insn, 29, 1);
        int size = extract32(insn, 22, 2);
        int rm = extract32(insn, 16, 5);
        int rn = extract32(insn, 5, 5);
        int rd = extract32(insn, 0, 5);
        if (opcode == 0x17) {
            /* pairwise add: no U form; 64-bit elements need Q */
            if (u || (size == 3 && !is_q)) {
                unallocated_encoding(s);
                return;
            }
        } else {
            /* pairwise max/min: no 64-bit element form */
            if (size == 3) {
                unallocated_encoding(s);
                return;
            }
        }
        handle_simd_3same_pair(s, is_q, u, opcode, size, rn, rm, rd);
        break;
    }
    case 0x18 ... 0x31:
        /* floating point ops, sz[1] and U are part of opcode */
        disas_simd_3same_float(s, insn);
        break;
    default:
        disas_simd_3same_int(s, insn);
        break;
    }
}
9649
/* Widening FP convert for the 2-misc group: size == 3 converts four
 * single-precision inputs to two double-precision results; otherwise
 * four half-precision inputs become four single-precision results.
 * is_q selects whether the source elements come from the high half
 * of Vn.  NOTE(review): opcode is currently unused here — presumably
 * reserved for future variants; confirm against the caller.
 */
static void handle_2misc_widening(DisasContext *s, int opcode, bool is_q,
                                  int size, int rn, int rd)
{
    int pass;

    if (size == 3) {
        /* 32 -> 64 bit fp conversion */
        TCGv_i64 tcg_res[2];
        int srcelt = is_q ? 2 : 0;

        for (pass = 0; pass < 2; pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();
            tcg_res[pass] = tcg_temp_new_i64();

            read_vec_element_i32(s, tcg_op, rn, srcelt + pass, MO_32);
            gen_helper_vfp_fcvtds(tcg_res[pass], tcg_op, cpu_env);
            tcg_temp_free_i32(tcg_op);
        }
        /* Write only after all inputs are read, in case rd == rn */
        for (pass = 0; pass < 2; pass++) {
            write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
            tcg_temp_free_i64(tcg_res[pass]);
        }
    } else {
        /* 16 -> 32 bit fp conversion */
        int srcelt = is_q ? 4 : 0;
        TCGv_i32 tcg_res[4];

        for (pass = 0; pass < 4; pass++) {
            tcg_res[pass] = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_res[pass], rn, srcelt + pass, MO_16);
            gen_helper_vfp_fcvt_f16_to_f32(tcg_res[pass], tcg_res[pass],
                                           cpu_env);
        }
        for (pass = 0; pass < 4; pass++) {
            write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32);
            tcg_temp_free_i32(tcg_res[pass]);
        }
    }
}
9694
/* Element-reverse ops: reverse the order of size-sized elements within
 * each group of (8 << grp_size) bits.  The combined (opcode, u) value
 * plus the element size determines the group size; opsz >= 3 would mean
 * a group no larger than the element, which is unallocated.
 */
static void handle_rev(DisasContext *s, int opcode, bool u,
                       bool is_q, int size, int rn, int rd)
{
    int op = (opcode << 1) | u;
    int opsz = op + size;
    int grp_size = 3 - opsz;
    int dsize = is_q ? 128 : 64;
    int i;

    if (opsz >= 3) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (size == 0) {
        /* Special case bytes, use bswap op on each group of elements */
        int groups = dsize / (8 << grp_size);

        for (i = 0; i < groups; i++) {
            TCGv_i64 tcg_tmp = tcg_temp_new_i64();

            read_vec_element(s, tcg_tmp, rn, i, grp_size);
            switch (grp_size) {
            case MO_16:
                tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp);
                break;
            case MO_32:
                tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp);
                break;
            case MO_64:
                tcg_gen_bswap64_i64(tcg_tmp, tcg_tmp);
                break;
            default:
                g_assert_not_reached();
            }
            write_vec_element(s, tcg_tmp, rd, i, grp_size);
            tcg_temp_free_i64(tcg_tmp);
        }
        if (!is_q) {
            clear_vec_high(s, rd);
        }
    } else {
        /* General case: deposit each source element at its reversed
         * position within the group, accumulating the low and high
         * 64 bits of the destination separately.
         */
        int revmask = (1 << grp_size) - 1;
        int esize = 8 << size;
        int elements = dsize / esize;
        TCGv_i64 tcg_rn = tcg_temp_new_i64();
        TCGv_i64 tcg_rd = tcg_const_i64(0);
        TCGv_i64 tcg_rd_hi = tcg_const_i64(0);

        for (i = 0; i < elements; i++) {
            /* XOR with revmask flips the element index within its group */
            int e_rev = (i & 0xf) ^ revmask;
            int off = e_rev * esize;
            read_vec_element(s, tcg_rn, rn, i, size);
            if (off >= 64) {
                tcg_gen_deposit_i64(tcg_rd_hi, tcg_rd_hi,
                                    tcg_rn, off - 64, esize);
            } else {
                tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, off, esize);
            }
        }
        write_vec_element(s, tcg_rd, rd, 0, MO_64);
        write_vec_element(s, tcg_rd_hi, rd, 1, MO_64);

        tcg_temp_free_i64(tcg_rd_hi);
        tcg_temp_free_i64(tcg_rd);
        tcg_temp_free_i64(tcg_rn);
    }
}
9767
/* Pairwise add-long ops from the 2-misc group: add adjacent pairs of
 * elements into double-width results, optionally accumulating into the
 * existing destination (opcode 0x6 is the accumulating variant).
 */
static void handle_2misc_pairwise(DisasContext *s, int opcode, bool u,
                                  bool is_q, int size, int rn, int rd)
{
    bool accum = (opcode == 0x6);
    int maxpass = is_q ? 2 : 1;
    int pass;
    TCGv_i64 tcg_res[2];

    if (size == 2) {
        /* 32 + 32 -> 64 op: done inline with sign/zero extended loads */
        TCGMemOp memop = size + (u ? 0 : MO_SIGN);

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();

            tcg_res[pass] = tcg_temp_new_i64();

            read_vec_element(s, tcg_op1, rn, pass * 2, memop);
            read_vec_element(s, tcg_op2, rn, pass * 2 + 1, memop);
            tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2);
            if (accum) {
                read_vec_element(s, tcg_op1, rd, pass, MO_64);
                tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
            }

            tcg_temp_free_i64(tcg_op1);
            tcg_temp_free_i64(tcg_op2);
        }
    } else {
        /* sizes 0 and 1: use the packed-lane helpers */
        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();
            NeonGenOneOpFn *genfn;
            static NeonGenOneOpFn * const fns[2][2] = {
                { gen_helper_neon_addlp_s8, gen_helper_neon_addlp_u8 },
                { gen_helper_neon_addlp_s16, gen_helper_neon_addlp_u16 },
            };

            genfn = fns[size][u];

            tcg_res[pass] = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);
            genfn(tcg_res[pass], tcg_op);

            if (accum) {
                read_vec_element(s, tcg_op, rd, pass, MO_64);
                /* packed add of the existing lanes; lane width after
                 * widening is 16 (size 0) or 32 (size 1) bits
                 */
                if (size == 0) {
                    gen_helper_neon_addl_u16(tcg_res[pass],
                                             tcg_res[pass], tcg_op);
                } else {
                    gen_helper_neon_addl_u32(tcg_res[pass],
                                             tcg_res[pass], tcg_op);
                }
            }
            tcg_temp_free_i64(tcg_op);
        }
    }
    if (!is_q) {
        /* Non-Q form zeroes the high 64 bits of the destination */
        tcg_res[1] = tcg_const_i64(0);
    }
    for (pass = 0; pass < 2; pass++) {
        write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
        tcg_temp_free_i64(tcg_res[pass]);
    }
}
9839
9840static void handle_shll(DisasContext *s, bool is_q, int size, int rn, int rd)
9841{
9842
9843 int pass;
9844 int part = is_q ? 2 : 0;
9845 TCGv_i64 tcg_res[2];
9846
9847 for (pass = 0; pass < 2; pass++) {
9848 static NeonGenWidenFn * const widenfns[3] = {
9849 gen_helper_neon_widen_u8,
9850 gen_helper_neon_widen_u16,
9851 tcg_gen_extu_i32_i64,
9852 };
9853 NeonGenWidenFn *widenfn = widenfns[size];
9854 TCGv_i32 tcg_op = tcg_temp_new_i32();
9855
9856 read_vec_element_i32(s, tcg_op, rn, part + pass, MO_32);
9857 tcg_res[pass] = tcg_temp_new_i64();
9858 widenfn(tcg_res[pass], tcg_op);
9859 tcg_gen_shli_i64(tcg_res[pass], tcg_res[pass], 8 << size);
9860
9861 tcg_temp_free_i32(tcg_op);
9862 }
9863
9864 for (pass = 0; pass < 2; pass++) {
9865 write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
9866 tcg_temp_free_i64(tcg_res[pass]);
9867 }
9868}
9869
9870
9871
9872
9873
9874
9875
/* C3.6.17 AdvSIMD two reg misc
 *   31  30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
 * | 0 | Q | U | 0 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
 */
static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
{
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    bool u = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    bool need_fpstatus = false;
    bool need_rmode = false;
    int rmode = -1;
    TCGv_i32 tcg_rmode;
    TCGv_ptr tcg_fpstatus;

    switch (opcode) {
    case 0x0: /* REV64, REV32 */
    case 0x1: /* REV16 */
        handle_rev(s, opcode, u, is_q, size, rn, rd);
        return;
    case 0x5: /* CNT, NOT, RBIT */
        if (u && size == 0) {
            /* NOT: adjust size so we can use the 64-bits-at-a-time loop */
            size = 3;
            break;
        } else if (u && size == 1) {
            /* RBIT */
            break;
        } else if (!u && size == 0) {
            /* CNT */
            break;
        }
        unallocated_encoding(s);
        return;
    case 0x12: /* XTN, XTN2, SQXTUN, SQXTUN2 */
    case 0x14: /* SQXTN, SQXTN2, UQXTN, UQXTN2 */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }

        handle_2misc_narrow(s, false, opcode, u, is_q, size, rn, rd);
        return;
    case 0x4: /* CLS, CLZ */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x2: /* SADDLP, UADDLP */
    case 0x6: /* SADALP, UADALP */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_2misc_pairwise(s, opcode, u, is_q, size, rn, rd);
        return;
    case 0x13: /* SHLL, SHLL2 */
        if (u == 0 || size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_shll(s, is_q, size, rn, rd);
        return;
    case 0xa: /* CMLT */
        if (u == 1) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x8: /* CMGT, CMGE */
    case 0x9: /* CMEQ, CMLE */
    case 0xb: /* ABS, NEG */
        if (size == 3 && !is_q) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x3: /* SUQADD, USQADD */
        if (size == 3 && !is_q) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_2misc_satacc(s, false, u, is_q, size, rn, rd);
        return;
    case 0x7: /* SQABS, SQNEG */
        if (size == 3 && !is_q) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0xc ... 0xf:
    case 0x16 ... 0x1d:
    case 0x1f:
    {
        /* Floating point: U, size[1] and opcode indicate operation;
         * size[0] indicates single or double precision.
         */
        int is_double = extract32(size, 0, 1);
        opcode |= (extract32(size, 1, 1) << 5) | (u << 6);
        size = is_double ? 3 : 2;
        switch (opcode) {
        case 0x2f: /* FABS */
        case 0x6f: /* FNEG */
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x1d: /* SCVTF */
        case 0x5d: /* UCVTF */
        {
            bool is_signed = (opcode == 0x1d) ? true : false;
            int elements = is_double ? 2 : is_q ? 4 : 2;
            if (is_double && !is_q) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_simd_intfp_conv(s, rd, rn, elements, is_signed, 0, size);
            return;
        }
        case 0x2c: /* FCMGT (zero) */
        case 0x2d: /* FCMEQ (zero) */
        case 0x2e: /* FCMLT (zero) */
        case 0x6c: /* FCMGE (zero) */
        case 0x6d: /* FCMLE (zero) */
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            handle_2misc_fcmp_zero(s, opcode, false, u, is_q, size, rn, rd);
            return;
        case 0x7f: /* FSQRT */
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x1a: /* FCVTNS */
        case 0x1b: /* FCVTMS */
        case 0x3a: /* FCVTPS */
        case 0x3b: /* FCVTZS */
        case 0x5a: /* FCVTNU */
        case 0x5b: /* FCVTMU */
        case 0x7a: /* FCVTPU */
        case 0x7b: /* FCVTZU */
            need_fpstatus = true;
            need_rmode = true;
            /* rounding mode is encoded in opcode bits [5] and [0] */
            rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x5c: /* FCVTAU */
        case 0x1c: /* FCVTAS */
            need_fpstatus = true;
            need_rmode = true;
            rmode = FPROUNDING_TIEAWAY;
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x3c: /* URECPE */
            if (size == 3) {
                unallocated_encoding(s);
                return;
            }
            /* fall through */
        case 0x3d: /* FRECPE */
        case 0x7d: /* FRSQRTE */
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_reciprocal(s, opcode, false, u, is_q, size, rn, rd);
            return;
        case 0x56: /* FCVTXN, FCVTXN2 */
            if (size == 2) {
                unallocated_encoding(s);
                return;
            }
            /* fall through */
        case 0x16: /* FCVTN, FCVTN2 */
            /* handle_2misc_narrow does a 2*size -> size operation, but
             * these instructions encode the source size rather than dest
             * size, hence the "size - 1" below.
             */
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_narrow(s, false, opcode, 0, is_q, size - 1, rn, rd);
            return;
        case 0x17: /* FCVTL, FCVTL2 */
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_widening(s, opcode, is_q, size, rn, rd);
            return;
        case 0x18: /* FRINTN */
        case 0x19: /* FRINTM */
        case 0x38: /* FRINTP */
        case 0x39: /* FRINTZ */
            need_rmode = true;
            rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
            /* fall through */
        case 0x59: /* FRINTX */
        case 0x79: /* FRINTI */
            need_fpstatus = true;
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x58: /* FRINTA */
            need_rmode = true;
            rmode = FPROUNDING_TIEAWAY;
            need_fpstatus = true;
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x7c: /* URSQRTE */
            if (size == 3) {
                unallocated_encoding(s);
                return;
            }
            need_fpstatus = true;
            break;
        default:
            unallocated_encoding(s);
            return;
        }
        break;
    }
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (need_fpstatus) {
        tcg_fpstatus = get_fpstatus_ptr();
    } else {
        TCGV_UNUSED_PTR(tcg_fpstatus);
    }
    if (need_rmode) {
        tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
    } else {
        TCGV_UNUSED_I32(tcg_rmode);
    }

    if (size == 3) {
        /* All 64-bit element operations can be shared with scalar 2misc */
        int pass;

        for (pass = 0; pass < (is_q ? 2 : 1); pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();
            TCGv_i64 tcg_res = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);

            handle_2misc_64(s, opcode, u, tcg_res, tcg_op,
                            tcg_rmode, tcg_fpstatus);

            write_vec_element(s, tcg_res, rd, pass, MO_64);

            tcg_temp_free_i64(tcg_res);
            tcg_temp_free_i64(tcg_op);
        }
    } else {
        int pass;

        for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();
            TCGCond cond;

            read_vec_element_i32(s, tcg_op, rn, pass, MO_32);

            if (size == 2) {
                /* Special cases for 32 bit elements */
                switch (opcode) {
                case 0xa: /* CMLT */
                    /* 32 bit integer comparison against zero, result is
                     * test ? (2^32 - 1) : 0. We implement via setcond(test)
                     * and inverting.
                     */
                    cond = TCG_COND_LT;
                do_cmop:
                    tcg_gen_setcondi_i32(cond, tcg_res, tcg_op, 0);
                    tcg_gen_neg_i32(tcg_res, tcg_res);
                    break;
                case 0x8: /* CMGT, CMGE */
                    cond = u ? TCG_COND_GE : TCG_COND_GT;
                    goto do_cmop;
                case 0x9: /* CMEQ, CMLE */
                    cond = u ? TCG_COND_LE : TCG_COND_EQ;
                    goto do_cmop;
                case 0x4: /* CLS, CLZ */
                    if (u) {
                        gen_helper_clz32(tcg_res, tcg_op);
                    } else {
                        gen_helper_cls32(tcg_res, tcg_op);
                    }
                    break;
                case 0x7: /* SQABS, SQNEG */
                    if (u) {
                        gen_helper_neon_qneg_s32(tcg_res, cpu_env, tcg_op);
                    } else {
                        gen_helper_neon_qabs_s32(tcg_res, cpu_env, tcg_op);
                    }
                    break;
                case 0xb: /* ABS, NEG */
                    if (u) {
                        tcg_gen_neg_i32(tcg_res, tcg_op);
                    } else {
                        TCGv_i32 tcg_zero = tcg_const_i32(0);
                        tcg_gen_neg_i32(tcg_res, tcg_op);
                        tcg_gen_movcond_i32(TCG_COND_GT, tcg_res, tcg_op,
                                            tcg_zero, tcg_op, tcg_res);
                        tcg_temp_free_i32(tcg_zero);
                    }
                    break;
                case 0x2f: /* FABS */
                    gen_helper_vfp_abss(tcg_res, tcg_op);
                    break;
                case 0x6f: /* FNEG */
                    gen_helper_vfp_negs(tcg_res, tcg_op);
                    break;
                case 0x7f: /* FSQRT */
                    gen_helper_vfp_sqrts(tcg_res, tcg_op, cpu_env);
                    break;
                case 0x1a: /* FCVTNS */
                case 0x1b: /* FCVTMS */
                case 0x1c: /* FCVTAS */
                case 0x3a: /* FCVTPS */
                case 0x3b: /* FCVTZS */
                {
                    TCGv_i32 tcg_shift = tcg_const_i32(0);
                    gen_helper_vfp_tosls(tcg_res, tcg_op,
                                         tcg_shift, tcg_fpstatus);
                    tcg_temp_free_i32(tcg_shift);
                    break;
                }
                case 0x5a: /* FCVTNU */
                case 0x5b: /* FCVTMU */
                case 0x5c: /* FCVTAU */
                case 0x7a: /* FCVTPU */
                case 0x7b: /* FCVTZU */
                {
                    TCGv_i32 tcg_shift = tcg_const_i32(0);
                    gen_helper_vfp_touls(tcg_res, tcg_op,
                                         tcg_shift, tcg_fpstatus);
                    tcg_temp_free_i32(tcg_shift);
                    break;
                }
                case 0x18: /* FRINTN */
                case 0x19: /* FRINTM */
                case 0x38: /* FRINTP */
                case 0x39: /* FRINTZ */
                case 0x58: /* FRINTA */
                case 0x79: /* FRINTI */
                    gen_helper_rints(tcg_res, tcg_op, tcg_fpstatus);
                    break;
                case 0x59: /* FRINTX */
                    gen_helper_rints_exact(tcg_res, tcg_op, tcg_fpstatus);
                    break;
                case 0x7c: /* URSQRTE */
                    gen_helper_rsqrte_u32(tcg_res, tcg_op, tcg_fpstatus);
                    break;
                default:
                    g_assert_not_reached();
                }
            } else {
                /* Use helpers for 8 and 16 bit elements */
                switch (opcode) {
                case 0x5: /* CNT, RBIT */
                    /* For these two insns size is part of the opcode specifier
                     * (handled earlier); they always operate on byte elements.
                     */
                    if (u) {
                        gen_helper_neon_rbit_u8(tcg_res, tcg_op);
                    } else {
                        gen_helper_neon_cnt_u8(tcg_res, tcg_op);
                    }
                    break;
                case 0x7: /* SQABS, SQNEG */
                {
                    NeonGenOneOpEnvFn *genfn;
                    static NeonGenOneOpEnvFn * const fns[2][2] = {
                        { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 },
                        { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 },
                    };
                    genfn = fns[size][u];
                    genfn(tcg_res, cpu_env, tcg_op);
                    break;
                }
                case 0x8: /* CMGT, CMGE */
                case 0x9: /* CMEQ, CMLE */
                case 0xa: /* CMLT */
                {
                    static NeonGenTwoOpFn * const fns[3][2] = {
                        { gen_helper_neon_cgt_s8, gen_helper_neon_cgt_s16 },
                        { gen_helper_neon_cge_s8, gen_helper_neon_cge_s16 },
                        { gen_helper_neon_ceq_u8, gen_helper_neon_ceq_u16 },
                    };
                    NeonGenTwoOpFn *genfn;
                    int comp;
                    bool reverse;
                    TCGv_i32 tcg_zero = tcg_const_i32(0);

                    /* comp = index into [CMGT, CMGE, CMEQ, CMLE, CMLT] */
                    comp = (opcode - 0x8) * 2 + u;
                    /* ...but LE, LT are implemented as reverse GE, GT */
                    reverse = (comp > 2);
                    if (reverse) {
                        comp = 4 - comp;
                    }
                    genfn = fns[comp][size];
                    if (reverse) {
                        genfn(tcg_res, tcg_zero, tcg_op);
                    } else {
                        genfn(tcg_res, tcg_op, tcg_zero);
                    }
                    tcg_temp_free_i32(tcg_zero);
                    break;
                }
                case 0xb: /* ABS, NEG */
                    if (u) {
                        TCGv_i32 tcg_zero = tcg_const_i32(0);
                        if (size) {
                            gen_helper_neon_sub_u16(tcg_res, tcg_zero, tcg_op);
                        } else {
                            gen_helper_neon_sub_u8(tcg_res, tcg_zero, tcg_op);
                        }
                        tcg_temp_free_i32(tcg_zero);
                    } else {
                        if (size) {
                            gen_helper_neon_abs_s16(tcg_res, tcg_op);
                        } else {
                            gen_helper_neon_abs_s8(tcg_res, tcg_op);
                        }
                    }
                    break;
                case 0x4: /* CLS, CLZ */
                    if (u) {
                        if (size == 0) {
                            gen_helper_neon_clz_u8(tcg_res, tcg_op);
                        } else {
                            gen_helper_neon_clz_u16(tcg_res, tcg_op);
                        }
                    } else {
                        if (size == 0) {
                            gen_helper_neon_cls_s8(tcg_res, tcg_op);
                        } else {
                            gen_helper_neon_cls_s16(tcg_res, tcg_op);
                        }
                    }
                    break;
                default:
                    g_assert_not_reached();
                }
            }

            write_vec_element_i32(s, tcg_res, rd, pass, MO_32);

            tcg_temp_free_i32(tcg_res);
            tcg_temp_free_i32(tcg_op);
        }
    }
    if (!is_q) {
        clear_vec_high(s, rd);
    }

    if (need_rmode) {
        /* Reset the rounding mode to what it was on entry */
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
        tcg_temp_free_i32(tcg_rmode);
    }
    if (need_fpstatus) {
        tcg_temp_free_ptr(tcg_fpstatus);
    }
}
10381
10382
10383
10384
10385
10386
10387
10388
10389
10390
10391
10392
/* C3.6.13 AdvSIMD scalar x indexed element and
 * C3.6.18 AdvSIMD vector x indexed element
 * (is_scalar distinguishes the two encodings).
 */
static void disas_simd_indexed(DisasContext *s, uint32_t insn)
{
    /* This encoding has two kinds of instruction:
     *  normal, where we perform elt x idxelt => elt for each
     *     element in the vector
     *  long, where we perform elt x idxelt and generate a result of
     *     double the width of the input element
     * The long ops have a 'part' specifier (ie come in INSN, INSN2 pairs).
     */
    bool is_scalar = extract32(insn, 28, 1);
    bool is_q = extract32(insn, 30, 1);
    bool u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int l = extract32(insn, 21, 1);
    int m = extract32(insn, 20, 1);
    /* Note that the Rm field here is only 4 bits, not 5 as it usually is */
    int rm = extract32(insn, 16, 4);
    int opcode = extract32(insn, 12, 4);
    int h = extract32(insn, 11, 1);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    bool is_long = false;
    bool is_fp = false;
    int index;
    TCGv_ptr fpst;

    switch (opcode) {
    case 0x0: /* MLA */
    case 0x4: /* MLS */
        if (!u || is_scalar) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
    case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
    case 0xa: /* SMULL, SMULL2, UMULL, UMULL2 */
        if (is_scalar) {
            unallocated_encoding(s);
            return;
        }
        is_long = true;
        break;
    case 0x3: /* SQDMLAL, SQDMLAL2 */
    case 0x7: /* SQDMLSL, SQDMLSL2 */
    case 0xb: /* SQDMULL, SQDMULL2 */
        is_long = true;
        /* fall through */
    case 0xc: /* SQDMULH */
    case 0xd: /* SQRDMULH */
        if (u) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x8: /* MUL */
        if (u || is_scalar) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x1: /* FMLA */
    case 0x5: /* FMLS */
        if (u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x9: /* FMUL, FMULX */
        if (!extract32(size, 1, 1)) {
            unallocated_encoding(s);
            return;
        }
        is_fp = true;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (is_fp) {
        /* low bit of size indicates single (0) vs double (1) precision */
        size = extract32(size, 0, 1) ? 3 : 2;
        if (size == 2) {
            index = h << 1 | l;
        } else {
            if (l || !is_q) {
                unallocated_encoding(s);
                return;
            }
            index = h;
        }
        rm |= (m << 4);
    } else {
        /* integer ops: index layout depends on element size */
        switch (size) {
        case 1:
            index = h << 2 | l << 1 | m;
            break;
        case 2:
            index = h << 1 | l;
            rm |= (m << 4);
            break;
        default:
            unallocated_encoding(s);
            return;
        }
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (is_fp) {
        fpst = get_fpstatus_ptr();
    } else {
        TCGV_UNUSED_PTR(fpst);
    }

    if (size == 3) {
        /* 64-bit elements: only the FP ops can get here */
        TCGv_i64 tcg_idx = tcg_temp_new_i64();
        int pass;

        assert(is_fp && is_q && !is_long);

        read_vec_element(s, tcg_idx, rm, index, MO_64);

        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();
            TCGv_i64 tcg_res = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);

            switch (opcode) {
            case 0x5: /* FMLS */
                /* As usual for ARM, separate negation for fused multiply-add */
                gen_helper_vfp_negd(tcg_op, tcg_op);
                /* fall through */
            case 0x1: /* FMLA */
                read_vec_element(s, tcg_res, rd, pass, MO_64);
                gen_helper_vfp_muladdd(tcg_res, tcg_op, tcg_idx, tcg_res, fpst);
                break;
            case 0x9: /* FMUL, FMULX */
                if (u) {
                    gen_helper_vfp_mulxd(tcg_res, tcg_op, tcg_idx, fpst);
                } else {
                    gen_helper_vfp_muld(tcg_res, tcg_op, tcg_idx, fpst);
                }
                break;
            default:
                g_assert_not_reached();
            }

            write_vec_element(s, tcg_res, rd, pass, MO_64);
            tcg_temp_free_i64(tcg_op);
            tcg_temp_free_i64(tcg_res);
        }

        if (is_scalar) {
            clear_vec_high(s, rd);
        }

        tcg_temp_free_i64(tcg_idx);
    } else if (!is_long) {
        /* 32 bit floating point, or 16 or 32 bit integer.
         * For the 16 bit scalar case we use the usual Neon helpers and
         * rely on the fact that 0 op 0 == 0 with no side effects.
         */
        TCGv_i32 tcg_idx = tcg_temp_new_i32();
        int pass, maxpasses;

        if (is_scalar) {
            maxpasses = 1;
        } else {
            maxpasses = is_q ? 4 : 2;
        }

        read_vec_element_i32(s, tcg_idx, rm, index, size);

        if (size == 1 && !is_scalar) {
            /* The simplest way to handle the 16x16 indexed ops is to
             * duplicate the index into both halves of the 32 bit tcg_idx
             * and then use the usual Neon helpers.
             */
            tcg_gen_deposit_i32(tcg_idx, tcg_idx, tcg_idx, 16, 16);
        }

        for (pass = 0; pass < maxpasses; pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op, rn, pass, is_scalar ? size : MO_32);

            switch (opcode) {
            case 0x0: /* MLA */
            case 0x4: /* MLS */
            case 0x8: /* MUL */
            {
                static NeonGenTwoOpFn * const fns[2][2] = {
                    { gen_helper_neon_add_u16, gen_helper_neon_sub_u16 },
                    { tcg_gen_add_i32, tcg_gen_sub_i32 },
                };
                NeonGenTwoOpFn *genfn;
                bool is_sub = opcode == 0x4;

                if (size == 1) {
                    gen_helper_neon_mul_u16(tcg_res, tcg_op, tcg_idx);
                } else {
                    tcg_gen_mul_i32(tcg_res, tcg_op, tcg_idx);
                }
                if (opcode == 0x8) {
                    /* MUL: no accumulate step */
                    break;
                }
                read_vec_element_i32(s, tcg_op, rd, pass, MO_32);
                genfn = fns[size - 1][is_sub];
                genfn(tcg_res, tcg_op, tcg_res);
                break;
            }
            case 0x5: /* FMLS */
                /* As usual for ARM, separate negation for fused multiply-add */
                gen_helper_vfp_negs(tcg_op, tcg_op);
                /* fall through */
            case 0x1: /* FMLA */
                read_vec_element_i32(s, tcg_res, rd, pass, MO_32);
                gen_helper_vfp_muladds(tcg_res, tcg_op, tcg_idx, tcg_res, fpst);
                break;
            case 0x9: /* FMUL, FMULX */
                if (u) {
                    gen_helper_vfp_mulxs(tcg_res, tcg_op, tcg_idx, fpst);
                } else {
                    gen_helper_vfp_muls(tcg_res, tcg_op, tcg_idx, fpst);
                }
                break;
            case 0xc: /* SQDMULH */
                if (size == 1) {
                    gen_helper_neon_qdmulh_s16(tcg_res, cpu_env,
                                               tcg_op, tcg_idx);
                } else {
                    gen_helper_neon_qdmulh_s32(tcg_res, cpu_env,
                                               tcg_op, tcg_idx);
                }
                break;
            case 0xd: /* SQRDMULH */
                if (size == 1) {
                    gen_helper_neon_qrdmulh_s16(tcg_res, cpu_env,
                                                tcg_op, tcg_idx);
                } else {
                    gen_helper_neon_qrdmulh_s32(tcg_res, cpu_env,
                                                tcg_op, tcg_idx);
                }
                break;
            default:
                g_assert_not_reached();
            }

            if (is_scalar) {
                write_fp_sreg(s, rd, tcg_res);
            } else {
                write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
            }

            tcg_temp_free_i32(tcg_op);
            tcg_temp_free_i32(tcg_res);
        }

        tcg_temp_free_i32(tcg_idx);

        if (!is_q) {
            clear_vec_high(s, rd);
        }
    } else {
        /* long ops: 16x16->32 or 32x32->64 */
        TCGv_i64 tcg_res[2];
        int pass;
        bool satop = extract32(opcode, 0, 1);
        TCGMemOp memop = MO_32;

        if (satop || !u) {
            memop |= MO_SIGN;
        }

        if (size == 2) {
            TCGv_i64 tcg_idx = tcg_temp_new_i64();

            read_vec_element(s, tcg_idx, rm, index, memop);

            for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
                TCGv_i64 tcg_op = tcg_temp_new_i64();
                TCGv_i64 tcg_passres;
                int passelt;

                if (is_scalar) {
                    passelt = 0;
                } else {
                    passelt = pass + (is_q * 2);
                }

                read_vec_element(s, tcg_op, rn, passelt, memop);

                tcg_res[pass] = tcg_temp_new_i64();

                if (opcode == 0xa || opcode == 0xb) {
                    /* Non-accumulating op: write result straight to tcg_res */
                    tcg_passres = tcg_res[pass];
                } else {
                    tcg_passres = tcg_temp_new_i64();
                }

                tcg_gen_mul_i64(tcg_passres, tcg_op, tcg_idx);
                tcg_temp_free_i64(tcg_op);

                if (satop) {
                    /* saturating, doubling */
                    gen_helper_neon_addl_saturate_s64(tcg_passres, cpu_env,
                                                      tcg_passres, tcg_passres);
                }

                if (opcode == 0xa || opcode == 0xb) {
                    continue;
                }

                /* Accumulating op: handle accumulate step */
                read_vec_element(s, tcg_res[pass], rd, pass, MO_64);

                switch (opcode) {
                case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
                    tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
                    break;
                case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
                    tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
                    break;
                case 0x7: /* SQDMLSL, SQDMLSL2 */
                    tcg_gen_neg_i64(tcg_passres, tcg_passres);
                    /* fall through */
                case 0x3: /* SQDMLAL, SQDMLAL2 */
                    gen_helper_neon_addl_saturate_s64(tcg_res[pass], cpu_env,
                                                      tcg_res[pass],
                                                      tcg_passres);
                    break;
                default:
                    g_assert_not_reached();
                }
                tcg_temp_free_i64(tcg_passres);
            }
            tcg_temp_free_i64(tcg_idx);

            if (is_scalar) {
                clear_vec_high(s, rd);
            }
        } else {
            TCGv_i32 tcg_idx = tcg_temp_new_i32();

            assert(size == 1);
            read_vec_element_i32(s, tcg_idx, rm, index, size);

            if (!is_scalar) {
                /* The simplest way to handle the 16x16 indexed ops is to
                 * duplicate the index into both halves of the 32 bit tcg_idx
                 * and then use the usual Neon helpers.
                 */
                tcg_gen_deposit_i32(tcg_idx, tcg_idx, tcg_idx, 16, 16);
            }

            for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
                TCGv_i32 tcg_op = tcg_temp_new_i32();
                TCGv_i64 tcg_passres;

                if (is_scalar) {
                    read_vec_element_i32(s, tcg_op, rn, pass, size);
                } else {
                    read_vec_element_i32(s, tcg_op, rn,
                                         pass + (is_q * 2), MO_32);
                }

                tcg_res[pass] = tcg_temp_new_i64();

                if (opcode == 0xa || opcode == 0xb) {
                    /* Non-accumulating op: write result straight to tcg_res */
                    tcg_passres = tcg_res[pass];
                } else {
                    tcg_passres = tcg_temp_new_i64();
                }

                if (memop & MO_SIGN) {
                    gen_helper_neon_mull_s16(tcg_passres, tcg_op, tcg_idx);
                } else {
                    gen_helper_neon_mull_u16(tcg_passres, tcg_op, tcg_idx);
                }
                if (satop) {
                    gen_helper_neon_addl_saturate_s32(tcg_passres, cpu_env,
                                                      tcg_passres, tcg_passres);
                }
                tcg_temp_free_i32(tcg_op);

                if (opcode == 0xa || opcode == 0xb) {
                    continue;
                }

                /* Accumulating op: handle accumulate step */
                read_vec_element(s, tcg_res[pass], rd, pass, MO_64);

                switch (opcode) {
                case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
                    gen_helper_neon_addl_u32(tcg_res[pass], tcg_res[pass],
                                             tcg_passres);
                    break;
                case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
                    gen_helper_neon_subl_u32(tcg_res[pass], tcg_res[pass],
                                             tcg_passres);
                    break;
                case 0x7: /* SQDMLSL, SQDMLSL2 */
                    gen_helper_neon_negl_u32(tcg_passres, tcg_passres);
                    /* fall through */
                case 0x3: /* SQDMLAL, SQDMLAL2 */
                    gen_helper_neon_addl_saturate_s32(tcg_res[pass], cpu_env,
                                                      tcg_res[pass],
                                                      tcg_passres);
                    break;
                default:
                    g_assert_not_reached();
                }
                tcg_temp_free_i64(tcg_passres);
            }
            tcg_temp_free_i32(tcg_idx);

            if (is_scalar) {
                /* scalar 16x16->32 result lives in the low 32 bits */
                tcg_gen_ext32u_i64(tcg_res[0], tcg_res[0]);
            }
        }

        if (is_scalar) {
            tcg_res[1] = tcg_const_i64(0);
        }

        for (pass = 0; pass < 2; pass++) {
            write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
            tcg_temp_free_i64(tcg_res[pass]);
        }
    }

    if (!TCGV_IS_UNUSED_PTR(fpst)) {
        tcg_temp_free_ptr(fpst);
    }
}
10836
10837
10838
10839
10840
10841
10842
10843static void disas_crypto_aes(DisasContext *s, uint32_t insn)
10844{
10845 int size = extract32(insn, 22, 2);
10846 int opcode = extract32(insn, 12, 5);
10847 int rn = extract32(insn, 5, 5);
10848 int rd = extract32(insn, 0, 5);
10849 int decrypt;
10850 TCGv_i32 tcg_rd_regno, tcg_rn_regno, tcg_decrypt;
10851 CryptoThreeOpEnvFn *genfn;
10852
10853 if (!arm_dc_feature(s, ARM_FEATURE_V8_AES)
10854 || size != 0) {
10855 unallocated_encoding(s);
10856 return;
10857 }
10858
10859 switch (opcode) {
10860 case 0x4:
10861 decrypt = 0;
10862 genfn = gen_helper_crypto_aese;
10863 break;
10864 case 0x6:
10865 decrypt = 0;
10866 genfn = gen_helper_crypto_aesmc;
10867 break;
10868 case 0x5:
10869 decrypt = 1;
10870 genfn = gen_helper_crypto_aese;
10871 break;
10872 case 0x7:
10873 decrypt = 1;
10874 genfn = gen_helper_crypto_aesmc;
10875 break;
10876 default:
10877 unallocated_encoding(s);
10878 return;
10879 }
10880
10881
10882
10883
10884
10885 tcg_rd_regno = tcg_const_i32(rd << 1);
10886 tcg_rn_regno = tcg_const_i32(rn << 1);
10887 tcg_decrypt = tcg_const_i32(decrypt);
10888
10889 genfn(cpu_env, tcg_rd_regno, tcg_rn_regno, tcg_decrypt);
10890
10891 tcg_temp_free_i32(tcg_rd_regno);
10892 tcg_temp_free_i32(tcg_rn_regno);
10893 tcg_temp_free_i32(tcg_decrypt);
10894}
10895
10896
10897
10898
10899
10900
10901
10902static void disas_crypto_three_reg_sha(DisasContext *s, uint32_t insn)
10903{
10904 int size = extract32(insn, 22, 2);
10905 int opcode = extract32(insn, 12, 3);
10906 int rm = extract32(insn, 16, 5);
10907 int rn = extract32(insn, 5, 5);
10908 int rd = extract32(insn, 0, 5);
10909 CryptoThreeOpEnvFn *genfn;
10910 TCGv_i32 tcg_rd_regno, tcg_rn_regno, tcg_rm_regno;
10911 int feature = ARM_FEATURE_V8_SHA256;
10912
10913 if (size != 0) {
10914 unallocated_encoding(s);
10915 return;
10916 }
10917
10918 switch (opcode) {
10919 case 0:
10920 case 1:
10921 case 2:
10922 case 3:
10923 genfn = NULL;
10924 feature = ARM_FEATURE_V8_SHA1;
10925 break;
10926 case 4:
10927 genfn = gen_helper_crypto_sha256h;
10928 break;
10929 case 5:
10930 genfn = gen_helper_crypto_sha256h2;
10931 break;
10932 case 6:
10933 genfn = gen_helper_crypto_sha256su1;
10934 break;
10935 default:
10936 unallocated_encoding(s);
10937 return;
10938 }
10939
10940 if (!arm_dc_feature(s, feature)) {
10941 unallocated_encoding(s);
10942 return;
10943 }
10944
10945 tcg_rd_regno = tcg_const_i32(rd << 1);
10946 tcg_rn_regno = tcg_const_i32(rn << 1);
10947 tcg_rm_regno = tcg_const_i32(rm << 1);
10948
10949 if (genfn) {
10950 genfn(cpu_env, tcg_rd_regno, tcg_rn_regno, tcg_rm_regno);
10951 } else {
10952 TCGv_i32 tcg_opcode = tcg_const_i32(opcode);
10953
10954 gen_helper_crypto_sha1_3reg(cpu_env, tcg_rd_regno,
10955 tcg_rn_regno, tcg_rm_regno, tcg_opcode);
10956 tcg_temp_free_i32(tcg_opcode);
10957 }
10958
10959 tcg_temp_free_i32(tcg_rd_regno);
10960 tcg_temp_free_i32(tcg_rn_regno);
10961 tcg_temp_free_i32(tcg_rm_regno);
10962}
10963
10964
10965
10966
10967
10968
10969
10970static void disas_crypto_two_reg_sha(DisasContext *s, uint32_t insn)
10971{
10972 int size = extract32(insn, 22, 2);
10973 int opcode = extract32(insn, 12, 5);
10974 int rn = extract32(insn, 5, 5);
10975 int rd = extract32(insn, 0, 5);
10976 CryptoTwoOpEnvFn *genfn;
10977 int feature;
10978 TCGv_i32 tcg_rd_regno, tcg_rn_regno;
10979
10980 if (size != 0) {
10981 unallocated_encoding(s);
10982 return;
10983 }
10984
10985 switch (opcode) {
10986 case 0:
10987 feature = ARM_FEATURE_V8_SHA1;
10988 genfn = gen_helper_crypto_sha1h;
10989 break;
10990 case 1:
10991 feature = ARM_FEATURE_V8_SHA1;
10992 genfn = gen_helper_crypto_sha1su1;
10993 break;
10994 case 2:
10995 feature = ARM_FEATURE_V8_SHA256;
10996 genfn = gen_helper_crypto_sha256su0;
10997 break;
10998 default:
10999 unallocated_encoding(s);
11000 return;
11001 }
11002
11003 if (!arm_dc_feature(s, feature)) {
11004 unallocated_encoding(s);
11005 return;
11006 }
11007
11008 tcg_rd_regno = tcg_const_i32(rd << 1);
11009 tcg_rn_regno = tcg_const_i32(rn << 1);
11010
11011 genfn(cpu_env, tcg_rd_regno, tcg_rn_regno);
11012
11013 tcg_temp_free_i32(tcg_rd_regno);
11014 tcg_temp_free_i32(tcg_rn_regno);
11015}
11016
11017
11018
11019
11020
11021
/* C3.6 Data processing - SIMD, inc Crypto
 *
 * As the decode gets a little complex we are using a table based
 * approach for this part of the decode. Entries are matched in order,
 * so more-specific patterns must come before less-specific ones.
 */
static const AArch64DecodeTable data_proc_simd[] = {
    /* pattern  ,  mask     ,  fn                        */
    { 0x0e200400, 0x9f200400, disas_simd_three_reg_same },
    { 0x0e200000, 0x9f200c00, disas_simd_three_reg_diff },
    { 0x0e200800, 0x9f3e0c00, disas_simd_two_reg_misc },
    { 0x0e300800, 0x9f3e0c00, disas_simd_across_lanes },
    { 0x0e000400, 0x9fe08400, disas_simd_copy },
    { 0x0f000000, 0x9f000400, disas_simd_indexed }, /* vector indexed */
    /* simd_mod_imm decode is a subset of simd_shift_imm, so must precede it */
    { 0x0f000400, 0x9ff80400, disas_simd_mod_imm },
    { 0x0f000400, 0x9f800400, disas_simd_shift_imm },
    { 0x0e000000, 0xbf208c00, disas_simd_tb },
    { 0x0e000800, 0xbf208c00, disas_simd_zip_trn },
    { 0x2e000000, 0xbf208400, disas_simd_ext },
    { 0x5e200400, 0xdf200400, disas_simd_scalar_three_reg_same },
    { 0x5e200000, 0xdf200c00, disas_simd_scalar_three_reg_diff },
    { 0x5e200800, 0xdf3e0c00, disas_simd_scalar_two_reg_misc },
    { 0x5e300800, 0xdf3e0c00, disas_simd_scalar_pairwise },
    { 0x5e000400, 0xdfe08400, disas_simd_scalar_copy },
    { 0x5f000000, 0xdf000400, disas_simd_indexed }, /* scalar indexed */
    { 0x5f000400, 0xdf800400, disas_simd_scalar_shift_imm },
    { 0x4e280800, 0xff3e0c00, disas_crypto_aes },
    { 0x5e000000, 0xff208c00, disas_crypto_three_reg_sha },
    { 0x5e280800, 0xff3e0c00, disas_crypto_two_reg_sha },
    { 0x00000000, 0x00000000, NULL } /* sentinel: end of table */
};
11048
11049static void disas_data_proc_simd(DisasContext *s, uint32_t insn)
11050{
11051
11052
11053
11054
11055 AArch64DecodeFn *fn = lookup_disas_fn(&data_proc_simd[0], insn);
11056 if (fn) {
11057 fn(s, insn);
11058 } else {
11059 unallocated_encoding(s);
11060 }
11061}
11062
11063
11064static void disas_data_proc_simd_fp(DisasContext *s, uint32_t insn)
11065{
11066 if (extract32(insn, 28, 1) == 1 && extract32(insn, 30, 1) == 0) {
11067 disas_data_proc_fp(s, insn);
11068 } else {
11069
11070 disas_data_proc_simd(s, insn);
11071 }
11072}
11073
11074
/* C3.1 A64 instruction index by encoding: fetch one instruction at
 * s->pc, advance the PC, and dispatch on the major opcode group
 * (insn bits [28:25]).
 */
static void disas_a64_insn(CPUARMState *env, DisasContext *s)
{
    uint32_t insn;

    insn = arm_ldl_code(env, s->pc, s->sctlr_b);
    s->insn = insn;
    s->pc += 4;

    /* reset per-insn tracking of whether an FP access check was done */
    s->fp_access_checked = false;

    switch (extract32(insn, 25, 4)) {
    case 0x0: case 0x1: case 0x2: case 0x3: /* UNALLOCATED */
        unallocated_encoding(s);
        break;
    case 0x8: case 0x9: /* Data processing - immediate */
        disas_data_proc_imm(s, insn);
        break;
    case 0xa: case 0xb: /* Branch, exception generation and system insns */
        disas_b_exc_sys(s, insn);
        break;
    case 0x4:
    case 0x6:
    case 0xc:
    case 0xe:      /* Loads and stores */
        disas_ldst(s, insn);
        break;
    case 0x5:
    case 0xd:      /* Data processing - register */
        disas_data_proc_reg(s, insn);
        break;
    case 0x7:
    case 0xf:      /* Data processing - SIMD and floating point */
        disas_data_proc_simd_fp(s, insn);
        break;
    default:
        assert(FALSE); /* all 15 cases should be handled above */
        break;
    }

    /* if we allocated any temporaries, free them here */
    free_tmp_a64(s);
}
11117
11118void gen_intermediate_code_a64(ARMCPU *cpu, TranslationBlock *tb)
11119{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    DisasContext dc1, *dc = &dc1;
    target_ulong pc_start;
    target_ulong next_page_start;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;

    dc->tb = tb;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->condjmp = 0;

    dc->aarch64 = 1;
    /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
     * there is no secure EL1, so we route exceptions to EL3.
     */
    dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
                               !arm_el_is_aa64(env, 3);
    /* AArch64 never uses Thumb, CPSR IT bits or the vector len/stride
     * fields, so they are hard-coded to zero here.
     */
    dc->thumb = 0;
    dc->sctlr_b = 0;
    dc->be_data = ARM_TBFLAG_BE_DATA(tb->flags) ? MO_BE : MO_LE;
    dc->condexec_mask = 0;
    dc->condexec_cond = 0;
    dc->mmu_idx = ARM_TBFLAG_MMUIDX(tb->flags);
    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
    dc->user = (dc->current_el == 0);
    dc->ns = ARM_TBFLAG_NS(tb->flags);
#endif
    dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(tb->flags);
    dc->vec_len = 0;
    dc->vec_stride = 0;
    dc->cp_regs = cpu->cp_regs;
    dc->features = env->features;

    /* Single step state. The code-generation logic here is:
     *  SS_ACTIVE == 0:
     *   generate code with no special handling for single-stepping (except
     *   that anything that can make us go to SS_ACTIVE == 1 must end the TB;
     *   this happens anyway because those changes are all system register or
     *   PSTATE writes).
     *  SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
     *   emit code for one insn
     *   emit code to clear PSTATE.SS
     *   emit code to generate software step exception for completed step
     *   end TB (as usual for having generated an exception)
     *  SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
     *   emit code to generate a software step exception
     *   end the TB
     */
    dc->ss_active = ARM_TBFLAG_SS_ACTIVE(tb->flags);
    dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(tb->flags);
    dc->is_ldex = false;
    dc->ss_same_el = (arm_debug_target_el(env) == dc->current_el);

    init_tmp_a64_array(dc);

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);

    tcg_clear_temp_count();

    do {
        dc->insn_start_idx = tcg_op_buf_count();
        tcg_gen_insn_start(dc->pc, 0, 0);
        num_insns++;

        if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
            CPUBreakpoint *bp;
            QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    if (bp->flags & BP_CPU) {
                        gen_a64_set_pc_im(dc->pc);
                        gen_helper_check_breakpoints(cpu_env);
                        /* End the TB early; it likely won't be executed */
                        dc->is_jmp = DISAS_UPDATE;
                    } else {
                        gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
                        /* The address covered by the breakpoint must be
                           included in [tb->pc, tb->pc + tb->size) in order
                           to for it to be properly cleared -- thus we
                           increment the PC here so that the logic setting
                           tb->size below does the right thing.  */
                        dc->pc += 4;
                        goto done_generating;
                    }
                    break;
                }
            }
        }

        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        if (dc->ss_active && !dc->pstate_ss) {
            /* Singlestep state is Active-pending.
             * If we're in this state at the start of a TB then either
             *  a) we just took an exception to an EL which is being debugged
             *     and this is the first insn in the exception handler
             *  b) debug exceptions were masked and we just unmasked them
             *     without changing EL (eg by clearing PSTATE.D)
             * In either case we're going to take a swstep exception in the
             * "did not step an insn" case, and so the syndrome ISV and EX
             * bits should be zero.
             */
            assert(num_insns == 1);
            gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
                          default_exception_el(dc));
            dc->is_jmp = DISAS_EXC;
            break;
        }

        disas_a64_insn(env, dc);

        if (tcg_check_temp_count()) {
            fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n",
                    dc->pc);
        }

        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached.  This
         * ensures prefetch aborts occur at the right place.
         */
    } while (!dc->is_jmp && !tcg_op_buf_full() &&
             !cs->singlestep_enabled &&
             !singlestep &&
             !dc->ss_active &&
             dc->pc < next_page_start &&
             num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    if (unlikely(cs->singlestep_enabled || dc->ss_active)
        && dc->is_jmp != DISAS_EXC) {
        /* Note that this means single stepping WFI doesn't halt the CPU.
         * For conditional branch insns this is harmless unreachable code as
         * gen_goto_tb() has already handled emitting the debug exception
         * (and thus a tb-jump is not possible when singlestepping).
         */
        assert(dc->is_jmp != DISAS_TB_JUMP);
        if (dc->is_jmp != DISAS_JUMP) {
            gen_a64_set_pc_im(dc->pc);
        }
        if (cs->singlestep_enabled) {
            gen_exception_internal(EXCP_DEBUG);
        } else {
            gen_step_complete_exception(dc);
        }
    } else {
        switch (dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_UPDATE:
            gen_a64_set_pc_im(dc->pc);
            /* fall through */
        case DISAS_JUMP:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
        case DISAS_EXC:
        case DISAS_SWI:
            break;
        case DISAS_WFE:
            gen_a64_set_pc_im(dc->pc);
            gen_helper_wfe(cpu_env);
            tcg_gen_exit_tb(0);
            break;
        case DISAS_YIELD:
            gen_a64_set_pc_im(dc->pc);
            gen_helper_yield(cpu_env);
            break;
        case DISAS_WFI:
            /* This is a special case because we don't want to just halt the
             * CPU if trying to debug across a WFI.
             */
            gen_a64_set_pc_im(dc->pc);
            gen_helper_wfi(cpu_env);
            /* The helper doesn't necessarily throw an exception, but we
             * must go back to the main loop to check for interrupts anyway.
             */
            tcg_gen_exit_tb(0);
            break;
        }
    }

done_generating:
    gen_tb_end(tb, num_insns);

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM) &&
        qemu_log_in_addr_range(pc_start)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(cs, pc_start, dc->pc - pc_start,
                         4 | (bswap_code(dc->sctlr_b) ? 2 : 0));
        qemu_log("\n");
    }
#endif
    /* Record how much guest code this TB covered and how many insns were
     * translated, for the TB hash/invalidate machinery and icount.
     */
    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;
}
11342