1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19#include "qemu/osdep.h"
20
21#include "cpu.h"
22#include "exec/exec-all.h"
23#include "tcg/tcg-op.h"
24#include "tcg/tcg-op-gvec.h"
25#include "qemu/log.h"
26#include "arm_ldst.h"
27#include "translate.h"
28#include "internals.h"
29#include "qemu/host-utils.h"
30#include "semihosting/semihost.h"
31#include "exec/gen-icount.h"
32#include "exec/helper-proto.h"
33#include "exec/helper-gen.h"
34#include "exec/log.h"
35#include "cpregs.h"
36#include "translate-a64.h"
37#include "qemu/atomic128.h"
38
/* TCG globals mapping the AArch64 general registers and PC in CPUARMState. */
static TCGv_i64 cpu_X[32];
static TCGv_i64 cpu_pc;

/* Upper 64 bits of the last exclusive (LDXP) access, for 128-bit pairs. */
static TCGv_i64 cpu_exclusive_high;
44
/*
 * Debug names for the cpu_X[] globals.  Index 30 is the link register
 * and index 31 the stack pointer, matching xregs[] in CPUARMState.
 */
static const char *regnames[] = {
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
    "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
    "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23",
    "x24", "x25", "x26", "x27", "x28", "x29", "lr", "sp"
};
51
/* A64 shift kinds, encoded as in the instruction "shift" field. */
enum a64_shift_type {
    A64_SHIFT_TYPE_LSL = 0,
    A64_SHIFT_TYPE_LSR = 1,
    A64_SHIFT_TYPE_ASR = 2,
    A64_SHIFT_TYPE_ROR = 3
};
58
59
60
61
/* Signature of a decoder for one instruction group. */
typedef void AArch64DecodeFn(DisasContext *s, uint32_t insn);

/* One entry of a decode table: insn matches when (insn & mask) == pattern. */
typedef struct AArch64DecodeTable {
    uint32_t pattern;
    uint32_t mask;
    AArch64DecodeFn *disas_fn;
} AArch64DecodeTable;
69
70
71void a64_translate_init(void)
72{
73 int i;
74
75 cpu_pc = tcg_global_mem_new_i64(cpu_env,
76 offsetof(CPUARMState, pc),
77 "pc");
78 for (i = 0; i < 32; i++) {
79 cpu_X[i] = tcg_global_mem_new_i64(cpu_env,
80 offsetof(CPUARMState, xregs[i]),
81 regnames[i]);
82 }
83
84 cpu_exclusive_high = tcg_global_mem_new_i64(cpu_env,
85 offsetof(CPUARMState, exclusive_high), "exclusive_high");
86}
87
88
89
90
/*
 * Return the core mmu index to use for an access.  When the unpriv
 * flag is set (LDTR/STTR-style accesses), map the current regime's
 * mmu_idx to its EL0 counterpart; otherwise use it unchanged.
 */
static int get_a64_user_mem_index(DisasContext *s)
{
    ARMMMUIdx useridx = s->mmu_idx;

    if (s->unpriv) {
        /*
         * Only regimes that have an EL0 variant can reach here with
         * unpriv set; any other mmu_idx indicates a translator bug.
         */
        switch (useridx) {
        case ARMMMUIdx_E10_1:
        case ARMMMUIdx_E10_1_PAN:
            useridx = ARMMMUIdx_E10_0;
            break;
        case ARMMMUIdx_E20_2:
        case ARMMMUIdx_E20_2_PAN:
            useridx = ARMMMUIdx_E20_0;
            break;
        case ARMMMUIdx_SE10_1:
        case ARMMMUIdx_SE10_1_PAN:
            useridx = ARMMMUIdx_SE10_0;
            break;
        case ARMMMUIdx_SE20_2:
        case ARMMMUIdx_SE20_2_PAN:
            useridx = ARMMMUIdx_SE20_0;
            break;
        default:
            g_assert_not_reached();
        }
    }
    return arm_to_core_mmu_idx(useridx);
}
128
/* Store an immediate value into env->btype. */
static void set_btype_raw(int val)
{
    tcg_gen_st_i32(tcg_constant_i32(val), cpu_env,
                   offsetof(CPUARMState, btype));
}
134
/* Set PSTATE.BTYPE to a non-zero value (BTYPE is a 2-bit field). */
static void set_btype(DisasContext *s, int val)
{
    tcg_debug_assert(val >= 1 && val <= 3);
    set_btype_raw(val);
    /* Mark the cached btype as unknown so the next reset re-emits the store. */
    s->btype = -1;
}
142
/* Clear PSTATE.BTYPE, skipping the store if it is already known zero. */
static void reset_btype(DisasContext *s)
{
    if (s->btype != 0) {
        set_btype_raw(0);
        s->btype = 0;
    }
}
150
/* Set the PC global to an immediate value. */
void gen_a64_set_pc_im(uint64_t val)
{
    tcg_gen_movi_i64(cpu_pc, val);
}
155
156
157
158
159
160
161
162
163
164
165
166
167
168
/*
 * Copy src into dst, applying Top Byte Ignore.  tbi is a 2-bit field:
 * bit 0 enables TBI for addresses with bit 55 clear, bit 1 for
 * addresses with bit 55 set (only meaningful in 2-range regimes).
 */
static void gen_top_byte_ignore(DisasContext *s, TCGv_i64 dst,
                                TCGv_i64 src, int tbi)
{
    if (tbi == 0) {
        /* TBI disabled: pass the address through unchanged. */
        tcg_gen_mov_i64(dst, src);
    } else if (!regime_has_2_ranges(s->mmu_idx)) {
        /* Single-range regime: force the top byte to zero. */
        tcg_gen_extract_i64(dst, src, 0, 56);
    } else {
        /* Sign-extend from bit 55 to fill the top byte. */
        tcg_gen_sextract_i64(dst, src, 0, 56);

        switch (tbi) {
        case 1:
            /* tbi0 only: keep the extension only for "positive" addresses. */
            tcg_gen_and_i64(dst, dst, src);
            break;
        case 2:
            /* tbi1 only: keep the extension only for "negative" addresses. */
            tcg_gen_or_i64(dst, dst, src);
            break;
        case 3:
            /* Both halves enabled: the extension is always used. */
            break;
        default:
            g_assert_not_reached();
        }
    }
}
199
/*
 * Write a computed branch target into the PC, applying the
 * instruction-fetch TBI setting (s->tbii) to the top byte.
 */
static void gen_a64_set_pc(DisasContext *s, TCGv_i64 src)
{
    gen_top_byte_ignore(s, cpu_pc, src, s->tbii);
}
208
209
210
211
212
213
214
215
216
217
218
219
220
/*
 * Return a "clean" data address with the tag byte resolved per the
 * data TBI setting.  In user-only mode we must strip/extend the top
 * byte here; in system mode the address is passed through unchanged
 * (the tag byte is handled during the MMU translation instead).
 */
TCGv_i64 clean_data_tbi(DisasContext *s, TCGv_i64 addr)
{
    TCGv_i64 clean = new_tmp_a64(s);
#ifdef CONFIG_USER_ONLY
    gen_top_byte_ignore(s, clean, addr, s->tbid);
#else
    tcg_gen_mov_i64(clean, addr);
#endif
    return clean;
}
231
232
/* Copy src to dst with the 4-bit MTE allocation tag (bits 59:56) cleared. */
static void gen_address_with_allocation_tag0(TCGv_i64 dst, TCGv_i64 src)
{
    tcg_gen_andi_i64(dst, src, ~MAKE_64BIT_MASK(56, 4));
}
237
/*
 * Emit a probe of (1 << log2_size) bytes at ptr for the given access
 * type, raising any fault now rather than at the later real access.
 */
static void gen_probe_access(DisasContext *s, TCGv_i64 ptr,
                             MMUAccessType acc, int log2_size)
{
    gen_helper_probe_access(cpu_env, ptr,
                            tcg_constant_i32(acc),
                            tcg_constant_i32(get_mem_index(s)),
                            tcg_constant_i32(1 << log2_size));
}
246
247
248
249
250
251
252
/*
 * For a single-register MTE-checked access: if MTE is active for this
 * (possibly unprivileged) context, call the mte_check helper with a
 * packed descriptor and return the checked address; otherwise just
 * return the TBI-cleaned address.
 */
static TCGv_i64 gen_mte_check1_mmuidx(DisasContext *s, TCGv_i64 addr,
                                      bool is_write, bool tag_checked,
                                      int log2_size, bool is_unpriv,
                                      int core_idx)
{
    if (tag_checked && s->mte_active[is_unpriv]) {
        TCGv_i64 ret;
        int desc = 0;

        desc = FIELD_DP32(desc, MTEDESC, MIDX, core_idx);
        desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
        desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
        desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
        /* SIZEM1 carries the access size minus one. */
        desc = FIELD_DP32(desc, MTEDESC, SIZEM1, (1 << log2_size) - 1);

        ret = new_tmp_a64(s);
        gen_helper_mte_check(ret, cpu_env, tcg_constant_i32(desc), addr);

        return ret;
    }
    return clean_data_tbi(s, addr);
}
275
/* MTE check for a single access using the current (privileged) mem index. */
TCGv_i64 gen_mte_check1(DisasContext *s, TCGv_i64 addr, bool is_write,
                        bool tag_checked, int log2_size)
{
    return gen_mte_check1_mmuidx(s, addr, is_write, tag_checked, log2_size,
                                 false, get_mem_index(s));
}
282
283
284
285
/*
 * MTE check for a multi-register or arbitrary-sized access; size is
 * the total byte count (not log2).  Falls back to TBI cleaning when
 * MTE is not active or the access is not tag-checked.
 */
TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write,
                        bool tag_checked, int size)
{
    if (tag_checked && s->mte_active[0]) {
        TCGv_i64 ret;
        int desc = 0;

        desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s));
        desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
        desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
        desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
        desc = FIELD_DP32(desc, MTEDESC, SIZEM1, size - 1);

        ret = new_tmp_a64(s);
        gen_helper_mte_check(ret, cpu_env, tcg_constant_i32(desc), addr);

        return ret;
    }
    return clean_data_tbi(s, addr);
}
306
/* A condition test widened to 64 bits: cond applied to value vs zero. */
typedef struct DisasCompare64 {
    TCGCond cond;
    TCGv_i64 value;
} DisasCompare64;
311
312static void a64_test_cc(DisasCompare64 *c64, int cc)
313{
314 DisasCompare c32;
315
316 arm_test_cc(&c32, cc);
317
318
319
320 c64->cond = c32.cond;
321 c64->value = tcg_temp_new_i64();
322 tcg_gen_ext_i32_i64(c64->value, c32.value);
323
324 arm_free_cc(&c32);
325}
326
/* Release the temporary held by a DisasCompare64. */
static void a64_free_cc(DisasCompare64 *c64)
{
    tcg_temp_free_i64(c64->value);
}
331
/* Recompute the cached hflags for the current EL after a state change. */
static void gen_rebuild_hflags(DisasContext *s)
{
    gen_helper_rebuild_hflags_a64(cpu_env, tcg_constant_i32(s->current_el));
}
336
/* Raise a QEMU-internal exception (must not be an architectural one). */
static void gen_exception_internal(int excp)
{
    assert(excp_is_internal(excp));
    gen_helper_exception_internal(cpu_env, tcg_constant_i32(excp));
}
342
/* Set the PC and raise an internal exception, ending the TB. */
static void gen_exception_internal_insn(DisasContext *s, uint64_t pc, int excp)
{
    gen_a64_set_pc_im(pc);
    gen_exception_internal(excp);
    s->base.is_jmp = DISAS_NORETURN;
}
349
/* Raise a BKPT exception with the given syndrome, ending the TB. */
static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syndrome)
{
    gen_a64_set_pc_im(s->pc_curr);
    gen_helper_exception_bkpt_insn(cpu_env, tcg_constant_i32(syndrome));
    s->base.is_jmp = DISAS_NORETURN;
}
356
/*
 * Raise the software-step "step completed" exception after the current
 * instruction: advance the single-step state machine, then generate
 * the swstep exception (isv=1, plus the load-exclusive flag) and end
 * the TB.
 */
static void gen_step_complete_exception(DisasContext *s)
{
    gen_ss_advance(s);
    gen_swstep_exception(s, 1, s->is_ldex);
    s->base.is_jmp = DISAS_NORETURN;
}
372
373static inline bool use_goto_tb(DisasContext *s, uint64_t dest)
374{
375 if (s->ss_active) {
376 return false;
377 }
378 return translator_use_goto_tb(&s->base, dest);
379}
380
/*
 * Emit a jump to dest, chaining TBs directly when allowed, otherwise
 * updating the PC and either taking the single-step exception or
 * falling back to a lookup-and-goto-ptr exit.
 */
static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest)
{
    if (use_goto_tb(s, dest)) {
        /* goto_tb must be emitted before the PC update for correct patching. */
        tcg_gen_goto_tb(n);
        gen_a64_set_pc_im(dest);
        tcg_gen_exit_tb(s->base.tb, n);
        s->base.is_jmp = DISAS_NORETURN;
    } else {
        gen_a64_set_pc_im(dest);
        if (s->ss_active) {
            gen_step_complete_exception(s);
        } else {
            tcg_gen_lookup_and_goto_ptr();
            s->base.is_jmp = DISAS_NORETURN;
        }
    }
}
398
/* Reset the per-insn temp tracking; clear stale pointers in debug builds. */
static void init_tmp_a64_array(DisasContext *s)
{
#ifdef CONFIG_DEBUG_TCG
    memset(s->tmp_a64, 0, sizeof(s->tmp_a64));
#endif
    s->tmp_a64_count = 0;
}
406
407static void free_tmp_a64(DisasContext *s)
408{
409 int i;
410 for (i = 0; i < s->tmp_a64_count; i++) {
411 tcg_temp_free_i64(s->tmp_a64[i]);
412 }
413 init_tmp_a64_array(s);
414}
415
416TCGv_i64 new_tmp_a64(DisasContext *s)
417{
418 assert(s->tmp_a64_count < TMP_A64_MAX);
419 return s->tmp_a64[s->tmp_a64_count++] = tcg_temp_new_i64();
420}
421
422TCGv_i64 new_tmp_a64_local(DisasContext *s)
423{
424 assert(s->tmp_a64_count < TMP_A64_MAX);
425 return s->tmp_a64[s->tmp_a64_count++] = tcg_temp_local_new_i64();
426}
427
/* Allocate a tracked i64 temp pre-initialized to zero. */
TCGv_i64 new_tmp_a64_zero(DisasContext *s)
{
    TCGv_i64 t = new_tmp_a64(s);
    tcg_gen_movi_i64(t, 0);
    return t;
}
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450TCGv_i64 cpu_reg(DisasContext *s, int reg)
451{
452 if (reg == 31) {
453 return new_tmp_a64_zero(s);
454 } else {
455 return cpu_X[reg];
456 }
457}
458
459
/* As cpu_reg, but register 31 is SP rather than XZR. */
TCGv_i64 cpu_reg_sp(DisasContext *s, int reg)
{
    return cpu_X[reg];
}
464
465
466
467
468
469TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf)
470{
471 TCGv_i64 v = new_tmp_a64(s);
472 if (reg != 31) {
473 if (sf) {
474 tcg_gen_mov_i64(v, cpu_X[reg]);
475 } else {
476 tcg_gen_ext32u_i64(v, cpu_X[reg]);
477 }
478 } else {
479 tcg_gen_movi_i64(v, 0);
480 }
481 return v;
482}
483
484TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf)
485{
486 TCGv_i64 v = new_tmp_a64(s);
487 if (sf) {
488 tcg_gen_mov_i64(v, cpu_X[reg]);
489 } else {
490 tcg_gen_ext32u_i64(v, cpu_X[reg]);
491 }
492 return v;
493}
494
495
496
497
498
499
/* Offset of element 0 of FP/vector register regno at the given size. */
static inline int fp_reg_offset(DisasContext *s, int regno, MemOp size)
{
    return vec_reg_offset(s, regno, 0, size);
}
504
505
/* Offset of the high (second) 64-bit half of vector register regno. */
static inline int fp_reg_hi_offset(DisasContext *s, int regno)
{
    return vec_reg_offset(s, regno, 1, MO_64);
}
510
511
512
513
514
515
516
/* Load the low 64 bits of FP register reg into a new temp (caller frees). */
static TCGv_i64 read_fp_dreg(DisasContext *s, int reg)
{
    TCGv_i64 v = tcg_temp_new_i64();

    tcg_gen_ld_i64(v, cpu_env, fp_reg_offset(s, reg, MO_64));
    return v;
}
524
/* Load the low 32 bits of FP register reg into a new temp (caller frees). */
static TCGv_i32 read_fp_sreg(DisasContext *s, int reg)
{
    TCGv_i32 v = tcg_temp_new_i32();

    tcg_gen_ld_i32(v, cpu_env, fp_reg_offset(s, reg, MO_32));
    return v;
}
532
/* Load the low 16 bits of FP register reg, zero-extended, into a new temp. */
static TCGv_i32 read_fp_hreg(DisasContext *s, int reg)
{
    TCGv_i32 v = tcg_temp_new_i32();

    tcg_gen_ld16u_i32(v, cpu_env, fp_reg_offset(s, reg, MO_16));
    return v;
}
540
541
542
543
/*
 * Clear the bits of vector register rd above the written portion
 * (above 64 bits if !is_q, above 128 bits otherwise), up to the full
 * vector length, as required by the AdvSIMD register-write rules.
 */
static void clear_vec_high(DisasContext *s, bool is_q, int rd)
{
    unsigned ofs = fp_reg_offset(s, rd, MO_64);
    unsigned vsz = vec_full_reg_size(s);

    /* Nop self-move whose gvec expansion zeroes the tail of the register. */
    tcg_gen_gvec_mov(MO_64, ofs, ofs, is_q ? 16 : 8, vsz);
}
552
/* Write v to the low 64 bits of FP register reg and zero the rest. */
void write_fp_dreg(DisasContext *s, int reg, TCGv_i64 v)
{
    unsigned ofs = fp_reg_offset(s, reg, MO_64);

    tcg_gen_st_i64(v, cpu_env, ofs);
    clear_vec_high(s, false, reg);
}
560
/* Write a 32-bit value to FP register reg, zero-extending to 64 bits
 * and clearing the upper part of the vector register. */
static void write_fp_sreg(DisasContext *s, int reg, TCGv_i32 v)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp, v);
    write_fp_dreg(s, reg, tmp);
    tcg_temp_free_i64(tmp);
}
569
570
571static void gen_gvec_fn2(DisasContext *s, bool is_q, int rd, int rn,
572 GVecGen2Fn *gvec_fn, int vece)
573{
574 gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
575 is_q ? 16 : 8, vec_full_reg_size(s));
576}
577
578
579
580
581static void gen_gvec_fn2i(DisasContext *s, bool is_q, int rd, int rn,
582 int64_t imm, GVecGen2iFn *gvec_fn, int vece)
583{
584 gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
585 imm, is_q ? 16 : 8, vec_full_reg_size(s));
586}
587
588
589static void gen_gvec_fn3(DisasContext *s, bool is_q, int rd, int rn, int rm,
590 GVecGen3Fn *gvec_fn, int vece)
591{
592 gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
593 vec_full_reg_offset(s, rm), is_q ? 16 : 8, vec_full_reg_size(s));
594}
595
596
597static void gen_gvec_fn4(DisasContext *s, bool is_q, int rd, int rn, int rm,
598 int rx, GVecGen4Fn *gvec_fn, int vece)
599{
600 gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
601 vec_full_reg_offset(s, rm), vec_full_reg_offset(s, rx),
602 is_q ? 16 : 8, vec_full_reg_size(s));
603}
604
605
/* Expand a 2-operand AdvSIMD vector op via an out-of-line helper. */
static void gen_gvec_op2_ool(DisasContext *s, bool is_q, int rd,
                             int rn, int data, gen_helper_gvec_2 *fn)
{
    tcg_gen_gvec_2_ool(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
}
613
614
/* Expand a 3-operand AdvSIMD vector op via an out-of-line helper. */
static void gen_gvec_op3_ool(DisasContext *s, bool is_q, int rd,
                             int rn, int rm, int data, gen_helper_gvec_3 *fn)
{
    tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm),
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
}
623
624
625
626
/*
 * Expand a 3-operand AdvSIMD FP vector op via an out-of-line helper,
 * passing the appropriate fp_status (FP16 variant when is_fp16).
 */
static void gen_gvec_op3_fpst(DisasContext *s, bool is_q, int rd, int rn,
                              int rm, bool is_fp16, int data,
                              gen_helper_gvec_3_ptr *fn)
{
    TCGv_ptr fpst = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
    tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm), fpst,
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
    tcg_temp_free_ptr(fpst);
}
638
639
/*
 * Expand a 3-operand saturating AdvSIMD vector op; the helper receives
 * a pointer to vfp.qc so it can record saturation (QC flag).
 */
static void gen_gvec_op3_qc(DisasContext *s, bool is_q, int rd, int rn,
                            int rm, gen_helper_gvec_3_ptr *fn)
{
    TCGv_ptr qc_ptr = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(qc_ptr, cpu_env, offsetof(CPUARMState, vfp.qc));
    tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm), qc_ptr,
                       is_q ? 16 : 8, vec_full_reg_size(s), 0, fn);
    tcg_temp_free_ptr(qc_ptr);
}
652
653
/* Expand a 4-operand AdvSIMD vector op via an out-of-line helper. */
static void gen_gvec_op4_ool(DisasContext *s, bool is_q, int rd, int rn,
                             int rm, int ra, int data, gen_helper_gvec_4 *fn)
{
    tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm),
                       vec_full_reg_offset(s, ra),
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
}
663
664
665
666
667
/*
 * Expand a 4-operand AdvSIMD FP vector op via an out-of-line helper,
 * passing the appropriate fp_status (FP16 variant when is_fp16).
 */
static void gen_gvec_op4_fpst(DisasContext *s, bool is_q, int rd, int rn,
                              int rm, int ra, bool is_fp16, int data,
                              gen_helper_gvec_4_ptr *fn)
{
    TCGv_ptr fpst = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
    tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm),
                       vec_full_reg_offset(s, ra), fpst,
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
    tcg_temp_free_ptr(fpst);
}
680
681
682
683
/*
 * Set N and Z flags from a 64-bit result: NF gets the high half,
 * ZF is the OR of both halves (zero iff the whole result is zero;
 * the flag variables use the "ZF is zero means Z set" convention).
 */
static inline void gen_set_NZ64(TCGv_i64 result)
{
    tcg_gen_extr_i64_i32(cpu_ZF, cpu_NF, result);
    tcg_gen_or_i32(cpu_ZF, cpu_ZF, cpu_NF);
}
689
690
/* Set NZCV from a logical-op result: N/Z from the value, C = V = 0. */
static inline void gen_logic_CC(int sf, TCGv_i64 result)
{
    if (sf) {
        gen_set_NZ64(result);
    } else {
        /* 32-bit: both NF and ZF come from the low word. */
        tcg_gen_extrl_i64_i32(cpu_ZF, result);
        tcg_gen_mov_i32(cpu_NF, cpu_ZF);
    }
    tcg_gen_movi_i32(cpu_CF, 0);
    tcg_gen_movi_i32(cpu_VF, 0);
}
702
703
/* dest = t0 + t1; compute NZCV.  sf selects 64-bit vs 32-bit. */
static void gen_add_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        TCGv_i64 result, flag, tmp;
        result = tcg_temp_new_i64();
        flag = tcg_temp_new_i64();
        tmp = tcg_temp_new_i64();

        /* 128-bit add: the carry-out lands in the high half (flag). */
        tcg_gen_movi_i64(tmp, 0);
        tcg_gen_add2_i64(result, flag, t0, tmp, t1, tmp);

        tcg_gen_extrl_i64_i32(cpu_CF, flag);

        gen_set_NZ64(result);

        /* V = (result ^ t0) & ~(t0 ^ t1), taken from bit 63. */
        tcg_gen_xor_i64(flag, result, t0);
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_andc_i64(flag, flag, tmp);
        tcg_temp_free_i64(tmp);
        tcg_gen_extrh_i64_i32(cpu_VF, flag);

        tcg_gen_mov_i64(dest, result);
        tcg_temp_free_i64(result);
        tcg_temp_free_i64(flag);
    } else {
        /* 32-bit: compute NF/CF directly with a 64-bit-wide add2. */
        TCGv_i32 t0_32 = tcg_temp_new_i32();
        TCGv_i32 t1_32 = tcg_temp_new_i32();
        TCGv_i32 tmp = tcg_temp_new_i32();

        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_extrl_i64_i32(t0_32, t0);
        tcg_gen_extrl_i64_i32(t1_32, t1);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, t1_32, tmp);
        tcg_gen_mov_i32(cpu_ZF, cpu_NF)
;
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
        /* dest gets the zero-extended 32-bit sum. */
        tcg_gen_extu_i32_i64(dest, cpu_NF);

        tcg_temp_free_i32(tmp);
        tcg_temp_free_i32(t0_32);
        tcg_temp_free_i32(t1_32);
    }
}
749
750
/* dest = t0 - t1; compute NZCV.  sf selects 64-bit vs 32-bit. */
static void gen_sub_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        /* 64 bit arithmetic */
        TCGv_i64 result, flag, tmp;

        result = tcg_temp_new_i64();
        flag = tcg_temp_new_i64();
        tcg_gen_sub_i64(result, t0, t1);

        gen_set_NZ64(result);

        /* C is set when no borrow occurs, i.e. t0 >= t1 unsigned. */
        tcg_gen_setcond_i64(TCG_COND_GEU, flag, t0, t1);
        tcg_gen_extrl_i64_i32(cpu_CF, flag);

        /* V = (result ^ t0) & (t0 ^ t1), taken from bit 63. */
        tcg_gen_xor_i64(flag, result, t0);
        tmp = tcg_temp_new_i64();
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_and_i64(flag, flag, tmp);
        tcg_temp_free_i64(tmp);
        tcg_gen_extrh_i64_i32(cpu_VF, flag);
        tcg_gen_mov_i64(dest, result);
        tcg_temp_free_i64(flag);
        tcg_temp_free_i64(result);
    } else {
        /* 32 bit arithmetic */
        TCGv_i32 t0_32 = tcg_temp_new_i32();
        TCGv_i32 t1_32 = tcg_temp_new_i32();
        TCGv_i32 tmp;

        tcg_gen_extrl_i64_i32(t0_32, t0);
        tcg_gen_extrl_i64_i32(t1_32, t1);
        tcg_gen_sub_i32(cpu_NF, t0_32, t1_32);
        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0_32, t1_32);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_temp_free_i32(t0_32);
        tcg_temp_free_i32(t1_32);
        tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
        tcg_temp_free_i32(tmp);
        /* dest gets the zero-extended 32-bit difference. */
        tcg_gen_extu_i32_i64(dest, cpu_NF);
    }
}
796
797
/* dest = t0 + t1 + CF, without touching the flags. */
static void gen_adc(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    TCGv_i64 flag = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(flag, cpu_CF);
    tcg_gen_add_i64(dest, t0, t1);
    tcg_gen_add_i64(dest, dest, flag);
    tcg_temp_free_i64(flag);

    if (!sf) {
        /* 32-bit form: truncate the result to 32 bits. */
        tcg_gen_ext32u_i64(dest, dest);
    }
}
810
811
/* dest = t0 + t1 + CF; compute NZCV.  sf selects 64-bit vs 32-bit. */
static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        TCGv_i64 result = tcg_temp_new_i64();
        TCGv_i64 cf_64 = tcg_temp_new_i64();
        TCGv_i64 vf_64 = tcg_temp_new_i64();
        TCGv_i64 tmp = tcg_temp_new_i64();
        TCGv_i64 zero = tcg_constant_i64(0);

        /* Two chained 128-bit adds accumulate the carry in cf_64. */
        tcg_gen_extu_i32_i64(cf_64, cpu_CF);
        tcg_gen_add2_i64(result, cf_64, t0, zero, cf_64, zero);
        tcg_gen_add2_i64(result, cf_64, result, cf_64, t1, zero);
        tcg_gen_extrl_i64_i32(cpu_CF, cf_64);
        gen_set_NZ64(result);

        /* V = (result ^ t0) & ~(t0 ^ t1), taken from bit 63. */
        tcg_gen_xor_i64(vf_64, result, t0);
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_andc_i64(vf_64, vf_64, tmp);
        tcg_gen_extrh_i64_i32(cpu_VF, vf_64);

        tcg_gen_mov_i64(dest, result);

        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(vf_64);
        tcg_temp_free_i64(cf_64);
        tcg_temp_free_i64(result);
    } else {
        TCGv_i32 t0_32 = tcg_temp_new_i32();
        TCGv_i32 t1_32 = tcg_temp_new_i32();
        TCGv_i32 tmp = tcg_temp_new_i32();
        TCGv_i32 zero = tcg_constant_i32(0);

        tcg_gen_extrl_i64_i32(t0_32, t0);
        tcg_gen_extrl_i64_i32(t1_32, t1);
        /* Chained 64-bit adds: NF holds the sum, CF the carry-out. */
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, zero, cpu_CF, zero);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1_32, zero);

        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
        tcg_gen_extu_i32_i64(dest, cpu_NF);

        tcg_temp_free_i32(tmp);
        tcg_temp_free_i32(t1_32);
        tcg_temp_free_i32(t0_32);
    }
}
860
861
862
863
864
865
866
867
/*
 * Store a GPR to memory with an explicit mmu index, optionally
 * recording an ISS (instruction syndrome) for a possible data abort.
 */
static void do_gpr_st_memidx(DisasContext *s, TCGv_i64 source,
                             TCGv_i64 tcg_addr, MemOp memop, int memidx,
                             bool iss_valid,
                             unsigned int iss_srt,
                             bool iss_sf, bool iss_ar)
{
    memop = finalize_memop(s, memop);
    tcg_gen_qemu_st_i64(source, tcg_addr, memidx, memop);

    if (iss_valid) {
        uint32_t syn;

        /* Stores never sign-extend, hence the "false" sse field. */
        syn = syn_data_abort_with_iss(0,
                                      (memop & MO_SIZE),
                                      false,
                                      iss_srt,
                                      iss_sf,
                                      iss_ar,
                                      0, 0, 0, 0, 0, false);
        disas_set_insn_syndrome(s, syn);
    }
}
890
/* Store a GPR to memory using the current context's mmu index. */
static void do_gpr_st(DisasContext *s, TCGv_i64 source,
                      TCGv_i64 tcg_addr, MemOp memop,
                      bool iss_valid,
                      unsigned int iss_srt,
                      bool iss_sf, bool iss_ar)
{
    do_gpr_st_memidx(s, source, tcg_addr, memop, get_mem_index(s),
                     iss_valid, iss_srt, iss_sf, iss_ar);
}
900
901
902
903
/*
 * Load into a GPR with an explicit mmu index.  With extend set and a
 * signed memop, the value is sign-extended to 32 bits then
 * zero-extended into the 64-bit register (the "32-bit register write"
 * form).  Optionally records an ISS for a possible data abort.
 */
static void do_gpr_ld_memidx(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
                             MemOp memop, bool extend, int memidx,
                             bool iss_valid, unsigned int iss_srt,
                             bool iss_sf, bool iss_ar)
{
    memop = finalize_memop(s, memop);
    tcg_gen_qemu_ld_i64(dest, tcg_addr, memidx, memop);

    if (extend && (memop & MO_SIGN)) {
        g_assert((memop & MO_SIZE) <= MO_32);
        tcg_gen_ext32u_i64(dest, dest);
    }

    if (iss_valid) {
        uint32_t syn;

        syn = syn_data_abort_with_iss(0,
                                      (memop & MO_SIZE),
                                      (memop & MO_SIGN) != 0,
                                      iss_srt,
                                      iss_sf,
                                      iss_ar,
                                      0, 0, 0, 0, 0, false);
        disas_set_insn_syndrome(s, syn);
    }
}
930
/* Load into a GPR using the current context's mmu index. */
static void do_gpr_ld(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
                      MemOp memop, bool extend,
                      bool iss_valid, unsigned int iss_srt,
                      bool iss_sf, bool iss_ar)
{
    do_gpr_ld_memidx(s, dest, tcg_addr, memop, extend, get_mem_index(s),
                     iss_valid, iss_srt, iss_sf, iss_ar);
}
939
940
941
942
/*
 * Store an FP/vector register to memory.  size is log2 of the byte
 * count; sizes up to 8 bytes use a single access, a 16-byte (Q) store
 * is split into two 8-byte accesses in memory order.
 */
static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, int size)
{
    /* Low half of the register always participates. */
    TCGv_i64 tmplo = tcg_temp_new_i64();
    MemOp mop;

    tcg_gen_ld_i64(tmplo, cpu_env, fp_reg_offset(s, srcidx, MO_64));

    if (size < 4) {
        mop = finalize_memop(s, size);
        tcg_gen_qemu_st_i64(tmplo, tcg_addr, get_mem_index(s), mop);
    } else {
        bool be = s->be_data == MO_BE;
        TCGv_i64 tcg_hiaddr = tcg_temp_new_i64();
        TCGv_i64 tmphi = tcg_temp_new_i64();

        tcg_gen_ld_i64(tmphi, cpu_env, fp_reg_hi_offset(s, srcidx));

        /* Align-16 is checked only on the first of the two stores. */
        mop = s->be_data | MO_UQ;
        tcg_gen_qemu_st_i64(be ? tmphi : tmplo, tcg_addr, get_mem_index(s),
                            mop | (s->align_mem ? MO_ALIGN_16 : 0));
        tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
        tcg_gen_qemu_st_i64(be ? tmplo : tmphi, tcg_hiaddr,
                            get_mem_index(s), mop);

        tcg_temp_free_i64(tcg_hiaddr);
        tcg_temp_free_i64(tmphi);
    }

    tcg_temp_free_i64(tmplo);
}
974
975
976
977
/*
 * Load an FP/vector register from memory.  size is log2 of the byte
 * count; a 16-byte (Q) load is split into two 8-byte accesses.  The
 * unwritten upper part of the register is cleared afterwards.
 */
static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size)
{
    /* tmphi stays NULL unless this is a 128-bit load. */
    TCGv_i64 tmplo = tcg_temp_new_i64();
    TCGv_i64 tmphi = NULL;
    MemOp mop;

    if (size < 4) {
        mop = finalize_memop(s, size);
        tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), mop);
    } else {
        bool be = s->be_data == MO_BE;
        TCGv_i64 tcg_hiaddr;

        tmphi = tcg_temp_new_i64();
        tcg_hiaddr = tcg_temp_new_i64();

        /* Align-16 is checked only on the first of the two loads. */
        mop = s->be_data | MO_UQ;
        tcg_gen_qemu_ld_i64(be ? tmphi : tmplo, tcg_addr, get_mem_index(s),
                            mop | (s->align_mem ? MO_ALIGN_16 : 0));
        tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
        tcg_gen_qemu_ld_i64(be ? tmplo : tmphi, tcg_hiaddr,
                            get_mem_index(s), mop);
        tcg_temp_free_i64(tcg_hiaddr);
    }

    tcg_gen_st_i64(tmplo, cpu_env, fp_reg_offset(s, destidx, MO_64));
    tcg_temp_free_i64(tmplo);

    if (tmphi) {
        tcg_gen_st_i64(tmphi, cpu_env, fp_reg_hi_offset(s, destidx));
        tcg_temp_free_i64(tmphi);
    }
    clear_vec_high(s, tmphi != NULL, destidx);
}
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
/*
 * Read one element of a vector register into an i64, extended
 * (zero or sign per MO_SIGN) from the element size given in memop.
 */
static void read_vec_element(DisasContext *s, TCGv_i64 tcg_dest, int srcidx,
                             int element, MemOp memop)
{
    int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
    switch ((unsigned)memop) {
    case MO_8:
        tcg_gen_ld8u_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_ld16u_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_32:
        tcg_gen_ld32u_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_8|MO_SIGN:
        tcg_gen_ld8s_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16|MO_SIGN:
        tcg_gen_ld16s_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_32|MO_SIGN:
        tcg_gen_ld32s_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_64:
    case MO_64|MO_SIGN:
        /* Full-width: signedness is irrelevant. */
        tcg_gen_ld_i64(tcg_dest, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}
1058
/* As read_vec_element, but into an i32 (element sizes up to MO_32). */
static void read_vec_element_i32(DisasContext *s, TCGv_i32 tcg_dest, int srcidx,
                                 int element, MemOp memop)
{
    int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_ld8u_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_ld16u_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_8|MO_SIGN:
        tcg_gen_ld8s_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16|MO_SIGN:
        tcg_gen_ld16s_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_32:
    case MO_32|MO_SIGN:
        /* Full-width: signedness is irrelevant. */
        tcg_gen_ld_i32(tcg_dest, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}
1084
1085
/* Write the low (memop-sized) part of an i64 to one vector element. */
static void write_vec_element(DisasContext *s, TCGv_i64 tcg_src, int destidx,
                              int element, MemOp memop)
{
    int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_st8_i64(tcg_src, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_st16_i64(tcg_src, cpu_env, vect_off);
        break;
    case MO_32:
        tcg_gen_st32_i64(tcg_src, cpu_env, vect_off);
        break;
    case MO_64:
        tcg_gen_st_i64(tcg_src, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}
1107
/* As write_vec_element, but from an i32 (element sizes up to MO_32). */
static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src,
                                  int destidx, int element, MemOp memop)
{
    int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_st8_i32(tcg_src, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_st16_i32(tcg_src, cpu_env, vect_off);
        break;
    case MO_32:
        tcg_gen_st_i32(tcg_src, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}
1126
1127
/* Store one vector element to memory with the given MemOp. */
static void do_vec_st(DisasContext *s, int srcidx, int element,
                      TCGv_i64 tcg_addr, MemOp mop)
{
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();

    read_vec_element(s, tcg_tmp, srcidx, element, mop & MO_SIZE);
    tcg_gen_qemu_st_i64(tcg_tmp, tcg_addr, get_mem_index(s), mop);

    tcg_temp_free_i64(tcg_tmp);
}
1138
1139
/* Load one vector element from memory with the given MemOp. */
static void do_vec_ld(DisasContext *s, int destidx, int element,
                      TCGv_i64 tcg_addr, MemOp mop)
{
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();

    tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr, get_mem_index(s), mop);
    write_vec_element(s, tcg_tmp, destidx, element, mop & MO_SIZE);

    tcg_temp_free_i64(tcg_tmp);
}
1150
1151
1152
1153
1154
1155
1156
1157
/*
 * Check for FP/SIMD access permission only (no SME streaming check).
 * Returns true if access is allowed; otherwise generates the FP
 * access trap and returns false.  Must be called at most once per
 * instruction (enforced by the assert in debug terms via
 * fp_access_checked).
 */
static bool fp_access_check_only(DisasContext *s)
{
    if (s->fp_excp_el) {
        assert(!s->fp_access_checked);
        s->fp_access_checked = true;

        gen_exception_insn_el(s, s->pc_curr, EXCP_UDEF,
                              syn_fp_access_trap(1, 0xe, false, 0),
                              s->fp_excp_el);
        return false;
    }
    s->fp_access_checked = true;
    return true;
}
1172
/*
 * Full FP/SIMD access check: the basic permission check plus the SME
 * trap for non-streaming instructions while in streaming mode.
 */
static bool fp_access_check(DisasContext *s)
{
    if (!fp_access_check_only(s)) {
        return false;
    }
    if (s->sme_trap_nonstreaming && s->is_nonstreaming) {
        gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
                           syn_smetrap(SME_ET_Streaming, false));
        return false;
    }
    return true;
}
1185
1186
1187
1188
1189
1190
/*
 * Check for SVE access permission.  In streaming-SVE mode (or when
 * only SME provides SVE) check streaming-mode enablement instead of
 * the SVE trap.  Returns true if access is allowed; on failure the
 * appropriate exception has been generated.
 */
bool sve_access_check(DisasContext *s)
{
    if (s->pstate_sm || !dc_isar_feature(aa64_sve, s)) {
        assert(dc_isar_feature(aa64_sme, s));
        if (!sme_sm_enabled_check(s)) {
            goto fail_exit;
        }
    } else if (s->sve_excp_el) {
        gen_exception_insn_el(s, s->pc_curr, EXCP_UDEF,
                              syn_sve_access_trap(), s->sve_excp_el);
        goto fail_exit;
    }
    s->sve_access_checked = true;
    return fp_access_check(s);

 fail_exit:
    /* Record the check so that translate-sve does not re-check. */
    assert(!s->sve_access_checked);
    s->sve_access_checked = true;
    return false;
}
1212
1213
1214
1215
1216
1217
/*
 * Check for SME access permission; generate the SME access trap and
 * return false when SME accesses trap at s->sme_excp_el.
 */
static bool sme_access_check(DisasContext *s)
{
    if (s->sme_excp_el) {
        gen_exception_insn_el(s, s->pc_curr, EXCP_UDEF,
                              syn_smetrap(SME_ET_AccessTrap, false),
                              s->sme_excp_el);
        return false;
    }
    return true;
}
1228
1229
/*
 * Check that SME is enabled, preferring whichever of the SME and FP
 * traps would fire at the lower (closer) exception level.
 */
bool sme_enabled_check(DisasContext *s)
{
    /*
     * When the SME trap EL is below (or FP traps are absent), take the
     * SME check path; note it also sets fp_access_checked.
     */
    if (!s->fp_excp_el || s->sme_excp_el < s->fp_excp_el) {
        s->fp_access_checked = true;
        return sme_access_check(s);
    }
    return fp_access_check_only(s);
}
1243
1244
/*
 * As sme_enabled_check, but also verify the PSTATE.{SM,ZA} bits
 * required by the instruction (encoded in req as SVCR fields),
 * generating the matching SME trap syndrome when one is missing.
 */
bool sme_enabled_check_with_svcr(DisasContext *s, unsigned req)
{
    if (!sme_enabled_check(s)) {
        return false;
    }
    if (FIELD_EX64(req, SVCR, SM) && !s->pstate_sm) {
        gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
                           syn_smetrap(SME_ET_NotStreaming, false));
        return false;
    }
    if (FIELD_EX64(req, SVCR, ZA) && !s->pstate_za) {
        gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
                           syn_smetrap(SME_ET_InactiveZA, false));
        return false;
    }
    return true;
}
1262
1263
1264
1265
1266
1267
/*
 * Apply the register-extend operation used by extended-register
 * address/data forms: option bits 1:0 select the extract width
 * (8/16/32/64), bit 2 selects signed vs unsigned, then shift left
 * by 0..4.
 */
static void ext_and_shift_reg(TCGv_i64 tcg_out, TCGv_i64 tcg_in,
                              int option, unsigned int shift)
{
    int extsize = extract32(option, 0, 2);
    bool is_signed = extract32(option, 2, 1);

    if (is_signed) {
        switch (extsize) {
        case 0:
            tcg_gen_ext8s_i64(tcg_out, tcg_in);
            break;
        case 1:
            tcg_gen_ext16s_i64(tcg_out, tcg_in);
            break;
        case 2:
            tcg_gen_ext32s_i64(tcg_out, tcg_in);
            break;
        case 3:
            tcg_gen_mov_i64(tcg_out, tcg_in);
            break;
        }
    } else {
        switch (extsize) {
        case 0:
            tcg_gen_ext8u_i64(tcg_out, tcg_in);
            break;
        case 1:
            tcg_gen_ext16u_i64(tcg_out, tcg_in);
            break;
        case 2:
            tcg_gen_ext32u_i64(tcg_out, tcg_in);
            break;
        case 3:
            tcg_gen_mov_i64(tcg_out, tcg_in);
            break;
        }
    }

    if (shift) {
        tcg_gen_shli_i64(tcg_out, tcg_out, shift);
    }
}
1310
/*
 * SP alignment check hook for instructions that use SP as a base
 * address.  Deliberately a no-op: the architectural SP-alignment
 * check is not implemented here (QEMU does not enforce it), but the
 * call sites mark where it would apply.
 */
static inline void gen_check_sp_alignment(DisasContext *s)
{
}
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335static inline AArch64DecodeFn *lookup_disas_fn(const AArch64DecodeTable *table,
1336 uint32_t insn)
1337{
1338 const AArch64DecodeTable *tptr = table;
1339
1340 while (tptr->mask) {
1341 if ((insn & tptr->mask) == tptr->pattern) {
1342 return tptr->disas_fn;
1343 }
1344 tptr++;
1345 }
1346 return NULL;
1347}
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
/*
 * Unconditional branch (immediate): B / BL.
 *   31 30..26 25..0
 *   op 0 0101 imm26
 */
static void disas_uncond_b_imm(DisasContext *s, uint32_t insn)
{
    uint64_t addr = s->pc_curr + sextract32(insn, 0, 26) * 4;

    if (insn & (1U << 31)) {
        /* BL: write the return address into X30 (LR). */
        tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next);
    }

    /* B/BL clear PSTATE.BTYPE. */
    reset_btype(s);
    gen_goto_tb(s, 0, addr);
}
1376
1377
1378
1379
1380
1381
1382
/*
 * Compare and branch (immediate): CBZ / CBNZ.
 *   31 30..25 24 23..5  4..0
 *   sf 011010 op imm19  rt     (op: 0 = CBZ, 1 = CBNZ)
 */
static void disas_comp_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int sf, op, rt;
    uint64_t addr;
    TCGLabel *label_match;
    TCGv_i64 tcg_cmp;

    sf = extract32(insn, 31, 1);
    op = extract32(insn, 24, 1);
    rt = extract32(insn, 0, 5);
    addr = s->pc_curr + sextract32(insn, 5, 19) * 4;

    tcg_cmp = read_cpu_reg(s, rt, sf);
    label_match = gen_new_label();

    reset_btype(s);
    tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
                        tcg_cmp, 0, label_match);

    /* Fall-through path: continue with the next instruction. */
    gen_goto_tb(s, 0, s->base.pc_next);
    gen_set_label(label_match);
    gen_goto_tb(s, 1, addr);
}
1406
1407
1408
1409
1410
1411
1412
/*
 * Test bit and branch (immediate): TBZ / TBNZ.
 *   31 30..25 24 23..19 18..5 4..0
 *   b5 011011 op  b40   imm14 rt    (bit index = b5:b40)
 */
static void disas_test_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int bit_pos, op, rt;
    uint64_t addr;
    TCGLabel *label_match;
    TCGv_i64 tcg_cmp;

    bit_pos = (extract32(insn, 31, 1) << 5) | extract32(insn, 19, 5);
    op = extract32(insn, 24, 1);
    addr = s->pc_curr + sextract32(insn, 5, 14) * 4;
    rt = extract32(insn, 0, 5);

    tcg_cmp = tcg_temp_new_i64();
    /* Isolate the tested bit; branch on it being (non)zero. */
    tcg_gen_andi_i64(tcg_cmp, cpu_reg(s, rt), (1ULL << bit_pos));
    label_match = gen_new_label();

    reset_btype(s);
    tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
                        tcg_cmp, 0, label_match);
    tcg_temp_free_i64(tcg_cmp);
    /* Fall-through path: continue with the next instruction. */
    gen_goto_tb(s, 0, s->base.pc_next);
    gen_set_label(label_match);
    gen_goto_tb(s, 1, addr);
}
1437
1438
1439
1440
1441
1442
1443
1444static void disas_cond_b_imm(DisasContext *s, uint32_t insn)
1445{
1446 unsigned int cond;
1447 uint64_t addr;
1448
1449 if ((insn & (1 << 4)) || (insn & (1 << 24))) {
1450 unallocated_encoding(s);
1451 return;
1452 }
1453 addr = s->pc_curr + sextract32(insn, 5, 19) * 4;
1454 cond = extract32(insn, 0, 4);
1455
1456 reset_btype(s);
1457 if (cond < 0x0e) {
1458
1459 TCGLabel *label_match = gen_new_label();
1460 arm_gen_test_cc(cond, label_match);
1461 gen_goto_tb(s, 0, s->base.pc_next);
1462 gen_set_label(label_match);
1463 gen_goto_tb(s, 1, addr);
1464 } else {
1465
1466 gen_goto_tb(s, 0, addr);
1467 }
1468}
1469
1470
/* HINT instruction group, including various allocated HINTs */
static void handle_hint(DisasContext *s, uint32_t insn,
                        unsigned int op1, unsigned int op2, unsigned int crm)
{
    unsigned int selector = crm << 3 | op2;

    if (op1 != 3) {
        unallocated_encoding(s);
        return;
    }

    switch (selector) {
    case 0b00000: /* NOP */
        break;
    case 0b00011: /* WFI */
        s->base.is_jmp = DISAS_WFI;
        break;
    case 0b00001: /* YIELD */
        /*
         * When running in MTTCG we don't generate jumps to the yield and
         * WFE helpers as it won't affect the scheduling of other vCPUs.
         * If we wanted to more completely model WFE/SEV so we don't busy
         * spin unnecessarily we would need to do something more involved.
         */
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            s->base.is_jmp = DISAS_YIELD;
        }
        break;
    case 0b00010: /* WFE */
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            s->base.is_jmp = DISAS_WFE;
        }
        break;
    case 0b00100: /* SEV */
    case 0b00101: /* SEVL */
    case 0b00110: /* DGH */
        /* we treat all as NOP at least for now */
        break;
    case 0b00111: /* XPACLRI */
        /* Strip the PAC from LR (X30); NOP when PAuth is inactive. */
        if (s->pauth_active) {
            gen_helper_xpaci(cpu_X[30], cpu_env, cpu_X[30]);
        }
        break;
    case 0b01000: /* PACIA1716: sign X17 using X16 as the modifier */
        if (s->pauth_active) {
            gen_helper_pacia(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
        }
        break;
    case 0b01010: /* PACIB1716 */
        if (s->pauth_active) {
            gen_helper_pacib(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
        }
        break;
    case 0b01100: /* AUTIA1716: authenticate X17 using X16 as modifier */
        if (s->pauth_active) {
            gen_helper_autia(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
        }
        break;
    case 0b01110: /* AUTIB1716 */
        if (s->pauth_active) {
            gen_helper_autib(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
        }
        break;
    case 0b10000: /* ESB */
        /* Without RAS, we must implement this as NOP. */
        if (dc_isar_feature(aa64_ras, s)) {
            /*
             * QEMU does not have a source of physical SErrors,
             * so we are only concerned with virtual SErrors.
             * The pseudocode in the ARM ARM for this case is
             *   if PSTATE.EL IN {EL0, EL1} && EL2Enabled() then
             *      AArch64.vESBOperation();
             * Most of the condition can be evaluated at translation time.
             * Test for EL2 present, and defer test for SEL2 to runtime.
             */
            if (s->current_el <= 1 && arm_dc_feature(s, ARM_FEATURE_EL2)) {
                gen_helper_vesb(cpu_env);
            }
        }
        break;
    case 0b11000: /* PACIAZ: sign LR with a zero modifier */
        if (s->pauth_active) {
            gen_helper_pacia(cpu_X[30], cpu_env, cpu_X[30],
                             new_tmp_a64_zero(s));
        }
        break;
    case 0b11001: /* PACIASP: sign LR with SP as the modifier */
        if (s->pauth_active) {
            gen_helper_pacia(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
        }
        break;
    case 0b11010: /* PACIBZ */
        if (s->pauth_active) {
            gen_helper_pacib(cpu_X[30], cpu_env, cpu_X[30],
                             new_tmp_a64_zero(s));
        }
        break;
    case 0b11011: /* PACIBSP */
        if (s->pauth_active) {
            gen_helper_pacib(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
        }
        break;
    case 0b11100: /* AUTIAZ: authenticate LR with a zero modifier */
        if (s->pauth_active) {
            gen_helper_autia(cpu_X[30], cpu_env, cpu_X[30],
                             new_tmp_a64_zero(s));
        }
        break;
    case 0b11101: /* AUTIASP */
        if (s->pauth_active) {
            gen_helper_autia(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
        }
        break;
    case 0b11110: /* AUTIBZ */
        if (s->pauth_active) {
            gen_helper_autib(cpu_X[30], cpu_env, cpu_X[30],
                             new_tmp_a64_zero(s));
        }
        break;
    case 0b11111: /* AUTIBSP */
        if (s->pauth_active) {
            gen_helper_autib(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
        }
        break;
    default:
        /* default specified as NOP equivalent */
        break;
    }
}
1598
/*
 * CLREX: clear the local exclusive monitor by invalidating the
 * recorded exclusive address (-1 can never match a real address).
 */
static void gen_clrex(DisasContext *s, uint32_t insn)
{
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
1603
1604
/* CLREX, DSB, DMB, ISB and SB */
static void handle_sync(DisasContext *s, uint32_t insn,
                        unsigned int op1, unsigned int op2, unsigned int crm)
{
    TCGBar bar;

    if (op1 != 3) {
        unallocated_encoding(s);
        return;
    }

    switch (op2) {
    case 2: /* CLREX */
        gen_clrex(s, insn);
        return;
    case 4: /* DSB */
    case 5: /* DMB */
        /* Map the CRm barrier-type field onto TCG barrier flags. */
        switch (crm & 3) {
        case 1: /* MBReqTypes_Reads */
            bar = TCG_BAR_SC | TCG_MO_LD_LD | TCG_MO_LD_ST;
            break;
        case 2: /* MBReqTypes_Writes */
            bar = TCG_BAR_SC | TCG_MO_ST_ST;
            break;
        default: /* MBReqTypes_All */
            bar = TCG_BAR_SC | TCG_MO_ALL;
            break;
        }
        tcg_gen_mb(bar);
        return;
    case 6: /* ISB */
        /*
         * We need to break the TB after this insn to execute
         * self-modifying code correctly and also to take
         * any pending interrupts immediately.
         */
        reset_btype(s);
        gen_goto_tb(s, 0, s->base.pc_next);
        return;

    case 7: /* SB */
        if (crm != 0 || !dc_isar_feature(aa64_sb, s)) {
            goto do_unallocated;
        }
        /*
         * TCG does not speculate, so there is no speculation barrier
         * to emit; implement SB as a full barrier plus ending the TB.
         */
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        gen_goto_tb(s, 0, s->base.pc_next);
        return;

    default:
    do_unallocated:
        unallocated_encoding(s);
        return;
    }
}
1661
/*
 * XAFlag: convert the NZCV flags per the Arm ARM XAFlag pseudocode.
 * Note QEMU stores Z inverted in cpu_ZF (Z is set iff cpu_ZF == 0),
 * and N/V in the sign bit of cpu_NF/cpu_VF.
 */
static void gen_xaflag(void)
{
    TCGv_i32 z = tcg_temp_new_i32();

    /* z = 1 iff the Z flag is set */
    tcg_gen_setcondi_i32(TCG_COND_EQ, z, cpu_ZF, 0);

    /*
     * N = (!C & !Z), held in the sign bit of cpu_NF:
     * (!C & !Z) << 31
     * (!(C | Z)) << 31
     * ~((C | Z) << 31)
     * ~-(C | Z)
     * (C | Z) - 1
     */
    tcg_gen_or_i32(cpu_NF, cpu_CF, z);
    tcg_gen_subi_i32(cpu_NF, cpu_NF, 1);

    /* Z = !(Z & C); remember cpu_ZF is inverted */
    tcg_gen_and_i32(cpu_ZF, z, cpu_CF);
    tcg_gen_xori_i32(cpu_ZF, cpu_ZF, 1);

    /* V = (!C & Z): (!C & Z) << 31 -> -(Z & ~C) */
    tcg_gen_andc_i32(cpu_VF, z, cpu_CF);
    tcg_gen_neg_i32(cpu_VF, cpu_VF);

    /* C = C | Z */
    tcg_gen_or_i32(cpu_CF, cpu_CF, z);

    tcg_temp_free_i32(z);
}
1691
/*
 * AXFlag: convert the NZCV flags per the Arm ARM AXFlag pseudocode.
 * Note QEMU stores Z inverted in cpu_ZF, and V in the sign bit of cpu_VF.
 */
static void gen_axflag(void)
{
    tcg_gen_sari_i32(cpu_VF, cpu_VF, 31);         /* V ? -1 : 0 */
    tcg_gen_andc_i32(cpu_CF, cpu_CF, cpu_VF);     /* C & !V */

    /* !(Z | V) -> !(!ZF | V) -> ZF & !V -> ZF & ~VF */
    tcg_gen_andc_i32(cpu_ZF, cpu_ZF, cpu_VF);

    /* N and V are cleared */
    tcg_gen_movi_i32(cpu_NF, 0);
    tcg_gen_movi_i32(cpu_VF, 0);
}
1703
1704
/* MSR (immediate) - move immediate to processor state field */
static void handle_msr_i(DisasContext *s, uint32_t insn,
                         unsigned int op1, unsigned int op2, unsigned int crm)
{
    int op = op1 << 3 | op2;

    /* End the TB by default, chaining is ok.  */
    s->base.is_jmp = DISAS_TOO_MANY;

    switch (op) {
    case 0x00: /* CFINV */
        if (crm != 0 || !dc_isar_feature(aa64_condm_4, s)) {
            goto do_unallocated;
        }
        /* Invert the C flag; no need to end the TB for this. */
        tcg_gen_xori_i32(cpu_CF, cpu_CF, 1);
        s->base.is_jmp = DISAS_NEXT;
        break;

    case 0x01: /* XAFlag */
        if (crm != 0 || !dc_isar_feature(aa64_condm_5, s)) {
            goto do_unallocated;
        }
        gen_xaflag();
        s->base.is_jmp = DISAS_NEXT;
        break;

    case 0x02: /* AXFlag */
        if (crm != 0 || !dc_isar_feature(aa64_condm_5, s)) {
            goto do_unallocated;
        }
        gen_axflag();
        s->base.is_jmp = DISAS_NEXT;
        break;

    case 0x03: /* UAO */
        if (!dc_isar_feature(aa64_uao, s) || s->current_el == 0) {
            goto do_unallocated;
        }
        if (crm & 1) {
            set_pstate_bits(PSTATE_UAO);
        } else {
            clear_pstate_bits(PSTATE_UAO);
        }
        /* UAO is cached in hflags; rebuild them. */
        gen_rebuild_hflags(s);
        break;

    case 0x04: /* PAN */
        if (!dc_isar_feature(aa64_pan, s) || s->current_el == 0) {
            goto do_unallocated;
        }
        if (crm & 1) {
            set_pstate_bits(PSTATE_PAN);
        } else {
            clear_pstate_bits(PSTATE_PAN);
        }
        /* PAN is cached in hflags; rebuild them. */
        gen_rebuild_hflags(s);
        break;

    case 0x05: /* SPSel */
        if (s->current_el == 0) {
            goto do_unallocated;
        }
        gen_helper_msr_i_spsel(cpu_env, tcg_constant_i32(crm & PSTATE_SP));
        break;

    case 0x19: /* SSBS */
        if (!dc_isar_feature(aa64_ssbs, s)) {
            goto do_unallocated;
        }
        if (crm & 1) {
            set_pstate_bits(PSTATE_SSBS);
        } else {
            clear_pstate_bits(PSTATE_SSBS);
        }
        /* Don't need to rebuild hflags since SSBS is a nop */
        break;

    case 0x1a: /* DIT */
        if (!dc_isar_feature(aa64_dit, s)) {
            goto do_unallocated;
        }
        if (crm & 1) {
            set_pstate_bits(PSTATE_DIT);
        } else {
            clear_pstate_bits(PSTATE_DIT);
        }
        /* There's no need to rebuild hflags because DIT is a nop */
        break;

    case 0x1e: /* DAIFSet */
        gen_helper_msr_i_daifset(cpu_env, tcg_constant_i32(crm));
        break;

    case 0x1f: /* DAIFClear */
        gen_helper_msr_i_daifclear(cpu_env, tcg_constant_i32(crm));
        /* For DAIFClear, exit the cpu loop to re-evaluate pending IRQs.  */
        s->base.is_jmp = DISAS_UPDATE_EXIT;
        break;

    case 0x1c: /* TCO */
        if (dc_isar_feature(aa64_mte, s)) {
            /* Full MTE is enabled -- set the TCO bit as directed. */
            if (crm & 1) {
                set_pstate_bits(PSTATE_TCO);
            } else {
                clear_pstate_bits(PSTATE_TCO);
            }
            gen_rebuild_hflags(s);
            /* Many factors, including TCO, go into MTE_ACTIVE. */
            s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
        } else if (dc_isar_feature(aa64_mte_insn_reg, s)) {
            /* Only "instructions accessible at EL0" -- PSTATE.TCO is WI.  */
            s->base.is_jmp = DISAS_NEXT;
        } else {
            goto do_unallocated;
        }
        break;

    case 0x1b: /* SVCR* (SMSTART/SMSTOP) */
        if (!dc_isar_feature(aa64_sme, s) || crm < 2 || crm > 7) {
            goto do_unallocated;
        }
        if (sme_access_check(s)) {
            bool i = crm & 1;       /* new value for the selected bit(s) */
            bool changed = false;

            /* crm bit 1 selects PSTATE.SM, bit 2 selects PSTATE.ZA. */
            if ((crm & 2) && i != s->pstate_sm) {
                gen_helper_set_pstate_sm(cpu_env, tcg_constant_i32(i));
                changed = true;
            }
            if ((crm & 4) && i != s->pstate_za) {
                gen_helper_set_pstate_za(cpu_env, tcg_constant_i32(i));
                changed = true;
            }
            if (changed) {
                gen_rebuild_hflags(s);
            } else {
                /* Nothing changed; no need to end the TB. */
                s->base.is_jmp = DISAS_NEXT;
            }
        }
        break;

    default:
    do_unallocated:
        unallocated_encoding(s);
        return;
    }
}
1852
/*
 * Pack the NZCV flags into bits [31:28] of @tcg_rt (all other bits
 * zero), assembling them from QEMU's split flag variables.
 */
static void gen_get_nzcv(TCGv_i64 tcg_rt)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    TCGv_i32 nzcv = tcg_temp_new_i32();

    /* build bit 31, N: sign bit of cpu_NF */
    tcg_gen_andi_i32(nzcv, cpu_NF, (1U << 31));
    /* build bit 30, Z: cpu_ZF holds Z inverted (Z set iff cpu_ZF == 0) */
    tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_ZF, 0);
    tcg_gen_deposit_i32(nzcv, nzcv, tmp, 30, 1);
    /* build bit 29, C */
    tcg_gen_deposit_i32(nzcv, nzcv, cpu_CF, 29, 1);
    /* build bit 28, V: sign bit of cpu_VF */
    tcg_gen_shri_i32(tmp, cpu_VF, 31);
    tcg_gen_deposit_i32(nzcv, nzcv, tmp, 28, 1);
    /* generate result */
    tcg_gen_extu_i32_i64(tcg_rt, nzcv);

    tcg_temp_free_i32(nzcv);
    tcg_temp_free_i32(tmp);
}
1874
/*
 * Unpack NZCV from bits [31:28] of @tcg_rt into QEMU's split flag
 * variables (cpu_NF/ZF/CF/VF).
 */
static void gen_set_nzcv(TCGv_i64 tcg_rt)
{
    TCGv_i32 nzcv = tcg_temp_new_i32();

    /* take NZCV from R[t] */
    tcg_gen_extrl_i64_i32(nzcv, tcg_rt);

    /* bit 31, N */
    tcg_gen_andi_i32(cpu_NF, nzcv, (1U << 31));
    /* bit 30, Z: cpu_ZF must end up zero exactly when Z is set */
    tcg_gen_andi_i32(cpu_ZF, nzcv, (1 << 30));
    tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_ZF, cpu_ZF, 0);
    /* bit 29, C */
    tcg_gen_andi_i32(cpu_CF, nzcv, (1 << 29));
    tcg_gen_shri_i32(cpu_CF, cpu_CF, 29);
    /* bit 28, V: shifted into the sign bit of cpu_VF */
    tcg_gen_andi_i32(cpu_VF, nzcv, (1 << 28));
    tcg_gen_shli_i32(cpu_VF, cpu_VF, 3);
    tcg_temp_free_i32(nzcv);
}
1895
static void gen_sysreg_undef(DisasContext *s, bool isread,
                             uint8_t op0, uint8_t op1, uint8_t op2,
                             uint8_t crn, uint8_t crm, uint8_t rt)
{
    /*
     * Generate code to emit an UNDEF with correct syndrome
     * information for a failed system register access.
     * This is EC_UNCATEGORIZED (ie a standard UNDEF) in most cases,
     * but if FEAT_IDST is implemented then read accesses to registers
     * in the feature ID space are reported with the EC of a trapped
     * sysreg access instead.
     */
    uint32_t syndrome;

    if (isread && dc_isar_feature(aa64_ids, s) &&
        arm_cpreg_encoding_in_idspace(op0, op1, op2, crn, crm)) {
        syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
    } else {
        syndrome = syn_uncategorized();
    }
    gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syndrome);
}
1918
1919
1920
1921
1922
1923
1924
1925
/*
 * MRS - move from system register
 * MSR (register) - move to system register
 * SYS
 * SYSL
 * These are all essentially the same insn in 'read' and 'write'
 * versions, with varying op0 fields.
 */
static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
                       unsigned int op0, unsigned int op1, unsigned int op2,
                       unsigned int crn, unsigned int crm, unsigned int rt)
{
    const ARMCPRegInfo *ri;
    TCGv_i64 tcg_rt;

    ri = get_arm_cp_reginfo(s->cp_regs,
                            ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP,
                                               crn, crm, op0, op1, op2));

    if (!ri) {
        /*
         * Unknown register; this might be a guest error or a QEMU
         * unimplemented feature.
         */
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch64 "
                      "system register op0:%d op1:%d crn:%d crm:%d op2:%d\n",
                      isread ? "read" : "write", op0, op1, crn, crm, op2);
        gen_sysreg_undef(s, isread, op0, op1, op2, crn, crm, rt);
        return;
    }

    /* Check access permissions */
    if (!cp_access_ok(s->current_el, ri, isread)) {
        gen_sysreg_undef(s, isread, op0, op1, op2, crn, crm, rt);
        return;
    }

    if (ri->accessfn) {
        /*
         * Emit code to perform further access permissions checks at
         * runtime; this may result in an exception.
         */
        uint32_t syndrome;

        syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
        gen_a64_set_pc_im(s->pc_curr);
        gen_helper_access_check_cp_reg(cpu_env,
                                       tcg_constant_ptr(ri),
                                       tcg_constant_i32(syndrome),
                                       tcg_constant_i32(isread));
    } else if (ri->type & ARM_CP_RAISES_EXC) {
        /*
         * The readfn or writefn might raise an exception;
         * synchronize the CPU state in case it does.
         */
        gen_a64_set_pc_im(s->pc_curr);
    }

    /* Handle special cases first */
    switch (ri->type & ARM_CP_SPECIAL_MASK) {
    case 0:
        break;
    case ARM_CP_NOP:
        return;
    case ARM_CP_NZCV:
        tcg_rt = cpu_reg(s, rt);
        if (isread) {
            gen_get_nzcv(tcg_rt);
        } else {
            gen_set_nzcv(tcg_rt);
        }
        return;
    case ARM_CP_CURRENTEL:
        /*
         * Reads as current EL value from pstate, which is
         * guaranteed to be constant by the tb flags.
         */
        tcg_rt = cpu_reg(s, rt);
        tcg_gen_movi_i64(tcg_rt, s->current_el << 2);
        return;
    case ARM_CP_DC_ZVA:
        /* Writes clear the aligned block of memory which rt points into. */
        if (s->mte_active[0]) {
            int desc = 0;

            desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s));
            desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
            desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);

            tcg_rt = new_tmp_a64(s);
            gen_helper_mte_check_zva(tcg_rt, cpu_env,
                                     tcg_constant_i32(desc), cpu_reg(s, rt));
        } else {
            tcg_rt = clean_data_tbi(s, cpu_reg(s, rt));
        }
        gen_helper_dc_zva(cpu_env, tcg_rt);
        return;
    case ARM_CP_DC_GVA:
        {
            TCGv_i64 clean_addr, tag;

            /*
             * DC_GVA, like DC_ZVA, requires that we supply the original
             * pointer for an invalid page.  Probe that address first.
             */
            tcg_rt = cpu_reg(s, rt);
            clean_addr = clean_data_tbi(s, tcg_rt);
            gen_probe_access(s, clean_addr, MMU_DATA_STORE, MO_8);

            if (s->ata) {
                /* Extract the tag from the register to match STZGM tags */
                tag = tcg_temp_new_i64();
                tcg_gen_shri_i64(tag, tcg_rt, 56);
                gen_helper_stzgm_tags(cpu_env, clean_addr, tag);
                tcg_temp_free_i64(tag);
            }
        }
        return;
    case ARM_CP_DC_GZVA:
        {
            TCGv_i64 clean_addr, tag;

            /* For DC_GZVA, we can rely on DC_ZVA for the proper fault. */
            tcg_rt = cpu_reg(s, rt);
            clean_addr = clean_data_tbi(s, tcg_rt);
            gen_helper_dc_zva(cpu_env, clean_addr);

            if (s->ata) {
                /* Extract the tag from the register to match STZGM tags */
                tag = tcg_temp_new_i64();
                tcg_gen_shri_i64(tag, tcg_rt, 56);
                gen_helper_stzgm_tags(cpu_env, clean_addr, tag);
                tcg_temp_free_i64(tag);
            }
        }
        return;
    default:
        g_assert_not_reached();
    }
    /* FP/SVE/SME-tagged registers need the corresponding access check. */
    if ((ri->type & ARM_CP_FPU) && !fp_access_check_only(s)) {
        return;
    } else if ((ri->type & ARM_CP_SVE) && !sve_access_check(s)) {
        return;
    } else if ((ri->type & ARM_CP_SME) && !sme_access_check(s)) {
        return;
    }

    if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
        gen_io_start();
    }

    tcg_rt = cpu_reg(s, rt);

    if (isread) {
        if (ri->type & ARM_CP_CONST) {
            tcg_gen_movi_i64(tcg_rt, ri->resetvalue);
        } else if (ri->readfn) {
            gen_helper_get_cp_reg64(tcg_rt, cpu_env, tcg_constant_ptr(ri));
        } else {
            tcg_gen_ld_i64(tcg_rt, cpu_env, ri->fieldoffset);
        }
    } else {
        if (ri->type & ARM_CP_CONST) {
            /* If not forbidden by access permissions, treat as WI */
            return;
        } else if (ri->writefn) {
            gen_helper_set_cp_reg64(cpu_env, tcg_constant_ptr(ri), tcg_rt);
        } else {
            tcg_gen_st_i64(tcg_rt, cpu_env, ri->fieldoffset);
        }
    }

    if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
        /* I/O operations must end the TB here (whether read or write) */
        s->base.is_jmp = DISAS_UPDATE_EXIT;
    }
    if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
        /*
         * A write to any coprocessor register that ends a TB
         * must rebuild the hflags for the next TB.
         */
        gen_rebuild_hflags(s);
        /*
         * We default to ending the TB on a coprocessor register write,
         * but allow this to be suppressed by the register definition
         * (usually only necessary to work around guest bugs).
         */
        s->base.is_jmp = DISAS_UPDATE_EXIT;
    }
}
2105
2106
2107
2108
2109
2110
2111
2112static void disas_system(DisasContext *s, uint32_t insn)
2113{
2114 unsigned int l, op0, op1, crn, crm, op2, rt;
2115 l = extract32(insn, 21, 1);
2116 op0 = extract32(insn, 19, 2);
2117 op1 = extract32(insn, 16, 3);
2118 crn = extract32(insn, 12, 4);
2119 crm = extract32(insn, 8, 4);
2120 op2 = extract32(insn, 5, 3);
2121 rt = extract32(insn, 0, 5);
2122
2123 if (op0 == 0) {
2124 if (l || rt != 31) {
2125 unallocated_encoding(s);
2126 return;
2127 }
2128 switch (crn) {
2129 case 2:
2130 handle_hint(s, insn, op1, op2, crm);
2131 break;
2132 case 3:
2133 handle_sync(s, insn, op1, op2, crm);
2134 break;
2135 case 4:
2136 handle_msr_i(s, insn, op1, op2, crm);
2137 break;
2138 default:
2139 unallocated_encoding(s);
2140 break;
2141 }
2142 return;
2143 }
2144 handle_sys(s, insn, l, op0, op1, op2, crn, crm, rt);
2145}
2146
2147
2148
2149
2150
2151
2152
2153
/* Exception generation: SVC, HVC, SMC, BRK, HLT, DCPSn */
static void disas_exc(DisasContext *s, uint32_t insn)
{
    int opc = extract32(insn, 21, 3);
    int op2_ll = extract32(insn, 0, 5);
    int imm16 = extract32(insn, 5, 16);

    switch (opc) {
    case 0:
        /*
         * For SVC, HVC and SMC we advance the single-step state
         * machine before taking the exception.  This is architecturally
         * mandated, to ensure that single-stepping a system call
         * instruction works properly.
         */
        switch (op2_ll) {
        case 1: /* SVC */
            gen_ss_advance(s);
            gen_exception_insn(s, s->base.pc_next, EXCP_SWI,
                               syn_aa64_svc(imm16));
            break;
        case 2: /* HVC */
            if (s->current_el == 0) {
                unallocated_encoding(s);
                break;
            }
            /*
             * The pre HVC helper handles cases when HVC gets trapped
             * as an undefined insn by runtime configuration.
             */
            gen_a64_set_pc_im(s->pc_curr);
            gen_helper_pre_hvc(cpu_env);
            gen_ss_advance(s);
            gen_exception_insn_el(s, s->base.pc_next, EXCP_HVC,
                                  syn_aa64_hvc(imm16), 2);
            break;
        case 3: /* SMC */
            if (s->current_el == 0) {
                unallocated_encoding(s);
                break;
            }
            /* The pre SMC helper may raise an exception itself. */
            gen_a64_set_pc_im(s->pc_curr);
            gen_helper_pre_smc(cpu_env, tcg_constant_i32(syn_aa64_smc(imm16)));
            gen_ss_advance(s);
            gen_exception_insn_el(s, s->base.pc_next, EXCP_SMC,
                                  syn_aa64_smc(imm16), 3);
            break;
        default:
            unallocated_encoding(s);
            break;
        }
        break;
    case 1: /* BRK */
        if (op2_ll != 0) {
            unallocated_encoding(s);
            break;
        }
        /* BRK: software breakpoint exception */
        gen_exception_bkpt_insn(s, syn_aa64_bkpt(imm16));
        break;
    case 2: /* HLT */
        if (op2_ll != 0) {
            unallocated_encoding(s);
            break;
        }
        /*
         * HLT is an external halting debug instruction, which QEMU
         * does not implement, so it UNDEFs -- except that "HLT 0xf000"
         * is the A64 semihosting trap when semihosting is enabled.
         */
        if (semihosting_enabled() && imm16 == 0xf000) {
#ifndef CONFIG_USER_ONLY
            /*
             * In system mode, don't allow userspace access to
             * semihosting, to provide some semblance of security.
             */
            if (s->current_el == 0) {
                unallocated_encoding(s);
                break;
            }
#endif
            gen_exception_internal_insn(s, s->pc_curr, EXCP_SEMIHOST);
        } else {
            unallocated_encoding(s);
        }
        break;
    case 5:
        if (op2_ll < 1 || op2_ll > 3) {
            unallocated_encoding(s);
            break;
        }
        /* DCPS1, DCPS2, DCPS3: debug state change; not supported -> UNDEF */
        unallocated_encoding(s);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
2251
2252
2253
2254
2255
2256
2257
/*
 * Unconditional branch (register): BR, BLR, RET, ERET, DRPS and the
 * FEAT_PAuth variants (BRAA[Z]/BRAB[Z], BLRAA[Z]/BLRAB[Z],
 * RETAA/RETAB, ERETAA/ERETAB).
 */
static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
{
    unsigned int opc, op2, op3, rn, op4;
    unsigned btype_mod = 2;  /* 0 = BR-like, 1 = BLR-like, 2 = no BTYPE update */
    TCGv_i64 dst;
    TCGv_i64 modifier;

    opc = extract32(insn, 21, 4);
    op2 = extract32(insn, 16, 5);
    op3 = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    op4 = extract32(insn, 0, 5);

    if (op2 != 0x1f) {
        goto do_unallocated;
    }

    switch (opc) {
    case 0: /* BR */
    case 1: /* BLR */
    case 2: /* RET */
        btype_mod = opc;
        switch (op3) {
        case 0:
            /* BR, BLR, RET: plain register branch */
            if (op4 != 0) {
                goto do_unallocated;
            }
            dst = cpu_reg(s, rn);
            break;

        case 2:
        case 3:
            /* PAuth forms: BRAAZ/BRABZ, BLRAAZ/BLRABZ, RETAA/RETAB */
            if (!dc_isar_feature(aa64_pauth, s)) {
                goto do_unallocated;
            }
            if (opc == 2) {
                /* RETAA, RETAB: Rn/op4 fixed; target is LR, modifier SP */
                if (rn != 0x1f || op4 != 0x1f) {
                    goto do_unallocated;
                }
                rn = 30;
                modifier = cpu_X[31];
            } else {
                /* BRAAZ, BRABZ, BLRAAZ, BLRABZ: zero modifier */
                if (op4 != 0x1f) {
                    goto do_unallocated;
                }
                modifier = new_tmp_a64_zero(s);
            }
            if (s->pauth_active) {
                dst = new_tmp_a64(s);
                if (op3 == 2) {
                    gen_helper_autia(dst, cpu_env, cpu_reg(s, rn), modifier);
                } else {
                    gen_helper_autib(dst, cpu_env, cpu_reg(s, rn), modifier);
                }
            } else {
                /* PAuth inactive: behave as the plain branch */
                dst = cpu_reg(s, rn);
            }
            break;

        default:
            goto do_unallocated;
        }
        gen_a64_set_pc(s, dst);
        /* BLR also needs to load return address */
        if (opc == 1) {
            tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next);
        }
        break;

    case 8: /* BRAA, BRAB */
    case 9: /* BLRAA, BLRAB */
        if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        if ((op3 & ~1) != 2) {
            goto do_unallocated;
        }
        btype_mod = opc & 1;
        if (s->pauth_active) {
            /* Authenticate Rn with the register/SP modifier in op4 */
            dst = new_tmp_a64(s);
            modifier = cpu_reg_sp(s, op4);
            if (op3 == 2) {
                gen_helper_autia(dst, cpu_env, cpu_reg(s, rn), modifier);
            } else {
                gen_helper_autib(dst, cpu_env, cpu_reg(s, rn), modifier);
            }
        } else {
            dst = cpu_reg(s, rn);
        }
        gen_a64_set_pc(s, dst);
        /* BLRAA/BLRAB also need to load return address */
        if (opc == 9) {
            tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next);
        }
        break;

    case 4: /* ERET */
        if (s->current_el == 0) {
            goto do_unallocated;
        }
        switch (op3) {
        case 0: /* ERET */
            if (op4 != 0) {
                goto do_unallocated;
            }
            dst = tcg_temp_new_i64();
            tcg_gen_ld_i64(dst, cpu_env,
                           offsetof(CPUARMState, elr_el[s->current_el]));
            break;

        case 2: /* ERETAA */
        case 3: /* ERETAB */
            if (!dc_isar_feature(aa64_pauth, s)) {
                goto do_unallocated;
            }
            if (rn != 0x1f || op4 != 0x1f) {
                goto do_unallocated;
            }
            dst = tcg_temp_new_i64();
            tcg_gen_ld_i64(dst, cpu_env,
                           offsetof(CPUARMState, elr_el[s->current_el]));
            if (s->pauth_active) {
                /* Authenticate ELR using SP as the modifier */
                modifier = cpu_X[31];
                if (op3 == 2) {
                    gen_helper_autia(dst, cpu_env, dst, modifier);
                } else {
                    gen_helper_autib(dst, cpu_env, dst, modifier);
                }
            }
            break;

        default:
            goto do_unallocated;
        }
        if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
            gen_io_start();
        }

        gen_helper_exception_return(cpu_env, dst);
        tcg_temp_free_i64(dst);
        /* Must exit loop to check un-masked IRQs */
        s->base.is_jmp = DISAS_EXIT;
        return;

    case 5: /* DRPS */
        if (op3 != 0 || op4 != 0 || rn != 0x1f) {
            goto do_unallocated;
        } else {
            /* DRPS itself is not supported: UNDEF */
            unallocated_encoding(s);
        }
        return;

    default:
    do_unallocated:
        unallocated_encoding(s);
        return;
    }

    /* Update PSTATE.BTYPE for the branch just emitted */
    switch (btype_mod) {
    case 0: /* BR */
        if (dc_isar_feature(aa64_bti, s)) {
            /* BR to {x16,x17} or !guard -> 1, else 3.  */
            set_btype(s, rn == 16 || rn == 17 || !s->guarded_page ? 1 : 3);
        }
        break;

    case 1: /* BLR */
        if (dc_isar_feature(aa64_bti, s)) {
            /* BLR sets BTYPE to 2, regardless of source guarded page.  */
            set_btype(s, 2);
        }
        break;

    default:
        /* Other opcodes (e.g. RET): no BTYPE update here */
        break;
    }

    s->base.is_jmp = DISAS_JUMP;
}
2441
2442
2443static void disas_b_exc_sys(DisasContext *s, uint32_t insn)
2444{
2445 switch (extract32(insn, 25, 7)) {
2446 case 0x0a: case 0x0b:
2447 case 0x4a: case 0x4b:
2448 disas_uncond_b_imm(s, insn);
2449 break;
2450 case 0x1a: case 0x5a:
2451 disas_comp_b_imm(s, insn);
2452 break;
2453 case 0x1b: case 0x5b:
2454 disas_test_b_imm(s, insn);
2455 break;
2456 case 0x2a:
2457 disas_cond_b_imm(s, insn);
2458 break;
2459 case 0x6a:
2460 if (insn & (1 << 24)) {
2461 if (extract32(insn, 22, 2) == 0) {
2462 disas_system(s, insn);
2463 } else {
2464 unallocated_encoding(s);
2465 }
2466 } else {
2467 disas_exc(s, insn);
2468 }
2469 break;
2470 case 0x6b:
2471 disas_uncond_b_reg(s, insn);
2472 break;
2473 default:
2474 unallocated_encoding(s);
2475 break;
2476 }
2477}
2478
2479
2480
2481
2482
2483
2484
2485
2486
2487
2488
2489
/*
 * Load exclusive (LDXR/LDAXR/LDXP/LDAXP): load into Rt (and Rt2 for
 * pairs), record the address and loaded value(s) in the exclusive
 * monitor state (cpu_exclusive_addr/val/high) for a later STXR.
 */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv_i64 addr, int size, bool is_pair)
{
    int idx = get_mem_index(s);
    MemOp memop = s->be_data;

    g_assert(size <= 3);
    if (is_pair) {
        g_assert(size >= 2);
        if (size == 2) {
            /* The pair is loaded as one single-copy-atomic doubleword.  */
            memop |= MO_64 | MO_ALIGN;
            tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx, memop);
            /* Split the doubleword into the two 32-bit registers.  */
            if (s->be_data == MO_LE) {
                tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 0, 32);
                tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 32, 32);
            } else {
                tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 32, 32);
                tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 0, 32);
            }
        } else {
            /*
             * The 128-bit pair is loaded as two separate doublewords,
             * but it must be quadword aligned (checked on the first
             * access only).
             */
            memop |= MO_64;
            tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx,
                                memop | MO_ALIGN_16);

            TCGv_i64 addr2 = tcg_temp_new_i64();
            tcg_gen_addi_i64(addr2, addr, 8);
            tcg_gen_qemu_ld_i64(cpu_exclusive_high, addr2, idx, memop);
            tcg_temp_free_i64(addr2);

            tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val);
            tcg_gen_mov_i64(cpu_reg(s, rt2), cpu_exclusive_high);
        }
    } else {
        memop |= size | MO_ALIGN;
        tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx, memop);
        tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val);
    }
    /* Arm the exclusive monitor on this address.  */
    tcg_gen_mov_i64(cpu_exclusive_addr, addr);
}
2532
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i64 addr, int size, int is_pair)
{
    /*
     * Store exclusive (STXR/STLXR/STXP/STLXP), implemented as:
     *
     *   if (env->exclusive_addr == addr && env->exclusive_val == [addr]
     *       && (!is_pair || env->exclusive_high == [addr + datasize])) {
     *       [addr] = {Rt};
     *       if (is_pair) {
     *           [addr + datasize] = {Rt2};
     *       }
     *       {Rd} = 0;
     *   } else {
     *       {Rd} = 1;
     *   }
     *   env->exclusive_addr = -1;
     *
     * The value comparison (not just the address comparison the
     * architecture minimally requires) is done via cmpxchg so the
     * store is atomic with respect to other CPUs.
     */
    TCGLabel *fail_label = gen_new_label();
    TCGLabel *done_label = gen_new_label();
    TCGv_i64 tmp;

    /* Fail fast if the monitor is not armed for this address.  */
    tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);

    tmp = tcg_temp_new_i64();
    if (is_pair) {
        if (size == 2) {
            /* 32-bit pair: combine Rt/Rt2 into one doubleword cmpxchg.  */
            if (s->be_data == MO_LE) {
                tcg_gen_concat32_i64(tmp, cpu_reg(s, rt), cpu_reg(s, rt2));
            } else {
                tcg_gen_concat32_i64(tmp, cpu_reg(s, rt2), cpu_reg(s, rt));
            }
            tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr,
                                       cpu_exclusive_val, tmp,
                                       get_mem_index(s),
                                       MO_64 | MO_ALIGN | s->be_data);
            /* tmp = 0 on success (old value matched), 1 on failure.  */
            tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
        } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
            if (!HAVE_CMPXCHG128) {
                gen_helper_exit_atomic(cpu_env);
                /*
                 * Produce a result so we have a well-formed opcode
                 * stream when the following (dead) code uses 'tmp'.
                 * TCG will remove the dead ops for us.
                 */
                tcg_gen_movi_i64(tmp, 0);
            } else if (s->be_data == MO_LE) {
                gen_helper_paired_cmpxchg64_le_parallel(tmp, cpu_env,
                                                        cpu_exclusive_addr,
                                                        cpu_reg(s, rt),
                                                        cpu_reg(s, rt2));
            } else {
                gen_helper_paired_cmpxchg64_be_parallel(tmp, cpu_env,
                                                        cpu_exclusive_addr,
                                                        cpu_reg(s, rt),
                                                        cpu_reg(s, rt2));
            }
        } else if (s->be_data == MO_LE) {
            gen_helper_paired_cmpxchg64_le(tmp, cpu_env, cpu_exclusive_addr,
                                           cpu_reg(s, rt), cpu_reg(s, rt2));
        } else {
            gen_helper_paired_cmpxchg64_be(tmp, cpu_env, cpu_exclusive_addr,
                                           cpu_reg(s, rt), cpu_reg(s, rt2));
        }
    } else {
        tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr, cpu_exclusive_val,
                                   cpu_reg(s, rt), get_mem_index(s),
                                   size | MO_ALIGN | s->be_data);
        tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
    }
    tcg_gen_mov_i64(cpu_reg(s, rd), tmp);
    tcg_temp_free_i64(tmp);
    tcg_gen_br(done_label);

    gen_set_label(fail_label);
    tcg_gen_movi_i64(cpu_reg(s, rd), 1);
    gen_set_label(done_label);
    /* Clear the monitor whether the store succeeded or failed.  */
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
2609
2610static void gen_compare_and_swap(DisasContext *s, int rs, int rt,
2611 int rn, int size)
2612{
2613 TCGv_i64 tcg_rs = cpu_reg(s, rs);
2614 TCGv_i64 tcg_rt = cpu_reg(s, rt);
2615 int memidx = get_mem_index(s);
2616 TCGv_i64 clean_addr;
2617
2618 if (rn == 31) {
2619 gen_check_sp_alignment(s);
2620 }
2621 clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), true, rn != 31, size);
2622 tcg_gen_atomic_cmpxchg_i64(tcg_rs, clean_addr, tcg_rs, tcg_rt, memidx,
2623 size | MO_ALIGN | s->be_data);
2624}
2625
/*
 * CASP: compare-and-swap a pair of registers ({Rs,Rs+1} vs memory,
 * conditionally storing {Rt,Rt+1}); Rs/Rs+1 receive the old memory
 * values.
 */
static void gen_compare_and_swap_pair(DisasContext *s, int rs, int rt,
                                      int rn, int size)
{
    TCGv_i64 s1 = cpu_reg(s, rs);
    TCGv_i64 s2 = cpu_reg(s, rs + 1);
    TCGv_i64 t1 = cpu_reg(s, rt);
    TCGv_i64 t2 = cpu_reg(s, rt + 1);
    TCGv_i64 clean_addr;
    int memidx = get_mem_index(s);

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    /* This is a single atomic access, despite the "pair". */
    clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), true, rn != 31, size + 1);

    if (size == 2) {
        /* 32-bit pair: one 64-bit cmpxchg does the whole operation.  */
        TCGv_i64 cmp = tcg_temp_new_i64();
        TCGv_i64 val = tcg_temp_new_i64();

        if (s->be_data == MO_LE) {
            tcg_gen_concat32_i64(val, t1, t2);
            tcg_gen_concat32_i64(cmp, s1, s2);
        } else {
            tcg_gen_concat32_i64(val, t2, t1);
            tcg_gen_concat32_i64(cmp, s2, s1);
        }

        tcg_gen_atomic_cmpxchg_i64(cmp, clean_addr, cmp, val, memidx,
                                   MO_64 | MO_ALIGN | s->be_data);
        tcg_temp_free_i64(val);

        /* Unpack the old memory value back into Rs/Rs+1.  */
        if (s->be_data == MO_LE) {
            tcg_gen_extr32_i64(s1, s2, cmp);
        } else {
            tcg_gen_extr32_i64(s2, s1, cmp);
        }
        tcg_temp_free_i64(cmp);
    } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        if (HAVE_CMPXCHG128) {
            /* The helper updates Rs/Rs+1 itself, hence passing rs.  */
            TCGv_i32 tcg_rs = tcg_constant_i32(rs);
            if (s->be_data == MO_LE) {
                gen_helper_casp_le_parallel(cpu_env, tcg_rs,
                                            clean_addr, t1, t2);
            } else {
                gen_helper_casp_be_parallel(cpu_env, tcg_rs,
                                            clean_addr, t1, t2);
            }
        } else {
            /* No 128-bit cmpxchg available: fall back to exclusive mode. */
            gen_helper_exit_atomic(cpu_env);
            s->base.is_jmp = DISAS_NORETURN;
        }
    } else {
        /* Non-parallel: emulate the 128-bit CAS with plain loads/stores. */
        TCGv_i64 d1 = tcg_temp_new_i64();
        TCGv_i64 d2 = tcg_temp_new_i64();
        TCGv_i64 a2 = tcg_temp_new_i64();
        TCGv_i64 c1 = tcg_temp_new_i64();
        TCGv_i64 c2 = tcg_temp_new_i64();
        TCGv_i64 zero = tcg_constant_i64(0);

        /* Load the two words, in memory order.  */
        tcg_gen_qemu_ld_i64(d1, clean_addr, memidx,
                            MO_64 | MO_ALIGN_16 | s->be_data);
        tcg_gen_addi_i64(a2, clean_addr, 8);
        tcg_gen_qemu_ld_i64(d2, a2, memidx, MO_64 | s->be_data);

        /* Compare the two words, also in memory order.  */
        tcg_gen_setcond_i64(TCG_COND_EQ, c1, d1, s1);
        tcg_gen_setcond_i64(TCG_COND_EQ, c2, d2, s2);
        tcg_gen_and_i64(c2, c2, c1);

        /* If compare equal, write back new data, else write back old data. */
        tcg_gen_movcond_i64(TCG_COND_NE, c1, c2, zero, t1, d1);
        tcg_gen_movcond_i64(TCG_COND_NE, c2, c2, zero, t2, d2);
        tcg_gen_qemu_st_i64(c1, clean_addr, memidx, MO_64 | s->be_data);
        tcg_gen_qemu_st_i64(c2, a2, memidx, MO_64 | s->be_data);
        tcg_temp_free_i64(a2);
        tcg_temp_free_i64(c1);
        tcg_temp_free_i64(c2);

        /* Write back the data from memory to Rs.  */
        tcg_gen_mov_i64(s1, d1);
        tcg_gen_mov_i64(s2, d2);
        tcg_temp_free_i64(d1);
        tcg_temp_free_i64(d2);
    }
}
2714
2715
2716
2717
static bool disas_ldst_compute_iss_sf(int size, bool is_signed, int opc)
{
    /*
     * Compute the ISS.SF ("sixty-four") bit for a load/store syndrome:
     * true when the transferred register is 64 bits wide.  For
     * sign-extending loads, bit 0 of opc selects a 32-bit (1) or
     * 64-bit (0) destination; otherwise only size == 3 is 64-bit.
     */
    if (is_signed) {
        return extract32(opc, 0, 1) == 0;
    }
    return size == 3;
}
2730
2731
2732
2733
2734
2735
2736
2737
2738
2739
2740
2741
2742
2743
/*
 * Load/store exclusive and related instructions: LDXR/STXR families,
 * LDAR/STLR, LDLAR/STLLR (FEAT_LOR) and CAS/CASP (FEAT_LSE),
 * distinguished by o2:L:o1:o0 (with size further splitting some cases).
 */
static void disas_ldst_excl(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rt2 = extract32(insn, 10, 5);
    int rs = extract32(insn, 16, 5);
    int is_lasr = extract32(insn, 15, 1);
    int o2_L_o1_o0 = extract32(insn, 21, 3) * 2 | is_lasr;
    int size = extract32(insn, 30, 2);
    TCGv_i64 clean_addr;

    switch (o2_L_o1_o0) {
    case 0x0: /* STXR */
    case 0x1: /* STLXR */
        if (rn == 31) {
            gen_check_sp_alignment(s);
        }
        if (is_lasr) {
            /* Release semantics: barrier before the store.  */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
        }
        clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
                                    true, rn != 31, size);
        gen_store_exclusive(s, rs, rt, rt2, clean_addr, size, false);
        return;

    case 0x4: /* LDXR */
    case 0x5: /* LDAXR */
        if (rn == 31) {
            gen_check_sp_alignment(s);
        }
        clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
                                    false, rn != 31, size);
        s->is_ldex = true;
        gen_load_exclusive(s, rt, rt2, clean_addr, size, false);
        if (is_lasr) {
            /* Acquire semantics: barrier after the load.  */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
        }
        return;

    case 0x8: /* STLLR */
        if (!dc_isar_feature(aa64_lor, s)) {
            break;
        }
        /* StoreLORelease is the same as Store-Release for QEMU.  */
        /* fall through */
    case 0x9: /* STLR */
        /* Generate ISS for non-exclusive accesses including LASR.  */
        if (rn == 31) {
            gen_check_sp_alignment(s);
        }
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
        clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
                                    true, rn != 31, size);
        /* TODO: ARMv8.4-LSE SCTLR.nAA */
        do_gpr_st(s, cpu_reg(s, rt), clean_addr, size | MO_ALIGN, true, rt,
                  disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
        return;

    case 0xc: /* LDLAR */
        if (!dc_isar_feature(aa64_lor, s)) {
            break;
        }
        /* LoadLOAcquire is the same as Load-Acquire for QEMU.  */
        /* fall through */
    case 0xd: /* LDAR */
        /* Generate ISS for non-exclusive accesses including LASR.  */
        if (rn == 31) {
            gen_check_sp_alignment(s);
        }
        clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
                                    false, rn != 31, size);
        /* TODO: ARMv8.4-LSE SCTLR.nAA */
        do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size | MO_ALIGN, false, true,
                  rt, disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
        return;

    case 0x2: case 0x3: /* STXP / STLXP (size & 2) or CASP / CASPL */
        if (size & 2) { /* STXP, STLXP */
            if (rn == 31) {
                gen_check_sp_alignment(s);
            }
            if (is_lasr) {
                tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
            }
            clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
                                        true, rn != 31, size);
            gen_store_exclusive(s, rs, rt, rt2, clean_addr, size, true);
            return;
        }
        if (rt2 == 31
            && ((rt | rs) & 1) == 0
            && dc_isar_feature(aa64_atomics, s)) {
            /* CASP / CASPL */
            gen_compare_and_swap_pair(s, rs, rt, rn, size | 2);
            return;
        }
        break;

    case 0x6: case 0x7: /* LDXP / LDAXP (size & 2) or CASPA / CASPAL */
        if (size & 2) { /* LDXP, LDAXP */
            if (rn == 31) {
                gen_check_sp_alignment(s);
            }
            clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
                                        false, rn != 31, size);
            s->is_ldex = true;
            gen_load_exclusive(s, rt, rt2, clean_addr, size, true);
            if (is_lasr) {
                tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
            }
            return;
        }
        if (rt2 == 31
            && ((rt | rs) & 1) == 0
            && dc_isar_feature(aa64_atomics, s)) {
            /* CASPA / CASPAL */
            gen_compare_and_swap_pair(s, rs, rt, rn, size | 2);
            return;
        }
        break;

    case 0xa: /* CAS */
    case 0xb: /* CASL */
    case 0xe: /* CASA */
    case 0xf: /* CASAL */
        if (rt2 == 31 && dc_isar_feature(aa64_atomics, s)) {
            gen_compare_and_swap(s, rs, rt, rn, size);
            return;
        }
        break;
    }
    unallocated_encoding(s);
}
2878
2879
2880
2881
2882
2883
2884
2885
2886
2887
2888
2889
2890
2891
/*
 * Load register (literal): LDR (literal), LDRSW (literal), PRFM (literal),
 * and the SIMD&FP LDR (literal) forms.
 *
 * Fields: opc[31:30], V[26] (vector), imm19[23:5] (word offset), Rt[4:0].
 * opc (integer): 00 -> 32 bit, 01 -> 64 bit, 10 -> 32 bit signed, 11 -> prefetch
 * opc (vector):  00 -> 32 bit, 01 -> 64 bit, 10 -> 128 bit, 11 -> unallocated
 */
static void disas_ld_lit(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int64_t imm = sextract32(insn, 5, 19) << 2;  /* imm19 scaled to bytes */
    bool is_vector = extract32(insn, 26, 1);
    int opc = extract32(insn, 30, 2);
    bool is_signed = false;
    int size = 2;
    TCGv_i64 tcg_rt, clean_addr;

    if (is_vector) {
        if (opc == 3) {
            unallocated_encoding(s);
            return;
        }
        size = 2 + opc;
        if (!fp_access_check(s)) {
            return;
        }
    } else {
        if (opc == 3) {
            /* PRFM (literal) : prefetch -- implemented as a no-op */
            return;
        }
        size = 2 + extract32(opc, 0, 1);
        is_signed = extract32(opc, 1, 1);
    }

    tcg_rt = cpu_reg(s, rt);

    /* The address is PC-relative, so no TBI/MTE cleaning is needed.  */
    clean_addr = tcg_constant_i64(s->pc_curr + imm);
    if (is_vector) {
        do_fp_ld(s, rt, clean_addr, size);
    } else {
        /* Only unsigned 32bit loads target 32bit registers.  */
        bool iss_sf = opc != 0;

        do_gpr_ld(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN,
                  false, true, rt, iss_sf, false);
    }
}
2933
2934
2935
2936
2937
2938
2939
2940
2941
2942
2943
2944
2945
2946
2947
2948
2949
2950
2951
2952
2953
2954
2955
2956
2957
2958
2959
2960
2961
/*
 * Load/store pair (all forms): LDP/STP/LDPSW/LDNP/STNP, their SIMD&FP
 * counterparts, and STGP (store allocation tag and pair, FEAT_MTE).
 *
 * Fields: opc[31:30], V[26] (vector), index[24:23] (addressing mode),
 * L[22] (load), imm7[21:15] (scaled signed offset), Rt2[14:10],
 * Rn[9:5], Rt[4:0].
 */
static void disas_ldst_pair(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rt2 = extract32(insn, 10, 5);
    uint64_t offset = sextract64(insn, 15, 7);
    int index = extract32(insn, 23, 2);
    bool is_vector = extract32(insn, 26, 1);
    bool is_load = extract32(insn, 22, 1);
    int opc = extract32(insn, 30, 2);

    bool is_signed = false;
    bool postindex = false;
    bool wback = false;
    bool set_tag = false;

    TCGv_i64 clean_addr, dirty_addr;

    int size;

    if (opc == 3) {
        unallocated_encoding(s);
        return;
    }

    if (is_vector) {
        size = 2 + opc;
    } else if (opc == 1 && !is_load) {
        /* STGP: store a pair of 64-bit registers plus the allocation tag */
        if (!dc_isar_feature(aa64_mte_insn_reg, s) || index == 0) {
            unallocated_encoding(s);
            return;
        }
        size = 3;
        set_tag = true;
    } else {
        size = 2 + extract32(opc, 1, 1);
        is_signed = extract32(opc, 0, 1);
        if (!is_load && is_signed) {
            /* There is no store-pair equivalent of LDPSW */
            unallocated_encoding(s);
            return;
        }
    }

    switch (index) {
    case 1: /* post-index */
        postindex = true;
        wback = true;
        break;
    case 0:
        /*
         * Non-temporal hint form (LDNP/STNP): treated like the
         * signed-offset form; the hint itself is not modelled.
         */
        if (is_signed) {
            /* There is no non-temporal-hint version of LDPSW */
            unallocated_encoding(s);
            return;
        }
        postindex = false;
        break;
    case 2: /* signed offset, no writeback */
        postindex = false;
        break;
    case 3: /* pre-index */
        postindex = false;
        wback = true;
        break;
    }

    if (is_vector && !fp_access_check(s)) {
        return;
    }

    /* imm7 is scaled by the access size (or by the tag granule for STGP) */
    offset <<= (set_tag ? LOG2_TAG_GRANULE : size);

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    dirty_addr = read_cpu_reg_sp(s, rn, 1);
    if (!postindex) {
        tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
    }

    if (set_tag) {
        if (!s->ata) {
            /*
             * Tag access disabled: the stub helper still performs the
             * checks a real tag store would.
             * NOTE(review): inferred from the helper name -- confirm
             * against the helper implementation.
             */
            gen_helper_stg_stub(cpu_env, dirty_addr);
        } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
            gen_helper_stg_parallel(cpu_env, dirty_addr, dirty_addr);
        } else {
            gen_helper_stg(cpu_env, dirty_addr, dirty_addr);
        }
    }

    /* MTE-check the whole 2-register transfer in one go.  */
    clean_addr = gen_mte_checkN(s, dirty_addr, !is_load,
                                (wback || rn != 31) && !set_tag, 2 << size);

    if (is_vector) {
        if (is_load) {
            do_fp_ld(s, rt, clean_addr, size);
        } else {
            do_fp_st(s, rt, clean_addr, size);
        }
        tcg_gen_addi_i64(clean_addr, clean_addr, 1 << size);
        if (is_load) {
            do_fp_ld(s, rt2, clean_addr, size);
        } else {
            do_fp_st(s, rt2, clean_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        TCGv_i64 tcg_rt2 = cpu_reg(s, rt2);

        if (is_load) {
            TCGv_i64 tmp = tcg_temp_new_i64();

            /*
             * Load the first value into a temporary so that Rt is not
             * clobbered if the second load takes an exception.
             */
            do_gpr_ld(s, tmp, clean_addr, size + is_signed * MO_SIGN,
                      false, false, 0, false, false);
            tcg_gen_addi_i64(clean_addr, clean_addr, 1 << size);
            do_gpr_ld(s, tcg_rt2, clean_addr, size + is_signed * MO_SIGN,
                      false, false, 0, false, false);

            tcg_gen_mov_i64(tcg_rt, tmp);
            tcg_temp_free_i64(tmp);
        } else {
            do_gpr_st(s, tcg_rt, clean_addr, size,
                      false, 0, false, false);
            tcg_gen_addi_i64(clean_addr, clean_addr, 1 << size);
            do_gpr_st(s, tcg_rt2, clean_addr, size,
                      false, 0, false, false);
        }
    }

    if (wback) {
        if (postindex) {
            tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
        }
        tcg_gen_mov_i64(cpu_reg_sp(s, rn), dirty_addr);
    }
}
3111
3112
3113
3114
3115
3116
3117
3118
3119
3120
3121
3122
3123
3124
3125
3126
3127
/*
 * Load/store register (unscaled immediate / immediate pre- and
 * post-indexed / unprivileged): LDUR/STUR, LDR/STR (imm9 writeback
 * forms) and LDTR/STTR.
 *
 * idx[11:10]: 00 unscaled offset, 01 post-index, 10 unprivileged (LDTR/STTR),
 *             11 pre-index.  imm9[20:12] is an unscaled signed offset.
 */
static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
                                int opc,
                                int size,
                                int rt,
                                bool is_vector)
{
    int rn = extract32(insn, 5, 5);
    int imm9 = sextract32(insn, 12, 9);
    int idx = extract32(insn, 10, 2);
    bool is_signed = false;
    bool is_store = false;
    bool is_extended = false;
    bool is_unpriv = (idx == 2);
    bool iss_valid;
    bool post_index;
    bool writeback;
    int memidx;

    TCGv_i64 clean_addr, dirty_addr;

    if (is_vector) {
        size |= (opc & 2) << 1;   /* opc<1> supplies the msb of the fp size */
        if (size > 4 || is_unpriv) {
            unallocated_encoding(s);
            return;
        }
        is_store = ((opc & 1) == 0);
        if (!fp_access_check(s)) {
            return;
        }
    } else {
        if (size == 3 && opc == 2) {
            /* PRFUM - prefetch: only the unscaled-offset form is valid */
            if (idx != 0) {
                unallocated_encoding(s);
                return;
            }
            return;
        }
        if (opc == 3 && size > 1) {
            unallocated_encoding(s);
            return;
        }
        is_store = (opc == 0);
        is_signed = extract32(opc, 1, 1);
        is_extended = (size < 3) && extract32(opc, 0, 1);
    }

    switch (idx) {
    case 0:
    case 2:
        post_index = false;
        writeback = false;
        break;
    case 1:
        post_index = true;
        writeback = true;
        break;
    case 3:
        post_index = false;
        writeback = true;
        break;
    default:
        g_assert_not_reached();
    }

    /* Writeback forms do not report a valid syndrome register transfer.  */
    iss_valid = !is_vector && !writeback;

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    dirty_addr = read_cpu_reg_sp(s, rn, 1);
    if (!post_index) {
        tcg_gen_addi_i64(dirty_addr, dirty_addr, imm9);
    }

    /* Unprivileged accesses use the EL0 translation regime.  */
    memidx = is_unpriv ? get_a64_user_mem_index(s) : get_mem_index(s);
    clean_addr = gen_mte_check1_mmuidx(s, dirty_addr, is_store,
                                       writeback || rn != 31,
                                       size, is_unpriv, memidx);

    if (is_vector) {
        if (is_store) {
            do_fp_st(s, rt, clean_addr, size);
        } else {
            do_fp_ld(s, rt, clean_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);

        if (is_store) {
            do_gpr_st_memidx(s, tcg_rt, clean_addr, size, memidx,
                             iss_valid, rt, iss_sf, false);
        } else {
            do_gpr_ld_memidx(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN,
                             is_extended, memidx,
                             iss_valid, rt, iss_sf, false);
        }
    }

    if (writeback) {
        TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
        if (post_index) {
            tcg_gen_addi_i64(dirty_addr, dirty_addr, imm9);
        }
        tcg_gen_mov_i64(tcg_rn, dirty_addr);
    }
}
3238
3239
3240
3241
3242
3243
3244
3245
3246
3247
3248
3249
3250
3251
3252
3253
3254
3255
3256
3257
3258
3259
/*
 * Load/store register (register offset): LDR/STR Rt, [Rn, Rm{, ext|shift}].
 *
 * opt[15:13] selects the extend/shift applied to Rm; S[12] selects
 * whether the offset is shifted by the access size.
 */
static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn,
                                   int opc,
                                   int size,
                                   int rt,
                                   bool is_vector)
{
    int rn = extract32(insn, 5, 5);
    int shift = extract32(insn, 12, 1);
    int rm = extract32(insn, 16, 5);
    int opt = extract32(insn, 13, 3);
    bool is_signed = false;
    bool is_store = false;
    bool is_extended = false;

    TCGv_i64 tcg_rm, clean_addr, dirty_addr;

    /* option<1> == 0 encodings are reserved */
    if (extract32(opt, 1, 1) == 0) {
        unallocated_encoding(s);
        return;
    }

    if (is_vector) {
        size |= (opc & 2) << 1;   /* opc<1> supplies the msb of the fp size */
        if (size > 4) {
            unallocated_encoding(s);
            return;
        }
        is_store = !extract32(opc, 0, 1);
        if (!fp_access_check(s)) {
            return;
        }
    } else {
        if (size == 3 && opc == 2) {
            /* PRFM - prefetch: implemented as a no-op */
            return;
        }
        if (opc == 3 && size > 1) {
            unallocated_encoding(s);
            return;
        }
        is_store = (opc == 0);
        is_signed = extract32(opc, 1, 1);
        is_extended = (size < 3) && extract32(opc, 0, 1);
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    dirty_addr = read_cpu_reg_sp(s, rn, 1);

    /* Extend/shift the register offset before adding it to the base.  */
    tcg_rm = read_cpu_reg(s, rm, 1);
    ext_and_shift_reg(tcg_rm, tcg_rm, opt, shift ? size : 0);

    tcg_gen_add_i64(dirty_addr, dirty_addr, tcg_rm);
    clean_addr = gen_mte_check1(s, dirty_addr, is_store, true, size);

    if (is_vector) {
        if (is_store) {
            do_fp_st(s, rt, clean_addr, size);
        } else {
            do_fp_ld(s, rt, clean_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
        if (is_store) {
            do_gpr_st(s, tcg_rt, clean_addr, size,
                      true, rt, iss_sf, false);
        } else {
            do_gpr_ld(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN,
                      is_extended, true, rt, iss_sf, false);
        }
    }
}
3334
3335
3336
3337
3338
3339
3340
3341
3342
3343
3344
3345
3346
3347
3348
3349
3350
3351
/*
 * Load/store register (unsigned immediate): LDR/STR Rt, [Rn, #pimm]
 * with a 12-bit immediate scaled by the access size.
 */
static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn,
                                        int opc,
                                        int size,
                                        int rt,
                                        bool is_vector)
{
    int rn = extract32(insn, 5, 5);
    unsigned int imm12 = extract32(insn, 10, 12);
    unsigned int offset;

    TCGv_i64 clean_addr, dirty_addr;

    bool is_store;
    bool is_signed = false;
    bool is_extended = false;

    if (is_vector) {
        size |= (opc & 2) << 1;   /* opc<1> supplies the msb of the fp size */
        if (size > 4) {
            unallocated_encoding(s);
            return;
        }
        is_store = !extract32(opc, 0, 1);
        if (!fp_access_check(s)) {
            return;
        }
    } else {
        if (size == 3 && opc == 2) {
            /* PRFM - prefetch: implemented as a no-op */
            return;
        }
        if (opc == 3 && size > 1) {
            unallocated_encoding(s);
            return;
        }
        is_store = (opc == 0);
        is_signed = extract32(opc, 1, 1);
        is_extended = (size < 3) && extract32(opc, 0, 1);
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    dirty_addr = read_cpu_reg_sp(s, rn, 1);
    offset = imm12 << size;   /* unsigned offset is scaled by the access size */
    tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
    clean_addr = gen_mte_check1(s, dirty_addr, is_store, rn != 31, size);

    if (is_vector) {
        if (is_store) {
            do_fp_st(s, rt, clean_addr, size);
        } else {
            do_fp_ld(s, rt, clean_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
        if (is_store) {
            do_gpr_st(s, tcg_rt, clean_addr, size,
                      true, rt, iss_sf, false);
        } else {
            do_gpr_ld(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN,
                      is_extended, true, rt, iss_sf, false);
        }
    }
}
3418
3419
3420
3421
3422
3423
3424
3425
3426
3427
3428
3429
3430
3431
3432
/*
 * Atomic memory operations (FEAT_LSE): LDADD, LDCLR, LDEOR, LDSET,
 * LDSMAX, LDSMIN, LDUMAX, LDUMIN, SWP, plus the LDAPR special case
 * (FEAT_LRCPC when o3_opc == 014).
 *
 * Fields: Rs[20:16] (source operand), o3_opc[15:12] (operation),
 * A[23] (acquire), R[22] (release), Rn[9:5], Rt[4:0].
 * Note the case labels below are octal, matching o3:opc bit groupings.
 */
static void disas_ldst_atomic(DisasContext *s, uint32_t insn,
                              int size, int rt, bool is_vector)
{
    int rs = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int o3_opc = extract32(insn, 12, 4);
    bool r = extract32(insn, 22, 1);
    bool a = extract32(insn, 23, 1);
    TCGv_i64 tcg_rs, tcg_rt, clean_addr;
    AtomicThreeOpFn *fn = NULL;
    MemOp mop = s->be_data | size | MO_ALIGN;

    if (is_vector || !dc_isar_feature(aa64_atomics, s)) {
        unallocated_encoding(s);
        return;
    }
    switch (o3_opc) {
    case 000: /* LDADD */
        fn = tcg_gen_atomic_fetch_add_i64;
        break;
    case 001: /* LDCLR */
        fn = tcg_gen_atomic_fetch_and_i64;
        break;
    case 002: /* LDEOR */
        fn = tcg_gen_atomic_fetch_xor_i64;
        break;
    case 003: /* LDSET */
        fn = tcg_gen_atomic_fetch_or_i64;
        break;
    case 004: /* LDSMAX */
        fn = tcg_gen_atomic_fetch_smax_i64;
        mop |= MO_SIGN;
        break;
    case 005: /* LDSMIN */
        fn = tcg_gen_atomic_fetch_smin_i64;
        mop |= MO_SIGN;
        break;
    case 006: /* LDUMAX */
        fn = tcg_gen_atomic_fetch_umax_i64;
        break;
    case 007: /* LDUMIN */
        fn = tcg_gen_atomic_fetch_umin_i64;
        break;
    case 010: /* SWP */
        fn = tcg_gen_atomic_xchg_i64;
        break;
    case 014: /* LDAPR, LDAPRH, LDAPRB */
        if (!dc_isar_feature(aa64_rcpc_8_3, s) ||
            rs != 31 || a != 1 || r != 0) {
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), false, rn != 31, size);

    if (o3_opc == 014) {
        /*
         * LDAPR* are a special case because they are a simple load, not a
         * fetch-and-do-something op.  The acquire ordering is provided by
         * issuing the barrier *after* the load.
         */
        do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, false,
                  true, rt, disas_ldst_compute_iss_sf(size, false, 0), true);
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
        return;
    }

    tcg_rs = read_cpu_reg(s, rs, true);
    tcg_rt = cpu_reg(s, rt);

    /* LDCLR clears the bits set in Rs: implement as AND with ~Rs.  */
    if (o3_opc == 1) {
        tcg_gen_not_i64(tcg_rs, tcg_rs);
    }

    /*
     * The tcg atomic primitives are all full barriers.  Therefore we
     * can ignore the Acquire and Release bits of this instruction.
     */
    fn(tcg_rt, clean_addr, tcg_rs, get_mem_index(s), mop);

    if ((mop & MO_SIGN) && size != MO_64) {
        /*
         * The signed min/max ops sign-extend for the memory comparison,
         * but the value written back to Rt is zero-extended to 32 bits.
         */
        tcg_gen_ext32u_i64(tcg_rt, tcg_rt);
    }
}
3526
3527
3528
3529
3530
3531
3532
3533
3534
3535
3536
3537
3538
3539
3540
3541
/*
 * PAC memory operations (FEAT_PAuth): LDRAA / LDRAB.
 *
 * The base register is authenticated (key A or key B, selected by
 * bit 23) before the scaled 10-bit signed offset is applied.
 * W[11] selects writeback of the computed address to Rn.
 */
static void disas_ldst_pac(DisasContext *s, uint32_t insn,
                           int size, int rt, bool is_vector)
{
    int rn = extract32(insn, 5, 5);
    bool is_wback = extract32(insn, 11, 1);
    bool use_key_a = !extract32(insn, 23, 1);
    int offset;
    TCGv_i64 clean_addr, dirty_addr, tcg_rt;

    /* Only the 64-bit integer load forms exist.  */
    if (size != 3 || is_vector || !dc_isar_feature(aa64_pauth, s)) {
        unallocated_encoding(s);
        return;
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    dirty_addr = read_cpu_reg_sp(s, rn, 1);

    if (s->pauth_active) {
        /* Authenticate the pointer with a zero modifier.  */
        if (use_key_a) {
            gen_helper_autda(dirty_addr, cpu_env, dirty_addr,
                             new_tmp_a64_zero(s));
        } else {
            gen_helper_autdb(dirty_addr, cpu_env, dirty_addr,
                             new_tmp_a64_zero(s));
        }
    }

    /* Form the 10-bit signed, size-scaled offset from bits 22 and 20:12.  */
    offset = (extract32(insn, 22, 1) << 9) | extract32(insn, 12, 9);
    offset = sextract32(offset << size, 0, 10 + size);
    tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);

    /* Writeback (if any) stores the dirty (pre-TBI-clean) address.  */
    clean_addr = gen_mte_check1(s, dirty_addr, false,
                                is_wback || rn != 31, size);

    tcg_rt = cpu_reg(s, rt);
    do_gpr_ld(s, tcg_rt, clean_addr, size,
              false, !is_wback,
              rt, true, false);

    if (is_wback) {
        tcg_gen_mov_i64(cpu_reg_sp(s, rn), dirty_addr);
    }
}
3589
3590
3591
3592
3593
3594
3595
3596
3597
3598
3599
3600
3601
3602
3603
/*
 * LDAPR/STLR with unscaled immediate (FEAT_LRCPC2): STLUR*, LDAPUR*,
 * LDAPURS* -- store-release / load-acquire-RCpc with a 9-bit signed,
 * unscaled offset.
 *
 * opc[23:22]: 00 store-release, 01 zero-extending load-acquire,
 *             10 sign-extending load (64-bit dest),
 *             11 sign-extending load (32-bit dest).
 */
static void disas_ldst_ldapr_stlr(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int offset = sextract32(insn, 12, 9);
    int opc = extract32(insn, 22, 2);
    int size = extract32(insn, 30, 2);
    TCGv_i64 clean_addr, dirty_addr;
    bool is_store = false;
    bool extend = false;
    bool iss_sf;
    MemOp mop;

    if (!dc_isar_feature(aa64_rcpc_8_4, s)) {
        unallocated_encoding(s);
        return;
    }

    /* All of these insns are naturally aligned accesses.  */
    mop = size | MO_ALIGN;

    switch (opc) {
    case 0: /* STLURB / STLURH / STLUR */
        is_store = true;
        break;
    case 1: /* LDAPUR* - zero-extending load */
        break;
    case 2: /* LDAPURS* - sign-extending load, 64-bit destination */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        mop |= MO_SIGN;
        break;
    case 3: /* LDAPURS* - sign-extending load, 32-bit destination */
        if (size > 1) {
            unallocated_encoding(s);
            return;
        }
        mop |= MO_SIGN;
        extend = true;  /* zero-extend the 32-bit result to 64 bits */
        break;
    default:
        g_assert_not_reached();
    }

    iss_sf = disas_ldst_compute_iss_sf(size, (mop & MO_SIGN) != 0, opc);

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    dirty_addr = read_cpu_reg_sp(s, rn, 1);
    tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
    clean_addr = clean_data_tbi(s, dirty_addr);

    if (is_store) {
        /* Store-release: the barrier is issued before the store.  */
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
        do_gpr_st(s, cpu_reg(s, rt), clean_addr, mop, true, rt, iss_sf, true);
    } else {
        /*
         * Load-acquire: the barrier is issued after the load, ordering
         * it before all subsequent accesses.
         */
        do_gpr_ld(s, cpu_reg(s, rt), clean_addr, mop,
                  extend, true, rt, iss_sf, true);
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
    }
}
3674
3675
3676static void disas_ldst_reg(DisasContext *s, uint32_t insn)
3677{
3678 int rt = extract32(insn, 0, 5);
3679 int opc = extract32(insn, 22, 2);
3680 bool is_vector = extract32(insn, 26, 1);
3681 int size = extract32(insn, 30, 2);
3682
3683 switch (extract32(insn, 24, 2)) {
3684 case 0:
3685 if (extract32(insn, 21, 1) == 0) {
3686
3687
3688
3689
3690 disas_ldst_reg_imm9(s, insn, opc, size, rt, is_vector);
3691 return;
3692 }
3693 switch (extract32(insn, 10, 2)) {
3694 case 0:
3695 disas_ldst_atomic(s, insn, size, rt, is_vector);
3696 return;
3697 case 2:
3698 disas_ldst_reg_roffset(s, insn, opc, size, rt, is_vector);
3699 return;
3700 default:
3701 disas_ldst_pac(s, insn, size, rt, is_vector);
3702 return;
3703 }
3704 break;
3705 case 1:
3706 disas_ldst_reg_unsigned_imm(s, insn, opc, size, rt, is_vector);
3707 return;
3708 }
3709 unallocated_encoding(s);
3710}
3711
3712
3713
3714
3715
3716
3717
3718
3719
3720
3721
3722
3723
3724
3725
3726
3727
3728
3729
/*
 * AdvSIMD load/store multiple structures: LD1-LD4 / ST1-ST4
 * (multiple structures), with optional post-index writeback.
 *
 * Fields: Q[30], L[22] (load), postidx[23], Rm[20:16] (post-index reg),
 * opcode[15:12], size[11:10], Rn[9:5], Rt[4:0].
 */
static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 10, 2);
    int opcode = extract32(insn, 12, 4);
    bool is_store = !extract32(insn, 22, 1);
    bool is_postidx = extract32(insn, 23, 1);
    bool is_q = extract32(insn, 30, 1);
    TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
    MemOp endian, align, mop;

    int total;    /* total bytes transferred */
    int elements; /* elements per vector register */
    int rpt;      /* number of iterations (register groups) */
    int selem;    /* structure elements per access */
    int r;

    if (extract32(insn, 31, 1) || extract32(insn, 21, 1)) {
        unallocated_encoding(s);
        return;
    }

    /* Without post-index, Rm must be 0 (it is not used).  */
    if (!is_postidx && rm != 0) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0x0: /* LD4/ST4 (4 registers, 4 structure elements) */
        rpt = 1;
        selem = 4;
        break;
    case 0x2: /* LD1/ST1 (4 registers) */
        rpt = 4;
        selem = 1;
        break;
    case 0x4: /* LD3/ST3 (3 registers, 3 structure elements) */
        rpt = 1;
        selem = 3;
        break;
    case 0x6: /* LD1/ST1 (3 registers) */
        rpt = 3;
        selem = 1;
        break;
    case 0x7: /* LD1/ST1 (1 register) */
        rpt = 1;
        selem = 1;
        break;
    case 0x8: /* LD2/ST2 (2 registers, 2 structure elements) */
        rpt = 1;
        selem = 2;
        break;
    case 0xa: /* LD1/ST1 (2 registers) */
        rpt = 2;
        selem = 1;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (size == 3 && !is_q && selem != 1) {
        /* reserved: .1D format with more than one structure element */
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    /* For our purposes, bytes are always little-endian.  */
    endian = s->be_data;
    if (size == 0) {
        endian = MO_LE;
    }

    total = rpt * selem * (is_q ? 16 : 8);
    tcg_rn = cpu_reg_sp(s, rn);

    /*
     * Issue the MTE check against the whole transfer before we adjust
     * the element size below.
     */
    clean_addr = gen_mte_checkN(s, tcg_rn, is_store, is_postidx || rn != 31,
                                total);

    /*
     * Consecutive little-endian elements from a single register can
     * be promoted to a single larger little-endian operation.
     */
    align = MO_ALIGN;
    if (selem == 1 && endian == MO_LE) {
        align = pow2_align(size);
        size = 3;
    }
    if (!s->align_mem) {
        align = 0;
    }
    mop = endian | size | align;

    elements = (is_q ? 16 : 8) >> size;
    tcg_ebytes = tcg_constant_i64(1 << size);
    for (r = 0; r < rpt; r++) {
        int e;
        for (e = 0; e < elements; e++) {
            int xs;
            for (xs = 0; xs < selem; xs++) {
                int tt = (rt + r + xs) % 32;
                if (is_store) {
                    do_vec_st(s, tt, e, clean_addr, mop);
                } else {
                    do_vec_ld(s, tt, e, clean_addr, mop);
                }
                tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
            }
        }
    }

    if (!is_store) {
        /*
         * For non-quad operations, setting a slice of the low 64 bits
         * of the register clears the high 64 bits (in the ARM ARM
         * pseudocode this is implicit in the fact that 'rval' is a
         * 64 bit wide variable).  For quad operations, we might still
         * need to zero the high bits of SVE.
         */
        for (r = 0; r < rpt * selem; r++) {
            int tt = (rt + r) % 32;
            clear_vec_high(s, is_q, tt);
        }
    }

    if (is_postidx) {
        if (rm == 31) {
            /* Rm == 31 means post-index by the transfer size */
            tcg_gen_addi_i64(tcg_rn, tcg_rn, total);
        } else {
            /* otherwise post-index by Xm */
            tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
        }
    }
}
3878
3879
3880
3881
3882
3883
3884
3885
3886
3887
3888
3889
3890
3891
3892
3893
3894
3895
3896
3897
3898
3899
3900
/*
 * AdvSIMD load/store single structure: LD1-LD4 / ST1-ST4 (single
 * structure to/from one lane) and the load-and-replicate forms
 * (LD1R-LD4R), with optional post-index writeback.
 *
 * Fields: Q[30], postidx[23], L[22] (load), R[21], Rm[20:16],
 * opc[15:13], S[12], size[11:10], Rn[9:5], Rt[4:0].
 */
static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 10, 2);
    int S = extract32(insn, 12, 1);
    int opc = extract32(insn, 13, 3);
    int R = extract32(insn, 21, 1);
    int is_load = extract32(insn, 22, 1);
    int is_postidx = extract32(insn, 23, 1);
    int is_q = extract32(insn, 30, 1);

    int scale = extract32(opc, 1, 2);   /* log2 of the element size */
    int selem = (extract32(opc, 0, 1) << 1 | R) + 1;  /* 1-4 structure elems */
    bool replicate = false;
    int index = is_q << 3 | S << 2 | size;  /* raw lane index bits */
    int xs, total;
    TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
    MemOp mop;

    if (extract32(insn, 31, 1)) {
        unallocated_encoding(s);
        return;
    }
    /* Without post-index, Rm must be 0 (it is not used).  */
    if (!is_postidx && rm != 0) {
        unallocated_encoding(s);
        return;
    }

    switch (scale) {
    case 3:
        /* LD*R: load and replicate; only valid as a load, with S == 0 */
        if (!is_load || S) {
            unallocated_encoding(s);
            return;
        }
        scale = size;
        replicate = true;
        break;
    case 0:
        /* byte element: index is Q:S:size as extracted */
        break;
    case 1:
        /* halfword element: size<0> must be 0, drop it from the index */
        if (extract32(size, 0, 1)) {
            unallocated_encoding(s);
            return;
        }
        index >>= 1;
        break;
    case 2:
        /* word element, or doubleword when size<0> is set */
        if (extract32(size, 1, 1)) {
            unallocated_encoding(s);
            return;
        }
        if (!extract32(size, 0, 1)) {
            index >>= 2;
        } else {
            if (S) {
                unallocated_encoding(s);
                return;
            }
            index >>= 3;
            scale = 3;
        }
        break;
    default:
        g_assert_not_reached();
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    total = selem << scale;   /* total bytes transferred */
    tcg_rn = cpu_reg_sp(s, rn);

    /* MTE-check the whole transfer in one go.  */
    clean_addr = gen_mte_checkN(s, tcg_rn, !is_load, is_postidx || rn != 31,
                                total);
    mop = finalize_memop(s, scale);

    tcg_ebytes = tcg_constant_i64(1 << scale);
    for (xs = 0; xs < selem; xs++) {
        if (replicate) {
            /* Load and replicate to all lanes of the vector register.  */
            TCGv_i64 tcg_tmp = tcg_temp_new_i64();

            tcg_gen_qemu_ld_i64(tcg_tmp, clean_addr, get_mem_index(s), mop);
            tcg_gen_gvec_dup_i64(scale, vec_full_reg_offset(s, rt),
                                 (is_q + 1) * 8, vec_full_reg_size(s),
                                 tcg_tmp);
            tcg_temp_free_i64(tcg_tmp);
        } else {
            /* Load/store a single element into/from the selected lane.  */
            if (is_load) {
                do_vec_ld(s, rt, index, clean_addr, mop);
            } else {
                do_vec_st(s, rt, index, clean_addr, mop);
            }
        }
        tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
        rt = (rt + 1) % 32;
    }

    if (is_postidx) {
        if (rm == 31) {
            /* Rm == 31 means post-index by the transfer size */
            tcg_gen_addi_i64(tcg_rn, tcg_rn, total);
        } else {
            /* otherwise post-index by Xm */
            tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
        }
    }
}
4015
4016
4017
4018
4019
4020
4021
4022
4023
/*
 * Load/store memory tags (FEAT_MTE): STG, STZG, ST2G, STZ2G, LDG,
 * and the bulk forms STGM, STZGM, LDGM.
 *
 * Fields: op1[23:22], imm9[20:12] (tag-granule-scaled offset),
 * op2[11:10], Rn[9:5], Rt[4:0].
 */
static void disas_ldst_tag(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    uint64_t offset = sextract64(insn, 12, 9) << LOG2_TAG_GRANULE;
    int op2 = extract32(insn, 10, 2);
    int op1 = extract32(insn, 22, 2);
    bool is_load = false, is_pair = false, is_zero = false, is_mult = false;
    int index = 0;
    TCGv_i64 addr, clean_addr, tcg_rt;

    /* Bits [30:29] must be 1 1 for this encoding group.  */
    if (extract32(insn, 30, 2) != 3) {
        goto do_unallocated;
    }

    /*
     * @index is a tri-state addressing mode derived from op2:
     *   < 0 : post-index with writeback
     *   = 0 : signed offset, no writeback
     *   > 0 : pre-index with writeback
     */
    switch (op1) {
    case 0:
        if (op2 != 0) {
            /* STG */
            index = op2 - 2;
        } else {
            /* STZGM: bulk store-zero with tags; EL1+ only, no offset */
            if (s->current_el == 0 || offset != 0) {
                goto do_unallocated;
            }
            is_mult = is_zero = true;
        }
        break;
    case 1:
        if (op2 != 0) {
            /* STZG */
            is_zero = true;
            index = op2 - 2;
        } else {
            /* LDG */
            is_load = true;
        }
        break;
    case 2:
        if (op2 != 0) {
            /* ST2G */
            is_pair = true;
            index = op2 - 2;
        } else {
            /* STGM: bulk tag store; EL1+ only, no offset */
            if (s->current_el == 0 || offset != 0) {
                goto do_unallocated;
            }
            is_mult = true;
        }
        break;
    case 3:
        if (op2 != 0) {
            /* STZ2G */
            is_pair = is_zero = true;
            index = op2 - 2;
        } else {
            /* LDGM: bulk tag load; EL1+ only, no offset */
            if (s->current_el == 0 || offset != 0) {
                goto do_unallocated;
            }
            is_mult = is_load = true;
        }
        break;

    default:
    do_unallocated:
        unallocated_encoding(s);
        return;
    }

    /* The bulk forms need full MTE; the rest only need the MTE insns.  */
    if (is_mult
        ? !dc_isar_feature(aa64_mte, s)
        : !dc_isar_feature(aa64_mte_insn_reg, s)) {
        goto do_unallocated;
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    addr = read_cpu_reg_sp(s, rn, true);
    if (index >= 0) {
        /* pre-index or signed offset: apply the offset up front */
        tcg_gen_addi_i64(addr, addr, offset);
    }

    if (is_mult) {
        tcg_rt = cpu_reg(s, rt);

        if (is_zero) {
            int size = 4 << s->dcz_blocksize;

            if (s->ata) {
                gen_helper_stzgm_tags(cpu_env, addr, tcg_rt);
            }
            /*
             * The non-tags portion of STZGM is mostly like DC ZVA,
             * except the alignment happens before the access.
             */
            clean_addr = clean_data_tbi(s, addr);
            tcg_gen_andi_i64(clean_addr, clean_addr, -size);
            gen_helper_dc_zva(cpu_env, clean_addr);
        } else if (s->ata) {
            if (is_load) {
                gen_helper_ldgm(tcg_rt, cpu_env, addr);
            } else {
                gen_helper_stgm(cpu_env, addr, tcg_rt);
            }
        } else {
            /* Tag access disabled: still probe the (aligned) block.  */
            MMUAccessType acc = is_load ? MMU_DATA_LOAD : MMU_DATA_STORE;
            int size = 4 << GMID_EL1_BS;

            clean_addr = clean_data_tbi(s, addr);
            tcg_gen_andi_i64(clean_addr, clean_addr, -size);
            gen_probe_access(s, clean_addr, acc, size);

            if (is_load) {
                /* The result tags are zeros.  */
                tcg_gen_movi_i64(tcg_rt, 0);
            }
        }
        return;
    }

    if (is_load) {
        /* LDG reads the tag for the granule-aligned address.  */
        tcg_gen_andi_i64(addr, addr, -TAG_GRANULE);
        tcg_rt = cpu_reg(s, rt);
        if (s->ata) {
            gen_helper_ldg(tcg_rt, cpu_env, addr, tcg_rt);
        } else {
            /* Tags disabled: probe the byte, return address with tag 0.  */
            clean_addr = clean_data_tbi(s, addr);
            gen_probe_access(s, clean_addr, MMU_DATA_LOAD, MO_8);
            gen_address_with_allocation_tag0(tcg_rt, addr);
        }
    } else {
        tcg_rt = cpu_reg_sp(s, rt);
        if (!s->ata) {
            /*
             * Tag access disabled: the stub helpers still perform the
             * access checks a real tag store would.
             * NOTE(review): inferred from the helper names -- confirm
             * against the helper implementations.
             */
            if (is_pair) {
                gen_helper_st2g_stub(cpu_env, addr);
            } else {
                gen_helper_stg_stub(cpu_env, addr);
            }
        } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
            if (is_pair) {
                gen_helper_st2g_parallel(cpu_env, addr, tcg_rt);
            } else {
                gen_helper_stg_parallel(cpu_env, addr, tcg_rt);
            }
        } else {
            if (is_pair) {
                gen_helper_st2g(cpu_env, addr, tcg_rt);
            } else {
                gen_helper_stg(cpu_env, addr, tcg_rt);
            }
        }
    }

    if (is_zero) {
        /* STZG/STZ2G: also zero the data of one or two tag granules.  */
        TCGv_i64 clean_addr = clean_data_tbi(s, addr);
        TCGv_i64 tcg_zero = tcg_constant_i64(0);
        int mem_index = get_mem_index(s);
        int i, n = (1 + is_pair) << LOG2_TAG_GRANULE;

        tcg_gen_qemu_st_i64(tcg_zero, clean_addr, mem_index,
                            MO_UQ | MO_ALIGN_16);
        for (i = 8; i < n; i += 8) {
            tcg_gen_addi_i64(clean_addr, clean_addr, 8);
            tcg_gen_qemu_st_i64(tcg_zero, clean_addr, mem_index, MO_UQ);
        }
    }

    if (index != 0) {
        /* pre-index or post-index: write the address back to Rn */
        if (index < 0) {
            /* post-index: the offset has not been applied yet */
            tcg_gen_addi_i64(addr, addr, offset);
        }
        tcg_gen_mov_i64(cpu_reg_sp(s, rn), addr);
    }
}
4217
4218
4219static void disas_ldst(DisasContext *s, uint32_t insn)
4220{
4221 switch (extract32(insn, 24, 6)) {
4222 case 0x08:
4223 disas_ldst_excl(s, insn);
4224 break;
4225 case 0x18: case 0x1c:
4226 disas_ld_lit(s, insn);
4227 break;
4228 case 0x28: case 0x29:
4229 case 0x2c: case 0x2d:
4230 disas_ldst_pair(s, insn);
4231 break;
4232 case 0x38: case 0x39:
4233 case 0x3c: case 0x3d:
4234 disas_ldst_reg(s, insn);
4235 break;
4236 case 0x0c:
4237 disas_ldst_multiple_struct(s, insn);
4238 break;
4239 case 0x0d:
4240 disas_ldst_single_struct(s, insn);
4241 break;
4242 case 0x19:
4243 if (extract32(insn, 21, 1) != 0) {
4244 disas_ldst_tag(s, insn);
4245 } else if (extract32(insn, 10, 2) == 0) {
4246 disas_ldst_ldapr_stlr(s, insn);
4247 } else {
4248 unallocated_encoding(s);
4249 }
4250 break;
4251 default:
4252 unallocated_encoding(s);
4253 break;
4254 }
4255}
4256
4257
4258
4259
4260
4261
4262
4263static void disas_pc_rel_adr(DisasContext *s, uint32_t insn)
4264{
4265 unsigned int page, rd;
4266 uint64_t base;
4267 uint64_t offset;
4268
4269 page = extract32(insn, 31, 1);
4270
4271 offset = sextract64(insn, 5, 19);
4272 offset = offset << 2 | extract32(insn, 29, 2);
4273 rd = extract32(insn, 0, 5);
4274 base = s->pc_curr;
4275
4276 if (page) {
4277
4278 base &= ~0xfff;
4279 offset <<= 12;
4280 }
4281
4282 tcg_gen_movi_i64(cpu_reg(s, rd), base + offset);
4283}
4284
4285
4286
4287
4288
4289
4290
4291
4292
4293
4294
4295
4296
4297
4298static void disas_add_sub_imm(DisasContext *s, uint32_t insn)
4299{
4300 int rd = extract32(insn, 0, 5);
4301 int rn = extract32(insn, 5, 5);
4302 uint64_t imm = extract32(insn, 10, 12);
4303 bool shift = extract32(insn, 22, 1);
4304 bool setflags = extract32(insn, 29, 1);
4305 bool sub_op = extract32(insn, 30, 1);
4306 bool is_64bit = extract32(insn, 31, 1);
4307
4308 TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
4309 TCGv_i64 tcg_rd = setflags ? cpu_reg(s, rd) : cpu_reg_sp(s, rd);
4310 TCGv_i64 tcg_result;
4311
4312 if (shift) {
4313 imm <<= 12;
4314 }
4315
4316 tcg_result = tcg_temp_new_i64();
4317 if (!setflags) {
4318 if (sub_op) {
4319 tcg_gen_subi_i64(tcg_result, tcg_rn, imm);
4320 } else {
4321 tcg_gen_addi_i64(tcg_result, tcg_rn, imm);
4322 }
4323 } else {
4324 TCGv_i64 tcg_imm = tcg_constant_i64(imm);
4325 if (sub_op) {
4326 gen_sub_CC(is_64bit, tcg_result, tcg_rn, tcg_imm);
4327 } else {
4328 gen_add_CC(is_64bit, tcg_result, tcg_rn, tcg_imm);
4329 }
4330 }
4331
4332 if (is_64bit) {
4333 tcg_gen_mov_i64(tcg_rd, tcg_result);
4334 } else {
4335 tcg_gen_ext32u_i64(tcg_rd, tcg_result);
4336 }
4337
4338 tcg_temp_free_i64(tcg_result);
4339}
4340
4341
4342
4343
4344
4345
4346
4347
4348
4349
4350
4351static void disas_add_sub_imm_with_tags(DisasContext *s, uint32_t insn)
4352{
4353 int rd = extract32(insn, 0, 5);
4354 int rn = extract32(insn, 5, 5);
4355 int uimm4 = extract32(insn, 10, 4);
4356 int uimm6 = extract32(insn, 16, 6);
4357 bool sub_op = extract32(insn, 30, 1);
4358 TCGv_i64 tcg_rn, tcg_rd;
4359 int imm;
4360
4361
4362 if ((insn & 0xa040c000u) != 0x80000000u ||
4363 !dc_isar_feature(aa64_mte_insn_reg, s)) {
4364 unallocated_encoding(s);
4365 return;
4366 }
4367
4368 imm = uimm6 << LOG2_TAG_GRANULE;
4369 if (sub_op) {
4370 imm = -imm;
4371 }
4372
4373 tcg_rn = cpu_reg_sp(s, rn);
4374 tcg_rd = cpu_reg_sp(s, rd);
4375
4376 if (s->ata) {
4377 gen_helper_addsubg(tcg_rd, cpu_env, tcg_rn,
4378 tcg_constant_i32(imm),
4379 tcg_constant_i32(uimm4));
4380 } else {
4381 tcg_gen_addi_i64(tcg_rd, tcg_rn, imm);
4382 gen_address_with_allocation_tag0(tcg_rd, tcg_rd);
4383 }
4384}
4385
4386
4387
4388
4389
/*
 * Replicate the low e bits of @mask across the full 64-bit width by
 * repeated doubling.  @e must be a non-zero power-of-two element size.
 */
static uint64_t bitfield_replicate(uint64_t mask, unsigned int e)
{
    assert(e != 0);
    for (; e < 64; e *= 2) {
        mask |= mask << e;
    }
    return mask;
}
4399
4400
/* Return a 64-bit value whose bottom @length bits are set (1..64). */
static inline uint64_t bitmask64(unsigned int length)
{
    assert(length > 0 && length <= 64);
    if (length == 64) {
        return ~0ULL;
    }
    return (1ULL << length) - 1;
}
4406
4407
4408
4409
4410
4411
/*
 * Decode an AArch64 "logical immediate" (the N:imms:immr triplet) into
 * the 64-bit mask it denotes, following the architecture's
 * DecodeBitMasks() pseudocode.  The mask is a run of ones, rotated
 * within an element of 2, 4, 8, 16, 32 or 64 bits, then replicated
 * across the 64-bit width.
 *
 * Returns false for the reserved encodings that denote no valid mask
 * (no element size derivable, or a run filling the whole element).
 */
bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn,
                            unsigned int imms, unsigned int immr)
{
    unsigned int size_field = (immn << 6) | (~imms & 0x3f);
    unsigned int e, levels, s, r;
    uint64_t mask;
    int len = -1;
    int bit;

    assert(immn < 2 && imms < 64 && immr < 64);

    /*
     * The element size is 1 << len, where len indexes the highest set
     * bit of N:NOT(imms).  No set bit, or only bit 0 set (element size
     * of one bit), is a reserved encoding.
     */
    for (bit = 6; bit >= 0; bit--) {
        if (size_field & (1u << bit)) {
            len = bit;
            break;
        }
    }
    if (len < 1) {
        return false;
    }
    e = 1u << len;

    /* <levels> masks the per-element portion of imms/immr. */
    levels = e - 1;
    s = imms & levels;
    r = immr & levels;

    if (s == levels) {
        /* A run of ones filling the whole element is reserved. */
        return false;
    }

    /* Build the run of s+1 ones ... */
    mask = ~0ULL >> (63 - s);
    if (r) {
        /* ... rotate it right by r within the e-bit element ... */
        mask = (mask >> r) | (mask << (e - r));
        mask &= ~0ULL >> (64 - e);
    }
    /* ... and replicate the element across the 64-bit result. */
    for (; e < 64; e *= 2) {
        mask |= mask << e;
    }

    *result = mask;
    return true;
}
4472
4473
4474
4475
4476
4477
4478
4479static void disas_logic_imm(DisasContext *s, uint32_t insn)
4480{
4481 unsigned int sf, opc, is_n, immr, imms, rn, rd;
4482 TCGv_i64 tcg_rd, tcg_rn;
4483 uint64_t wmask;
4484 bool is_and = false;
4485
4486 sf = extract32(insn, 31, 1);
4487 opc = extract32(insn, 29, 2);
4488 is_n = extract32(insn, 22, 1);
4489 immr = extract32(insn, 16, 6);
4490 imms = extract32(insn, 10, 6);
4491 rn = extract32(insn, 5, 5);
4492 rd = extract32(insn, 0, 5);
4493
4494 if (!sf && is_n) {
4495 unallocated_encoding(s);
4496 return;
4497 }
4498
4499 if (opc == 0x3) {
4500 tcg_rd = cpu_reg(s, rd);
4501 } else {
4502 tcg_rd = cpu_reg_sp(s, rd);
4503 }
4504 tcg_rn = cpu_reg(s, rn);
4505
4506 if (!logic_imm_decode_wmask(&wmask, is_n, imms, immr)) {
4507
4508 unallocated_encoding(s);
4509 return;
4510 }
4511
4512 if (!sf) {
4513 wmask &= 0xffffffff;
4514 }
4515
4516 switch (opc) {
4517 case 0x3:
4518 case 0x0:
4519 tcg_gen_andi_i64(tcg_rd, tcg_rn, wmask);
4520 is_and = true;
4521 break;
4522 case 0x1:
4523 tcg_gen_ori_i64(tcg_rd, tcg_rn, wmask);
4524 break;
4525 case 0x2:
4526 tcg_gen_xori_i64(tcg_rd, tcg_rn, wmask);
4527 break;
4528 default:
4529 assert(FALSE);
4530 break;
4531 }
4532
4533 if (!sf && !is_and) {
4534
4535
4536
4537 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
4538 }
4539
4540 if (opc == 3) {
4541 gen_logic_CC(sf, tcg_rd);
4542 }
4543}
4544
4545
4546
4547
4548
4549
4550
4551
4552
4553
4554
4555
4556
/*
 * Move wide (immediate): MOVN, MOVZ, MOVK.
 *
 * opc selects the operation (00 MOVN, 10 MOVZ, 11 MOVK; 01 reserved);
 * hw (decoded into 'pos' as a bit offset) selects which 16-bit slice
 * of the destination the immediate targets.
 */
static void disas_movw_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    uint64_t imm = extract32(insn, 5, 16);
    int sf = extract32(insn, 31, 1);
    int opc = extract32(insn, 29, 2);
    int pos = extract32(insn, 21, 2) << 4;
    TCGv_i64 tcg_rd = cpu_reg(s, rd);

    if (!sf && (pos >= 32)) {
        /* 32-bit forms may only target the two low 16-bit slices. */
        unallocated_encoding(s);
        return;
    }

    switch (opc) {
    case 0: /* MOVN */
    case 2: /* MOVZ */
        imm <<= pos;
        if (opc == 0) {
            imm = ~imm;
        }
        if (!sf) {
            imm &= 0xffffffffu;
        }
        tcg_gen_movi_i64(tcg_rd, imm);
        break;
    case 3: /* MOVK: insert 16 bits into Rd, keeping the rest. */
        tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_constant_i64(imm), pos, 16);
        if (!sf) {
            tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
        }
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
4594
4595
4596
4597
4598
4599
4600
/* Bitfield: SBFM, BFM, UBFM (opc 0, 1, 2 respectively). */
static void disas_bitfield(DisasContext *s, uint32_t insn)
{
    unsigned int sf, n, opc, ri, si, rn, rd, bitsize, pos, len;
    TCGv_i64 tcg_rd, tcg_tmp;

    sf = extract32(insn, 31, 1);
    opc = extract32(insn, 29, 2);
    n = extract32(insn, 22, 1);
    ri = extract32(insn, 16, 6);  /* immr */
    si = extract32(insn, 10, 6);  /* imms */
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);
    bitsize = sf ? 64 : 32;

    if (sf != n || ri >= bitsize || si >= bitsize || opc > 2) {
        unallocated_encoding(s);
        return;
    }

    tcg_rd = cpu_reg(s, rd);

    /*
     * Read the source with sf=1 to suppress the zero-extend: since ri
     * and si are constrained below bitsize we never reference data
     * outside the low 32 bits anyway.
     */
    tcg_tmp = read_cpu_reg(s, rn, 1);

    /* Recognize simple(r) extraction cases. */
    if (si >= ri) {
        /* Wd<s-r:0> = Wn<s:r> */
        len = (si - ri) + 1;
        if (opc == 0) { /* SBFM: sign-extending extract */
            tcg_gen_sextract_i64(tcg_rd, tcg_tmp, ri, len);
            goto done;
        } else if (opc == 2) { /* UBFM: zero-extending extract */
            tcg_gen_extract_i64(tcg_rd, tcg_tmp, ri, len);
            return;
        }
        /* opc == 1 (BFXIL): fall through to the deposit below. */
        tcg_gen_shri_i64(tcg_tmp, tcg_tmp, ri);
        pos = 0;
    } else {
        /*
         * Handle ri > si with a deposit:
         * Wd<bitsize+s-r,bitsize-r> = Wn<s:0>
         */
        len = si + 1;
        pos = (bitsize - ri) & (bitsize - 1);
    }

    if (opc == 0 && len < ri) {
        /*
         * SBFM: sign-extend the source field to fill the space below
         * the insertion point, letting the deposit carry those sign
         * bits into the destination.
         */
        tcg_gen_sextract_i64(tcg_tmp, tcg_tmp, 0, len);
        len = ri;
    }

    if (opc == 1) { /* BFM/BFXIL: merge into existing Rd bits. */
        tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, pos, len);
    } else {
        /*
         * SBFM/UBFM: deposit into zero; nothing outside bitsize was
         * modified, so the final zero-extension is unnecessary.
         */
        tcg_gen_deposit_z_i64(tcg_rd, tcg_tmp, pos, len);
        return;
    }

 done:
    if (!sf) { /* zero-extend the final 32-bit result */
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
}
4672
4673
4674
4675
4676
4677
4678
/*
 * Extract (EXTR): Rd = (Rn:Rm)<imm+bitsize-1 : imm>.
 * When Rn == Rm this is the ROR (immediate) alias.
 */
static void disas_extract(DisasContext *s, uint32_t insn)
{
    unsigned int sf, n, rm, imm, rn, rd, bitsize, op21, op0;

    sf = extract32(insn, 31, 1);
    n = extract32(insn, 22, 1);
    rm = extract32(insn, 16, 5);
    imm = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);
    op21 = extract32(insn, 29, 2);
    op0 = extract32(insn, 21, 1);
    bitsize = sf ? 64 : 32;

    if (sf != n || op21 || op0 || imm >= bitsize) {
        unallocated_encoding(s);
    } else {
        TCGv_i64 tcg_rd, tcg_rm, tcg_rn;

        tcg_rd = cpu_reg(s, rd);

        if (unlikely(imm == 0)) {
            /*
             * imm == 0 would require a shift by bitsize, which the
             * extract2 ops do not handle; the result is simply Rm.
             */
            if (sf) {
                tcg_gen_mov_i64(tcg_rd, cpu_reg(s, rm));
            } else {
                tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, rm));
            }
        } else {
            tcg_rm = cpu_reg(s, rm);
            tcg_rn = cpu_reg(s, rn);

            if (sf) {
                /* The ROR (rm == rn) specialization happens in EXTRACT2. */
                tcg_gen_extract2_i64(tcg_rd, tcg_rm, tcg_rn, imm);
            } else {
                /* 32-bit: work at i32 width, then zero-extend. */
                TCGv_i32 t0 = tcg_temp_new_i32();

                tcg_gen_extrl_i64_i32(t0, tcg_rm);
                if (rm == rn) {
                    /* ROR alias: a plain 32-bit rotate. */
                    tcg_gen_rotri_i32(t0, t0, imm);
                } else {
                    TCGv_i32 t1 = tcg_temp_new_i32();
                    tcg_gen_extrl_i64_i32(t1, tcg_rn);
                    tcg_gen_extract2_i32(t0, t0, t1, imm);
                    tcg_temp_free_i32(t1);
                }
                tcg_gen_extu_i32_i64(tcg_rd, t0);
                tcg_temp_free_i32(t0);
            }
        }
    }
}
4734
4735
/* Data processing - immediate: dispatch on insn bits [28:23]. */
static void disas_data_proc_imm(DisasContext *s, uint32_t insn)
{
    switch (extract32(insn, 23, 6)) {
    case 0x20: case 0x21: /* PC-rel. addressing */
        disas_pc_rel_adr(s, insn);
        break;
    case 0x22: /* Add/subtract (immediate) */
        disas_add_sub_imm(s, insn);
        break;
    case 0x23: /* Add/subtract (immediate, with tags) */
        disas_add_sub_imm_with_tags(s, insn);
        break;
    case 0x24: /* Logical (immediate) */
        disas_logic_imm(s, insn);
        break;
    case 0x25: /* Move wide (immediate) */
        disas_movw_imm(s, insn);
        break;
    case 0x26: /* Bitfield */
        disas_bitfield(s, insn);
        break;
    case 0x27: /* Extract */
        disas_extract(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
4765
4766
4767
4768
4769
4770
4771static void shift_reg(TCGv_i64 dst, TCGv_i64 src, int sf,
4772 enum a64_shift_type shift_type, TCGv_i64 shift_amount)
4773{
4774 switch (shift_type) {
4775 case A64_SHIFT_TYPE_LSL:
4776 tcg_gen_shl_i64(dst, src, shift_amount);
4777 break;
4778 case A64_SHIFT_TYPE_LSR:
4779 tcg_gen_shr_i64(dst, src, shift_amount);
4780 break;
4781 case A64_SHIFT_TYPE_ASR:
4782 if (!sf) {
4783 tcg_gen_ext32s_i64(dst, src);
4784 }
4785 tcg_gen_sar_i64(dst, sf ? src : dst, shift_amount);
4786 break;
4787 case A64_SHIFT_TYPE_ROR:
4788 if (sf) {
4789 tcg_gen_rotr_i64(dst, src, shift_amount);
4790 } else {
4791 TCGv_i32 t0, t1;
4792 t0 = tcg_temp_new_i32();
4793 t1 = tcg_temp_new_i32();
4794 tcg_gen_extrl_i64_i32(t0, src);
4795 tcg_gen_extrl_i64_i32(t1, shift_amount);
4796 tcg_gen_rotr_i32(t0, t0, t1);
4797 tcg_gen_extu_i32_i64(dst, t0);
4798 tcg_temp_free_i32(t0);
4799 tcg_temp_free_i32(t1);
4800 }
4801 break;
4802 default:
4803 assert(FALSE);
4804 break;
4805 }
4806
4807 if (!sf) {
4808 tcg_gen_ext32u_i64(dst, dst);
4809 }
4810}
4811
4812
4813
4814
4815
/*
 * Shift a TCGv src by an immediate, putting the result in dst.
 * The shift amount must be in range (this should always be true, as
 * the relevant instructions UNDEF on bad shift immediates).
 */
static void shift_reg_imm(TCGv_i64 dst, TCGv_i64 src, int sf,
                          enum a64_shift_type shift_type, unsigned int shift_i)
{
    assert(shift_i < (sf ? 64 : 32));

    if (shift_i == 0) {
        /* Zero shift: a plain move, avoiding a degenerate shift op. */
        tcg_gen_mov_i64(dst, src);
    } else {
        shift_reg(dst, src, sf, shift_type, tcg_constant_i64(shift_i));
    }
}
4827
4828
4829
4830
4831
4832
4833
4834static void disas_logic_reg(DisasContext *s, uint32_t insn)
4835{
4836 TCGv_i64 tcg_rd, tcg_rn, tcg_rm;
4837 unsigned int sf, opc, shift_type, invert, rm, shift_amount, rn, rd;
4838
4839 sf = extract32(insn, 31, 1);
4840 opc = extract32(insn, 29, 2);
4841 shift_type = extract32(insn, 22, 2);
4842 invert = extract32(insn, 21, 1);
4843 rm = extract32(insn, 16, 5);
4844 shift_amount = extract32(insn, 10, 6);
4845 rn = extract32(insn, 5, 5);
4846 rd = extract32(insn, 0, 5);
4847
4848 if (!sf && (shift_amount & (1 << 5))) {
4849 unallocated_encoding(s);
4850 return;
4851 }
4852
4853 tcg_rd = cpu_reg(s, rd);
4854
4855 if (opc == 1 && shift_amount == 0 && shift_type == 0 && rn == 31) {
4856
4857
4858
4859 tcg_rm = cpu_reg(s, rm);
4860 if (invert) {
4861 tcg_gen_not_i64(tcg_rd, tcg_rm);
4862 if (!sf) {
4863 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
4864 }
4865 } else {
4866 if (sf) {
4867 tcg_gen_mov_i64(tcg_rd, tcg_rm);
4868 } else {
4869 tcg_gen_ext32u_i64(tcg_rd, tcg_rm);
4870 }
4871 }
4872 return;
4873 }
4874
4875 tcg_rm = read_cpu_reg(s, rm, sf);
4876
4877 if (shift_amount) {
4878 shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, shift_amount);
4879 }
4880
4881 tcg_rn = cpu_reg(s, rn);
4882
4883 switch (opc | (invert << 2)) {
4884 case 0:
4885 case 3:
4886 tcg_gen_and_i64(tcg_rd, tcg_rn, tcg_rm);
4887 break;
4888 case 1:
4889 tcg_gen_or_i64(tcg_rd, tcg_rn, tcg_rm);
4890 break;
4891 case 2:
4892 tcg_gen_xor_i64(tcg_rd, tcg_rn, tcg_rm);
4893 break;
4894 case 4:
4895 case 7:
4896 tcg_gen_andc_i64(tcg_rd, tcg_rn, tcg_rm);
4897 break;
4898 case 5:
4899 tcg_gen_orc_i64(tcg_rd, tcg_rn, tcg_rm);
4900 break;
4901 case 6:
4902 tcg_gen_eqv_i64(tcg_rd, tcg_rn, tcg_rm);
4903 break;
4904 default:
4905 assert(FALSE);
4906 break;
4907 }
4908
4909 if (!sf) {
4910 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
4911 }
4912
4913 if (opc == 3) {
4914 gen_logic_CC(sf, tcg_rd);
4915 }
4916}
4917
4918
4919
4920
4921
4922
4923
4924
4925
4926
4927
4928
4929
4930
4931
4932
4933
4934
/*
 * Add/subtract (extended register):
 *   Rd = Rn +/- extend(Rm) << imm3
 *
 *  sf: 0 -> 32-bit, 1 -> 64-bit
 *  op (sub_op): 0 -> add, 1 -> sub
 *  S (setflags): 1 -> set NZCV
 *  opt: must be 00
 *  option: extension type; imm3: optional left shift of Rm (0-4)
 */
static void disas_add_sub_ext_reg(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int imm3 = extract32(insn, 10, 3);
    int option = extract32(insn, 13, 3);
    int rm = extract32(insn, 16, 5);
    int opt = extract32(insn, 22, 2);
    bool setflags = extract32(insn, 29, 1);
    bool sub_op = extract32(insn, 30, 1);
    bool sf = extract32(insn, 31, 1);

    TCGv_i64 tcg_rm, tcg_rn; /* temps */
    TCGv_i64 tcg_rd;
    TCGv_i64 tcg_result;

    if (imm3 > 4 || opt != 0) {
        /* Shift amounts > 4 and non-zero opt are reserved. */
        unallocated_encoding(s);
        return;
    }

    /* Non-flag-setting variants may write to SP as Rd. */
    if (!setflags) {
        tcg_rd = cpu_reg_sp(s, rd);
    } else {
        tcg_rd = cpu_reg(s, rd);
    }
    tcg_rn = read_cpu_reg_sp(s, rn, sf);

    tcg_rm = read_cpu_reg(s, rm, sf);
    ext_and_shift_reg(tcg_rm, tcg_rm, option, imm3);

    tcg_result = tcg_temp_new_i64();

    if (!setflags) {
        if (sub_op) {
            tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
        } else {
            tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
        }
    } else {
        if (sub_op) {
            gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
        } else {
            gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
        }
    }

    if (sf) {
        tcg_gen_mov_i64(tcg_rd, tcg_result);
    } else {
        /* 32-bit: zero-extend the result into Rd. */
        tcg_gen_ext32u_i64(tcg_rd, tcg_result);
    }

    tcg_temp_free_i64(tcg_result);
}
4991
4992
4993
4994
4995
4996
4997
4998
4999
5000
5001
5002
5003
5004
5005
/*
 * Add/subtract (shifted register):
 *   Rd = Rn +/- shift(Rm, imm6)
 *
 *  sf: 0 -> 32-bit, 1 -> 64-bit
 *  op (sub_op): 0 -> add, 1 -> sub
 *  S (setflags): 1 -> set NZCV
 *  shift: 00 LSL, 01 LSR, 10 ASR, 11 reserved
 *  imm6: shift amount applied to Rm before the add/sub
 */
static void disas_add_sub_reg(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int imm6 = extract32(insn, 10, 6);
    int rm = extract32(insn, 16, 5);
    int shift_type = extract32(insn, 22, 2);
    bool setflags = extract32(insn, 29, 1);
    bool sub_op = extract32(insn, 30, 1);
    bool sf = extract32(insn, 31, 1);

    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_rn, tcg_rm;
    TCGv_i64 tcg_result;

    if ((shift_type == 3) || (!sf && (imm6 > 31))) {
        /* ROR and 32-bit shift amounts >= 32 are reserved. */
        unallocated_encoding(s);
        return;
    }

    tcg_rn = read_cpu_reg(s, rn, sf);
    tcg_rm = read_cpu_reg(s, rm, sf);

    shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, imm6);

    tcg_result = tcg_temp_new_i64();

    if (!setflags) {
        if (sub_op) {
            tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
        } else {
            tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
        }
    } else {
        if (sub_op) {
            gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
        } else {
            gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
        }
    }

    if (sf) {
        tcg_gen_mov_i64(tcg_rd, tcg_result);
    } else {
        /* 32-bit: zero-extend the result into Rd. */
        tcg_gen_ext32u_i64(tcg_rd, tcg_result);
    }

    tcg_temp_free_i64(tcg_result);
}
5055
5056
5057
5058
5059
5060
5061
5062
/*
 * Data-processing (3 source):
 *   MADD, MSUB, SMADDL, SMSUBL, SMULH, UMADDL, UMSUBL, UMULH.
 */
static void disas_data_proc_3src(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int ra = extract32(insn, 10, 5);
    int rm = extract32(insn, 16, 5);
    /* op_id packs sf-less op54:op31:o0 with bit 4 set for 64-bit ops. */
    int op_id = (extract32(insn, 29, 3) << 4) |
        (extract32(insn, 21, 3) << 1) |
        extract32(insn, 15, 1);
    bool sf = extract32(insn, 31, 1);
    bool is_sub = extract32(op_id, 0, 1);
    bool is_high = extract32(op_id, 2, 1);
    bool is_signed = false;
    TCGv_i64 tcg_op1;
    TCGv_i64 tcg_op2;
    TCGv_i64 tcg_tmp;

    /* Note that op_id includes the size flag, so 32/64-bit map apart. */
    switch (op_id) {
    case 0x42: /* SMADDL */
    case 0x43: /* SMSUBL */
    case 0x44: /* SMULH */
        is_signed = true;
        break;
    case 0x0:  /* MADD (32-bit) */
    case 0x1:  /* MSUB (32-bit) */
    case 0x40: /* MADD (64-bit) */
    case 0x41: /* MSUB (64-bit) */
    case 0x4a: /* UMADDL */
    case 0x4b: /* UMSUBL */
    case 0x4c: /* UMULH */
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (is_high) {
        /* SMULH/UMULH: keep only the high half of the 128-bit product. */
        TCGv_i64 low_bits = tcg_temp_new_i64(); /* low bits discarded */
        TCGv_i64 tcg_rd = cpu_reg(s, rd);
        TCGv_i64 tcg_rn = cpu_reg(s, rn);
        TCGv_i64 tcg_rm = cpu_reg(s, rm);

        if (is_signed) {
            tcg_gen_muls2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
        } else {
            tcg_gen_mulu2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
        }

        tcg_temp_free_i64(low_bits);
        return;
    }

    tcg_op1 = tcg_temp_new_i64();
    tcg_op2 = tcg_temp_new_i64();
    tcg_tmp = tcg_temp_new_i64();

    if (op_id < 0x42) {
        /* MADD/MSUB: full-width operands. */
        tcg_gen_mov_i64(tcg_op1, cpu_reg(s, rn));
        tcg_gen_mov_i64(tcg_op2, cpu_reg(s, rm));
    } else {
        /* The widening SMADDL/UMADDL etc. take 32-bit operands. */
        if (is_signed) {
            tcg_gen_ext32s_i64(tcg_op1, cpu_reg(s, rn));
            tcg_gen_ext32s_i64(tcg_op2, cpu_reg(s, rm));
        } else {
            tcg_gen_ext32u_i64(tcg_op1, cpu_reg(s, rn));
            tcg_gen_ext32u_i64(tcg_op2, cpu_reg(s, rm));
        }
    }

    if (ra == 31 && !is_sub) {
        /* Special-case MADD with rA == XZR: the standard MUL alias. */
        tcg_gen_mul_i64(cpu_reg(s, rd), tcg_op1, tcg_op2);
    } else {
        tcg_gen_mul_i64(tcg_tmp, tcg_op1, tcg_op2);
        if (is_sub) {
            tcg_gen_sub_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
        } else {
            tcg_gen_add_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
        }
    }

    if (!sf) { /* zero-extend the final 32-bit result */
        tcg_gen_ext32u_i64(cpu_reg(s, rd), cpu_reg(s, rd));
    }

    tcg_temp_free_i64(tcg_op1);
    tcg_temp_free_i64(tcg_op2);
    tcg_temp_free_i64(tcg_tmp);
}
5153
5154
5155
5156
5157
5158
5159
5160
/*
 * Add/subtract (with carry): ADC, ADCS, SBC, SBCS.
 * op selects subtract, implemented as ADC with Rm complemented.
 */
static void disas_adc_sbc(DisasContext *s, uint32_t insn)
{
    unsigned int sf, op, setflags, rm, rn, rd;
    TCGv_i64 tcg_y, tcg_rn, tcg_rd;

    sf = extract32(insn, 31, 1);
    op = extract32(insn, 30, 1);
    setflags = extract32(insn, 29, 1);
    rm = extract32(insn, 16, 5);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    tcg_rd = cpu_reg(s, rd);
    tcg_rn = cpu_reg(s, rn);

    if (op) {
        /* SBC/SBCS: y = NOT(Rm), then perform the add-with-carry. */
        tcg_y = new_tmp_a64(s);
        tcg_gen_not_i64(tcg_y, cpu_reg(s, rm));
    } else {
        tcg_y = cpu_reg(s, rm);
    }

    if (setflags) {
        gen_adc_CC(sf, tcg_rd, tcg_rn, tcg_y);
    } else {
        gen_adc(sf, tcg_rd, tcg_rn, tcg_y);
    }
}
5189
5190
5191
5192
5193
5194
5195
5196
/*
 * Rotate right into flags: RMIF (FEAT_FlagM).
 * Rotates Xn right by imm6 and moves its low four bits into the NZCV
 * flags selected by 'mask'.
 */
static void disas_rotate_right_into_flags(DisasContext *s, uint32_t insn)
{
    int mask = extract32(insn, 0, 4);
    int o2 = extract32(insn, 4, 1);
    int rn = extract32(insn, 5, 5);
    int imm6 = extract32(insn, 15, 6);
    int sf_op_s = extract32(insn, 29, 3);
    TCGv_i64 tcg_rn;
    TCGv_i32 nzcv;

    if (sf_op_s != 5 || o2 != 0 || !dc_isar_feature(aa64_condm_4, s)) {
        unallocated_encoding(s);
        return;
    }

    tcg_rn = read_cpu_reg(s, rn, 1);
    tcg_gen_rotri_i64(tcg_rn, tcg_rn, imm6);

    /* nzcv holds the rotated value; bits 3..0 are the new N,Z,C,V. */
    nzcv = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(nzcv, tcg_rn);

    if (mask & 8) { /* N: move bit 3 up to the sign position */
        tcg_gen_shli_i32(cpu_NF, nzcv, 31 - 3);
    }
    if (mask & 4) { /* Z: cpu_ZF is zero iff Z is set, hence the NOT */
        tcg_gen_not_i32(cpu_ZF, nzcv);
        tcg_gen_andi_i32(cpu_ZF, cpu_ZF, 4);
    }
    if (mask & 2) { /* C: bit 1, stored directly */
        tcg_gen_extract_i32(cpu_CF, nzcv, 1, 1);
    }
    if (mask & 1) { /* V: move bit 0 up to the sign position */
        tcg_gen_shli_i32(cpu_VF, nzcv, 31 - 0);
    }

    tcg_temp_free_i32(nzcv);
}
5234
5235
5236
5237
5238
5239
5240
5241
/*
 * Evaluate into flags: SETF8, SETF16 (FEAT_FlagM).
 * Sets NZV from the bottom 8 or 16 bits of Xn as if it were compared
 * with zero at that width; sz selects the width (0: SETF8, 1: SETF16).
 */
static void disas_evaluate_into_flags(DisasContext *s, uint32_t insn)
{
    int o3_mask = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int o2 = extract32(insn, 15, 6);
    int sz = extract32(insn, 14, 1);
    int sf_op_s = extract32(insn, 29, 3);
    TCGv_i32 tmp;
    int shift;

    if (sf_op_s != 1 || o2 != 0 || o3_mask != 0xd ||
        !dc_isar_feature(aa64_condm_4, s)) {
        unallocated_encoding(s);
        return;
    }
    /* Shift the operand's sign bit into bit 31 of NF. */
    shift = sz ? 16 : 24;

    tmp = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(tmp, cpu_reg(s, rn));
    tcg_gen_shli_i32(cpu_NF, tmp, shift);
    /* V = operand<width-2> XOR operand<width-1>; Z mirrors N's value. */
    tcg_gen_shli_i32(cpu_VF, tmp, shift - 1);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_VF, cpu_NF);
    tcg_temp_free_i32(tmp);
}
5267
5268
5269
5270
5271
5272
5273
5274
/*
 * Conditional compare (immediate / register): CCMN, CCMP.
 * If the condition holds, perform the compare and set NZCV from it;
 * otherwise force NZCV to the immediate 'nzcv' field.
 */
static void disas_cc(DisasContext *s, uint32_t insn)
{
    unsigned int sf, op, y, cond, rn, nzcv, is_imm;
    TCGv_i32 tcg_t0, tcg_t1, tcg_t2;
    TCGv_i64 tcg_tmp, tcg_y, tcg_rn;
    DisasCompare c;

    if (!extract32(insn, 29, 1)) {
        /* S == 0 is reserved. */
        unallocated_encoding(s);
        return;
    }
    if (insn & (1 << 10 | 1 << 4)) {
        /* o2 / o3 must be zero. */
        unallocated_encoding(s);
        return;
    }
    sf = extract32(insn, 31, 1);
    op = extract32(insn, 30, 1);
    is_imm = extract32(insn, 11, 1);
    y = extract32(insn, 16, 5); /* y = rm (reg) or #imm5 (imm form) */
    cond = extract32(insn, 12, 4);
    rn = extract32(insn, 5, 5);
    nzcv = extract32(insn, 0, 4);

    /* Set T0 = !COND.  */
    tcg_t0 = tcg_temp_new_i32();
    arm_test_cc(&c, cond);
    tcg_gen_setcondi_i32(tcg_invert_cond(c.cond), tcg_t0, c.value, 0);
    arm_free_cc(&c);

    /* Load the second operand of the comparison into Y.  */
    if (is_imm) {
        tcg_y = new_tmp_a64(s);
        tcg_gen_movi_i64(tcg_y, y);
    } else {
        tcg_y = cpu_reg(s, y);
    }
    tcg_rn = cpu_reg(s, rn);

    /* Set the flags for the new comparison (result discarded).  */
    tcg_tmp = tcg_temp_new_i64();
    if (op) {
        gen_sub_CC(sf, tcg_tmp, tcg_rn, tcg_y);
    } else {
        gen_add_CC(sf, tcg_tmp, tcg_rn, tcg_y);
    }
    tcg_temp_free_i64(tcg_tmp);

    /*
     * If COND was false, force the flags to #nzcv.  Compute two masks
     * to help with this: T1 = (COND ? 0 : -1), T2 = (COND ? -1 : 0).
     * For tcg hosts that support ANDC, we can make do with just T1.
     * In either case, allow the tcg optimizer to delete any unused mask.
     */
    tcg_t1 = tcg_temp_new_i32();
    tcg_t2 = tcg_temp_new_i32();
    tcg_gen_neg_i32(tcg_t1, tcg_t0);
    tcg_gen_subi_i32(tcg_t2, tcg_t0, 1);

    if (nzcv & 8) { /* N */
        tcg_gen_or_i32(cpu_NF, cpu_NF, tcg_t1);
    } else {
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_NF, cpu_NF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_NF, cpu_NF, tcg_t2);
        }
    }
    if (nzcv & 4) { /* Z: cpu_ZF is zero iff Z is set, so clear bits */
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_ZF, cpu_ZF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_ZF, cpu_ZF, tcg_t2);
        }
    } else {
        tcg_gen_or_i32(cpu_ZF, cpu_ZF, tcg_t0);
    }
    if (nzcv & 2) { /* C */
        tcg_gen_or_i32(cpu_CF, cpu_CF, tcg_t0);
    } else {
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_CF, cpu_CF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_CF, cpu_CF, tcg_t2);
        }
    }
    if (nzcv & 1) { /* V */
        tcg_gen_or_i32(cpu_VF, cpu_VF, tcg_t1);
    } else {
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_VF, cpu_VF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_VF, cpu_VF, tcg_t2);
        }
    }
    tcg_temp_free_i32(tcg_t0);
    tcg_temp_free_i32(tcg_t1);
    tcg_temp_free_i32(tcg_t2);
}
5372
5373
5374
5375
5376
5377
5378
/*
 * Conditional select: CSEL, CSINC, CSINV, CSNEG
 * (and their CSET/CSETM/CINC/CINV/CNEG aliases).
 */
static void disas_cond_select(DisasContext *s, uint32_t insn)
{
    unsigned int sf, else_inv, rm, cond, else_inc, rn, rd;
    TCGv_i64 tcg_rd, zero;
    DisasCompare64 c;

    if (extract32(insn, 29, 1) || extract32(insn, 11, 1)) {
        /* S == 1 or op2<1> == 1 */
        unallocated_encoding(s);
        return;
    }
    sf = extract32(insn, 31, 1);
    else_inv = extract32(insn, 30, 1); /* CSINV/CSNEG */
    rm = extract32(insn, 16, 5);
    cond = extract32(insn, 12, 4);
    else_inc = extract32(insn, 10, 1); /* CSINC/CSNEG */
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    tcg_rd = cpu_reg(s, rd);

    a64_test_cc(&c, cond);
    zero = tcg_constant_i64(0);

    if (rn == 31 && rm == 31 && (else_inc ^ else_inv)) {
        /* CSET & CSETM: materialise 1 or -1 from the condition.  */
        tcg_gen_setcond_i64(tcg_invert_cond(c.cond), tcg_rd, c.value, zero);
        if (else_inv) { /* CSETM: negate to get all-ones */
            tcg_gen_neg_i64(tcg_rd, tcg_rd);
        }
    } else {
        TCGv_i64 t_true = cpu_reg(s, rn);
        TCGv_i64 t_false = read_cpu_reg(s, rm, 1);
        /* Pre-transform the 'else' value per the op, then select. */
        if (else_inv && else_inc) { /* CSNEG */
            tcg_gen_neg_i64(t_false, t_false);
        } else if (else_inv) { /* CSINV */
            tcg_gen_not_i64(t_false, t_false);
        } else if (else_inc) { /* CSINC */
            tcg_gen_addi_i64(t_false, t_false, 1);
        }
        tcg_gen_movcond_i64(c.cond, tcg_rd, c.value, zero, t_true, t_false);
    }

    a64_free_cc(&c);

    if (!sf) { /* zero-extend the final 32-bit result */
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
}
5428
/* CLZ: count leading zeros at the 32- or 64-bit width selected by sf. */
static void handle_clz(DisasContext *s, unsigned int sf,
                       unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd, tcg_rn;
    tcg_rd = cpu_reg(s, rd);
    tcg_rn = cpu_reg(s, rn);

    if (sf) {
        /* clzi's third argument is the result for a zero input. */
        tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64);
    } else {
        TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
        tcg_gen_clzi_i32(tcg_tmp32, tcg_tmp32, 32);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
        tcg_temp_free_i32(tcg_tmp32);
    }
}
5446
/* CLS: count leading sign bits at the width selected by sf. */
static void handle_cls(DisasContext *s, unsigned int sf,
                       unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd, tcg_rn;
    tcg_rd = cpu_reg(s, rd);
    tcg_rn = cpu_reg(s, rn);

    if (sf) {
        tcg_gen_clrsb_i64(tcg_rd, tcg_rn);
    } else {
        TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
        tcg_gen_clrsb_i32(tcg_tmp32, tcg_tmp32);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
        tcg_temp_free_i32(tcg_tmp32);
    }
}
5464
/* RBIT: reverse the bit order at the width selected by sf. */
static void handle_rbit(DisasContext *s, unsigned int sf,
                        unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd, tcg_rn;
    tcg_rd = cpu_reg(s, rd);
    tcg_rn = cpu_reg(s, rn);

    if (sf) {
        gen_helper_rbit64(tcg_rd, tcg_rn);
    } else {
        TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
        gen_helper_rbit(tcg_tmp32, tcg_tmp32);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
        tcg_temp_free_i32(tcg_tmp32);
    }
}
5482
5483
/* REV with sf == 1: 64-bit byte reversal (sf == 0 is unallocated). */
static void handle_rev64(DisasContext *s, unsigned int sf,
                         unsigned int rn, unsigned int rd)
{
    if (!sf) {
        unallocated_encoding(s);
        return;
    }
    tcg_gen_bswap64_i64(cpu_reg(s, rd), cpu_reg(s, rn));
}
5493
5494
5495
5496
/*
 * REV with sf == 0 (REV on a W register), or REV32 with sf == 1:
 * byte-reverse each 32-bit word of the register.
 */
static void handle_rev32(DisasContext *s, unsigned int sf,
                         unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_rn = cpu_reg(s, rn);

    if (sf) {
        /* Swap all 8 bytes, then swap the two words back into place. */
        tcg_gen_bswap64_i64(tcg_rd, tcg_rn);
        tcg_gen_rotri_i64(tcg_rd, tcg_rd, 32);
    } else {
        /* Reverse the low word, zero-extending into the high half. */
        tcg_gen_bswap32_i64(tcg_rd, tcg_rn, TCG_BSWAP_OZ);
    }
}
5510
5511
/* REV16: byte-reverse each 16-bit halfword of the register. */
static void handle_rev16(DisasContext *s, unsigned int sf,
                         unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();
    TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
    TCGv_i64 mask = tcg_constant_i64(sf ? 0x00ff00ff00ff00ffull : 0x00ff00ff);

    /* Swap the odd and even bytes: rd = (rn & mask) << 8 | (rn >> 8) & mask */
    tcg_gen_shri_i64(tcg_tmp, tcg_rn, 8);
    tcg_gen_and_i64(tcg_rd, tcg_rn, mask);
    tcg_gen_and_i64(tcg_tmp, tcg_tmp, mask);
    tcg_gen_shli_i64(tcg_rd, tcg_rd, 8);
    tcg_gen_or_i64(tcg_rd, tcg_rd, tcg_tmp);

    tcg_temp_free_i64(tcg_tmp);
}
5528
5529
5530
5531
5532
5533
5534
/*
 * Data-processing (1 source):
 *   RBIT, REV16, REV32, REV64, CLZ, CLS, plus the pointer
 *   authentication ops (PAC*, AUT*, XPACI, XPACD).
 *
 * For the pauth ops: when the feature is implemented but pauth is not
 * currently active, the instruction is a no-op (note the empty
 * fall-through when s->pauth_active is false but the feature exists).
 */
static void disas_data_proc_1src(DisasContext *s, uint32_t insn)
{
    unsigned int sf, opcode, opcode2, rn, rd;
    TCGv_i64 tcg_rd;

    if (extract32(insn, 29, 1)) {
        /* S == 1 is reserved. */
        unallocated_encoding(s);
        return;
    }

    sf = extract32(insn, 31, 1);
    opcode = extract32(insn, 10, 6);
    opcode2 = extract32(insn, 16, 5);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

/* Pack sf:opcode2:opcode into one value for a single switch. */
#define MAP(SF, O2, O1) ((SF) | (O1 << 1) | (O2 << 7))

    switch (MAP(sf, opcode2, opcode)) {
    case MAP(0, 0x00, 0x00): /* RBIT */
    case MAP(1, 0x00, 0x00):
        handle_rbit(s, sf, rn, rd);
        break;
    case MAP(0, 0x00, 0x01): /* REV16 */
    case MAP(1, 0x00, 0x01):
        handle_rev16(s, sf, rn, rd);
        break;
    case MAP(0, 0x00, 0x02): /* REV/REV32 */
    case MAP(1, 0x00, 0x02):
        handle_rev32(s, sf, rn, rd);
        break;
    case MAP(1, 0x00, 0x03): /* REV64 */
        handle_rev64(s, sf, rn, rd);
        break;
    case MAP(0, 0x00, 0x04): /* CLZ */
    case MAP(1, 0x00, 0x04):
        handle_clz(s, sf, rn, rd);
        break;
    case MAP(0, 0x00, 0x05): /* CLS */
    case MAP(1, 0x00, 0x05):
        handle_cls(s, sf, rn, rd);
        break;
    case MAP(1, 0x01, 0x00): /* PACIA */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacia(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x01): /* PACIB */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacib(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x02): /* PACDA */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacda(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x03): /* PACDB */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacdb(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x04): /* AUTIA */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autia(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x05): /* AUTIB */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autib(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x06): /* AUTDA */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autda(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x07): /* AUTDB */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autdb(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x08): /* PACIZA: modifier is zero, Rn must be 31 */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacia(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
        }
        break;
    case MAP(1, 0x01, 0x09): /* PACIZB */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacib(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
        }
        break;
    case MAP(1, 0x01, 0x0a): /* PACDZA */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacda(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
        }
        break;
    case MAP(1, 0x01, 0x0b): /* PACDZB */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacdb(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
        }
        break;
    case MAP(1, 0x01, 0x0c): /* AUTIZA */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autia(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
        }
        break;
    case MAP(1, 0x01, 0x0d): /* AUTIZB */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autib(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
        }
        break;
    case MAP(1, 0x01, 0x0e): /* AUTDZA */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autda(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
        }
        break;
    case MAP(1, 0x01, 0x0f): /* AUTDZB */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autdb(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
        }
        break;
    case MAP(1, 0x01, 0x10): /* XPACI: strip the instruction-key PAC */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_xpaci(tcg_rd, cpu_env, tcg_rd);
        }
        break;
    case MAP(1, 0x01, 0x11): /* XPACD: strip the data-key PAC */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_xpacd(tcg_rd, cpu_env, tcg_rd);
        }
        break;
    default:
    do_unallocated:
        unallocated_encoding(s);
        break;
    }

#undef MAP
}
5729
/* UDIV, SDIV: division at the 32- or 64-bit width selected by sf. */
static void handle_div(DisasContext *s, bool is_signed, unsigned int sf,
                       unsigned int rm, unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_n, tcg_m, tcg_rd;
    tcg_rd = cpu_reg(s, rd);

    if (!sf && is_signed) {
        /* 32-bit SDIV: sign-extend the operands to use the 64-bit helper. */
        tcg_n = new_tmp_a64(s);
        tcg_m = new_tmp_a64(s);
        tcg_gen_ext32s_i64(tcg_n, cpu_reg(s, rn));
        tcg_gen_ext32s_i64(tcg_m, cpu_reg(s, rm));
    } else {
        tcg_n = read_cpu_reg(s, rn, sf);
        tcg_m = read_cpu_reg(s, rm, sf);
    }

    if (is_signed) {
        gen_helper_sdiv64(tcg_rd, tcg_n, tcg_m);
    } else {
        gen_helper_udiv64(tcg_rd, tcg_n, tcg_m);
    }

    if (!sf) { /* zero-extend the final 32-bit result */
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
}
5756
5757
/* LSLV, LSRV, ASRV, RORV: shift by register, modulo the word size. */
static void handle_shift_reg(DisasContext *s,
                             enum a64_shift_type shift_type, unsigned int sf,
                             unsigned int rm, unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_shift = tcg_temp_new_i64();
    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);

    /* The shift amount is Rm taken modulo the register width. */
    tcg_gen_andi_i64(tcg_shift, cpu_reg(s, rm), sf ? 63 : 31);
    shift_reg(tcg_rd, tcg_rn, sf, shift_type, tcg_shift);
    tcg_temp_free_i64(tcg_shift);
}
5770
5771
/* CRC32B/H/W/X, CRC32CB/H/W/X: sz selects the data size (log2 bytes). */
static void handle_crc32(DisasContext *s,
                         unsigned int sf, unsigned int sz, bool crc32c,
                         unsigned int rm, unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_acc, tcg_val;
    TCGv_i32 tcg_bytes;

    /* Only sf=1 with sz=3 (CRC32X), or sf=0 with sz<3, is allocated. */
    if (!dc_isar_feature(aa64_crc32, s)
        || (sf == 1 && sz != 3)
        || (sf == 0 && sz == 3)) {
        unallocated_encoding(s);
        return;
    }

    if (sz == 3) {
        tcg_val = cpu_reg(s, rm);
    } else {
        /* Mask the data value down to the operand size. */
        uint64_t mask;
        switch (sz) {
        case 0:
            mask = 0xFF;
            break;
        case 1:
            mask = 0xFFFF;
            break;
        case 2:
            mask = 0xFFFFFFFF;
            break;
        default:
            g_assert_not_reached();
        }
        tcg_val = new_tmp_a64(s);
        tcg_gen_andi_i64(tcg_val, cpu_reg(s, rm), mask);
    }

    tcg_acc = cpu_reg(s, rn);
    tcg_bytes = tcg_constant_i32(1 << sz);

    if (crc32c) {
        gen_helper_crc32c_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes);
    } else {
        gen_helper_crc32_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes);
    }
}
5816
5817
5818
5819
5820
5821
5822
/* Data-processing (2 source)
 *   31 30 29 28             21 20  16 15    10 9    5 4    0
 * +----+--+--+-----------------+------+--------+------+------+
 * | sf | 0| S| 1 1 0 1 0 1 1 0 |  Rm  | opcode |  Rn  |  Rd  |
 * +----+--+--+-----------------+------+--------+------+------+
 */
static void disas_data_proc_2src(DisasContext *s, uint32_t insn)
{
    unsigned int sf, rm, opcode, rn, rd, setflag;
    sf = extract32(insn, 31, 1);
    setflag = extract32(insn, 29, 1);
    rm = extract32(insn, 16, 5);
    opcode = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    /* Only opcode 0 (SUBP/SUBPS, MTE) has a flag-setting form. */
    if (setflag && opcode != 0) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0: /* SUBP(S): subtract 56-bit sign-extended pointers */
        if (sf == 0 || !dc_isar_feature(aa64_mte_insn_reg, s)) {
            goto do_unallocated;
        } else {
            TCGv_i64 tcg_n, tcg_m, tcg_d;

            /* Strip the tag byte: operands are bits [55:0], sign-extended. */
            tcg_n = read_cpu_reg_sp(s, rn, true);
            tcg_m = read_cpu_reg_sp(s, rm, true);
            tcg_gen_sextract_i64(tcg_n, tcg_n, 0, 56);
            tcg_gen_sextract_i64(tcg_m, tcg_m, 0, 56);
            tcg_d = cpu_reg(s, rd);

            if (setflag) {
                gen_sub_CC(true, tcg_d, tcg_n, tcg_m);
            } else {
                tcg_gen_sub_i64(tcg_d, tcg_n, tcg_m);
            }
        }
        break;
    case 2: /* UDIV */
        handle_div(s, false, sf, rm, rn, rd);
        break;
    case 3: /* SDIV */
        handle_div(s, true, sf, rm, rn, rd);
        break;
    case 4: /* IRG (MTE): insert random tag */
        if (sf == 0 || !dc_isar_feature(aa64_mte_insn_reg, s)) {
            goto do_unallocated;
        }
        if (s->ata) {
            gen_helper_irg(cpu_reg_sp(s, rd), cpu_env,
                           cpu_reg_sp(s, rn), cpu_reg(s, rm));
        } else {
            /* Tag access disabled: just clear the tag field of Rn. */
            gen_address_with_allocation_tag0(cpu_reg_sp(s, rd),
                                             cpu_reg_sp(s, rn));
        }
        break;
    case 5: /* GMI (MTE): set the bit for Rn's tag in the Rm mask */
        if (sf == 0 || !dc_isar_feature(aa64_mte_insn_reg, s)) {
            goto do_unallocated;
        } else {
            TCGv_i64 t = tcg_temp_new_i64();

            /* Extract the 4-bit tag from bits [59:56] of the address. */
            tcg_gen_extract_i64(t, cpu_reg_sp(s, rn), 56, 4);
            tcg_gen_shl_i64(t, tcg_constant_i64(1), t);
            tcg_gen_or_i64(cpu_reg(s, rd), cpu_reg(s, rm), t);

            tcg_temp_free_i64(t);
        }
        break;
    case 8: /* LSLV */
        handle_shift_reg(s, A64_SHIFT_TYPE_LSL, sf, rm, rn, rd);
        break;
    case 9: /* LSRV */
        handle_shift_reg(s, A64_SHIFT_TYPE_LSR, sf, rm, rn, rd);
        break;
    case 10: /* ASRV */
        handle_shift_reg(s, A64_SHIFT_TYPE_ASR, sf, rm, rn, rd);
        break;
    case 11: /* RORV */
        handle_shift_reg(s, A64_SHIFT_TYPE_ROR, sf, rm, rn, rd);
        break;
    case 12: /* PACGA (pointer authentication) */
        if (sf == 0 || !dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        gen_helper_pacga(cpu_reg(s, rd), cpu_env,
                         cpu_reg(s, rn), cpu_reg_sp(s, rm));
        break;
    case 16:
    case 17:
    case 18:
    case 19:
    case 20:
    case 21:
    case 22:
    case 23: /* CRC32/CRC32C: opcode[1:0] = size, opcode[2] = C variant */
    {
        int sz = extract32(opcode, 0, 2);
        bool crc32c = extract32(opcode, 2, 1);
        handle_crc32(s, sf, sz, crc32c, rm, rn, rd);
        break;
    }
    default:
    do_unallocated:
        unallocated_encoding(s);
        break;
    }
}
5928
5929
5930
5931
5932
5933
5934
5935
/* Data processing - register: top-level decode dispatcher.
 * Routes to the add/sub, logical, flag-manipulation, conditional,
 * 1/2-source and 3-source sub-decoders based on op0/op1/op2/op3.
 */
static void disas_data_proc_reg(DisasContext *s, uint32_t insn)
{
    int op0 = extract32(insn, 30, 1);
    int op1 = extract32(insn, 28, 1);
    int op2 = extract32(insn, 21, 4);
    int op3 = extract32(insn, 10, 6);

    if (!op1) {
        if (op2 & 8) {
            if (op2 & 1) {
                /* Add/sub (extended register) */
                disas_add_sub_ext_reg(s, insn);
            } else {
                /* Add/sub (shifted register) */
                disas_add_sub_reg(s, insn);
            }
        } else {
            /* Logical (shifted register) */
            disas_logic_reg(s, insn);
        }
        return;
    }

    switch (op2) {
    case 0x0:
        switch (op3) {
        case 0x00: /* Add/subtract (with carry) */
            disas_adc_sbc(s, insn);
            break;

        case 0x01: /* Rotate right into flags */
        case 0x21:
            disas_rotate_right_into_flags(s, insn);
            break;

        case 0x02: /* Evaluate into flags */
        case 0x12:
        case 0x22:
        case 0x32:
            disas_evaluate_into_flags(s, insn);
            break;

        default:
            goto do_unallocated;
        }
        break;

    case 0x2: /* Conditional compare */
        disas_cc(s, insn);
        break;

    case 0x4: /* Conditional select */
        disas_cond_select(s, insn);
        break;

    case 0x6: /* Data-processing, op0 selects 1-source vs 2-source */
        if (op0) {
            disas_data_proc_1src(s, insn);
        } else {
            disas_data_proc_2src(s, insn);
        }
        break;
    case 0x8 ... 0xf: /* Data-processing (3 source) */
        disas_data_proc_3src(s, insn);
        break;

    default:
    do_unallocated:
        unallocated_encoding(s);
        break;
    }
}
6008
/* Emit a floating-point compare of Vn against Vm (or against zero when
 * cmp_with_zero is set) at element size MO_16/MO_32/MO_64, and copy the
 * resulting flag word into NZCV.  signal_all_nans selects the signalling
 * (FCMPE-style) helper variants, which raise Invalid for quiet NaNs too.
 */
static void handle_fp_compare(DisasContext *s, int size,
                              unsigned int rn, unsigned int rm,
                              bool cmp_with_zero, bool signal_all_nans)
{
    TCGv_i64 tcg_flags = tcg_temp_new_i64();
    TCGv_ptr fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);

    if (size == MO_64) {
        TCGv_i64 tcg_vn, tcg_vm;

        tcg_vn = read_fp_dreg(s, rn);
        if (cmp_with_zero) {
            tcg_vm = tcg_constant_i64(0);
        } else {
            tcg_vm = read_fp_dreg(s, rm);
        }
        if (signal_all_nans) {
            gen_helper_vfp_cmped_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
        } else {
            gen_helper_vfp_cmpd_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
        }
        tcg_temp_free_i64(tcg_vn);
        tcg_temp_free_i64(tcg_vm);
    } else {
        /* Single- and half-precision share the 32-bit path; the element
         * size passed to read_vec_element_i32 distinguishes them.
         */
        TCGv_i32 tcg_vn = tcg_temp_new_i32();
        TCGv_i32 tcg_vm = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_vn, rn, 0, size);
        if (cmp_with_zero) {
            tcg_gen_movi_i32(tcg_vm, 0);
        } else {
            read_vec_element_i32(s, tcg_vm, rm, 0, size);
        }

        switch (size) {
        case MO_32:
            if (signal_all_nans) {
                gen_helper_vfp_cmpes_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
            } else {
                gen_helper_vfp_cmps_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
            }
            break;
        case MO_16:
            if (signal_all_nans) {
                gen_helper_vfp_cmpeh_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
            } else {
                gen_helper_vfp_cmph_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
            }
            break;
        default:
            g_assert_not_reached();
        }

        tcg_temp_free_i32(tcg_vn);
        tcg_temp_free_i32(tcg_vm);
    }

    tcg_temp_free_ptr(fpst);

    gen_set_nzcv(tcg_flags);

    tcg_temp_free_i64(tcg_flags);
}
6072
6073
6074
6075
6076
6077
6078
/* Floating point compare (FCMP/FCMPE, register or zero form).
 * opc bit 0 selects compare-with-zero, bit 1 selects the signalling
 * (FCMPE) variant; mos, op and op2r must all be zero.
 */
static void disas_fp_compare(DisasContext *s, uint32_t insn)
{
    unsigned int mos, type, rm, op, rn, opc, op2r;
    int size;

    mos = extract32(insn, 29, 3);
    type = extract32(insn, 22, 2);
    rm = extract32(insn, 16, 5);
    op = extract32(insn, 14, 2);
    rn = extract32(insn, 5, 5);
    opc = extract32(insn, 3, 2);
    op2r = extract32(insn, 0, 3);

    if (mos || op || op2r) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0:
        size = MO_32;
        break;
    case 1:
        size = MO_64;
        break;
    case 3:
        size = MO_16;
        if (dc_isar_feature(aa64_fp16, s)) {
            break;
        }
        /* fallthru: fp16 not supported */
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    handle_fp_compare(s, size, rn, rm, opc & 1, opc & 2);
}
6121
6122
6123
6124
6125
6126
6127
/* Floating point conditional compare (FCCMP/FCCMPE).
 * If the condition holds, compare Vn with Vm; otherwise set NZCV
 * directly from the immediate nzcv field.  op selects the signalling
 * (FCCMPE) variant.
 */
static void disas_fp_ccomp(DisasContext *s, uint32_t insn)
{
    unsigned int mos, type, rm, cond, rn, op, nzcv;
    TCGLabel *label_continue = NULL;
    int size;

    mos = extract32(insn, 29, 3);
    type = extract32(insn, 22, 2);
    rm = extract32(insn, 16, 5);
    cond = extract32(insn, 12, 4);
    rn = extract32(insn, 5, 5);
    op = extract32(insn, 4, 1);
    nzcv = extract32(insn, 0, 4);

    if (mos) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0:
        size = MO_32;
        break;
    case 1:
        size = MO_64;
        break;
    case 3:
        size = MO_16;
        if (dc_isar_feature(aa64_fp16, s)) {
            break;
        }
        /* fallthru: fp16 not supported */
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* cond 0xe/0xf means "always": skip the branch and just compare. */
    if (cond < 0x0e) {
        TCGLabel *label_match = gen_new_label();
        label_continue = gen_new_label();
        arm_gen_test_cc(cond, label_match);
        /* Condition failed: set NZCV from the immediate and skip. */
        gen_set_nzcv(tcg_constant_i64(nzcv << 28));
        tcg_gen_br(label_continue);
        gen_set_label(label_match);
    }

    handle_fp_compare(s, size, rn, rm, false, op);

    if (cond < 0x0e) {
        gen_set_label(label_continue);
    }
}
6185
6186
6187
6188
6189
6190
6191
/* Floating point conditional select (FCSEL).
 * Vd = cond ? Vn : Vm, at half/single/double precision.
 */
static void disas_fp_csel(DisasContext *s, uint32_t insn)
{
    unsigned int mos, type, rm, cond, rn, rd;
    TCGv_i64 t_true, t_false;
    DisasCompare64 c;
    MemOp sz;

    mos = extract32(insn, 29, 3);
    type = extract32(insn, 22, 2);
    rm = extract32(insn, 16, 5);
    cond = extract32(insn, 12, 4);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    if (mos) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0:
        sz = MO_32;
        break;
    case 1:
        sz = MO_64;
        break;
    case 3:
        sz = MO_16;
        if (dc_isar_feature(aa64_fp16, s)) {
            break;
        }
        /* fallthru: fp16 not supported */
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* Read both candidates into 64-bit temps at element size sz. */
    t_true = tcg_temp_new_i64();
    t_false = tcg_temp_new_i64();
    read_vec_element(s, t_true, rn, 0, sz);
    read_vec_element(s, t_false, rm, 0, sz);

    a64_test_cc(&c, cond);
    tcg_gen_movcond_i64(c.cond, t_true, c.value, tcg_constant_i64(0),
                        t_true, t_false);
    tcg_temp_free_i64(t_false);
    a64_free_cc(&c);

    /* NOTE(review): writing via write_fp_dreg for all sizes appears to rely
     * on read_vec_element having already zero-extended the narrower value
     * into the i64 — confirm against write_fp_dreg's zeroing behaviour.
     */
    write_fp_dreg(s, rd, t_true);
    tcg_temp_free_i64(t_true);
}
6250
6251
/* Floating-point data-processing (1 source) - half precision */
static void handle_fp_1src_half(DisasContext *s, int opcode, int rd, int rn)
{
    TCGv_ptr fpst = NULL;
    TCGv_i32 tcg_op = read_fp_hreg(s, rn);
    TCGv_i32 tcg_res = tcg_temp_new_i32();

    switch (opcode) {
    case 0x0: /* FMOV */
        tcg_gen_mov_i32(tcg_res, tcg_op);
        break;
    case 0x1: /* FABS: clear the fp16 sign bit (bit 15) */
        tcg_gen_andi_i32(tcg_res, tcg_op, 0x7fff);
        break;
    case 0x2: /* FNEG: flip the sign bit */
        tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
        break;
    case 0x3: /* FSQRT */
        fpst = fpstatus_ptr(FPST_FPCR_F16);
        gen_helper_sqrt_f16(tcg_res, tcg_op, fpst);
        break;
    case 0x8: /* FRINTN */
    case 0x9: /* FRINTP */
    case 0xa: /* FRINTM */
    case 0xb: /* FRINTZ */
    case 0xc: /* FRINTA */
    {
        /* Round with an explicit mode: install it, round, then
         * restore the previous mode (set_rmode swaps in place).
         */
        TCGv_i32 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(opcode & 7));
        fpst = fpstatus_ptr(FPST_FPCR_F16);

        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
        gen_helper_advsimd_rinth(tcg_res, tcg_op, fpst);

        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
        tcg_temp_free_i32(tcg_rmode);
        break;
    }
    case 0xe: /* FRINTX: round, signalling inexact */
        fpst = fpstatus_ptr(FPST_FPCR_F16);
        gen_helper_advsimd_rinth_exact(tcg_res, tcg_op, fpst);
        break;
    case 0xf: /* FRINTI: round using the current FPCR mode */
        fpst = fpstatus_ptr(FPST_FPCR_F16);
        gen_helper_advsimd_rinth(tcg_res, tcg_op, fpst);
        break;
    default:
        g_assert_not_reached();
    }

    write_fp_sreg(s, rd, tcg_res);

    if (fpst) {
        tcg_temp_free_ptr(fpst);
    }
    tcg_temp_free_i32(tcg_op);
    tcg_temp_free_i32(tcg_res);
}
6308
6309
/* Floating-point data-processing (1 source) - single precision */
static void handle_fp_1src_single(DisasContext *s, int opcode, int rd, int rn)
{
    void (*gen_fpst)(TCGv_i32, TCGv_i32, TCGv_ptr);
    TCGv_i32 tcg_op, tcg_res;
    TCGv_ptr fpst;
    int rmode = -1;  /* >= 0 means an explicit rounding mode is needed */

    tcg_op = read_fp_sreg(s, rn);
    tcg_res = tcg_temp_new_i32();

    switch (opcode) {
    case 0x0: /* FMOV */
        tcg_gen_mov_i32(tcg_res, tcg_op);
        goto done;
    case 0x1: /* FABS */
        gen_helper_vfp_abss(tcg_res, tcg_op);
        goto done;
    case 0x2: /* FNEG */
        gen_helper_vfp_negs(tcg_res, tcg_op);
        goto done;
    case 0x3: /* FSQRT */
        gen_helper_vfp_sqrts(tcg_res, tcg_op, cpu_env);
        goto done;
    case 0x6: /* BFCVT */
        gen_fpst = gen_helper_bfcvt;
        break;
    case 0x8: /* FRINTN */
    case 0x9: /* FRINTP */
    case 0xa: /* FRINTM */
    case 0xb: /* FRINTZ */
    case 0xc: /* FRINTA */
        rmode = arm_rmode_to_sf(opcode & 7);
        gen_fpst = gen_helper_rints;
        break;
    case 0xe: /* FRINTX */
        gen_fpst = gen_helper_rints_exact;
        break;
    case 0xf: /* FRINTI */
        gen_fpst = gen_helper_rints;
        break;
    case 0x10: /* FRINT32Z */
        rmode = float_round_to_zero;
        gen_fpst = gen_helper_frint32_s;
        break;
    case 0x11: /* FRINT32X */
        gen_fpst = gen_helper_frint32_s;
        break;
    case 0x12: /* FRINT64Z */
        rmode = float_round_to_zero;
        gen_fpst = gen_helper_frint64_s;
        break;
    case 0x13: /* FRINT64X */
        gen_fpst = gen_helper_frint64_s;
        break;
    default:
        g_assert_not_reached();
    }

    fpst = fpstatus_ptr(FPST_FPCR);
    if (rmode >= 0) {
        /* Install the explicit rounding mode around the operation. */
        TCGv_i32 tcg_rmode = tcg_const_i32(rmode);
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
        gen_fpst(tcg_res, tcg_op, fpst);
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
        tcg_temp_free_i32(tcg_rmode);
    } else {
        gen_fpst(tcg_res, tcg_op, fpst);
    }
    tcg_temp_free_ptr(fpst);

 done:
    write_fp_sreg(s, rd, tcg_res);
    tcg_temp_free_i32(tcg_op);
    tcg_temp_free_i32(tcg_res);
}
6385
6386
/* Floating-point data-processing (1 source) - double precision */
static void handle_fp_1src_double(DisasContext *s, int opcode, int rd, int rn)
{
    void (*gen_fpst)(TCGv_i64, TCGv_i64, TCGv_ptr);
    TCGv_i64 tcg_op, tcg_res;
    TCGv_ptr fpst;
    int rmode = -1;  /* >= 0 means an explicit rounding mode is needed */

    switch (opcode) {
    case 0x0:
        /* FMOV: handled as a vector move of the register. */
        gen_gvec_fn2(s, false, rd, rn, tcg_gen_gvec_mov, 0);
        return;
    }

    tcg_op = read_fp_dreg(s, rn);
    tcg_res = tcg_temp_new_i64();

    switch (opcode) {
    case 0x1: /* FABS */
        gen_helper_vfp_absd(tcg_res, tcg_op);
        goto done;
    case 0x2: /* FNEG */
        gen_helper_vfp_negd(tcg_res, tcg_op);
        goto done;
    case 0x3: /* FSQRT */
        gen_helper_vfp_sqrtd(tcg_res, tcg_op, cpu_env);
        goto done;
    case 0x8: /* FRINTN */
    case 0x9: /* FRINTP */
    case 0xa: /* FRINTM */
    case 0xb: /* FRINTZ */
    case 0xc: /* FRINTA */
        rmode = arm_rmode_to_sf(opcode & 7);
        gen_fpst = gen_helper_rintd;
        break;
    case 0xe: /* FRINTX */
        gen_fpst = gen_helper_rintd_exact;
        break;
    case 0xf: /* FRINTI */
        gen_fpst = gen_helper_rintd;
        break;
    case 0x10: /* FRINT32Z */
        rmode = float_round_to_zero;
        gen_fpst = gen_helper_frint32_d;
        break;
    case 0x11: /* FRINT32X */
        gen_fpst = gen_helper_frint32_d;
        break;
    case 0x12: /* FRINT64Z */
        rmode = float_round_to_zero;
        gen_fpst = gen_helper_frint64_d;
        break;
    case 0x13: /* FRINT64X */
        gen_fpst = gen_helper_frint64_d;
        break;
    default:
        g_assert_not_reached();
    }

    fpst = fpstatus_ptr(FPST_FPCR);
    if (rmode >= 0) {
        /* Install the explicit rounding mode around the operation. */
        TCGv_i32 tcg_rmode = tcg_const_i32(rmode);
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
        gen_fpst(tcg_res, tcg_op, fpst);
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
        tcg_temp_free_i32(tcg_rmode);
    } else {
        gen_fpst(tcg_res, tcg_op, fpst);
    }
    tcg_temp_free_ptr(fpst);

 done:
    write_fp_dreg(s, rd, tcg_res);
    tcg_temp_free_i64(tcg_op);
    tcg_temp_free_i64(tcg_res);
}
6462
/* FCVT between precisions: ntype is the source precision, dtype the
 * destination (0 = single, 1 = double, 3 = half).  Half-precision
 * conversions honour the FPCR.AHP alternative-format flag.
 */
static void handle_fp_fcvt(DisasContext *s, int opcode,
                           int rd, int rn, int dtype, int ntype)
{
    switch (ntype) {
    case 0x0:
    {
        TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
        if (dtype == 1) {
            /* Single to double */
            TCGv_i64 tcg_rd = tcg_temp_new_i64();
            gen_helper_vfp_fcvtds(tcg_rd, tcg_rn, cpu_env);
            write_fp_dreg(s, rd, tcg_rd);
            tcg_temp_free_i64(tcg_rd);
        } else {
            /* Single to half */
            TCGv_i32 tcg_rd = tcg_temp_new_i32();
            TCGv_i32 ahp = get_ahp_flag();
            TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);

            gen_helper_vfp_fcvt_f32_to_f16(tcg_rd, tcg_rn, fpst, ahp);
            /* write_fp_sreg is OK here because top half of result is zero */
            write_fp_sreg(s, rd, tcg_rd);
            tcg_temp_free_i32(tcg_rd);
            tcg_temp_free_i32(ahp);
            tcg_temp_free_ptr(fpst);
        }
        tcg_temp_free_i32(tcg_rn);
        break;
    }
    case 0x1:
    {
        TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
        TCGv_i32 tcg_rd = tcg_temp_new_i32();
        if (dtype == 0) {
            /* Double to single */
            gen_helper_vfp_fcvtsd(tcg_rd, tcg_rn, cpu_env);
        } else {
            /* Double to half */
            TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
            TCGv_i32 ahp = get_ahp_flag();

            gen_helper_vfp_fcvt_f64_to_f16(tcg_rd, tcg_rn, fpst, ahp);

            tcg_temp_free_ptr(fpst);
            tcg_temp_free_i32(ahp);
        }
        write_fp_sreg(s, rd, tcg_rd);
        tcg_temp_free_i32(tcg_rd);
        tcg_temp_free_i64(tcg_rn);
        break;
    }
    case 0x3:
    {
        TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
        TCGv_ptr tcg_fpst = fpstatus_ptr(FPST_FPCR);
        TCGv_i32 tcg_ahp = get_ahp_flag();
        /* Only the low 16 bits of the source are meaningful. */
        tcg_gen_ext16u_i32(tcg_rn, tcg_rn);
        if (dtype == 0) {
            /* Half to single */
            TCGv_i32 tcg_rd = tcg_temp_new_i32();
            gen_helper_vfp_fcvt_f16_to_f32(tcg_rd, tcg_rn, tcg_fpst, tcg_ahp);
            write_fp_sreg(s, rd, tcg_rd);
            tcg_temp_free_i32(tcg_rd);
        } else {
            /* Half to double */
            TCGv_i64 tcg_rd = tcg_temp_new_i64();
            gen_helper_vfp_fcvt_f16_to_f64(tcg_rd, tcg_rn, tcg_fpst, tcg_ahp);
            write_fp_dreg(s, rd, tcg_rd);
            tcg_temp_free_i64(tcg_rd);
        }
        tcg_temp_free_i32(tcg_rn);
        tcg_temp_free_ptr(tcg_fpst);
        tcg_temp_free_i32(tcg_ahp);
        break;
    }
    default:
        g_assert_not_reached();
    }
}
6541
6542
6543
6544
6545
6546
6547
/* Floating point data-processing (1 source): decode and dispatch on the
 * operand type (type: 0 = single, 1 = double, 3 = half) and the opcode.
 */
static void disas_fp_1src(DisasContext *s, uint32_t insn)
{
    int mos = extract32(insn, 29, 3);
    int type = extract32(insn, 22, 2);
    int opcode = extract32(insn, 15, 6);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    if (mos) {
        goto do_unallocated;
    }

    switch (opcode) {
    case 0x4: case 0x5: case 0x7:
    {
        /* FCVT between half, single and double precision */
        int dtype = extract32(opcode, 0, 2);
        if (type == 2 || dtype == type) {
            goto do_unallocated;
        }
        if (!fp_access_check(s)) {
            return;
        }

        handle_fp_fcvt(s, opcode, rd, rn, dtype, type);
        break;
    }

    case 0x10 ... 0x13: /* FRINT{32,64}{X,Z} */
        if (type > 1 || !dc_isar_feature(aa64_frint, s)) {
            goto do_unallocated;
        }
        /* fall through */
    case 0x0 ... 0x3:
    case 0x8 ... 0xc:
    case 0xe ... 0xf:
        /* 32-to-32 and 64-to-64 ops */
        switch (type) {
        case 0:
            if (!fp_access_check(s)) {
                return;
            }
            handle_fp_1src_single(s, opcode, rd, rn);
            break;
        case 1:
            if (!fp_access_check(s)) {
                return;
            }
            handle_fp_1src_double(s, opcode, rd, rn);
            break;
        case 3:
            if (!dc_isar_feature(aa64_fp16, s)) {
                goto do_unallocated;
            }

            if (!fp_access_check(s)) {
                return;
            }
            handle_fp_1src_half(s, opcode, rd, rn);
            break;
        default:
            goto do_unallocated;
        }
        break;

    case 0x6: /* BFCVT: single to bfloat16 only */
        switch (type) {
        case 1:
            if (!dc_isar_feature(aa64_bf16, s)) {
                goto do_unallocated;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_fp_1src_single(s, opcode, rd, rn);
            break;
        default:
            goto do_unallocated;
        }
        break;

    default:
    do_unallocated:
        unallocated_encoding(s);
        break;
    }
}
6635
6636
6637static void handle_fp_2src_single(DisasContext *s, int opcode,
6638 int rd, int rn, int rm)
6639{
6640 TCGv_i32 tcg_op1;
6641 TCGv_i32 tcg_op2;
6642 TCGv_i32 tcg_res;
6643 TCGv_ptr fpst;
6644
6645 tcg_res = tcg_temp_new_i32();
6646 fpst = fpstatus_ptr(FPST_FPCR);
6647 tcg_op1 = read_fp_sreg(s, rn);
6648 tcg_op2 = read_fp_sreg(s, rm);
6649
6650 switch (opcode) {
6651 case 0x0:
6652 gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
6653 break;
6654 case 0x1:
6655 gen_helper_vfp_divs(tcg_res, tcg_op1, tcg_op2, fpst);
6656 break;
6657 case 0x2:
6658 gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
6659 break;
6660 case 0x3:
6661 gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
6662 break;
6663 case 0x4:
6664 gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
6665 break;
6666 case 0x5:
6667 gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
6668 break;
6669 case 0x6:
6670 gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
6671 break;
6672 case 0x7:
6673 gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
6674 break;
6675 case 0x8:
6676 gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
6677 gen_helper_vfp_negs(tcg_res, tcg_res);
6678 break;
6679 }
6680
6681 write_fp_sreg(s, rd, tcg_res);
6682
6683 tcg_temp_free_ptr(fpst);
6684 tcg_temp_free_i32(tcg_op1);
6685 tcg_temp_free_i32(tcg_op2);
6686 tcg_temp_free_i32(tcg_res);
6687}
6688
6689
6690static void handle_fp_2src_double(DisasContext *s, int opcode,
6691 int rd, int rn, int rm)
6692{
6693 TCGv_i64 tcg_op1;
6694 TCGv_i64 tcg_op2;
6695 TCGv_i64 tcg_res;
6696 TCGv_ptr fpst;
6697
6698 tcg_res = tcg_temp_new_i64();
6699 fpst = fpstatus_ptr(FPST_FPCR);
6700 tcg_op1 = read_fp_dreg(s, rn);
6701 tcg_op2 = read_fp_dreg(s, rm);
6702
6703 switch (opcode) {
6704 case 0x0:
6705 gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
6706 break;
6707 case 0x1:
6708 gen_helper_vfp_divd(tcg_res, tcg_op1, tcg_op2, fpst);
6709 break;
6710 case 0x2:
6711 gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
6712 break;
6713 case 0x3:
6714 gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
6715 break;
6716 case 0x4:
6717 gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
6718 break;
6719 case 0x5:
6720 gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
6721 break;
6722 case 0x6:
6723 gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
6724 break;
6725 case 0x7:
6726 gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
6727 break;
6728 case 0x8:
6729 gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
6730 gen_helper_vfp_negd(tcg_res, tcg_res);
6731 break;
6732 }
6733
6734 write_fp_dreg(s, rd, tcg_res);
6735
6736 tcg_temp_free_ptr(fpst);
6737 tcg_temp_free_i64(tcg_op1);
6738 tcg_temp_free_i64(tcg_op2);
6739 tcg_temp_free_i64(tcg_res);
6740}
6741
6742
/* Floating-point data-processing (2 source) - half precision.
 * opcode: 0 FMUL, 1 FDIV, 2 FADD, 3 FSUB, 4 FMAX, 5 FMIN,
 *         6 FMAXNM, 7 FMINNM, 8 FNMUL.
 */
static void handle_fp_2src_half(DisasContext *s, int opcode,
                                int rd, int rn, int rm)
{
    TCGv_i32 tcg_op1;
    TCGv_i32 tcg_op2;
    TCGv_i32 tcg_res;
    TCGv_ptr fpst;

    tcg_res = tcg_temp_new_i32();
    fpst = fpstatus_ptr(FPST_FPCR_F16);
    tcg_op1 = read_fp_hreg(s, rn);
    tcg_op2 = read_fp_hreg(s, rm);

    switch (opcode) {
    case 0x0: /* FMUL */
        gen_helper_advsimd_mulh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x1: /* FDIV */
        gen_helper_advsimd_divh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x2: /* FADD */
        gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x3: /* FSUB */
        gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x4: /* FMAX */
        gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x5: /* FMIN */
        gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x6: /* FMAXNM */
        gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x7: /* FMINNM */
        gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x8: /* FNMUL: multiply then flip the fp16 sign bit */
        gen_helper_advsimd_mulh(tcg_res, tcg_op1, tcg_op2, fpst);
        tcg_gen_xori_i32(tcg_res, tcg_res, 0x8000);
        break;
    default:
        g_assert_not_reached();
    }

    write_fp_sreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tcg_op1);
    tcg_temp_free_i32(tcg_op2);
    tcg_temp_free_i32(tcg_res);
}
6796
6797
6798
6799
6800
6801
6802
/* Floating point data-processing (2 source): decode and dispatch on the
 * operand type (type: 0 = single, 1 = double, 3 = half).
 */
static void disas_fp_2src(DisasContext *s, uint32_t insn)
{
    int mos = extract32(insn, 29, 3);
    int type = extract32(insn, 22, 2);
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rm = extract32(insn, 16, 5);
    int opcode = extract32(insn, 12, 4);

    /* Opcodes above 8 (FNMUL) are unallocated; mos must be zero. */
    if (opcode > 8 || mos) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0:
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_2src_single(s, opcode, rd, rn, rm);
        break;
    case 1:
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_2src_double(s, opcode, rd, rn, rm);
        break;
    case 3:
        if (!dc_isar_feature(aa64_fp16, s)) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_2src_half(s, opcode, rd, rn, rm);
        break;
    default:
        unallocated_encoding(s);
    }
}
6844
6845
/* Floating-point data-processing (3 source) - single precision.
 * Implements the FMADD family as a single fused multiply-add:
 * o1 flips the sign of the addend (Ra), and o0 != o1 flips the sign
 * of the product (by negating one multiplicand before the fused op).
 */
static void handle_fp_3src_single(DisasContext *s, bool o0, bool o1,
                                  int rd, int rn, int rm, int ra)
{
    TCGv_i32 tcg_op1, tcg_op2, tcg_op3;
    TCGv_i32 tcg_res = tcg_temp_new_i32();
    TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);

    tcg_op1 = read_fp_sreg(s, rn);
    tcg_op2 = read_fp_sreg(s, rm);
    tcg_op3 = read_fp_sreg(s, ra);

    /* The negations below are done on the inputs, before the single
     * fused multiply-add, so no intermediate rounding is introduced.
     */
    if (o1 == true) {
        gen_helper_vfp_negs(tcg_op3, tcg_op3);
    }

    if (o0 != o1) {
        gen_helper_vfp_negs(tcg_op1, tcg_op1);
    }

    gen_helper_vfp_muladds(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);

    write_fp_sreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tcg_op1);
    tcg_temp_free_i32(tcg_op2);
    tcg_temp_free_i32(tcg_op3);
    tcg_temp_free_i32(tcg_res);
}
6882
6883
/* Floating-point data-processing (3 source) - double precision.
 * o1 flips the sign of the addend (Ra); o0 != o1 flips the sign of
 * the product.  See handle_fp_3src_single for the sign convention.
 */
static void handle_fp_3src_double(DisasContext *s, bool o0, bool o1,
                                  int rd, int rn, int rm, int ra)
{
    TCGv_i64 tcg_op1, tcg_op2, tcg_op3;
    TCGv_i64 tcg_res = tcg_temp_new_i64();
    TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);

    tcg_op1 = read_fp_dreg(s, rn);
    tcg_op2 = read_fp_dreg(s, rm);
    tcg_op3 = read_fp_dreg(s, ra);

    /* Input negations happen before the single fused multiply-add,
     * so no intermediate rounding is introduced.
     */
    if (o1 == true) {
        gen_helper_vfp_negd(tcg_op3, tcg_op3);
    }

    if (o0 != o1) {
        gen_helper_vfp_negd(tcg_op1, tcg_op1);
    }

    gen_helper_vfp_muladdd(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);

    write_fp_dreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(tcg_op1);
    tcg_temp_free_i64(tcg_op2);
    tcg_temp_free_i64(tcg_op3);
    tcg_temp_free_i64(tcg_res);
}
6920
6921
/* Floating-point data-processing (3 source) - half precision.
 * Sign flips are done by XORing the fp16 sign bit (bit 15) directly;
 * same o0/o1 convention as the single/double variants.
 */
static void handle_fp_3src_half(DisasContext *s, bool o0, bool o1,
                                int rd, int rn, int rm, int ra)
{
    TCGv_i32 tcg_op1, tcg_op2, tcg_op3;
    TCGv_i32 tcg_res = tcg_temp_new_i32();
    TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR_F16);

    tcg_op1 = read_fp_hreg(s, rn);
    tcg_op2 = read_fp_hreg(s, rm);
    tcg_op3 = read_fp_hreg(s, ra);

    /* Input negations happen before the single fused multiply-add,
     * so no intermediate rounding is introduced.
     */
    if (o1 == true) {
        tcg_gen_xori_i32(tcg_op3, tcg_op3, 0x8000);
    }

    if (o0 != o1) {
        tcg_gen_xori_i32(tcg_op1, tcg_op1, 0x8000);
    }

    gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);

    write_fp_sreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tcg_op1);
    tcg_temp_free_i32(tcg_op2);
    tcg_temp_free_i32(tcg_op3);
    tcg_temp_free_i32(tcg_res);
}
6958
6959
6960
6961
6962
6963
6964
/* Floating point data-processing (3 source): FMADD/FMSUB/FNMADD/FNMSUB.
 * Dispatch on the operand type (0 = single, 1 = double, 3 = half);
 * o0/o1 select the sign variant, handled by the per-size helpers.
 */
static void disas_fp_3src(DisasContext *s, uint32_t insn)
{
    int mos = extract32(insn, 29, 3);
    int type = extract32(insn, 22, 2);
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int ra = extract32(insn, 10, 5);
    int rm = extract32(insn, 16, 5);
    bool o0 = extract32(insn, 15, 1);
    bool o1 = extract32(insn, 21, 1);

    if (mos) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0:
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_3src_single(s, o0, o1, rd, rn, rm, ra);
        break;
    case 1:
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_3src_double(s, o0, o1, rd, rn, rm, ra);
        break;
    case 3:
        if (!dc_isar_feature(aa64_fp16, s)) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_3src_half(s, o0, o1, rd, rn, rm, ra);
        break;
    default:
        unallocated_encoding(s);
    }
}
7008
7009
7010
7011
7012
7013
7014
/* Floating point immediate (FMOV Vd, #imm): expand the 8-bit encoded
 * immediate to the target precision and write it to the register.
 */
static void disas_fp_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int imm5 = extract32(insn, 5, 5);
    int imm8 = extract32(insn, 13, 8);
    int type = extract32(insn, 22, 2);
    int mos = extract32(insn, 29, 3);
    uint64_t imm;
    MemOp sz;

    if (mos || imm5) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0:
        sz = MO_32;
        break;
    case 1:
        sz = MO_64;
        break;
    case 3:
        sz = MO_16;
        if (dc_isar_feature(aa64_fp16, s)) {
            break;
        }
        /* fallthru: fp16 not supported */
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    imm = vfp_expand_imm(sz, imm8);
    write_fp_dreg(s, rd, tcg_constant_i64(imm));
}
7055
7056
7057
7058
7059
7060
/* Shared handler for the integer <-> floating point conversions:
 * itof selects the direction; type is the FP precision (0 = single,
 * 1 = double, 3 = half); sf is the integer register width; scale
 * gives the fixed-point scaling (64 - scale fractional bits expected
 * by the helpers); opcode bit 0 clears is_signed.
 */
static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode,
                           bool itof, int rmode, int scale, int sf, int type)
{
    bool is_signed = !(opcode & 1);
    TCGv_ptr tcg_fpstatus;
    TCGv_i32 tcg_shift, tcg_single;
    TCGv_i64 tcg_double;

    tcg_fpstatus = fpstatus_ptr(type == 3 ? FPST_FPCR_F16 : FPST_FPCR);

    tcg_shift = tcg_constant_i32(64 - scale);

    if (itof) {
        /* Integer to floating point. */
        TCGv_i64 tcg_int = cpu_reg(s, rn);
        if (!sf) {
            /* A 32-bit source must be extended to 64 bits first. */
            TCGv_i64 tcg_extend = new_tmp_a64(s);

            if (is_signed) {
                tcg_gen_ext32s_i64(tcg_extend, tcg_int);
            } else {
                tcg_gen_ext32u_i64(tcg_extend, tcg_int);
            }

            tcg_int = tcg_extend;
        }

        switch (type) {
        case 1: /* -> double */
            tcg_double = tcg_temp_new_i64();
            if (is_signed) {
                gen_helper_vfp_sqtod(tcg_double, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_uqtod(tcg_double, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            }
            write_fp_dreg(s, rd, tcg_double);
            tcg_temp_free_i64(tcg_double);
            break;

        case 0: /* -> single */
            tcg_single = tcg_temp_new_i32();
            if (is_signed) {
                gen_helper_vfp_sqtos(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_uqtos(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            }
            write_fp_sreg(s, rd, tcg_single);
            tcg_temp_free_i32(tcg_single);
            break;

        case 3: /* -> half */
            tcg_single = tcg_temp_new_i32();
            if (is_signed) {
                gen_helper_vfp_sqtoh(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_uqtoh(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            }
            write_fp_sreg(s, rd, tcg_single);
            tcg_temp_free_i32(tcg_single);
            break;

        default:
            g_assert_not_reached();
        }
    } else {
        /* Floating point to integer. */
        TCGv_i64 tcg_int = cpu_reg(s, rd);
        TCGv_i32 tcg_rmode;

        if (extract32(opcode, 2, 1)) {
            /* There are too many rounding modes to all fit in the
             * immediate rmode field, so FCVTA[US] (round to nearest
             * with ties away) is a special case here.
             */
            rmode = FPROUNDING_TIEAWAY;
        }

        tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));

        /* Install the requested rounding mode; restored after the op. */
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);

        switch (type) {
        case 1: /* double source */
            tcg_double = read_fp_dreg(s, rn);
            if (is_signed) {
                if (!sf) {
                    gen_helper_vfp_tosld(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_tosqd(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                }
            } else {
                if (!sf) {
                    gen_helper_vfp_tould(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touqd(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                }
            }
            if (!sf) {
                /* 32-bit results are written back zero-extended. */
                tcg_gen_ext32u_i64(tcg_int, tcg_int);
            }
            tcg_temp_free_i64(tcg_double);
            break;

        case 0: /* single source */
            tcg_single = read_fp_sreg(s, rn);
            if (sf) {
                if (is_signed) {
                    gen_helper_vfp_tosqs(tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touqs(tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
            } else {
                TCGv_i32 tcg_dest = tcg_temp_new_i32();
                if (is_signed) {
                    gen_helper_vfp_tosls(tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touls(tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
                tcg_gen_extu_i32_i64(tcg_int, tcg_dest);
                tcg_temp_free_i32(tcg_dest);
            }
            tcg_temp_free_i32(tcg_single);
            break;

        case 3: /* half source (held in the low bits of an sreg) */
            tcg_single = read_fp_sreg(s, rn);
            if (sf) {
                if (is_signed) {
                    gen_helper_vfp_tosqh(tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touqh(tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
            } else {
                TCGv_i32 tcg_dest = tcg_temp_new_i32();
                if (is_signed) {
                    gen_helper_vfp_toslh(tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_toulh(tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
                tcg_gen_extu_i32_i64(tcg_int, tcg_dest);
                tcg_temp_free_i32(tcg_dest);
            }
            tcg_temp_free_i32(tcg_single);
            break;

        default:
            g_assert_not_reached();
        }

        /* Restore the previous rounding mode. */
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
        tcg_temp_free_i32(tcg_rmode);
    }

    tcg_temp_free_ptr(tcg_fpstatus);
}
7231
7232
7233
7234
7235
7236
7237
/* Floating point <-> fixed point conversions
 *   31   30  29 28       24 23  22  21 20   19 18    16 15   10 9    5 4    0
 * +----+---+---+-----------+------+---+-------+--------+-------+------+------+
 * | sf | 0 | S | 1 1 1 1 0 | type | 0 | rmode | opcode | scale |  Rn  |  Rd  |
 * +----+---+---+-----------+------+---+-------+--------+-------+------+------+
 */
static void disas_fp_fixed_conv(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int scale = extract32(insn, 10, 6);
    int opcode = extract32(insn, 16, 3);
    int rmode = extract32(insn, 19, 2);
    int type = extract32(insn, 22, 2);
    bool sbit = extract32(insn, 29, 1);
    bool sf = extract32(insn, 31, 1);
    bool itof;

    /* S must be 0; with 32-bit integers only scale values 32..63 are valid */
    if (sbit || (!sf && scale < 32)) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0: /* single precision */
    case 1: /* double precision */
        break;
    case 3: /* half precision: only with FEAT_FP16 */
        if (dc_isar_feature(aa64_fp16, s)) {
            break;
        }
        /* fallthru */
    default:
        unallocated_encoding(s);
        return;
    }

    /* Only four rmode:opcode combinations are allocated in this group */
    switch ((rmode << 3) | opcode) {
    case 0x2: /* SCVTF */
    case 0x3: /* UCVTF */
        itof = true;
        break;
    case 0x18: /* FCVTZS */
    case 0x19: /* FCVTZU */
        itof = false;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    handle_fpfpcvt(s, rd, rn, opcode, itof, FPROUNDING_ZERO, scale, sf, type);
}
7289
static void handle_fmov(DisasContext *s, int rd, int rn, int type, bool itof)
{
    /* FMOV: gpr to or from float, double, or top half of quad fp reg,
     * without conversion: the bits are copied raw. Narrower values are
     * zero-extended into the destination as required.
     * itof: true for integer -> FP direction, false for FP -> integer.
     */
    if (itof) {
        TCGv_i64 tcg_rn = cpu_reg(s, rn);
        TCGv_i64 tmp;

        switch (type) {
        case 0:
            /* 32 bit */
            tmp = tcg_temp_new_i64();
            tcg_gen_ext32u_i64(tmp, tcg_rn);
            write_fp_dreg(s, rd, tmp);
            tcg_temp_free_i64(tmp);
            break;
        case 1:
            /* 64 bit */
            write_fp_dreg(s, rd, tcg_rn);
            break;
        case 2:
            /* 64 bit to top half of quad */
            tcg_gen_st_i64(tcg_rn, cpu_env, fp_reg_hi_offset(s, rd));
            clear_vec_high(s, true, rd);
            break;
        case 3:
            /* 16 bit */
            tmp = tcg_temp_new_i64();
            tcg_gen_ext16u_i64(tmp, tcg_rn);
            write_fp_dreg(s, rd, tmp);
            tcg_temp_free_i64(tmp);
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        TCGv_i64 tcg_rd = cpu_reg(s, rd);

        switch (type) {
        case 0:
            /* 32 bit */
            tcg_gen_ld32u_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_32));
            break;
        case 1:
            /* 64 bit */
            tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_64));
            break;
        case 2:
            /* 64 bits from top half of quad */
            tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_hi_offset(s, rn));
            break;
        case 3:
            /* 16 bit */
            tcg_gen_ld16u_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_16));
            break;
        default:
            g_assert_not_reached();
        }
    }
}
7352
/* FJCVTZS: JavaScript-semantics convert double to signed 32-bit integer */
static void handle_fjcvtzs(DisasContext *s, int rd, int rn)
{
    TCGv_i64 t = read_fp_dreg(s, rn);
    TCGv_ptr fpstatus = fpstatus_ptr(FPST_FPCR);

    gen_helper_fjcvtzs(t, t, fpstatus);

    tcg_temp_free_ptr(fpstatus);

    /* The helper packs the 32-bit result in the low half of t and the
     * value for Z in the high half; N, C and V are always cleared.
     */
    tcg_gen_ext32u_i64(cpu_reg(s, rd), t);
    tcg_gen_extrh_i64_i32(cpu_ZF, t);
    tcg_gen_movi_i32(cpu_CF, 0);
    tcg_gen_movi_i32(cpu_NF, 0);
    tcg_gen_movi_i32(cpu_VF, 0);

    tcg_temp_free_i64(t);
}
7370
7371
7372
7373
7374
7375
7376
/* Floating point <-> integer conversions
 *   31   30  29 28       24 23  22  21 20   19 18 16 15         10 9  5 4  0
 * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
 * | sf | 0 | S | 1 1 1 1 0 | type | 1 | rmode | opc | 0 0 0 0 0 0 | Rn | Rd |
 * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
 */
static void disas_fp_int_conv(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 16, 3);
    int rmode = extract32(insn, 19, 2);
    int type = extract32(insn, 22, 2);
    bool sbit = extract32(insn, 29, 1);
    bool sf = extract32(insn, 31, 1);
    bool itof = false;

    if (sbit) {
        goto do_unallocated;
    }

    switch (opcode) {
    case 2: /* SCVTF */
    case 3: /* UCVTF */
        itof = true;
        /* fallthru */
    case 4: /* FCVTAS */
    case 5: /* FCVTAU */
        if (rmode != 0) {
            goto do_unallocated;
        }
        /* fallthru */
    case 0: /* FCVT[NPMZ]S */
    case 1: /* FCVT[NPMZ]U */
        switch (type) {
        case 0: /* single precision */
        case 1: /* double precision */
            break;
        case 3: /* half precision: only with FEAT_FP16 */
            if (!dc_isar_feature(aa64_fp16, s)) {
                goto do_unallocated;
            }
            break;
        default:
            goto do_unallocated;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_fpfpcvt(s, rd, rn, opcode, itof, rmode, 64, sf, type);
        break;

    default:
        /* Remaining encodings (FMOV, FJCVTZS) keyed on sf:type:rmode:opc */
        switch (sf << 7 | type << 5 | rmode << 3 | opcode) {
        case 0b01100110: /* FMOV half <-> 32-bit int */
        case 0b01100111:
        case 0b11100110: /* FMOV half <-> 64-bit int */
        case 0b11100111:
            if (!dc_isar_feature(aa64_fp16, s)) {
                goto do_unallocated;
            }
            /* fallthru */
        case 0b00000110: /* FMOV 32-bit */
        case 0b00000111:
        case 0b10100110: /* FMOV 64-bit */
        case 0b10100111:
        case 0b11001110: /* FMOV top half of 128-bit */
        case 0b11001111:
            if (!fp_access_check(s)) {
                return;
            }
            itof = opcode & 1;
            handle_fmov(s, rd, rn, type, itof);
            break;

        case 0b00111110: /* FJCVTZS */
            if (!dc_isar_feature(aa64_jscvt, s)) {
                goto do_unallocated;
            } else if (fp_access_check(s)) {
                handle_fjcvtzs(s, rd, rn);
            }
            break;

        default:
        do_unallocated:
            unallocated_encoding(s);
            return;
        }
        break;
    }
}
7462
7463
7464
7465
7466
7467
7468
7469static void disas_data_proc_fp(DisasContext *s, uint32_t insn)
7470{
7471 if (extract32(insn, 24, 1)) {
7472
7473 disas_fp_3src(s, insn);
7474 } else if (extract32(insn, 21, 1) == 0) {
7475
7476 disas_fp_fixed_conv(s, insn);
7477 } else {
7478 switch (extract32(insn, 10, 2)) {
7479 case 1:
7480
7481 disas_fp_ccomp(s, insn);
7482 break;
7483 case 2:
7484
7485 disas_fp_2src(s, insn);
7486 break;
7487 case 3:
7488
7489 disas_fp_csel(s, insn);
7490 break;
7491 case 0:
7492 switch (ctz32(extract32(insn, 12, 4))) {
7493 case 0:
7494
7495 disas_fp_imm(s, insn);
7496 break;
7497 case 1:
7498
7499 disas_fp_compare(s, insn);
7500 break;
7501 case 2:
7502
7503 disas_fp_1src(s, insn);
7504 break;
7505 case 3:
7506 unallocated_encoding(s);
7507 break;
7508 default:
7509
7510 disas_fp_int_conv(s, insn);
7511 break;
7512 }
7513 break;
7514 }
7515 }
7516}
7517
static void do_ext64(DisasContext *s, TCGv_i64 tcg_left, TCGv_i64 tcg_right,
                     int pos)
{
    /* Extract 64 bits from the middle of two concatenated 64 bit
     * vector register slices left:right. The extracted bits start
     * at 'pos' bits into the right (least significant) side.
     * We return the result in tcg_right, and guarantee not to
     * trash tcg_left.
     */
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();
    assert(pos > 0 && pos < 64);

    tcg_gen_shri_i64(tcg_right, tcg_right, pos);
    tcg_gen_shli_i64(tcg_tmp, tcg_left, 64 - pos);
    tcg_gen_or_i64(tcg_right, tcg_right, tcg_tmp);

    tcg_temp_free_i64(tcg_tmp);
}
7536
7537
7538
7539
7540
7541
7542
/* EXT
 *   31  30 29         24 23 22  21 20  16 15  14  11 10  9    5 4    0
 * +---+---+-------------+-----+---+------+---+------+---+------+------+
 * | 0 | Q | 1 0 1 1 1 0 | op2 | 0 |  Rm  | 0 | imm4 | 0 |  Rn  |  Rd  |
 * +---+---+-------------+-----+---+------+---+------+---+------+------+
 */
static void disas_simd_ext(DisasContext *s, uint32_t insn)
{
    int is_q = extract32(insn, 30, 1);
    int op2 = extract32(insn, 22, 2);
    int imm4 = extract32(insn, 11, 4);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    int pos = imm4 << 3; /* extraction position expressed in bits */
    TCGv_i64 tcg_resl, tcg_resh;

    /* op2 must be zero; imm4<3> is valid only in the 128-bit form */
    if (op2 != 0 || (!is_q && extract32(imm4, 3, 1))) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_resh = tcg_temp_new_i64();
    tcg_resl = tcg_temp_new_i64();

    /* Vd gets bits starting at pos bits into Vm:Vn. This is
     * either extracting 128 bits from a 128:128 concatenation, or
     * extracting 64 bits from a 64:64 concatenation.
     */
    if (!is_q) {
        read_vec_element(s, tcg_resl, rn, 0, MO_64);
        if (pos != 0) {
            read_vec_element(s, tcg_resh, rm, 0, MO_64);
            do_ext64(s, tcg_resh, tcg_resl, pos);
        }
    } else {
        TCGv_i64 tcg_hh;
        typedef struct {
            int reg;
            int elt;
        } EltPosns;
        EltPosns eltposns[] = { {rn, 0}, {rn, 1}, {rm, 0}, {rm, 1} };
        EltPosns *elt = eltposns;

        if (pos >= 64) {
            /* extraction starts in the second source element */
            elt++;
            pos -= 64;
        }

        read_vec_element(s, tcg_resl, elt->reg, elt->elt, MO_64);
        elt++;
        read_vec_element(s, tcg_resh, elt->reg, elt->elt, MO_64);
        elt++;
        if (pos != 0) {
            do_ext64(s, tcg_resh, tcg_resl, pos);
            tcg_hh = tcg_temp_new_i64();
            read_vec_element(s, tcg_hh, elt->reg, elt->elt, MO_64);
            do_ext64(s, tcg_hh, tcg_resh, pos);
            tcg_temp_free_i64(tcg_hh);
        }
    }

    write_vec_element(s, tcg_resl, rd, 0, MO_64);
    tcg_temp_free_i64(tcg_resl);
    if (is_q) {
        write_vec_element(s, tcg_resh, rd, 1, MO_64);
    }
    tcg_temp_free_i64(tcg_resh);
    clear_vec_high(s, is_q, rd);
}
7611
7612
7613
7614
7615
7616
7617
/* TBL/TBX
 *   31  30 29         24 23 22  21 20  16 15 14 13  12  11 10 9    5 4    0
 * +---+---+-------------+-----+---+------+-----+----+-----+------+------+
 * | 0 | Q | 0 0 1 1 1 0 | op2 | 0 |  Rm  | len | op | 0 0 |  Rn  |  Rd  |
 * +---+---+-------------+-----+---+------+-----+----+-----+------+------+
 */
static void disas_simd_tb(DisasContext *s, uint32_t insn)
{
    int op2 = extract32(insn, 22, 2);
    int is_q = extract32(insn, 30, 1);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    int is_tbx = extract32(insn, 12, 1);
    int len = (extract32(insn, 13, 2) + 1) * 16; /* table size in bytes */

    if (op2 != 0) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* Table length, TBX flag and first table register number are packed
     * into the gvec 'data' immediate for the helper to unpack.
     */
    tcg_gen_gvec_2_ptr(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rm), cpu_env,
                       is_q ? 16 : 8, vec_full_reg_size(s),
                       (len << 6) | (is_tbx << 5) | rn,
                       gen_helper_simd_tblx);
}
7643
7644
7645
7646
7647
7648
7649
/* ZIP/UZP/TRN
 *   31  30 29         24 23  22  21 20   16 15 14 12 11 10 9    5 4    0
 * +---+---+-------------+------+---+------+---+------+---+------+------+
 * | 0 | Q | 0 0 1 1 1 0 | size | 0 |  Rm  | 0 | opc  | 1 0 | Rn  | Rd  |
 * +---+---+-------------+------+---+------+---+------+---+------+------+
 */
static void disas_simd_zip_trn(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    /* opc field bits [1:0] indicate ZIP/UZP/TRN;
     * bit 2 indicates 1 vs 2 variant of the insn.
     */
    int opcode = extract32(insn, 12, 2);
    bool part = extract32(insn, 14, 1);
    bool is_q = extract32(insn, 30, 1);
    int esize = 8 << size;
    int i, ofs;
    int datasize = is_q ? 128 : 64;
    int elements = datasize / esize;
    TCGv_i64 tcg_res, tcg_resl, tcg_resh;

    if (opcode == 0 || (size == 3 && !is_q)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_resl = tcg_const_i64(0);
    tcg_resh = is_q ? tcg_const_i64(0) : NULL;
    tcg_res = tcg_temp_new_i64();

    for (i = 0; i < elements; i++) {
        switch (opcode) {
        case 1: /* UZP1/2 */
        {
            int midpoint = elements / 2;
            if (i < midpoint) {
                read_vec_element(s, tcg_res, rn, 2 * i + part, size);
            } else {
                read_vec_element(s, tcg_res, rm,
                                 2 * (i - midpoint) + part, size);
            }
            break;
        }
        case 2: /* TRN1/2 */
            if (i & 1) {
                read_vec_element(s, tcg_res, rm, (i & ~1) + part, size);
            } else {
                read_vec_element(s, tcg_res, rn, (i & ~1) + part, size);
            }
            break;
        case 3: /* ZIP1/2 */
        {
            int base = part * elements / 2;
            if (i & 1) {
                read_vec_element(s, tcg_res, rm, base + (i >> 1), size);
            } else {
                read_vec_element(s, tcg_res, rn, base + (i >> 1), size);
            }
            break;
        }
        default:
            g_assert_not_reached();
        }

        /* Accumulate the selected element into the low or high result word */
        ofs = i * esize;
        if (ofs < 64) {
            tcg_gen_shli_i64(tcg_res, tcg_res, ofs);
            tcg_gen_or_i64(tcg_resl, tcg_resl, tcg_res);
        } else {
            tcg_gen_shli_i64(tcg_res, tcg_res, ofs - 64);
            tcg_gen_or_i64(tcg_resh, tcg_resh, tcg_res);
        }
    }

    tcg_temp_free_i64(tcg_res);

    write_vec_element(s, tcg_resl, rd, 0, MO_64);
    tcg_temp_free_i64(tcg_resl);

    if (is_q) {
        write_vec_element(s, tcg_resh, rd, 1, MO_64);
        tcg_temp_free_i64(tcg_resh);
    }
    clear_vec_high(s, is_q, rd);
}
7736
7737
7738
7739
7740
7741
7742
7743
7744
7745
7746
/*
 * do_reduction_op helper
 *
 * This mirrors the Reduce() pseudocode in the ARM ARM. It is
 * important for correct NaN propagation that we do these
 * operations in exactly the order specified by the pseudocode.
 *
 * This is a recursive function; the returned TCG temp should be
 * freed by the calling function once it is done with the value.
 *
 * vmap is a bitmap of the elements still participating at this level
 * of the recursion; size halves on each recursive step.
 */
static TCGv_i32 do_reduction_op(DisasContext *s, int fpopcode, int rn,
                                int esize, int size, int vmap, TCGv_ptr fpst)
{
    if (esize == size) {
        int element;
        MemOp msize = esize == 16 ? MO_16 : MO_32;
        TCGv_i32 tcg_elem;

        /* We should have one register left here */
        assert(ctpop8(vmap) == 1);
        element = ctz32(vmap);
        assert(element < 8);

        tcg_elem = tcg_temp_new_i32();
        read_vec_element_i32(s, tcg_elem, rn, element, msize);
        return tcg_elem;
    } else {
        int bits = size / 2;
        int shift = ctpop8(vmap) / 2;
        int vmap_lo = (vmap >> shift) & vmap;
        int vmap_hi = (vmap & ~vmap_lo);
        TCGv_i32 tcg_hi, tcg_lo, tcg_res;

        tcg_hi = do_reduction_op(s, fpopcode, rn, esize, bits, vmap_hi, fpst);
        tcg_lo = do_reduction_op(s, fpopcode, rn, esize, bits, vmap_lo, fpst);
        tcg_res = tcg_temp_new_i32();

        switch (fpopcode) {
        case 0x0c: /* fmaxnmv half-precision */
            gen_helper_advsimd_maxnumh(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x0f: /* fmaxv half-precision */
            gen_helper_advsimd_maxh(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x1c: /* fminnmv half-precision */
            gen_helper_advsimd_minnumh(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x1f: /* fminv half-precision */
            gen_helper_advsimd_minh(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x2c: /* fmaxnmv */
            gen_helper_vfp_maxnums(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x2f: /* fmaxv */
            gen_helper_vfp_maxs(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x3c: /* fminnmv */
            gen_helper_vfp_minnums(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x3f: /* fminv */
            gen_helper_vfp_mins(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        default:
            g_assert_not_reached();
        }

        tcg_temp_free_i32(tcg_hi);
        tcg_temp_free_i32(tcg_lo);
        return tcg_res;
    }
}
7808
7809
7810
7811
7812
7813
7814
/* AdvSIMD across lanes
 *   31  30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
 * | 0 | Q | U | 0 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
 */
static void disas_simd_across_lanes(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    bool is_q = extract32(insn, 30, 1);
    bool is_u = extract32(insn, 29, 1);
    bool is_fp = false;
    bool is_min = false;
    int esize;
    int elements;
    int i;
    TCGv_i64 tcg_res, tcg_elt;

    switch (opcode) {
    case 0x1b: /* ADDV */
        if (is_u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x3: /* SADDLV, UADDLV */
    case 0xa: /* SMAXV, UMAXV */
    case 0x1a: /* SMINV, UMINV */
        if (size == 3 || (size == 2 && !is_q)) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0xc: /* FMAXNMV, FMINNMV */
    case 0xf: /* FMAXV, FMINV */
        /* Bit 1 of size field encodes min vs max and the actual size is
         * encoded in bit 0
         */
        is_min = extract32(size, 1, 1);
        is_fp = true;
        if (!is_u && dc_isar_feature(aa64_fp16, s)) {
            size = 1;
        } else if (!is_u || !is_q || extract32(size, 0, 1)) {
            unallocated_encoding(s);
            return;
        } else {
            size = 2;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    esize = 8 << size;
    elements = (is_q ? 128 : 64) / esize;

    tcg_res = tcg_temp_new_i64();
    tcg_elt = tcg_temp_new_i64();

    /* These instructions operate across all lanes of a vector
     * to produce a single result. We can guarantee that a 64
     * bit intermediate is sufficient:
     *  + for [US]ADDLV the maximum element size is 32 bits, and
     *    the result type is 64 bits
     *  + for FMAX*V, FMIN*V, ADDV the intermediate type is the
     *    same as the element size, which is 32 bits at most
     * For the integer operations we can choose to work at 64
     * or 32 bits and truncate at the end; for simplicity
     * we use 64 bits always. The floating point
     * ops do require the fix-up to single-precision.
     */
    if (!is_fp) {
        read_vec_element(s, tcg_res, rn, 0, size | (is_u ? 0 : MO_SIGN));

        for (i = 1; i < elements; i++) {
            read_vec_element(s, tcg_elt, rn, i, size | (is_u ? 0 : MO_SIGN));

            switch (opcode) {
            case 0x03: /* SADDLV / UADDLV */
            case 0x1b: /* ADDV */
                tcg_gen_add_i64(tcg_res, tcg_res, tcg_elt);
                break;
            case 0x0a: /* SMAXV / UMAXV */
                if (is_u) {
                    tcg_gen_umax_i64(tcg_res, tcg_res, tcg_elt);
                } else {
                    tcg_gen_smax_i64(tcg_res, tcg_res, tcg_elt);
                }
                break;
            case 0x1a: /* SMINV / UMINV */
                if (is_u) {
                    tcg_gen_umin_i64(tcg_res, tcg_res, tcg_elt);
                } else {
                    tcg_gen_smin_i64(tcg_res, tcg_res, tcg_elt);
                }
                break;
            default:
                g_assert_not_reached();
            }

        }
    } else {
        /* Floating point vector reduction ops which work across 32
         * bit (single) or 16 bit (half-precision) intermediates.
         * Note that correct NaN propagation requires that we do these
         * operations in exactly the order specified by the pseudocode.
         */
        TCGv_ptr fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
        int fpopcode = opcode | is_min << 4 | is_u << 5;
        int vmap = (1 << elements) - 1;
        TCGv_i32 tcg_res32 = do_reduction_op(s, fpopcode, rn, esize,
                                             (is_q ? 128 : 64), vmap, fpst);
        tcg_gen_extu_i32_i64(tcg_res, tcg_res32);
        tcg_temp_free_i32(tcg_res32);
        tcg_temp_free_ptr(fpst);
    }

    tcg_temp_free_i64(tcg_elt);

    /* Now truncate the result to the width required for the final output */
    if (opcode == 0x03) {
        /* SADDLV, UADDLV: result is 2*esize */
        size++;
    }

    switch (size) {
    case 0:
        tcg_gen_ext8u_i64(tcg_res, tcg_res);
        break;
    case 1:
        tcg_gen_ext16u_i64(tcg_res, tcg_res);
        break;
    case 2:
        tcg_gen_ext32u_i64(tcg_res, tcg_res);
        break;
    case 3:
        break;
    default:
        g_assert_not_reached();
    }

    write_fp_dreg(s, rd, tcg_res);
    tcg_temp_free_i64(tcg_res);
}
7963
7964
7965
7966
7967
7968
7969
7970
7971
7972
/* DUP (Element, Vector)
 *
 * Replicate the vector element Vn[index] into every lane of Vd.
 * size: encoded as the lowest set bit of imm5 (ARM ARM LowestSetBit())
 * index: encoded in imm5<4:size+1>
 */
static void handle_simd_dupe(DisasContext *s, int is_q, int rd, int rn,
                             int imm5)
{
    int size = ctz32(imm5);
    int index;

    /* size 3 (64-bit elements) is valid only in the 128-bit form */
    if (size > 3 || (size == 3 && !is_q)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    index = imm5 >> (size + 1);
    tcg_gen_gvec_dup_mem(size, vec_full_reg_offset(s, rd),
                         vec_reg_offset(s, rn, index, size),
                         is_q ? 16 : 8, vec_full_reg_size(s));
}
7993
7994
7995
7996
7997
7998
7999
/* DUP (element, scalar)
 *
 * size: encoded as the lowest set bit of imm5
 * index: encoded in imm5<4:size+1>
 */
static void handle_simd_dupes(DisasContext *s, int rd, int rn,
                              int imm5)
{
    int size = ctz32(imm5);
    int index;
    TCGv_i64 tmp;

    if (size > 3) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    index = imm5 >> (size + 1);

    /* This instruction just extracts the specified element and
     * zero-extends it into the bottom of the destination register.
     */
    tmp = tcg_temp_new_i64();
    read_vec_element(s, tmp, rn, index, size);
    write_fp_dreg(s, rd, tmp);
    tcg_temp_free_i64(tmp);
}
8026
8027
8028
8029
8030
8031
8032
8033
8034
8035
8036static void handle_simd_dupg(DisasContext *s, int is_q, int rd, int rn,
8037 int imm5)
8038{
8039 int size = ctz32(imm5);
8040 uint32_t dofs, oprsz, maxsz;
8041
8042 if (size > 3 || ((size == 3) && !is_q)) {
8043 unallocated_encoding(s);
8044 return;
8045 }
8046
8047 if (!fp_access_check(s)) {
8048 return;
8049 }
8050
8051 dofs = vec_full_reg_offset(s, rd);
8052 oprsz = is_q ? 16 : 8;
8053 maxsz = vec_full_reg_size(s);
8054
8055 tcg_gen_gvec_dup_i64(size, dofs, oprsz, maxsz, cpu_reg(s, rn));
8056}
8057
8058
8059
8060
8061
8062
8063
8064
8065
8066
8067
/* INS (Element)
 *
 * Copy Vn[src_index] into Vd[dst_index], leaving other lanes of Vd alone.
 * size: encoded as the lowest set bit of imm5
 * dst_index: encoded in imm5<4:size+1>
 * src_index: encoded in imm4<3:size>
 */
static void handle_simd_inse(DisasContext *s, int rd, int rn,
                             int imm4, int imm5)
{
    int size = ctz32(imm5);
    int src_index, dst_index;
    TCGv_i64 tmp;

    if (size > 3) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    dst_index = extract32(imm5, 1+size, 5);
    src_index = extract32(imm4, size, 4);

    tmp = tcg_temp_new_i64();

    read_vec_element(s, tmp, rn, src_index, size);
    write_vec_element(s, tmp, rd, dst_index, size);

    tcg_temp_free_i64(tmp);

    /* INS is considered a 128-bit write for SVE. */
    clear_vec_high(s, true, rd);
}
8097
8098
8099
8100
8101
8102
8103
8104
8105
8106
8107
8108
8109static void handle_simd_insg(DisasContext *s, int rd, int rn, int imm5)
8110{
8111 int size = ctz32(imm5);
8112 int idx;
8113
8114 if (size > 3) {
8115 unallocated_encoding(s);
8116 return;
8117 }
8118
8119 if (!fp_access_check(s)) {
8120 return;
8121 }
8122
8123 idx = extract32(imm5, 1 + size, 4 - size);
8124 write_vec_element(s, cpu_reg(s, rn), rd, idx, size);
8125
8126
8127 clear_vec_high(s, true, rd);
8128}
8129
8130
8131
8132
8133
8134
8135
8136
8137
8138
8139
8140
8141
/* SMOV/UMOV (is_signed selects SMOV)
 *
 * Move a vector element to a general register, sign extending for SMOV
 * and zero extending for UMOV; is_q selects a 64-bit destination.
 * size: encoded as the lowest set bit of imm5
 * element: encoded in imm5<4:size+1>
 */
static void handle_simd_umov_smov(DisasContext *s, int is_q, int is_signed,
                                  int rn, int rd, int imm5)
{
    int size = ctz32(imm5);
    int element;
    TCGv_i64 tcg_rd;

    /* Check for UnallocatedEncodings */
    if (is_signed) {
        if (size > 2 || (size == 2 && !is_q)) {
            unallocated_encoding(s);
            return;
        }
    } else {
        if (size > 3
            || (size < 3 && is_q)
            || (size == 3 && !is_q)) {
            unallocated_encoding(s);
            return;
        }
    }

    if (!fp_access_check(s)) {
        return;
    }

    element = extract32(imm5, 1+size, 4);

    tcg_rd = cpu_reg(s, rd);
    read_vec_element(s, tcg_rd, rn, element, size | (is_signed ? MO_SIGN : 0));
    if (is_signed && !is_q) {
        /* 32-bit SMOV: the Wd result must have its high half zeroed */
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
}
8176
8177
8178
8179
8180
8181
8182
8183static void disas_simd_copy(DisasContext *s, uint32_t insn)
8184{
8185 int rd = extract32(insn, 0, 5);
8186 int rn = extract32(insn, 5, 5);
8187 int imm4 = extract32(insn, 11, 4);
8188 int op = extract32(insn, 29, 1);
8189 int is_q = extract32(insn, 30, 1);
8190 int imm5 = extract32(insn, 16, 5);
8191
8192 if (op) {
8193 if (is_q) {
8194
8195 handle_simd_inse(s, rd, rn, imm4, imm5);
8196 } else {
8197 unallocated_encoding(s);
8198 }
8199 } else {
8200 switch (imm4) {
8201 case 0:
8202
8203 handle_simd_dupe(s, is_q, rd, rn, imm5);
8204 break;
8205 case 1:
8206
8207 handle_simd_dupg(s, is_q, rd, rn, imm5);
8208 break;
8209 case 3:
8210 if (is_q) {
8211
8212 handle_simd_insg(s, rd, rn, imm5);
8213 } else {
8214 unallocated_encoding(s);
8215 }
8216 break;
8217 case 5:
8218 case 7:
8219
8220 handle_simd_umov_smov(s, is_q, (imm4 == 5), rn, rd, imm5);
8221 break;
8222 default:
8223 unallocated_encoding(s);
8224 break;
8225 }
8226 }
8227}
8228
8229
8230
8231
8232
8233
8234
8235
8236
8237
8238
8239
8240
8241
8242
/* AdvSIMD modified immediate
 *  31  30   29  28                 19 18 16 15   12  11  10  9     5 4    0
 * +---+---+----+---------------------+-----+-------+----+---+-------+------+
 * | 0 | Q | op | 0 1 1 1 1 0 0 0 0 0 | abc | cmode | o2 | 1 | defgh |  Rd  |
 * +---+---+----+---------------------+-----+-------+----+---+-------+------+
 *
 * Covers MOVI, MVNI, ORR and BIC (vector, immediate), plus the
 * half-precision FMOV (vector, immediate) added by FEAT_FP16.
 */
static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int cmode = extract32(insn, 12, 4);
    int o2 = extract32(insn, 11, 1);
    uint64_t abcdefgh = extract32(insn, 5, 5) | (extract32(insn, 16, 3) << 5);
    bool is_neg = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);
    uint64_t imm = 0;

    if (o2 != 0 || ((cmode == 0xf) && is_neg && !is_q)) {
        /* Check for FMOV (vector, immediate) - half-precision */
        if (!(dc_isar_feature(aa64_fp16, s) && o2 && cmode == 0xf)) {
            unallocated_encoding(s);
            return;
        }
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (cmode == 15 && o2 && !is_neg) {
        /* FMOV (vector, immediate) - half-precision */
        imm = vfp_expand_imm(MO_16, abcdefgh);
        /* now duplicate across the lanes */
        imm = dup_const(MO_16, imm);
    } else {
        imm = asimd_imm_const(abcdefgh, cmode, is_neg);
    }

    if (!((cmode & 0x9) == 0x1 || (cmode & 0xd) == 0x9)) {
        /* MOVI or MVNI, with MVNI negation handled above.  */
        tcg_gen_gvec_dup_imm(MO_64, vec_full_reg_offset(s, rd), is_q ? 16 : 8,
                             vec_full_reg_size(s), imm);
    } else {
        /* ORR or BIC, with BIC negation to AND handled above.  */
        if (is_neg) {
            gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_andi, MO_64);
        } else {
            gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_ori, MO_64);
        }
    }
}
8287
8288
8289
8290
8291
8292
8293
8294static void disas_simd_scalar_copy(DisasContext *s, uint32_t insn)
8295{
8296 int rd = extract32(insn, 0, 5);
8297 int rn = extract32(insn, 5, 5);
8298 int imm4 = extract32(insn, 11, 4);
8299 int imm5 = extract32(insn, 16, 5);
8300 int op = extract32(insn, 29, 1);
8301
8302 if (op != 0 || imm4 != 0) {
8303 unallocated_encoding(s);
8304 return;
8305 }
8306
8307
8308 handle_simd_dupes(s, rd, rn, imm5);
8309}
8310
8311
8312
8313
8314
8315
8316
/* AdvSIMD scalar pairwise
 *  31 30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
 * +-----+---+-----------+------+-----------+--------+-----+------+------+
 * | 0 1 | U | 1 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 1 |  Rn  |  Rd  |
 * +-----+---+-----------+------+-----------+--------+-----+------+------+
 */
static void disas_simd_scalar_pairwise(DisasContext *s, uint32_t insn)
{
    int u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    TCGv_ptr fpst;

    /* For some ops (the FP ones), size[1] is part of the encoding.
     * For ADDP strictly it is not but size[1] is always 1 for valid
     * encodings.
     */
    opcode |= (extract32(size, 1, 1) << 5);

    switch (opcode) {
    case 0x3b: /* ADDP */
        if (u || size != 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }

        fpst = NULL;
        break;
    case 0xc: /* FMAXNMP */
    case 0xd: /* FADDP */
    case 0xf: /* FMAXP */
    case 0x2c: /* FMINNMP */
    case 0x2f: /* FMINP */
        /* FP op; !u selects the half-precision (FEAT_FP16) variants,
         * otherwise size[0] selects 32 vs 64 bit.
         */
        if (!u) {
            if (!dc_isar_feature(aa64_fp16, s)) {
                unallocated_encoding(s);
                return;
            } else {
                size = MO_16;
            }
        } else {
            size = extract32(size, 0, 1) ? MO_64 : MO_32;
        }

        if (!fp_access_check(s)) {
            return;
        }

        fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (size == MO_64) {
        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
        TCGv_i64 tcg_op2 = tcg_temp_new_i64();
        TCGv_i64 tcg_res = tcg_temp_new_i64();

        /* The two pairwise operands are elements 0 and 1 of Vn */
        read_vec_element(s, tcg_op1, rn, 0, MO_64);
        read_vec_element(s, tcg_op2, rn, 1, MO_64);

        switch (opcode) {
        case 0x3b: /* ADDP */
            tcg_gen_add_i64(tcg_res, tcg_op1, tcg_op2);
            break;
        case 0xc: /* FMAXNMP */
            gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0xd: /* FADDP */
            gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0xf: /* FMAXP */
            gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0x2c: /* FMINNMP */
            gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0x2f: /* FMINP */
            gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        default:
            g_assert_not_reached();
        }

        write_fp_dreg(s, rd, tcg_res);

        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2);
        tcg_temp_free_i64(tcg_res);
    } else {
        TCGv_i32 tcg_op1 = tcg_temp_new_i32();
        TCGv_i32 tcg_op2 = tcg_temp_new_i32();
        TCGv_i32 tcg_res = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_op1, rn, 0, size);
        read_vec_element_i32(s, tcg_op2, rn, 1, size);

        if (size == MO_16) {
            switch (opcode) {
            case 0xc: /* FMAXNMP */
                gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xd: /* FADDP */
                gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xf: /* FMAXP */
                gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x2c: /* FMINNMP */
                gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x2f: /* FMINP */
                gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            switch (opcode) {
            case 0xc: /* FMAXNMP */
                gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xd: /* FADDP */
                gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xf: /* FMAXP */
                gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x2c: /* FMINNMP */
                gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x2f: /* FMINP */
                gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }
        }

        write_fp_sreg(s, rd, tcg_res);

        tcg_temp_free_i32(tcg_op1);
        tcg_temp_free_i32(tcg_op2);
        tcg_temp_free_i32(tcg_res);
    }

    if (fpst) {
        tcg_temp_free_ptr(fpst);
    }
}
8469
8470
8471
8472
8473
8474
8475
/*
 * Common SSHR[RA]/USHR[RA] - Shift right (optional rounding/accumulate)
 *
 * This handles the common shift-right-with-optional-rounding/accumulate
 * code and is used by both the vector and scalar paths.
 * tcg_rnd == NULL means no rounding; tcg_src is clobbered.
 */
static void handle_shri_with_rndacc(TCGv_i64 tcg_res, TCGv_i64 tcg_src,
                                    TCGv_i64 tcg_rnd, bool accumulate,
                                    bool is_u, int size, int shift)
{
    bool extended_result = false;
    bool round = tcg_rnd != NULL;
    int ext_lshift = 0;
    TCGv_i64 tcg_src_hi;

    if (round && size == 3) {
        /* rounding a full 64-bit value may carry into a 65th bit */
        extended_result = true;
        ext_lshift = 64 - shift;
        tcg_src_hi = tcg_temp_new_i64();
    } else if (shift == 64) {
        if (!accumulate && is_u) {
            /* result is zero */
            tcg_gen_movi_i64(tcg_res, 0);
            return;
        }
    }

    /* Deal with the rounding step */
    if (round) {
        if (extended_result) {
            TCGv_i64 tcg_zero = tcg_constant_i64(0);
            if (!is_u) {
                /* take care of sign extending tcg_src */
                tcg_gen_sari_i64(tcg_src_hi, tcg_src, 63);
                tcg_gen_add2_i64(tcg_src, tcg_src_hi,
                                 tcg_src, tcg_src_hi,
                                 tcg_rnd, tcg_zero);
            } else {
                tcg_gen_add2_i64(tcg_src, tcg_src_hi,
                                 tcg_src, tcg_zero,
                                 tcg_rnd, tcg_zero);
            }
        } else {
            tcg_gen_add_i64(tcg_src, tcg_src, tcg_rnd);
        }
    }

    /* Now do the shift right */
    if (round && extended_result) {
        /* extended case, >64 bit precision required */
        if (ext_lshift == 0) {
            /* special case, only high bits matter */
            tcg_gen_mov_i64(tcg_src, tcg_src_hi);
        } else {
            tcg_gen_shri_i64(tcg_src, tcg_src, shift);
            tcg_gen_shli_i64(tcg_src_hi, tcg_src_hi, ext_lshift);
            tcg_gen_or_i64(tcg_src, tcg_src, tcg_src_hi);
        }
    } else {
        if (is_u) {
            if (shift == 64) {
                /* essentially shifting in 64 zeros */
                tcg_gen_movi_i64(tcg_src, 0);
            } else {
                tcg_gen_shri_i64(tcg_src, tcg_src, shift);
            }
        } else {
            if (shift == 64) {
                /* effectively extending the sign-bit */
                tcg_gen_sari_i64(tcg_src, tcg_src, 63);
            } else {
                tcg_gen_sari_i64(tcg_src, tcg_src, shift);
            }
        }
    }

    if (accumulate) {
        tcg_gen_add_i64(tcg_res, tcg_res, tcg_src);
    } else {
        tcg_gen_mov_i64(tcg_res, tcg_src);
    }

    if (extended_result) {
        tcg_temp_free_i64(tcg_src_hi);
    }
}
8556
8557
/* SSHR[RA]/USHR[RA] - Scalar shift right (optional rounding/accumulate) */
static void handle_scalar_simd_shri(DisasContext *s,
                                    bool is_u, int immh, int immb,
                                    int opcode, int rn, int rd)
{
    const int size = 3;
    int immhb = immh << 3 | immb;
    int shift = 2 * (8 << size) - immhb;
    bool accumulate = false;
    bool round = false;
    bool insert = false;
    TCGv_i64 tcg_rn;
    TCGv_i64 tcg_rd;
    TCGv_i64 tcg_round;

    /* scalar form only operates on 64-bit elements (immh<3> set) */
    if (!extract32(immh, 3, 1)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    switch (opcode) {
    case 0x02: /* SSRA / USRA (accumulate) */
        accumulate = true;
        break;
    case 0x04: /* SRSHR / URSHR (rounding) */
        round = true;
        break;
    case 0x06: /* SRSRA / URSRA (accum + rounding) */
        accumulate = round = true;
        break;
    case 0x08: /* SRI */
        insert = true;
        break;
    }

    if (round) {
        tcg_round = tcg_constant_i64(1ULL << (shift - 1));
    } else {
        tcg_round = NULL;
    }

    tcg_rn = read_fp_dreg(s, rn);
    tcg_rd = (accumulate || insert) ? read_fp_dreg(s, rd) : tcg_temp_new_i64();

    if (insert) {
        /* shift count same as element size is valid but does nothing;
         * Rd is written back unchanged in that case.
         */
        int esize = 8 << size;
        if (shift != esize) {
            tcg_gen_shri_i64(tcg_rn, tcg_rn, shift);
            tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, 0, esize - shift);
        }
    } else {
        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
                                accumulate, is_u, size, shift);
    }

    write_fp_dreg(s, rd, tcg_rd);

    tcg_temp_free_i64(tcg_rn);
    tcg_temp_free_i64(tcg_rd);
}
8624
8625
/* SHL/SLI - Scalar shift left (insert selects SLI) */
static void handle_scalar_simd_shli(DisasContext *s, bool insert,
                                    int immh, int immb, int opcode,
                                    int rn, int rd)
{
    int size = 32 - clz32(immh) - 1;
    int immhb = immh << 3 | immb;
    int shift = immhb - (8 << size);
    TCGv_i64 tcg_rn;
    TCGv_i64 tcg_rd;

    /* scalar form only operates on 64-bit elements (immh<3> set) */
    if (!extract32(immh, 3, 1)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rn = read_fp_dreg(s, rn);
    tcg_rd = insert ? read_fp_dreg(s, rd) : tcg_temp_new_i64();

    if (insert) {
        /* SLI: keep the low 'shift' bits of Rd, insert Rn above them */
        tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, shift, 64 - shift);
    } else {
        tcg_gen_shli_i64(tcg_rd, tcg_rn, shift);
    }

    write_fp_dreg(s, rd, tcg_rd);

    tcg_temp_free_i64(tcg_rn);
    tcg_temp_free_i64(tcg_rd);
}
8659
8660
8661
/* SQSHRN/SQSHRUN - Saturating (signed/unsigned) shift right with
 * (signed/unsigned) narrowing */
static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q,
                                   bool is_u_shift, bool is_u_narrow,
                                   int immh, int immb, int opcode,
                                   int rn, int rd)
{
    int immhb = immh << 3 | immb;
    int size = 32 - clz32(immh) - 1;
    int esize = 8 << size;
    int shift = (2 * esize) - immhb;
    int elements = is_scalar ? 1 : (64 / esize);
    bool round = extract32(opcode, 0, 1);
    MemOp ldop = (size + 1) | (is_u_shift ? 0 : MO_SIGN);
    TCGv_i64 tcg_rn, tcg_rd, tcg_round;
    TCGv_i32 tcg_rd_narrowed;
    TCGv_i64 tcg_final;

    static NeonGenNarrowEnvFn * const signed_narrow_fns[4][2] = {
        { gen_helper_neon_narrow_sat_s8,
          gen_helper_neon_unarrow_sat8 },
        { gen_helper_neon_narrow_sat_s16,
          gen_helper_neon_unarrow_sat16 },
        { gen_helper_neon_narrow_sat_s32,
          gen_helper_neon_unarrow_sat32 },
        { NULL, NULL },
    };
    static NeonGenNarrowEnvFn * const unsigned_narrow_fns[4] = {
        gen_helper_neon_narrow_sat_u8,
        gen_helper_neon_narrow_sat_u16,
        gen_helper_neon_narrow_sat_u32,
        NULL
    };
    NeonGenNarrowEnvFn *narrowfn;

    int i;

    assert(size < 4);

    /* immh<3> set would imply 64-bit narrow destination elements */
    if (extract32(immh, 3, 1)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (is_u_shift) {
        narrowfn = unsigned_narrow_fns[size];
    } else {
        narrowfn = signed_narrow_fns[size][is_u_narrow ? 1 : 0];
    }

    tcg_rn = tcg_temp_new_i64();
    tcg_rd = tcg_temp_new_i64();
    tcg_rd_narrowed = tcg_temp_new_i32();
    tcg_final = tcg_const_i64(0);

    if (round) {
        tcg_round = tcg_constant_i64(1ULL << (shift - 1));
    } else {
        tcg_round = NULL;
    }

    /* Shift each wide source element right, saturate-narrow it, and
     * deposit the narrowed value into the 64-bit result accumulator.
     */
    for (i = 0; i < elements; i++) {
        read_vec_element(s, tcg_rn, rn, i, ldop);
        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
                                false, is_u_shift, size+1, shift);
        narrowfn(tcg_rd_narrowed, cpu_env, tcg_rd);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_rd_narrowed);
        tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
    }

    /* The 64-bit narrowed result goes in the low half of Rd (Q == 0)
     * or in the high half (the "2" variants, Q == 1).
     */
    if (!is_q) {
        write_vec_element(s, tcg_final, rd, 0, MO_64);
    } else {
        write_vec_element(s, tcg_final, rd, 1, MO_64);
    }

    tcg_temp_free_i64(tcg_rn);
    tcg_temp_free_i64(tcg_rd);
    tcg_temp_free_i32(tcg_rd_narrowed);
    tcg_temp_free_i64(tcg_final);

    clear_vec_high(s, is_q, rd);
}
8747
8748
/* SQSHL, UQSHL, SQSHLU - saturating shift left by immediate,
 * scalar and vector forms.
 */
static void handle_simd_qshl(DisasContext *s, bool scalar, bool is_q,
                             bool src_unsigned, bool dst_unsigned,
                             int immh, int immb, int rn, int rd)
{
    int immhb = immh << 3 | immb;
    int size = 32 - clz32(immh) - 1;
    int shift = immhb - (8 << size);
    int pass;

    assert(immh != 0);
    assert(!(scalar && is_q));

    if (!scalar) {
        if (!is_q && extract32(immh, 3, 1)) {
            /* 64-bit elements require the Q form of the vector insn */
            unallocated_encoding(s);
            return;
        }

        /*
         * The variable-shift Neon helpers used below take a per-lane
         * shift count, so replicate the immediate shift amount into
         * every byte/halfword lane of the shift operand.
         */
        switch (size) {
        case 0:
            shift |= shift << 8;
            /* fall through */
        case 1:
            shift |= shift << 16;
            break;
        case 2:
        case 3:
            break;
        default:
            g_assert_not_reached();
        }
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (size == 3) {
        TCGv_i64 tcg_shift = tcg_constant_i64(shift);
        /* Indexed [src_unsigned][dst_unsigned]; unsigned src with
         * signed dst has no architectural encoding, hence NULL.
         */
        static NeonGenTwo64OpEnvFn * const fns[2][2] = {
            { gen_helper_neon_qshl_s64, gen_helper_neon_qshlu_s64 },
            { NULL, gen_helper_neon_qshl_u64 },
        };
        NeonGenTwo64OpEnvFn *genfn = fns[src_unsigned][dst_unsigned];
        int maxpass = is_q ? 2 : 1;

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);
            genfn(tcg_op, cpu_env, tcg_op, tcg_shift);
            write_vec_element(s, tcg_op, rd, pass, MO_64);

            tcg_temp_free_i64(tcg_op);
        }
        clear_vec_high(s, is_q, rd);
    } else {
        TCGv_i32 tcg_shift = tcg_constant_i32(shift);
        static NeonGenTwoOpEnvFn * const fns[2][2][3] = {
            {
                { gen_helper_neon_qshl_s8,
                  gen_helper_neon_qshl_s16,
                  gen_helper_neon_qshl_s32 },
                { gen_helper_neon_qshlu_s8,
                  gen_helper_neon_qshlu_s16,
                  gen_helper_neon_qshlu_s32 }
            }, {
                { NULL, NULL, NULL },
                { gen_helper_neon_qshl_u8,
                  gen_helper_neon_qshl_u16,
                  gen_helper_neon_qshl_u32 }
            }
        };
        NeonGenTwoOpEnvFn *genfn = fns[src_unsigned][dst_unsigned][size];
        MemOp memop = scalar ? size : MO_32;
        int maxpass = scalar ? 1 : is_q ? 4 : 2;

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op, rn, pass, memop);
            genfn(tcg_op, cpu_env, tcg_op, tcg_shift);
            if (scalar) {
                /* Zero out the high bits above the scalar element size */
                switch (size) {
                case 0:
                    tcg_gen_ext8u_i32(tcg_op, tcg_op);
                    break;
                case 1:
                    tcg_gen_ext16u_i32(tcg_op, tcg_op);
                    break;
                case 2:
                    break;
                default:
                    g_assert_not_reached();
                }
                write_fp_sreg(s, rd, tcg_op);
            } else {
                write_vec_element_i32(s, tcg_op, rd, pass, MO_32);
            }

            tcg_temp_free_i32(tcg_op);
        }

        if (!scalar) {
            clear_vec_high(s, is_q, rd);
        }
    }
}
8861
8862
/* Common code for SCVTF/UCVTF: convert integer elements (optionally
 * with 'fracbits' fixed-point fraction bits) to floating point.
 * elements == 1 indicates the scalar form.
 */
static void handle_simd_intfp_conv(DisasContext *s, int rd, int rn,
                                   int elements, int is_signed,
                                   int fracbits, int size)
{
    TCGv_ptr tcg_fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
    TCGv_i32 tcg_shift = NULL;

    MemOp mop = size | (is_signed ? MO_SIGN : 0);
    int pass;

    /* The 64-bit helpers always take a shift operand; narrower sizes
     * only need it for the fixed-point (fracbits != 0) forms.
     */
    if (fracbits || size == MO_64) {
        tcg_shift = tcg_constant_i32(fracbits);
    }

    if (size == MO_64) {
        TCGv_i64 tcg_int64 = tcg_temp_new_i64();
        TCGv_i64 tcg_double = tcg_temp_new_i64();

        for (pass = 0; pass < elements; pass++) {
            read_vec_element(s, tcg_int64, rn, pass, mop);

            if (is_signed) {
                gen_helper_vfp_sqtod(tcg_double, tcg_int64,
                                     tcg_shift, tcg_fpst);
            } else {
                gen_helper_vfp_uqtod(tcg_double, tcg_int64,
                                     tcg_shift, tcg_fpst);
            }
            if (elements == 1) {
                /* Scalar: write_fp_dreg zeroes the high bits of Vd */
                write_fp_dreg(s, rd, tcg_double);
            } else {
                write_vec_element(s, tcg_double, rd, pass, MO_64);
            }
        }

        tcg_temp_free_i64(tcg_int64);
        tcg_temp_free_i64(tcg_double);

    } else {
        TCGv_i32 tcg_int32 = tcg_temp_new_i32();
        TCGv_i32 tcg_float = tcg_temp_new_i32();

        for (pass = 0; pass < elements; pass++) {
            read_vec_element_i32(s, tcg_int32, rn, pass, mop);

            switch (size) {
            case MO_32:
                if (fracbits) {
                    if (is_signed) {
                        gen_helper_vfp_sltos(tcg_float, tcg_int32,
                                             tcg_shift, tcg_fpst);
                    } else {
                        gen_helper_vfp_ultos(tcg_float, tcg_int32,
                                             tcg_shift, tcg_fpst);
                    }
                } else {
                    if (is_signed) {
                        gen_helper_vfp_sitos(tcg_float, tcg_int32, tcg_fpst);
                    } else {
                        gen_helper_vfp_uitos(tcg_float, tcg_int32, tcg_fpst);
                    }
                }
                break;
            case MO_16:
                if (fracbits) {
                    if (is_signed) {
                        gen_helper_vfp_sltoh(tcg_float, tcg_int32,
                                             tcg_shift, tcg_fpst);
                    } else {
                        gen_helper_vfp_ultoh(tcg_float, tcg_int32,
                                             tcg_shift, tcg_fpst);
                    }
                } else {
                    if (is_signed) {
                        gen_helper_vfp_sitoh(tcg_float, tcg_int32, tcg_fpst);
                    } else {
                        gen_helper_vfp_uitoh(tcg_float, tcg_int32, tcg_fpst);
                    }
                }
                break;
            default:
                g_assert_not_reached();
            }

            if (elements == 1) {
                write_fp_sreg(s, rd, tcg_float);
            } else {
                write_vec_element_i32(s, tcg_float, rd, pass, size);
            }
        }

        tcg_temp_free_i32(tcg_int32);
        tcg_temp_free_i32(tcg_float);
    }

    tcg_temp_free_ptr(tcg_fpst);

    /* elements << size is the total result width in bytes; 16 means
     * a full 128-bit (Q) vector was written, so keep the high half.
     */
    clear_vec_high(s, elements << size == 16, rd);
}
8962
8963
8964static void handle_simd_shift_intfp_conv(DisasContext *s, bool is_scalar,
8965 bool is_q, bool is_u,
8966 int immh, int immb, int opcode,
8967 int rn, int rd)
8968{
8969 int size, elements, fracbits;
8970 int immhb = immh << 3 | immb;
8971
8972 if (immh & 8) {
8973 size = MO_64;
8974 if (!is_scalar && !is_q) {
8975 unallocated_encoding(s);
8976 return;
8977 }
8978 } else if (immh & 4) {
8979 size = MO_32;
8980 } else if (immh & 2) {
8981 size = MO_16;
8982 if (!dc_isar_feature(aa64_fp16, s)) {
8983 unallocated_encoding(s);
8984 return;
8985 }
8986 } else {
8987
8988 g_assert(immh == 1);
8989 unallocated_encoding(s);
8990 return;
8991 }
8992
8993 if (is_scalar) {
8994 elements = 1;
8995 } else {
8996 elements = (8 << is_q) >> size;
8997 }
8998 fracbits = (16 << size) - immhb;
8999
9000 if (!fp_access_check(s)) {
9001 return;
9002 }
9003
9004 handle_simd_intfp_conv(s, rd, rn, elements, !is_u, fracbits, size);
9005}
9006
9007
/* FCVTZS, FCVTZU (vector or scalar, with fixed-point fraction bits):
 * convert floating point to integer, rounding toward zero.
 */
static void handle_simd_shift_fpint_conv(DisasContext *s, bool is_scalar,
                                         bool is_q, bool is_u,
                                         int immh, int immb, int rn, int rd)
{
    int immhb = immh << 3 | immb;
    int pass, size, fracbits;
    TCGv_ptr tcg_fpstatus;
    TCGv_i32 tcg_rmode, tcg_shift;

    /* Element size is encoded by the highest set bit of immh */
    if (immh & 0x8) {
        size = MO_64;
        if (!is_scalar && !is_q) {
            /* 64-bit elements require the Q vector form */
            unallocated_encoding(s);
            return;
        }
    } else if (immh & 0x4) {
        size = MO_32;
    } else if (immh & 0x2) {
        size = MO_16;
        if (!dc_isar_feature(aa64_fp16, s)) {
            unallocated_encoding(s);
            return;
        }
    } else {
        /* immh == 0 would be a failure of the decode logic */
        assert(immh == 1);
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    assert(!(is_scalar && is_q));

    /* Force round-to-zero for the conversion; the old rounding mode is
     * returned in tcg_rmode and restored at the end of the function.
     */
    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(FPROUNDING_ZERO));
    tcg_fpstatus = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
    fracbits = (16 << size) - immhb;
    tcg_shift = tcg_constant_i32(fracbits);

    if (size == MO_64) {
        int maxpass = is_scalar ? 1 : 2;

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);
            if (is_u) {
                gen_helper_vfp_touqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_tosqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
            }
            write_vec_element(s, tcg_op, rd, pass, MO_64);
            tcg_temp_free_i64(tcg_op);
        }
        clear_vec_high(s, is_q, rd);
    } else {
        void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
        int maxpass = is_scalar ? 1 : ((8 << is_q) >> size);

        switch (size) {
        case MO_16:
            if (is_u) {
                fn = gen_helper_vfp_touhh;
            } else {
                fn = gen_helper_vfp_toshh;
            }
            break;
        case MO_32:
            if (is_u) {
                fn = gen_helper_vfp_touls;
            } else {
                fn = gen_helper_vfp_tosls;
            }
            break;
        default:
            g_assert_not_reached();
        }

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op, rn, pass, size);
            fn(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
            if (is_scalar) {
                write_fp_sreg(s, rd, tcg_op);
            } else {
                write_vec_element_i32(s, tcg_op, rd, pass, size);
            }
            tcg_temp_free_i32(tcg_op);
        }
        if (!is_scalar) {
            clear_vec_high(s, is_q, rd);
        }
    }

    /* Restore the caller's rounding mode */
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
    tcg_temp_free_ptr(tcg_fpstatus);
    tcg_temp_free_i32(tcg_rmode);
}
9110
9111
9112
9113
9114
9115
9116
9117
9118
/* AdvSIMD scalar shift by immediate: decode opcode and dispatch to the
 * appropriate handler.
 */
static void disas_simd_scalar_shift_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 5);
    int immb = extract32(insn, 16, 3);
    int immh = extract32(insn, 19, 4);
    bool is_u = extract32(insn, 29, 1);

    /* immh == 0 belongs to the "modified immediate" group, not here */
    if (immh == 0) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0x08: /* SRI */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x00: /* SSHR / USHR */
    case 0x02: /* SSRA / USRA */
    case 0x04: /* SRSHR / URSHR */
    case 0x06: /* SRSRA / URSRA */
        handle_scalar_simd_shri(s, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x0a: /* SHL / SLI */
        handle_scalar_simd_shli(s, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x1c: /* SCVTF / UCVTF */
        handle_simd_shift_intfp_conv(s, true, false, is_u, immh, immb,
                                     opcode, rn, rd);
        break;
    case 0x10: /* SQSHRUN */
    case 0x11: /* SQRSHRUN */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        handle_vec_simd_sqshrn(s, true, false, false, true,
                               immh, immb, opcode, rn, rd);
        break;
    case 0x12: /* SQSHRN / UQSHRN */
    case 0x13: /* SQRSHRN / UQRSHRN */
        handle_vec_simd_sqshrn(s, true, false, is_u, is_u,
                               immh, immb, opcode, rn, rd);
        break;
    case 0xc: /* SQSHLU */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        handle_simd_qshl(s, true, false, false, true, immh, immb, rn, rd);
        break;
    case 0xe: /* SQSHL / UQSHL */
        handle_simd_qshl(s, true, false, is_u, is_u, immh, immb, rn, rd);
        break;
    case 0x1f: /* FCVTZS / FCVTZU */
        handle_simd_shift_fpint_conv(s, true, false, is_u, immh, immb, rn, rd);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
9185
9186
9187
9188
9189
9190
9191
/* AdvSIMD scalar three different: SQDMLAL, SQDMLSL, SQDMULL.
 * These widen (16->32 or 32->64), doubling-saturate the product, and
 * for the MLA/MLS forms saturating-accumulate into Vd.
 */
static void disas_simd_scalar_three_reg_diff(DisasContext *s, uint32_t insn)
{
    bool is_u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 4);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    if (is_u) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0x9: /* SQDMLAL */
    case 0xb: /* SQDMLSL */
    case 0xd: /* SQDMULL */
        /* Only the 16- and 32-bit source element sizes exist */
        if (size == 0 || size == 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (size == 2) {
        /* 32 -> 64 bit */
        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
        TCGv_i64 tcg_op2 = tcg_temp_new_i64();
        TCGv_i64 tcg_res = tcg_temp_new_i64();

        read_vec_element(s, tcg_op1, rn, 0, MO_32 | MO_SIGN);
        read_vec_element(s, tcg_op2, rm, 0, MO_32 | MO_SIGN);

        tcg_gen_mul_i64(tcg_res, tcg_op1, tcg_op2);
        /* Doubling with saturation: res = sat(res + res) */
        gen_helper_neon_addl_saturate_s64(tcg_res, cpu_env, tcg_res, tcg_res);

        switch (opcode) {
        case 0xd: /* SQDMULL: product only */
            break;
        case 0xb: /* SQDMLSL: negate, then accumulate like SQDMLAL */
            tcg_gen_neg_i64(tcg_res, tcg_res);
            /* fall through */
        case 0x9: /* SQDMLAL: saturating accumulate into Vd */
            read_vec_element(s, tcg_op1, rd, 0, MO_64);
            gen_helper_neon_addl_saturate_s64(tcg_res, cpu_env,
                                              tcg_res, tcg_op1);
            break;
        default:
            g_assert_not_reached();
        }

        write_fp_dreg(s, rd, tcg_res);

        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2);
        tcg_temp_free_i64(tcg_res);
    } else {
        /* 16 -> 32 bit */
        TCGv_i32 tcg_op1 = read_fp_hreg(s, rn);
        TCGv_i32 tcg_op2 = read_fp_hreg(s, rm);
        TCGv_i64 tcg_res = tcg_temp_new_i64();

        gen_helper_neon_mull_s16(tcg_res, tcg_op1, tcg_op2);
        gen_helper_neon_addl_saturate_s32(tcg_res, cpu_env, tcg_res, tcg_res);

        switch (opcode) {
        case 0xd: /* SQDMULL: product only */
            break;
        case 0xb: /* SQDMLSL: negate, then accumulate like SQDMLAL */
            gen_helper_neon_negl_u32(tcg_res, tcg_res);
            /* fall through */
        case 0x9: /* SQDMLAL: saturating accumulate into Vd */
        {
            TCGv_i64 tcg_op3 = tcg_temp_new_i64();
            read_vec_element(s, tcg_op3, rd, 0, MO_32);
            gen_helper_neon_addl_saturate_s32(tcg_res, cpu_env,
                                              tcg_res, tcg_op3);
            tcg_temp_free_i64(tcg_op3);
            break;
        }
        default:
            g_assert_not_reached();
        }

        tcg_gen_ext32u_i64(tcg_res, tcg_res);
        write_fp_dreg(s, rd, tcg_res);

        tcg_temp_free_i32(tcg_op1);
        tcg_temp_free_i32(tcg_op2);
        tcg_temp_free_i64(tcg_res);
    }
}
9290
static void handle_3same_64(DisasContext *s, int opcode, bool u,
                            TCGv_i64 tcg_rd, TCGv_i64 tcg_rn, TCGv_i64 tcg_rm)
{
    /* Handle 64x64->64 three-reg-same operations on 64-bit elements;
     * shared between the scalar and vector three-same groups.
     * The caller has already validated the opcode for this size.
     */
    TCGCond cond;

    switch (opcode) {
    case 0x1: /* SQADD / UQADD */
        if (u) {
            gen_helper_neon_qadd_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qadd_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        }
        break;
    case 0x5: /* SQSUB / UQSUB */
        if (u) {
            gen_helper_neon_qsub_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qsub_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        }
        break;
    case 0x6: /* CMGT / CMHI */
        /* Compare ops: setcond produces 0/1, negated to the
         * all-zeroes/all-ones result the architecture requires.
         */
        cond = u ? TCG_COND_GTU : TCG_COND_GT;
    do_cmop:
        tcg_gen_setcond_i64(cond, tcg_rd, tcg_rn, tcg_rm);
        tcg_gen_neg_i64(tcg_rd, tcg_rd);
        break;
    case 0x7: /* CMGE / CMHS */
        cond = u ? TCG_COND_GEU : TCG_COND_GE;
        goto do_cmop;
    case 0x11: /* CMTST / CMEQ */
        if (u) {
            cond = TCG_COND_EQ;
            goto do_cmop;
        }
        gen_cmtst_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 0x8: /* SSHL / USHL */
        if (u) {
            gen_ushl_i64(tcg_rd, tcg_rn, tcg_rm);
        } else {
            gen_sshl_i64(tcg_rd, tcg_rn, tcg_rm);
        }
        break;
    case 0x9: /* SQSHL / UQSHL */
        if (u) {
            gen_helper_neon_qshl_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qshl_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        }
        break;
    case 0xa: /* SRSHL / URSHL */
        if (u) {
            gen_helper_neon_rshl_u64(tcg_rd, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_rshl_s64(tcg_rd, tcg_rn, tcg_rm);
        }
        break;
    case 0xb: /* SQRSHL / UQRSHL */
        if (u) {
            gen_helper_neon_qrshl_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qrshl_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        }
        break;
    case 0x10: /* ADD / SUB */
        if (u) {
            tcg_gen_sub_i64(tcg_rd, tcg_rn, tcg_rm);
        } else {
            tcg_gen_add_i64(tcg_rd, tcg_rn, tcg_rm);
        }
        break;
    default:
        g_assert_not_reached();
    }
}
9374
9375
9376
9377
9378
/* Handle the floating-point three-reg-same operations, shared between
 * the scalar (elements == 1) and vector forms.  size selects double
 * (nonzero) vs single precision; fpopcode already folds in the size<1>
 * and U bits from the encoding.
 */
static void handle_3same_float(DisasContext *s, int size, int elements,
                               int fpopcode, int rd, int rn, int rm)
{
    int pass;
    TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);

    for (pass = 0; pass < elements; pass++) {
        if (size) {
            /* Double precision: one 64-bit element per pass */
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
            TCGv_i64 tcg_res = tcg_temp_new_i64();

            read_vec_element(s, tcg_op1, rn, pass, MO_64);
            read_vec_element(s, tcg_op2, rm, pass, MO_64);

            switch (fpopcode) {
            case 0x39: /* FMLS */
                /* As usual for ARM, a - b*c is fused as (-b)*c + a */
                gen_helper_vfp_negd(tcg_op1, tcg_op1);
                /* fall through */
            case 0x19: /* FMLA */
                read_vec_element(s, tcg_res, rd, pass, MO_64);
                gen_helper_vfp_muladdd(tcg_res, tcg_op1, tcg_op2,
                                       tcg_res, fpst);
                break;
            case 0x18: /* FMAXNM */
                gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1a: /* FADD */
                gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1b: /* FMULX */
                gen_helper_vfp_mulxd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1c: /* FCMEQ */
                gen_helper_neon_ceq_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1e: /* FMAX */
                gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1f: /* FRECPS */
                gen_helper_recpsf_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x38: /* FMINNM */
                gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3a: /* FSUB */
                gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3e: /* FMIN */
                gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3f: /* FRSQRTS */
                gen_helper_rsqrtsf_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5b: /* FMUL */
                gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5c: /* FCMGE */
                gen_helper_neon_cge_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5d: /* FACGE */
                gen_helper_neon_acge_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5f: /* FDIV */
                gen_helper_vfp_divd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7a: /* FABD */
                gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
                gen_helper_vfp_absd(tcg_res, tcg_res);
                break;
            case 0x7c: /* FCMGT */
                gen_helper_neon_cgt_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7d: /* FACGT */
                gen_helper_neon_acgt_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            write_vec_element(s, tcg_res, rd, pass, MO_64);

            tcg_temp_free_i64(tcg_res);
            tcg_temp_free_i64(tcg_op1);
            tcg_temp_free_i64(tcg_op2);
        } else {
            /* Single precision: one 32-bit element per pass */
            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op1, rn, pass, MO_32);
            read_vec_element_i32(s, tcg_op2, rm, pass, MO_32);

            switch (fpopcode) {
            case 0x39: /* FMLS */
                /* As usual for ARM, a - b*c is fused as (-b)*c + a */
                gen_helper_vfp_negs(tcg_op1, tcg_op1);
                /* fall through */
            case 0x19: /* FMLA */
                read_vec_element_i32(s, tcg_res, rd, pass, MO_32);
                gen_helper_vfp_muladds(tcg_res, tcg_op1, tcg_op2,
                                       tcg_res, fpst);
                break;
            case 0x1a: /* FADD */
                gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1b: /* FMULX */
                gen_helper_vfp_mulxs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1c: /* FCMEQ */
                gen_helper_neon_ceq_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1e: /* FMAX */
                gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1f: /* FRECPS */
                gen_helper_recpsf_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x18: /* FMAXNM */
                gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x38: /* FMINNM */
                gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3a: /* FSUB */
                gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3e: /* FMIN */
                gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3f: /* FRSQRTS */
                gen_helper_rsqrtsf_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5b: /* FMUL */
                gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5c: /* FCMGE */
                gen_helper_neon_cge_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5d: /* FACGE */
                gen_helper_neon_acge_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5f: /* FDIV */
                gen_helper_vfp_divs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7a: /* FABD */
                gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
                gen_helper_vfp_abss(tcg_res, tcg_res);
                break;
            case 0x7c: /* FCMGT */
                gen_helper_neon_cgt_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7d: /* FACGT */
                gen_helper_neon_acgt_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            if (elements == 1) {
                /* Scalar single-precision: zero-extend into the 64-bit
                 * Vd write so the high bits are cleared.
                 */
                TCGv_i64 tcg_tmp = tcg_temp_new_i64();

                tcg_gen_extu_i32_i64(tcg_tmp, tcg_res);
                write_vec_element(s, tcg_tmp, rd, pass, MO_64);
                tcg_temp_free_i64(tcg_tmp);
            } else {
                write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
            }

            tcg_temp_free_i32(tcg_res);
            tcg_temp_free_i32(tcg_op1);
            tcg_temp_free_i32(tcg_op2);
        }
    }

    tcg_temp_free_ptr(fpst);

    /* Keep the high half of Vd only if more than 64 bits were written */
    clear_vec_high(s, elements * (size ? 8 : 4) > 8, rd);
}
9562
9563
9564
9565
9566
9567
9568
/* AdvSIMD scalar three same: decode and emit code for the scalar
 * three-reg-same group (both the floating-point and integer subgroups).
 */
static void disas_simd_scalar_three_reg_same(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 5);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    bool u = extract32(insn, 29, 1);
    TCGv_i64 tcg_rd;

    if (opcode >= 0x18) {
        /* Floating point: U, size[1] and opcode indicate operation */
        int fpopcode = opcode | (extract32(size, 1, 1) << 5) | (u << 6);
        switch (fpopcode) {
        case 0x1b: /* FMULX */
        case 0x1f: /* FRECPS */
        case 0x3f: /* FRSQRTS */
        case 0x5d: /* FACGE */
        case 0x7d: /* FACGT */
        case 0x1c: /* FCMEQ */
        case 0x5c: /* FCMGE */
        case 0x7c: /* FCMGT */
        case 0x7a: /* FABD */
            break;
        default:
            unallocated_encoding(s);
            return;
        }

        if (!fp_access_check(s)) {
            return;
        }

        handle_3same_float(s, extract32(size, 0, 1), 1, fpopcode, rd, rn, rm);
        return;
    }

    switch (opcode) {
    case 0x1: /* SQADD, UQADD */
    case 0x5: /* SQSUB, UQSUB */
    case 0x9: /* SQSHL, UQSHL */
    case 0xb: /* SQRSHL, UQRSHL */
        break;
    case 0x8: /* SSHL, USHL */
    case 0xa: /* SRSHL, URSHL */
    case 0x6: /* CMGT, CMHI */
    case 0x7: /* CMGE, CMHS */
    case 0x11: /* CMTST, CMEQ */
    case 0x10: /* ADD, SUB (vector) */
        /* These scalar ops only exist for 64-bit elements */
        if (size != 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x16: /* SQDMULH, SQRDMULH (vector) */
        if (size != 1 && size != 2) {
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rd = tcg_temp_new_i64();

    if (size == 3) {
        TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
        TCGv_i64 tcg_rm = read_fp_dreg(s, rm);

        handle_3same_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rm);
        tcg_temp_free_i64(tcg_rn);
        tcg_temp_free_i64(tcg_rm);
    } else {
        /* Do a single operation on the lowest element in the vector.
         * We use the standard Neon helpers and rely on 0 OP 0 == 0 with
         * no side effects for all these operations.
         * OPTME: special-purpose helpers would avoid doing some
         * unnecessary work in the helper for the 8 and 16 bit cases.
         */
        NeonGenTwoOpEnvFn *genenvfn;
        TCGv_i32 tcg_rn = tcg_temp_new_i32();
        TCGv_i32 tcg_rm = tcg_temp_new_i32();
        TCGv_i32 tcg_rd32 = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_rn, rn, 0, size);
        read_vec_element_i32(s, tcg_rm, rm, 0, size);

        switch (opcode) {
        case 0x1: /* SQADD, UQADD */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qadd_s8, gen_helper_neon_qadd_u8 },
                { gen_helper_neon_qadd_s16, gen_helper_neon_qadd_u16 },
                { gen_helper_neon_qadd_s32, gen_helper_neon_qadd_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x5: /* SQSUB, UQSUB */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qsub_s8, gen_helper_neon_qsub_u8 },
                { gen_helper_neon_qsub_s16, gen_helper_neon_qsub_u16 },
                { gen_helper_neon_qsub_s32, gen_helper_neon_qsub_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x9: /* SQSHL, UQSHL */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 },
                { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 },
                { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0xb: /* SQRSHL, UQRSHL */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 },
                { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 },
                { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x16: /* SQDMULH, SQRDMULH */
        {
            static NeonGenTwoOpEnvFn * const fns[2][2] = {
                { gen_helper_neon_qdmulh_s16, gen_helper_neon_qrdmulh_s16 },
                { gen_helper_neon_qdmulh_s32, gen_helper_neon_qrdmulh_s32 },
            };
            assert(size == 1 || size == 2);
            genenvfn = fns[size - 1][u];
            break;
        }
        default:
            g_assert_not_reached();
        }

        genenvfn(tcg_rd32, cpu_env, tcg_rn, tcg_rm);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_rd32);
        tcg_temp_free_i32(tcg_rd32);
        tcg_temp_free_i32(tcg_rn);
        tcg_temp_free_i32(tcg_rm);
    }

    write_fp_dreg(s, rd, tcg_rd);

    tcg_temp_free_i64(tcg_rd);
}
9728
9729
9730
9731
9732
9733
9734
9735
9736
9737static void disas_simd_scalar_three_reg_same_fp16(DisasContext *s,
9738 uint32_t insn)
9739{
9740 int rd = extract32(insn, 0, 5);
9741 int rn = extract32(insn, 5, 5);
9742 int opcode = extract32(insn, 11, 3);
9743 int rm = extract32(insn, 16, 5);
9744 bool u = extract32(insn, 29, 1);
9745 bool a = extract32(insn, 23, 1);
9746 int fpopcode = opcode | (a << 3) | (u << 4);
9747 TCGv_ptr fpst;
9748 TCGv_i32 tcg_op1;
9749 TCGv_i32 tcg_op2;
9750 TCGv_i32 tcg_res;
9751
9752 switch (fpopcode) {
9753 case 0x03:
9754 case 0x04:
9755 case 0x07:
9756 case 0x0f:
9757 case 0x14:
9758 case 0x15:
9759 case 0x1a:
9760 case 0x1c:
9761 case 0x1d:
9762 break;
9763 default:
9764 unallocated_encoding(s);
9765 return;
9766 }
9767
9768 if (!dc_isar_feature(aa64_fp16, s)) {
9769 unallocated_encoding(s);
9770 }
9771
9772 if (!fp_access_check(s)) {
9773 return;
9774 }
9775
9776 fpst = fpstatus_ptr(FPST_FPCR_F16);
9777
9778 tcg_op1 = read_fp_hreg(s, rn);
9779 tcg_op2 = read_fp_hreg(s, rm);
9780 tcg_res = tcg_temp_new_i32();
9781
9782 switch (fpopcode) {
9783 case 0x03:
9784 gen_helper_advsimd_mulxh(tcg_res, tcg_op1, tcg_op2, fpst);
9785 break;
9786 case 0x04:
9787 gen_helper_advsimd_ceq_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9788 break;
9789 case 0x07:
9790 gen_helper_recpsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9791 break;
9792 case 0x0f:
9793 gen_helper_rsqrtsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9794 break;
9795 case 0x14:
9796 gen_helper_advsimd_cge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9797 break;
9798 case 0x15:
9799 gen_helper_advsimd_acge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9800 break;
9801 case 0x1a:
9802 gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
9803 tcg_gen_andi_i32(tcg_res, tcg_res, 0x7fff);
9804 break;
9805 case 0x1c:
9806 gen_helper_advsimd_cgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9807 break;
9808 case 0x1d:
9809 gen_helper_advsimd_acgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9810 break;
9811 default:
9812 g_assert_not_reached();
9813 }
9814
9815 write_fp_sreg(s, rd, tcg_res);
9816
9817
9818 tcg_temp_free_i32(tcg_res);
9819 tcg_temp_free_i32(tcg_op1);
9820 tcg_temp_free_i32(tcg_op2);
9821 tcg_temp_free_ptr(fpst);
9822}
9823
9824
9825
9826
9827
9828
9829
/* AdvSIMD scalar three same extra: SQRDMLAH / SQRDMLSH (FEAT_RDM). */
static void disas_simd_scalar_three_reg_same_extra(DisasContext *s,
                                                   uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 4);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    bool u = extract32(insn, 29, 1);
    TCGv_i32 ele1, ele2, ele3;
    TCGv_i64 res;
    bool feature;

    switch (u * 16 + opcode) {
    case 0x10: /* SQRDMLAH (vector) */
    case 0x11: /* SQRDMLSH (vector) */
        /* Only 16- and 32-bit element sizes exist */
        if (size != 1 && size != 2) {
            unallocated_encoding(s);
            return;
        }
        feature = dc_isar_feature(aa64_rdm, s);
        break;
    default:
        unallocated_encoding(s);
        return;
    }
    if (!feature) {
        unallocated_encoding(s);
        return;
    }
    if (!fp_access_check(s)) {
        return;
    }

    /*
     * Do a single operation on the lowest element in the vector,
     * reading the destination as the third (accumulator) operand.
     * The Neon helpers are used directly; for the 16-bit case the
     * upper halves of the 32-bit inputs are simply ignored by them.
     */
    ele1 = tcg_temp_new_i32();
    ele2 = tcg_temp_new_i32();
    ele3 = tcg_temp_new_i32();

    read_vec_element_i32(s, ele1, rn, 0, size);
    read_vec_element_i32(s, ele2, rm, 0, size);
    read_vec_element_i32(s, ele3, rd, 0, size);

    switch (opcode) {
    case 0x0: /* SQRDMLAH */
        if (size == 1) {
            gen_helper_neon_qrdmlah_s16(ele3, cpu_env, ele1, ele2, ele3);
        } else {
            gen_helper_neon_qrdmlah_s32(ele3, cpu_env, ele1, ele2, ele3);
        }
        break;
    case 0x1: /* SQRDMLSH */
        if (size == 1) {
            gen_helper_neon_qrdmlsh_s16(ele3, cpu_env, ele1, ele2, ele3);
        } else {
            gen_helper_neon_qrdmlsh_s32(ele3, cpu_env, ele1, ele2, ele3);
        }
        break;
    default:
        g_assert_not_reached();
    }
    tcg_temp_free_i32(ele1);
    tcg_temp_free_i32(ele2);

    /* Zero-extend the result into the full 64-bit Vd write */
    res = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(res, ele3);
    tcg_temp_free_i32(ele3);

    write_fp_dreg(s, rd, res);
    tcg_temp_free_i64(res);
}
9906
static void handle_2misc_64(DisasContext *s, int opcode, bool u,
                            TCGv_i64 tcg_rd, TCGv_i64 tcg_rn,
                            TCGv_i32 tcg_rmode, TCGv_ptr tcg_fpstatus)
{
    /* Handle 64->64 two-reg-misc operations; shared between the scalar
     * and vector groups.  The caller has validated the opcode and, for
     * the FP ops, already set up tcg_rmode/tcg_fpstatus as needed
     * (e.g. the FRINT* rounding mode is selected by the caller).
     */
    TCGCond cond;

    switch (opcode) {
    case 0x4: /* CLS, CLZ */
        if (u) {
            tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64);
        } else {
            tcg_gen_clrsb_i64(tcg_rd, tcg_rn);
        }
        break;
    case 0x5: /* NOT */
        /* This opcode is shared with CNT and RBIT, but for 64-bit
         * elements only the bitwise-NOT form reaches this function.
         */
        tcg_gen_not_i64(tcg_rd, tcg_rn);
        break;
    case 0x7: /* SQABS, SQNEG */
        if (u) {
            gen_helper_neon_qneg_s64(tcg_rd, cpu_env, tcg_rn);
        } else {
            gen_helper_neon_qabs_s64(tcg_rd, cpu_env, tcg_rn);
        }
        break;
    case 0xa: /* CMLT (zero) */
        /* Compare-against-zero ops: setcond gives 0/1, negated to the
         * all-zeroes/all-ones result the architecture requires.
         */
        cond = TCG_COND_LT;
    do_cmop:
        tcg_gen_setcondi_i64(cond, tcg_rd, tcg_rn, 0);
        tcg_gen_neg_i64(tcg_rd, tcg_rd);
        break;
    case 0x8: /* CMGT, CMGE (zero) */
        cond = u ? TCG_COND_GE : TCG_COND_GT;
        goto do_cmop;
    case 0x9: /* CMEQ, CMLE (zero) */
        cond = u ? TCG_COND_LE : TCG_COND_EQ;
        goto do_cmop;
    case 0xb: /* ABS, NEG */
        if (u) {
            tcg_gen_neg_i64(tcg_rd, tcg_rn);
        } else {
            tcg_gen_abs_i64(tcg_rd, tcg_rn);
        }
        break;
    case 0x2f: /* FABS */
        gen_helper_vfp_absd(tcg_rd, tcg_rn);
        break;
    case 0x6f: /* FNEG */
        gen_helper_vfp_negd(tcg_rd, tcg_rn);
        break;
    case 0x7f: /* FSQRT */
        gen_helper_vfp_sqrtd(tcg_rd, tcg_rn, cpu_env);
        break;
    case 0x1a: /* FCVT* to signed; rounding mode set by caller */
    case 0x1b:
    case 0x1c:
    case 0x3a:
    case 0x3b:
        gen_helper_vfp_tosqd(tcg_rd, tcg_rn, tcg_constant_i32(0), tcg_fpstatus);
        break;
    case 0x5a: /* FCVT* to unsigned; rounding mode set by caller */
    case 0x5b:
    case 0x5c:
    case 0x7a:
    case 0x7b:
        gen_helper_vfp_touqd(tcg_rd, tcg_rn, tcg_constant_i32(0), tcg_fpstatus);
        break;
    case 0x18: /* FRINT* family; rounding mode set by caller */
    case 0x19:
    case 0x38:
    case 0x39:
    case 0x58:
    case 0x79:
        gen_helper_rintd(tcg_rd, tcg_rn, tcg_fpstatus);
        break;
    case 0x59: /* FRINTX: signals inexact */
        gen_helper_rintd_exact(tcg_rd, tcg_rn, tcg_fpstatus);
        break;
    case 0x1e: /* FRINT32Z, FRINT32X */
    case 0x5e:
        gen_helper_frint32_d(tcg_rd, tcg_rn, tcg_fpstatus);
        break;
    case 0x1f: /* FRINT64Z, FRINT64X */
    case 0x5f:
        gen_helper_frint64_d(tcg_rd, tcg_rn, tcg_fpstatus);
        break;
    default:
        g_assert_not_reached();
    }
}
10009
/* Floating point compare against zero: FCMGT, FCMGE, FCMEQ, FCMLE, FCMLT
 * (scalar and vector forms). The second operand is implicitly 0.0; the
 * "swap" flavours are implemented by swapping the operand order of the
 * corresponding GT/GE helper.
 */
static void handle_2misc_fcmp_zero(DisasContext *s, int opcode,
                                   bool is_scalar, bool is_u, bool is_q,
                                   int size, int rn, int rd)
{
    bool is_double = (size == MO_64);
    TCGv_ptr fpst;

    if (!fp_access_check(s)) {
        return;
    }

    /* Half-precision uses the FZ16-aware FP status. */
    fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);

    if (is_double) {
        TCGv_i64 tcg_op = tcg_temp_new_i64();
        TCGv_i64 tcg_zero = tcg_constant_i64(0);
        TCGv_i64 tcg_res = tcg_temp_new_i64();
        NeonGenTwoDoubleOpFn *genfn;
        bool swap = false;
        int pass;

        switch (opcode) {
        case 0x2e: /* FCMLT (zero) */
            swap = true;
            /* fall through */
        case 0x2c: /* FCMGT (zero) */
            genfn = gen_helper_neon_cgt_f64;
            break;
        case 0x2d: /* FCMEQ (zero) */
            genfn = gen_helper_neon_ceq_f64;
            break;
        case 0x6d: /* FCMLE (zero) */
            swap = true;
            /* fall through */
        case 0x6c: /* FCMGE (zero) */
            genfn = gen_helper_neon_cge_f64;
            break;
        default:
            g_assert_not_reached();
        }

        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
            read_vec_element(s, tcg_op, rn, pass, MO_64);
            if (swap) {
                genfn(tcg_res, tcg_zero, tcg_op, fpst);
            } else {
                genfn(tcg_res, tcg_op, tcg_zero, fpst);
            }
            write_vec_element(s, tcg_res, rd, pass, MO_64);
        }
        tcg_temp_free_i64(tcg_res);
        tcg_temp_free_i64(tcg_op);

        clear_vec_high(s, !is_scalar, rd);
    } else {
        TCGv_i32 tcg_op = tcg_temp_new_i32();
        TCGv_i32 tcg_zero = tcg_constant_i32(0);
        TCGv_i32 tcg_res = tcg_temp_new_i32();
        NeonGenTwoSingleOpFn *genfn;
        bool swap = false;
        int pass, maxpasses;

        if (size == MO_16) {
            switch (opcode) {
            case 0x2e: /* FCMLT (zero) */
                swap = true;
                /* fall through */
            case 0x2c: /* FCMGT (zero) */
                genfn = gen_helper_advsimd_cgt_f16;
                break;
            case 0x2d: /* FCMEQ (zero) */
                genfn = gen_helper_advsimd_ceq_f16;
                break;
            case 0x6d: /* FCMLE (zero) */
                swap = true;
                /* fall through */
            case 0x6c: /* FCMGE (zero) */
                genfn = gen_helper_advsimd_cge_f16;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            switch (opcode) {
            case 0x2e: /* FCMLT (zero) */
                swap = true;
                /* fall through */
            case 0x2c: /* FCMGT (zero) */
                genfn = gen_helper_neon_cgt_f32;
                break;
            case 0x2d: /* FCMEQ (zero) */
                genfn = gen_helper_neon_ceq_f32;
                break;
            case 0x6d: /* FCMLE (zero) */
                swap = true;
                /* fall through */
            case 0x6c: /* FCMGE (zero) */
                genfn = gen_helper_neon_cge_f32;
                break;
            default:
                g_assert_not_reached();
            }
        }

        if (is_scalar) {
            maxpasses = 1;
        } else {
            int vector_size = 8 << is_q;
            maxpasses = vector_size >> size;
        }

        for (pass = 0; pass < maxpasses; pass++) {
            read_vec_element_i32(s, tcg_op, rn, pass, size);
            if (swap) {
                genfn(tcg_res, tcg_zero, tcg_op, fpst);
            } else {
                genfn(tcg_res, tcg_op, tcg_zero, fpst);
            }
            if (is_scalar) {
                write_fp_sreg(s, rd, tcg_res);
            } else {
                write_vec_element_i32(s, tcg_res, rd, pass, size);
            }
        }
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_op);
        if (!is_scalar) {
            clear_vec_high(s, is_q, rd);
        }
    }

    tcg_temp_free_ptr(fpst);
}
10143
/* Reciprocal estimate family of 2-reg-misc ops:
 * FRECPE, FRECPX, FRSQRTE (single/double) and URECPE (32-bit only).
 * Handles both the scalar (one pass) and vector forms.
 */
static void handle_2misc_reciprocal(DisasContext *s, int opcode,
                                    bool is_scalar, bool is_u, bool is_q,
                                    int size, int rn, int rd)
{
    bool is_double = (size == 3);
    TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);

    if (is_double) {
        TCGv_i64 tcg_op = tcg_temp_new_i64();
        TCGv_i64 tcg_res = tcg_temp_new_i64();
        int pass;

        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
            read_vec_element(s, tcg_op, rn, pass, MO_64);
            switch (opcode) {
            case 0x3d: /* FRECPE */
                gen_helper_recpe_f64(tcg_res, tcg_op, fpst);
                break;
            case 0x3f: /* FRECPX */
                gen_helper_frecpx_f64(tcg_res, tcg_op, fpst);
                break;
            case 0x7d: /* FRSQRTE */
                gen_helper_rsqrte_f64(tcg_res, tcg_op, fpst);
                break;
            default:
                g_assert_not_reached();
            }
            write_vec_element(s, tcg_res, rd, pass, MO_64);
        }
        tcg_temp_free_i64(tcg_res);
        tcg_temp_free_i64(tcg_op);
        clear_vec_high(s, !is_scalar, rd);
    } else {
        TCGv_i32 tcg_op = tcg_temp_new_i32();
        TCGv_i32 tcg_res = tcg_temp_new_i32();
        int pass, maxpasses;

        if (is_scalar) {
            maxpasses = 1;
        } else {
            maxpasses = is_q ? 4 : 2;
        }

        for (pass = 0; pass < maxpasses; pass++) {
            read_vec_element_i32(s, tcg_op, rn, pass, MO_32);

            switch (opcode) {
            case 0x3c: /* URECPE */
                gen_helper_recpe_u32(tcg_res, tcg_op);
                break;
            case 0x3d: /* FRECPE */
                gen_helper_recpe_f32(tcg_res, tcg_op, fpst);
                break;
            case 0x3f: /* FRECPX */
                gen_helper_frecpx_f32(tcg_res, tcg_op, fpst);
                break;
            case 0x7d: /* FRSQRTE */
                gen_helper_rsqrte_f32(tcg_res, tcg_op, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            if (is_scalar) {
                write_fp_sreg(s, rd, tcg_res);
            } else {
                write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
            }
        }
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_op);
        if (!is_scalar) {
            clear_vec_high(s, is_q, rd);
        }
    }
    tcg_temp_free_ptr(fpst);
}
10221
static void handle_2misc_narrow(DisasContext *s, bool scalar,
                                int opcode, bool u, bool is_q,
                                int size, int rn, int rd)
{
    /* Handle 2-reg-misc ops which are narrowing (so each 2*size element
     * in the source becomes a size element in the destination):
     * XTN/SQXTUN, SQXTN/UQXTN, FCVTN, BFCVTN, FCVTXN.
     */
    int pass;
    TCGv_i32 tcg_res[2];
    int destelt = is_q ? 2 : 0;    /* "2" variants write the high half */
    int passes = scalar ? 1 : 2;

    if (scalar) {
        /* Scalar form writes zero to the unused upper result word. */
        tcg_res[1] = tcg_constant_i32(0);
    }

    for (pass = 0; pass < passes; pass++) {
        TCGv_i64 tcg_op = tcg_temp_new_i64();
        NeonGenNarrowFn *genfn = NULL;
        NeonGenNarrowEnvFn *genenvfn = NULL;

        if (scalar) {
            read_vec_element(s, tcg_op, rn, pass, size + 1);
        } else {
            read_vec_element(s, tcg_op, rn, pass, MO_64);
        }
        tcg_res[pass] = tcg_temp_new_i32();

        switch (opcode) {
        case 0x12: /* XTN, XTN2 / SQXTUN, SQXTUN2 */
        {
            static NeonGenNarrowFn * const xtnfns[3] = {
                gen_helper_neon_narrow_u8,
                gen_helper_neon_narrow_u16,
                tcg_gen_extrl_i64_i32,
            };
            static NeonGenNarrowEnvFn * const sqxtunfns[3] = {
                gen_helper_neon_unarrow_sat8,
                gen_helper_neon_unarrow_sat16,
                gen_helper_neon_unarrow_sat32,
            };
            if (u) {
                genenvfn = sqxtunfns[size];
            } else {
                genfn = xtnfns[size];
            }
            break;
        }
        case 0x14: /* SQXTN, SQXTN2 / UQXTN, UQXTN2 */
        {
            static NeonGenNarrowEnvFn * const fns[3][2] = {
                { gen_helper_neon_narrow_sat_s8,
                  gen_helper_neon_narrow_sat_u8 },
                { gen_helper_neon_narrow_sat_s16,
                  gen_helper_neon_narrow_sat_u16 },
                { gen_helper_neon_narrow_sat_s32,
                  gen_helper_neon_narrow_sat_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x16: /* FCVTN, FCVTN2 */
            /* 64-bit to 32-bit float conversion (size 2), or
             * 32-bit to 16-bit (pairwise, via 64-bit operand split).
             */
            if (size == 2) {
                gen_helper_vfp_fcvtsd(tcg_res[pass], tcg_op, cpu_env);
            } else {
                TCGv_i32 tcg_lo = tcg_temp_new_i32();
                TCGv_i32 tcg_hi = tcg_temp_new_i32();
                TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
                TCGv_i32 ahp = get_ahp_flag();

                tcg_gen_extr_i64_i32(tcg_lo, tcg_hi, tcg_op);
                gen_helper_vfp_fcvt_f32_to_f16(tcg_lo, tcg_lo, fpst, ahp);
                gen_helper_vfp_fcvt_f32_to_f16(tcg_hi, tcg_hi, fpst, ahp);
                tcg_gen_deposit_i32(tcg_res[pass], tcg_lo, tcg_hi, 16, 16);
                tcg_temp_free_i32(tcg_lo);
                tcg_temp_free_i32(tcg_hi);
                tcg_temp_free_ptr(fpst);
                tcg_temp_free_i32(ahp);
            }
            break;
        case 0x36: /* BFCVTN, BFCVTN2 */
        {
            TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
            gen_helper_bfcvt_pair(tcg_res[pass], tcg_op, fpst);
            tcg_temp_free_ptr(fpst);
        }
        break;
        case 0x56: /* FCVTXN, FCVTXN2 */
            /* 64-bit to 32-bit float conversion with von Neumann
             * rounding (round to odd); only valid for size == 2.
             */
            assert(size == 2);
            gen_helper_fcvtx_f64_to_f32(tcg_res[pass], tcg_op, cpu_env);
            break;
        default:
            g_assert_not_reached();
        }

        if (genfn) {
            genfn(tcg_res[pass], tcg_op);
        } else if (genenvfn) {
            genenvfn(tcg_res[pass], cpu_env, tcg_op);
        }

        tcg_temp_free_i64(tcg_op);
    }

    for (pass = 0; pass < 2; pass++) {
        write_vec_element_i32(s, tcg_res[pass], rd, destelt + pass, MO_32);
        tcg_temp_free_i32(tcg_res[pass]);
    }
    clear_vec_high(s, is_q, rd);
}
10336
10337
/* Saturating accumulate ops: SUQADD (signed + unsigned acc) and
 * USQADD (unsigned + signed acc); scalar and vector forms.
 * Note the helpers accumulate into their last argument.
 */
static void handle_2misc_satacc(DisasContext *s, bool is_scalar, bool is_u,
                                bool is_q, int size, int rn, int rd)
{
    bool is_double = (size == 3);

    if (is_double) {
        TCGv_i64 tcg_rn = tcg_temp_new_i64();
        TCGv_i64 tcg_rd = tcg_temp_new_i64();
        int pass;

        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
            read_vec_element(s, tcg_rn, rn, pass, MO_64);
            read_vec_element(s, tcg_rd, rd, pass, MO_64);

            if (is_u) { /* USQADD */
                gen_helper_neon_uqadd_s64(tcg_rd, cpu_env, tcg_rn, tcg_rd);
            } else { /* SUQADD */
                gen_helper_neon_sqadd_u64(tcg_rd, cpu_env, tcg_rn, tcg_rd);
            }
            write_vec_element(s, tcg_rd, rd, pass, MO_64);
        }
        tcg_temp_free_i64(tcg_rd);
        tcg_temp_free_i64(tcg_rn);
        clear_vec_high(s, !is_scalar, rd);
    } else {
        TCGv_i32 tcg_rn = tcg_temp_new_i32();
        TCGv_i32 tcg_rd = tcg_temp_new_i32();
        int pass, maxpasses;

        if (is_scalar) {
            maxpasses = 1;
        } else {
            maxpasses = is_q ? 4 : 2;
        }

        for (pass = 0; pass < maxpasses; pass++) {
            if (is_scalar) {
                read_vec_element_i32(s, tcg_rn, rn, pass, size);
                read_vec_element_i32(s, tcg_rd, rd, pass, size);
            } else {
                read_vec_element_i32(s, tcg_rn, rn, pass, MO_32);
                read_vec_element_i32(s, tcg_rd, rd, pass, MO_32);
            }

            if (is_u) { /* USQADD */
                switch (size) {
                case 0:
                    gen_helper_neon_uqadd_s8(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                case 1:
                    gen_helper_neon_uqadd_s16(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                case 2:
                    gen_helper_neon_uqadd_s32(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                default:
                    g_assert_not_reached();
                }
            } else { /* SUQADD */
                switch (size) {
                case 0:
                    gen_helper_neon_sqadd_u8(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                case 1:
                    gen_helper_neon_sqadd_u16(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                case 2:
                    gen_helper_neon_sqadd_u32(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                default:
                    g_assert_not_reached();
                }
            }

            if (is_scalar) {
                /* Zero the whole 64-bit element before the narrow write. */
                write_vec_element(s, tcg_constant_i64(0), rd, 0, MO_64);
            }
            write_vec_element_i32(s, tcg_rd, rd, pass, MO_32);
        }
        tcg_temp_free_i32(tcg_rd);
        tcg_temp_free_i32(tcg_rn);
        clear_vec_high(s, is_q, rd);
    }
}
10422
10423
10424
10425
10426
10427
10428
/* AdvSIMD scalar two reg misc
 *  31 30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
 * +-----+---+-----------+------+-----------+--------+-----+------+------+
 * | 0 1 | U | 1 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +-----+---+-----------+------+-----------+--------+-----+------+------+
 */
static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 12, 5);
    int size = extract32(insn, 22, 2);
    bool u = extract32(insn, 29, 1);
    bool is_fcvt = false;
    int rmode;
    TCGv_i32 tcg_rmode;
    TCGv_ptr tcg_fpstatus;

    switch (opcode) {
    case 0x3: /* USQADD / SUQADD */
        if (!fp_access_check(s)) {
            return;
        }
        handle_2misc_satacc(s, true, u, false, size, rn, rd);
        return;
    case 0x7: /* SQABS / SQNEG */
        break;
    case 0xa: /* CMLT (zero) */
        if (u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x8: /* CMGT, CMGE (zero) */
    case 0x9: /* CMEQ, CMLE (zero) */
    case 0xb: /* ABS, NEG */
        if (size != 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x12: /* SQXTUN */
        if (!u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x14: /* SQXTN, UQXTN */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_2misc_narrow(s, true, opcode, u, false, size, rn, rd);
        return;
    case 0xc ... 0xf:
    case 0x16 ... 0x1d:
    case 0x1f:
        /* Floating point: U, size[1] and opcode indicate operation;
         * size[0] indicates single or double precision.
         */
        opcode |= (extract32(size, 1, 1) << 5) | (u << 6);
        size = extract32(size, 0, 1) ? 3 : 2;
        switch (opcode) {
        case 0x2c: /* FCMGT (zero) */
        case 0x2d: /* FCMEQ (zero) */
        case 0x2e: /* FCMLT (zero) */
        case 0x6c: /* FCMGE (zero) */
        case 0x6d: /* FCMLE (zero) */
            handle_2misc_fcmp_zero(s, opcode, true, u, true, size, rn, rd);
            return;
        case 0x1d: /* SCVTF */
        case 0x5d: /* UCVTF */
        {
            bool is_signed = (opcode == 0x1d);
            if (!fp_access_check(s)) {
                return;
            }
            handle_simd_intfp_conv(s, rd, rn, 1, is_signed, 0, size);
            return;
        }
        case 0x3d: /* FRECPE */
        case 0x3f: /* FRECPX */
        case 0x7d: /* FRSQRTE */
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_reciprocal(s, opcode, true, u, true, size, rn, rd);
            return;
        case 0x1a: /* FCVTNS */
        case 0x1b: /* FCVTMS */
        case 0x3a: /* FCVTPS */
        case 0x3b: /* FCVTZS */
        case 0x5a: /* FCVTNU */
        case 0x5b: /* FCVTMU */
        case 0x7a: /* FCVTPU */
        case 0x7b: /* FCVTZU */
            is_fcvt = true;
            /* Rounding mode is encoded in opcode bits 5 and 0. */
            rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
            break;
        case 0x1c: /* FCVTAS */
        case 0x5c: /* FCVTAU */
            /* TIEAWAY doesn't fit in the usual rounding mode encoding */
            is_fcvt = true;
            rmode = FPROUNDING_TIEAWAY;
            break;
        case 0x56: /* FCVTXN, FCVTXN2 */
            if (size == 2) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_narrow(s, true, opcode, u, false, size - 1, rn, rd);
            return;
        default:
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (is_fcvt) {
        /* Install the requested rounding mode; restored after the op. */
        tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
        tcg_fpstatus = fpstatus_ptr(FPST_FPCR);
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
    } else {
        tcg_rmode = NULL;
        tcg_fpstatus = NULL;
    }

    if (size == 3) {
        TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
        TCGv_i64 tcg_rd = tcg_temp_new_i64();

        handle_2misc_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rmode, tcg_fpstatus);
        write_fp_dreg(s, rd, tcg_rd);
        tcg_temp_free_i64(tcg_rd);
        tcg_temp_free_i64(tcg_rn);
    } else {
        TCGv_i32 tcg_rn = tcg_temp_new_i32();
        TCGv_i32 tcg_rd = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_rn, rn, 0, size);

        switch (opcode) {
        case 0x7: /* SQABS, SQNEG */
        {
            NeonGenOneOpEnvFn *genfn;
            static NeonGenOneOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 },
                { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 },
                { gen_helper_neon_qabs_s32, gen_helper_neon_qneg_s32 },
            };
            genfn = fns[size][u];
            genfn(tcg_rd, cpu_env, tcg_rn);
            break;
        }
        case 0x1a: /* FCVTNS */
        case 0x1b: /* FCVTMS */
        case 0x1c: /* FCVTAS */
        case 0x3a: /* FCVTPS */
        case 0x3b: /* FCVTZS */
            gen_helper_vfp_tosls(tcg_rd, tcg_rn, tcg_constant_i32(0),
                                 tcg_fpstatus);
            break;
        case 0x5a: /* FCVTNU */
        case 0x5b: /* FCVTMU */
        case 0x5c: /* FCVTAU */
        case 0x7a: /* FCVTPU */
        case 0x7b: /* FCVTZU */
            gen_helper_vfp_touls(tcg_rd, tcg_rn, tcg_constant_i32(0),
                                 tcg_fpstatus);
            break;
        default:
            g_assert_not_reached();
        }

        write_fp_sreg(s, rd, tcg_rd);
        tcg_temp_free_i32(tcg_rd);
        tcg_temp_free_i32(tcg_rn);
    }

    if (is_fcvt) {
        /* Restore the original rounding mode. */
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
        tcg_temp_free_i32(tcg_rmode);
        tcg_temp_free_ptr(tcg_fpstatus);
    }
}
10622
10623
10624static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
10625 int immh, int immb, int opcode, int rn, int rd)
10626{
10627 int size = 32 - clz32(immh) - 1;
10628 int immhb = immh << 3 | immb;
10629 int shift = 2 * (8 << size) - immhb;
10630 GVecGen2iFn *gvec_fn;
10631
10632 if (extract32(immh, 3, 1) && !is_q) {
10633 unallocated_encoding(s);
10634 return;
10635 }
10636 tcg_debug_assert(size <= 3);
10637
10638 if (!fp_access_check(s)) {
10639 return;
10640 }
10641
10642 switch (opcode) {
10643 case 0x02:
10644 gvec_fn = is_u ? gen_gvec_usra : gen_gvec_ssra;
10645 break;
10646
10647 case 0x08:
10648 gvec_fn = gen_gvec_sri;
10649 break;
10650
10651 case 0x00:
10652 if (is_u) {
10653 if (shift == 8 << size) {
10654
10655 tcg_gen_gvec_dup_imm(size, vec_full_reg_offset(s, rd),
10656 is_q ? 16 : 8, vec_full_reg_size(s), 0);
10657 return;
10658 }
10659 gvec_fn = tcg_gen_gvec_shri;
10660 } else {
10661
10662 if (shift == 8 << size) {
10663 shift -= 1;
10664 }
10665 gvec_fn = tcg_gen_gvec_sari;
10666 }
10667 break;
10668
10669 case 0x04:
10670 gvec_fn = is_u ? gen_gvec_urshr : gen_gvec_srshr;
10671 break;
10672
10673 case 0x06:
10674 gvec_fn = is_u ? gen_gvec_ursra : gen_gvec_srsra;
10675 break;
10676
10677 default:
10678 g_assert_not_reached();
10679 }
10680
10681 gen_gvec_fn2i(s, is_q, rd, rn, shift, gvec_fn, size);
10682}
10683
10684
10685static void handle_vec_simd_shli(DisasContext *s, bool is_q, bool insert,
10686 int immh, int immb, int opcode, int rn, int rd)
10687{
10688 int size = 32 - clz32(immh) - 1;
10689 int immhb = immh << 3 | immb;
10690 int shift = immhb - (8 << size);
10691
10692
10693 assert(size >= 0 && size <= 3);
10694
10695 if (extract32(immh, 3, 1) && !is_q) {
10696 unallocated_encoding(s);
10697 return;
10698 }
10699
10700 if (!fp_access_check(s)) {
10701 return;
10702 }
10703
10704 if (insert) {
10705 gen_gvec_fn2i(s, is_q, rd, rn, shift, gen_gvec_sli, size);
10706 } else {
10707 gen_gvec_fn2i(s, is_q, rd, rn, shift, tcg_gen_gvec_shli, size);
10708 }
10709}
10710
10711
/* USHLL/SHLL - vector shift left with widening */
static void handle_vec_simd_wshli(DisasContext *s, bool is_q, bool is_u,
                                  int immh, int immb, int opcode, int rn, int rd)
{
    int size = 32 - clz32(immh) - 1;
    int immhb = immh << 3 | immb;
    int shift = immhb - (8 << size);
    int dsize = 64;
    int esize = 8 << size;
    int elements = dsize/esize;
    TCGv_i64 tcg_rn = new_tmp_a64(s);
    TCGv_i64 tcg_rd = new_tmp_a64(s);
    int i;

    if (size >= 3) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* For the LL variants the store is larger than the load,
     * so if rd == rn we would overwrite parts of our input.
     * So load everything right at the start with one big load,
     * and then shift out the required element bits one by one.
     */
    read_vec_element(s, tcg_rn, rn, is_q ? 1 : 0, MO_64);

    for (i = 0; i < elements; i++) {
        tcg_gen_shri_i64(tcg_rd, tcg_rn, i * esize);
        /* Sign- or zero-extend the element, then apply the left shift. */
        ext_and_shift_reg(tcg_rd, tcg_rd, size | (!is_u << 2), 0);
        tcg_gen_shli_i64(tcg_rd, tcg_rd, shift);
        write_vec_element(s, tcg_rd, rd, i, size + 1);
    }
}
10747
10748
/* SHRN/RSHRN - Shift right with narrowing (and potential rounding,
 * selected by opcode bit 0).
 */
static void handle_vec_simd_shrn(DisasContext *s, bool is_q,
                                 int immh, int immb, int opcode, int rn, int rd)
{
    int immhb = immh << 3 | immb;
    int size = 32 - clz32(immh) - 1;
    int dsize = 64;
    int esize = 8 << size;
    int elements = dsize/esize;
    int shift = (2 * esize) - immhb;
    bool round = extract32(opcode, 0, 1);
    TCGv_i64 tcg_rn, tcg_rd, tcg_final;
    TCGv_i64 tcg_round;
    int i;

    if (extract32(immh, 3, 1)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rn = tcg_temp_new_i64();
    tcg_rd = tcg_temp_new_i64();
    tcg_final = tcg_temp_new_i64();
    /* The "2" variant preserves the existing low half of rd. */
    read_vec_element(s, tcg_final, rd, is_q ? 1 : 0, MO_64);

    if (round) {
        tcg_round = tcg_constant_i64(1ULL << (shift - 1));
    } else {
        tcg_round = NULL;
    }

    for (i = 0; i < elements; i++) {
        read_vec_element(s, tcg_rn, rn, i, size+1);
        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
                                false, true, size+1, shift);

        tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
    }

    if (!is_q) {
        write_vec_element(s, tcg_final, rd, 0, MO_64);
    } else {
        write_vec_element(s, tcg_final, rd, 1, MO_64);
    }
    tcg_temp_free_i64(tcg_rn);
    tcg_temp_free_i64(tcg_rd);
    tcg_temp_free_i64(tcg_final);

    clear_vec_high(s, is_q, rd);
}
10802
10803
10804
10805
10806
10807
10808
10809
/* AdvSIMD shift by immediate
 *  31  30   29 28         23 22  19 18  16 15    11  10 9    5 4    0
 * +---+---+---+-------------+------+------+--------+---+------+------+
 * | 0 | Q | U | 0 1 1 1 1 0 | immh | immb | opcode | 1 |  Rn  |  Rd  |
 * +---+---+---+-------------+------+------+--------+---+------+------+
 */
static void disas_simd_shift_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 5);
    int immb = extract32(insn, 16, 3);
    int immh = extract32(insn, 19, 4);
    bool is_u = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);

    /* data_proc_simd[] has sent immh == 0 to disas_simd_mod_imm. */
    assert(immh != 0);

    switch (opcode) {
    case 0x08: /* SRI */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x00: /* SSHR / USHR */
    case 0x02: /* SSRA / USRA */
    case 0x04: /* SRSHR / URSHR */
    case 0x06: /* SRSRA / URSRA */
        handle_vec_simd_shri(s, is_q, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x0a: /* SHL / SLI */
        handle_vec_simd_shli(s, is_q, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x10: /* SHRN / SQSHRUN */
    case 0x11: /* RSHRN / SQRSHRUN */
        if (is_u) {
            handle_vec_simd_sqshrn(s, false, is_q, false, true, immh, immb,
                                   opcode, rn, rd);
        } else {
            handle_vec_simd_shrn(s, is_q, immh, immb, opcode, rn, rd);
        }
        break;
    case 0x12: /* SQSHRN / UQSHRN */
    case 0x13: /* SQRSHRN / UQRSHRN */
        handle_vec_simd_sqshrn(s, false, is_q, is_u, is_u, immh, immb,
                               opcode, rn, rd);
        break;
    case 0x14: /* SSHLL / USHLL */
        handle_vec_simd_wshli(s, is_q, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x1c: /* SCVTF / UCVTF */
        handle_simd_shift_intfp_conv(s, false, is_q, is_u, immh, immb,
                                     opcode, rn, rd);
        break;
    case 0xc: /* SQSHLU */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        handle_simd_qshl(s, false, is_q, false, true, immh, immb, rn, rd);
        break;
    case 0xe: /* SQSHL, UQSHL */
        handle_simd_qshl(s, false, is_q, is_u, is_u, immh, immb, rn, rd);
        break;
    case 0x1f: /* FCVTZS, FCVTZU */
        handle_simd_shift_fpint_conv(s, false, is_q, is_u, immh, immb, rn, rd);
        return;
    default:
        unallocated_encoding(s);
        return;
    }
}
10878
10879
10880
10881
10882static void gen_neon_addl(int size, bool is_sub, TCGv_i64 tcg_res,
10883 TCGv_i64 tcg_op1, TCGv_i64 tcg_op2)
10884{
10885 static NeonGenTwo64OpFn * const fns[3][2] = {
10886 { gen_helper_neon_addl_u16, gen_helper_neon_subl_u16 },
10887 { gen_helper_neon_addl_u32, gen_helper_neon_subl_u32 },
10888 { tcg_gen_add_i64, tcg_gen_sub_i64 },
10889 };
10890 NeonGenTwo64OpFn *genfn;
10891 assert(size < 3);
10892
10893 genfn = fns[size][is_sub];
10894 genfn(tcg_res, tcg_op1, tcg_op2);
10895}
10896
static void handle_3rd_widening(DisasContext *s, int is_q, int is_u, int size,
                                int opcode, int rd, int rn, int rm)
{
    /* 3-reg-different widening insns: 64 x 64 -> 128 */
    TCGv_i64 tcg_res[2];
    int pass, accop;

    tcg_res[0] = tcg_temp_new_i64();
    tcg_res[1] = tcg_temp_new_i64();

    /* Does this op do an adding accumulate, a subtracting accumulate,
     * or no accumulate at all?
     */
    switch (opcode) {
    case 5:  /* SABAL, SABAL2, UABAL, UABAL2 */
    case 8:  /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
    case 9:  /* SQDMLAL, SQDMLAL2 */
        accop = 1;
        break;
    case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
    case 11: /* SQDMLSL, SQDMLSL2 */
        accop = -1;
        break;
    default:
        accop = 0;
        break;
    }

    if (accop != 0) {
        read_vec_element(s, tcg_res[0], rd, 0, MO_64);
        read_vec_element(s, tcg_res[1], rd, 1, MO_64);
    }

    /* size == 2 means two 32x32->64 operations; this is worth special
     * casing because we can generally use 64-bit TCG arithmetic for it.
     */
    if (size == 2) {
        for (pass = 0; pass < 2; pass++) {
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
            TCGv_i64 tcg_passres;
            MemOp memop = MO_32 | (is_u ? 0 : MO_SIGN);

            int elt = pass + is_q * 2;

            read_vec_element(s, tcg_op1, rn, elt, memop);
            read_vec_element(s, tcg_op2, rm, elt, memop);

            if (accop == 0) {
                tcg_passres = tcg_res[pass];
            } else {
                tcg_passres = tcg_temp_new_i64();
            }

            switch (opcode) {
            case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
                tcg_gen_add_i64(tcg_passres, tcg_op1, tcg_op2);
                break;
            case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
                tcg_gen_sub_i64(tcg_passres, tcg_op1, tcg_op2);
                break;
            case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
            case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
            {
                /* Absolute difference via movcond on both orderings. */
                TCGv_i64 tcg_tmp1 = tcg_temp_new_i64();
                TCGv_i64 tcg_tmp2 = tcg_temp_new_i64();

                tcg_gen_sub_i64(tcg_tmp1, tcg_op1, tcg_op2);
                tcg_gen_sub_i64(tcg_tmp2, tcg_op2, tcg_op1);
                tcg_gen_movcond_i64(is_u ? TCG_COND_GEU : TCG_COND_GE,
                                    tcg_passres,
                                    tcg_op1, tcg_op2, tcg_tmp1, tcg_tmp2);
                tcg_temp_free_i64(tcg_tmp1);
                tcg_temp_free_i64(tcg_tmp2);
                break;
            }
            case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
            case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
            case 12: /* UMULL, UMULL2, SMULL, SMULL2 */
                tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2);
                break;
            case 9: /* SQDMLAL, SQDMLAL2 */
            case 11: /* SQDMLSL, SQDMLSL2 */
            case 13: /* SQDMULL, SQDMULL2 */
                /* Multiply then saturating double. */
                tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2);
                gen_helper_neon_addl_saturate_s64(tcg_passres, cpu_env,
                                                  tcg_passres, tcg_passres);
                break;
            default:
                g_assert_not_reached();
            }

            if (opcode == 9 || opcode == 11) {
                /* saturating accumulate ops */
                if (accop < 0) {
                    tcg_gen_neg_i64(tcg_passres, tcg_passres);
                }
                gen_helper_neon_addl_saturate_s64(tcg_res[pass], cpu_env,
                                                  tcg_res[pass], tcg_passres);
            } else if (accop > 0) {
                tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
            } else if (accop < 0) {
                tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
            }

            if (accop != 0) {
                tcg_temp_free_i64(tcg_passres);
            }

            tcg_temp_free_i64(tcg_op1);
            tcg_temp_free_i64(tcg_op2);
        }
    } else {
        /* size 0 or 1, generally helper functions */
        for (pass = 0; pass < 2; pass++) {
            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
            TCGv_i64 tcg_passres;
            int elt = pass + is_q * 2;

            read_vec_element_i32(s, tcg_op1, rn, elt, MO_32);
            read_vec_element_i32(s, tcg_op2, rm, elt, MO_32);

            if (accop == 0) {
                tcg_passres = tcg_res[pass];
            } else {
                tcg_passres = tcg_temp_new_i64();
            }

            switch (opcode) {
            case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
            case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
            {
                TCGv_i64 tcg_op2_64 = tcg_temp_new_i64();
                static NeonGenWidenFn * const widenfns[2][2] = {
                    { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 },
                    { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 },
                };
                NeonGenWidenFn *widenfn = widenfns[size][is_u];

                widenfn(tcg_op2_64, tcg_op2);
                widenfn(tcg_passres, tcg_op1);
                gen_neon_addl(size, (opcode == 2), tcg_passres,
                              tcg_passres, tcg_op2_64);
                tcg_temp_free_i64(tcg_op2_64);
                break;
            }
            case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
            case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
                if (size == 0) {
                    if (is_u) {
                        gen_helper_neon_abdl_u16(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_abdl_s16(tcg_passres, tcg_op1, tcg_op2);
                    }
                } else {
                    if (is_u) {
                        gen_helper_neon_abdl_u32(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_abdl_s32(tcg_passres, tcg_op1, tcg_op2);
                    }
                }
                break;
            case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
            case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
            case 12: /* UMULL, UMULL2, SMULL, SMULL2 */
                if (size == 0) {
                    if (is_u) {
                        gen_helper_neon_mull_u8(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_mull_s8(tcg_passres, tcg_op1, tcg_op2);
                    }
                } else {
                    if (is_u) {
                        gen_helper_neon_mull_u16(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2);
                    }
                }
                break;
            case 9: /* SQDMLAL, SQDMLAL2 */
            case 11: /* SQDMLSL, SQDMLSL2 */
            case 13: /* SQDMULL, SQDMULL2 */
                assert(size == 1);
                gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2);
                gen_helper_neon_addl_saturate_s32(tcg_passres, cpu_env,
                                                  tcg_passres, tcg_passres);
                break;
            default:
                g_assert_not_reached();
            }
            tcg_temp_free_i32(tcg_op1);
            tcg_temp_free_i32(tcg_op2);

            if (accop != 0) {
                if (opcode == 9 || opcode == 11) {
                    /* saturating accumulate ops */
                    if (accop < 0) {
                        gen_helper_neon_negl_u32(tcg_passres, tcg_passres);
                    }
                    gen_helper_neon_addl_saturate_s32(tcg_res[pass], cpu_env,
                                                      tcg_res[pass],
                                                      tcg_passres);
                } else {
                    gen_neon_addl(size, (accop < 0), tcg_res[pass],
                                  tcg_res[pass], tcg_passres);
                }
                tcg_temp_free_i64(tcg_passres);
            }
        }
    }

    write_vec_element(s, tcg_res[0], rd, 0, MO_64);
    write_vec_element(s, tcg_res[1], rd, 1, MO_64);
    tcg_temp_free_i64(tcg_res[0]);
    tcg_temp_free_i64(tcg_res[1]);
}
11114
/* 3-reg-different "wide" ops: SADDW, UADDW, SSUBW, USUBW. The first
 * operand is already wide (128-bit); only the second is widened.
 */
static void handle_3rd_wide(DisasContext *s, int is_q, int is_u, int size,
                            int opcode, int rd, int rn, int rm)
{
    TCGv_i64 tcg_res[2];
    int part = is_q ? 2 : 0; /* "2" variants read the high half of rm */
    int pass;

    for (pass = 0; pass < 2; pass++) {
        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
        TCGv_i32 tcg_op2 = tcg_temp_new_i32();
        TCGv_i64 tcg_op2_wide = tcg_temp_new_i64();
        static NeonGenWidenFn * const widenfns[3][2] = {
            { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 },
            { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 },
            { tcg_gen_ext_i32_i64, tcg_gen_extu_i32_i64 },
        };
        NeonGenWidenFn *widenfn = widenfns[size][is_u];

        read_vec_element(s, tcg_op1, rn, pass, MO_64);
        read_vec_element_i32(s, tcg_op2, rm, part + pass, MO_32);
        widenfn(tcg_op2_wide, tcg_op2);
        tcg_temp_free_i32(tcg_op2);
        tcg_res[pass] = tcg_temp_new_i64();
        gen_neon_addl(size, (opcode == 3),
                      tcg_res[pass], tcg_op1, tcg_op2_wide);
        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2_wide);
    }

    for (pass = 0; pass < 2; pass++) {
        write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
        tcg_temp_free_i64(tcg_res[pass]);
    }
}
11149
/* Rounding narrow of the high 32 bits of a 64-bit value: add the
 * rounding constant (half of the discarded LSB weight) then take the
 * top half. Note this clobbers its input.
 */
static void do_narrow_round_high_u32(TCGv_i32 res, TCGv_i64 in)
{
    tcg_gen_addi_i64(in, in, 1U << 31);
    tcg_gen_extrh_i64_i32(res, in);
}
11155
/* 3-reg-different narrowing ops: ADDHN, RADDHN, SUBHN, RSUBHN:
 * add/subtract the wide operands, then keep the high half of each
 * result element (with optional rounding, selected by is_u).
 */
static void handle_3rd_narrowing(DisasContext *s, int is_q, int is_u, int size,
                                 int opcode, int rd, int rn, int rm)
{
    TCGv_i32 tcg_res[2];
    int part = is_q ? 2 : 0; /* "2" variants write the high half of rd */
    int pass;

    for (pass = 0; pass < 2; pass++) {
        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
        TCGv_i64 tcg_op2 = tcg_temp_new_i64();
        TCGv_i64 tcg_wideres = tcg_temp_new_i64();
        static NeonGenNarrowFn * const narrowfns[3][2] = {
            { gen_helper_neon_narrow_high_u8,
              gen_helper_neon_narrow_round_high_u8 },
            { gen_helper_neon_narrow_high_u16,
              gen_helper_neon_narrow_round_high_u16 },
            { tcg_gen_extrh_i64_i32, do_narrow_round_high_u32 },
        };
        NeonGenNarrowFn *gennarrow = narrowfns[size][is_u];

        read_vec_element(s, tcg_op1, rn, pass, MO_64);
        read_vec_element(s, tcg_op2, rm, pass, MO_64);

        gen_neon_addl(size, (opcode == 6), tcg_wideres, tcg_op1, tcg_op2);

        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2);

        tcg_res[pass] = tcg_temp_new_i32();
        gennarrow(tcg_res[pass], tcg_wideres);
        tcg_temp_free_i64(tcg_wideres);
    }

    for (pass = 0; pass < 2; pass++) {
        write_vec_element_i32(s, tcg_res[pass], rd, pass + part, MO_32);
        tcg_temp_free_i32(tcg_res[pass]);
    }
    clear_vec_high(s, is_q, rd);
}
11195
11196
11197
11198
11199
11200
11201
static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn)
{
    /* AdvSIMD three different
     *   31  30  29 28       24 23  22  21 20  16 15    12 11 10 9    5 4    0
     * +---+---+---+-----------+------+---+------+--------+-----+------+------+
     * | 0 | Q | U | 0 1 1 1 0 | size | 1 |  Rm  | opcode | 0 0 |  Rn  |  Rd  |
     * +---+---+---+-----------+------+---+------+--------+-----+------+------+
     *
     * Opcodes group into "widening" (64x64 -> 128), "wide" (128+64 -> 128)
     * and "narrowing" (128 -> 64) families, dispatched below.
     */
    int is_q = extract32(insn, 30, 1);
    int is_u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 4);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    switch (opcode) {
    case 1: /* SADDW, SADDW2, UADDW, UADDW2 */
    case 3: /* SSUBW, SSUBW2, USUBW, USUBW2 */
        /* 64 x 128 -> 128 */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_3rd_wide(s, is_q, is_u, size, opcode, rd, rn, rm);
        break;
    case 4: /* ADDHN, ADDHN2, RADDHN, RADDHN2 */
    case 6: /* SUBHN, SUBHN2, RSUBHN, RSUBHN2 */
        /* 128 x 128 -> 64 */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_3rd_narrowing(s, is_q, is_u, size, opcode, rd, rn, rm);
        break;
    case 14: /* PMULL, PMULL2 */
        if (is_u) {
            unallocated_encoding(s);
            return;
        }
        switch (size) {
        case 0: /* PMULL.P8 */
            if (!fp_access_check(s)) {
                return;
            }
            /* The Q field specifies lo/hi half input for this insn.  */
            gen_gvec_op3_ool(s, true, rd, rn, rm, is_q,
                             gen_helper_neon_pmull_h);
            break;

        case 3: /* PMULL.P64 */
            if (!dc_isar_feature(aa64_pmull, s)) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            /* The Q field specifies lo/hi half input for this insn.  */
            gen_gvec_op3_ool(s, true, rd, rn, rm, is_q,
                             gen_helper_gvec_pmull_q);
            break;

        default:
            unallocated_encoding(s);
            break;
        }
        return;
    case 9: /* SQDMLAL, SQDMLAL2 */
    case 11: /* SQDMLSL, SQDMLSL2 */
    case 13: /* SQDMULL, SQDMULL2 */
        if (is_u || size == 0) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
    case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
    case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
    case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
    case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
    case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
    case 12: /* SMULL, SMULL2, UMULL, UMULL2 */
        /* 64 x 64 -> 128 */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }

        handle_3rd_widening(s, is_q, is_u, size, opcode, rd, rn, rm);
        break;
    default:
        /* opcode 15 not allocated */
        unallocated_encoding(s);
        break;
    }
}
11312
11313
11314static void disas_simd_3same_logic(DisasContext *s, uint32_t insn)
11315{
11316 int rd = extract32(insn, 0, 5);
11317 int rn = extract32(insn, 5, 5);
11318 int rm = extract32(insn, 16, 5);
11319 int size = extract32(insn, 22, 2);
11320 bool is_u = extract32(insn, 29, 1);
11321 bool is_q = extract32(insn, 30, 1);
11322
11323 if (!fp_access_check(s)) {
11324 return;
11325 }
11326
11327 switch (size + 4 * is_u) {
11328 case 0:
11329 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_and, 0);
11330 return;
11331 case 1:
11332 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_andc, 0);
11333 return;
11334 case 2:
11335 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_or, 0);
11336 return;
11337 case 3:
11338 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_orc, 0);
11339 return;
11340 case 4:
11341 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_xor, 0);
11342 return;
11343
11344 case 5:
11345 gen_gvec_fn4(s, is_q, rd, rd, rn, rm, tcg_gen_gvec_bitsel, 0);
11346 return;
11347 case 6:
11348 gen_gvec_fn4(s, is_q, rd, rm, rn, rd, tcg_gen_gvec_bitsel, 0);
11349 return;
11350 case 7:
11351 gen_gvec_fn4(s, is_q, rd, rm, rd, rn, tcg_gen_gvec_bitsel, 0);
11352 return;
11353
11354 default:
11355 g_assert_not_reached();
11356 }
11357}
11358
11359
11360
11361
11362
11363
11364static void handle_simd_3same_pair(DisasContext *s, int is_q, int u, int opcode,
11365 int size, int rn, int rm, int rd)
11366{
11367 TCGv_ptr fpst;
11368 int pass;
11369
11370
11371 if (opcode >= 0x58) {
11372 fpst = fpstatus_ptr(FPST_FPCR);
11373 } else {
11374 fpst = NULL;
11375 }
11376
11377 if (!fp_access_check(s)) {
11378 return;
11379 }
11380
11381
11382
11383
11384 if (size == 3) {
11385 TCGv_i64 tcg_res[2];
11386
11387 for (pass = 0; pass < 2; pass++) {
11388 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
11389 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
11390 int passreg = (pass == 0) ? rn : rm;
11391
11392 read_vec_element(s, tcg_op1, passreg, 0, MO_64);
11393 read_vec_element(s, tcg_op2, passreg, 1, MO_64);
11394 tcg_res[pass] = tcg_temp_new_i64();
11395
11396 switch (opcode) {
11397 case 0x17:
11398 tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2);
11399 break;
11400 case 0x58:
11401 gen_helper_vfp_maxnumd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11402 break;
11403 case 0x5a:
11404 gen_helper_vfp_addd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11405 break;
11406 case 0x5e:
11407 gen_helper_vfp_maxd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11408 break;
11409 case 0x78:
11410 gen_helper_vfp_minnumd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11411 break;
11412 case 0x7e:
11413 gen_helper_vfp_mind(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11414 break;
11415 default:
11416 g_assert_not_reached();
11417 }
11418
11419 tcg_temp_free_i64(tcg_op1);
11420 tcg_temp_free_i64(tcg_op2);
11421 }
11422
11423 for (pass = 0; pass < 2; pass++) {
11424 write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
11425 tcg_temp_free_i64(tcg_res[pass]);
11426 }
11427 } else {
11428 int maxpass = is_q ? 4 : 2;
11429 TCGv_i32 tcg_res[4];
11430
11431 for (pass = 0; pass < maxpass; pass++) {
11432 TCGv_i32 tcg_op1 = tcg_temp_new_i32();
11433 TCGv_i32 tcg_op2 = tcg_temp_new_i32();
11434 NeonGenTwoOpFn *genfn = NULL;
11435 int passreg = pass < (maxpass / 2) ? rn : rm;
11436 int passelt = (is_q && (pass & 1)) ? 2 : 0;
11437
11438 read_vec_element_i32(s, tcg_op1, passreg, passelt, MO_32);
11439 read_vec_element_i32(s, tcg_op2, passreg, passelt + 1, MO_32);
11440 tcg_res[pass] = tcg_temp_new_i32();
11441
11442 switch (opcode) {
11443 case 0x17:
11444 {
11445 static NeonGenTwoOpFn * const fns[3] = {
11446 gen_helper_neon_padd_u8,
11447 gen_helper_neon_padd_u16,
11448 tcg_gen_add_i32,
11449 };
11450 genfn = fns[size];
11451 break;
11452 }
11453 case 0x14:
11454 {
11455 static NeonGenTwoOpFn * const fns[3][2] = {
11456 { gen_helper_neon_pmax_s8, gen_helper_neon_pmax_u8 },
11457 { gen_helper_neon_pmax_s16, gen_helper_neon_pmax_u16 },
11458 { tcg_gen_smax_i32, tcg_gen_umax_i32 },
11459 };
11460 genfn = fns[size][u];
11461 break;
11462 }
11463 case 0x15:
11464 {
11465 static NeonGenTwoOpFn * const fns[3][2] = {
11466 { gen_helper_neon_pmin_s8, gen_helper_neon_pmin_u8 },
11467 { gen_helper_neon_pmin_s16, gen_helper_neon_pmin_u16 },
11468 { tcg_gen_smin_i32, tcg_gen_umin_i32 },
11469 };
11470 genfn = fns[size][u];
11471 break;
11472 }
11473
11474 case 0x58:
11475 gen_helper_vfp_maxnums(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11476 break;
11477 case 0x5a:
11478 gen_helper_vfp_adds(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11479 break;
11480 case 0x5e:
11481 gen_helper_vfp_maxs(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11482 break;
11483 case 0x78:
11484 gen_helper_vfp_minnums(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11485 break;
11486 case 0x7e:
11487 gen_helper_vfp_mins(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11488 break;
11489 default:
11490 g_assert_not_reached();
11491 }
11492
11493
11494 if (genfn) {
11495 genfn(tcg_res[pass], tcg_op1, tcg_op2);
11496 }
11497
11498 tcg_temp_free_i32(tcg_op1);
11499 tcg_temp_free_i32(tcg_op2);
11500 }
11501
11502 for (pass = 0; pass < maxpass; pass++) {
11503 write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32);
11504 tcg_temp_free_i32(tcg_res[pass]);
11505 }
11506 clear_vec_high(s, is_q, rd);
11507 }
11508
11509 if (fpst) {
11510 tcg_temp_free_ptr(fpst);
11511 }
11512}
11513
11514
/* Floating point op subgroup of C3.6.16. */
static void disas_simd_3same_float(DisasContext *s, uint32_t insn)
{
    /* For these floating point ops, the U, size[1] and opcode bits
     * together indicate the operation.  size[0] indicates single
     * or double precision.
     */
    int fpopcode = extract32(insn, 11, 5)
        | (extract32(insn, 23, 1) << 5)
        | (extract32(insn, 29, 1) << 6);
    int is_q = extract32(insn, 30, 1);
    int size = extract32(insn, 22, 1);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    int datasize = is_q ? 128 : 64;
    int esize = 32 << size;
    int elements = datasize / esize;

    if (size == 1 && !is_q) {
        /* Double-precision vector ops require the 128-bit Q form */
        unallocated_encoding(s);
        return;
    }

    switch (fpopcode) {
    case 0x58: /* FMAXNMP */
    case 0x5a: /* FADDP */
    case 0x5e: /* FMAXP */
    case 0x78: /* FMINNMP */
    case 0x7e: /* FMINP */
        if (size && !is_q) {
            unallocated_encoding(s);
            return;
        }
        /* fp_access_check is done inside handle_simd_3same_pair */
        handle_simd_3same_pair(s, is_q, 0, fpopcode, size ? MO_64 : MO_32,
                               rn, rm, rd);
        return;
    case 0x1b: /* FMULX */
    case 0x1f: /* FRECPS */
    case 0x3f: /* FRSQRTS */
    case 0x5d: /* FACGE */
    case 0x7d: /* FACGT */
    case 0x19: /* FMLA */
    case 0x39: /* FMLS */
    case 0x18: /* FMAXNM */
    case 0x1a: /* FADD */
    case 0x1c: /* FCMEQ */
    case 0x1e: /* FMAX */
    case 0x38: /* FMINNM */
    case 0x3a: /* FSUB */
    case 0x3e: /* FMIN */
    case 0x5b: /* FMUL */
    case 0x5c: /* FCMGE */
    case 0x5f: /* FDIV */
    case 0x7a: /* FABD */
    case 0x7c: /* FCMGT */
        if (!fp_access_check(s)) {
            return;
        }
        handle_3same_float(s, size, elements, fpopcode, rd, rn, rm);
        return;

    case 0x1d: /* FMLAL  */
    case 0x3d: /* FMLSL  */
    case 0x59: /* FMLAL2 */
    case 0x79: /* FMLSL2 */
        /* These require FEAT_FHM and are half-to-single widening ops */
        if (size & 1 || !dc_isar_feature(aa64_fhm, s)) {
            unallocated_encoding(s);
            return;
        }
        if (fp_access_check(s)) {
            int is_s = extract32(insn, 23, 1);
            int is_2 = extract32(insn, 29, 1);
            int data = (is_2 << 1) | is_s;
            tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
                               vec_full_reg_offset(s, rn),
                               vec_full_reg_offset(s, rm), cpu_env,
                               is_q ? 16 : 8, vec_full_reg_size(s),
                               data, gen_helper_gvec_fmlal_a64);
        }
        return;

    default:
        unallocated_encoding(s);
        return;
    }
}
11602
11603
/* Integer op subgroup of C3.6.16. */
static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
{
    int is_q = extract32(insn, 30, 1);
    int u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 11, 5);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    int pass;
    TCGCond cond;

    /* First pass: reject the unallocated size combinations */
    switch (opcode) {
    case 0x13: /* MUL, PMUL */
        if (u && size != 0) {
            /* PMUL is bytes only */
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x0: /* SHADD, UHADD */
    case 0x2: /* SRHADD, URHADD */
    case 0x4: /* SHSUB, UHSUB */
    case 0xc: /* SMAX, UMAX */
    case 0xd: /* SMIN, UMIN */
    case 0xe: /* SABD, UABD */
    case 0xf: /* SABA, UABA */
    case 0x12: /* MLA, MLS */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x16: /* SQDMULH, SQRDMULH */
        if (size == 0 || size == 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        if (size == 3 && !is_q) {
            /* 64-bit element ops exist only in the 128-bit Q form */
            unallocated_encoding(s);
            return;
        }
        break;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* Operations with a whole-vector (gvec) implementation are
     * dispatched here and return; anything that falls out of this
     * switch is handled element-by-element below.
     */
    switch (opcode) {
    case 0x01: /* SQADD, UQADD */
        if (u) {
            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_uqadd_qc, size);
        } else {
            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sqadd_qc, size);
        }
        return;
    case 0x05: /* SQSUB, UQSUB */
        if (u) {
            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_uqsub_qc, size);
        } else {
            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sqsub_qc, size);
        }
        return;
    case 0x08: /* SSHL, USHL */
        if (u) {
            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_ushl, size);
        } else {
            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sshl, size);
        }
        return;
    case 0x0c: /* SMAX, UMAX */
        if (u) {
            gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_umax, size);
        } else {
            gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_smax, size);
        }
        return;
    case 0x0d: /* SMIN, UMIN */
        if (u) {
            gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_umin, size);
        } else {
            gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_smin, size);
        }
        return;
    case 0xe: /* SABD, UABD */
        if (u) {
            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_uabd, size);
        } else {
            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sabd, size);
        }
        return;
    case 0xf: /* SABA, UABA */
        if (u) {
            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_uaba, size);
        } else {
            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_saba, size);
        }
        return;
    case 0x10: /* ADD, SUB */
        if (u) {
            gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_sub, size);
        } else {
            gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_add, size);
        }
        return;
    case 0x13: /* MUL, PMUL */
        if (!u) {
            /* MUL */
            gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_mul, size);
        } else {
            /* PMUL */
            gen_gvec_op3_ool(s, is_q, rd, rn, rm, 0, gen_helper_gvec_pmul_b);
        }
        return;
    case 0x12: /* MLA, MLS */
        if (u) {
            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_mls, size);
        } else {
            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_mla, size);
        }
        return;
    case 0x16: /* SQDMULH, SQRDMULH */
    {
        static gen_helper_gvec_3_ptr * const fns[2][2] = {
            { gen_helper_neon_sqdmulh_h, gen_helper_neon_sqrdmulh_h },
            { gen_helper_neon_sqdmulh_s, gen_helper_neon_sqrdmulh_s },
        };
        /* size is 1 or 2 here (0 and 3 rejected above) */
        gen_gvec_op3_qc(s, is_q, rd, rn, rm, fns[size - 1][u]);
    }
        return;
    case 0x11:
        if (!u) {
            /* CMTST */
            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_cmtst, size);
            return;
        }
        /* else CMEQ */
        cond = TCG_COND_EQ;
        goto do_gvec_cmp;
    case 0x06: /* CMGT, CMHI */
        cond = u ? TCG_COND_GTU : TCG_COND_GT;
        goto do_gvec_cmp;
    case 0x07: /* CMGE, CMHS */
        cond = u ? TCG_COND_GEU : TCG_COND_GE;
    do_gvec_cmp:
        tcg_gen_gvec_cmp(cond, size, vec_full_reg_offset(s, rd),
                         vec_full_reg_offset(s, rn),
                         vec_full_reg_offset(s, rm),
                         is_q ? 16 : 8, vec_full_reg_size(s));
        return;
    }

    if (size == 3) {
        /* 64 x 64 -> 64: only the Q form reaches here (checked above) */
        assert(is_q);
        for (pass = 0; pass < 2; pass++) {
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
            TCGv_i64 tcg_res = tcg_temp_new_i64();

            read_vec_element(s, tcg_op1, rn, pass, MO_64);
            read_vec_element(s, tcg_op2, rm, pass, MO_64);

            handle_3same_64(s, opcode, u, tcg_res, tcg_op1, tcg_op2);

            write_vec_element(s, tcg_res, rd, pass, MO_64);

            tcg_temp_free_i64(tcg_res);
            tcg_temp_free_i64(tcg_op1);
            tcg_temp_free_i64(tcg_op2);
        }
    } else {
        for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();
            NeonGenTwoOpFn *genfn = NULL;
            NeonGenTwoOpEnvFn *genenvfn = NULL;

            read_vec_element_i32(s, tcg_op1, rn, pass, MO_32);
            read_vec_element_i32(s, tcg_op2, rm, pass, MO_32);

            switch (opcode) {
            case 0x0: /* SHADD, UHADD */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_hadd_s8, gen_helper_neon_hadd_u8 },
                    { gen_helper_neon_hadd_s16, gen_helper_neon_hadd_u16 },
                    { gen_helper_neon_hadd_s32, gen_helper_neon_hadd_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0x2: /* SRHADD, URHADD */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_rhadd_s8, gen_helper_neon_rhadd_u8 },
                    { gen_helper_neon_rhadd_s16, gen_helper_neon_rhadd_u16 },
                    { gen_helper_neon_rhadd_s32, gen_helper_neon_rhadd_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0x4: /* SHSUB, UHSUB */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_hsub_s8, gen_helper_neon_hsub_u8 },
                    { gen_helper_neon_hsub_s16, gen_helper_neon_hsub_u16 },
                    { gen_helper_neon_hsub_s32, gen_helper_neon_hsub_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0x9: /* SQSHL, UQSHL */
            {
                static NeonGenTwoOpEnvFn * const fns[3][2] = {
                    { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 },
                    { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 },
                    { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 },
                };
                genenvfn = fns[size][u];
                break;
            }
            case 0xa: /* SRSHL, URSHL */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_rshl_s8, gen_helper_neon_rshl_u8 },
                    { gen_helper_neon_rshl_s16, gen_helper_neon_rshl_u16 },
                    { gen_helper_neon_rshl_s32, gen_helper_neon_rshl_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0xb: /* SQRSHL, UQRSHL */
            {
                static NeonGenTwoOpEnvFn * const fns[3][2] = {
                    { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 },
                    { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 },
                    { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 },
                };
                genenvfn = fns[size][u];
                break;
            }
            default:
                g_assert_not_reached();
            }

            /* Saturating ops take cpu_env so they can set QC */
            if (genenvfn) {
                genenvfn(tcg_res, cpu_env, tcg_op1, tcg_op2);
            } else {
                genfn(tcg_res, tcg_op1, tcg_op2);
            }

            write_vec_element_i32(s, tcg_res, rd, pass, MO_32);

            tcg_temp_free_i32(tcg_res);
            tcg_temp_free_i32(tcg_op1);
            tcg_temp_free_i32(tcg_op2);
        }
    }
    clear_vec_high(s, is_q, rd);
}
11864
11865
11866
11867
11868
11869
11870
11871static void disas_simd_three_reg_same(DisasContext *s, uint32_t insn)
11872{
11873 int opcode = extract32(insn, 11, 5);
11874
11875 switch (opcode) {
11876 case 0x3:
11877 disas_simd_3same_logic(s, insn);
11878 break;
11879 case 0x17:
11880 case 0x14:
11881 case 0x15:
11882 {
11883
11884 int is_q = extract32(insn, 30, 1);
11885 int u = extract32(insn, 29, 1);
11886 int size = extract32(insn, 22, 2);
11887 int rm = extract32(insn, 16, 5);
11888 int rn = extract32(insn, 5, 5);
11889 int rd = extract32(insn, 0, 5);
11890 if (opcode == 0x17) {
11891 if (u || (size == 3 && !is_q)) {
11892 unallocated_encoding(s);
11893 return;
11894 }
11895 } else {
11896 if (size == 3) {
11897 unallocated_encoding(s);
11898 return;
11899 }
11900 }
11901 handle_simd_3same_pair(s, is_q, u, opcode, size, rn, rm, rd);
11902 break;
11903 }
11904 case 0x18 ... 0x31:
11905
11906 disas_simd_3same_float(s, insn);
11907 break;
11908 default:
11909 disas_simd_3same_int(s, insn);
11910 break;
11911 }
11912}
11913
11914
11915
11916
11917
11918
11919
11920
11921
11922
11923
11924
11925
/* AdvSIMD three same: the FP16 (half-precision element) class. */
static void disas_simd_three_reg_same_fp16(DisasContext *s, uint32_t insn)
{
    int opcode = extract32(insn, 11, 3);
    int u = extract32(insn, 29, 1);
    int a = extract32(insn, 23, 1);
    int is_q = extract32(insn, 30, 1);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    /*
     * For these floating point ops, the U, a and opcode bits
     * together indicate the operation.
     */
    int fpopcode = opcode | (a << 3) | (u << 4);
    int datasize = is_q ? 128 : 64;
    int elements = datasize / 16;
    bool pairwise;
    TCGv_ptr fpst;
    int pass;

    switch (fpopcode) {
    case 0x0: /* FMAXNM */
    case 0x1: /* FMLA */
    case 0x2: /* FADD */
    case 0x3: /* FMULX */
    case 0x4: /* FCMEQ */
    case 0x6: /* FMAX */
    case 0x7: /* FRECPS */
    case 0x8: /* FMINNM */
    case 0x9: /* FMLS */
    case 0xa: /* FSUB */
    case 0xe: /* FMIN */
    case 0xf: /* FRSQRTS */
    case 0x13: /* FMUL */
    case 0x14: /* FCMGE */
    case 0x15: /* FACGE */
    case 0x17: /* FDIV */
    case 0x1a: /* FABD */
    case 0x1c: /* FCMGT */
    case 0x1d: /* FACGT */
        pairwise = false;
        break;
    case 0x10: /* FMAXNMP */
    case 0x12: /* FADDP */
    case 0x16: /* FMAXP */
    case 0x18: /* FMINNMP */
    case 0x1e: /* FMINP */
        pairwise = true;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!dc_isar_feature(aa64_fp16, s)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    fpst = fpstatus_ptr(FPST_FPCR_F16);

    if (pairwise) {
        int maxpass = is_q ? 8 : 4;
        TCGv_i32 tcg_op1 = tcg_temp_new_i32();
        TCGv_i32 tcg_op2 = tcg_temp_new_i32();
        TCGv_i32 tcg_res[8];

        /* The first half of the passes reads pairs from rn, the second
         * half from rm; all results are computed before any writeback
         * so that rd may overlap rn or rm.
         */
        for (pass = 0; pass < maxpass; pass++) {
            int passreg = pass < (maxpass / 2) ? rn : rm;
            int passelt = (pass << 1) & (maxpass - 1);

            read_vec_element_i32(s, tcg_op1, passreg, passelt, MO_16);
            read_vec_element_i32(s, tcg_op2, passreg, passelt + 1, MO_16);
            tcg_res[pass] = tcg_temp_new_i32();

            switch (fpopcode) {
            case 0x10: /* FMAXNMP */
                gen_helper_advsimd_maxnumh(tcg_res[pass], tcg_op1, tcg_op2,
                                           fpst);
                break;
            case 0x12: /* FADDP */
                gen_helper_advsimd_addh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x16: /* FMAXP */
                gen_helper_advsimd_maxh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x18: /* FMINNMP */
                gen_helper_advsimd_minnumh(tcg_res[pass], tcg_op1, tcg_op2,
                                           fpst);
                break;
            case 0x1e: /* FMINP */
                gen_helper_advsimd_minh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }
        }

        for (pass = 0; pass < maxpass; pass++) {
            write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_16);
            tcg_temp_free_i32(tcg_res[pass]);
        }

        tcg_temp_free_i32(tcg_op1);
        tcg_temp_free_i32(tcg_op2);

    } else {
        for (pass = 0; pass < elements; pass++) {
            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op1, rn, pass, MO_16);
            read_vec_element_i32(s, tcg_op2, rm, pass, MO_16);

            switch (fpopcode) {
            case 0x0: /* FMAXNM */
                gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1: /* FMLA */
                read_vec_element_i32(s, tcg_res, rd, pass, MO_16);
                gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_res,
                                           fpst);
                break;
            case 0x2: /* FADD */
                gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3: /* FMULX */
                gen_helper_advsimd_mulxh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x4: /* FCMEQ */
                gen_helper_advsimd_ceq_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x6: /* FMAX */
                gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7: /* FRECPS */
                gen_helper_recpsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x8: /* FMINNM */
                gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x9: /* FMLS */
                /* As usual for ARM, separate negation for fused multiply-add */
                tcg_gen_xori_i32(tcg_op1, tcg_op1, 0x8000);
                read_vec_element_i32(s, tcg_res, rd, pass, MO_16);
                gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_res,
                                           fpst);
                break;
            case 0xa: /* FSUB */
                gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xe: /* FMIN */
                gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xf: /* FRSQRTS */
                gen_helper_rsqrtsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x13: /* FMUL */
                gen_helper_advsimd_mulh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x14: /* FCMGE */
                gen_helper_advsimd_cge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x15: /* FACGE */
                gen_helper_advsimd_acge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x17: /* FDIV */
                gen_helper_advsimd_divh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1a: /* FABD */
                gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
                /* Clear the sign bit to get the absolute difference */
                tcg_gen_andi_i32(tcg_res, tcg_res, 0x7fff);
                break;
            case 0x1c: /* FCMGT */
                gen_helper_advsimd_cgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1d: /* FACGT */
                gen_helper_advsimd_acgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            write_vec_element_i32(s, tcg_res, rd, pass, MO_16);
            tcg_temp_free_i32(tcg_res);
            tcg_temp_free_i32(tcg_op1);
            tcg_temp_free_i32(tcg_op2);
        }
    }

    tcg_temp_free_ptr(fpst);

    clear_vec_high(s, is_q, rd);
}
12125
12126
12127
12128
12129
12130
12131
/* AdvSIMD three same extra
 *  Fields decoded below: Q (bit 30), U (bit 29), size (bits 23:22),
 *  Rm (bits 20:16), opcode (bits 14:11), Rn (bits 9:5), Rd (bits 4:0).
 */
static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 4);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    bool u = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);
    bool feature;
    int rot;

    /* First pass: per-insn size checks and the ISA feature gate */
    switch (u * 16 + opcode) {
    case 0x10: /* SQRDMLAH (vector) */
    case 0x11: /* SQRDMLSH (vector) */
        if (size != 1 && size != 2) {
            unallocated_encoding(s);
            return;
        }
        feature = dc_isar_feature(aa64_rdm, s);
        break;
    case 0x02: /* SDOT (vector) */
    case 0x12: /* UDOT (vector) */
        if (size != MO_32) {
            unallocated_encoding(s);
            return;
        }
        feature = dc_isar_feature(aa64_dp, s);
        break;
    case 0x03: /* USDOT */
        if (size != MO_32) {
            unallocated_encoding(s);
            return;
        }
        feature = dc_isar_feature(aa64_i8mm, s);
        break;
    case 0x04: /* SMMLA */
    case 0x14: /* UMMLA */
    case 0x05: /* USMMLA */
        if (!is_q || size != MO_32) {
            unallocated_encoding(s);
            return;
        }
        feature = dc_isar_feature(aa64_i8mm, s);
        break;
    case 0x18: /* FCMLA, #0 */
    case 0x19: /* FCMLA, #90 */
    case 0x1a: /* FCMLA, #180 */
    case 0x1b: /* FCMLA, #270 */
    case 0x1c: /* FCADD, #90 */
    case 0x1e: /* FCADD, #270 */
        if (size == 0
            || (size == 1 && !dc_isar_feature(aa64_fp16, s))
            || (size == 3 && !is_q)) {
            unallocated_encoding(s);
            return;
        }
        feature = dc_isar_feature(aa64_fcma, s);
        break;
    case 0x1d: /* BFMMLA */
        if (size != MO_16 || !is_q) {
            unallocated_encoding(s);
            return;
        }
        feature = dc_isar_feature(aa64_bf16, s);
        break;
    case 0x1f:
        switch (size) {
        case 1: /* BFDOT */
        case 3: /* BFMLAL{B,T} */
            feature = dc_isar_feature(aa64_bf16, s);
            break;
        default:
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }
    if (!feature) {
        unallocated_encoding(s);
        return;
    }
    if (!fp_access_check(s)) {
        return;
    }

    /* Second pass: emit the code; U only selects between variants here */
    switch (opcode) {
    case 0x0: /* SQRDMLAH (vector) */
        gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sqrdmlah_qc, size);
        return;

    case 0x1: /* SQRDMLSH (vector) */
        gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sqrdmlsh_qc, size);
        return;

    case 0x2: /* SDOT / UDOT */
        gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0,
                         u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b);
        return;

    case 0x3: /* USDOT */
        gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0, gen_helper_gvec_usdot_b);
        return;

    case 0x04: /* SMMLA, UMMLA: always 128-bit (is_q checked above) */
        gen_gvec_op4_ool(s, 1, rd, rn, rm, rd, 0,
                         u ? gen_helper_gvec_ummla_b
                         : gen_helper_gvec_smmla_b);
        return;
    case 0x05: /* USMMLA */
        gen_gvec_op4_ool(s, 1, rd, rn, rm, rd, 0, gen_helper_gvec_usmmla_b);
        return;

    case 0x8: /* FCMLA, #0 */
    case 0x9: /* FCMLA, #90 */
    case 0xa: /* FCMLA, #180 */
    case 0xb: /* FCMLA, #270 */
        /* rotation is encoded in the low two bits of the opcode */
        rot = extract32(opcode, 0, 2);
        switch (size) {
        case 1:
            gen_gvec_op4_fpst(s, is_q, rd, rn, rm, rd, true, rot,
                              gen_helper_gvec_fcmlah);
            break;
        case 2:
            gen_gvec_op4_fpst(s, is_q, rd, rn, rm, rd, false, rot,
                              gen_helper_gvec_fcmlas);
            break;
        case 3:
            gen_gvec_op4_fpst(s, is_q, rd, rn, rm, rd, false, rot,
                              gen_helper_gvec_fcmlad);
            break;
        default:
            g_assert_not_reached();
        }
        return;

    case 0xc: /* FCADD, #90 */
    case 0xe: /* FCADD, #270 */
        /* rotation is encoded in bit 1 of the opcode */
        rot = extract32(opcode, 1, 1);
        switch (size) {
        case 1:
            gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
                              gen_helper_gvec_fcaddh);
            break;
        case 2:
            gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
                              gen_helper_gvec_fcadds);
            break;
        case 3:
            gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
                              gen_helper_gvec_fcaddd);
            break;
        default:
            g_assert_not_reached();
        }
        return;

    case 0xd: /* BFMMLA */
        gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0, gen_helper_gvec_bfmmla);
        return;
    case 0xf:
        switch (size) {
        case 1: /* BFDOT */
            gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0, gen_helper_gvec_bfdot);
            break;
        case 3: /* BFMLAL{B,T}: Q selects bottom/top half */
            gen_gvec_op4_fpst(s, 1, rd, rn, rm, rd, false, is_q,
                              gen_helper_gvec_bfmlal);
            break;
        default:
            g_assert_not_reached();
        }
        return;

    default:
        g_assert_not_reached();
    }
}
12313
12314static void handle_2misc_widening(DisasContext *s, int opcode, bool is_q,
12315 int size, int rn, int rd)
12316{
12317
12318
12319
12320
12321 int pass;
12322
12323 if (size == 3) {
12324
12325 TCGv_i64 tcg_res[2];
12326 int srcelt = is_q ? 2 : 0;
12327
12328 for (pass = 0; pass < 2; pass++) {
12329 TCGv_i32 tcg_op = tcg_temp_new_i32();
12330 tcg_res[pass] = tcg_temp_new_i64();
12331
12332 read_vec_element_i32(s, tcg_op, rn, srcelt + pass, MO_32);
12333 gen_helper_vfp_fcvtds(tcg_res[pass], tcg_op, cpu_env);
12334 tcg_temp_free_i32(tcg_op);
12335 }
12336 for (pass = 0; pass < 2; pass++) {
12337 write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
12338 tcg_temp_free_i64(tcg_res[pass]);
12339 }
12340 } else {
12341
12342 int srcelt = is_q ? 4 : 0;
12343 TCGv_i32 tcg_res[4];
12344 TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
12345 TCGv_i32 ahp = get_ahp_flag();
12346
12347 for (pass = 0; pass < 4; pass++) {
12348 tcg_res[pass] = tcg_temp_new_i32();
12349
12350 read_vec_element_i32(s, tcg_res[pass], rn, srcelt + pass, MO_16);
12351 gen_helper_vfp_fcvt_f16_to_f32(tcg_res[pass], tcg_res[pass],
12352 fpst, ahp);
12353 }
12354 for (pass = 0; pass < 4; pass++) {
12355 write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32);
12356 tcg_temp_free_i32(tcg_res[pass]);
12357 }
12358
12359 tcg_temp_free_ptr(fpst);
12360 tcg_temp_free_i32(ahp);
12361 }
12362}
12363
/* Reverse elements within groups (REV-family from 2-misc).
 * grp_size is log2(bytes) of the group being reversed; size is
 * log2(bytes) of the elements inside each group.
 */
static void handle_rev(DisasContext *s, int opcode, bool u,
                       bool is_q, int size, int rn, int rd)
{
    int op = (opcode << 1) | u;
    int opsz = op + size;
    int grp_size = 3 - opsz;
    int dsize = is_q ? 128 : 64;
    int i;

    /* The element must be strictly smaller than the group */
    if (opsz >= 3) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (size == 0) {
        /* Special case bytes: use a bswap op on each group of elements */
        int groups = dsize / (8 << grp_size);

        for (i = 0; i < groups; i++) {
            TCGv_i64 tcg_tmp = tcg_temp_new_i64();

            read_vec_element(s, tcg_tmp, rn, i, grp_size);
            switch (grp_size) {
            case MO_16:
                tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp, TCG_BSWAP_IZ);
                break;
            case MO_32:
                tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp, TCG_BSWAP_IZ);
                break;
            case MO_64:
                tcg_gen_bswap64_i64(tcg_tmp, tcg_tmp);
                break;
            default:
                g_assert_not_reached();
            }
            write_vec_element(s, tcg_tmp, rd, i, grp_size);
            tcg_temp_free_i64(tcg_tmp);
        }
        clear_vec_high(s, is_q, rd);
    } else {
        int revmask = (1 << grp_size) - 1;
        int esize = 8 << size;
        int elements = dsize / esize;
        TCGv_i64 tcg_rn = tcg_temp_new_i64();
        TCGv_i64 tcg_rd = tcg_const_i64(0);
        TCGv_i64 tcg_rd_hi = tcg_const_i64(0);

        /* Deposit each source element at its index-with-reversed-
         * group-bits position in the (lo, hi) result pair; accumulate
         * into temporaries so rd may alias rn.
         */
        for (i = 0; i < elements; i++) {
            int e_rev = (i & 0xf) ^ revmask;
            int off = e_rev * esize;
            read_vec_element(s, tcg_rn, rn, i, size);
            if (off >= 64) {
                tcg_gen_deposit_i64(tcg_rd_hi, tcg_rd_hi,
                                    tcg_rn, off - 64, esize);
            } else {
                tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, off, esize);
            }
        }
        write_vec_element(s, tcg_rd, rd, 0, MO_64);
        write_vec_element(s, tcg_rd_hi, rd, 1, MO_64);

        tcg_temp_free_i64(tcg_rd_hi);
        tcg_temp_free_i64(tcg_rd);
        tcg_temp_free_i64(tcg_rn);
    }
}
12434
static void handle_2misc_pairwise(DisasContext *s, int opcode, bool u,
                                  bool is_q, int size, int rn, int rd)
{
    /* Implement the pairwise operations from 2-misc:
     * adjacent pairs of elements of Vn are added to produce a
     * double-width result element, optionally accumulating into
     * the existing Vd element (opcode 0x6).
     */
    bool accum = (opcode == 0x6);
    int maxpass = is_q ? 2 : 1;
    int pass;
    TCGv_i64 tcg_res[2];

    if (size == 2) {
        /* 32 + 32 -> 64 op */
        MemOp memop = size + (u ? 0 : MO_SIGN);

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();

            tcg_res[pass] = tcg_temp_new_i64();

            read_vec_element(s, tcg_op1, rn, pass * 2, memop);
            read_vec_element(s, tcg_op2, rn, pass * 2 + 1, memop);
            tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2);
            if (accum) {
                read_vec_element(s, tcg_op1, rd, pass, MO_64);
                tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
            }

            tcg_temp_free_i64(tcg_op1);
            tcg_temp_free_i64(tcg_op2);
        }
    } else {
        /* 8- and 16-bit elements: helpers do all pairs in a 64-bit lane */
        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();
            NeonGenOne64OpFn *genfn;
            static NeonGenOne64OpFn * const fns[2][2] = {
                { gen_helper_neon_addlp_s8, gen_helper_neon_addlp_u8 },
                { gen_helper_neon_addlp_s16, gen_helper_neon_addlp_u16 },
            };

            genfn = fns[size][u];

            tcg_res[pass] = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);
            genfn(tcg_res[pass], tcg_op);

            if (accum) {
                read_vec_element(s, tcg_op, rd, pass, MO_64);
                if (size == 0) {
                    gen_helper_neon_addl_u16(tcg_res[pass],
                                             tcg_res[pass], tcg_op);
                } else {
                    gen_helper_neon_addl_u32(tcg_res[pass],
                                             tcg_res[pass], tcg_op);
                }
            }
            tcg_temp_free_i64(tcg_op);
        }
    }
    if (!is_q) {
        /* 64-bit form: the upper half of the result is zeroed */
        tcg_res[1] = tcg_constant_i64(0);
    }
    for (pass = 0; pass < 2; pass++) {
        write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
        tcg_temp_free_i64(tcg_res[pass]);
    }
}
12506
12507static void handle_shll(DisasContext *s, bool is_q, int size, int rn, int rd)
12508{
12509
12510 int pass;
12511 int part = is_q ? 2 : 0;
12512 TCGv_i64 tcg_res[2];
12513
12514 for (pass = 0; pass < 2; pass++) {
12515 static NeonGenWidenFn * const widenfns[3] = {
12516 gen_helper_neon_widen_u8,
12517 gen_helper_neon_widen_u16,
12518 tcg_gen_extu_i32_i64,
12519 };
12520 NeonGenWidenFn *widenfn = widenfns[size];
12521 TCGv_i32 tcg_op = tcg_temp_new_i32();
12522
12523 read_vec_element_i32(s, tcg_op, rn, part + pass, MO_32);
12524 tcg_res[pass] = tcg_temp_new_i64();
12525 widenfn(tcg_res[pass], tcg_op);
12526 tcg_gen_shli_i64(tcg_res[pass], tcg_res[pass], 8 << size);
12527
12528 tcg_temp_free_i32(tcg_op);
12529 }
12530
12531 for (pass = 0; pass < 2; pass++) {
12532 write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
12533 tcg_temp_free_i64(tcg_res[pass]);
12534 }
12535}
12536
12537
12538
12539
12540
12541
12542
/* AdvSIMD two reg misc
 *   31  30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
 * | 0 | Q | U | 0 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
 */
static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
{
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    bool u = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    bool need_fpstatus = false;
    bool need_rmode = false;
    int rmode = -1;
    TCGv_i32 tcg_rmode;
    TCGv_ptr tcg_fpstatus;

    switch (opcode) {
    case 0x0: /* REV64, REV32 */
    case 0x1: /* REV16 */
        handle_rev(s, opcode, u, is_q, size, rn, rd);
        return;
    case 0x5: /* CNT, NOT, RBIT */
        if (u && size == 0) {
            /* NOT */
            break;
        } else if (u && size == 1) {
            /* RBIT */
            break;
        } else if (!u && size == 0) {
            /* CNT */
            break;
        }
        unallocated_encoding(s);
        return;
    case 0x12: /* XTN, XTN2 */
    case 0x14: /* SQXTN, SQXTN2, UQXTN, UQXTN2 */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        /* Narrowing ops: handled entirely by the helper. */
        handle_2misc_narrow(s, false, opcode, u, is_q, size, rn, rd);
        return;
    case 0x4: /* CLS, CLZ */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x2: /* SADDLP, UADDLP */
    case 0x6: /* SADALP, UADALP */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_2misc_pairwise(s, opcode, u, is_q, size, rn, rd);
        return;
    case 0x13: /* SHLL, SHLL2 */
        if (u == 0 || size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_shll(s, is_q, size, rn, rd);
        return;
    case 0xa: /* CMLT (zero) */
        if (u == 1) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x8: /* CMGT, CMGE (zero) */
    case 0x9: /* CMEQ, CMLE (zero) */
    case 0xb: /* ABS, NEG */
        if (size == 3 && !is_q) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x3: /* SUQADD, USQADD */
        if (size == 3 && !is_q) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_2misc_satacc(s, false, u, is_q, size, rn, rd);
        return;
    case 0x7: /* SQABS, SQNEG */
        if (size == 3 && !is_q) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0xc ... 0xf:
    case 0x16 ... 0x1f:
    {
        /* Floating point: U, size[1] and opcode indicate the operation;
         * size[0] indicates single or double precision. Fold size[1] and
         * U into the opcode so a single switch covers all FP ops.
         */
        int is_double = extract32(size, 0, 1);
        opcode |= (extract32(size, 1, 1) << 5) | (u << 6);
        size = is_double ? 3 : 2;
        switch (opcode) {
        case 0x2f: /* FABS */
        case 0x6f: /* FNEG */
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x1d: /* SCVTF */
        case 0x5d: /* UCVTF */
        {
            bool is_signed = (opcode == 0x1d) ? true : false;
            int elements = is_double ? 2 : is_q ? 4 : 2;
            if (is_double && !is_q) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_simd_intfp_conv(s, rd, rn, elements, is_signed, 0, size);
            return;
        }
        case 0x2c: /* FCMGT (zero) */
        case 0x2d: /* FCMEQ (zero) */
        case 0x2e: /* FCMLT (zero) */
        case 0x6c: /* FCMGE (zero) */
        case 0x6d: /* FCMLE (zero) */
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            handle_2misc_fcmp_zero(s, opcode, false, u, is_q, size, rn, rd);
            return;
        case 0x7f: /* FSQRT */
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x1a: /* FCVTNS */
        case 0x1b: /* FCVTMS */
        case 0x3a: /* FCVTPS */
        case 0x3b: /* FCVTZS */
        case 0x5a: /* FCVTNU */
        case 0x5b: /* FCVTMU */
        case 0x7a: /* FCVTPU */
        case 0x7b: /* FCVTZU */
            need_fpstatus = true;
            need_rmode = true;
            /* Rounding mode is encoded in opcode bits {5,0}. */
            rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x5c: /* FCVTAU */
        case 0x1c: /* FCVTAS */
            need_fpstatus = true;
            need_rmode = true;
            rmode = FPROUNDING_TIEAWAY;
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x3c: /* URECPE */
            if (size == 3) {
                unallocated_encoding(s);
                return;
            }
            /* fall through */
        case 0x3d: /* FRECPE */
        case 0x7d: /* FRSQRTE */
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_reciprocal(s, opcode, false, u, is_q, size, rn, rd);
            return;
        case 0x56: /* FCVTXN, FCVTXN2 */
            if (size == 2) {
                unallocated_encoding(s);
                return;
            }
            /* fall through */
        case 0x16: /* FCVTN, FCVTN2 */
            /* handle_2misc_narrow does a 2*size -> size operation, but
             * these instructions encode the source size rather than the
             * dest size, hence the "size - 1" below.
             */
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_narrow(s, false, opcode, 0, is_q, size - 1, rn, rd);
            return;
        case 0x36: /* BFCVTN, BFCVTN2 */
            if (!dc_isar_feature(aa64_bf16, s) || size != 2) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_narrow(s, false, opcode, 0, is_q, size - 1, rn, rd);
            return;
        case 0x17: /* FCVTL, FCVTL2 */
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_widening(s, opcode, is_q, size, rn, rd);
            return;
        case 0x18: /* FRINTN */
        case 0x19: /* FRINTM */
        case 0x38: /* FRINTP */
        case 0x39: /* FRINTZ */
            need_rmode = true;
            rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
            /* fall through */
        case 0x59: /* FRINTX */
        case 0x79: /* FRINTI */
            need_fpstatus = true;
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x58: /* FRINTA */
            need_rmode = true;
            rmode = FPROUNDING_TIEAWAY;
            need_fpstatus = true;
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x7c: /* URSQRTE */
            if (size == 3) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x1e: /* FRINT32Z */
        case 0x1f: /* FRINT64Z */
            need_rmode = true;
            rmode = FPROUNDING_ZERO;
            /* fall through */
        case 0x5e: /* FRINT32X */
        case 0x5f: /* FRINT64X */
            need_fpstatus = true;
            if ((size == 3 && !is_q) || !dc_isar_feature(aa64_frint, s)) {
                unallocated_encoding(s);
                return;
            }
            break;
        default:
            unallocated_encoding(s);
            return;
        }
        break;
    }
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (need_fpstatus || need_rmode) {
        tcg_fpstatus = fpstatus_ptr(FPST_FPCR);
    } else {
        tcg_fpstatus = NULL;
    }
    if (need_rmode) {
        /* Install the required rounding mode; restored at end of function. */
        tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
    } else {
        tcg_rmode = NULL;
    }

    /* Ops that can be expressed as whole-vector gvec operations. */
    switch (opcode) {
    case 0x5:
        if (u && size == 0) { /* NOT */
            gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_not, 0);
            return;
        }
        break;
    case 0x8: /* CMGT, CMGE (zero) */
        if (u) {
            gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cge0, size);
        } else {
            gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cgt0, size);
        }
        return;
    case 0x9: /* CMEQ, CMLE (zero) */
        if (u) {
            gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cle0, size);
        } else {
            gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_ceq0, size);
        }
        return;
    case 0xa: /* CMLT (zero) */
        gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_clt0, size);
        return;
    case 0xb: /* ABS, NEG */
        if (u) {
            gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_neg, size);
        } else {
            gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_abs, size);
        }
        return;
    }

    if (size == 3) {
        /* All 64-bit element operations can be shared with scalar 2misc */
        int pass;

        /* Coverage: 2 passes.
         * The 64-bit-element !is_q forms were rejected during decode
         * above, so Q must be set here.
         */
        tcg_debug_assert(is_q);
        for (pass = 0; pass < 2; pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();
            TCGv_i64 tcg_res = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);

            handle_2misc_64(s, opcode, u, tcg_res, tcg_op,
                            tcg_rmode, tcg_fpstatus);

            write_vec_element(s, tcg_res, rd, pass, MO_64);

            tcg_temp_free_i64(tcg_res);
            tcg_temp_free_i64(tcg_op);
        }
    } else {
        int pass;

        for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op, rn, pass, MO_32);

            if (size == 2) {
                /* Special cases for 32 bit elements */
                switch (opcode) {
                case 0x4: /* CLS, CLZ */
                    if (u) {
                        tcg_gen_clzi_i32(tcg_res, tcg_op, 32);
                    } else {
                        tcg_gen_clrsb_i32(tcg_res, tcg_op);
                    }
                    break;
                case 0x7: /* SQABS, SQNEG */
                    if (u) {
                        gen_helper_neon_qneg_s32(tcg_res, cpu_env, tcg_op);
                    } else {
                        gen_helper_neon_qabs_s32(tcg_res, cpu_env, tcg_op);
                    }
                    break;
                case 0x2f: /* FABS */
                    gen_helper_vfp_abss(tcg_res, tcg_op);
                    break;
                case 0x6f: /* FNEG */
                    gen_helper_vfp_negs(tcg_res, tcg_op);
                    break;
                case 0x7f: /* FSQRT */
                    gen_helper_vfp_sqrts(tcg_res, tcg_op, cpu_env);
                    break;
                case 0x1a: /* FCVTNS */
                case 0x1b: /* FCVTMS */
                case 0x1c: /* FCVTAS */
                case 0x3a: /* FCVTPS */
                case 0x3b: /* FCVTZS */
                    /* Rounding mode was installed above; shift amount 0. */
                    gen_helper_vfp_tosls(tcg_res, tcg_op,
                                         tcg_constant_i32(0), tcg_fpstatus);
                    break;
                case 0x5a: /* FCVTNU */
                case 0x5b: /* FCVTMU */
                case 0x5c: /* FCVTAU */
                case 0x7a: /* FCVTPU */
                case 0x7b: /* FCVTZU */
                    gen_helper_vfp_touls(tcg_res, tcg_op,
                                         tcg_constant_i32(0), tcg_fpstatus);
                    break;
                case 0x18: /* FRINTN */
                case 0x19: /* FRINTM */
                case 0x38: /* FRINTP */
                case 0x39: /* FRINTZ */
                case 0x58: /* FRINTA */
                case 0x79: /* FRINTI */
                    gen_helper_rints(tcg_res, tcg_op, tcg_fpstatus);
                    break;
                case 0x59: /* FRINTX */
                    gen_helper_rints_exact(tcg_res, tcg_op, tcg_fpstatus);
                    break;
                case 0x7c: /* URSQRTE */
                    gen_helper_rsqrte_u32(tcg_res, tcg_op);
                    break;
                case 0x1e: /* FRINT32Z */
                case 0x5e: /* FRINT32X */
                    gen_helper_frint32_s(tcg_res, tcg_op, tcg_fpstatus);
                    break;
                case 0x1f: /* FRINT64Z */
                case 0x5f: /* FRINT64X */
                    gen_helper_frint64_s(tcg_res, tcg_op, tcg_fpstatus);
                    break;
                default:
                    g_assert_not_reached();
                }
            } else {
                /* Use helpers for 8 and 16 bit elements */
                switch (opcode) {
                case 0x5: /* CNT, RBIT */
                    /* NOT was handled by the gvec switch above, so only
                     * CNT (u == 0) and RBIT (u == 1) can reach here.
                     */
                    if (u) {
                        gen_helper_neon_rbit_u8(tcg_res, tcg_op);
                    } else {
                        gen_helper_neon_cnt_u8(tcg_res, tcg_op);
                    }
                    break;
                case 0x7: /* SQABS, SQNEG */
                {
                    NeonGenOneOpEnvFn *genfn;
                    static NeonGenOneOpEnvFn * const fns[2][2] = {
                        { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 },
                        { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 },
                    };
                    genfn = fns[size][u];
                    genfn(tcg_res, cpu_env, tcg_op);
                    break;
                }
                case 0x4: /* CLS, CLZ */
                    if (u) {
                        if (size == 0) {
                            gen_helper_neon_clz_u8(tcg_res, tcg_op);
                        } else {
                            gen_helper_neon_clz_u16(tcg_res, tcg_op);
                        }
                    } else {
                        if (size == 0) {
                            gen_helper_neon_cls_s8(tcg_res, tcg_op);
                        } else {
                            gen_helper_neon_cls_s16(tcg_res, tcg_op);
                        }
                    }
                    break;
                default:
                    g_assert_not_reached();
                }
            }

            write_vec_element_i32(s, tcg_res, rd, pass, MO_32);

            tcg_temp_free_i32(tcg_res);
            tcg_temp_free_i32(tcg_op);
        }
    }
    clear_vec_high(s, is_q, rd);

    if (need_rmode) {
        /* Restore the caller's rounding mode. */
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
        tcg_temp_free_i32(tcg_rmode);
    }
    if (need_fpstatus) {
        tcg_temp_free_ptr(tcg_fpstatus);
    }
}
13028
13029
13030
13031
13032
13033
13034
13035
13036
13037
13038
13039
13040
13041
13042
/* AdvSIMD [scalar] two register miscellaneous (FP16)
 *
 * This covers two decode groups where scalar vs vector access is
 * governed by bit 28 (S). Several of the float-to-integral ops exist
 * only in the vector form and are unallocated for the scalar decode;
 * in the scalar decode Q is always 1. The a (bit 23) and u (bit 29)
 * fields are folded into opcode bits 5 and 6 to give a single fpop.
 */
static void disas_simd_two_reg_misc_fp16(DisasContext *s, uint32_t insn)
{
    int fpop, opcode, a, u;
    int rn, rd;
    bool is_q;
    bool is_scalar;
    bool only_in_vector = false;

    int pass;
    TCGv_i32 tcg_rmode = NULL;
    TCGv_ptr tcg_fpstatus = NULL;
    bool need_rmode = false;
    bool need_fpst = true;
    int rmode;

    if (!dc_isar_feature(aa64_fp16, s)) {
        unallocated_encoding(s);
        return;
    }

    rd = extract32(insn, 0, 5);
    rn = extract32(insn, 5, 5);

    a = extract32(insn, 23, 1);
    u = extract32(insn, 29, 1);
    is_scalar = extract32(insn, 28, 1);
    is_q = extract32(insn, 30, 1);

    opcode = extract32(insn, 12, 5);
    fpop = deposit32(opcode, 5, 1, a);
    fpop = deposit32(fpop, 6, 1, u);

    switch (fpop) {
    case 0x1d: /* SCVTF */
    case 0x5d: /* UCVTF */
    {
        int elements;

        if (is_scalar) {
            elements = 1;
        } else {
            elements = (is_q ? 8 : 4);
        }

        if (!fp_access_check(s)) {
            return;
        }
        handle_simd_intfp_conv(s, rd, rn, elements, !u, 0, MO_16);
        return;
    }
    break;
    case 0x2c: /* FCMGT (zero) */
    case 0x2d: /* FCMEQ (zero) */
    case 0x2e: /* FCMLT (zero) */
    case 0x6c: /* FCMGE (zero) */
    case 0x6d: /* FCMLE (zero) */
        handle_2misc_fcmp_zero(s, fpop, is_scalar, 0, is_q, MO_16, rn, rd);
        return;
    case 0x3d: /* FRECPE */
    case 0x3f: /* FRECPX */
        break;
    case 0x18: /* FRINTN */
        need_rmode = true;
        only_in_vector = true;
        rmode = FPROUNDING_TIEEVEN;
        break;
    case 0x19: /* FRINTM */
        need_rmode = true;
        only_in_vector = true;
        rmode = FPROUNDING_NEGINF;
        break;
    case 0x38: /* FRINTP */
        need_rmode = true;
        only_in_vector = true;
        rmode = FPROUNDING_POSINF;
        break;
    case 0x39: /* FRINTZ */
        need_rmode = true;
        only_in_vector = true;
        rmode = FPROUNDING_ZERO;
        break;
    case 0x58: /* FRINTA */
        need_rmode = true;
        only_in_vector = true;
        rmode = FPROUNDING_TIEAWAY;
        break;
    case 0x59: /* FRINTX */
    case 0x79: /* FRINTI */
        only_in_vector = true;
        /* current rounding mode */
        break;
    case 0x1a: /* FCVTNS */
        need_rmode = true;
        rmode = FPROUNDING_TIEEVEN;
        break;
    case 0x1b: /* FCVTMS */
        need_rmode = true;
        rmode = FPROUNDING_NEGINF;
        break;
    case 0x1c: /* FCVTAS */
        need_rmode = true;
        rmode = FPROUNDING_TIEAWAY;
        break;
    case 0x3a: /* FCVTPS */
        need_rmode = true;
        rmode = FPROUNDING_POSINF;
        break;
    case 0x3b: /* FCVTZS */
        need_rmode = true;
        rmode = FPROUNDING_ZERO;
        break;
    case 0x5a: /* FCVTNU */
        need_rmode = true;
        rmode = FPROUNDING_TIEEVEN;
        break;
    case 0x5b: /* FCVTMU */
        need_rmode = true;
        rmode = FPROUNDING_NEGINF;
        break;
    case 0x5c: /* FCVTAU */
        need_rmode = true;
        rmode = FPROUNDING_TIEAWAY;
        break;
    case 0x7a: /* FCVTPU */
        need_rmode = true;
        rmode = FPROUNDING_POSINF;
        break;
    case 0x7b: /* FCVTZU */
        need_rmode = true;
        rmode = FPROUNDING_ZERO;
        break;
    case 0x2f: /* FABS */
    case 0x6f: /* FNEG */
        /* Pure bit operations on the sign bit; no FP status needed. */
        need_fpst = false;
        break;
    case 0x7d: /* FRSQRTE */
    case 0x7f: /* FSQRT (vector) */
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    /* Check additional constraints for the scalar encoding */
    if (is_scalar) {
        if (!is_q) {
            unallocated_encoding(s);
            return;
        }
        /* FRINTxx is only in the vector form */
        if (only_in_vector) {
            unallocated_encoding(s);
            return;
        }
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (need_rmode || need_fpst) {
        tcg_fpstatus = fpstatus_ptr(FPST_FPCR_F16);
    }

    if (need_rmode) {
        /* Install the required rounding mode; restored at end of function. */
        tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
    }

    if (is_scalar) {
        TCGv_i32 tcg_op = read_fp_hreg(s, rn);
        TCGv_i32 tcg_res = tcg_temp_new_i32();

        switch (fpop) {
        case 0x1a: /* FCVTNS */
        case 0x1b: /* FCVTMS */
        case 0x1c: /* FCVTAS */
        case 0x3a: /* FCVTPS */
        case 0x3b: /* FCVTZS */
            gen_helper_advsimd_f16tosinth(tcg_res, tcg_op, tcg_fpstatus);
            break;
        case 0x3d: /* FRECPE */
            gen_helper_recpe_f16(tcg_res, tcg_op, tcg_fpstatus);
            break;
        case 0x3f: /* FRECPX */
            gen_helper_frecpx_f16(tcg_res, tcg_op, tcg_fpstatus);
            break;
        case 0x5a: /* FCVTNU */
        case 0x5b: /* FCVTMU */
        case 0x5c: /* FCVTAU */
        case 0x7a: /* FCVTPU */
        case 0x7b: /* FCVTZU */
            gen_helper_advsimd_f16touinth(tcg_res, tcg_op, tcg_fpstatus);
            break;
        case 0x6f: /* FNEG */
            tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
            break;
        case 0x7d: /* FRSQRTE */
            gen_helper_rsqrte_f16(tcg_res, tcg_op, tcg_fpstatus);
            break;
        default:
            g_assert_not_reached();
        }

        /* limit any sign extension going on */
        tcg_gen_andi_i32(tcg_res, tcg_res, 0xffff);
        write_fp_sreg(s, rd, tcg_res);

        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_op);
    } else {
        for (pass = 0; pass < (is_q ? 8 : 4); pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op, rn, pass, MO_16);

            switch (fpop) {
            case 0x1a: /* FCVTNS */
            case 0x1b: /* FCVTMS */
            case 0x1c: /* FCVTAS */
            case 0x3a: /* FCVTPS */
            case 0x3b: /* FCVTZS */
                gen_helper_advsimd_f16tosinth(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x3d: /* FRECPE */
                gen_helper_recpe_f16(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x5a: /* FCVTNU */
            case 0x5b: /* FCVTMU */
            case 0x5c: /* FCVTAU */
            case 0x7a: /* FCVTPU */
            case 0x7b: /* FCVTZU */
                gen_helper_advsimd_f16touinth(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x18: /* FRINTN */
            case 0x19: /* FRINTM */
            case 0x38: /* FRINTP */
            case 0x39: /* FRINTZ */
            case 0x58: /* FRINTA */
            case 0x79: /* FRINTI */
                gen_helper_advsimd_rinth(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x59: /* FRINTX */
                gen_helper_advsimd_rinth_exact(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x2f: /* FABS */
                tcg_gen_andi_i32(tcg_res, tcg_op, 0x7fff);
                break;
            case 0x6f: /* FNEG */
                tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
                break;
            case 0x7d: /* FRSQRTE */
                gen_helper_rsqrte_f16(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x7f: /* FSQRT */
                gen_helper_sqrt_f16(tcg_res, tcg_op, tcg_fpstatus);
                break;
            default:
                g_assert_not_reached();
            }

            write_vec_element_i32(s, tcg_res, rd, pass, MO_16);

            tcg_temp_free_i32(tcg_res);
            tcg_temp_free_i32(tcg_op);
        }

        clear_vec_high(s, is_q, rd);
    }

    if (tcg_rmode) {
        /* Restore the caller's rounding mode. */
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
        tcg_temp_free_i32(tcg_rmode);
    }

    if (tcg_fpstatus) {
        tcg_temp_free_ptr(tcg_fpstatus);
    }
}
13324
13325
13326
13327
13328
13329
13330
13331
13332
13333
13334
13335
13336static void disas_simd_indexed(DisasContext *s, uint32_t insn)
13337{
13338
13339
13340
13341
13342
13343
13344
13345 bool is_scalar = extract32(insn, 28, 1);
13346 bool is_q = extract32(insn, 30, 1);
13347 bool u = extract32(insn, 29, 1);
13348 int size = extract32(insn, 22, 2);
13349 int l = extract32(insn, 21, 1);
13350 int m = extract32(insn, 20, 1);
13351
13352 int rm = extract32(insn, 16, 4);
13353 int opcode = extract32(insn, 12, 4);
13354 int h = extract32(insn, 11, 1);
13355 int rn = extract32(insn, 5, 5);
13356 int rd = extract32(insn, 0, 5);
13357 bool is_long = false;
13358 int is_fp = 0;
13359 bool is_fp16 = false;
13360 int index;
13361 TCGv_ptr fpst;
13362
13363 switch (16 * u + opcode) {
13364 case 0x08:
13365 case 0x10:
13366 case 0x14:
13367 if (is_scalar) {
13368 unallocated_encoding(s);
13369 return;
13370 }
13371 break;
13372 case 0x02:
13373 case 0x12:
13374 case 0x06:
13375 case 0x16:
13376 case 0x0a:
13377 case 0x1a:
13378 if (is_scalar) {
13379 unallocated_encoding(s);
13380 return;
13381 }
13382 is_long = true;
13383 break;
13384 case 0x03:
13385 case 0x07:
13386 case 0x0b:
13387 is_long = true;
13388 break;
13389 case 0x0c:
13390 case 0x0d:
13391 break;
13392 case 0x01:
13393 case 0x05:
13394 case 0x09:
13395 case 0x19:
13396 is_fp = 1;
13397 break;
13398 case 0x1d:
13399 case 0x1f:
13400 if (!dc_isar_feature(aa64_rdm, s)) {
13401 unallocated_encoding(s);
13402 return;
13403 }
13404 break;
13405 case 0x0e:
13406 case 0x1e:
13407 if (is_scalar || size != MO_32 || !dc_isar_feature(aa64_dp, s)) {
13408 unallocated_encoding(s);
13409 return;
13410 }
13411 break;
13412 case 0x0f:
13413 switch (size) {
13414 case 0:
13415 case 2:
13416 if (is_scalar || !dc_isar_feature(aa64_i8mm, s)) {
13417 unallocated_encoding(s);
13418 return;
13419 }
13420 size = MO_32;
13421 break;
13422 case 1:
13423 if (is_scalar || !dc_isar_feature(aa64_bf16, s)) {
13424 unallocated_encoding(s);
13425 return;
13426 }
13427 size = MO_32;
13428 break;
13429 case 3:
13430 if (is_scalar || !dc_isar_feature(aa64_bf16, s)) {
13431 unallocated_encoding(s);
13432 return;
13433 }
13434
13435 size = MO_16;
13436 break;
13437 default:
13438 unallocated_encoding(s);
13439 return;
13440 }
13441 break;
13442 case 0x11:
13443 case 0x13:
13444 case 0x15:
13445 case 0x17:
13446 if (is_scalar || !dc_isar_feature(aa64_fcma, s)) {
13447 unallocated_encoding(s);
13448 return;
13449 }
13450 is_fp = 2;
13451 break;
13452 case 0x00:
13453 case 0x04:
13454 case 0x18:
13455 case 0x1c:
13456 if (is_scalar || size != MO_32 || !dc_isar_feature(aa64_fhm, s)) {
13457 unallocated_encoding(s);
13458 return;
13459 }
13460 size = MO_16;
13461
13462 break;
13463 default:
13464 unallocated_encoding(s);
13465 return;
13466 }
13467
13468 switch (is_fp) {
13469 case 1:
13470
13471 switch (size) {
13472 case 0:
13473 size = MO_16;
13474 is_fp16 = true;
13475 break;
13476 case MO_32:
13477 case MO_64:
13478 break;
13479 default:
13480 unallocated_encoding(s);
13481 return;
13482 }
13483 break;
13484
13485 case 2:
13486
13487 size += 1;
13488 switch (size) {
13489 case MO_32:
13490 if (h && !is_q) {
13491 unallocated_encoding(s);
13492 return;
13493 }
13494 is_fp16 = true;
13495 break;
13496 case MO_64:
13497 break;
13498 default:
13499 unallocated_encoding(s);
13500 return;
13501 }
13502 break;
13503
13504 default:
13505 switch (size) {
13506 case MO_8:
13507 case MO_64:
13508 unallocated_encoding(s);
13509 return;
13510 }
13511 break;
13512 }
13513 if (is_fp16 && !dc_isar_feature(aa64_fp16, s)) {
13514 unallocated_encoding(s);
13515 return;
13516 }
13517
13518
13519 switch (size) {
13520 case MO_16:
13521 index = h << 2 | l << 1 | m;
13522 break;
13523 case MO_32:
13524 index = h << 1 | l;
13525 rm |= m << 4;
13526 break;
13527 case MO_64:
13528 if (l || !is_q) {
13529 unallocated_encoding(s);
13530 return;
13531 }
13532 index = h;
13533 rm |= m << 4;
13534 break;
13535 default:
13536 g_assert_not_reached();
13537 }
13538
13539 if (!fp_access_check(s)) {
13540 return;
13541 }
13542
13543 if (is_fp) {
13544 fpst = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
13545 } else {
13546 fpst = NULL;
13547 }
13548
13549 switch (16 * u + opcode) {
13550 case 0x0e:
13551 case 0x1e:
13552 gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
13553 u ? gen_helper_gvec_udot_idx_b
13554 : gen_helper_gvec_sdot_idx_b);
13555 return;
13556 case 0x0f:
13557 switch (extract32(insn, 22, 2)) {
13558 case 0:
13559 gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
13560 gen_helper_gvec_sudot_idx_b);
13561 return;
13562 case 1:
13563 gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
13564 gen_helper_gvec_bfdot_idx);
13565 return;
13566 case 2:
13567 gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
13568 gen_helper_gvec_usdot_idx_b);
13569 return;
13570 case 3:
13571 gen_gvec_op4_fpst(s, 1, rd, rn, rm, rd, 0, (index << 1) | is_q,
13572 gen_helper_gvec_bfmlal_idx);
13573 return;
13574 }
13575 g_assert_not_reached();
13576 case 0x11:
13577 case 0x13:
13578 case 0x15:
13579 case 0x17:
13580 {
13581 int rot = extract32(insn, 13, 2);
13582 int data = (index << 2) | rot;
13583 tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, rd),
13584 vec_full_reg_offset(s, rn),
13585 vec_full_reg_offset(s, rm),
13586 vec_full_reg_offset(s, rd), fpst,
13587 is_q ? 16 : 8, vec_full_reg_size(s), data,
13588 size == MO_64
13589 ? gen_helper_gvec_fcmlas_idx
13590 : gen_helper_gvec_fcmlah_idx);
13591 tcg_temp_free_ptr(fpst);
13592 }
13593 return;
13594
13595 case 0x00:
13596 case 0x04:
13597 case 0x18:
13598 case 0x1c:
13599 {
13600 int is_s = extract32(opcode, 2, 1);
13601 int is_2 = u;
13602 int data = (index << 2) | (is_2 << 1) | is_s;
13603 tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
13604 vec_full_reg_offset(s, rn),
13605 vec_full_reg_offset(s, rm), cpu_env,
13606 is_q ? 16 : 8, vec_full_reg_size(s),
13607 data, gen_helper_gvec_fmlal_idx_a64);
13608 }
13609 return;
13610
13611 case 0x08:
13612 if (!is_long && !is_scalar) {
13613 static gen_helper_gvec_3 * const fns[3] = {
13614 gen_helper_gvec_mul_idx_h,
13615 gen_helper_gvec_mul_idx_s,
13616 gen_helper_gvec_mul_idx_d,
13617 };
13618 tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd),
13619 vec_full_reg_offset(s, rn),
13620 vec_full_reg_offset(s, rm),
13621 is_q ? 16 : 8, vec_full_reg_size(s),
13622 index, fns[size - 1]);
13623 return;
13624 }
13625 break;
13626
13627 case 0x10:
13628 if (!is_long && !is_scalar) {
13629 static gen_helper_gvec_4 * const fns[3] = {
13630 gen_helper_gvec_mla_idx_h,
13631 gen_helper_gvec_mla_idx_s,
13632 gen_helper_gvec_mla_idx_d,
13633 };
13634 tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
13635 vec_full_reg_offset(s, rn),
13636 vec_full_reg_offset(s, rm),
13637 vec_full_reg_offset(s, rd),
13638 is_q ? 16 : 8, vec_full_reg_size(s),
13639 index, fns[size - 1]);
13640 return;
13641 }
13642 break;
13643
13644 case 0x14:
13645 if (!is_long && !is_scalar) {
13646 static gen_helper_gvec_4 * const fns[3] = {
13647 gen_helper_gvec_mls_idx_h,
13648 gen_helper_gvec_mls_idx_s,
13649 gen_helper_gvec_mls_idx_d,
13650 };
13651 tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
13652 vec_full_reg_offset(s, rn),
13653 vec_full_reg_offset(s, rm),
13654 vec_full_reg_offset(s, rd),
13655 is_q ? 16 : 8, vec_full_reg_size(s),
13656 index, fns[size - 1]);
13657 return;
13658 }
13659 break;
13660 }
13661
13662 if (size == 3) {
13663 TCGv_i64 tcg_idx = tcg_temp_new_i64();
13664 int pass;
13665
13666 assert(is_fp && is_q && !is_long);
13667
13668 read_vec_element(s, tcg_idx, rm, index, MO_64);
13669
13670 for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
13671 TCGv_i64 tcg_op = tcg_temp_new_i64();
13672 TCGv_i64 tcg_res = tcg_temp_new_i64();
13673
13674 read_vec_element(s, tcg_op, rn, pass, MO_64);
13675
13676 switch (16 * u + opcode) {
13677 case 0x05:
13678
13679 gen_helper_vfp_negd(tcg_op, tcg_op);
13680
13681 case 0x01:
13682 read_vec_element(s, tcg_res, rd, pass, MO_64);
13683 gen_helper_vfp_muladdd(tcg_res, tcg_op, tcg_idx, tcg_res, fpst);
13684 break;
13685 case 0x09:
13686 gen_helper_vfp_muld(tcg_res, tcg_op, tcg_idx, fpst);
13687 break;
13688 case 0x19:
13689 gen_helper_vfp_mulxd(tcg_res, tcg_op, tcg_idx, fpst);
13690 break;
13691 default:
13692 g_assert_not_reached();
13693 }
13694
13695 write_vec_element(s, tcg_res, rd, pass, MO_64);
13696 tcg_temp_free_i64(tcg_op);
13697 tcg_temp_free_i64(tcg_res);
13698 }
13699
13700 tcg_temp_free_i64(tcg_idx);
13701 clear_vec_high(s, !is_scalar, rd);
13702 } else if (!is_long) {
13703
13704
13705
13706
13707 TCGv_i32 tcg_idx = tcg_temp_new_i32();
13708 int pass, maxpasses;
13709
13710 if (is_scalar) {
13711 maxpasses = 1;
13712 } else {
13713 maxpasses = is_q ? 4 : 2;
13714 }
13715
13716 read_vec_element_i32(s, tcg_idx, rm, index, size);
13717
13718 if (size == 1 && !is_scalar) {
13719
13720
13721
13722
13723 tcg_gen_deposit_i32(tcg_idx, tcg_idx, tcg_idx, 16, 16);
13724 }
13725
13726 for (pass = 0; pass < maxpasses; pass++) {
13727 TCGv_i32 tcg_op = tcg_temp_new_i32();
13728 TCGv_i32 tcg_res = tcg_temp_new_i32();
13729
13730 read_vec_element_i32(s, tcg_op, rn, pass, is_scalar ? size : MO_32);
13731
13732 switch (16 * u + opcode) {
13733 case 0x08:
13734 case 0x10:
13735 case 0x14:
13736 {
13737 static NeonGenTwoOpFn * const fns[2][2] = {
13738 { gen_helper_neon_add_u16, gen_helper_neon_sub_u16 },
13739 { tcg_gen_add_i32, tcg_gen_sub_i32 },
13740 };
13741 NeonGenTwoOpFn *genfn;
13742 bool is_sub = opcode == 0x4;
13743
13744 if (size == 1) {
13745 gen_helper_neon_mul_u16(tcg_res, tcg_op, tcg_idx);
13746 } else {
13747 tcg_gen_mul_i32(tcg_res, tcg_op, tcg_idx);
13748 }
13749 if (opcode == 0x8) {
13750 break;
13751 }
13752 read_vec_element_i32(s, tcg_op, rd, pass, MO_32);
13753 genfn = fns[size - 1][is_sub];
13754 genfn(tcg_res, tcg_op, tcg_res);
13755 break;
13756 }
13757 case 0x05:
13758 case 0x01:
13759 read_vec_element_i32(s, tcg_res, rd, pass,
13760 is_scalar ? size : MO_32);
13761 switch (size) {
13762 case 1:
13763 if (opcode == 0x5) {
13764
13765
13766 tcg_gen_xori_i32(tcg_op, tcg_op, 0x80008000);
13767 }
13768 if (is_scalar) {
13769 gen_helper_advsimd_muladdh(tcg_res, tcg_op, tcg_idx,
13770 tcg_res, fpst);
13771 } else {
13772 gen_helper_advsimd_muladd2h(tcg_res, tcg_op, tcg_idx,
13773 tcg_res, fpst);
13774 }
13775 break;
13776 case 2:
13777 if (opcode == 0x5) {
13778
13779
13780 tcg_gen_xori_i32(tcg_op, tcg_op, 0x80000000);
13781 }
13782 gen_helper_vfp_muladds(tcg_res, tcg_op, tcg_idx,
13783 tcg_res, fpst);
13784 break;
13785 default:
13786 g_assert_not_reached();
13787 }
13788 break;
13789 case 0x09:
13790 switch (size) {
13791 case 1:
13792 if (is_scalar) {
13793 gen_helper_advsimd_mulh(tcg_res, tcg_op,
13794 tcg_idx, fpst);
13795 } else {
13796 gen_helper_advsimd_mul2h(tcg_res, tcg_op,
13797 tcg_idx, fpst);
13798 }
13799 break;
13800 case 2:
13801 gen_helper_vfp_muls(tcg_res, tcg_op, tcg_idx, fpst);
13802 break;
13803 default:
13804 g_assert_not_reached();
13805 }
13806 break;
13807 case 0x19:
13808 switch (size) {
13809 case 1:
13810 if (is_scalar) {
13811 gen_helper_advsimd_mulxh(tcg_res, tcg_op,
13812 tcg_idx, fpst);
13813 } else {
13814 gen_helper_advsimd_mulx2h(tcg_res, tcg_op,
13815 tcg_idx, fpst);
13816 }
13817 break;
13818 case 2:
13819 gen_helper_vfp_mulxs(tcg_res, tcg_op, tcg_idx, fpst);
13820 break;
13821 default:
13822 g_assert_not_reached();
13823 }
13824 break;
13825 case 0x0c:
13826 if (size == 1) {
13827 gen_helper_neon_qdmulh_s16(tcg_res, cpu_env,
13828 tcg_op, tcg_idx);
13829 } else {
13830 gen_helper_neon_qdmulh_s32(tcg_res, cpu_env,
13831 tcg_op, tcg_idx);
13832 }
13833 break;
13834 case 0x0d:
13835 if (size == 1) {
13836 gen_helper_neon_qrdmulh_s16(tcg_res, cpu_env,
13837 tcg_op, tcg_idx);
13838 } else {
13839 gen_helper_neon_qrdmulh_s32(tcg_res, cpu_env,
13840 tcg_op, tcg_idx);
13841 }
13842 break;
13843 case 0x1d:
13844 read_vec_element_i32(s, tcg_res, rd, pass,
13845 is_scalar ? size : MO_32);
13846 if (size == 1) {
13847 gen_helper_neon_qrdmlah_s16(tcg_res, cpu_env,
13848 tcg_op, tcg_idx, tcg_res);
13849 } else {
13850 gen_helper_neon_qrdmlah_s32(tcg_res, cpu_env,
13851 tcg_op, tcg_idx, tcg_res);
13852 }
13853 break;
13854 case 0x1f:
13855 read_vec_element_i32(s, tcg_res, rd, pass,
13856 is_scalar ? size : MO_32);
13857 if (size == 1) {
13858 gen_helper_neon_qrdmlsh_s16(tcg_res, cpu_env,
13859 tcg_op, tcg_idx, tcg_res);
13860 } else {
13861 gen_helper_neon_qrdmlsh_s32(tcg_res, cpu_env,
13862 tcg_op, tcg_idx, tcg_res);
13863 }
13864 break;
13865 default:
13866 g_assert_not_reached();
13867 }
13868
13869 if (is_scalar) {
13870 write_fp_sreg(s, rd, tcg_res);
13871 } else {
13872 write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
13873 }
13874
13875 tcg_temp_free_i32(tcg_op);
13876 tcg_temp_free_i32(tcg_res);
13877 }
13878
13879 tcg_temp_free_i32(tcg_idx);
13880 clear_vec_high(s, is_q, rd);
13881 } else {
13882
13883 TCGv_i64 tcg_res[2];
13884 int pass;
13885 bool satop = extract32(opcode, 0, 1);
13886 MemOp memop = MO_32;
13887
13888 if (satop || !u) {
13889 memop |= MO_SIGN;
13890 }
13891
13892 if (size == 2) {
13893 TCGv_i64 tcg_idx = tcg_temp_new_i64();
13894
13895 read_vec_element(s, tcg_idx, rm, index, memop);
13896
13897 for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
13898 TCGv_i64 tcg_op = tcg_temp_new_i64();
13899 TCGv_i64 tcg_passres;
13900 int passelt;
13901
13902 if (is_scalar) {
13903 passelt = 0;
13904 } else {
13905 passelt = pass + (is_q * 2);
13906 }
13907
13908 read_vec_element(s, tcg_op, rn, passelt, memop);
13909
13910 tcg_res[pass] = tcg_temp_new_i64();
13911
13912 if (opcode == 0xa || opcode == 0xb) {
13913
13914 tcg_passres = tcg_res[pass];
13915 } else {
13916 tcg_passres = tcg_temp_new_i64();
13917 }
13918
13919 tcg_gen_mul_i64(tcg_passres, tcg_op, tcg_idx);
13920 tcg_temp_free_i64(tcg_op);
13921
13922 if (satop) {
13923
13924 gen_helper_neon_addl_saturate_s64(tcg_passres, cpu_env,
13925 tcg_passres, tcg_passres);
13926 }
13927
13928 if (opcode == 0xa || opcode == 0xb) {
13929 continue;
13930 }
13931
13932
13933 read_vec_element(s, tcg_res[pass], rd, pass, MO_64);
13934
13935 switch (opcode) {
13936 case 0x2:
13937 tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
13938 break;
13939 case 0x6:
13940 tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
13941 break;
13942 case 0x7:
13943 tcg_gen_neg_i64(tcg_passres, tcg_passres);
13944
13945 case 0x3:
13946 gen_helper_neon_addl_saturate_s64(tcg_res[pass], cpu_env,
13947 tcg_res[pass],
13948 tcg_passres);
13949 break;
13950 default:
13951 g_assert_not_reached();
13952 }
13953 tcg_temp_free_i64(tcg_passres);
13954 }
13955 tcg_temp_free_i64(tcg_idx);
13956
13957 clear_vec_high(s, !is_scalar, rd);
13958 } else {
13959 TCGv_i32 tcg_idx = tcg_temp_new_i32();
13960
13961 assert(size == 1);
13962 read_vec_element_i32(s, tcg_idx, rm, index, size);
13963
13964 if (!is_scalar) {
13965
13966
13967
13968
13969 tcg_gen_deposit_i32(tcg_idx, tcg_idx, tcg_idx, 16, 16);
13970 }
13971
13972 for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
13973 TCGv_i32 tcg_op = tcg_temp_new_i32();
13974 TCGv_i64 tcg_passres;
13975
13976 if (is_scalar) {
13977 read_vec_element_i32(s, tcg_op, rn, pass, size);
13978 } else {
13979 read_vec_element_i32(s, tcg_op, rn,
13980 pass + (is_q * 2), MO_32);
13981 }
13982
13983 tcg_res[pass] = tcg_temp_new_i64();
13984
13985 if (opcode == 0xa || opcode == 0xb) {
13986
13987 tcg_passres = tcg_res[pass];
13988 } else {
13989 tcg_passres = tcg_temp_new_i64();
13990 }
13991
13992 if (memop & MO_SIGN) {
13993 gen_helper_neon_mull_s16(tcg_passres, tcg_op, tcg_idx);
13994 } else {
13995 gen_helper_neon_mull_u16(tcg_passres, tcg_op, tcg_idx);
13996 }
13997 if (satop) {
13998 gen_helper_neon_addl_saturate_s32(tcg_passres, cpu_env,
13999 tcg_passres, tcg_passres);
14000 }
14001 tcg_temp_free_i32(tcg_op);
14002
14003 if (opcode == 0xa || opcode == 0xb) {
14004 continue;
14005 }
14006
14007
14008 read_vec_element(s, tcg_res[pass], rd, pass, MO_64);
14009
14010 switch (opcode) {
14011 case 0x2:
14012 gen_helper_neon_addl_u32(tcg_res[pass], tcg_res[pass],
14013 tcg_passres);
14014 break;
14015 case 0x6:
14016 gen_helper_neon_subl_u32(tcg_res[pass], tcg_res[pass],
14017 tcg_passres);
14018 break;
14019 case 0x7:
14020 gen_helper_neon_negl_u32(tcg_passres, tcg_passres);
14021
14022 case 0x3:
14023 gen_helper_neon_addl_saturate_s32(tcg_res[pass], cpu_env,
14024 tcg_res[pass],
14025 tcg_passres);
14026 break;
14027 default:
14028 g_assert_not_reached();
14029 }
14030 tcg_temp_free_i64(tcg_passres);
14031 }
14032 tcg_temp_free_i32(tcg_idx);
14033
14034 if (is_scalar) {
14035 tcg_gen_ext32u_i64(tcg_res[0], tcg_res[0]);
14036 }
14037 }
14038
14039 if (is_scalar) {
14040 tcg_res[1] = tcg_constant_i64(0);
14041 }
14042
14043 for (pass = 0; pass < 2; pass++) {
14044 write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
14045 tcg_temp_free_i64(tcg_res[pass]);
14046 }
14047 }
14048
14049 if (fpst) {
14050 tcg_temp_free_ptr(fpst);
14051 }
14052}
14053
14054
14055
14056
14057
14058
14059
14060static void disas_crypto_aes(DisasContext *s, uint32_t insn)
14061{
14062 int size = extract32(insn, 22, 2);
14063 int opcode = extract32(insn, 12, 5);
14064 int rn = extract32(insn, 5, 5);
14065 int rd = extract32(insn, 0, 5);
14066 int decrypt;
14067 gen_helper_gvec_2 *genfn2 = NULL;
14068 gen_helper_gvec_3 *genfn3 = NULL;
14069
14070 if (!dc_isar_feature(aa64_aes, s) || size != 0) {
14071 unallocated_encoding(s);
14072 return;
14073 }
14074
14075 switch (opcode) {
14076 case 0x4:
14077 decrypt = 0;
14078 genfn3 = gen_helper_crypto_aese;
14079 break;
14080 case 0x6:
14081 decrypt = 0;
14082 genfn2 = gen_helper_crypto_aesmc;
14083 break;
14084 case 0x5:
14085 decrypt = 1;
14086 genfn3 = gen_helper_crypto_aese;
14087 break;
14088 case 0x7:
14089 decrypt = 1;
14090 genfn2 = gen_helper_crypto_aesmc;
14091 break;
14092 default:
14093 unallocated_encoding(s);
14094 return;
14095 }
14096
14097 if (!fp_access_check(s)) {
14098 return;
14099 }
14100 if (genfn2) {
14101 gen_gvec_op2_ool(s, true, rd, rn, decrypt, genfn2);
14102 } else {
14103 gen_gvec_op3_ool(s, true, rd, rd, rn, decrypt, genfn3);
14104 }
14105}
14106
14107
14108
14109
14110
14111
14112
14113static void disas_crypto_three_reg_sha(DisasContext *s, uint32_t insn)
14114{
14115 int size = extract32(insn, 22, 2);
14116 int opcode = extract32(insn, 12, 3);
14117 int rm = extract32(insn, 16, 5);
14118 int rn = extract32(insn, 5, 5);
14119 int rd = extract32(insn, 0, 5);
14120 gen_helper_gvec_3 *genfn;
14121 bool feature;
14122
14123 if (size != 0) {
14124 unallocated_encoding(s);
14125 return;
14126 }
14127
14128 switch (opcode) {
14129 case 0:
14130 genfn = gen_helper_crypto_sha1c;
14131 feature = dc_isar_feature(aa64_sha1, s);
14132 break;
14133 case 1:
14134 genfn = gen_helper_crypto_sha1p;
14135 feature = dc_isar_feature(aa64_sha1, s);
14136 break;
14137 case 2:
14138 genfn = gen_helper_crypto_sha1m;
14139 feature = dc_isar_feature(aa64_sha1, s);
14140 break;
14141 case 3:
14142 genfn = gen_helper_crypto_sha1su0;
14143 feature = dc_isar_feature(aa64_sha1, s);
14144 break;
14145 case 4:
14146 genfn = gen_helper_crypto_sha256h;
14147 feature = dc_isar_feature(aa64_sha256, s);
14148 break;
14149 case 5:
14150 genfn = gen_helper_crypto_sha256h2;
14151 feature = dc_isar_feature(aa64_sha256, s);
14152 break;
14153 case 6:
14154 genfn = gen_helper_crypto_sha256su1;
14155 feature = dc_isar_feature(aa64_sha256, s);
14156 break;
14157 default:
14158 unallocated_encoding(s);
14159 return;
14160 }
14161
14162 if (!feature) {
14163 unallocated_encoding(s);
14164 return;
14165 }
14166
14167 if (!fp_access_check(s)) {
14168 return;
14169 }
14170 gen_gvec_op3_ool(s, true, rd, rn, rm, 0, genfn);
14171}
14172
14173
14174
14175
14176
14177
14178
14179static void disas_crypto_two_reg_sha(DisasContext *s, uint32_t insn)
14180{
14181 int size = extract32(insn, 22, 2);
14182 int opcode = extract32(insn, 12, 5);
14183 int rn = extract32(insn, 5, 5);
14184 int rd = extract32(insn, 0, 5);
14185 gen_helper_gvec_2 *genfn;
14186 bool feature;
14187
14188 if (size != 0) {
14189 unallocated_encoding(s);
14190 return;
14191 }
14192
14193 switch (opcode) {
14194 case 0:
14195 feature = dc_isar_feature(aa64_sha1, s);
14196 genfn = gen_helper_crypto_sha1h;
14197 break;
14198 case 1:
14199 feature = dc_isar_feature(aa64_sha1, s);
14200 genfn = gen_helper_crypto_sha1su1;
14201 break;
14202 case 2:
14203 feature = dc_isar_feature(aa64_sha256, s);
14204 genfn = gen_helper_crypto_sha256su0;
14205 break;
14206 default:
14207 unallocated_encoding(s);
14208 return;
14209 }
14210
14211 if (!feature) {
14212 unallocated_encoding(s);
14213 return;
14214 }
14215
14216 if (!fp_access_check(s)) {
14217 return;
14218 }
14219 gen_gvec_op2_ool(s, true, rd, rn, 0, genfn);
14220}
14221
/* RAX1, per 64-bit lane: d = n ^ rol64(m, 1).  The rotate must be
 * emitted first since d may alias n or m.
 */
static void gen_rax1_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m)
{
    tcg_gen_rotli_i64(d, m, 1);
    tcg_gen_xor_i64(d, d, n);
}
14227
/* Host-vector expansion of RAX1: d = n ^ rol(m, 1) per element. */
static void gen_rax1_vec(unsigned vece, TCGv_vec d, TCGv_vec n, TCGv_vec m)
{
    tcg_gen_rotli_vec(vece, d, m, 1);
    tcg_gen_xor_vec(vece, d, d, n);
}
14233
/*
 * Expand RAX1 as a gvec operation: for each 64-bit element,
 * d = n ^ rol64(m, 1).  TCG picks the best available expansion:
 * host vectors (requires rotli_vec), per-i64 ops, or the
 * out-of-line helper.
 */
void gen_gvec_rax1(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_rotli_vec, 0 };
    static const GVecGen3 op = {
        .fni8 = gen_rax1_i64,           /* per-64-bit-lane expansion */
        .fniv = gen_rax1_vec,           /* host-vector expansion */
        .opt_opc = vecop_list,          /* vector form needs rotli_vec */
        .fno = gen_helper_crypto_rax1,  /* out-of-line fallback */
        .vece = MO_64,
    };
    tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &op);
}
14247
14248
14249
14250
14251
14252
14253
/* Crypto three-reg SHA512/SM group.  Bit 14 ("o") selects between the
 * SHA512/RAX1 set (o == 0) and the SM3/SM4 set (o == 1).  All ops here
 * use an out-of-line helper except RAX1, which has a gvec expansion.
 */
static void disas_crypto_three_reg_sha512(DisasContext *s, uint32_t insn)
{
    int opcode = extract32(insn, 10, 2);
    int o = extract32(insn, 14, 1);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    bool feature;
    gen_helper_gvec_3 *oolfn = NULL;
    GVecGen3Fn *gvecfn = NULL;

    if (o == 0) {
        switch (opcode) {
        case 0: /* SHA512H */
            feature = dc_isar_feature(aa64_sha512, s);
            oolfn = gen_helper_crypto_sha512h;
            break;
        case 1: /* SHA512H2 */
            feature = dc_isar_feature(aa64_sha512, s);
            oolfn = gen_helper_crypto_sha512h2;
            break;
        case 2: /* SHA512SU1 */
            feature = dc_isar_feature(aa64_sha512, s);
            oolfn = gen_helper_crypto_sha512su1;
            break;
        case 3: /* RAX1 */
            feature = dc_isar_feature(aa64_sha3, s);
            gvecfn = gen_gvec_rax1;
            break;
        default:
            /* opcode is only 2 bits wide, all values handled above */
            g_assert_not_reached();
        }
    } else {
        switch (opcode) {
        case 0: /* SM3PARTW1 */
            feature = dc_isar_feature(aa64_sm3, s);
            oolfn = gen_helper_crypto_sm3partw1;
            break;
        case 1: /* SM3PARTW2 */
            feature = dc_isar_feature(aa64_sm3, s);
            oolfn = gen_helper_crypto_sm3partw2;
            break;
        case 2: /* SM4EKEY */
            feature = dc_isar_feature(aa64_sm4, s);
            oolfn = gen_helper_crypto_sm4ekey;
            break;
        default:
            /* opcode 3 is unallocated when o == 1 */
            unallocated_encoding(s);
            return;
        }
    }

    if (!feature) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (oolfn) {
        gen_gvec_op3_ool(s, true, rd, rn, rm, 0, oolfn);
    } else {
        gen_gvec_fn3(s, true, rd, rn, rm, gvecfn, MO_64);
    }
}
14321
14322
14323
14324
14325
14326
14327
14328static void disas_crypto_two_reg_sha512(DisasContext *s, uint32_t insn)
14329{
14330 int opcode = extract32(insn, 10, 2);
14331 int rn = extract32(insn, 5, 5);
14332 int rd = extract32(insn, 0, 5);
14333 bool feature;
14334
14335 switch (opcode) {
14336 case 0:
14337 feature = dc_isar_feature(aa64_sha512, s);
14338 break;
14339 case 1:
14340 feature = dc_isar_feature(aa64_sm4, s);
14341 break;
14342 default:
14343 unallocated_encoding(s);
14344 return;
14345 }
14346
14347 if (!feature) {
14348 unallocated_encoding(s);
14349 return;
14350 }
14351
14352 if (!fp_access_check(s)) {
14353 return;
14354 }
14355
14356 switch (opcode) {
14357 case 0:
14358 gen_gvec_op2_ool(s, true, rd, rn, 0, gen_helper_crypto_sha512su0);
14359 break;
14360 case 1:
14361 gen_gvec_op3_ool(s, true, rd, rd, rn, 0, gen_helper_crypto_sm4e);
14362 break;
14363 default:
14364 g_assert_not_reached();
14365 }
14366}
14367
14368
14369
14370
14371
14372
14373
/* Crypto four-register group: EOR3 and BCAX (FEAT_SHA3) operate on the
 * whole 128-bit vector; SM3SS1 (FEAT_SM3) uses only element 3 of each
 * source and zeroes the other destination elements.
 */
static void disas_crypto_four_reg(DisasContext *s, uint32_t insn)
{
    int op0 = extract32(insn, 21, 2);
    int rm = extract32(insn, 16, 5);
    int ra = extract32(insn, 10, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    bool feature;

    switch (op0) {
    case 0: /* EOR3 */
    case 1: /* BCAX */
        feature = dc_isar_feature(aa64_sha3, s);
        break;
    case 2: /* SM3SS1 */
        feature = dc_isar_feature(aa64_sm3, s);
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!feature) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (op0 < 2) {
        /* EOR3 / BCAX: process the 128-bit vector one 64-bit lane per pass */
        TCGv_i64 tcg_op1, tcg_op2, tcg_op3, tcg_res[2];
        int pass;

        tcg_op1 = tcg_temp_new_i64();
        tcg_op2 = tcg_temp_new_i64();
        tcg_op3 = tcg_temp_new_i64();
        tcg_res[0] = tcg_temp_new_i64();
        tcg_res[1] = tcg_temp_new_i64();

        for (pass = 0; pass < 2; pass++) {
            read_vec_element(s, tcg_op1, rn, pass, MO_64);
            read_vec_element(s, tcg_op2, rm, pass, MO_64);
            read_vec_element(s, tcg_op3, ra, pass, MO_64);

            if (op0 == 0) {
                /* EOR3: Vd = Vn ^ Vm ^ Va */
                tcg_gen_xor_i64(tcg_res[pass], tcg_op2, tcg_op3);
            } else {
                /* BCAX: Vd = Vn ^ (Vm & ~Va) */
                tcg_gen_andc_i64(tcg_res[pass], tcg_op2, tcg_op3);
            }
            tcg_gen_xor_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
        }
        write_vec_element(s, tcg_res[0], rd, 0, MO_64);
        write_vec_element(s, tcg_res[1], rd, 1, MO_64);

        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2);
        tcg_temp_free_i64(tcg_op3);
        tcg_temp_free_i64(tcg_res[0]);
        tcg_temp_free_i64(tcg_res[1]);
    } else {
        /* SM3SS1 */
        TCGv_i32 tcg_op1, tcg_op2, tcg_op3, tcg_res, tcg_zero;

        tcg_op1 = tcg_temp_new_i32();
        tcg_op2 = tcg_temp_new_i32();
        tcg_op3 = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        tcg_zero = tcg_constant_i32(0);

        read_vec_element_i32(s, tcg_op1, rn, 3, MO_32);
        read_vec_element_i32(s, tcg_op2, rm, 3, MO_32);
        read_vec_element_i32(s, tcg_op3, ra, 3, MO_32);

        /* ROL(op1, 12) expressed as a right-rotate by 20 */
        tcg_gen_rotri_i32(tcg_res, tcg_op1, 20);
        tcg_gen_add_i32(tcg_res, tcg_res, tcg_op2);
        tcg_gen_add_i32(tcg_res, tcg_res, tcg_op3);
        /* final ROL by 7 expressed as a right-rotate by 25 */
        tcg_gen_rotri_i32(tcg_res, tcg_res, 25);

        /* Elements 0-2 of the destination are zeroed. */
        write_vec_element_i32(s, tcg_zero, rd, 0, MO_32);
        write_vec_element_i32(s, tcg_zero, rd, 1, MO_32);
        write_vec_element_i32(s, tcg_zero, rd, 2, MO_32);
        write_vec_element_i32(s, tcg_res, rd, 3, MO_32);

        tcg_temp_free_i32(tcg_op1);
        tcg_temp_free_i32(tcg_op2);
        tcg_temp_free_i32(tcg_op3);
        tcg_temp_free_i32(tcg_res);
    }
}
14466
14467
14468
14469
14470
14471
14472
14473static void disas_crypto_xar(DisasContext *s, uint32_t insn)
14474{
14475 int rm = extract32(insn, 16, 5);
14476 int imm6 = extract32(insn, 10, 6);
14477 int rn = extract32(insn, 5, 5);
14478 int rd = extract32(insn, 0, 5);
14479
14480 if (!dc_isar_feature(aa64_sha3, s)) {
14481 unallocated_encoding(s);
14482 return;
14483 }
14484
14485 if (!fp_access_check(s)) {
14486 return;
14487 }
14488
14489 gen_gvec_xar(MO_64, vec_full_reg_offset(s, rd),
14490 vec_full_reg_offset(s, rn),
14491 vec_full_reg_offset(s, rm), imm6, 16,
14492 vec_full_reg_size(s));
14493}
14494
14495
14496
14497
14498
14499
14500
14501static void disas_crypto_three_reg_imm2(DisasContext *s, uint32_t insn)
14502{
14503 static gen_helper_gvec_3 * const fns[4] = {
14504 gen_helper_crypto_sm3tt1a, gen_helper_crypto_sm3tt1b,
14505 gen_helper_crypto_sm3tt2a, gen_helper_crypto_sm3tt2b,
14506 };
14507 int opcode = extract32(insn, 10, 2);
14508 int imm2 = extract32(insn, 12, 2);
14509 int rm = extract32(insn, 16, 5);
14510 int rn = extract32(insn, 5, 5);
14511 int rd = extract32(insn, 0, 5);
14512
14513 if (!dc_isar_feature(aa64_sm3, s)) {
14514 unallocated_encoding(s);
14515 return;
14516 }
14517
14518 if (!fp_access_check(s)) {
14519 return;
14520 }
14521
14522 gen_gvec_op3_ool(s, true, rd, rn, rm, imm2, fns[opcode]);
14523}
14524
14525
14526
14527
14528
14529
/* Decode table for the SIMD and crypto groups.  Each entry is
 * { pattern, mask, decode-fn }: an insn matches when
 * (insn & mask) == pattern; the table is scanned in order.
 */
static const AArch64DecodeTable data_proc_simd[] = {
    /* pattern  ,  mask     ,  fn                        */
    { 0x0e200400, 0x9f200400, disas_simd_three_reg_same },
    { 0x0e008400, 0x9f208400, disas_simd_three_reg_same_extra },
    { 0x0e200000, 0x9f200c00, disas_simd_three_reg_diff },
    { 0x0e200800, 0x9f3e0c00, disas_simd_two_reg_misc },
    { 0x0e300800, 0x9f3e0c00, disas_simd_across_lanes },
    { 0x0e000400, 0x9fe08400, disas_simd_copy },
    { 0x0f000000, 0x9f000400, disas_simd_indexed },
    /* simd_mod_imm decode is a subset of simd_shift_imm, so must precede it */
    { 0x0f000400, 0x9ff80400, disas_simd_mod_imm },
    { 0x0f000400, 0x9f800400, disas_simd_shift_imm },
    { 0x0e000000, 0xbf208c00, disas_simd_tb },
    { 0x0e000800, 0xbf208c00, disas_simd_zip_trn },
    { 0x2e000000, 0xbf208400, disas_simd_ext },
    { 0x5e200400, 0xdf200400, disas_simd_scalar_three_reg_same },
    { 0x5e008400, 0xdf208400, disas_simd_scalar_three_reg_same_extra },
    { 0x5e200000, 0xdf200c00, disas_simd_scalar_three_reg_diff },
    { 0x5e200800, 0xdf3e0c00, disas_simd_scalar_two_reg_misc },
    { 0x5e300800, 0xdf3e0c00, disas_simd_scalar_pairwise },
    { 0x5e000400, 0xdfe08400, disas_simd_scalar_copy },
    { 0x5f000000, 0xdf000400, disas_simd_indexed },
    { 0x5f000400, 0xdf800400, disas_simd_scalar_shift_imm },
    { 0x4e280800, 0xff3e0c00, disas_crypto_aes },
    { 0x5e000000, 0xff208c00, disas_crypto_three_reg_sha },
    { 0x5e280800, 0xff3e0c00, disas_crypto_two_reg_sha },
    { 0xce608000, 0xffe0b000, disas_crypto_three_reg_sha512 },
    { 0xcec08000, 0xfffff000, disas_crypto_two_reg_sha512 },
    { 0xce000000, 0xff808000, disas_crypto_four_reg },
    { 0xce800000, 0xffe00000, disas_crypto_xar },
    { 0xce408000, 0xffe0c000, disas_crypto_three_reg_imm2 },
    { 0x0e400400, 0x9f60c400, disas_simd_three_reg_same_fp16 },
    { 0x0e780800, 0x8f7e0c00, disas_simd_two_reg_misc_fp16 },
    { 0x5e400400, 0xdf60c400, disas_simd_scalar_three_reg_same_fp16 },
    { 0x00000000, 0x00000000, NULL } /* terminator */
};
14566
14567static void disas_data_proc_simd(DisasContext *s, uint32_t insn)
14568{
14569
14570
14571
14572
14573 AArch64DecodeFn *fn = lookup_disas_fn(&data_proc_simd[0], insn);
14574 if (fn) {
14575 fn(s, insn);
14576 } else {
14577 unallocated_encoding(s);
14578 }
14579}
14580
14581
14582static void disas_data_proc_simd_fp(DisasContext *s, uint32_t insn)
14583{
14584 if (extract32(insn, 28, 1) == 1 && extract32(insn, 30, 1) == 0) {
14585 disas_data_proc_fp(s, insn);
14586 } else {
14587
14588 disas_data_proc_simd(s, insn);
14589 }
14590}
14591
14592
14593
14594
14595
14596#include "decode-sme-fa64.c.inc"
14597
/* SME FA64 decode callback: insn needs no non-streaming markup. */
static bool trans_OK(DisasContext *s, arg_OK *a)
{
    return true;
}
14602
/* SME FA64 decode callback: flag the insn as a non-streaming operation
 * (the actual trap decision is taken later using s->is_nonstreaming).
 */
static bool trans_FAIL(DisasContext *s, arg_OK *a)
{
    s->is_nonstreaming = true;
    return true;
}
14608
14609
14610
14611
14612
14613
14614
14615
/**
 * is_guarded_page:
 * @env: the cpu environment
 * @s: the disassembly context
 *
 * Return true if the page containing the TB's first insn is a BTI
 * guarded page (GP attribute set).
 */
static bool is_guarded_page(CPUARMState *env, DisasContext *s)
{
    uint64_t addr = s->base.pc_first;
#ifdef CONFIG_USER_ONLY
    return page_get_flags(addr) & PAGE_BTI;
#else
    int mmu_idx = arm_to_core_mmu_idx(s->mmu_idx);
    unsigned int index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);

    /*
     * Probe the softmmu TLB entry for the code address and, on a hit,
     * report the guarded-page attribute recorded in the IOTLB attrs.
     * NOTE(review): this is called right after the first insn of the TB
     * has been fetched, so the code TLB entry is presumably present and
     * the hit is expected; a miss simply yields "not guarded".
     */
    return (tlb_hit(entry->addr_code, addr) &&
            arm_tlb_bti_gp(&env_tlb(env)->d[mmu_idx].iotlb[index].attrs));
#endif
}
14640
14641
14642
14643
14644
14645
14646
14647
14648
14649
14650
14651
14652
14653
14654
14655
14656
/**
 * btype_destination_ok:
 * @insn: the instruction at the branch destination
 * @bt: the SCTLR_ELx.BT bit for the current EL
 * @btype: PSTATE.BTYPE (non-zero when this is called)
 *
 * Return true if @insn is an acceptable target for an indirect branch
 * onto a guarded page; false means a Branch Target exception is due.
 */
static bool btype_destination_ok(uint32_t insn, bool bt, int btype)
{
    if ((insn & 0xfffff01fu) == 0xd503201fu) {
        /* HINT space */
        switch (extract32(insn, 5, 7)) {
        case 0b011001: /* PACIASP */
        case 0b011011: /* PACIBSP */
            /*
             * If SCTLR_ELx.BT, then PACI*SP are not compatible
             * with btype == 3.  Otherwise, they are compatible
             * with any btype.
             */
            return !bt || btype != 3;
        case 0b100000: /* BTI */
            /* Not compatible with any btype.  */
            return false;
        case 0b100010: /* BTI c */
            /* Not compatible with btype == 3 */
            return btype != 3;
        case 0b100100: /* BTI j */
            /* Not compatible with btype == 2 */
            return btype != 2;
        case 0b100110: /* BTI jc */
            /* Compatible with any btype.  */
            return true;
        }
    } else {
        switch (insn & 0xffe0001fu) {
        case 0xd4200000u: /* BRK */
        case 0xd4400000u: /* HLT */
            /* Give priority to the breakpoint exception.  */
            return true;
        }
    }
    return false;
}
14692
/* Initialize the DisasContext for an AArch64 TB from the flags that
 * were stored in the TB when it was created.
 */
static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
                                          CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;
    ARMCPU *arm_cpu = env_archcpu(env);
    CPUARMTBFlags tb_flags = arm_tbflags_from_tb(dc->base.tb);
    int bound, core_mmu_idx;

    dc->isar = &arm_cpu->isar;
    dc->condjmp = 0;

    /* AArch64 state: Thumb/condexec/SCTLR.B fields are fixed. */
    dc->aarch64 = true;
    dc->thumb = false;
    dc->sctlr_b = 0;
    dc->be_data = EX_TBFLAG_ANY(tb_flags, BE_DATA) ? MO_BE : MO_LE;
    dc->condexec_mask = 0;
    dc->condexec_cond = 0;
    core_mmu_idx = EX_TBFLAG_ANY(tb_flags, MMUIDX);
    dc->mmu_idx = core_to_aa64_mmu_idx(core_mmu_idx);
    dc->tbii = EX_TBFLAG_A64(tb_flags, TBII);
    dc->tbid = EX_TBFLAG_A64(tb_flags, TBID);
    dc->tcma = EX_TBFLAG_A64(tb_flags, TCMA);
    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
    dc->user = (dc->current_el == 0);
#endif
    dc->fp_excp_el = EX_TBFLAG_ANY(tb_flags, FPEXC_EL);
    dc->align_mem = EX_TBFLAG_ANY(tb_flags, ALIGN_MEM);
    dc->pstate_il = EX_TBFLAG_ANY(tb_flags, PSTATE__IL);
    dc->sve_excp_el = EX_TBFLAG_A64(tb_flags, SVEEXC_EL);
    dc->sme_excp_el = EX_TBFLAG_A64(tb_flags, SMEEXC_EL);
    dc->vl = (EX_TBFLAG_A64(tb_flags, VL) + 1) * 16;
    dc->svl = (EX_TBFLAG_A64(tb_flags, SVL) + 1) * 16;
    dc->pauth_active = EX_TBFLAG_A64(tb_flags, PAUTH_ACTIVE);
    dc->bt = EX_TBFLAG_A64(tb_flags, BT);
    dc->btype = EX_TBFLAG_A64(tb_flags, BTYPE);
    dc->unpriv = EX_TBFLAG_A64(tb_flags, UNPRIV);
    dc->ata = EX_TBFLAG_A64(tb_flags, ATA);
    dc->mte_active[0] = EX_TBFLAG_A64(tb_flags, MTE_ACTIVE);
    dc->mte_active[1] = EX_TBFLAG_A64(tb_flags, MTE0_ACTIVE);
    dc->pstate_sm = EX_TBFLAG_A64(tb_flags, PSTATE_SM);
    dc->pstate_za = EX_TBFLAG_A64(tb_flags, PSTATE_ZA);
    dc->sme_trap_nonstreaming = EX_TBFLAG_A64(tb_flags, SME_TRAP_NONSTREAMING);
    dc->vec_len = 0;
    dc->vec_stride = 0;
    dc->cp_regs = arm_cpu->cp_regs;
    dc->features = env->features;
    dc->dcz_blocksize = arm_cpu->dcz_blocksize;

#ifdef CONFIG_USER_ONLY
    /* User-only builds rely on bit 0 of TBID being set (TBI enabled
     * for the EL0 address range).
     */
    tcg_debug_assert(dc->tbid & 1);
#endif

    /*
     * Single step state.  The code-generation logic here is:
     *  SS_ACTIVE == 0:
     *   generate code with no special handling for single-stepping (except
     *   that anything that can make us go to SS_ACTIVE == 1 must end the TB;
     *   this happens anyway because those changes are all system register or
     *   PSTATE writes).
     *  SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending state)
     *   emit code for one insn
     *   emit code to clear PSTATE.SS
     *   emit code to generate software step exception for completed step
     *   end TB (as usual for having generated an exception)
     *  SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending state)
     *   emit code to generate a software step exception
     *   end the TB
     */
    dc->ss_active = EX_TBFLAG_ANY(tb_flags, SS_ACTIVE);
    dc->pstate_ss = EX_TBFLAG_ANY(tb_flags, PSTATE__SS);
    dc->is_ldex = false;

    /* Bound the number of insns to execute to those left on the page. */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;

    /* If architectural single step active, limit to 1. */
    if (dc->ss_active) {
        bound = 1;
    }
    dc->base.max_insns = MIN(dc->base.max_insns, bound);

    init_tmp_a64_array(dc);
}
14778
/* Translator hook: nothing to do at the start of an AArch64 TB. */
static void aarch64_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}
14782
/* Translator hook: emit the insn_start op for this guest insn and
 * remember it so later code can patch its arguments.
 */
static void aarch64_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* NOTE(review): the two trailing zeros fill the target's extra
     * insn_start words (unused in this A64 path) — confirm against
     * TARGET_INSN_START_EXTRA_WORDS.
     */
    tcg_gen_insn_start(dc->base.pc_next, 0, 0);
    dc->insn_start = tcg_last_op();
}
14790
/* Translator hook: fetch, check, and translate one AArch64 insn. */
static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *s = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;
    uint64_t pc = s->base.pc_next;
    uint32_t insn;

    /* Singlestep state is Active-pending: take the software step
     * exception before executing any insn.  This can only happen at
     * the start of a TB.
     */
    if (s->ss_active && !s->pstate_ss) {
        assert(s->base.num_insns == 1);
        /* Syndrome ISV/EX bits are 0: we did not step an insn. */
        gen_swstep_exception(s, 0, 0);
        s->base.is_jmp = DISAS_NORETURN;
        s->base.pc_next = pc + 4;
        return;
    }

    if (pc & 3) {
        /*
         * PC alignment fault.  This has priority over the instruction
         * abort we would otherwise get from arm_ldl_code, so test it
         * before fetching.  Only possible at the start of a TB, after
         * an indirect branch.
         */
        assert(s->base.num_insns == 1);
        gen_helper_exception_pc_alignment(cpu_env, tcg_constant_tl(pc));
        s->base.is_jmp = DISAS_NORETURN;
        s->base.pc_next = QEMU_ALIGN_UP(pc, 4);
        return;
    }

    s->pc_curr = pc;
    insn = arm_ldl_code(env, &s->base, pc, s->sctlr_b);
    s->insn = insn;
    s->base.pc_next = pc + 4;

    /* Reset the per-insn access-check memoization. */
    s->fp_access_checked = false;
    s->sve_access_checked = false;

    if (s->pstate_il) {
        /*
         * Illegal execution state: every insn UNDEFs.  This has
         * priority over BTI below.
         */
        gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_illegalstate());
        return;
    }

    if (dc_isar_feature(aa64_bti, s)) {
        if (s->base.num_insns == 1) {
            /*
             * Compute s->guarded_page only now, after the first insn
             * has been fetched, so the softmmu TLB entry probed by
             * is_guarded_page() has been populated.
             */
            s->guarded_page = is_guarded_page(env, s);

            /* First insn can have btype set to non-zero. */
            tcg_debug_assert(s->btype >= 0);

            /*
             * Branch Target exception: checked here, before decoding,
             * since it applies to the insn as a branch destination
             * regardless of what the insn is.
             */
            if (s->btype != 0
                && s->guarded_page
                && !btype_destination_ok(insn, s->bt, s->btype)) {
                gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
                                   syn_btitrap(s->btype));
                return;
            }
        } else {
            /* Not the first insn: btype must be 0. */
            tcg_debug_assert(s->btype == 0);
        }
    }

    /* Let the FA64 decoder mark SME-incompatible insns if needed. */
    s->is_nonstreaming = false;
    if (s->sme_trap_nonstreaming) {
        disas_sme_fa64(s, insn);
    }

    /* Main A64 decode: dispatch on insn bits [28:25]. */
    switch (extract32(insn, 25, 4)) {
    case 0x0: /* SME encodings (top bit must also be set) */
        if (!extract32(insn, 31, 1) || !disas_sme(s, insn)) {
            unallocated_encoding(s);
        }
        break;
    case 0x1: case 0x3: /* UNALLOCATED */
        unallocated_encoding(s);
        break;
    case 0x2: /* SVE encodings */
        if (!disas_sve(s, insn)) {
            unallocated_encoding(s);
        }
        break;
    case 0x8: case 0x9: /* Data processing - immediate */
        disas_data_proc_imm(s, insn);
        break;
    case 0xa: case 0xb: /* Branch, exception generation and system insns */
        disas_b_exc_sys(s, insn);
        break;
    case 0x4:
    case 0x6:
    case 0xc:
    case 0xe: /* Loads and stores */
        disas_ldst(s, insn);
        break;
    case 0x5:
    case 0xd: /* Data processing - register */
        disas_data_proc_reg(s, insn);
        break;
    case 0x7:
    case 0xf: /* Data processing - SIMD and floating point */
        disas_data_proc_simd_fp(s, insn);
        break;
    default:
        /* all 4-bit values are decoded above */
        assert(FALSE);
        break;
    }

    /* if we allocated any temporaries, free them here */
    free_tmp_a64(s);

    /*
     * After execution of most insns, btype is reset to 0.
     * Note that we set btype == -1 when the insn sets btype.
     */
    if (s->btype > 0 && s->base.is_jmp != DISAS_NORETURN) {
        reset_btype(s);
    }

    translator_loop_temp_check(&s->base);
}
14942
/* Translator hook: emit the TB epilogue according to base.is_jmp. */
static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (unlikely(dc->ss_active)) {
        /*
         * Architectural single step: end every TB with the software
         * step exception rather than chaining to the next TB.
         */
        switch (dc->base.is_jmp) {
        default:
            gen_a64_set_pc_im(dc->base.pc_next);
            /* fall through */
        case DISAS_EXIT:
        case DISAS_JUMP:
            gen_step_complete_exception(dc);
            break;
        case DISAS_NORETURN:
            break;
        }
    } else {
        switch (dc->base.is_jmp) {
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
            gen_goto_tb(dc, 1, dc->base.pc_next);
            break;
        default:
        case DISAS_UPDATE_EXIT:
            gen_a64_set_pc_im(dc->base.pc_next);
            /* fall through */
        case DISAS_EXIT:
            tcg_gen_exit_tb(NULL, 0);
            break;
        case DISAS_UPDATE_NOCHAIN:
            gen_a64_set_pc_im(dc->base.pc_next);
            /* fall through */
        case DISAS_JUMP:
            tcg_gen_lookup_and_goto_ptr();
            break;
        case DISAS_NORETURN:
        case DISAS_SWI:
            break;
        case DISAS_WFE:
            gen_a64_set_pc_im(dc->base.pc_next);
            gen_helper_wfe(cpu_env);
            break;
        case DISAS_YIELD:
            gen_a64_set_pc_im(dc->base.pc_next);
            gen_helper_yield(cpu_env);
            break;
        case DISAS_WFI:
            /*
             * NOTE(review): the constant 4 passed to the wfi helper is
             * presumably the insn length used for the syndrome — confirm
             * against helper_wfi's signature.
             */
            gen_a64_set_pc_im(dc->base.pc_next);
            gen_helper_wfi(cpu_env, tcg_constant_i32(4));
            /*
             * The helper doesn't necessarily throw an exception, but we
             * must go back to the main loop to check for interrupts anyway.
             */
            tcg_gen_exit_tb(NULL, 0);
            break;
        }
    }
}
15010
/* Translator hook: write the guest disassembly of this TB to @logfile. */
static void aarch64_tr_disas_log(const DisasContextBase *dcbase,
                                 CPUState *cpu, FILE *logfile)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    fprintf(logfile, "IN: %s\n", lookup_symbol(dc->base.pc_first));
    target_disas(logfile, cpu, dc->base.pc_first, dc->base.tb->size);
}
15019
/* Hook table consumed by the generic translator loop for AArch64. */
const TranslatorOps aarch64_translator_ops = {
    .init_disas_context = aarch64_tr_init_disas_context,
    .tb_start           = aarch64_tr_tb_start,
    .insn_start         = aarch64_tr_insn_start,
    .translate_insn     = aarch64_tr_translate_insn,
    .tb_stop            = aarch64_tr_tb_stop,
    .disas_log          = aarch64_tr_disas_log,
};
15028