1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19#include "qemu/osdep.h"
20
21#include "cpu.h"
22#include "exec/exec-all.h"
23#include "tcg/tcg-op.h"
24#include "tcg/tcg-op-gvec.h"
25#include "qemu/log.h"
26#include "arm_ldst.h"
27#include "translate.h"
28#include "internals.h"
29#include "qemu/host-utils.h"
30
31#include "semihosting/semihost.h"
32#include "exec/gen-icount.h"
33
34#include "exec/helper-proto.h"
35#include "exec/helper-gen.h"
36#include "exec/log.h"
37
38#include "trace-tcg.h"
39#include "translate-a64.h"
40#include "qemu/atomic128.h"
41
/*
 * TCG globals mapping the AArch64 general purpose registers and the PC
 * onto fields of CPUARMState; initialized in a64_translate_init().
 */
static TCGv_i64 cpu_X[32];
static TCGv_i64 cpu_pc;

/* Load/store exclusive handling: high half of a 128-bit exclusive value. */
static TCGv_i64 cpu_exclusive_high;

/* Names for the 32 X registers; x30 is the link register, index 31 is SP. */
static const char *regnames[] = {
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
    "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
    "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23",
    "x24", "x25", "x26", "x27", "x28", "x29", "lr", "sp"
};
54
/* Shift types encoded in the two "shift" bits of data-processing insns. */
enum a64_shift_type {
    A64_SHIFT_TYPE_LSL = 0,
    A64_SHIFT_TYPE_LSR = 1,
    A64_SHIFT_TYPE_ASR = 2,
    A64_SHIFT_TYPE_ROR = 3
};
61
62
63
64
/*
 * Table based decoder helpers: a decode function for one instruction,
 * and a table entry that matches it via (insn & mask) == pattern.
 */
typedef void AArch64DecodeFn(DisasContext *s, uint32_t insn);

typedef struct AArch64DecodeTable {
    uint32_t pattern;
    uint32_t mask;
    AArch64DecodeFn *disas_fn;
} AArch64DecodeTable;
72
73
/* initialize TCG globals.  Called once at translator startup. */
void a64_translate_init(void)
{
    int i;

    cpu_pc = tcg_global_mem_new_i64(cpu_env,
                                    offsetof(CPUARMState, pc),
                                    "pc");
    for (i = 0; i < 32; i++) {
        cpu_X[i] = tcg_global_mem_new_i64(cpu_env,
                                          offsetof(CPUARMState, xregs[i]),
                                          regnames[i]);
    }

    cpu_exclusive_high = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_high), "exclusive_high");
}
90
91
92
93
/*
 * Return the core mmu_idx to use for A64 "unprivileged load/store"
 * (LDTR/STTR style) instructions.
 */
static int get_a64_user_mem_index(DisasContext *s)
{
    /*
     * If AccType_UNPRIV is not used, the insn uses AccType_NORMAL,
     * which is the usual mmu_idx for this cpu state.
     */
    ARMMMUIdx useridx = s->mmu_idx;

    if (s->unpriv) {
        /*
         * We have pre-computed the condition for AccType_UNPRIV.
         * Therefore we should never get here with a mmu_idx for
         * which we do not know the corresponding user mmu_idx.
         */
        switch (useridx) {
        case ARMMMUIdx_E10_1:
        case ARMMMUIdx_E10_1_PAN:
            useridx = ARMMMUIdx_E10_0;
            break;
        case ARMMMUIdx_E20_2:
        case ARMMMUIdx_E20_2_PAN:
            useridx = ARMMMUIdx_E20_0;
            break;
        case ARMMMUIdx_SE10_1:
        case ARMMMUIdx_SE10_1_PAN:
            useridx = ARMMMUIdx_SE10_0;
            break;
        case ARMMMUIdx_SE20_2:
        case ARMMMUIdx_SE20_2_PAN:
            useridx = ARMMMUIdx_SE20_0;
            break;
        default:
            g_assert_not_reached();
        }
    }
    return arm_to_core_mmu_idx(useridx);
}
131
/* Clear PSTATE.BTYPE to 0, both in the env and in the translator state. */
static void reset_btype(DisasContext *s)
{
    if (s->btype != 0) {
        TCGv_i32 zero = tcg_const_i32(0);
        tcg_gen_st_i32(zero, cpu_env, offsetof(CPUARMState, btype));
        tcg_temp_free_i32(zero);
        s->btype = 0;
    }
}
141
/* Store a non-zero value to PSTATE.BTYPE. */
static void set_btype(DisasContext *s, int val)
{
    TCGv_i32 tcg_val;

    /* BTYPE is a 2-bit field, and 0 should be done with reset_btype.  */
    tcg_debug_assert(val >= 1 && val <= 3);

    tcg_val = tcg_const_i32(val);
    tcg_gen_st_i32(tcg_val, cpu_env, offsetof(CPUARMState, btype));
    tcg_temp_free_i32(tcg_val);
    /* -1 marks the translator's cached btype as "unknown at compile time". */
    s->btype = -1;
}
154
/* Set the PC global to a known immediate value. */
void gen_a64_set_pc_im(uint64_t val)
{
    tcg_gen_movi_i64(cpu_pc, val);
}
159
160
161
162
163
164
165
166
167
168
169
170
171
172
/*
 * Handle Top Byte Ignore (TBI) bits: produce in DST the address SRC
 * with the top byte squashed as required by the 2-bit TBI field
 * (bit 0 = TBI0 for the low half of the address space, bit 1 = TBI1
 * for the high half).
 */
static void gen_top_byte_ignore(DisasContext *s, TCGv_i64 dst,
                                TCGv_i64 src, int tbi)
{
    if (tbi == 0) {
        /* Load unmodified address */
        tcg_gen_mov_i64(dst, src);
    } else if (!regime_has_2_ranges(s->mmu_idx)) {
        /* Force tag byte to all zero */
        tcg_gen_extract_i64(dst, src, 0, 56);
    } else {
        /* Sign-extend from bit 55 */
        tcg_gen_sextract_i64(dst, src, 0, 56);

        switch (tbi) {
        case 1:
            /* tbi0 but !tbi1: only use the extension if positive */
            tcg_gen_and_i64(dst, dst, src);
            break;
        case 2:
            /* !tbi0 but tbi1: only use the extension if negative */
            tcg_gen_or_i64(dst, dst, src);
            break;
        case 3:
            /* tbi0 and tbi1: always use the extension */
            break;
        default:
            g_assert_not_reached();
        }
    }
}
203
/*
 * Set the PC from a variable value, applying the TBI rules for
 * instruction fetch (s->tbii) so that tag bits are removed.
 */
static void gen_a64_set_pc(DisasContext *s, TCGv_i64 src)
{
    gen_top_byte_ignore(s, cpu_pc, src, s->tbii);
}
212
213
214
215
216
217
218
219
220
221
222
223
224
/*
 * Return a "clean" address for ADDR according to TBID, in a fresh
 * temporary (so it can be incremented independently of any dirty
 * write-back address).
 */
TCGv_i64 clean_data_tbi(DisasContext *s, TCGv_i64 addr)
{
    TCGv_i64 clean = new_tmp_a64(s);
    /*
     * In order to get the correct value in the FAR_ELx register,
     * we must present the memory subsystem with the "dirty" address
     * including the TBI.  In system mode we can make this work via
     * the TLB, dropping the TBI during translation.  But for user-only
     * mode we don't have that option, and must remove the top byte now.
     */
#ifdef CONFIG_USER_ONLY
    gen_top_byte_ignore(s, clean, addr, s->tbid);
#else
    tcg_gen_mov_i64(clean, addr);
#endif
    return clean;
}
235
236
/* Insert a zero allocation tag (bits [59:56]) into SRC, result in DST. */
static void gen_address_with_allocation_tag0(TCGv_i64 dst, TCGv_i64 src)
{
    tcg_gen_andi_i64(dst, src, ~MAKE_64BIT_MASK(56, 4));
}
241
/*
 * Emit a runtime probe of 1 << log2_size bytes at PTR for access type
 * ACC, raising any fault now rather than at the eventual access.
 */
static void gen_probe_access(DisasContext *s, TCGv_i64 ptr,
                             MMUAccessType acc, int log2_size)
{
    TCGv_i32 t_acc = tcg_const_i32(acc);
    TCGv_i32 t_idx = tcg_const_i32(get_mem_index(s));
    TCGv_i32 t_size = tcg_const_i32(1 << log2_size);

    gen_helper_probe_access(cpu_env, ptr, t_acc, t_idx, t_size);
    tcg_temp_free_i32(t_acc);
    tcg_temp_free_i32(t_idx);
    tcg_temp_free_i32(t_size);
}
254
255
256
257
258
259
260
/*
 * For MTE, check a single logical or atomic access against its tag.
 * Returns the (possibly) tag-checked, TBI-cleaned address to use for
 * the memory operation.  When MTE is not active for the chosen regime
 * this reduces to clean_data_tbi().
 */
static TCGv_i64 gen_mte_check1_mmuidx(DisasContext *s, TCGv_i64 addr,
                                      bool is_write, bool tag_checked,
                                      int log2_size, bool is_unpriv,
                                      int core_idx)
{
    if (tag_checked && s->mte_active[is_unpriv]) {
        TCGv_i32 tcg_desc;
        TCGv_i64 ret;
        int desc = 0;

        /* Pack the parameters the mte_check1 helper needs into one word. */
        desc = FIELD_DP32(desc, MTEDESC, MIDX, core_idx);
        desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
        desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
        desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
        desc = FIELD_DP32(desc, MTEDESC, ESIZE, 1 << log2_size);
        tcg_desc = tcg_const_i32(desc);

        ret = new_tmp_a64(s);
        gen_helper_mte_check1(ret, cpu_env, tcg_desc, addr);
        tcg_temp_free_i32(tcg_desc);

        return ret;
    }
    return clean_data_tbi(s, addr);
}
286
/* MTE check of a single access with the current (privileged) mem index. */
TCGv_i64 gen_mte_check1(DisasContext *s, TCGv_i64 addr, bool is_write,
                        bool tag_checked, int log2_size)
{
    return gen_mte_check1_mmuidx(s, addr, is_write, tag_checked, log2_size,
                                 false, get_mem_index(s));
}
293
294
295
296
/*
 * For MTE, check multiple logical sequential accesses totalling
 * total_size bytes of log2_esize-sized elements.  Falls back to the
 * single-access check when the whole transfer is one element.
 */
TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write,
                        bool tag_checked, int log2_esize, int total_size)
{
    if (tag_checked && s->mte_active[0] && total_size != (1 << log2_esize)) {
        TCGv_i32 tcg_desc;
        TCGv_i64 ret;
        int desc = 0;

        desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s));
        desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
        desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
        desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
        desc = FIELD_DP32(desc, MTEDESC, ESIZE, 1 << log2_esize);
        desc = FIELD_DP32(desc, MTEDESC, TSIZE, total_size);
        tcg_desc = tcg_const_i32(desc);

        ret = new_tmp_a64(s);
        gen_helper_mte_checkN(ret, cpu_env, tcg_desc, addr);
        tcg_temp_free_i32(tcg_desc);

        return ret;
    }
    return gen_mte_check1(s, addr, is_write, tag_checked, log2_esize);
}
321
/* A condition-code test widened to 64 bits: cond applied to value vs 0. */
typedef struct DisasCompare64 {
    TCGCond cond;
    TCGv_i64 value;
} DisasCompare64;
326
/* Build a 64-bit DisasCompare for AArch64 condition code CC. */
static void a64_test_cc(DisasCompare64 *c64, int cc)
{
    DisasCompare c32;

    arm_test_cc(&c32, cc);

    /*
     * Sign-extend the 32-bit value so that the GE/LT comparisons work
     * correctly.  The equality comparisons are unaffected by the choice.
     */
    c64->cond = c32.cond;
    c64->value = tcg_temp_new_i64();
    tcg_gen_ext_i32_i64(c64->value, c32.value);

    arm_free_cc(&c32);
}
341
/* Release the temporary owned by a DisasCompare64 from a64_test_cc(). */
static void a64_free_cc(DisasCompare64 *c64)
{
    tcg_temp_free_i64(c64->value);
}
346
/* Raise a QEMU-internal exception (one not visible to the guest). */
static void gen_exception_internal(int excp)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);

    assert(excp_is_internal(excp));
    gen_helper_exception_internal(cpu_env, tcg_excp);
    tcg_temp_free_i32(tcg_excp);
}
355
/* Raise an internal exception with the PC set to the faulting insn. */
static void gen_exception_internal_insn(DisasContext *s, uint64_t pc, int excp)
{
    gen_a64_set_pc_im(pc);
    gen_exception_internal(excp);
    s->base.is_jmp = DISAS_NORETURN;
}
362
/* Raise a guest exception with the given syndrome at target_el. */
static void gen_exception_insn(DisasContext *s, uint64_t pc, int excp,
                               uint32_t syndrome, uint32_t target_el)
{
    gen_a64_set_pc_im(pc);
    gen_exception(excp, syndrome, target_el);
    s->base.is_jmp = DISAS_NORETURN;
}
370
/* Raise the semihosting/BKPT exception for the current instruction. */
static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syndrome)
{
    TCGv_i32 tcg_syn;

    gen_a64_set_pc_im(s->pc_curr);
    tcg_syn = tcg_const_i32(syndrome);
    gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
    tcg_temp_free_i32(tcg_syn);
    s->base.is_jmp = DISAS_NORETURN;
}
381
static void gen_step_complete_exception(DisasContext *s)
{
    /*
     * We just completed step of an insn. Move from Active-not-pending
     * to Active-pending, and then also take the swstep exception.
     * This corresponds to making the (IMPDEF) choice to prioritize
     * swstep exceptions over asynchronous exceptions taken to an exception
     * level where debug is disabled. This choice has the advantage that
     * we do not need to maintain internal state corresponding to the
     * ISV/EX syndrome bits between completion of the step and generation
     * of the exception, and our syndrome information is always correct.
     */
    gen_ss_advance(s);
    gen_swstep_exception(s, 1, s->is_ldex);
    s->base.is_jmp = DISAS_NORETURN;
}
397
/* Decide whether a direct goto_tb link to DEST is permitted. */
static inline bool use_goto_tb(DisasContext *s, int n, uint64_t dest)
{
    /*
     * No direct tb linking with singlestep (either QEMU's or the ARM
     * debug architecture kind) or deterministic io.
     */
    if (s->base.singlestep_enabled || s->ss_active ||
        (tb_cflags(s->base.tb) & CF_LAST_IO)) {
        return false;
    }

#ifndef CONFIG_USER_ONLY
    /* Only link tbs from inside the same guest page */
    if ((s->base.tb->pc & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) {
        return false;
    }
#endif

    return true;
}
417
/* Emit a jump to DEST, using a direct TB link when possible. */
static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest)
{
    const TranslationBlock *tb;

    tb = s->base.tb;
    if (use_goto_tb(s, n, dest)) {
        tcg_gen_goto_tb(n);
        gen_a64_set_pc_im(dest);
        tcg_gen_exit_tb(tb, n);
        s->base.is_jmp = DISAS_NORETURN;
    } else {
        gen_a64_set_pc_im(dest);
        if (s->ss_active) {
            /* Architectural single-step: raise the step exception now. */
            gen_step_complete_exception(s);
        } else if (s->base.singlestep_enabled) {
            /* QEMU (gdbstub) single-step: stop with EXCP_DEBUG. */
            gen_exception_internal(EXCP_DEBUG);
        } else {
            tcg_gen_lookup_and_goto_ptr();
            s->base.is_jmp = DISAS_NORETURN;
        }
    }
}
440
/* Generate the UNDEF exception for an unallocated instruction encoding. */
void unallocated_encoding(DisasContext *s)
{
    /* Unallocated and reserved encodings are uncategorized */
    gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
}
447
/* Reset the per-insn temporary bookkeeping (poisoning it when debugging). */
static void init_tmp_a64_array(DisasContext *s)
{
#ifdef CONFIG_DEBUG_TCG
    memset(s->tmp_a64, 0, sizeof(s->tmp_a64));
#endif
    s->tmp_a64_count = 0;
}
455
456static void free_tmp_a64(DisasContext *s)
457{
458 int i;
459 for (i = 0; i < s->tmp_a64_count; i++) {
460 tcg_temp_free_i64(s->tmp_a64[i]);
461 }
462 init_tmp_a64_array(s);
463}
464
/* Allocate a fresh i64 temporary, tracked for bulk release at insn end. */
TCGv_i64 new_tmp_a64(DisasContext *s)
{
    assert(s->tmp_a64_count < TMP_A64_MAX);
    return s->tmp_a64[s->tmp_a64_count++] = tcg_temp_new_i64();
}
470
/* As new_tmp_a64(), but a "local" temp that survives a TCG branch. */
TCGv_i64 new_tmp_a64_local(DisasContext *s)
{
    assert(s->tmp_a64_count < TMP_A64_MAX);
    return s->tmp_a64[s->tmp_a64_count++] = tcg_temp_local_new_i64();
}
476
/* Allocate a tracked temporary initialized to zero. */
TCGv_i64 new_tmp_a64_zero(DisasContext *s)
{
    TCGv_i64 t = new_tmp_a64(s);
    tcg_gen_movi_i64(t, 0);
    return t;
}
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499TCGv_i64 cpu_reg(DisasContext *s, int reg)
500{
501 if (reg == 31) {
502 return new_tmp_a64_zero(s);
503 } else {
504 return cpu_X[reg];
505 }
506}
507
508
/* Register access for when register 31 == SP rather than XZR. */
TCGv_i64 cpu_reg_sp(DisasContext *s, int reg)
{
    return cpu_X[reg];
}
513
514
515
516
517
518TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf)
519{
520 TCGv_i64 v = new_tmp_a64(s);
521 if (reg != 31) {
522 if (sf) {
523 tcg_gen_mov_i64(v, cpu_X[reg]);
524 } else {
525 tcg_gen_ext32u_i64(v, cpu_X[reg]);
526 }
527 } else {
528 tcg_gen_movi_i64(v, 0);
529 }
530 return v;
531}
532
533TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf)
534{
535 TCGv_i64 v = new_tmp_a64(s);
536 if (sf) {
537 tcg_gen_mov_i64(v, cpu_X[reg]);
538 } else {
539 tcg_gen_ext32u_i64(v, cpu_X[reg]);
540 }
541 return v;
542}
543
544
545
546
547
548
/* Offset of the low SIZE bits of FP/vector register Vn, i.e. element 0. */
static inline int fp_reg_offset(DisasContext *s, int regno, MemOp size)
{
    return vec_reg_offset(s, regno, 0, size);
}
553
554
/* Offset of the high half of the 128 bit vector register Qn */
static inline int fp_reg_hi_offset(DisasContext *s, int regno)
{
    return vec_reg_offset(s, regno, 1, MO_64);
}
559
560
561
562
563
564
565
/*
 * Read the bottom 64 bits of vector register REG into a new i64 temp.
 * NOTE: the caller owns (and must free) the returned temporary; it is
 * not tracked by the tmp_a64 array.
 */
static TCGv_i64 read_fp_dreg(DisasContext *s, int reg)
{
    TCGv_i64 v = tcg_temp_new_i64();

    tcg_gen_ld_i64(v, cpu_env, fp_reg_offset(s, reg, MO_64));
    return v;
}
573
/* Read the bottom 32 bits of vector register REG into a new i32 temp. */
static TCGv_i32 read_fp_sreg(DisasContext *s, int reg)
{
    TCGv_i32 v = tcg_temp_new_i32();

    tcg_gen_ld_i32(v, cpu_env, fp_reg_offset(s, reg, MO_32));
    return v;
}
581
/* Read the bottom 16 bits of vector register REG, zero-extended to i32. */
static TCGv_i32 read_fp_hreg(DisasContext *s, int reg)
{
    TCGv_i32 v = tcg_temp_new_i32();

    tcg_gen_ld16u_i32(v, cpu_env, fp_reg_offset(s, reg, MO_16));
    return v;
}
589
590
591
592
/*
 * Clear the bits above an N-bit vector, for N = (is_q ? 128 : 64).
 * If SVE is not enabled, then there are only 128 bits in the vector.
 */
static void clear_vec_high(DisasContext *s, bool is_q, int rd)
{
    unsigned ofs = fp_reg_offset(s, rd, MO_64);
    unsigned vsz = vec_full_reg_size(s);

    /* Nop move, with side effect of clearing the tail. */
    tcg_gen_gvec_mov(MO_64, ofs, ofs, is_q ? 16 : 8, vsz);
}
601
/* Write V to the low 64 bits of Vd and zero the rest of the register. */
void write_fp_dreg(DisasContext *s, int reg, TCGv_i64 v)
{
    unsigned ofs = fp_reg_offset(s, reg, MO_64);

    tcg_gen_st_i64(v, cpu_env, ofs);
    clear_vec_high(s, false, reg);
}
609
/* Write the 32-bit V (zero-extended to 64) to Vd, zeroing the rest. */
static void write_fp_sreg(DisasContext *s, int reg, TCGv_i32 v)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp, v);
    write_fp_dreg(s, reg, tmp);
    tcg_temp_free_i64(tmp);
}
618
619
/* Expand a 2-operand AdvSIMD vector operation using an expander function. */
static void gen_gvec_fn2(DisasContext *s, bool is_q, int rd, int rn,
                         GVecGen2Fn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            is_q ? 16 : 8, vec_full_reg_size(s));
}
626
627
628
629
/*
 * Expand a 2-operand + immediate AdvSIMD vector operation using
 * an expander function.
 */
static void gen_gvec_fn2i(DisasContext *s, bool is_q, int rd, int rn,
                          int64_t imm, GVecGen2iFn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            imm, is_q ? 16 : 8, vec_full_reg_size(s));
}
636
637
/* Expand a 3-operand AdvSIMD vector operation using an expander function. */
static void gen_gvec_fn3(DisasContext *s, bool is_q, int rd, int rn, int rm,
                         GVecGen3Fn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            vec_full_reg_offset(s, rm), is_q ? 16 : 8, vec_full_reg_size(s));
}
644
645
/* Expand a 4-operand AdvSIMD vector operation using an expander function. */
static void gen_gvec_fn4(DisasContext *s, bool is_q, int rd, int rn, int rm,
                         int rx, GVecGen4Fn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            vec_full_reg_offset(s, rm), vec_full_reg_offset(s, rx),
            is_q ? 16 : 8, vec_full_reg_size(s));
}
653
654
/* Expand a 2-operand operation using an out-of-line helper. */
static void gen_gvec_op2_ool(DisasContext *s, bool is_q, int rd,
                             int rn, int data, gen_helper_gvec_2 *fn)
{
    tcg_gen_gvec_2_ool(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
}
662
663
/* Expand a 3-operand operation using an out-of-line helper.  */
static void gen_gvec_op3_ool(DisasContext *s, bool is_q, int rd,
                             int rn, int rm, int data, gen_helper_gvec_3 *fn)
{
    tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm),
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
}
672
673
674
675
/*
 * Expand a 3-operand + fpstatus pointer + simd data value operation using
 * an out-of-line helper.
 */
static void gen_gvec_op3_fpst(DisasContext *s, bool is_q, int rd, int rn,
                              int rm, bool is_fp16, int data,
                              gen_helper_gvec_3_ptr *fn)
{
    TCGv_ptr fpst = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
    tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm), fpst,
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
    tcg_temp_free_ptr(fpst);
}
687
688
/*
 * Expand a 3-operand + qc + operation using an out-of-line helper;
 * the helper updates vfp.qc (saturation flag) via the passed pointer.
 */
static void gen_gvec_op3_qc(DisasContext *s, bool is_q, int rd, int rn,
                            int rm, gen_helper_gvec_3_ptr *fn)
{
    TCGv_ptr qc_ptr = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(qc_ptr, cpu_env, offsetof(CPUARMState, vfp.qc));
    tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm), qc_ptr,
                       is_q ? 16 : 8, vec_full_reg_size(s), 0, fn);
    tcg_temp_free_ptr(qc_ptr);
}
701
702
703
704
/*
 * Set ZF and NF based on a 64 bit result.  NF is the high 32 bits of
 * the result; ZF is the OR of both halves (zero iff the whole result
 * is zero, matching the "ZF != 0 means nonzero" flag convention).
 */
static inline void gen_set_NZ64(TCGv_i64 result)
{
    tcg_gen_extr_i64_i32(cpu_ZF, cpu_NF, result);
    tcg_gen_or_i32(cpu_ZF, cpu_ZF, cpu_NF);
}
710
711
/* Update the Sixty-Four bit (SF) N and Z flags; clear C and V. */
static inline void gen_logic_CC(int sf, TCGv_i64 result)
{
    if (sf) {
        gen_set_NZ64(result);
    } else {
        tcg_gen_extrl_i64_i32(cpu_ZF, result);
        tcg_gen_mov_i32(cpu_NF, cpu_ZF);
    }
    tcg_gen_movi_i32(cpu_CF, 0);
    tcg_gen_movi_i32(cpu_VF, 0);
}
723
724
/* dest = T0 + T1; compute C, N, V and Z flags */
static void gen_add_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        TCGv_i64 result, flag, tmp;
        result = tcg_temp_new_i64();
        flag = tcg_temp_new_i64();
        tmp = tcg_temp_new_i64();

        /* 128-bit add of (t0, 0) + (t1, 0): the high word is the carry. */
        tcg_gen_movi_i64(tmp, 0);
        tcg_gen_add2_i64(result, flag, t0, tmp, t1, tmp);

        tcg_gen_extrl_i64_i32(cpu_CF, flag);

        gen_set_NZ64(result);

        /* V set iff inputs have same sign and result differs: */
        /* (result ^ t0) & ~(t0 ^ t1), taking the sign bit. */
        tcg_gen_xor_i64(flag, result, t0);
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_andc_i64(flag, flag, tmp);
        tcg_temp_free_i64(tmp);
        tcg_gen_extrh_i64_i32(cpu_VF, flag);

        tcg_gen_mov_i64(dest, result);
        tcg_temp_free_i64(result);
        tcg_temp_free_i64(flag);
    } else {
        /* 32 bit arithmetic */
        TCGv_i32 t0_32 = tcg_temp_new_i32();
        TCGv_i32 t1_32 = tcg_temp_new_i32();
        TCGv_i32 tmp = tcg_temp_new_i32();

        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_extrl_i64_i32(t0_32, t0);
        tcg_gen_extrl_i64_i32(t1_32, t1);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, t1_32, tmp);
        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
        /* dest gets the zero-extended 32-bit result (held in cpu_NF). */
        tcg_gen_extu_i32_i64(dest, cpu_NF);

        tcg_temp_free_i32(tmp);
        tcg_temp_free_i32(t0_32);
        tcg_temp_free_i32(t1_32);
    }
}
770
771
/* dest = T0 - T1; compute C, N, V and Z flags */
static void gen_sub_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        /* 64 bit arithmetic */
        TCGv_i64 result, flag, tmp;

        result = tcg_temp_new_i64();
        flag = tcg_temp_new_i64();
        tcg_gen_sub_i64(result, t0, t1);

        gen_set_NZ64(result);

        /* C is "no borrow": set iff t0 >= t1 unsigned. */
        tcg_gen_setcond_i64(TCG_COND_GEU, flag, t0, t1);
        tcg_gen_extrl_i64_i32(cpu_CF, flag);

        /* V set iff inputs have different signs and result sign != t0. */
        tcg_gen_xor_i64(flag, result, t0);
        tmp = tcg_temp_new_i64();
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_and_i64(flag, flag, tmp);
        tcg_temp_free_i64(tmp);
        tcg_gen_extrh_i64_i32(cpu_VF, flag);
        tcg_gen_mov_i64(dest, result);
        tcg_temp_free_i64(flag);
        tcg_temp_free_i64(result);
    } else {
        /* 32 bit arithmetic */
        TCGv_i32 t0_32 = tcg_temp_new_i32();
        TCGv_i32 t1_32 = tcg_temp_new_i32();
        TCGv_i32 tmp;

        tcg_gen_extrl_i64_i32(t0_32, t0);
        tcg_gen_extrl_i64_i32(t1_32, t1);
        tcg_gen_sub_i32(cpu_NF, t0_32, t1_32);
        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0_32, t1_32);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_temp_free_i32(t0_32);
        tcg_temp_free_i32(t1_32);
        tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
        tcg_temp_free_i32(tmp);
        tcg_gen_extu_i32_i64(dest, cpu_NF);
    }
}
817
818
/* dest = T0 + T1 + CF; do not compute flags. */
static void gen_adc(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    TCGv_i64 flag = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(flag, cpu_CF);
    tcg_gen_add_i64(dest, t0, t1);
    tcg_gen_add_i64(dest, dest, flag);
    tcg_temp_free_i64(flag);

    if (!sf) {
        /* 32-bit form: truncate the result to 32 bits. */
        tcg_gen_ext32u_i64(dest, dest);
    }
}
831
832
/* dest = T0 + T1 + CF; compute C, N, V and Z flags. */
static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        TCGv_i64 result, cf_64, vf_64, tmp;
        result = tcg_temp_new_i64();
        cf_64 = tcg_temp_new_i64();
        vf_64 = tcg_temp_new_i64();
        tmp = tcg_const_i64(0);

        /* Two chained add2 ops: t0 + CF, then + t1; carries accumulate. */
        tcg_gen_extu_i32_i64(cf_64, cpu_CF);
        tcg_gen_add2_i64(result, cf_64, t0, tmp, cf_64, tmp);
        tcg_gen_add2_i64(result, cf_64, result, cf_64, t1, tmp);
        tcg_gen_extrl_i64_i32(cpu_CF, cf_64);
        gen_set_NZ64(result);

        /* V: same-sign inputs, different-sign result (sign bit). */
        tcg_gen_xor_i64(vf_64, result, t0);
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_andc_i64(vf_64, vf_64, tmp);
        tcg_gen_extrh_i64_i32(cpu_VF, vf_64);

        tcg_gen_mov_i64(dest, result);

        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(vf_64);
        tcg_temp_free_i64(cf_64);
        tcg_temp_free_i64(result);
    } else {
        TCGv_i32 t0_32, t1_32, tmp;
        t0_32 = tcg_temp_new_i32();
        t1_32 = tcg_temp_new_i32();
        tmp = tcg_const_i32(0);

        tcg_gen_extrl_i64_i32(t0_32, t0);
        tcg_gen_extrl_i64_i32(t1_32, t1);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1_32, tmp);

        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
        tcg_gen_extu_i32_i64(dest, cpu_NF);

        tcg_temp_free_i32(tmp);
        tcg_temp_free_i32(t1_32);
        tcg_temp_free_i32(t0_32);
    }
}
881
882
883
884
885
886
887
888
/*
 * Store from GPR register to memory, using the supplied mmu index.
 * The ISS-related parameters, if iss_valid, describe the access for
 * the instruction-specific syndrome reported on a data abort.
 */
static void do_gpr_st_memidx(DisasContext *s, TCGv_i64 source,
                             TCGv_i64 tcg_addr, int size, int memidx,
                             bool iss_valid,
                             unsigned int iss_srt,
                             bool iss_sf, bool iss_ar)
{
    g_assert(size <= 3);
    tcg_gen_qemu_st_i64(source, tcg_addr, memidx, s->be_data + size);

    if (iss_valid) {
        uint32_t syn;

        syn = syn_data_abort_with_iss(0,
                                      size,
                                      false,
                                      iss_srt,
                                      iss_sf,
                                      iss_ar,
                                      0, 0, 0, 0, 0, false);
        disas_set_insn_syndrome(s, syn);
    }
}
911
/* Store from GPR register to memory with the current mem index. */
static void do_gpr_st(DisasContext *s, TCGv_i64 source,
                      TCGv_i64 tcg_addr, int size,
                      bool iss_valid,
                      unsigned int iss_srt,
                      bool iss_sf, bool iss_ar)
{
    do_gpr_st_memidx(s, source, tcg_addr, size, get_mem_index(s),
                     iss_valid, iss_srt, iss_sf, iss_ar);
}
921
922
923
924
/*
 * Load from memory to GPR register, using the supplied mmu index.
 * is_signed sign-extends the loaded value; extend then re-truncates a
 * signed sub-64-bit load to the unsigned 32-bit register form.
 */
static void do_gpr_ld_memidx(DisasContext *s,
                             TCGv_i64 dest, TCGv_i64 tcg_addr,
                             int size, bool is_signed,
                             bool extend, int memidx,
                             bool iss_valid, unsigned int iss_srt,
                             bool iss_sf, bool iss_ar)
{
    MemOp memop = s->be_data + size;

    g_assert(size <= 3);

    if (is_signed) {
        memop += MO_SIGN;
    }

    tcg_gen_qemu_ld_i64(dest, tcg_addr, memidx, memop);

    if (extend && is_signed) {
        g_assert(size < 3);
        /* Zero the top 32 bits after the sign-extending load. */
        tcg_gen_ext32u_i64(dest, dest);
    }

    if (iss_valid) {
        uint32_t syn;

        syn = syn_data_abort_with_iss(0,
                                      size,
                                      is_signed,
                                      iss_srt,
                                      iss_sf,
                                      iss_ar,
                                      0, 0, 0, 0, 0, false);
        disas_set_insn_syndrome(s, syn);
    }
}
960
/* Load from memory to GPR register with the current mem index. */
static void do_gpr_ld(DisasContext *s,
                      TCGv_i64 dest, TCGv_i64 tcg_addr,
                      int size, bool is_signed, bool extend,
                      bool iss_valid, unsigned int iss_srt,
                      bool iss_sf, bool iss_ar)
{
    do_gpr_ld_memidx(s, dest, tcg_addr, size, is_signed, extend,
                     get_mem_index(s),
                     iss_valid, iss_srt, iss_sf, iss_ar);
}
971
972
973
974
/* Store from FP register to memory; size is log2 of the byte count. */
static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, int size)
{
    /* This writes the bottom N bits of a 128 bit wide vector to memory */
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_ld_i64(tmp, cpu_env, fp_reg_offset(s, srcidx, MO_64));
    if (size < 4) {
        tcg_gen_qemu_st_i64(tmp, tcg_addr, get_mem_index(s),
                            s->be_data + size);
    } else {
        /* 128-bit store: two 64-bit stores, halves swapped for big-endian. */
        bool be = s->be_data == MO_BE;
        TCGv_i64 tcg_hiaddr = tcg_temp_new_i64();

        tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
        tcg_gen_qemu_st_i64(tmp, be ? tcg_hiaddr : tcg_addr, get_mem_index(s),
                            s->be_data | MO_Q);
        tcg_gen_ld_i64(tmp, cpu_env, fp_reg_hi_offset(s, srcidx));
        tcg_gen_qemu_st_i64(tmp, be ? tcg_addr : tcg_hiaddr, get_mem_index(s),
                            s->be_data | MO_Q);
        tcg_temp_free_i64(tcg_hiaddr);
    }

    tcg_temp_free_i64(tmp);
}
998
999
1000
1001
/* Load from memory to FP register; size is log2 of the byte count. */
static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size)
{
    /* This always zero-extends and writes to a full 128 bit wide vector */
    TCGv_i64 tmplo = tcg_temp_new_i64();
    TCGv_i64 tmphi = NULL;

    if (size < 4) {
        MemOp memop = s->be_data + size;
        tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), memop);
    } else {
        /* 128-bit load: two 64-bit loads, halves swapped for big-endian. */
        bool be = s->be_data == MO_BE;
        TCGv_i64 tcg_hiaddr;

        tmphi = tcg_temp_new_i64();
        tcg_hiaddr = tcg_temp_new_i64();

        tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
        tcg_gen_qemu_ld_i64(tmplo, be ? tcg_hiaddr : tcg_addr, get_mem_index(s),
                            s->be_data | MO_Q);
        tcg_gen_qemu_ld_i64(tmphi, be ? tcg_addr : tcg_hiaddr, get_mem_index(s),
                            s->be_data | MO_Q);
        tcg_temp_free_i64(tcg_hiaddr);
    }

    tcg_gen_st_i64(tmplo, cpu_env, fp_reg_offset(s, destidx, MO_64));
    tcg_temp_free_i64(tmplo);

    if (tmphi) {
        tcg_gen_st_i64(tmphi, cpu_env, fp_reg_hi_offset(s, destidx));
        tcg_temp_free_i64(tmphi);
    }
    /* Zero the remainder of the vector register. */
    clear_vec_high(s, tmphi != NULL, destidx);
}
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
/*
 * Get value of an element within a vector register, widened (signed or
 * unsigned per MO_SIGN in memop) into an i64.
 */
static void read_vec_element(DisasContext *s, TCGv_i64 tcg_dest, int srcidx,
                             int element, MemOp memop)
{
    int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_ld8u_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_ld16u_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_32:
        tcg_gen_ld32u_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_8|MO_SIGN:
        tcg_gen_ld8s_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16|MO_SIGN:
        tcg_gen_ld16s_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_32|MO_SIGN:
        tcg_gen_ld32s_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_64:
    case MO_64|MO_SIGN:
        tcg_gen_ld_i64(tcg_dest, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}
1080
/* As read_vec_element(), but into an i32 (element size <= 32 bits). */
static void read_vec_element_i32(DisasContext *s, TCGv_i32 tcg_dest, int srcidx,
                                 int element, MemOp memop)
{
    int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_ld8u_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_ld16u_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_8|MO_SIGN:
        tcg_gen_ld8s_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16|MO_SIGN:
        tcg_gen_ld16s_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_32:
    case MO_32|MO_SIGN:
        tcg_gen_ld_i32(tcg_dest, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}
1106
1107
/* Set value of an element within a vector register from an i64 source. */
static void write_vec_element(DisasContext *s, TCGv_i64 tcg_src, int destidx,
                              int element, MemOp memop)
{
    int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_st8_i64(tcg_src, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_st16_i64(tcg_src, cpu_env, vect_off);
        break;
    case MO_32:
        tcg_gen_st32_i64(tcg_src, cpu_env, vect_off);
        break;
    case MO_64:
        tcg_gen_st_i64(tcg_src, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}
1129
/* As write_vec_element(), but from an i32 source (size <= 32 bits). */
static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src,
                                  int destidx, int element, MemOp memop)
{
    int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_st8_i32(tcg_src, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_st16_i32(tcg_src, cpu_env, vect_off);
        break;
    case MO_32:
        tcg_gen_st_i32(tcg_src, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}
1148
1149
/* Store from vector register element to memory */
static void do_vec_st(DisasContext *s, int srcidx, int element,
                      TCGv_i64 tcg_addr, int size, MemOp endian)
{
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();

    read_vec_element(s, tcg_tmp, srcidx, element, size);
    tcg_gen_qemu_st_i64(tcg_tmp, tcg_addr, get_mem_index(s), endian | size);

    tcg_temp_free_i64(tcg_tmp);
}
1160
1161
/* Load from memory into a vector register element */
static void do_vec_ld(DisasContext *s, int destidx, int element,
                      TCGv_i64 tcg_addr, int size, MemOp endian)
{
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();

    tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr, get_mem_index(s), endian | size);
    write_vec_element(s, tcg_tmp, destidx, element, size);

    tcg_temp_free_i64(tcg_tmp);
}
1172
1173
1174
1175
1176
1177
1178
1179
/*
 * Check that FP/Neon access is enabled. If it is, return true.
 * If not, emit code to generate an appropriate exception and return false.
 * This function corresponds to CheckFPEnabled().
 */
static bool fp_access_check(DisasContext *s)
{
    if (s->fp_excp_el) {
        /* A decode function must call this at most once per insn. */
        assert(!s->fp_access_checked);
        s->fp_access_checked = true;

        gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
                           syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
        return false;
    }
    s->fp_access_checked = true;
    return true;
}
1193
1194
1195
1196
/*
 * Check that SVE access is enabled.  If it is, return true.
 * If not, emit code to generate an appropriate exception and return false.
 */
bool sve_access_check(DisasContext *s)
{
    if (s->sve_excp_el) {
        assert(!s->sve_access_checked);
        s->sve_access_checked = true;

        gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
                           syn_sve_access_trap(), s->sve_excp_el);
        return false;
    }
    s->sve_access_checked = true;
    /* SVE access also requires FP access to be enabled. */
    return fp_access_check(s);
}
1210
1211
1212
1213
1214
1215
/*
 * This utility function is for doing register extension with an
 * optional shift. You will likely want to pass a temporary for the
 * destination register. See DecodeRegExtend() in the ARM ARM.
 */
static void ext_and_shift_reg(TCGv_i64 tcg_out, TCGv_i64 tcg_in,
                              int option, unsigned int shift)
{
    int extsize = extract32(option, 0, 2);
    bool is_signed = extract32(option, 2, 1);

    if (is_signed) {
        switch (extsize) {
        case 0:
            tcg_gen_ext8s_i64(tcg_out, tcg_in);
            break;
        case 1:
            tcg_gen_ext16s_i64(tcg_out, tcg_in);
            break;
        case 2:
            tcg_gen_ext32s_i64(tcg_out, tcg_in);
            break;
        case 3:
            tcg_gen_mov_i64(tcg_out, tcg_in);
            break;
        }
    } else {
        switch (extsize) {
        case 0:
            tcg_gen_ext8u_i64(tcg_out, tcg_in);
            break;
        case 1:
            tcg_gen_ext16u_i64(tcg_out, tcg_in);
            break;
        case 2:
            tcg_gen_ext32u_i64(tcg_out, tcg_in);
            break;
        case 3:
            tcg_gen_mov_i64(tcg_out, tcg_in);
            break;
        }
    }

    if (shift) {
        tcg_gen_shli_i64(tcg_out, tcg_out, shift);
    }
}
1258
static inline void gen_check_sp_alignment(DisasContext *s)
{
    /*
     * The AArch64 architecture mandates that (if enabled via PSTATE
     * or SCTLR bits) there is a check that SP is 16-aligned on every
     * SP-relative load or store (with an exception generated if it is not).
     * In line with general QEMU practice regarding misaligned accesses,
     * we omit these checks for the sake of guest execution speed.
     */
}
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283static inline AArch64DecodeFn *lookup_disas_fn(const AArch64DecodeTable *table,
1284 uint32_t insn)
1285{
1286 const AArch64DecodeTable *tptr = table;
1287
1288 while (tptr->mask) {
1289 if ((insn & tptr->mask) == tptr->pattern) {
1290 return tptr->disas_fn;
1291 }
1292 tptr++;
1293 }
1294 return NULL;
1295}
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
/*
 * Unconditional branch (immediate)
 *   31  30       26 25                                  0
 * +----+-----------+-------------------------------------+
 * | op | 0 0 1 0 1 |                 imm26               |
 * +----+-----------+-------------------------------------+
 */
static void disas_uncond_b_imm(DisasContext *s, uint32_t insn)
{
    uint64_t addr = s->pc_curr + sextract32(insn, 0, 26) * 4;

    if (insn & (1U << 31)) {
        /* BL Branch with link: write the return address to X30. */
        tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next);
    }

    /* B Branch / BL Branch with link */
    reset_btype(s);
    gen_goto_tb(s, 0, addr);
}
1324
1325
1326
1327
1328
1329
1330
/*
 * Compare and branch (immediate)
 *   31  30         25  24  23                  5 4      0
 * +----+-------------+----+---------------------+--------+
 * | sf | 0 1 1 0 1 0 | op |         imm19       |   Rt   |
 * +----+-------------+----+---------------------+--------+
 * op == 0: CBZ; op == 1: CBNZ.
 */
static void disas_comp_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int sf, op, rt;
    uint64_t addr;
    TCGLabel *label_match;
    TCGv_i64 tcg_cmp;

    sf = extract32(insn, 31, 1);
    op = extract32(insn, 24, 1);
    rt = extract32(insn, 0, 5);
    addr = s->pc_curr + sextract32(insn, 5, 19) * 4;

    tcg_cmp = read_cpu_reg(s, rt, sf);
    label_match = gen_new_label();

    reset_btype(s);
    tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
                        tcg_cmp, 0, label_match);

    /* Fall-through path: continue with the next insn. */
    gen_goto_tb(s, 0, s->base.pc_next);
    gen_set_label(label_match);
    gen_goto_tb(s, 1, addr);
}
1354
1355
1356
1357
1358
1359
1360
/*
 * Test and branch (immediate)
 *   31  30         25  24  23   19 18          5 4    0
 * +----+-------------+----+-------+-------------+------+
 * | b5 | 0 1 1 0 1 1 | op |  b40  |    imm14    |  Rt  |
 * +----+-------------+----+-------+-------------+------+
 * op == 0: TBZ; op == 1: TBNZ.  Bit number is b5:b40.
 */
static void disas_test_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int bit_pos, op, rt;
    uint64_t addr;
    TCGLabel *label_match;
    TCGv_i64 tcg_cmp;

    bit_pos = (extract32(insn, 31, 1) << 5) | extract32(insn, 19, 5);
    op = extract32(insn, 24, 1);
    addr = s->pc_curr + sextract32(insn, 5, 14) * 4;
    rt = extract32(insn, 0, 5);

    tcg_cmp = tcg_temp_new_i64();
    tcg_gen_andi_i64(tcg_cmp, cpu_reg(s, rt), (1ULL << bit_pos));
    label_match = gen_new_label();

    reset_btype(s);
    tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
                        tcg_cmp, 0, label_match);
    tcg_temp_free_i64(tcg_cmp);
    /* Fall-through path: continue with the next insn. */
    gen_goto_tb(s, 0, s->base.pc_next);
    gen_set_label(label_match);
    gen_goto_tb(s, 1, addr);
}
1385
1386
1387
1388
1389
1390
1391
/*
 * Conditional branch (immediate)
 *  31           25  24  23                  5   4  3    0
 * +---------------+----+---------------------+----+------+
 * | 0 1 0 1 0 1 0 | o1 |         imm19       | o0 | cond |
 * +---------------+----+---------------------+----+------+
 */
static void disas_cond_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int cond;
    uint64_t addr;

    if ((insn & (1 << 4)) || (insn & (1 << 24))) {
        unallocated_encoding(s);
        return;
    }
    addr = s->pc_curr + sextract32(insn, 5, 19) * 4;
    cond = extract32(insn, 0, 4);

    reset_btype(s);
    if (cond < 0x0e) {
        /* genuinely conditional branches */
        TCGLabel *label_match = gen_new_label();
        arm_gen_test_cc(cond, label_match);
        gen_goto_tb(s, 0, s->base.pc_next);
        gen_set_label(label_match);
        gen_goto_tb(s, 1, addr);
    } else {
        /* 0xe and 0xf are both "always" conditions */
        gen_goto_tb(s, 0, addr);
    }
}
1417
1418
/* HINT instruction group, including various allocated HINTs */
static void handle_hint(DisasContext *s, uint32_t insn,
                        unsigned int op1, unsigned int op2, unsigned int crm)
{
    unsigned int selector = crm << 3 | op2;

    if (op1 != 3) {
        unallocated_encoding(s);
        return;
    }

    switch (selector) {
    case 0b00000: /* NOP */
        break;
    case 0b00011: /* WFI */
        s->base.is_jmp = DISAS_WFI;
        break;
    case 0b00001: /* YIELD */
        /*
         * When running single-threaded TCG code, use the helper to ensure
         * that the next round-robin scheduled vCPU gets a crack.  In MTTCG
         * mode we don't generate jumps to the yield/wfe helpers as it
         * won't affect the scheduling of other vCPUs.
         */
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            s->base.is_jmp = DISAS_YIELD;
        }
        break;
    case 0b00010: /* WFE */
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            s->base.is_jmp = DISAS_WFE;
        }
        break;
    case 0b00100: /* SEV */
    case 0b00101: /* SEVL */
        /* we treat all as NOP at least for now */
        break;
    case 0b00111: /* XPACLRI */
        if (s->pauth_active) {
            gen_helper_xpaci(cpu_X[30], cpu_env, cpu_X[30]);
        }
        break;
    case 0b01000: /* PACIA1716 */
        if (s->pauth_active) {
            gen_helper_pacia(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
        }
        break;
    case 0b01010: /* PACIB1716 */
        if (s->pauth_active) {
            gen_helper_pacib(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
        }
        break;
    case 0b01100: /* AUTIA1716 */
        if (s->pauth_active) {
            gen_helper_autia(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
        }
        break;
    case 0b01110: /* AUTIB1716 */
        if (s->pauth_active) {
            gen_helper_autib(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
        }
        break;
    case 0b11000: /* PACIAZ */
        if (s->pauth_active) {
            gen_helper_pacia(cpu_X[30], cpu_env, cpu_X[30],
                             new_tmp_a64_zero(s));
        }
        break;
    case 0b11001: /* PACIASP */
        if (s->pauth_active) {
            gen_helper_pacia(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
        }
        break;
    case 0b11010: /* PACIBZ */
        if (s->pauth_active) {
            gen_helper_pacib(cpu_X[30], cpu_env, cpu_X[30],
                             new_tmp_a64_zero(s));
        }
        break;
    case 0b11011: /* PACIBSP */
        if (s->pauth_active) {
            gen_helper_pacib(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
        }
        break;
    case 0b11100: /* AUTIAZ */
        if (s->pauth_active) {
            gen_helper_autia(cpu_X[30], cpu_env, cpu_X[30],
                             new_tmp_a64_zero(s));
        }
        break;
    case 0b11101: /* AUTIASP */
        if (s->pauth_active) {
            gen_helper_autia(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
        }
        break;
    case 0b11110: /* AUTIBZ */
        if (s->pauth_active) {
            gen_helper_autib(cpu_X[30], cpu_env, cpu_X[30],
                             new_tmp_a64_zero(s));
        }
        break;
    case 0b11111: /* AUTIBSP */
        if (s->pauth_active) {
            gen_helper_autib(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
        }
        break;
    default:
        /* default specified as NOP equivalent */
        break;
    }
}
1528
/* CLREX: clear the local exclusive monitor by invalidating the marked
 * address (-1 can never match a real exclusive address).
 */
static void gen_clrex(DisasContext *s, uint32_t insn)
{
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
1533
1534
/* CLREX, DSB, DMB, ISB and SB */
static void handle_sync(DisasContext *s, uint32_t insn,
                        unsigned int op1, unsigned int op2, unsigned int crm)
{
    TCGBar bar;

    if (op1 != 3) {
        unallocated_encoding(s);
        return;
    }

    switch (op2) {
    case 2: /* CLREX */
        gen_clrex(s, insn);
        return;
    case 4: /* DSB */
    case 5: /* DMB */
        /* crm[1:0] selects which accesses the barrier orders. */
        switch (crm & 3) {
        case 1: /* MBReqTypes_Reads */
            bar = TCG_BAR_SC | TCG_MO_LD_LD | TCG_MO_LD_ST;
            break;
        case 2: /* MBReqTypes_Writes */
            bar = TCG_BAR_SC | TCG_MO_ST_ST;
            break;
        default: /* MBReqTypes_All */
            bar = TCG_BAR_SC | TCG_MO_ALL;
            break;
        }
        tcg_gen_mb(bar);
        return;
    case 6: /* ISB */
        /*
         * We need to break the TB after this insn to execute
         * self-modifying code correctly and also to take
         * any pending interrupts immediately.
         */
        reset_btype(s);
        gen_goto_tb(s, 0, s->base.pc_next);
        return;

    case 7: /* SB */
        if (crm != 0 || !dc_isar_feature(aa64_sb, s)) {
            goto do_unallocated;
        }
        /*
         * TODO: There is no speculation barrier operation
         * in TCG; MB and end the TB instead.
         */
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        gen_goto_tb(s, 0, s->base.pc_next);
        return;

    default:
    do_unallocated:
        unallocated_encoding(s);
        return;
    }
}
1591
/*
 * XAFLAG (FEAT_FlagM2): convert the NZCV flags from the "external"
 * encoding back to the native ARM float-compare encoding, operating on
 * QEMU's split flag representation (NF/VF hold the flag in bit 31,
 * ZF is zero iff Z is set, CF is 0 or 1).
 */
static void gen_xaflag(void)
{
    TCGv_i32 z = tcg_temp_new_i32();

    /* z = 1 iff the Z flag is currently set */
    tcg_gen_setcondi_i32(TCG_COND_EQ, z, cpu_ZF, 0);

    /*
     * N = (!C & !Z), i.e.
     * (!C & !Z) << 31
     * (!(C | Z)) << 31
     * ~((C | Z) << 31)
     * ~-(C | Z)
     * (C | Z) - 1
     */
    tcg_gen_or_i32(cpu_NF, cpu_CF, z);
    tcg_gen_subi_i32(cpu_NF, cpu_NF, 1);

    /* Z = !(Z & C); remember ZF is "set iff stored value is zero" */
    tcg_gen_and_i32(cpu_ZF, z, cpu_CF);
    tcg_gen_xori_i32(cpu_ZF, cpu_ZF, 1);

    /* V = (!C & Z) << 31 -> -(Z & ~C) */
    tcg_gen_andc_i32(cpu_VF, z, cpu_CF);
    tcg_gen_neg_i32(cpu_VF, cpu_VF);

    /* C = C | Z */
    tcg_gen_or_i32(cpu_CF, cpu_CF, z);

    tcg_temp_free_i32(z);
}
1621
/*
 * AXFLAG (FEAT_FlagM2): convert the native float-compare NZCV encoding
 * to the "external" encoding: C &= !V, Z |= V, then N = V = 0.
 */
static void gen_axflag(void)
{
    tcg_gen_sari_i32(cpu_VF, cpu_VF, 31);         /* V ? -1 : 0 */
    tcg_gen_andc_i32(cpu_CF, cpu_CF, cpu_VF);     /* C & !V */

    /* !(Z | V) -> !(!ZF | V) -> ZF & !V -> ZF & ~VF */
    tcg_gen_andc_i32(cpu_ZF, cpu_ZF, cpu_VF);

    tcg_gen_movi_i32(cpu_NF, 0);
    tcg_gen_movi_i32(cpu_VF, 0);
}
1633
1634
/* MSR (immediate) - move immediate to processor state field */
static void handle_msr_i(DisasContext *s, uint32_t insn,
                         unsigned int op1, unsigned int op2, unsigned int crm)
{
    TCGv_i32 t1;
    int op = op1 << 3 | op2;

    /* End the TB by default, chaining is ok.  */
    s->base.is_jmp = DISAS_TOO_MANY;

    switch (op) {
    case 0x00: /* CFINV */
        if (crm != 0 || !dc_isar_feature(aa64_condm_4, s)) {
            goto do_unallocated;
        }
        tcg_gen_xori_i32(cpu_CF, cpu_CF, 1);
        s->base.is_jmp = DISAS_NEXT;
        break;

    case 0x01: /* XAFlag */
        if (crm != 0 || !dc_isar_feature(aa64_condm_5, s)) {
            goto do_unallocated;
        }
        gen_xaflag();
        s->base.is_jmp = DISAS_NEXT;
        break;

    case 0x02: /* AXFlag */
        if (crm != 0 || !dc_isar_feature(aa64_condm_5, s)) {
            goto do_unallocated;
        }
        gen_axflag();
        s->base.is_jmp = DISAS_NEXT;
        break;

    case 0x03: /* UAO */
        if (!dc_isar_feature(aa64_uao, s) || s->current_el == 0) {
            goto do_unallocated;
        }
        if (crm & 1) {
            set_pstate_bits(PSTATE_UAO);
        } else {
            clear_pstate_bits(PSTATE_UAO);
        }
        /* UAO is cached in hflags; refresh them for the next TB */
        t1 = tcg_const_i32(s->current_el);
        gen_helper_rebuild_hflags_a64(cpu_env, t1);
        tcg_temp_free_i32(t1);
        break;

    case 0x04: /* PAN */
        if (!dc_isar_feature(aa64_pan, s) || s->current_el == 0) {
            goto do_unallocated;
        }
        if (crm & 1) {
            set_pstate_bits(PSTATE_PAN);
        } else {
            clear_pstate_bits(PSTATE_PAN);
        }
        /* PAN is cached in hflags; refresh them for the next TB */
        t1 = tcg_const_i32(s->current_el);
        gen_helper_rebuild_hflags_a64(cpu_env, t1);
        tcg_temp_free_i32(t1);
        break;

    case 0x05: /* SPSel */
        if (s->current_el == 0) {
            goto do_unallocated;
        }
        t1 = tcg_const_i32(crm & PSTATE_SP);
        gen_helper_msr_i_spsel(cpu_env, t1);
        tcg_temp_free_i32(t1);
        break;

    case 0x19: /* SSBS */
        if (!dc_isar_feature(aa64_ssbs, s)) {
            goto do_unallocated;
        }
        if (crm & 1) {
            set_pstate_bits(PSTATE_SSBS);
        } else {
            clear_pstate_bits(PSTATE_SSBS);
        }
        /* Don't need to rebuild hflags since SSBS is a nop */
        break;

    case 0x1a: /* DIT */
        if (!dc_isar_feature(aa64_dit, s)) {
            goto do_unallocated;
        }
        if (crm & 1) {
            set_pstate_bits(PSTATE_DIT);
        } else {
            clear_pstate_bits(PSTATE_DIT);
        }
        /* There's no need to rebuild hflags because DIT is a nop */
        break;

    case 0x1e: /* DAIFSet */
        t1 = tcg_const_i32(crm);
        gen_helper_msr_i_daifset(cpu_env, t1);
        tcg_temp_free_i32(t1);
        break;

    case 0x1f: /* DAIFClear */
        t1 = tcg_const_i32(crm);
        gen_helper_msr_i_daifclear(cpu_env, t1);
        tcg_temp_free_i32(t1);
        /* For DAIFClear, exit the cpu loop to re-evaluate pending IRQs.  */
        s->base.is_jmp = DISAS_UPDATE_EXIT;
        break;

    case 0x1c: /* TCO */
        if (dc_isar_feature(aa64_mte, s)) {
            /* Full MTE is enabled -- set the TCO bit as directed. */
            if (crm & 1) {
                set_pstate_bits(PSTATE_TCO);
            } else {
                clear_pstate_bits(PSTATE_TCO);
            }
            t1 = tcg_const_i32(s->current_el);
            gen_helper_rebuild_hflags_a64(cpu_env, t1);
            tcg_temp_free_i32(t1);
            /* Many factors, including TCO, go into MTE_ACTIVE. */
            s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
        } else if (dc_isar_feature(aa64_mte_insn_reg, s)) {
            /* Only "instructions accessible at EL0" -- PSTATE.TCO is WI.  */
            s->base.is_jmp = DISAS_NEXT;
        } else {
            goto do_unallocated;
        }
        break;

    default:
    do_unallocated:
        unallocated_encoding(s);
        return;
    }
}
1771
/* Pack the current NZCV flags into bits [31:28] of tcg_rt (all other
 * bits zero), as read by MRS NZCV.
 */
static void gen_get_nzcv(TCGv_i64 tcg_rt)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    TCGv_i32 nzcv = tcg_temp_new_i32();

    /* build bit 31, N: cpu_NF holds N in its sign bit */
    tcg_gen_andi_i32(nzcv, cpu_NF, (1U << 31));
    /* build bit 30, Z: cpu_ZF is zero iff the Z flag is set */
    tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_ZF, 0);
    tcg_gen_deposit_i32(nzcv, nzcv, tmp, 30, 1);
    /* build bit 29, C: cpu_CF is already 0 or 1 */
    tcg_gen_deposit_i32(nzcv, nzcv, cpu_CF, 29, 1);
    /* build bit 28, V: cpu_VF holds V in its sign bit */
    tcg_gen_shri_i32(tmp, cpu_VF, 31);
    tcg_gen_deposit_i32(nzcv, nzcv, tmp, 28, 1);
    /* generate result */
    tcg_gen_extu_i32_i64(tcg_rt, nzcv);

    tcg_temp_free_i32(nzcv);
    tcg_temp_free_i32(tmp);
}
1793
/* Unpack bits [31:28] of tcg_rt into QEMU's split NZCV representation,
 * as written by MSR NZCV.
 */
static void gen_set_nzcv(TCGv_i64 tcg_rt)
{
    TCGv_i32 nzcv = tcg_temp_new_i32();

    /* take NZCV from R[t] */
    tcg_gen_extrl_i64_i32(nzcv, tcg_rt);

    /* bit 31, N: cpu_NF keeps N in its sign bit */
    tcg_gen_andi_i32(cpu_NF, nzcv, (1U << 31));
    /* bit 30, Z: cpu_ZF must be zero iff Z is set */
    tcg_gen_andi_i32(cpu_ZF, nzcv, (1 << 30));
    tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_ZF, cpu_ZF, 0);
    /* bit 29, C: cpu_CF stores 0 or 1 */
    tcg_gen_andi_i32(cpu_CF, nzcv, (1 << 29));
    tcg_gen_shri_i32(cpu_CF, cpu_CF, 29);
    /* bit 28, V: cpu_VF keeps V in its sign bit (shift 28 -> 31) */
    tcg_gen_andi_i32(cpu_VF, nzcv, (1 << 28));
    tcg_gen_shli_i32(cpu_VF, cpu_VF, 3);
    tcg_temp_free_i32(nzcv);
}
1814
1815
1816
1817
1818
1819
1820
1821
/* MRS - move from system register
 * MSR (register) - move to system register
 * SYS
 * SYSL
 * These are all essentially the same insn in 'read' and 'write'
 * forms, with the register arguments identifying where the data
 * is to go or come from.
 */
static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
                       unsigned int op0, unsigned int op1, unsigned int op2,
                       unsigned int crn, unsigned int crm, unsigned int rt)
{
    const ARMCPRegInfo *ri;
    TCGv_i64 tcg_rt;

    ri = get_arm_cp_reginfo(s->cp_regs,
                            ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP,
                                               crn, crm, op0, op1, op2));

    if (!ri) {
        /* Unknown register; this might be a guest error or a QEMU
         * unimplemented feature.
         */
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch64 "
                      "system register op0:%d op1:%d crn:%d crm:%d op2:%d\n",
                      isread ? "read" : "write", op0, op1, crn, crm, op2);
        unallocated_encoding(s);
        return;
    }

    /* Check access permissions */
    if (!cp_access_ok(s->current_el, ri, isread)) {
        unallocated_encoding(s);
        return;
    }

    if (ri->accessfn) {
        /* Emit code to perform further access permissions checks at
         * runtime; this may result in an exception.
         */
        TCGv_ptr tmpptr;
        TCGv_i32 tcg_syn, tcg_isread;
        uint32_t syndrome;

        gen_a64_set_pc_im(s->pc_curr);
        tmpptr = tcg_const_ptr(ri);
        syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
        tcg_syn = tcg_const_i32(syndrome);
        tcg_isread = tcg_const_i32(isread);
        gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn, tcg_isread);
        tcg_temp_free_ptr(tmpptr);
        tcg_temp_free_i32(tcg_syn);
        tcg_temp_free_i32(tcg_isread);
    } else if (ri->type & ARM_CP_RAISES_EXC) {
        /*
         * The readfn or writefn might raise an exception;
         * synchronize the CPU state in case it does.
         */
        gen_a64_set_pc_im(s->pc_curr);
    }

    /* Handle special cases first */
    switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
    case ARM_CP_NOP:
        return;
    case ARM_CP_NZCV:
        tcg_rt = cpu_reg(s, rt);
        if (isread) {
            gen_get_nzcv(tcg_rt);
        } else {
            gen_set_nzcv(tcg_rt);
        }
        return;
    case ARM_CP_CURRENTEL:
        /* Reads as current EL value from pstate, which is
         * guaranteed to be constant by the tb flags.
         */
        tcg_rt = cpu_reg(s, rt);
        tcg_gen_movi_i64(tcg_rt, s->current_el << 2);
        return;
    case ARM_CP_DC_ZVA:
        /* Writes clear the aligned block of memory which rt points into. */
        if (s->mte_active[0]) {
            TCGv_i32 t_desc;
            int desc = 0;

            desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s));
            desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
            desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
            t_desc = tcg_const_i32(desc);

            tcg_rt = new_tmp_a64(s);
            gen_helper_mte_check_zva(tcg_rt, cpu_env, t_desc, cpu_reg(s, rt));
            tcg_temp_free_i32(t_desc);
        } else {
            tcg_rt = clean_data_tbi(s, cpu_reg(s, rt));
        }
        gen_helper_dc_zva(cpu_env, tcg_rt);
        return;
    case ARM_CP_DC_GVA:
        {
            TCGv_i64 clean_addr, tag;

            /*
             * DC GVA writes only the allocation tags for the block: probe
             * the data store (so the right fault is raised even though no
             * data bytes are written), then store the tag from the top
             * byte of Xt if tags are enabled.
             */
            tcg_rt = cpu_reg(s, rt);
            clean_addr = clean_data_tbi(s, tcg_rt);
            gen_probe_access(s, clean_addr, MMU_DATA_STORE, MO_8);

            if (s->ata) {
                /* Extract the tag from the register to match STZGM.  */
                tag = tcg_temp_new_i64();
                tcg_gen_shri_i64(tag, tcg_rt, 56);
                gen_helper_stzgm_tags(cpu_env, clean_addr, tag);
                tcg_temp_free_i64(tag);
            }
        }
        return;
    case ARM_CP_DC_GZVA:
        {
            TCGv_i64 clean_addr, tag;

            /* For DC_GZVA, we can rely on DC_ZVA for the proper fault. */
            tcg_rt = cpu_reg(s, rt);
            clean_addr = clean_data_tbi(s, tcg_rt);
            gen_helper_dc_zva(cpu_env, clean_addr);

            if (s->ata) {
                /* Extract the tag from the register to match STZGM.  */
                tag = tcg_temp_new_i64();
                tcg_gen_shri_i64(tag, tcg_rt, 56);
                gen_helper_stzgm_tags(cpu_env, clean_addr, tag);
                tcg_temp_free_i64(tag);
            }
        }
        return;
    default:
        break;
    }
    if ((ri->type & ARM_CP_FPU) && !fp_access_check(s)) {
        return;
    } else if ((ri->type & ARM_CP_SVE) && !sve_access_check(s)) {
        return;
    }

    if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
        gen_io_start();
    }

    tcg_rt = cpu_reg(s, rt);

    if (isread) {
        if (ri->type & ARM_CP_CONST) {
            tcg_gen_movi_i64(tcg_rt, ri->resetvalue);
        } else if (ri->readfn) {
            TCGv_ptr tmpptr;
            tmpptr = tcg_const_ptr(ri);
            gen_helper_get_cp_reg64(tcg_rt, cpu_env, tmpptr);
            tcg_temp_free_ptr(tmpptr);
        } else {
            tcg_gen_ld_i64(tcg_rt, cpu_env, ri->fieldoffset);
        }
    } else {
        if (ri->type & ARM_CP_CONST) {
            /* If not forbidden by access permissions, treat as WI */
            return;
        } else if (ri->writefn) {
            TCGv_ptr tmpptr;
            tmpptr = tcg_const_ptr(ri);
            gen_helper_set_cp_reg64(cpu_env, tmpptr, tcg_rt);
            tcg_temp_free_ptr(tmpptr);
        } else {
            tcg_gen_st_i64(tcg_rt, cpu_env, ri->fieldoffset);
        }
    }

    if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
        /* I/O operations must end the TB here (whether read or write) */
        s->base.is_jmp = DISAS_UPDATE_EXIT;
    }
    if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
        /*
         * A write to any coprocessor regiser that ends a TB
         * must rebuild the hflags for the next TB.
         */
        TCGv_i32 tcg_el = tcg_const_i32(s->current_el);
        gen_helper_rebuild_hflags_a64(cpu_env, tcg_el);
        tcg_temp_free_i32(tcg_el);
        /*
         * We default to ending the TB on a coprocessor register write,
         * but allow this to be suppressed by the register definition
         * (usually only necessary to work around guest bugs).
         */
        s->base.is_jmp = DISAS_UPDATE_EXIT;
    }
}
2012
2013
2014
2015
2016
2017
2018
2019static void disas_system(DisasContext *s, uint32_t insn)
2020{
2021 unsigned int l, op0, op1, crn, crm, op2, rt;
2022 l = extract32(insn, 21, 1);
2023 op0 = extract32(insn, 19, 2);
2024 op1 = extract32(insn, 16, 3);
2025 crn = extract32(insn, 12, 4);
2026 crm = extract32(insn, 8, 4);
2027 op2 = extract32(insn, 5, 3);
2028 rt = extract32(insn, 0, 5);
2029
2030 if (op0 == 0) {
2031 if (l || rt != 31) {
2032 unallocated_encoding(s);
2033 return;
2034 }
2035 switch (crn) {
2036 case 2:
2037 handle_hint(s, insn, op1, op2, crm);
2038 break;
2039 case 3:
2040 handle_sync(s, insn, op1, op2, crm);
2041 break;
2042 case 4:
2043 handle_msr_i(s, insn, op1, op2, crm);
2044 break;
2045 default:
2046 unallocated_encoding(s);
2047 break;
2048 }
2049 return;
2050 }
2051 handle_sys(s, insn, l, op0, op1, op2, crn, crm, rt);
2052}
2053
2054
2055
2056
2057
2058
2059
2060
/* Exception generation
 *
 *  31             24 23 21 20                     5 4   2 1  0
 * +-----------------+-----+------------------------+-----+----+
 * | 1 1 0 1 0 1 0 0 | opc |          imm16         | op2 | LL |
 * +-----------------+-----+------------------------+-----+----+
 */
static void disas_exc(DisasContext *s, uint32_t insn)
{
    int opc = extract32(insn, 21, 3);
    int op2_ll = extract32(insn, 0, 5);
    int imm16 = extract32(insn, 5, 16);
    TCGv_i32 tmp;

    switch (opc) {
    case 0:
        /* For SVC, HVC and SMC we advance the single-step state
         * machine before taking the exception. This is architecturally
         * mandated, to ensure that single-stepping a system call
         * instruction works properly.
         */
        switch (op2_ll) {
        case 1:                                                     /* SVC */
            gen_ss_advance(s);
            gen_exception_insn(s, s->base.pc_next, EXCP_SWI,
                               syn_aa64_svc(imm16), default_exception_el(s));
            break;
        case 2:                                                     /* HVC */
            if (s->current_el == 0) {
                unallocated_encoding(s);
                break;
            }
            /* The pre HVC helper handles cases when HVC gets trapped
             * as an undefined insn by runtime configuration.
             */
            gen_a64_set_pc_im(s->pc_curr);
            gen_helper_pre_hvc(cpu_env);
            gen_ss_advance(s);
            gen_exception_insn(s, s->base.pc_next, EXCP_HVC,
                               syn_aa64_hvc(imm16), 2);
            break;
        case 3:                                                     /* SMC */
            if (s->current_el == 0) {
                unallocated_encoding(s);
                break;
            }
            gen_a64_set_pc_im(s->pc_curr);
            tmp = tcg_const_i32(syn_aa64_smc(imm16));
            gen_helper_pre_smc(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            gen_ss_advance(s);
            gen_exception_insn(s, s->base.pc_next, EXCP_SMC,
                               syn_aa64_smc(imm16), 3);
            break;
        default:
            unallocated_encoding(s);
            break;
        }
        break;
    case 1:
        if (op2_ll != 0) {
            unallocated_encoding(s);
            break;
        }
        /* BRK */
        gen_exception_bkpt_insn(s, syn_aa64_bkpt(imm16));
        break;
    case 2:
        if (op2_ll != 0) {
            unallocated_encoding(s);
            break;
        }
        /* HLT. This has two purposes.
         * Architecturally, it is an external halting debug instruction.
         * Since QEMU doesn't implement external debug, we treat this as
         * it is required for halting debug disabled: it will UNDEF.
         * Secondly, "HLT 0xf000" is the A64 semihosting syscall instruction.
         */
        if (semihosting_enabled() && imm16 == 0xf000) {
#ifndef CONFIG_USER_ONLY
            /* In system mode, don't allow userspace access to semihosting,
             * to provide some semblance of security (and for consistency
             * with our 32-bit semihosting).
             */
            if (s->current_el == 0) {
                unsupported_encoding(s, insn);
                break;
            }
#endif
            gen_exception_internal_insn(s, s->pc_curr, EXCP_SEMIHOST);
        } else {
            unsupported_encoding(s, insn);
        }
        break;
    case 5:
        if (op2_ll < 1 || op2_ll > 3) {
            unallocated_encoding(s);
            break;
        }
        /* DCPS1, DCPS2, DCPS3 */
        unsupported_encoding(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
2161
2162
2163
2164
2165
2166
2167
/* Unconditional branch (register)
 *  31           25 24   21 20   16 15   10 9    5 4     0
 * +---------------+-------+-------+-------+------+-------+
 * | 1 1 0 1 0 1 1 |  opc  |  op2  |  op3  |  Rn  |  op4  |
 * +---------------+-------+-------+-------+------+-------+
 */
static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
{
    unsigned int opc, op2, op3, rn, op4;
    unsigned btype_mod = 2;   /* 0 = BR, 1 = BLR, 2 = other (no BTYPE) */
    TCGv_i64 dst;
    TCGv_i64 modifier;

    opc = extract32(insn, 21, 4);
    op2 = extract32(insn, 16, 5);
    op3 = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    op4 = extract32(insn, 0, 5);

    if (op2 != 0x1f) {
        goto do_unallocated;
    }

    switch (opc) {
    case 0: /* BR */
    case 1: /* BLR */
    case 2: /* RET */
        btype_mod = opc;
        switch (op3) {
        case 0:
            /* BR, BLR, RET */
            if (op4 != 0) {
                goto do_unallocated;
            }
            dst = cpu_reg(s, rn);
            break;

        case 2:
        case 3:
            if (!dc_isar_feature(aa64_pauth, s)) {
                goto do_unallocated;
            }
            if (opc == 2) {
                /* RETAA, RETAB */
                if (rn != 0x1f || op4 != 0x1f) {
                    goto do_unallocated;
                }
                rn = 30;
                modifier = cpu_X[31];
            } else {
                /* BRAAZ, BRABZ, BLRAAZ, BLRABZ */
                if (op4 != 0x1f) {
                    goto do_unallocated;
                }
                modifier = new_tmp_a64_zero(s);
            }
            if (s->pauth_active) {
                dst = new_tmp_a64(s);
                if (op3 == 2) {
                    gen_helper_autia(dst, cpu_env, cpu_reg(s, rn), modifier);
                } else {
                    gen_helper_autib(dst, cpu_env, cpu_reg(s, rn), modifier);
                }
            } else {
                dst = cpu_reg(s, rn);
            }
            break;

        default:
            goto do_unallocated;
        }
        gen_a64_set_pc(s, dst);
        /* BLR also needs to load return address */
        if (opc == 1) {
            tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next);
        }
        break;

    case 8: /* BRAA */
    case 9: /* BLRAA */
        if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        if ((op3 & ~1) != 2) {
            goto do_unallocated;
        }
        btype_mod = opc & 1;
        if (s->pauth_active) {
            dst = new_tmp_a64(s);
            modifier = cpu_reg_sp(s, op4);
            if (op3 == 2) {
                gen_helper_autia(dst, cpu_env, cpu_reg(s, rn), modifier);
            } else {
                gen_helper_autib(dst, cpu_env, cpu_reg(s, rn), modifier);
            }
        } else {
            dst = cpu_reg(s, rn);
        }
        gen_a64_set_pc(s, dst);
        /* BLRAA also needs to load return address */
        if (opc == 9) {
            tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next);
        }
        break;

    case 4: /* ERET */
        if (s->current_el == 0) {
            goto do_unallocated;
        }
        switch (op3) {
        case 0: /* ERET */
            if (op4 != 0) {
                goto do_unallocated;
            }
            dst = tcg_temp_new_i64();
            tcg_gen_ld_i64(dst, cpu_env,
                           offsetof(CPUARMState, elr_el[s->current_el]));
            break;

        case 2: /* ERETAA */
        case 3: /* ERETAB */
            if (!dc_isar_feature(aa64_pauth, s)) {
                goto do_unallocated;
            }
            if (rn != 0x1f || op4 != 0x1f) {
                goto do_unallocated;
            }
            dst = tcg_temp_new_i64();
            tcg_gen_ld_i64(dst, cpu_env,
                           offsetof(CPUARMState, elr_el[s->current_el]));
            if (s->pauth_active) {
                modifier = cpu_X[31];
                if (op3 == 2) {
                    gen_helper_autia(dst, cpu_env, dst, modifier);
                } else {
                    gen_helper_autib(dst, cpu_env, dst, modifier);
                }
            }
            break;

        default:
            goto do_unallocated;
        }
        if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
            gen_io_start();
        }

        gen_helper_exception_return(cpu_env, dst);
        tcg_temp_free_i64(dst);
        /* Must exit loop to check un-masked IRQs */
        s->base.is_jmp = DISAS_EXIT;
        return;

    case 5: /* DRPS */
        if (op3 != 0 || op4 != 0 || rn != 0x1f) {
            goto do_unallocated;
        } else {
            unsupported_encoding(s, insn);
        }
        return;

    default:
    do_unallocated:
        unallocated_encoding(s);
        return;
    }

    switch (btype_mod) {
    case 0: /* BR */
        if (dc_isar_feature(aa64_bti, s)) {
            /* BR to {x16,x17} or !guard -> 1, else 3.  */
            set_btype(s, rn == 16 || rn == 17 || !s->guarded_page ? 1 : 3);
        }
        break;

    case 1: /* BLR */
        if (dc_isar_feature(aa64_bti, s)) {
            /* BLR sets BTYPE to 2, regardless of source guarded page.  */
            set_btype(s, 2);
        }
        break;

    default: /* RET or none of the above */
        /* BTYPE will be set to 0 by normal end-of-insn processing.  */
        break;
    }

    s->base.is_jmp = DISAS_JUMP;
}
2351
2352
2353static void disas_b_exc_sys(DisasContext *s, uint32_t insn)
2354{
2355 switch (extract32(insn, 25, 7)) {
2356 case 0x0a: case 0x0b:
2357 case 0x4a: case 0x4b:
2358 disas_uncond_b_imm(s, insn);
2359 break;
2360 case 0x1a: case 0x5a:
2361 disas_comp_b_imm(s, insn);
2362 break;
2363 case 0x1b: case 0x5b:
2364 disas_test_b_imm(s, insn);
2365 break;
2366 case 0x2a:
2367 disas_cond_b_imm(s, insn);
2368 break;
2369 case 0x6a:
2370 if (insn & (1 << 24)) {
2371 if (extract32(insn, 22, 2) == 0) {
2372 disas_system(s, insn);
2373 } else {
2374 unallocated_encoding(s);
2375 }
2376 } else {
2377 disas_exc(s, insn);
2378 }
2379 break;
2380 case 0x6b:
2381 disas_uncond_b_reg(s, insn);
2382 break;
2383 default:
2384 unallocated_encoding(s);
2385 break;
2386 }
2387}
2388
2389
2390
2391
2392
2393
2394
2395
2396
2397
2398
2399
/*
 * Load exclusive (LDXR/LDAXR/LDXP/LDAXP): load into Rt (and Rt2 for a
 * pair), recording the address and loaded data in cpu_exclusive_addr /
 * cpu_exclusive_val / cpu_exclusive_high for a subsequent
 * store-exclusive to compare against.
 */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv_i64 addr, int size, bool is_pair)
{
    int idx = get_mem_index(s);
    MemOp memop = s->be_data;

    g_assert(size <= 3);
    if (is_pair) {
        g_assert(size >= 2);
        if (size == 2) {
            /* The pair must be single-copy atomic for the doubleword.  */
            memop |= MO_64 | MO_ALIGN;
            tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx, memop);
            if (s->be_data == MO_LE) {
                tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 0, 32);
                tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 32, 32);
            } else {
                tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 32, 32);
                tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 0, 32);
            }
        } else {
            /* The pair must be single-copy atomic for *each* doubleword, not
             * the entire quadword, however it must be quadword aligned.
             */
            memop |= MO_64;
            tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx,
                                memop | MO_ALIGN_16);

            TCGv_i64 addr2 = tcg_temp_new_i64();
            tcg_gen_addi_i64(addr2, addr, 8);
            tcg_gen_qemu_ld_i64(cpu_exclusive_high, addr2, idx, memop);
            tcg_temp_free_i64(addr2);

            tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val);
            tcg_gen_mov_i64(cpu_reg(s, rt2), cpu_exclusive_high);
        }
    } else {
        memop |= size | MO_ALIGN;
        tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx, memop);
        tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val);
    }
    /* Mark the monitor: this is the address a store-exclusive must match. */
    tcg_gen_mov_i64(cpu_exclusive_addr, addr);
}
2442
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i64 addr, int size, int is_pair)
{
    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]
     *     && (!is_pair || env->exclusive_high == [addr + datasize])) {
     *     [addr] = {Rt};
     *     if (is_pair) {
     *         [addr + datasize] = {Rt2};
     *     }
     *     {Rd} = 0;
     * } else {
     *     {Rd} = 1;
     * }
     * env->exclusive_addr = -1;
     */
    TCGLabel *fail_label = gen_new_label();
    TCGLabel *done_label = gen_new_label();
    TCGv_i64 tmp;

    /* Fast fail if the monitor does not cover this address. */
    tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);

    tmp = tcg_temp_new_i64();
    if (is_pair) {
        if (size == 2) {
            /* 32-bit pair: pack Rt/Rt2 into one doubleword and cmpxchg
             * against the recorded exclusive value.
             */
            if (s->be_data == MO_LE) {
                tcg_gen_concat32_i64(tmp, cpu_reg(s, rt), cpu_reg(s, rt2));
            } else {
                tcg_gen_concat32_i64(tmp, cpu_reg(s, rt2), cpu_reg(s, rt));
            }
            tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr,
                                       cpu_exclusive_val, tmp,
                                       get_mem_index(s),
                                       MO_64 | MO_ALIGN | s->be_data);
            tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
        } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
            /* 64-bit pair in a parallel context needs a 128-bit cmpxchg. */
            if (!HAVE_CMPXCHG128) {
                gen_helper_exit_atomic(cpu_env);
                s->base.is_jmp = DISAS_NORETURN;
            } else if (s->be_data == MO_LE) {
                gen_helper_paired_cmpxchg64_le_parallel(tmp, cpu_env,
                                                        cpu_exclusive_addr,
                                                        cpu_reg(s, rt),
                                                        cpu_reg(s, rt2));
            } else {
                gen_helper_paired_cmpxchg64_be_parallel(tmp, cpu_env,
                                                        cpu_exclusive_addr,
                                                        cpu_reg(s, rt),
                                                        cpu_reg(s, rt2));
            }
        } else if (s->be_data == MO_LE) {
            gen_helper_paired_cmpxchg64_le(tmp, cpu_env, cpu_exclusive_addr,
                                           cpu_reg(s, rt), cpu_reg(s, rt2));
        } else {
            gen_helper_paired_cmpxchg64_be(tmp, cpu_env, cpu_exclusive_addr,
                                           cpu_reg(s, rt), cpu_reg(s, rt2));
        }
    } else {
        /* Single register: the cmpxchg both checks the remembered value
         * and performs the store in one atomic operation.
         */
        tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr, cpu_exclusive_val,
                                   cpu_reg(s, rt), get_mem_index(s),
                                   size | MO_ALIGN | s->be_data);
        tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
    }
    tcg_gen_mov_i64(cpu_reg(s, rd), tmp);
    tcg_temp_free_i64(tmp);
    tcg_gen_br(done_label);

    gen_set_label(fail_label);
    tcg_gen_movi_i64(cpu_reg(s, rd), 1);
    gen_set_label(done_label);
    /* Either way, the exclusive monitor is cleared. */
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
2514
/*
 * CAS/CASA/CASL/CASAL: atomically compare Rs with [Xn|SP]; if equal,
 * store Rt.  The old memory value is always loaded back into Rs.
 */
static void gen_compare_and_swap(DisasContext *s, int rs, int rt,
                                 int rn, int size)
{
    TCGv_i64 tcg_rs = cpu_reg(s, rs);
    TCGv_i64 tcg_rt = cpu_reg(s, rt);
    int memidx = get_mem_index(s);
    TCGv_i64 clean_addr;

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), true, rn != 31, size);
    tcg_gen_atomic_cmpxchg_i64(tcg_rs, clean_addr, tcg_rs, tcg_rt, memidx,
                               size | MO_ALIGN | s->be_data);
}
2530
/*
 * CASP/CASPA/CASPL/CASPAL: atomically compare {Rs, Rs+1} with the pair
 * at [Xn|SP]; if equal, store {Rt, Rt+1}.  The old memory pair is always
 * loaded back into {Rs, Rs+1}.
 */
static void gen_compare_and_swap_pair(DisasContext *s, int rs, int rt,
                                      int rn, int size)
{
    TCGv_i64 s1 = cpu_reg(s, rs);
    TCGv_i64 s2 = cpu_reg(s, rs + 1);
    TCGv_i64 t1 = cpu_reg(s, rt);
    TCGv_i64 t2 = cpu_reg(s, rt + 1);
    TCGv_i64 clean_addr;
    int memidx = get_mem_index(s);

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    /* This is a single atomic access, despite the "pair". */
    clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), true, rn != 31, size + 1);

    if (size == 2) {
        /* 32-bit pair: pack both halves into one 64-bit cmpxchg. */
        TCGv_i64 cmp = tcg_temp_new_i64();
        TCGv_i64 val = tcg_temp_new_i64();

        if (s->be_data == MO_LE) {
            tcg_gen_concat32_i64(val, t1, t2);
            tcg_gen_concat32_i64(cmp, s1, s2);
        } else {
            tcg_gen_concat32_i64(val, t2, t1);
            tcg_gen_concat32_i64(cmp, s2, s1);
        }

        tcg_gen_atomic_cmpxchg_i64(cmp, clean_addr, cmp, val, memidx,
                                   MO_64 | MO_ALIGN | s->be_data);
        tcg_temp_free_i64(val);

        /* Unpack the old memory value back into Rs:Rs+1. */
        if (s->be_data == MO_LE) {
            tcg_gen_extr32_i64(s1, s2, cmp);
        } else {
            tcg_gen_extr32_i64(s2, s1, cmp);
        }
        tcg_temp_free_i64(cmp);
    } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        /* 64-bit pair in a parallel context: needs a real 128-bit cmpxchg. */
        if (HAVE_CMPXCHG128) {
            TCGv_i32 tcg_rs = tcg_const_i32(rs);
            if (s->be_data == MO_LE) {
                gen_helper_casp_le_parallel(cpu_env, tcg_rs,
                                            clean_addr, t1, t2);
            } else {
                gen_helper_casp_be_parallel(cpu_env, tcg_rs,
                                            clean_addr, t1, t2);
            }
            tcg_temp_free_i32(tcg_rs);
        } else {
            gen_helper_exit_atomic(cpu_env);
            s->base.is_jmp = DISAS_NORETURN;
        }
    } else {
        TCGv_i64 d1 = tcg_temp_new_i64();
        TCGv_i64 d2 = tcg_temp_new_i64();
        TCGv_i64 a2 = tcg_temp_new_i64();
        TCGv_i64 c1 = tcg_temp_new_i64();
        TCGv_i64 c2 = tcg_temp_new_i64();
        TCGv_i64 zero = tcg_const_i64(0);

        /* Load the two words, in memory order; MO_ALIGN_16 enforces the
         * required 16-byte alignment on the first access.
         */
        tcg_gen_qemu_ld_i64(d1, clean_addr, memidx,
                            MO_64 | MO_ALIGN_16 | s->be_data);
        tcg_gen_addi_i64(a2, clean_addr, 8);
        tcg_gen_qemu_ld_i64(d2, a2, memidx, MO_64 | s->be_data);

        /* Compare the two words, also in memory order. */
        tcg_gen_setcond_i64(TCG_COND_EQ, c1, d1, s1);
        tcg_gen_setcond_i64(TCG_COND_EQ, c2, d2, s2);
        tcg_gen_and_i64(c2, c2, c1);

        /* If compare equal, write back new data, else write back old data. */
        tcg_gen_movcond_i64(TCG_COND_NE, c1, c2, zero, t1, d1);
        tcg_gen_movcond_i64(TCG_COND_NE, c2, c2, zero, t2, d2);
        tcg_gen_qemu_st_i64(c1, clean_addr, memidx, MO_64 | s->be_data);
        tcg_gen_qemu_st_i64(c2, a2, memidx, MO_64 | s->be_data);
        tcg_temp_free_i64(a2);
        tcg_temp_free_i64(c1);
        tcg_temp_free_i64(c2);
        tcg_temp_free_i64(zero);

        /* Write back the data from memory to Rs. */
        tcg_gen_mov_i64(s1, d1);
        tcg_gen_mov_i64(s2, d2);
        tcg_temp_free_i64(d1);
        tcg_temp_free_i64(d2);
    }
}
2621
2622
2623
2624
/*
 * Compute the ISV "SF" (sixty-four) syndrome bit for a load/store:
 * true iff the destination register size implied by size/is_signed/opc
 * is 64 bits.
 *
 * Sign-extending loads target a 64-bit register exactly when bit 0 of
 * opc is clear; unsigned accesses target 64 bits only for size == 3.
 */
static bool disas_ldst_compute_iss_sf(int size, bool is_signed, int opc)
{
    if (is_signed) {
        /* opc bit 0 clear -> 64-bit destination, set -> 32-bit */
        return (opc & 1) == 0;
    }
    /* Unsigned: only the doubleword access uses a 64-bit register. */
    return size == 3;
}
2637
2638
2639
2640
2641
2642
2643
2644
2645
2646
2647
2648
2649
2650
/* Load/store exclusive
 *
 *  31 30 29         24  23  22   21  20  16  15  14   10 9    5 4    0
 * +-----+-------------+----+---+----+------+----+-------+------+------+
 * | sz  | 0 0 1 0 0 0 | o2 | L | o1 |  Rs  | o0 |  Rt2  |  Rn  |  Rt  |
 * +-----+-------------+----+---+----+------+----+-------+------+------+
 *
 *  sz: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64 bit
 *   L: 0 -> store, 1 -> load
 *  o2: 0 -> exclusive, 1 -> not
 *  o1: 0 -> single register, 1 -> register pair
 *  o0: 1 -> load-acquire/store-release, 0 -> not
 */
static void disas_ldst_excl(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rt2 = extract32(insn, 10, 5);
    int rs = extract32(insn, 16, 5);
    int is_lasr = extract32(insn, 15, 1);
    int o2_L_o1_o0 = extract32(insn, 21, 3) * 2 | is_lasr;
    int size = extract32(insn, 30, 2);
    TCGv_i64 clean_addr;

    switch (o2_L_o1_o0) {
    case 0x0: /* STXR */
    case 0x1: /* STLXR */
        if (rn == 31) {
            gen_check_sp_alignment(s);
        }
        if (is_lasr) {
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
        }
        clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
                                    true, rn != 31, size);
        gen_store_exclusive(s, rs, rt, rt2, clean_addr, size, false);
        return;

    case 0x4: /* LDXR */
    case 0x5: /* LDAXR */
        if (rn == 31) {
            gen_check_sp_alignment(s);
        }
        clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
                                    false, rn != 31, size);
        s->is_ldex = true;
        gen_load_exclusive(s, rt, rt2, clean_addr, size, false);
        if (is_lasr) {
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
        }
        return;

    case 0x8: /* STLLR */
        if (!dc_isar_feature(aa64_lor, s)) {
            break;
        }
        /* StoreLORelease is the same as Store-Release for QEMU.  */
        /* fall through */
    case 0x9: /* STLR */
        /* Generate ISS for non-exclusive accesses including LASR.  */
        if (rn == 31) {
            gen_check_sp_alignment(s);
        }
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
        clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
                                    true, rn != 31, size);
        do_gpr_st(s, cpu_reg(s, rt), clean_addr, size, true, rt,
                  disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
        return;

    case 0xc: /* LDLAR */
        if (!dc_isar_feature(aa64_lor, s)) {
            break;
        }
        /* LoadLOAcquire is the same as Load-Acquire for QEMU.  */
        /* fall through */
    case 0xd: /* LDAR */
        /* Generate ISS for non-exclusive accesses including LASR.  */
        if (rn == 31) {
            gen_check_sp_alignment(s);
        }
        clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
                                    false, rn != 31, size);
        do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, false, false, true, rt,
                  disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
        return;

    case 0x2: case 0x3: /* CASP / STXP */
        if (size & 2) { /* STXP / STLXP */
            if (rn == 31) {
                gen_check_sp_alignment(s);
            }
            if (is_lasr) {
                tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
            }
            clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
                                        true, rn != 31, size);
            gen_store_exclusive(s, rs, rt, rt2, clean_addr, size, true);
            return;
        }
        if (rt2 == 31
            && ((rt | rs) & 1) == 0
            && dc_isar_feature(aa64_atomics, s)) {
            /* CASP / CASPL */
            gen_compare_and_swap_pair(s, rs, rt, rn, size | 2);
            return;
        }
        break;

    case 0x6: case 0x7: /* CASPA / LDXP */
        if (size & 2) { /* LDXP / LDAXP */
            if (rn == 31) {
                gen_check_sp_alignment(s);
            }
            clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
                                        false, rn != 31, size);
            s->is_ldex = true;
            gen_load_exclusive(s, rt, rt2, clean_addr, size, true);
            if (is_lasr) {
                tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
            }
            return;
        }
        if (rt2 == 31
            && ((rt | rs) & 1) == 0
            && dc_isar_feature(aa64_atomics, s)) {
            /* CASPA / CASPAL */
            gen_compare_and_swap_pair(s, rs, rt, rn, size | 2);
            return;
        }
        break;

    case 0xa: /* CAS */
    case 0xb: /* CASL */
    case 0xe: /* CASA */
    case 0xf: /* CASAL */
        if (rt2 == 31 && dc_isar_feature(aa64_atomics, s)) {
            gen_compare_and_swap(s, rs, rt, rn, size);
            return;
        }
        break;
    }
    unallocated_encoding(s);
}
2783
2784
2785
2786
2787
2788
2789
2790
2791
2792
2793
2794
2795
2796
/*
 * Load register (literal)
 *
 *  31 30 29   27  26 25 24 23                5 4     0
 * +-----+-------+---+-----+-------------------+-------+
 * | opc | 0 1 1 | V | 0 0 |     imm19         |  Rt   |
 * +-----+-------+---+-----+-------------------+-------+
 *
 * V: 1 -> vector (simd/fp)
 * opc (non-vector): 00 -> 32 bit, 01 -> 64 bit,
 *                   10 -> 32 bit signed, 11 -> prefetch
 * opc (vector): 00 -> 32 bit, 01 -> 64 bit, 10 -> 128 bit (11 unallocated)
 */
static void disas_ld_lit(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int64_t imm = sextract32(insn, 5, 19) << 2;
    bool is_vector = extract32(insn, 26, 1);
    int opc = extract32(insn, 30, 2);
    bool is_signed = false;
    int size = 2;
    TCGv_i64 tcg_rt, clean_addr;

    if (is_vector) {
        if (opc == 3) {
            unallocated_encoding(s);
            return;
        }
        size = 2 + opc;
        if (!fp_access_check(s)) {
            return;
        }
    } else {
        if (opc == 3) {
            /* PRFM (literal) : prefetch; we treat it as a NOP */
            return;
        }
        size = 2 + extract32(opc, 0, 1);
        is_signed = extract32(opc, 1, 1);
    }

    tcg_rt = cpu_reg(s, rt);

    /* Literal loads address PC-relative; no TBI/MTE cleaning needed. */
    clean_addr = tcg_const_i64(s->pc_curr + imm);
    if (is_vector) {
        do_fp_ld(s, rt, clean_addr, size);
    } else {
        /* Only unsigned 32bit loads target 32bit registers.  */
        bool iss_sf = opc != 0;

        do_gpr_ld(s, tcg_rt, clean_addr, size, is_signed, false,
                  true, rt, iss_sf, false);
    }
    tcg_temp_free_i64(clean_addr);
}
2839
2840
2841
2842
2843
2844
2845
2846
2847
2848
2849
2850
2851
2852
2853
2854
2855
2856
2857
2858
2859
2860
2861
2862
2863
2864
2865
2866
2867
/*
 * Load/store pair (all forms): LDP/STP/LDPSW/LDNP/STNP, the SIMD&FP
 * pair variants, and STGP (store allocation tag and pair).  The index
 * field selects signed-offset, post-index, pre-index or non-temporal.
 */
static void disas_ldst_pair(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rt2 = extract32(insn, 10, 5);
    uint64_t offset = sextract64(insn, 15, 7);
    int index = extract32(insn, 23, 2);
    bool is_vector = extract32(insn, 26, 1);
    bool is_load = extract32(insn, 22, 1);
    int opc = extract32(insn, 30, 2);

    bool is_signed = false;
    bool postindex = false;
    bool wback = false;
    bool set_tag = false;

    TCGv_i64 clean_addr, dirty_addr;

    int size;

    if (opc == 3) {
        unallocated_encoding(s);
        return;
    }

    if (is_vector) {
        size = 2 + opc;
    } else if (opc == 1 && !is_load) {
        /* STGP: store pair plus allocation tag (MTE) */
        if (!dc_isar_feature(aa64_mte_insn_reg, s) || index == 0) {
            unallocated_encoding(s);
            return;
        }
        size = 3;
        set_tag = true;
    } else {
        size = 2 + extract32(opc, 1, 1);
        is_signed = extract32(opc, 0, 1);
        if (!is_load && is_signed) {
            /* There is no store-pair-signed encoding */
            unallocated_encoding(s);
            return;
        }
    }

    switch (index) {
    case 1: /* post-index */
        postindex = true;
        wback = true;
        break;
    case 0:
        /*
         * Signed offset with "non-temporal" hint.  We treat the hint
         * as a plain signed-offset access; the only decode difference
         * is that LDPSW has no non-temporal form.
         */
        if (is_signed) {
            /* There is no non-temporal-hint version of LDPSW */
            unallocated_encoding(s);
            return;
        }
        postindex = false;
        break;
    case 2: /* signed offset, no writeback */
        postindex = false;
        break;
    case 3: /* pre-index */
        postindex = false;
        wback = true;
        break;
    }

    if (is_vector && !fp_access_check(s)) {
        return;
    }

    /* imm7 is scaled by the access size (or tag granule for STGP) */
    offset <<= (set_tag ? LOG2_TAG_GRANULE : size);

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    dirty_addr = read_cpu_reg_sp(s, rn, 1);
    if (!postindex) {
        tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
    }

    if (set_tag) {
        if (!s->ata) {
            /*
             * TCO not enabled: the tag store is a nop, but we still
             * need to probe/check the address as the helper would.
             */
            gen_helper_stg_stub(cpu_env, dirty_addr);
        } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
            gen_helper_stg_parallel(cpu_env, dirty_addr, dirty_addr);
        } else {
            gen_helper_stg(cpu_env, dirty_addr, dirty_addr);
        }
    }

    clean_addr = gen_mte_checkN(s, dirty_addr, !is_load,
                                (wback || rn != 31) && !set_tag,
                                size, 2 << size);

    if (is_vector) {
        if (is_load) {
            do_fp_ld(s, rt, clean_addr, size);
        } else {
            do_fp_st(s, rt, clean_addr, size);
        }
        tcg_gen_addi_i64(clean_addr, clean_addr, 1 << size);
        if (is_load) {
            do_fp_ld(s, rt2, clean_addr, size);
        } else {
            do_fp_st(s, rt2, clean_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        TCGv_i64 tcg_rt2 = cpu_reg(s, rt2);

        if (is_load) {
            TCGv_i64 tmp = tcg_temp_new_i64();

            /*
             * Do not modify tcg_rt before recognizing any exception
             * from the second load (which could fault after the first
             * succeeded); stage the first value through a temporary.
             */
            do_gpr_ld(s, tmp, clean_addr, size, is_signed, false,
                      false, 0, false, false);
            tcg_gen_addi_i64(clean_addr, clean_addr, 1 << size);
            do_gpr_ld(s, tcg_rt2, clean_addr, size, is_signed, false,
                      false, 0, false, false);

            tcg_gen_mov_i64(tcg_rt, tmp);
            tcg_temp_free_i64(tmp);
        } else {
            do_gpr_st(s, tcg_rt, clean_addr, size,
                      false, 0, false, false);
            tcg_gen_addi_i64(clean_addr, clean_addr, 1 << size);
            do_gpr_st(s, tcg_rt2, clean_addr, size,
                      false, 0, false, false);
        }
    }

    if (wback) {
        if (postindex) {
            tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
        }
        tcg_gen_mov_i64(cpu_reg_sp(s, rn), dirty_addr);
    }
}
3018
3019
3020
3021
3022
3023
3024
3025
3026
3027
3028
3029
3030
3031
3032
3033
3034
/*
 * Load/store register (imm9 forms): unscaled immediate, immediate
 * post-indexed, immediate pre-indexed, and unprivileged (LDTR/STTR).
 * The idx field selects which; idx == 2 is the unprivileged form,
 * which uses the EL0 MMU index when executed from a higher EL.
 */
static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
                                int opc,
                                int size,
                                int rt,
                                bool is_vector)
{
    int rn = extract32(insn, 5, 5);
    int imm9 = sextract32(insn, 12, 9);
    int idx = extract32(insn, 10, 2);
    bool is_signed = false;
    bool is_store = false;
    bool is_extended = false;
    bool is_unpriv = (idx == 2);
    bool iss_valid = !is_vector;
    bool post_index;
    bool writeback;
    int memidx;

    TCGv_i64 clean_addr, dirty_addr;

    if (is_vector) {
        size |= (opc & 2) << 1;
        if (size > 4 || is_unpriv) {
            /* no 128-bit+ or unprivileged vector forms */
            unallocated_encoding(s);
            return;
        }
        is_store = ((opc & 1) == 0);
        if (!fp_access_check(s)) {
            return;
        }
    } else {
        if (size == 3 && opc == 2) {
            /* PRFM - prefetch: implemented as a no-op */
            if (idx != 0) {
                unallocated_encoding(s);
                return;
            }
            return;
        }
        if (opc == 3 && size > 1) {
            unallocated_encoding(s);
            return;
        }
        is_store = (opc == 0);
        is_signed = extract32(opc, 1, 1);
        is_extended = (size < 3) && extract32(opc, 0, 1);
    }

    switch (idx) {
    case 0: /* unscaled immediate */
    case 2: /* unprivileged */
        post_index = false;
        writeback = false;
        break;
    case 1: /* post-index */
        post_index = true;
        writeback = true;
        break;
    case 3: /* pre-index */
        post_index = false;
        writeback = true;
        break;
    default:
        g_assert_not_reached();
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    dirty_addr = read_cpu_reg_sp(s, rn, 1);
    if (!post_index) {
        tcg_gen_addi_i64(dirty_addr, dirty_addr, imm9);
    }

    /* unprivileged accesses use the EL0 translation regime */
    memidx = is_unpriv ? get_a64_user_mem_index(s) : get_mem_index(s);
    clean_addr = gen_mte_check1_mmuidx(s, dirty_addr, is_store,
                                       writeback || rn != 31,
                                       size, is_unpriv, memidx);

    if (is_vector) {
        if (is_store) {
            do_fp_st(s, rt, clean_addr, size);
        } else {
            do_fp_ld(s, rt, clean_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);

        if (is_store) {
            do_gpr_st_memidx(s, tcg_rt, clean_addr, size, memidx,
                             iss_valid, rt, iss_sf, false);
        } else {
            do_gpr_ld_memidx(s, tcg_rt, clean_addr, size,
                             is_signed, is_extended, memidx,
                             iss_valid, rt, iss_sf, false);
        }
    }

    if (writeback) {
        TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
        if (post_index) {
            tcg_gen_addi_i64(dirty_addr, dirty_addr, imm9);
        }
        tcg_gen_mov_i64(tcg_rn, dirty_addr);
    }
}
3143
3144
3145
3146
3147
3148
3149
3150
3151
3152
3153
3154
3155
3156
3157
3158
3159
3160
3161
3162
3163
3164
/*
 * Load/store register (register offset): address is Rn plus Rm,
 * optionally extended (opt field) and shifted by the access size
 * (shift bit).  No writeback in this form.
 */
static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn,
                                   int opc,
                                   int size,
                                   int rt,
                                   bool is_vector)
{
    int rn = extract32(insn, 5, 5);
    int shift = extract32(insn, 12, 1);
    int rm = extract32(insn, 16, 5);
    int opt = extract32(insn, 13, 3);
    bool is_signed = false;
    bool is_store = false;
    bool is_extended = false;

    TCGv_i64 tcg_rm, clean_addr, dirty_addr;

    if (extract32(opt, 1, 1) == 0) {
        /* only UXTW/LSL/SXTW/SXTX extend options are valid */
        unallocated_encoding(s);
        return;
    }

    if (is_vector) {
        size |= (opc & 2) << 1;
        if (size > 4) {
            unallocated_encoding(s);
            return;
        }
        is_store = !extract32(opc, 0, 1);
        if (!fp_access_check(s)) {
            return;
        }
    } else {
        if (size == 3 && opc == 2) {
            /* PRFM - prefetch: implemented as a no-op */
            return;
        }
        if (opc == 3 && size > 1) {
            unallocated_encoding(s);
            return;
        }
        is_store = (opc == 0);
        is_signed = extract32(opc, 1, 1);
        is_extended = (size < 3) && extract32(opc, 0, 1);
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    dirty_addr = read_cpu_reg_sp(s, rn, 1);

    tcg_rm = read_cpu_reg(s, rm, 1);
    ext_and_shift_reg(tcg_rm, tcg_rm, opt, shift ? size : 0);

    tcg_gen_add_i64(dirty_addr, dirty_addr, tcg_rm);
    clean_addr = gen_mte_check1(s, dirty_addr, is_store, true, size);

    if (is_vector) {
        if (is_store) {
            do_fp_st(s, rt, clean_addr, size);
        } else {
            do_fp_ld(s, rt, clean_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
        if (is_store) {
            do_gpr_st(s, tcg_rt, clean_addr, size,
                      true, rt, iss_sf, false);
        } else {
            do_gpr_ld(s, tcg_rt, clean_addr, size,
                      is_signed, is_extended,
                      true, rt, iss_sf, false);
        }
    }
}
3240
3241
3242
3243
3244
3245
3246
3247
3248
3249
3250
3251
3252
3253
3254
3255
3256
3257
/*
 * Load/store register (unsigned immediate): 12-bit unsigned offset,
 * scaled by the access size.  No writeback in this form.
 */
static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn,
                                        int opc,
                                        int size,
                                        int rt,
                                        bool is_vector)
{
    int rn = extract32(insn, 5, 5);
    unsigned int imm12 = extract32(insn, 10, 12);
    unsigned int offset;

    TCGv_i64 clean_addr, dirty_addr;

    bool is_store;
    bool is_signed = false;
    bool is_extended = false;

    if (is_vector) {
        size |= (opc & 2) << 1;
        if (size > 4) {
            unallocated_encoding(s);
            return;
        }
        is_store = !extract32(opc, 0, 1);
        if (!fp_access_check(s)) {
            return;
        }
    } else {
        if (size == 3 && opc == 2) {
            /* PRFM - prefetch: implemented as a no-op */
            return;
        }
        if (opc == 3 && size > 1) {
            unallocated_encoding(s);
            return;
        }
        is_store = (opc == 0);
        is_signed = extract32(opc, 1, 1);
        is_extended = (size < 3) && extract32(opc, 0, 1);
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    dirty_addr = read_cpu_reg_sp(s, rn, 1);
    /* imm12 is scaled by the access size */
    offset = imm12 << size;
    tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
    clean_addr = gen_mte_check1(s, dirty_addr, is_store, rn != 31, size);

    if (is_vector) {
        if (is_store) {
            do_fp_st(s, rt, clean_addr, size);
        } else {
            do_fp_ld(s, rt, clean_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
        if (is_store) {
            do_gpr_st(s, tcg_rt, clean_addr, size,
                      true, rt, iss_sf, false);
        } else {
            do_gpr_ld(s, tcg_rt, clean_addr, size, is_signed, is_extended,
                      true, rt, iss_sf, false);
        }
    }
}
3324
3325
3326
3327
3328
3329
3330
3331
3332
3333
3334
3335
3336
3337
3338
/*
 * Atomic memory operations (FEAT_LSE): LDADD, LDCLR, LDEOR, LDSET,
 * LDSMAX, LDSMIN, LDUMAX, LDUMIN, SWP — plus LDAPR (FEAT_LRCPC),
 * which shares this encoding space with o3_opc == 014.
 * Note: the o3_opc case labels below are octal, mirroring the
 * three-bit opcode values in the architecture manual.
 */
static void disas_ldst_atomic(DisasContext *s, uint32_t insn,
                              int size, int rt, bool is_vector)
{
    int rs = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int o3_opc = extract32(insn, 12, 4);
    bool r = extract32(insn, 22, 1);
    bool a = extract32(insn, 23, 1);
    TCGv_i64 tcg_rs, clean_addr;
    AtomicThreeOpFn *fn = NULL;

    if (is_vector || !dc_isar_feature(aa64_atomics, s)) {
        unallocated_encoding(s);
        return;
    }
    switch (o3_opc) {
    case 000: /* LDADD */
        fn = tcg_gen_atomic_fetch_add_i64;
        break;
    case 001: /* LDCLR */
        fn = tcg_gen_atomic_fetch_and_i64;
        break;
    case 002: /* LDEOR */
        fn = tcg_gen_atomic_fetch_xor_i64;
        break;
    case 003: /* LDSET */
        fn = tcg_gen_atomic_fetch_or_i64;
        break;
    case 004: /* LDSMAX */
        fn = tcg_gen_atomic_fetch_smax_i64;
        break;
    case 005: /* LDSMIN */
        fn = tcg_gen_atomic_fetch_smin_i64;
        break;
    case 006: /* LDUMAX */
        fn = tcg_gen_atomic_fetch_umax_i64;
        break;
    case 007: /* LDUMIN */
        fn = tcg_gen_atomic_fetch_umin_i64;
        break;
    case 010: /* SWP */
        fn = tcg_gen_atomic_xchg_i64;
        break;
    case 014: /* LDAPR, LDAPRH, LDAPRB (FEAT_LRCPC) */
        if (!dc_isar_feature(aa64_rcpc_8_3, s) ||
            rs != 31 || a != 1 || r != 0) {
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), false, rn != 31, size);

    if (o3_opc == 014) {
        /*
         * LDAPR*: load-acquire with RCpc semantics.  TCG has no
         * weaker-than-acquire ordering, so emit a full load-acquire
         * barrier after the load, like LDAR.
         */
        do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, false, false,
                  true, rt, disas_ldst_compute_iss_sf(size, false, 0), true);
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
        return;
    }

    tcg_rs = read_cpu_reg(s, rs, true);

    if (o3_opc == 1) { /* LDCLR: architecture clears the bits set in Rs */
        tcg_gen_not_i64(tcg_rs, tcg_rs);
    }

    /*
     * The tcg atomic primitives are all full barriers, so we can
     * ignore the Acquire and Release bits of this instruction.
     */
    fn(cpu_reg(s, rt), clean_addr, tcg_rs, get_mem_index(s),
       s->be_data | size | MO_ALIGN);
}
3425
3426
3427
3428
3429
3430
3431
3432
3433
3434
3435
3436
3437
3438
3439
3440
/*
 * PAC memory operations (FEAT_PAuth): LDRAA/LDRAB — authenticate the
 * base register with key A or B, then load with a scaled 10-bit
 * signed offset, optionally writing back the (dirty) address.
 */
static void disas_ldst_pac(DisasContext *s, uint32_t insn,
                           int size, int rt, bool is_vector)
{
    int rn = extract32(insn, 5, 5);
    bool is_wback = extract32(insn, 11, 1);
    bool use_key_a = !extract32(insn, 23, 1);
    int offset;
    TCGv_i64 clean_addr, dirty_addr, tcg_rt;

    if (size != 3 || is_vector || !dc_isar_feature(aa64_pauth, s)) {
        unallocated_encoding(s);
        return;
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    dirty_addr = read_cpu_reg_sp(s, rn, 1);

    if (s->pauth_active) {
        if (use_key_a) {
            /* AUTDA with a zero modifier */
            gen_helper_autda(dirty_addr, cpu_env, dirty_addr,
                             new_tmp_a64_zero(s));
        } else {
            /* AUTDB with a zero modifier */
            gen_helper_autdb(dirty_addr, cpu_env, dirty_addr,
                             new_tmp_a64_zero(s));
        }
    }

    /* Form the 10-bit signed, scaled offset (S:imm9, scaled by size) */
    offset = (extract32(insn, 22, 1) << 9) | extract32(insn, 12, 9);
    offset = sextract32(offset << size, 0, 10 + size);
    tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);

    /* Note that "clean" and "dirty" may both be the authenticated addr */
    clean_addr = gen_mte_check1(s, dirty_addr, false,
                                is_wback || rn != 31, size);

    tcg_rt = cpu_reg(s, rt);
    do_gpr_ld(s, tcg_rt, clean_addr, size, false,
              false, !is_wback,
              rt, true, false);

    if (is_wback) {
        tcg_gen_mov_i64(cpu_reg_sp(s, rn), dirty_addr);
    }
}
3488
3489
3490
3491
3492
3493
3494
3495
3496
3497
3498
3499
3500
3501
3502
/*
 * LDAPR/STLR with unscaled immediate (FEAT_LRCPC2): load-acquire and
 * store-release with a 9-bit unscaled offset and no writeback.
 */
static void disas_ldst_ldapr_stlr(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int offset = sextract32(insn, 12, 9);
    int opc = extract32(insn, 22, 2);
    int size = extract32(insn, 30, 2);
    TCGv_i64 clean_addr, dirty_addr;
    bool is_store = false;
    bool is_signed = false;
    bool extend = false;
    bool iss_sf;

    if (!dc_isar_feature(aa64_rcpc_8_4, s)) {
        unallocated_encoding(s);
        return;
    }

    switch (opc) {
    case 0: /* STLUR* */
        is_store = true;
        break;
    case 1: /* LDAPUR* */
        break;
    case 2: /* LDAPURS* 64-bit variant */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        is_signed = true;
        break;
    case 3: /* LDAPURS* 32-bit variant */
        if (size > 1) {
            unallocated_encoding(s);
            return;
        }
        is_signed = true;
        extend = true; /* zero-extend 32->64 after signed load */
        break;
    default:
        g_assert_not_reached();
    }

    iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    dirty_addr = read_cpu_reg_sp(s, rn, 1);
    tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
    clean_addr = clean_data_tbi(s, dirty_addr);

    if (is_store) {
        /* Store-Release semantics: barrier before the store */
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
        do_gpr_st(s, cpu_reg(s, rt), clean_addr, size, true, rt, iss_sf, true);
    } else {
        /*
         * Load-AcquirePC semantics: TCG has no weaker-than-acquire
         * ordering, so emit a full load-acquire barrier after the load.
         */
        do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, is_signed, extend,
                  true, rt, iss_sf, true);
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
    }
}
3570
3571
3572static void disas_ldst_reg(DisasContext *s, uint32_t insn)
3573{
3574 int rt = extract32(insn, 0, 5);
3575 int opc = extract32(insn, 22, 2);
3576 bool is_vector = extract32(insn, 26, 1);
3577 int size = extract32(insn, 30, 2);
3578
3579 switch (extract32(insn, 24, 2)) {
3580 case 0:
3581 if (extract32(insn, 21, 1) == 0) {
3582
3583
3584
3585
3586 disas_ldst_reg_imm9(s, insn, opc, size, rt, is_vector);
3587 return;
3588 }
3589 switch (extract32(insn, 10, 2)) {
3590 case 0:
3591 disas_ldst_atomic(s, insn, size, rt, is_vector);
3592 return;
3593 case 2:
3594 disas_ldst_reg_roffset(s, insn, opc, size, rt, is_vector);
3595 return;
3596 default:
3597 disas_ldst_pac(s, insn, size, rt, is_vector);
3598 return;
3599 }
3600 break;
3601 case 1:
3602 disas_ldst_reg_unsigned_imm(s, insn, opc, size, rt, is_vector);
3603 return;
3604 }
3605 unallocated_encoding(s);
3606}
3607
3608
3609
3610
3611
3612
3613
3614
3615
3616
3617
3618
3619
3620
3621
3622
3623
3624
3625
/*
 * AdvSIMD load/store multiple structures: LD1-LD4/ST1-ST4 (multiple
 * structures), with optional post-index writeback.  The opcode field
 * selects how many registers (rpt) and how many structure elements
 * per access (selem).
 */
static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 10, 2);
    int opcode = extract32(insn, 12, 4);
    bool is_store = !extract32(insn, 22, 1);
    bool is_postidx = extract32(insn, 23, 1);
    bool is_q = extract32(insn, 30, 1);
    TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
    MemOp endian = s->be_data;

    int total;    /* total bytes */
    int elements; /* elements per vector */
    int rpt;      /* num iterations */
    int selem;    /* structure elements */
    int r;

    if (extract32(insn, 31, 1) || extract32(insn, 21, 1)) {
        unallocated_encoding(s);
        return;
    }

    if (!is_postidx && rm != 0) {
        unallocated_encoding(s);
        return;
    }

    /* From the shared decode logic */
    switch (opcode) {
    case 0x0: /* LD4/ST4 */
        rpt = 1;
        selem = 4;
        break;
    case 0x2: /* LD1/ST1, 4 registers */
        rpt = 4;
        selem = 1;
        break;
    case 0x4: /* LD3/ST3 */
        rpt = 1;
        selem = 3;
        break;
    case 0x6: /* LD1/ST1, 3 registers */
        rpt = 3;
        selem = 1;
        break;
    case 0x7: /* LD1/ST1, 1 register */
        rpt = 1;
        selem = 1;
        break;
    case 0x8: /* LD2/ST2 */
        rpt = 1;
        selem = 2;
        break;
    case 0xa: /* LD1/ST1, 2 registers */
        rpt = 2;
        selem = 1;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (size == 3 && !is_q && selem != 1) {
        /* reserved: 64-bit element with multiple structures */
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    /* For our purposes, bytes are always little-endian */
    if (size == 0) {
        endian = MO_LE;
    }

    total = rpt * selem * (is_q ? 16 : 8);
    tcg_rn = cpu_reg_sp(s, rn);

    /*
     * Issue the MTE check vs the logical repeat count, before we
     * promote consecutive little-endian elements below.
     */
    clean_addr = gen_mte_checkN(s, tcg_rn, is_store, is_postidx || rn != 31,
                                size, total);

    /*
     * Consecutive little-endian elements from a single register
     * can be promoted to a larger little-endian operation.
     */
    if (selem == 1 && endian == MO_LE) {
        size = 3;
    }
    elements = (is_q ? 16 : 8) >> size;

    tcg_ebytes = tcg_const_i64(1 << size);
    for (r = 0; r < rpt; r++) {
        int e;
        for (e = 0; e < elements; e++) {
            int xs;
            for (xs = 0; xs < selem; xs++) {
                int tt = (rt + r + xs) % 32;
                if (is_store) {
                    do_vec_st(s, tt, e, clean_addr, size, endian);
                } else {
                    do_vec_ld(s, tt, e, clean_addr, size, endian);
                }
                tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
            }
        }
    }
    tcg_temp_free_i64(tcg_ebytes);

    if (!is_store) {
        /*
         * For non-quad operations, setting a slice of the low 64 bits
         * of the register clears the high 64 bits (in the ARM ARM
         * pseudocode this is implicit in the fact that 'rval' is a
         * 64 bit wide variable).  We only need to clear the high bits
         * once per register, not per element, so do it after the
         * element loop.
         */
        for (r = 0; r < rpt * selem; r++) {
            int tt = (rt + r) % 32;
            clear_vec_high(s, is_q, tt);
        }
    }

    if (is_postidx) {
        if (rm == 31) {
            /* rm == 31 encodes "post-index by the transfer size" */
            tcg_gen_addi_i64(tcg_rn, tcg_rn, total);
        } else {
            tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
        }
    }
}
3768
3769
3770
3771
3772
3773
3774
3775
3776
3777
3778
3779
3780
3781
3782
3783
3784
3785
3786
3787
3788
3789
3790
/*
 * AdvSIMD load/store single structure: LD1-LD4/ST1-ST4 (single
 * structure to/from one lane) and LD1R-LD4R (load and replicate),
 * with optional post-index writeback.
 */
static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 10, 2);
    int S = extract32(insn, 12, 1);
    int opc = extract32(insn, 13, 3);
    int R = extract32(insn, 21, 1);
    int is_load = extract32(insn, 22, 1);
    int is_postidx = extract32(insn, 23, 1);
    int is_q = extract32(insn, 30, 1);

    int scale = extract32(opc, 1, 2);
    int selem = (extract32(opc, 0, 1) << 1 | R) + 1;
    bool replicate = false;
    /* index is decoded from Q:S:size, then narrowed per element size */
    int index = is_q << 3 | S << 2 | size;
    int xs, total;
    TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;

    if (extract32(insn, 31, 1)) {
        unallocated_encoding(s);
        return;
    }
    if (!is_postidx && rm != 0) {
        unallocated_encoding(s);
        return;
    }

    switch (scale) {
    case 3: /* LD*R: load and replicate */
        if (!is_load || S) {
            unallocated_encoding(s);
            return;
        }
        scale = size;
        replicate = true;
        break;
    case 0: /* byte element */
        break;
    case 1: /* halfword element; size<0> must be 0 */
        if (extract32(size, 0, 1)) {
            unallocated_encoding(s);
            return;
        }
        index >>= 1;
        break;
    case 2: /* word or doubleword element */
        if (extract32(size, 1, 1)) {
            unallocated_encoding(s);
            return;
        }
        if (!extract32(size, 0, 1)) {
            /* word element */
            index >>= 2;
        } else {
            /* doubleword element; S must be 0 */
            if (S) {
                unallocated_encoding(s);
                return;
            }
            index >>= 3;
            scale = 3;
        }
        break;
    default:
        g_assert_not_reached();
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    total = selem << scale;
    tcg_rn = cpu_reg_sp(s, rn);

    clean_addr = gen_mte_checkN(s, tcg_rn, !is_load, is_postidx || rn != 31,
                                scale, total);

    tcg_ebytes = tcg_const_i64(1 << scale);
    for (xs = 0; xs < selem; xs++) {
        if (replicate) {
            /* Load and replicate to all elements */
            TCGv_i64 tcg_tmp = tcg_temp_new_i64();

            tcg_gen_qemu_ld_i64(tcg_tmp, clean_addr,
                                get_mem_index(s), s->be_data + scale);
            tcg_gen_gvec_dup_i64(scale, vec_full_reg_offset(s, rt),
                                 (is_q + 1) * 8, vec_full_reg_size(s),
                                 tcg_tmp);
            tcg_temp_free_i64(tcg_tmp);
        } else {
            /* Load/store to/from a single lane */
            if (is_load) {
                do_vec_ld(s, rt, index, clean_addr, scale, s->be_data);
            } else {
                do_vec_st(s, rt, index, clean_addr, scale, s->be_data);
            }
        }
        tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
        rt = (rt + 1) % 32;
    }
    tcg_temp_free_i64(tcg_ebytes);

    if (is_postidx) {
        if (rm == 31) {
            /* rm == 31 encodes "post-index by the transfer size" */
            tcg_gen_addi_i64(tcg_rn, tcg_rn, total);
        } else {
            tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
        }
    }
}
3905
3906
3907
3908
3909
3910
3911
3912
3913
/*
 * Load/store memory tags (FEAT_MTE): STG, STZG, ST2G, STZ2G, LDG,
 * plus the bulk-tag forms STGM, STZGM, LDGM.
 */
static void disas_ldst_tag(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    uint64_t offset = sextract64(insn, 12, 9) << LOG2_TAG_GRANULE;
    int op2 = extract32(insn, 10, 2);
    int op1 = extract32(insn, 22, 2);
    bool is_load = false, is_pair = false, is_zero = false, is_mult = false;
    int index = 0;
    TCGv_i64 addr, clean_addr, tcg_rt;

    /* We checked insn bits [29:24,21] in the caller.  */
    if (extract32(insn, 30, 2) != 3) {
        goto do_unallocated;
    }

    /*
     * @index is a tri-state variable:
     *   < 0 : post-index, writeback
     *   = 0 : signed offset, no writeback
     *   > 0 : pre-index, writeback
     */
    switch (op1) {
    case 0:
        if (op2 != 0) {
            /* STG */
            index = op2 - 2;
        } else {
            /* STZGM (privileged, zero offset only) */
            if (s->current_el == 0 || offset != 0) {
                goto do_unallocated;
            }
            is_mult = is_zero = true;
        }
        break;
    case 1:
        if (op2 != 0) {
            /* STZG */
            is_zero = true;
            index = op2 - 2;
        } else {
            /* LDG */
            is_load = true;
        }
        break;
    case 2:
        if (op2 != 0) {
            /* ST2G */
            is_pair = true;
            index = op2 - 2;
        } else {
            /* STGM (privileged, zero offset only) */
            if (s->current_el == 0 || offset != 0) {
                goto do_unallocated;
            }
            is_mult = true;
        }
        break;
    case 3:
        if (op2 != 0) {
            /* STZ2G */
            is_pair = is_zero = true;
            index = op2 - 2;
        } else {
            /* LDGM (privileged, zero offset only) */
            if (s->current_el == 0 || offset != 0) {
                goto do_unallocated;
            }
            is_mult = is_load = true;
        }
        break;

    default:
    do_unallocated:
        unallocated_encoding(s);
        return;
    }

    /* bulk forms need full MTE; the rest only the insn-level subset */
    if (is_mult
        ? !dc_isar_feature(aa64_mte, s)
        : !dc_isar_feature(aa64_mte_insn_reg, s)) {
        goto do_unallocated;
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    addr = read_cpu_reg_sp(s, rn, true);
    if (index >= 0) {
        /* pre-index or signed offset */
        tcg_gen_addi_i64(addr, addr, offset);
    }

    if (is_mult) {
        tcg_rt = cpu_reg(s, rt);

        if (is_zero) {
            int size = 4 << s->dcz_blocksize;

            if (s->ata) {
                gen_helper_stzgm_tags(cpu_env, addr, tcg_rt);
            }
            /*
             * The non-tags portion of STZGM is mostly like DC_ZVA,
             * except the alignment happens before the access.
             */
            clean_addr = clean_data_tbi(s, addr);
            tcg_gen_andi_i64(clean_addr, clean_addr, -size);
            gen_helper_dc_zva(cpu_env, clean_addr);
        } else if (s->ata) {
            if (is_load) {
                gen_helper_ldgm(tcg_rt, cpu_env, addr);
            } else {
                gen_helper_stgm(cpu_env, addr, tcg_rt);
            }
        } else {
            /* MTE disabled: just probe the (aligned) memory */
            MMUAccessType acc = is_load ? MMU_DATA_LOAD : MMU_DATA_STORE;
            int size = 4 << GMID_EL1_BS;

            clean_addr = clean_data_tbi(s, addr);
            tcg_gen_andi_i64(clean_addr, clean_addr, -size);
            gen_probe_access(s, clean_addr, acc, size);

            if (is_load) {
                /* The result tags are zeros.  */
                tcg_gen_movi_i64(tcg_rt, 0);
            }
        }
        return;
    }

    if (is_load) {
        /* LDG reads the tag of the granule containing the address */
        tcg_gen_andi_i64(addr, addr, -TAG_GRANULE);
        tcg_rt = cpu_reg(s, rt);
        if (s->ata) {
            gen_helper_ldg(tcg_rt, cpu_env, addr, tcg_rt);
        } else {
            /* MTE disabled: probe, then return the address with tag 0 */
            clean_addr = clean_data_tbi(s, addr);
            gen_probe_access(s, clean_addr, MMU_DATA_LOAD, MO_8);
            gen_address_with_allocation_tag0(tcg_rt, addr);
        }
    } else {
        tcg_rt = cpu_reg_sp(s, rt);
        if (!s->ata) {
            /*
             * MTE disabled: the tag store itself is a nop, but we
             * still need to check the address for alignment and
             * probe memory, which the stub helpers do.
             */
            if (is_pair) {
                gen_helper_st2g_stub(cpu_env, addr);
            } else {
                gen_helper_stg_stub(cpu_env, addr);
            }
        } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
            if (is_pair) {
                gen_helper_st2g_parallel(cpu_env, addr, tcg_rt);
            } else {
                gen_helper_stg_parallel(cpu_env, addr, tcg_rt);
            }
        } else {
            if (is_pair) {
                gen_helper_st2g(cpu_env, addr, tcg_rt);
            } else {
                gen_helper_stg(cpu_env, addr, tcg_rt);
            }
        }
    }

    if (is_zero) {
        /* STZG/STZ2G also zero the data of the granule(s) */
        TCGv_i64 clean_addr = clean_data_tbi(s, addr);
        TCGv_i64 tcg_zero = tcg_const_i64(0);
        int mem_index = get_mem_index(s);
        int i, n = (1 + is_pair) << LOG2_TAG_GRANULE;

        tcg_gen_qemu_st_i64(tcg_zero, clean_addr, mem_index,
                            MO_Q | MO_ALIGN_16);
        for (i = 8; i < n; i += 8) {
            tcg_gen_addi_i64(clean_addr, clean_addr, 8);
            tcg_gen_qemu_st_i64(tcg_zero, clean_addr, mem_index, MO_Q);
        }
        tcg_temp_free_i64(tcg_zero);
    }

    if (index != 0) {
        /* writeback */
        if (index < 0) {
            /* post-index: add the offset only now */
            tcg_gen_addi_i64(addr, addr, offset);
        }
        tcg_gen_mov_i64(cpu_reg_sp(s, rn), addr);
    }
}
4108
4109
4110static void disas_ldst(DisasContext *s, uint32_t insn)
4111{
4112 switch (extract32(insn, 24, 6)) {
4113 case 0x08:
4114 disas_ldst_excl(s, insn);
4115 break;
4116 case 0x18: case 0x1c:
4117 disas_ld_lit(s, insn);
4118 break;
4119 case 0x28: case 0x29:
4120 case 0x2c: case 0x2d:
4121 disas_ldst_pair(s, insn);
4122 break;
4123 case 0x38: case 0x39:
4124 case 0x3c: case 0x3d:
4125 disas_ldst_reg(s, insn);
4126 break;
4127 case 0x0c:
4128 disas_ldst_multiple_struct(s, insn);
4129 break;
4130 case 0x0d:
4131 disas_ldst_single_struct(s, insn);
4132 break;
4133 case 0x19:
4134 if (extract32(insn, 21, 1) != 0) {
4135 disas_ldst_tag(s, insn);
4136 } else if (extract32(insn, 10, 2) == 0) {
4137 disas_ldst_ldapr_stlr(s, insn);
4138 } else {
4139 unallocated_encoding(s);
4140 }
4141 break;
4142 default:
4143 unallocated_encoding(s);
4144 break;
4145 }
4146}
4147
4148
4149
4150
4151
4152
4153
4154static void disas_pc_rel_adr(DisasContext *s, uint32_t insn)
4155{
4156 unsigned int page, rd;
4157 uint64_t base;
4158 uint64_t offset;
4159
4160 page = extract32(insn, 31, 1);
4161
4162 offset = sextract64(insn, 5, 19);
4163 offset = offset << 2 | extract32(insn, 29, 2);
4164 rd = extract32(insn, 0, 5);
4165 base = s->pc_curr;
4166
4167 if (page) {
4168
4169 base &= ~0xfff;
4170 offset <<= 12;
4171 }
4172
4173 tcg_gen_movi_i64(cpu_reg(s, rd), base + offset);
4174}
4175
4176
4177
4178
4179
4180
4181
4182
4183
4184
4185
4186
4187
4188
/*
 * Add/subtract (immediate): ADD(S)/SUB(S) with a 12-bit immediate,
 * optionally shifted left by 12.  The non-flag-setting forms use
 * SP-capable registers; the flag-setting forms write XZR-capable Rd.
 */
static void disas_add_sub_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    uint64_t imm = extract32(insn, 10, 12);
    bool shift = extract32(insn, 22, 1);
    bool setflags = extract32(insn, 29, 1);
    bool sub_op = extract32(insn, 30, 1);
    bool is_64bit = extract32(insn, 31, 1);

    TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
    TCGv_i64 tcg_rd = setflags ? cpu_reg(s, rd) : cpu_reg_sp(s, rd);
    TCGv_i64 tcg_result;

    if (shift) {
        imm <<= 12;
    }

    tcg_result = tcg_temp_new_i64();
    if (!setflags) {
        if (sub_op) {
            tcg_gen_subi_i64(tcg_result, tcg_rn, imm);
        } else {
            tcg_gen_addi_i64(tcg_result, tcg_rn, imm);
        }
    } else {
        /* flag-setting forms go through the NZCV-computing helpers */
        TCGv_i64 tcg_imm = tcg_const_i64(imm);
        if (sub_op) {
            gen_sub_CC(is_64bit, tcg_result, tcg_rn, tcg_imm);
        } else {
            gen_add_CC(is_64bit, tcg_result, tcg_rn, tcg_imm);
        }
        tcg_temp_free_i64(tcg_imm);
    }

    if (is_64bit) {
        tcg_gen_mov_i64(tcg_rd, tcg_result);
    } else {
        /* 32-bit result: zero-extend into the 64-bit register */
        tcg_gen_ext32u_i64(tcg_rd, tcg_result);
    }

    tcg_temp_free_i64(tcg_result);
}
4232
4233
4234
4235
4236
4237
4238
4239
4240
4241
4242
/*
 * Add/subtract (immediate, with tags): ADDG/SUBG (FEAT_MTE).  Adjust
 * the address by a granule-scaled offset and modify its allocation tag.
 */
static void disas_add_sub_imm_with_tags(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int uimm4 = extract32(insn, 10, 4);
    int uimm6 = extract32(insn, 16, 6);
    bool sub_op = extract32(insn, 30, 1);
    TCGv_i64 tcg_rn, tcg_rd;
    int imm;

    /* Test all of sf=1, S=0, o2=0, o3=0.  */
    if ((insn & 0xa040c000u) != 0x80000000u ||
        !dc_isar_feature(aa64_mte_insn_reg, s)) {
        unallocated_encoding(s);
        return;
    }

    imm = uimm6 << LOG2_TAG_GRANULE;
    if (sub_op) {
        imm = -imm;
    }

    tcg_rn = cpu_reg_sp(s, rn);
    tcg_rd = cpu_reg_sp(s, rd);

    if (s->ata) {
        /* tag arithmetic (including GCR exclusions) lives in a helper */
        TCGv_i32 offset = tcg_const_i32(imm);
        TCGv_i32 tag_offset = tcg_const_i32(uimm4);

        gen_helper_addsubg(tcg_rd, cpu_env, tcg_rn, offset, tag_offset);
        tcg_temp_free_i32(tag_offset);
        tcg_temp_free_i32(offset);
    } else {
        /* MTE disabled: plain add, and force the tag to zero */
        tcg_gen_addi_i64(tcg_rd, tcg_rn, imm);
        gen_address_with_allocation_tag0(tcg_rd, tcg_rd);
    }
}
4280
4281
4282
4283
4284
/*
 * Replicate the low-e-bit pattern in @mask across the full 64 bits
 * by repeated doubling.  @e must be a nonzero power of two no larger
 * than 64; when e == 64 the value is returned unchanged.
 */
static uint64_t bitfield_replicate(uint64_t mask, unsigned int e)
{
    assert(e != 0);
    for (unsigned int width = e; width < 64; width *= 2) {
        mask |= mask << width;
    }
    return mask;
}
4294
4295
/* Return a value with the bottom @length bits set (1 <= length <= 64). */
static inline uint64_t bitmask64(unsigned int length)
{
    assert(length > 0 && length <= 64);
    /* avoid shifting by 64, which would be undefined behavior */
    return length == 64 ? ~0ULL : (1ULL << length) - 1;
}
4301
4302
4303
4304
4305
4306
/*
 * Decode the N:immr:imms fields of a logical-immediate instruction
 * into the corresponding 64-bit wmask (the ARM ARM DecodeBitMasks
 * pseudocode, wmask only).  Returns false for the reserved encodings,
 * which must be treated as unallocated by the caller.
 */
bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn,
                            unsigned int imms, unsigned int immr)
{
    uint64_t mask;
    unsigned e, levels, s, r;
    int len;

    assert(immn < 2 && imms < 64 && immr < 64);

    /*
     * The bit patterns we create here are 64 bit patterns which are
     * vectors of identical elements of size e = 2, 4, 8, 16, 32 or
     * 64 bits each.  Each element contains the same value: a run of
     * between 1 and e-1 non-zero bits, rotated within the element by
     * between 0 and e-1 bits.
     *
     * The element size and run length are encoded into immn (1 bit)
     * and imms (6 bits) as follows:
     *   64 bit elements: immn = 1, imms = <length of run - 1>
     *   32 bit elements: immn = 0, imms = 0 : <length of run - 1>
     *   16 bit elements: immn = 0, imms = 10 : <length of run - 1>
     *    8 bit elements: immn = 0, imms = 110 : <length of run - 1>
     *    4 bit elements: immn = 0, imms = 1110 : <length of run - 1>
     *    2 bit elements: immn = 0, imms = 11110 : <length of run - 1>
     * Notice that immn = 0, imms = 11111x is the only combination
     * not covered by one of the above options; this is reserved.
     * Further, <length of run - 1> all-ones is a reserved pattern.
     *
     * In all cases the rotation is by immr % e (and immr is 6 bits).
     */

    /* First determine the element size */
    len = 31 - clz32((immn << 6) | (~imms & 0x3f));
    if (len < 1) {
        /* This is the immn == 0, imms == 0x11111x case */
        return false;
    }
    e = 1 << len;

    levels = e - 1;
    s = imms & levels;
    r = immr & levels;

    if (s == levels) {
        /* <length of run - 1> mustn't be all-ones */
        return false;
    }

    /*
     * Create the value of one element: s+1 set bits rotated
     * by r within the element (which is e bits wide)...
     */
    mask = bitmask64(s + 1);
    if (r) {
        mask = (mask >> r) | (mask << (e - r));
        mask &= bitmask64(e);
    }
    /* ...then replicate the element over the whole 64 bit value */
    mask = bitfield_replicate(mask, e);
    *result = mask;
    return true;
}
4367
4368
4369
4370
4371
4372
4373
/* Logical (immediate)
 *   31  30 29 28         23 22  21  16 15  10 9    5 4    0
 * +----+-----+-------------+---+------+------+------+------+
 * | sf | opc | 1 0 0 1 0 0 | N | immr | imms |  Rn  |  Rd  |
 * +----+-----+-------------+---+------+------+------+------+
 */
static void disas_logic_imm(DisasContext *s, uint32_t insn)
{
    unsigned int sf, opc, is_n, immr, imms, rn, rd;
    TCGv_i64 tcg_rd, tcg_rn;
    uint64_t wmask;
    bool is_and = false;

    sf = extract32(insn, 31, 1);
    opc = extract32(insn, 29, 2);
    is_n = extract32(insn, 22, 1);
    immr = extract32(insn, 16, 6);
    imms = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    /* N == 1 is reserved in the 32-bit variant */
    if (!sf && is_n) {
        unallocated_encoding(s);
        return;
    }

    if (opc == 0x3) { /* ANDS: Rd == 31 means XZR, not SP */
        tcg_rd = cpu_reg(s, rd);
    } else {
        /* AND/ORR/EOR may write to SP */
        tcg_rd = cpu_reg_sp(s, rd);
    }
    tcg_rn = cpu_reg(s, rn);

    if (!logic_imm_decode_wmask(&wmask, is_n, imms, immr)) {
        /* some immediate field values are reserved */
        unallocated_encoding(s);
        return;
    }

    if (!sf) {
        wmask &= 0xffffffff;
    }

    switch (opc) {
    case 0x3: /* ANDS */
    case 0x0: /* AND */
        tcg_gen_andi_i64(tcg_rd, tcg_rn, wmask);
        is_and = true;
        break;
    case 0x1: /* ORR */
        tcg_gen_ori_i64(tcg_rd, tcg_rn, wmask);
        break;
    case 0x2: /* EOR */
        tcg_gen_xori_i64(tcg_rd, tcg_rn, wmask);
        break;
    default:
        assert(FALSE); /* must handle all above */
        break;
    }

    if (!sf && !is_and) {
        /* Zero-extend the 32-bit result; we can skip this for AND
         * because the immediate already had its high 32 bits clear.
         */
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }

    if (opc == 3) { /* ANDS also sets the NZCV flags */
        gen_logic_CC(sf, tcg_rd);
    }
}
4439
4440
4441
4442
4443
4444
4445
4446
4447
4448
4449
4450
4451
/*
 * Move wide (immediate): MOVN, MOVZ, MOVK
 *
 *   31 30 29 28         23 22 21 20             5 4    0
 * +--+-----+-------------+-----+----------------+------+
 * |sf| opc | 1 0 0 1 0 1 |  hw |      imm16     |  Rd  |
 * +--+-----+-------------+-----+----------------+------+
 *
 * sf: 0 -> 32 bit, 1 -> 64 bit
 * opc: 00 -> N (inverted), 10 -> Z (zeroing), 11 -> K (keep)
 * hw: shift amount / 16 (shifts of 32/48 only valid when sf == 1)
 */
static void disas_movw_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    uint64_t imm = extract32(insn, 5, 16);
    int sf = extract32(insn, 31, 1);
    int opc = extract32(insn, 29, 2);
    int pos = extract32(insn, 21, 2) << 4;
    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_imm;

    if (!sf && (pos >= 32)) {
        unallocated_encoding(s);
        return;
    }

    switch (opc) {
    case 0: /* MOVN */
    case 2: /* MOVZ */
        imm <<= pos;
        if (opc == 0) {
            imm = ~imm;
        }
        if (!sf) {
            imm &= 0xffffffffu;
        }
        tcg_gen_movi_i64(tcg_rd, imm);
        break;
    case 3: /* MOVK: insert the 16-bit field, keeping other bits of Rd */
        tcg_imm = tcg_const_i64(imm);
        tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_imm, pos, 16);
        tcg_temp_free_i64(tcg_imm);
        if (!sf) {
            tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
        }
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
4492
4493
4494
4495
4496
4497
4498
/* Bitfield: SBFM, BFM, UBFM
 *   31  30 29 28         23 22  21  16 15  10 9    5 4    0
 * +----+-----+-------------+---+------+------+------+------+
 * | sf | opc | 1 0 0 1 1 0 | N | immr | imms |  Rn  |  Rd  |
 * +----+-----+-------------+---+------+------+------+------+
 */
static void disas_bitfield(DisasContext *s, uint32_t insn)
{
    unsigned int sf, n, opc, ri, si, rn, rd, bitsize, pos, len;
    TCGv_i64 tcg_rd, tcg_tmp;

    sf = extract32(insn, 31, 1);
    opc = extract32(insn, 29, 2);
    n = extract32(insn, 22, 1);
    ri = extract32(insn, 16, 6);   /* immr: rotate / source lsb */
    si = extract32(insn, 10, 6);   /* imms: source msb */
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);
    bitsize = sf ? 64 : 32;

    if (sf != n || ri >= bitsize || si >= bitsize || opc > 2) {
        unallocated_encoding(s);
        return;
    }

    tcg_rd = cpu_reg(s, rd);

    /* Suppress the zero-extend for !sf.  Since RI and SI are constrained
     * to be smaller than bitsize, we'll never reference data outside the
     * low 32-bits anyway.
     */
    tcg_tmp = read_cpu_reg(s, rn, 1);

    /* Recognize simple(r) extractions.  */
    if (si >= ri) {
        /* Wd<s-r:0> = Wn<s:r> */
        len = (si - ri) + 1;
        if (opc == 0) { /* SBFM: ASR, SBFX, SXTB, SXTH, SXTW */
            tcg_gen_sextract_i64(tcg_rd, tcg_tmp, ri, len);
            goto done;
        } else if (opc == 2) { /* UBFM: UBFX, LSR, UXTB, UXTH */
            tcg_gen_extract_i64(tcg_rd, tcg_tmp, ri, len);
            return;
        }
        /* opc == 1, BFXIL: fall through to deposit */
        tcg_gen_shri_i64(tcg_tmp, tcg_tmp, ri);
        pos = 0;
    } else {
        /* Handle the ri > si case with a deposit:
         * Wd<32+s-r,32-r> = Wn<s:0>
         */
        len = si + 1;
        pos = (bitsize - ri) & (bitsize - 1);
    }

    if (opc == 0 && len < ri) {
        /* SBFM: sign extend the destination field from len to fill the
         * balance of the word.  Let the deposit below insert all of
         * those sign bits.
         */
        tcg_gen_sextract_i64(tcg_tmp, tcg_tmp, 0, len);
        len = ri;
    }

    if (opc == 1) { /* BFM, BFXIL: merge into existing Rd bits */
        tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, pos, len);
    } else {
        /* SBFM or UBFM: deposit into zeroes.  We haven't modified any
         * bits outside bitsize, so the zero-extension below is unneeded.
         */
        tcg_gen_deposit_z_i64(tcg_rd, tcg_tmp, pos, len);
        return;
    }

 done:
    if (!sf) { /* zero extend final result */
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
}
4570
4571
4572
4573
4574
4575
4576
/* Extract (EXTR; the ROR-immediate alias is Rn == Rm)
 *   31  30 29 28         23 22  21 20  16 15    10 9    5 4    0
 * +----+------+-------------+---+----+------+--------+------+------+
 * | sf | op21 | 1 0 0 1 1 1 | N | o0 |  Rm  |  imms  |  Rn  |  Rd  |
 * +----+------+-------------+---+----+------+--------+------+------+
 */
static void disas_extract(DisasContext *s, uint32_t insn)
{
    unsigned int sf, n, rm, imm, rn, rd, bitsize, op21, op0;

    sf = extract32(insn, 31, 1);
    n = extract32(insn, 22, 1);
    rm = extract32(insn, 16, 5);
    imm = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);
    op21 = extract32(insn, 29, 2);
    op0 = extract32(insn, 21, 1);
    bitsize = sf ? 64 : 32;

    if (sf != n || op21 || op0 || imm >= bitsize) {
        unallocated_encoding(s);
    } else {
        TCGv_i64 tcg_rd, tcg_rm, tcg_rn;

        tcg_rd = cpu_reg(s, rd);

        if (unlikely(imm == 0)) {
            /* imm == 0: the result is simply Rm; avoid generating a
             * shift-by-bitsize, which tcg shifts don't support.
             */
            if (sf) {
                tcg_gen_mov_i64(tcg_rd, cpu_reg(s, rm));
            } else {
                tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, rm));
            }
        } else {
            tcg_rm = cpu_reg(s, rm);
            tcg_rn = cpu_reg(s, rn);

            if (sf) {
                /* Specialization to ROR happens in EXTRACT2.  */
                tcg_gen_extract2_i64(tcg_rd, tcg_rm, tcg_rn, imm);
            } else {
                /* 32-bit: work in i32, then zero-extend into Xd */
                TCGv_i32 t0 = tcg_temp_new_i32();

                tcg_gen_extrl_i64_i32(t0, tcg_rm);
                if (rm == rn) {
                    /* Rn == Rm is the ROR (immediate) alias */
                    tcg_gen_rotri_i32(t0, t0, imm);
                } else {
                    TCGv_i32 t1 = tcg_temp_new_i32();
                    tcg_gen_extrl_i64_i32(t1, tcg_rn);
                    tcg_gen_extract2_i32(t0, t0, t1, imm);
                    tcg_temp_free_i32(t1);
                }
                tcg_gen_extu_i32_i64(tcg_rd, t0);
                tcg_temp_free_i32(t0);
            }
        }
    }
}
4632
4633
/* Data processing - immediate: dispatch on insn bits [28:23] */
static void disas_data_proc_imm(DisasContext *s, uint32_t insn)
{
    switch (extract32(insn, 23, 6)) {
    case 0x20: case 0x21: /* PC-rel. addressing (ADR, ADRP) */
        disas_pc_rel_adr(s, insn);
        break;
    case 0x22: /* Add/subtract (immediate) */
        disas_add_sub_imm(s, insn);
        break;
    case 0x23: /* Add/subtract (immediate, with tags) */
        disas_add_sub_imm_with_tags(s, insn);
        break;
    case 0x24: /* Logical (immediate) */
        disas_logic_imm(s, insn);
        break;
    case 0x25: /* Move wide (immediate) */
        disas_movw_imm(s, insn);
        break;
    case 0x26: /* Bitfield */
        disas_bitfield(s, insn);
        break;
    case 0x27: /* Extract */
        disas_extract(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
4663
4664
4665
4666
4667
4668
/* Shift a TCGv src by TCGv shift_amount, put result in dst.
 * Note that it is the caller's responsibility to ensure that the
 * shift amount is in range (ie 0..31 or 0..63) and provide the ARM
 * mandated semantics for out of range shifts.
 */
static void shift_reg(TCGv_i64 dst, TCGv_i64 src, int sf,
                      enum a64_shift_type shift_type, TCGv_i64 shift_amount)
{
    switch (shift_type) {
    case A64_SHIFT_TYPE_LSL:
        tcg_gen_shl_i64(dst, src, shift_amount);
        break;
    case A64_SHIFT_TYPE_LSR:
        tcg_gen_shr_i64(dst, src, shift_amount);
        break;
    case A64_SHIFT_TYPE_ASR:
        /* For the 32-bit case, sign-extend first so the arithmetic
         * shift sees the correct sign bit.
         */
        if (!sf) {
            tcg_gen_ext32s_i64(dst, src);
        }
        tcg_gen_sar_i64(dst, sf ? src : dst, shift_amount);
        break;
    case A64_SHIFT_TYPE_ROR:
        if (sf) {
            tcg_gen_rotr_i64(dst, src, shift_amount);
        } else {
            /* 32-bit rotate: do it in i32, then widen */
            TCGv_i32 t0, t1;
            t0 = tcg_temp_new_i32();
            t1 = tcg_temp_new_i32();
            tcg_gen_extrl_i64_i32(t0, src);
            tcg_gen_extrl_i64_i32(t1, shift_amount);
            tcg_gen_rotr_i32(t0, t0, t1);
            tcg_gen_extu_i32_i64(dst, t0);
            tcg_temp_free_i32(t0);
            tcg_temp_free_i32(t1);
        }
        break;
    default:
        assert(FALSE); /* all shift types handled above */
        break;
    }

    if (!sf) { /* zero extend final result */
        tcg_gen_ext32u_i64(dst, dst);
    }
}
4709
4710
4711
4712
4713
4714static void shift_reg_imm(TCGv_i64 dst, TCGv_i64 src, int sf,
4715 enum a64_shift_type shift_type, unsigned int shift_i)
4716{
4717 assert(shift_i < (sf ? 64 : 32));
4718
4719 if (shift_i == 0) {
4720 tcg_gen_mov_i64(dst, src);
4721 } else {
4722 TCGv_i64 shift_const;
4723
4724 shift_const = tcg_const_i64(shift_i);
4725 shift_reg(dst, src, sf, shift_type, shift_const);
4726 tcg_temp_free_i64(shift_const);
4727 }
4728}
4729
4730
4731
4732
4733
4734
4735
/* Logical (shifted register)
 *   31  30 29 28       24 23   22 21  20  16 15    10 9    5 4    0
 * +----+-----+-----------+-------+---+------+--------+------+------+
 * | sf | opc | 0 1 0 1 0 | shift | N |  Rm  |  imm6  |  Rn  |  Rd  |
 * +----+-----+-----------+-------+---+------+--------+------+------+
 */
static void disas_logic_reg(DisasContext *s, uint32_t insn)
{
    TCGv_i64 tcg_rd, tcg_rn, tcg_rm;
    unsigned int sf, opc, shift_type, invert, rm, shift_amount, rn, rd;

    sf = extract32(insn, 31, 1);
    opc = extract32(insn, 29, 2);
    shift_type = extract32(insn, 22, 2);
    invert = extract32(insn, 21, 1);
    rm = extract32(insn, 16, 5);
    shift_amount = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    /* 32-bit variant: shift amounts of 32..63 are reserved */
    if (!sf && (shift_amount & (1 << 5))) {
        unallocated_encoding(s);
        return;
    }

    tcg_rd = cpu_reg(s, rd);

    if (opc == 1 && shift_amount == 0 && shift_type == 0 && rn == 31) {
        /* Unshifted ORR and ORN with WZR/XZR is the standard encoding for
         * register-register MOV and MVN, so it is worth special casing.
         */
        tcg_rm = cpu_reg(s, rm);
        if (invert) { /* MVN */
            tcg_gen_not_i64(tcg_rd, tcg_rm);
            if (!sf) {
                tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
            }
        } else { /* MOV */
            if (sf) {
                tcg_gen_mov_i64(tcg_rd, tcg_rm);
            } else {
                tcg_gen_ext32u_i64(tcg_rd, tcg_rm);
            }
        }
        return;
    }

    tcg_rm = read_cpu_reg(s, rm, sf);

    if (shift_amount) {
        shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, shift_amount);
    }

    tcg_rn = cpu_reg(s, rn);

    switch (opc | (invert << 2)) {
    case 0: /* AND */
    case 3: /* ANDS */
        tcg_gen_and_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 1: /* ORR */
        tcg_gen_or_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 2: /* EOR */
        tcg_gen_xor_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 4: /* BIC */
    case 7: /* BICS */
        tcg_gen_andc_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 5: /* ORN */
        tcg_gen_orc_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 6: /* EON */
        tcg_gen_eqv_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    default:
        assert(FALSE); /* all 3-bit opc|invert values handled above */
        break;
    }

    if (!sf) {
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }

    if (opc == 3) { /* ANDS/BICS set flags */
        gen_logic_CC(sf, tcg_rd);
    }
}
4819
4820
4821
4822
4823
4824
4825
4826
4827
4828
4829
4830
4831
4832
4833
4834
4835
4836
/*
 * Add/subtract (extended register)
 *
 *  31|30|29|28       24|23 22|21|20   16|15  13|12  10|9  5|4  0|
 * +--+--+--+-----------+-----+--+-------+------+------+----+----+
 * |sf|op| S| 0 1 0 1 1 | opt | 1|  Rm   |option| imm3 | Rn | Rd |
 * +--+--+--+-----------+-----+--+-------+------+------+----+----+
 *
 *  sf: 0 -> 32bit, 1 -> 64bit
 *  op: 0 -> add  , 1 -> sub
 *   S: 1 -> set flags
 * opt: 00
 * option: extension type (see DecodeRegExtend)
 * imm3: optional shift to Rm (0..4)
 *
 * Rd = Rn + LSL(extend(Rm), imm3)
 */
static void disas_add_sub_ext_reg(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int imm3 = extract32(insn, 10, 3);
    int option = extract32(insn, 13, 3);
    int rm = extract32(insn, 16, 5);
    int opt = extract32(insn, 22, 2);
    bool setflags = extract32(insn, 29, 1);
    bool sub_op = extract32(insn, 30, 1);
    bool sf = extract32(insn, 31, 1);

    TCGv_i64 tcg_rm, tcg_rn; /* temps */
    TCGv_i64 tcg_rd;
    TCGv_i64 tcg_result;

    if (imm3 > 4 || opt != 0) {
        unallocated_encoding(s);
        return;
    }

    /* non-flag setting ops may use SP as destination */
    if (!setflags) {
        tcg_rd = cpu_reg_sp(s, rd);
    } else {
        tcg_rd = cpu_reg(s, rd);
    }
    tcg_rn = read_cpu_reg_sp(s, rn, sf);

    tcg_rm = read_cpu_reg(s, rm, sf);
    ext_and_shift_reg(tcg_rm, tcg_rm, option, imm3);

    tcg_result = tcg_temp_new_i64();

    if (!setflags) {
        if (sub_op) {
            tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
        } else {
            tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
        }
    } else {
        if (sub_op) {
            gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
        } else {
            gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
        }
    }

    if (sf) {
        tcg_gen_mov_i64(tcg_rd, tcg_result);
    } else {
        tcg_gen_ext32u_i64(tcg_rd, tcg_result);
    }

    tcg_temp_free_i64(tcg_result);
}
4893
4894
4895
4896
4897
4898
4899
4900
4901
4902
4903
4904
4905
4906
4907
/*
 * Add/subtract (shifted register)
 *
 *  31 30 29 28       24 23 22 21 20   16 15     10 9    5 4    0
 * +--+--+--+-----------+-----+--+-------+---------+------+------+
 * |sf|op| S| 0 1 0 1 1 |shift| 0|  Rm   |  imm6   |  Rn  |  Rd  |
 * +--+--+--+-----------+-----+--+-------+---------+------+------+
 *
 * Rd = Rn +/- shift(Rm, imm6); shift == 3 (ROR) is reserved.
 */
static void disas_add_sub_reg(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int imm6 = extract32(insn, 10, 6);
    int rm = extract32(insn, 16, 5);
    int shift_type = extract32(insn, 22, 2);
    bool setflags = extract32(insn, 29, 1);
    bool sub_op = extract32(insn, 30, 1);
    bool sf = extract32(insn, 31, 1);

    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_rn, tcg_rm;
    TCGv_i64 tcg_result;

    /* ROR is reserved; 32-bit shifts of 32..63 are reserved */
    if ((shift_type == 3) || (!sf && (imm6 > 31))) {
        unallocated_encoding(s);
        return;
    }

    tcg_rn = read_cpu_reg(s, rn, sf);
    tcg_rm = read_cpu_reg(s, rm, sf);

    shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, imm6);

    tcg_result = tcg_temp_new_i64();

    if (!setflags) {
        if (sub_op) {
            tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
        } else {
            tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
        }
    } else {
        if (sub_op) {
            gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
        } else {
            gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
        }
    }

    if (sf) {
        tcg_gen_mov_i64(tcg_rd, tcg_result);
    } else {
        tcg_gen_ext32u_i64(tcg_rd, tcg_result);
    }

    tcg_temp_free_i64(tcg_result);
}
4957
4958
4959
4960
4961
4962
4963
4964
/* Data-processing (3 source): MADD, MSUB, the widening multiplies
 * (SMADDL/SMSUBL/UMADDL/UMSUBL) and the high multiplies (SMULH/UMULH).
 *
 *    31 30  29 28       24 23 21  20  16  15  14  10 9    5 4    0
 *  +--+------+-----------+------+------+----+------+------+------+
 *  |sf| op54 | 1 1 0 1 1 | op31 |  Rm  | o0 |  Ra  |  Rn  |  Rd  |
 *  +--+------+-----------+------+------+----+------+------+------+
 */
static void disas_data_proc_3src(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int ra = extract32(insn, 10, 5);
    int rm = extract32(insn, 16, 5);
    int op_id = (extract32(insn, 29, 3) << 4) |
        (extract32(insn, 21, 3) << 1) |
        extract32(insn, 15, 1);
    bool sf = extract32(insn, 31, 1);
    bool is_sub = extract32(op_id, 0, 1);
    bool is_high = extract32(op_id, 2, 1);
    bool is_signed = false;
    TCGv_i64 tcg_op1;
    TCGv_i64 tcg_op2;
    TCGv_i64 tcg_tmp;

    /* Note that op_id is sf:op54:op31:o0 so it includes the 32/64 size flag */
    switch (op_id) {
    case 0x42: /* SMADDL */
    case 0x43: /* SMSUBL */
    case 0x44: /* SMULH */
        is_signed = true;
        break;
    case 0x0: /* MADD (32bit) */
    case 0x1: /* MSUB (32bit) */
    case 0x40: /* MADD (64bit) */
    case 0x41: /* MSUB (64bit) */
    case 0x4a: /* UMADDL */
    case 0x4b: /* UMSUBL */
    case 0x4c: /* UMULH */
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (is_high) {
        /* SMULH/UMULH: Rd gets the high 64 bits of the 128-bit product */
        TCGv_i64 low_bits = tcg_temp_new_i64(); /* low bits discarded */
        TCGv_i64 tcg_rd = cpu_reg(s, rd);
        TCGv_i64 tcg_rn = cpu_reg(s, rn);
        TCGv_i64 tcg_rm = cpu_reg(s, rm);

        if (is_signed) {
            tcg_gen_muls2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
        } else {
            tcg_gen_mulu2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
        }

        tcg_temp_free_i64(low_bits);
        return;
    }

    tcg_op1 = tcg_temp_new_i64();
    tcg_op2 = tcg_temp_new_i64();
    tcg_tmp = tcg_temp_new_i64();

    if (op_id < 0x42) {
        /* 64x64 (and 32-bit) forms use the operands as-is */
        tcg_gen_mov_i64(tcg_op1, cpu_reg(s, rn));
        tcg_gen_mov_i64(tcg_op2, cpu_reg(s, rm));
    } else {
        /* Widening 32x32->64 forms: extend the 32-bit sources first */
        if (is_signed) {
            tcg_gen_ext32s_i64(tcg_op1, cpu_reg(s, rn));
            tcg_gen_ext32s_i64(tcg_op2, cpu_reg(s, rm));
        } else {
            tcg_gen_ext32u_i64(tcg_op1, cpu_reg(s, rn));
            tcg_gen_ext32u_i64(tcg_op2, cpu_reg(s, rm));
        }
    }

    if (ra == 31 && !is_sub) {
        /* Special-case MADD with rA == XZR; it is the standard MUL alias */
        tcg_gen_mul_i64(cpu_reg(s, rd), tcg_op1, tcg_op2);
    } else {
        tcg_gen_mul_i64(tcg_tmp, tcg_op1, tcg_op2);
        if (is_sub) {
            tcg_gen_sub_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
        } else {
            tcg_gen_add_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
        }
    }

    if (!sf) {
        tcg_gen_ext32u_i64(cpu_reg(s, rd), cpu_reg(s, rd));
    }

    tcg_temp_free_i64(tcg_op1);
    tcg_temp_free_i64(tcg_op2);
    tcg_temp_free_i64(tcg_tmp);
}
5055
5056
5057
5058
5059
5060
5061
5062
/* Add/subtract (with carry): ADC, ADCS, SBC, SBCS
 *  31 30 29 28 27 26 25 24 23 22 21  20  16  15       10  9  5 4  0
 * +--+--+--+------------------------+------+-------------+------+-----+
 * |sf|op| S| 1  1  0  1  0  0  0  0 |  rm  | 0 0 0 0 0 0 |  rn  |  rd |
 * +--+--+--+------------------------+------+-------------+------+-----+
 */
static void disas_adc_sbc(DisasContext *s, uint32_t insn)
{
    unsigned int sf, op, setflags, rm, rn, rd;
    TCGv_i64 tcg_y, tcg_rn, tcg_rd;

    sf = extract32(insn, 31, 1);
    op = extract32(insn, 30, 1);
    setflags = extract32(insn, 29, 1);
    rm = extract32(insn, 16, 5);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    tcg_rd = cpu_reg(s, rd);
    tcg_rn = cpu_reg(s, rn);

    if (op) {
        /* SBC: add the inverted second operand (plus carry) */
        tcg_y = new_tmp_a64(s);
        tcg_gen_not_i64(tcg_y, cpu_reg(s, rm));
    } else {
        tcg_y = cpu_reg(s, rm);
    }

    if (setflags) {
        gen_adc_CC(sf, tcg_rd, tcg_rn, tcg_y);
    } else {
        gen_adc(sf, tcg_rd, tcg_rn, tcg_y);
    }
}
5091
5092
5093
5094
5095
5096
5097
5098
/*
 * Rotate right into flags (RMIF, FEAT_FlagM)
 *  31 30 29                21       15          10      5  4      0
 * +--+--+--+-----------------+--------+-----------+------+--+------+
 * |sf|op| S| 1 1 0 1 0 0 0 0 |  imm6  | 0 0 0 0 1 |  Rn  |o2| mask |
 * +--+--+--+-----------------+--------+-----------+------+--+------+
 */
static void disas_rotate_right_into_flags(DisasContext *s, uint32_t insn)
{
    int mask = extract32(insn, 0, 4);
    int o2 = extract32(insn, 4, 1);
    int rn = extract32(insn, 5, 5);
    int imm6 = extract32(insn, 15, 6);
    int sf_op_s = extract32(insn, 29, 3);
    TCGv_i64 tcg_rn;
    TCGv_i32 nzcv;

    if (sf_op_s != 5 || o2 != 0 || !dc_isar_feature(aa64_condm_4, s)) {
        unallocated_encoding(s);
        return;
    }

    tcg_rn = read_cpu_reg(s, rn, 1);
    tcg_gen_rotri_i64(tcg_rn, tcg_rn, imm6);

    /* The rotated low 4 bits hold the new N:Z:C:V values */
    nzcv = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(nzcv, tcg_rn);

    /* Distribute the selected bits into the internal flag variables,
     * matching the representation used by the rest of the translator
     * (e.g. NF/VF hold the flag in their sign bit).
     */
    if (mask & 8) { /* N */
        tcg_gen_shli_i32(cpu_NF, nzcv, 31 - 3);
    }
    if (mask & 4) { /* Z: cpu_ZF holds "zero iff Z is set" */
        tcg_gen_not_i32(cpu_ZF, nzcv);
        tcg_gen_andi_i32(cpu_ZF, cpu_ZF, 4);
    }
    if (mask & 2) { /* C */
        tcg_gen_extract_i32(cpu_CF, nzcv, 1, 1);
    }
    if (mask & 1) { /* V */
        tcg_gen_shli_i32(cpu_VF, nzcv, 31 - 0);
    }

    tcg_temp_free_i32(nzcv);
}
5136
5137
5138
5139
5140
5141
5142
5143
/*
 * Evaluate into flags (SETF8/SETF16, FEAT_FlagM)
 *  31 30 29                21        15   14        10      5  4      0
 * +--+--+--+-----------------+---------+----+---------+------+--+------+
 * |sf|op| S| 1 1 0 1 0 0 0 0 | opcode2 | sz | 0 0 1 0 |  Rn  |o3| mask |
 * +--+--+--+-----------------+---------+----+---------+------+--+------+
 */
static void disas_evaluate_into_flags(DisasContext *s, uint32_t insn)
{
    int o3_mask = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int o2 = extract32(insn, 15, 6);
    int sz = extract32(insn, 14, 1);
    int sf_op_s = extract32(insn, 29, 3);
    TCGv_i32 tmp;
    int shift;

    if (sf_op_s != 1 || o2 != 0 || o3_mask != 0xd ||
        !dc_isar_feature(aa64_condm_4, s)) {
        unallocated_encoding(s);
        return;
    }
    /* Shift that puts the byte (SETF8) or halfword (SETF16) sign bit
     * into bit 31, where NF/VF keep their flag value.
     */
    shift = sz ? 16 : 24;

    tmp = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(tmp, cpu_reg(s, rn));
    tcg_gen_shli_i32(cpu_NF, tmp, shift);
    /* V = bit (shift-1) XOR bit shift of the input */
    tcg_gen_shli_i32(cpu_VF, tmp, shift - 1);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_VF, cpu_NF);
    tcg_temp_free_i32(tmp);
}
5169
5170
5171
5172
5173
5174
5175
5176
/* Conditional compare (immediate / register): CCMN, CCMP
 *  31 30 29 28 27 26 25 24 23 22 21  20    16 15  12  11  10  9   5  4 3   0
 * +--+--+--+------------------------+--------+------+----+--+------+--+-----+
 * |sf|op| S| 1  1  0  1  0  0  1  0 |imm5/rm | cond |i/r |o2|  Rn  |o3|nzcv |
 * +--+--+--+------------------------+--------+------+----+--+------+--+-----+
 *
 * If cond holds, do the compare and set flags normally; otherwise set
 * the flags to the immediate nzcv value.
 */
static void disas_cc(DisasContext *s, uint32_t insn)
{
    unsigned int sf, op, y, cond, rn, nzcv, is_imm;
    TCGv_i32 tcg_t0, tcg_t1, tcg_t2;
    TCGv_i64 tcg_tmp, tcg_y, tcg_rn;
    DisasCompare c;

    if (!extract32(insn, 29, 1)) { /* S bit must be set */
        unallocated_encoding(s);
        return;
    }
    if (insn & (1 << 10 | 1 << 4)) { /* o2 and o3 must be zero */
        unallocated_encoding(s);
        return;
    }
    sf = extract32(insn, 31, 1);
    op = extract32(insn, 30, 1);
    is_imm = extract32(insn, 11, 1);
    y = extract32(insn, 16, 5); /* y = rm (reg) or imm5 (immediate) */
    cond = extract32(insn, 12, 4);
    rn = extract32(insn, 5, 5);
    nzcv = extract32(insn, 0, 4);

    /* Set T0 = !COND.  */
    tcg_t0 = tcg_temp_new_i32();
    arm_test_cc(&c, cond);
    tcg_gen_setcondi_i32(tcg_invert_cond(c.cond), tcg_t0, c.value, 0);
    arm_free_cc(&c);

    /* Load the arguments for the new comparison.  */
    if (is_imm) {
        tcg_y = new_tmp_a64(s);
        tcg_gen_movi_i64(tcg_y, y);
    } else {
        tcg_y = cpu_reg(s, y);
    }
    tcg_rn = cpu_reg(s, rn);

    /* Set the flags for the new comparison.  */
    tcg_tmp = tcg_temp_new_i64();
    if (op) {
        gen_sub_CC(sf, tcg_tmp, tcg_rn, tcg_y);
    } else {
        gen_add_CC(sf, tcg_tmp, tcg_rn, tcg_y);
    }
    tcg_temp_free_i64(tcg_tmp);

    /* If COND was false, force the flags to #nzcv.  Compute two masks
     * to help with this: T1 = (COND ? 0 : -1), T2 = (COND ? -1 : 0).
     * For tcg hosts that support ANDC, tcg can merge the "and" into
     * "andc"; otherwise we use T2 with a plain AND.
     */
    tcg_t1 = tcg_temp_new_i32();
    tcg_t2 = tcg_temp_new_i32();
    tcg_gen_neg_i32(tcg_t1, tcg_t0);
    tcg_gen_subi_i32(tcg_t2, tcg_t0, 1);

    if (nzcv & 8) { /* N */
        tcg_gen_or_i32(cpu_NF, cpu_NF, tcg_t1);
    } else {
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_NF, cpu_NF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_NF, cpu_NF, tcg_t2);
        }
    }
    if (nzcv & 4) { /* Z: cpu_ZF == 0 means Z set, so clear, not set */
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_ZF, cpu_ZF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_ZF, cpu_ZF, tcg_t2);
        }
    } else {
        tcg_gen_or_i32(cpu_ZF, cpu_ZF, tcg_t0);
    }
    if (nzcv & 2) { /* C */
        tcg_gen_or_i32(cpu_CF, cpu_CF, tcg_t0);
    } else {
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_CF, cpu_CF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_CF, cpu_CF, tcg_t2);
        }
    }
    if (nzcv & 1) { /* V */
        tcg_gen_or_i32(cpu_VF, cpu_VF, tcg_t1);
    } else {
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_VF, cpu_VF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_VF, cpu_VF, tcg_t2);
        }
    }
    tcg_temp_free_i32(tcg_t0);
    tcg_temp_free_i32(tcg_t1);
    tcg_temp_free_i32(tcg_t2);
}
5274
5275
5276
5277
5278
5279
5280
/* Conditional select: CSEL, CSINC, CSINV, CSNEG
 *   31   30  29  28             21 20  16 15  12 11 10 9    5 4    0
 * +----+----+----+-----------------+------+------+-----+------+------+
 * | sf | op | S  | 1 1 0 1 0 1 0 0 |  Rm  | cond | op2 |  Rn  |  Rd  |
 * +----+----+----+-----------------+------+------+-----+------+------+
 */
static void disas_cond_select(DisasContext *s, uint32_t insn)
{
    unsigned int sf, else_inv, rm, cond, else_inc, rn, rd;
    TCGv_i64 tcg_rd, zero;
    DisasCompare64 c;

    if (extract32(insn, 29, 1) || extract32(insn, 11, 1)) {
        /* S == 1 or op2<1> == 1 */
        unallocated_encoding(s);
        return;
    }
    sf = extract32(insn, 31, 1);
    else_inv = extract32(insn, 30, 1);  /* op: invert else-value (CSINV/CSNEG) */
    rm = extract32(insn, 16, 5);
    cond = extract32(insn, 12, 4);
    else_inc = extract32(insn, 10, 1);  /* op2<0>: increment else (CSINC/CSNEG) */
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    tcg_rd = cpu_reg(s, rd);

    a64_test_cc(&c, cond);
    zero = tcg_const_i64(0);

    if (rn == 31 && rm == 31 && (else_inc ^ else_inv)) {
        /* CSET & CSETM aliases: materialize 0/1 or 0/-1 directly */
        tcg_gen_setcond_i64(tcg_invert_cond(c.cond), tcg_rd, c.value, zero);
        if (else_inv) {
            tcg_gen_neg_i64(tcg_rd, tcg_rd);
        }
    } else {
        TCGv_i64 t_true = cpu_reg(s, rn);
        TCGv_i64 t_false = read_cpu_reg(s, rm, 1);
        if (else_inv && else_inc) { /* CSNEG */
            tcg_gen_neg_i64(t_false, t_false);
        } else if (else_inv) { /* CSINV */
            tcg_gen_not_i64(t_false, t_false);
        } else if (else_inc) { /* CSINC */
            tcg_gen_addi_i64(t_false, t_false, 1);
        }
        tcg_gen_movcond_i64(c.cond, tcg_rd, c.value, zero, t_true, t_false);
    }

    tcg_temp_free_i64(zero);
    a64_free_cc(&c);

    if (!sf) {
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
}
5331
5332static void handle_clz(DisasContext *s, unsigned int sf,
5333 unsigned int rn, unsigned int rd)
5334{
5335 TCGv_i64 tcg_rd, tcg_rn;
5336 tcg_rd = cpu_reg(s, rd);
5337 tcg_rn = cpu_reg(s, rn);
5338
5339 if (sf) {
5340 tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64);
5341 } else {
5342 TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
5343 tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
5344 tcg_gen_clzi_i32(tcg_tmp32, tcg_tmp32, 32);
5345 tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
5346 tcg_temp_free_i32(tcg_tmp32);
5347 }
5348}
5349
5350static void handle_cls(DisasContext *s, unsigned int sf,
5351 unsigned int rn, unsigned int rd)
5352{
5353 TCGv_i64 tcg_rd, tcg_rn;
5354 tcg_rd = cpu_reg(s, rd);
5355 tcg_rn = cpu_reg(s, rn);
5356
5357 if (sf) {
5358 tcg_gen_clrsb_i64(tcg_rd, tcg_rn);
5359 } else {
5360 TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
5361 tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
5362 tcg_gen_clrsb_i32(tcg_tmp32, tcg_tmp32);
5363 tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
5364 tcg_temp_free_i32(tcg_tmp32);
5365 }
5366}
5367
5368static void handle_rbit(DisasContext *s, unsigned int sf,
5369 unsigned int rn, unsigned int rd)
5370{
5371 TCGv_i64 tcg_rd, tcg_rn;
5372 tcg_rd = cpu_reg(s, rd);
5373 tcg_rn = cpu_reg(s, rn);
5374
5375 if (sf) {
5376 gen_helper_rbit64(tcg_rd, tcg_rn);
5377 } else {
5378 TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
5379 tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
5380 gen_helper_rbit(tcg_tmp32, tcg_tmp32);
5381 tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
5382 tcg_temp_free_i32(tcg_tmp32);
5383 }
5384}
5385
5386
5387static void handle_rev64(DisasContext *s, unsigned int sf,
5388 unsigned int rn, unsigned int rd)
5389{
5390 if (!sf) {
5391 unallocated_encoding(s);
5392 return;
5393 }
5394 tcg_gen_bswap64_i64(cpu_reg(s, rd), cpu_reg(s, rn));
5395}
5396
5397
5398
5399
/* REV with sf==0 (32-bit REV), and REV32 with sf==1: byte-reverse
 * each 32-bit word of the register.
 */
static void handle_rev32(DisasContext *s, unsigned int sf,
                         unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd = cpu_reg(s, rd);

    if (sf) {
        TCGv_i64 tcg_tmp = tcg_temp_new_i64();
        TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);

        /* bswap32_i64 requires zero high word; swap each half separately */
        tcg_gen_ext32u_i64(tcg_tmp, tcg_rn);
        tcg_gen_bswap32_i64(tcg_rd, tcg_tmp);
        tcg_gen_shri_i64(tcg_tmp, tcg_rn, 32);
        tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp);
        tcg_gen_concat32_i64(tcg_rd, tcg_rd, tcg_tmp);

        tcg_temp_free_i64(tcg_tmp);
    } else {
        tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, rn));
        tcg_gen_bswap32_i64(tcg_rd, tcg_rd);
    }
}
5422
5423
/* REV16 (opcode==1): byte-reverse each 16-bit halfword of the register */
static void handle_rev16(DisasContext *s, unsigned int sf,
                         unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();
    TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
    TCGv_i64 mask = tcg_const_i64(sf ? 0x00ff00ff00ff00ffull : 0x00ff00ff);

    /* Swap byte pairs: (x >> 8) & mask gives the high bytes moved down,
     * (x & mask) << 8 gives the low bytes moved up; OR them together.
     */
    tcg_gen_shri_i64(tcg_tmp, tcg_rn, 8);
    tcg_gen_and_i64(tcg_rd, tcg_rn, mask);
    tcg_gen_and_i64(tcg_tmp, tcg_tmp, mask);
    tcg_gen_shli_i64(tcg_rd, tcg_rd, 8);
    tcg_gen_or_i64(tcg_rd, tcg_rd, tcg_tmp);

    tcg_temp_free_i64(mask);
    tcg_temp_free_i64(tcg_tmp);
}
5441
5442
5443
5444
5445
5446
5447
/* Data-processing (1 source)
 *   31  30  29  28             21 20     16 15    10 9    5 4    0
 * +----+---+---+-----------------+---------+--------+------+------+
 * | sf | 1 | S | 1 1 0 1 0 1 1 0 | opcode2 | opcode |  Rn  |  Rd  |
 * +----+---+---+-----------------+---------+--------+------+------+
 */
static void disas_data_proc_1src(DisasContext *s, uint32_t insn)
{
    unsigned int sf, opcode, opcode2, rn, rd;
    TCGv_i64 tcg_rd;

    if (extract32(insn, 29, 1)) { /* S must be 0 */
        unallocated_encoding(s);
        return;
    }

    sf = extract32(insn, 31, 1);
    opcode = extract32(insn, 10, 6);
    opcode2 = extract32(insn, 16, 5);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

/* Pack sf:opcode2:opcode into one switchable value */
#define MAP(SF, O2, O1) ((SF) | (O1 << 1) | (O2 << 7))

    switch (MAP(sf, opcode2, opcode)) {
    case MAP(0, 0x00, 0x00): /* RBIT */
    case MAP(1, 0x00, 0x00):
        handle_rbit(s, sf, rn, rd);
        break;
    case MAP(0, 0x00, 0x01): /* REV16 */
    case MAP(1, 0x00, 0x01):
        handle_rev16(s, sf, rn, rd);
        break;
    case MAP(0, 0x00, 0x02): /* REV/REV32 */
    case MAP(1, 0x00, 0x02):
        handle_rev32(s, sf, rn, rd);
        break;
    case MAP(1, 0x00, 0x03): /* REV64 */
        handle_rev64(s, sf, rn, rd);
        break;
    case MAP(0, 0x00, 0x04): /* CLZ */
    case MAP(1, 0x00, 0x04):
        handle_clz(s, sf, rn, rd);
        break;
    case MAP(0, 0x00, 0x05): /* CLS */
    case MAP(1, 0x00, 0x05):
        handle_cls(s, sf, rn, rd);
        break;
    /* For the pointer-auth cases below: when pauth is implemented but
     * disabled, the instruction executes as a NOP; when pauth is not
     * implemented at all, it is unallocated.
     */
    case MAP(1, 0x01, 0x00): /* PACIA */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacia(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x01): /* PACIB */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacib(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x02): /* PACDA */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacda(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x03): /* PACDB */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacdb(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x04): /* AUTIA */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autia(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x05): /* AUTIB */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autib(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x06): /* AUTDA */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autda(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x07): /* AUTDB */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autdb(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    /* The Z-suffixed forms use a zero modifier and require Rn == 31 */
    case MAP(1, 0x01, 0x08): /* PACIZA */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacia(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
        }
        break;
    case MAP(1, 0x01, 0x09): /* PACIZB */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacib(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
        }
        break;
    case MAP(1, 0x01, 0x0a): /* PACDZA */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacda(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
        }
        break;
    case MAP(1, 0x01, 0x0b): /* PACDZB */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacdb(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
        }
        break;
    case MAP(1, 0x01, 0x0c): /* AUTIZA */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autia(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
        }
        break;
    case MAP(1, 0x01, 0x0d): /* AUTIZB */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autib(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
        }
        break;
    case MAP(1, 0x01, 0x0e): /* AUTDZA */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autda(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
        }
        break;
    case MAP(1, 0x01, 0x0f): /* AUTDZB */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autdb(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
        }
        break;
    case MAP(1, 0x01, 0x10): /* XPACI */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_xpaci(tcg_rd, cpu_env, tcg_rd);
        }
        break;
    case MAP(1, 0x01, 0x11): /* XPACD */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_xpacd(tcg_rd, cpu_env, tcg_rd);
        }
        break;
    default:
    do_unallocated:
        unallocated_encoding(s);
        break;
    }

#undef MAP
}
5642
5643static void handle_div(DisasContext *s, bool is_signed, unsigned int sf,
5644 unsigned int rm, unsigned int rn, unsigned int rd)
5645{
5646 TCGv_i64 tcg_n, tcg_m, tcg_rd;
5647 tcg_rd = cpu_reg(s, rd);
5648
5649 if (!sf && is_signed) {
5650 tcg_n = new_tmp_a64(s);
5651 tcg_m = new_tmp_a64(s);
5652 tcg_gen_ext32s_i64(tcg_n, cpu_reg(s, rn));
5653 tcg_gen_ext32s_i64(tcg_m, cpu_reg(s, rm));
5654 } else {
5655 tcg_n = read_cpu_reg(s, rn, sf);
5656 tcg_m = read_cpu_reg(s, rm, sf);
5657 }
5658
5659 if (is_signed) {
5660 gen_helper_sdiv64(tcg_rd, tcg_n, tcg_m);
5661 } else {
5662 gen_helper_udiv64(tcg_rd, tcg_n, tcg_m);
5663 }
5664
5665 if (!sf) {
5666 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
5667 }
5668}
5669
5670
5671static void handle_shift_reg(DisasContext *s,
5672 enum a64_shift_type shift_type, unsigned int sf,
5673 unsigned int rm, unsigned int rn, unsigned int rd)
5674{
5675 TCGv_i64 tcg_shift = tcg_temp_new_i64();
5676 TCGv_i64 tcg_rd = cpu_reg(s, rd);
5677 TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
5678
5679 tcg_gen_andi_i64(tcg_shift, cpu_reg(s, rm), sf ? 63 : 31);
5680 shift_reg(tcg_rd, tcg_rn, sf, shift_type, tcg_shift);
5681 tcg_temp_free_i64(tcg_shift);
5682}
5683
5684
/* CRC32[BHWX], CRC32C[BHWX]: accumulate a CRC over 1 << sz bytes of Rm */
static void handle_crc32(DisasContext *s,
                         unsigned int sf, unsigned int sz, bool crc32c,
                         unsigned int rm, unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_acc, tcg_val;
    TCGv_i32 tcg_bytes;

    /* Only sf == 1 with sz == 3 (X form), or sf == 0 with sz < 3, is valid */
    if (!dc_isar_feature(aa64_crc32, s)
        || (sf == 1 && sz != 3)
        || (sf == 0 && sz == 3)) {
        unallocated_encoding(s);
        return;
    }

    if (sz == 3) {
        tcg_val = cpu_reg(s, rm);
    } else {
        /* Mask the value down to the 1/2/4 bytes actually consumed */
        uint64_t mask;
        switch (sz) {
        case 0:
            mask = 0xFF;
            break;
        case 1:
            mask = 0xFFFF;
            break;
        case 2:
            mask = 0xFFFFFFFF;
            break;
        default:
            g_assert_not_reached();
        }
        tcg_val = new_tmp_a64(s);
        tcg_gen_andi_i64(tcg_val, cpu_reg(s, rm), mask);
    }

    tcg_acc = cpu_reg(s, rn);
    tcg_bytes = tcg_const_i32(1 << sz);

    if (crc32c) {
        gen_helper_crc32c_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes);
    } else {
        gen_helper_crc32_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes);
    }

    tcg_temp_free_i32(tcg_bytes);
}
5731
5732
5733
5734
5735
5736
5737
/*
 * Data-processing (2 source)
 *   31   30  29 28             21 20  16 15    10 9    5 4    0
 * +----+-----+---+-----------------+------+--------+------+------+
 * | sf |  0  | S | 1 1 0 1 0 1 1 0 |  Rm  | opcode |  Rn  |  Rd  |
 * +----+-----+---+-----------------+------+--------+------+------+
 */
static void disas_data_proc_2src(DisasContext *s, uint32_t insn)
{
    unsigned int sf, rm, opcode, rn, rd, setflag;
    sf = extract32(insn, 31, 1);
    setflag = extract32(insn, 29, 1);
    rm = extract32(insn, 16, 5);
    opcode = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    if (setflag && opcode != 0) {
        /* S == 1 is only valid for opcode 0 (the flag-setting SUBPS).  */
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0: /* SUBP(S) (MTE) */
        if (sf == 0 || !dc_isar_feature(aa64_mte_insn_reg, s)) {
            goto do_unallocated;
        } else {
            TCGv_i64 tcg_n, tcg_m, tcg_d;

            /* Subtract the 56-bit sign-extended address parts.  */
            tcg_n = read_cpu_reg_sp(s, rn, true);
            tcg_m = read_cpu_reg_sp(s, rm, true);
            tcg_gen_sextract_i64(tcg_n, tcg_n, 0, 56);
            tcg_gen_sextract_i64(tcg_m, tcg_m, 0, 56);
            tcg_d = cpu_reg(s, rd);

            if (setflag) {
                gen_sub_CC(true, tcg_d, tcg_n, tcg_m);
            } else {
                tcg_gen_sub_i64(tcg_d, tcg_n, tcg_m);
            }
        }
        break;
    case 2: /* UDIV */
        handle_div(s, false, sf, rm, rn, rd);
        break;
    case 3: /* SDIV */
        handle_div(s, true, sf, rm, rn, rd);
        break;
    case 4: /* IRG (MTE) */
        if (sf == 0 || !dc_isar_feature(aa64_mte_insn_reg, s)) {
            goto do_unallocated;
        }
        if (s->ata) {
            gen_helper_irg(cpu_reg_sp(s, rd), cpu_env,
                           cpu_reg_sp(s, rn), cpu_reg(s, rm));
        } else {
            /* Tag access disabled: just clear the allocation tag.  */
            gen_address_with_allocation_tag0(cpu_reg_sp(s, rd),
                                             cpu_reg_sp(s, rn));
        }
        break;
    case 5: /* GMI (MTE) */
        if (sf == 0 || !dc_isar_feature(aa64_mte_insn_reg, s)) {
            goto do_unallocated;
        } else {
            TCGv_i64 t1 = tcg_const_i64(1);
            TCGv_i64 t2 = tcg_temp_new_i64();

            /* OR (1 << tag-field Xn[59:56]) into the Xm exclusion mask.  */
            tcg_gen_extract_i64(t2, cpu_reg_sp(s, rn), 56, 4);
            tcg_gen_shl_i64(t1, t1, t2);
            tcg_gen_or_i64(cpu_reg(s, rd), cpu_reg(s, rm), t1);

            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(t2);
        }
        break;
    case 8: /* LSLV */
        handle_shift_reg(s, A64_SHIFT_TYPE_LSL, sf, rm, rn, rd);
        break;
    case 9: /* LSRV */
        handle_shift_reg(s, A64_SHIFT_TYPE_LSR, sf, rm, rn, rd);
        break;
    case 10: /* ASRV */
        handle_shift_reg(s, A64_SHIFT_TYPE_ASR, sf, rm, rn, rd);
        break;
    case 11: /* RORV */
        handle_shift_reg(s, A64_SHIFT_TYPE_ROR, sf, rm, rn, rd);
        break;
    case 12: /* PACGA */
        if (sf == 0 || !dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        gen_helper_pacga(cpu_reg(s, rd), cpu_env,
                         cpu_reg(s, rn), cpu_reg_sp(s, rm));
        break;
    case 16:
    case 17:
    case 18:
    case 19:
    case 20:
    case 21:
    case 22:
    case 23: /* CRC32 / CRC32C family */
    {
        /* opcode[1:0] is the size, opcode[2] selects the Castagnoli poly.  */
        int sz = extract32(opcode, 0, 2);
        bool crc32c = extract32(opcode, 2, 1);
        handle_crc32(s, sf, sz, crc32c, rm, rn, rd);
        break;
    }
    default:
    do_unallocated:
        unallocated_encoding(s);
        break;
    }
}
5845
5846
5847
5848
5849
5850
5851
5852
/*
 * Data processing - register: top-level dispatcher for the whole
 * "data processing (register)" instruction class.
 *  31  30 29  28     25  24 21 20  16 15   10 9  0
 * +--+---+--+---+-------+-----+------+-------+------+
 * |  |op0|  |op1| 1 0 1 | op2 |      |  op3  |      |
 * +--+---+--+---+-------+-----+------+-------+------+
 */
static void disas_data_proc_reg(DisasContext *s, uint32_t insn)
{
    int op0 = extract32(insn, 30, 1);
    int op1 = extract32(insn, 28, 1);
    int op2 = extract32(insn, 21, 4);
    int op3 = extract32(insn, 10, 6);

    if (!op1) {
        if (op2 & 8) {
            if (op2 & 1) {
                /* Add/sub (extended register) */
                disas_add_sub_ext_reg(s, insn);
            } else {
                /* Add/sub (shifted register) */
                disas_add_sub_reg(s, insn);
            }
        } else {
            /* Logical (shifted register) */
            disas_logic_reg(s, insn);
        }
        return;
    }

    switch (op2) {
    case 0x0:
        switch (op3) {
        case 0x00: /* Add/subtract (with carry) */
            disas_adc_sbc(s, insn);
            break;

        case 0x01: /* Rotate right into flags */
        case 0x21:
            disas_rotate_right_into_flags(s, insn);
            break;

        case 0x02: /* Evaluate into flags */
        case 0x12:
        case 0x22:
        case 0x32:
            disas_evaluate_into_flags(s, insn);
            break;

        default:
            goto do_unallocated;
        }
        break;

    case 0x2: /* Conditional compare */
        disas_cc(s, insn);
        break;

    case 0x4: /* Conditional select */
        disas_cond_select(s, insn);
        break;

    case 0x6: /* Data-processing, op0 selects 1-source vs 2-source */
        if (op0) {
            disas_data_proc_1src(s, insn);
        } else {
            disas_data_proc_2src(s, insn);
        }
        break;
    case 0x8 ... 0xf: /* Data-processing (3 source) */
        disas_data_proc_3src(s, insn);
        break;

    default:
    do_unallocated:
        unallocated_encoding(s);
        break;
    }
}
5925
/*
 * Compare the FP value in [rn] against [rm] (or against +0.0 when
 * cmp_with_zero) and set PSTATE.NZCV from the comparison result.
 *
 * size: MO_16/MO_32/MO_64 for half/single/double precision
 * signal_all_nans: use the signalling (FCMPE-style) compare helpers
 */
static void handle_fp_compare(DisasContext *s, int size,
                              unsigned int rn, unsigned int rm,
                              bool cmp_with_zero, bool signal_all_nans)
{
    TCGv_i64 tcg_flags = tcg_temp_new_i64();
    /* Half precision uses its own FP status word.  */
    TCGv_ptr fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);

    if (size == MO_64) {
        TCGv_i64 tcg_vn, tcg_vm;

        tcg_vn = read_fp_dreg(s, rn);
        if (cmp_with_zero) {
            tcg_vm = tcg_const_i64(0);
        } else {
            tcg_vm = read_fp_dreg(s, rm);
        }
        if (signal_all_nans) {
            gen_helper_vfp_cmped_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
        } else {
            gen_helper_vfp_cmpd_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
        }
        tcg_temp_free_i64(tcg_vn);
        tcg_temp_free_i64(tcg_vm);
    } else {
        TCGv_i32 tcg_vn = tcg_temp_new_i32();
        TCGv_i32 tcg_vm = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_vn, rn, 0, size);
        if (cmp_with_zero) {
            tcg_gen_movi_i32(tcg_vm, 0);
        } else {
            read_vec_element_i32(s, tcg_vm, rm, 0, size);
        }

        switch (size) {
        case MO_32:
            if (signal_all_nans) {
                gen_helper_vfp_cmpes_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
            } else {
                gen_helper_vfp_cmps_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
            }
            break;
        case MO_16:
            if (signal_all_nans) {
                gen_helper_vfp_cmpeh_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
            } else {
                gen_helper_vfp_cmph_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
            }
            break;
        default:
            g_assert_not_reached();
        }

        tcg_temp_free_i32(tcg_vn);
        tcg_temp_free_i32(tcg_vm);
    }

    tcg_temp_free_ptr(fpst);

    /* The helpers return the NZCV value packed into tcg_flags.  */
    gen_set_nzcv(tcg_flags);

    tcg_temp_free_i64(tcg_flags);
}
5989
5990
5991
5992
5993
5994
5995
/*
 * Floating point compare (FCMP / FCMPE, with or without zero).
 * opc bit 0 selects compare-with-zero, bit 1 selects the
 * signalling (FCMPE) form.  mos, op and op2r must all be zero.
 */
static void disas_fp_compare(DisasContext *s, uint32_t insn)
{
    unsigned int mos, type, rm, op, rn, opc, op2r;
    int size;

    mos = extract32(insn, 29, 3);
    type = extract32(insn, 22, 2);
    rm = extract32(insn, 16, 5);
    op = extract32(insn, 14, 2);
    rn = extract32(insn, 5, 5);
    opc = extract32(insn, 3, 2);
    op2r = extract32(insn, 0, 3);

    if (mos || op || op2r) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0:
        size = MO_32;
        break;
    case 1:
        size = MO_64;
        break;
    case 3:
        size = MO_16;
        if (dc_isar_feature(aa64_fp16, s)) {
            break;
        }
        /* fallthrough: fp16 not implemented */
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    handle_fp_compare(s, size, rn, rm, opc & 1, opc & 2);
}
6038
6039
6040
6041
6042
6043
6044
/*
 * Floating point conditional compare (FCCMP / FCCMPE).
 * If the condition holds, perform the FP comparison; otherwise set
 * NZCV directly from the nzcv immediate field.  op selects the
 * signalling (FCCMPE) form.
 */
static void disas_fp_ccomp(DisasContext *s, uint32_t insn)
{
    unsigned int mos, type, rm, cond, rn, op, nzcv;
    TCGv_i64 tcg_flags;
    TCGLabel *label_continue = NULL;
    int size;

    mos = extract32(insn, 29, 3);
    type = extract32(insn, 22, 2);
    rm = extract32(insn, 16, 5);
    cond = extract32(insn, 12, 4);
    rn = extract32(insn, 5, 5);
    op = extract32(insn, 4, 1);
    nzcv = extract32(insn, 0, 4);

    if (mos) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0:
        size = MO_32;
        break;
    case 1:
        size = MO_64;
        break;
    case 3:
        size = MO_16;
        if (dc_isar_feature(aa64_fp16, s)) {
            break;
        }
        /* fallthrough: fp16 not implemented */
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (cond < 0x0e) { /* not always (0xe/0xf mean "always true") */
        TCGLabel *label_match = gen_new_label();
        label_continue = gen_new_label();
        arm_gen_test_cc(cond, label_match);
        /* Condition failed: load the immediate nzcv into the flags.  */
        tcg_flags = tcg_const_i64(nzcv << 28);
        gen_set_nzcv(tcg_flags);
        tcg_temp_free_i64(tcg_flags);
        tcg_gen_br(label_continue);
        gen_set_label(label_match);
    }

    handle_fp_compare(s, size, rn, rm, false, op);

    if (cond < 0x0e) {
        gen_set_label(label_continue);
    }
}
6105
6106
6107
6108
6109
6110
6111
/*
 * Floating point conditional select (FCSEL):
 * Vd = cond holds ? Vn : Vm
 */
static void disas_fp_csel(DisasContext *s, uint32_t insn)
{
    unsigned int mos, type, rm, cond, rn, rd;
    TCGv_i64 t_true, t_false, t_zero;
    DisasCompare64 c;
    MemOp sz;

    mos = extract32(insn, 29, 3);
    type = extract32(insn, 22, 2);
    rm = extract32(insn, 16, 5);
    cond = extract32(insn, 12, 4);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    if (mos) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0:
        sz = MO_32;
        break;
    case 1:
        sz = MO_64;
        break;
    case 3:
        sz = MO_16;
        if (dc_isar_feature(aa64_fp16, s)) {
            break;
        }
        /* fallthrough: fp16 not implemented */
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* Read both operands zero-extended to 64 bits.  */
    t_true = tcg_temp_new_i64();
    t_false = tcg_temp_new_i64();
    read_vec_element(s, t_true, rn, 0, sz);
    read_vec_element(s, t_false, rm, 0, sz);

    a64_test_cc(&c, cond);
    t_zero = tcg_const_i64(0);
    tcg_gen_movcond_i64(c.cond, t_true, c.value, t_zero, t_true, t_false);
    tcg_temp_free_i64(t_zero);
    tcg_temp_free_i64(t_false);
    a64_free_cc(&c);

    /* The narrower values were zero-extended on read above, so a full
     * 64-bit write back is correct for every element size here.  */
    write_fp_dreg(s, rd, t_true);
    tcg_temp_free_i64(t_true);
}
6171
6172
/* Floating-point data-processing (1 source) - half precision */
static void handle_fp_1src_half(DisasContext *s, int opcode, int rd, int rn)
{
    TCGv_ptr fpst = NULL;
    TCGv_i32 tcg_op = read_fp_hreg(s, rn);
    TCGv_i32 tcg_res = tcg_temp_new_i32();

    switch (opcode) {
    case 0x0: /* FMOV */
        tcg_gen_mov_i32(tcg_res, tcg_op);
        break;
    case 0x1: /* FABS: clear the sign bit */
        tcg_gen_andi_i32(tcg_res, tcg_op, 0x7fff);
        break;
    case 0x2: /* FNEG: flip the sign bit */
        tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
        break;
    case 0x3: /* FSQRT */
        fpst = fpstatus_ptr(FPST_FPCR_F16);
        gen_helper_sqrt_f16(tcg_res, tcg_op, fpst);
        break;
    case 0x8: /* FRINTN */
    case 0x9: /* FRINTP */
    case 0xa: /* FRINTM */
    case 0xb: /* FRINTZ */
    case 0xc: /* FRINTA */
    {
        /* Round to integral with the mode encoded in opcode[2:0].  */
        TCGv_i32 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(opcode & 7));
        fpst = fpstatus_ptr(FPST_FPCR_F16);

        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
        gen_helper_advsimd_rinth(tcg_res, tcg_op, fpst);

        /* Restore the previous rounding mode (set_rmode swaps).  */
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
        tcg_temp_free_i32(tcg_rmode);
        break;
    }
    case 0xe: /* FRINTX: raise Inexact when rounding */
        fpst = fpstatus_ptr(FPST_FPCR_F16);
        gen_helper_advsimd_rinth_exact(tcg_res, tcg_op, fpst);
        break;
    case 0xf: /* FRINTI: current rounding mode */
        fpst = fpstatus_ptr(FPST_FPCR_F16);
        gen_helper_advsimd_rinth(tcg_res, tcg_op, fpst);
        break;
    default:
        abort();
    }

    write_fp_sreg(s, rd, tcg_res);

    if (fpst) {
        tcg_temp_free_ptr(fpst);
    }
    tcg_temp_free_i32(tcg_op);
    tcg_temp_free_i32(tcg_res);
}
6229
6230
/* Floating-point data-processing (1 source) - single precision */
static void handle_fp_1src_single(DisasContext *s, int opcode, int rd, int rn)
{
    void (*gen_fpst)(TCGv_i32, TCGv_i32, TCGv_ptr);
    TCGv_i32 tcg_op, tcg_res;
    TCGv_ptr fpst;
    int rmode = -1;    /* -1 means "no explicit rounding mode" */

    tcg_op = read_fp_sreg(s, rn);
    tcg_res = tcg_temp_new_i32();

    switch (opcode) {
    case 0x0: /* FMOV */
        tcg_gen_mov_i32(tcg_res, tcg_op);
        goto done;
    case 0x1: /* FABS */
        gen_helper_vfp_abss(tcg_res, tcg_op);
        goto done;
    case 0x2: /* FNEG */
        gen_helper_vfp_negs(tcg_res, tcg_op);
        goto done;
    case 0x3: /* FSQRT */
        gen_helper_vfp_sqrts(tcg_res, tcg_op, cpu_env);
        goto done;
    case 0x8: /* FRINTN */
    case 0x9: /* FRINTP */
    case 0xa: /* FRINTM */
    case 0xb: /* FRINTZ */
    case 0xc: /* FRINTA */
        rmode = arm_rmode_to_sf(opcode & 7);
        gen_fpst = gen_helper_rints;
        break;
    case 0xe: /* FRINTX */
        gen_fpst = gen_helper_rints_exact;
        break;
    case 0xf: /* FRINTI */
        gen_fpst = gen_helper_rints;
        break;
    case 0x10: /* FRINT32Z */
        rmode = float_round_to_zero;
        gen_fpst = gen_helper_frint32_s;
        break;
    case 0x11: /* FRINT32X */
        gen_fpst = gen_helper_frint32_s;
        break;
    case 0x12: /* FRINT64Z */
        rmode = float_round_to_zero;
        gen_fpst = gen_helper_frint64_s;
        break;
    case 0x13: /* FRINT64X */
        gen_fpst = gen_helper_frint64_s;
        break;
    default:
        g_assert_not_reached();
    }

    fpst = fpstatus_ptr(FPST_FPCR);
    if (rmode >= 0) {
        /* Swap in the required rounding mode, then restore it after.  */
        TCGv_i32 tcg_rmode = tcg_const_i32(rmode);
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
        gen_fpst(tcg_res, tcg_op, fpst);
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
        tcg_temp_free_i32(tcg_rmode);
    } else {
        gen_fpst(tcg_res, tcg_op, fpst);
    }
    tcg_temp_free_ptr(fpst);

 done:
    write_fp_sreg(s, rd, tcg_res);
    tcg_temp_free_i32(tcg_op);
    tcg_temp_free_i32(tcg_res);
}
6303
6304
/* Floating-point data-processing (1 source) - double precision */
static void handle_fp_1src_double(DisasContext *s, int opcode, int rd, int rn)
{
    void (*gen_fpst)(TCGv_i64, TCGv_i64, TCGv_ptr);
    TCGv_i64 tcg_op, tcg_res;
    TCGv_ptr fpst;
    int rmode = -1;    /* -1 means "no explicit rounding mode" */

    switch (opcode) {
    case 0x0:
        /* FMOV: reg-reg move done as a vector move of the low 64 bits */
        gen_gvec_fn2(s, false, rd, rn, tcg_gen_gvec_mov, 0);
        return;
    }

    tcg_op = read_fp_dreg(s, rn);
    tcg_res = tcg_temp_new_i64();

    switch (opcode) {
    case 0x1: /* FABS */
        gen_helper_vfp_absd(tcg_res, tcg_op);
        goto done;
    case 0x2: /* FNEG */
        gen_helper_vfp_negd(tcg_res, tcg_op);
        goto done;
    case 0x3: /* FSQRT */
        gen_helper_vfp_sqrtd(tcg_res, tcg_op, cpu_env);
        goto done;
    case 0x8: /* FRINTN */
    case 0x9: /* FRINTP */
    case 0xa: /* FRINTM */
    case 0xb: /* FRINTZ */
    case 0xc: /* FRINTA */
        rmode = arm_rmode_to_sf(opcode & 7);
        gen_fpst = gen_helper_rintd;
        break;
    case 0xe: /* FRINTX */
        gen_fpst = gen_helper_rintd_exact;
        break;
    case 0xf: /* FRINTI */
        gen_fpst = gen_helper_rintd;
        break;
    case 0x10: /* FRINT32Z */
        rmode = float_round_to_zero;
        gen_fpst = gen_helper_frint32_d;
        break;
    case 0x11: /* FRINT32X */
        gen_fpst = gen_helper_frint32_d;
        break;
    case 0x12: /* FRINT64Z */
        rmode = float_round_to_zero;
        gen_fpst = gen_helper_frint64_d;
        break;
    case 0x13: /* FRINT64X */
        gen_fpst = gen_helper_frint64_d;
        break;
    default:
        g_assert_not_reached();
    }

    fpst = fpstatus_ptr(FPST_FPCR);
    if (rmode >= 0) {
        /* Swap in the required rounding mode, then restore it after.  */
        TCGv_i32 tcg_rmode = tcg_const_i32(rmode);
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
        gen_fpst(tcg_res, tcg_op, fpst);
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
        tcg_temp_free_i32(tcg_rmode);
    } else {
        gen_fpst(tcg_res, tcg_op, fpst);
    }
    tcg_temp_free_ptr(fpst);

 done:
    write_fp_dreg(s, rd, tcg_res);
    tcg_temp_free_i64(tcg_op);
    tcg_temp_free_i64(tcg_res);
}
6380
/*
 * FCVT between half, single and double precision.
 * ntype is the source precision (0 = single, 1 = double, 3 = half)
 * and dtype the destination precision, same encoding.
 */
static void handle_fp_fcvt(DisasContext *s, int opcode,
                           int rd, int rn, int dtype, int ntype)
{
    switch (ntype) {
    case 0x0:
    {
        TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
        if (dtype == 1) {
            /* Single to double */
            TCGv_i64 tcg_rd = tcg_temp_new_i64();
            gen_helper_vfp_fcvtds(tcg_rd, tcg_rn, cpu_env);
            write_fp_dreg(s, rd, tcg_rd);
            tcg_temp_free_i64(tcg_rd);
        } else {
            /* Single to half */
            TCGv_i32 tcg_rd = tcg_temp_new_i32();
            TCGv_i32 ahp = get_ahp_flag();
            TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);

            gen_helper_vfp_fcvt_f32_to_f16(tcg_rd, tcg_rn, fpst, ahp);
            /* write_fp_sreg is OK here because the top half is zero */
            write_fp_sreg(s, rd, tcg_rd);
            tcg_temp_free_i32(tcg_rd);
            tcg_temp_free_i32(ahp);
            tcg_temp_free_ptr(fpst);
        }
        tcg_temp_free_i32(tcg_rn);
        break;
    }
    case 0x1:
    {
        TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
        TCGv_i32 tcg_rd = tcg_temp_new_i32();
        if (dtype == 0) {
            /* Double to single */
            gen_helper_vfp_fcvtsd(tcg_rd, tcg_rn, cpu_env);
        } else {
            /* Double to half */
            TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
            TCGv_i32 ahp = get_ahp_flag();

            gen_helper_vfp_fcvt_f64_to_f16(tcg_rd, tcg_rn, fpst, ahp);

            tcg_temp_free_ptr(fpst);
            tcg_temp_free_i32(ahp);
        }
        write_fp_sreg(s, rd, tcg_rd);
        tcg_temp_free_i32(tcg_rd);
        tcg_temp_free_i64(tcg_rn);
        break;
    }
    case 0x3:
    {
        TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
        TCGv_ptr tcg_fpst = fpstatus_ptr(FPST_FPCR);
        TCGv_i32 tcg_ahp = get_ahp_flag();
        /* Only the low 16 bits of the source are significant.  */
        tcg_gen_ext16u_i32(tcg_rn, tcg_rn);
        if (dtype == 0) {
            /* Half to single */
            TCGv_i32 tcg_rd = tcg_temp_new_i32();
            gen_helper_vfp_fcvt_f16_to_f32(tcg_rd, tcg_rn, tcg_fpst, tcg_ahp);
            write_fp_sreg(s, rd, tcg_rd);
            tcg_temp_free_i32(tcg_rd);
        } else {
            /* Half to double */
            TCGv_i64 tcg_rd = tcg_temp_new_i64();
            gen_helper_vfp_fcvt_f16_to_f64(tcg_rd, tcg_rn, tcg_fpst, tcg_ahp);
            write_fp_dreg(s, rd, tcg_rd);
            tcg_temp_free_i64(tcg_rd);
        }
        tcg_temp_free_i32(tcg_rn);
        tcg_temp_free_ptr(tcg_fpst);
        tcg_temp_free_i32(tcg_ahp);
        break;
    }
    default:
        abort();
    }
}
6459
6460
6461
6462
6463
6464
6465
/*
 * Floating point data-processing (1 source)
 *   31  30  29 28       24 23  22  21 20    15 14       10 9    5 4    0
 * +---+---+---+-----------+------+---+--------+-----------+------+------+
 * | M | 0 | S | 1 1 1 1 0 | type | 1 | opcode | 1 0 0 0 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+---+--------+-----------+------+------+
 */
static void disas_fp_1src(DisasContext *s, uint32_t insn)
{
    int mos = extract32(insn, 29, 3);
    int type = extract32(insn, 22, 2);
    int opcode = extract32(insn, 15, 6);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    if (mos) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0x4: case 0x5: case 0x7:
    {
        /* FCVT between half, single and double precision */
        int dtype = extract32(opcode, 0, 2);
        if (type == 2 || dtype == type) {
            /* No conversion to/from "type 2", and no same-size convert.  */
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }

        handle_fp_fcvt(s, opcode, rd, rn, dtype, type);
        break;
    }

    case 0x10 ... 0x13: /* FRINT{32,64}{X,Z} */
        if (type > 1 || !dc_isar_feature(aa64_frint, s)) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x0 ... 0x3:
    case 0x8 ... 0xc:
    case 0xe ... 0xf:
        /* 32-to-32 and 64-to-64 ops */
        switch (type) {
        case 0:
            if (!fp_access_check(s)) {
                return;
            }
            handle_fp_1src_single(s, opcode, rd, rn);
            break;
        case 1:
            if (!fp_access_check(s)) {
                return;
            }
            handle_fp_1src_double(s, opcode, rd, rn);
            break;
        case 3:
            if (!dc_isar_feature(aa64_fp16, s)) {
                unallocated_encoding(s);
                return;
            }

            if (!fp_access_check(s)) {
                return;
            }
            handle_fp_1src_half(s, opcode, rd, rn);
            break;
        default:
            unallocated_encoding(s);
        }
        break;

    default:
        unallocated_encoding(s);
        break;
    }
}
6540
6541
6542static void handle_fp_2src_single(DisasContext *s, int opcode,
6543 int rd, int rn, int rm)
6544{
6545 TCGv_i32 tcg_op1;
6546 TCGv_i32 tcg_op2;
6547 TCGv_i32 tcg_res;
6548 TCGv_ptr fpst;
6549
6550 tcg_res = tcg_temp_new_i32();
6551 fpst = fpstatus_ptr(FPST_FPCR);
6552 tcg_op1 = read_fp_sreg(s, rn);
6553 tcg_op2 = read_fp_sreg(s, rm);
6554
6555 switch (opcode) {
6556 case 0x0:
6557 gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
6558 break;
6559 case 0x1:
6560 gen_helper_vfp_divs(tcg_res, tcg_op1, tcg_op2, fpst);
6561 break;
6562 case 0x2:
6563 gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
6564 break;
6565 case 0x3:
6566 gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
6567 break;
6568 case 0x4:
6569 gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
6570 break;
6571 case 0x5:
6572 gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
6573 break;
6574 case 0x6:
6575 gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
6576 break;
6577 case 0x7:
6578 gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
6579 break;
6580 case 0x8:
6581 gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
6582 gen_helper_vfp_negs(tcg_res, tcg_res);
6583 break;
6584 }
6585
6586 write_fp_sreg(s, rd, tcg_res);
6587
6588 tcg_temp_free_ptr(fpst);
6589 tcg_temp_free_i32(tcg_op1);
6590 tcg_temp_free_i32(tcg_op2);
6591 tcg_temp_free_i32(tcg_res);
6592}
6593
6594
6595static void handle_fp_2src_double(DisasContext *s, int opcode,
6596 int rd, int rn, int rm)
6597{
6598 TCGv_i64 tcg_op1;
6599 TCGv_i64 tcg_op2;
6600 TCGv_i64 tcg_res;
6601 TCGv_ptr fpst;
6602
6603 tcg_res = tcg_temp_new_i64();
6604 fpst = fpstatus_ptr(FPST_FPCR);
6605 tcg_op1 = read_fp_dreg(s, rn);
6606 tcg_op2 = read_fp_dreg(s, rm);
6607
6608 switch (opcode) {
6609 case 0x0:
6610 gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
6611 break;
6612 case 0x1:
6613 gen_helper_vfp_divd(tcg_res, tcg_op1, tcg_op2, fpst);
6614 break;
6615 case 0x2:
6616 gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
6617 break;
6618 case 0x3:
6619 gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
6620 break;
6621 case 0x4:
6622 gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
6623 break;
6624 case 0x5:
6625 gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
6626 break;
6627 case 0x6:
6628 gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
6629 break;
6630 case 0x7:
6631 gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
6632 break;
6633 case 0x8:
6634 gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
6635 gen_helper_vfp_negd(tcg_res, tcg_res);
6636 break;
6637 }
6638
6639 write_fp_dreg(s, rd, tcg_res);
6640
6641 tcg_temp_free_ptr(fpst);
6642 tcg_temp_free_i64(tcg_op1);
6643 tcg_temp_free_i64(tcg_op2);
6644 tcg_temp_free_i64(tcg_res);
6645}
6646
6647
/* Floating-point data-processing (2 source) - half precision */
static void handle_fp_2src_half(DisasContext *s, int opcode,
                                int rd, int rn, int rm)
{
    TCGv_i32 tcg_op1;
    TCGv_i32 tcg_op2;
    TCGv_i32 tcg_res;
    TCGv_ptr fpst;

    tcg_res = tcg_temp_new_i32();
    fpst = fpstatus_ptr(FPST_FPCR_F16);
    tcg_op1 = read_fp_hreg(s, rn);
    tcg_op2 = read_fp_hreg(s, rm);

    switch (opcode) {
    case 0x0: /* FMUL */
        gen_helper_advsimd_mulh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x1: /* FDIV */
        gen_helper_advsimd_divh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x2: /* FADD */
        gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x3: /* FSUB */
        gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x4: /* FMAX */
        gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x5: /* FMIN */
        gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x6: /* FMAXNM */
        gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x7: /* FMINNM */
        gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x8: /* FNMUL: multiply then flip the sign bit */
        gen_helper_advsimd_mulh(tcg_res, tcg_op1, tcg_op2, fpst);
        tcg_gen_xori_i32(tcg_res, tcg_res, 0x8000);
        break;
    default:
        g_assert_not_reached();
    }

    write_fp_sreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tcg_op1);
    tcg_temp_free_i32(tcg_op2);
    tcg_temp_free_i32(tcg_res);
}
6701
6702
6703
6704
6705
6706
6707
/*
 * Floating point data-processing (2 source): decode type and dispatch
 * to the per-precision handler.  opcode > 8 and nonzero mos are
 * unallocated.
 */
static void disas_fp_2src(DisasContext *s, uint32_t insn)
{
    int mos = extract32(insn, 29, 3);
    int type = extract32(insn, 22, 2);
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rm = extract32(insn, 16, 5);
    int opcode = extract32(insn, 12, 4);

    if (opcode > 8 || mos) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0: /* single precision */
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_2src_single(s, opcode, rd, rn, rm);
        break;
    case 1: /* double precision */
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_2src_double(s, opcode, rd, rn, rm);
        break;
    case 3: /* half precision, needs FEAT_FP16 */
        if (!dc_isar_feature(aa64_fp16, s)) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_2src_half(s, opcode, rd, rn, rm);
        break;
    default:
        unallocated_encoding(s);
    }
}
6749
6750
6751static void handle_fp_3src_single(DisasContext *s, bool o0, bool o1,
6752 int rd, int rn, int rm, int ra)
6753{
6754 TCGv_i32 tcg_op1, tcg_op2, tcg_op3;
6755 TCGv_i32 tcg_res = tcg_temp_new_i32();
6756 TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
6757
6758 tcg_op1 = read_fp_sreg(s, rn);
6759 tcg_op2 = read_fp_sreg(s, rm);
6760 tcg_op3 = read_fp_sreg(s, ra);
6761
6762
6763
6764
6765
6766
6767
6768
6769 if (o1 == true) {
6770 gen_helper_vfp_negs(tcg_op3, tcg_op3);
6771 }
6772
6773 if (o0 != o1) {
6774 gen_helper_vfp_negs(tcg_op1, tcg_op1);
6775 }
6776
6777 gen_helper_vfp_muladds(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);
6778
6779 write_fp_sreg(s, rd, tcg_res);
6780
6781 tcg_temp_free_ptr(fpst);
6782 tcg_temp_free_i32(tcg_op1);
6783 tcg_temp_free_i32(tcg_op2);
6784 tcg_temp_free_i32(tcg_op3);
6785 tcg_temp_free_i32(tcg_res);
6786}
6787
6788
/* Floating-point data-processing (3 source) - double precision */
static void handle_fp_3src_double(DisasContext *s, bool o0, bool o1,
                                  int rd, int rn, int rm, int ra)
{
    TCGv_i64 tcg_op1, tcg_op2, tcg_op3;
    TCGv_i64 tcg_res = tcg_temp_new_i64();
    TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);

    tcg_op1 = read_fp_dreg(s, rn);
    tcg_op2 = read_fp_dreg(s, rm);
    tcg_op3 = read_fp_dreg(s, ra);

    /*
     * These are fused multiply-add ops: the (o0, o1) flags select
     * FMADD/FMSUB/FNMADD/FNMSUB by negating the addend (op3) and/or
     * the first multiplicand (op1) before the fused operation.
     * Doing the negations as separate steps is correct: an input NaN
     * should come out with its sign bit flipped if it is a
     * negated-input.
     */
    if (o1 == true) {
        gen_helper_vfp_negd(tcg_op3, tcg_op3);
    }

    if (o0 != o1) {
        gen_helper_vfp_negd(tcg_op1, tcg_op1);
    }

    gen_helper_vfp_muladdd(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);

    write_fp_dreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(tcg_op1);
    tcg_temp_free_i64(tcg_op2);
    tcg_temp_free_i64(tcg_op3);
    tcg_temp_free_i64(tcg_res);
}
6825
6826
/* Floating-point data-processing (3 source) - half precision */
static void handle_fp_3src_half(DisasContext *s, bool o0, bool o1,
                                int rd, int rn, int rm, int ra)
{
    TCGv_i32 tcg_op1, tcg_op2, tcg_op3;
    TCGv_i32 tcg_res = tcg_temp_new_i32();
    TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR_F16);

    tcg_op1 = read_fp_hreg(s, rn);
    tcg_op2 = read_fp_hreg(s, rm);
    tcg_op3 = read_fp_hreg(s, ra);

    /*
     * These are fused multiply-add ops: the (o0, o1) flags select
     * FMADD/FMSUB/FNMADD/FNMSUB by negating the addend (op3) and/or
     * the first multiplicand (op1) before the fused operation.
     * The negation is done by flipping the fp16 sign bit directly;
     * doing it as a separate step is correct: an input NaN should
     * come out with its sign bit flipped if it is a negated-input.
     */
    if (o1 == true) {
        tcg_gen_xori_i32(tcg_op3, tcg_op3, 0x8000);
    }

    if (o0 != o1) {
        tcg_gen_xori_i32(tcg_op1, tcg_op1, 0x8000);
    }

    gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);

    write_fp_sreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tcg_op1);
    tcg_temp_free_i32(tcg_op2);
    tcg_temp_free_i32(tcg_op3);
    tcg_temp_free_i32(tcg_res);
}
6863
6864
6865
6866
6867
6868
6869
/*
 * Floating point data-processing (3 source): decode type and dispatch
 * to the per-precision handler.  o1 (bit 21) and o0 (bit 15) select
 * among the four fused multiply-add variants.
 */
static void disas_fp_3src(DisasContext *s, uint32_t insn)
{
    int mos = extract32(insn, 29, 3);
    int type = extract32(insn, 22, 2);
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int ra = extract32(insn, 10, 5);
    int rm = extract32(insn, 16, 5);
    bool o0 = extract32(insn, 15, 1);
    bool o1 = extract32(insn, 21, 1);

    if (mos) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0: /* single precision */
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_3src_single(s, o0, o1, rd, rn, rm, ra);
        break;
    case 1: /* double precision */
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_3src_double(s, o0, o1, rd, rn, rm, ra);
        break;
    case 3: /* half precision, needs FEAT_FP16 */
        if (!dc_isar_feature(aa64_fp16, s)) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_3src_half(s, o0, o1, rd, rn, rm, ra);
        break;
    default:
        unallocated_encoding(s);
    }
}
6913
6914
6915
6916
6917
6918
6919
/*
 * Floating point immediate (FMOV immediate): expand the 8-bit
 * imm8 to a full-width FP constant and write it to Vd.
 */
static void disas_fp_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int imm5 = extract32(insn, 5, 5);
    int imm8 = extract32(insn, 13, 8);
    int type = extract32(insn, 22, 2);
    int mos = extract32(insn, 29, 3);
    uint64_t imm;
    TCGv_i64 tcg_res;
    MemOp sz;

    if (mos || imm5) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0:
        sz = MO_32;
        break;
    case 1:
        sz = MO_64;
        break;
    case 3:
        sz = MO_16;
        if (dc_isar_feature(aa64_fp16, s)) {
            break;
        }
        /* fallthrough: fp16 not implemented */
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    imm = vfp_expand_imm(sz, imm8);

    /* write_fp_dreg zeroes the rest of the vector register.  */
    tcg_res = tcg_const_i64(imm);
    write_fp_dreg(s, rd, tcg_res);
    tcg_temp_free_i64(tcg_res);
}
6964
6965
6966
6967
6968
6969
/*
 * Handle conversions between floating point and fixed-point (scaled
 * integer) values, in both directions.
 *
 * opcode bit 0 clear => signed integer; bit 2 set forces TIEAWAY
 *   rounding for the float-to-int direction (FCVTA*)
 * itof: true for int->float, false for float->int
 * scale: the number of fraction bits passed to the helpers is
 *   64 - scale (so scale == 64 means a plain integer convert)
 * sf: 64-bit (1) or 32-bit (0) general register
 * type: FP precision, 0 = single, 1 = double, 3 = half
 */
static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode,
                           bool itof, int rmode, int scale, int sf, int type)
{
    bool is_signed = !(opcode & 1);
    TCGv_ptr tcg_fpstatus;
    TCGv_i32 tcg_shift, tcg_single;
    TCGv_i64 tcg_double;

    tcg_fpstatus = fpstatus_ptr(type == 3 ? FPST_FPCR_F16 : FPST_FPCR);

    tcg_shift = tcg_const_i32(64 - scale);

    if (itof) {
        /* Integer to float.  */
        TCGv_i64 tcg_int = cpu_reg(s, rn);
        if (!sf) {
            /* Widen the 32-bit source to 64 bits first.  */
            TCGv_i64 tcg_extend = new_tmp_a64(s);

            if (is_signed) {
                tcg_gen_ext32s_i64(tcg_extend, tcg_int);
            } else {
                tcg_gen_ext32u_i64(tcg_extend, tcg_int);
            }

            tcg_int = tcg_extend;
        }

        switch (type) {
        case 1: /* double */
            tcg_double = tcg_temp_new_i64();
            if (is_signed) {
                gen_helper_vfp_sqtod(tcg_double, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_uqtod(tcg_double, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            }
            write_fp_dreg(s, rd, tcg_double);
            tcg_temp_free_i64(tcg_double);
            break;

        case 0: /* single */
            tcg_single = tcg_temp_new_i32();
            if (is_signed) {
                gen_helper_vfp_sqtos(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_uqtos(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            }
            write_fp_sreg(s, rd, tcg_single);
            tcg_temp_free_i32(tcg_single);
            break;

        case 3: /* half */
            tcg_single = tcg_temp_new_i32();
            if (is_signed) {
                gen_helper_vfp_sqtoh(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_uqtoh(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            }
            write_fp_sreg(s, rd, tcg_single);
            tcg_temp_free_i32(tcg_single);
            break;

        default:
            g_assert_not_reached();
        }
    } else {
        /* Float to integer.  */
        TCGv_i64 tcg_int = cpu_reg(s, rd);
        TCGv_i32 tcg_rmode;

        if (extract32(opcode, 2, 1)) {
            /*
             * There are too many rounding modes to all fit into rmode,
             * so the FCVTA[US] round-to-nearest-ties-away variants are
             * a special case.
             */
            rmode = FPROUNDING_TIEAWAY;
        }

        tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));

        /* Swap in the requested rounding mode (restored below).  */
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);

        switch (type) {
        case 1: /* double */
            tcg_double = read_fp_dreg(s, rn);
            if (is_signed) {
                if (!sf) {
                    gen_helper_vfp_tosld(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_tosqd(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                }
            } else {
                if (!sf) {
                    gen_helper_vfp_tould(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touqd(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                }
            }
            if (!sf) {
                /* W-register result: clear the high 32 bits.  */
                tcg_gen_ext32u_i64(tcg_int, tcg_int);
            }
            tcg_temp_free_i64(tcg_double);
            break;

        case 0: /* single */
            tcg_single = read_fp_sreg(s, rn);
            if (sf) {
                if (is_signed) {
                    gen_helper_vfp_tosqs(tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touqs(tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
            } else {
                TCGv_i32 tcg_dest = tcg_temp_new_i32();
                if (is_signed) {
                    gen_helper_vfp_tosls(tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touls(tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
                tcg_gen_extu_i32_i64(tcg_int, tcg_dest);
                tcg_temp_free_i32(tcg_dest);
            }
            tcg_temp_free_i32(tcg_single);
            break;

        case 3: /* half */
            tcg_single = read_fp_sreg(s, rn);
            if (sf) {
                if (is_signed) {
                    gen_helper_vfp_tosqh(tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touqh(tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
            } else {
                TCGv_i32 tcg_dest = tcg_temp_new_i32();
                if (is_signed) {
                    gen_helper_vfp_toslh(tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_toulh(tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
                tcg_gen_extu_i32_i64(tcg_int, tcg_dest);
                tcg_temp_free_i32(tcg_dest);
            }
            tcg_temp_free_i32(tcg_single);
            break;

        default:
            g_assert_not_reached();
        }

        /* Restore the previous rounding mode.  */
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
        tcg_temp_free_i32(tcg_rmode);
    }

    tcg_temp_free_ptr(tcg_fpstatus);
    tcg_temp_free_i32(tcg_shift);
}
7141
7142
7143
7144
7145
7146
7147
/*
 * Floating point <-> fixed point conversions
 * (SCVTF/UCVTF and FCVTZS/FCVTZU, fixed-point forms).
 */
static void disas_fp_fixed_conv(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int scale = extract32(insn, 10, 6);
    int opcode = extract32(insn, 16, 3);
    int rmode = extract32(insn, 19, 2);
    int type = extract32(insn, 22, 2);
    bool sbit = extract32(insn, 29, 1);
    bool sf = extract32(insn, 31, 1);
    bool itof;

    /* With a 32-bit GP register, the fraction count 64 - scale must
     * not exceed 32, i.e. scale must be at least 32.
     */
    if (sbit || (!sf && scale < 32)) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0: /* single */
    case 1: /* double */
        break;
    case 3: /* half, needs FEAT_FP16 */
        if (dc_isar_feature(aa64_fp16, s)) {
            break;
        }
        /* fallthrough: fp16 not implemented */
    default:
        unallocated_encoding(s);
        return;
    }

    switch ((rmode << 3) | opcode) {
    case 0x2: /* SCVTF */
    case 0x3: /* UCVTF */
        itof = true;
        break;
    case 0x18: /* FCVTZS */
    case 0x19: /* FCVTZU */
        itof = false;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    handle_fpfpcvt(s, rd, rn, opcode, itof, FPROUNDING_ZERO, scale, sf, type);
}
7199
static void handle_fmov(DisasContext *s, int rd, int rn, int type, bool itof)
{
    /* FMOV: gpr to or from float, double, or top half of quad fp reg,
     * without conversion (a raw bit copy in the indicated direction).
     */
    if (itof) {
        TCGv_i64 tcg_rn = cpu_reg(s, rn);
        TCGv_i64 tmp;

        switch (type) {
        case 0:
            /* 32 bit: zero-extend W into the low 64 bits of Vd */
            tmp = tcg_temp_new_i64();
            tcg_gen_ext32u_i64(tmp, tcg_rn);
            write_fp_dreg(s, rd, tmp);
            tcg_temp_free_i64(tmp);
            break;
        case 1:
            /* 64 bit */
            write_fp_dreg(s, rd, tcg_rn);
            break;
        case 2:
            /* 64 bit to top half of quad (low half is preserved) */
            tcg_gen_st_i64(tcg_rn, cpu_env, fp_reg_hi_offset(s, rd));
            clear_vec_high(s, true, rd);
            break;
        case 3:
            /* 16 bit (FEAT_FP16 FMOV) */
            tmp = tcg_temp_new_i64();
            tcg_gen_ext16u_i64(tmp, tcg_rn);
            write_fp_dreg(s, rd, tmp);
            tcg_temp_free_i64(tmp);
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        TCGv_i64 tcg_rd = cpu_reg(s, rd);

        switch (type) {
        case 0:
            /* 32 bit */
            tcg_gen_ld32u_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_32));
            break;
        case 1:
            /* 64 bit */
            tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_64));
            break;
        case 2:
            /* 64 bits from top half of quad */
            tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_hi_offset(s, rn));
            break;
        case 3:
            /* 16 bit */
            tcg_gen_ld16u_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_16));
            break;
        default:
            g_assert_not_reached();
        }
    }
}
7262
/* FJCVTZS (FEAT_JSCVT): convert double to 32-bit integer with
 * JavaScript semantics, writing the flags as NZCV = 0Z00.
 */
static void handle_fjcvtzs(DisasContext *s, int rd, int rn)
{
    TCGv_i64 t = read_fp_dreg(s, rn);
    TCGv_ptr fpstatus = fpstatus_ptr(FPST_FPCR);

    gen_helper_fjcvtzs(t, t, fpstatus);

    tcg_temp_free_ptr(fpstatus);

    /* Low 32 bits of the helper result are the converted value */
    tcg_gen_ext32u_i64(cpu_reg(s, rd), t);
    /* High half of the helper result feeds cpu_ZF (QEMU's Z flag is
     * set when cpu_ZF == 0); N, C and V are all cleared.
     */
    tcg_gen_extrh_i64_i32(cpu_ZF, t);
    tcg_gen_movi_i32(cpu_CF, 0);
    tcg_gen_movi_i32(cpu_NF, 0);
    tcg_gen_movi_i32(cpu_VF, 0);

    tcg_temp_free_i64(t);
}
7280
7281
7282
7283
7284
7285
7286
/* Floating point <-> integer conversions
 *   31   30  29 28       24 23  22  21 20   19 18 16 15         10 9  5 4  0
 * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
 * | sf | 0 | S | 1 1 1 1 0 | type | 1 | rmode | opc | 0 0 0 0 0 0 | rn | rd |
 * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
 */
static void disas_fp_int_conv(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 16, 3);
    int rmode = extract32(insn, 19, 2);
    int type = extract32(insn, 22, 2);
    bool sbit = extract32(insn, 29, 1);
    bool sf = extract32(insn, 31, 1);
    bool itof = false;

    if (sbit) {
        goto do_unallocated;
    }

    switch (opcode) {
    case 2: /* SCVTF */
    case 3: /* UCVTF */
        itof = true;
        /* fallthrough */
    case 4: /* FCVTAS */
    case 5: /* FCVTAU */
        if (rmode != 0) {
            goto do_unallocated;
        }
        /* fallthrough */
    case 0: /* FCVT[NPMZ]S */
    case 1: /* FCVT[NPMZ]U */
        switch (type) {
        case 0: /* single */
        case 1: /* double */
            break;
        case 3: /* half: needs FEAT_FP16 */
            if (!dc_isar_feature(aa64_fp16, s)) {
                goto do_unallocated;
            }
            break;
        default:
            goto do_unallocated;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_fpfpcvt(s, rd, rn, opcode, itof, rmode, 64, sf, type);
        break;

    default:
        /* FMOV and FJCVTZS: decode on the combined sf:type:rmode:opcode */
        switch (sf << 7 | type << 5 | rmode << 3 | opcode) {
        case 0b01100110: /* FMOV half <-> 32-bit int */
        case 0b01100111:
        case 0b11100110: /* FMOV half <-> 64-bit int */
        case 0b11100111:
            if (!dc_isar_feature(aa64_fp16, s)) {
                goto do_unallocated;
            }
            /* fallthrough */
        case 0b00000110: /* FMOV 32-bit */
        case 0b00000111:
        case 0b10100110: /* FMOV 64-bit */
        case 0b10100111:
        case 0b11001110: /* FMOV top half of 128-bit */
        case 0b11001111:
            if (!fp_access_check(s)) {
                return;
            }
            itof = opcode & 1;
            handle_fmov(s, rd, rn, type, itof);
            break;

        case 0b00111110: /* FJCVTZS */
            if (!dc_isar_feature(aa64_jscvt, s)) {
                goto do_unallocated;
            } else if (fp_access_check(s)) {
                handle_fjcvtzs(s, rd, rn);
            }
            break;

        default:
        do_unallocated:
            unallocated_encoding(s);
            return;
        }
        break;
    }
}
7372
7373
7374
7375
7376
7377
7378
7379static void disas_data_proc_fp(DisasContext *s, uint32_t insn)
7380{
7381 if (extract32(insn, 24, 1)) {
7382
7383 disas_fp_3src(s, insn);
7384 } else if (extract32(insn, 21, 1) == 0) {
7385
7386 disas_fp_fixed_conv(s, insn);
7387 } else {
7388 switch (extract32(insn, 10, 2)) {
7389 case 1:
7390
7391 disas_fp_ccomp(s, insn);
7392 break;
7393 case 2:
7394
7395 disas_fp_2src(s, insn);
7396 break;
7397 case 3:
7398
7399 disas_fp_csel(s, insn);
7400 break;
7401 case 0:
7402 switch (ctz32(extract32(insn, 12, 4))) {
7403 case 0:
7404
7405 disas_fp_imm(s, insn);
7406 break;
7407 case 1:
7408
7409 disas_fp_compare(s, insn);
7410 break;
7411 case 2:
7412
7413 disas_fp_1src(s, insn);
7414 break;
7415 case 3:
7416 unallocated_encoding(s);
7417 break;
7418 default:
7419
7420 disas_fp_int_conv(s, insn);
7421 break;
7422 }
7423 break;
7424 }
7425 }
7426}
7427
7428static void do_ext64(DisasContext *s, TCGv_i64 tcg_left, TCGv_i64 tcg_right,
7429 int pos)
7430{
7431
7432
7433
7434
7435
7436
7437 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
7438 assert(pos > 0 && pos < 64);
7439
7440 tcg_gen_shri_i64(tcg_right, tcg_right, pos);
7441 tcg_gen_shli_i64(tcg_tmp, tcg_left, 64 - pos);
7442 tcg_gen_or_i64(tcg_right, tcg_right, tcg_tmp);
7443
7444 tcg_temp_free_i64(tcg_tmp);
7445}
7446
7447
7448
7449
7450
7451
7452
/* EXT
 *   31  30 29         24 23 22  21 20  16 15  14  11 10  9    5 4    0
 * +---+---+-------------+-----+---+------+---+------+---+------+------+
 * | 0 | Q | 1 0 1 1 1 0 | op2 | 0 |  Rm  | 0 | imm4 | 0 |  Rn  |  Rd  |
 * +---+---+-------------+-----+---+------+---+------+---+------+------+
 */
static void disas_simd_ext(DisasContext *s, uint32_t insn)
{
    int is_q = extract32(insn, 30, 1);
    int op2 = extract32(insn, 22, 2);
    int imm4 = extract32(insn, 11, 4);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    int pos = imm4 << 3; /* extraction start, in bits */
    TCGv_i64 tcg_resl, tcg_resh;

    /* op2 must be zero; for the 64-bit variant imm4<3> must be zero */
    if (op2 != 0 || (!is_q && extract32(imm4, 3, 1))) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_resh = tcg_temp_new_i64();
    tcg_resl = tcg_temp_new_i64();

    /* Vd gets bits starting from an offset of pos bits into Vm:Vn.
     * Either (is_q) extract the whole 128 bits as two 64-bit halves,
     * or extract only the low 64 bits.
     */
    if (!is_q) {
        read_vec_element(s, tcg_resl, rn, 0, MO_64);
        if (pos != 0) {
            read_vec_element(s, tcg_resh, rm, 0, MO_64);
            do_ext64(s, tcg_resh, tcg_resl, pos);
        }
    } else {
        TCGv_i64 tcg_hh;
        typedef struct {
            int reg;
            int elt;
        } EltPosns;
        /* The four consecutive 64-bit chunks of Vm:Vn, lowest first */
        EltPosns eltposns[] = { {rn, 0}, {rn, 1}, {rm, 0}, {rm, 1} };
        EltPosns *elt = eltposns;

        if (pos >= 64) {
            /* Starting in the upper chunk: skip one and adjust pos */
            elt++;
            pos -= 64;
        }

        read_vec_element(s, tcg_resl, elt->reg, elt->elt, MO_64);
        elt++;
        read_vec_element(s, tcg_resh, elt->reg, elt->elt, MO_64);
        elt++;
        if (pos != 0) {
            do_ext64(s, tcg_resh, tcg_resl, pos);
            /* Fold bits from the third chunk into the high half */
            tcg_hh = tcg_temp_new_i64();
            read_vec_element(s, tcg_hh, elt->reg, elt->elt, MO_64);
            do_ext64(s, tcg_hh, tcg_resh, pos);
            tcg_temp_free_i64(tcg_hh);
        }
    }

    write_vec_element(s, tcg_resl, rd, 0, MO_64);
    tcg_temp_free_i64(tcg_resl);
    if (is_q) {
        write_vec_element(s, tcg_resh, rd, 1, MO_64);
    }
    tcg_temp_free_i64(tcg_resh);
    clear_vec_high(s, is_q, rd);
}
7521
7522
7523
7524
7525
7526
7527
/* TBL/TBX
 *   31  30 29         24 23 22  21 20  16 15 14 13  12  11 10 9    5 4    0
 * +---+---+-------------+-----+---+------+---+-----+----+-----+------+------+
 * | 0 | Q | 0 0 1 1 1 0 | op2 | 0 |  Rm  | 0 | len | op | 0 0 |  Rn  |  Rd  |
 * +---+---+-------------+-----+---+------+---+-----+----+-----+------+------+
 */
static void disas_simd_tb(DisasContext *s, uint32_t insn)
{
    int op2 = extract32(insn, 22, 2);
    int is_q = extract32(insn, 30, 1);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    int is_tbx = extract32(insn, 12, 1); /* 0: TBL (zero fill), 1: TBX */
    int len = (extract32(insn, 13, 2) + 1) * 16; /* table size in bytes */

    if (op2 != 0) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* The helper unpacks the table length, TBX flag and first table
     * register number from the packed immediate operand.
     */
    tcg_gen_gvec_2_ptr(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rm), cpu_env,
                       is_q ? 16 : 8, vec_full_reg_size(s),
                       (len << 6) | (is_tbx << 5) | rn,
                       gen_helper_simd_tblx);
}
7553
7554
7555
7556
7557
7558
7559
/* ZIP/UZP/TRN
 *   31  30 29         24 23  22  21 20   16 15 14 12 11 10 9    5 4    0
 * +---+---+-------------+------+---+------+---+------+---+------+------+
 * | 0 | Q | 0 0 1 1 1 0 | size | 0 |  Rm  | 0 | opc  | 1 0 | Rn |  Rd |
 * +---+---+-------------+------+---+------+---+------+---+------+------+
 */
static void disas_simd_zip_trn(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    /* opc field bits [1:0] indicate UZP/TRN/ZIP;
     * insn bit 14 indicates the "1" vs "2" variant of the insn.
     */
    int opcode = extract32(insn, 12, 2);
    bool part = extract32(insn, 14, 1);
    bool is_q = extract32(insn, 30, 1);
    int esize = 8 << size;
    int i, ofs;
    int datasize = is_q ? 128 : 64;
    int elements = datasize / esize;
    TCGv_i64 tcg_res, tcg_resl, tcg_resh;

    if (opcode == 0 || (size == 3 && !is_q)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* Accumulate the result in two 64-bit halves (high half only
     * used for the 128-bit case).
     */
    tcg_resl = tcg_const_i64(0);
    tcg_resh = is_q ? tcg_const_i64(0) : NULL;
    tcg_res = tcg_temp_new_i64();

    for (i = 0; i < elements; i++) {
        switch (opcode) {
        case 1: /* UZP1/2 */
        {
            int midpoint = elements / 2;
            if (i < midpoint) {
                read_vec_element(s, tcg_res, rn, 2 * i + part, size);
            } else {
                read_vec_element(s, tcg_res, rm,
                                 2 * (i - midpoint) + part, size);
            }
            break;
        }
        case 2: /* TRN1/2 */
            if (i & 1) {
                read_vec_element(s, tcg_res, rm, (i & ~1) + part, size);
            } else {
                read_vec_element(s, tcg_res, rn, (i & ~1) + part, size);
            }
            break;
        case 3: /* ZIP1/2 */
        {
            int base = part * elements / 2;
            if (i & 1) {
                read_vec_element(s, tcg_res, rm, base + (i >> 1), size);
            } else {
                read_vec_element(s, tcg_res, rn, base + (i >> 1), size);
            }
            break;
        }
        default:
            g_assert_not_reached();
        }

        /* OR the element into the right spot of the result halves */
        ofs = i * esize;
        if (ofs < 64) {
            tcg_gen_shli_i64(tcg_res, tcg_res, ofs);
            tcg_gen_or_i64(tcg_resl, tcg_resl, tcg_res);
        } else {
            tcg_gen_shli_i64(tcg_res, tcg_res, ofs - 64);
            tcg_gen_or_i64(tcg_resh, tcg_resh, tcg_res);
        }
    }

    tcg_temp_free_i64(tcg_res);

    write_vec_element(s, tcg_resl, rd, 0, MO_64);
    tcg_temp_free_i64(tcg_resl);

    if (is_q) {
        write_vec_element(s, tcg_resh, rd, 1, MO_64);
        tcg_temp_free_i64(tcg_resh);
    }
    clear_vec_high(s, is_q, rd);
}
7646
7647
7648
7649
7650
7651
7652
7653
7654
7655
7656
/* do_reduction_op helper
 *
 * This mirrors the Reduce() pseudocode in the ARM ARM: a recursive
 * pairwise reduction over the elements of Vn selected by the bitmap
 * vmap. Doing the operations in exactly this order matters for
 * correct NaN propagation.
 *
 * Returns a new TCG temp which the caller must free.
 */
static TCGv_i32 do_reduction_op(DisasContext *s, int fpopcode, int rn,
                                int esize, int size, int vmap, TCGv_ptr fpst)
{
    if (esize == size) {
        int element;
        MemOp msize = esize == 16 ? MO_16 : MO_32;
        TCGv_i32 tcg_elem;

        /* Base case: exactly one element left in the map */
        assert(ctpop8(vmap) == 1);
        element = ctz32(vmap);
        assert(element < 8);

        tcg_elem = tcg_temp_new_i32();
        read_vec_element_i32(s, tcg_elem, rn, element, msize);
        return tcg_elem;
    } else {
        /* Split the map into low and high halves and combine the
         * two sub-reductions with the requested operation.
         */
        int bits = size / 2;
        int shift = ctpop8(vmap) / 2;
        int vmap_lo = (vmap >> shift) & vmap;
        int vmap_hi = (vmap & ~vmap_lo);
        TCGv_i32 tcg_hi, tcg_lo, tcg_res;

        tcg_hi = do_reduction_op(s, fpopcode, rn, esize, bits, vmap_hi, fpst);
        tcg_lo = do_reduction_op(s, fpopcode, rn, esize, bits, vmap_lo, fpst);
        tcg_res = tcg_temp_new_i32();

        switch (fpopcode) {
        case 0x0c: /* fmaxnmv half-precision */
            gen_helper_advsimd_maxnumh(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x0f: /* fmaxv half-precision */
            gen_helper_advsimd_maxh(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x1c: /* fminnmv half-precision */
            gen_helper_advsimd_minnumh(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x1f: /* fminv half-precision */
            gen_helper_advsimd_minh(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x2c: /* fmaxnmv */
            gen_helper_vfp_maxnums(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x2f: /* fmaxv */
            gen_helper_vfp_maxs(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x3c: /* fminnmv */
            gen_helper_vfp_minnums(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x3f: /* fminv */
            gen_helper_vfp_mins(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        default:
            g_assert_not_reached();
        }

        tcg_temp_free_i32(tcg_hi);
        tcg_temp_free_i32(tcg_lo);
        return tcg_res;
    }
}
7718
7719
7720
7721
7722
7723
7724
/* AdvSIMD across lanes
 *   31  30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
 * | 0 | Q | U | 0 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
 */
static void disas_simd_across_lanes(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    bool is_q = extract32(insn, 30, 1);
    bool is_u = extract32(insn, 29, 1);
    bool is_fp = false;
    bool is_min = false;
    int esize;
    int elements;
    int i;
    TCGv_i64 tcg_res, tcg_elt;

    switch (opcode) {
    case 0x1b: /* ADDV */
        if (is_u) {
            unallocated_encoding(s);
            return;
        }
        /* fallthrough */
    case 0x3: /* SADDLV, UADDLV */
    case 0xa: /* SMAXV, UMAXV */
    case 0x1a: /* SMINV, UMINV */
        if (size == 3 || (size == 2 && !is_q)) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0xc: /* FMAXNMV, FMINNMV */
    case 0xf: /* FMAXV, FMINV */
        /* Bit 1 of size field encodes min vs max and the actual size
         * depends on the encoding of the U bit. If not set (and FP16
         * enabled) then we do half-precision float instead of single
         * precision.
         */
        is_min = extract32(size, 1, 1);
        is_fp = true;
        if (!is_u && dc_isar_feature(aa64_fp16, s)) {
            size = 1;
        } else if (!is_u || !is_q || extract32(size, 0, 1)) {
            unallocated_encoding(s);
            return;
        } else {
            size = 2;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    esize = 8 << size;
    elements = (is_q ? 128 : 64) / esize;

    tcg_res = tcg_temp_new_i64();
    tcg_elt = tcg_temp_new_i64();

    /* These instructions operate across all lanes of a vector to
     * produce a single result. A 64-bit intermediate is sufficient
     * for all of them: for [US]ADDLV the max element size is 32 bits
     * with a 64-bit result; for the others the intermediate width
     * equals the element size (at most 32 bits). For simplicity the
     * integer ops always work at 64 bits and truncate at the end;
     * the FP ops go via 32-bit (or 16-bit) helpers.
     */
    if (!is_fp) {
        read_vec_element(s, tcg_res, rn, 0, size | (is_u ? 0 : MO_SIGN));

        for (i = 1; i < elements; i++) {
            read_vec_element(s, tcg_elt, rn, i, size | (is_u ? 0 : MO_SIGN));

            switch (opcode) {
            case 0x03: /* SADDLV / UADDLV */
            case 0x1b: /* ADDV */
                tcg_gen_add_i64(tcg_res, tcg_res, tcg_elt);
                break;
            case 0x0a: /* SMAXV / UMAXV */
                if (is_u) {
                    tcg_gen_umax_i64(tcg_res, tcg_res, tcg_elt);
                } else {
                    tcg_gen_smax_i64(tcg_res, tcg_res, tcg_elt);
                }
                break;
            case 0x1a: /* SMINV / UMINV */
                if (is_u) {
                    tcg_gen_umin_i64(tcg_res, tcg_res, tcg_elt);
                } else {
                    tcg_gen_smin_i64(tcg_res, tcg_res, tcg_elt);
                }
                break;
            default:
                g_assert_not_reached();
            }

        }
    } else {
        /* Floating point min/max reductions: done recursively by
         * do_reduction_op(), which reduces the elements selected by
         * vmap pairwise in the order required for NaN propagation.
         */
        TCGv_ptr fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
        int fpopcode = opcode | is_min << 4 | is_u << 5;
        int vmap = (1 << elements) - 1;
        TCGv_i32 tcg_res32 = do_reduction_op(s, fpopcode, rn, esize,
                                             (is_q ? 128 : 64), vmap, fpst);
        tcg_gen_extu_i32_i64(tcg_res, tcg_res32);
        tcg_temp_free_i32(tcg_res32);
        tcg_temp_free_ptr(fpst);
    }

    tcg_temp_free_i64(tcg_elt);

    /* Now truncate the result to the width required for the output */
    if (opcode == 0x03) {
        /* SADDLV, UADDLV: result is 2 * esize */
        size++;
    }

    switch (size) {
    case 0:
        tcg_gen_ext8u_i64(tcg_res, tcg_res);
        break;
    case 1:
        tcg_gen_ext16u_i64(tcg_res, tcg_res);
        break;
    case 2:
        tcg_gen_ext32u_i64(tcg_res, tcg_res);
        break;
    case 3:
        break;
    default:
        g_assert_not_reached();
    }

    write_fp_dreg(s, rd, tcg_res);
    tcg_temp_free_i64(tcg_res);
}
7873
7874
7875
7876
7877
7878
7879
7880
7881
7882
7883static void handle_simd_dupe(DisasContext *s, int is_q, int rd, int rn,
7884 int imm5)
7885{
7886 int size = ctz32(imm5);
7887 int index;
7888
7889 if (size > 3 || (size == 3 && !is_q)) {
7890 unallocated_encoding(s);
7891 return;
7892 }
7893
7894 if (!fp_access_check(s)) {
7895 return;
7896 }
7897
7898 index = imm5 >> (size + 1);
7899 tcg_gen_gvec_dup_mem(size, vec_full_reg_offset(s, rd),
7900 vec_reg_offset(s, rn, index, size),
7901 is_q ? 16 : 8, vec_full_reg_size(s));
7902}
7903
7904
7905
7906
7907
7908
7909
7910static void handle_simd_dupes(DisasContext *s, int rd, int rn,
7911 int imm5)
7912{
7913 int size = ctz32(imm5);
7914 int index;
7915 TCGv_i64 tmp;
7916
7917 if (size > 3) {
7918 unallocated_encoding(s);
7919 return;
7920 }
7921
7922 if (!fp_access_check(s)) {
7923 return;
7924 }
7925
7926 index = imm5 >> (size + 1);
7927
7928
7929
7930
7931 tmp = tcg_temp_new_i64();
7932 read_vec_element(s, tmp, rn, index, size);
7933 write_fp_dreg(s, rd, tmp);
7934 tcg_temp_free_i64(tmp);
7935}
7936
7937
7938
7939
7940
7941
7942
7943
7944
7945
7946static void handle_simd_dupg(DisasContext *s, int is_q, int rd, int rn,
7947 int imm5)
7948{
7949 int size = ctz32(imm5);
7950 uint32_t dofs, oprsz, maxsz;
7951
7952 if (size > 3 || ((size == 3) && !is_q)) {
7953 unallocated_encoding(s);
7954 return;
7955 }
7956
7957 if (!fp_access_check(s)) {
7958 return;
7959 }
7960
7961 dofs = vec_full_reg_offset(s, rd);
7962 oprsz = is_q ? 16 : 8;
7963 maxsz = vec_full_reg_size(s);
7964
7965 tcg_gen_gvec_dup_i64(size, dofs, oprsz, maxsz, cpu_reg(s, rn));
7966}
7967
7968
7969
7970
7971
7972
7973
7974
7975
7976
7977
7978static void handle_simd_inse(DisasContext *s, int rd, int rn,
7979 int imm4, int imm5)
7980{
7981 int size = ctz32(imm5);
7982 int src_index, dst_index;
7983 TCGv_i64 tmp;
7984
7985 if (size > 3) {
7986 unallocated_encoding(s);
7987 return;
7988 }
7989
7990 if (!fp_access_check(s)) {
7991 return;
7992 }
7993
7994 dst_index = extract32(imm5, 1+size, 5);
7995 src_index = extract32(imm4, size, 4);
7996
7997 tmp = tcg_temp_new_i64();
7998
7999 read_vec_element(s, tmp, rn, src_index, size);
8000 write_vec_element(s, tmp, rd, dst_index, size);
8001
8002 tcg_temp_free_i64(tmp);
8003
8004
8005 clear_vec_high(s, true, rd);
8006}
8007
8008
8009
8010
8011
8012
8013
8014
8015
8016
8017
8018
8019static void handle_simd_insg(DisasContext *s, int rd, int rn, int imm5)
8020{
8021 int size = ctz32(imm5);
8022 int idx;
8023
8024 if (size > 3) {
8025 unallocated_encoding(s);
8026 return;
8027 }
8028
8029 if (!fp_access_check(s)) {
8030 return;
8031 }
8032
8033 idx = extract32(imm5, 1 + size, 4 - size);
8034 write_vec_element(s, cpu_reg(s, rn), rd, idx, size);
8035
8036
8037 clear_vec_high(s, true, rd);
8038}
8039
8040
8041
8042
8043
8044
8045
8046
8047
8048
8049
8050
8051
/* UMOV/SMOV: move a vector element to a general register, with
 * zero (UMOV) or sign (SMOV) extension. is_q selects the 32/64-bit
 * destination width; imm5 encodes the element size and index.
 */
static void handle_simd_umov_smov(DisasContext *s, int is_q, int is_signed,
                                  int rn, int rd, int imm5)
{
    int size = ctz32(imm5);
    int element;
    TCGv_i64 tcg_rd;

    /* Check for UnallocatedEncodings */
    if (is_signed) {
        /* SMOV: 32-bit dest takes 8/16-bit elements only;
         * 64-bit dest takes 8/16/32-bit elements.
         */
        if (size > 2 || (size == 2 && !is_q)) {
            unallocated_encoding(s);
            return;
        }
    } else {
        /* UMOV: element size must match the destination width
         * (UMOV X only exists for 64-bit elements).
         */
        if (size > 3
            || (size < 3 && is_q)
            || (size == 3 && !is_q)) {
            unallocated_encoding(s);
            return;
        }
    }

    if (!fp_access_check(s)) {
        return;
    }

    element = extract32(imm5, 1+size, 4);

    tcg_rd = cpu_reg(s, rd);
    read_vec_element(s, tcg_rd, rn, element, size | (is_signed ? MO_SIGN : 0));
    if (is_signed && !is_q) {
        /* 32-bit SMOV: clear the high half of the X register */
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
}
8086
8087
8088
8089
8090
8091
8092
8093static void disas_simd_copy(DisasContext *s, uint32_t insn)
8094{
8095 int rd = extract32(insn, 0, 5);
8096 int rn = extract32(insn, 5, 5);
8097 int imm4 = extract32(insn, 11, 4);
8098 int op = extract32(insn, 29, 1);
8099 int is_q = extract32(insn, 30, 1);
8100 int imm5 = extract32(insn, 16, 5);
8101
8102 if (op) {
8103 if (is_q) {
8104
8105 handle_simd_inse(s, rd, rn, imm4, imm5);
8106 } else {
8107 unallocated_encoding(s);
8108 }
8109 } else {
8110 switch (imm4) {
8111 case 0:
8112
8113 handle_simd_dupe(s, is_q, rd, rn, imm5);
8114 break;
8115 case 1:
8116
8117 handle_simd_dupg(s, is_q, rd, rn, imm5);
8118 break;
8119 case 3:
8120 if (is_q) {
8121
8122 handle_simd_insg(s, rd, rn, imm5);
8123 } else {
8124 unallocated_encoding(s);
8125 }
8126 break;
8127 case 5:
8128 case 7:
8129
8130 handle_simd_umov_smov(s, is_q, (imm4 == 5), rn, rd, imm5);
8131 break;
8132 default:
8133 unallocated_encoding(s);
8134 break;
8135 }
8136 }
8137}
8138
8139
8140
8141
8142
8143
8144
8145
8146
8147
8148
8149
8150
8151
8152
8153static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
8154{
8155 int rd = extract32(insn, 0, 5);
8156 int cmode = extract32(insn, 12, 4);
8157 int cmode_3_1 = extract32(cmode, 1, 3);
8158 int cmode_0 = extract32(cmode, 0, 1);
8159 int o2 = extract32(insn, 11, 1);
8160 uint64_t abcdefgh = extract32(insn, 5, 5) | (extract32(insn, 16, 3) << 5);
8161 bool is_neg = extract32(insn, 29, 1);
8162 bool is_q = extract32(insn, 30, 1);
8163 uint64_t imm = 0;
8164
8165 if (o2 != 0 || ((cmode == 0xf) && is_neg && !is_q)) {
8166
8167 if (!(dc_isar_feature(aa64_fp16, s) && o2 && cmode == 0xf)) {
8168 unallocated_encoding(s);
8169 return;
8170 }
8171 }
8172
8173 if (!fp_access_check(s)) {
8174 return;
8175 }
8176
8177
8178 switch (cmode_3_1) {
8179 case 0:
8180 case 1:
8181 case 2:
8182 case 3:
8183 {
8184 int shift = cmode_3_1 * 8;
8185 imm = bitfield_replicate(abcdefgh << shift, 32);
8186 break;
8187 }
8188 case 4:
8189 case 5:
8190 {
8191 int shift = (cmode_3_1 & 0x1) * 8;
8192 imm = bitfield_replicate(abcdefgh << shift, 16);
8193 break;
8194 }
8195 case 6:
8196 if (cmode_0) {
8197
8198 imm = (abcdefgh << 16) | 0xffff;
8199 } else {
8200
8201 imm = (abcdefgh << 8) | 0xff;
8202 }
8203 imm = bitfield_replicate(imm, 32);
8204 break;
8205 case 7:
8206 if (!cmode_0 && !is_neg) {
8207 imm = bitfield_replicate(abcdefgh, 8);
8208 } else if (!cmode_0 && is_neg) {
8209 int i;
8210 imm = 0;
8211 for (i = 0; i < 8; i++) {
8212 if ((abcdefgh) & (1 << i)) {
8213 imm |= 0xffULL << (i * 8);
8214 }
8215 }
8216 } else if (cmode_0) {
8217 if (is_neg) {
8218 imm = (abcdefgh & 0x3f) << 48;
8219 if (abcdefgh & 0x80) {
8220 imm |= 0x8000000000000000ULL;
8221 }
8222 if (abcdefgh & 0x40) {
8223 imm |= 0x3fc0000000000000ULL;
8224 } else {
8225 imm |= 0x4000000000000000ULL;
8226 }
8227 } else {
8228 if (o2) {
8229
8230 imm = vfp_expand_imm(MO_16, abcdefgh);
8231
8232 imm = bitfield_replicate(imm, 16);
8233 } else {
8234 imm = (abcdefgh & 0x3f) << 19;
8235 if (abcdefgh & 0x80) {
8236 imm |= 0x80000000;
8237 }
8238 if (abcdefgh & 0x40) {
8239 imm |= 0x3e000000;
8240 } else {
8241 imm |= 0x40000000;
8242 }
8243 imm |= (imm << 32);
8244 }
8245 }
8246 }
8247 break;
8248 default:
8249 fprintf(stderr, "%s: cmode_3_1: %x\n", __func__, cmode_3_1);
8250 g_assert_not_reached();
8251 }
8252
8253 if (cmode_3_1 != 7 && is_neg) {
8254 imm = ~imm;
8255 }
8256
8257 if (!((cmode & 0x9) == 0x1 || (cmode & 0xd) == 0x9)) {
8258
8259 tcg_gen_gvec_dup_imm(MO_64, vec_full_reg_offset(s, rd), is_q ? 16 : 8,
8260 vec_full_reg_size(s), imm);
8261 } else {
8262
8263 if (is_neg) {
8264 gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_andi, MO_64);
8265 } else {
8266 gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_ori, MO_64);
8267 }
8268 }
8269}
8270
8271
8272
8273
8274
8275
8276
8277static void disas_simd_scalar_copy(DisasContext *s, uint32_t insn)
8278{
8279 int rd = extract32(insn, 0, 5);
8280 int rn = extract32(insn, 5, 5);
8281 int imm4 = extract32(insn, 11, 4);
8282 int imm5 = extract32(insn, 16, 5);
8283 int op = extract32(insn, 29, 1);
8284
8285 if (op != 0 || imm4 != 0) {
8286 unallocated_encoding(s);
8287 return;
8288 }
8289
8290
8291 handle_simd_dupes(s, rd, rn, imm5);
8292}
8293
8294
8295
8296
8297
8298
8299
/* AdvSIMD scalar pairwise
 *  31 30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
 * +-----+---+-----------+------+-----------+--------+-----+------+------+
 * | 0 1 | U | 1 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +-----+---+-----------+------+-----------+--------+-----+------+------+
 */
static void disas_simd_scalar_pairwise(DisasContext *s, uint32_t insn)
{
    int u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    TCGv_ptr fpst;

    /* For the FP ops, size[1] is part of the opcode; for ADDP it is
     * strictly not, but folding it in is harmless because ADDP
     * requires size == 3 anyway (so bit 1 is always set).
     */
    opcode |= (extract32(size, 1, 1) << 5);

    switch (opcode) {
    case 0x3b: /* ADDP */
        if (u || size != 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }

        /* Integer op, no fp status needed */
        fpst = NULL;
        break;
    case 0xc: /* FMAXNMP */
    case 0xd: /* FADDP */
    case 0xf: /* FMAXP */
    case 0x2c: /* FMINNMP */
    case 0x2f: /* FMINP */
        /* FP op: U selects 32/64-bit (via size[0]) vs half-precision */
        if (!u) {
            if (!dc_isar_feature(aa64_fp16, s)) {
                unallocated_encoding(s);
                return;
            } else {
                size = MO_16;
            }
        } else {
            size = extract32(size, 0, 1) ? MO_64 : MO_32;
        }

        if (!fp_access_check(s)) {
            return;
        }

        fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (size == MO_64) {
        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
        TCGv_i64 tcg_op2 = tcg_temp_new_i64();
        TCGv_i64 tcg_res = tcg_temp_new_i64();

        /* Pairwise: combine elements 0 and 1 of the source */
        read_vec_element(s, tcg_op1, rn, 0, MO_64);
        read_vec_element(s, tcg_op2, rn, 1, MO_64);

        switch (opcode) {
        case 0x3b: /* ADDP */
            tcg_gen_add_i64(tcg_res, tcg_op1, tcg_op2);
            break;
        case 0xc: /* FMAXNMP */
            gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0xd: /* FADDP */
            gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0xf: /* FMAXP */
            gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0x2c: /* FMINNMP */
            gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0x2f: /* FMINP */
            gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        default:
            g_assert_not_reached();
        }

        write_fp_dreg(s, rd, tcg_res);

        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2);
        tcg_temp_free_i64(tcg_res);
    } else {
        TCGv_i32 tcg_op1 = tcg_temp_new_i32();
        TCGv_i32 tcg_op2 = tcg_temp_new_i32();
        TCGv_i32 tcg_res = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_op1, rn, 0, size);
        read_vec_element_i32(s, tcg_op2, rn, 1, size);

        if (size == MO_16) {
            switch (opcode) {
            case 0xc: /* FMAXNMP */
                gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xd: /* FADDP */
                gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xf: /* FMAXP */
                gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x2c: /* FMINNMP */
                gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x2f: /* FMINP */
                gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            switch (opcode) {
            case 0xc: /* FMAXNMP */
                gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xd: /* FADDP */
                gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xf: /* FMAXP */
                gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x2c: /* FMINNMP */
                gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x2f: /* FMINP */
                gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }
        }

        write_fp_sreg(s, rd, tcg_res);

        tcg_temp_free_i32(tcg_op1);
        tcg_temp_free_i32(tcg_op2);
        tcg_temp_free_i32(tcg_res);
    }

    if (fpst) {
        tcg_temp_free_ptr(fpst);
    }
}
8452
8453
8454
8455
8456
8457
8458
/* Common SSHR[RA]/USHR[RA] - Shift right (optional rounding/accumulate).
 * Shift tcg_src right by 'shift' (signed if !is_u), optionally adding
 * the rounding constant tcg_rnd (NULL when no rounding) first, and
 * either move the result into tcg_res or accumulate into it. Shared
 * by the vector and scalar shift code.
 */
static void handle_shri_with_rndacc(TCGv_i64 tcg_res, TCGv_i64 tcg_src,
                                    TCGv_i64 tcg_rnd, bool accumulate,
                                    bool is_u, int size, int shift)
{
    bool extended_result = false;
    bool round = tcg_rnd != NULL;
    int ext_lshift = 0;
    TCGv_i64 tcg_src_hi;

    if (round && size == 3) {
        /* Adding the 64-bit rounding constant can carry into bit 64,
         * so keep a 128-bit intermediate in tcg_src_hi:tcg_src.
         */
        extended_result = true;
        ext_lshift = 64 - shift;
        tcg_src_hi = tcg_temp_new_i64();
    } else if (shift == 64) {
        if (!accumulate && is_u) {
            /* result is zero */
            tcg_gen_movi_i64(tcg_res, 0);
            return;
        }
    }

    /* Deal with the rounding step */
    if (round) {
        if (extended_result) {
            TCGv_i64 tcg_zero = tcg_const_i64(0);
            if (!is_u) {
                /* take care of sign extending tcg_src */
                tcg_gen_sari_i64(tcg_src_hi, tcg_src, 63);
                tcg_gen_add2_i64(tcg_src, tcg_src_hi,
                                 tcg_src, tcg_src_hi,
                                 tcg_rnd, tcg_zero);
            } else {
                tcg_gen_add2_i64(tcg_src, tcg_src_hi,
                                 tcg_src, tcg_zero,
                                 tcg_rnd, tcg_zero);
            }
            tcg_temp_free_i64(tcg_zero);
        } else {
            tcg_gen_add_i64(tcg_src, tcg_src, tcg_rnd);
        }
    }

    /* Now do the shift right */
    if (round && extended_result) {
        /* extended case, > 64 bit precision required */
        if (ext_lshift == 0) {
            /* special case, only high bits matter */
            tcg_gen_mov_i64(tcg_src, tcg_src_hi);
        } else {
            tcg_gen_shri_i64(tcg_src, tcg_src, shift);
            tcg_gen_shli_i64(tcg_src_hi, tcg_src_hi, ext_lshift);
            tcg_gen_or_i64(tcg_src, tcg_src, tcg_src_hi);
        }
    } else {
        if (is_u) {
            if (shift == 64) {
                /* essentially shifting in 64 zeros */
                tcg_gen_movi_i64(tcg_src, 0);
            } else {
                tcg_gen_shri_i64(tcg_src, tcg_src, shift);
            }
        } else {
            if (shift == 64) {
                /* effectively extending the sign-bit */
                tcg_gen_sari_i64(tcg_src, tcg_src, 63);
            } else {
                tcg_gen_sari_i64(tcg_src, tcg_src, shift);
            }
        }
    }

    if (accumulate) {
        tcg_gen_add_i64(tcg_res, tcg_res, tcg_src);
    } else {
        tcg_gen_mov_i64(tcg_res, tcg_src);
    }

    if (extended_result) {
        tcg_temp_free_i64(tcg_src_hi);
    }
}
8540
8541
/* SSHR[RA]/USHR[RA]/SRSHR etc. - Scalar shift right, with optional
 * rounding, accumulation or insert (SRI). Scalar form operates on
 * the 64-bit element only.
 */
static void handle_scalar_simd_shri(DisasContext *s,
                                    bool is_u, int immh, int immb,
                                    int opcode, int rn, int rd)
{
    const int size = 3;
    int immhb = immh << 3 | immb;
    int shift = 2 * (8 << size) - immhb;
    bool accumulate = false;
    bool round = false;
    bool insert = false;
    TCGv_i64 tcg_rn;
    TCGv_i64 tcg_rd;
    TCGv_i64 tcg_round;

    /* Scalar form requires the 64-bit element size (immh<3> set) */
    if (!extract32(immh, 3, 1)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    switch (opcode) {
    case 0x02: /* SSRA / USRA (accumulate) */
        accumulate = true;
        break;
    case 0x04: /* SRSHR / URSHR (rounding) */
        round = true;
        break;
    case 0x06: /* SRSRA / URSRA (accum + rounding) */
        accumulate = round = true;
        break;
    case 0x08: /* SRI */
        insert = true;
        break;
    }

    if (round) {
        uint64_t round_const = 1ULL << (shift - 1);
        tcg_round = tcg_const_i64(round_const);
    } else {
        tcg_round = NULL;
    }

    tcg_rn = read_fp_dreg(s, rn);
    /* Accumulating/inserting forms also read the old destination */
    tcg_rd = (accumulate || insert) ? read_fp_dreg(s, rd) : tcg_temp_new_i64();

    if (insert) {
        /* shift count same as element size is valid but does nothing;
         * special case to avoid potential shift by 64.
         */
        int esize = 8 << size;
        if (shift != esize) {
            tcg_gen_shri_i64(tcg_rn, tcg_rn, shift);
            tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, 0, esize - shift);
        }
    } else {
        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
                                accumulate, is_u, size, shift);
    }

    write_fp_dreg(s, rd, tcg_rd);

    tcg_temp_free_i64(tcg_rn);
    tcg_temp_free_i64(tcg_rd);
    if (round) {
        tcg_temp_free_i64(tcg_round);
    }
}
8612
8613
/* SHL/SLI - Scalar shift left */
static void handle_scalar_simd_shli(DisasContext *s, bool insert,
                                    int immh, int immb, int opcode,
                                    int rn, int rd)
{
    int size = 32 - clz32(immh) - 1;
    int immhb = immh << 3 | immb;
    int shift = immhb - (8 << size);
    TCGv_i64 tcg_rn;
    TCGv_i64 tcg_rd;

    /* Scalar form requires the 64-bit element size (immh<3> set) */
    if (!extract32(immh, 3, 1)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rn = read_fp_dreg(s, rn);
    /* SLI keeps the low bits of the old destination value */
    tcg_rd = insert ? read_fp_dreg(s, rd) : tcg_temp_new_i64();

    if (insert) {
        tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, shift, 64 - shift);
    } else {
        tcg_gen_shli_i64(tcg_rd, tcg_rn, shift);
    }

    write_fp_dreg(s, rd, tcg_rd);

    tcg_temp_free_i64(tcg_rn);
    tcg_temp_free_i64(tcg_rd);
}
8647
8648
8649
/* SQSHRN/SQSHRUN etc. - Saturating (signed/unsigned) shift right
 * with narrowing (and potential rounding).
 */
static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q,
                                   bool is_u_shift, bool is_u_narrow,
                                   int immh, int immb, int opcode,
                                   int rn, int rd)
{
    int immhb = immh << 3 | immb;
    int size = 32 - clz32(immh) - 1; /* source element size log2 */
    int esize = 8 << size;           /* destination (narrow) element bits */
    int shift = (2 * esize) - immhb;
    int elements = is_scalar ? 1 : (64 / esize);
    bool round = extract32(opcode, 0, 1);
    MemOp ldop = (size + 1) | (is_u_shift ? 0 : MO_SIGN);
    TCGv_i64 tcg_rn, tcg_rd, tcg_round;
    TCGv_i32 tcg_rd_narrowed;
    TCGv_i64 tcg_final;

    static NeonGenNarrowEnvFn * const signed_narrow_fns[4][2] = {
        { gen_helper_neon_narrow_sat_s8,
          gen_helper_neon_unarrow_sat8 },
        { gen_helper_neon_narrow_sat_s16,
          gen_helper_neon_unarrow_sat16 },
        { gen_helper_neon_narrow_sat_s32,
          gen_helper_neon_unarrow_sat32 },
        { NULL, NULL },
    };
    static NeonGenNarrowEnvFn * const unsigned_narrow_fns[4] = {
        gen_helper_neon_narrow_sat_u8,
        gen_helper_neon_narrow_sat_u16,
        gen_helper_neon_narrow_sat_u32,
        NULL
    };
    NeonGenNarrowEnvFn *narrowfn;

    int i;

    assert(size < 4);

    /* immh<3> set would mean a 64-bit narrow destination: reserved */
    if (extract32(immh, 3, 1)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (is_u_shift) {
        narrowfn = unsigned_narrow_fns[size];
    } else {
        narrowfn = signed_narrow_fns[size][is_u_narrow ? 1 : 0];
    }

    tcg_rn = tcg_temp_new_i64();
    tcg_rd = tcg_temp_new_i64();
    tcg_rd_narrowed = tcg_temp_new_i32();
    tcg_final = tcg_const_i64(0);

    if (round) {
        uint64_t round_const = 1ULL << (shift - 1);
        tcg_round = tcg_const_i64(round_const);
    } else {
        tcg_round = NULL;
    }

    /* Shift (with optional rounding) each wide element, saturate-narrow
     * it, and deposit into the accumulated 64-bit result.
     */
    for (i = 0; i < elements; i++) {
        read_vec_element(s, tcg_rn, rn, i, ldop);
        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
                                false, is_u_shift, size+1, shift);
        narrowfn(tcg_rd_narrowed, cpu_env, tcg_rd);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_rd_narrowed);
        tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
    }

    /* Non-Q writes the low half; the "2" (Q) forms write the high half */
    if (!is_q) {
        write_vec_element(s, tcg_final, rd, 0, MO_64);
    } else {
        write_vec_element(s, tcg_final, rd, 1, MO_64);
    }

    if (round) {
        tcg_temp_free_i64(tcg_round);
    }
    tcg_temp_free_i64(tcg_rn);
    tcg_temp_free_i64(tcg_rd);
    tcg_temp_free_i32(tcg_rd_narrowed);
    tcg_temp_free_i64(tcg_final);

    clear_vec_high(s, is_q, rd);
}
8739
8740
/*
 * Saturating shift left by immediate: SQSHL, UQSHL and SQSHLU
 * (scalar and vector forms), dispatched to per-size neon helpers.
 */
static void handle_simd_qshl(DisasContext *s, bool scalar, bool is_q,
                             bool src_unsigned, bool dst_unsigned,
                             int immh, int immb, int rn, int rd)
{
    int immhb = immh << 3 | immb;
    int size = 32 - clz32(immh) - 1;
    int shift = immhb - (8 << size);
    int pass;

    /* immh == 0 is decoded as "modified immediate" by the caller. */
    assert(immh != 0);
    assert(!(scalar && is_q));

    if (!scalar) {
        if (!is_q && extract32(immh, 3, 1)) {
            unallocated_encoding(s);
            return;
        }

        /*
         * For 8- and 16-bit elements the neon helpers operate on 32-bit
         * values holding several packed elements, so replicate the shift
         * amount into every element lane of the 32-bit quantity.
         */
        switch (size) {
        case 0:
            shift |= shift << 8;
            /* fall through */
        case 1:
            shift |= shift << 16;
            break;
        case 2:
        case 3:
            break;
        default:
            g_assert_not_reached();
        }
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (size == 3) {
        /* 64-bit elements: one or two passes through the i64 helpers. */
        TCGv_i64 tcg_shift = tcg_const_i64(shift);
        /* Indexed [src_unsigned][dst_unsigned]; unsigned-src/signed-dst
         * has no instruction, hence the NULL entry. */
        static NeonGenTwo64OpEnvFn * const fns[2][2] = {
            { gen_helper_neon_qshl_s64, gen_helper_neon_qshlu_s64 },
            { NULL, gen_helper_neon_qshl_u64 },
        };
        NeonGenTwo64OpEnvFn *genfn = fns[src_unsigned][dst_unsigned];
        int maxpass = is_q ? 2 : 1;

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);
            genfn(tcg_op, cpu_env, tcg_op, tcg_shift);
            write_vec_element(s, tcg_op, rd, pass, MO_64);

            tcg_temp_free_i64(tcg_op);
        }
        tcg_temp_free_i64(tcg_shift);
        clear_vec_high(s, is_q, rd);
    } else {
        TCGv_i32 tcg_shift = tcg_const_i32(shift);
        static NeonGenTwoOpEnvFn * const fns[2][2][3] = {
            {
                { gen_helper_neon_qshl_s8,
                  gen_helper_neon_qshl_s16,
                  gen_helper_neon_qshl_s32 },
                { gen_helper_neon_qshlu_s8,
                  gen_helper_neon_qshlu_s16,
                  gen_helper_neon_qshlu_s32 }
            }, {
                { NULL, NULL, NULL },
                { gen_helper_neon_qshl_u8,
                  gen_helper_neon_qshl_u16,
                  gen_helper_neon_qshl_u32 }
            }
        };
        NeonGenTwoOpEnvFn *genfn = fns[src_unsigned][dst_unsigned][size];
        /* Vector passes consume whole 32-bit lanes of packed elements;
         * the scalar form reads a single element of the real size. */
        MemOp memop = scalar ? size : MO_32;
        int maxpass = scalar ? 1 : is_q ? 4 : 2;

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op, rn, pass, memop);
            genfn(tcg_op, cpu_env, tcg_op, tcg_shift);
            if (scalar) {
                /* Zero out the unwanted high bits of narrow results
                 * before writing the scalar destination. */
                switch (size) {
                case 0:
                    tcg_gen_ext8u_i32(tcg_op, tcg_op);
                    break;
                case 1:
                    tcg_gen_ext16u_i32(tcg_op, tcg_op);
                    break;
                case 2:
                    break;
                default:
                    g_assert_not_reached();
                }
                write_fp_sreg(s, rd, tcg_op);
            } else {
                write_vec_element_i32(s, tcg_op, rd, pass, MO_32);
            }

            tcg_temp_free_i32(tcg_op);
        }
        tcg_temp_free_i32(tcg_shift);

        if (!scalar) {
            clear_vec_high(s, is_q, rd);
        }
    }
}
8855
8856
/*
 * Common int-to-float conversion used by [US]CVTF (scalar, vector,
 * and fixed-point variants).  fracbits == 0 means a plain conversion;
 * otherwise the fixed-point helper variants are used.
 */
static void handle_simd_intfp_conv(DisasContext *s, int rd, int rn,
                                   int elements, int is_signed,
                                   int fracbits, int size)
{
    TCGv_ptr tcg_fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
    TCGv_i32 tcg_shift = NULL;

    MemOp mop = size | (is_signed ? MO_SIGN : 0);
    int pass;

    /* The 64-bit helpers always take a shift argument, so allocate it
     * even when fracbits is 0 in that case. */
    if (fracbits || size == MO_64) {
        tcg_shift = tcg_const_i32(fracbits);
    }

    if (size == MO_64) {
        TCGv_i64 tcg_int64 = tcg_temp_new_i64();
        TCGv_i64 tcg_double = tcg_temp_new_i64();

        for (pass = 0; pass < elements; pass++) {
            read_vec_element(s, tcg_int64, rn, pass, mop);

            if (is_signed) {
                gen_helper_vfp_sqtod(tcg_double, tcg_int64,
                                     tcg_shift, tcg_fpst);
            } else {
                gen_helper_vfp_uqtod(tcg_double, tcg_int64,
                                     tcg_shift, tcg_fpst);
            }
            if (elements == 1) {
                /* Scalar result: write_fp_dreg zeroes the high bits. */
                write_fp_dreg(s, rd, tcg_double);
            } else {
                write_vec_element(s, tcg_double, rd, pass, MO_64);
            }
        }

        tcg_temp_free_i64(tcg_int64);
        tcg_temp_free_i64(tcg_double);

    } else {
        TCGv_i32 tcg_int32 = tcg_temp_new_i32();
        TCGv_i32 tcg_float = tcg_temp_new_i32();

        for (pass = 0; pass < elements; pass++) {
            read_vec_element_i32(s, tcg_int32, rn, pass, mop);

            switch (size) {
            case MO_32:
                if (fracbits) {
                    /* Fixed-point variants take the fracbits shift. */
                    if (is_signed) {
                        gen_helper_vfp_sltos(tcg_float, tcg_int32,
                                             tcg_shift, tcg_fpst);
                    } else {
                        gen_helper_vfp_ultos(tcg_float, tcg_int32,
                                             tcg_shift, tcg_fpst);
                    }
                } else {
                    if (is_signed) {
                        gen_helper_vfp_sitos(tcg_float, tcg_int32, tcg_fpst);
                    } else {
                        gen_helper_vfp_uitos(tcg_float, tcg_int32, tcg_fpst);
                    }
                }
                break;
            case MO_16:
                if (fracbits) {
                    if (is_signed) {
                        gen_helper_vfp_sltoh(tcg_float, tcg_int32,
                                             tcg_shift, tcg_fpst);
                    } else {
                        gen_helper_vfp_ultoh(tcg_float, tcg_int32,
                                             tcg_shift, tcg_fpst);
                    }
                } else {
                    if (is_signed) {
                        gen_helper_vfp_sitoh(tcg_float, tcg_int32, tcg_fpst);
                    } else {
                        gen_helper_vfp_uitoh(tcg_float, tcg_int32, tcg_fpst);
                    }
                }
                break;
            default:
                g_assert_not_reached();
            }

            if (elements == 1) {
                write_fp_sreg(s, rd, tcg_float);
            } else {
                write_vec_element_i32(s, tcg_float, rd, pass, size);
            }
        }

        tcg_temp_free_i32(tcg_int32);
        tcg_temp_free_i32(tcg_float);
    }

    tcg_temp_free_ptr(tcg_fpst);
    if (tcg_shift) {
        tcg_temp_free_i32(tcg_shift);
    }

    /* Clear the high half unless we wrote all 16 bytes (i.e. Q form). */
    clear_vec_high(s, elements << size == 16, rd);
}
8959
8960
8961static void handle_simd_shift_intfp_conv(DisasContext *s, bool is_scalar,
8962 bool is_q, bool is_u,
8963 int immh, int immb, int opcode,
8964 int rn, int rd)
8965{
8966 int size, elements, fracbits;
8967 int immhb = immh << 3 | immb;
8968
8969 if (immh & 8) {
8970 size = MO_64;
8971 if (!is_scalar && !is_q) {
8972 unallocated_encoding(s);
8973 return;
8974 }
8975 } else if (immh & 4) {
8976 size = MO_32;
8977 } else if (immh & 2) {
8978 size = MO_16;
8979 if (!dc_isar_feature(aa64_fp16, s)) {
8980 unallocated_encoding(s);
8981 return;
8982 }
8983 } else {
8984
8985 g_assert(immh == 1);
8986 unallocated_encoding(s);
8987 return;
8988 }
8989
8990 if (is_scalar) {
8991 elements = 1;
8992 } else {
8993 elements = (8 << is_q) >> size;
8994 }
8995 fracbits = (16 << size) - immhb;
8996
8997 if (!fp_access_check(s)) {
8998 return;
8999 }
9000
9001 handle_simd_intfp_conv(s, rd, rn, elements, !is_u, fracbits, size);
9002}
9003
9004
9005static void handle_simd_shift_fpint_conv(DisasContext *s, bool is_scalar,
9006 bool is_q, bool is_u,
9007 int immh, int immb, int rn, int rd)
9008{
9009 int immhb = immh << 3 | immb;
9010 int pass, size, fracbits;
9011 TCGv_ptr tcg_fpstatus;
9012 TCGv_i32 tcg_rmode, tcg_shift;
9013
9014 if (immh & 0x8) {
9015 size = MO_64;
9016 if (!is_scalar && !is_q) {
9017 unallocated_encoding(s);
9018 return;
9019 }
9020 } else if (immh & 0x4) {
9021 size = MO_32;
9022 } else if (immh & 0x2) {
9023 size = MO_16;
9024 if (!dc_isar_feature(aa64_fp16, s)) {
9025 unallocated_encoding(s);
9026 return;
9027 }
9028 } else {
9029
9030 assert(immh == 1);
9031 unallocated_encoding(s);
9032 return;
9033 }
9034
9035 if (!fp_access_check(s)) {
9036 return;
9037 }
9038
9039 assert(!(is_scalar && is_q));
9040
9041 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(FPROUNDING_ZERO));
9042 tcg_fpstatus = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
9043 gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
9044 fracbits = (16 << size) - immhb;
9045 tcg_shift = tcg_const_i32(fracbits);
9046
9047 if (size == MO_64) {
9048 int maxpass = is_scalar ? 1 : 2;
9049
9050 for (pass = 0; pass < maxpass; pass++) {
9051 TCGv_i64 tcg_op = tcg_temp_new_i64();
9052
9053 read_vec_element(s, tcg_op, rn, pass, MO_64);
9054 if (is_u) {
9055 gen_helper_vfp_touqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
9056 } else {
9057 gen_helper_vfp_tosqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
9058 }
9059 write_vec_element(s, tcg_op, rd, pass, MO_64);
9060 tcg_temp_free_i64(tcg_op);
9061 }
9062 clear_vec_high(s, is_q, rd);
9063 } else {
9064 void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
9065 int maxpass = is_scalar ? 1 : ((8 << is_q) >> size);
9066
9067 switch (size) {
9068 case MO_16:
9069 if (is_u) {
9070 fn = gen_helper_vfp_touhh;
9071 } else {
9072 fn = gen_helper_vfp_toshh;
9073 }
9074 break;
9075 case MO_32:
9076 if (is_u) {
9077 fn = gen_helper_vfp_touls;
9078 } else {
9079 fn = gen_helper_vfp_tosls;
9080 }
9081 break;
9082 default:
9083 g_assert_not_reached();
9084 }
9085
9086 for (pass = 0; pass < maxpass; pass++) {
9087 TCGv_i32 tcg_op = tcg_temp_new_i32();
9088
9089 read_vec_element_i32(s, tcg_op, rn, pass, size);
9090 fn(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
9091 if (is_scalar) {
9092 write_fp_sreg(s, rd, tcg_op);
9093 } else {
9094 write_vec_element_i32(s, tcg_op, rd, pass, size);
9095 }
9096 tcg_temp_free_i32(tcg_op);
9097 }
9098 if (!is_scalar) {
9099 clear_vec_high(s, is_q, rd);
9100 }
9101 }
9102
9103 tcg_temp_free_ptr(tcg_fpstatus);
9104 tcg_temp_free_i32(tcg_shift);
9105 gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
9106 tcg_temp_free_i32(tcg_rmode);
9107}
9108
9109
9110
9111
9112
9113
9114
9115
9116
/*
 * AdvSIMD scalar shift by immediate: top-level decode, dispatching on
 * the opcode field to the shift/convert handlers above.
 */
static void disas_simd_scalar_shift_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 5);
    int immb = extract32(insn, 16, 3);
    int immh = extract32(insn, 19, 4);
    bool is_u = extract32(insn, 29, 1);

    /* immh == 0 overlaps other encodings and is not a shift here. */
    if (immh == 0) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0x08: /* SRI - U bit required */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x00: /* SSHR / USHR */
    case 0x02: /* SSRA / USRA */
    case 0x04: /* SRSHR / URSHR */
    case 0x06: /* SRSRA / URSRA */
        handle_scalar_simd_shri(s, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x0a: /* SHL / SLI */
        handle_scalar_simd_shli(s, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x1c: /* SCVTF / UCVTF (fixed-point) */
        handle_simd_shift_intfp_conv(s, true, false, is_u, immh, immb,
                                     opcode, rn, rd);
        break;
    case 0x10: /* SQSHRUN */
    case 0x11: /* SQRSHRUN */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        handle_vec_simd_sqshrn(s, true, false, false, true,
                               immh, immb, opcode, rn, rd);
        break;
    case 0x12: /* SQSHRN / UQSHRN */
    case 0x13: /* SQRSHRN / UQRSHRN */
        handle_vec_simd_sqshrn(s, true, false, is_u, is_u,
                               immh, immb, opcode, rn, rd);
        break;
    case 0xc: /* SQSHLU - U bit required */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        handle_simd_qshl(s, true, false, false, true, immh, immb, rn, rd);
        break;
    case 0xe: /* SQSHL / UQSHL */
        handle_simd_qshl(s, true, false, is_u, is_u, immh, immb, rn, rd);
        break;
    case 0x1f: /* FCVTZS / FCVTZU (fixed-point) */
        handle_simd_shift_fpint_conv(s, true, false, is_u, immh, immb, rn, rd);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
9183
9184
9185
9186
9187
9188
9189
/*
 * AdvSIMD scalar three different: the long saturating-doubling multiply
 * family SQDMLAL (0x9), SQDMLSL (0xb) and SQDMULL (0xd).  Only signed
 * (U == 0) and 16/32-bit source sizes are allocated.
 */
static void disas_simd_scalar_three_reg_diff(DisasContext *s, uint32_t insn)
{
    bool is_u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 4);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    if (is_u) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0x9: /* SQDMLAL */
    case 0xb: /* SQDMLSL */
    case 0xd: /* SQDMULL */
        /* Only 16->32 and 32->64 widenings are allocated. */
        if (size == 0 || size == 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (size == 2) {
        /* 32 -> 64 bit */
        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
        TCGv_i64 tcg_op2 = tcg_temp_new_i64();
        TCGv_i64 tcg_res = tcg_temp_new_i64();

        read_vec_element(s, tcg_op1, rn, 0, MO_32 | MO_SIGN);
        read_vec_element(s, tcg_op2, rm, 0, MO_32 | MO_SIGN);

        /* Multiply, then saturating-double via res + res. */
        tcg_gen_mul_i64(tcg_res, tcg_op1, tcg_op2);
        gen_helper_neon_addl_saturate_s64(tcg_res, cpu_env, tcg_res, tcg_res);

        switch (opcode) {
        case 0xd: /* SQDMULL: product is the result */
            break;
        case 0xb: /* SQDMLSL: negate then accumulate */
            tcg_gen_neg_i64(tcg_res, tcg_res);
            /* fall through */
        case 0x9: /* SQDMLAL: saturating accumulate into rd */
            read_vec_element(s, tcg_op1, rd, 0, MO_64);
            gen_helper_neon_addl_saturate_s64(tcg_res, cpu_env,
                                              tcg_res, tcg_op1);
            break;
        default:
            g_assert_not_reached();
        }

        write_fp_dreg(s, rd, tcg_res);

        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2);
        tcg_temp_free_i64(tcg_res);
    } else {
        /* 16 -> 32 bit */
        TCGv_i32 tcg_op1 = read_fp_hreg(s, rn);
        TCGv_i32 tcg_op2 = read_fp_hreg(s, rm);
        TCGv_i64 tcg_res = tcg_temp_new_i64();

        gen_helper_neon_mull_s16(tcg_res, tcg_op1, tcg_op2);
        gen_helper_neon_addl_saturate_s32(tcg_res, cpu_env, tcg_res, tcg_res);

        switch (opcode) {
        case 0xd: /* SQDMULL */
            break;
        case 0xb: /* SQDMLSL */
            gen_helper_neon_negl_u32(tcg_res, tcg_res);
            /* fall through */
        case 0x9: /* SQDMLAL */
        {
            TCGv_i64 tcg_op3 = tcg_temp_new_i64();
            read_vec_element(s, tcg_op3, rd, 0, MO_32);
            gen_helper_neon_addl_saturate_s32(tcg_res, cpu_env,
                                              tcg_res, tcg_op3);
            tcg_temp_free_i64(tcg_op3);
            break;
        }
        default:
            g_assert_not_reached();
        }

        /* Scalar result occupies only the low 32 bits. */
        tcg_gen_ext32u_i64(tcg_res, tcg_res);
        write_fp_dreg(s, rd, tcg_res);

        tcg_temp_free_i32(tcg_op1);
        tcg_temp_free_i32(tcg_op2);
        tcg_temp_free_i64(tcg_res);
    }
}
9288
static void handle_3same_64(DisasContext *s, int opcode, bool u,
                            TCGv_i64 tcg_rd, TCGv_i64 tcg_rn, TCGv_i64 tcg_rm)
{
    /*
     * 64x64->64 operations shared between the scalar and vector
     * three-reg-same groups.  The comparison ops are implemented with
     * setcond + neg so that "true" produces all-ones, matching the
     * AdvSIMD comparison result convention.
     */
    TCGCond cond;

    switch (opcode) {
    case 0x1: /* SQADD / UQADD */
        if (u) {
            gen_helper_neon_qadd_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qadd_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        }
        break;
    case 0x5: /* SQSUB / UQSUB */
        if (u) {
            gen_helper_neon_qsub_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qsub_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        }
        break;
    case 0x6: /* CMGT / CMHI */
        /*
         * setcond yields 0/1; negating turns that into the 0/all-ones
         * element result the architecture requires.
         */
        cond = u ? TCG_COND_GTU : TCG_COND_GT;
    do_cmop:
        tcg_gen_setcond_i64(cond, tcg_rd, tcg_rn, tcg_rm);
        tcg_gen_neg_i64(tcg_rd, tcg_rd);
        break;
    case 0x7: /* CMGE / CMHS */
        cond = u ? TCG_COND_GEU : TCG_COND_GE;
        goto do_cmop;
    case 0x11: /* CMTST / CMEQ */
        if (u) {
            cond = TCG_COND_EQ;
            goto do_cmop;
        }
        gen_cmtst_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 0x8: /* SSHL / USHL */
        if (u) {
            gen_ushl_i64(tcg_rd, tcg_rn, tcg_rm);
        } else {
            gen_sshl_i64(tcg_rd, tcg_rn, tcg_rm);
        }
        break;
    case 0x9: /* SQSHL / UQSHL */
        if (u) {
            gen_helper_neon_qshl_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qshl_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        }
        break;
    case 0xa: /* SRSHL / URSHL */
        if (u) {
            gen_helper_neon_rshl_u64(tcg_rd, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_rshl_s64(tcg_rd, tcg_rn, tcg_rm);
        }
        break;
    case 0xb: /* SQRSHL / UQRSHL */
        if (u) {
            gen_helper_neon_qrshl_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qrshl_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        }
        break;
    case 0x10: /* ADD / SUB */
        if (u) {
            tcg_gen_sub_i64(tcg_rd, tcg_rn, tcg_rm);
        } else {
            tcg_gen_add_i64(tcg_rd, tcg_rn, tcg_rm);
        }
        break;
    default:
        g_assert_not_reached();
    }
}
9372
9373
9374
9375
9376
/*
 * Floating point three-reg-same operations, shared between the scalar
 * and vector forms.  size selects double (non-zero) vs single; the
 * fpopcode values are decoded by the callers.
 */
static void handle_3same_float(DisasContext *s, int size, int elements,
                               int fpopcode, int rd, int rn, int rm)
{
    int pass;
    TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);

    for (pass = 0; pass < elements; pass++) {
        if (size) {
            /* Double precision elements */
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
            TCGv_i64 tcg_res = tcg_temp_new_i64();

            read_vec_element(s, tcg_op1, rn, pass, MO_64);
            read_vec_element(s, tcg_op2, rm, pass, MO_64);

            switch (fpopcode) {
            case 0x39: /* FMLS */
                /* As FMLA but with one operand negated. */
                gen_helper_vfp_negd(tcg_op1, tcg_op1);
                /* fall through */
            case 0x19: /* FMLA */
                read_vec_element(s, tcg_res, rd, pass, MO_64);
                gen_helper_vfp_muladdd(tcg_res, tcg_op1, tcg_op2,
                                       tcg_res, fpst);
                break;
            case 0x18: /* FMAXNM */
                gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1a: /* FADD */
                gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1b: /* FMULX */
                gen_helper_vfp_mulxd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1c: /* FCMEQ */
                gen_helper_neon_ceq_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1e: /* FMAX */
                gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1f: /* FRECPS */
                gen_helper_recpsf_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x38: /* FMINNM */
                gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3a: /* FSUB */
                gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3e: /* FMIN */
                gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3f: /* FRSQRTS */
                gen_helper_rsqrtsf_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5b: /* FMUL */
                gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5c: /* FCMGE */
                gen_helper_neon_cge_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5d: /* FACGE */
                gen_helper_neon_acge_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5f: /* FDIV */
                gen_helper_vfp_divd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7a: /* FABD: subtract then take absolute value */
                gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
                gen_helper_vfp_absd(tcg_res, tcg_res);
                break;
            case 0x7c: /* FCMGT */
                gen_helper_neon_cgt_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7d: /* FACGT */
                gen_helper_neon_acgt_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            write_vec_element(s, tcg_res, rd, pass, MO_64);

            tcg_temp_free_i64(tcg_res);
            tcg_temp_free_i64(tcg_op1);
            tcg_temp_free_i64(tcg_op2);
        } else {
            /* Single precision elements */
            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op1, rn, pass, MO_32);
            read_vec_element_i32(s, tcg_op2, rm, pass, MO_32);

            switch (fpopcode) {
            case 0x39: /* FMLS */
                /* As FMLA but with one operand negated. */
                gen_helper_vfp_negs(tcg_op1, tcg_op1);
                /* fall through */
            case 0x19: /* FMLA */
                read_vec_element_i32(s, tcg_res, rd, pass, MO_32);
                gen_helper_vfp_muladds(tcg_res, tcg_op1, tcg_op2,
                                       tcg_res, fpst);
                break;
            case 0x1a: /* FADD */
                gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1b: /* FMULX */
                gen_helper_vfp_mulxs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1c: /* FCMEQ */
                gen_helper_neon_ceq_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1e: /* FMAX */
                gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1f: /* FRECPS */
                gen_helper_recpsf_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x18: /* FMAXNM */
                gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x38: /* FMINNM */
                gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3a: /* FSUB */
                gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3e: /* FMIN */
                gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3f: /* FRSQRTS */
                gen_helper_rsqrtsf_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5b: /* FMUL */
                gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5c: /* FCMGE */
                gen_helper_neon_cge_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5d: /* FACGE */
                gen_helper_neon_acge_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5f: /* FDIV */
                gen_helper_vfp_divs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7a: /* FABD: subtract then take absolute value */
                gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
                gen_helper_vfp_abss(tcg_res, tcg_res);
                break;
            case 0x7c: /* FCMGT */
                gen_helper_neon_cgt_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7d: /* FACGT */
                gen_helper_neon_acgt_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            if (elements == 1) {
                /* Scalar single: widen so the 64-bit write zeroes the
                 * rest of the vector register. */
                TCGv_i64 tcg_tmp = tcg_temp_new_i64();

                tcg_gen_extu_i32_i64(tcg_tmp, tcg_res);
                write_vec_element(s, tcg_tmp, rd, pass, MO_64);
                tcg_temp_free_i64(tcg_tmp);
            } else {
                write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
            }

            tcg_temp_free_i32(tcg_res);
            tcg_temp_free_i32(tcg_op1);
            tcg_temp_free_i32(tcg_op2);
        }
    }

    tcg_temp_free_ptr(fpst);

    /* Clear the high half unless we wrote more than 8 bytes. */
    clear_vec_high(s, elements * (size ? 8 : 4) > 8, rd);
}
9560
9561
9562
9563
9564
9565
9566
/*
 * AdvSIMD scalar three same: integer ops (opcode < 0x18) and the FP
 * ops (opcode >= 0x18, further qualified by size<1> and U).
 */
static void disas_simd_scalar_three_reg_same(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 5);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    bool u = extract32(insn, 29, 1);
    TCGv_i64 tcg_rd;

    if (opcode >= 0x18) {
        /* Floating point: U, size<1> and opcode indicate operation */
        int fpopcode = opcode | (extract32(size, 1, 1) << 5) | (u << 6);
        switch (fpopcode) {
        case 0x1b: /* FMULX */
        case 0x1f: /* FRECPS */
        case 0x3f: /* FRSQRTS */
        case 0x5d: /* FACGE */
        case 0x7d: /* FACGT */
        case 0x1c: /* FCMEQ */
        case 0x5c: /* FCMGE */
        case 0x7c: /* FCMGT */
        case 0x7a: /* FABD */
            break;
        default:
            unallocated_encoding(s);
            return;
        }

        if (!fp_access_check(s)) {
            return;
        }

        /* size<0> selects single vs double precision. */
        handle_3same_float(s, extract32(size, 0, 1), 1, fpopcode, rd, rn, rm);
        return;
    }

    switch (opcode) {
    case 0x1: /* SQADD, UQADD */
    case 0x5: /* SQSUB, UQSUB */
    case 0x9: /* SQSHL, UQSHL */
    case 0xb: /* SQRSHL, UQRSHL */
        break;
    case 0x8: /* SSHL, USHL */
    case 0xa: /* SRSHL, URSHL */
    case 0x6: /* CMGT, CMHI */
    case 0x7: /* CMGE, CMHS */
    case 0x11: /* CMTST, CMEQ */
    case 0x10: /* ADD, SUB (vector) */
        /* These ops only exist for the 64-bit scalar form. */
        if (size != 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x16: /* SQDMULH, SQRDMULH (vector) */
        if (size != 1 && size != 2) {
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rd = tcg_temp_new_i64();

    if (size == 3) {
        TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
        TCGv_i64 tcg_rm = read_fp_dreg(s, rm);

        handle_3same_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rm);
        tcg_temp_free_i64(tcg_rn);
        tcg_temp_free_i64(tcg_rm);
    } else {
        /*
         * Do a single operation on the lowest element in the vector.
         * We use the standard Neon helpers and rely on 0 OP 0 == 0
         * with no side effects for all these operations.
         * OPTME: special-purpose helpers would avoid doing some
         * unnecessary work in the helper for the 8 and 16 bit cases.
         */
        NeonGenTwoOpEnvFn *genenvfn;
        TCGv_i32 tcg_rn = tcg_temp_new_i32();
        TCGv_i32 tcg_rm = tcg_temp_new_i32();
        TCGv_i32 tcg_rd32 = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_rn, rn, 0, size);
        read_vec_element_i32(s, tcg_rm, rm, 0, size);

        switch (opcode) {
        case 0x1: /* SQADD, UQADD */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qadd_s8, gen_helper_neon_qadd_u8 },
                { gen_helper_neon_qadd_s16, gen_helper_neon_qadd_u16 },
                { gen_helper_neon_qadd_s32, gen_helper_neon_qadd_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x5: /* SQSUB, UQSUB */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qsub_s8, gen_helper_neon_qsub_u8 },
                { gen_helper_neon_qsub_s16, gen_helper_neon_qsub_u16 },
                { gen_helper_neon_qsub_s32, gen_helper_neon_qsub_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x9: /* SQSHL, UQSHL */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 },
                { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 },
                { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0xb: /* SQRSHL, UQRSHL */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 },
                { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 },
                { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x16: /* SQDMULH, SQRDMULH */
        {
            static NeonGenTwoOpEnvFn * const fns[2][2] = {
                { gen_helper_neon_qdmulh_s16, gen_helper_neon_qrdmulh_s16 },
                { gen_helper_neon_qdmulh_s32, gen_helper_neon_qrdmulh_s32 },
            };
            assert(size == 1 || size == 2);
            genenvfn = fns[size - 1][u];
            break;
        }
        default:
            g_assert_not_reached();
        }

        genenvfn(tcg_rd32, cpu_env, tcg_rn, tcg_rm);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_rd32);
        tcg_temp_free_i32(tcg_rd32);
        tcg_temp_free_i32(tcg_rn);
        tcg_temp_free_i32(tcg_rm);
    }

    write_fp_dreg(s, rd, tcg_rd);

    tcg_temp_free_i64(tcg_rd);
}
9726
9727
9728
9729
9730
9731
9732
9733
9734
9735static void disas_simd_scalar_three_reg_same_fp16(DisasContext *s,
9736 uint32_t insn)
9737{
9738 int rd = extract32(insn, 0, 5);
9739 int rn = extract32(insn, 5, 5);
9740 int opcode = extract32(insn, 11, 3);
9741 int rm = extract32(insn, 16, 5);
9742 bool u = extract32(insn, 29, 1);
9743 bool a = extract32(insn, 23, 1);
9744 int fpopcode = opcode | (a << 3) | (u << 4);
9745 TCGv_ptr fpst;
9746 TCGv_i32 tcg_op1;
9747 TCGv_i32 tcg_op2;
9748 TCGv_i32 tcg_res;
9749
9750 switch (fpopcode) {
9751 case 0x03:
9752 case 0x04:
9753 case 0x07:
9754 case 0x0f:
9755 case 0x14:
9756 case 0x15:
9757 case 0x1a:
9758 case 0x1c:
9759 case 0x1d:
9760 break;
9761 default:
9762 unallocated_encoding(s);
9763 return;
9764 }
9765
9766 if (!dc_isar_feature(aa64_fp16, s)) {
9767 unallocated_encoding(s);
9768 }
9769
9770 if (!fp_access_check(s)) {
9771 return;
9772 }
9773
9774 fpst = fpstatus_ptr(FPST_FPCR_F16);
9775
9776 tcg_op1 = read_fp_hreg(s, rn);
9777 tcg_op2 = read_fp_hreg(s, rm);
9778 tcg_res = tcg_temp_new_i32();
9779
9780 switch (fpopcode) {
9781 case 0x03:
9782 gen_helper_advsimd_mulxh(tcg_res, tcg_op1, tcg_op2, fpst);
9783 break;
9784 case 0x04:
9785 gen_helper_advsimd_ceq_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9786 break;
9787 case 0x07:
9788 gen_helper_recpsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9789 break;
9790 case 0x0f:
9791 gen_helper_rsqrtsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9792 break;
9793 case 0x14:
9794 gen_helper_advsimd_cge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9795 break;
9796 case 0x15:
9797 gen_helper_advsimd_acge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9798 break;
9799 case 0x1a:
9800 gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
9801 tcg_gen_andi_i32(tcg_res, tcg_res, 0x7fff);
9802 break;
9803 case 0x1c:
9804 gen_helper_advsimd_cgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9805 break;
9806 case 0x1d:
9807 gen_helper_advsimd_acgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9808 break;
9809 default:
9810 g_assert_not_reached();
9811 }
9812
9813 write_fp_sreg(s, rd, tcg_res);
9814
9815
9816 tcg_temp_free_i32(tcg_res);
9817 tcg_temp_free_i32(tcg_op1);
9818 tcg_temp_free_i32(tcg_op2);
9819 tcg_temp_free_ptr(fpst);
9820}
9821
9822
9823
9824
9825
9826
9827
/*
 * AdvSIMD scalar three same extra: SQRDMLAH/SQRDMLSH, gated on the
 * FEAT_RDM (aa64_rdm) extension.
 */
static void disas_simd_scalar_three_reg_same_extra(DisasContext *s,
                                                   uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 4);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    bool u = extract32(insn, 29, 1);
    TCGv_i32 ele1, ele2, ele3;
    TCGv_i64 res;
    bool feature;

    switch (u * 16 + opcode) {
    case 0x10: /* SQRDMLAH (U=1, opcode 0x0) */
    case 0x11: /* SQRDMLSH (U=1, opcode 0x1) */
        /* Only 16- and 32-bit elements are allocated. */
        if (size != 1 && size != 2) {
            unallocated_encoding(s);
            return;
        }
        feature = dc_isar_feature(aa64_rdm, s);
        break;
    default:
        unallocated_encoding(s);
        return;
    }
    if (!feature) {
        unallocated_encoding(s);
        return;
    }
    if (!fp_access_check(s)) {
        return;
    }

    /*
     * Do a single operation on the lowest element in the vector.
     * We use the standard Neon helpers and rely on 0 OP 0 == 0
     * with no side effects for all these operations.
     * OPTME: special-purpose helpers would avoid doing some
     * unnecessary work in the helper for the 16 bit cases.
     */
    ele1 = tcg_temp_new_i32();
    ele2 = tcg_temp_new_i32();
    ele3 = tcg_temp_new_i32();

    read_vec_element_i32(s, ele1, rn, 0, size);
    read_vec_element_i32(s, ele2, rm, 0, size);
    read_vec_element_i32(s, ele3, rd, 0, size);

    switch (opcode) {
    case 0x0: /* SQRDMLAH */
        if (size == 1) {
            gen_helper_neon_qrdmlah_s16(ele3, cpu_env, ele1, ele2, ele3);
        } else {
            gen_helper_neon_qrdmlah_s32(ele3, cpu_env, ele1, ele2, ele3);
        }
        break;
    case 0x1: /* SQRDMLSH */
        if (size == 1) {
            gen_helper_neon_qrdmlsh_s16(ele3, cpu_env, ele1, ele2, ele3);
        } else {
            gen_helper_neon_qrdmlsh_s32(ele3, cpu_env, ele1, ele2, ele3);
        }
        break;
    default:
        g_assert_not_reached();
    }
    tcg_temp_free_i32(ele1);
    tcg_temp_free_i32(ele2);

    /* Zero-extend into the full destination register. */
    res = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(res, ele3);
    tcg_temp_free_i32(ele3);

    write_fp_dreg(s, rd, res);
    tcg_temp_free_i64(res);
}
9904
static void handle_2misc_64(DisasContext *s, int opcode, bool u,
                            TCGv_i64 tcg_rd, TCGv_i64 tcg_rn,
                            TCGv_i32 tcg_rmode, TCGv_ptr tcg_fpstatus)
{
    /*
     * 64-bit element operations shared between the scalar and vector
     * two-reg-misc groups.  The compare-against-zero ops use setcond +
     * neg to produce the 0/all-ones element results.  tcg_rmode and
     * tcg_fpstatus are only used by the FP cases; the caller has
     * already set up the rounding mode where needed.
     */
    TCGCond cond;

    switch (opcode) {
    case 0x4: /* CLZ (u) / CLS */
        if (u) {
            tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64);
        } else {
            tcg_gen_clrsb_i64(tcg_rd, tcg_rn);
        }
        break;
    case 0x5: /* NOT */
        /*
         * This opcode covers several bitwise ops, but only the plain
         * inversion reaches here at 64-bit element size.
         */
        tcg_gen_not_i64(tcg_rd, tcg_rn);
        break;
    case 0x7: /* SQNEG (u) / SQABS */
        if (u) {
            gen_helper_neon_qneg_s64(tcg_rd, cpu_env, tcg_rn);
        } else {
            gen_helper_neon_qabs_s64(tcg_rd, cpu_env, tcg_rn);
        }
        break;
    case 0xa: /* CMLT #0 */
        /*
         * setcond yields 0/1; negating turns that into the 0/all-ones
         * element result the architecture requires.
         */
        cond = TCG_COND_LT;
    do_cmop:
        tcg_gen_setcondi_i64(cond, tcg_rd, tcg_rn, 0);
        tcg_gen_neg_i64(tcg_rd, tcg_rd);
        break;
    case 0x8: /* CMGE #0 (u) / CMGT #0 */
        cond = u ? TCG_COND_GE : TCG_COND_GT;
        goto do_cmop;
    case 0x9: /* CMLE #0 (u) / CMEQ #0 */
        cond = u ? TCG_COND_LE : TCG_COND_EQ;
        goto do_cmop;
    case 0xb: /* NEG (u) / ABS */
        if (u) {
            tcg_gen_neg_i64(tcg_rd, tcg_rn);
        } else {
            tcg_gen_abs_i64(tcg_rd, tcg_rn);
        }
        break;
    case 0x2f: /* FABS */
        gen_helper_vfp_absd(tcg_rd, tcg_rn);
        break;
    case 0x6f: /* FNEG */
        gen_helper_vfp_negd(tcg_rd, tcg_rn);
        break;
    case 0x7f: /* FSQRT */
        gen_helper_vfp_sqrtd(tcg_rd, tcg_rn, cpu_env);
        break;
    case 0x1a: /* FCVTNS */
    case 0x1b: /* FCVTMS */
    case 0x1c: /* FCVTAS */
    case 0x3a: /* FCVTPS */
    case 0x3b: /* FCVTZS */
    {
        /* fp -> signed int; rounding mode comes from tcg_rmode,
         * already installed by the caller. */
        TCGv_i32 tcg_shift = tcg_const_i32(0);
        gen_helper_vfp_tosqd(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
        tcg_temp_free_i32(tcg_shift);
        break;
    }
    case 0x5a: /* FCVTNU */
    case 0x5b: /* FCVTMU */
    case 0x5c: /* FCVTAU */
    case 0x7a: /* FCVTPU */
    case 0x7b: /* FCVTZU */
    {
        /* fp -> unsigned int, same rounding-mode arrangement. */
        TCGv_i32 tcg_shift = tcg_const_i32(0);
        gen_helper_vfp_touqd(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
        tcg_temp_free_i32(tcg_shift);
        break;
    }
    case 0x18: /* FRINTN */
    case 0x19: /* FRINTM */
    case 0x38: /* FRINTP */
    case 0x39: /* FRINTZ */
    case 0x58: /* FRINTA */
    case 0x79: /* FRINTI */
        gen_helper_rintd(tcg_rd, tcg_rn, tcg_fpstatus);
        break;
    case 0x59: /* FRINTX */
        gen_helper_rintd_exact(tcg_rd, tcg_rn, tcg_fpstatus);
        break;
    case 0x1e: /* FRINT32Z */
    case 0x5e: /* FRINT32X */
        gen_helper_frint32_d(tcg_rd, tcg_rn, tcg_fpstatus);
        break;
    case 0x1f: /* FRINT64Z */
    case 0x5f: /* FRINT64X */
        gen_helper_frint64_d(tcg_rd, tcg_rn, tcg_fpstatus);
        break;
    default:
        g_assert_not_reached();
    }
}
10015
static void handle_2misc_fcmp_zero(DisasContext *s, int opcode,
                                   bool is_scalar, bool is_u, bool is_q,
                                   int size, int rn, int rd)
{
    /*
     * FP compare against zero (FCMGT/FCMGE/FCMEQ/FCMLE/FCMLT with a
     * zero second operand), scalar or vector, half/single/double
     * precision selected by @size. FCMLT and FCMLE are implemented by
     * swapping the operands of the GT/GE helpers.
     */
    bool is_double = (size == MO_64);
    TCGv_ptr fpst;

    if (!fp_access_check(s)) {
        return;
    }

    fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);

    if (is_double) {
        TCGv_i64 tcg_op = tcg_temp_new_i64();
        TCGv_i64 tcg_zero = tcg_const_i64(0);
        TCGv_i64 tcg_res = tcg_temp_new_i64();
        NeonGenTwoDoubleOpFn *genfn;
        bool swap = false;
        int pass;

        switch (opcode) {
        case 0x2e: /* FCMLT (zero) */
            swap = true;
            /* fall through: LT(x, 0) == GT(0, x) */
        case 0x2c: /* FCMGT (zero) */
            genfn = gen_helper_neon_cgt_f64;
            break;
        case 0x2d: /* FCMEQ (zero) */
            genfn = gen_helper_neon_ceq_f64;
            break;
        case 0x6d: /* FCMLE (zero) */
            swap = true;
            /* fall through: LE(x, 0) == GE(0, x) */
        case 0x6c: /* FCMGE (zero) */
            genfn = gen_helper_neon_cge_f64;
            break;
        default:
            g_assert_not_reached();
        }

        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
            read_vec_element(s, tcg_op, rn, pass, MO_64);
            if (swap) {
                genfn(tcg_res, tcg_zero, tcg_op, fpst);
            } else {
                genfn(tcg_res, tcg_op, tcg_zero, fpst);
            }
            write_vec_element(s, tcg_res, rd, pass, MO_64);
        }
        tcg_temp_free_i64(tcg_res);
        tcg_temp_free_i64(tcg_zero);
        tcg_temp_free_i64(tcg_op);

        clear_vec_high(s, !is_scalar, rd);
    } else {
        TCGv_i32 tcg_op = tcg_temp_new_i32();
        TCGv_i32 tcg_zero = tcg_const_i32(0);
        TCGv_i32 tcg_res = tcg_temp_new_i32();
        NeonGenTwoSingleOpFn *genfn;
        bool swap = false;
        int pass, maxpasses;

        if (size == MO_16) {
            switch (opcode) {
            case 0x2e: /* FCMLT (zero) */
                swap = true;
                /* fall through */
            case 0x2c: /* FCMGT (zero) */
                genfn = gen_helper_advsimd_cgt_f16;
                break;
            case 0x2d: /* FCMEQ (zero) */
                genfn = gen_helper_advsimd_ceq_f16;
                break;
            case 0x6d: /* FCMLE (zero) */
                swap = true;
                /* fall through */
            case 0x6c: /* FCMGE (zero) */
                genfn = gen_helper_advsimd_cge_f16;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            switch (opcode) {
            case 0x2e: /* FCMLT (zero) */
                swap = true;
                /* fall through */
            case 0x2c: /* FCMGT (zero) */
                genfn = gen_helper_neon_cgt_f32;
                break;
            case 0x2d: /* FCMEQ (zero) */
                genfn = gen_helper_neon_ceq_f32;
                break;
            case 0x6d: /* FCMLE (zero) */
                swap = true;
                /* fall through */
            case 0x6c: /* FCMGE (zero) */
                genfn = gen_helper_neon_cge_f32;
                break;
            default:
                g_assert_not_reached();
            }
        }

        if (is_scalar) {
            maxpasses = 1;
        } else {
            /* 64-bit (Q=0) or 128-bit (Q=1) vector, esize elements */
            int vector_size = 8 << is_q;
            maxpasses = vector_size >> size;
        }

        for (pass = 0; pass < maxpasses; pass++) {
            read_vec_element_i32(s, tcg_op, rn, pass, size);
            if (swap) {
                genfn(tcg_res, tcg_zero, tcg_op, fpst);
            } else {
                genfn(tcg_res, tcg_op, tcg_zero, fpst);
            }
            if (is_scalar) {
                /* Scalar write zeroes the rest of the register */
                write_fp_sreg(s, rd, tcg_res);
            } else {
                write_vec_element_i32(s, tcg_res, rd, pass, size);
            }
        }
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_zero);
        tcg_temp_free_i32(tcg_op);
        if (!is_scalar) {
            clear_vec_high(s, is_q, rd);
        }
    }

    tcg_temp_free_ptr(fpst);
}
10151
static void handle_2misc_reciprocal(DisasContext *s, int opcode,
                                    bool is_scalar, bool is_u, bool is_q,
                                    int size, int rn, int rd)
{
    /*
     * Reciprocal estimate family of the 2-reg-misc group:
     * FRECPE / FRECPX / FRSQRTE (and integer URECPE for 32-bit
     * elements), scalar or vector, single or double precision.
     */
    bool is_double = (size == 3);
    TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);

    if (is_double) {
        TCGv_i64 tcg_op = tcg_temp_new_i64();
        TCGv_i64 tcg_res = tcg_temp_new_i64();
        int pass;

        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
            read_vec_element(s, tcg_op, rn, pass, MO_64);
            switch (opcode) {
            case 0x3d: /* FRECPE */
                gen_helper_recpe_f64(tcg_res, tcg_op, fpst);
                break;
            case 0x3f: /* FRECPX */
                gen_helper_frecpx_f64(tcg_res, tcg_op, fpst);
                break;
            case 0x7d: /* FRSQRTE */
                gen_helper_rsqrte_f64(tcg_res, tcg_op, fpst);
                break;
            default:
                g_assert_not_reached();
            }
            write_vec_element(s, tcg_res, rd, pass, MO_64);
        }
        tcg_temp_free_i64(tcg_res);
        tcg_temp_free_i64(tcg_op);
        clear_vec_high(s, !is_scalar, rd);
    } else {
        TCGv_i32 tcg_op = tcg_temp_new_i32();
        TCGv_i32 tcg_res = tcg_temp_new_i32();
        int pass, maxpasses;

        if (is_scalar) {
            maxpasses = 1;
        } else {
            maxpasses = is_q ? 4 : 2;
        }

        for (pass = 0; pass < maxpasses; pass++) {
            read_vec_element_i32(s, tcg_op, rn, pass, MO_32);

            switch (opcode) {
            case 0x3c: /* URECPE: integer, no fpstatus needed */
                gen_helper_recpe_u32(tcg_res, tcg_op);
                break;
            case 0x3d: /* FRECPE */
                gen_helper_recpe_f32(tcg_res, tcg_op, fpst);
                break;
            case 0x3f: /* FRECPX */
                gen_helper_frecpx_f32(tcg_res, tcg_op, fpst);
                break;
            case 0x7d: /* FRSQRTE */
                gen_helper_rsqrte_f32(tcg_res, tcg_op, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            if (is_scalar) {
                /* Scalar write zeroes the rest of the register */
                write_fp_sreg(s, rd, tcg_res);
            } else {
                write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
            }
        }
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_op);
        if (!is_scalar) {
            clear_vec_high(s, is_q, rd);
        }
    }
    tcg_temp_free_ptr(fpst);
}
10229
static void handle_2misc_narrow(DisasContext *s, bool scalar,
                                int opcode, bool u, bool is_q,
                                int size, int rn, int rd)
{
    /*
     * 2-reg-misc narrowing operations (each 2*size source element
     * becomes a size destination element): XTN, SQXTUN, SQXTN/UQXTN,
     * FCVTN and FCVTXN, for both the vector and scalar forms.
     * For the vector forms is_q selects writing the upper (XTN2 style)
     * or lower half of Rd.
     */
    int pass;
    TCGv_i32 tcg_res[2];
    int destelt = is_q ? 2 : 0;
    int passes = scalar ? 1 : 2;

    if (scalar) {
        /* Scalar result only occupies the low half; upper half is zero */
        tcg_res[1] = tcg_const_i32(0);
    }

    for (pass = 0; pass < passes; pass++) {
        TCGv_i64 tcg_op = tcg_temp_new_i64();
        NeonGenNarrowFn *genfn = NULL;
        NeonGenNarrowEnvFn *genenvfn = NULL;

        if (scalar) {
            read_vec_element(s, tcg_op, rn, pass, size + 1);
        } else {
            read_vec_element(s, tcg_op, rn, pass, MO_64);
        }
        tcg_res[pass] = tcg_temp_new_i32();

        switch (opcode) {
        case 0x12: /* XTN, XTN2 / SQXTUN, SQXTUN2 (u) */
        {
            static NeonGenNarrowFn * const xtnfns[3] = {
                gen_helper_neon_narrow_u8,
                gen_helper_neon_narrow_u16,
                tcg_gen_extrl_i64_i32,
            };
            static NeonGenNarrowEnvFn * const sqxtunfns[3] = {
                gen_helper_neon_unarrow_sat8,
                gen_helper_neon_unarrow_sat16,
                gen_helper_neon_unarrow_sat32,
            };
            if (u) {
                genenvfn = sqxtunfns[size];
            } else {
                genfn = xtnfns[size];
            }
            break;
        }
        case 0x14: /* SQXTN, SQXTN2 / UQXTN, UQXTN2 (u) */
        {
            static NeonGenNarrowEnvFn * const fns[3][2] = {
                { gen_helper_neon_narrow_sat_s8,
                  gen_helper_neon_narrow_sat_u8 },
                { gen_helper_neon_narrow_sat_s16,
                  gen_helper_neon_narrow_sat_u16 },
                { gen_helper_neon_narrow_sat_s32,
                  gen_helper_neon_narrow_sat_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x16: /* FCVTN, FCVTN2 */
            /* size 2: 64 -> 32 bit; otherwise a pair of 32 -> 16 bit */
            if (size == 2) {
                gen_helper_vfp_fcvtsd(tcg_res[pass], tcg_op, cpu_env);
            } else {
                TCGv_i32 tcg_lo = tcg_temp_new_i32();
                TCGv_i32 tcg_hi = tcg_temp_new_i32();
                TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
                TCGv_i32 ahp = get_ahp_flag();

                /* Convert each 32-bit half to f16 and re-pack */
                tcg_gen_extr_i64_i32(tcg_lo, tcg_hi, tcg_op);
                gen_helper_vfp_fcvt_f32_to_f16(tcg_lo, tcg_lo, fpst, ahp);
                gen_helper_vfp_fcvt_f32_to_f16(tcg_hi, tcg_hi, fpst, ahp);
                tcg_gen_deposit_i32(tcg_res[pass], tcg_lo, tcg_hi, 16, 16);
                tcg_temp_free_i32(tcg_lo);
                tcg_temp_free_i32(tcg_hi);
                tcg_temp_free_ptr(fpst);
                tcg_temp_free_i32(ahp);
            }
            break;
        case 0x56: /* FCVTXN, FCVTXN2 */
            /*
             * 64 -> 32 bit float conversion with the extra inexact
             * handling done in the helper (only valid for size 2).
             */
            assert(size == 2);
            gen_helper_fcvtx_f64_to_f32(tcg_res[pass], tcg_op, cpu_env);
            break;
        default:
            g_assert_not_reached();
        }

        if (genfn) {
            genfn(tcg_res[pass], tcg_op);
        } else if (genenvfn) {
            genenvfn(tcg_res[pass], cpu_env, tcg_op);
        }

        tcg_temp_free_i64(tcg_op);
    }

    for (pass = 0; pass < 2; pass++) {
        write_vec_element_i32(s, tcg_res[pass], rd, destelt + pass, MO_32);
        tcg_temp_free_i32(tcg_res[pass]);
    }
    clear_vec_high(s, is_q, rd);
}
10337
10338
/* Saturating accumulating ops: SUQADD (is_u == false) / USQADD (is_u) */
static void handle_2misc_satacc(DisasContext *s, bool is_scalar, bool is_u,
                                bool is_q, int size, int rn, int rd)
{
    bool is_double = (size == 3);

    if (is_double) {
        TCGv_i64 tcg_rn = tcg_temp_new_i64();
        TCGv_i64 tcg_rd = tcg_temp_new_i64();
        int pass;

        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
            read_vec_element(s, tcg_rn, rn, pass, MO_64);
            read_vec_element(s, tcg_rd, rd, pass, MO_64);

            if (is_u) { /* USQADD */
                gen_helper_neon_uqadd_s64(tcg_rd, cpu_env, tcg_rn, tcg_rd);
            } else { /* SUQADD */
                gen_helper_neon_sqadd_u64(tcg_rd, cpu_env, tcg_rn, tcg_rd);
            }
            write_vec_element(s, tcg_rd, rd, pass, MO_64);
        }
        tcg_temp_free_i64(tcg_rd);
        tcg_temp_free_i64(tcg_rn);
        clear_vec_high(s, !is_scalar, rd);
    } else {
        TCGv_i32 tcg_rn = tcg_temp_new_i32();
        TCGv_i32 tcg_rd = tcg_temp_new_i32();
        int pass, maxpasses;

        if (is_scalar) {
            maxpasses = 1;
        } else {
            maxpasses = is_q ? 4 : 2;
        }

        for (pass = 0; pass < maxpasses; pass++) {
            if (is_scalar) {
                /* Scalar reads the element at its real size */
                read_vec_element_i32(s, tcg_rn, rn, pass, size);
                read_vec_element_i32(s, tcg_rd, rd, pass, size);
            } else {
                read_vec_element_i32(s, tcg_rn, rn, pass, MO_32);
                read_vec_element_i32(s, tcg_rd, rd, pass, MO_32);
            }

            if (is_u) { /* USQADD */
                switch (size) {
                case 0:
                    gen_helper_neon_uqadd_s8(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                case 1:
                    gen_helper_neon_uqadd_s16(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                case 2:
                    gen_helper_neon_uqadd_s32(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                default:
                    g_assert_not_reached();
                }
            } else { /* SUQADD */
                switch (size) {
                case 0:
                    gen_helper_neon_sqadd_u8(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                case 1:
                    gen_helper_neon_sqadd_u16(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                case 2:
                    gen_helper_neon_sqadd_u32(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                default:
                    g_assert_not_reached();
                }
            }

            if (is_scalar) {
                /* Zero the whole low 64 bits first so the result write
                 * below leaves the rest of the register clear.
                 */
                TCGv_i64 tcg_zero = tcg_const_i64(0);
                write_vec_element(s, tcg_zero, rd, 0, MO_64);
                tcg_temp_free_i64(tcg_zero);
            }
            write_vec_element_i32(s, tcg_rd, rd, pass, MO_32);
        }
        tcg_temp_free_i32(tcg_rd);
        tcg_temp_free_i32(tcg_rn);
        clear_vec_high(s, is_q, rd);
    }
}
10425
10426
10427
10428
10429
10430
10431
/* AdvSIMD scalar two reg misc
 *  31 30 29 28        24 23 22 21       17 16    12 11 10 9    5 4    0
 * +-----+---+-----------+------+-----------+--------+-----+------+------+
 * | 0 1 | U | 1 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +-----+---+-----------+------+-----------+--------+-----+------+------+
 */
static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 12, 5);
    int size = extract32(insn, 22, 2);
    bool u = extract32(insn, 29, 1);
    bool is_fcvt = false;
    int rmode;
    TCGv_i32 tcg_rmode;
    TCGv_ptr tcg_fpstatus;

    switch (opcode) {
    case 0x3: /* USQADD / SUQADD */
        if (!fp_access_check(s)) {
            return;
        }
        handle_2misc_satacc(s, true, u, false, size, rn, rd);
        return;
    case 0x7: /* SQABS / SQNEG */
        break;
    case 0xa: /* CMLT (zero) */
        if (u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x8: /* CMGT, CMGE (zero) */
    case 0x9: /* CMEQ, CMLE (zero) */
    case 0xb: /* ABS, NEG */
        /* Scalar integer compares/abs/neg are only valid for 64 bit */
        if (size != 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x12: /* SQXTUN */
        if (!u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x14: /* SQXTN, UQXTN */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_2misc_narrow(s, true, opcode, u, false, size, rn, rd);
        return;
    case 0xc ... 0xf:
    case 0x16 ... 0x1d:
    case 0x1f:
        /*
         * Floating point: U, size[1] and opcode indicate operation;
         * size[0] indicates single or double precision. Fold U and
         * size[1] into the opcode so each FP op has a unique value.
         */
        opcode |= (extract32(size, 1, 1) << 5) | (u << 6);
        size = extract32(size, 0, 1) ? 3 : 2;
        switch (opcode) {
        case 0x2c: /* FCMGT (zero) */
        case 0x2d: /* FCMEQ (zero) */
        case 0x2e: /* FCMLT (zero) */
        case 0x6c: /* FCMGE (zero) */
        case 0x6d: /* FCMLE (zero) */
            handle_2misc_fcmp_zero(s, opcode, true, u, true, size, rn, rd);
            return;
        case 0x1d: /* SCVTF */
        case 0x5d: /* UCVTF */
        {
            bool is_signed = (opcode == 0x1d);
            if (!fp_access_check(s)) {
                return;
            }
            handle_simd_intfp_conv(s, rd, rn, 1, is_signed, 0, size);
            return;
        }
        case 0x3d: /* FRECPE */
        case 0x3f: /* FRECPX */
        case 0x7d: /* FRSQRTE */
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_reciprocal(s, opcode, true, u, true, size, rn, rd);
            return;
        case 0x1a: /* FCVTNS */
        case 0x1b: /* FCVTMS */
        case 0x3a: /* FCVTPS */
        case 0x3b: /* FCVTZS */
        case 0x5a: /* FCVTNU */
        case 0x5b: /* FCVTMU */
        case 0x7a: /* FCVTPU */
        case 0x7b: /* FCVTZU */
            is_fcvt = true;
            /* Rounding mode is encoded in opcode bits 0 and 5 */
            rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
            break;
        case 0x1c: /* FCVTAS */
        case 0x5c: /* FCVTAU */
            /* TIEAWAY doesn't fit in the usual rounding mode encoding */
            is_fcvt = true;
            rmode = FPROUNDING_TIEAWAY;
            break;
        case 0x56: /* FCVTXN, FCVTXN2 */
            if (size == 2) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_narrow(s, true, opcode, u, false, size - 1, rn, rd);
            return;
        default:
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (is_fcvt) {
        /* Install the requested rounding mode for the conversion */
        tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
        tcg_fpstatus = fpstatus_ptr(FPST_FPCR);
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
    } else {
        tcg_rmode = NULL;
        tcg_fpstatus = NULL;
    }

    if (size == 3) {
        TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
        TCGv_i64 tcg_rd = tcg_temp_new_i64();

        handle_2misc_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rmode, tcg_fpstatus);
        write_fp_dreg(s, rd, tcg_rd);
        tcg_temp_free_i64(tcg_rd);
        tcg_temp_free_i64(tcg_rn);
    } else {
        TCGv_i32 tcg_rn = tcg_temp_new_i32();
        TCGv_i32 tcg_rd = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_rn, rn, 0, size);

        switch (opcode) {
        case 0x7: /* SQABS, SQNEG */
        {
            NeonGenOneOpEnvFn *genfn;
            static NeonGenOneOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 },
                { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 },
                { gen_helper_neon_qabs_s32, gen_helper_neon_qneg_s32 },
            };
            genfn = fns[size][u];
            genfn(tcg_rd, cpu_env, tcg_rn);
            break;
        }
        case 0x1a: /* FCVTNS */
        case 0x1b: /* FCVTMS */
        case 0x1c: /* FCVTAS */
        case 0x3a: /* FCVTPS */
        case 0x3b: /* FCVTZS */
        {
            TCGv_i32 tcg_shift = tcg_const_i32(0);
            gen_helper_vfp_tosls(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
            tcg_temp_free_i32(tcg_shift);
            break;
        }
        case 0x5a: /* FCVTNU */
        case 0x5b: /* FCVTMU */
        case 0x5c: /* FCVTAU */
        case 0x7a: /* FCVTPU */
        case 0x7b: /* FCVTZU */
        {
            TCGv_i32 tcg_shift = tcg_const_i32(0);
            gen_helper_vfp_touls(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
            tcg_temp_free_i32(tcg_shift);
            break;
        }
        default:
            g_assert_not_reached();
        }

        write_fp_sreg(s, rd, tcg_rd);
        tcg_temp_free_i32(tcg_rd);
        tcg_temp_free_i32(tcg_rn);
    }

    if (is_fcvt) {
        /* Restore the original rounding mode */
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
        tcg_temp_free_i32(tcg_rmode);
        tcg_temp_free_ptr(tcg_fpstatus);
    }
}
10631
10632
/* SSHR[RA]/USHR[RA] - Vector shift right (optional rounding/accumulate) */
static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
                                 int immh, int immb, int opcode, int rn, int rd)
{
    /* Element size is encoded by the position of the top set bit of immh */
    int size = 32 - clz32(immh) - 1;
    int immhb = immh << 3 | immb;
    /* Right-shift amount is encoded as (2 * esize) - immhb */
    int shift = 2 * (8 << size) - immhb;
    GVecGen2iFn *gvec_fn;

    if (extract32(immh, 3, 1) && !is_q) {
        unallocated_encoding(s);
        return;
    }
    tcg_debug_assert(size <= 3);

    if (!fp_access_check(s)) {
        return;
    }

    switch (opcode) {
    case 0x02: /* SSRA / USRA (accumulate) */
        gvec_fn = is_u ? gen_gvec_usra : gen_gvec_ssra;
        break;

    case 0x08: /* SRI */
        gvec_fn = gen_gvec_sri;
        break;

    case 0x00: /* SSHR / USHR */
        if (is_u) {
            if (shift == 8 << size) {
                /* Shift count the same as element size: USHR result is
                 * architecturally zero.
                 */
                tcg_gen_gvec_dup_imm(size, vec_full_reg_offset(s, rd),
                                     is_q ? 16 : 8, vec_full_reg_size(s), 0);
                return;
            }
            gvec_fn = tcg_gen_gvec_shri;
        } else {
            /* Shift count the same as element size produces all sign;
             * clamp to esize - 1, which gives the same result.
             */
            if (shift == 8 << size) {
                shift -= 1;
            }
            gvec_fn = tcg_gen_gvec_sari;
        }
        break;

    case 0x04: /* SRSHR / URSHR (rounding) */
        gvec_fn = is_u ? gen_gvec_urshr : gen_gvec_srshr;
        break;

    case 0x06: /* SRSRA / URSRA (accum + rounding) */
        gvec_fn = is_u ? gen_gvec_ursra : gen_gvec_srsra;
        break;

    default:
        g_assert_not_reached();
    }

    gen_gvec_fn2i(s, is_q, rd, rn, shift, gvec_fn, size);
}
10692
10693
/* SHL/SLI - Vector shift left */
static void handle_vec_simd_shli(DisasContext *s, bool is_q, bool insert,
                                 int immh, int immb, int opcode, int rn, int rd)
{
    /* Element size is encoded by the position of the top set bit of immh */
    int size = 32 - clz32(immh) - 1;
    int immhb = immh << 3 | immb;
    int shift = immhb - (8 << size);

    /* Range of size is limited by decode: immh is always non-zero */
    assert(size >= 0 && size <= 3);

    if (extract32(immh, 3, 1) && !is_q) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (insert) {
        /* SLI: shift left and insert (keeps the low bits of Rd) */
        gen_gvec_fn2i(s, is_q, rd, rn, shift, gen_gvec_sli, size);
    } else {
        gen_gvec_fn2i(s, is_q, rd, rn, shift, tcg_gen_gvec_shli, size);
    }
}
10719
10720
/* SSHLL/USHLL - Vector shift left with widening */
static void handle_vec_simd_wshli(DisasContext *s, bool is_q, bool is_u,
                                  int immh, int immb, int opcode, int rn, int rd)
{
    int size = 32 - clz32(immh) - 1;
    int immhb = immh << 3 | immb;
    int shift = immhb - (8 << size);
    int dsize = 64;
    int esize = 8 << size;
    int elements = dsize/esize;
    TCGv_i64 tcg_rn = new_tmp_a64(s);
    TCGv_i64 tcg_rd = new_tmp_a64(s);
    int i;

    if (size >= 3) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /*
     * For the LL variants the store is larger than the load,
     * so if rd == rn we would overwrite parts of our input.
     * So load everything right now and use shifts in the main loop.
     */
    read_vec_element(s, tcg_rn, rn, is_q ? 1 : 0, MO_64);

    for (i = 0; i < elements; i++) {
        /* Extract element i, sign/zero-extend it, shift and widen */
        tcg_gen_shri_i64(tcg_rd, tcg_rn, i * esize);
        ext_and_shift_reg(tcg_rd, tcg_rd, size | (!is_u << 2), 0);
        tcg_gen_shli_i64(tcg_rd, tcg_rd, shift);
        write_vec_element(s, tcg_rd, rd, i, size + 1);
    }
}
10756
10757
/* SHRN/RSHRN - Shift right with narrowing (and potential rounding) */
static void handle_vec_simd_shrn(DisasContext *s, bool is_q,
                                 int immh, int immb, int opcode, int rn, int rd)
{
    int immhb = immh << 3 | immb;
    int size = 32 - clz32(immh) - 1;
    int dsize = 64;
    int esize = 8 << size;
    int elements = dsize/esize;
    int shift = (2 * esize) - immhb;
    /* Opcode bit 0 distinguishes RSHRN (rounding) from SHRN */
    bool round = extract32(opcode, 0, 1);
    TCGv_i64 tcg_rn, tcg_rd, tcg_final;
    TCGv_i64 tcg_round;
    int i;

    if (extract32(immh, 3, 1)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rn = tcg_temp_new_i64();
    tcg_rd = tcg_temp_new_i64();
    tcg_final = tcg_temp_new_i64();
    /* Preload the destination half we are not writing (SHRN2 keeps it) */
    read_vec_element(s, tcg_final, rd, is_q ? 1 : 0, MO_64);

    if (round) {
        uint64_t round_const = 1ULL << (shift - 1);
        tcg_round = tcg_const_i64(round_const);
    } else {
        tcg_round = NULL;
    }

    for (i = 0; i < elements; i++) {
        read_vec_element(s, tcg_rn, rn, i, size+1);
        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
                                false, true, size+1, shift);

        tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
    }

    if (!is_q) {
        write_vec_element(s, tcg_final, rd, 0, MO_64);
    } else {
        /* The "2" variant writes the upper half */
        write_vec_element(s, tcg_final, rd, 1, MO_64);
    }
    if (round) {
        tcg_temp_free_i64(tcg_round);
    }
    tcg_temp_free_i64(tcg_rn);
    tcg_temp_free_i64(tcg_rd);
    tcg_temp_free_i64(tcg_final);

    clear_vec_high(s, is_q, rd);
}
10815
10816
10817
10818
10819
10820
10821
10822
/* AdvSIMD shift by immediate
 *  31  30   29 28         23 22  19 18  16 15    11  10 9    5 4    0
 * +---+---+---+-------------+------+------+--------+---+------+------+
 * | 0 | Q | U | 0 1 1 1 1 0 | immh | immb | opcode | 1 |  Rn  |  Rd  |
 * +---+---+---+-------------+------+------+--------+---+------+------+
 */
static void disas_simd_shift_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 5);
    int immb = extract32(insn, 16, 3);
    int immh = extract32(insn, 19, 4);
    bool is_u = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);

    /* immh == 0 would be a different encoding group; the caller's
     * decode presumably never sends it here -- hence the assert.
     */
    assert(immh != 0);

    switch (opcode) {
    case 0x08: /* SRI */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x00: /* SSHR / USHR */
    case 0x02: /* SSRA / USRA */
    case 0x04: /* SRSHR / URSHR */
    case 0x06: /* SRSRA / URSRA */
        handle_vec_simd_shri(s, is_q, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x0a: /* SHL / SLI */
        handle_vec_simd_shli(s, is_q, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x10: /* SHRN / SQSHRUN */
    case 0x11: /* RSHRN / SQRSHRUN */
        if (is_u) {
            handle_vec_simd_sqshrn(s, false, is_q, false, true, immh, immb,
                                   opcode, rn, rd);
        } else {
            handle_vec_simd_shrn(s, is_q, immh, immb, opcode, rn, rd);
        }
        break;
    case 0x12: /* SQSHRN / UQSHRN */
    case 0x13: /* SQRSHRN / UQRSHRN */
        handle_vec_simd_sqshrn(s, false, is_q, is_u, is_u, immh, immb,
                               opcode, rn, rd);
        break;
    case 0x14: /* SSHLL / USHLL */
        handle_vec_simd_wshli(s, is_q, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x1c: /* SCVTF / UCVTF (fixed-point) */
        handle_simd_shift_intfp_conv(s, false, is_q, is_u, immh, immb,
                                     opcode, rn, rd);
        break;
    case 0xc: /* SQSHLU */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        handle_simd_qshl(s, false, is_q, false, true, immh, immb, rn, rd);
        break;
    case 0xe: /* SQSHL / UQSHL */
        handle_simd_qshl(s, false, is_q, is_u, is_u, immh, immb, rn, rd);
        break;
    case 0x1f: /* FCVTZS / FCVTZU (fixed-point) */
        handle_simd_shift_fpint_conv(s, false, is_q, is_u, immh, immb, rn, rd);
        return;
    default:
        unallocated_encoding(s);
        return;
    }
}
10891
10892
10893
10894
10895static void gen_neon_addl(int size, bool is_sub, TCGv_i64 tcg_res,
10896 TCGv_i64 tcg_op1, TCGv_i64 tcg_op2)
10897{
10898 static NeonGenTwo64OpFn * const fns[3][2] = {
10899 { gen_helper_neon_addl_u16, gen_helper_neon_subl_u16 },
10900 { gen_helper_neon_addl_u32, gen_helper_neon_subl_u32 },
10901 { tcg_gen_add_i64, tcg_gen_sub_i64 },
10902 };
10903 NeonGenTwo64OpFn *genfn;
10904 assert(size < 3);
10905
10906 genfn = fns[size][is_sub];
10907 genfn(tcg_res, tcg_op1, tcg_op2);
10908}
10909
static void handle_3rd_widening(DisasContext *s, int is_q, int is_u, int size,
                                int opcode, int rd, int rn, int rm)
{
    /* 3-reg-different widening insns: 64 x 64 -> 128 */
    TCGv_i64 tcg_res[2];
    int pass, accop;

    tcg_res[0] = tcg_temp_new_i64();
    tcg_res[1] = tcg_temp_new_i64();

    /* Does this op do an adding accumulate (+1), a subtracting
     * accumulate (-1), or no accumulate at all (0)?
     */
    switch (opcode) {
    case 5:  /* SABAL/UABAL */
    case 8:  /* SMLAL/UMLAL */
    case 9:  /* SQDMLAL */
        accop = 1;
        break;
    case 10: /* SMLSL/UMLSL */
    case 11: /* SQDMLSL */
        accop = -1;
        break;
    default:
        accop = 0;
        break;
    }

    if (accop != 0) {
        read_vec_element(s, tcg_res[0], rd, 0, MO_64);
        read_vec_element(s, tcg_res[1], rd, 1, MO_64);
    }

    /* size == 2 means two 32x32->64 operations; this is worth special
     * casing as we can generally handle it inline.
     */
    if (size == 2) {
        for (pass = 0; pass < 2; pass++) {
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
            TCGv_i64 tcg_passres;
            MemOp memop = MO_32 | (is_u ? 0 : MO_SIGN);

            /* For the "2" variants take the upper two elements */
            int elt = pass + is_q * 2;

            read_vec_element(s, tcg_op1, rn, elt, memop);
            read_vec_element(s, tcg_op2, rm, elt, memop);

            if (accop == 0) {
                tcg_passres = tcg_res[pass];
            } else {
                tcg_passres = tcg_temp_new_i64();
            }

            switch (opcode) {
            case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
                tcg_gen_add_i64(tcg_passres, tcg_op1, tcg_op2);
                break;
            case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
                tcg_gen_sub_i64(tcg_passres, tcg_op1, tcg_op2);
                break;
            case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
            case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
            {
                /* Absolute difference via movcond on both subtractions */
                TCGv_i64 tcg_tmp1 = tcg_temp_new_i64();
                TCGv_i64 tcg_tmp2 = tcg_temp_new_i64();

                tcg_gen_sub_i64(tcg_tmp1, tcg_op1, tcg_op2);
                tcg_gen_sub_i64(tcg_tmp2, tcg_op2, tcg_op1);
                tcg_gen_movcond_i64(is_u ? TCG_COND_GEU : TCG_COND_GE,
                                    tcg_passres,
                                    tcg_op1, tcg_op2, tcg_tmp1, tcg_tmp2);
                tcg_temp_free_i64(tcg_tmp1);
                tcg_temp_free_i64(tcg_tmp2);
                break;
            }
            case 8:  /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
            case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
            case 12: /* SMULL, SMULL2, UMULL, UMULL2 */
                tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2);
                break;
            case 9:  /* SQDMLAL, SQDMLAL2 */
            case 11: /* SQDMLSL, SQDMLSL2 */
            case 13: /* SQDMULL, SQDMULL2 */
                /* Multiply then saturating double */
                tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2);
                gen_helper_neon_addl_saturate_s64(tcg_passres, cpu_env,
                                                  tcg_passres, tcg_passres);
                break;
            default:
                g_assert_not_reached();
            }

            if (opcode == 9 || opcode == 11) {
                /* saturating accumulate ops */
                if (accop < 0) {
                    tcg_gen_neg_i64(tcg_passres, tcg_passres);
                }
                gen_helper_neon_addl_saturate_s64(tcg_res[pass], cpu_env,
                                                  tcg_res[pass], tcg_passres);
            } else if (accop > 0) {
                tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
            } else if (accop < 0) {
                tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
            }

            if (accop != 0) {
                tcg_temp_free_i64(tcg_passres);
            }

            tcg_temp_free_i64(tcg_op1);
            tcg_temp_free_i64(tcg_op2);
        }
    } else {
        /* size 0 or 1, generally done via helper functions */
        for (pass = 0; pass < 2; pass++) {
            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
            TCGv_i64 tcg_passres;
            int elt = pass + is_q * 2;

            read_vec_element_i32(s, tcg_op1, rn, elt, MO_32);
            read_vec_element_i32(s, tcg_op2, rm, elt, MO_32);

            if (accop == 0) {
                tcg_passres = tcg_res[pass];
            } else {
                tcg_passres = tcg_temp_new_i64();
            }

            switch (opcode) {
            case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
            case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
            {
                TCGv_i64 tcg_op2_64 = tcg_temp_new_i64();
                static NeonGenWidenFn * const widenfns[2][2] = {
                    { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 },
                    { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 },
                };
                NeonGenWidenFn *widenfn = widenfns[size][is_u];

                /* Widen both operands, then add/sub the wide lanes */
                widenfn(tcg_op2_64, tcg_op2);
                widenfn(tcg_passres, tcg_op1);
                gen_neon_addl(size, (opcode == 2), tcg_passres,
                              tcg_passres, tcg_op2_64);
                tcg_temp_free_i64(tcg_op2_64);
                break;
            }
            case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
            case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
                if (size == 0) {
                    if (is_u) {
                        gen_helper_neon_abdl_u16(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_abdl_s16(tcg_passres, tcg_op1, tcg_op2);
                    }
                } else {
                    if (is_u) {
                        gen_helper_neon_abdl_u32(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_abdl_s32(tcg_passres, tcg_op1, tcg_op2);
                    }
                }
                break;
            case 8:  /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
            case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
            case 12: /* SMULL, SMULL2, UMULL, UMULL2 */
                if (size == 0) {
                    if (is_u) {
                        gen_helper_neon_mull_u8(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_mull_s8(tcg_passres, tcg_op1, tcg_op2);
                    }
                } else {
                    if (is_u) {
                        gen_helper_neon_mull_u16(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2);
                    }
                }
                break;
            case 9:  /* SQDMLAL, SQDMLAL2 */
            case 11: /* SQDMLSL, SQDMLSL2 */
            case 13: /* SQDMULL, SQDMULL2 */
                /* These are only valid for 16-bit elements here */
                assert(size == 1);
                gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2);
                gen_helper_neon_addl_saturate_s32(tcg_passres, cpu_env,
                                                  tcg_passres, tcg_passres);
                break;
            default:
                g_assert_not_reached();
            }
            tcg_temp_free_i32(tcg_op1);
            tcg_temp_free_i32(tcg_op2);

            if (accop != 0) {
                if (opcode == 9 || opcode == 11) {
                    /* saturating accumulate ops */
                    if (accop < 0) {
                        gen_helper_neon_negl_u32(tcg_passres, tcg_passres);
                    }
                    gen_helper_neon_addl_saturate_s32(tcg_res[pass], cpu_env,
                                                      tcg_res[pass],
                                                      tcg_passres);
                } else {
                    gen_neon_addl(size, (accop < 0), tcg_res[pass],
                                  tcg_res[pass], tcg_passres);
                }
                tcg_temp_free_i64(tcg_passres);
            }
        }
    }

    write_vec_element(s, tcg_res[0], rd, 0, MO_64);
    write_vec_element(s, tcg_res[1], rd, 1, MO_64);
    tcg_temp_free_i64(tcg_res[0]);
    tcg_temp_free_i64(tcg_res[1]);
}
11127
/* 3-reg-different "wide" insns (128 x 64 -> 128):
 * SADDW/UADDW (opcode 1) and SSUBW/USUBW (opcode 3).
 * The narrow (Rm) operand is widened per-lane, then added to or
 * subtracted from the wide (Rn) operand.
 */
static void handle_3rd_wide(DisasContext *s, int is_q, int is_u, int size,
                            int opcode, int rd, int rn, int rm)
{
    TCGv_i64 tcg_res[2];
    /* The "2" variants (is_q) take the upper half of Rm */
    int part = is_q ? 2 : 0;
    int pass;

    for (pass = 0; pass < 2; pass++) {
        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
        TCGv_i32 tcg_op2 = tcg_temp_new_i32();
        TCGv_i64 tcg_op2_wide = tcg_temp_new_i64();
        static NeonGenWidenFn * const widenfns[3][2] = {
            { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 },
            { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 },
            { tcg_gen_ext_i32_i64, tcg_gen_extu_i32_i64 },
        };
        NeonGenWidenFn *widenfn = widenfns[size][is_u];

        read_vec_element(s, tcg_op1, rn, pass, MO_64);
        read_vec_element_i32(s, tcg_op2, rm, part + pass, MO_32);
        widenfn(tcg_op2_wide, tcg_op2);
        tcg_temp_free_i32(tcg_op2);
        tcg_res[pass] = tcg_temp_new_i64();
        /* opcode 3 is the subtracting form */
        gen_neon_addl(size, (opcode == 3),
                      tcg_res[pass], tcg_op1, tcg_op2_wide);
        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2_wide);
    }

    for (pass = 0; pass < 2; pass++) {
        write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
        tcg_temp_free_i64(tcg_res[pass]);
    }
}
11162
11163static void do_narrow_round_high_u32(TCGv_i32 res, TCGv_i64 in)
11164{
11165 tcg_gen_addi_i64(in, in, 1U << 31);
11166 tcg_gen_extrh_i64_i32(res, in);
11167}
11168
/* 3-reg-different narrowing insns (128 x 128 -> 64):
 * ADDHN/SUBHN and the rounding forms RADDHN/RSUBHN (selected by is_u).
 * opcode 6 is the subtracting form. The "2" variants (is_q) write the
 * upper half of Rd.
 */
static void handle_3rd_narrowing(DisasContext *s, int is_q, int is_u, int size,
                                 int opcode, int rd, int rn, int rm)
{
    TCGv_i32 tcg_res[2];
    int part = is_q ? 2 : 0;
    int pass;

    for (pass = 0; pass < 2; pass++) {
        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
        TCGv_i64 tcg_op2 = tcg_temp_new_i64();
        TCGv_i64 tcg_wideres = tcg_temp_new_i64();
        static NeonGenNarrowFn * const narrowfns[3][2] = {
            { gen_helper_neon_narrow_high_u8,
              gen_helper_neon_narrow_round_high_u8 },
            { gen_helper_neon_narrow_high_u16,
              gen_helper_neon_narrow_round_high_u16 },
            { tcg_gen_extrh_i64_i32, do_narrow_round_high_u32 },
        };
        NeonGenNarrowFn *gennarrow = narrowfns[size][is_u];

        read_vec_element(s, tcg_op1, rn, pass, MO_64);
        read_vec_element(s, tcg_op2, rm, pass, MO_64);

        /* Wide add/sub first, then narrow to the high halves */
        gen_neon_addl(size, (opcode == 6), tcg_wideres, tcg_op1, tcg_op2);

        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2);

        tcg_res[pass] = tcg_temp_new_i32();
        gennarrow(tcg_res[pass], tcg_wideres);
        tcg_temp_free_i64(tcg_wideres);
    }

    for (pass = 0; pass < 2; pass++) {
        write_vec_element_i32(s, tcg_res[pass], rd, pass + part, MO_32);
        tcg_temp_free_i32(tcg_res[pass]);
    }
    clear_vec_high(s, is_q, rd);
}
11208
11209
11210
11211
11212
11213
11214
/* AdvSIMD three different
 *   31  30  29 28       24 23  22  21 20  16 15    12 11 10 9    5 4    0
 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
 * | 0 | Q | U | 0 1 1 1 0 | size | 1 |  Rm  | opcode | 0 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
 *
 * Dispatches to the widening, wide and narrowing handlers above.
 */
static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn)
{
    int is_q = extract32(insn, 30, 1);
    int is_u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 4);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    switch (opcode) {
    case 1: /* SADDW, SADDW2, UADDW, UADDW2 */
    case 3: /* SSUBW, SSUBW2, USUBW, USUBW2 */
        /* 64 x 128 -> 128 */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_3rd_wide(s, is_q, is_u, size, opcode, rd, rn, rm);
        break;
    case 4: /* ADDHN, ADDHN2, RADDHN, RADDHN2 */
    case 6: /* SUBHN, SUBHN2, RSUBHN, RSUBHN2 */
        /* 128 x 128 -> 64 */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_3rd_narrowing(s, is_q, is_u, size, opcode, rd, rn, rm);
        break;
    case 14: /* PMULL, PMULL2 */
        if (is_u) {
            unallocated_encoding(s);
            return;
        }
        switch (size) {
        case 0: /* PMULL.P8 */
            if (!fp_access_check(s)) {
                return;
            }
            /* The Q field specifies lo/hi half input for these insns */
            gen_gvec_op3_ool(s, true, rd, rn, rm, is_q,
                             gen_helper_neon_pmull_h);
            break;

        case 3: /* PMULL.P64: gated on the FEAT_PMULL feature */
            if (!dc_isar_feature(aa64_pmull, s)) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            /* The Q field specifies lo/hi half input for this insn */
            gen_gvec_op3_ool(s, true, rd, rn, rm, is_q,
                             gen_helper_gvec_pmull_q);
            break;

        default:
            unallocated_encoding(s);
            break;
        }
        return;
    case 9:  /* SQDMLAL, SQDMLAL2 */
    case 11: /* SQDMLSL, SQDMLSL2 */
    case 13: /* SQDMULL, SQDMULL2 */
        if (is_u || size == 0) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0:  /* SADDL, SADDL2, UADDL, UADDL2 */
    case 2:  /* SSUBL, SSUBL2, USUBL, USUBL2 */
    case 5:  /* SABAL, SABAL2, UABAL, UABAL2 */
    case 7:  /* SABDL, SABDL2, UABDL, UABDL2 */
    case 8:  /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
    case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
    case 12: /* SMULL, SMULL2, UMULL, UMULL2 */
        /* 64 x 64 -> 128 */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }

        handle_3rd_widening(s, is_q, is_u, size, opcode, rd, rn, rm);
        break;
    default:
        /* opcode 15 not allocated */
        unallocated_encoding(s);
        break;
    }
}
11325
11326
11327static void disas_simd_3same_logic(DisasContext *s, uint32_t insn)
11328{
11329 int rd = extract32(insn, 0, 5);
11330 int rn = extract32(insn, 5, 5);
11331 int rm = extract32(insn, 16, 5);
11332 int size = extract32(insn, 22, 2);
11333 bool is_u = extract32(insn, 29, 1);
11334 bool is_q = extract32(insn, 30, 1);
11335
11336 if (!fp_access_check(s)) {
11337 return;
11338 }
11339
11340 switch (size + 4 * is_u) {
11341 case 0:
11342 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_and, 0);
11343 return;
11344 case 1:
11345 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_andc, 0);
11346 return;
11347 case 2:
11348 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_or, 0);
11349 return;
11350 case 3:
11351 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_orc, 0);
11352 return;
11353 case 4:
11354 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_xor, 0);
11355 return;
11356
11357 case 5:
11358 gen_gvec_fn4(s, is_q, rd, rd, rn, rm, tcg_gen_gvec_bitsel, 0);
11359 return;
11360 case 6:
11361 gen_gvec_fn4(s, is_q, rd, rm, rn, rd, tcg_gen_gvec_bitsel, 0);
11362 return;
11363 case 7:
11364 gen_gvec_fn4(s, is_q, rd, rm, rd, rn, tcg_gen_gvec_bitsel, 0);
11365 return;
11366
11367 default:
11368 g_assert_not_reached();
11369 }
11370}
11371
11372
11373
11374
11375
11376
11377static void handle_simd_3same_pair(DisasContext *s, int is_q, int u, int opcode,
11378 int size, int rn, int rm, int rd)
11379{
11380 TCGv_ptr fpst;
11381 int pass;
11382
11383
11384 if (opcode >= 0x58) {
11385 fpst = fpstatus_ptr(FPST_FPCR);
11386 } else {
11387 fpst = NULL;
11388 }
11389
11390 if (!fp_access_check(s)) {
11391 return;
11392 }
11393
11394
11395
11396
11397 if (size == 3) {
11398 TCGv_i64 tcg_res[2];
11399
11400 for (pass = 0; pass < 2; pass++) {
11401 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
11402 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
11403 int passreg = (pass == 0) ? rn : rm;
11404
11405 read_vec_element(s, tcg_op1, passreg, 0, MO_64);
11406 read_vec_element(s, tcg_op2, passreg, 1, MO_64);
11407 tcg_res[pass] = tcg_temp_new_i64();
11408
11409 switch (opcode) {
11410 case 0x17:
11411 tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2);
11412 break;
11413 case 0x58:
11414 gen_helper_vfp_maxnumd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11415 break;
11416 case 0x5a:
11417 gen_helper_vfp_addd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11418 break;
11419 case 0x5e:
11420 gen_helper_vfp_maxd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11421 break;
11422 case 0x78:
11423 gen_helper_vfp_minnumd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11424 break;
11425 case 0x7e:
11426 gen_helper_vfp_mind(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11427 break;
11428 default:
11429 g_assert_not_reached();
11430 }
11431
11432 tcg_temp_free_i64(tcg_op1);
11433 tcg_temp_free_i64(tcg_op2);
11434 }
11435
11436 for (pass = 0; pass < 2; pass++) {
11437 write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
11438 tcg_temp_free_i64(tcg_res[pass]);
11439 }
11440 } else {
11441 int maxpass = is_q ? 4 : 2;
11442 TCGv_i32 tcg_res[4];
11443
11444 for (pass = 0; pass < maxpass; pass++) {
11445 TCGv_i32 tcg_op1 = tcg_temp_new_i32();
11446 TCGv_i32 tcg_op2 = tcg_temp_new_i32();
11447 NeonGenTwoOpFn *genfn = NULL;
11448 int passreg = pass < (maxpass / 2) ? rn : rm;
11449 int passelt = (is_q && (pass & 1)) ? 2 : 0;
11450
11451 read_vec_element_i32(s, tcg_op1, passreg, passelt, MO_32);
11452 read_vec_element_i32(s, tcg_op2, passreg, passelt + 1, MO_32);
11453 tcg_res[pass] = tcg_temp_new_i32();
11454
11455 switch (opcode) {
11456 case 0x17:
11457 {
11458 static NeonGenTwoOpFn * const fns[3] = {
11459 gen_helper_neon_padd_u8,
11460 gen_helper_neon_padd_u16,
11461 tcg_gen_add_i32,
11462 };
11463 genfn = fns[size];
11464 break;
11465 }
11466 case 0x14:
11467 {
11468 static NeonGenTwoOpFn * const fns[3][2] = {
11469 { gen_helper_neon_pmax_s8, gen_helper_neon_pmax_u8 },
11470 { gen_helper_neon_pmax_s16, gen_helper_neon_pmax_u16 },
11471 { tcg_gen_smax_i32, tcg_gen_umax_i32 },
11472 };
11473 genfn = fns[size][u];
11474 break;
11475 }
11476 case 0x15:
11477 {
11478 static NeonGenTwoOpFn * const fns[3][2] = {
11479 { gen_helper_neon_pmin_s8, gen_helper_neon_pmin_u8 },
11480 { gen_helper_neon_pmin_s16, gen_helper_neon_pmin_u16 },
11481 { tcg_gen_smin_i32, tcg_gen_umin_i32 },
11482 };
11483 genfn = fns[size][u];
11484 break;
11485 }
11486
11487 case 0x58:
11488 gen_helper_vfp_maxnums(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11489 break;
11490 case 0x5a:
11491 gen_helper_vfp_adds(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11492 break;
11493 case 0x5e:
11494 gen_helper_vfp_maxs(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11495 break;
11496 case 0x78:
11497 gen_helper_vfp_minnums(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11498 break;
11499 case 0x7e:
11500 gen_helper_vfp_mins(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11501 break;
11502 default:
11503 g_assert_not_reached();
11504 }
11505
11506
11507 if (genfn) {
11508 genfn(tcg_res[pass], tcg_op1, tcg_op2);
11509 }
11510
11511 tcg_temp_free_i32(tcg_op1);
11512 tcg_temp_free_i32(tcg_op2);
11513 }
11514
11515 for (pass = 0; pass < maxpass; pass++) {
11516 write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32);
11517 tcg_temp_free_i32(tcg_res[pass]);
11518 }
11519 clear_vec_high(s, is_q, rd);
11520 }
11521
11522 if (fpst) {
11523 tcg_temp_free_ptr(fpst);
11524 }
11525}
11526
11527
/* Floating point op subgroup of SIMD three-same. */
static void disas_simd_3same_float(DisasContext *s, uint32_t insn)
{
    /*
     * For floating point ops, the U, size[1] and opcode bits together
     * indicate the operation; size[0] indicates single or double
     * precision:  fpopcode = opcode | (size[1] << 5) | (U << 6).
     */
    int fpopcode = extract32(insn, 11, 5)
        | (extract32(insn, 23, 1) << 5)
        | (extract32(insn, 29, 1) << 6);
    int is_q = extract32(insn, 30, 1);
    int size = extract32(insn, 22, 1);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    int datasize = is_q ? 128 : 64;
    int esize = 32 << size;
    int elements = datasize / esize;

    if (size == 1 && !is_q) {
        /* double-precision ops only exist in the 128-bit form */
        unallocated_encoding(s);
        return;
    }

    switch (fpopcode) {
    case 0x58: /* FMAXNMP */
    case 0x5a: /* FADDP */
    case 0x5e: /* FMAXP */
    case 0x78: /* FMINNMP */
    case 0x7e: /* FMINP */
        if (size && !is_q) {
            unallocated_encoding(s);
            return;
        }
        /* handle_simd_3same_pair performs the fp_access_check itself */
        handle_simd_3same_pair(s, is_q, 0, fpopcode, size ? MO_64 : MO_32,
                               rn, rm, rd);
        return;
    case 0x1b: /* FMULX */
    case 0x1f: /* FRECPS */
    case 0x3f: /* FRSQRTS */
    case 0x5d: /* FACGE */
    case 0x7d: /* FACGT */
    case 0x19: /* FMLA */
    case 0x39: /* FMLS */
    case 0x18: /* FMAXNM */
    case 0x1a: /* FADD */
    case 0x1c: /* FCMEQ */
    case 0x1e: /* FMAX */
    case 0x38: /* FMINNM */
    case 0x3a: /* FSUB */
    case 0x3e: /* FMIN */
    case 0x5b: /* FMUL */
    case 0x5c: /* FCMGE */
    case 0x5f: /* FDIV */
    case 0x7a: /* FABD */
    case 0x7c: /* FCMGT */
        if (!fp_access_check(s)) {
            return;
        }
        handle_3same_float(s, size, elements, fpopcode, rd, rn, rm);
        return;

    case 0x1d: /* FMLAL  */
    case 0x3d: /* FMLSL  */
    case 0x59: /* FMLAL2 */
    case 0x79: /* FMLSL2 */
        /* these require FEAT_FHM and only exist at single precision */
        if (size & 1 || !dc_isar_feature(aa64_fhm, s)) {
            unallocated_encoding(s);
            return;
        }
        if (fp_access_check(s)) {
            int is_s = extract32(insn, 23, 1);
            int is_2 = extract32(insn, 29, 1);
            int data = (is_2 << 1) | is_s;
            tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
                               vec_full_reg_offset(s, rn),
                               vec_full_reg_offset(s, rm), cpu_env,
                               is_q ? 16 : 8, vec_full_reg_size(s),
                               data, gen_helper_gvec_fmlal_a64);
        }
        return;

    default:
        unallocated_encoding(s);
        return;
    }
}
11615
11616
/* Integer op subgroup of SIMD three-same. */
static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
{
    int is_q = extract32(insn, 30, 1);
    int u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 11, 5);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    int pass;
    TCGCond cond;

    /* First, decode-time checks for unallocated size combinations */
    switch (opcode) {
    case 0x13: /* MUL, PMUL */
        if (u && size != 0) {
            /* PMUL only exists for byte elements */
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x0:  /* SHADD, UHADD */
    case 0x2:  /* SRHADD, URHADD */
    case 0x4:  /* SHSUB, UHSUB */
    case 0xc:  /* SMAX, UMAX */
    case 0xd:  /* SMIN, UMIN */
    case 0xe:  /* SABD, UABD */
    case 0xf:  /* SABA, UABA */
    case 0x12: /* MLA, MLS */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x16: /* SQDMULH, SQRDMULH */
        if (size == 0 || size == 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        if (size == 3 && !is_q) {
            /* 64-bit element ops exist only in the 128-bit form */
            unallocated_encoding(s);
            return;
        }
        break;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* Ops with whole-vector (gvec) implementations */
    switch (opcode) {
    case 0x01: /* SQADD, UQADD */
        if (u) {
            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_uqadd_qc, size);
        } else {
            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sqadd_qc, size);
        }
        return;
    case 0x05: /* SQSUB, UQSUB */
        if (u) {
            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_uqsub_qc, size);
        } else {
            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sqsub_qc, size);
        }
        return;
    case 0x08: /* SSHL, USHL */
        if (u) {
            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_ushl, size);
        } else {
            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sshl, size);
        }
        return;
    case 0x0c: /* SMAX, UMAX */
        if (u) {
            gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_umax, size);
        } else {
            gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_smax, size);
        }
        return;
    case 0x0d: /* SMIN, UMIN */
        if (u) {
            gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_umin, size);
        } else {
            gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_smin, size);
        }
        return;
    case 0xe: /* SABD, UABD */
        if (u) {
            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_uabd, size);
        } else {
            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sabd, size);
        }
        return;
    case 0xf: /* SABA, UABA */
        if (u) {
            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_uaba, size);
        } else {
            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_saba, size);
        }
        return;
    case 0x10: /* ADD, SUB */
        if (u) {
            gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_sub, size);
        } else {
            gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_add, size);
        }
        return;
    case 0x13: /* MUL, PMUL */
        if (!u) { /* MUL */
            gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_mul, size);
        } else {  /* PMUL */
            gen_gvec_op3_ool(s, is_q, rd, rn, rm, 0, gen_helper_gvec_pmul_b);
        }
        return;
    case 0x12: /* MLA, MLS */
        if (u) {
            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_mls, size);
        } else {
            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_mla, size);
        }
        return;
    case 0x16: /* SQDMULH, SQRDMULH */
    {
        /* indexed [size - 1] because size 0 was rejected above */
        static gen_helper_gvec_3_ptr * const fns[2][2] = {
            { gen_helper_neon_sqdmulh_h, gen_helper_neon_sqrdmulh_h },
            { gen_helper_neon_sqdmulh_s, gen_helper_neon_sqrdmulh_s },
        };
        gen_gvec_op3_qc(s, is_q, rd, rn, rm, fns[size - 1][u]);
    }
        return;
    case 0x11: /* CMTST, CMEQ */
        if (!u) { /* CMTST */
            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_cmtst, size);
            return;
        }
        /* CMEQ */
        cond = TCG_COND_EQ;
        goto do_gvec_cmp;
    case 0x06: /* CMGT, CMHI */
        cond = u ? TCG_COND_GTU : TCG_COND_GT;
        goto do_gvec_cmp;
    case 0x07: /* CMGE, CMHS */
        cond = u ? TCG_COND_GEU : TCG_COND_GE;
    do_gvec_cmp:
        tcg_gen_gvec_cmp(cond, size, vec_full_reg_offset(s, rd),
                         vec_full_reg_offset(s, rn),
                         vec_full_reg_offset(s, rm),
                         is_q ? 16 : 8, vec_full_reg_size(s));
        return;
    }

    /* Remaining ops are handled per-element below */
    if (size == 3) {
        assert(is_q); /* size == 3 && !is_q rejected at decode above */
        for (pass = 0; pass < 2; pass++) {
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
            TCGv_i64 tcg_res = tcg_temp_new_i64();

            read_vec_element(s, tcg_op1, rn, pass, MO_64);
            read_vec_element(s, tcg_op2, rm, pass, MO_64);

            handle_3same_64(s, opcode, u, tcg_res, tcg_op1, tcg_op2);

            write_vec_element(s, tcg_res, rd, pass, MO_64);

            tcg_temp_free_i64(tcg_res);
            tcg_temp_free_i64(tcg_op1);
            tcg_temp_free_i64(tcg_op2);
        }
    } else {
        for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();
            NeonGenTwoOpFn *genfn = NULL;
            NeonGenTwoOpEnvFn *genenvfn = NULL;

            read_vec_element_i32(s, tcg_op1, rn, pass, MO_32);
            read_vec_element_i32(s, tcg_op2, rm, pass, MO_32);

            switch (opcode) {
            case 0x0: /* SHADD, UHADD */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_hadd_s8, gen_helper_neon_hadd_u8 },
                    { gen_helper_neon_hadd_s16, gen_helper_neon_hadd_u16 },
                    { gen_helper_neon_hadd_s32, gen_helper_neon_hadd_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0x2: /* SRHADD, URHADD */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_rhadd_s8, gen_helper_neon_rhadd_u8 },
                    { gen_helper_neon_rhadd_s16, gen_helper_neon_rhadd_u16 },
                    { gen_helper_neon_rhadd_s32, gen_helper_neon_rhadd_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0x4: /* SHSUB, UHSUB */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_hsub_s8, gen_helper_neon_hsub_u8 },
                    { gen_helper_neon_hsub_s16, gen_helper_neon_hsub_u16 },
                    { gen_helper_neon_hsub_s32, gen_helper_neon_hsub_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0x9: /* SQSHL, UQSHL */
            {
                static NeonGenTwoOpEnvFn * const fns[3][2] = {
                    { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 },
                    { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 },
                    { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 },
                };
                genenvfn = fns[size][u];
                break;
            }
            case 0xa: /* SRSHL, URSHL */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_rshl_s8, gen_helper_neon_rshl_u8 },
                    { gen_helper_neon_rshl_s16, gen_helper_neon_rshl_u16 },
                    { gen_helper_neon_rshl_s32, gen_helper_neon_rshl_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0xb: /* SQRSHL, UQRSHL */
            {
                static NeonGenTwoOpEnvFn * const fns[3][2] = {
                    { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 },
                    { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 },
                    { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 },
                };
                genenvfn = fns[size][u];
                break;
            }
            default:
                g_assert_not_reached();
            }

            /* saturating ops take cpu_env so they can set QC */
            if (genenvfn) {
                genenvfn(tcg_res, cpu_env, tcg_op1, tcg_op2);
            } else {
                genfn(tcg_res, tcg_op1, tcg_op2);
            }

            write_vec_element_i32(s, tcg_res, rd, pass, MO_32);

            tcg_temp_free_i32(tcg_res);
            tcg_temp_free_i32(tcg_op1);
            tcg_temp_free_i32(tcg_op2);
        }
    }
    clear_vec_high(s, is_q, rd);
}
11877
11878
11879
11880
11881
11882
11883
11884static void disas_simd_three_reg_same(DisasContext *s, uint32_t insn)
11885{
11886 int opcode = extract32(insn, 11, 5);
11887
11888 switch (opcode) {
11889 case 0x3:
11890 disas_simd_3same_logic(s, insn);
11891 break;
11892 case 0x17:
11893 case 0x14:
11894 case 0x15:
11895 {
11896
11897 int is_q = extract32(insn, 30, 1);
11898 int u = extract32(insn, 29, 1);
11899 int size = extract32(insn, 22, 2);
11900 int rm = extract32(insn, 16, 5);
11901 int rn = extract32(insn, 5, 5);
11902 int rd = extract32(insn, 0, 5);
11903 if (opcode == 0x17) {
11904 if (u || (size == 3 && !is_q)) {
11905 unallocated_encoding(s);
11906 return;
11907 }
11908 } else {
11909 if (size == 3) {
11910 unallocated_encoding(s);
11911 return;
11912 }
11913 }
11914 handle_simd_3same_pair(s, is_q, u, opcode, size, rn, rm, rd);
11915 break;
11916 }
11917 case 0x18 ... 0x31:
11918
11919 disas_simd_3same_float(s, insn);
11920 break;
11921 default:
11922 disas_simd_3same_int(s, insn);
11923 break;
11924 }
11925}
11926
11927
11928
11929
11930
11931
11932
11933
11934
11935
11936
11937
11938
static void disas_simd_three_reg_same_fp16(DisasContext *s, uint32_t insn)
{
    int opcode, fpopcode;
    int is_q, u, a, rm, rn, rd;
    int datasize, elements;
    int pass;
    TCGv_ptr fpst;
    bool pairwise = false;

    /* whole group requires FEAT_FP16 */
    if (!dc_isar_feature(aa64_fp16, s)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /*
     * For these floating point ops, the U, a and opcode bits together
     * indicate the operation.
     */
    opcode = extract32(insn, 11, 3);
    u = extract32(insn, 29, 1);
    a = extract32(insn, 23, 1);
    is_q = extract32(insn, 30, 1);
    rm = extract32(insn, 16, 5);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    fpopcode = opcode | (a << 3) | (u << 4);
    datasize = is_q ? 128 : 64;
    elements = datasize / 16; /* half-precision elements */

    switch (fpopcode) {
    case 0x10: /* FMAXNMP */
    case 0x12: /* FADDP */
    case 0x16: /* FMAXP */
    case 0x18: /* FMINNMP */
    case 0x1e: /* FMINP */
        pairwise = true;
        break;
    }

    fpst = fpstatus_ptr(FPST_FPCR_F16);

    if (pairwise) {
        int maxpass = is_q ? 8 : 4;
        TCGv_i32 tcg_op1 = tcg_temp_new_i32();
        TCGv_i32 tcg_op2 = tcg_temp_new_i32();
        TCGv_i32 tcg_res[8];

        for (pass = 0; pass < maxpass; pass++) {
            /* low half of the results from Rn, high half from Rm */
            int passreg = pass < (maxpass / 2) ? rn : rm;
            int passelt = (pass << 1) & (maxpass - 1);

            read_vec_element_i32(s, tcg_op1, passreg, passelt, MO_16);
            read_vec_element_i32(s, tcg_op2, passreg, passelt + 1, MO_16);
            tcg_res[pass] = tcg_temp_new_i32();

            switch (fpopcode) {
            case 0x10: /* FMAXNMP */
                gen_helper_advsimd_maxnumh(tcg_res[pass], tcg_op1, tcg_op2,
                                           fpst);
                break;
            case 0x12: /* FADDP */
                gen_helper_advsimd_addh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x16: /* FMAXP */
                gen_helper_advsimd_maxh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x18: /* FMINNMP */
                gen_helper_advsimd_minnumh(tcg_res[pass], tcg_op1, tcg_op2,
                                           fpst);
                break;
            case 0x1e: /* FMINP */
                gen_helper_advsimd_minh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }
        }

        /* Results written after all reads, so Rd may alias Rn/Rm */
        for (pass = 0; pass < maxpass; pass++) {
            write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_16);
            tcg_temp_free_i32(tcg_res[pass]);
        }

        tcg_temp_free_i32(tcg_op1);
        tcg_temp_free_i32(tcg_op2);

    } else {
        for (pass = 0; pass < elements; pass++) {
            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op1, rn, pass, MO_16);
            read_vec_element_i32(s, tcg_op2, rm, pass, MO_16);

            switch (fpopcode) {
            case 0x0: /* FMAXNM */
                gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1: /* FMLA */
                read_vec_element_i32(s, tcg_res, rd, pass, MO_16);
                gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_res,
                                           fpst);
                break;
            case 0x2: /* FADD */
                gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3: /* FMULX */
                gen_helper_advsimd_mulxh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x4: /* FCMEQ */
                gen_helper_advsimd_ceq_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x6: /* FMAX */
                gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7: /* FRECPS */
                gen_helper_recpsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x8: /* FMINNM */
                gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x9: /* FMLS */
                /* As usual for ARM, separate negation for fused multiply-add:
                 * flip the half-precision sign bit of op1.
                 */
                tcg_gen_xori_i32(tcg_op1, tcg_op1, 0x8000);
                read_vec_element_i32(s, tcg_res, rd, pass, MO_16);
                gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_res,
                                           fpst);
                break;
            case 0xa: /* FSUB */
                gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xe: /* FMIN */
                gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xf: /* FRSQRTS */
                gen_helper_rsqrtsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x13: /* FMUL */
                gen_helper_advsimd_mulh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x14: /* FCMGE */
                gen_helper_advsimd_cge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x15: /* FACGE */
                gen_helper_advsimd_acge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x17: /* FDIV */
                gen_helper_advsimd_divh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1a: /* FABD */
                gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
                /* clear the sign bit of the half-precision result */
                tcg_gen_andi_i32(tcg_res, tcg_res, 0x7fff);
                break;
            case 0x1c: /* FCMGT */
                gen_helper_advsimd_cgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1d: /* FACGT */
                gen_helper_advsimd_acgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                fprintf(stderr, "%s: insn 0x%04x, fpop 0x%2x @ 0x%" PRIx64 "\n",
                        __func__, insn, fpopcode, s->pc_curr);
                g_assert_not_reached();
            }

            write_vec_element_i32(s, tcg_res, rd, pass, MO_16);
            tcg_temp_free_i32(tcg_res);
            tcg_temp_free_i32(tcg_op1);
            tcg_temp_free_i32(tcg_op2);
        }
    }

    tcg_temp_free_ptr(fpst);

    clear_vec_high(s, is_q, rd);
}
12120
12121
12122
12123
12124
12125
12126
/*
 * AdvSIMD three same extra: feature-gated three-register ops
 * (SQRDMLAH/SQRDMLSH, SDOT/UDOT, FCMLA, FCADD).
 */
static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 4);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    bool u = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);
    bool feature;
    int rot;

    /* Combined U:opcode selects the operation */
    switch (u * 16 + opcode) {
    case 0x10: /* SQRDMLAH (vector) */
    case 0x11: /* SQRDMLSH (vector) */
        if (size != 1 && size != 2) {
            unallocated_encoding(s);
            return;
        }
        feature = dc_isar_feature(aa64_rdm, s);
        break;
    case 0x02: /* SDOT (vector) */
    case 0x12: /* UDOT (vector) */
        if (size != MO_32) {
            unallocated_encoding(s);
            return;
        }
        feature = dc_isar_feature(aa64_dp, s);
        break;
    case 0x18: /* FCMLA, #0 */
    case 0x19: /* FCMLA, #90 */
    case 0x1a: /* FCMLA, #180 */
    case 0x1b: /* FCMLA, #270 */
    case 0x1c: /* FCADD, #90 */
    case 0x1e: /* FCADD, #270 */
        /* no byte elements; fp16 needs FEAT_FP16; fp64 needs Q form */
        if (size == 0
            || (size == 1 && !dc_isar_feature(aa64_fp16, s))
            || (size == 3 && !is_q)) {
            unallocated_encoding(s);
            return;
        }
        feature = dc_isar_feature(aa64_fcma, s);
        break;
    default:
        unallocated_encoding(s);
        return;
    }
    if (!feature) {
        unallocated_encoding(s);
        return;
    }
    if (!fp_access_check(s)) {
        return;
    }

    switch (opcode) {
    case 0x0: /* SQRDMLAH (vector) */
        gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sqrdmlah_qc, size);
        return;

    case 0x1: /* SQRDMLSH (vector) */
        gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sqrdmlsh_qc, size);
        return;

    case 0x2: /* SDOT / UDOT */
        gen_gvec_op3_ool(s, is_q, rd, rn, rm, 0,
                         u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b);
        return;

    case 0x8: /* FCMLA, #0 */
    case 0x9: /* FCMLA, #90 */
    case 0xa: /* FCMLA, #180 */
    case 0xb: /* FCMLA, #270 */
        rot = extract32(opcode, 0, 2); /* rotation from low opcode bits */
        switch (size) {
        case 1:
            gen_gvec_op3_fpst(s, is_q, rd, rn, rm, true, rot,
                              gen_helper_gvec_fcmlah);
            break;
        case 2:
            gen_gvec_op3_fpst(s, is_q, rd, rn, rm, false, rot,
                              gen_helper_gvec_fcmlas);
            break;
        case 3:
            gen_gvec_op3_fpst(s, is_q, rd, rn, rm, false, rot,
                              gen_helper_gvec_fcmlad);
            break;
        default:
            g_assert_not_reached();
        }
        return;

    case 0xc: /* FCADD, #90 */
    case 0xe: /* FCADD, #270 */
        rot = extract32(opcode, 1, 1);
        switch (size) {
        case 1:
            gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
                              gen_helper_gvec_fcaddh);
            break;
        case 2:
            gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
                              gen_helper_gvec_fcadds);
            break;
        case 3:
            gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
                              gen_helper_gvec_fcaddd);
            break;
        default:
            g_assert_not_reached();
        }
        return;

    default:
        g_assert_not_reached();
    }
}
12244
static void handle_2misc_widening(DisasContext *s, int opcode, bool is_q,
                                  int size, int rn, int rd)
{
    /*
     * Floating point conversion with a wider result (FCVTL-style):
     * size == 3 converts single -> double, otherwise half -> single.
     * is_q selects the high half of the source vector as input.
     */
    int pass;

    if (size == 3) {
        /* 32 -> 64 bit fp conversion */
        TCGv_i64 tcg_res[2];
        int srcelt = is_q ? 2 : 0;

        for (pass = 0; pass < 2; pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();
            tcg_res[pass] = tcg_temp_new_i64();

            read_vec_element_i32(s, tcg_op, rn, srcelt + pass, MO_32);
            gen_helper_vfp_fcvtds(tcg_res[pass], tcg_op, cpu_env);
            tcg_temp_free_i32(tcg_op);
        }
        /* Write results only after all reads, so Rd may alias Rn */
        for (pass = 0; pass < 2; pass++) {
            write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
            tcg_temp_free_i64(tcg_res[pass]);
        }
    } else {
        /* 16 -> 32 bit fp conversion */
        int srcelt = is_q ? 4 : 0;
        TCGv_i32 tcg_res[4];
        TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
        TCGv_i32 ahp = get_ahp_flag();

        for (pass = 0; pass < 4; pass++) {
            tcg_res[pass] = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_res[pass], rn, srcelt + pass, MO_16);
            gen_helper_vfp_fcvt_f16_to_f32(tcg_res[pass], tcg_res[pass],
                                           fpst, ahp);
        }
        for (pass = 0; pass < 4; pass++) {
            write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32);
            tcg_temp_free_i32(tcg_res[pass]);
        }

        tcg_temp_free_ptr(fpst);
        tcg_temp_free_i32(ahp);
    }
}
12294
/*
 * Element-reversal group (REV16/REV32/REV64 family): reverse the order
 * of size-sized elements within each group of 2^grp_size bytes.
 */
static void handle_rev(DisasContext *s, int opcode, bool u,
                       bool is_q, int size, int rn, int rd)
{
    int op = (opcode << 1) | u;
    /* op + size together determine the group width being reversed */
    int opsz = op + size;
    int grp_size = 3 - opsz;
    int dsize = is_q ? 128 : 64;
    int i;

    if (opsz >= 3) {
        /* element size >= group size: nothing to reverse */
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (size == 0) {
        /* Special case: byte elements — reverse each group with a bswap */
        int groups = dsize / (8 << grp_size);

        for (i = 0; i < groups; i++) {
            TCGv_i64 tcg_tmp = tcg_temp_new_i64();

            read_vec_element(s, tcg_tmp, rn, i, grp_size);
            switch (grp_size) {
            case MO_16:
                tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp);
                break;
            case MO_32:
                tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp);
                break;
            case MO_64:
                tcg_gen_bswap64_i64(tcg_tmp, tcg_tmp);
                break;
            default:
                g_assert_not_reached();
            }
            write_vec_element(s, tcg_tmp, rd, i, grp_size);
            tcg_temp_free_i64(tcg_tmp);
        }
        clear_vec_high(s, is_q, rd);
    } else {
        /*
         * General case: read each element and deposit it at its
         * reversed position (element index XORed with revmask).
         */
        int revmask = (1 << grp_size) - 1;
        int esize = 8 << size;
        int elements = dsize / esize;
        TCGv_i64 tcg_rn = tcg_temp_new_i64();
        TCGv_i64 tcg_rd = tcg_const_i64(0);
        TCGv_i64 tcg_rd_hi = tcg_const_i64(0);

        for (i = 0; i < elements; i++) {
            int e_rev = (i & 0xf) ^ revmask;
            int off = e_rev * esize;
            read_vec_element(s, tcg_rn, rn, i, size);
            if (off >= 64) {
                /* destination bit offset falls in the high 64 bits */
                tcg_gen_deposit_i64(tcg_rd_hi, tcg_rd_hi,
                                    tcg_rn, off - 64, esize);
            } else {
                tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, off, esize);
            }
        }
        write_vec_element(s, tcg_rd, rd, 0, MO_64);
        write_vec_element(s, tcg_rd_hi, rd, 1, MO_64);

        tcg_temp_free_i64(tcg_rd_hi);
        tcg_temp_free_i64(tcg_rd);
        tcg_temp_free_i64(tcg_rn);
    }
}
12365
static void handle_2misc_pairwise(DisasContext *s, int opcode, bool u,
                                  bool is_q, int size, int rn, int rd)
{
    /*
     * Pairwise add-long group: add adjacent pairs of elements, producing
     * elements twice as wide (SADDLP/UADDLP), optionally accumulating
     * into Rd (SADALP/UADALP, opcode 0x6).  u selects unsigned.
     */
    bool accum = (opcode == 0x6);
    int maxpass = is_q ? 2 : 1;
    int pass;
    TCGv_i64 tcg_res[2];

    if (size == 2) {
        /* 32 + 32 -> 64 op: do the addition directly at 64 bits */
        MemOp memop = size + (u ? 0 : MO_SIGN);

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();

            tcg_res[pass] = tcg_temp_new_i64();

            read_vec_element(s, tcg_op1, rn, pass * 2, memop);
            read_vec_element(s, tcg_op2, rn, pass * 2 + 1, memop);
            tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2);
            if (accum) {
                read_vec_element(s, tcg_op1, rd, pass, MO_64);
                tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
            }

            tcg_temp_free_i64(tcg_op1);
            tcg_temp_free_i64(tcg_op2);
        }
    } else {
        /* 8- and 16-bit elements: whole-64-bit-lane pairwise helpers */
        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();
            NeonGenOne64OpFn *genfn;
            static NeonGenOne64OpFn * const fns[2][2] = {
                { gen_helper_neon_addlp_s8, gen_helper_neon_addlp_u8 },
                { gen_helper_neon_addlp_s16, gen_helper_neon_addlp_u16 },
            };

            genfn = fns[size][u];

            tcg_res[pass] = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);
            genfn(tcg_res[pass], tcg_op);

            if (accum) {
                read_vec_element(s, tcg_op, rd, pass, MO_64);
                /* accumulate per widened lane, without cross-lane carry */
                if (size == 0) {
                    gen_helper_neon_addl_u16(tcg_res[pass],
                                             tcg_res[pass], tcg_op);
                } else {
                    gen_helper_neon_addl_u32(tcg_res[pass],
                                             tcg_res[pass], tcg_op);
                }
            }
            tcg_temp_free_i64(tcg_op);
        }
    }
    if (!is_q) {
        /* 64-bit form: zero the high half of Rd */
        tcg_res[1] = tcg_const_i64(0);
    }
    for (pass = 0; pass < 2; pass++) {
        write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
        tcg_temp_free_i64(tcg_res[pass]);
    }
}
12437
static void handle_shll(DisasContext *s, bool is_q, int size, int rn, int rd)
{
    /*
     * SHLL/SHLL2: widen each element of the low (or, for is_q, high)
     * half of Rn and shift it left by the element size.
     */
    int pass;
    int part = is_q ? 2 : 0;
    TCGv_i64 tcg_res[2];

    for (pass = 0; pass < 2; pass++) {
        static NeonGenWidenFn * const widenfns[3] = {
            gen_helper_neon_widen_u8,
            gen_helper_neon_widen_u16,
            tcg_gen_extu_i32_i64,
        };
        NeonGenWidenFn *widenfn = widenfns[size];
        TCGv_i32 tcg_op = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_op, rn, part + pass, MO_32);
        tcg_res[pass] = tcg_temp_new_i64();
        widenfn(tcg_res[pass], tcg_op);
        /* shift amount equals the (pre-widening) element size in bits */
        tcg_gen_shli_i64(tcg_res[pass], tcg_res[pass], 8 << size);

        tcg_temp_free_i32(tcg_op);
    }

    /* Write results only after both reads, so Rd may alias Rn */
    for (pass = 0; pass < 2; pass++) {
        write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
        tcg_temp_free_i64(tcg_res[pass]);
    }
}
12467
12468
12469
12470
12471
12472
12473
12474static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
12475{
12476 int size = extract32(insn, 22, 2);
12477 int opcode = extract32(insn, 12, 5);
12478 bool u = extract32(insn, 29, 1);
12479 bool is_q = extract32(insn, 30, 1);
12480 int rn = extract32(insn, 5, 5);
12481 int rd = extract32(insn, 0, 5);
12482 bool need_fpstatus = false;
12483 bool need_rmode = false;
12484 int rmode = -1;
12485 TCGv_i32 tcg_rmode;
12486 TCGv_ptr tcg_fpstatus;
12487
12488 switch (opcode) {
12489 case 0x0:
12490 case 0x1:
12491 handle_rev(s, opcode, u, is_q, size, rn, rd);
12492 return;
12493 case 0x5:
12494 if (u && size == 0) {
12495
12496 break;
12497 } else if (u && size == 1) {
12498
12499 break;
12500 } else if (!u && size == 0) {
12501
12502 break;
12503 }
12504 unallocated_encoding(s);
12505 return;
12506 case 0x12:
12507 case 0x14:
12508 if (size == 3) {
12509 unallocated_encoding(s);
12510 return;
12511 }
12512 if (!fp_access_check(s)) {
12513 return;
12514 }
12515
12516 handle_2misc_narrow(s, false, opcode, u, is_q, size, rn, rd);
12517 return;
12518 case 0x4:
12519 if (size == 3) {
12520 unallocated_encoding(s);
12521 return;
12522 }
12523 break;
12524 case 0x2:
12525 case 0x6:
12526 if (size == 3) {
12527 unallocated_encoding(s);
12528 return;
12529 }
12530 if (!fp_access_check(s)) {
12531 return;
12532 }
12533 handle_2misc_pairwise(s, opcode, u, is_q, size, rn, rd);
12534 return;
12535 case 0x13:
12536 if (u == 0 || size == 3) {
12537 unallocated_encoding(s);
12538 return;
12539 }
12540 if (!fp_access_check(s)) {
12541 return;
12542 }
12543 handle_shll(s, is_q, size, rn, rd);
12544 return;
12545 case 0xa:
12546 if (u == 1) {
12547 unallocated_encoding(s);
12548 return;
12549 }
12550
12551 case 0x8:
12552 case 0x9:
12553 case 0xb:
12554 if (size == 3 && !is_q) {
12555 unallocated_encoding(s);
12556 return;
12557 }
12558 break;
12559 case 0x3:
12560 if (size == 3 && !is_q) {
12561 unallocated_encoding(s);
12562 return;
12563 }
12564 if (!fp_access_check(s)) {
12565 return;
12566 }
12567 handle_2misc_satacc(s, false, u, is_q, size, rn, rd);
12568 return;
12569 case 0x7:
12570 if (size == 3 && !is_q) {
12571 unallocated_encoding(s);
12572 return;
12573 }
12574 break;
12575 case 0xc ... 0xf:
12576 case 0x16 ... 0x1f:
12577 {
12578
12579
12580
12581 int is_double = extract32(size, 0, 1);
12582 opcode |= (extract32(size, 1, 1) << 5) | (u << 6);
12583 size = is_double ? 3 : 2;
12584 switch (opcode) {
12585 case 0x2f:
12586 case 0x6f:
12587 if (size == 3 && !is_q) {
12588 unallocated_encoding(s);
12589 return;
12590 }
12591 break;
12592 case 0x1d:
12593 case 0x5d:
12594 {
12595 bool is_signed = (opcode == 0x1d) ? true : false;
12596 int elements = is_double ? 2 : is_q ? 4 : 2;
12597 if (is_double && !is_q) {
12598 unallocated_encoding(s);
12599 return;
12600 }
12601 if (!fp_access_check(s)) {
12602 return;
12603 }
12604 handle_simd_intfp_conv(s, rd, rn, elements, is_signed, 0, size);
12605 return;
12606 }
12607 case 0x2c:
12608 case 0x2d:
12609 case 0x2e:
12610 case 0x6c:
12611 case 0x6d:
12612 if (size == 3 && !is_q) {
12613 unallocated_encoding(s);
12614 return;
12615 }
12616 handle_2misc_fcmp_zero(s, opcode, false, u, is_q, size, rn, rd);
12617 return;
12618 case 0x7f:
12619 if (size == 3 && !is_q) {
12620 unallocated_encoding(s);
12621 return;
12622 }
12623 break;
12624 case 0x1a:
12625 case 0x1b:
12626 case 0x3a:
12627 case 0x3b:
12628 case 0x5a:
12629 case 0x5b:
12630 case 0x7a:
12631 case 0x7b:
12632 need_fpstatus = true;
12633 need_rmode = true;
12634 rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
12635 if (size == 3 && !is_q) {
12636 unallocated_encoding(s);
12637 return;
12638 }
12639 break;
12640 case 0x5c:
12641 case 0x1c:
12642 need_fpstatus = true;
12643 need_rmode = true;
12644 rmode = FPROUNDING_TIEAWAY;
12645 if (size == 3 && !is_q) {
12646 unallocated_encoding(s);
12647 return;
12648 }
12649 break;
12650 case 0x3c:
12651 if (size == 3) {
12652 unallocated_encoding(s);
12653 return;
12654 }
12655
12656 case 0x3d:
12657 case 0x7d:
12658 if (size == 3 && !is_q) {
12659 unallocated_encoding(s);
12660 return;
12661 }
12662 if (!fp_access_check(s)) {
12663 return;
12664 }
12665 handle_2misc_reciprocal(s, opcode, false, u, is_q, size, rn, rd);
12666 return;
12667 case 0x56:
12668 if (size == 2) {
12669 unallocated_encoding(s);
12670 return;
12671 }
12672
12673 case 0x16:
12674
12675
12676
12677 if (!fp_access_check(s)) {
12678 return;
12679 }
12680 handle_2misc_narrow(s, false, opcode, 0, is_q, size - 1, rn, rd);
12681 return;
12682 case 0x17:
12683 if (!fp_access_check(s)) {
12684 return;
12685 }
12686 handle_2misc_widening(s, opcode, is_q, size, rn, rd);
12687 return;
12688 case 0x18:
12689 case 0x19:
12690 case 0x38:
12691 case 0x39:
12692 need_rmode = true;
12693 rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
12694
12695 case 0x59:
12696 case 0x79:
12697 need_fpstatus = true;
12698 if (size == 3 && !is_q) {
12699 unallocated_encoding(s);
12700 return;
12701 }
12702 break;
12703 case 0x58:
12704 need_rmode = true;
12705 rmode = FPROUNDING_TIEAWAY;
12706 need_fpstatus = true;
12707 if (size == 3 && !is_q) {
12708 unallocated_encoding(s);
12709 return;
12710 }
12711 break;
12712 case 0x7c:
12713 if (size == 3) {
12714 unallocated_encoding(s);
12715 return;
12716 }
12717 break;
12718 case 0x1e:
12719 case 0x1f:
12720 need_rmode = true;
12721 rmode = FPROUNDING_ZERO;
12722
12723 case 0x5e:
12724 case 0x5f:
12725 need_fpstatus = true;
12726 if ((size == 3 && !is_q) || !dc_isar_feature(aa64_frint, s)) {
12727 unallocated_encoding(s);
12728 return;
12729 }
12730 break;
12731 default:
12732 unallocated_encoding(s);
12733 return;
12734 }
12735 break;
12736 }
12737 default:
12738 unallocated_encoding(s);
12739 return;
12740 }
12741
12742 if (!fp_access_check(s)) {
12743 return;
12744 }
12745
12746 if (need_fpstatus || need_rmode) {
12747 tcg_fpstatus = fpstatus_ptr(FPST_FPCR);
12748 } else {
12749 tcg_fpstatus = NULL;
12750 }
12751 if (need_rmode) {
12752 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
12753 gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
12754 } else {
12755 tcg_rmode = NULL;
12756 }
12757
12758 switch (opcode) {
12759 case 0x5:
12760 if (u && size == 0) {
12761 gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_not, 0);
12762 return;
12763 }
12764 break;
12765 case 0x8:
12766 if (u) {
12767 gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cge0, size);
12768 } else {
12769 gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cgt0, size);
12770 }
12771 return;
12772 case 0x9:
12773 if (u) {
12774 gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cle0, size);
12775 } else {
12776 gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_ceq0, size);
12777 }
12778 return;
12779 case 0xa:
12780 gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_clt0, size);
12781 return;
12782 case 0xb:
12783 if (u) {
12784 gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_neg, size);
12785 } else {
12786 gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_abs, size);
12787 }
12788 return;
12789 }
12790
12791 if (size == 3) {
12792
12793 int pass;
12794
12795
12796
12797
12798 tcg_debug_assert(is_q);
12799 for (pass = 0; pass < 2; pass++) {
12800 TCGv_i64 tcg_op = tcg_temp_new_i64();
12801 TCGv_i64 tcg_res = tcg_temp_new_i64();
12802
12803 read_vec_element(s, tcg_op, rn, pass, MO_64);
12804
12805 handle_2misc_64(s, opcode, u, tcg_res, tcg_op,
12806 tcg_rmode, tcg_fpstatus);
12807
12808 write_vec_element(s, tcg_res, rd, pass, MO_64);
12809
12810 tcg_temp_free_i64(tcg_res);
12811 tcg_temp_free_i64(tcg_op);
12812 }
12813 } else {
12814 int pass;
12815
12816 for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
12817 TCGv_i32 tcg_op = tcg_temp_new_i32();
12818 TCGv_i32 tcg_res = tcg_temp_new_i32();
12819
12820 read_vec_element_i32(s, tcg_op, rn, pass, MO_32);
12821
12822 if (size == 2) {
12823
12824 switch (opcode) {
12825 case 0x4:
12826 if (u) {
12827 tcg_gen_clzi_i32(tcg_res, tcg_op, 32);
12828 } else {
12829 tcg_gen_clrsb_i32(tcg_res, tcg_op);
12830 }
12831 break;
12832 case 0x7:
12833 if (u) {
12834 gen_helper_neon_qneg_s32(tcg_res, cpu_env, tcg_op);
12835 } else {
12836 gen_helper_neon_qabs_s32(tcg_res, cpu_env, tcg_op);
12837 }
12838 break;
12839 case 0x2f:
12840 gen_helper_vfp_abss(tcg_res, tcg_op);
12841 break;
12842 case 0x6f:
12843 gen_helper_vfp_negs(tcg_res, tcg_op);
12844 break;
12845 case 0x7f:
12846 gen_helper_vfp_sqrts(tcg_res, tcg_op, cpu_env);
12847 break;
12848 case 0x1a:
12849 case 0x1b:
12850 case 0x1c:
12851 case 0x3a:
12852 case 0x3b:
12853 {
12854 TCGv_i32 tcg_shift = tcg_const_i32(0);
12855 gen_helper_vfp_tosls(tcg_res, tcg_op,
12856 tcg_shift, tcg_fpstatus);
12857 tcg_temp_free_i32(tcg_shift);
12858 break;
12859 }
12860 case 0x5a:
12861 case 0x5b:
12862 case 0x5c:
12863 case 0x7a:
12864 case 0x7b:
12865 {
12866 TCGv_i32 tcg_shift = tcg_const_i32(0);
12867 gen_helper_vfp_touls(tcg_res, tcg_op,
12868 tcg_shift, tcg_fpstatus);
12869 tcg_temp_free_i32(tcg_shift);
12870 break;
12871 }
12872 case 0x18:
12873 case 0x19:
12874 case 0x38:
12875 case 0x39:
12876 case 0x58:
12877 case 0x79:
12878 gen_helper_rints(tcg_res, tcg_op, tcg_fpstatus);
12879 break;
12880 case 0x59:
12881 gen_helper_rints_exact(tcg_res, tcg_op, tcg_fpstatus);
12882 break;
12883 case 0x7c:
12884 gen_helper_rsqrte_u32(tcg_res, tcg_op);
12885 break;
12886 case 0x1e:
12887 case 0x5e:
12888 gen_helper_frint32_s(tcg_res, tcg_op, tcg_fpstatus);
12889 break;
12890 case 0x1f:
12891 case 0x5f:
12892 gen_helper_frint64_s(tcg_res, tcg_op, tcg_fpstatus);
12893 break;
12894 default:
12895 g_assert_not_reached();
12896 }
12897 } else {
12898
12899 switch (opcode) {
12900 case 0x5:
12901
12902
12903
12904 if (u) {
12905 gen_helper_neon_rbit_u8(tcg_res, tcg_op);
12906 } else {
12907 gen_helper_neon_cnt_u8(tcg_res, tcg_op);
12908 }
12909 break;
12910 case 0x7:
12911 {
12912 NeonGenOneOpEnvFn *genfn;
12913 static NeonGenOneOpEnvFn * const fns[2][2] = {
12914 { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 },
12915 { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 },
12916 };
12917 genfn = fns[size][u];
12918 genfn(tcg_res, cpu_env, tcg_op);
12919 break;
12920 }
12921 case 0x4:
12922 if (u) {
12923 if (size == 0) {
12924 gen_helper_neon_clz_u8(tcg_res, tcg_op);
12925 } else {
12926 gen_helper_neon_clz_u16(tcg_res, tcg_op);
12927 }
12928 } else {
12929 if (size == 0) {
12930 gen_helper_neon_cls_s8(tcg_res, tcg_op);
12931 } else {
12932 gen_helper_neon_cls_s16(tcg_res, tcg_op);
12933 }
12934 }
12935 break;
12936 default:
12937 g_assert_not_reached();
12938 }
12939 }
12940
12941 write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
12942
12943 tcg_temp_free_i32(tcg_res);
12944 tcg_temp_free_i32(tcg_op);
12945 }
12946 }
12947 clear_vec_high(s, is_q, rd);
12948
12949 if (need_rmode) {
12950 gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
12951 tcg_temp_free_i32(tcg_rmode);
12952 }
12953 if (need_fpstatus) {
12954 tcg_temp_free_ptr(tcg_fpstatus);
12955 }
12956}
12957
/* AdvSIMD [scalar] two register miscellaneous (FP16)
 *
 *   31 30 29 28  27     24  23 22 21       17 16    12 11 10 9    5 4    0
 * +---+---+---+---+---------+---+-------------+--------+-----+------+------+
 * | 0 | Q | U | S | 1 1 1 0 | a | 1 1 1 1 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +---+---+---+---+---------+---+-------------+--------+-----+------+------+
 *
 * This covers two groups where scalar access is governed by bit 28
 * (is_scalar).  Several of the instructions (the float-to-integral
 * rounds) only exist in the vector form and are unallocated for the
 * scalar decode; in the scalar decode Q must be 1.
 */
static void disas_simd_two_reg_misc_fp16(DisasContext *s, uint32_t insn)
{
    int fpop, opcode, a, u;
    int rn, rd;
    bool is_q;
    bool is_scalar;
    bool only_in_vector = false;

    int pass;
    TCGv_i32 tcg_rmode = NULL;
    TCGv_ptr tcg_fpstatus = NULL;
    bool need_rmode = false;
    bool need_fpst = true;
    int rmode;

    if (!dc_isar_feature(aa64_fp16, s)) {
        unallocated_encoding(s);
        return;
    }

    rd = extract32(insn, 0, 5);
    rn = extract32(insn, 5, 5);

    a = extract32(insn, 23, 1);
    u = extract32(insn, 29, 1);
    is_scalar = extract32(insn, 28, 1);
    is_q = extract32(insn, 30, 1);

    /* Fold the 'a' and 'u' bits into the opcode to get a single fpop
     * value that identifies the operation in the switches below.
     */
    opcode = extract32(insn, 12, 5);
    fpop = deposit32(opcode, 5, 1, a);
    fpop = deposit32(fpop, 6, 1, u);

    switch (fpop) {
    case 0x1d: /* SCVTF */
    case 0x5d: /* UCVTF */
    {
        int elements;

        if (is_scalar) {
            elements = 1;
        } else {
            elements = (is_q ? 8 : 4);
        }

        if (!fp_access_check(s)) {
            return;
        }
        handle_simd_intfp_conv(s, rd, rn, elements, !u, 0, MO_16);
        return;
    }
    break;
    case 0x2c: /* FCMGT (zero) */
    case 0x2d: /* FCMEQ (zero) */
    case 0x2e: /* FCMLT (zero) */
    case 0x6c: /* FCMGE (zero) */
    case 0x6d: /* FCMLE (zero) */
        handle_2misc_fcmp_zero(s, fpop, is_scalar, 0, is_q, MO_16, rn, rd);
        return;
    case 0x3d: /* FRECPE */
    case 0x3f: /* FRECPX */
        break;
    case 0x18: /* FRINTN */
        need_rmode = true;
        only_in_vector = true;
        rmode = FPROUNDING_TIEEVEN;
        break;
    case 0x19: /* FRINTM */
        need_rmode = true;
        only_in_vector = true;
        rmode = FPROUNDING_NEGINF;
        break;
    case 0x38: /* FRINTP */
        need_rmode = true;
        only_in_vector = true;
        rmode = FPROUNDING_POSINF;
        break;
    case 0x39: /* FRINTZ */
        need_rmode = true;
        only_in_vector = true;
        rmode = FPROUNDING_ZERO;
        break;
    case 0x58: /* FRINTA */
        need_rmode = true;
        only_in_vector = true;
        rmode = FPROUNDING_TIEAWAY;
        break;
    case 0x59: /* FRINTX */
    case 0x79: /* FRINTI */
        only_in_vector = true;
        /* current rounding mode */
        break;
    case 0x1a: /* FCVTNS */
        need_rmode = true;
        rmode = FPROUNDING_TIEEVEN;
        break;
    case 0x1b: /* FCVTMS */
        need_rmode = true;
        rmode = FPROUNDING_NEGINF;
        break;
    case 0x1c: /* FCVTAS */
        need_rmode = true;
        rmode = FPROUNDING_TIEAWAY;
        break;
    case 0x3a: /* FCVTPS */
        need_rmode = true;
        rmode = FPROUNDING_POSINF;
        break;
    case 0x3b: /* FCVTZS */
        need_rmode = true;
        rmode = FPROUNDING_ZERO;
        break;
    case 0x5a: /* FCVTNU */
        need_rmode = true;
        rmode = FPROUNDING_TIEEVEN;
        break;
    case 0x5b: /* FCVTMU */
        need_rmode = true;
        rmode = FPROUNDING_NEGINF;
        break;
    case 0x5c: /* FCVTAU */
        need_rmode = true;
        rmode = FPROUNDING_TIEAWAY;
        break;
    case 0x7a: /* FCVTPU */
        need_rmode = true;
        rmode = FPROUNDING_POSINF;
        break;
    case 0x7b: /* FCVTZU */
        need_rmode = true;
        rmode = FPROUNDING_ZERO;
        break;
    case 0x2f: /* FABS */
    case 0x6f: /* FNEG */
        /* Pure sign-bit manipulation: no fpstatus needed. */
        need_fpst = false;
        break;
    case 0x7d: /* FRSQRTE */
    case 0x7f: /* FSQRT (vector) */
        break;
    default:
        fprintf(stderr, "%s: insn 0x%04x fpop 0x%2x\n", __func__, insn, fpop);
        g_assert_not_reached();
    }

    /* Check additional constraints for the scalar encoding */
    if (is_scalar) {
        if (!is_q) {
            unallocated_encoding(s);
            return;
        }
        /* FRINTxx is only in the vector form */
        if (only_in_vector) {
            unallocated_encoding(s);
            return;
        }
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (need_rmode || need_fpst) {
        tcg_fpstatus = fpstatus_ptr(FPST_FPCR_F16);
    }

    if (need_rmode) {
        /* Swap in the requested rounding mode; restored at the end. */
        tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
    }

    if (is_scalar) {
        TCGv_i32 tcg_op = read_fp_hreg(s, rn);
        TCGv_i32 tcg_res = tcg_temp_new_i32();

        switch (fpop) {
        case 0x1a: /* FCVTNS */
        case 0x1b: /* FCVTMS */
        case 0x1c: /* FCVTAS */
        case 0x3a: /* FCVTPS */
        case 0x3b: /* FCVTZS */
            gen_helper_advsimd_f16tosinth(tcg_res, tcg_op, tcg_fpstatus);
            break;
        case 0x3d: /* FRECPE */
            gen_helper_recpe_f16(tcg_res, tcg_op, tcg_fpstatus);
            break;
        case 0x3f: /* FRECPX */
            gen_helper_frecpx_f16(tcg_res, tcg_op, tcg_fpstatus);
            break;
        case 0x5a: /* FCVTNU */
        case 0x5b: /* FCVTMU */
        case 0x5c: /* FCVTAU */
        case 0x7a: /* FCVTPU */
        case 0x7b: /* FCVTZU */
            gen_helper_advsimd_f16touinth(tcg_res, tcg_op, tcg_fpstatus);
            break;
        case 0x6f: /* FNEG */
            tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
            break;
        case 0x7d: /* FRSQRTE */
            gen_helper_rsqrte_f16(tcg_res, tcg_op, tcg_fpstatus);
            break;
        default:
            g_assert_not_reached();
        }

        /* limit any sign extension going on */
        tcg_gen_andi_i32(tcg_res, tcg_res, 0xffff);
        write_fp_sreg(s, rd, tcg_res);

        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_op);
    } else {
        for (pass = 0; pass < (is_q ? 8 : 4); pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op, rn, pass, MO_16);

            switch (fpop) {
            case 0x1a: /* FCVTNS */
            case 0x1b: /* FCVTMS */
            case 0x1c: /* FCVTAS */
            case 0x3a: /* FCVTPS */
            case 0x3b: /* FCVTZS */
                gen_helper_advsimd_f16tosinth(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x3d: /* FRECPE */
                gen_helper_recpe_f16(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x5a: /* FCVTNU */
            case 0x5b: /* FCVTMU */
            case 0x5c: /* FCVTAU */
            case 0x7a: /* FCVTPU */
            case 0x7b: /* FCVTZU */
                gen_helper_advsimd_f16touinth(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x18: /* FRINTN */
            case 0x19: /* FRINTM */
            case 0x38: /* FRINTP */
            case 0x39: /* FRINTZ */
            case 0x58: /* FRINTA */
            case 0x79: /* FRINTI */
                gen_helper_advsimd_rinth(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x59: /* FRINTX */
                gen_helper_advsimd_rinth_exact(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x2f: /* FABS */
                tcg_gen_andi_i32(tcg_res, tcg_op, 0x7fff);
                break;
            case 0x6f: /* FNEG */
                tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
                break;
            case 0x7d: /* FRSQRTE */
                gen_helper_rsqrte_f16(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x7f: /* FSQRT */
                gen_helper_sqrt_f16(tcg_res, tcg_op, tcg_fpstatus);
                break;
            default:
                g_assert_not_reached();
            }

            write_vec_element_i32(s, tcg_res, rd, pass, MO_16);

            tcg_temp_free_i32(tcg_res);
            tcg_temp_free_i32(tcg_op);
        }

        clear_vec_high(s, is_q, rd);
    }

    if (tcg_rmode) {
        /* Restore the rounding mode saved by set_rmode above. */
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
        tcg_temp_free_i32(tcg_rmode);
    }

    if (tcg_fpstatus) {
        tcg_temp_free_ptr(tcg_fpstatus);
    }
}
13253
13254
/* AdvSIMD scalar x indexed element
 *  31 30  29 28       24 23  22 21  20  19  16 15 12  11  10 9    5 4    0
 * +-----+---+-----------+------+---+---+------+-----+---+---+------+------+
 * | 0 1 | U | 1 1 1 1 1 | size | L | M |  Rm  | opc | H | 0 |  Rn  |  Rd  |
 * +-----+---+-----------+------+---+---+------+-----+---+---+------+------+
 * AdvSIMD vector x indexed element
 *   31  30  29 28       24 23  22 21  20  19  16 15 12  11  10 9    5 4    0
 * +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+
 * | 0 | Q | U | 0 1 1 1 1 | size | L | M |  Rm  | opc | H | 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+
 */
static void disas_simd_indexed(DisasContext *s, uint32_t insn)
{
    /* This encoding has two kinds of instruction:
     *  normal, where we perform elt x idxelt => elt for each
     *     element in the vector
     *  long, where we perform elt x idxelt and generate a result of
     *     double the width of the input element
     * The "long" ops have a 'part' specifier (ie come in INSN, INSN2 pairs).
     */
    bool is_scalar = extract32(insn, 28, 1);
    bool is_q = extract32(insn, 30, 1);
    bool u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int l = extract32(insn, 21, 1);
    int m = extract32(insn, 20, 1);
    /* Note that the Rm field here is only 4 bits, not 5 as it usually is */
    int rm = extract32(insn, 16, 4);
    int opcode = extract32(insn, 12, 4);
    int h = extract32(insn, 11, 1);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    bool is_long = false;
    int is_fp = 0;
    bool is_fp16 = false;
    int index;
    TCGv_ptr fpst;

    switch (16 * u + opcode) {
    case 0x08: /* MUL */
    case 0x10: /* MLA */
    case 0x14: /* MLS */
        if (is_scalar) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x02: /* SMLAL, SMLAL2 */
    case 0x12: /* UMLAL, UMLAL2 */
    case 0x06: /* SMLSL, SMLSL2 */
    case 0x16: /* UMLSL, UMLSL2 */
    case 0x0a: /* SMULL, SMULL2 */
    case 0x1a: /* UMULL, UMULL2 */
        if (is_scalar) {
            unallocated_encoding(s);
            return;
        }
        is_long = true;
        break;
    case 0x03: /* SQDMLAL, SQDMLAL2 */
    case 0x07: /* SQDMLSL, SQDMLSL2 */
    case 0x0b: /* SQDMULL, SQDMULL2 */
        is_long = true;
        break;
    case 0x0c: /* SQDMULH */
    case 0x0d: /* SQRDMULH */
        break;
    case 0x01: /* FMLA */
    case 0x05: /* FMLS */
    case 0x09: /* FMUL */
    case 0x19: /* FMULX */
        is_fp = 1;
        break;
    case 0x1d: /* SQRDMLAH */
    case 0x1f: /* SQRDMLSH */
        if (!dc_isar_feature(aa64_rdm, s)) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x0e: /* SDOT */
    case 0x1e: /* UDOT */
        if (is_scalar || size != MO_32 || !dc_isar_feature(aa64_dp, s)) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x11: /* FCMLA #0 */
    case 0x13: /* FCMLA #90 */
    case 0x15: /* FCMLA #180 */
    case 0x17: /* FCMLA #270 */
        if (is_scalar || !dc_isar_feature(aa64_fcma, s)) {
            unallocated_encoding(s);
            return;
        }
        is_fp = 2;
        break;
    case 0x00: /* FMLAL */
    case 0x04: /* FMLSL */
    case 0x18: /* FMLAL2 */
    case 0x1c: /* FMLSL2 */
        if (is_scalar || size != MO_32 || !dc_isar_feature(aa64_fhm, s)) {
            unallocated_encoding(s);
            return;
        }
        size = MO_16;
        /* is_fp, but we pass cpu_env not fp_status.  */
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    switch (is_fp) {
    case 1: /* normal fp */
        /* convert insn encoded size to MemOp size */
        switch (size) {
        case 0: /* half-precision */
            size = MO_16;
            is_fp16 = true;
            break;
        case MO_32: /* single precision */
        case MO_64: /* double precision */
            break;
        default:
            unallocated_encoding(s);
            return;
        }
        break;

    case 2: /* complex fp */
        /* Each indexable element is a complex pair.  */
        size += 1;
        switch (size) {
        case MO_32:
            if (h && !is_q) {
                unallocated_encoding(s);
                return;
            }
            is_fp16 = true;
            break;
        case MO_64:
            break;
        default:
            unallocated_encoding(s);
            return;
        }
        break;

    default: /* integer */
        switch (size) {
        case MO_8:
        case MO_64:
            unallocated_encoding(s);
            return;
        }
        break;
    }
    if (is_fp16 && !dc_isar_feature(aa64_fp16, s)) {
        unallocated_encoding(s);
        return;
    }

    /* Given MemOp size, decode the element index from H:L:M and
     * widen Rm with the M bit where it acts as Rm[4].
     */
    switch (size) {
    case MO_16:
        index = h << 2 | l << 1 | m;
        break;
    case MO_32:
        index = h << 1 | l;
        rm |= m << 4;
        break;
    case MO_64:
        if (l || !is_q) {
            unallocated_encoding(s);
            return;
        }
        index = h;
        rm |= m << 4;
        break;
    default:
        g_assert_not_reached();
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (is_fp) {
        fpst = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
    } else {
        fpst = NULL;
    }

    /* Ops with dedicated whole-vector (gvec) implementations. */
    switch (16 * u + opcode) {
    case 0x0e: /* SDOT */
    case 0x1e: /* UDOT */
        gen_gvec_op3_ool(s, is_q, rd, rn, rm, index,
                         u ? gen_helper_gvec_udot_idx_b
                         : gen_helper_gvec_sdot_idx_b);
        return;
    case 0x11: /* FCMLA #0 */
    case 0x13: /* FCMLA #90 */
    case 0x15: /* FCMLA #180 */
    case 0x17: /* FCMLA #270 */
        {
            int rot = extract32(insn, 13, 2);
            int data = (index << 2) | rot;
            tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
                               vec_full_reg_offset(s, rn),
                               vec_full_reg_offset(s, rm), fpst,
                               is_q ? 16 : 8, vec_full_reg_size(s), data,
                               size == MO_64
                               ? gen_helper_gvec_fcmlas_idx
                               : gen_helper_gvec_fcmlah_idx);
            tcg_temp_free_ptr(fpst);
        }
        return;

    case 0x00: /* FMLAL */
    case 0x04: /* FMLSL */
    case 0x18: /* FMLAL2 */
    case 0x1c: /* FMLSL2 */
        {
            int is_s = extract32(opcode, 2, 1);
            int is_2 = u;
            int data = (index << 2) | (is_2 << 1) | is_s;
            tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
                               vec_full_reg_offset(s, rn),
                               vec_full_reg_offset(s, rm), cpu_env,
                               is_q ? 16 : 8, vec_full_reg_size(s),
                               data, gen_helper_gvec_fmlal_idx_a64);
        }
        return;

    case 0x08: /* MUL */
        if (!is_long && !is_scalar) {
            static gen_helper_gvec_3 * const fns[3] = {
                gen_helper_gvec_mul_idx_h,
                gen_helper_gvec_mul_idx_s,
                gen_helper_gvec_mul_idx_d,
            };
            tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd),
                               vec_full_reg_offset(s, rn),
                               vec_full_reg_offset(s, rm),
                               is_q ? 16 : 8, vec_full_reg_size(s),
                               index, fns[size - 1]);
            return;
        }
        break;

    case 0x10: /* MLA */
        if (!is_long && !is_scalar) {
            static gen_helper_gvec_4 * const fns[3] = {
                gen_helper_gvec_mla_idx_h,
                gen_helper_gvec_mla_idx_s,
                gen_helper_gvec_mla_idx_d,
            };
            tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
                               vec_full_reg_offset(s, rn),
                               vec_full_reg_offset(s, rm),
                               vec_full_reg_offset(s, rd),
                               is_q ? 16 : 8, vec_full_reg_size(s),
                               index, fns[size - 1]);
            return;
        }
        break;

    case 0x14: /* MLS */
        if (!is_long && !is_scalar) {
            static gen_helper_gvec_4 * const fns[3] = {
                gen_helper_gvec_mls_idx_h,
                gen_helper_gvec_mls_idx_s,
                gen_helper_gvec_mls_idx_d,
            };
            tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
                               vec_full_reg_offset(s, rn),
                               vec_full_reg_offset(s, rm),
                               vec_full_reg_offset(s, rd),
                               is_q ? 16 : 8, vec_full_reg_size(s),
                               index, fns[size - 1]);
            return;
        }
        break;
    }

    if (size == 3) {
        /* 64-bit elements: per-element codegen on i64 temporaries. */
        TCGv_i64 tcg_idx = tcg_temp_new_i64();
        int pass;

        assert(is_fp && is_q && !is_long);

        read_vec_element(s, tcg_idx, rm, index, MO_64);

        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();
            TCGv_i64 tcg_res = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);

            switch (16 * u + opcode) {
            case 0x05: /* FMLS */
                /* As usual for ARM, separate negation for fused multiply-add */
                gen_helper_vfp_negd(tcg_op, tcg_op);
                /* fall through */
            case 0x01: /* FMLA */
                read_vec_element(s, tcg_res, rd, pass, MO_64);
                gen_helper_vfp_muladdd(tcg_res, tcg_op, tcg_idx, tcg_res, fpst);
                break;
            case 0x09: /* FMUL */
                gen_helper_vfp_muld(tcg_res, tcg_op, tcg_idx, fpst);
                break;
            case 0x19: /* FMULX */
                gen_helper_vfp_mulxd(tcg_res, tcg_op, tcg_idx, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            write_vec_element(s, tcg_res, rd, pass, MO_64);
            tcg_temp_free_i64(tcg_op);
            tcg_temp_free_i64(tcg_res);
        }

        tcg_temp_free_i64(tcg_idx);
        clear_vec_high(s, !is_scalar, rd);
    } else if (!is_long) {
        /* 32 bit floating point, or 16 or 32 bit integer.
         * For the 16 bit scalar case we use the usual Neon helpers and
         * rely on the fact that 0 op 0 == 0 with no side effects.
         */
        TCGv_i32 tcg_idx = tcg_temp_new_i32();
        int pass, maxpasses;

        if (is_scalar) {
            maxpasses = 1;
        } else {
            maxpasses = is_q ? 4 : 2;
        }

        read_vec_element_i32(s, tcg_idx, rm, index, size);

        if (size == 1 && !is_scalar) {
            /* The simplest way to handle the 16x16 indexed ops is to
             * duplicate the index into both halves of the 32 bit tcg_idx
             * and then use the usual Neon helpers.
             */
            tcg_gen_deposit_i32(tcg_idx, tcg_idx, tcg_idx, 16, 16);
        }

        for (pass = 0; pass < maxpasses; pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op, rn, pass, is_scalar ? size : MO_32);

            switch (16 * u + opcode) {
            case 0x08: /* MUL */
            case 0x10: /* MLA */
            case 0x14: /* MLS */
            {
                static NeonGenTwoOpFn * const fns[2][2] = {
                    { gen_helper_neon_add_u16, gen_helper_neon_sub_u16 },
                    { tcg_gen_add_i32, tcg_gen_sub_i32 },
                };
                NeonGenTwoOpFn *genfn;
                /* opcode here is the low 4 bits: MLS is u=1, opc=0x4 */
                bool is_sub = opcode == 0x4;

                if (size == 1) {
                    gen_helper_neon_mul_u16(tcg_res, tcg_op, tcg_idx);
                } else {
                    tcg_gen_mul_i32(tcg_res, tcg_op, tcg_idx);
                }
                if (opcode == 0x8) {
                    /* MUL: no accumulate step */
                    break;
                }
                read_vec_element_i32(s, tcg_op, rd, pass, MO_32);
                genfn = fns[size - 1][is_sub];
                genfn(tcg_res, tcg_op, tcg_res);
                break;
            }
            case 0x05: /* FMLS */
            case 0x01: /* FMLA */
                read_vec_element_i32(s, tcg_res, rd, pass,
                                     is_scalar ? size : MO_32);
                switch (size) {
                case 1:
                    if (opcode == 0x5) {
                        /* As usual for ARM, separate negation for
                         * fused multiply-add (flip fp16 sign bits) */
                        tcg_gen_xori_i32(tcg_op, tcg_op, 0x80008000);
                    }
                    if (is_scalar) {
                        gen_helper_advsimd_muladdh(tcg_res, tcg_op, tcg_idx,
                                                   tcg_res, fpst);
                    } else {
                        gen_helper_advsimd_muladd2h(tcg_res, tcg_op, tcg_idx,
                                                    tcg_res, fpst);
                    }
                    break;
                case 2:
                    if (opcode == 0x5) {
                        /* As usual for ARM, separate negation for
                         * fused multiply-add (flip fp32 sign bit) */
                        tcg_gen_xori_i32(tcg_op, tcg_op, 0x80000000);
                    }
                    gen_helper_vfp_muladds(tcg_res, tcg_op, tcg_idx,
                                           tcg_res, fpst);
                    break;
                default:
                    g_assert_not_reached();
                }
                break;
            case 0x09: /* FMUL */
                switch (size) {
                case 1:
                    if (is_scalar) {
                        gen_helper_advsimd_mulh(tcg_res, tcg_op,
                                                tcg_idx, fpst);
                    } else {
                        gen_helper_advsimd_mul2h(tcg_res, tcg_op,
                                                 tcg_idx, fpst);
                    }
                    break;
                case 2:
                    gen_helper_vfp_muls(tcg_res, tcg_op, tcg_idx, fpst);
                    break;
                default:
                    g_assert_not_reached();
                }
                break;
            case 0x19: /* FMULX */
                switch (size) {
                case 1:
                    if (is_scalar) {
                        gen_helper_advsimd_mulxh(tcg_res, tcg_op,
                                                 tcg_idx, fpst);
                    } else {
                        gen_helper_advsimd_mulx2h(tcg_res, tcg_op,
                                                  tcg_idx, fpst);
                    }
                    break;
                case 2:
                    gen_helper_vfp_mulxs(tcg_res, tcg_op, tcg_idx, fpst);
                    break;
                default:
                    g_assert_not_reached();
                }
                break;
            case 0x0c: /* SQDMULH */
                if (size == 1) {
                    gen_helper_neon_qdmulh_s16(tcg_res, cpu_env,
                                               tcg_op, tcg_idx);
                } else {
                    gen_helper_neon_qdmulh_s32(tcg_res, cpu_env,
                                               tcg_op, tcg_idx);
                }
                break;
            case 0x0d: /* SQRDMULH */
                if (size == 1) {
                    gen_helper_neon_qrdmulh_s16(tcg_res, cpu_env,
                                                tcg_op, tcg_idx);
                } else {
                    gen_helper_neon_qrdmulh_s32(tcg_res, cpu_env,
                                                tcg_op, tcg_idx);
                }
                break;
            case 0x1d: /* SQRDMLAH */
                read_vec_element_i32(s, tcg_res, rd, pass,
                                     is_scalar ? size : MO_32);
                if (size == 1) {
                    gen_helper_neon_qrdmlah_s16(tcg_res, cpu_env,
                                                tcg_op, tcg_idx, tcg_res);
                } else {
                    gen_helper_neon_qrdmlah_s32(tcg_res, cpu_env,
                                                tcg_op, tcg_idx, tcg_res);
                }
                break;
            case 0x1f: /* SQRDMLSH */
                read_vec_element_i32(s, tcg_res, rd, pass,
                                     is_scalar ? size : MO_32);
                if (size == 1) {
                    gen_helper_neon_qrdmlsh_s16(tcg_res, cpu_env,
                                                tcg_op, tcg_idx, tcg_res);
                } else {
                    gen_helper_neon_qrdmlsh_s32(tcg_res, cpu_env,
                                                tcg_op, tcg_idx, tcg_res);
                }
                break;
            default:
                g_assert_not_reached();
            }

            if (is_scalar) {
                write_fp_sreg(s, rd, tcg_res);
            } else {
                write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
            }

            tcg_temp_free_i32(tcg_op);
            tcg_temp_free_i32(tcg_res);
        }

        tcg_temp_free_i32(tcg_idx);
        clear_vec_high(s, is_q, rd);
    } else {
        /* long ops: 16x16->32 or 32x32->64 */
        TCGv_i64 tcg_res[2];
        int pass;
        bool satop = extract32(opcode, 0, 1);
        MemOp memop = MO_32;

        if (satop || !u) {
            /* saturating or signed ops read signed source elements */
            memop |= MO_SIGN;
        }

        if (size == 2) {
            TCGv_i64 tcg_idx = tcg_temp_new_i64();

            read_vec_element(s, tcg_idx, rm, index, memop);

            for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
                TCGv_i64 tcg_op = tcg_temp_new_i64();
                TCGv_i64 tcg_passres;
                int passelt;

                if (is_scalar) {
                    passelt = 0;
                } else {
                    /* INSN2 variants read the upper half of Vn */
                    passelt = pass + (is_q * 2);
                }

                read_vec_element(s, tcg_op, rn, passelt, memop);

                tcg_res[pass] = tcg_temp_new_i64();

                if (opcode == 0xa || opcode == 0xb) {
                    /* Non-accumulating op */
                    tcg_passres = tcg_res[pass];
                } else {
                    tcg_passres = tcg_temp_new_i64();
                }

                tcg_gen_mul_i64(tcg_passres, tcg_op, tcg_idx);
                tcg_temp_free_i64(tcg_op);

                if (satop) {
                    /* saturating, doubling */
                    gen_helper_neon_addl_saturate_s64(tcg_passres, cpu_env,
                                                      tcg_passres, tcg_passres);
                }

                if (opcode == 0xa || opcode == 0xb) {
                    continue;
                }

                /* Accumulating op: handle accumulate step */
                read_vec_element(s, tcg_res[pass], rd, pass, MO_64);

                switch (opcode) {
                case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
                    tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
                    break;
                case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
                    tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
                    break;
                case 0x7: /* SQDMLSL, SQDMLSL2 */
                    tcg_gen_neg_i64(tcg_passres, tcg_passres);
                    /* fall through */
                case 0x3: /* SQDMLAL, SQDMLAL2 */
                    gen_helper_neon_addl_saturate_s64(tcg_res[pass], cpu_env,
                                                      tcg_res[pass],
                                                      tcg_passres);
                    break;
                default:
                    g_assert_not_reached();
                }
                tcg_temp_free_i64(tcg_passres);
            }
            tcg_temp_free_i64(tcg_idx);

            clear_vec_high(s, !is_scalar, rd);
        } else {
            TCGv_i32 tcg_idx = tcg_temp_new_i32();

            assert(size == 1);
            read_vec_element_i32(s, tcg_idx, rm, index, size);

            if (!is_scalar) {
                /* The simplest way to handle the 16x16 indexed ops is to
                 * duplicate the index into both halves of the 32 bit
                 * tcg_idx and then use the usual Neon helpers.
                 */
                tcg_gen_deposit_i32(tcg_idx, tcg_idx, tcg_idx, 16, 16);
            }

            for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
                TCGv_i32 tcg_op = tcg_temp_new_i32();
                TCGv_i64 tcg_passres;

                if (is_scalar) {
                    read_vec_element_i32(s, tcg_op, rn, pass, size);
                } else {
                    read_vec_element_i32(s, tcg_op, rn,
                                         pass + (is_q * 2), MO_32);
                }

                tcg_res[pass] = tcg_temp_new_i64();

                if (opcode == 0xa || opcode == 0xb) {
                    /* Non-accumulating op */
                    tcg_passres = tcg_res[pass];
                } else {
                    tcg_passres = tcg_temp_new_i64();
                }

                if (memop & MO_SIGN) {
                    gen_helper_neon_mull_s16(tcg_passres, tcg_op, tcg_idx);
                } else {
                    gen_helper_neon_mull_u16(tcg_passres, tcg_op, tcg_idx);
                }
                if (satop) {
                    gen_helper_neon_addl_saturate_s32(tcg_passres, cpu_env,
                                                      tcg_passres, tcg_passres);
                }
                tcg_temp_free_i32(tcg_op);

                if (opcode == 0xa || opcode == 0xb) {
                    continue;
                }

                /* Accumulating op: handle accumulate step */
                read_vec_element(s, tcg_res[pass], rd, pass, MO_64);

                switch (opcode) {
                case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
                    gen_helper_neon_addl_u32(tcg_res[pass], tcg_res[pass],
                                             tcg_passres);
                    break;
                case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
                    gen_helper_neon_subl_u32(tcg_res[pass], tcg_res[pass],
                                             tcg_passres);
                    break;
                case 0x7: /* SQDMLSL, SQDMLSL2 */
                    gen_helper_neon_negl_u32(tcg_passres, tcg_passres);
                    /* fall through */
                case 0x3: /* SQDMLAL, SQDMLAL2 */
                    gen_helper_neon_addl_saturate_s32(tcg_res[pass], cpu_env,
                                                      tcg_res[pass],
                                                      tcg_passres);
                    break;
                default:
                    g_assert_not_reached();
                }
                tcg_temp_free_i64(tcg_passres);
            }
            tcg_temp_free_i32(tcg_idx);

            if (is_scalar) {
                /* scalar 16x16->32 result lives in the low 32 bits */
                tcg_gen_ext32u_i64(tcg_res[0], tcg_res[0]);
            }
        }

        if (is_scalar) {
            /* zero the upper 64 bits of the result register */
            tcg_res[1] = tcg_const_i64(0);
        }

        for (pass = 0; pass < 2; pass++) {
            write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
            tcg_temp_free_i64(tcg_res[pass]);
        }
    }

    if (fpst) {
        tcg_temp_free_ptr(fpst);
    }
}
13931
13932
13933
13934
13935
13936
13937
13938static void disas_crypto_aes(DisasContext *s, uint32_t insn)
13939{
13940 int size = extract32(insn, 22, 2);
13941 int opcode = extract32(insn, 12, 5);
13942 int rn = extract32(insn, 5, 5);
13943 int rd = extract32(insn, 0, 5);
13944 int decrypt;
13945 gen_helper_gvec_2 *genfn2 = NULL;
13946 gen_helper_gvec_3 *genfn3 = NULL;
13947
13948 if (!dc_isar_feature(aa64_aes, s) || size != 0) {
13949 unallocated_encoding(s);
13950 return;
13951 }
13952
13953 switch (opcode) {
13954 case 0x4:
13955 decrypt = 0;
13956 genfn3 = gen_helper_crypto_aese;
13957 break;
13958 case 0x6:
13959 decrypt = 0;
13960 genfn2 = gen_helper_crypto_aesmc;
13961 break;
13962 case 0x5:
13963 decrypt = 1;
13964 genfn3 = gen_helper_crypto_aese;
13965 break;
13966 case 0x7:
13967 decrypt = 1;
13968 genfn2 = gen_helper_crypto_aesmc;
13969 break;
13970 default:
13971 unallocated_encoding(s);
13972 return;
13973 }
13974
13975 if (!fp_access_check(s)) {
13976 return;
13977 }
13978 if (genfn2) {
13979 gen_gvec_op2_ool(s, true, rd, rn, decrypt, genfn2);
13980 } else {
13981 gen_gvec_op3_ool(s, true, rd, rd, rn, decrypt, genfn3);
13982 }
13983}
13984
13985
13986
13987
13988
13989
13990
13991static void disas_crypto_three_reg_sha(DisasContext *s, uint32_t insn)
13992{
13993 int size = extract32(insn, 22, 2);
13994 int opcode = extract32(insn, 12, 3);
13995 int rm = extract32(insn, 16, 5);
13996 int rn = extract32(insn, 5, 5);
13997 int rd = extract32(insn, 0, 5);
13998 gen_helper_gvec_3 *genfn;
13999 bool feature;
14000
14001 if (size != 0) {
14002 unallocated_encoding(s);
14003 return;
14004 }
14005
14006 switch (opcode) {
14007 case 0:
14008 genfn = gen_helper_crypto_sha1c;
14009 feature = dc_isar_feature(aa64_sha1, s);
14010 break;
14011 case 1:
14012 genfn = gen_helper_crypto_sha1p;
14013 feature = dc_isar_feature(aa64_sha1, s);
14014 break;
14015 case 2:
14016 genfn = gen_helper_crypto_sha1m;
14017 feature = dc_isar_feature(aa64_sha1, s);
14018 break;
14019 case 3:
14020 genfn = gen_helper_crypto_sha1su0;
14021 feature = dc_isar_feature(aa64_sha1, s);
14022 break;
14023 case 4:
14024 genfn = gen_helper_crypto_sha256h;
14025 feature = dc_isar_feature(aa64_sha256, s);
14026 break;
14027 case 5:
14028 genfn = gen_helper_crypto_sha256h2;
14029 feature = dc_isar_feature(aa64_sha256, s);
14030 break;
14031 case 6:
14032 genfn = gen_helper_crypto_sha256su1;
14033 feature = dc_isar_feature(aa64_sha256, s);
14034 break;
14035 default:
14036 unallocated_encoding(s);
14037 return;
14038 }
14039
14040 if (!feature) {
14041 unallocated_encoding(s);
14042 return;
14043 }
14044
14045 if (!fp_access_check(s)) {
14046 return;
14047 }
14048 gen_gvec_op3_ool(s, true, rd, rn, rm, 0, genfn);
14049}
14050
14051
14052
14053
14054
14055
14056
14057static void disas_crypto_two_reg_sha(DisasContext *s, uint32_t insn)
14058{
14059 int size = extract32(insn, 22, 2);
14060 int opcode = extract32(insn, 12, 5);
14061 int rn = extract32(insn, 5, 5);
14062 int rd = extract32(insn, 0, 5);
14063 gen_helper_gvec_2 *genfn;
14064 bool feature;
14065
14066 if (size != 0) {
14067 unallocated_encoding(s);
14068 return;
14069 }
14070
14071 switch (opcode) {
14072 case 0:
14073 feature = dc_isar_feature(aa64_sha1, s);
14074 genfn = gen_helper_crypto_sha1h;
14075 break;
14076 case 1:
14077 feature = dc_isar_feature(aa64_sha1, s);
14078 genfn = gen_helper_crypto_sha1su1;
14079 break;
14080 case 2:
14081 feature = dc_isar_feature(aa64_sha256, s);
14082 genfn = gen_helper_crypto_sha256su0;
14083 break;
14084 default:
14085 unallocated_encoding(s);
14086 return;
14087 }
14088
14089 if (!feature) {
14090 unallocated_encoding(s);
14091 return;
14092 }
14093
14094 if (!fp_access_check(s)) {
14095 return;
14096 }
14097 gen_gvec_op2_ool(s, true, rd, rn, 0, genfn);
14098}
14099
14100static void gen_rax1_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m)
14101{
14102 tcg_gen_rotli_i64(d, m, 1);
14103 tcg_gen_xor_i64(d, d, n);
14104}
14105
14106static void gen_rax1_vec(unsigned vece, TCGv_vec d, TCGv_vec n, TCGv_vec m)
14107{
14108 tcg_gen_rotli_vec(vece, d, m, 1);
14109 tcg_gen_xor_vec(vece, d, d, n);
14110}
14111
/*
 * Expand RAX1 (d = n ^ rol64(m, 1), per 64-bit lane) as a gvec operation.
 * Used by the three-reg SHA512 decode below; non-static as it is also
 * declared in translate-a64.h.
 */
void gen_gvec_rax1(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
{
    /* Extra vector opcodes the .fniv expansion requires.  */
    static const TCGOpcode vecop_list[] = { INDEX_op_rotli_vec, 0 };
    static const GVecGen3 op = {
        .fni8 = gen_rax1_i64,          /* scalar i64 expansion */
        .fniv = gen_rax1_vec,          /* host-vector expansion */
        .opt_opc = vecop_list,
        .fno = gen_helper_crypto_rax1, /* out-of-line fallback */
        .vece = MO_64,
    };
    tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &op);
}
14125
14126
14127
14128
14129
14130
14131
/* Crypto three-reg SHA512
 * Fields (per the extract32 calls below): Rm [20:16], O [14],
 * opcode [11:10], Rn [9:5], Rd [4:0].
 */
static void disas_crypto_three_reg_sha512(DisasContext *s, uint32_t insn)
{
    int opcode = extract32(insn, 10, 2);
    int o = extract32(insn, 14, 1);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    bool feature;
    gen_helper_gvec_3 *oolfn = NULL;
    GVecGen3Fn *gvecfn = NULL;

    if (o == 0) {
        switch (opcode) {
        case 0: /* SHA512H */
            feature = dc_isar_feature(aa64_sha512, s);
            oolfn = gen_helper_crypto_sha512h;
            break;
        case 1: /* SHA512H2 */
            feature = dc_isar_feature(aa64_sha512, s);
            oolfn = gen_helper_crypto_sha512h2;
            break;
        case 2: /* SHA512SU1 */
            feature = dc_isar_feature(aa64_sha512, s);
            oolfn = gen_helper_crypto_sha512su1;
            break;
        case 3: /* RAX1 */
            feature = dc_isar_feature(aa64_sha3, s);
            gvecfn = gen_gvec_rax1;
            break;
        default:
            /* opcode is 2 bits wide, so 0-3 is exhaustive here.  */
            g_assert_not_reached();
        }
    } else {
        switch (opcode) {
        case 0: /* SM3PARTW1 */
            feature = dc_isar_feature(aa64_sm3, s);
            oolfn = gen_helper_crypto_sm3partw1;
            break;
        case 1: /* SM3PARTW2 */
            feature = dc_isar_feature(aa64_sm3, s);
            oolfn = gen_helper_crypto_sm3partw2;
            break;
        case 2: /* SM4EKEY */
            feature = dc_isar_feature(aa64_sm4, s);
            oolfn = gen_helper_crypto_sm4ekey;
            break;
        default:
            unallocated_encoding(s);
            return;
        }
    }

    if (!feature) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* RAX1 expands inline via gvec; all other ops call a helper.  */
    if (oolfn) {
        gen_gvec_op3_ool(s, true, rd, rn, rm, 0, oolfn);
    } else {
        gen_gvec_fn3(s, true, rd, rn, rm, gvecfn, MO_64);
    }
}
14199
14200
14201
14202
14203
14204
14205
14206static void disas_crypto_two_reg_sha512(DisasContext *s, uint32_t insn)
14207{
14208 int opcode = extract32(insn, 10, 2);
14209 int rn = extract32(insn, 5, 5);
14210 int rd = extract32(insn, 0, 5);
14211 bool feature;
14212
14213 switch (opcode) {
14214 case 0:
14215 feature = dc_isar_feature(aa64_sha512, s);
14216 break;
14217 case 1:
14218 feature = dc_isar_feature(aa64_sm4, s);
14219 break;
14220 default:
14221 unallocated_encoding(s);
14222 return;
14223 }
14224
14225 if (!feature) {
14226 unallocated_encoding(s);
14227 return;
14228 }
14229
14230 if (!fp_access_check(s)) {
14231 return;
14232 }
14233
14234 switch (opcode) {
14235 case 0:
14236 gen_gvec_op2_ool(s, true, rd, rn, 0, gen_helper_crypto_sha512su0);
14237 break;
14238 case 1:
14239 gen_gvec_op3_ool(s, true, rd, rd, rn, 0, gen_helper_crypto_sm4e);
14240 break;
14241 default:
14242 g_assert_not_reached();
14243 }
14244}
14245
14246
14247
14248
14249
14250
14251
/* Crypto four-register
 * Fields (per the extract32 calls below): Op0 [22:21], Rm [20:16],
 * Ra [14:10], Rn [9:5], Rd [4:0].
 */
static void disas_crypto_four_reg(DisasContext *s, uint32_t insn)
{
    int op0 = extract32(insn, 21, 2);
    int rm = extract32(insn, 16, 5);
    int ra = extract32(insn, 10, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    bool feature;

    switch (op0) {
    case 0: /* EOR3 */
    case 1: /* BCAX */
        feature = dc_isar_feature(aa64_sha3, s);
        break;
    case 2: /* SM3SS1 */
        feature = dc_isar_feature(aa64_sm3, s);
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!feature) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (op0 < 2) {
        /* EOR3 / BCAX: three-way logic ops per 64-bit lane.  */
        TCGv_i64 tcg_op1, tcg_op2, tcg_op3, tcg_res[2];
        int pass;

        tcg_op1 = tcg_temp_new_i64();
        tcg_op2 = tcg_temp_new_i64();
        tcg_op3 = tcg_temp_new_i64();
        tcg_res[0] = tcg_temp_new_i64();
        tcg_res[1] = tcg_temp_new_i64();

        for (pass = 0; pass < 2; pass++) {
            read_vec_element(s, tcg_op1, rn, pass, MO_64);
            read_vec_element(s, tcg_op2, rm, pass, MO_64);
            read_vec_element(s, tcg_op3, ra, pass, MO_64);

            if (op0 == 0) {
                /* EOR3: Rn ^ Rm ^ Ra */
                tcg_gen_xor_i64(tcg_res[pass], tcg_op2, tcg_op3);
            } else {
                /* BCAX: Rn ^ (Rm & ~Ra) */
                tcg_gen_andc_i64(tcg_res[pass], tcg_op2, tcg_op3);
            }
            tcg_gen_xor_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
        }
        /* Results written only after both passes read their inputs,
         * so Rd may alias any source register.
         */
        write_vec_element(s, tcg_res[0], rd, 0, MO_64);
        write_vec_element(s, tcg_res[1], rd, 1, MO_64);

        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2);
        tcg_temp_free_i64(tcg_op3);
        tcg_temp_free_i64(tcg_res[0]);
        tcg_temp_free_i64(tcg_res[1]);
    } else {
        /* SM3SS1: operates on the top 32-bit elements of the sources;
         * the low three 32-bit elements of Rd are zeroed.
         */
        TCGv_i32 tcg_op1, tcg_op2, tcg_op3, tcg_res, tcg_zero;

        tcg_op1 = tcg_temp_new_i32();
        tcg_op2 = tcg_temp_new_i32();
        tcg_op3 = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        tcg_zero = tcg_const_i32(0);

        read_vec_element_i32(s, tcg_op1, rn, 3, MO_32);
        read_vec_element_i32(s, tcg_op2, rm, 3, MO_32);
        read_vec_element_i32(s, tcg_op3, ra, 3, MO_32);

        /* res = rol32(rol32(Rn[3], 12) + Rm[3] + Ra[3], 7)
         * expressed with rotate-right immediates (20 = 32-12, 25 = 32-7).
         */
        tcg_gen_rotri_i32(tcg_res, tcg_op1, 20);
        tcg_gen_add_i32(tcg_res, tcg_res, tcg_op2);
        tcg_gen_add_i32(tcg_res, tcg_res, tcg_op3);
        tcg_gen_rotri_i32(tcg_res, tcg_res, 25);

        write_vec_element_i32(s, tcg_zero, rd, 0, MO_32);
        write_vec_element_i32(s, tcg_zero, rd, 1, MO_32);
        write_vec_element_i32(s, tcg_zero, rd, 2, MO_32);
        write_vec_element_i32(s, tcg_res, rd, 3, MO_32);

        tcg_temp_free_i32(tcg_op1);
        tcg_temp_free_i32(tcg_op2);
        tcg_temp_free_i32(tcg_op3);
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_zero);
    }
}
14345
14346
14347
14348
14349
14350
14351
/* Crypto XAR
 * Fields (per the extract32 calls below): Rm [20:16], imm6 [15:10],
 * Rn [9:5], Rd [4:0].
 */
static void disas_crypto_xar(DisasContext *s, uint32_t insn)
{
    int rm = extract32(insn, 16, 5);
    int imm6 = extract32(insn, 10, 6);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    TCGv_i64 tcg_op1, tcg_op2, tcg_res[2];
    int pass;

    if (!dc_isar_feature(aa64_sha3, s)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_op1 = tcg_temp_new_i64();
    tcg_op2 = tcg_temp_new_i64();
    tcg_res[0] = tcg_temp_new_i64();
    tcg_res[1] = tcg_temp_new_i64();

    /* Per 64-bit lane: Rd = ror64(Rn ^ Rm, imm6).  */
    for (pass = 0; pass < 2; pass++) {
        read_vec_element(s, tcg_op1, rn, pass, MO_64);
        read_vec_element(s, tcg_op2, rm, pass, MO_64);

        tcg_gen_xor_i64(tcg_res[pass], tcg_op1, tcg_op2);
        tcg_gen_rotri_i64(tcg_res[pass], tcg_res[pass], imm6);
    }
    /* Write back after both lanes are computed so Rd may alias Rn/Rm.  */
    write_vec_element(s, tcg_res[0], rd, 0, MO_64);
    write_vec_element(s, tcg_res[1], rd, 1, MO_64);

    tcg_temp_free_i64(tcg_op1);
    tcg_temp_free_i64(tcg_op2);
    tcg_temp_free_i64(tcg_res[0]);
    tcg_temp_free_i64(tcg_res[1]);
}
14390
14391
14392
14393
14394
14395
14396
14397static void disas_crypto_three_reg_imm2(DisasContext *s, uint32_t insn)
14398{
14399 static gen_helper_gvec_3 * const fns[4] = {
14400 gen_helper_crypto_sm3tt1a, gen_helper_crypto_sm3tt1b,
14401 gen_helper_crypto_sm3tt2a, gen_helper_crypto_sm3tt2b,
14402 };
14403 int opcode = extract32(insn, 10, 2);
14404 int imm2 = extract32(insn, 12, 2);
14405 int rm = extract32(insn, 16, 5);
14406 int rn = extract32(insn, 5, 5);
14407 int rd = extract32(insn, 0, 5);
14408
14409 if (!dc_isar_feature(aa64_sm3, s)) {
14410 unallocated_encoding(s);
14411 return;
14412 }
14413
14414 if (!fp_access_check(s)) {
14415 return;
14416 }
14417
14418 gen_gvec_op3_ool(s, true, rd, rn, rm, imm2, fns[opcode]);
14419}
14420
14421
14422
14423
14424
14425
/* C3.6 Data processing - SIMD, inc Crypto
 *
 * As the decode gets a little complex we are using a table based
 * approach for this part of the decode.
 */
static const AArch64DecodeTable data_proc_simd[] = {
    /* pattern  ,  mask     ,  fn                        */
    { 0x0e200400, 0x9f200400, disas_simd_three_reg_same },
    { 0x0e008400, 0x9f208400, disas_simd_three_reg_same_extra },
    { 0x0e200000, 0x9f200c00, disas_simd_three_reg_diff },
    { 0x0e200800, 0x9f3e0c00, disas_simd_two_reg_misc },
    { 0x0e300800, 0x9f3e0c00, disas_simd_across_lanes },
    { 0x0e000400, 0x9fe08400, disas_simd_copy },
    { 0x0f000000, 0x9f000400, disas_simd_indexed }, /* vector indexed */
    /* simd_mod_imm decode is a subset of simd_shift_imm, so must precede it */
    { 0x0f000400, 0x9ff80400, disas_simd_mod_imm },
    { 0x0f000400, 0x9f800400, disas_simd_shift_imm },
    { 0x0e000000, 0xbf208c00, disas_simd_tb },
    { 0x0e000800, 0xbf208c00, disas_simd_zip_trn },
    { 0x2e000000, 0xbf208400, disas_simd_ext },
    { 0x5e200400, 0xdf200400, disas_simd_scalar_three_reg_same },
    { 0x5e008400, 0xdf208400, disas_simd_scalar_three_reg_same_extra },
    { 0x5e200000, 0xdf200c00, disas_simd_scalar_three_reg_diff },
    { 0x5e200800, 0xdf3e0c00, disas_simd_scalar_two_reg_misc },
    { 0x5e300800, 0xdf3e0c00, disas_simd_scalar_pairwise },
    { 0x5e000400, 0xdfe08400, disas_simd_scalar_copy },
    { 0x5f000000, 0xdf000400, disas_simd_indexed }, /* scalar indexed */
    { 0x5f000400, 0xdf800400, disas_simd_scalar_shift_imm },
    { 0x4e280800, 0xff3e0c00, disas_crypto_aes },
    { 0x5e000000, 0xff208c00, disas_crypto_three_reg_sha },
    { 0x5e280800, 0xff3e0c00, disas_crypto_two_reg_sha },
    { 0xce608000, 0xffe0b000, disas_crypto_three_reg_sha512 },
    { 0xcec08000, 0xfffff000, disas_crypto_two_reg_sha512 },
    { 0xce000000, 0xff808000, disas_crypto_four_reg },
    { 0xce800000, 0xffe00000, disas_crypto_xar },
    { 0xce408000, 0xffe0c000, disas_crypto_three_reg_imm2 },
    { 0x0e400400, 0x9f60c400, disas_simd_three_reg_same_fp16 },
    { 0x0e780800, 0x8f7e0c00, disas_simd_two_reg_misc_fp16 },
    { 0x5e400400, 0xdf60c400, disas_simd_scalar_three_reg_same_fp16 },
    { 0x00000000, 0x00000000, NULL } /* table terminator */
};
14462
14463static void disas_data_proc_simd(DisasContext *s, uint32_t insn)
14464{
14465
14466
14467
14468
14469 AArch64DecodeFn *fn = lookup_disas_fn(&data_proc_simd[0], insn);
14470 if (fn) {
14471 fn(s, insn);
14472 } else {
14473 unallocated_encoding(s);
14474 }
14475}
14476
14477
14478static void disas_data_proc_simd_fp(DisasContext *s, uint32_t insn)
14479{
14480 if (extract32(insn, 28, 1) == 1 && extract32(insn, 30, 1) == 0) {
14481 disas_data_proc_fp(s, insn);
14482 } else {
14483
14484 disas_data_proc_simd(s, insn);
14485 }
14486}
14487
14488
14489
14490
14491
14492
14493
14494
/**
 * is_guarded_page:
 * @env: The cpu environment
 * @s: The DisasContext
 *
 * Return true if the page starting the current TB is guarded
 * (i.e. has the BTI GP attribute).
 */
static bool is_guarded_page(CPUARMState *env, DisasContext *s)
{
    uint64_t addr = s->base.pc_first;
#ifdef CONFIG_USER_ONLY
    return page_get_flags(addr) & PAGE_BTI;
#else
    int mmu_idx = arm_to_core_mmu_idx(s->mmu_idx);
    unsigned int index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);

    /*
     * We test this immediately after reading an insn, which means
     * that any normal page must be in the TLB.  The only exception
     * would be for executing from flash or device memory, which
     * does not retain the TLB entry.
     *
     * FIXME: Assume false for those, for now.  We could use
     * arm_cpu_get_phys_page_attrs_debug to re-read the page
     * table entry even for that case.
     */
    return (tlb_hit(entry->addr_code, addr) &&
            arm_tlb_bti_gp(&env_tlb(env)->d[mmu_idx].iotlb[index].attrs));
#endif
}
14519
14520
14521
14522
14523
14524
14525
14526
14527
14528
14529
14530
14531
14532
14533
14534
14535
/**
 * btype_destination_ok:
 * @insn: The instruction at the branch destination
 * @bt: SCTLR_ELx.BT
 * @btype: PSTATE.BTYPE, and is non-zero
 *
 * On a guarded page, only a limited set of insns may appear at an
 * indirect-branch target: the BTI hints, PACIASP/PACIBSP, and
 * BRK/HLT.  Return true if @insn is compatible with @btype;
 * false means a Branch Target Exception should be raised.
 */
static bool btype_destination_ok(uint32_t insn, bool bt, int btype)
{
    if ((insn & 0xfffff01fu) == 0xd503201fu) {
        /* HINT space */
        switch (extract32(insn, 5, 7)) {
        case 0b011001: /* PACIASP */
        case 0b011011: /* PACIBSP */
            /*
             * If SCTLR_ELx.BT, then PACI*SP are not compatible
             * with btype == 3.  Otherwise all btype are ok.
             */
            return !bt || btype != 3;
        case 0b100000: /* BTI */
            /* Not compatible with any btype.  */
            return false;
        case 0b100010: /* BTI c */
            /* Not compatible with btype == 3 */
            return btype != 3;
        case 0b100100: /* BTI j */
            /* Not compatible with btype == 2 */
            return btype != 2;
        case 0b100110: /* BTI jc */
            /* Compatible with any btype.  */
            return true;
        }
    } else {
        switch (insn & 0xffe0001fu) {
        case 0xd4200000u: /* BRK */
        case 0xd4400000u: /* HLT */
            /* Give priority to the breakpoint exception.  */
            return true;
        }
    }
    return false;
}
14571
14572
/* C3.1 A64 instruction index by encoding */
static void disas_a64_insn(CPUARMState *env, DisasContext *s)
{
    uint32_t insn;

    s->pc_curr = s->base.pc_next;
    insn = arm_ldl_code(env, s->base.pc_next, s->sctlr_b);
    s->insn = insn;
    s->base.pc_next += 4;

    s->fp_access_checked = false;
    s->sve_access_checked = false;

    if (dc_isar_feature(aa64_bti, s)) {
        if (s->base.num_insns == 1) {
            /*
             * At the first insn of the TB, compute s->guarded_page.
             * We delayed computing this until successfully reading
             * the first insn of the TB, above.  This (mostly) ensures
             * that the softmmu tlb entry has been populated, and the
             * page table GP bit is available.
             *
             * Note that we need to compute this even if btype == 0,
             * because this value is used for BR instructions later
             * where ENV is not available.
             */
            s->guarded_page = is_guarded_page(env, s);

            /* First insn can have btype set to non-zero.  */
            tcg_debug_assert(s->btype >= 0);

            /*
             * Note that the Branch Target Exception has fairly high
             * priority -- below debugging exceptions but above most
             * everything else.  This allows us to handle this now
             * instead of waiting until the insn is otherwise decoded.
             */
            if (s->btype != 0
                && s->guarded_page
                && !btype_destination_ok(insn, s->bt, s->btype)) {
                gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
                                   syn_btitrap(s->btype),
                                   default_exception_el(s));
                return;
            }
        } else {
            /* Not the first insn: btype must be 0.  */
            tcg_debug_assert(s->btype == 0);
        }
    }

    /* Dispatch on insn[28:25], the major encoding groups.  */
    switch (extract32(insn, 25, 4)) {
    case 0x0: case 0x1: case 0x3: /* UNALLOCATED */
        unallocated_encoding(s);
        break;
    case 0x2:
        /* SVE encoding space */
        if (!dc_isar_feature(aa64_sve, s) || !disas_sve(s, insn)) {
            unallocated_encoding(s);
        }
        break;
    case 0x8: case 0x9: /* Data processing - immediate */
        disas_data_proc_imm(s, insn);
        break;
    case 0xa: case 0xb: /* Branch, exception generation and system insns */
        disas_b_exc_sys(s, insn);
        break;
    case 0x4:
    case 0x6:
    case 0xc:
    case 0xe:      /* Loads and stores */
        disas_ldst(s, insn);
        break;
    case 0x5:
    case 0xd:      /* Data processing - register */
        disas_data_proc_reg(s, insn);
        break;
    case 0x7:
    case 0xf:      /* Data processing - SIMD and floating point */
        disas_data_proc_simd_fp(s, insn);
        break;
    default:
        assert(FALSE); /* all 15 cases should be handled above */
        break;
    }

    /* if we allocated any temporaries, free them here */
    free_tmp_a64(s);

    /*
     * After execution of most insns, btype is reset to 0.
     * Note that we set btype == -1 when the insn sets btype.
     */
    if (s->btype > 0 && s->base.is_jmp != DISAS_NORETURN) {
        reset_btype(s);
    }
}
14668
/* Initialise the DisasContext for one TB from the TB flags and CPU state. */
static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
                                          CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;
    ARMCPU *arm_cpu = env_archcpu(env);
    uint32_t tb_flags = dc->base.tb->flags;
    int bound, core_mmu_idx;

    dc->isar = &arm_cpu->isar;
    dc->condjmp = 0;

    dc->aarch64 = 1;
    /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
     * there is no secure EL1, so we route exceptions to EL3.
     */
    dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
                               !arm_el_is_aa64(env, 3);
    dc->thumb = 0;
    dc->sctlr_b = 0;
    dc->be_data = FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE;
    dc->condexec_mask = 0;
    dc->condexec_cond = 0;
    core_mmu_idx = FIELD_EX32(tb_flags, TBFLAG_ANY, MMUIDX);
    dc->mmu_idx = core_to_aa64_mmu_idx(core_mmu_idx);
    dc->tbii = FIELD_EX32(tb_flags, TBFLAG_A64, TBII);
    dc->tbid = FIELD_EX32(tb_flags, TBFLAG_A64, TBID);
    dc->tcma = FIELD_EX32(tb_flags, TBFLAG_A64, TCMA);
    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
    dc->user = (dc->current_el == 0);
#endif
    dc->fp_excp_el = FIELD_EX32(tb_flags, TBFLAG_ANY, FPEXC_EL);
    dc->sve_excp_el = FIELD_EX32(tb_flags, TBFLAG_A64, SVEEXC_EL);
    dc->sve_len = (FIELD_EX32(tb_flags, TBFLAG_A64, ZCR_LEN) + 1) * 16;
    dc->pauth_active = FIELD_EX32(tb_flags, TBFLAG_A64, PAUTH_ACTIVE);
    dc->bt = FIELD_EX32(tb_flags, TBFLAG_A64, BT);
    dc->btype = FIELD_EX32(tb_flags, TBFLAG_A64, BTYPE);
    dc->unpriv = FIELD_EX32(tb_flags, TBFLAG_A64, UNPRIV);
    dc->ata = FIELD_EX32(tb_flags, TBFLAG_A64, ATA);
    dc->mte_active[0] = FIELD_EX32(tb_flags, TBFLAG_A64, MTE_ACTIVE);
    dc->mte_active[1] = FIELD_EX32(tb_flags, TBFLAG_A64, MTE0_ACTIVE);
    dc->vec_len = 0;
    dc->vec_stride = 0;
    dc->cp_regs = arm_cpu->cp_regs;
    dc->features = env->features;
    dc->dcz_blocksize = arm_cpu->dcz_blocksize;

#ifdef CONFIG_USER_ONLY
    /* User-only code paths assume top-byte-ignore is enabled.  */
    tcg_debug_assert(dc->tbid & 1);
#endif

    /* Single step state. The code-generation logic here is:
     *  SS_ACTIVE == 0:
     *   generate code with no special handling for single-stepping (except
     *   that anything that can make us go to SS_ACTIVE == 1 must end the TB;
     *   this happens anyway because those changes are all system register or
     *   CPSR writes).
     *  SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
     *   emit code for one insn
     *   emit code to clear PSTATE.SS
     *   emit code to generate software step exception for completed step
     *   end TB (as usual for having generated an exception)
     *  SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
     *   emit code to generate a software step exception
     *   end the TB
     */
    dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE);
    dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE_SS);
    dc->is_ldex = false;
    dc->debug_target_el = FIELD_EX32(tb_flags, TBFLAG_ANY, DEBUG_TARGET_EL);

    /* Bound the number of insns to execute to those left on the page.  */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;

    /* If architectural single step active, limit to 1.  */
    if (dc->ss_active) {
        bound = 1;
    }
    dc->base.max_insns = MIN(dc->base.max_insns, bound);

    init_tmp_a64_array(dc);
}
14753
/* No per-TB work is needed at translation start for AArch64.  */
static void aarch64_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}
14757
/* Record the start of each insn for exception restore.  */
static void aarch64_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* The two trailing zeros fill the extra insn_start slots (used for
     * condexec state on AArch32); presumably unused for A64 — see the
     * AArch32 translator for the counterpart.
     */
    tcg_gen_insn_start(dc->base.pc_next, 0, 0);
    dc->insn_start = tcg_last_op();
}
14765
/* Handle a breakpoint hit at dc->base.pc_next; always consumes the bp.  */
static bool aarch64_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
                                        const CPUBreakpoint *bp)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (bp->flags & BP_CPU) {
        /* CPU (architectural) breakpoint: let the helper decide at runtime. */
        gen_a64_set_pc_im(dc->base.pc_next);
        gen_helper_check_breakpoints(cpu_env);
        /* End the TB early; it likely won't be executed.  */
        dc->base.is_jmp = DISAS_TOO_MANY;
    } else {
        /* GDB breakpoint: raise a debug exception immediately.  */
        gen_exception_internal_insn(dc, dc->base.pc_next, EXCP_DEBUG);
        /* The address covered by the breakpoint must be
           included in [tb->pc, tb->pc + tb->size) in order
           to for it to be properly cleared -- thus we
           increment the PC here so that the logic setting
           tb->size below does the right thing.  */
        dc->base.pc_next += 4;
        dc->base.is_jmp = DISAS_NORETURN;
    }

    return true;
}
14789
/* Translate one instruction, or emit the pending single-step exception.  */
static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;

    if (dc->ss_active && !dc->pstate_ss) {
        /* Singlestep state is Active-pending.
         * If we're in this state at the start of a TB then either
         *  a) we just took an exception to an EL which is being debugged
         *     and this is the first insn in the exception handler
         *  b) debug exceptions were masked and we just unmasked them
         *     without changing EL (eg by clearing PSTATE.D)
         * In either case we're going to take a swstep exception in the
         * "did not step an insn" case, and so the syndrome ISV and EX
         * bits should be zero.
         */
        assert(dc->base.num_insns == 1);
        gen_swstep_exception(dc, 0, 0);
        dc->base.is_jmp = DISAS_NORETURN;
    } else {
        disas_a64_insn(env, dc);
    }

    translator_loop_temp_check(&dc->base);
}
14815
/* Emit the end-of-TB code according to dc->base.is_jmp.  */
static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (unlikely(dc->base.singlestep_enabled || dc->ss_active)) {
        /* Note that this means single stepping WFI doesn't halt the CPU.
         * For conditional branch insns this is harmless unreachable code as
         * gen_goto_tb() has already handled emitting the debug exception
         * (and thus a tb-jump is not possible when singlestepping).
         */
        switch (dc->base.is_jmp) {
        default:
            gen_a64_set_pc_im(dc->base.pc_next);
            /* fall through */
        case DISAS_EXIT:
        case DISAS_JUMP:
            if (dc->base.singlestep_enabled) {
                gen_exception_internal(EXCP_DEBUG);
            } else {
                gen_step_complete_exception(dc);
            }
            break;
        case DISAS_NORETURN:
            break;
        }
    } else {
        switch (dc->base.is_jmp) {
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
            gen_goto_tb(dc, 1, dc->base.pc_next);
            break;
        default:
        case DISAS_UPDATE_EXIT:
            gen_a64_set_pc_im(dc->base.pc_next);
            /* fall through */
        case DISAS_EXIT:
            tcg_gen_exit_tb(NULL, 0);
            break;
        case DISAS_UPDATE_NOCHAIN:
            gen_a64_set_pc_im(dc->base.pc_next);
            /* fall through */
        case DISAS_JUMP:
            tcg_gen_lookup_and_goto_ptr();
            break;
        case DISAS_NORETURN:
        case DISAS_SWI:
            break;
        case DISAS_WFE:
            gen_a64_set_pc_im(dc->base.pc_next);
            gen_helper_wfe(cpu_env);
            break;
        case DISAS_YIELD:
            gen_a64_set_pc_im(dc->base.pc_next);
            gen_helper_yield(cpu_env);
            break;
        case DISAS_WFI:
        {
            /* This is a special case because we don't want to just halt
             * the CPU if trying to debug across a WFI.
             */
            TCGv_i32 tmp = tcg_const_i32(4); /* insn length for the helper */

            gen_a64_set_pc_im(dc->base.pc_next);
            gen_helper_wfi(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            /* The helper doesn't necessarily throw an exception, but we
             * must go back to the main loop to check for interrupts anyway.
             */
            tcg_gen_exit_tb(NULL, 0);
            break;
        }
        }
    }
}
14890
/* Log the guest instructions of this TB (for -d in_asm).  */
static void aarch64_tr_disas_log(const DisasContextBase *dcbase,
                                 CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
    log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size);
}
14899
/* Hooks used by the generic translator loop for AArch64 TBs.  */
const TranslatorOps aarch64_translator_ops = {
    .init_disas_context = aarch64_tr_init_disas_context,
    .tb_start           = aarch64_tr_tb_start,
    .insn_start         = aarch64_tr_insn_start,
    .breakpoint_check   = aarch64_tr_breakpoint_check,
    .translate_insn     = aarch64_tr_translate_insn,
    .tb_stop            = aarch64_tr_tb_stop,
    .disas_log          = aarch64_tr_disas_log,
};
14909