1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19#include "qemu/osdep.h"
20
21#include "cpu.h"
22#include "exec/exec-all.h"
23#include "tcg/tcg-op.h"
24#include "tcg/tcg-op-gvec.h"
25#include "qemu/log.h"
26#include "arm_ldst.h"
27#include "translate.h"
28#include "internals.h"
29#include "qemu/host-utils.h"
30
31#include "semihosting/semihost.h"
32#include "exec/gen-icount.h"
33
34#include "exec/helper-proto.h"
35#include "exec/helper-gen.h"
36#include "exec/log.h"
37
38#include "translate-a64.h"
39#include "qemu/atomic128.h"
40
/* TCG globals mirroring the AArch64 general registers and PC. */
static TCGv_i64 cpu_X[32];
static TCGv_i64 cpu_pc;

/* TCG global mirroring CPUARMState.exclusive_high (exclusive access state). */
static TCGv_i64 cpu_exclusive_high;

/* Names for the X registers; index 31 is named "sp" and x30 is "lr". */
static const char *regnames[] = {
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
    "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
    "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23",
    "x24", "x25", "x26", "x27", "x28", "x29", "lr", "sp"
};
53
/* Shift kinds, matching the encoding of the 2-bit "shift" insn field. */
enum a64_shift_type {
    A64_SHIFT_TYPE_LSL = 0,
    A64_SHIFT_TYPE_LSR = 1,
    A64_SHIFT_TYPE_ASR = 2,
    A64_SHIFT_TYPE_ROR = 3
};
60
61
62
63
/* Signature of a function that disassembles one instruction group. */
typedef void AArch64DecodeFn(DisasContext *s, uint32_t insn);

/* Decode table entry: disas_fn handles insns where (insn & mask) == pattern. */
typedef struct AArch64DecodeTable {
    uint32_t pattern;
    uint32_t mask;
    AArch64DecodeFn *disas_fn;
} AArch64DecodeTable;
71
72
/*
 * Initialize the TCG globals used by the A64 translator: the PC,
 * the 32 X registers, and the exclusive_high state.  Called once
 * during translator startup.
 */
void a64_translate_init(void)
{
    int i;

    cpu_pc = tcg_global_mem_new_i64(cpu_env,
                                    offsetof(CPUARMState, pc),
                                    "pc");
    for (i = 0; i < 32; i++) {
        cpu_X[i] = tcg_global_mem_new_i64(cpu_env,
                                          offsetof(CPUARMState, xregs[i]),
                                          regnames[i]);
    }

    cpu_exclusive_high = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_high), "exclusive_high");
}
89
90
91
92
/*
 * Return the core mmu_idx to use for A64 load/store insns which
 * have a "unprivileged load/store" variant.  When s->unpriv is set,
 * the EL1/EL2 index is converted to the matching EL0 index.
 */
static int get_a64_user_mem_index(DisasContext *s)
{
    ARMMMUIdx useridx = s->mmu_idx;

    if (s->unpriv) {
        /*
         * We have pre-computed the condition for AccType_UNPRIV.
         * Therefore we should never get here with a mmu_idx for
         * which we do not know the corresponding user mmu_idx.
         */
        switch (useridx) {
        case ARMMMUIdx_E10_1:
        case ARMMMUIdx_E10_1_PAN:
            useridx = ARMMMUIdx_E10_0;
            break;
        case ARMMMUIdx_E20_2:
        case ARMMMUIdx_E20_2_PAN:
            useridx = ARMMMUIdx_E20_0;
            break;
        case ARMMMUIdx_SE10_1:
        case ARMMMUIdx_SE10_1_PAN:
            useridx = ARMMMUIdx_SE10_0;
            break;
        case ARMMMUIdx_SE20_2:
        case ARMMMUIdx_SE20_2_PAN:
            useridx = ARMMMUIdx_SE20_0;
            break;
        default:
            g_assert_not_reached();
        }
    }
    return arm_to_core_mmu_idx(useridx);
}
130
131static void reset_btype(DisasContext *s)
132{
133 if (s->btype != 0) {
134 TCGv_i32 zero = tcg_const_i32(0);
135 tcg_gen_st_i32(zero, cpu_env, offsetof(CPUARMState, btype));
136 tcg_temp_free_i32(zero);
137 s->btype = 0;
138 }
139}
140
/* Store a non-zero value into PSTATE.BTYPE. */
static void set_btype(DisasContext *s, int val)
{
    TCGv_i32 tcg_val;

    /* BTYPE is a 2-bit field, and 0 should be done with reset_btype. */
    tcg_debug_assert(val >= 1 && val <= 3);

    tcg_val = tcg_const_i32(val);
    tcg_gen_st_i32(tcg_val, cpu_env, offsetof(CPUARMState, btype));
    tcg_temp_free_i32(tcg_val);
    /* -1 caches "btype is non-zero" (exact value untracked); cf. reset_btype. */
    s->btype = -1;
}
153
/* Set the emulated PC to the given constant value. */
void gen_a64_set_pc_im(uint64_t val)
{
    tcg_gen_movi_i64(cpu_pc, val);
}
158
159
160
161
162
163
164
165
166
167
168
169
170
171
/*
 * Handle Top Byte Ignore (TBI) bits.  tbi is a 2-bit field: bit 0
 * applies to addresses with bit 55 == 0, bit 1 to addresses with
 * bit 55 == 1.  Produce in dst a copy of src with the top byte
 * adjusted per the active TBI bits.
 */
static void gen_top_byte_ignore(DisasContext *s, TCGv_i64 dst,
                                TCGv_i64 src, int tbi)
{
    if (tbi == 0) {
        /* Load unmodified address */
        tcg_gen_mov_i64(dst, src);
    } else if (!regime_has_2_ranges(s->mmu_idx)) {
        /* Force tag byte to all zero */
        tcg_gen_extract_i64(dst, src, 0, 56);
    } else {
        /* Sign-extend from bit 55 */
        tcg_gen_sextract_i64(dst, src, 0, 56);

        switch (tbi) {
        case 1:
            /* tbi0 but !tbi1: only use the extension if positive */
            tcg_gen_and_i64(dst, dst, src);
            break;
        case 2:
            /* !tbi0 but tbi1: only use the extension if negative */
            tcg_gen_or_i64(dst, dst, src);
            break;
        case 3:
            /* tbi0 and tbi1: always use the extension */
            break;
        default:
            g_assert_not_reached();
        }
    }
}
202
static void gen_a64_set_pc(DisasContext *s, TCGv_i64 src)
{
    /*
     * If address tagging is enabled for instructions via the TCR TBI
     * bits, then loading an address into the PC will clear out any tag.
     */
    gen_top_byte_ignore(s, cpu_pc, src, s->tbii);
}
211
212
213
214
215
216
217
218
219
220
221
222
223
/*
 * Return a "clean" address for ADDR according to TBID.
 * In system mode the tag is removed by the TLB during translation,
 * so the dirty address (including the tag) must be presented to the
 * memory subsystem; in user-only mode there is no TLB, so the top
 * byte must be stripped here.
 */
TCGv_i64 clean_data_tbi(DisasContext *s, TCGv_i64 addr)
{
    TCGv_i64 clean = new_tmp_a64(s);
#ifdef CONFIG_USER_ONLY
    gen_top_byte_ignore(s, clean, addr, s->tbid);
#else
    tcg_gen_mov_i64(clean, addr);
#endif
    return clean;
}
234
235
/* Insert a zero allocation tag (bits [59:56]) into src, result in dst. */
static void gen_address_with_allocation_tag0(TCGv_i64 dst, TCGv_i64 src)
{
    tcg_gen_andi_i64(dst, src, ~MAKE_64BIT_MASK(56, 4));
}
240
/* Emit a runtime probe of (1 << log2_size) bytes at ptr for access type acc. */
static void gen_probe_access(DisasContext *s, TCGv_i64 ptr,
                             MMUAccessType acc, int log2_size)
{
    TCGv_i32 t_acc = tcg_const_i32(acc);
    TCGv_i32 t_idx = tcg_const_i32(get_mem_index(s));
    TCGv_i32 t_size = tcg_const_i32(1 << log2_size);

    gen_helper_probe_access(cpu_env, ptr, t_acc, t_idx, t_size);
    tcg_temp_free_i32(t_acc);
    tcg_temp_free_i32(t_idx);
    tcg_temp_free_i32(t_size);
}
253
254
255
256
257
258
259
/*
 * For MTE, check a single logical or atomic access.  This probes a single
 * address, the exact one specified.  When no tag check is required (or MTE
 * is inactive) the address is merely cleaned per TBI.
 * Returns a pool-tracked temp holding the address to use for the access.
 */
static TCGv_i64 gen_mte_check1_mmuidx(DisasContext *s, TCGv_i64 addr,
                                      bool is_write, bool tag_checked,
                                      int log2_size, bool is_unpriv,
                                      int core_idx)
{
    if (tag_checked && s->mte_active[is_unpriv]) {
        TCGv_i32 tcg_desc;
        TCGv_i64 ret;
        int desc = 0;

        /* Pack the check parameters into a MTEDESC descriptor word. */
        desc = FIELD_DP32(desc, MTEDESC, MIDX, core_idx);
        desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
        desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
        desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
        desc = FIELD_DP32(desc, MTEDESC, SIZEM1, (1 << log2_size) - 1);
        tcg_desc = tcg_const_i32(desc);

        ret = new_tmp_a64(s);
        gen_helper_mte_check(ret, cpu_env, tcg_desc, addr);
        tcg_temp_free_i32(tcg_desc);

        return ret;
    }
    return clean_data_tbi(s, addr);
}
285
286TCGv_i64 gen_mte_check1(DisasContext *s, TCGv_i64 addr, bool is_write,
287 bool tag_checked, int log2_size)
288{
289 return gen_mte_check1_mmuidx(s, addr, is_write, tag_checked, log2_size,
290 false, get_mem_index(s));
291}
292
293
294
295
/*
 * For MTE, check multiple logical sequential accesses of "size" bytes
 * total.  Falls back to a TBI clean when no tag check is required.
 */
TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write,
                        bool tag_checked, int size)
{
    if (tag_checked && s->mte_active[0]) {
        TCGv_i32 tcg_desc;
        TCGv_i64 ret;
        int desc = 0;

        /* Pack the check parameters into a MTEDESC descriptor word. */
        desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s));
        desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
        desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
        desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
        desc = FIELD_DP32(desc, MTEDESC, SIZEM1, size - 1);
        tcg_desc = tcg_const_i32(desc);

        ret = new_tmp_a64(s);
        gen_helper_mte_check(ret, cpu_env, tcg_desc, addr);
        tcg_temp_free_i32(tcg_desc);

        return ret;
    }
    return clean_data_tbi(s, addr);
}
319
/* A condition test on a 64-bit value: "value cond 0". */
typedef struct DisasCompare64 {
    TCGCond cond;
    TCGv_i64 value;
} DisasCompare64;
324
/* Translate a condition code cc into a 64-bit DisasCompare. */
static void a64_test_cc(DisasCompare64 *c64, int cc)
{
    DisasCompare c32;

    arm_test_cc(&c32, cc);

    /*
     * Sign-extend the 32-bit value so that the GE/LT comparisons work
     * correctly.
     */
    c64->cond = c32.cond;
    c64->value = tcg_temp_new_i64();
    tcg_gen_ext_i32_i64(c64->value, c32.value);

    arm_free_cc(&c32);
}
339
/* Release the temp allocated by a64_test_cc. */
static void a64_free_cc(DisasCompare64 *c64)
{
    tcg_temp_free_i64(c64->value);
}
344
/* Raise a QEMU-internal (non-architectural) exception. */
static void gen_exception_internal(int excp)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);

    assert(excp_is_internal(excp));
    gen_helper_exception_internal(cpu_env, tcg_excp);
    tcg_temp_free_i32(tcg_excp);
}
353
/* Set PC to the insn's address and raise an internal exception. */
static void gen_exception_internal_insn(DisasContext *s, uint64_t pc, int excp)
{
    gen_a64_set_pc_im(pc);
    gen_exception_internal(excp);
    s->base.is_jmp = DISAS_NORETURN;
}
360
/* Raise a BKPT exception with the given syndrome at the current insn. */
static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syndrome)
{
    TCGv_i32 tcg_syn;

    gen_a64_set_pc_im(s->pc_curr);
    tcg_syn = tcg_const_i32(syndrome);
    gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
    tcg_temp_free_i32(tcg_syn);
    s->base.is_jmp = DISAS_NORETURN;
}
371
static void gen_step_complete_exception(DisasContext *s)
{
    /*
     * We just completed step of an insn.  Move from Active-not-pending
     * to Active-pending, and then also take the swstep exception.
     * This corresponds to making the (IMPDEF) choice to prioritize
     * swstep exceptions over asynchronous exceptions taken to an exception
     * level where debug is disabled.  This choice has the advantage that
     * we do not need to maintain internal state corresponding to the
     * ISV/EX syndrome bits between completion of the step and generation
     * of the exception, and our syndrome information is always correct.
     */
    gen_ss_advance(s);
    gen_swstep_exception(s, 1, s->is_ldex);
    s->base.is_jmp = DISAS_NORETURN;
}
387
388static inline bool use_goto_tb(DisasContext *s, uint64_t dest)
389{
390 if (s->ss_active) {
391 return false;
392 }
393 return translator_use_goto_tb(&s->base, dest);
394}
395
/* Jump to constant address dest, chaining TBs directly when possible. */
static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest)
{
    if (use_goto_tb(s, dest)) {
        /* Direct chain: exit slot n of this TB after setting the PC. */
        tcg_gen_goto_tb(n);
        gen_a64_set_pc_im(dest);
        tcg_gen_exit_tb(s->base.tb, n);
        s->base.is_jmp = DISAS_NORETURN;
    } else {
        gen_a64_set_pc_im(dest);
        if (s->ss_active) {
            /* Single step: raise the step exception instead of jumping. */
            gen_step_complete_exception(s);
        } else {
            tcg_gen_lookup_and_goto_ptr();
            s->base.is_jmp = DISAS_NORETURN;
        }
    }
}
413
/* Reset the per-insn temp pool; poison the slots under CONFIG_DEBUG_TCG. */
static void init_tmp_a64_array(DisasContext *s)
{
#ifdef CONFIG_DEBUG_TCG
    memset(s->tmp_a64, 0, sizeof(s->tmp_a64));
#endif
    s->tmp_a64_count = 0;
}
421
422static void free_tmp_a64(DisasContext *s)
423{
424 int i;
425 for (i = 0; i < s->tmp_a64_count; i++) {
426 tcg_temp_free_i64(s->tmp_a64[i]);
427 }
428 init_tmp_a64_array(s);
429}
430
431TCGv_i64 new_tmp_a64(DisasContext *s)
432{
433 assert(s->tmp_a64_count < TMP_A64_MAX);
434 return s->tmp_a64[s->tmp_a64_count++] = tcg_temp_new_i64();
435}
436
437TCGv_i64 new_tmp_a64_local(DisasContext *s)
438{
439 assert(s->tmp_a64_count < TMP_A64_MAX);
440 return s->tmp_a64[s->tmp_a64_count++] = tcg_temp_local_new_i64();
441}
442
443TCGv_i64 new_tmp_a64_zero(DisasContext *s)
444{
445 TCGv_i64 t = new_tmp_a64(s);
446 tcg_gen_movi_i64(t, 0);
447 return t;
448}
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465TCGv_i64 cpu_reg(DisasContext *s, int reg)
466{
467 if (reg == 31) {
468 return new_tmp_a64_zero(s);
469 } else {
470 return cpu_X[reg];
471 }
472}
473
474
/* Return the register for Xn, treating register 31 as SP. */
TCGv_i64 cpu_reg_sp(DisasContext *s, int reg)
{
    return cpu_X[reg];
}
479
480
481
482
483
484TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf)
485{
486 TCGv_i64 v = new_tmp_a64(s);
487 if (reg != 31) {
488 if (sf) {
489 tcg_gen_mov_i64(v, cpu_X[reg]);
490 } else {
491 tcg_gen_ext32u_i64(v, cpu_X[reg]);
492 }
493 } else {
494 tcg_gen_movi_i64(v, 0);
495 }
496 return v;
497}
498
499TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf)
500{
501 TCGv_i64 v = new_tmp_a64(s);
502 if (sf) {
503 tcg_gen_mov_i64(v, cpu_X[reg]);
504 } else {
505 tcg_gen_ext32u_i64(v, cpu_X[reg]);
506 }
507 return v;
508}
509
510
511
512
513
514
/* Offset of element 0 of vector register Vn, for a value of "size". */
static inline int fp_reg_offset(DisasContext *s, int regno, MemOp size)
{
    return vec_reg_offset(s, regno, 0, size);
}
519
520
/* Offset of the high half of the 128-bit vector register Qn. */
static inline int fp_reg_hi_offset(DisasContext *s, int regno)
{
    return vec_reg_offset(s, regno, 1, MO_64);
}
525
526
527
528
529
530
531
/*
 * Load Dn into a newly allocated i64 temp (not pool-tracked;
 * the caller is responsible for freeing it).
 */
static TCGv_i64 read_fp_dreg(DisasContext *s, int reg)
{
    TCGv_i64 v = tcg_temp_new_i64();

    tcg_gen_ld_i64(v, cpu_env, fp_reg_offset(s, reg, MO_64));
    return v;
}
539
/* Load Sn into a newly allocated i32 temp; caller frees. */
static TCGv_i32 read_fp_sreg(DisasContext *s, int reg)
{
    TCGv_i32 v = tcg_temp_new_i32();

    tcg_gen_ld_i32(v, cpu_env, fp_reg_offset(s, reg, MO_32));
    return v;
}
547
/* Load Hn, zero-extended to 32 bits, into a new i32 temp; caller frees. */
static TCGv_i32 read_fp_hreg(DisasContext *s, int reg)
{
    TCGv_i32 v = tcg_temp_new_i32();

    tcg_gen_ld16u_i32(v, cpu_env, fp_reg_offset(s, reg, MO_16));
    return v;
}
555
556
557
558
/*
 * Clear the bits above an N-bit vector, for N = (is_q ? 128 : 64).
 * If SVE is not enabled, then there are only 128 bits in the vector.
 */
static void clear_vec_high(DisasContext *s, bool is_q, int rd)
{
    unsigned ofs = fp_reg_offset(s, rd, MO_64);
    unsigned vsz = vec_full_reg_size(s);

    /* Nop move, with side effect of clearing the tail. */
    tcg_gen_gvec_mov(MO_64, ofs, ofs, is_q ? 16 : 8, vsz);
}
567
/* Store v into Dn, clearing the remainder of the vector register. */
void write_fp_dreg(DisasContext *s, int reg, TCGv_i64 v)
{
    unsigned ofs = fp_reg_offset(s, reg, MO_64);

    tcg_gen_st_i64(v, cpu_env, ofs);
    clear_vec_high(s, false, reg);
}
575
576static void write_fp_sreg(DisasContext *s, int reg, TCGv_i32 v)
577{
578 TCGv_i64 tmp = tcg_temp_new_i64();
579
580 tcg_gen_extu_i32_i64(tmp, v);
581 write_fp_dreg(s, reg, tmp);
582 tcg_temp_free_i64(tmp);
583}
584
585
586static void gen_gvec_fn2(DisasContext *s, bool is_q, int rd, int rn,
587 GVecGen2Fn *gvec_fn, int vece)
588{
589 gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
590 is_q ? 16 : 8, vec_full_reg_size(s));
591}
592
593
594
595
596static void gen_gvec_fn2i(DisasContext *s, bool is_q, int rd, int rn,
597 int64_t imm, GVecGen2iFn *gvec_fn, int vece)
598{
599 gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
600 imm, is_q ? 16 : 8, vec_full_reg_size(s));
601}
602
603
604static void gen_gvec_fn3(DisasContext *s, bool is_q, int rd, int rn, int rm,
605 GVecGen3Fn *gvec_fn, int vece)
606{
607 gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
608 vec_full_reg_offset(s, rm), is_q ? 16 : 8, vec_full_reg_size(s));
609}
610
611
612static void gen_gvec_fn4(DisasContext *s, bool is_q, int rd, int rn, int rm,
613 int rx, GVecGen4Fn *gvec_fn, int vece)
614{
615 gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
616 vec_full_reg_offset(s, rm), vec_full_reg_offset(s, rx),
617 is_q ? 16 : 8, vec_full_reg_size(s));
618}
619
620
621static void gen_gvec_op2_ool(DisasContext *s, bool is_q, int rd,
622 int rn, int data, gen_helper_gvec_2 *fn)
623{
624 tcg_gen_gvec_2_ool(vec_full_reg_offset(s, rd),
625 vec_full_reg_offset(s, rn),
626 is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
627}
628
629
630static void gen_gvec_op3_ool(DisasContext *s, bool is_q, int rd,
631 int rn, int rm, int data, gen_helper_gvec_3 *fn)
632{
633 tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd),
634 vec_full_reg_offset(s, rn),
635 vec_full_reg_offset(s, rm),
636 is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
637}
638
639
640
641
/*
 * Expand a 3-operand + fpstatus pointer + simd data value operation using
 * an out-of-line helper.
 */
static void gen_gvec_op3_fpst(DisasContext *s, bool is_q, int rd, int rn,
                              int rm, bool is_fp16, int data,
                              gen_helper_gvec_3_ptr *fn)
{
    TCGv_ptr fpst = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
    tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm), fpst,
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
    tcg_temp_free_ptr(fpst);
}
653
654
/* Expand a 3-operand + qc saturation-flag pointer operation via a helper. */
static void gen_gvec_op3_qc(DisasContext *s, bool is_q, int rd, int rn,
                            int rm, gen_helper_gvec_3_ptr *fn)
{
    TCGv_ptr qc_ptr = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(qc_ptr, cpu_env, offsetof(CPUARMState, vfp.qc));
    tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm), qc_ptr,
                       is_q ? 16 : 8, vec_full_reg_size(s), 0, fn);
    tcg_temp_free_ptr(qc_ptr);
}
667
668
669static void gen_gvec_op4_ool(DisasContext *s, bool is_q, int rd, int rn,
670 int rm, int ra, int data, gen_helper_gvec_4 *fn)
671{
672 tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
673 vec_full_reg_offset(s, rn),
674 vec_full_reg_offset(s, rm),
675 vec_full_reg_offset(s, ra),
676 is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
677}
678
679
680
681
682
/*
 * Expand a 4-operand + fpstatus pointer + simd data value operation using
 * an out-of-line helper.
 */
static void gen_gvec_op4_fpst(DisasContext *s, bool is_q, int rd, int rn,
                              int rm, int ra, bool is_fp16, int data,
                              gen_helper_gvec_4_ptr *fn)
{
    TCGv_ptr fpst = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
    tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm),
                       vec_full_reg_offset(s, ra), fpst,
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
    tcg_temp_free_ptr(fpst);
}
695
696
697
698
/*
 * Set ZF and NF based on a 64 bit result.  NF takes the high half;
 * ZF is the OR of both halves (our ZF flag is "zero means Z set").
 */
static inline void gen_set_NZ64(TCGv_i64 result)
{
    tcg_gen_extr_i64_i32(cpu_ZF, cpu_NF, result);
    tcg_gen_or_i32(cpu_ZF, cpu_ZF, cpu_NF);
}
704
705
/* Set NZCV as for a logical operation: NZ as per result, CV cleared. */
static inline void gen_logic_CC(int sf, TCGv_i64 result)
{
    if (sf) {
        gen_set_NZ64(result);
    } else {
        tcg_gen_extrl_i64_i32(cpu_ZF, result);
        tcg_gen_mov_i32(cpu_NF, cpu_ZF);
    }
    tcg_gen_movi_i32(cpu_CF, 0);
    tcg_gen_movi_i32(cpu_VF, 0);
}
717
718
/* dest = T0 + T1; compute C, N, V and Z flags */
static void gen_add_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        TCGv_i64 result, flag, tmp;
        result = tcg_temp_new_i64();
        flag = tcg_temp_new_i64();
        tmp = tcg_temp_new_i64();

        /* Carry out of the 64-bit add lands in "flag". */
        tcg_gen_movi_i64(tmp, 0);
        tcg_gen_add2_i64(result, flag, t0, tmp, t1, tmp);

        tcg_gen_extrl_i64_i32(cpu_CF, flag);

        gen_set_NZ64(result);

        /* V = (result ^ t0) & ~(t0 ^ t1), taken from bit 63. */
        tcg_gen_xor_i64(flag, result, t0);
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_andc_i64(flag, flag, tmp);
        tcg_temp_free_i64(tmp);
        tcg_gen_extrh_i64_i32(cpu_VF, flag);

        tcg_gen_mov_i64(dest, result);
        tcg_temp_free_i64(result);
        tcg_temp_free_i64(flag);
    } else {
        /* 32 bit arithmetic */
        TCGv_i32 t0_32 = tcg_temp_new_i32();
        TCGv_i32 t1_32 = tcg_temp_new_i32();
        TCGv_i32 tmp = tcg_temp_new_i32();

        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_extrl_i64_i32(t0_32, t0);
        tcg_gen_extrl_i64_i32(t1_32, t1);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, t1_32, tmp);
        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
        /* Result written back zero-extended (32-bit form). */
        tcg_gen_extu_i32_i64(dest, cpu_NF);

        tcg_temp_free_i32(tmp);
        tcg_temp_free_i32(t0_32);
        tcg_temp_free_i32(t1_32);
    }
}
764
765
/* dest = T0 - T1; compute C, N, V and Z flags */
static void gen_sub_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        /* 64 bit arithmetic */
        TCGv_i64 result, flag, tmp;

        result = tcg_temp_new_i64();
        flag = tcg_temp_new_i64();
        tcg_gen_sub_i64(result, t0, t1);

        gen_set_NZ64(result);

        /* For subtraction, C is "no borrow": set when t0 >= t1 unsigned. */
        tcg_gen_setcond_i64(TCG_COND_GEU, flag, t0, t1);
        tcg_gen_extrl_i64_i32(cpu_CF, flag);

        /* V = (result ^ t0) & (t0 ^ t1), taken from bit 63. */
        tcg_gen_xor_i64(flag, result, t0);
        tmp = tcg_temp_new_i64();
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_and_i64(flag, flag, tmp);
        tcg_temp_free_i64(tmp);
        tcg_gen_extrh_i64_i32(cpu_VF, flag);
        tcg_gen_mov_i64(dest, result);
        tcg_temp_free_i64(flag);
        tcg_temp_free_i64(result);
    } else {
        /* 32 bit arithmetic */
        TCGv_i32 t0_32 = tcg_temp_new_i32();
        TCGv_i32 t1_32 = tcg_temp_new_i32();
        TCGv_i32 tmp;

        tcg_gen_extrl_i64_i32(t0_32, t0);
        tcg_gen_extrl_i64_i32(t1_32, t1);
        tcg_gen_sub_i32(cpu_NF, t0_32, t1_32);
        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0_32, t1_32);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_temp_free_i32(t0_32);
        tcg_temp_free_i32(t1_32);
        tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
        tcg_temp_free_i32(tmp);
        tcg_gen_extu_i32_i64(dest, cpu_NF);
    }
}
811
812
/* dest = T0 + T1 + CF; do not compute flags. */
static void gen_adc(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    TCGv_i64 flag = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(flag, cpu_CF);
    tcg_gen_add_i64(dest, t0, t1);
    tcg_gen_add_i64(dest, dest, flag);
    tcg_temp_free_i64(flag);

    if (!sf) {
        /* 32-bit form: zero-extend the result. */
        tcg_gen_ext32u_i64(dest, dest);
    }
}
825
826
/* dest = T0 + T1 + CF; compute C, N, V and Z flags. */
static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        TCGv_i64 result, cf_64, vf_64, tmp;
        result = tcg_temp_new_i64();
        cf_64 = tcg_temp_new_i64();
        vf_64 = tcg_temp_new_i64();
        tmp = tcg_const_i64(0);

        /* Two-step add with carry: t0 + CF, then + t1, tracking carry-out. */
        tcg_gen_extu_i32_i64(cf_64, cpu_CF);
        tcg_gen_add2_i64(result, cf_64, t0, tmp, cf_64, tmp);
        tcg_gen_add2_i64(result, cf_64, result, cf_64, t1, tmp);
        tcg_gen_extrl_i64_i32(cpu_CF, cf_64);
        gen_set_NZ64(result);

        /* V = (result ^ t0) & ~(t0 ^ t1), taken from bit 63. */
        tcg_gen_xor_i64(vf_64, result, t0);
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_andc_i64(vf_64, vf_64, tmp);
        tcg_gen_extrh_i64_i32(cpu_VF, vf_64);

        tcg_gen_mov_i64(dest, result);

        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(vf_64);
        tcg_temp_free_i64(cf_64);
        tcg_temp_free_i64(result);
    } else {
        TCGv_i32 t0_32, t1_32, tmp;
        t0_32 = tcg_temp_new_i32();
        t1_32 = tcg_temp_new_i32();
        tmp = tcg_const_i32(0);

        tcg_gen_extrl_i64_i32(t0_32, t0);
        tcg_gen_extrl_i64_i32(t1_32, t1);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1_32, tmp);

        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
        tcg_gen_extu_i32_i64(dest, cpu_NF);

        tcg_temp_free_i32(tmp);
        tcg_temp_free_i32(t1_32);
        tcg_temp_free_i32(t0_32);
    }
}
875
876
877
878
879
880
881
882
/*
 * Store from GPR register to memory using the given memory index.
 * If iss_valid, also record an ISS (instruction specific syndrome)
 * for a possible data abort on this access.
 */
static void do_gpr_st_memidx(DisasContext *s, TCGv_i64 source,
                             TCGv_i64 tcg_addr, MemOp memop, int memidx,
                             bool iss_valid,
                             unsigned int iss_srt,
                             bool iss_sf, bool iss_ar)
{
    memop = finalize_memop(s, memop);
    tcg_gen_qemu_st_i64(source, tcg_addr, memidx, memop);

    if (iss_valid) {
        uint32_t syn;

        syn = syn_data_abort_with_iss(0,
                                      (memop & MO_SIZE),
                                      false,
                                      iss_srt,
                                      iss_sf,
                                      iss_ar,
                                      0, 0, 0, 0, 0, false);
        disas_set_insn_syndrome(s, syn);
    }
}
905
906static void do_gpr_st(DisasContext *s, TCGv_i64 source,
907 TCGv_i64 tcg_addr, MemOp memop,
908 bool iss_valid,
909 unsigned int iss_srt,
910 bool iss_sf, bool iss_ar)
911{
912 do_gpr_st_memidx(s, source, tcg_addr, memop, get_mem_index(s),
913 iss_valid, iss_srt, iss_sf, iss_ar);
914}
915
916
917
918
/*
 * Load from memory to GPR register, using the given memory index.
 * With "extend", a sign-extending sub-64-bit load is re-zero-extended
 * to 32 bits (for writing back a W register).  Optionally records an
 * ISS syndrome as for do_gpr_st_memidx.
 */
static void do_gpr_ld_memidx(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
                             MemOp memop, bool extend, int memidx,
                             bool iss_valid, unsigned int iss_srt,
                             bool iss_sf, bool iss_ar)
{
    memop = finalize_memop(s, memop);
    tcg_gen_qemu_ld_i64(dest, tcg_addr, memidx, memop);

    if (extend && (memop & MO_SIGN)) {
        g_assert((memop & MO_SIZE) <= MO_32);
        tcg_gen_ext32u_i64(dest, dest);
    }

    if (iss_valid) {
        uint32_t syn;

        syn = syn_data_abort_with_iss(0,
                                      (memop & MO_SIZE),
                                      (memop & MO_SIGN) != 0,
                                      iss_srt,
                                      iss_sf,
                                      iss_ar,
                                      0, 0, 0, 0, 0, false);
        disas_set_insn_syndrome(s, syn);
    }
}
945
946static void do_gpr_ld(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
947 MemOp memop, bool extend,
948 bool iss_valid, unsigned int iss_srt,
949 bool iss_sf, bool iss_ar)
950{
951 do_gpr_ld_memidx(s, dest, tcg_addr, memop, extend, get_mem_index(s),
952 iss_valid, iss_srt, iss_sf, iss_ar);
953}
954
955
956
957
/* Store from FP register to memory. */
static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, int size)
{
    /* This writes the bottom N bits of a 128 bit wide vector to memory */
    TCGv_i64 tmplo = tcg_temp_new_i64();
    MemOp mop;

    tcg_gen_ld_i64(tmplo, cpu_env, fp_reg_offset(s, srcidx, MO_64));

    if (size < 4) {
        mop = finalize_memop(s, size);
        tcg_gen_qemu_st_i64(tmplo, tcg_addr, get_mem_index(s), mop);
    } else {
        /* 128-bit store: two 64-bit halves, ordered per target endianness. */
        bool be = s->be_data == MO_BE;
        TCGv_i64 tcg_hiaddr = tcg_temp_new_i64();
        TCGv_i64 tmphi = tcg_temp_new_i64();

        tcg_gen_ld_i64(tmphi, cpu_env, fp_reg_hi_offset(s, srcidx));

        mop = s->be_data | MO_UQ;
        /* Only the first half carries the (16-byte) alignment check. */
        tcg_gen_qemu_st_i64(be ? tmphi : tmplo, tcg_addr, get_mem_index(s),
                            mop | (s->align_mem ? MO_ALIGN_16 : 0));
        tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
        tcg_gen_qemu_st_i64(be ? tmplo : tmphi, tcg_hiaddr,
                            get_mem_index(s), mop);

        tcg_temp_free_i64(tcg_hiaddr);
        tcg_temp_free_i64(tmphi);
    }

    tcg_temp_free_i64(tmplo);
}
989
990
991
992
/* Load from memory to FP register. */
static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size)
{
    /* This always zero-extends and writes to a full 128 bit wide vector */
    TCGv_i64 tmplo = tcg_temp_new_i64();
    TCGv_i64 tmphi = NULL;
    MemOp mop;

    if (size < 4) {
        mop = finalize_memop(s, size);
        tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), mop);
    } else {
        /* 128-bit load: two 64-bit halves, ordered per target endianness. */
        bool be = s->be_data == MO_BE;
        TCGv_i64 tcg_hiaddr;

        tmphi = tcg_temp_new_i64();
        tcg_hiaddr = tcg_temp_new_i64();

        mop = s->be_data | MO_UQ;
        /* Only the first half carries the (16-byte) alignment check. */
        tcg_gen_qemu_ld_i64(be ? tmphi : tmplo, tcg_addr, get_mem_index(s),
                            mop | (s->align_mem ? MO_ALIGN_16 : 0));
        tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
        tcg_gen_qemu_ld_i64(be ? tmplo : tmphi, tcg_hiaddr,
                            get_mem_index(s), mop);
        tcg_temp_free_i64(tcg_hiaddr);
    }

    tcg_gen_st_i64(tmplo, cpu_env, fp_reg_offset(s, destidx, MO_64));
    tcg_temp_free_i64(tmplo);

    if (tmphi) {
        tcg_gen_st_i64(tmphi, cpu_env, fp_reg_hi_offset(s, destidx));
        tcg_temp_free_i64(tmphi);
    }
    /* Zero the rest of the vector register. */
    clear_vec_high(s, tmphi != NULL, destidx);
}
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042static void read_vec_element(DisasContext *s, TCGv_i64 tcg_dest, int srcidx,
1043 int element, MemOp memop)
1044{
1045 int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
1046 switch ((unsigned)memop) {
1047 case MO_8:
1048 tcg_gen_ld8u_i64(tcg_dest, cpu_env, vect_off);
1049 break;
1050 case MO_16:
1051 tcg_gen_ld16u_i64(tcg_dest, cpu_env, vect_off);
1052 break;
1053 case MO_32:
1054 tcg_gen_ld32u_i64(tcg_dest, cpu_env, vect_off);
1055 break;
1056 case MO_8|MO_SIGN:
1057 tcg_gen_ld8s_i64(tcg_dest, cpu_env, vect_off);
1058 break;
1059 case MO_16|MO_SIGN:
1060 tcg_gen_ld16s_i64(tcg_dest, cpu_env, vect_off);
1061 break;
1062 case MO_32|MO_SIGN:
1063 tcg_gen_ld32s_i64(tcg_dest, cpu_env, vect_off);
1064 break;
1065 case MO_64:
1066 case MO_64|MO_SIGN:
1067 tcg_gen_ld_i64(tcg_dest, cpu_env, vect_off);
1068 break;
1069 default:
1070 g_assert_not_reached();
1071 }
1072}
1073
1074static void read_vec_element_i32(DisasContext *s, TCGv_i32 tcg_dest, int srcidx,
1075 int element, MemOp memop)
1076{
1077 int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
1078 switch (memop) {
1079 case MO_8:
1080 tcg_gen_ld8u_i32(tcg_dest, cpu_env, vect_off);
1081 break;
1082 case MO_16:
1083 tcg_gen_ld16u_i32(tcg_dest, cpu_env, vect_off);
1084 break;
1085 case MO_8|MO_SIGN:
1086 tcg_gen_ld8s_i32(tcg_dest, cpu_env, vect_off);
1087 break;
1088 case MO_16|MO_SIGN:
1089 tcg_gen_ld16s_i32(tcg_dest, cpu_env, vect_off);
1090 break;
1091 case MO_32:
1092 case MO_32|MO_SIGN:
1093 tcg_gen_ld_i32(tcg_dest, cpu_env, vect_off);
1094 break;
1095 default:
1096 g_assert_not_reached();
1097 }
1098}
1099
1100
1101static void write_vec_element(DisasContext *s, TCGv_i64 tcg_src, int destidx,
1102 int element, MemOp memop)
1103{
1104 int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
1105 switch (memop) {
1106 case MO_8:
1107 tcg_gen_st8_i64(tcg_src, cpu_env, vect_off);
1108 break;
1109 case MO_16:
1110 tcg_gen_st16_i64(tcg_src, cpu_env, vect_off);
1111 break;
1112 case MO_32:
1113 tcg_gen_st32_i64(tcg_src, cpu_env, vect_off);
1114 break;
1115 case MO_64:
1116 tcg_gen_st_i64(tcg_src, cpu_env, vect_off);
1117 break;
1118 default:
1119 g_assert_not_reached();
1120 }
1121}
1122
1123static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src,
1124 int destidx, int element, MemOp memop)
1125{
1126 int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
1127 switch (memop) {
1128 case MO_8:
1129 tcg_gen_st8_i32(tcg_src, cpu_env, vect_off);
1130 break;
1131 case MO_16:
1132 tcg_gen_st16_i32(tcg_src, cpu_env, vect_off);
1133 break;
1134 case MO_32:
1135 tcg_gen_st_i32(tcg_src, cpu_env, vect_off);
1136 break;
1137 default:
1138 g_assert_not_reached();
1139 }
1140}
1141
1142
1143static void do_vec_st(DisasContext *s, int srcidx, int element,
1144 TCGv_i64 tcg_addr, MemOp mop)
1145{
1146 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
1147
1148 read_vec_element(s, tcg_tmp, srcidx, element, mop & MO_SIZE);
1149 tcg_gen_qemu_st_i64(tcg_tmp, tcg_addr, get_mem_index(s), mop);
1150
1151 tcg_temp_free_i64(tcg_tmp);
1152}
1153
1154
1155static void do_vec_ld(DisasContext *s, int destidx, int element,
1156 TCGv_i64 tcg_addr, MemOp mop)
1157{
1158 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
1159
1160 tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr, get_mem_index(s), mop);
1161 write_vec_element(s, tcg_tmp, destidx, element, mop & MO_SIZE);
1162
1163 tcg_temp_free_i64(tcg_tmp);
1164}
1165
1166
1167
1168
1169
1170
1171
1172
/*
 * Check that FP/Neon access is enabled.  If it is, return true.
 * If not, emit code to generate an appropriate exception and return false;
 * the caller should not emit any code for the instruction.  Note that
 * this check must happen after all unallocated-encoding checks (otherwise
 * the syndrome information would be incorrect).
 */
static bool fp_access_check(DisasContext *s)
{
    if (s->fp_excp_el) {
        /* Only one FP access check per instruction. */
        assert(!s->fp_access_checked);
        s->fp_access_checked = true;

        gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
                           syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
        return false;
    }
    s->fp_access_checked = true;
    return true;
}
1186
1187
1188
1189
/*
 * Check that SVE access is enabled.  If it is, return true.
 * If not, emit code to generate an appropriate exception and return false.
 */
bool sve_access_check(DisasContext *s)
{
    if (s->sve_excp_el) {
        /* Only one SVE access check per instruction. */
        assert(!s->sve_access_checked);
        s->sve_access_checked = true;

        gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
                           syn_sve_access_trap(), s->sve_excp_el);
        return false;
    }
    s->sve_access_checked = true;
    /* SVE also requires the FP unit to be accessible. */
    return fp_access_check(s);
}
1203
1204
1205
1206
1207
1208
1209static void ext_and_shift_reg(TCGv_i64 tcg_out, TCGv_i64 tcg_in,
1210 int option, unsigned int shift)
1211{
1212 int extsize = extract32(option, 0, 2);
1213 bool is_signed = extract32(option, 2, 1);
1214
1215 if (is_signed) {
1216 switch (extsize) {
1217 case 0:
1218 tcg_gen_ext8s_i64(tcg_out, tcg_in);
1219 break;
1220 case 1:
1221 tcg_gen_ext16s_i64(tcg_out, tcg_in);
1222 break;
1223 case 2:
1224 tcg_gen_ext32s_i64(tcg_out, tcg_in);
1225 break;
1226 case 3:
1227 tcg_gen_mov_i64(tcg_out, tcg_in);
1228 break;
1229 }
1230 } else {
1231 switch (extsize) {
1232 case 0:
1233 tcg_gen_ext8u_i64(tcg_out, tcg_in);
1234 break;
1235 case 1:
1236 tcg_gen_ext16u_i64(tcg_out, tcg_in);
1237 break;
1238 case 2:
1239 tcg_gen_ext32u_i64(tcg_out, tcg_in);
1240 break;
1241 case 3:
1242 tcg_gen_mov_i64(tcg_out, tcg_in);
1243 break;
1244 }
1245 }
1246
1247 if (shift) {
1248 tcg_gen_shli_i64(tcg_out, tcg_out, shift);
1249 }
1250}
1251
static inline void gen_check_sp_alignment(DisasContext *s)
{
    /*
     * The architecture mandates (if enabled via PSTATE or SCTLR bits)
     * a check that SP is 16-aligned on every SP-relative load or store,
     * with an exception generated if it is not.  In line with general
     * QEMU practice regarding misaligned accesses, this check is
     * deliberately omitted for the sake of guest execution speed, so
     * this function is intentionally a no-op.
     */
}
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276static inline AArch64DecodeFn *lookup_disas_fn(const AArch64DecodeTable *table,
1277 uint32_t insn)
1278{
1279 const AArch64DecodeTable *tptr = table;
1280
1281 while (tptr->mask) {
1282 if ((insn & tptr->mask) == tptr->pattern) {
1283 return tptr->disas_fn;
1284 }
1285 tptr++;
1286 }
1287 return NULL;
1288}
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
/*
 * Unconditional branch (immediate)
 *   31  30       26                 25    0
 * +----+-----------+-------------------------------------+
 * | op | 0 0 1 0 1 |                 imm26               |
 * +----+-----------+-------------------------------------+
 */
static void disas_uncond_b_imm(DisasContext *s, uint32_t insn)
{
    uint64_t addr = s->pc_curr + sextract32(insn, 0, 26) * 4;

    if (insn & (1U << 31)) {
        /* BL Branch with link: write the return address to LR (X30). */
        tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next);
    }

    /* B Branch / BL Branch with link */
    reset_btype(s);
    gen_goto_tb(s, 0, addr);
}
1317
1318
1319
1320
1321
1322
1323
/*
 * Compare and branch (immediate): CBZ / CBNZ
 *   31  30         25  24  23                  5 4      0
 * +----+-------------+----+---------------------+--------+
 * | sf | 0 1 1 0 1 0 | op |         imm19       |   Rt   |
 * +----+-------------+----+---------------------+--------+
 */
static void disas_comp_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int sf, op, rt;
    uint64_t addr;
    TCGLabel *label_match;
    TCGv_i64 tcg_cmp;

    sf = extract32(insn, 31, 1);
    op = extract32(insn, 24, 1); /* 0: CBZ; 1: CBNZ */
    rt = extract32(insn, 0, 5);
    addr = s->pc_curr + sextract32(insn, 5, 19) * 4;

    tcg_cmp = read_cpu_reg(s, rt, sf);
    label_match = gen_new_label();

    reset_btype(s);
    tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
                        tcg_cmp, 0, label_match);

    /* Fall-through path: continue at the next insn. */
    gen_goto_tb(s, 0, s->base.pc_next);
    gen_set_label(label_match);
    gen_goto_tb(s, 1, addr);
}
1347
1348
1349
1350
1351
1352
1353
/*
 * Test and branch (immediate): TBZ / TBNZ
 *   31  30         25  24  23   19 18          5 4    0
 * +----+-------------+----+-------+-------------+------+
 * | b5 | 0 1 1 0 1 1 | op |  b40  |    imm14    |  Rt  |
 * +----+-------------+----+-------+-------------+------+
 */
static void disas_test_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int bit_pos, op, rt;
    uint64_t addr;
    TCGLabel *label_match;
    TCGv_i64 tcg_cmp;

    /* Bit position is b5:b40 (6 bits). */
    bit_pos = (extract32(insn, 31, 1) << 5) | extract32(insn, 19, 5);
    op = extract32(insn, 24, 1); /* 0: TBZ; 1: TBNZ */
    addr = s->pc_curr + sextract32(insn, 5, 14) * 4;
    rt = extract32(insn, 0, 5);

    tcg_cmp = tcg_temp_new_i64();
    tcg_gen_andi_i64(tcg_cmp, cpu_reg(s, rt), (1ULL << bit_pos));
    label_match = gen_new_label();

    reset_btype(s);
    tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
                        tcg_cmp, 0, label_match);
    tcg_temp_free_i64(tcg_cmp);
    /* Fall-through path: continue at the next insn. */
    gen_goto_tb(s, 0, s->base.pc_next);
    gen_set_label(label_match);
    gen_goto_tb(s, 1, addr);
}
1378
1379
1380
1381
1382
1383
1384
/*
 * Conditional branch (immediate): B.cond
 *  31           25  24  23                  5   4  3    0
 * +---------------+----+---------------------+----+------+
 * | 0 1 0 1 0 1 0 | o1 |         imm19       | o0 | cond |
 * +---------------+----+---------------------+----+------+
 */
static void disas_cond_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int cond;
    uint64_t addr;

    if ((insn & (1 << 4)) || (insn & (1 << 24))) {
        /* o0 and o1 must both be zero. */
        unallocated_encoding(s);
        return;
    }
    addr = s->pc_curr + sextract32(insn, 5, 19) * 4;
    cond = extract32(insn, 0, 4);

    reset_btype(s);
    if (cond < 0x0e) {
        /* genuinely conditional branches */
        TCGLabel *label_match = gen_new_label();
        arm_gen_test_cc(cond, label_match);
        gen_goto_tb(s, 0, s->base.pc_next);
        gen_set_label(label_match);
        gen_goto_tb(s, 1, addr);
    } else {
        /* 0xe and 0xf are both "always" conditions */
        gen_goto_tb(s, 0, addr);
    }
}
1410
1411
/* HINT instruction group, including various allocated HINTs */
static void handle_hint(DisasContext *s, uint32_t insn,
                        unsigned int op1, unsigned int op2, unsigned int crm)
{
    unsigned int selector = crm << 3 | op2;

    if (op1 != 3) {
        unallocated_encoding(s);
        return;
    }

    switch (selector) {
    case 0b00000: /* NOP */
        break;
    case 0b00011: /* WFI */
        s->base.is_jmp = DISAS_WFI;
        break;
    case 0b00001: /* YIELD */
        /* When running in MTTCG we don't generate jumps to the yield and
         * WFE helpers as it won't affect the scheduling of other vCPUs.
         * If we wanted to more completely model WFE/SEV so we don't
         * busy spin unnecessarily we would need to do something more
         * involved.
         */
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            s->base.is_jmp = DISAS_YIELD;
        }
        break;
    case 0b00010: /* WFE */
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            s->base.is_jmp = DISAS_WFE;
        }
        break;
    case 0b00100: /* SEV */
    case 0b00101: /* SEVL */
        /* we treat all as NOP at least for now */
        break;
    case 0b00111: /* XPACLRI */
        if (s->pauth_active) {
            gen_helper_xpaci(cpu_X[30], cpu_env, cpu_X[30]);
        }
        break;
    case 0b01000: /* PACIA1716 */
        if (s->pauth_active) {
            gen_helper_pacia(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
        }
        break;
    case 0b01010: /* PACIB1716 */
        if (s->pauth_active) {
            gen_helper_pacib(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
        }
        break;
    case 0b01100: /* AUTIA1716 */
        if (s->pauth_active) {
            gen_helper_autia(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
        }
        break;
    case 0b01110: /* AUTIB1716 */
        if (s->pauth_active) {
            gen_helper_autib(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
        }
        break;
    case 0b11000: /* PACIAZ */
        if (s->pauth_active) {
            gen_helper_pacia(cpu_X[30], cpu_env, cpu_X[30],
                             new_tmp_a64_zero(s));
        }
        break;
    case 0b11001: /* PACIASP */
        if (s->pauth_active) {
            gen_helper_pacia(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
        }
        break;
    case 0b11010: /* PACIBZ */
        if (s->pauth_active) {
            gen_helper_pacib(cpu_X[30], cpu_env, cpu_X[30],
                             new_tmp_a64_zero(s));
        }
        break;
    case 0b11011: /* PACIBSP */
        if (s->pauth_active) {
            gen_helper_pacib(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
        }
        break;
    case 0b11100: /* AUTIAZ */
        if (s->pauth_active) {
            gen_helper_autia(cpu_X[30], cpu_env, cpu_X[30],
                             new_tmp_a64_zero(s));
        }
        break;
    case 0b11101: /* AUTIASP */
        if (s->pauth_active) {
            gen_helper_autia(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
        }
        break;
    case 0b11110: /* AUTIBZ */
        if (s->pauth_active) {
            gen_helper_autib(cpu_X[30], cpu_env, cpu_X[30],
                             new_tmp_a64_zero(s));
        }
        break;
    case 0b11111: /* AUTIBSP */
        if (s->pauth_active) {
            gen_helper_autib(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
        }
        break;
    default:
        /* default specified as NOP equivalent */
        break;
    }
}
1521
/* CLREX: clear the local exclusive monitor by storing -1 as the
 * recorded exclusive address, so a following store-exclusive's
 * address comparison will fail.
 */
static void gen_clrex(DisasContext *s, uint32_t insn)
{
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
1526
1527
/* CLREX, DSB, DMB, ISB */
static void handle_sync(DisasContext *s, uint32_t insn,
                        unsigned int op1, unsigned int op2, unsigned int crm)
{
    TCGBar bar;

    if (op1 != 3) {
        unallocated_encoding(s);
        return;
    }

    switch (op2) {
    case 2: /* CLREX */
        gen_clrex(s, insn);
        return;
    case 4: /* DSB */
    case 5: /* DMB */
        switch (crm & 3) { /* domain ignored; only the required-access types */
        case 1: /* MBReqTypes_Reads */
            bar = TCG_BAR_SC | TCG_MO_LD_LD | TCG_MO_LD_ST;
            break;
        case 2: /* MBReqTypes_Writes */
            bar = TCG_BAR_SC | TCG_MO_ST_ST;
            break;
        default: /* MBReqTypes_All */
            bar = TCG_BAR_SC | TCG_MO_ALL;
            break;
        }
        tcg_gen_mb(bar);
        return;
    case 6: /* ISB */
        /* We need to break the TB after this insn to execute
         * self-modifying code correctly and also to take
         * any pending interrupts immediately.
         */
        reset_btype(s);
        gen_goto_tb(s, 0, s->base.pc_next);
        return;

    case 7: /* SB */
        if (crm != 0 || !dc_isar_feature(aa64_sb, s)) {
            goto do_unallocated;
        }
        /*
         * TODO: There is no speculation barrier opcode for TCG;
         * MB and end the TB instead.
         */
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        gen_goto_tb(s, 0, s->base.pc_next);
        return;

    default:
    do_unallocated:
        unallocated_encoding(s);
        return;
    }
}
1584
/* XAFLAG: convert the float-compare flag encoding back to the
 * native PSTATE.NZCV encoding, operating on the cached flag fields.
 */
static void gen_xaflag(void)
{
    TCGv_i32 z = tcg_temp_new_i32();

    /* z = 1 iff the Z flag is currently set (cpu_ZF == 0 means Z set) */
    tcg_gen_setcondi_i32(TCG_COND_EQ, z, cpu_ZF, 0);

    /*
     * N in bit 31 of cpu_NF:
     * (!C & !Z) << 31
     * (!(C | Z)) << 31
     * ~((C | Z) << 31)
     * ~-(C | Z)
     * (C | Z) - 1
     */
    tcg_gen_or_i32(cpu_NF, cpu_CF, z);
    tcg_gen_subi_i32(cpu_NF, cpu_NF, 1);

    /* Z: !(Z & C); cpu_ZF is "set iff zero", so store the inverse */
    tcg_gen_and_i32(cpu_ZF, z, cpu_CF);
    tcg_gen_xori_i32(cpu_ZF, cpu_ZF, 1);

    /* V: (!C & Z) << 31 -> -(Z & ~C) puts the bit into bit 31 of cpu_VF */
    tcg_gen_andc_i32(cpu_VF, z, cpu_CF);
    tcg_gen_neg_i32(cpu_VF, cpu_VF);

    /* C: C | Z */
    tcg_gen_or_i32(cpu_CF, cpu_CF, z);

    tcg_temp_free_i32(z);
}
1614
/* AXFLAG: convert the native PSTATE.NZCV encoding to the
 * float-compare flag encoding: N and V are cleared, C := C & !V,
 * Z := Z & !V.
 */
static void gen_axflag(void)
{
    tcg_gen_sari_i32(cpu_VF, cpu_VF, 31);         /* V ? -1 : 0 */
    tcg_gen_andc_i32(cpu_CF, cpu_CF, cpu_VF);     /* C & !V */

    /* !(Z | V) -> !(!ZF | V) -> ZF & !V -> ZF & ~VF */
    tcg_gen_andc_i32(cpu_ZF, cpu_ZF, cpu_VF);

    tcg_gen_movi_i32(cpu_NF, 0);                  /* N := 0 */
    tcg_gen_movi_i32(cpu_VF, 0);                  /* V := 0 */
}
1626
1627
/* MSR (immediate) - move immediate to processor state field */
static void handle_msr_i(DisasContext *s, uint32_t insn,
                         unsigned int op1, unsigned int op2, unsigned int crm)
{
    TCGv_i32 t1;
    int op = op1 << 3 | op2;

    /* End the TB by default, chaining is ok.  Individual cases
     * below override this with DISAS_NEXT or a stronger exit.
     */
    s->base.is_jmp = DISAS_TOO_MANY;

    switch (op) {
    case 0x00: /* CFINV */
        if (crm != 0 || !dc_isar_feature(aa64_condm_4, s)) {
            goto do_unallocated;
        }
        tcg_gen_xori_i32(cpu_CF, cpu_CF, 1);
        s->base.is_jmp = DISAS_NEXT;
        break;

    case 0x01: /* XAFlag */
        if (crm != 0 || !dc_isar_feature(aa64_condm_5, s)) {
            goto do_unallocated;
        }
        gen_xaflag();
        s->base.is_jmp = DISAS_NEXT;
        break;

    case 0x02: /* AXFlag */
        if (crm != 0 || !dc_isar_feature(aa64_condm_5, s)) {
            goto do_unallocated;
        }
        gen_axflag();
        s->base.is_jmp = DISAS_NEXT;
        break;

    case 0x03: /* UAO */
        if (!dc_isar_feature(aa64_uao, s) || s->current_el == 0) {
            goto do_unallocated;
        }
        if (crm & 1) {
            set_pstate_bits(PSTATE_UAO);
        } else {
            clear_pstate_bits(PSTATE_UAO);
        }
        /* UAO is cached in hflags; rebuild them for the next TB */
        t1 = tcg_const_i32(s->current_el);
        gen_helper_rebuild_hflags_a64(cpu_env, t1);
        tcg_temp_free_i32(t1);
        break;

    case 0x04: /* PAN */
        if (!dc_isar_feature(aa64_pan, s) || s->current_el == 0) {
            goto do_unallocated;
        }
        if (crm & 1) {
            set_pstate_bits(PSTATE_PAN);
        } else {
            clear_pstate_bits(PSTATE_PAN);
        }
        /* PAN affects the MMU index cached in hflags */
        t1 = tcg_const_i32(s->current_el);
        gen_helper_rebuild_hflags_a64(cpu_env, t1);
        tcg_temp_free_i32(t1);
        break;

    case 0x05: /* SPSel */
        if (s->current_el == 0) {
            goto do_unallocated;
        }
        t1 = tcg_const_i32(crm & PSTATE_SP);
        gen_helper_msr_i_spsel(cpu_env, t1);
        tcg_temp_free_i32(t1);
        break;

    case 0x19: /* SSBS */
        if (!dc_isar_feature(aa64_ssbs, s)) {
            goto do_unallocated;
        }
        if (crm & 1) {
            set_pstate_bits(PSTATE_SSBS);
        } else {
            clear_pstate_bits(PSTATE_SSBS);
        }
        /* Don't need to rebuild hflags since SSBS is a nop */
        break;

    case 0x1a: /* DIT */
        if (!dc_isar_feature(aa64_dit, s)) {
            goto do_unallocated;
        }
        if (crm & 1) {
            set_pstate_bits(PSTATE_DIT);
        } else {
            clear_pstate_bits(PSTATE_DIT);
        }
        /* There's no need to rebuild hflags because DIT is a nop */
        break;

    case 0x1e: /* DAIFSet */
        t1 = tcg_const_i32(crm);
        gen_helper_msr_i_daifset(cpu_env, t1);
        tcg_temp_free_i32(t1);
        break;

    case 0x1f: /* DAIFClear */
        t1 = tcg_const_i32(crm);
        gen_helper_msr_i_daifclear(cpu_env, t1);
        tcg_temp_free_i32(t1);
        /* For DAIFClear, exit the cpu loop to re-evaluate pending IRQs.  */
        s->base.is_jmp = DISAS_UPDATE_EXIT;
        break;

    case 0x1c: /* TCO */
        if (dc_isar_feature(aa64_mte, s)) {
            /* Full MTE is enabled -- set the TCO bit as directed. */
            if (crm & 1) {
                set_pstate_bits(PSTATE_TCO);
            } else {
                clear_pstate_bits(PSTATE_TCO);
            }
            t1 = tcg_const_i32(s->current_el);
            gen_helper_rebuild_hflags_a64(cpu_env, t1);
            tcg_temp_free_i32(t1);
            /* Many factors, including TCO, go into MTE_ACTIVE. */
            s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
        } else if (dc_isar_feature(aa64_mte_insn_reg, s)) {
            /* Only "instructions accessible at EL0" -- PSTATE.TCO is WI.  */
            s->base.is_jmp = DISAS_NEXT;
        } else {
            goto do_unallocated;
        }
        break;

    default:
    do_unallocated:
        unallocated_encoding(s);
        return;
    }
}
1764
/* Pack the current NZCV flags into bits [31:28] of tcg_rt
 * (the format used by the NZCV system register read).
 */
static void gen_get_nzcv(TCGv_i64 tcg_rt)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    TCGv_i32 nzcv = tcg_temp_new_i32();

    /* build bit 31, N */
    tcg_gen_andi_i32(nzcv, cpu_NF, (1U << 31));
    /* build bit 30, Z (cpu_ZF holds "zero iff result was zero") */
    tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_ZF, 0);
    tcg_gen_deposit_i32(nzcv, nzcv, tmp, 30, 1);
    /* build bit 29, C */
    tcg_gen_deposit_i32(nzcv, nzcv, cpu_CF, 29, 1);
    /* build bit 28, V (sign bit of cpu_VF) */
    tcg_gen_shri_i32(tmp, cpu_VF, 31);
    tcg_gen_deposit_i32(nzcv, nzcv, tmp, 28, 1);
    /* generate result, zero-extended to 64 bits */
    tcg_gen_extu_i32_i64(tcg_rt, nzcv);

    tcg_temp_free_i32(nzcv);
    tcg_temp_free_i32(tmp);
}
1786
/* Unpack bits [31:28] of tcg_rt into the cached NZCV flag fields
 * (the format used by the NZCV system register write).
 */
static void gen_set_nzcv(TCGv_i64 tcg_rt)
{
    TCGv_i32 nzcv = tcg_temp_new_i32();

    /* take NZCV from R[t] */
    tcg_gen_extrl_i64_i32(nzcv, tcg_rt);

    /* bit 31, N */
    tcg_gen_andi_i32(cpu_NF, nzcv, (1U << 31));
    /* bit 30, Z: cpu_ZF must become 0 iff Z is set */
    tcg_gen_andi_i32(cpu_ZF, nzcv, (1 << 30));
    tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_ZF, cpu_ZF, 0);
    /* bit 29, C */
    tcg_gen_andi_i32(cpu_CF, nzcv, (1 << 29));
    tcg_gen_shri_i32(cpu_CF, cpu_CF, 29);
    /* bit 28, V: shift up into the sign bit of cpu_VF */
    tcg_gen_andi_i32(cpu_VF, nzcv, (1 << 28));
    tcg_gen_shli_i32(cpu_VF, cpu_VF, 3);
    tcg_temp_free_i32(nzcv);
}
1807
1808
1809
1810
1811
1812
1813
1814
/* MRS - move from system register
 * MSR (register) - move to system register
 * SYS
 * SYSL
 * These are all essentially the same insn in 'read' and 'write'
 * versions, with varying op0 fields.
 */
static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
                       unsigned int op0, unsigned int op1, unsigned int op2,
                       unsigned int crn, unsigned int crm, unsigned int rt)
{
    const ARMCPRegInfo *ri;
    TCGv_i64 tcg_rt;

    ri = get_arm_cp_reginfo(s->cp_regs,
                            ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP,
                                               crn, crm, op0, op1, op2));

    if (!ri) {
        /* Unknown register; this might be a guest error or a QEMU
         * unimplemented feature.
         */
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch64 "
                      "system register op0:%d op1:%d crn:%d crm:%d op2:%d\n",
                      isread ? "read" : "write", op0, op1, crn, crm, op2);
        unallocated_encoding(s);
        return;
    }

    /* Check access permissions */
    if (!cp_access_ok(s->current_el, ri, isread)) {
        unallocated_encoding(s);
        return;
    }

    if (ri->accessfn) {
        /* Emit code to perform further access permissions checks at
         * runtime; this may result in an exception.
         */
        TCGv_ptr tmpptr;
        TCGv_i32 tcg_syn, tcg_isread;
        uint32_t syndrome;

        gen_a64_set_pc_im(s->pc_curr);
        tmpptr = tcg_const_ptr(ri);
        syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
        tcg_syn = tcg_const_i32(syndrome);
        tcg_isread = tcg_const_i32(isread);
        gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn, tcg_isread);
        tcg_temp_free_ptr(tmpptr);
        tcg_temp_free_i32(tcg_syn);
        tcg_temp_free_i32(tcg_isread);
    } else if (ri->type & ARM_CP_RAISES_EXC) {
        /*
         * The readfn or writefn might raise an exception;
         * synchronize the CPU state in case it does.
         */
        gen_a64_set_pc_im(s->pc_curr);
    }

    /* Handle special cases first */
    switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
    case ARM_CP_NOP:
        return;
    case ARM_CP_NZCV:
        tcg_rt = cpu_reg(s, rt);
        if (isread) {
            gen_get_nzcv(tcg_rt);
        } else {
            gen_set_nzcv(tcg_rt);
        }
        return;
    case ARM_CP_CURRENTEL:
        /* Reads as current EL value from pstate, which is
         * guaranteed to be constant by the tb flags.
         */
        tcg_rt = cpu_reg(s, rt);
        tcg_gen_movi_i64(tcg_rt, s->current_el << 2);
        return;
    case ARM_CP_DC_ZVA:
        /* Writes clear the aligned block of memory which rt points into. */
        if (s->mte_active[0]) {
            TCGv_i32 t_desc;
            int desc = 0;

            desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s));
            desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
            desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
            t_desc = tcg_const_i32(desc);

            tcg_rt = new_tmp_a64(s);
            gen_helper_mte_check_zva(tcg_rt, cpu_env, t_desc, cpu_reg(s, rt));
            tcg_temp_free_i32(t_desc);
        } else {
            tcg_rt = clean_data_tbi(s, cpu_reg(s, rt));
        }
        gen_helper_dc_zva(cpu_env, tcg_rt);
        return;
    case ARM_CP_DC_GVA:
        {
            TCGv_i64 clean_addr, tag;

            /*
             * DC_GVA, like DC_ZVA, requires that we supply the original
             * pointer for an invalid page.  Probe that address first.
             */
            tcg_rt = cpu_reg(s, rt);
            clean_addr = clean_data_tbi(s, tcg_rt);
            gen_probe_access(s, clean_addr, MMU_DATA_STORE, MO_8);

            if (s->ata) {
                /* Extract the tag from the register to match STZGM.  */
                tag = tcg_temp_new_i64();
                tcg_gen_shri_i64(tag, tcg_rt, 56);
                gen_helper_stzgm_tags(cpu_env, clean_addr, tag);
                tcg_temp_free_i64(tag);
            }
        }
        return;
    case ARM_CP_DC_GZVA:
        {
            TCGv_i64 clean_addr, tag;

            /* For DC_GZVA, we can rely on DC_ZVA for the proper fault. */
            tcg_rt = cpu_reg(s, rt);
            clean_addr = clean_data_tbi(s, tcg_rt);
            gen_helper_dc_zva(cpu_env, clean_addr);

            if (s->ata) {
                /* Extract the tag from the register to match STZGM.  */
                tag = tcg_temp_new_i64();
                tcg_gen_shri_i64(tag, tcg_rt, 56);
                gen_helper_stzgm_tags(cpu_env, clean_addr, tag);
                tcg_temp_free_i64(tag);
            }
        }
        return;
    default:
        break;
    }
    if ((ri->type & ARM_CP_FPU) && !fp_access_check(s)) {
        return;
    } else if ((ri->type & ARM_CP_SVE) && !sve_access_check(s)) {
        return;
    }

    if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
        gen_io_start();
    }

    tcg_rt = cpu_reg(s, rt);

    if (isread) {
        if (ri->type & ARM_CP_CONST) {
            tcg_gen_movi_i64(tcg_rt, ri->resetvalue);
        } else if (ri->readfn) {
            TCGv_ptr tmpptr;
            tmpptr = tcg_const_ptr(ri);
            gen_helper_get_cp_reg64(tcg_rt, cpu_env, tmpptr);
            tcg_temp_free_ptr(tmpptr);
        } else {
            tcg_gen_ld_i64(tcg_rt, cpu_env, ri->fieldoffset);
        }
    } else {
        if (ri->type & ARM_CP_CONST) {
            /* If not forbidden by access permissions, treat as WI */
            return;
        } else if (ri->writefn) {
            TCGv_ptr tmpptr;
            tmpptr = tcg_const_ptr(ri);
            gen_helper_set_cp_reg64(cpu_env, tmpptr, tcg_rt);
            tcg_temp_free_ptr(tmpptr);
        } else {
            tcg_gen_st_i64(tcg_rt, cpu_env, ri->fieldoffset);
        }
    }

    if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
        /* I/O operations must end the TB here (whether read or write) */
        s->base.is_jmp = DISAS_UPDATE_EXIT;
    }
    if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
        /*
         * A write to any coprocessor regiser that ends a TB
         * must rebuild the hflags for the next TB.
         */
        TCGv_i32 tcg_el = tcg_const_i32(s->current_el);
        gen_helper_rebuild_hflags_a64(cpu_env, tcg_el);
        tcg_temp_free_i32(tcg_el);
        /*
         * We default to ending the TB on a coprocessor register write,
         * but allow this to be suppressed by the register definition
         * (usually only necessary to work around guest bugs).
         */
        s->base.is_jmp = DISAS_UPDATE_EXIT;
    }
}
2005
2006
2007
2008
2009
2010
2011
2012static void disas_system(DisasContext *s, uint32_t insn)
2013{
2014 unsigned int l, op0, op1, crn, crm, op2, rt;
2015 l = extract32(insn, 21, 1);
2016 op0 = extract32(insn, 19, 2);
2017 op1 = extract32(insn, 16, 3);
2018 crn = extract32(insn, 12, 4);
2019 crm = extract32(insn, 8, 4);
2020 op2 = extract32(insn, 5, 3);
2021 rt = extract32(insn, 0, 5);
2022
2023 if (op0 == 0) {
2024 if (l || rt != 31) {
2025 unallocated_encoding(s);
2026 return;
2027 }
2028 switch (crn) {
2029 case 2:
2030 handle_hint(s, insn, op1, op2, crm);
2031 break;
2032 case 3:
2033 handle_sync(s, insn, op1, op2, crm);
2034 break;
2035 case 4:
2036 handle_msr_i(s, insn, op1, op2, crm);
2037 break;
2038 default:
2039 unallocated_encoding(s);
2040 break;
2041 }
2042 return;
2043 }
2044 handle_sys(s, insn, l, op0, op1, op2, crn, crm, rt);
2045}
2046
2047
2048
2049
2050
2051
2052
2053
/* Exception generation
 *
 *  31             24 23 21 20                     5 4   2 1  0
 * +-----------------+-----+------------------------+-----+----+
 * | 1 1 0 1 0 1 0 0 | opc |          imm16         | op2 | LL |
 * +-----------------+-----+------------------------+-----+----+
 */
static void disas_exc(DisasContext *s, uint32_t insn)
{
    int opc = extract32(insn, 21, 3);
    int op2_ll = extract32(insn, 0, 5);
    int imm16 = extract32(insn, 5, 16);
    TCGv_i32 tmp;

    switch (opc) {
    case 0:
        /* For SVC, HVC and SMC we advance the single-step state
         * machine before taking the exception. This is architecturally
         * mandated, to ensure that single-stepping a system call
         * instruction won't actually cause a debug exception to be
         * taken to the same EL, rather than the next instruction.
         */
        switch (op2_ll) {
        case 1: /* SVC */
            gen_ss_advance(s);
            gen_exception_insn(s, s->base.pc_next, EXCP_SWI,
                               syn_aa64_svc(imm16), default_exception_el(s));
            break;
        case 2: /* HVC */
            if (s->current_el == 0) {
                unallocated_encoding(s);
                break;
            }
            /* The pre HVC helper handles cases when HVC gets trapped
             * as an undefined insn by runtime configuration.
             */
            gen_a64_set_pc_im(s->pc_curr);
            gen_helper_pre_hvc(cpu_env);
            gen_ss_advance(s);
            gen_exception_insn(s, s->base.pc_next, EXCP_HVC,
                               syn_aa64_hvc(imm16), 2);
            break;
        case 3: /* SMC */
            if (s->current_el == 0) {
                unallocated_encoding(s);
                break;
            }
            gen_a64_set_pc_im(s->pc_curr);
            tmp = tcg_const_i32(syn_aa64_smc(imm16));
            gen_helper_pre_smc(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            gen_ss_advance(s);
            gen_exception_insn(s, s->base.pc_next, EXCP_SMC,
                               syn_aa64_smc(imm16), 3);
            break;
        default:
            unallocated_encoding(s);
            break;
        }
        break;
    case 1:
        if (op2_ll != 0) {
            unallocated_encoding(s);
            break;
        }
        /* BRK */
        gen_exception_bkpt_insn(s, syn_aa64_bkpt(imm16));
        break;
    case 2:
        if (op2_ll != 0) {
            unallocated_encoding(s);
            break;
        }
        /* HLT. This has two purposes.
         * Architecturally, it is an external halting debug instruction.
         * Since QEMU doesn't implement external debug, we treat this as
         * it is required for halting debug disabled: it will UNDEF.
         * Secondly, "HLT 0xf000" is the A64 semihosting syscall instruction.
         */
        if (semihosting_enabled() && imm16 == 0xf000) {
#ifndef CONFIG_USER_ONLY
            /* In system mode, don't allow userspace access to
             * semihosting, to provide some semblance of security
             * (and for consistency with our 32-bit semihosting).
             */
            if (s->current_el == 0) {
                unsupported_encoding(s, insn);
                break;
            }
#endif
            gen_exception_internal_insn(s, s->pc_curr, EXCP_SEMIHOST);
        } else {
            unsupported_encoding(s, insn);
        }
        break;
    case 5:
        if (op2_ll < 1 || op2_ll > 3) {
            unallocated_encoding(s);
            break;
        }
        /* DCPS1, DCPS2, DCPS3 */
        unsupported_encoding(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
2154
2155
2156
2157
2158
2159
2160
/* Unconditional branch (register)
 *  31           25 24   21 20   16 15   10 9    5 4     0
 * +---------------+-------+-------+-------+------+-------+
 * | 1 1 0 1 0 1 1 |  opc  |  op2  |  op3  |  Rn  |  op4  |
 * +---------------+-------+-------+-------+------+-------+
 */
static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
{
    unsigned int opc, op2, op3, rn, op4;
    unsigned btype_mod = 2;   /* 0 -> BR, 1 -> BLR, 2 -> other */
    TCGv_i64 dst;
    TCGv_i64 modifier;

    opc = extract32(insn, 21, 4);
    op2 = extract32(insn, 16, 5);
    op3 = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    op4 = extract32(insn, 0, 5);

    if (op2 != 0x1f) {
        goto do_unallocated;
    }

    switch (opc) {
    case 0: /* BR */
    case 1: /* BLR */
    case 2: /* RET */
        btype_mod = opc;
        switch (op3) {
        case 0:
            /* BR, BLR, RET */
            if (op4 != 0) {
                goto do_unallocated;
            }
            dst = cpu_reg(s, rn);
            break;

        case 2:
        case 3:
            if (!dc_isar_feature(aa64_pauth, s)) {
                goto do_unallocated;
            }
            if (opc == 2) {
                /* RETAA, RETAB */
                if (rn != 0x1f || op4 != 0x1f) {
                    goto do_unallocated;
                }
                rn = 30;
                modifier = cpu_X[31];
            } else {
                /* BRAAZ, BRABZ, BLRAAZ, BLRABZ */
                if (op4 != 0x1f) {
                    goto do_unallocated;
                }
                modifier = new_tmp_a64_zero(s);
            }
            if (s->pauth_active) {
                dst = new_tmp_a64(s);
                if (op3 == 2) {
                    gen_helper_autia(dst, cpu_env, cpu_reg(s, rn), modifier);
                } else {
                    gen_helper_autib(dst, cpu_env, cpu_reg(s, rn), modifier);
                }
            } else {
                dst = cpu_reg(s, rn);
            }
            break;

        default:
            goto do_unallocated;
        }
        gen_a64_set_pc(s, dst);
        /* BLR also needs to load return address */
        if (opc == 1) {
            tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next);
        }
        break;

    case 8: /* BRAA */
    case 9: /* BLRAA */
        if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        if ((op3 & ~1) != 2) {
            goto do_unallocated;
        }
        btype_mod = opc & 1;
        if (s->pauth_active) {
            dst = new_tmp_a64(s);
            modifier = cpu_reg_sp(s, op4);
            if (op3 == 2) {
                gen_helper_autia(dst, cpu_env, cpu_reg(s, rn), modifier);
            } else {
                gen_helper_autib(dst, cpu_env, cpu_reg(s, rn), modifier);
            }
        } else {
            dst = cpu_reg(s, rn);
        }
        gen_a64_set_pc(s, dst);
        /* BLRAA also needs to load return address */
        if (opc == 9) {
            tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next);
        }
        break;

    case 4: /* ERET */
        if (s->current_el == 0) {
            goto do_unallocated;
        }
        switch (op3) {
        case 0: /* ERET */
            if (op4 != 0) {
                goto do_unallocated;
            }
            dst = tcg_temp_new_i64();
            tcg_gen_ld_i64(dst, cpu_env,
                           offsetof(CPUARMState, elr_el[s->current_el]));
            break;

        case 2: /* ERETAA */
        case 3: /* ERETAB */
            if (!dc_isar_feature(aa64_pauth, s)) {
                goto do_unallocated;
            }
            if (rn != 0x1f || op4 != 0x1f) {
                goto do_unallocated;
            }
            dst = tcg_temp_new_i64();
            tcg_gen_ld_i64(dst, cpu_env,
                           offsetof(CPUARMState, elr_el[s->current_el]));
            if (s->pauth_active) {
                modifier = cpu_X[31];
                if (op3 == 2) {
                    gen_helper_autia(dst, cpu_env, dst, modifier);
                } else {
                    gen_helper_autib(dst, cpu_env, dst, modifier);
                }
            }
            break;

        default:
            goto do_unallocated;
        }
        if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
            gen_io_start();
        }

        gen_helper_exception_return(cpu_env, dst);
        tcg_temp_free_i64(dst);
        /* Must exit loop to check un-masked IRQs */
        s->base.is_jmp = DISAS_EXIT;
        return;

    case 5: /* DRPS */
        if (op3 != 0 || op4 != 0 || rn != 0x1f) {
            goto do_unallocated;
        } else {
            unsupported_encoding(s, insn);
        }
        return;

    default:
    do_unallocated:
        unallocated_encoding(s);
        return;
    }

    switch (btype_mod) {
    case 0: /* BR */
        if (dc_isar_feature(aa64_bti, s)) {
            /* BR to {x16,x17} or !guard -> 1, else 3.  */
            set_btype(s, rn == 16 || rn == 17 || !s->guarded_page ? 1 : 3);
        }
        break;

    case 1: /* BLR */
        if (dc_isar_feature(aa64_bti, s)) {
            /* BLR sets BTYPE to 2, regardless of source guarded page.  */
            set_btype(s, 2);
        }
        break;

    default: /* RET or none of the above.  */
        /* BTYPE will be set to 0 by normal end-of-insn processing.  */
        break;
    }

    s->base.is_jmp = DISAS_JUMP;
}
2344
2345
2346static void disas_b_exc_sys(DisasContext *s, uint32_t insn)
2347{
2348 switch (extract32(insn, 25, 7)) {
2349 case 0x0a: case 0x0b:
2350 case 0x4a: case 0x4b:
2351 disas_uncond_b_imm(s, insn);
2352 break;
2353 case 0x1a: case 0x5a:
2354 disas_comp_b_imm(s, insn);
2355 break;
2356 case 0x1b: case 0x5b:
2357 disas_test_b_imm(s, insn);
2358 break;
2359 case 0x2a:
2360 disas_cond_b_imm(s, insn);
2361 break;
2362 case 0x6a:
2363 if (insn & (1 << 24)) {
2364 if (extract32(insn, 22, 2) == 0) {
2365 disas_system(s, insn);
2366 } else {
2367 unallocated_encoding(s);
2368 }
2369 } else {
2370 disas_exc(s, insn);
2371 }
2372 break;
2373 case 0x6b:
2374 disas_uncond_b_reg(s, insn);
2375 break;
2376 default:
2377 unallocated_encoding(s);
2378 break;
2379 }
2380}
2381
2382
2383
2384
2385
2386
2387
2388
2389
2390
2391
2392
/*
 * Load exclusive: remember the loaded value and address in
 * cpu_exclusive_{val,high,addr} so a subsequent store-exclusive
 * can compare against them.
 */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv_i64 addr, int size, bool is_pair)
{
    int idx = get_mem_index(s);
    MemOp memop = s->be_data;

    g_assert(size <= 3);
    if (is_pair) {
        g_assert(size >= 2);
        if (size == 2) {
            /* The pair can be loaded as a single aligned doubleword.  */
            memop |= MO_64 | MO_ALIGN;
            tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx, memop);
            if (s->be_data == MO_LE) {
                tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 0, 32);
                tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 32, 32);
            } else {
                tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 32, 32);
                tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 0, 32);
            }
        } else {
            /* Quadword pair: loaded as two doublewords, but the whole
             * access must be quadword aligned (MO_ALIGN_16 on the first).
             */
            memop |= MO_64;
            tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx,
                                memop | MO_ALIGN_16);

            TCGv_i64 addr2 = tcg_temp_new_i64();
            tcg_gen_addi_i64(addr2, addr, 8);
            tcg_gen_qemu_ld_i64(cpu_exclusive_high, addr2, idx, memop);
            tcg_temp_free_i64(addr2);

            tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val);
            tcg_gen_mov_i64(cpu_reg(s, rt2), cpu_exclusive_high);
        }
    } else {
        /* Single register, naturally aligned access of 2^size bytes */
        memop |= size | MO_ALIGN;
        tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx, memop);
        tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val);
    }
    /* Record the monitored address last, after the load has succeeded */
    tcg_gen_mov_i64(cpu_exclusive_addr, addr);
}
2435
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i64 addr, int size, int is_pair)
{
    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]
     *     && (!is_pair || env->exclusive_high == [addr + datasize])) {
     *     [addr] = {Rt};
     *     if (is_pair) {
     *         [addr + datasize] = {Rt2};
     *     }
     *     {Rd} = 0;
     * } else {
     *     {Rd} = 1;
     * }
     * env->exclusive_addr = -1;
     */
    TCGLabel *fail_label = gen_new_label();
    TCGLabel *done_label = gen_new_label();
    TCGv_i64 tmp;

    /* Fast fail if the address doesn't match the monitored one */
    tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);

    tmp = tcg_temp_new_i64();
    if (is_pair) {
        if (size == 2) {
            /* 32-bit pair: pack Rt/Rt2 into one doubleword in memory order */
            if (s->be_data == MO_LE) {
                tcg_gen_concat32_i64(tmp, cpu_reg(s, rt), cpu_reg(s, rt2));
            } else {
                tcg_gen_concat32_i64(tmp, cpu_reg(s, rt2), cpu_reg(s, rt));
            }
            tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr,
                                       cpu_exclusive_val, tmp,
                                       get_mem_index(s),
                                       MO_64 | MO_ALIGN | s->be_data);
            tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
        } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
            if (!HAVE_CMPXCHG128) {
                gen_helper_exit_atomic(cpu_env);
                /*
                 * Produce a result so we have a well-formed opcode
                 * stream when the following (dead) code uses 'tmp'.
                 * TCG will remove the dead code quickly.
                 */
                tcg_gen_movi_i64(tmp, 0);
            } else if (s->be_data == MO_LE) {
                gen_helper_paired_cmpxchg64_le_parallel(tmp, cpu_env,
                                                        cpu_exclusive_addr,
                                                        cpu_reg(s, rt),
                                                        cpu_reg(s, rt2));
            } else {
                gen_helper_paired_cmpxchg64_be_parallel(tmp, cpu_env,
                                                        cpu_exclusive_addr,
                                                        cpu_reg(s, rt),
                                                        cpu_reg(s, rt2));
            }
        } else if (s->be_data == MO_LE) {
            gen_helper_paired_cmpxchg64_le(tmp, cpu_env, cpu_exclusive_addr,
                                           cpu_reg(s, rt), cpu_reg(s, rt2));
        } else {
            gen_helper_paired_cmpxchg64_be(tmp, cpu_env, cpu_exclusive_addr,
                                           cpu_reg(s, rt), cpu_reg(s, rt2));
        }
    } else {
        tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr, cpu_exclusive_val,
                                   cpu_reg(s, rt), get_mem_index(s),
                                   size | MO_ALIGN | s->be_data);
        /* tmp = 0 on success (old value matched), 1 on failure */
        tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
    }
    tcg_gen_mov_i64(cpu_reg(s, rd), tmp);
    tcg_temp_free_i64(tmp);
    tcg_gen_br(done_label);

    gen_set_label(fail_label);
    tcg_gen_movi_i64(cpu_reg(s, rd), 1);
    gen_set_label(done_label);
    /* The monitor is always cleared, whether the store succeeded or not */
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
2512
/* CAS family (single register): atomically compare [Rn] with Rs and,
 * if equal, store Rt; the old memory value is written back to Rs.
 */
static void gen_compare_and_swap(DisasContext *s, int rs, int rt,
                                 int rn, int size)
{
    TCGv_i64 tcg_rs = cpu_reg(s, rs);
    TCGv_i64 tcg_rt = cpu_reg(s, rt);
    int memidx = get_mem_index(s);
    TCGv_i64 clean_addr;

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), true, rn != 31, size);
    tcg_gen_atomic_cmpxchg_i64(tcg_rs, clean_addr, tcg_rs, tcg_rt, memidx,
                               size | MO_ALIGN | s->be_data);
}
2528
/* CASP family: atomic compare-and-swap of a register pair
 * {Rs,Rs+1} / {Rt,Rt+1} against memory at [Rn].
 */
static void gen_compare_and_swap_pair(DisasContext *s, int rs, int rt,
                                      int rn, int size)
{
    TCGv_i64 s1 = cpu_reg(s, rs);
    TCGv_i64 s2 = cpu_reg(s, rs + 1);
    TCGv_i64 t1 = cpu_reg(s, rt);
    TCGv_i64 t2 = cpu_reg(s, rt + 1);
    TCGv_i64 clean_addr;
    int memidx = get_mem_index(s);

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    /* This is a single atomic access, despite the "pair". */
    clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), true, rn != 31, size + 1);

    if (size == 2) {
        /* 32-bit pair fits into a single 64-bit cmpxchg */
        TCGv_i64 cmp = tcg_temp_new_i64();
        TCGv_i64 val = tcg_temp_new_i64();

        if (s->be_data == MO_LE) {
            tcg_gen_concat32_i64(val, t1, t2);
            tcg_gen_concat32_i64(cmp, s1, s2);
        } else {
            tcg_gen_concat32_i64(val, t2, t1);
            tcg_gen_concat32_i64(cmp, s2, s1);
        }

        tcg_gen_atomic_cmpxchg_i64(cmp, clean_addr, cmp, val, memidx,
                                   MO_64 | MO_ALIGN | s->be_data);
        tcg_temp_free_i64(val);

        /* Write the old memory contents back to the Rs pair */
        if (s->be_data == MO_LE) {
            tcg_gen_extr32_i64(s1, s2, cmp);
        } else {
            tcg_gen_extr32_i64(s2, s1, cmp);
        }
        tcg_temp_free_i64(cmp);
    } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        /* 64-bit pair under MTTCG: needs a 128-bit cmpxchg primitive */
        if (HAVE_CMPXCHG128) {
            TCGv_i32 tcg_rs = tcg_const_i32(rs);
            if (s->be_data == MO_LE) {
                gen_helper_casp_le_parallel(cpu_env, tcg_rs,
                                            clean_addr, t1, t2);
            } else {
                gen_helper_casp_be_parallel(cpu_env, tcg_rs,
                                            clean_addr, t1, t2);
            }
            tcg_temp_free_i32(tcg_rs);
        } else {
            /* No host support: restart this insn non-parallel */
            gen_helper_exit_atomic(cpu_env);
            s->base.is_jmp = DISAS_NORETURN;
        }
    } else {
        TCGv_i64 d1 = tcg_temp_new_i64();
        TCGv_i64 d2 = tcg_temp_new_i64();
        TCGv_i64 a2 = tcg_temp_new_i64();
        TCGv_i64 c1 = tcg_temp_new_i64();
        TCGv_i64 c2 = tcg_temp_new_i64();
        TCGv_i64 zero = tcg_const_i64(0);

        /* Load the two words, in memory order.  */
        tcg_gen_qemu_ld_i64(d1, clean_addr, memidx,
                            MO_64 | MO_ALIGN_16 | s->be_data);
        tcg_gen_addi_i64(a2, clean_addr, 8);
        tcg_gen_qemu_ld_i64(d2, a2, memidx, MO_64 | s->be_data);

        /* Compare the two words, also in memory order.  */
        tcg_gen_setcond_i64(TCG_COND_EQ, c1, d1, s1);
        tcg_gen_setcond_i64(TCG_COND_EQ, c2, d2, s2);
        tcg_gen_and_i64(c2, c2, c1);

        /* If compare equal, write back new data, else write back old data.  */
        tcg_gen_movcond_i64(TCG_COND_NE, c1, c2, zero, t1, d1);
        tcg_gen_movcond_i64(TCG_COND_NE, c2, c2, zero, t2, d2);
        tcg_gen_qemu_st_i64(c1, clean_addr, memidx, MO_64 | s->be_data);
        tcg_gen_qemu_st_i64(c2, a2, memidx, MO_64 | s->be_data);
        tcg_temp_free_i64(a2);
        tcg_temp_free_i64(c1);
        tcg_temp_free_i64(c2);
        tcg_temp_free_i64(zero);

        /* Write back the data from memory to Rs.  */
        tcg_gen_mov_i64(s1, d1);
        tcg_gen_mov_i64(s2, d2);
        tcg_temp_free_i64(d1);
        tcg_temp_free_i64(d2);
    }
}
2619
2620
2621
2622
2623static bool disas_ldst_compute_iss_sf(int size, bool is_signed, int opc)
2624{
2625 int opc0 = extract32(opc, 0, 1);
2626 int regsize;
2627
2628 if (is_signed) {
2629 regsize = opc0 ? 32 : 64;
2630 } else {
2631 regsize = size == 3 ? 64 : 32;
2632 }
2633 return regsize == 64;
2634}
2635
2636
2637
2638
2639
2640
2641
2642
2643
2644
2645
2646
2647
2648
/* Load/store exclusive
 *
 *  31 30 29         24  23  22   21  20  16  15  14   10 9    5 4    0
 * +-----+-------------+----+---+----+------+----+-------+------+------+
 * | sz  | 0 0 1 0 0 0 | o2 | L | o1 |  Rs  | o0 |  Rt2  |  Rn  |  Rt  |
 * +-----+-------------+----+---+----+------+----+-------+------+------+
 *
 *  L: 0 -> store, 1 -> load
 *  o2: 0 -> exclusive, 1 -> not
 *  o1: 0 -> single register, 1 -> register pair
 *  o0: 1 -> load-acquire/store-release, 0 -> not
 */
static void disas_ldst_excl(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rt2 = extract32(insn, 10, 5);
    int rs = extract32(insn, 16, 5);
    int is_lasr = extract32(insn, 15, 1);
    int o2_L_o1_o0 = extract32(insn, 21, 3) * 2 | is_lasr;
    int size = extract32(insn, 30, 2);
    TCGv_i64 clean_addr;

    switch (o2_L_o1_o0) {
    case 0x0: /* STXR */
    case 0x1: /* STLXR */
        if (rn == 31) {
            gen_check_sp_alignment(s);
        }
        if (is_lasr) {
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
        }
        clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
                                    true, rn != 31, size);
        gen_store_exclusive(s, rs, rt, rt2, clean_addr, size, false);
        return;

    case 0x4: /* LDXR */
    case 0x5: /* LDAXR */
        if (rn == 31) {
            gen_check_sp_alignment(s);
        }
        clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
                                    false, rn != 31, size);
        s->is_ldex = true;
        gen_load_exclusive(s, rt, rt2, clean_addr, size, false);
        if (is_lasr) {
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
        }
        return;

    case 0x8: /* STLLR */
        if (!dc_isar_feature(aa64_lor, s)) {
            break;
        }
        /* StoreLORelease is the same as Store-Release for QEMU.  */
        /* fall through */
    case 0x9: /* STLR */
        /* Generate ISS for non-exclusive accesses */
        if (rn == 31) {
            gen_check_sp_alignment(s);
        }
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
        clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
                                    true, rn != 31, size);
        /* TODO: ARMv8.4-LSE SCTLR.nAA */
        do_gpr_st(s, cpu_reg(s, rt), clean_addr, size | MO_ALIGN, true, rt,
                  disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
        return;

    case 0xc: /* LDLAR */
        if (!dc_isar_feature(aa64_lor, s)) {
            break;
        }
        /* LoadLOAcquire is the same as Load-Acquire for QEMU.  */
        /* fall through */
    case 0xd: /* LDAR */
        /* Generate ISS for non-exclusive accesses */
        if (rn == 31) {
            gen_check_sp_alignment(s);
        }
        clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
                                    false, rn != 31, size);
        /* TODO: ARMv8.4-LSE SCTLR.nAA */
        do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size | MO_ALIGN, false, true,
                  rt, disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
        return;

    case 0x2: case 0x3: /* CASP / STXP */
        if (size & 2) { /* STXP / STLXP */
            if (rn == 31) {
                gen_check_sp_alignment(s);
            }
            if (is_lasr) {
                tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
            }
            clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
                                        true, rn != 31, size);
            gen_store_exclusive(s, rs, rt, rt2, clean_addr, size, true);
            return;
        }
        if (rt2 == 31
            && ((rt | rs) & 1) == 0
            && dc_isar_feature(aa64_atomics, s)) {
            /* CASP / CASPL */
            gen_compare_and_swap_pair(s, rs, rt, rn, size | 2);
            return;
        }
        break;

    case 0x6: case 0x7: /* CASPA / LDXP */
        if (size & 2) { /* LDXP / LDAXP */
            if (rn == 31) {
                gen_check_sp_alignment(s);
            }
            clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
                                        false, rn != 31, size);
            s->is_ldex = true;
            gen_load_exclusive(s, rt, rt2, clean_addr, size, true);
            if (is_lasr) {
                tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
            }
            return;
        }
        if (rt2 == 31
            && ((rt | rs) & 1) == 0
            && dc_isar_feature(aa64_atomics, s)) {
            /* CASPA / CASPAL */
            gen_compare_and_swap_pair(s, rs, rt, rn, size | 2);
            return;
        }
        break;

    case 0xa: /* CAS */
    case 0xb: /* CASL */
    case 0xe: /* CASA */
    case 0xf: /* CASAL */
        if (rt2 == 31 && dc_isar_feature(aa64_atomics, s)) {
            gen_compare_and_swap(s, rs, rt, rn, size);
            return;
        }
        break;
    }
    unallocated_encoding(s);
}
2783
2784
2785
2786
2787
2788
2789
2790
2791
2792
2793
2794
2795
2796
/*
 * Load register (literal)
 *
 *  31 30 29   27  26 25 24 23                5 4     0
 * +-----+-------+---+-----+-------------------+-------+
 * | opc | 0 1 1 | V | 0 0 |     imm19         |  Rt   |
 * +-----+-------+---+-----+-------------------+-------+
 *
 * V: 1 -> vector (simd/fp)
 * opc (non-vector): 00 -> 32 bit, 01 -> 64 bit,
 *                   10 -> 32 bit signed, 11 -> prefetch
 * opc (vector): 00 -> 32 bit, 01 -> 64 bit, 10 -> 128 bit (11 unallocated)
 */
static void disas_ld_lit(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int64_t imm = sextract32(insn, 5, 19) << 2;
    bool is_vector = extract32(insn, 26, 1);
    int opc = extract32(insn, 30, 2);
    bool is_signed = false;
    int size = 2;
    TCGv_i64 tcg_rt, clean_addr;

    if (is_vector) {
        if (opc == 3) {
            unallocated_encoding(s);
            return;
        }
        size = 2 + opc;
        if (!fp_access_check(s)) {
            return;
        }
    } else {
        if (opc == 3) {
            /* PRFM (literal) : prefetch - treated as a nop */
            return;
        }
        size = 2 + extract32(opc, 0, 1);
        is_signed = extract32(opc, 1, 1);
    }

    tcg_rt = cpu_reg(s, rt);

    /* Address is always PC-relative; no register base, no writeback */
    clean_addr = tcg_const_i64(s->pc_curr + imm);
    if (is_vector) {
        do_fp_ld(s, rt, clean_addr, size);
    } else {
        /* Only unsigned 32bit loads target 32bit registers.  */
        bool iss_sf = opc != 0;

        do_gpr_ld(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN,
                  false, true, rt, iss_sf, false);
    }
    tcg_temp_free_i64(clean_addr);
}
2839
2840
2841
2842
2843
2844
2845
2846
2847
2848
2849
2850
2851
2852
2853
2854
2855
2856
2857
2858
2859
2860
2861
2862
2863
2864
2865
2866
2867
/*
 * Load/store pair (all forms): LDP/STP, LDPSW, LDNP/STNP, STGP and
 * the FP/SIMD pair variants, in post-index, pre-index, signed-offset
 * and non-temporal-hint addressing modes.
 */
static void disas_ldst_pair(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rt2 = extract32(insn, 10, 5);
    uint64_t offset = sextract64(insn, 15, 7);   /* imm7, scaled below */
    int index = extract32(insn, 23, 2);          /* addressing mode */
    bool is_vector = extract32(insn, 26, 1);
    bool is_load = extract32(insn, 22, 1);
    int opc = extract32(insn, 30, 2);

    bool is_signed = false;
    bool postindex = false;
    bool wback = false;
    bool set_tag = false;

    TCGv_i64 clean_addr, dirty_addr;

    int size;

    if (opc == 3) {
        unallocated_encoding(s);
        return;
    }

    if (is_vector) {
        size = 2 + opc;      /* 32/64/128-bit per element */
    } else if (opc == 1 && !is_load) {
        /* STGP: store pair and update the allocation tag (MTE) */
        if (!dc_isar_feature(aa64_mte_insn_reg, s) || index == 0) {
            unallocated_encoding(s);
            return;
        }
        size = 3;
        set_tag = true;
    } else {
        size = 2 + extract32(opc, 1, 1);
        is_signed = extract32(opc, 0, 1);        /* LDPSW when set */
        if (!is_load && is_signed) {
            unallocated_encoding(s);
            return;
        }
    }

    switch (index) {
    case 1: /* post-index */
        postindex = true;
        wback = true;
        break;
    case 0:
        /* Non-temporal hint form (LDNP/STNP): same as signed offset */
        if (is_signed) {
            /* There is no non-temporal-hint version of LDPSW */
            unallocated_encoding(s);
            return;
        }
        postindex = false;
        break;
    case 2: /* signed offset, no writeback */
        postindex = false;
        break;
    case 3: /* pre-index */
        postindex = false;
        wback = true;
        break;
    }

    if (is_vector && !fp_access_check(s)) {
        return;
    }

    /* STGP scales the immediate by the tag granule, not the access size */
    offset <<= (set_tag ? LOG2_TAG_GRANULE : size);

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    dirty_addr = read_cpu_reg_sp(s, rn, 1);
    if (!postindex) {
        tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
    }

    if (set_tag) {
        if (!s->ata) {
            /*
             * Allocation tags are not active: do not update the tag,
             * but the stub still probes the address for writability.
             */
            gen_helper_stg_stub(cpu_env, dirty_addr);
        } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
            gen_helper_stg_parallel(cpu_env, dirty_addr, dirty_addr);
        } else {
            gen_helper_stg(cpu_env, dirty_addr, dirty_addr);
        }
    }

    /* Check both halves of the pair (2 << size bytes) in one go */
    clean_addr = gen_mte_checkN(s, dirty_addr, !is_load,
                                (wback || rn != 31) && !set_tag, 2 << size);

    if (is_vector) {
        if (is_load) {
            do_fp_ld(s, rt, clean_addr, size);
        } else {
            do_fp_st(s, rt, clean_addr, size);
        }
        tcg_gen_addi_i64(clean_addr, clean_addr, 1 << size);
        if (is_load) {
            do_fp_ld(s, rt2, clean_addr, size);
        } else {
            do_fp_st(s, rt2, clean_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        TCGv_i64 tcg_rt2 = cpu_reg(s, rt2);

        if (is_load) {
            TCGv_i64 tmp = tcg_temp_new_i64();

            /*
             * Load the first value into a scratch temp so that Rt is
             * left unmodified if the second load (into Rt2) faults.
             */
            do_gpr_ld(s, tmp, clean_addr, size + is_signed * MO_SIGN,
                      false, false, 0, false, false);
            tcg_gen_addi_i64(clean_addr, clean_addr, 1 << size);
            do_gpr_ld(s, tcg_rt2, clean_addr, size + is_signed * MO_SIGN,
                      false, false, 0, false, false);

            tcg_gen_mov_i64(tcg_rt, tmp);
            tcg_temp_free_i64(tmp);
        } else {
            do_gpr_st(s, tcg_rt, clean_addr, size,
                      false, 0, false, false);
            tcg_gen_addi_i64(clean_addr, clean_addr, 1 << size);
            do_gpr_st(s, tcg_rt2, clean_addr, size,
                      false, 0, false, false);
        }
    }

    if (wback) {
        if (postindex) {
            /* Post-index: the offset is applied only to the writeback */
            tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
        }
        tcg_gen_mov_i64(cpu_reg_sp(s, rn), dirty_addr);
    }
}
3017
3018
3019
3020
3021
3022
3023
3024
3025
3026
3027
3028
3029
3030
3031
3032
3033
/*
 * Load/store register (immediate, imm9): the unscaled-offset (LDUR/STUR),
 * post-indexed, pre-indexed and unprivileged (LDTR/STTR) forms.
 * opc/size/rt/is_vector were already extracted by the caller.
 */
static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
                                int opc,
                                int size,
                                int rt,
                                bool is_vector)
{
    int rn = extract32(insn, 5, 5);
    int imm9 = sextract32(insn, 12, 9);          /* unscaled offset */
    int idx = extract32(insn, 10, 2);            /* addressing-mode selector */
    bool is_signed = false;
    bool is_store = false;
    bool is_extended = false;
    bool is_unpriv = (idx == 2);                 /* LDTR/STTR */
    bool iss_valid = !is_vector;
    bool post_index;
    bool writeback;
    int memidx;

    TCGv_i64 clean_addr, dirty_addr;

    if (is_vector) {
        size |= (opc & 2) << 1;                  /* up to 128-bit accesses */
        if (size > 4 || is_unpriv) {
            unallocated_encoding(s);
            return;
        }
        is_store = ((opc & 1) == 0);
        if (!fp_access_check(s)) {
            return;
        }
    } else {
        if (size == 3 && opc == 2) {
            /* PRFUM: prefetch, only valid in the unscaled-offset form */
            if (idx != 0) {
                unallocated_encoding(s);
                return;
            }
            return;                              /* prefetch is a no-op */
        }
        if (opc == 3 && size > 1) {
            unallocated_encoding(s);
            return;
        }
        is_store = (opc == 0);
        is_signed = extract32(opc, 1, 1);
        is_extended = (size < 3) && extract32(opc, 0, 1);
    }

    switch (idx) {
    case 0:         /* unscaled offset */
    case 2:         /* unprivileged */
        post_index = false;
        writeback = false;
        break;
    case 1:         /* post-index */
        post_index = true;
        writeback = true;
        break;
    case 3:         /* pre-index */
        post_index = false;
        writeback = true;
        break;
    default:
        g_assert_not_reached();
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    dirty_addr = read_cpu_reg_sp(s, rn, 1);
    if (!post_index) {
        tcg_gen_addi_i64(dirty_addr, dirty_addr, imm9);
    }

    /* LDTR/STTR access memory with the unprivileged (EL0) MMU index */
    memidx = is_unpriv ? get_a64_user_mem_index(s) : get_mem_index(s);
    clean_addr = gen_mte_check1_mmuidx(s, dirty_addr, is_store,
                                       writeback || rn != 31,
                                       size, is_unpriv, memidx);

    if (is_vector) {
        if (is_store) {
            do_fp_st(s, rt, clean_addr, size);
        } else {
            do_fp_ld(s, rt, clean_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);

        if (is_store) {
            do_gpr_st_memidx(s, tcg_rt, clean_addr, size, memidx,
                             iss_valid, rt, iss_sf, false);
        } else {
            do_gpr_ld_memidx(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN,
                             is_extended, memidx,
                             iss_valid, rt, iss_sf, false);
        }
    }

    if (writeback) {
        TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
        if (post_index) {
            tcg_gen_addi_i64(dirty_addr, dirty_addr, imm9);
        }
        tcg_gen_mov_i64(tcg_rn, dirty_addr);
    }
}
3142
3143
3144
3145
3146
3147
3148
3149
3150
3151
3152
3153
3154
3155
3156
3157
3158
3159
3160
3161
3162
3163
/*
 * Load/store register (register offset): LDR/STR Rt, [Rn, Rm{, ext/shift}].
 * The offset register may be sign/zero-extended and optionally scaled by
 * the access size.  opc/size/rt/is_vector were extracted by the caller.
 */
static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn,
                                   int opc,
                                   int size,
                                   int rt,
                                   bool is_vector)
{
    int rn = extract32(insn, 5, 5);
    int shift = extract32(insn, 12, 1);          /* S bit: scale by size */
    int rm = extract32(insn, 16, 5);
    int opt = extract32(insn, 13, 3);            /* extend type */
    bool is_signed = false;
    bool is_store = false;
    bool is_extended = false;

    TCGv_i64 tcg_rm, clean_addr, dirty_addr;

    if (extract32(opt, 1, 1) == 0) {
        /* Only UXTW/LSL/SXTW/SXTX option values are allocated */
        unallocated_encoding(s);
        return;
    }

    if (is_vector) {
        size |= (opc & 2) << 1;
        if (size > 4) {
            unallocated_encoding(s);
            return;
        }
        is_store = !extract32(opc, 0, 1);
        if (!fp_access_check(s)) {
            return;
        }
    } else {
        if (size == 3 && opc == 2) {
            /* PRFM (register): prefetch is a no-op for QEMU */
            return;
        }
        if (opc == 3 && size > 1) {
            unallocated_encoding(s);
            return;
        }
        is_store = (opc == 0);
        is_signed = extract32(opc, 1, 1);
        is_extended = (size < 3) && extract32(opc, 0, 1);
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    dirty_addr = read_cpu_reg_sp(s, rn, 1);

    tcg_rm = read_cpu_reg(s, rm, 1);
    ext_and_shift_reg(tcg_rm, tcg_rm, opt, shift ? size : 0);

    tcg_gen_add_i64(dirty_addr, dirty_addr, tcg_rm);
    clean_addr = gen_mte_check1(s, dirty_addr, is_store, true, size);

    if (is_vector) {
        if (is_store) {
            do_fp_st(s, rt, clean_addr, size);
        } else {
            do_fp_ld(s, rt, clean_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
        if (is_store) {
            do_gpr_st(s, tcg_rt, clean_addr, size,
                      true, rt, iss_sf, false);
        } else {
            do_gpr_ld(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN,
                      is_extended, true, rt, iss_sf, false);
        }
    }
}
3238
3239
3240
3241
3242
3243
3244
3245
3246
3247
3248
3249
3250
3251
3252
3253
3254
3255
/*
 * Load/store register (unsigned scaled immediate):
 * LDR/STR Rt, [Rn, #imm12 << size].  No writeback.
 * opc/size/rt/is_vector were extracted by the caller.
 */
static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn,
                                        int opc,
                                        int size,
                                        int rt,
                                        bool is_vector)
{
    int rn = extract32(insn, 5, 5);
    unsigned int imm12 = extract32(insn, 10, 12);
    unsigned int offset;

    TCGv_i64 clean_addr, dirty_addr;

    bool is_store;
    bool is_signed = false;
    bool is_extended = false;

    if (is_vector) {
        size |= (opc & 2) << 1;
        if (size > 4) {
            unallocated_encoding(s);
            return;
        }
        is_store = !extract32(opc, 0, 1);
        if (!fp_access_check(s)) {
            return;
        }
    } else {
        if (size == 3 && opc == 2) {
            /* PRFM (immediate): prefetch is a no-op for QEMU */
            return;
        }
        if (opc == 3 && size > 1) {
            unallocated_encoding(s);
            return;
        }
        is_store = (opc == 0);
        is_signed = extract32(opc, 1, 1);
        is_extended = (size < 3) && extract32(opc, 0, 1);
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    dirty_addr = read_cpu_reg_sp(s, rn, 1);
    offset = imm12 << size;      /* immediate is scaled by the access size */
    tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
    clean_addr = gen_mte_check1(s, dirty_addr, is_store, rn != 31, size);

    if (is_vector) {
        if (is_store) {
            do_fp_st(s, rt, clean_addr, size);
        } else {
            do_fp_ld(s, rt, clean_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
        if (is_store) {
            do_gpr_st(s, tcg_rt, clean_addr, size,
                      true, rt, iss_sf, false);
        } else {
            do_gpr_ld(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN,
                      is_extended, true, rt, iss_sf, false);
        }
    }
}
3322
3323
3324
3325
3326
3327
3328
3329
3330
3331
3332
3333
3334
3335
3336
/*
 * Atomic memory operations (FEAT_LSE): LDADD/LDCLR/LDEOR/LDSET,
 * LDSMAX/LDSMIN/LDUMAX/LDUMIN, SWP, plus LDAPR (FEAT_LRCPC) which
 * shares this encoding space.  Note the o3_opc case labels are octal.
 */
static void disas_ldst_atomic(DisasContext *s, uint32_t insn,
                              int size, int rt, bool is_vector)
{
    int rs = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int o3_opc = extract32(insn, 12, 4);
    bool r = extract32(insn, 22, 1);
    bool a = extract32(insn, 23, 1);
    TCGv_i64 tcg_rs, tcg_rt, clean_addr;
    AtomicThreeOpFn *fn = NULL;
    MemOp mop = s->be_data | size | MO_ALIGN;

    if (is_vector || !dc_isar_feature(aa64_atomics, s)) {
        unallocated_encoding(s);
        return;
    }
    switch (o3_opc) {
    case 000: /* LDADD */
        fn = tcg_gen_atomic_fetch_add_i64;
        break;
    case 001: /* LDCLR: AND with ~Rs (inversion done below) */
        fn = tcg_gen_atomic_fetch_and_i64;
        break;
    case 002: /* LDEOR */
        fn = tcg_gen_atomic_fetch_xor_i64;
        break;
    case 003: /* LDSET */
        fn = tcg_gen_atomic_fetch_or_i64;
        break;
    case 004: /* LDSMAX: signed comparison */
        fn = tcg_gen_atomic_fetch_smax_i64;
        mop |= MO_SIGN;
        break;
    case 005: /* LDSMIN: signed comparison */
        fn = tcg_gen_atomic_fetch_smin_i64;
        mop |= MO_SIGN;
        break;
    case 006: /* LDUMAX */
        fn = tcg_gen_atomic_fetch_umax_i64;
        break;
    case 007: /* LDUMIN */
        fn = tcg_gen_atomic_fetch_umin_i64;
        break;
    case 010: /* SWP */
        fn = tcg_gen_atomic_xchg_i64;
        break;
    case 014: /* LDAPR, LDAPRH, LDAPRB (FEAT_LRCPC) */
        if (!dc_isar_feature(aa64_rcpc_8_3, s) ||
            rs != 31 || a != 1 || r != 0) {
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), false, rn != 31, size);

    if (o3_opc == 014) {
        /*
         * LDAPR*: an ordinary load followed by a barrier giving it
         * load-acquire (RCpc) semantics relative to later accesses.
         */
        do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, false,
                  true, rt, disas_ldst_compute_iss_sf(size, false, 0), true);
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
        return;
    }

    tcg_rs = read_cpu_reg(s, rs, true);
    tcg_rt = cpu_reg(s, rt);

    if (o3_opc == 1) { /* LDCLR */
        tcg_gen_not_i64(tcg_rs, tcg_rs);
    }

    /*
     * The TCG atomic primitive both loads the old value and stores
     * the new one; the result in tcg_rt is the old memory value.
     */
    fn(tcg_rt, clean_addr, tcg_rs, get_mem_index(s), mop);

    if ((mop & MO_SIGN) && size != MO_64) {
        /*
         * The signed min/max result was sign-extended to 64 bits by
         * the memory op; a 32-bit destination must be zero-extended
         * in the X register.
         */
        tcg_gen_ext32u_i64(tcg_rt, tcg_rt);
    }
}
3430
3431
3432
3433
3434
3435
3436
3437
3438
3439
3440
3441
3442
3443
3444
3445
/*
 * LDRAA/LDRAB (FEAT_PAuth): load with pointer authentication.
 * The base register is authenticated (key A or B) before the scaled
 * 10-bit signed offset is applied; optional pre-index writeback.
 */
static void disas_ldst_pac(DisasContext *s, uint32_t insn,
                           int size, int rt, bool is_vector)
{
    int rn = extract32(insn, 5, 5);
    bool is_wback = extract32(insn, 11, 1);
    bool use_key_a = !extract32(insn, 23, 1);    /* M bit selects key B */
    int offset;
    TCGv_i64 clean_addr, dirty_addr, tcg_rt;

    if (size != 3 || is_vector || !dc_isar_feature(aa64_pauth, s)) {
        unallocated_encoding(s);
        return;
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    dirty_addr = read_cpu_reg_sp(s, rn, 1);

    if (s->pauth_active) {
        /* Authenticate the base pointer with a zero modifier */
        if (use_key_a) {
            gen_helper_autda(dirty_addr, cpu_env, dirty_addr,
                             new_tmp_a64_zero(s));
        } else {
            gen_helper_autdb(dirty_addr, cpu_env, dirty_addr,
                             new_tmp_a64_zero(s));
        }
    }

    /* Form the 10-bit signed, size-scaled offset: S:imm9 << size */
    offset = (extract32(insn, 22, 1) << 9) | extract32(insn, 12, 9);
    offset = sextract32(offset << size, 0, 10 + size);
    tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);

    /* Note that "clean" and "dirty" here refer to TBI not PAC */
    clean_addr = gen_mte_check1(s, dirty_addr, false,
                                is_wback || rn != 31, size);

    tcg_rt = cpu_reg(s, rt);
    do_gpr_ld(s, tcg_rt, clean_addr, size,
              /* extend */ false, /* iss_valid */ !is_wback,
              /* iss_srt */ rt, /* iss_sf */ true, /* iss_ar */ false);

    if (is_wback) {
        tcg_gen_mov_i64(cpu_reg_sp(s, rn), dirty_addr);
    }
}
3493
3494
3495
3496
3497
3498
3499
3500
3501
3502
3503
3504
3505
3506
3507
/*
 * LDAPUR*/STLUR* (FEAT_LRCPC2): load-acquire (RCpc) / store-release
 * with an unscaled 9-bit immediate offset and no writeback.
 */
static void disas_ldst_ldapr_stlr(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int offset = sextract32(insn, 12, 9);
    int opc = extract32(insn, 22, 2);
    int size = extract32(insn, 30, 2);
    TCGv_i64 clean_addr, dirty_addr;
    bool is_store = false;
    bool extend = false;
    bool iss_sf;
    MemOp mop;

    if (!dc_isar_feature(aa64_rcpc_8_4, s)) {
        unallocated_encoding(s);
        return;
    }

    /* Acquire/release accesses are always naturally aligned */
    mop = size | MO_ALIGN;

    switch (opc) {
    case 0: /* STLURB/STLURH/STLUR */
        is_store = true;
        break;
    case 1: /* LDAPUR* (zero-extending) */
        break;
    case 2: /* LDAPURS* 64-bit destination */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        mop |= MO_SIGN;
        break;
    case 3: /* LDAPURS* 32-bit destination */
        if (size > 1) {
            unallocated_encoding(s);
            return;
        }
        mop |= MO_SIGN;
        extend = true;
        break;
    default:
        g_assert_not_reached();
    }

    iss_sf = disas_ldst_compute_iss_sf(size, (mop & MO_SIGN) != 0, opc);

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    dirty_addr = read_cpu_reg_sp(s, rn, 1);
    tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
    clean_addr = clean_data_tbi(s, dirty_addr);

    if (is_store) {
        /* Store-release: barrier before the store */
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
        do_gpr_st(s, cpu_reg(s, rt), clean_addr, mop, true, rt, iss_sf, true);
    } else {
        /*
         * Load-acquire (RCpc): a normal load followed by a barrier
         * ordering it before subsequent loads and stores.
         */
        do_gpr_ld(s, cpu_reg(s, rt), clean_addr, mop,
                  extend, true, rt, iss_sf, true);
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
    }
}
3578
3579
3580static void disas_ldst_reg(DisasContext *s, uint32_t insn)
3581{
3582 int rt = extract32(insn, 0, 5);
3583 int opc = extract32(insn, 22, 2);
3584 bool is_vector = extract32(insn, 26, 1);
3585 int size = extract32(insn, 30, 2);
3586
3587 switch (extract32(insn, 24, 2)) {
3588 case 0:
3589 if (extract32(insn, 21, 1) == 0) {
3590
3591
3592
3593
3594 disas_ldst_reg_imm9(s, insn, opc, size, rt, is_vector);
3595 return;
3596 }
3597 switch (extract32(insn, 10, 2)) {
3598 case 0:
3599 disas_ldst_atomic(s, insn, size, rt, is_vector);
3600 return;
3601 case 2:
3602 disas_ldst_reg_roffset(s, insn, opc, size, rt, is_vector);
3603 return;
3604 default:
3605 disas_ldst_pac(s, insn, size, rt, is_vector);
3606 return;
3607 }
3608 break;
3609 case 1:
3610 disas_ldst_reg_unsigned_imm(s, insn, opc, size, rt, is_vector);
3611 return;
3612 }
3613 unallocated_encoding(s);
3614}
3615
3616
3617
3618
3619
3620
3621
3622
3623
3624
3625
3626
3627
3628
3629
3630
3631
3632
3633
/*
 * AdvSIMD load/store multiple structures: LD1-LD4 / ST1-ST4 and their
 * post-indexed forms.  rpt is the number of register "repeats", selem
 * the number of structure elements interleaved per access.
 */
static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 10, 2);
    int opcode = extract32(insn, 12, 4);
    bool is_store = !extract32(insn, 22, 1);
    bool is_postidx = extract32(insn, 23, 1);
    bool is_q = extract32(insn, 30, 1);
    TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
    MemOp endian, align, mop;

    int total;    /* total bytes transferred */
    int elements; /* elements per vector register */
    int rpt;      /* num iterations */
    int selem;    /* structure elements */
    int r;

    if (extract32(insn, 31, 1) || extract32(insn, 21, 1)) {
        unallocated_encoding(s);
        return;
    }

    /* The non-post-index form requires Rm == 0 */
    if (!is_postidx && rm != 0) {
        unallocated_encoding(s);
        return;
    }

    /* From the shared decode logic */
    switch (opcode) {
    case 0x0: /* LD4/ST4 */
        rpt = 1;
        selem = 4;
        break;
    case 0x2: /* LD1/ST1, 4 registers */
        rpt = 4;
        selem = 1;
        break;
    case 0x4: /* LD3/ST3 */
        rpt = 1;
        selem = 3;
        break;
    case 0x6: /* LD1/ST1, 3 registers */
        rpt = 3;
        selem = 1;
        break;
    case 0x7: /* LD1/ST1, 1 register */
        rpt = 1;
        selem = 1;
        break;
    case 0x8: /* LD2/ST2 */
        rpt = 1;
        selem = 2;
        break;
    case 0xa: /* LD1/ST1, 2 registers */
        rpt = 2;
        selem = 1;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (size == 3 && !is_q && selem != 1) {
        /* 64-bit element, multiple structures: reserved */
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    /* For our purposes, bytes are always little-endian */
    endian = s->be_data;
    if (size == 0) {
        endian = MO_LE;
    }

    total = rpt * selem * (is_q ? 16 : 8);
    tcg_rn = cpu_reg_sp(s, rn);

    /*
     * Issue the MTE check vs the logical repeat count, above which
     * all accesses are sequential.
     */
    clean_addr = gen_mte_checkN(s, tcg_rn, is_store, is_postidx || rn != 31,
                                total);

    /*
     * Consecutive little-endian elements from a single register can
     * be promoted to a larger little-endian operation: do the whole
     * register in 8-byte chunks, keeping only the original element
     * alignment requirement.
     */
    align = MO_ALIGN;
    if (selem == 1 && endian == MO_LE) {
        align = pow2_align(size);
        size = 3;
    }
    if (!s->align_mem) {
        align = 0;
    }
    mop = endian | size | align;

    elements = (is_q ? 16 : 8) >> size;
    tcg_ebytes = tcg_const_i64(1 << size);
    for (r = 0; r < rpt; r++) {
        int e;
        for (e = 0; e < elements; e++) {
            int xs;
            for (xs = 0; xs < selem; xs++) {
                int tt = (rt + r + xs) % 32;
                if (is_store) {
                    do_vec_st(s, tt, e, clean_addr, mop);
                } else {
                    do_vec_ld(s, tt, e, clean_addr, mop);
                }
                tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
            }
        }
    }
    tcg_temp_free_i64(tcg_ebytes);

    if (!is_store) {
        /*
         * For non-quad operations, setting a slice of the low 64 bits
         * of the register clears the high 64 bits (in the ARM ARM
         * pseudocode this is implicit in the assignment to V[]); we
         * explicitly clear them all here, after the loads are done.
         */
        for (r = 0; r < rpt * selem; r++) {
            int tt = (rt + r) % 32;
            clear_vec_high(s, is_q, tt);
        }
    }

    if (is_postidx) {
        if (rm == 31) {
            /* Rm == 31 means "post-index by the transfer size" */
            tcg_gen_addi_i64(tcg_rn, tcg_rn, total);
        } else {
            tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
        }
    }
}
3783
3784
3785
3786
3787
3788
3789
3790
3791
3792
3793
3794
3795
3796
3797
3798
3799
3800
3801
3802
3803
3804
3805
/*
 * AdvSIMD load/store single structure: LD1-LD4 / ST1-ST4 (single
 * element to/from one lane) and LD1R-LD4R (load and replicate),
 * including the post-indexed forms.
 */
static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 10, 2);
    int S = extract32(insn, 12, 1);
    int opc = extract32(insn, 13, 3);
    int R = extract32(insn, 21, 1);
    int is_load = extract32(insn, 22, 1);
    int is_postidx = extract32(insn, 23, 1);
    int is_q = extract32(insn, 30, 1);

    int scale = extract32(opc, 1, 2);            /* log2 element size */
    int selem = (extract32(opc, 0, 1) << 1 | R) + 1;
    bool replicate = false;
    int index = is_q << 3 | S << 2 | size;       /* lane index, see below */
    int xs, total;
    TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
    MemOp mop;

    if (extract32(insn, 31, 1)) {
        unallocated_encoding(s);
        return;
    }
    /* The non-post-index form requires Rm == 0 */
    if (!is_postidx && rm != 0) {
        unallocated_encoding(s);
        return;
    }

    switch (scale) {
    case 3:
        /* Load and replicate (LD1R..LD4R): loads only, S must be 0 */
        if (!is_load || S) {
            unallocated_encoding(s);
            return;
        }
        scale = size;
        replicate = true;
        break;
    case 0:
        break;
    case 1:
        /* 16-bit element: low bit of "size" must be 0 */
        if (extract32(size, 0, 1)) {
            unallocated_encoding(s);
            return;
        }
        index >>= 1;
        break;
    case 2:
        if (extract32(size, 1, 1)) {
            unallocated_encoding(s);
            return;
        }
        if (!extract32(size, 0, 1)) {
            /* 32-bit element */
            index >>= 2;
        } else {
            /* 64-bit element: S must be 0 */
            if (S) {
                unallocated_encoding(s);
                return;
            }
            index >>= 3;
            scale = 3;
        }
        break;
    default:
        g_assert_not_reached();
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    total = selem << scale;      /* total bytes transferred */
    tcg_rn = cpu_reg_sp(s, rn);

    clean_addr = gen_mte_checkN(s, tcg_rn, !is_load, is_postidx || rn != 31,
                                total);
    mop = finalize_memop(s, scale);

    tcg_ebytes = tcg_const_i64(1 << scale);
    for (xs = 0; xs < selem; xs++) {
        if (replicate) {
            /* Load and replicate to all elements of the register */
            TCGv_i64 tcg_tmp = tcg_temp_new_i64();

            tcg_gen_qemu_ld_i64(tcg_tmp, clean_addr, get_mem_index(s), mop);
            tcg_gen_gvec_dup_i64(scale, vec_full_reg_offset(s, rt),
                                 (is_q + 1) * 8, vec_full_reg_size(s),
                                 tcg_tmp);
            tcg_temp_free_i64(tcg_tmp);
        } else {
            /* Load/store a single element to/from the indexed lane */
            if (is_load) {
                do_vec_ld(s, rt, index, clean_addr, mop);
            } else {
                do_vec_st(s, rt, index, clean_addr, mop);
            }
        }
        tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
        rt = (rt + 1) % 32;      /* consecutive registers for LD2..LD4 */
    }
    tcg_temp_free_i64(tcg_ebytes);

    if (is_postidx) {
        if (rm == 31) {
            /* Rm == 31 means "post-index by the transfer size" */
            tcg_gen_addi_i64(tcg_rn, tcg_rn, total);
        } else {
            tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
        }
    }
}
3921
3922
3923
3924
3925
3926
3927
3928
3929
/*
 * Load/store memory tags (FEAT_MTE): STG/STZG/ST2G/STZ2G, LDG,
 * and the bulk-tag forms STGM/STZGM/LDGM.
 *
 * index encodes the addressing mode for the single-tag forms:
 *   < 0: post-index, == 0: signed offset, > 0: pre-index.
 */
static void disas_ldst_tag(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    uint64_t offset = sextract64(insn, 12, 9) << LOG2_TAG_GRANULE;
    int op2 = extract32(insn, 10, 2);
    int op1 = extract32(insn, 22, 2);
    bool is_load = false, is_pair = false, is_zero = false, is_mult = false;
    int index = 0;
    TCGv_i64 addr, clean_addr, tcg_rt;

    /* We checked insn != 0b1101 1001 before calling this function */
    if (extract32(insn, 30, 2) != 3) {
        goto do_unallocated;
    }

    /*
     * Decode op1/op2 into the instruction flags:
     *   is_load  - LDG/LDGM (read tags) vs store forms
     *   is_pair  - ST2G/STZ2G write two tag granules
     *   is_zero  - STZ* also zero the data granule(s)
     *   is_mult  - bulk forms STGM/STZGM/LDGM (EL1+, no offset)
     */
    switch (op1) {
    case 0:
        if (op2 != 0) {
            /* STG */
            index = op2 - 2;
        } else {
            /* STZGM */
            if (s->current_el == 0 || offset != 0) {
                goto do_unallocated;
            }
            is_mult = is_zero = true;
        }
        break;
    case 1:
        if (op2 != 0) {
            /* STZG */
            is_zero = true;
            index = op2 - 2;
        } else {
            /* LDG */
            is_load = true;
        }
        break;
    case 2:
        if (op2 != 0) {
            /* ST2G */
            is_pair = true;
            index = op2 - 2;
        } else {
            /* STGM */
            if (s->current_el == 0 || offset != 0) {
                goto do_unallocated;
            }
            is_mult = true;
        }
        break;
    case 3:
        if (op2 != 0) {
            /* STZ2G */
            is_pair = is_zero = true;
            index = op2 - 2;
        } else {
            /* LDGM */
            if (s->current_el == 0 || offset != 0) {
                goto do_unallocated;
            }
            is_mult = is_load = true;
        }
        break;

    default:
    do_unallocated:
        unallocated_encoding(s);
        return;
    }

    /* The bulk forms require full MTE; the rest only the insn subset */
    if (is_mult
        ? !dc_isar_feature(aa64_mte, s)
        : !dc_isar_feature(aa64_mte_insn_reg, s)) {
        goto do_unallocated;
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    addr = read_cpu_reg_sp(s, rn, true);
    if (index >= 0) {
        /* pre-index or signed offset: apply the offset up front */
        tcg_gen_addi_i64(addr, addr, offset);
    }

    if (is_mult) {
        tcg_rt = cpu_reg(s, rt);

        if (is_zero) {
            int size = 4 << s->dcz_blocksize;

            if (s->ata) {
                gen_helper_stzgm_tags(cpu_env, addr, tcg_rt);
            }
            /*
             * The non-tags portion of STZGM is mostly like DC_ZVA,
             * except the alignment happens before the access.
             */
            clean_addr = clean_data_tbi(s, addr);
            tcg_gen_andi_i64(clean_addr, clean_addr, -size);
            gen_helper_dc_zva(cpu_env, clean_addr);
        } else if (s->ata) {
            if (is_load) {
                gen_helper_ldgm(tcg_rt, cpu_env, addr);
            } else {
                gen_helper_stgm(cpu_env, addr, tcg_rt);
            }
        } else {
            /* Tags disabled: still probe the memory for the access type */
            MMUAccessType acc = is_load ? MMU_DATA_LOAD : MMU_DATA_STORE;
            int size = 4 << GMID_EL1_BS;

            clean_addr = clean_data_tbi(s, addr);
            tcg_gen_andi_i64(clean_addr, clean_addr, -size);
            gen_probe_access(s, clean_addr, acc, size);

            if (is_load) {
                /* The result tags are zeros */
                tcg_gen_movi_i64(tcg_rt, 0);
            }
        }
        return;
    }

    if (is_load) {
        tcg_gen_andi_i64(addr, addr, -TAG_GRANULE);
        tcg_rt = cpu_reg(s, rt);
        if (s->ata) {
            gen_helper_ldg(tcg_rt, cpu_env, addr, tcg_rt);
        } else {
            /* Tags disabled: probe, then return the address with tag 0 */
            clean_addr = clean_data_tbi(s, addr);
            gen_probe_access(s, clean_addr, MMU_DATA_LOAD, MO_8);
            gen_address_with_allocation_tag0(tcg_rt, addr);
        }
    } else {
        tcg_rt = cpu_reg_sp(s, rt);
        if (!s->ata) {
            /*
             * Tags are not active: the tag is not written, but the
             * stub still probes the address for writability.
             */
            if (is_pair) {
                gen_helper_st2g_stub(cpu_env, addr);
            } else {
                gen_helper_stg_stub(cpu_env, addr);
            }
        } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
            if (is_pair) {
                gen_helper_st2g_parallel(cpu_env, addr, tcg_rt);
            } else {
                gen_helper_stg_parallel(cpu_env, addr, tcg_rt);
            }
        } else {
            if (is_pair) {
                gen_helper_st2g(cpu_env, addr, tcg_rt);
            } else {
                gen_helper_stg(cpu_env, addr, tcg_rt);
            }
        }
    }

    if (is_zero) {
        /* STZG/STZ2G: zero the data granule(s) as well */
        TCGv_i64 clean_addr = clean_data_tbi(s, addr);
        TCGv_i64 tcg_zero = tcg_const_i64(0);
        int mem_index = get_mem_index(s);
        int i, n = (1 + is_pair) << LOG2_TAG_GRANULE;

        /* First store carries the alignment requirement */
        tcg_gen_qemu_st_i64(tcg_zero, clean_addr, mem_index,
                            MO_UQ | MO_ALIGN_16);
        for (i = 8; i < n; i += 8) {
            tcg_gen_addi_i64(clean_addr, clean_addr, 8);
            tcg_gen_qemu_st_i64(tcg_zero, clean_addr, mem_index, MO_UQ);
        }
        tcg_temp_free_i64(tcg_zero);
    }

    if (index != 0) {
        /* pre-index or post-index: write back the updated base */
        if (index < 0) {
            /* post-index: the offset is applied only now */
            tcg_gen_addi_i64(addr, addr, offset);
        }
        tcg_gen_mov_i64(cpu_reg_sp(s, rn), addr);
    }
}
4124
4125
4126static void disas_ldst(DisasContext *s, uint32_t insn)
4127{
4128 switch (extract32(insn, 24, 6)) {
4129 case 0x08:
4130 disas_ldst_excl(s, insn);
4131 break;
4132 case 0x18: case 0x1c:
4133 disas_ld_lit(s, insn);
4134 break;
4135 case 0x28: case 0x29:
4136 case 0x2c: case 0x2d:
4137 disas_ldst_pair(s, insn);
4138 break;
4139 case 0x38: case 0x39:
4140 case 0x3c: case 0x3d:
4141 disas_ldst_reg(s, insn);
4142 break;
4143 case 0x0c:
4144 disas_ldst_multiple_struct(s, insn);
4145 break;
4146 case 0x0d:
4147 disas_ldst_single_struct(s, insn);
4148 break;
4149 case 0x19:
4150 if (extract32(insn, 21, 1) != 0) {
4151 disas_ldst_tag(s, insn);
4152 } else if (extract32(insn, 10, 2) == 0) {
4153 disas_ldst_ldapr_stlr(s, insn);
4154 } else {
4155 unallocated_encoding(s);
4156 }
4157 break;
4158 default:
4159 unallocated_encoding(s);
4160 break;
4161 }
4162}
4163
4164
4165
4166
4167
4168
4169
4170static void disas_pc_rel_adr(DisasContext *s, uint32_t insn)
4171{
4172 unsigned int page, rd;
4173 uint64_t base;
4174 uint64_t offset;
4175
4176 page = extract32(insn, 31, 1);
4177
4178 offset = sextract64(insn, 5, 19);
4179 offset = offset << 2 | extract32(insn, 29, 2);
4180 rd = extract32(insn, 0, 5);
4181 base = s->pc_curr;
4182
4183 if (page) {
4184
4185 base &= ~0xfff;
4186 offset <<= 12;
4187 }
4188
4189 tcg_gen_movi_i64(cpu_reg(s, rd), base + offset);
4190}
4191
4192
4193
4194
4195
4196
4197
4198
4199
4200
4201
4202
4203
4204
/*
 * Add/subtract (immediate): ADD(S)/SUB(S) Rd, Rn, #imm12{, LSL #12}.
 * The non-flag-setting forms read/write SP; the flag-setting forms
 * use the zero register for Rd.
 */
static void disas_add_sub_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    uint64_t imm = extract32(insn, 10, 12);
    bool shift = extract32(insn, 22, 1);         /* LSL #12 */
    bool setflags = extract32(insn, 29, 1);
    bool sub_op = extract32(insn, 30, 1);
    bool is_64bit = extract32(insn, 31, 1);

    TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
    TCGv_i64 tcg_rd = setflags ? cpu_reg(s, rd) : cpu_reg_sp(s, rd);
    TCGv_i64 tcg_result;

    if (shift) {
        imm <<= 12;
    }

    /* Compute into a temp so flag generation can see the full result */
    tcg_result = tcg_temp_new_i64();
    if (!setflags) {
        if (sub_op) {
            tcg_gen_subi_i64(tcg_result, tcg_rn, imm);
        } else {
            tcg_gen_addi_i64(tcg_result, tcg_rn, imm);
        }
    } else {
        TCGv_i64 tcg_imm = tcg_const_i64(imm);
        if (sub_op) {
            gen_sub_CC(is_64bit, tcg_result, tcg_rn, tcg_imm);
        } else {
            gen_add_CC(is_64bit, tcg_result, tcg_rn, tcg_imm);
        }
        tcg_temp_free_i64(tcg_imm);
    }

    if (is_64bit) {
        tcg_gen_mov_i64(tcg_rd, tcg_result);
    } else {
        /* 32-bit variant: zero-extend the result into the X register */
        tcg_gen_ext32u_i64(tcg_rd, tcg_result);
    }

    tcg_temp_free_i64(tcg_result);
}
4248
4249
4250
4251
4252
4253
4254
4255
4256
4257
4258
/*
 * Add/subtract (immediate, with tags): ADDG/SUBG (FEAT_MTE).
 * Adjusts the address by a tag-granule-scaled immediate and offsets
 * the allocation tag by uimm4.
 */
static void disas_add_sub_imm_with_tags(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int uimm4 = extract32(insn, 10, 4);          /* tag offset */
    int uimm6 = extract32(insn, 16, 6);          /* address offset, scaled */
    bool sub_op = extract32(insn, 30, 1);
    TCGv_i64 tcg_rn, tcg_rd;
    int imm;

    /* Test all of sf=1, S=0, o2=0, o3=0 in one place */
    if ((insn & 0xa040c000u) != 0x80000000u ||
        !dc_isar_feature(aa64_mte_insn_reg, s)) {
        unallocated_encoding(s);
        return;
    }

    imm = uimm6 << LOG2_TAG_GRANULE;
    if (sub_op) {
        imm = -imm;
    }

    tcg_rn = cpu_reg_sp(s, rn);
    tcg_rd = cpu_reg_sp(s, rd);

    if (s->ata) {
        /* Tags active: the helper picks a (possibly excluded) new tag */
        TCGv_i32 offset = tcg_const_i32(imm);
        TCGv_i32 tag_offset = tcg_const_i32(uimm4);

        gen_helper_addsubg(tcg_rd, cpu_env, tcg_rn, offset, tag_offset);
        tcg_temp_free_i32(tag_offset);
        tcg_temp_free_i32(offset);
    } else {
        /* Tags inactive: plain arithmetic, with the tag field zeroed */
        tcg_gen_addi_i64(tcg_rd, tcg_rn, imm);
        gen_address_with_allocation_tag0(tcg_rd, tcg_rd);
    }
}
4296
4297
4298
4299
4300
/*
 * Replicate the bottom e bits of mask across all 64 bits.
 * e must be a non-zero power of two no larger than 64.
 */
static uint64_t bitfield_replicate(uint64_t mask, unsigned int e)
{
    unsigned int span;

    assert(e != 0);
    for (span = e; span < 64; span *= 2) {
        mask |= mask << span;
    }
    return mask;
}
4310
4311
/* Return a mask with the bottom `length` bits set (1 <= length <= 64). */
static inline uint64_t bitmask64(unsigned int length)
{
    assert(length > 0 && length <= 64);
    return length == 64 ? ~0ULL : (1ULL << length) - 1;
}
4317
4318
4319
4320
4321
4322
/*
 * Decode the "logical immediate" bitmask for the immediate forms of
 * AND/ORR/EOR/ANDS (a transcription of the ARM ARM DecodeBitMasks
 * pseudocode).  Returns false if (immn, imms, immr) is a reserved
 * encoding; on success *result holds the 64-bit mask.
 *
 * The immediate describes an element of e bits (e a power of two,
 * 2..64) containing a run of s + 1 set bits rotated right by r,
 * replicated across the 64-bit result.
 */
bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn,
                            unsigned int imms, unsigned int immr)
{
    uint64_t mask;
    unsigned e, levels, s, r;
    int len;

    assert(immn < 2 && imms < 64 && immr < 64);

    /*
     * The element size is given by the position of the highest set
     * bit of immn:NOT(imms):
     *   immn = 1              -> e = 64
     *   immn = 0, imms = 0xxxxx -> e = 32
     *   immn = 0, imms = 10xxxx -> e = 16
     *   ... down to imms = 11110x -> e = 2
     * len is log2(e).
     */
    len = 31 - clz32((immn << 6) | (~imms & 0x3f));
    if (len < 1) {
        /* immn == 0, imms == 11111x: reserved */
        return false;
    }
    e = 1 << len;

    /* Only the low len bits of imms/immr participate */
    levels = e - 1;
    s = imms & levels;
    r = immr & levels;

    if (s == levels) {
        /* A run of e ones (all bits of the element set) is reserved */
        return false;
    }

    /*
     * Build the run of s + 1 ones, rotate it right by r within the
     * element, then replicate the element across 64 bits.
     */
    mask = bitmask64(s + 1);
    if (r) {
        mask = (mask >> r) | (mask << (e - r));
        mask &= bitmask64(e);
    }

    mask = bitfield_replicate(mask, e);
    *result = mask;
    return true;
}
4383
4384
4385
4386
4387
4388
4389
4390static void disas_logic_imm(DisasContext *s, uint32_t insn)
4391{
4392 unsigned int sf, opc, is_n, immr, imms, rn, rd;
4393 TCGv_i64 tcg_rd, tcg_rn;
4394 uint64_t wmask;
4395 bool is_and = false;
4396
4397 sf = extract32(insn, 31, 1);
4398 opc = extract32(insn, 29, 2);
4399 is_n = extract32(insn, 22, 1);
4400 immr = extract32(insn, 16, 6);
4401 imms = extract32(insn, 10, 6);
4402 rn = extract32(insn, 5, 5);
4403 rd = extract32(insn, 0, 5);
4404
4405 if (!sf && is_n) {
4406 unallocated_encoding(s);
4407 return;
4408 }
4409
4410 if (opc == 0x3) {
4411 tcg_rd = cpu_reg(s, rd);
4412 } else {
4413 tcg_rd = cpu_reg_sp(s, rd);
4414 }
4415 tcg_rn = cpu_reg(s, rn);
4416
4417 if (!logic_imm_decode_wmask(&wmask, is_n, imms, immr)) {
4418
4419 unallocated_encoding(s);
4420 return;
4421 }
4422
4423 if (!sf) {
4424 wmask &= 0xffffffff;
4425 }
4426
4427 switch (opc) {
4428 case 0x3:
4429 case 0x0:
4430 tcg_gen_andi_i64(tcg_rd, tcg_rn, wmask);
4431 is_and = true;
4432 break;
4433 case 0x1:
4434 tcg_gen_ori_i64(tcg_rd, tcg_rn, wmask);
4435 break;
4436 case 0x2:
4437 tcg_gen_xori_i64(tcg_rd, tcg_rn, wmask);
4438 break;
4439 default:
4440 assert(FALSE);
4441 break;
4442 }
4443
4444 if (!sf && !is_and) {
4445
4446
4447
4448 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
4449 }
4450
4451 if (opc == 3) {
4452 gen_logic_CC(sf, tcg_rd);
4453 }
4454}
4455
4456
4457
4458
4459
4460
4461
4462
4463
4464
4465
4466
4467
/*
 * Move wide (immediate): MOVN, MOVZ, MOVK
 *
 *   31 30 29 28         23 22 21 20             5 4    0
 * +--+-----+-------------+-----+----------------+------+
 * |sf| opc | 1 0 0 1 0 1 | hw  |     imm16      |  Rd  |
 * +--+-----+-------------+-----+----------------+------+
 *
 * sf: 0 -> 32 bit, 1 -> 64 bit
 * opc: 00 -> N (inverted), 10 -> Z (zeroing), 11 -> K (keep); 01 reserved
 * hw: shift/16 (0,16, and for 64-bit also 32,48)
 */
static void disas_movw_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    uint64_t imm = extract32(insn, 5, 16);
    int sf = extract32(insn, 31, 1);
    int opc = extract32(insn, 29, 2);
    int pos = extract32(insn, 21, 2) << 4; /* bit position = hw * 16 */
    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_imm;

    if (!sf && (pos >= 32)) {
        /* 32-bit form only allows hw == 0 or 1 */
        unallocated_encoding(s);
        return;
    }

    switch (opc) {
    case 0: /* MOVN */
    case 2: /* MOVZ: the whole result is computable at translate time */
        imm <<= pos;
        if (opc == 0) {
            imm = ~imm;
        }
        if (!sf) {
            imm &= 0xffffffffu;
        }
        tcg_gen_movi_i64(tcg_rd, imm);
        break;
    case 3:
        /* MOVK: insert imm16 into Rd, leaving the other bits unchanged */
        tcg_imm = tcg_const_i64(imm);
        tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_imm, pos, 16);
        tcg_temp_free_i64(tcg_imm);
        if (!sf) {
            tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
        }
        break;
    default:
        /* opc == 1 is reserved */
        unallocated_encoding(s);
        break;
    }
}
4508
4509
4510
4511
4512
4513
4514
/*
 * Bitfield: SBFM (opc 0), BFM (opc 1), UBFM (opc 2)
 *
 *   31  30 29 28         23 22  21  16 15  10 9    5 4    0
 * +----+-----+-------------+---+------+------+------+------+
 * | sf | opc | 1 0 0 1 1 0 | N | immr | imms |  Rn  |  Rd  |
 * +----+-----+-------------+---+------+------+------+------+
 */
static void disas_bitfield(DisasContext *s, uint32_t insn)
{
    unsigned int sf, n, opc, ri, si, rn, rd, bitsize, pos, len;
    TCGv_i64 tcg_rd, tcg_tmp;

    sf = extract32(insn, 31, 1);
    opc = extract32(insn, 29, 2);
    n = extract32(insn, 22, 1);
    ri = extract32(insn, 16, 6);  /* immr: rotate / source lsb */
    si = extract32(insn, 10, 6);  /* imms: source msb */
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);
    bitsize = sf ? 64 : 32;

    if (sf != n || ri >= bitsize || si >= bitsize || opc > 2) {
        /* N must track sf, immr/imms must fit the register size */
        unallocated_encoding(s);
        return;
    }

    tcg_rd = cpu_reg(s, rd);

    /*
     * Suppress the zero-extend for !sf.  Since RI and SI are constrained
     * to be smaller than bitsize, we'll never reference data outside the
     * low 32-bits anyway.
     */
    tcg_tmp = read_cpu_reg(s, rn, 1);

    /* Recognize simple(r) extractions.  */
    if (si >= ri) {
        /* Wd<s-r:0> = Wn<s:r> */
        len = (si - ri) + 1;
        if (opc == 0) { /* SBFM: ASR, SBFX, SXTB, SXTH, SXTW */
            tcg_gen_sextract_i64(tcg_rd, tcg_tmp, ri, len);
            goto done;
        } else if (opc == 2) { /* UBFM: UBFX, LSR, UXTB, UXTH */
            tcg_gen_extract_i64(tcg_rd, tcg_tmp, ri, len);
            return;
        }
        /* opc == 1, BFXIL fall through to deposit */
        tcg_gen_shri_i64(tcg_tmp, tcg_tmp, ri);
        pos = 0;
    } else {
        /*
         * Handle the ri > si case with a deposit
         * Wd<32+s-r,32-r> = Wn<s:0>
         */
        len = si + 1;
        pos = (bitsize - ri) & (bitsize - 1);
    }

    if (opc == 0 && len < ri) {
        /*
         * SBFM: sign extend the destination field from len to fill
         * the balance of the word.  Let the deposit below insert all
         * of those sign bits.
         */
        tcg_gen_sextract_i64(tcg_tmp, tcg_tmp, 0, len);
        len = ri;
    }

    if (opc == 1) { /* BFM, BFXIL: merge into the existing Rd value */
        tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, pos, len);
    } else {
        /*
         * SBFM or UBFM: We start with zero, and we haven't modified
         * any bits outside bitsize, therefore the zero-extension
         * below is unneeded.
         */
        tcg_gen_deposit_z_i64(tcg_rd, tcg_tmp, pos, len);
        return;
    }

 done:
    if (!sf) { /* zero extend final result */
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
}
4586
4587
4588
4589
4590
4591
4592
/*
 * Extract: EXTR (and its alias ROR immediate when Rn == Rm)
 *
 *   31  30 29 28         23 22  21  20  16 15    10 9    5 4    0
 * +----+------+-------------+---+----+------+--------+------+------+
 * | sf | op21 | 1 0 0 1 1 1 | N | o0 |  Rm  |  imms  |  Rn  |  Rd  |
 * +----+------+-------------+---+----+------+--------+------+------+
 */
static void disas_extract(DisasContext *s, uint32_t insn)
{
    unsigned int sf, n, rm, imm, rn, rd, bitsize, op21, op0;

    sf = extract32(insn, 31, 1);
    n = extract32(insn, 22, 1);
    rm = extract32(insn, 16, 5);
    imm = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);
    op21 = extract32(insn, 29, 2);
    op0 = extract32(insn, 21, 1);
    bitsize = sf ? 64 : 32;

    if (sf != n || op21 || op0 || imm >= bitsize) {
        /* N must equal sf, op21/op0 must be zero, imm must fit */
        unallocated_encoding(s);
    } else {
        TCGv_i64 tcg_rd, tcg_rm, tcg_rn;

        tcg_rd = cpu_reg(s, rd);

        if (unlikely(imm == 0)) {
            /*
             * imm == 0 is a special case: the result is simply Rm
             * (tcg shl_i64/extract2 cannot handle a zero count).
             */
            if (sf) {
                tcg_gen_mov_i64(tcg_rd, cpu_reg(s, rm));
            } else {
                tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, rm));
            }
        } else {
            tcg_rm = cpu_reg(s, rm);
            tcg_rn = cpu_reg(s, rn);

            if (sf) {
                /* Specialization to ROR happens in EXTRACT2.  */
                tcg_gen_extract2_i64(tcg_rd, tcg_rm, tcg_rn, imm);
            } else {
                /* 32-bit: work in i32 temps, then widen */
                TCGv_i32 t0 = tcg_temp_new_i32();

                tcg_gen_extrl_i64_i32(t0, tcg_rm);
                if (rm == rn) {
                    /* EXTR with Rn == Rm is a rotate right */
                    tcg_gen_rotri_i32(t0, t0, imm);
                } else {
                    TCGv_i32 t1 = tcg_temp_new_i32();
                    tcg_gen_extrl_i64_i32(t1, tcg_rn);
                    tcg_gen_extract2_i32(t0, t0, t1, imm);
                    tcg_temp_free_i32(t1);
                }
                tcg_gen_extu_i32_i64(tcg_rd, t0);
                tcg_temp_free_i32(t0);
            }
        }
    }
}
4648
4649
/* Data processing - immediate: dispatch on insn bits [28:23] */
static void disas_data_proc_imm(DisasContext *s, uint32_t insn)
{
    switch (extract32(insn, 23, 6)) {
    case 0x20: case 0x21: /* PC-rel. addressing (ADR/ADRP) */
        disas_pc_rel_adr(s, insn);
        break;
    case 0x22: /* Add/subtract (immediate) */
        disas_add_sub_imm(s, insn);
        break;
    case 0x23: /* Add/subtract (immediate, with tags) */
        disas_add_sub_imm_with_tags(s, insn);
        break;
    case 0x24: /* Logical (immediate) */
        disas_logic_imm(s, insn);
        break;
    case 0x25: /* Move wide (immediate) */
        disas_movw_imm(s, insn);
        break;
    case 0x26: /* Bitfield */
        disas_bitfield(s, insn);
        break;
    case 0x27: /* Extract */
        disas_extract(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
4679
4680
4681
4682
4683
4684
4685static void shift_reg(TCGv_i64 dst, TCGv_i64 src, int sf,
4686 enum a64_shift_type shift_type, TCGv_i64 shift_amount)
4687{
4688 switch (shift_type) {
4689 case A64_SHIFT_TYPE_LSL:
4690 tcg_gen_shl_i64(dst, src, shift_amount);
4691 break;
4692 case A64_SHIFT_TYPE_LSR:
4693 tcg_gen_shr_i64(dst, src, shift_amount);
4694 break;
4695 case A64_SHIFT_TYPE_ASR:
4696 if (!sf) {
4697 tcg_gen_ext32s_i64(dst, src);
4698 }
4699 tcg_gen_sar_i64(dst, sf ? src : dst, shift_amount);
4700 break;
4701 case A64_SHIFT_TYPE_ROR:
4702 if (sf) {
4703 tcg_gen_rotr_i64(dst, src, shift_amount);
4704 } else {
4705 TCGv_i32 t0, t1;
4706 t0 = tcg_temp_new_i32();
4707 t1 = tcg_temp_new_i32();
4708 tcg_gen_extrl_i64_i32(t0, src);
4709 tcg_gen_extrl_i64_i32(t1, shift_amount);
4710 tcg_gen_rotr_i32(t0, t0, t1);
4711 tcg_gen_extu_i32_i64(dst, t0);
4712 tcg_temp_free_i32(t0);
4713 tcg_temp_free_i32(t1);
4714 }
4715 break;
4716 default:
4717 assert(FALSE);
4718 break;
4719 }
4720
4721 if (!sf) {
4722 tcg_gen_ext32u_i64(dst, dst);
4723 }
4724}
4725
4726
4727
4728
4729
/*
 * Shift a TCGv src by immediate, put result in dst.
 * The shift amount must be in range (this should always be true as the
 * relevant instructions will UNDEF on bad shift immediates).
 */
static void shift_reg_imm(TCGv_i64 dst, TCGv_i64 src, int sf,
                          enum a64_shift_type shift_type, unsigned int shift_i)
{
    assert(shift_i < (sf ? 64 : 32));

    if (shift_i == 0) {
        /* No shift: just copy (also avoids ROR-by-zero issues) */
        tcg_gen_mov_i64(dst, src);
    } else {
        TCGv_i64 shift_const;

        shift_const = tcg_const_i64(shift_i);
        shift_reg(dst, src, sf, shift_type, shift_const);
        tcg_temp_free_i64(shift_const);
    }
}
4745
4746
4747
4748
4749
4750
4751
4752static void disas_logic_reg(DisasContext *s, uint32_t insn)
4753{
4754 TCGv_i64 tcg_rd, tcg_rn, tcg_rm;
4755 unsigned int sf, opc, shift_type, invert, rm, shift_amount, rn, rd;
4756
4757 sf = extract32(insn, 31, 1);
4758 opc = extract32(insn, 29, 2);
4759 shift_type = extract32(insn, 22, 2);
4760 invert = extract32(insn, 21, 1);
4761 rm = extract32(insn, 16, 5);
4762 shift_amount = extract32(insn, 10, 6);
4763 rn = extract32(insn, 5, 5);
4764 rd = extract32(insn, 0, 5);
4765
4766 if (!sf && (shift_amount & (1 << 5))) {
4767 unallocated_encoding(s);
4768 return;
4769 }
4770
4771 tcg_rd = cpu_reg(s, rd);
4772
4773 if (opc == 1 && shift_amount == 0 && shift_type == 0 && rn == 31) {
4774
4775
4776
4777 tcg_rm = cpu_reg(s, rm);
4778 if (invert) {
4779 tcg_gen_not_i64(tcg_rd, tcg_rm);
4780 if (!sf) {
4781 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
4782 }
4783 } else {
4784 if (sf) {
4785 tcg_gen_mov_i64(tcg_rd, tcg_rm);
4786 } else {
4787 tcg_gen_ext32u_i64(tcg_rd, tcg_rm);
4788 }
4789 }
4790 return;
4791 }
4792
4793 tcg_rm = read_cpu_reg(s, rm, sf);
4794
4795 if (shift_amount) {
4796 shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, shift_amount);
4797 }
4798
4799 tcg_rn = cpu_reg(s, rn);
4800
4801 switch (opc | (invert << 2)) {
4802 case 0:
4803 case 3:
4804 tcg_gen_and_i64(tcg_rd, tcg_rn, tcg_rm);
4805 break;
4806 case 1:
4807 tcg_gen_or_i64(tcg_rd, tcg_rn, tcg_rm);
4808 break;
4809 case 2:
4810 tcg_gen_xor_i64(tcg_rd, tcg_rn, tcg_rm);
4811 break;
4812 case 4:
4813 case 7:
4814 tcg_gen_andc_i64(tcg_rd, tcg_rn, tcg_rm);
4815 break;
4816 case 5:
4817 tcg_gen_orc_i64(tcg_rd, tcg_rn, tcg_rm);
4818 break;
4819 case 6:
4820 tcg_gen_eqv_i64(tcg_rd, tcg_rn, tcg_rm);
4821 break;
4822 default:
4823 assert(FALSE);
4824 break;
4825 }
4826
4827 if (!sf) {
4828 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
4829 }
4830
4831 if (opc == 3) {
4832 gen_logic_CC(sf, tcg_rd);
4833 }
4834}
4835
4836
4837
4838
4839
4840
4841
4842
4843
4844
4845
4846
4847
4848
4849
4850
4851
4852
/*
 * Add/subtract (extended register)
 *
 *  31|30|29|28       24|23 22|21|20   16|15  13|12  10|9  5|4  0|
 * +--+--+--+-----------+-----+--+-------+------+------+----+----+
 * |sf|op| S| 0 1 0 1 1 | opt | 1|  Rm   |option| imm3 | Rn | Rd |
 * +--+--+--+-----------+-----+--+-------+------+------+----+----+
 *
 *  sf: 0 -> 32bit, 1 -> 64bit
 *  op: 0 -> add  , 1 -> sub
 *   S: 1 -> set flags
 * opt: 00
 * option: extension type (see ext_and_shift_reg)
 * imm3: optional shift to Rm, 0-4 allowed
 */
static void disas_add_sub_ext_reg(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int imm3 = extract32(insn, 10, 3);
    int option = extract32(insn, 13, 3);
    int rm = extract32(insn, 16, 5);
    int opt = extract32(insn, 22, 2);
    bool setflags = extract32(insn, 29, 1);
    bool sub_op = extract32(insn, 30, 1);
    bool sf = extract32(insn, 31, 1);

    TCGv_i64 tcg_rm, tcg_rn; /* temps */
    TCGv_i64 tcg_rd;
    TCGv_i64 tcg_result;

    if (imm3 > 4 || opt != 0) {
        unallocated_encoding(s);
        return;
    }

    /* non-flag setting ops may use SP as the destination */
    if (!setflags) {
        tcg_rd = cpu_reg_sp(s, rd);
    } else {
        tcg_rd = cpu_reg(s, rd);
    }
    tcg_rn = read_cpu_reg_sp(s, rn, sf);

    tcg_rm = read_cpu_reg(s, rm, sf);
    ext_and_shift_reg(tcg_rm, tcg_rm, option, imm3);

    tcg_result = tcg_temp_new_i64();

    if (!setflags) {
        if (sub_op) {
            tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
        } else {
            tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
        }
    } else {
        if (sub_op) {
            gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
        } else {
            gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
        }
    }

    if (sf) {
        tcg_gen_mov_i64(tcg_rd, tcg_result);
    } else {
        tcg_gen_ext32u_i64(tcg_rd, tcg_result);
    }

    tcg_temp_free_i64(tcg_result);
}
4909
4910
4911
4912
4913
4914
4915
4916
4917
4918
4919
4920
4921
4922
4923
/*
 * Add/subtract (shifted register)
 *
 *  31 30 29 28       24 23 22 21 20   16 15     10 9    5 4    0
 * +--+--+--+-----------+-----+--+-------+---------+------+------+
 * |sf|op| S| 0 1 0 1 1 |shift| 0|  Rm   |  imm6   |  Rn  |  Rd  |
 * +--+--+--+-----------+-----+--+-------+---------+------+------+
 *
 *    sf: 0 -> 32bit, 1 -> 64bit
 *    op: 0 -> add  , 1 -> sub
 *     S: 1 -> set flags
 * shift: 00 -> LSL, 01 -> LSR, 10 -> ASR, 11 -> RESERVED
 *  imm6: Shift amount to apply to Rm before the add/sub
 */
static void disas_add_sub_reg(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int imm6 = extract32(insn, 10, 6);
    int rm = extract32(insn, 16, 5);
    int shift_type = extract32(insn, 22, 2);
    bool setflags = extract32(insn, 29, 1);
    bool sub_op = extract32(insn, 30, 1);
    bool sf = extract32(insn, 31, 1);

    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_rn, tcg_rm;
    TCGv_i64 tcg_result;

    if ((shift_type == 3) || (!sf && (imm6 > 31))) {
        /* ROR shift type and 32-bit shifts >= 32 are reserved */
        unallocated_encoding(s);
        return;
    }

    tcg_rn = read_cpu_reg(s, rn, sf);
    tcg_rm = read_cpu_reg(s, rm, sf);

    shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, imm6);

    tcg_result = tcg_temp_new_i64();

    if (!setflags) {
        if (sub_op) {
            tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
        } else {
            tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
        }
    } else {
        if (sub_op) {
            gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
        } else {
            gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
        }
    }

    if (sf) {
        tcg_gen_mov_i64(tcg_rd, tcg_result);
    } else {
        tcg_gen_ext32u_i64(tcg_rd, tcg_result);
    }

    tcg_temp_free_i64(tcg_result);
}
4973
4974
4975
4976
4977
4978
4979
4980
/*
 * Data-processing (3 source): MADD, MSUB, SMADDL, SMSUBL, SMULH,
 * UMADDL, UMSUBL, UMULH
 *
 *   31 30  29 28       24 23 21  20  16  15  14  10 9    5 4    0
 *  +--+------+-----------+------+------+----+------+------+------+
 *  |sf| op54 | 1 1 0 1 1 | op31 |  Rm  | o0 |  Ra  |  Rn  |  Rd  |
 *  +--+------+-----------+------+------+----+------+------+------+
 */
static void disas_data_proc_3src(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int ra = extract32(insn, 10, 5);
    int rm = extract32(insn, 16, 5);
    /* op_id packs sf:op54:op31:o0 so one switch can validate everything */
    int op_id = (extract32(insn, 29, 3) << 4) |
                (extract32(insn, 21, 3) << 1) |
                extract32(insn, 15, 1);
    bool sf = extract32(insn, 31, 1);
    bool is_sub = extract32(op_id, 0, 1);
    bool is_high = extract32(op_id, 2, 1);
    bool is_signed = false;
    TCGv_i64 tcg_op1;
    TCGv_i64 tcg_op2;
    TCGv_i64 tcg_tmp;

    /* Note that op_id == 0x4x implies sf == 1 (64-bit forms) */
    switch (op_id) {
    case 0x42: /* SMADDL */
    case 0x43: /* SMSUBL */
    case 0x44: /* SMULH */
        is_signed = true;
        break;
    case 0x0:  /* MADD (32bit) */
    case 0x1:  /* MSUB (32bit) */
    case 0x40: /* MADD (64bit) */
    case 0x41: /* MSUB (64bit) */
    case 0x4a: /* UMADDL */
    case 0x4b: /* UMSUBL */
    case 0x4c: /* UMULH */
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (is_high) {
        /* SMULH/UMULH: keep only the high half of the 128-bit product */
        TCGv_i64 low_bits = tcg_temp_new_i64(); /* low bits discarded */
        TCGv_i64 tcg_rd = cpu_reg(s, rd);
        TCGv_i64 tcg_rn = cpu_reg(s, rn);
        TCGv_i64 tcg_rm = cpu_reg(s, rm);

        if (is_signed) {
            tcg_gen_muls2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
        } else {
            tcg_gen_mulu2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
        }

        tcg_temp_free_i64(low_bits);
        return;
    }

    tcg_op1 = tcg_temp_new_i64();
    tcg_op2 = tcg_temp_new_i64();
    tcg_tmp = tcg_temp_new_i64();

    if (op_id < 0x42) {
        /* MADD/MSUB: full-width operands */
        tcg_gen_mov_i64(tcg_op1, cpu_reg(s, rn));
        tcg_gen_mov_i64(tcg_op2, cpu_reg(s, rm));
    } else {
        /* The long-multiply forms take 32-bit operands, widened here */
        if (is_signed) {
            tcg_gen_ext32s_i64(tcg_op1, cpu_reg(s, rn));
            tcg_gen_ext32s_i64(tcg_op2, cpu_reg(s, rm));
        } else {
            tcg_gen_ext32u_i64(tcg_op1, cpu_reg(s, rn));
            tcg_gen_ext32u_i64(tcg_op2, cpu_reg(s, rm));
        }
    }

    if (ra == 31 && !is_sub) {
        /* Special-case MADD with rA == XZR; this is the standard MUL alias */
        tcg_gen_mul_i64(cpu_reg(s, rd), tcg_op1, tcg_op2);
    } else {
        tcg_gen_mul_i64(tcg_tmp, tcg_op1, tcg_op2);
        if (is_sub) {
            tcg_gen_sub_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
        } else {
            tcg_gen_add_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
        }
    }

    if (!sf) { /* zero extend final result */
        tcg_gen_ext32u_i64(cpu_reg(s, rd), cpu_reg(s, rd));
    }

    tcg_temp_free_i64(tcg_op1);
    tcg_temp_free_i64(tcg_op2);
    tcg_temp_free_i64(tcg_tmp);
}
5071
5072
5073
5074
5075
5076
5077
5078
/*
 * Add/subtract (with carry): ADC, ADCS, SBC, SBCS
 *
 *  31 30 29 28 27 26 25 24 23 22 21  20  16  15       10  9    5 4   0
 * +--+--+--+------------------------+------+-------------+------+-----+
 * |sf|op| S| 1  1  0  1  0  0  0  0 |  rm  | 0 0 0 0 0 0 |  rn  |  rd |
 * +--+--+--+------------------------+------+-------------+------+-----+
 */
static void disas_adc_sbc(DisasContext *s, uint32_t insn)
{
    unsigned int sf, op, setflags, rm, rn, rd;
    TCGv_i64 tcg_y, tcg_rn, tcg_rd;

    sf = extract32(insn, 31, 1);
    op = extract32(insn, 30, 1);
    setflags = extract32(insn, 29, 1);
    rm = extract32(insn, 16, 5);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    tcg_rd = cpu_reg(s, rd);
    tcg_rn = cpu_reg(s, rn);

    if (op) {
        /* SBC/SBCS: subtract is implemented as add of ~Rm with carry */
        tcg_y = new_tmp_a64(s);
        tcg_gen_not_i64(tcg_y, cpu_reg(s, rm));
    } else {
        tcg_y = cpu_reg(s, rm);
    }

    if (setflags) {
        gen_adc_CC(sf, tcg_rd, tcg_rn, tcg_y);
    } else {
        gen_adc(sf, tcg_rd, tcg_rn, tcg_y);
    }
}
5107
5108
5109
5110
5111
5112
5113
5114
/*
 * Rotate right into flags (RMIF, FEAT_FlagM)
 *  31 30 29                21       15          10      5  4      0
 * +--+--+--+-----------------+--------+-----------+------+--+------+
 * |sf|op| S| 1 1 0 1 0 0 0 0 |  imm6  | 0 0 0 0 1 |  Rn  |o2| mask |
 * +--+--+--+-----------------+--------+-----------+------+--+------+
 *
 * Rotates Rn right by imm6 and copies the bottom four bits of the
 * result into the NZCV flags selected by mask.
 */
static void disas_rotate_right_into_flags(DisasContext *s, uint32_t insn)
{
    int mask = extract32(insn, 0, 4);
    int o2 = extract32(insn, 4, 1);
    int rn = extract32(insn, 5, 5);
    int imm6 = extract32(insn, 15, 6);
    int sf_op_s = extract32(insn, 29, 3);
    TCGv_i64 tcg_rn;
    TCGv_i32 nzcv;

    if (sf_op_s != 5 || o2 != 0 || !dc_isar_feature(aa64_condm_4, s)) {
        unallocated_encoding(s);
        return;
    }

    tcg_rn = read_cpu_reg(s, rn, 1);
    tcg_gen_rotri_i64(tcg_rn, tcg_rn, imm6);

    /* nzcv holds the rotated value; bits 3..0 are the candidate N,Z,C,V */
    nzcv = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(nzcv, tcg_rn);

    if (mask & 8) { /* N: move bit 3 to the sign bit of cpu_NF */
        tcg_gen_shli_i32(cpu_NF, nzcv, 31 - 3);
    }
    if (mask & 4) { /* Z: cpu_ZF is inverted (zero value means Z set) */
        tcg_gen_not_i32(cpu_ZF, nzcv);
        tcg_gen_andi_i32(cpu_ZF, cpu_ZF, 4);
    }
    if (mask & 2) { /* C: cpu_CF holds the carry in bit 0 */
        tcg_gen_extract_i32(cpu_CF, nzcv, 1, 1);
    }
    if (mask & 1) { /* V: move bit 0 to the sign bit of cpu_VF */
        tcg_gen_shli_i32(cpu_VF, nzcv, 31 - 0);
    }

    tcg_temp_free_i32(nzcv);
}
5152
5153
5154
5155
5156
5157
5158
5159
/*
 * Evaluate into flags (SETF8/SETF16, FEAT_FlagM)
 *  31 30 29                21        15   14        10      5  4      0
 * +--+--+--+-----------------+---------+----+---------+------+--+------+
 * |sf|op| S| 1 1 0 1 0 0 0 0 | opcode2 | sz | 0 0 1 0 | rn   |o3| mask |
 * +--+--+--+-----------------+---------+----+---------+------+--+------+
 *
 * sz == 0 -> SETF8 (operand byte), sz == 1 -> SETF16 (operand halfword).
 */
static void disas_evaluate_into_flags(DisasContext *s, uint32_t insn)
{
    int o3_mask = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int o2 = extract32(insn, 15, 6);
    int sz = extract32(insn, 14, 1);
    int sf_op_s = extract32(insn, 29, 3);
    TCGv_i32 tmp;
    int shift;

    if (sf_op_s != 1 || o2 != 0 || o3_mask != 0xd ||
        !dc_isar_feature(aa64_condm_4, s)) {
        unallocated_encoding(s);
        return;
    }
    shift = sz ? 16 : 24;  /* SETF16 / SETF8 */

    tmp = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(tmp, cpu_reg(s, rn));
    /* N = operand sign bit (bit 7 or 15), moved to bit 31 */
    tcg_gen_shli_i32(cpu_NF, tmp, shift);
    /* V = sign bit XOR the bit below it, computed via two shifts */
    tcg_gen_shli_i32(cpu_VF, tmp, shift - 1);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_VF, cpu_NF);
    tcg_temp_free_i32(tmp);
}
5185
5186
5187
5188
5189
5190
5191
5192
/*
 * Conditional compare (immediate / register): CCMN, CCMP
 *  31 30 29 28 27 26 25 24 23 22 21  20    16 15  12  11  10  9   5  4 3   0
 * +--+--+--+------------------------+--------+------+----+--+------+--+-----+
 * |sf|op| S| 1  1  0  1  0  0  1  0 |imm5/rm | cond |i/r |o2|  Rn  |o3|nzcv |
 * +--+--+--+------------------------+--------+------+----+--+------+--+-----+
 *
 * If cond holds, behave like a flag-setting ADD/SUB of Rn and the second
 * operand; otherwise set the flags to the literal nzcv field.
 */
static void disas_cc(DisasContext *s, uint32_t insn)
{
    unsigned int sf, op, y, cond, rn, nzcv, is_imm;
    TCGv_i32 tcg_t0, tcg_t1, tcg_t2;
    TCGv_i64 tcg_tmp, tcg_y, tcg_rn;
    DisasCompare c;

    if (!extract32(insn, 29, 1)) {
        /* S == 0 is reserved */
        unallocated_encoding(s);
        return;
    }
    if (insn & (1 << 10 | 1 << 4)) {
        /* o2 and o3 must be zero */
        unallocated_encoding(s);
        return;
    }
    sf = extract32(insn, 31, 1);
    op = extract32(insn, 30, 1);
    is_imm = extract32(insn, 11, 1);
    y = extract32(insn, 16, 5); /* y = rm (reg) or imm5 (imm) */
    cond = extract32(insn, 12, 4);
    rn = extract32(insn, 5, 5);
    nzcv = extract32(insn, 0, 4);

    /* Set T0 = !COND.  */
    tcg_t0 = tcg_temp_new_i32();
    arm_test_cc(&c, cond);
    tcg_gen_setcondi_i32(tcg_invert_cond(c.cond), tcg_t0, c.value, 0);
    arm_free_cc(&c);

    /* Load the arguments for the new comparison.  */
    if (is_imm) {
        tcg_y = new_tmp_a64(s);
        tcg_gen_movi_i64(tcg_y, y);
    } else {
        tcg_y = cpu_reg(s, y);
    }
    tcg_rn = cpu_reg(s, rn);

    /* Set the flags for the new comparison (result itself is discarded).  */
    tcg_tmp = tcg_temp_new_i64();
    if (op) {
        gen_sub_CC(sf, tcg_tmp, tcg_rn, tcg_y);
    } else {
        gen_add_CC(sf, tcg_tmp, tcg_rn, tcg_y);
    }
    tcg_temp_free_i64(tcg_tmp);

    /*
     * If COND was false, force the flags to #nzcv.  Compute two masks
     * to help with this: T1 = (COND ? 0 : -1), T2 = (COND ? -1 : 0).
     * For hosts with ANDC, T1 alone suffices; otherwise use T2 for the
     * "clear when !COND" cases.  The unused mask is dead code the tcg
     * optimizer can delete.
     */
    tcg_t1 = tcg_temp_new_i32();
    tcg_t2 = tcg_temp_new_i32();
    tcg_gen_neg_i32(tcg_t1, tcg_t0);
    tcg_gen_subi_i32(tcg_t2, tcg_t0, 1);

    if (nzcv & 8) { /* N: force cpu_NF negative when !COND */
        tcg_gen_or_i32(cpu_NF, cpu_NF, tcg_t1);
    } else {
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_NF, cpu_NF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_NF, cpu_NF, tcg_t2);
        }
    }
    if (nzcv & 4) { /* Z: force cpu_ZF to zero (== Z set) when !COND */
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_ZF, cpu_ZF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_ZF, cpu_ZF, tcg_t2);
        }
    } else {
        tcg_gen_or_i32(cpu_ZF, cpu_ZF, tcg_t0);
    }
    if (nzcv & 2) { /* C: force carry bit set when !COND */
        tcg_gen_or_i32(cpu_CF, cpu_CF, tcg_t0);
    } else {
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_CF, cpu_CF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_CF, cpu_CF, tcg_t2);
        }
    }
    if (nzcv & 1) { /* V: force cpu_VF negative when !COND */
        tcg_gen_or_i32(cpu_VF, cpu_VF, tcg_t1);
    } else {
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_VF, cpu_VF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_VF, cpu_VF, tcg_t2);
        }
    }
    tcg_temp_free_i32(tcg_t0);
    tcg_temp_free_i32(tcg_t1);
    tcg_temp_free_i32(tcg_t2);
}
5290
5291
5292
5293
5294
5295
5296
/*
 * Conditional select: CSEL, CSINC, CSINV, CSNEG
 *
 *   31   30  29  28             21 20  16 15  12 11 10 9    5 4    0
 * +----+----+---+-----------------+------+------+-----+------+------+
 * | sf | op | S | 1 1 0 1 0 1 0 0 |  Rm  | cond | op2 |  Rn  |  Rd  |
 * +----+----+---+-----------------+------+------+-----+------+------+
 */
static void disas_cond_select(DisasContext *s, uint32_t insn)
{
    unsigned int sf, else_inv, rm, cond, else_inc, rn, rd;
    TCGv_i64 tcg_rd, zero;
    DisasCompare64 c;

    if (extract32(insn, 29, 1) || extract32(insn, 11, 1)) {
        /* S == 1 or op2<1> == 1 */
        unallocated_encoding(s);
        return;
    }
    sf = extract32(insn, 31, 1);
    else_inv = extract32(insn, 30, 1);  /* invert Rm in the else case */
    rm = extract32(insn, 16, 5);
    cond = extract32(insn, 12, 4);
    else_inc = extract32(insn, 10, 1);  /* increment Rm in the else case */
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    tcg_rd = cpu_reg(s, rd);

    a64_test_cc(&c, cond);
    zero = tcg_const_i64(0);

    if (rn == 31 && rm == 31 && (else_inc ^ else_inv)) {
        /* CSET (CSINC) & CSETM (CSINV) aliases: materialize 0/1 or 0/-1 */
        tcg_gen_setcond_i64(tcg_invert_cond(c.cond), tcg_rd, c.value, zero);
        if (else_inv) {
            tcg_gen_neg_i64(tcg_rd, tcg_rd);
        }
    } else {
        TCGv_i64 t_true = cpu_reg(s, rn);
        TCGv_i64 t_false = read_cpu_reg(s, rm, 1);
        if (else_inv && else_inc) { /* CSNEG */
            tcg_gen_neg_i64(t_false, t_false);
        } else if (else_inv) {      /* CSINV */
            tcg_gen_not_i64(t_false, t_false);
        } else if (else_inc) {      /* CSINC */
            tcg_gen_addi_i64(t_false, t_false, 1);
        }
        tcg_gen_movcond_i64(c.cond, tcg_rd, c.value, zero, t_true, t_false);
    }

    tcg_temp_free_i64(zero);
    a64_free_cc(&c);

    if (!sf) { /* zero extend final result */
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
}
5347
5348static void handle_clz(DisasContext *s, unsigned int sf,
5349 unsigned int rn, unsigned int rd)
5350{
5351 TCGv_i64 tcg_rd, tcg_rn;
5352 tcg_rd = cpu_reg(s, rd);
5353 tcg_rn = cpu_reg(s, rn);
5354
5355 if (sf) {
5356 tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64);
5357 } else {
5358 TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
5359 tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
5360 tcg_gen_clzi_i32(tcg_tmp32, tcg_tmp32, 32);
5361 tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
5362 tcg_temp_free_i32(tcg_tmp32);
5363 }
5364}
5365
5366static void handle_cls(DisasContext *s, unsigned int sf,
5367 unsigned int rn, unsigned int rd)
5368{
5369 TCGv_i64 tcg_rd, tcg_rn;
5370 tcg_rd = cpu_reg(s, rd);
5371 tcg_rn = cpu_reg(s, rn);
5372
5373 if (sf) {
5374 tcg_gen_clrsb_i64(tcg_rd, tcg_rn);
5375 } else {
5376 TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
5377 tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
5378 tcg_gen_clrsb_i32(tcg_tmp32, tcg_tmp32);
5379 tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
5380 tcg_temp_free_i32(tcg_tmp32);
5381 }
5382}
5383
5384static void handle_rbit(DisasContext *s, unsigned int sf,
5385 unsigned int rn, unsigned int rd)
5386{
5387 TCGv_i64 tcg_rd, tcg_rn;
5388 tcg_rd = cpu_reg(s, rd);
5389 tcg_rn = cpu_reg(s, rn);
5390
5391 if (sf) {
5392 gen_helper_rbit64(tcg_rd, tcg_rn);
5393 } else {
5394 TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
5395 tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
5396 gen_helper_rbit(tcg_tmp32, tcg_tmp32);
5397 tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
5398 tcg_temp_free_i32(tcg_tmp32);
5399 }
5400}
5401
5402
5403static void handle_rev64(DisasContext *s, unsigned int sf,
5404 unsigned int rn, unsigned int rd)
5405{
5406 if (!sf) {
5407 unallocated_encoding(s);
5408 return;
5409 }
5410 tcg_gen_bswap64_i64(cpu_reg(s, rd), cpu_reg(s, rn));
5411}
5412
5413
5414
5415
/*
 * REV with sf == 1 (full 64-bit reverse), or the 32-bit byte reverse
 * REV32 with sf == 0 (zero-extending the result).
 */
static void handle_rev32(DisasContext *s, unsigned int sf,
                         unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_rn = cpu_reg(s, rn);

    if (sf) {
        /* REV32: byte-swap each 32-bit word: bswap64 then swap the halves */
        tcg_gen_bswap64_i64(tcg_rd, tcg_rn);
        tcg_gen_rotri_i64(tcg_rd, tcg_rd, 32);
    } else {
        /* REV: swap the bottom 32 bits, zero-extending (TCG_BSWAP_OZ) */
        tcg_gen_bswap32_i64(tcg_rd, tcg_rn, TCG_BSWAP_OZ);
    }
}
5429
5430
/* REV16 (opcode only has one size, sf controls 32/64 bit width) */
static void handle_rev16(DisasContext *s, unsigned int sf,
                         unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();
    TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
    /* Mask selecting the low byte of every 16-bit lane */
    TCGv_i64 mask = tcg_const_i64(sf ? 0x00ff00ff00ff00ffull : 0x00ff00ff);

    /*
     * Swap adjacent bytes within each halfword:
     *   rd = (rn & mask) << 8 | (rn >> 8) & mask
     */
    tcg_gen_shri_i64(tcg_tmp, tcg_rn, 8);
    tcg_gen_and_i64(tcg_rd, tcg_rn, mask);
    tcg_gen_and_i64(tcg_tmp, tcg_tmp, mask);
    tcg_gen_shli_i64(tcg_rd, tcg_rd, 8);
    tcg_gen_or_i64(tcg_rd, tcg_rd, tcg_tmp);

    tcg_temp_free_i64(mask);
    tcg_temp_free_i64(tcg_tmp);
}
5448
5449
5450
5451
5452
5453
5454
/*
 * Data-processing (1 source)
 *   31  30  29  28             21 20     16 15    10 9    5 4    0
 * +----+---+---+-----------------+---------+--------+------+------+
 * | sf | 1 | S | 1 1 0 1 0 1 1 0 | opcode2 | opcode |  Rn  |  Rd  |
 * +----+---+---+-----------------+---------+--------+------+------+
 */
static void disas_data_proc_1src(DisasContext *s, uint32_t insn)
{
    unsigned int sf, opcode, opcode2, rn, rd;
    TCGv_i64 tcg_rd;

    if (extract32(insn, 29, 1)) {
        /* S must be zero */
        unallocated_encoding(s);
        return;
    }

    sf = extract32(insn, 31, 1);
    opcode = extract32(insn, 10, 6);
    opcode2 = extract32(insn, 16, 5);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

/* Pack sf, opcode2 and opcode into one value so a single switch decodes */
#define MAP(SF, O2, O1) ((SF) | (O1 << 1) | (O2 << 7))

    switch (MAP(sf, opcode2, opcode)) {
    case MAP(0, 0x00, 0x00): /* RBIT */
    case MAP(1, 0x00, 0x00):
        handle_rbit(s, sf, rn, rd);
        break;
    case MAP(0, 0x00, 0x01): /* REV16 */
    case MAP(1, 0x00, 0x01):
        handle_rev16(s, sf, rn, rd);
        break;
    case MAP(0, 0x00, 0x02): /* REV/REV32 */
    case MAP(1, 0x00, 0x02):
        handle_rev32(s, sf, rn, rd);
        break;
    case MAP(1, 0x00, 0x03): /* REV64 */
        handle_rev64(s, sf, rn, rd);
        break;
    case MAP(0, 0x00, 0x04): /* CLZ */
    case MAP(1, 0x00, 0x04):
        handle_clz(s, sf, rn, rd);
        break;
    case MAP(0, 0x00, 0x05): /* CLS */
    case MAP(1, 0x00, 0x05):
        handle_cls(s, sf, rn, rd);
        break;
    /*
     * Pointer-authentication ops (FEAT_PAuth): when the feature exists
     * but PAuth is not active these execute as NOPs; without the feature
     * they are unallocated.
     */
    case MAP(1, 0x01, 0x00): /* PACIA */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacia(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x01): /* PACIB */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacib(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x02): /* PACDA */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacda(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x03): /* PACDB */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacdb(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x04): /* AUTIA */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autia(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x05): /* AUTIB */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autib(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x06): /* AUTDA */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autda(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x07): /* AUTDB */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autdb(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    /* The Z-suffix forms require Rn == 31 and use a zero modifier */
    case MAP(1, 0x01, 0x08): /* PACIZA */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacia(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
        }
        break;
    case MAP(1, 0x01, 0x09): /* PACIZB */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacib(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
        }
        break;
    case MAP(1, 0x01, 0x0a): /* PACDZA */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacda(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
        }
        break;
    case MAP(1, 0x01, 0x0b): /* PACDZB */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacdb(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
        }
        break;
    case MAP(1, 0x01, 0x0c): /* AUTIZA */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autia(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
        }
        break;
    case MAP(1, 0x01, 0x0d): /* AUTIZB */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autib(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
        }
        break;
    case MAP(1, 0x01, 0x0e): /* AUTDZA */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autda(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
        }
        break;
    case MAP(1, 0x01, 0x0f): /* AUTDZB */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autdb(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
        }
        break;
    case MAP(1, 0x01, 0x10): /* XPACI */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_xpaci(tcg_rd, cpu_env, tcg_rd);
        }
        break;
    case MAP(1, 0x01, 0x11): /* XPACD */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_xpacd(tcg_rd, cpu_env, tcg_rd);
        }
        break;
    default:
    do_unallocated:
        unallocated_encoding(s);
        break;
    }

#undef MAP
}
5649
/* UDIV/SDIV: divide Rn by Rm into Rd (32 or 64 bit per sf). */
static void handle_div(DisasContext *s, bool is_signed, unsigned int sf,
                       unsigned int rm, unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_n, tcg_m, tcg_rd;
    tcg_rd = cpu_reg(s, rd);

    if (!sf && is_signed) {
        /* 32-bit SDIV: sign-extend operands so the 64-bit helper works */
        tcg_n = new_tmp_a64(s);
        tcg_m = new_tmp_a64(s);
        tcg_gen_ext32s_i64(tcg_n, cpu_reg(s, rn));
        tcg_gen_ext32s_i64(tcg_m, cpu_reg(s, rm));
    } else {
        tcg_n = read_cpu_reg(s, rn, sf);
        tcg_m = read_cpu_reg(s, rm, sf);
    }

    /* Helpers implement the AArch64 semantics (incl. divide-by-zero) */
    if (is_signed) {
        gen_helper_sdiv64(tcg_rd, tcg_n, tcg_m);
    } else {
        gen_helper_udiv64(tcg_rd, tcg_n, tcg_m);
    }

    if (!sf) { /* zero extend final result */
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
}
5676
5677
/* LSLV, LSRV, ASRV, RORV: shift Rn by the low bits of Rm into Rd. */
static void handle_shift_reg(DisasContext *s,
                             enum a64_shift_type shift_type, unsigned int sf,
                             unsigned int rm, unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_shift = tcg_temp_new_i64();
    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);

    /* The shift amount is Rm modulo the register width */
    tcg_gen_andi_i64(tcg_shift, cpu_reg(s, rm), sf ? 63 : 31);
    shift_reg(tcg_rd, tcg_rn, sf, shift_type, tcg_shift);
    tcg_temp_free_i64(tcg_shift);
}
5690
5691
/*
 * CRC32, CRC32C (FEAT_CRC32): accumulate Rn over 1 << sz bytes of Rm
 * into Rd using the CRC-32 or CRC-32C polynomial.
 */
static void handle_crc32(DisasContext *s,
                         unsigned int sf, unsigned int sz, bool crc32c,
                         unsigned int rm, unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_acc, tcg_val;
    TCGv_i32 tcg_bytes;

    if (!dc_isar_feature(aa64_crc32, s)
        || (sf == 1 && sz != 3)
        || (sf == 0 && sz == 3)) {
        /* sf == 1 only with the X (doubleword) form, and vice versa */
        unallocated_encoding(s);
        return;
    }

    if (sz == 3) {
        tcg_val = cpu_reg(s, rm);
    } else {
        /* Narrow forms only consume the low 1 << sz bytes of Rm */
        uint64_t mask;
        switch (sz) {
        case 0:
            mask = 0xFF;
            break;
        case 1:
            mask = 0xFFFF;
            break;
        case 2:
            mask = 0xFFFFFFFF;
            break;
        default:
            g_assert_not_reached();
        }
        tcg_val = new_tmp_a64(s);
        tcg_gen_andi_i64(tcg_val, cpu_reg(s, rm), mask);
    }

    tcg_acc = cpu_reg(s, rn);
    tcg_bytes = tcg_const_i32(1 << sz);

    if (crc32c) {
        gen_helper_crc32c_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes);
    } else {
        gen_helper_crc32_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes);
    }

    tcg_temp_free_i32(tcg_bytes);
}
5738
5739
5740
5741
5742
5743
5744
/*
 * Data-processing (2 source)
 *
 *   31 30 29 28             21 20  16 15    10 9  5 4  0
 *  sf  0  S  1 1 0 1 0 1 1 0   Rm     opcode   Rn   Rd
 *
 * The S bit (setflag) is only valid for opcode 0 (SUBPS).
 */
static void disas_data_proc_2src(DisasContext *s, uint32_t insn)
{
    unsigned int sf, rm, opcode, rn, rd, setflag;
    sf = extract32(insn, 31, 1);
    setflag = extract32(insn, 29, 1);
    rm = extract32(insn, 16, 5);
    opcode = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    if (setflag && opcode != 0) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0: /* SUBP(S) (MTE): subtract pointers, ignoring their tags */
        if (sf == 0 || !dc_isar_feature(aa64_mte_insn_reg, s)) {
            goto do_unallocated;
        } else {
            TCGv_i64 tcg_n, tcg_m, tcg_d;

            /* Sign-extend the 56-bit address part of each operand so the
             * tag byte does not contribute to the subtraction.
             */
            tcg_n = read_cpu_reg_sp(s, rn, true);
            tcg_m = read_cpu_reg_sp(s, rm, true);
            tcg_gen_sextract_i64(tcg_n, tcg_n, 0, 56);
            tcg_gen_sextract_i64(tcg_m, tcg_m, 0, 56);
            tcg_d = cpu_reg(s, rd);

            if (setflag) {
                gen_sub_CC(true, tcg_d, tcg_n, tcg_m);
            } else {
                tcg_gen_sub_i64(tcg_d, tcg_n, tcg_m);
            }
        }
        break;
    case 2: /* UDIV */
        handle_div(s, false, sf, rm, rn, rd);
        break;
    case 3: /* SDIV */
        handle_div(s, true, sf, rm, rn, rd);
        break;
    case 4: /* IRG (MTE): insert random tag */
        if (sf == 0 || !dc_isar_feature(aa64_mte_insn_reg, s)) {
            goto do_unallocated;
        }
        if (s->ata) {
            gen_helper_irg(cpu_reg_sp(s, rd), cpu_env,
                           cpu_reg_sp(s, rn), cpu_reg(s, rm));
        } else {
            /* Tag generation disabled: just clear the tag field of Rn. */
            gen_address_with_allocation_tag0(cpu_reg_sp(s, rd),
                                             cpu_reg_sp(s, rn));
        }
        break;
    case 5: /* GMI (MTE): set the bit for Rn's tag in the Rm exclusion mask */
        if (sf == 0 || !dc_isar_feature(aa64_mte_insn_reg, s)) {
            goto do_unallocated;
        } else {
            TCGv_i64 t1 = tcg_const_i64(1);
            TCGv_i64 t2 = tcg_temp_new_i64();

            /* Extract the 4-bit allocation tag from bits [59:56] of Rn. */
            tcg_gen_extract_i64(t2, cpu_reg_sp(s, rn), 56, 4);
            tcg_gen_shl_i64(t1, t1, t2);
            tcg_gen_or_i64(cpu_reg(s, rd), cpu_reg(s, rm), t1);

            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(t2);
        }
        break;
    case 8: /* LSLV */
        handle_shift_reg(s, A64_SHIFT_TYPE_LSL, sf, rm, rn, rd);
        break;
    case 9: /* LSRV */
        handle_shift_reg(s, A64_SHIFT_TYPE_LSR, sf, rm, rn, rd);
        break;
    case 10: /* ASRV */
        handle_shift_reg(s, A64_SHIFT_TYPE_ASR, sf, rm, rn, rd);
        break;
    case 11: /* RORV */
        handle_shift_reg(s, A64_SHIFT_TYPE_ROR, sf, rm, rn, rd);
        break;
    case 12: /* PACGA (pointer authentication) */
        if (sf == 0 || !dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        gen_helper_pacga(cpu_reg(s, rd), cpu_env,
                         cpu_reg(s, rn), cpu_reg_sp(s, rm));
        break;
    case 16:
    case 17:
    case 18:
    case 19:
    case 20:
    case 21:
    case 22:
    case 23: /* CRC32 / CRC32C: size in opcode[1:0], C-variant in opcode[2] */
    {
        int sz = extract32(opcode, 0, 2);
        bool crc32c = extract32(opcode, 2, 1);
        handle_crc32(s, sf, sz, crc32c, rm, rn, rd);
        break;
    }
    default:
    do_unallocated:
        unallocated_encoding(s);
        break;
    }
}
5852
5853
5854
5855
5856
5857
5858
5859
/*
 * Data-processing (register): top-level dispatcher.
 *
 * op1 == 0 selects the logical / add-sub (shifted or extended register)
 * groups; otherwise op2/op3 pick the sub-group.
 */
static void disas_data_proc_reg(DisasContext *s, uint32_t insn)
{
    int op0 = extract32(insn, 30, 1);
    int op1 = extract32(insn, 28, 1);
    int op2 = extract32(insn, 21, 4);
    int op3 = extract32(insn, 10, 6);

    if (!op1) {
        if (op2 & 8) {
            if (op2 & 1) {
                /* Add/sub (extended register) */
                disas_add_sub_ext_reg(s, insn);
            } else {
                /* Add/sub (shifted register) */
                disas_add_sub_reg(s, insn);
            }
        } else {
            /* Logical (shifted register) */
            disas_logic_reg(s, insn);
        }
        return;
    }

    switch (op2) {
    case 0x0:
        switch (op3) {
        case 0x00: /* Add/subtract (with carry) */
            disas_adc_sbc(s, insn);
            break;

        case 0x01: /* Rotate right into flags */
        case 0x21:
            disas_rotate_right_into_flags(s, insn);
            break;

        case 0x02: /* Evaluate into flags */
        case 0x12:
        case 0x22:
        case 0x32:
            disas_evaluate_into_flags(s, insn);
            break;

        default:
            goto do_unallocated;
        }
        break;

    case 0x2: /* Conditional compare */
        disas_cc(s, insn);
        break;

    case 0x4: /* Conditional select */
        disas_cond_select(s, insn);
        break;

    case 0x6: /* Data-processing: op0 picks 1-source vs 2-source */
        if (op0) {
            disas_data_proc_1src(s, insn);
        } else {
            disas_data_proc_2src(s, insn);
        }
        break;
    case 0x8 ... 0xf: /* Data-processing (3 source) */
        disas_data_proc_3src(s, insn);
        break;

    default:
    do_unallocated:
        unallocated_encoding(s);
        break;
    }
}
5932
/*
 * Emit a scalar FP compare of Vn against Vm (or zero) and set NZCV
 * from the result.  size is MO_16/MO_32/MO_64; signal_all_nans selects
 * the signalling (FCMPE-style) compare helpers.
 */
static void handle_fp_compare(DisasContext *s, int size,
                              unsigned int rn, unsigned int rm,
                              bool cmp_with_zero, bool signal_all_nans)
{
    TCGv_i64 tcg_flags = tcg_temp_new_i64();
    /* Half-precision compares use the FP16-specific status flags. */
    TCGv_ptr fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);

    if (size == MO_64) {
        TCGv_i64 tcg_vn, tcg_vm;

        tcg_vn = read_fp_dreg(s, rn);
        if (cmp_with_zero) {
            tcg_vm = tcg_const_i64(0);
        } else {
            tcg_vm = read_fp_dreg(s, rm);
        }
        if (signal_all_nans) {
            gen_helper_vfp_cmped_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
        } else {
            gen_helper_vfp_cmpd_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
        }
        tcg_temp_free_i64(tcg_vn);
        tcg_temp_free_i64(tcg_vm);
    } else {
        /* 16- and 32-bit operands are handled via 32-bit temps. */
        TCGv_i32 tcg_vn = tcg_temp_new_i32();
        TCGv_i32 tcg_vm = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_vn, rn, 0, size);
        if (cmp_with_zero) {
            tcg_gen_movi_i32(tcg_vm, 0);
        } else {
            read_vec_element_i32(s, tcg_vm, rm, 0, size);
        }

        switch (size) {
        case MO_32:
            if (signal_all_nans) {
                gen_helper_vfp_cmpes_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
            } else {
                gen_helper_vfp_cmps_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
            }
            break;
        case MO_16:
            if (signal_all_nans) {
                gen_helper_vfp_cmpeh_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
            } else {
                gen_helper_vfp_cmph_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
            }
            break;
        default:
            g_assert_not_reached();
        }

        tcg_temp_free_i32(tcg_vn);
        tcg_temp_free_i32(tcg_vm);
    }

    tcg_temp_free_ptr(fpst);

    /* The helper returned the flag values; commit them to NZCV. */
    gen_set_nzcv(tcg_flags);

    tcg_temp_free_i64(tcg_flags);
}
5996
5997
5998
5999
6000
6001
6002
/*
 * Floating point compare (FCMP/FCMPE, optionally against zero)
 *
 *   31  30  29 28       24 23  22  21 20  16 15 14 13  10    9    5 4     0
 * M    0  S  1 1 1 1 0   type   1     Rm    op  1 0 0 0    Rn    op2
 *
 * opc bit 0 selects compare-with-zero, bit 1 selects signalling compare.
 */
static void disas_fp_compare(DisasContext *s, uint32_t insn)
{
    unsigned int mos, type, rm, op, rn, opc, op2r;
    int size;

    mos = extract32(insn, 29, 3);
    type = extract32(insn, 22, 2);
    rm = extract32(insn, 16, 5);
    op = extract32(insn, 14, 2);
    rn = extract32(insn, 5, 5);
    opc = extract32(insn, 3, 2);
    op2r = extract32(insn, 0, 3);

    /* All of these fields must be zero for a valid encoding. */
    if (mos || op || op2r) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0:
        size = MO_32;
        break;
    case 1:
        size = MO_64;
        break;
    case 3:
        size = MO_16;
        if (dc_isar_feature(aa64_fp16, s)) {
            break;
        }
        /* fallthrough: half-precision without FP16 is unallocated */
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    handle_fp_compare(s, size, rn, rm, opc & 1, opc & 2);
}
6045
6046
6047
6048
6049
6050
6051
/*
 * Floating point conditional compare (FCCMP/FCCMPE)
 *
 * If the condition holds, perform the FP compare; otherwise set NZCV
 * directly from the immediate nzcv field.  op selects the signalling
 * (FCCMPE) form.
 */
static void disas_fp_ccomp(DisasContext *s, uint32_t insn)
{
    unsigned int mos, type, rm, cond, rn, op, nzcv;
    TCGv_i64 tcg_flags;
    TCGLabel *label_continue = NULL;
    int size;

    mos = extract32(insn, 29, 3);
    type = extract32(insn, 22, 2);
    rm = extract32(insn, 16, 5);
    cond = extract32(insn, 12, 4);
    rn = extract32(insn, 5, 5);
    op = extract32(insn, 4, 1);
    nzcv = extract32(insn, 0, 4);

    if (mos) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0:
        size = MO_32;
        break;
    case 1:
        size = MO_64;
        break;
    case 3:
        size = MO_16;
        if (dc_isar_feature(aa64_fp16, s)) {
            break;
        }
        /* fallthrough: half-precision without FP16 is unallocated */
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* Conditions 0xe/0xf are "always"; only emit the branch skeleton
     * when the condition can actually fail.
     */
    if (cond < 0x0e) { /* not always */
        TCGLabel *label_match = gen_new_label();
        label_continue = gen_new_label();
        arm_gen_test_cc(cond, label_match);
        /* condition failed: set NZCV from the immediate and skip */
        tcg_flags = tcg_const_i64(nzcv << 28);
        gen_set_nzcv(tcg_flags);
        tcg_temp_free_i64(tcg_flags);
        tcg_gen_br(label_continue);
        gen_set_label(label_match);
    }

    handle_fp_compare(s, size, rn, rm, false, op);

    if (cond < 0x0e) {
        gen_set_label(label_continue);
    }
}
6112
6113
6114
6115
6116
6117
6118
/*
 * Floating point conditional select (FCSEL)
 *
 * Rd = cond ? Rn : Rm, implemented with a TCG movcond on the
 * zero-extended scalar values.
 */
static void disas_fp_csel(DisasContext *s, uint32_t insn)
{
    unsigned int mos, type, rm, cond, rn, rd;
    TCGv_i64 t_true, t_false, t_zero;
    DisasCompare64 c;
    MemOp sz;

    mos = extract32(insn, 29, 3);
    type = extract32(insn, 22, 2);
    rm = extract32(insn, 16, 5);
    cond = extract32(insn, 12, 4);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    if (mos) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0:
        sz = MO_32;
        break;
    case 1:
        sz = MO_64;
        break;
    case 3:
        sz = MO_16;
        if (dc_isar_feature(aa64_fp16, s)) {
            break;
        }
        /* fallthrough: half-precision without FP16 is unallocated */
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* Zero-extend both source values into 64-bit temps. */
    t_true = tcg_temp_new_i64();
    t_false = tcg_temp_new_i64();
    read_vec_element(s, t_true, rn, 0, sz);
    read_vec_element(s, t_false, rm, 0, sz);

    a64_test_cc(&c, cond);
    t_zero = tcg_const_i64(0);
    tcg_gen_movcond_i64(c.cond, t_true, c.value, t_zero, t_true, t_false);
    tcg_temp_free_i64(t_zero);
    tcg_temp_free_i64(t_false);
    a64_free_cc(&c);

    /* Writing the full dreg zeroes the upper bits of the vector
     * register for the narrower sizes as a side effect.
     */
    write_fp_dreg(s, rd, t_true);
    tcg_temp_free_i64(t_true);
}
6178
6179
/* Floating-point data-processing (1 source) - half precision */
static void handle_fp_1src_half(DisasContext *s, int opcode, int rd, int rn)
{
    TCGv_ptr fpst = NULL;
    TCGv_i32 tcg_op = read_fp_hreg(s, rn);
    TCGv_i32 tcg_res = tcg_temp_new_i32();

    switch (opcode) {
    case 0x0: /* FMOV */
        tcg_gen_mov_i32(tcg_res, tcg_op);
        break;
    case 0x1: /* FABS: clear the f16 sign bit */
        tcg_gen_andi_i32(tcg_res, tcg_op, 0x7fff);
        break;
    case 0x2: /* FNEG: flip the f16 sign bit */
        tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
        break;
    case 0x3: /* FSQRT */
        fpst = fpstatus_ptr(FPST_FPCR_F16);
        gen_helper_sqrt_f16(tcg_res, tcg_op, fpst);
        break;
    case 0x8: /* FRINTN */
    case 0x9: /* FRINTP */
    case 0xa: /* FRINTM */
    case 0xb: /* FRINTZ */
    case 0xc: /* FRINTA */
    {
        TCGv_i32 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(opcode & 7));
        fpst = fpstatus_ptr(FPST_FPCR_F16);

        /* set_rmode swaps in the new mode and leaves the old one in
         * tcg_rmode, so the second call below restores it.
         */
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
        gen_helper_advsimd_rinth(tcg_res, tcg_op, fpst);

        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
        tcg_temp_free_i32(tcg_rmode);
        break;
    }
    case 0xe: /* FRINTX (round exact, signalling inexact) */
        fpst = fpstatus_ptr(FPST_FPCR_F16);
        gen_helper_advsimd_rinth_exact(tcg_res, tcg_op, fpst);
        break;
    case 0xf: /* FRINTI (use current rounding mode) */
        fpst = fpstatus_ptr(FPST_FPCR_F16);
        gen_helper_advsimd_rinth(tcg_res, tcg_op, fpst);
        break;
    default:
        abort();
    }

    write_fp_sreg(s, rd, tcg_res);

    /* fpst is only allocated by the opcodes that need FP status. */
    if (fpst) {
        tcg_temp_free_ptr(fpst);
    }
    tcg_temp_free_i32(tcg_op);
    tcg_temp_free_i32(tcg_res);
}
6236
6237
/* Floating-point data-processing (1 source) - single precision */
static void handle_fp_1src_single(DisasContext *s, int opcode, int rd, int rn)
{
    void (*gen_fpst)(TCGv_i32, TCGv_i32, TCGv_ptr);
    TCGv_i32 tcg_op, tcg_res;
    TCGv_ptr fpst;
    int rmode = -1; /* < 0 means "no explicit rounding-mode override" */

    tcg_op = read_fp_sreg(s, rn);
    tcg_res = tcg_temp_new_i32();

    switch (opcode) {
    case 0x0: /* FMOV */
        tcg_gen_mov_i32(tcg_res, tcg_op);
        goto done;
    case 0x1: /* FABS */
        gen_helper_vfp_abss(tcg_res, tcg_op);
        goto done;
    case 0x2: /* FNEG */
        gen_helper_vfp_negs(tcg_res, tcg_op);
        goto done;
    case 0x3: /* FSQRT */
        gen_helper_vfp_sqrts(tcg_res, tcg_op, cpu_env);
        goto done;
    case 0x6: /* BFCVT */
        gen_fpst = gen_helper_bfcvt;
        break;
    case 0x8: /* FRINTN */
    case 0x9: /* FRINTP */
    case 0xa: /* FRINTM */
    case 0xb: /* FRINTZ */
    case 0xc: /* FRINTA */
        rmode = arm_rmode_to_sf(opcode & 7);
        gen_fpst = gen_helper_rints;
        break;
    case 0xe: /* FRINTX */
        gen_fpst = gen_helper_rints_exact;
        break;
    case 0xf: /* FRINTI (current rounding mode) */
        gen_fpst = gen_helper_rints;
        break;
    case 0x10: /* FRINT32Z */
        rmode = float_round_to_zero;
        gen_fpst = gen_helper_frint32_s;
        break;
    case 0x11: /* FRINT32X */
        gen_fpst = gen_helper_frint32_s;
        break;
    case 0x12: /* FRINT64Z */
        rmode = float_round_to_zero;
        gen_fpst = gen_helper_frint64_s;
        break;
    case 0x13: /* FRINT64X */
        gen_fpst = gen_helper_frint64_s;
        break;
    default:
        g_assert_not_reached();
    }

    fpst = fpstatus_ptr(FPST_FPCR);
    if (rmode >= 0) {
        /* set_rmode returns the old mode in tcg_rmode; the second call
         * restores it after the operation.
         */
        TCGv_i32 tcg_rmode = tcg_const_i32(rmode);
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
        gen_fpst(tcg_res, tcg_op, fpst);
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
        tcg_temp_free_i32(tcg_rmode);
    } else {
        gen_fpst(tcg_res, tcg_op, fpst);
    }
    tcg_temp_free_ptr(fpst);

 done:
    write_fp_sreg(s, rd, tcg_res);
    tcg_temp_free_i32(tcg_op);
    tcg_temp_free_i32(tcg_res);
}
6313
6314
/* Floating-point data-processing (1 source) - double precision */
static void handle_fp_1src_double(DisasContext *s, int opcode, int rd, int rn)
{
    void (*gen_fpst)(TCGv_i64, TCGv_i64, TCGv_ptr);
    TCGv_i64 tcg_op, tcg_res;
    TCGv_ptr fpst;
    int rmode = -1; /* < 0 means "no explicit rounding-mode override" */

    switch (opcode) {
    case 0x0: /* FMOV: handled as a vector move, no temps needed */
        gen_gvec_fn2(s, false, rd, rn, tcg_gen_gvec_mov, 0);
        return;
    }

    tcg_op = read_fp_dreg(s, rn);
    tcg_res = tcg_temp_new_i64();

    switch (opcode) {
    case 0x1: /* FABS */
        gen_helper_vfp_absd(tcg_res, tcg_op);
        goto done;
    case 0x2: /* FNEG */
        gen_helper_vfp_negd(tcg_res, tcg_op);
        goto done;
    case 0x3: /* FSQRT */
        gen_helper_vfp_sqrtd(tcg_res, tcg_op, cpu_env);
        goto done;
    case 0x8: /* FRINTN */
    case 0x9: /* FRINTP */
    case 0xa: /* FRINTM */
    case 0xb: /* FRINTZ */
    case 0xc: /* FRINTA */
        rmode = arm_rmode_to_sf(opcode & 7);
        gen_fpst = gen_helper_rintd;
        break;
    case 0xe: /* FRINTX */
        gen_fpst = gen_helper_rintd_exact;
        break;
    case 0xf: /* FRINTI (current rounding mode) */
        gen_fpst = gen_helper_rintd;
        break;
    case 0x10: /* FRINT32Z */
        rmode = float_round_to_zero;
        gen_fpst = gen_helper_frint32_d;
        break;
    case 0x11: /* FRINT32X */
        gen_fpst = gen_helper_frint32_d;
        break;
    case 0x12: /* FRINT64Z */
        rmode = float_round_to_zero;
        gen_fpst = gen_helper_frint64_d;
        break;
    case 0x13: /* FRINT64X */
        gen_fpst = gen_helper_frint64_d;
        break;
    default:
        g_assert_not_reached();
    }

    fpst = fpstatus_ptr(FPST_FPCR);
    if (rmode >= 0) {
        /* set_rmode returns the old mode in tcg_rmode; the second call
         * restores it after the operation.
         */
        TCGv_i32 tcg_rmode = tcg_const_i32(rmode);
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
        gen_fpst(tcg_res, tcg_op, fpst);
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
        tcg_temp_free_i32(tcg_rmode);
    } else {
        gen_fpst(tcg_res, tcg_op, fpst);
    }
    tcg_temp_free_ptr(fpst);

 done:
    write_fp_dreg(s, rd, tcg_res);
    tcg_temp_free_i64(tcg_op);
    tcg_temp_free_i64(tcg_res);
}
6390
/*
 * FCVT between floating-point precisions.  ntype is the source
 * precision (0 = single, 1 = double, 3 = half) and dtype the
 * destination; the caller guarantees dtype != ntype.
 */
static void handle_fp_fcvt(DisasContext *s, int opcode,
                           int rd, int rn, int dtype, int ntype)
{
    switch (ntype) {
    case 0x0: /* source is single precision */
    {
        TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
        if (dtype == 1) {
            /* Single to double */
            TCGv_i64 tcg_rd = tcg_temp_new_i64();
            gen_helper_vfp_fcvtds(tcg_rd, tcg_rn, cpu_env);
            write_fp_dreg(s, rd, tcg_rd);
            tcg_temp_free_i64(tcg_rd);
        } else {
            /* Single to half (honours the AHP flag) */
            TCGv_i32 tcg_rd = tcg_temp_new_i32();
            TCGv_i32 ahp = get_ahp_flag();
            TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);

            gen_helper_vfp_fcvt_f32_to_f16(tcg_rd, tcg_rn, fpst, ahp);
            /* write_fp_sreg is OK here because the top half of tcg_rd is zero */
            write_fp_sreg(s, rd, tcg_rd);
            tcg_temp_free_i32(tcg_rd);
            tcg_temp_free_i32(ahp);
            tcg_temp_free_ptr(fpst);
        }
        tcg_temp_free_i32(tcg_rn);
        break;
    }
    case 0x1: /* source is double precision */
    {
        TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
        TCGv_i32 tcg_rd = tcg_temp_new_i32();
        if (dtype == 0) {
            /* Double to single */
            gen_helper_vfp_fcvtsd(tcg_rd, tcg_rn, cpu_env);
        } else {
            /* Double to half (honours the AHP flag) */
            TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
            TCGv_i32 ahp = get_ahp_flag();

            gen_helper_vfp_fcvt_f64_to_f16(tcg_rd, tcg_rn, fpst, ahp);

            tcg_temp_free_ptr(fpst);
            tcg_temp_free_i32(ahp);
        }
        write_fp_sreg(s, rd, tcg_rd);
        tcg_temp_free_i32(tcg_rd);
        tcg_temp_free_i64(tcg_rn);
        break;
    }
    case 0x3: /* source is half precision */
    {
        TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
        TCGv_ptr tcg_fpst = fpstatus_ptr(FPST_FPCR);
        TCGv_i32 tcg_ahp = get_ahp_flag();
        /* Only the low 16 bits of the source register are valid. */
        tcg_gen_ext16u_i32(tcg_rn, tcg_rn);
        if (dtype == 0) {
            /* Half to single */
            TCGv_i32 tcg_rd = tcg_temp_new_i32();
            gen_helper_vfp_fcvt_f16_to_f32(tcg_rd, tcg_rn, tcg_fpst, tcg_ahp);
            write_fp_sreg(s, rd, tcg_rd);
            tcg_temp_free_i32(tcg_rd);
        } else {
            /* Half to double */
            TCGv_i64 tcg_rd = tcg_temp_new_i64();
            gen_helper_vfp_fcvt_f16_to_f64(tcg_rd, tcg_rn, tcg_fpst, tcg_ahp);
            write_fp_dreg(s, rd, tcg_rd);
            tcg_temp_free_i64(tcg_rd);
        }
        tcg_temp_free_i32(tcg_rn);
        tcg_temp_free_ptr(tcg_fpst);
        tcg_temp_free_i32(tcg_ahp);
        break;
    }
    default:
        abort();
    }
}
6469
6470
6471
6472
6473
6474
6475
/*
 * Floating point data-processing (1 source): decode and dispatch to
 * the per-precision handlers.
 */
static void disas_fp_1src(DisasContext *s, uint32_t insn)
{
    int mos = extract32(insn, 29, 3);
    int type = extract32(insn, 22, 2);
    int opcode = extract32(insn, 15, 6);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    if (mos) {
        goto do_unallocated;
    }

    switch (opcode) {
    case 0x4: case 0x5: case 0x7:
    {
        /* FCVT between half, single and double precision.
         * opcode[1:0] names the destination type; converting a type
         * to itself (or via the reserved type 2) is unallocated.
         */
        int dtype = extract32(opcode, 0, 2);
        if (type == 2 || dtype == type) {
            goto do_unallocated;
        }
        if (!fp_access_check(s)) {
            return;
        }

        handle_fp_fcvt(s, opcode, rd, rn, dtype, type);
        break;
    }

    case 0x10 ... 0x13: /* FRINT{32,64}{X,Z}: needs FEAT_FRINTTS */
        if (type > 1 || !dc_isar_feature(aa64_frint, s)) {
            goto do_unallocated;
        }
        /* fall through to the common per-precision dispatch */
    case 0x0 ... 0x3:
    case 0x8 ... 0xc:
    case 0xe ... 0xf:
        /* 32-to-32 and 64-to-64 ops */
        switch (type) {
        case 0:
            if (!fp_access_check(s)) {
                return;
            }
            handle_fp_1src_single(s, opcode, rd, rn);
            break;
        case 1:
            if (!fp_access_check(s)) {
                return;
            }
            handle_fp_1src_double(s, opcode, rd, rn);
            break;
        case 3:
            if (!dc_isar_feature(aa64_fp16, s)) {
                goto do_unallocated;
            }

            if (!fp_access_check(s)) {
                return;
            }
            handle_fp_1src_half(s, opcode, rd, rn);
            break;
        default:
            goto do_unallocated;
        }
        break;

    case 0x6: /* BFCVT: needs FEAT_BF16, double (type 1) source only */
        switch (type) {
        case 1:
            if (!dc_isar_feature(aa64_bf16, s)) {
                goto do_unallocated;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_fp_1src_single(s, opcode, rd, rn);
            break;
        default:
            goto do_unallocated;
        }
        break;

    default:
    do_unallocated:
        unallocated_encoding(s);
        break;
    }
}
6563
6564
6565static void handle_fp_2src_single(DisasContext *s, int opcode,
6566 int rd, int rn, int rm)
6567{
6568 TCGv_i32 tcg_op1;
6569 TCGv_i32 tcg_op2;
6570 TCGv_i32 tcg_res;
6571 TCGv_ptr fpst;
6572
6573 tcg_res = tcg_temp_new_i32();
6574 fpst = fpstatus_ptr(FPST_FPCR);
6575 tcg_op1 = read_fp_sreg(s, rn);
6576 tcg_op2 = read_fp_sreg(s, rm);
6577
6578 switch (opcode) {
6579 case 0x0:
6580 gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
6581 break;
6582 case 0x1:
6583 gen_helper_vfp_divs(tcg_res, tcg_op1, tcg_op2, fpst);
6584 break;
6585 case 0x2:
6586 gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
6587 break;
6588 case 0x3:
6589 gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
6590 break;
6591 case 0x4:
6592 gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
6593 break;
6594 case 0x5:
6595 gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
6596 break;
6597 case 0x6:
6598 gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
6599 break;
6600 case 0x7:
6601 gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
6602 break;
6603 case 0x8:
6604 gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
6605 gen_helper_vfp_negs(tcg_res, tcg_res);
6606 break;
6607 }
6608
6609 write_fp_sreg(s, rd, tcg_res);
6610
6611 tcg_temp_free_ptr(fpst);
6612 tcg_temp_free_i32(tcg_op1);
6613 tcg_temp_free_i32(tcg_op2);
6614 tcg_temp_free_i32(tcg_res);
6615}
6616
6617
6618static void handle_fp_2src_double(DisasContext *s, int opcode,
6619 int rd, int rn, int rm)
6620{
6621 TCGv_i64 tcg_op1;
6622 TCGv_i64 tcg_op2;
6623 TCGv_i64 tcg_res;
6624 TCGv_ptr fpst;
6625
6626 tcg_res = tcg_temp_new_i64();
6627 fpst = fpstatus_ptr(FPST_FPCR);
6628 tcg_op1 = read_fp_dreg(s, rn);
6629 tcg_op2 = read_fp_dreg(s, rm);
6630
6631 switch (opcode) {
6632 case 0x0:
6633 gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
6634 break;
6635 case 0x1:
6636 gen_helper_vfp_divd(tcg_res, tcg_op1, tcg_op2, fpst);
6637 break;
6638 case 0x2:
6639 gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
6640 break;
6641 case 0x3:
6642 gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
6643 break;
6644 case 0x4:
6645 gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
6646 break;
6647 case 0x5:
6648 gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
6649 break;
6650 case 0x6:
6651 gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
6652 break;
6653 case 0x7:
6654 gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
6655 break;
6656 case 0x8:
6657 gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
6658 gen_helper_vfp_negd(tcg_res, tcg_res);
6659 break;
6660 }
6661
6662 write_fp_dreg(s, rd, tcg_res);
6663
6664 tcg_temp_free_ptr(fpst);
6665 tcg_temp_free_i64(tcg_op1);
6666 tcg_temp_free_i64(tcg_op2);
6667 tcg_temp_free_i64(tcg_res);
6668}
6669
6670
/* Floating-point data-processing (2 source) - half precision */
static void handle_fp_2src_half(DisasContext *s, int opcode,
                                int rd, int rn, int rm)
{
    TCGv_i32 tcg_op1;
    TCGv_i32 tcg_op2;
    TCGv_i32 tcg_res;
    TCGv_ptr fpst;

    tcg_res = tcg_temp_new_i32();
    fpst = fpstatus_ptr(FPST_FPCR_F16);
    tcg_op1 = read_fp_hreg(s, rn);
    tcg_op2 = read_fp_hreg(s, rm);

    switch (opcode) {
    case 0x0: /* FMUL */
        gen_helper_advsimd_mulh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x1: /* FDIV */
        gen_helper_advsimd_divh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x2: /* FADD */
        gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x3: /* FSUB */
        gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x4: /* FMAX */
        gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x5: /* FMIN */
        gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x6: /* FMAXNM */
        gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x7: /* FMINNM */
        gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x8: /* FNMUL: multiply then flip the f16 sign bit */
        gen_helper_advsimd_mulh(tcg_res, tcg_op1, tcg_op2, fpst);
        tcg_gen_xori_i32(tcg_res, tcg_res, 0x8000);
        break;
    default:
        g_assert_not_reached();
    }

    write_fp_sreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tcg_op1);
    tcg_temp_free_i32(tcg_op2);
    tcg_temp_free_i32(tcg_res);
}
6724
6725
6726
6727
6728
6729
6730
6731static void disas_fp_2src(DisasContext *s, uint32_t insn)
6732{
6733 int mos = extract32(insn, 29, 3);
6734 int type = extract32(insn, 22, 2);
6735 int rd = extract32(insn, 0, 5);
6736 int rn = extract32(insn, 5, 5);
6737 int rm = extract32(insn, 16, 5);
6738 int opcode = extract32(insn, 12, 4);
6739
6740 if (opcode > 8 || mos) {
6741 unallocated_encoding(s);
6742 return;
6743 }
6744
6745 switch (type) {
6746 case 0:
6747 if (!fp_access_check(s)) {
6748 return;
6749 }
6750 handle_fp_2src_single(s, opcode, rd, rn, rm);
6751 break;
6752 case 1:
6753 if (!fp_access_check(s)) {
6754 return;
6755 }
6756 handle_fp_2src_double(s, opcode, rd, rn, rm);
6757 break;
6758 case 3:
6759 if (!dc_isar_feature(aa64_fp16, s)) {
6760 unallocated_encoding(s);
6761 return;
6762 }
6763 if (!fp_access_check(s)) {
6764 return;
6765 }
6766 handle_fp_2src_half(s, opcode, rd, rn, rm);
6767 break;
6768 default:
6769 unallocated_encoding(s);
6770 }
6771}
6772
6773
6774static void handle_fp_3src_single(DisasContext *s, bool o0, bool o1,
6775 int rd, int rn, int rm, int ra)
6776{
6777 TCGv_i32 tcg_op1, tcg_op2, tcg_op3;
6778 TCGv_i32 tcg_res = tcg_temp_new_i32();
6779 TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
6780
6781 tcg_op1 = read_fp_sreg(s, rn);
6782 tcg_op2 = read_fp_sreg(s, rm);
6783 tcg_op3 = read_fp_sreg(s, ra);
6784
6785
6786
6787
6788
6789
6790
6791
6792 if (o1 == true) {
6793 gen_helper_vfp_negs(tcg_op3, tcg_op3);
6794 }
6795
6796 if (o0 != o1) {
6797 gen_helper_vfp_negs(tcg_op1, tcg_op1);
6798 }
6799
6800 gen_helper_vfp_muladds(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);
6801
6802 write_fp_sreg(s, rd, tcg_res);
6803
6804 tcg_temp_free_ptr(fpst);
6805 tcg_temp_free_i32(tcg_op1);
6806 tcg_temp_free_i32(tcg_op2);
6807 tcg_temp_free_i32(tcg_op3);
6808 tcg_temp_free_i32(tcg_res);
6809}
6810
6811
6812static void handle_fp_3src_double(DisasContext *s, bool o0, bool o1,
6813 int rd, int rn, int rm, int ra)
6814{
6815 TCGv_i64 tcg_op1, tcg_op2, tcg_op3;
6816 TCGv_i64 tcg_res = tcg_temp_new_i64();
6817 TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
6818
6819 tcg_op1 = read_fp_dreg(s, rn);
6820 tcg_op2 = read_fp_dreg(s, rm);
6821 tcg_op3 = read_fp_dreg(s, ra);
6822
6823
6824
6825
6826
6827
6828
6829
6830 if (o1 == true) {
6831 gen_helper_vfp_negd(tcg_op3, tcg_op3);
6832 }
6833
6834 if (o0 != o1) {
6835 gen_helper_vfp_negd(tcg_op1, tcg_op1);
6836 }
6837
6838 gen_helper_vfp_muladdd(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);
6839
6840 write_fp_dreg(s, rd, tcg_res);
6841
6842 tcg_temp_free_ptr(fpst);
6843 tcg_temp_free_i64(tcg_op1);
6844 tcg_temp_free_i64(tcg_op2);
6845 tcg_temp_free_i64(tcg_op3);
6846 tcg_temp_free_i64(tcg_res);
6847}
6848
6849
6850static void handle_fp_3src_half(DisasContext *s, bool o0, bool o1,
6851 int rd, int rn, int rm, int ra)
6852{
6853 TCGv_i32 tcg_op1, tcg_op2, tcg_op3;
6854 TCGv_i32 tcg_res = tcg_temp_new_i32();
6855 TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR_F16);
6856
6857 tcg_op1 = read_fp_hreg(s, rn);
6858 tcg_op2 = read_fp_hreg(s, rm);
6859 tcg_op3 = read_fp_hreg(s, ra);
6860
6861
6862
6863
6864
6865
6866
6867
6868 if (o1 == true) {
6869 tcg_gen_xori_i32(tcg_op3, tcg_op3, 0x8000);
6870 }
6871
6872 if (o0 != o1) {
6873 tcg_gen_xori_i32(tcg_op1, tcg_op1, 0x8000);
6874 }
6875
6876 gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);
6877
6878 write_fp_sreg(s, rd, tcg_res);
6879
6880 tcg_temp_free_ptr(fpst);
6881 tcg_temp_free_i32(tcg_op1);
6882 tcg_temp_free_i32(tcg_op2);
6883 tcg_temp_free_i32(tcg_op3);
6884 tcg_temp_free_i32(tcg_res);
6885}
6886
6887
6888
6889
6890
6891
6892
6893static void disas_fp_3src(DisasContext *s, uint32_t insn)
6894{
6895 int mos = extract32(insn, 29, 3);
6896 int type = extract32(insn, 22, 2);
6897 int rd = extract32(insn, 0, 5);
6898 int rn = extract32(insn, 5, 5);
6899 int ra = extract32(insn, 10, 5);
6900 int rm = extract32(insn, 16, 5);
6901 bool o0 = extract32(insn, 15, 1);
6902 bool o1 = extract32(insn, 21, 1);
6903
6904 if (mos) {
6905 unallocated_encoding(s);
6906 return;
6907 }
6908
6909 switch (type) {
6910 case 0:
6911 if (!fp_access_check(s)) {
6912 return;
6913 }
6914 handle_fp_3src_single(s, o0, o1, rd, rn, rm, ra);
6915 break;
6916 case 1:
6917 if (!fp_access_check(s)) {
6918 return;
6919 }
6920 handle_fp_3src_double(s, o0, o1, rd, rn, rm, ra);
6921 break;
6922 case 3:
6923 if (!dc_isar_feature(aa64_fp16, s)) {
6924 unallocated_encoding(s);
6925 return;
6926 }
6927 if (!fp_access_check(s)) {
6928 return;
6929 }
6930 handle_fp_3src_half(s, o0, o1, rd, rn, rm, ra);
6931 break;
6932 default:
6933 unallocated_encoding(s);
6934 }
6935}
6936
6937
6938
6939
6940
6941
6942
/*
 * Floating point immediate (FMOV Vd, #imm): expand the 8-bit imm8
 * field to a full-width FP constant and write it to Vd.
 */
static void disas_fp_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int imm5 = extract32(insn, 5, 5);
    int imm8 = extract32(insn, 13, 8);
    int type = extract32(insn, 22, 2);
    int mos = extract32(insn, 29, 3);
    uint64_t imm;
    TCGv_i64 tcg_res;
    MemOp sz;

    /* mos and imm5 must be zero for a valid encoding. */
    if (mos || imm5) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0:
        sz = MO_32;
        break;
    case 1:
        sz = MO_64;
        break;
    case 3:
        sz = MO_16;
        if (dc_isar_feature(aa64_fp16, s)) {
            break;
        }
        /* fallthrough: half-precision without FP16 is unallocated */
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    imm = vfp_expand_imm(sz, imm8);

    /* write_fp_dreg zeroes the rest of the vector register. */
    tcg_res = tcg_const_i64(imm);
    write_fp_dreg(s, rd, tcg_res);
    tcg_temp_free_i64(tcg_res);
}
6987
6988
6989
6990
6991
6992
/*
 * Shared handler for integer <-> floating point conversions
 * (SCVTF/UCVTF and the FCVT* to-integer family), including the
 * fixed-point forms: scale encodes the number of fractional bits as
 * 64 - fbits.  itof selects the int-to-float direction; opcode bit 0
 * clear means signed, type is the FP precision (0=S, 1=D, 3=H) and
 * sf the integer register width.
 */
static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode,
                           bool itof, int rmode, int scale, int sf, int type)
{
    bool is_signed = !(opcode & 1);
    TCGv_ptr tcg_fpstatus;
    TCGv_i32 tcg_shift, tcg_single;
    TCGv_i64 tcg_double;

    tcg_fpstatus = fpstatus_ptr(type == 3 ? FPST_FPCR_F16 : FPST_FPCR);

    /* The helpers take the number of fractional bits (64 - scale). */
    tcg_shift = tcg_const_i32(64 - scale);

    if (itof) {
        /* Integer to float */
        TCGv_i64 tcg_int = cpu_reg(s, rn);
        if (!sf) {
            TCGv_i64 tcg_extend = new_tmp_a64(s);

            /* 32-bit source: extend to 64 bits per signedness first. */
            if (is_signed) {
                tcg_gen_ext32s_i64(tcg_extend, tcg_int);
            } else {
                tcg_gen_ext32u_i64(tcg_extend, tcg_int);
            }

            tcg_int = tcg_extend;
        }

        switch (type) {
        case 1: /* double */
            tcg_double = tcg_temp_new_i64();
            if (is_signed) {
                gen_helper_vfp_sqtod(tcg_double, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_uqtod(tcg_double, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            }
            write_fp_dreg(s, rd, tcg_double);
            tcg_temp_free_i64(tcg_double);
            break;

        case 0: /* single */
            tcg_single = tcg_temp_new_i32();
            if (is_signed) {
                gen_helper_vfp_sqtos(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_uqtos(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            }
            write_fp_sreg(s, rd, tcg_single);
            tcg_temp_free_i32(tcg_single);
            break;

        case 3: /* half */
            tcg_single = tcg_temp_new_i32();
            if (is_signed) {
                gen_helper_vfp_sqtoh(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_uqtoh(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            }
            write_fp_sreg(s, rd, tcg_single);
            tcg_temp_free_i32(tcg_single);
            break;

        default:
            g_assert_not_reached();
        }
    } else {
        /* Float to integer */
        TCGv_i64 tcg_int = cpu_reg(s, rd);
        TCGv_i32 tcg_rmode;

        if (extract32(opcode, 2, 1)) {
            /* There are too many rounding modes to all fit into rmode,
             * so FCVTA[US] is a special case.
             */
            rmode = FPROUNDING_TIEAWAY;
        }

        tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));

        /* Swap in the requested rounding mode; the old mode is left in
         * tcg_rmode and restored by the second call below.
         */
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);

        switch (type) {
        case 1: /* double */
            tcg_double = read_fp_dreg(s, rn);
            if (is_signed) {
                if (!sf) {
                    gen_helper_vfp_tosld(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_tosqd(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                }
            } else {
                if (!sf) {
                    gen_helper_vfp_tould(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touqd(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                }
            }
            if (!sf) {
                /* 32-bit destinations are zero-extended in the Xreg. */
                tcg_gen_ext32u_i64(tcg_int, tcg_int);
            }
            tcg_temp_free_i64(tcg_double);
            break;

        case 0: /* single */
            tcg_single = read_fp_sreg(s, rn);
            if (sf) {
                if (is_signed) {
                    gen_helper_vfp_tosqs(tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touqs(tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
            } else {
                TCGv_i32 tcg_dest = tcg_temp_new_i32();
                if (is_signed) {
                    gen_helper_vfp_tosls(tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touls(tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
                tcg_gen_extu_i32_i64(tcg_int, tcg_dest);
                tcg_temp_free_i32(tcg_dest);
            }
            tcg_temp_free_i32(tcg_single);
            break;

        case 3: /* half */
            tcg_single = read_fp_sreg(s, rn);
            if (sf) {
                if (is_signed) {
                    gen_helper_vfp_tosqh(tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touqh(tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
            } else {
                TCGv_i32 tcg_dest = tcg_temp_new_i32();
                if (is_signed) {
                    gen_helper_vfp_toslh(tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_toulh(tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
                tcg_gen_extu_i32_i64(tcg_int, tcg_dest);
                tcg_temp_free_i32(tcg_dest);
            }
            tcg_temp_free_i32(tcg_single);
            break;

        default:
            g_assert_not_reached();
        }

        /* Restore the previous rounding mode. */
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
        tcg_temp_free_i32(tcg_rmode);
    }

    tcg_temp_free_ptr(tcg_fpstatus);
    tcg_temp_free_i32(tcg_shift);
}
7164
7165
7166
7167
7168
7169
7170
/* Floating point <-> fixed point conversions
 *   31   30  29 28       24 23  22  21 20   19 18    16 15   10 9    5 4    0
 * +----+---+---+-----------+------+---+-------+--------+-------+------+------+
 * | sf | 0 | S | 1 1 1 1 0 | type | 0 | rmode | opcode | scale |  Rn  |  Rd  |
 * +----+---+---+-----------+------+---+-------+--------+-------+------+------+
 */
static void disas_fp_fixed_conv(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int scale = extract32(insn, 10, 6);
    int opcode = extract32(insn, 16, 3);
    int rmode = extract32(insn, 19, 2);
    int type = extract32(insn, 22, 2);
    bool sbit = extract32(insn, 29, 1);
    bool sf = extract32(insn, 31, 1);
    bool itof;

    /* S must be 0; for the 32-bit form scale must be in [32..63],
     * i.e. a fractional-bits count of at most 32.
     */
    if (sbit || (!sf && scale < 32)) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0: /* float32 */
    case 1: /* float64 */
        break;
    case 3: /* float16: only valid with the FP16 extension */
        if (dc_isar_feature(aa64_fp16, s)) {
            break;
        }
        /* fallthru */
    default:
        unallocated_encoding(s);
        return;
    }

    switch ((rmode << 3) | opcode) {
    case 0x2: /* SCVTF */
    case 0x3: /* UCVTF */
        itof = true;
        break;
    case 0x18: /* FCVTZS */
    case 0x19: /* FCVTZU */
        itof = false;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    handle_fpfpcvt(s, rd, rn, opcode, itof, FPROUNDING_ZERO, scale, sf, type);
}
7222
static void handle_fmov(DisasContext *s, int rd, int rn, int type, bool itof)
{
    /* FMOV: gpr to or from float, double, or top half of quad fp reg,
     * without conversion. 'type' selects the width/slice, 'itof' the
     * direction (integer-to-float when true).
     */
    if (itof) {
        TCGv_i64 tcg_rn = cpu_reg(s, rn);
        TCGv_i64 tmp;

        switch (type) {
        case 0:
            /* 32 bit: zero-extend Wn into the destination D register */
            tmp = tcg_temp_new_i64();
            tcg_gen_ext32u_i64(tmp, tcg_rn);
            write_fp_dreg(s, rd, tmp);
            tcg_temp_free_i64(tmp);
            break;
        case 1:
            /* 64 bit */
            write_fp_dreg(s, rd, tcg_rn);
            break;
        case 2:
            /* 64 bit to top half of quad */
            tcg_gen_st_i64(tcg_rn, cpu_env, fp_reg_hi_offset(s, rd));
            clear_vec_high(s, true, rd);
            break;
        case 3:
            /* 16 bit: zero-extend the low half-word of Wn */
            tmp = tcg_temp_new_i64();
            tcg_gen_ext16u_i64(tmp, tcg_rn);
            write_fp_dreg(s, rd, tmp);
            tcg_temp_free_i64(tmp);
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        TCGv_i64 tcg_rd = cpu_reg(s, rd);

        switch (type) {
        case 0:
            /* 32 bit */
            tcg_gen_ld32u_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_32));
            break;
        case 1:
            /* 64 bit */
            tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_64));
            break;
        case 2:
            /* 64 bits from top half of quad */
            tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_hi_offset(s, rn));
            break;
        case 3:
            /* 16 bit */
            tcg_gen_ld16u_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_16));
            break;
        default:
            g_assert_not_reached();
        }
    }
}
7285
static void handle_fjcvtzs(DisasContext *s, int rd, int rn)
{
    /* FJCVTZS: convert Dn to a 32-bit signed integer with the
     * Javascript rounding/overflow semantics, via a helper.
     */
    TCGv_i64 t = read_fp_dreg(s, rn);
    TCGv_ptr fpstatus = fpstatus_ptr(FPST_FPCR);

    gen_helper_fjcvtzs(t, t, fpstatus);

    tcg_temp_free_ptr(fpstatus);

    /* Low half of the helper result is the converted value (written
     * to Wd zero-extended); the high half feeds cpu_ZF, and N, C and
     * V are cleared.
     */
    tcg_gen_ext32u_i64(cpu_reg(s, rd), t);
    tcg_gen_extrh_i64_i32(cpu_ZF, t);
    tcg_gen_movi_i32(cpu_CF, 0);
    tcg_gen_movi_i32(cpu_NF, 0);
    tcg_gen_movi_i32(cpu_VF, 0);

    tcg_temp_free_i64(t);
}
7303
7304
7305
7306
7307
7308
7309
/* Floating point <-> integer conversions
 *   31   30  29 28       24 23  22  21 20   19 18 16 15         10 9  5 4  0
 * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
 * | sf | 0 | S | 1 1 1 1 0 | type | 1 | rmode | opc | 0 0 0 0 0 0 | Rn | Rd |
 * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
 */
static void disas_fp_int_conv(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 16, 3);
    int rmode = extract32(insn, 19, 2);
    int type = extract32(insn, 22, 2);
    bool sbit = extract32(insn, 29, 1);
    bool sf = extract32(insn, 31, 1);
    bool itof = false;

    if (sbit) {
        goto do_unallocated;
    }

    switch (opcode) {
    case 2: /* SCVTF */
    case 3: /* UCVTF */
        itof = true;
        /* fallthru */
    case 4: /* FCVTAS */
    case 5: /* FCVTAU */
        if (rmode != 0) {
            goto do_unallocated;
        }
        /* fallthru */
    case 0: /* FCVT[NPMZ]S */
    case 1: /* FCVT[NPMZ]U */
        switch (type) {
        case 0: /* float32 */
        case 1: /* float64 */
            break;
        case 3: /* float16 */
            if (!dc_isar_feature(aa64_fp16, s)) {
                goto do_unallocated;
            }
            break;
        default:
            goto do_unallocated;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_fpfpcvt(s, rd, rn, opcode, itof, rmode, 64, sf, type);
        break;

    default:
        /* Remaining encodings: FMOV and FJCVTZS, keyed on the whole
         * sf:type:rmode:opcode tuple.
         */
        switch (sf << 7 | type << 5 | rmode << 3 | opcode) {
        case 0b01100110: /* FMOV half <-> 32-bit int */
        case 0b01100111:
        case 0b11100110: /* FMOV half <-> 64-bit int */
        case 0b11100111:
            if (!dc_isar_feature(aa64_fp16, s)) {
                goto do_unallocated;
            }
            /* fallthru */
        case 0b00000110: /* FMOV 32-bit */
        case 0b00000111:
        case 0b10100110: /* FMOV 64-bit */
        case 0b10100111:
        case 0b11001110: /* FMOV top half of 128-bit */
        case 0b11001111:
            if (!fp_access_check(s)) {
                return;
            }
            /* opcode bit 0 distinguishes int->fp from fp->int */
            itof = opcode & 1;
            handle_fmov(s, rd, rn, type, itof);
            break;

        case 0b00111110: /* FJCVTZS */
            if (!dc_isar_feature(aa64_jscvt, s)) {
                goto do_unallocated;
            } else if (fp_access_check(s)) {
                handle_fjcvtzs(s, rd, rn);
            }
            break;

        default:
        do_unallocated:
            unallocated_encoding(s);
            return;
        }
        break;
    }
}
7395
7396
7397
7398
7399
7400
7401
/* Top-level dispatcher for the scalar floating point instruction
 * groups: route to the per-group disassembly functions based on the
 * sub-group selector fields.
 */
static void disas_data_proc_fp(DisasContext *s, uint32_t insn)
{
    if (extract32(insn, 24, 1)) {
        /* Floating point data-processing (3 source) */
        disas_fp_3src(s, insn);
    } else if (extract32(insn, 21, 1) == 0) {
        /* Floating point to fixed point conversions */
        disas_fp_fixed_conv(s, insn);
    } else {
        switch (extract32(insn, 10, 2)) {
        case 1:
            /* Floating point conditional compare */
            disas_fp_ccomp(s, insn);
            break;
        case 2:
            /* Floating point data-processing (2 source) */
            disas_fp_2src(s, insn);
            break;
        case 3:
            /* Floating point conditional select */
            disas_fp_csel(s, insn);
            break;
        case 0:
            /* Discriminate on the position of the lowest set bit
             * of insn[15:12] (ctz32(0) == 32 hits the default case).
             */
            switch (ctz32(extract32(insn, 12, 4))) {
            case 0: /* [15:12] == xxx1 */
                /* Floating point immediate */
                disas_fp_imm(s, insn);
                break;
            case 1: /* [15:12] == xx10 */
                /* Floating point compare */
                disas_fp_compare(s, insn);
                break;
            case 2: /* [15:12] == x100 */
                /* Floating point data-processing (1 source) */
                disas_fp_1src(s, insn);
                break;
            case 3: /* [15:12] == 1000 */
                unallocated_encoding(s);
                break;
            default: /* [15:12] == 0000 */
                /* Floating point <-> integer conversions */
                disas_fp_int_conv(s, insn);
                break;
            }
            break;
        }
    }
}
7450
7451static void do_ext64(DisasContext *s, TCGv_i64 tcg_left, TCGv_i64 tcg_right,
7452 int pos)
7453{
7454
7455
7456
7457
7458
7459
7460 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
7461 assert(pos > 0 && pos < 64);
7462
7463 tcg_gen_shri_i64(tcg_right, tcg_right, pos);
7464 tcg_gen_shli_i64(tcg_tmp, tcg_left, 64 - pos);
7465 tcg_gen_or_i64(tcg_right, tcg_right, tcg_tmp);
7466
7467 tcg_temp_free_i64(tcg_tmp);
7468}
7469
7470
7471
7472
7473
7474
7475
/* EXT
 *   31  30 29         24 23 22  21 20  16 15  14  11 10  9    5 4    0
 * +---+---+-------------+-----+---+------+---+------+---+------+------+
 * | 0 | Q | 1 0 1 1 1 0 | op2 | 0 |  Rm  | 0 | imm4 | 0 |  Rn  |  Rd  |
 * +---+---+-------------+-----+---+------+---+------+---+------+------+
 */
static void disas_simd_ext(DisasContext *s, uint32_t insn)
{
    int is_q = extract32(insn, 30, 1);
    int op2 = extract32(insn, 22, 2);
    int imm4 = extract32(insn, 11, 4);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    int pos = imm4 << 3; /* extraction start position, in bits */
    TCGv_i64 tcg_resl, tcg_resh;

    /* op2 must be zero; in the 64-bit form imm4<3> must be clear */
    if (op2 != 0 || (!is_q && extract32(imm4, 3, 1))) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_resh = tcg_temp_new_i64();
    tcg_resl = tcg_temp_new_i64();

    /* Vd gets bits starting at pos bits into Vm:Vn. This is
     * either extracting 128 bits from a 256:128 concatenation, or
     * extracting 64 bits from a 128:64 concatenation.
     */
    if (!is_q) {
        read_vec_element(s, tcg_resl, rn, 0, MO_64);
        if (pos != 0) {
            read_vec_element(s, tcg_resh, rm, 0, MO_64);
            do_ext64(s, tcg_resh, tcg_resl, pos);
        }
    } else {
        TCGv_i64 tcg_hh;
        typedef struct {
            int reg;
            int elt;
        } EltPosns;
        EltPosns eltposns[] = { {rn, 0}, {rn, 1}, {rm, 0}, {rm, 1} };
        EltPosns *elt = eltposns;

        /* Start one 64-bit element further in when pos >= 64 */
        if (pos >= 64) {
            elt++;
            pos -= 64;
        }

        read_vec_element(s, tcg_resl, elt->reg, elt->elt, MO_64);
        elt++;
        read_vec_element(s, tcg_resh, elt->reg, elt->elt, MO_64);
        elt++;
        if (pos != 0) {
            /* Shift low and high halves, pulling in bits from the
             * next element of the Vm:Vn concatenation.
             */
            do_ext64(s, tcg_resh, tcg_resl, pos);
            tcg_hh = tcg_temp_new_i64();
            read_vec_element(s, tcg_hh, elt->reg, elt->elt, MO_64);
            do_ext64(s, tcg_hh, tcg_resh, pos);
            tcg_temp_free_i64(tcg_hh);
        }
    }

    write_vec_element(s, tcg_resl, rd, 0, MO_64);
    tcg_temp_free_i64(tcg_resl);
    if (is_q) {
        write_vec_element(s, tcg_resh, rd, 1, MO_64);
    }
    tcg_temp_free_i64(tcg_resh);
    clear_vec_high(s, is_q, rd);
}
7544
7545
7546
7547
7548
7549
7550
7551static void disas_simd_tb(DisasContext *s, uint32_t insn)
7552{
7553 int op2 = extract32(insn, 22, 2);
7554 int is_q = extract32(insn, 30, 1);
7555 int rm = extract32(insn, 16, 5);
7556 int rn = extract32(insn, 5, 5);
7557 int rd = extract32(insn, 0, 5);
7558 int is_tbx = extract32(insn, 12, 1);
7559 int len = (extract32(insn, 13, 2) + 1) * 16;
7560
7561 if (op2 != 0) {
7562 unallocated_encoding(s);
7563 return;
7564 }
7565
7566 if (!fp_access_check(s)) {
7567 return;
7568 }
7569
7570 tcg_gen_gvec_2_ptr(vec_full_reg_offset(s, rd),
7571 vec_full_reg_offset(s, rm), cpu_env,
7572 is_q ? 16 : 8, vec_full_reg_size(s),
7573 (len << 6) | (is_tbx << 5) | rn,
7574 gen_helper_simd_tblx);
7575}
7576
7577
7578
7579
7580
7581
7582
/* ZIP/UZP/TRN: element permute/interleave between two source vectors. */
static void disas_simd_zip_trn(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    /* opc field bits [1:0] indicate ZIP/UZP/TRN;
     * bit 2 indicates 1 vs 2 variant of the insn.
     */
    int opcode = extract32(insn, 12, 2);
    bool part = extract32(insn, 14, 1);
    bool is_q = extract32(insn, 30, 1);
    int esize = 8 << size;
    int i, ofs;
    int datasize = is_q ? 128 : 64;
    int elements = datasize / esize;
    TCGv_i64 tcg_res, tcg_resl, tcg_resh;

    if (opcode == 0 || (size == 3 && !is_q)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* Accumulate the result 64 bits at a time; the high half is only
     * needed (and allocated) for the 128-bit form.
     */
    tcg_resl = tcg_const_i64(0);
    tcg_resh = is_q ? tcg_const_i64(0) : NULL;
    tcg_res = tcg_temp_new_i64();

    for (i = 0; i < elements; i++) {
        switch (opcode) {
        case 1: /* UZP1/2 */
        {
            int midpoint = elements / 2;
            if (i < midpoint) {
                read_vec_element(s, tcg_res, rn, 2 * i + part, size);
            } else {
                read_vec_element(s, tcg_res, rm,
                                 2 * (i - midpoint) + part, size);
            }
            break;
        }
        case 2: /* TRN1/2 */
            if (i & 1) {
                read_vec_element(s, tcg_res, rm, (i & ~1) + part, size);
            } else {
                read_vec_element(s, tcg_res, rn, (i & ~1) + part, size);
            }
            break;
        case 3: /* ZIP1/2 */
        {
            int base = part * elements / 2;
            if (i & 1) {
                read_vec_element(s, tcg_res, rm, base + (i >> 1), size);
            } else {
                read_vec_element(s, tcg_res, rn, base + (i >> 1), size);
            }
            break;
        }
        default:
            g_assert_not_reached();
        }

        /* OR the element into its position in the low or high half */
        ofs = i * esize;
        if (ofs < 64) {
            tcg_gen_shli_i64(tcg_res, tcg_res, ofs);
            tcg_gen_or_i64(tcg_resl, tcg_resl, tcg_res);
        } else {
            tcg_gen_shli_i64(tcg_res, tcg_res, ofs - 64);
            tcg_gen_or_i64(tcg_resh, tcg_resh, tcg_res);
        }
    }

    tcg_temp_free_i64(tcg_res);

    write_vec_element(s, tcg_resl, rd, 0, MO_64);
    tcg_temp_free_i64(tcg_resl);

    if (is_q) {
        write_vec_element(s, tcg_resh, rd, 1, MO_64);
        tcg_temp_free_i64(tcg_resh);
    }
    clear_vec_high(s, is_q, rd);
}
7669
7670
7671
7672
7673
7674
7675
7676
7677
7678
7679
/* do_reduction_op helper
 *
 * Recursively reduce the elements of Vn selected by the bitmap 'vmap'
 * using the pairwise FP operation 'fpopcode'. 'size' is the width in
 * bits of the slice being reduced at this level; when it has narrowed
 * to a single element (esize == size) the recursion bottoms out and
 * we just load that element. Performing the operations in this halving
 * order matches the architectural pseudocode, which matters for NaN
 * propagation. Returns a freshly allocated temp with the result.
 */
static TCGv_i32 do_reduction_op(DisasContext *s, int fpopcode, int rn,
                                int esize, int size, int vmap, TCGv_ptr fpst)
{
    if (esize == size) {
        int element;
        MemOp msize = esize == 16 ? MO_16 : MO_32;
        TCGv_i32 tcg_elem;

        /* We should have one register left here */
        assert(ctpop8(vmap) == 1);
        element = ctz32(vmap);
        assert(element < 8);

        tcg_elem = tcg_temp_new_i32();
        read_vec_element_i32(s, tcg_elem, rn, element, msize);
        return tcg_elem;
    } else {
        int bits = size / 2;
        int shift = ctpop8(vmap) / 2;
        /* Split the map into its low and high halves and recurse */
        int vmap_lo = (vmap >> shift) & vmap;
        int vmap_hi = (vmap & ~vmap_lo);
        TCGv_i32 tcg_hi, tcg_lo, tcg_res;

        tcg_hi = do_reduction_op(s, fpopcode, rn, esize, bits, vmap_hi, fpst);
        tcg_lo = do_reduction_op(s, fpopcode, rn, esize, bits, vmap_lo, fpst);
        tcg_res = tcg_temp_new_i32();

        switch (fpopcode) {
        case 0x0c: /* fmaxnmv half-precision */
            gen_helper_advsimd_maxnumh(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x0f: /* fmaxv half-precision */
            gen_helper_advsimd_maxh(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x1c: /* fminnmv half-precision */
            gen_helper_advsimd_minnumh(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x1f: /* fminv half-precision */
            gen_helper_advsimd_minh(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x2c: /* fmaxnmv */
            gen_helper_vfp_maxnums(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x2f: /* fmaxv */
            gen_helper_vfp_maxs(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x3c: /* fminnmv */
            gen_helper_vfp_minnums(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x3f: /* fminv */
            gen_helper_vfp_mins(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        default:
            g_assert_not_reached();
        }

        tcg_temp_free_i32(tcg_hi);
        tcg_temp_free_i32(tcg_lo);
        return tcg_res;
    }
}
7741
7742
7743
7744
7745
7746
7747
/* AdvSIMD across lanes
 *   31  30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
 * | 0 | Q | U | 0 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
 */
static void disas_simd_across_lanes(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    bool is_q = extract32(insn, 30, 1);
    bool is_u = extract32(insn, 29, 1);
    bool is_fp = false;
    bool is_min = false;
    int esize;
    int elements;
    int i;
    TCGv_i64 tcg_res, tcg_elt;

    switch (opcode) {
    case 0x1b: /* ADDV */
        if (is_u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x3: /* SADDLV, UADDLV */
    case 0xa: /* SMAXV, UMAXV */
    case 0x1a: /* SMINV, UMINV */
        if (size == 3 || (size == 2 && !is_q)) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0xc: /* FMAXNMV, FMINNMV */
    case 0xf: /* FMAXV, FMINV */
        /* Bit 1 of size field encodes min vs max and the actual size
         * depends on the U bit: with U clear (and FP16 available) the
         * operation is half-precision, otherwise single-precision.
         */
        is_min = extract32(size, 1, 1);
        is_fp = true;
        if (!is_u && dc_isar_feature(aa64_fp16, s)) {
            size = 1;
        } else if (!is_u || !is_q || extract32(size, 0, 1)) {
            unallocated_encoding(s);
            return;
        } else {
            size = 2;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    esize = 8 << size;
    elements = (is_q ? 128 : 64) / esize;

    tcg_res = tcg_temp_new_i64();
    tcg_elt = tcg_temp_new_i64();

    /* These instructions operate across all lanes of a vector
     * to produce a single result. We can guarantee that a 64
     * bit intermediate is sufficient:
     *  + for [US]ADDLV the maximum element size is 32 bits, and
     *    the result type is 64 bits
     *  + for FMAX*V, FMIN*V, ADDV and [US][MAX|MIN]V the result is
     *    the same width as the element, which is at most 32 bits
     * For simplicity the integer ops work at 64 bits throughout and
     * truncate at the end.
     */
    if (!is_fp) {
        read_vec_element(s, tcg_res, rn, 0, size | (is_u ? 0 : MO_SIGN));

        for (i = 1; i < elements; i++) {
            read_vec_element(s, tcg_elt, rn, i, size | (is_u ? 0 : MO_SIGN));

            switch (opcode) {
            case 0x03: /* SADDLV / UADDLV */
            case 0x1b: /* ADDV */
                tcg_gen_add_i64(tcg_res, tcg_res, tcg_elt);
                break;
            case 0x0a: /* SMAXV / UMAXV */
                if (is_u) {
                    tcg_gen_umax_i64(tcg_res, tcg_res, tcg_elt);
                } else {
                    tcg_gen_smax_i64(tcg_res, tcg_res, tcg_elt);
                }
                break;
            case 0x1a: /* SMINV / UMINV */
                if (is_u) {
                    tcg_gen_umin_i64(tcg_res, tcg_res, tcg_elt);
                } else {
                    tcg_gen_smin_i64(tcg_res, tcg_res, tcg_elt);
                }
                break;
            default:
                g_assert_not_reached();
            }

        }
    } else {
        /* Floating point vector reduction ops which work across 32
         * bit (single) or 16 bit (half-precision) intermediates.
         * Note that correct NaN propagation requires that we do these
         * operations in exactly the order specified by the pseudocode.
         */
        TCGv_ptr fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
        int fpopcode = opcode | is_min << 4 | is_u << 5;
        int vmap = (1 << elements) - 1;
        TCGv_i32 tcg_res32 = do_reduction_op(s, fpopcode, rn, esize,
                                             (is_q ? 128 : 64), vmap, fpst);
        tcg_gen_extu_i32_i64(tcg_res, tcg_res32);
        tcg_temp_free_i32(tcg_res32);
        tcg_temp_free_ptr(fpst);
    }

    tcg_temp_free_i64(tcg_elt);

    /* Now truncate the result to the width required for the output */
    if (opcode == 0x03) {
        /* SADDLV, UADDLV: result is 2*esize */
        size++;
    }

    switch (size) {
    case 0:
        tcg_gen_ext8u_i64(tcg_res, tcg_res);
        break;
    case 1:
        tcg_gen_ext16u_i64(tcg_res, tcg_res);
        break;
    case 2:
        tcg_gen_ext32u_i64(tcg_res, tcg_res);
        break;
    case 3:
        break;
    default:
        g_assert_not_reached();
    }

    write_fp_dreg(s, rd, tcg_res);
    tcg_temp_free_i64(tcg_res);
}
7896
7897
7898
7899
7900
7901
7902
7903
7904
7905
7906static void handle_simd_dupe(DisasContext *s, int is_q, int rd, int rn,
7907 int imm5)
7908{
7909 int size = ctz32(imm5);
7910 int index;
7911
7912 if (size > 3 || (size == 3 && !is_q)) {
7913 unallocated_encoding(s);
7914 return;
7915 }
7916
7917 if (!fp_access_check(s)) {
7918 return;
7919 }
7920
7921 index = imm5 >> (size + 1);
7922 tcg_gen_gvec_dup_mem(size, vec_full_reg_offset(s, rd),
7923 vec_reg_offset(s, rn, index, size),
7924 is_q ? 16 : 8, vec_full_reg_size(s));
7925}
7926
7927
7928
7929
7930
7931
7932
7933static void handle_simd_dupes(DisasContext *s, int rd, int rn,
7934 int imm5)
7935{
7936 int size = ctz32(imm5);
7937 int index;
7938 TCGv_i64 tmp;
7939
7940 if (size > 3) {
7941 unallocated_encoding(s);
7942 return;
7943 }
7944
7945 if (!fp_access_check(s)) {
7946 return;
7947 }
7948
7949 index = imm5 >> (size + 1);
7950
7951
7952
7953
7954 tmp = tcg_temp_new_i64();
7955 read_vec_element(s, tmp, rn, index, size);
7956 write_fp_dreg(s, rd, tmp);
7957 tcg_temp_free_i64(tmp);
7958}
7959
7960
7961
7962
7963
7964
7965
7966
7967
7968
7969static void handle_simd_dupg(DisasContext *s, int is_q, int rd, int rn,
7970 int imm5)
7971{
7972 int size = ctz32(imm5);
7973 uint32_t dofs, oprsz, maxsz;
7974
7975 if (size > 3 || ((size == 3) && !is_q)) {
7976 unallocated_encoding(s);
7977 return;
7978 }
7979
7980 if (!fp_access_check(s)) {
7981 return;
7982 }
7983
7984 dofs = vec_full_reg_offset(s, rd);
7985 oprsz = is_q ? 16 : 8;
7986 maxsz = vec_full_reg_size(s);
7987
7988 tcg_gen_gvec_dup_i64(size, dofs, oprsz, maxsz, cpu_reg(s, rn));
7989}
7990
7991
7992
7993
7994
7995
7996
7997
7998
7999
8000
/* INS (element): insert one element of Vn into an element of Vd.
 * Element size is the lowest set bit of imm5; the destination index
 * comes from the imm5 bits above it, the source index from imm4.
 */
static void handle_simd_inse(DisasContext *s, int rd, int rn,
                             int imm4, int imm5)
{
    int size = ctz32(imm5);
    int src_index, dst_index;
    TCGv_i64 tmp;

    if (size > 3) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    dst_index = extract32(imm5, 1+size, 5);
    src_index = extract32(imm4, size, 4);

    tmp = tcg_temp_new_i64();

    read_vec_element(s, tmp, rn, src_index, size);
    write_vec_element(s, tmp, rd, dst_index, size);

    tcg_temp_free_i64(tmp);

    /* INS is considered a 128-bit write for SVE. */
    clear_vec_high(s, true, rd);
}
8030
8031
8032
8033
8034
8035
8036
8037
8038
8039
8040
8041
/* INS (general): insert the low bits of Xn/Wn into one element of Vd.
 * Element size is the lowest set bit of imm5; the element index is
 * held in the imm5 bits above it.
 */
static void handle_simd_insg(DisasContext *s, int rd, int rn, int imm5)
{
    int size = ctz32(imm5);
    int idx;

    if (size > 3) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    idx = extract32(imm5, 1 + size, 4 - size);
    write_vec_element(s, cpu_reg(s, rn), rd, idx, size);

    /* INS is considered a 128-bit write for SVE. */
    clear_vec_high(s, true, rd);
}
8062
8063
8064
8065
8066
8067
8068
8069
8070
8071
8072
8073
8074
/* SMOV/UMOV: move a vector element of Vn to a general purpose
 * register with sign (SMOV, is_signed) or zero (UMOV) extension.
 * is_q selects a 64-bit (Xd) vs 32-bit (Wd) destination.
 */
static void handle_simd_umov_smov(DisasContext *s, int is_q, int is_signed,
                                  int rn, int rd, int imm5)
{
    int size = ctz32(imm5);
    int element;
    TCGv_i64 tcg_rd;

    /* Check for UnallocatedEncodings */
    if (is_signed) {
        /* SMOV: 8/16-bit to Wd, or 8/16/32-bit to Xd */
        if (size > 2 || (size == 2 && !is_q)) {
            unallocated_encoding(s);
            return;
        }
    } else {
        /* UMOV: 8/16/32-bit to Wd, or 64-bit to Xd only */
        if (size > 3
            || (size < 3 && is_q)
            || (size == 3 && !is_q)) {
            unallocated_encoding(s);
            return;
        }
    }

    if (!fp_access_check(s)) {
        return;
    }

    element = extract32(imm5, 1+size, 4);

    tcg_rd = cpu_reg(s, rd);
    read_vec_element(s, tcg_rd, rn, element, size | (is_signed ? MO_SIGN : 0));
    if (is_signed && !is_q) {
        /* SMOV to Wd: truncate the sign-extended 64-bit value */
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
}
8109
8110
8111
8112
8113
8114
8115
8116static void disas_simd_copy(DisasContext *s, uint32_t insn)
8117{
8118 int rd = extract32(insn, 0, 5);
8119 int rn = extract32(insn, 5, 5);
8120 int imm4 = extract32(insn, 11, 4);
8121 int op = extract32(insn, 29, 1);
8122 int is_q = extract32(insn, 30, 1);
8123 int imm5 = extract32(insn, 16, 5);
8124
8125 if (op) {
8126 if (is_q) {
8127
8128 handle_simd_inse(s, rd, rn, imm4, imm5);
8129 } else {
8130 unallocated_encoding(s);
8131 }
8132 } else {
8133 switch (imm4) {
8134 case 0:
8135
8136 handle_simd_dupe(s, is_q, rd, rn, imm5);
8137 break;
8138 case 1:
8139
8140 handle_simd_dupg(s, is_q, rd, rn, imm5);
8141 break;
8142 case 3:
8143 if (is_q) {
8144
8145 handle_simd_insg(s, rd, rn, imm5);
8146 } else {
8147 unallocated_encoding(s);
8148 }
8149 break;
8150 case 5:
8151 case 7:
8152
8153 handle_simd_umov_smov(s, is_q, (imm4 == 5), rn, rd, imm5);
8154 break;
8155 default:
8156 unallocated_encoding(s);
8157 break;
8158 }
8159 }
8160}
8161
8162
8163
8164
8165
8166
8167
8168
8169
8170
8171
8172
8173
8174
8175
/* AdvSIMD modified immediate
 *  31  30   29  28                 19 18 16 15   12  11  10  9     5 4    0
 * +---+---+----+---------------------+-----+-------+----+---+-------+------+
 * | 0 | Q | op | 0 1 1 1 1 0 0 0 0 0 | abc | cmode | o2 | 1 | defgh |  Rd  |
 * +---+---+----+---------------------+-----+-------+----+---+-------+------+
 *
 * Covers MOVI, MVNI, ORR and BIC (immediate), plus the FP16
 * FMOV (vector, immediate) when o2 is set.
 */
static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int cmode = extract32(insn, 12, 4);
    int o2 = extract32(insn, 11, 1);
    uint64_t abcdefgh = extract32(insn, 5, 5) | (extract32(insn, 16, 3) << 5);
    bool is_neg = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);
    uint64_t imm = 0;

    if (o2 != 0 || ((cmode == 0xf) && is_neg && !is_q)) {
        /* Check for FMOV (vector, immediate) - half-precision */
        if (!(dc_isar_feature(aa64_fp16, s) && o2 && cmode == 0xf)) {
            unallocated_encoding(s);
            return;
        }
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (cmode == 15 && o2 && !is_neg) {
        /* FMOV (vector, immediate) - half-precision */
        imm = vfp_expand_imm(MO_16, abcdefgh);
        /* now duplicate across the lanes */
        imm = dup_const(MO_16, imm);
    } else {
        imm = asimd_imm_const(abcdefgh, cmode, is_neg);
    }

    if (!((cmode & 0x9) == 0x1 || (cmode & 0xd) == 0x9)) {
        /* MOVI or MVNI, with MVNI negation handled above */
        tcg_gen_gvec_dup_imm(MO_64, vec_full_reg_offset(s, rd), is_q ? 16 : 8,
                             vec_full_reg_size(s), imm);
    } else {
        /* ORR or BIC, with BIC negation to AND handled above */
        if (is_neg) {
            gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_andi, MO_64);
        } else {
            gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_ori, MO_64);
        }
    }
}
8220
8221
8222
8223
8224
8225
8226
8227static void disas_simd_scalar_copy(DisasContext *s, uint32_t insn)
8228{
8229 int rd = extract32(insn, 0, 5);
8230 int rn = extract32(insn, 5, 5);
8231 int imm4 = extract32(insn, 11, 4);
8232 int imm5 = extract32(insn, 16, 5);
8233 int op = extract32(insn, 29, 1);
8234
8235 if (op != 0 || imm4 != 0) {
8236 unallocated_encoding(s);
8237 return;
8238 }
8239
8240
8241 handle_simd_dupes(s, rd, rn, imm5);
8242}
8243
8244
8245
8246
8247
8248
8249
/* AdvSIMD scalar pairwise
 *  31 30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
 * +-----+---+-----------+------+-----------+--------+-----+------+------+
 * | 0 1 | U | 1 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 1 |  Rn  |  Rd  |
 * +-----+---+-----------+------+-----------+--------+-----+------+------+
 */
static void disas_simd_scalar_pairwise(DisasContext *s, uint32_t insn)
{
    int u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    TCGv_ptr fpst;

    /* For some ops (the FP ones), size[1] is part of the encoding.
     * For ADDP strictly it is not but we treat it as such because the
     * combined opcode+size switch below is simpler that way.
     */
    opcode |= (extract32(size, 1, 1) << 5);

    switch (opcode) {
    case 0x3b: /* ADDP */
        if (u || size != 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        /* ADDP is an integer op; no fp status needed */
        fpst = NULL;
        break;
    case 0xc: /* FMAXNMP */
    case 0xd: /* FADDP */
    case 0xf: /* FMAXP */
    case 0x2c: /* FMINNMP */
    case 0x2f: /* FMINP */
        /* FP op: U clear means half-precision (FP16 required);
         * otherwise size[0] selects 32 vs 64 bit.
         */
        if (!u) {
            if (!dc_isar_feature(aa64_fp16, s)) {
                unallocated_encoding(s);
                return;
            } else {
                size = MO_16;
            }
        } else {
            size = extract32(size, 0, 1) ? MO_64 : MO_32;
        }

        if (!fp_access_check(s)) {
            return;
        }

        fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (size == MO_64) {
        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
        TCGv_i64 tcg_op2 = tcg_temp_new_i64();
        TCGv_i64 tcg_res = tcg_temp_new_i64();

        /* The two operands are adjacent elements of Vn */
        read_vec_element(s, tcg_op1, rn, 0, MO_64);
        read_vec_element(s, tcg_op2, rn, 1, MO_64);

        switch (opcode) {
        case 0x3b: /* ADDP */
            tcg_gen_add_i64(tcg_res, tcg_op1, tcg_op2);
            break;
        case 0xc: /* FMAXNMP */
            gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0xd: /* FADDP */
            gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0xf: /* FMAXP */
            gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0x2c: /* FMINNMP */
            gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0x2f: /* FMINP */
            gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        default:
            g_assert_not_reached();
        }

        write_fp_dreg(s, rd, tcg_res);

        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2);
        tcg_temp_free_i64(tcg_res);
    } else {
        TCGv_i32 tcg_op1 = tcg_temp_new_i32();
        TCGv_i32 tcg_op2 = tcg_temp_new_i32();
        TCGv_i32 tcg_res = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_op1, rn, 0, size);
        read_vec_element_i32(s, tcg_op2, rn, 1, size);

        if (size == MO_16) {
            switch (opcode) {
            case 0xc: /* FMAXNMP */
                gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xd: /* FADDP */
                gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xf: /* FMAXP */
                gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x2c: /* FMINNMP */
                gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x2f: /* FMINP */
                gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            switch (opcode) {
            case 0xc: /* FMAXNMP */
                gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xd: /* FADDP */
                gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xf: /* FMAXP */
                gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x2c: /* FMINNMP */
                gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x2f: /* FMINP */
                gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }
        }

        write_fp_sreg(s, rd, tcg_res);

        tcg_temp_free_i32(tcg_op1);
        tcg_temp_free_i32(tcg_op2);
        tcg_temp_free_i32(tcg_res);
    }

    if (fpst) {
        tcg_temp_free_ptr(fpst);
    }
}
8402
8403
8404
8405
8406
8407
8408
/* Common SSHR[RA]/USHR[RA] - Shift right (optional rounding/accumulate)
 *
 * Handles the common right-shift code; used by both the vector and
 * scalar paths. tcg_rnd == NULL means no rounding; 'accumulate' adds
 * the shifted value into tcg_res instead of replacing it. For size 3
 * (64-bit) with rounding, a 128-bit intermediate is simulated with a
 * high-half temporary. NOTE: tcg_src is clobbered as scratch.
 */
static void handle_shri_with_rndacc(TCGv_i64 tcg_res, TCGv_i64 tcg_src,
                                    TCGv_i64 tcg_rnd, bool accumulate,
                                    bool is_u, int size, int shift)
{
    bool extended_result = false;
    bool round = tcg_rnd != NULL;
    int ext_lshift = 0;
    TCGv_i64 tcg_src_hi;

    if (round && size == 3) {
        extended_result = true;
        ext_lshift = 64 - shift;
        tcg_src_hi = tcg_temp_new_i64();
    } else if (shift == 64) {
        if (!accumulate && is_u) {
            /* result is zero */
            tcg_gen_movi_i64(tcg_res, 0);
            return;
        }
    }

    /* Deal with the rounding step */
    if (round) {
        if (extended_result) {
            TCGv_i64 tcg_zero = tcg_const_i64(0);
            if (!is_u) {
                /* take care of sign extending tcg_src */
                tcg_gen_sari_i64(tcg_src_hi, tcg_src, 63);
                tcg_gen_add2_i64(tcg_src, tcg_src_hi,
                                 tcg_src, tcg_src_hi,
                                 tcg_rnd, tcg_zero);
            } else {
                tcg_gen_add2_i64(tcg_src, tcg_src_hi,
                                 tcg_src, tcg_zero,
                                 tcg_rnd, tcg_zero);
            }
            tcg_temp_free_i64(tcg_zero);
        } else {
            tcg_gen_add_i64(tcg_src, tcg_src, tcg_rnd);
        }
    }

    /* Now do the shift right */
    if (round && extended_result) {
        /* extended case, >64 bit precision required */
        if (ext_lshift == 0) {
            /* special case, only high bits matter */
            tcg_gen_mov_i64(tcg_src, tcg_src_hi);
        } else {
            tcg_gen_shri_i64(tcg_src, tcg_src, shift);
            tcg_gen_shli_i64(tcg_src_hi, tcg_src_hi, ext_lshift);
            tcg_gen_or_i64(tcg_src, tcg_src, tcg_src_hi);
        }
    } else {
        if (is_u) {
            if (shift == 64) {
                /* essentially shifting in 64 zeros */
                tcg_gen_movi_i64(tcg_src, 0);
            } else {
                tcg_gen_shri_i64(tcg_src, tcg_src, shift);
            }
        } else {
            if (shift == 64) {
                /* effectively extending the sign-bit */
                tcg_gen_sari_i64(tcg_src, tcg_src, 63);
            } else {
                tcg_gen_sari_i64(tcg_src, tcg_src, shift);
            }
        }
    }

    if (accumulate) {
        tcg_gen_add_i64(tcg_res, tcg_res, tcg_src);
    } else {
        tcg_gen_mov_i64(tcg_res, tcg_src);
    }

    if (extended_result) {
        tcg_temp_free_i64(tcg_src_hi);
    }
}
8490
8491
/* SSHR[RA]/USHR[RA]/SRSHR/URSHR/SRI - Scalar shift right (with
 * optional rounding, accumulation or insertion). The scalar form
 * only exists for 64-bit elements.
 */
static void handle_scalar_simd_shri(DisasContext *s,
                                    bool is_u, int immh, int immb,
                                    int opcode, int rn, int rd)
{
    const int size = 3;
    int immhb = immh << 3 | immb;
    int shift = 2 * (8 << size) - immhb;
    bool accumulate = false;
    bool round = false;
    bool insert = false;
    TCGv_i64 tcg_rn;
    TCGv_i64 tcg_rd;
    TCGv_i64 tcg_round;

    /* immh<3> must be set: the scalar form is 64-bit only */
    if (!extract32(immh, 3, 1)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    switch (opcode) {
    case 0x02: /* SSRA / USRA (accumulate) */
        accumulate = true;
        break;
    case 0x04: /* SRSHR / URSHR (rounding) */
        round = true;
        break;
    case 0x06: /* SRSRA / URSRA (accum + rounding) */
        accumulate = round = true;
        break;
    case 0x08: /* SRI */
        insert = true;
        break;
    }

    if (round) {
        uint64_t round_const = 1ULL << (shift - 1);
        tcg_round = tcg_const_i64(round_const);
    } else {
        tcg_round = NULL;
    }

    tcg_rn = read_fp_dreg(s, rn);
    tcg_rd = (accumulate || insert) ? read_fp_dreg(s, rd) : tcg_temp_new_i64();

    if (insert) {
        /* shift count same as element size is valid but does nothing;
         * special case to avoid potential shift by 64.
         */
        int esize = 8 << size;
        if (shift != esize) {
            tcg_gen_shri_i64(tcg_rn, tcg_rn, shift);
            tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, 0, esize - shift);
        }
    } else {
        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
                                accumulate, is_u, size, shift);
    }

    write_fp_dreg(s, rd, tcg_rd);

    tcg_temp_free_i64(tcg_rn);
    tcg_temp_free_i64(tcg_rd);
    if (round) {
        tcg_temp_free_i64(tcg_round);
    }
}
8562
8563
8564static void handle_scalar_simd_shli(DisasContext *s, bool insert,
8565 int immh, int immb, int opcode,
8566 int rn, int rd)
8567{
8568 int size = 32 - clz32(immh) - 1;
8569 int immhb = immh << 3 | immb;
8570 int shift = immhb - (8 << size);
8571 TCGv_i64 tcg_rn;
8572 TCGv_i64 tcg_rd;
8573
8574 if (!extract32(immh, 3, 1)) {
8575 unallocated_encoding(s);
8576 return;
8577 }
8578
8579 if (!fp_access_check(s)) {
8580 return;
8581 }
8582
8583 tcg_rn = read_fp_dreg(s, rn);
8584 tcg_rd = insert ? read_fp_dreg(s, rd) : tcg_temp_new_i64();
8585
8586 if (insert) {
8587 tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, shift, 64 - shift);
8588 } else {
8589 tcg_gen_shli_i64(tcg_rd, tcg_rn, shift);
8590 }
8591
8592 write_fp_dreg(s, rd, tcg_rd);
8593
8594 tcg_temp_free_i64(tcg_rn);
8595 tcg_temp_free_i64(tcg_rd);
8596}
8597
8598
8599
/* SQSHRN/SQSHRUN etc - Saturating shift right with narrowing
 * (optionally rounding), shared by the scalar and vector forms.
 * is_u_shift selects an unsigned source shift, is_u_narrow an
 * unsigned narrowing of a signed source (the SQSHRUN family).
 */
static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q,
                                   bool is_u_shift, bool is_u_narrow,
                                   int immh, int immb, int opcode,
                                   int rn, int rd)
{
    int immhb = immh << 3 | immb;
    int size = 32 - clz32(immh) - 1;
    int esize = 8 << size;
    int shift = (2 * esize) - immhb;
    int elements = is_scalar ? 1 : (64 / esize);
    bool round = extract32(opcode, 0, 1);
    MemOp ldop = (size + 1) | (is_u_shift ? 0 : MO_SIGN);
    TCGv_i64 tcg_rn, tcg_rd, tcg_round;
    TCGv_i32 tcg_rd_narrowed;
    TCGv_i64 tcg_final;

    /* Narrowing helpers indexed by destination element size;
     * the second index of the signed table selects unsigned
     * saturation of the signed source (SQSHRUN).
     */
    static NeonGenNarrowEnvFn * const signed_narrow_fns[4][2] = {
        { gen_helper_neon_narrow_sat_s8,
          gen_helper_neon_unarrow_sat8 },
        { gen_helper_neon_narrow_sat_s16,
          gen_helper_neon_unarrow_sat16 },
        { gen_helper_neon_narrow_sat_s32,
          gen_helper_neon_unarrow_sat32 },
        { NULL, NULL },
    };
    static NeonGenNarrowEnvFn * const unsigned_narrow_fns[4] = {
        gen_helper_neon_narrow_sat_u8,
        gen_helper_neon_narrow_sat_u16,
        gen_helper_neon_narrow_sat_u32,
        NULL
    };
    NeonGenNarrowEnvFn *narrowfn;

    int i;

    assert(size < 4);

    /* immh<3> set would mean a 64-bit source element: reserved */
    if (extract32(immh, 3, 1)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (is_u_shift) {
        narrowfn = unsigned_narrow_fns[size];
    } else {
        narrowfn = signed_narrow_fns[size][is_u_narrow ? 1 : 0];
    }

    tcg_rn = tcg_temp_new_i64();
    tcg_rd = tcg_temp_new_i64();
    tcg_rd_narrowed = tcg_temp_new_i32();
    tcg_final = tcg_const_i64(0);

    if (round) {
        uint64_t round_const = 1ULL << (shift - 1);
        tcg_round = tcg_const_i64(round_const);
    } else {
        tcg_round = NULL;
    }

    /* Shift each (2*esize)-bit source element, saturate/narrow it,
     * and deposit it into its slot of the 64-bit result.
     */
    for (i = 0; i < elements; i++) {
        read_vec_element(s, tcg_rn, rn, i, ldop);
        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
                                false, is_u_shift, size+1, shift);
        narrowfn(tcg_rd_narrowed, cpu_env, tcg_rd);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_rd_narrowed);
        tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
    }

    /* Non-Q writes the low half of Vd; Q (the "2" variants) the high */
    if (!is_q) {
        write_vec_element(s, tcg_final, rd, 0, MO_64);
    } else {
        write_vec_element(s, tcg_final, rd, 1, MO_64);
    }

    if (round) {
        tcg_temp_free_i64(tcg_round);
    }
    tcg_temp_free_i64(tcg_rn);
    tcg_temp_free_i64(tcg_rd);
    tcg_temp_free_i32(tcg_rd_narrowed);
    tcg_temp_free_i64(tcg_final);

    clear_vec_high(s, is_q, rd);
}
8689
8690
/*
 * SQSHL/UQSHL/SQSHLU by immediate: saturating left shift of each
 * element, for both the scalar and vector forms.  The combination of
 * src_unsigned/dst_unsigned selects SQSHL (s/s), SQSHLU (s/u) or
 * UQSHL (u/u); the signed-result-of-unsigned-source case is invalid
 * (NULL entries in the dispatch tables below).
 */
static void handle_simd_qshl(DisasContext *s, bool scalar, bool is_q,
                             bool src_unsigned, bool dst_unsigned,
                             int immh, int immb, int rn, int rd)
{
    int immhb = immh << 3 | immb;
    int size = 32 - clz32(immh) - 1;   /* log2 of the element size */
    int shift = immhb - (8 << size);
    int pass;

    assert(immh != 0);
    assert(!(scalar && is_q));

    if (!scalar) {
        if (!is_q && extract32(immh, 3, 1)) {
            /* 64-bit elements require the Q (128-bit) form */
            unallocated_encoding(s);
            return;
        }

        /*
         * The narrow helpers below take one shift count per lane of a
         * 32-bit operand, so replicate the shift amount into every
         * 8-bit (size 0) or 16-bit (size 1) lane.
         */
        switch (size) {
        case 0:
            shift |= shift << 8;
            /* fall through */
        case 1:
            shift |= shift << 16;
            break;
        case 2:
        case 3:
            break;
        default:
            g_assert_not_reached();
        }
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (size == 3) {
        /* 64-bit elements: one or two passes over i64 values. */
        TCGv_i64 tcg_shift = tcg_const_i64(shift);
        static NeonGenTwo64OpEnvFn * const fns[2][2] = {
            { gen_helper_neon_qshl_s64, gen_helper_neon_qshlu_s64 },
            { NULL, gen_helper_neon_qshl_u64 },
        };
        NeonGenTwo64OpEnvFn *genfn = fns[src_unsigned][dst_unsigned];
        int maxpass = is_q ? 2 : 1;

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);
            genfn(tcg_op, cpu_env, tcg_op, tcg_shift);
            write_vec_element(s, tcg_op, rd, pass, MO_64);

            tcg_temp_free_i64(tcg_op);
        }
        tcg_temp_free_i64(tcg_shift);
        clear_vec_high(s, is_q, rd);
    } else {
        TCGv_i32 tcg_shift = tcg_const_i32(shift);
        /* Indexed [src_unsigned][dst_unsigned][size]. */
        static NeonGenTwoOpEnvFn * const fns[2][2][3] = {
            {
                { gen_helper_neon_qshl_s8,
                  gen_helper_neon_qshl_s16,
                  gen_helper_neon_qshl_s32 },
                { gen_helper_neon_qshlu_s8,
                  gen_helper_neon_qshlu_s16,
                  gen_helper_neon_qshlu_s32 }
            }, {
                { NULL, NULL, NULL },
                { gen_helper_neon_qshl_u8,
                  gen_helper_neon_qshl_u16,
                  gen_helper_neon_qshl_u32 }
            }
        };
        NeonGenTwoOpEnvFn *genfn = fns[src_unsigned][dst_unsigned][size];
        MemOp memop = scalar ? size : MO_32;
        int maxpass = scalar ? 1 : is_q ? 4 : 2;

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op, rn, pass, memop);
            genfn(tcg_op, cpu_env, tcg_op, tcg_shift);
            if (scalar) {
                /* Zero-extend the sub-word result before writing Sd. */
                switch (size) {
                case 0:
                    tcg_gen_ext8u_i32(tcg_op, tcg_op);
                    break;
                case 1:
                    tcg_gen_ext16u_i32(tcg_op, tcg_op);
                    break;
                case 2:
                    break;
                default:
                    g_assert_not_reached();
                }
                write_fp_sreg(s, rd, tcg_op);
            } else {
                write_vec_element_i32(s, tcg_op, rd, pass, MO_32);
            }

            tcg_temp_free_i32(tcg_op);
        }
        tcg_temp_free_i32(tcg_shift);

        if (!scalar) {
            clear_vec_high(s, is_q, rd);
        }
    }
}
8805
8806
/*
 * Integer to floating-point conversion ([US]CVTF), one element per
 * pass.  'fracbits' != 0 selects the fixed-point form; 'size' is the
 * FP element size (MO_16/MO_32/MO_64).  elements == 1 is the scalar
 * case.
 */
static void handle_simd_intfp_conv(DisasContext *s, int rd, int rn,
                                   int elements, int is_signed,
                                   int fracbits, int size)
{
    TCGv_ptr tcg_fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
    TCGv_i32 tcg_shift = NULL;

    MemOp mop = size | (is_signed ? MO_SIGN : 0);
    int pass;

    /* The 64-bit helpers always take a shift operand, so allocate it
     * (possibly zero) in that case; narrower sizes only need it for
     * the fixed-point variants.
     */
    if (fracbits || size == MO_64) {
        tcg_shift = tcg_const_i32(fracbits);
    }

    if (size == MO_64) {
        TCGv_i64 tcg_int64 = tcg_temp_new_i64();
        TCGv_i64 tcg_double = tcg_temp_new_i64();

        for (pass = 0; pass < elements; pass++) {
            read_vec_element(s, tcg_int64, rn, pass, mop);

            if (is_signed) {
                gen_helper_vfp_sqtod(tcg_double, tcg_int64,
                                     tcg_shift, tcg_fpst);
            } else {
                gen_helper_vfp_uqtod(tcg_double, tcg_int64,
                                     tcg_shift, tcg_fpst);
            }
            if (elements == 1) {
                /* scalar result */
                write_fp_dreg(s, rd, tcg_double);
            } else {
                write_vec_element(s, tcg_double, rd, pass, MO_64);
            }
        }

        tcg_temp_free_i64(tcg_int64);
        tcg_temp_free_i64(tcg_double);

    } else {
        TCGv_i32 tcg_int32 = tcg_temp_new_i32();
        TCGv_i32 tcg_float = tcg_temp_new_i32();

        for (pass = 0; pass < elements; pass++) {
            read_vec_element_i32(s, tcg_int32, rn, pass, mop);

            switch (size) {
            case MO_32:
                if (fracbits) {
                    if (is_signed) {
                        gen_helper_vfp_sltos(tcg_float, tcg_int32,
                                             tcg_shift, tcg_fpst);
                    } else {
                        gen_helper_vfp_ultos(tcg_float, tcg_int32,
                                             tcg_shift, tcg_fpst);
                    }
                } else {
                    if (is_signed) {
                        gen_helper_vfp_sitos(tcg_float, tcg_int32, tcg_fpst);
                    } else {
                        gen_helper_vfp_uitos(tcg_float, tcg_int32, tcg_fpst);
                    }
                }
                break;
            case MO_16:
                if (fracbits) {
                    if (is_signed) {
                        gen_helper_vfp_sltoh(tcg_float, tcg_int32,
                                             tcg_shift, tcg_fpst);
                    } else {
                        gen_helper_vfp_ultoh(tcg_float, tcg_int32,
                                             tcg_shift, tcg_fpst);
                    }
                } else {
                    if (is_signed) {
                        gen_helper_vfp_sitoh(tcg_float, tcg_int32, tcg_fpst);
                    } else {
                        gen_helper_vfp_uitoh(tcg_float, tcg_int32, tcg_fpst);
                    }
                }
                break;
            default:
                g_assert_not_reached();
            }

            if (elements == 1) {
                write_fp_sreg(s, rd, tcg_float);
            } else {
                write_vec_element_i32(s, tcg_float, rd, pass, size);
            }
        }

        tcg_temp_free_i32(tcg_int32);
        tcg_temp_free_i32(tcg_float);
    }

    tcg_temp_free_ptr(tcg_fpst);
    if (tcg_shift) {
        tcg_temp_free_i32(tcg_shift);
    }

    /* elements << size is the total bytes written: 16 == full quad. */
    clear_vec_high(s, elements << size == 16, rd);
}
8909
8910
8911static void handle_simd_shift_intfp_conv(DisasContext *s, bool is_scalar,
8912 bool is_q, bool is_u,
8913 int immh, int immb, int opcode,
8914 int rn, int rd)
8915{
8916 int size, elements, fracbits;
8917 int immhb = immh << 3 | immb;
8918
8919 if (immh & 8) {
8920 size = MO_64;
8921 if (!is_scalar && !is_q) {
8922 unallocated_encoding(s);
8923 return;
8924 }
8925 } else if (immh & 4) {
8926 size = MO_32;
8927 } else if (immh & 2) {
8928 size = MO_16;
8929 if (!dc_isar_feature(aa64_fp16, s)) {
8930 unallocated_encoding(s);
8931 return;
8932 }
8933 } else {
8934
8935 g_assert(immh == 1);
8936 unallocated_encoding(s);
8937 return;
8938 }
8939
8940 if (is_scalar) {
8941 elements = 1;
8942 } else {
8943 elements = (8 << is_q) >> size;
8944 }
8945 fracbits = (16 << size) - immhb;
8946
8947 if (!fp_access_check(s)) {
8948 return;
8949 }
8950
8951 handle_simd_intfp_conv(s, rd, rn, elements, !is_u, fracbits, size);
8952}
8953
8954
/*
 * FCVTZS/FCVTZU (fixed-point, immediate shift): convert FP elements
 * to integer with 'fracbits' fraction bits, rounding toward zero.
 * Handles both the scalar and vector immediate-shift encodings.
 */
static void handle_simd_shift_fpint_conv(DisasContext *s, bool is_scalar,
                                         bool is_q, bool is_u,
                                         int immh, int immb, int rn, int rd)
{
    int immhb = immh << 3 | immb;
    int pass, size, fracbits;
    TCGv_ptr tcg_fpstatus;
    TCGv_i32 tcg_rmode, tcg_shift;

    if (immh & 0x8) {
        size = MO_64;
        if (!is_scalar && !is_q) {
            /* 64-bit vector elements need the Q form */
            unallocated_encoding(s);
            return;
        }
    } else if (immh & 0x4) {
        size = MO_32;
    } else if (immh & 0x2) {
        size = MO_16;
        if (!dc_isar_feature(aa64_fp16, s)) {
            unallocated_encoding(s);
            return;
        }
    } else {
        /* immh == 1 would encode byte-sized elements: reserved */
        assert(immh == 1);
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    assert(!(is_scalar && is_q));

    /* Force round-to-zero for the conversion; the original rounding
     * mode is restored by the second set_rmode call at the end.
     */
    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(FPROUNDING_ZERO));
    tcg_fpstatus = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
    fracbits = (16 << size) - immhb;
    tcg_shift = tcg_const_i32(fracbits);

    if (size == MO_64) {
        int maxpass = is_scalar ? 1 : 2;

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);
            if (is_u) {
                gen_helper_vfp_touqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_tosqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
            }
            write_vec_element(s, tcg_op, rd, pass, MO_64);
            tcg_temp_free_i64(tcg_op);
        }
        clear_vec_high(s, is_q, rd);
    } else {
        void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
        int maxpass = is_scalar ? 1 : ((8 << is_q) >> size);

        switch (size) {
        case MO_16:
            if (is_u) {
                fn = gen_helper_vfp_touhh;
            } else {
                fn = gen_helper_vfp_toshh;
            }
            break;
        case MO_32:
            if (is_u) {
                fn = gen_helper_vfp_touls;
            } else {
                fn = gen_helper_vfp_tosls;
            }
            break;
        default:
            g_assert_not_reached();
        }

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op, rn, pass, size);
            fn(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
            if (is_scalar) {
                write_fp_sreg(s, rd, tcg_op);
            } else {
                write_vec_element_i32(s, tcg_op, rd, pass, size);
            }
            tcg_temp_free_i32(tcg_op);
        }
        if (!is_scalar) {
            clear_vec_high(s, is_q, rd);
        }
    }

    tcg_temp_free_i32(tcg_shift);
    /* Restore the caller's rounding mode. */
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
    tcg_temp_free_ptr(tcg_fpstatus);
    tcg_temp_free_i32(tcg_rmode);
}
9058
9059
9060
9061
9062
9063
9064
9065
9066
/*
 * AdvSIMD scalar shift by immediate: decode the opcode field and
 * dispatch to the shared shift-immediate handlers (scalar form).
 */
static void disas_simd_scalar_shift_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 5);
    int immb = extract32(insn, 16, 3);
    int immh = extract32(insn, 19, 4);
    bool is_u = extract32(insn, 29, 1);

    if (immh == 0) {
        /* immh == 0 is reserved for these encodings */
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0x08: /* SRI */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x00: /* SSHR / USHR */
    case 0x02: /* SSRA / USRA */
    case 0x04: /* SRSHR / URSHR */
    case 0x06: /* SRSRA / URSRA */
        handle_scalar_simd_shri(s, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x0a: /* SHL / SLI */
        handle_scalar_simd_shli(s, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x1c: /* SCVTF, UCVTF */
        handle_simd_shift_intfp_conv(s, true, false, is_u, immh, immb,
                                     opcode, rn, rd);
        break;
    case 0x10: /* SQSHRUN */
    case 0x11: /* SQRSHRUN */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        handle_vec_simd_sqshrn(s, true, false, false, true,
                               immh, immb, opcode, rn, rd);
        break;
    case 0x12: /* SQSHRN, UQSHRN */
    case 0x13: /* SQRSHRN, UQRSHRN */
        handle_vec_simd_sqshrn(s, true, false, is_u, is_u,
                               immh, immb, opcode, rn, rd);
        break;
    case 0xc: /* SQSHLU */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        handle_simd_qshl(s, true, false, false, true, immh, immb, rn, rd);
        break;
    case 0xe: /* SQSHL, UQSHL */
        handle_simd_qshl(s, true, false, is_u, is_u, immh, immb, rn, rd);
        break;
    case 0x1f: /* FCVTZS, FCVTZU */
        handle_simd_shift_fpint_conv(s, true, false, is_u, immh, immb, rn, rd);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
9133
9134
9135
9136
9137
9138
9139
/*
 * AdvSIMD scalar three different: SQDMLAL, SQDMLSL, SQDMULL (signed
 * saturating doubling multiply with widening, optionally accumulating
 * into or subtracting from Rd).  Only sizes 1 (16->32) and 2 (32->64)
 * are valid.
 */
static void disas_simd_scalar_three_reg_diff(DisasContext *s, uint32_t insn)
{
    bool is_u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 4);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    if (is_u) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0x9: /* SQDMLAL */
    case 0xb: /* SQDMLSL */
    case 0xd: /* SQDMULL */
        if (size == 0 || size == 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (size == 2) {
        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
        TCGv_i64 tcg_op2 = tcg_temp_new_i64();
        TCGv_i64 tcg_res = tcg_temp_new_i64();

        read_vec_element(s, tcg_op1, rn, 0, MO_32 | MO_SIGN);
        read_vec_element(s, tcg_op2, rm, 0, MO_32 | MO_SIGN);

        /* The doubling is done as a saturating self-add of the product. */
        tcg_gen_mul_i64(tcg_res, tcg_op1, tcg_op2);
        gen_helper_neon_addl_saturate_s64(tcg_res, cpu_env, tcg_res, tcg_res);

        switch (opcode) {
        case 0xd: /* SQDMULL */
            break;
        case 0xb: /* SQDMLSL: negate the product, then accumulate */
            tcg_gen_neg_i64(tcg_res, tcg_res);
            /* fall through */
        case 0x9: /* SQDMLAL */
            read_vec_element(s, tcg_op1, rd, 0, MO_64);
            gen_helper_neon_addl_saturate_s64(tcg_res, cpu_env,
                                              tcg_res, tcg_op1);
            break;
        default:
            g_assert_not_reached();
        }

        write_fp_dreg(s, rd, tcg_res);

        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2);
        tcg_temp_free_i64(tcg_res);
    } else {
        TCGv_i32 tcg_op1 = read_fp_hreg(s, rn);
        TCGv_i32 tcg_op2 = read_fp_hreg(s, rm);
        TCGv_i64 tcg_res = tcg_temp_new_i64();

        gen_helper_neon_mull_s16(tcg_res, tcg_op1, tcg_op2);
        gen_helper_neon_addl_saturate_s32(tcg_res, cpu_env, tcg_res, tcg_res);

        switch (opcode) {
        case 0xd: /* SQDMULL */
            break;
        case 0xb: /* SQDMLSL: negate the product, then accumulate */
            gen_helper_neon_negl_u32(tcg_res, tcg_res);
            /* fall through */
        case 0x9: /* SQDMLAL */
        {
            TCGv_i64 tcg_op3 = tcg_temp_new_i64();
            read_vec_element(s, tcg_op3, rd, 0, MO_32);
            gen_helper_neon_addl_saturate_s32(tcg_res, cpu_env,
                                              tcg_res, tcg_op3);
            tcg_temp_free_i64(tcg_op3);
            break;
        }
        default:
            g_assert_not_reached();
        }

        tcg_gen_ext32u_i64(tcg_res, tcg_res);
        write_fp_dreg(s, rd, tcg_res);

        tcg_temp_free_i32(tcg_op1);
        tcg_temp_free_i32(tcg_op2);
        tcg_temp_free_i64(tcg_res);
    }
}
9238
/*
 * Generate code for one 64-bit lane of an integer "three same"
 * operation: inputs in tcg_rn/tcg_rm, result in tcg_rd.  'u' selects
 * the unsigned variant of each opcode.
 */
static void handle_3same_64(DisasContext *s, int opcode, bool u,
                            TCGv_i64 tcg_rd, TCGv_i64 tcg_rn, TCGv_i64 tcg_rm)
{
    /* Comparisons yield all-ones (true) or all-zeroes (false):
     * implemented as setcond (0/1) followed by negation.
     */
    TCGCond cond;

    switch (opcode) {
    case 0x1: /* SQADD / UQADD */
        if (u) {
            gen_helper_neon_qadd_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qadd_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        }
        break;
    case 0x5: /* SQSUB / UQSUB */
        if (u) {
            gen_helper_neon_qsub_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qsub_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        }
        break;
    case 0x6: /* CMGT / CMHI */
        /* Shared comparison tail: the later cases jump here with
         * their condition already selected.
         */
        cond = u ? TCG_COND_GTU : TCG_COND_GT;
    do_cmop:
        tcg_gen_setcond_i64(cond, tcg_rd, tcg_rn, tcg_rm);
        tcg_gen_neg_i64(tcg_rd, tcg_rd);
        break;
    case 0x7: /* CMGE / CMHS */
        cond = u ? TCG_COND_GEU : TCG_COND_GE;
        goto do_cmop;
    case 0x11: /* CMTST / CMEQ */
        if (u) {
            cond = TCG_COND_EQ;
            goto do_cmop;
        }
        gen_cmtst_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 0x8: /* SSHL / USHL */
        if (u) {
            gen_ushl_i64(tcg_rd, tcg_rn, tcg_rm);
        } else {
            gen_sshl_i64(tcg_rd, tcg_rn, tcg_rm);
        }
        break;
    case 0x9: /* SQSHL / UQSHL */
        if (u) {
            gen_helper_neon_qshl_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qshl_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        }
        break;
    case 0xa: /* SRSHL / URSHL */
        if (u) {
            gen_helper_neon_rshl_u64(tcg_rd, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_rshl_s64(tcg_rd, tcg_rn, tcg_rm);
        }
        break;
    case 0xb: /* SQRSHL / UQRSHL */
        if (u) {
            gen_helper_neon_qrshl_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qrshl_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        }
        break;
    case 0x10: /* ADD / SUB */
        if (u) {
            tcg_gen_sub_i64(tcg_rd, tcg_rn, tcg_rm);
        } else {
            tcg_gen_add_i64(tcg_rd, tcg_rn, tcg_rm);
        }
        break;
    default:
        g_assert_not_reached();
    }
}
9322
9323
9324
9325
9326
/*
 * Floating-point "three same" operations for single and double
 * precision, one element per pass.  'size' != 0 selects double
 * precision; elements == 1 is the scalar case.  The fpopcode values
 * encode opcode plus the size<1> and U bits.
 */
static void handle_3same_float(DisasContext *s, int size, int elements,
                               int fpopcode, int rd, int rn, int rm)
{
    int pass;
    TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);

    for (pass = 0; pass < elements; pass++) {
        if (size) {
            /* Double precision lane. */
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
            TCGv_i64 tcg_res = tcg_temp_new_i64();

            read_vec_element(s, tcg_op1, rn, pass, MO_64);
            read_vec_element(s, tcg_op2, rm, pass, MO_64);

            switch (fpopcode) {
            case 0x39: /* FMLS: negate op1 and reuse the FMLA path */
                gen_helper_vfp_negd(tcg_op1, tcg_op1);
                /* fall through */
            case 0x19: /* FMLA: accumulate into the existing Rd element */
                read_vec_element(s, tcg_res, rd, pass, MO_64);
                gen_helper_vfp_muladdd(tcg_res, tcg_op1, tcg_op2,
                                       tcg_res, fpst);
                break;
            case 0x18: /* FMAXNM */
                gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1a: /* FADD */
                gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1b: /* FMULX */
                gen_helper_vfp_mulxd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1c: /* FCMEQ */
                gen_helper_neon_ceq_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1e: /* FMAX */
                gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1f: /* FRECPS */
                gen_helper_recpsf_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x38: /* FMINNM */
                gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3a: /* FSUB */
                gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3e: /* FMIN */
                gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3f: /* FRSQRTS */
                gen_helper_rsqrtsf_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5b: /* FMUL */
                gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5c: /* FCMGE */
                gen_helper_neon_cge_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5d: /* FACGE */
                gen_helper_neon_acge_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5f: /* FDIV */
                gen_helper_vfp_divd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7a: /* FABD: absolute value of the difference */
                gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
                gen_helper_vfp_absd(tcg_res, tcg_res);
                break;
            case 0x7c: /* FCMGT */
                gen_helper_neon_cgt_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7d: /* FACGT */
                gen_helper_neon_acgt_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            write_vec_element(s, tcg_res, rd, pass, MO_64);

            tcg_temp_free_i64(tcg_res);
            tcg_temp_free_i64(tcg_op1);
            tcg_temp_free_i64(tcg_op2);
        } else {
            /* Single precision lane. */
            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op1, rn, pass, MO_32);
            read_vec_element_i32(s, tcg_op2, rm, pass, MO_32);

            switch (fpopcode) {
            case 0x39: /* FMLS: negate op1 and reuse the FMLA path */
                gen_helper_vfp_negs(tcg_op1, tcg_op1);
                /* fall through */
            case 0x19: /* FMLA: accumulate into the existing Rd element */
                read_vec_element_i32(s, tcg_res, rd, pass, MO_32);
                gen_helper_vfp_muladds(tcg_res, tcg_op1, tcg_op2,
                                       tcg_res, fpst);
                break;
            case 0x1a: /* FADD */
                gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1b: /* FMULX */
                gen_helper_vfp_mulxs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1c: /* FCMEQ */
                gen_helper_neon_ceq_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1e: /* FMAX */
                gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1f: /* FRECPS */
                gen_helper_recpsf_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x18: /* FMAXNM */
                gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x38: /* FMINNM */
                gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3a: /* FSUB */
                gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3e: /* FMIN */
                gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3f: /* FRSQRTS */
                gen_helper_rsqrtsf_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5b: /* FMUL */
                gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5c: /* FCMGE */
                gen_helper_neon_cge_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5d: /* FACGE */
                gen_helper_neon_acge_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5f: /* FDIV */
                gen_helper_vfp_divs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7a: /* FABD: absolute value of the difference */
                gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
                gen_helper_vfp_abss(tcg_res, tcg_res);
                break;
            case 0x7c: /* FCMGT */
                gen_helper_neon_cgt_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7d: /* FACGT */
                gen_helper_neon_acgt_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            if (elements == 1) {
                /* Scalar single-precision result stored as a 64-bit
                 * element (zero-extended).
                 */
                TCGv_i64 tcg_tmp = tcg_temp_new_i64();

                tcg_gen_extu_i32_i64(tcg_tmp, tcg_res);
                write_vec_element(s, tcg_tmp, rd, pass, MO_64);
                tcg_temp_free_i64(tcg_tmp);
            } else {
                write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
            }

            tcg_temp_free_i32(tcg_res);
            tcg_temp_free_i32(tcg_op1);
            tcg_temp_free_i32(tcg_op2);
        }
    }

    tcg_temp_free_ptr(fpst);

    /* Total bytes written > 8 means a full quad vector. */
    clear_vec_high(s, elements * (size ? 8 : 4) > 8, rd);
}
9510
9511
9512
9513
9514
9515
9516
/*
 * AdvSIMD scalar three same: decode and emit the scalar integer and
 * floating-point three-register-same operations.
 */
static void disas_simd_scalar_three_reg_same(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 5);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    bool u = extract32(insn, 29, 1);
    TCGv_i64 tcg_rd;

    if (opcode >= 0x18) {
        /* Floating point: U, size[1] and opcode indicate operation */
        int fpopcode = opcode | (extract32(size, 1, 1) << 5) | (u << 6);
        switch (fpopcode) {
        case 0x1b: /* FMULX */
        case 0x1f: /* FRECPS */
        case 0x3f: /* FRSQRTS */
        case 0x5d: /* FACGE */
        case 0x7d: /* FACGT */
        case 0x1c: /* FCMEQ */
        case 0x5c: /* FCMGE */
        case 0x7c: /* FCMGT */
        case 0x7a: /* FABD */
            break;
        default:
            unallocated_encoding(s);
            return;
        }

        if (!fp_access_check(s)) {
            return;
        }

        /* size[0] distinguishes single from double precision */
        handle_3same_float(s, extract32(size, 0, 1), 1, fpopcode, rd, rn, rm);
        return;
    }

    switch (opcode) {
    case 0x1: /* SQADD, UQADD */
    case 0x5: /* SQSUB, UQSUB */
    case 0x9: /* SQSHL, UQSHL */
    case 0xb: /* SQRSHL, UQRSHL */
        break;
    case 0x8: /* SSHL, USHL */
    case 0xa: /* SRSHL, URSHL */
    case 0x6: /* CMGT, CMHI */
    case 0x7: /* CMGE, CMHS */
    case 0x11: /* CMTST, CMEQ */
    case 0x10: /* ADD, SUB */
        /* These only exist for the 64-bit scalar form */
        if (size != 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x16: /* SQDMULH, SQRDMULH */
        if (size != 1 && size != 2) {
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rd = tcg_temp_new_i64();

    if (size == 3) {
        TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
        TCGv_i64 tcg_rm = read_fp_dreg(s, rm);

        handle_3same_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rm);
        tcg_temp_free_i64(tcg_rn);
        tcg_temp_free_i64(tcg_rm);
    } else {
        /* Narrow sizes go through the 32-bit env helpers, which
         * saturate per element size.
         */
        NeonGenTwoOpEnvFn *genenvfn;
        TCGv_i32 tcg_rn = tcg_temp_new_i32();
        TCGv_i32 tcg_rm = tcg_temp_new_i32();
        TCGv_i32 tcg_rd32 = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_rn, rn, 0, size);
        read_vec_element_i32(s, tcg_rm, rm, 0, size);

        switch (opcode) {
        case 0x1: /* SQADD, UQADD */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qadd_s8, gen_helper_neon_qadd_u8 },
                { gen_helper_neon_qadd_s16, gen_helper_neon_qadd_u16 },
                { gen_helper_neon_qadd_s32, gen_helper_neon_qadd_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x5: /* SQSUB, UQSUB */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qsub_s8, gen_helper_neon_qsub_u8 },
                { gen_helper_neon_qsub_s16, gen_helper_neon_qsub_u16 },
                { gen_helper_neon_qsub_s32, gen_helper_neon_qsub_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x9: /* SQSHL, UQSHL */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 },
                { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 },
                { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0xb: /* SQRSHL, UQRSHL */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 },
                { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 },
                { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x16: /* SQDMULH, SQRDMULH */
        {
            static NeonGenTwoOpEnvFn * const fns[2][2] = {
                { gen_helper_neon_qdmulh_s16, gen_helper_neon_qrdmulh_s16 },
                { gen_helper_neon_qdmulh_s32, gen_helper_neon_qrdmulh_s32 },
            };
            assert(size == 1 || size == 2);
            genenvfn = fns[size - 1][u];
            break;
        }
        default:
            g_assert_not_reached();
        }

        genenvfn(tcg_rd32, cpu_env, tcg_rn, tcg_rm);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_rd32);
        tcg_temp_free_i32(tcg_rd32);
        tcg_temp_free_i32(tcg_rn);
        tcg_temp_free_i32(tcg_rm);
    }

    write_fp_dreg(s, rd, tcg_rd);

    tcg_temp_free_i64(tcg_rd);
}
9676
9677
9678
9679
9680
9681
9682
9683
9684
9685static void disas_simd_scalar_three_reg_same_fp16(DisasContext *s,
9686 uint32_t insn)
9687{
9688 int rd = extract32(insn, 0, 5);
9689 int rn = extract32(insn, 5, 5);
9690 int opcode = extract32(insn, 11, 3);
9691 int rm = extract32(insn, 16, 5);
9692 bool u = extract32(insn, 29, 1);
9693 bool a = extract32(insn, 23, 1);
9694 int fpopcode = opcode | (a << 3) | (u << 4);
9695 TCGv_ptr fpst;
9696 TCGv_i32 tcg_op1;
9697 TCGv_i32 tcg_op2;
9698 TCGv_i32 tcg_res;
9699
9700 switch (fpopcode) {
9701 case 0x03:
9702 case 0x04:
9703 case 0x07:
9704 case 0x0f:
9705 case 0x14:
9706 case 0x15:
9707 case 0x1a:
9708 case 0x1c:
9709 case 0x1d:
9710 break;
9711 default:
9712 unallocated_encoding(s);
9713 return;
9714 }
9715
9716 if (!dc_isar_feature(aa64_fp16, s)) {
9717 unallocated_encoding(s);
9718 }
9719
9720 if (!fp_access_check(s)) {
9721 return;
9722 }
9723
9724 fpst = fpstatus_ptr(FPST_FPCR_F16);
9725
9726 tcg_op1 = read_fp_hreg(s, rn);
9727 tcg_op2 = read_fp_hreg(s, rm);
9728 tcg_res = tcg_temp_new_i32();
9729
9730 switch (fpopcode) {
9731 case 0x03:
9732 gen_helper_advsimd_mulxh(tcg_res, tcg_op1, tcg_op2, fpst);
9733 break;
9734 case 0x04:
9735 gen_helper_advsimd_ceq_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9736 break;
9737 case 0x07:
9738 gen_helper_recpsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9739 break;
9740 case 0x0f:
9741 gen_helper_rsqrtsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9742 break;
9743 case 0x14:
9744 gen_helper_advsimd_cge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9745 break;
9746 case 0x15:
9747 gen_helper_advsimd_acge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9748 break;
9749 case 0x1a:
9750 gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
9751 tcg_gen_andi_i32(tcg_res, tcg_res, 0x7fff);
9752 break;
9753 case 0x1c:
9754 gen_helper_advsimd_cgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9755 break;
9756 case 0x1d:
9757 gen_helper_advsimd_acgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9758 break;
9759 default:
9760 g_assert_not_reached();
9761 }
9762
9763 write_fp_sreg(s, rd, tcg_res);
9764
9765
9766 tcg_temp_free_i32(tcg_res);
9767 tcg_temp_free_i32(tcg_op1);
9768 tcg_temp_free_i32(tcg_op2);
9769 tcg_temp_free_ptr(fpst);
9770}
9771
9772
9773
9774
9775
9776
9777
/*
 * AdvSIMD scalar three same extra: SQRDMLAH and SQRDMLSH (rounding
 * doubling multiply accumulate/subtract, FEAT_RDM).  Only 16- and
 * 32-bit element sizes are valid.
 */
static void disas_simd_scalar_three_reg_same_extra(DisasContext *s,
                                                   uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 4);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    bool u = extract32(insn, 29, 1);
    TCGv_i32 ele1, ele2, ele3;
    TCGv_i64 res;
    bool feature;

    switch (u * 16 + opcode) {
    case 0x10: /* SQRDMLAH */
    case 0x11: /* SQRDMLSH */
        if (size != 1 && size != 2) {
            unallocated_encoding(s);
            return;
        }
        feature = dc_isar_feature(aa64_rdm, s);
        break;
    default:
        unallocated_encoding(s);
        return;
    }
    if (!feature) {
        unallocated_encoding(s);
        return;
    }
    if (!fp_access_check(s)) {
        return;
    }

    /* Operate on 32-bit values; the 16-bit helpers handle the
     * sub-word case themselves.
     */
    ele1 = tcg_temp_new_i32();
    ele2 = tcg_temp_new_i32();
    ele3 = tcg_temp_new_i32();

    read_vec_element_i32(s, ele1, rn, 0, size);
    read_vec_element_i32(s, ele2, rm, 0, size);
    read_vec_element_i32(s, ele3, rd, 0, size);   /* accumulator */

    switch (opcode) {
    case 0x0: /* SQRDMLAH */
        if (size == 1) {
            gen_helper_neon_qrdmlah_s16(ele3, cpu_env, ele1, ele2, ele3);
        } else {
            gen_helper_neon_qrdmlah_s32(ele3, cpu_env, ele1, ele2, ele3);
        }
        break;
    case 0x1: /* SQRDMLSH */
        if (size == 1) {
            gen_helper_neon_qrdmlsh_s16(ele3, cpu_env, ele1, ele2, ele3);
        } else {
            gen_helper_neon_qrdmlsh_s32(ele3, cpu_env, ele1, ele2, ele3);
        }
        break;
    default:
        g_assert_not_reached();
    }
    tcg_temp_free_i32(ele1);
    tcg_temp_free_i32(ele2);

    /* Zero-extend into the full 64-bit scalar destination. */
    res = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(res, ele3);
    tcg_temp_free_i32(ele3);

    write_fp_dreg(s, rd, res);
    tcg_temp_free_i64(res);
}
9854
/*
 * Generate code for one 64-bit lane of a "two-register misc"
 * operation: input in tcg_rn, result in tcg_rd.  For the FP
 * conversion and rounding opcodes the caller supplies tcg_rmode
 * and tcg_fpstatus with the rounding mode already selected.
 */
static void handle_2misc_64(DisasContext *s, int opcode, bool u,
                            TCGv_i64 tcg_rd, TCGv_i64 tcg_rn,
                            TCGv_i32 tcg_rmode, TCGv_ptr tcg_fpstatus)
{
    /* Comparisons against zero yield all-ones (true) or all-zeroes
     * (false): implemented as setcond followed by negation.
     */
    TCGCond cond;

    switch (opcode) {
    case 0x4: /* CLS, CLZ */
        if (u) {
            tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64);
        } else {
            tcg_gen_clrsb_i64(tcg_rd, tcg_rn);
        }
        break;
    case 0x5: /* NOT */
        /* This opcode group also covers CNT/RBIT encodings, but only
         * the bitwise NOT form reaches this 64-bit helper.
         */
        tcg_gen_not_i64(tcg_rd, tcg_rn);
        break;
    case 0x7: /* SQABS, SQNEG */
        if (u) {
            gen_helper_neon_qneg_s64(tcg_rd, cpu_env, tcg_rn);
        } else {
            gen_helper_neon_qabs_s64(tcg_rd, cpu_env, tcg_rn);
        }
        break;
    case 0xa: /* CMLT (zero) */
        /* Shared comparison tail: the later cases jump here with
         * their condition already selected.
         */
        cond = TCG_COND_LT;
    do_cmop:
        tcg_gen_setcondi_i64(cond, tcg_rd, tcg_rn, 0);
        tcg_gen_neg_i64(tcg_rd, tcg_rd);
        break;
    case 0x8: /* CMGT, CMGE (zero) */
        cond = u ? TCG_COND_GE : TCG_COND_GT;
        goto do_cmop;
    case 0x9: /* CMEQ, CMLE (zero) */
        cond = u ? TCG_COND_LE : TCG_COND_EQ;
        goto do_cmop;
    case 0xb: /* ABS, NEG */
        if (u) {
            tcg_gen_neg_i64(tcg_rd, tcg_rn);
        } else {
            tcg_gen_abs_i64(tcg_rd, tcg_rn);
        }
        break;
    case 0x2f: /* FABS */
        gen_helper_vfp_absd(tcg_rd, tcg_rn);
        break;
    case 0x6f: /* FNEG */
        gen_helper_vfp_negd(tcg_rd, tcg_rn);
        break;
    case 0x7f: /* FSQRT */
        gen_helper_vfp_sqrtd(tcg_rd, tcg_rn, cpu_env);
        break;
    case 0x1a: /* FCVTNS */
    case 0x1b: /* FCVTMS */
    case 0x1c: /* FCVTAS */
    case 0x3a: /* FCVTPS */
    case 0x3b: /* FCVTZS */
    {
        /* Signed FP->int; rounding mode is set up by the caller. */
        TCGv_i32 tcg_shift = tcg_const_i32(0);
        gen_helper_vfp_tosqd(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
        tcg_temp_free_i32(tcg_shift);
        break;
    }
    case 0x5a: /* FCVTNU */
    case 0x5b: /* FCVTMU */
    case 0x5c: /* FCVTAU */
    case 0x7a: /* FCVTPU */
    case 0x7b: /* FCVTZU */
    {
        /* Unsigned FP->int; rounding mode is set up by the caller. */
        TCGv_i32 tcg_shift = tcg_const_i32(0);
        gen_helper_vfp_touqd(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
        tcg_temp_free_i32(tcg_shift);
        break;
    }
    case 0x18: /* FRINTN */
    case 0x19: /* FRINTM */
    case 0x38: /* FRINTP */
    case 0x39: /* FRINTZ */
    case 0x58: /* FRINTA */
    case 0x79: /* FRINTI */
        gen_helper_rintd(tcg_rd, tcg_rn, tcg_fpstatus);
        break;
    case 0x59: /* FRINTX */
        gen_helper_rintd_exact(tcg_rd, tcg_rn, tcg_fpstatus);
        break;
    case 0x1e: /* FRINT32Z */
    case 0x5e: /* FRINT32X */
        gen_helper_frint32_d(tcg_rd, tcg_rn, tcg_fpstatus);
        break;
    case 0x1f: /* FRINT64Z */
    case 0x5f: /* FRINT64X */
        gen_helper_frint64_d(tcg_rd, tcg_rn, tcg_fpstatus);
        break;
    default:
        g_assert_not_reached();
    }
}
9965
/*
 * Floating point compare against zero: FCMGT/FCMGE/FCMEQ/FCMLE/FCMLT (zero),
 * for both the scalar and vector forms of the two-reg-misc groups.
 * The LT/LE variants are implemented by swapping the operands of the
 * GT/GE helper: (op < 0) == (0 > op), (op <= 0) == (0 >= op).
 */
static void handle_2misc_fcmp_zero(DisasContext *s, int opcode,
                                   bool is_scalar, bool is_u, bool is_q,
                                   int size, int rn, int rd)
{
    bool is_double = (size == MO_64);
    TCGv_ptr fpst;

    if (!fp_access_check(s)) {
        return;
    }

    /* Half-precision ops use the FP16 variant of the FP status flags */
    fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);

    if (is_double) {
        TCGv_i64 tcg_op = tcg_temp_new_i64();
        TCGv_i64 tcg_zero = tcg_const_i64(0);
        TCGv_i64 tcg_res = tcg_temp_new_i64();
        NeonGenTwoDoubleOpFn *genfn;
        bool swap = false;
        int pass;

        switch (opcode) {
        case 0x2e: /* FCMLT (zero) */
            swap = true;
            /* fall through */
        case 0x2c: /* FCMGT (zero) */
            genfn = gen_helper_neon_cgt_f64;
            break;
        case 0x2d: /* FCMEQ (zero) */
            genfn = gen_helper_neon_ceq_f64;
            break;
        case 0x6d: /* FCMLE (zero) */
            swap = true;
            /* fall through */
        case 0x6c: /* FCMGE (zero) */
            genfn = gen_helper_neon_cge_f64;
            break;
        default:
            g_assert_not_reached();
        }

        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
            read_vec_element(s, tcg_op, rn, pass, MO_64);
            if (swap) {
                /* Reversed operands implement the LT/LE comparisons */
                genfn(tcg_res, tcg_zero, tcg_op, fpst);
            } else {
                genfn(tcg_res, tcg_op, tcg_zero, fpst);
            }
            write_vec_element(s, tcg_res, rd, pass, MO_64);
        }
        tcg_temp_free_i64(tcg_res);
        tcg_temp_free_i64(tcg_zero);
        tcg_temp_free_i64(tcg_op);

        clear_vec_high(s, !is_scalar, rd);
    } else {
        TCGv_i32 tcg_op = tcg_temp_new_i32();
        TCGv_i32 tcg_zero = tcg_const_i32(0);
        TCGv_i32 tcg_res = tcg_temp_new_i32();
        NeonGenTwoSingleOpFn *genfn;
        bool swap = false;
        int pass, maxpasses;

        if (size == MO_16) {
            switch (opcode) {
            case 0x2e: /* FCMLT (zero) */
                swap = true;
                /* fall through */
            case 0x2c: /* FCMGT (zero) */
                genfn = gen_helper_advsimd_cgt_f16;
                break;
            case 0x2d: /* FCMEQ (zero) */
                genfn = gen_helper_advsimd_ceq_f16;
                break;
            case 0x6d: /* FCMLE (zero) */
                swap = true;
                /* fall through */
            case 0x6c: /* FCMGE (zero) */
                genfn = gen_helper_advsimd_cge_f16;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            switch (opcode) {
            case 0x2e: /* FCMLT (zero) */
                swap = true;
                /* fall through */
            case 0x2c: /* FCMGT (zero) */
                genfn = gen_helper_neon_cgt_f32;
                break;
            case 0x2d: /* FCMEQ (zero) */
                genfn = gen_helper_neon_ceq_f32;
                break;
            case 0x6d: /* FCMLE (zero) */
                swap = true;
                /* fall through */
            case 0x6c: /* FCMGE (zero) */
                genfn = gen_helper_neon_cge_f32;
                break;
            default:
                g_assert_not_reached();
            }
        }

        if (is_scalar) {
            maxpasses = 1;
        } else {
            /* 8 or 16 bytes depending on Q, divided by the element size */
            int vector_size = 8 << is_q;
            maxpasses = vector_size >> size;
        }

        for (pass = 0; pass < maxpasses; pass++) {
            read_vec_element_i32(s, tcg_op, rn, pass, size);
            if (swap) {
                genfn(tcg_res, tcg_zero, tcg_op, fpst);
            } else {
                genfn(tcg_res, tcg_op, tcg_zero, fpst);
            }
            if (is_scalar) {
                write_fp_sreg(s, rd, tcg_res);
            } else {
                write_vec_element_i32(s, tcg_res, rd, pass, size);
            }
        }
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_zero);
        tcg_temp_free_i32(tcg_op);
        if (!is_scalar) {
            clear_vec_high(s, is_q, rd);
        }
    }

    tcg_temp_free_ptr(fpst);
}
10101
/*
 * Reciprocal estimate family of the two-reg-misc groups:
 * FRECPE, FRECPX, FRSQRTE (and URECPE for the 32-bit integer case),
 * scalar and vector forms, single and double precision.
 */
static void handle_2misc_reciprocal(DisasContext *s, int opcode,
                                    bool is_scalar, bool is_u, bool is_q,
                                    int size, int rn, int rd)
{
    bool is_double = (size == 3);
    TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);

    if (is_double) {
        TCGv_i64 tcg_op = tcg_temp_new_i64();
        TCGv_i64 tcg_res = tcg_temp_new_i64();
        int pass;

        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
            read_vec_element(s, tcg_op, rn, pass, MO_64);
            switch (opcode) {
            case 0x3d: /* FRECPE */
                gen_helper_recpe_f64(tcg_res, tcg_op, fpst);
                break;
            case 0x3f: /* FRECPX */
                gen_helper_frecpx_f64(tcg_res, tcg_op, fpst);
                break;
            case 0x7d: /* FRSQRTE */
                gen_helper_rsqrte_f64(tcg_res, tcg_op, fpst);
                break;
            default:
                g_assert_not_reached();
            }
            write_vec_element(s, tcg_res, rd, pass, MO_64);
        }
        tcg_temp_free_i64(tcg_res);
        tcg_temp_free_i64(tcg_op);
        clear_vec_high(s, !is_scalar, rd);
    } else {
        TCGv_i32 tcg_op = tcg_temp_new_i32();
        TCGv_i32 tcg_res = tcg_temp_new_i32();
        int pass, maxpasses;

        if (is_scalar) {
            maxpasses = 1;
        } else {
            maxpasses = is_q ? 4 : 2;
        }

        for (pass = 0; pass < maxpasses; pass++) {
            read_vec_element_i32(s, tcg_op, rn, pass, MO_32);

            switch (opcode) {
            case 0x3c: /* URECPE */
                gen_helper_recpe_u32(tcg_res, tcg_op);
                break;
            case 0x3d: /* FRECPE */
                gen_helper_recpe_f32(tcg_res, tcg_op, fpst);
                break;
            case 0x3f: /* FRECPX */
                gen_helper_frecpx_f32(tcg_res, tcg_op, fpst);
                break;
            case 0x7d: /* FRSQRTE */
                gen_helper_rsqrte_f32(tcg_res, tcg_op, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            if (is_scalar) {
                write_fp_sreg(s, rd, tcg_res);
            } else {
                write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
            }
        }
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_op);
        if (!is_scalar) {
            clear_vec_high(s, is_q, rd);
        }
    }
    tcg_temp_free_ptr(fpst);
}
10179
/*
 * Handle 2-reg-misc ops which are narrowing (so each 2*size element
 * in the source becomes a size element in the destination).
 */
static void handle_2misc_narrow(DisasContext *s, bool scalar,
                                int opcode, bool u, bool is_q,
                                int size, int rn, int rd)
{
    int pass;
    TCGv_i32 tcg_res[2];
    int destelt = is_q ? 2 : 0; /* "2" variants write the high half of rd */
    int passes = scalar ? 1 : 2;

    if (scalar) {
        /* Scalar forms: the unused second result word is written as zero */
        tcg_res[1] = tcg_const_i32(0);
    }

    for (pass = 0; pass < passes; pass++) {
        TCGv_i64 tcg_op = tcg_temp_new_i64();
        NeonGenNarrowFn *genfn = NULL;
        NeonGenNarrowEnvFn *genenvfn = NULL;

        if (scalar) {
            read_vec_element(s, tcg_op, rn, pass, size + 1);
        } else {
            read_vec_element(s, tcg_op, rn, pass, MO_64);
        }
        tcg_res[pass] = tcg_temp_new_i32();

        switch (opcode) {
        case 0x12: /* XTN, SQXTUN */
        {
            static NeonGenNarrowFn * const xtnfns[3] = {
                gen_helper_neon_narrow_u8,
                gen_helper_neon_narrow_u16,
                tcg_gen_extrl_i64_i32,
            };
            static NeonGenNarrowEnvFn * const sqxtunfns[3] = {
                gen_helper_neon_unarrow_sat8,
                gen_helper_neon_unarrow_sat16,
                gen_helper_neon_unarrow_sat32,
            };
            if (u) {
                genenvfn = sqxtunfns[size];
            } else {
                genfn = xtnfns[size];
            }
            break;
        }
        case 0x14: /* SQXTN, UQXTN */
        {
            static NeonGenNarrowEnvFn * const fns[3][2] = {
                { gen_helper_neon_narrow_sat_s8,
                  gen_helper_neon_narrow_sat_u8 },
                { gen_helper_neon_narrow_sat_s16,
                  gen_helper_neon_narrow_sat_u16 },
                { gen_helper_neon_narrow_sat_s32,
                  gen_helper_neon_narrow_sat_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x16: /* FCVTN, FCVTN2 */
            /* 32 bit to 16 bit or 64 bit to 32 bit float conversion */
            if (size == 2) {
                gen_helper_vfp_fcvtsd(tcg_res[pass], tcg_op, cpu_env);
            } else {
                /* Convert the two f32 halves of the input separately */
                TCGv_i32 tcg_lo = tcg_temp_new_i32();
                TCGv_i32 tcg_hi = tcg_temp_new_i32();
                TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
                TCGv_i32 ahp = get_ahp_flag();

                tcg_gen_extr_i64_i32(tcg_lo, tcg_hi, tcg_op);
                gen_helper_vfp_fcvt_f32_to_f16(tcg_lo, tcg_lo, fpst, ahp);
                gen_helper_vfp_fcvt_f32_to_f16(tcg_hi, tcg_hi, fpst, ahp);
                tcg_gen_deposit_i32(tcg_res[pass], tcg_lo, tcg_hi, 16, 16);
                tcg_temp_free_i32(tcg_lo);
                tcg_temp_free_i32(tcg_hi);
                tcg_temp_free_ptr(fpst);
                tcg_temp_free_i32(ahp);
            }
            break;
        case 0x36: /* BFCVTN, BFCVTN2 */
        {
            TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
            gen_helper_bfcvt_pair(tcg_res[pass], tcg_op, fpst);
            tcg_temp_free_ptr(fpst);
        }
            break;
        case 0x56: /* FCVTXN, FCVTXN2 */
            /*
             * 64 bit to 32 bit float conversion
             * with von Neumann rounding (round to odd)
             */
            assert(size == 2);
            gen_helper_fcvtx_f64_to_f32(tcg_res[pass], tcg_op, cpu_env);
            break;
        default:
            g_assert_not_reached();
        }

        if (genfn) {
            genfn(tcg_res[pass], tcg_op);
        } else if (genenvfn) {
            genenvfn(tcg_res[pass], cpu_env, tcg_op);
        }

        tcg_temp_free_i64(tcg_op);
    }

    for (pass = 0; pass < 2; pass++) {
        write_vec_element_i32(s, tcg_res[pass], rd, destelt + pass, MO_32);
        tcg_temp_free_i32(tcg_res[pass]);
    }
    clear_vec_high(s, is_q, rd);
}
10294
10295
/*
 * SUQADD, USQADD: saturating add of rn into the destination rd.
 * The helpers take cpu_env so they can update the cumulative
 * saturation (QC) flag on overflow.
 */
static void handle_2misc_satacc(DisasContext *s, bool is_scalar, bool is_u,
                                bool is_q, int size, int rn, int rd)
{
    bool is_double = (size == 3);

    if (is_double) {
        TCGv_i64 tcg_rn = tcg_temp_new_i64();
        TCGv_i64 tcg_rd = tcg_temp_new_i64();
        int pass;

        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
            read_vec_element(s, tcg_rn, rn, pass, MO_64);
            read_vec_element(s, tcg_rd, rd, pass, MO_64);

            if (is_u) { /* USQADD */
                gen_helper_neon_uqadd_s64(tcg_rd, cpu_env, tcg_rn, tcg_rd);
            } else { /* SUQADD */
                gen_helper_neon_sqadd_u64(tcg_rd, cpu_env, tcg_rn, tcg_rd);
            }
            write_vec_element(s, tcg_rd, rd, pass, MO_64);
        }
        tcg_temp_free_i64(tcg_rd);
        tcg_temp_free_i64(tcg_rn);
        clear_vec_high(s, !is_scalar, rd);
    } else {
        TCGv_i32 tcg_rn = tcg_temp_new_i32();
        TCGv_i32 tcg_rd = tcg_temp_new_i32();
        int pass, maxpasses;

        if (is_scalar) {
            maxpasses = 1;
        } else {
            maxpasses = is_q ? 4 : 2;
        }

        for (pass = 0; pass < maxpasses; pass++) {
            if (is_scalar) {
                /* Scalar reads use the real (sub-word) element size */
                read_vec_element_i32(s, tcg_rn, rn, pass, size);
                read_vec_element_i32(s, tcg_rd, rd, pass, size);
            } else {
                read_vec_element_i32(s, tcg_rn, rn, pass, MO_32);
                read_vec_element_i32(s, tcg_rd, rd, pass, MO_32);
            }

            if (is_u) { /* USQADD */
                switch (size) {
                case 0:
                    gen_helper_neon_uqadd_s8(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                case 1:
                    gen_helper_neon_uqadd_s16(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                case 2:
                    gen_helper_neon_uqadd_s32(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                default:
                    g_assert_not_reached();
                }
            } else { /* SUQADD */
                switch (size) {
                case 0:
                    gen_helper_neon_sqadd_u8(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                case 1:
                    gen_helper_neon_sqadd_u16(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                case 2:
                    gen_helper_neon_sqadd_u32(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                default:
                    g_assert_not_reached();
                }
            }

            if (is_scalar) {
                /* Zero the whole low 64 bits before depositing the result */
                TCGv_i64 tcg_zero = tcg_const_i64(0);
                write_vec_element(s, tcg_zero, rd, 0, MO_64);
                tcg_temp_free_i64(tcg_zero);
            }
            write_vec_element_i32(s, tcg_rd, rd, pass, MO_32);
        }
        tcg_temp_free_i32(tcg_rd);
        tcg_temp_free_i32(tcg_rn);
        clear_vec_high(s, is_q, rd);
    }
}
10382
10383
10384
10385
10386
10387
10388
/* AdvSIMD scalar two reg misc */
static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 12, 5);
    int size = extract32(insn, 22, 2);
    bool u = extract32(insn, 29, 1);
    bool is_fcvt = false;
    int rmode;
    TCGv_i32 tcg_rmode;
    TCGv_ptr tcg_fpstatus;

    switch (opcode) {
    case 0x3: /* USQADD / SUQADD */
        if (!fp_access_check(s)) {
            return;
        }
        handle_2misc_satacc(s, true, u, false, size, rn, rd);
        return;
    case 0x7: /* SQABS / SQNEG */
        break;
    case 0xa: /* CMLT */
        if (u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x8: /* CMGT, CMGE */
    case 0x9: /* CMEQ, CMLE */
    case 0xb: /* ABS, NEG */
        if (size != 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x12: /* SQXTUN */
        if (!u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x14: /* SQXTN, UQXTN */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_2misc_narrow(s, true, opcode, u, false, size, rn, rd);
        return;
    case 0xc ... 0xf:
    case 0x16 ... 0x1d:
    case 0x1f:
        /*
         * Floating point: fold U and size[1] into the opcode to give
         * a single wider opcode space; size[0] then selects single
         * or double precision.
         */
        opcode |= (extract32(size, 1, 1) << 5) | (u << 6);
        size = extract32(size, 0, 1) ? 3 : 2;
        switch (opcode) {
        case 0x2c: /* FCMGT (zero) */
        case 0x2d: /* FCMEQ (zero) */
        case 0x2e: /* FCMLT (zero) */
        case 0x6c: /* FCMGE (zero) */
        case 0x6d: /* FCMLE (zero) */
            handle_2misc_fcmp_zero(s, opcode, true, u, true, size, rn, rd);
            return;
        case 0x1d: /* SCVTF */
        case 0x5d: /* UCVTF */
        {
            bool is_signed = (opcode == 0x1d);
            if (!fp_access_check(s)) {
                return;
            }
            handle_simd_intfp_conv(s, rd, rn, 1, is_signed, 0, size);
            return;
        }
        case 0x3d: /* FRECPE */
        case 0x3f: /* FRECPX */
        case 0x7d: /* FRSQRTE */
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_reciprocal(s, opcode, true, u, true, size, rn, rd);
            return;
        case 0x1a: /* FCVTNS */
        case 0x1b: /* FCVTMS */
        case 0x3a: /* FCVTPS */
        case 0x3b: /* FCVTZS */
        case 0x5a: /* FCVTNU */
        case 0x5b: /* FCVTMU */
        case 0x7a: /* FCVTPU */
        case 0x7b: /* FCVTZU */
            is_fcvt = true;
            /* Rounding mode is encoded in opcode bits 5 and 0 */
            rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
            break;
        case 0x1c: /* FCVTAS */
        case 0x5c: /* FCVTAU */
            /* TIEAWAY doesn't fit in the usual rounding mode encoding */
            is_fcvt = true;
            rmode = FPROUNDING_TIEAWAY;
            break;
        case 0x56: /* FCVTXN, FCVTXN2 */
            if (size == 2) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_narrow(s, true, opcode, u, false, size - 1, rn, rd);
            return;
        default:
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (is_fcvt) {
        /* Install the requested rounding mode, saving the old one */
        tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
        tcg_fpstatus = fpstatus_ptr(FPST_FPCR);
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
    } else {
        tcg_rmode = NULL;
        tcg_fpstatus = NULL;
    }

    if (size == 3) {
        TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
        TCGv_i64 tcg_rd = tcg_temp_new_i64();

        handle_2misc_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rmode, tcg_fpstatus);
        write_fp_dreg(s, rd, tcg_rd);
        tcg_temp_free_i64(tcg_rd);
        tcg_temp_free_i64(tcg_rn);
    } else {
        TCGv_i32 tcg_rn = tcg_temp_new_i32();
        TCGv_i32 tcg_rd = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_rn, rn, 0, size);

        switch (opcode) {
        case 0x7: /* SQABS, SQNEG */
        {
            NeonGenOneOpEnvFn *genfn;
            static NeonGenOneOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 },
                { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 },
                { gen_helper_neon_qabs_s32, gen_helper_neon_qneg_s32 },
            };
            genfn = fns[size][u];
            genfn(tcg_rd, cpu_env, tcg_rn);
            break;
        }
        case 0x1a: /* FCVTNS */
        case 0x1b: /* FCVTMS */
        case 0x1c: /* FCVTAS */
        case 0x3a: /* FCVTPS */
        case 0x3b: /* FCVTZS */
        {
            TCGv_i32 tcg_shift = tcg_const_i32(0);
            gen_helper_vfp_tosls(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
            tcg_temp_free_i32(tcg_shift);
            break;
        }
        case 0x5a: /* FCVTNU */
        case 0x5b: /* FCVTMU */
        case 0x5c: /* FCVTAU */
        case 0x7a: /* FCVTPU */
        case 0x7b: /* FCVTZU */
        {
            TCGv_i32 tcg_shift = tcg_const_i32(0);
            gen_helper_vfp_touls(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
            tcg_temp_free_i32(tcg_shift);
            break;
        }
        default:
            g_assert_not_reached();
        }

        write_fp_sreg(s, rd, tcg_rd);
        tcg_temp_free_i32(tcg_rd);
        tcg_temp_free_i32(tcg_rn);
    }

    if (is_fcvt) {
        /* Restore the previous rounding mode */
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
        tcg_temp_free_i32(tcg_rmode);
        tcg_temp_free_ptr(tcg_fpstatus);
    }
}
10588
10589
/* SSHR[RA]/USHR[RA] - Vector shift right (optional rounding/accumulate) */
static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
                                 int immh, int immb, int opcode, int rn, int rd)
{
    int size = 32 - clz32(immh) - 1;   /* element size from immh leading bit */
    int immhb = immh << 3 | immb;
    int shift = 2 * (8 << size) - immhb;  /* right-shift count is encoded inverted */
    GVecGen2iFn *gvec_fn;

    if (extract32(immh, 3, 1) && !is_q) {
        unallocated_encoding(s);
        return;
    }
    tcg_debug_assert(size <= 3);

    if (!fp_access_check(s)) {
        return;
    }

    switch (opcode) {
    case 0x02: /* SSRA / USRA (accumulate) */
        gvec_fn = is_u ? gen_gvec_usra : gen_gvec_ssra;
        break;

    case 0x08: /* SRI */
        gvec_fn = gen_gvec_sri;
        break;

    case 0x00: /* SSHR / USHR */
        if (is_u) {
            if (shift == 8 << size) {
                /* Shift count the same size as element size produces zero */
                tcg_gen_gvec_dup_imm(size, vec_full_reg_offset(s, rd),
                                     is_q ? 16 : 8, vec_full_reg_size(s), 0);
                return;
            }
            gvec_fn = tcg_gen_gvec_shri;
        } else {
            /*
             * Shift count the same size as element size produces all sign;
             * TCG requires shift < esize, and esize - 1 gives the same result.
             */
            if (shift == 8 << size) {
                shift -= 1;
            }
            gvec_fn = tcg_gen_gvec_sari;
        }
        break;

    case 0x04: /* SRSHR / URSHR (rounding) */
        gvec_fn = is_u ? gen_gvec_urshr : gen_gvec_srshr;
        break;

    case 0x06: /* SRSRA / URSRA (accum + rounding) */
        gvec_fn = is_u ? gen_gvec_ursra : gen_gvec_srsra;
        break;

    default:
        g_assert_not_reached();
    }

    gen_gvec_fn2i(s, is_q, rd, rn, shift, gvec_fn, size);
}
10649
10650
/* SHL/SLI - Vector shift left (SLI inserts into the destination) */
static void handle_vec_simd_shli(DisasContext *s, bool is_q, bool insert,
                                 int immh, int immb, int opcode, int rn, int rd)
{
    int size = 32 - clz32(immh) - 1;
    int immhb = immh << 3 | immb;
    int shift = immhb - (8 << size);   /* left-shift count encoded directly */

    /* Range of size is limited by decode: immh is a non-zero four bit field */
    assert(size >= 0 && size <= 3);

    if (extract32(immh, 3, 1) && !is_q) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (insert) {
        gen_gvec_fn2i(s, is_q, rd, rn, shift, gen_gvec_sli, size);
    } else {
        gen_gvec_fn2i(s, is_q, rd, rn, shift, tcg_gen_gvec_shli, size);
    }
}
10676
10677
/* SSHLL/USHLL - Vector shift left with widening */
static void handle_vec_simd_wshli(DisasContext *s, bool is_q, bool is_u,
                                  int immh, int immb, int opcode, int rn, int rd)
{
    int size = 32 - clz32(immh) - 1;
    int immhb = immh << 3 | immb;
    int shift = immhb - (8 << size);
    int dsize = 64;
    int esize = 8 << size;
    int elements = dsize/esize;
    TCGv_i64 tcg_rn = new_tmp_a64(s);
    TCGv_i64 tcg_rd = new_tmp_a64(s);
    int i;

    if (size >= 3) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /*
     * For the LL variants the store is larger than the load,
     * so if rd == rn we would overwrite parts of our input.
     * So load everything right at the start with one big load,
     * and then shift to extract the lanes we need.
     */
    read_vec_element(s, tcg_rn, rn, is_q ? 1 : 0, MO_64);

    for (i = 0; i < elements; i++) {
        tcg_gen_shri_i64(tcg_rd, tcg_rn, i * esize);
        /* Sign- or zero-extend the lane (signed when !is_u), then shift */
        ext_and_shift_reg(tcg_rd, tcg_rd, size | (!is_u << 2), 0);
        tcg_gen_shli_i64(tcg_rd, tcg_rd, shift);
        write_vec_element(s, tcg_rd, rd, i, size + 1);
    }
}
10713
10714
/* SHRN/RSHRN - Shift right with narrowing (and potential rounding) */
static void handle_vec_simd_shrn(DisasContext *s, bool is_q,
                                 int immh, int immb, int opcode, int rn, int rd)
{
    int immhb = immh << 3 | immb;
    int size = 32 - clz32(immh) - 1;
    int dsize = 64;
    int esize = 8 << size;
    int elements = dsize/esize;
    int shift = (2 * esize) - immhb;
    bool round = extract32(opcode, 0, 1);  /* opcode bit 0 selects RSHRN */
    TCGv_i64 tcg_rn, tcg_rd, tcg_final;
    TCGv_i64 tcg_round;
    int i;

    if (extract32(immh, 3, 1)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rn = tcg_temp_new_i64();
    tcg_rd = tcg_temp_new_i64();
    tcg_final = tcg_temp_new_i64();
    /* Start from the half of rd this insn writes, preserving the rest */
    read_vec_element(s, tcg_final, rd, is_q ? 1 : 0, MO_64);

    if (round) {
        uint64_t round_const = 1ULL << (shift - 1);
        tcg_round = tcg_const_i64(round_const);
    } else {
        tcg_round = NULL;
    }

    for (i = 0; i < elements; i++) {
        read_vec_element(s, tcg_rn, rn, i, size+1);
        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
                                false, true, size+1, shift);

        tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
    }

    if (!is_q) {
        write_vec_element(s, tcg_final, rd, 0, MO_64);
    } else {
        /* The "2" variant writes the high half */
        write_vec_element(s, tcg_final, rd, 1, MO_64);
    }
    if (round) {
        tcg_temp_free_i64(tcg_round);
    }
    tcg_temp_free_i64(tcg_rn);
    tcg_temp_free_i64(tcg_rd);
    tcg_temp_free_i64(tcg_final);

    clear_vec_high(s, is_q, rd);
}
10772
10773
10774
10775
10776
10777
10778
10779
/*
 * AdvSIMD shift by immediate.  Fields (per the extract32 calls below):
 * Rd[4:0], Rn[9:5], opcode[15:11], immb[18:16], immh[22:19], U[29], Q[30].
 */
static void disas_simd_shift_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 5);
    int immb = extract32(insn, 16, 3);
    int immh = extract32(insn, 19, 4);
    bool is_u = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);

    /* data_proc_simd[] has sent immh == 0 to disas_simd_mod_imm */
    assert(immh != 0);

    switch (opcode) {
    case 0x08: /* SRI */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x00: /* SSHR / USHR */
    case 0x02: /* SSRA / USRA */
    case 0x04: /* SRSHR / URSHR */
    case 0x06: /* SRSRA / URSRA */
        handle_vec_simd_shri(s, is_q, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x0a: /* SHL / SLI */
        handle_vec_simd_shli(s, is_q, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x10: /* SHRN / SQSHRUN */
    case 0x11: /* RSHRN / SQRSHRUN */
        if (is_u) {
            handle_vec_simd_sqshrn(s, false, is_q, false, true, immh, immb,
                                   opcode, rn, rd);
        } else {
            handle_vec_simd_shrn(s, is_q, immh, immb, opcode, rn, rd);
        }
        break;
    case 0x12: /* SQSHRN / UQSHRN */
    case 0x13: /* SQRSHRN / UQRSHRN */
        handle_vec_simd_sqshrn(s, false, is_q, is_u, is_u, immh, immb,
                               opcode, rn, rd);
        break;
    case 0x14: /* SSHLL / USHLL */
        handle_vec_simd_wshli(s, is_q, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x1c: /* SCVTF / UCVTF (fixed-point) */
        handle_simd_shift_intfp_conv(s, false, is_q, is_u, immh, immb,
                                     opcode, rn, rd);
        break;
    case 0xc: /* SQSHLU */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        handle_simd_qshl(s, false, is_q, false, true, immh, immb, rn, rd);
        break;
    case 0xe: /* SQSHL / UQSHL */
        handle_simd_qshl(s, false, is_q, is_u, is_u, immh, immb, rn, rd);
        break;
    case 0x1f: /* FCVTZS / FCVTZU (fixed-point) */
        handle_simd_shift_fpint_conv(s, false, is_q, is_u, immh, immb, rn, rd);
        return;
    default:
        unallocated_encoding(s);
        return;
    }
}
10848
10849
10850
10851
10852static void gen_neon_addl(int size, bool is_sub, TCGv_i64 tcg_res,
10853 TCGv_i64 tcg_op1, TCGv_i64 tcg_op2)
10854{
10855 static NeonGenTwo64OpFn * const fns[3][2] = {
10856 { gen_helper_neon_addl_u16, gen_helper_neon_subl_u16 },
10857 { gen_helper_neon_addl_u32, gen_helper_neon_subl_u32 },
10858 { tcg_gen_add_i64, tcg_gen_sub_i64 },
10859 };
10860 NeonGenTwo64OpFn *genfn;
10861 assert(size < 3);
10862
10863 genfn = fns[size][is_sub];
10864 genfn(tcg_res, tcg_op1, tcg_op2);
10865}
10866
static void handle_3rd_widening(DisasContext *s, int is_q, int is_u, int size,
                                int opcode, int rd, int rn, int rm)
{
    /* 3-reg-different widening insns: 64 x 64 -> 128 */
    TCGv_i64 tcg_res[2];
    int pass, accop;

    tcg_res[0] = tcg_temp_new_i64();
    tcg_res[1] = tcg_temp_new_i64();

    /*
     * Does this op do an adding accumulate (+1), a subtracting
     * accumulate (-1), or no accumulate at all (0)?
     */
    switch (opcode) {
    case 5:
    case 8:
    case 9:
        accop = 1;
        break;
    case 10:
    case 11:
        accop = -1;
        break;
    default:
        accop = 0;
        break;
    }

    if (accop != 0) {
        read_vec_element(s, tcg_res[0], rd, 0, MO_64);
        read_vec_element(s, tcg_res[1], rd, 1, MO_64);
    }

    /*
     * size == 2 means two 32x32->64 operations; this is worth special
     * casing because we can handle it inline with 64-bit TCG arithmetic
     * rather than needing the widening helper functions.
     */
    if (size == 2) {
        for (pass = 0; pass < 2; pass++) {
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
            TCGv_i64 tcg_passres;
            MemOp memop = MO_32 | (is_u ? 0 : MO_SIGN);

            int elt = pass + is_q * 2;   /* "2" forms use the high half */

            read_vec_element(s, tcg_op1, rn, elt, memop);
            read_vec_element(s, tcg_op2, rm, elt, memop);

            if (accop == 0) {
                tcg_passres = tcg_res[pass];
            } else {
                tcg_passres = tcg_temp_new_i64();
            }

            switch (opcode) {
            case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
                tcg_gen_add_i64(tcg_passres, tcg_op1, tcg_op2);
                break;
            case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
                tcg_gen_sub_i64(tcg_passres, tcg_op1, tcg_op2);
                break;
            case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
            case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
            {
                TCGv_i64 tcg_tmp1 = tcg_temp_new_i64();
                TCGv_i64 tcg_tmp2 = tcg_temp_new_i64();

                /* Absolute difference: select whichever of a-b, b-a is >= 0 */
                tcg_gen_sub_i64(tcg_tmp1, tcg_op1, tcg_op2);
                tcg_gen_sub_i64(tcg_tmp2, tcg_op2, tcg_op1);
                tcg_gen_movcond_i64(is_u ? TCG_COND_GEU : TCG_COND_GE,
                                    tcg_passres,
                                    tcg_op1, tcg_op2, tcg_tmp1, tcg_tmp2);
                tcg_temp_free_i64(tcg_tmp1);
                tcg_temp_free_i64(tcg_tmp2);
                break;
            }
            case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
            case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
            case 12: /* SMULL, SMULL2, UMULL, UMULL2 */
                tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2);
                break;
            case 9: /* SQDMLAL, SQDMLAL2 */
            case 11: /* SQDMLSL, SQDMLSL2 */
            case 13: /* SQDMULL, SQDMULL2 */
                tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2);
                /* The doubling is a saturating add of the product to itself */
                gen_helper_neon_addl_saturate_s64(tcg_passres, cpu_env,
                                                  tcg_passres, tcg_passres);
                break;
            default:
                g_assert_not_reached();
            }

            if (opcode == 9 || opcode == 11) {
                /* saturating accumulate ops */
                if (accop < 0) {
                    tcg_gen_neg_i64(tcg_passres, tcg_passres);
                }
                gen_helper_neon_addl_saturate_s64(tcg_res[pass], cpu_env,
                                                  tcg_res[pass], tcg_passres);
            } else if (accop > 0) {
                tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
            } else if (accop < 0) {
                tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
            }

            if (accop != 0) {
                tcg_temp_free_i64(tcg_passres);
            }

            tcg_temp_free_i64(tcg_op1);
            tcg_temp_free_i64(tcg_op2);
        }
    } else {
        /* size 0 or 1, generally via helper functions */
        for (pass = 0; pass < 2; pass++) {
            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
            TCGv_i64 tcg_passres;
            int elt = pass + is_q * 2;

            read_vec_element_i32(s, tcg_op1, rn, elt, MO_32);
            read_vec_element_i32(s, tcg_op2, rm, elt, MO_32);

            if (accop == 0) {
                tcg_passres = tcg_res[pass];
            } else {
                tcg_passres = tcg_temp_new_i64();
            }

            switch (opcode) {
            case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
            case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
            {
                TCGv_i64 tcg_op2_64 = tcg_temp_new_i64();
                static NeonGenWidenFn * const widenfns[2][2] = {
                    { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 },
                    { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 },
                };
                NeonGenWidenFn *widenfn = widenfns[size][is_u];

                widenfn(tcg_op2_64, tcg_op2);
                widenfn(tcg_passres, tcg_op1);
                gen_neon_addl(size, (opcode == 2), tcg_passres,
                              tcg_passres, tcg_op2_64);
                tcg_temp_free_i64(tcg_op2_64);
                break;
            }
            case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
            case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
                if (size == 0) {
                    if (is_u) {
                        gen_helper_neon_abdl_u16(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_abdl_s16(tcg_passres, tcg_op1, tcg_op2);
                    }
                } else {
                    if (is_u) {
                        gen_helper_neon_abdl_u32(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_abdl_s32(tcg_passres, tcg_op1, tcg_op2);
                    }
                }
                break;
            case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
            case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
            case 12: /* SMULL, SMULL2, UMULL, UMULL2 */
                if (size == 0) {
                    if (is_u) {
                        gen_helper_neon_mull_u8(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_mull_s8(tcg_passres, tcg_op1, tcg_op2);
                    }
                } else {
                    if (is_u) {
                        gen_helper_neon_mull_u16(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2);
                    }
                }
                break;
            case 9: /* SQDMLAL, SQDMLAL2 */
            case 11: /* SQDMLSL, SQDMLSL2 */
            case 13: /* SQDMULL, SQDMULL2 */
                assert(size == 1);
                gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2);
                /* The doubling is a saturating add of the product to itself */
                gen_helper_neon_addl_saturate_s32(tcg_passres, cpu_env,
                                                  tcg_passres, tcg_passres);
                break;
            default:
                g_assert_not_reached();
            }
            tcg_temp_free_i32(tcg_op1);
            tcg_temp_free_i32(tcg_op2);

            if (accop != 0) {
                if (opcode == 9 || opcode == 11) {
                    /* saturating accumulate ops */
                    if (accop < 0) {
                        gen_helper_neon_negl_u32(tcg_passres, tcg_passres);
                    }
                    gen_helper_neon_addl_saturate_s32(tcg_res[pass], cpu_env,
                                                      tcg_res[pass],
                                                      tcg_passres);
                } else {
                    gen_neon_addl(size, (accop < 0), tcg_res[pass],
                                  tcg_res[pass], tcg_passres);
                }
                tcg_temp_free_i64(tcg_passres);
            }
        }
    }

    write_vec_element(s, tcg_res[0], rd, 0, MO_64);
    write_vec_element(s, tcg_res[1], rd, 1, MO_64);
    tcg_temp_free_i64(tcg_res[0]);
    tcg_temp_free_i64(tcg_res[1]);
}
11084
/*
 * 3-reg-different "wide" insns: 64 x 128 -> 128.  The rm operand is
 * widened element-by-element and added to (opcode != 3) or subtracted
 * from (opcode == 3) the 64-bit-element rn operand.
 */
static void handle_3rd_wide(DisasContext *s, int is_q, int is_u, int size,
                            int opcode, int rd, int rn, int rm)
{
    TCGv_i64 tcg_res[2];
    int part = is_q ? 2 : 0;   /* "2" forms read the high half of rm */
    int pass;

    for (pass = 0; pass < 2; pass++) {
        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
        TCGv_i32 tcg_op2 = tcg_temp_new_i32();
        TCGv_i64 tcg_op2_wide = tcg_temp_new_i64();
        static NeonGenWidenFn * const widenfns[3][2] = {
            { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 },
            { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 },
            { tcg_gen_ext_i32_i64, tcg_gen_extu_i32_i64 },
        };
        NeonGenWidenFn *widenfn = widenfns[size][is_u];

        read_vec_element(s, tcg_op1, rn, pass, MO_64);
        read_vec_element_i32(s, tcg_op2, rm, part + pass, MO_32);
        widenfn(tcg_op2_wide, tcg_op2);
        tcg_temp_free_i32(tcg_op2);
        tcg_res[pass] = tcg_temp_new_i64();
        gen_neon_addl(size, (opcode == 3),
                      tcg_res[pass], tcg_op1, tcg_op2_wide);
        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2_wide);
    }

    for (pass = 0; pass < 2; pass++) {
        write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
        tcg_temp_free_i64(tcg_res[pass]);
    }
}
11119
/*
 * Round-and-narrow the high half of a 64-bit value to 32 bits:
 * add the rounding constant (1 << 31) and take bits [63:32].
 * NOTE: clobbers the input temporary 'in'.
 */
static void do_narrow_round_high_u32(TCGv_i32 res, TCGv_i64 in)
{
    tcg_gen_addi_i64(in, in, 1U << 31);
    tcg_gen_extrh_i64_i32(res, in);
}
11125
/*
 * 3-reg-different narrowing insns: 128 x 128 -> 64.  Add (opcode != 6)
 * or subtract (opcode == 6) the wide operands, then narrow to the high
 * half of each element, optionally rounding (is_u selects the rounding
 * variant in the narrowfns table).
 */
static void handle_3rd_narrowing(DisasContext *s, int is_q, int is_u, int size,
                                 int opcode, int rd, int rn, int rm)
{
    TCGv_i32 tcg_res[2];
    int part = is_q ? 2 : 0;   /* "2" forms write the high half of rd */
    int pass;

    for (pass = 0; pass < 2; pass++) {
        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
        TCGv_i64 tcg_op2 = tcg_temp_new_i64();
        TCGv_i64 tcg_wideres = tcg_temp_new_i64();
        static NeonGenNarrowFn * const narrowfns[3][2] = {
            { gen_helper_neon_narrow_high_u8,
              gen_helper_neon_narrow_round_high_u8 },
            { gen_helper_neon_narrow_high_u16,
              gen_helper_neon_narrow_round_high_u16 },
            { tcg_gen_extrh_i64_i32, do_narrow_round_high_u32 },
        };
        NeonGenNarrowFn *gennarrow = narrowfns[size][is_u];

        read_vec_element(s, tcg_op1, rn, pass, MO_64);
        read_vec_element(s, tcg_op2, rm, pass, MO_64);

        gen_neon_addl(size, (opcode == 6), tcg_wideres, tcg_op1, tcg_op2);

        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2);

        tcg_res[pass] = tcg_temp_new_i32();
        gennarrow(tcg_res[pass], tcg_wideres);
        tcg_temp_free_i64(tcg_wideres);
    }

    for (pass = 0; pass < 2; pass++) {
        write_vec_element_i32(s, tcg_res[pass], rd, pass + part, MO_32);
        tcg_temp_free_i32(tcg_res[pass]);
    }
    clear_vec_high(s, is_q, rd);
}
11165
11166
11167
11168
11169
11170
11171
/*
 * AdvSIMD three different.  Fields (per the extract32 calls below):
 * Rd[4:0], Rn[9:5], opcode[15:12], Rm[20:16], size[23:22], U[29], Q[30].
 */
static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn)
{
    /*
     * Instructions in this group fall into three basic classes:
     * widening 64 x 64 -> 128, narrowing 128 x 128 -> 64, and
     * "wide" 64 x 128 -> 128.
     */
    int is_q = extract32(insn, 30, 1);
    int is_u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 4);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    switch (opcode) {
    case 1: /* SADDW, SADDW2, UADDW, UADDW2 */
    case 3: /* SSUBW, SSUBW2, USUBW, USUBW2 */
        /* 64 x 128 -> 128 */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_3rd_wide(s, is_q, is_u, size, opcode, rd, rn, rm);
        break;
    case 4: /* ADDHN, ADDHN2, RADDHN, RADDHN2 */
    case 6: /* SUBHN, SUBHN2, RSUBHN, RSUBHN2 */
        /* 128 x 128 -> 64 */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_3rd_narrowing(s, is_q, is_u, size, opcode, rd, rn, rm);
        break;
    case 14: /* PMULL, PMULL2 */
        if (is_u) {
            unallocated_encoding(s);
            return;
        }
        switch (size) {
        case 0: /* PMULL.P8 */
            if (!fp_access_check(s)) {
                return;
            }
            /* The Q field specifies lo/hi half input for this insn.  */
            gen_gvec_op3_ool(s, true, rd, rn, rm, is_q,
                             gen_helper_neon_pmull_h);
            break;

        case 3: /* PMULL.P64 */
            if (!dc_isar_feature(aa64_pmull, s)) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            /* The Q field specifies lo/hi half input for this insn.  */
            gen_gvec_op3_ool(s, true, rd, rn, rm, is_q,
                             gen_helper_gvec_pmull_q);
            break;

        default:
            unallocated_encoding(s);
            break;
        }
        return;
    case 9: /* SQDMLAL, SQDMLAL2 */
    case 11: /* SQDMLSL, SQDMLSL2 */
    case 13: /* SQDMULL, SQDMULL2 */
        if (is_u || size == 0) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
    case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
    case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
    case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
    case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
    case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
    case 12: /* SMULL, SMULL2, UMULL, UMULL2 */
        /* 64 x 64 -> 128 */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }

        handle_3rd_widening(s, is_q, is_u, size, opcode, rd, rn, rm);
        break;
    default:
        /* opcode 15 not allocated */
        unallocated_encoding(s);
        break;
    }
}
11282
11283
11284static void disas_simd_3same_logic(DisasContext *s, uint32_t insn)
11285{
11286 int rd = extract32(insn, 0, 5);
11287 int rn = extract32(insn, 5, 5);
11288 int rm = extract32(insn, 16, 5);
11289 int size = extract32(insn, 22, 2);
11290 bool is_u = extract32(insn, 29, 1);
11291 bool is_q = extract32(insn, 30, 1);
11292
11293 if (!fp_access_check(s)) {
11294 return;
11295 }
11296
11297 switch (size + 4 * is_u) {
11298 case 0:
11299 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_and, 0);
11300 return;
11301 case 1:
11302 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_andc, 0);
11303 return;
11304 case 2:
11305 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_or, 0);
11306 return;
11307 case 3:
11308 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_orc, 0);
11309 return;
11310 case 4:
11311 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_xor, 0);
11312 return;
11313
11314 case 5:
11315 gen_gvec_fn4(s, is_q, rd, rd, rn, rm, tcg_gen_gvec_bitsel, 0);
11316 return;
11317 case 6:
11318 gen_gvec_fn4(s, is_q, rd, rm, rn, rd, tcg_gen_gvec_bitsel, 0);
11319 return;
11320 case 7:
11321 gen_gvec_fn4(s, is_q, rd, rm, rd, rn, tcg_gen_gvec_bitsel, 0);
11322 return;
11323
11324 default:
11325 g_assert_not_reached();
11326 }
11327}
11328
11329
11330
11331
11332
11333
/* Pairwise op subgroup of C3.6.16.
 *
 * Called for the integer pairwise ops from disas_simd_three_reg_same
 * and, with bit 6 set in the opcode, for the float pairwise ops from
 * disas_simd_3same_float.
 */
static void handle_simd_3same_pair(DisasContext *s, int is_q, int u, int opcode,
                                   int size, int rn, int rm, int rd)
{
    TCGv_ptr fpst;
    int pass;

    /* Floating point operations need fpst */
    if (opcode >= 0x58) {
        fpst = fpstatus_ptr(FPST_FPCR);
    } else {
        fpst = NULL;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* These operations work on the concatenated rm:rn, with each pair of
     * adjacent elements being operated on to produce an element in the result.
     */
    if (size == 3) {
        TCGv_i64 tcg_res[2];

        for (pass = 0; pass < 2; pass++) {
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
            int passreg = (pass == 0) ? rn : rm;

            read_vec_element(s, tcg_op1, passreg, 0, MO_64);
            read_vec_element(s, tcg_op2, passreg, 1, MO_64);
            tcg_res[pass] = tcg_temp_new_i64();

            switch (opcode) {
            case 0x17: /* ADDP */
                tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2);
                break;
            case 0x58: /* FMAXNMP */
                gen_helper_vfp_maxnumd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x5a: /* FADDP */
                gen_helper_vfp_addd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x5e: /* FMAXP */
                gen_helper_vfp_maxd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x78: /* FMINNMP */
                gen_helper_vfp_minnumd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x7e: /* FMINP */
                gen_helper_vfp_mind(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            tcg_temp_free_i64(tcg_op1);
            tcg_temp_free_i64(tcg_op2);
        }

        /* Write results only after all inputs are consumed: rd may
         * overlap rn or rm.
         */
        for (pass = 0; pass < 2; pass++) {
            write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
            tcg_temp_free_i64(tcg_res[pass]);
        }
    } else {
        int maxpass = is_q ? 4 : 2;
        TCGv_i32 tcg_res[4];

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
            NeonGenTwoOpFn *genfn = NULL;
            int passreg = pass < (maxpass / 2) ? rn : rm;
            int passelt = (is_q && (pass & 1)) ? 2 : 0;

            read_vec_element_i32(s, tcg_op1, passreg, passelt, MO_32);
            read_vec_element_i32(s, tcg_op2, passreg, passelt + 1, MO_32);
            tcg_res[pass] = tcg_temp_new_i32();

            switch (opcode) {
            case 0x17: /* ADDP */
            {
                static NeonGenTwoOpFn * const fns[3] = {
                    gen_helper_neon_padd_u8,
                    gen_helper_neon_padd_u16,
                    tcg_gen_add_i32,
                };
                genfn = fns[size];
                break;
            }
            case 0x14: /* SMAXP, UMAXP */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_pmax_s8, gen_helper_neon_pmax_u8 },
                    { gen_helper_neon_pmax_s16, gen_helper_neon_pmax_u16 },
                    { tcg_gen_smax_i32, tcg_gen_umax_i32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0x15: /* SMINP, UMINP */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_pmin_s8, gen_helper_neon_pmin_u8 },
                    { gen_helper_neon_pmin_s16, gen_helper_neon_pmin_u16 },
                    { tcg_gen_smin_i32, tcg_gen_umin_i32 },
                };
                genfn = fns[size][u];
                break;
            }
            /* The FP operations are all on single floats (32 bit) */
            case 0x58: /* FMAXNMP */
                gen_helper_vfp_maxnums(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x5a: /* FADDP */
                gen_helper_vfp_adds(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x5e: /* FMAXP */
                gen_helper_vfp_maxs(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x78: /* FMINNMP */
                gen_helper_vfp_minnums(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x7e: /* FMINP */
                gen_helper_vfp_mins(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            /* FP ops called directly above, otherwise call now */
            if (genfn) {
                genfn(tcg_res[pass], tcg_op1, tcg_op2);
            }

            tcg_temp_free_i32(tcg_op1);
            tcg_temp_free_i32(tcg_op2);
        }

        for (pass = 0; pass < maxpass; pass++) {
            write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32);
            tcg_temp_free_i32(tcg_res[pass]);
        }
        clear_vec_high(s, is_q, rd);
    }

    if (fpst) {
        tcg_temp_free_ptr(fpst);
    }
}
11483
11484
/* Floating point op subgroup of C3.6.16 (AdvSIMD three same). */
static void disas_simd_3same_float(DisasContext *s, uint32_t insn)
{
    /* For floating point ops, the U, size[1] and opcode bits
     * together indicate the operation. size[0] indicates single
     * or double precision.
     */
    int fpopcode = extract32(insn, 11, 5)
        | (extract32(insn, 23, 1) << 5)
        | (extract32(insn, 29, 1) << 6);
    int is_q = extract32(insn, 30, 1);
    int size = extract32(insn, 22, 1);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    int datasize = is_q ? 128 : 64;
    int esize = 32 << size;
    int elements = datasize / esize;

    if (size == 1 && !is_q) {
        unallocated_encoding(s);
        return;
    }

    switch (fpopcode) {
    case 0x58: /* FMAXNMP */
    case 0x5a: /* FADDP */
    case 0x5e: /* FMAXP */
    case 0x78: /* FMINNMP */
    case 0x7e: /* FMINP */
        if (size && !is_q) {
            unallocated_encoding(s);
            return;
        }
        handle_simd_3same_pair(s, is_q, 0, fpopcode, size ? MO_64 : MO_32,
                               rn, rm, rd);
        return;
    case 0x1b: /* FMULX */
    case 0x1f: /* FRECPS */
    case 0x3f: /* FRSQRTS */
    case 0x5d: /* FACGE */
    case 0x7d: /* FACGT */
    case 0x19: /* FMLA */
    case 0x39: /* FMLS */
    case 0x18: /* FMAXNM */
    case 0x1a: /* FADD */
    case 0x1c: /* FCMEQ */
    case 0x1e: /* FMAX */
    case 0x38: /* FMINNM */
    case 0x3a: /* FSUB */
    case 0x3e: /* FMIN */
    case 0x5b: /* FMUL */
    case 0x5c: /* FCMGE */
    case 0x5f: /* FDIV */
    case 0x7a: /* FABD */
    case 0x7c: /* FCMGT */
        if (!fp_access_check(s)) {
            return;
        }
        handle_3same_float(s, size, elements, fpopcode, rd, rn, rm);
        return;

    case 0x1d: /* FMLAL  */
    case 0x3d: /* FMLSL  */
    case 0x59: /* FMLAL2 */
    case 0x79: /* FMLSL2 */
        /* Widening fp16 multiply-add, gated on FEAT_FHM */
        if (size & 1 || !dc_isar_feature(aa64_fhm, s)) {
            unallocated_encoding(s);
            return;
        }
        if (fp_access_check(s)) {
            /* is_2 selects the "2" (top-half input) form, is_s the
             * subtracting (FMLSL) form; both go to the helper as data.
             */
            int is_s = extract32(insn, 23, 1);
            int is_2 = extract32(insn, 29, 1);
            int data = (is_2 << 1) | is_s;
            tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
                               vec_full_reg_offset(s, rn),
                               vec_full_reg_offset(s, rm), cpu_env,
                               is_q ? 16 : 8, vec_full_reg_size(s),
                               data, gen_helper_gvec_fmlal_a64);
        }
        return;

    default:
        unallocated_encoding(s);
        return;
    }
}
11572
11573
/* Integer op subgroup of C3.6.16 (AdvSIMD three same). */
static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
{
    int is_q = extract32(insn, 30, 1);
    int u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 11, 5);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    int pass;
    TCGCond cond;

    /* First weed out the unallocated size encodings per opcode. */
    switch (opcode) {
    case 0x13: /* MUL, PMUL */
        if (u && size != 0) { /* PMUL only exists for bytes */
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x0: /* SHADD, UHADD */
    case 0x2: /* SRHADD, URHADD */
    case 0x4: /* SHSUB, UHSUB */
    case 0xc: /* SMAX, UMAX */
    case 0xd: /* SMIN, UMIN */
    case 0xe: /* SABD, UABD */
    case 0xf: /* SABA, UABA */
    case 0x12: /* MLA, MLS */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x16: /* SQDMULH, SQRDMULH */
        if (size == 0 || size == 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        if (size == 3 && !is_q) {
            /* 64-bit element ops only exist in the 128-bit form */
            unallocated_encoding(s);
            return;
        }
        break;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* Ops entirely expressible as gvec operations are emitted here
     * and return; the remainder fall through to the per-pass loops.
     */
    switch (opcode) {
    case 0x01: /* SQADD, UQADD */
        if (u) {
            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_uqadd_qc, size);
        } else {
            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sqadd_qc, size);
        }
        return;
    case 0x05: /* SQSUB, UQSUB */
        if (u) {
            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_uqsub_qc, size);
        } else {
            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sqsub_qc, size);
        }
        return;
    case 0x08: /* SSHL, USHL */
        if (u) {
            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_ushl, size);
        } else {
            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sshl, size);
        }
        return;
    case 0x0c: /* SMAX, UMAX */
        if (u) {
            gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_umax, size);
        } else {
            gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_smax, size);
        }
        return;
    case 0x0d: /* SMIN, UMIN */
        if (u) {
            gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_umin, size);
        } else {
            gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_smin, size);
        }
        return;
    case 0xe: /* SABD, UABD */
        if (u) {
            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_uabd, size);
        } else {
            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sabd, size);
        }
        return;
    case 0xf: /* SABA, UABA */
        if (u) {
            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_uaba, size);
        } else {
            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_saba, size);
        }
        return;
    case 0x10: /* ADD, SUB */
        if (u) {
            gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_sub, size);
        } else {
            gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_add, size);
        }
        return;
    case 0x13: /* MUL, PMUL */
        if (!u) { /* MUL */
            gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_mul, size);
        } else { /* PMUL */
            gen_gvec_op3_ool(s, is_q, rd, rn, rm, 0, gen_helper_gvec_pmul_b);
        }
        return;
    case 0x12: /* MLA, MLS */
        if (u) {
            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_mls, size);
        } else {
            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_mla, size);
        }
        return;
    case 0x16: /* SQDMULH, SQRDMULH */
        {
            static gen_helper_gvec_3_ptr * const fns[2][2] = {
                { gen_helper_neon_sqdmulh_h, gen_helper_neon_sqrdmulh_h },
                { gen_helper_neon_sqdmulh_s, gen_helper_neon_sqrdmulh_s },
            };
            gen_gvec_op3_qc(s, is_q, rd, rn, rm, fns[size - 1][u]);
        }
        return;
    case 0x11:
        if (!u) { /* CMTST */
            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_cmtst, size);
            return;
        }
        /* else CMEQ */
        cond = TCG_COND_EQ;
        goto do_gvec_cmp;
    case 0x06: /* CMGT, CMHI */
        cond = u ? TCG_COND_GTU : TCG_COND_GT;
        goto do_gvec_cmp;
    case 0x07: /* CMGE, CMHS */
        cond = u ? TCG_COND_GEU : TCG_COND_GE;
    do_gvec_cmp:
        tcg_gen_gvec_cmp(cond, size, vec_full_reg_offset(s, rd),
                         vec_full_reg_offset(s, rn),
                         vec_full_reg_offset(s, rm),
                         is_q ? 16 : 8, vec_full_reg_size(s));
        return;
    }

    if (size == 3) {
        /* size == 3 && !is_q was rejected above, so only 128-bit here */
        assert(is_q);
        for (pass = 0; pass < 2; pass++) {
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
            TCGv_i64 tcg_res = tcg_temp_new_i64();

            read_vec_element(s, tcg_op1, rn, pass, MO_64);
            read_vec_element(s, tcg_op2, rm, pass, MO_64);

            handle_3same_64(s, opcode, u, tcg_res, tcg_op1, tcg_op2);

            write_vec_element(s, tcg_res, rd, pass, MO_64);

            tcg_temp_free_i64(tcg_res);
            tcg_temp_free_i64(tcg_op1);
            tcg_temp_free_i64(tcg_op2);
        }
    } else {
        for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();
            NeonGenTwoOpFn *genfn = NULL;
            NeonGenTwoOpEnvFn *genenvfn = NULL;

            read_vec_element_i32(s, tcg_op1, rn, pass, MO_32);
            read_vec_element_i32(s, tcg_op2, rm, pass, MO_32);

            switch (opcode) {
            case 0x0: /* SHADD, UHADD */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_hadd_s8, gen_helper_neon_hadd_u8 },
                    { gen_helper_neon_hadd_s16, gen_helper_neon_hadd_u16 },
                    { gen_helper_neon_hadd_s32, gen_helper_neon_hadd_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0x2: /* SRHADD, URHADD */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_rhadd_s8, gen_helper_neon_rhadd_u8 },
                    { gen_helper_neon_rhadd_s16, gen_helper_neon_rhadd_u16 },
                    { gen_helper_neon_rhadd_s32, gen_helper_neon_rhadd_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0x4: /* SHSUB, UHSUB */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_hsub_s8, gen_helper_neon_hsub_u8 },
                    { gen_helper_neon_hsub_s16, gen_helper_neon_hsub_u16 },
                    { gen_helper_neon_hsub_s32, gen_helper_neon_hsub_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0x9: /* SQSHL, UQSHL */
            {
                static NeonGenTwoOpEnvFn * const fns[3][2] = {
                    { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 },
                    { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 },
                    { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 },
                };
                genenvfn = fns[size][u];
                break;
            }
            case 0xa: /* SRSHL, URSHL */
            {
                static NeonGenTwoOpFn * const fns[3][2] = {
                    { gen_helper_neon_rshl_s8, gen_helper_neon_rshl_u8 },
                    { gen_helper_neon_rshl_s16, gen_helper_neon_rshl_u16 },
                    { gen_helper_neon_rshl_s32, gen_helper_neon_rshl_u32 },
                };
                genfn = fns[size][u];
                break;
            }
            case 0xb: /* SQRSHL, UQRSHL */
            {
                static NeonGenTwoOpEnvFn * const fns[3][2] = {
                    { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 },
                    { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 },
                    { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 },
                };
                genenvfn = fns[size][u];
                break;
            }
            default:
                g_assert_not_reached();
            }

            /* Saturating ops need cpu_env to update QC */
            if (genenvfn) {
                genenvfn(tcg_res, cpu_env, tcg_op1, tcg_op2);
            } else {
                genfn(tcg_res, tcg_op1, tcg_op2);
            }

            write_vec_element_i32(s, tcg_res, rd, pass, MO_32);

            tcg_temp_free_i32(tcg_res);
            tcg_temp_free_i32(tcg_op1);
            tcg_temp_free_i32(tcg_op2);
        }
    }
    clear_vec_high(s, is_q, rd);
}
11834
11835
11836
11837
11838
11839
11840
11841static void disas_simd_three_reg_same(DisasContext *s, uint32_t insn)
11842{
11843 int opcode = extract32(insn, 11, 5);
11844
11845 switch (opcode) {
11846 case 0x3:
11847 disas_simd_3same_logic(s, insn);
11848 break;
11849 case 0x17:
11850 case 0x14:
11851 case 0x15:
11852 {
11853
11854 int is_q = extract32(insn, 30, 1);
11855 int u = extract32(insn, 29, 1);
11856 int size = extract32(insn, 22, 2);
11857 int rm = extract32(insn, 16, 5);
11858 int rn = extract32(insn, 5, 5);
11859 int rd = extract32(insn, 0, 5);
11860 if (opcode == 0x17) {
11861 if (u || (size == 3 && !is_q)) {
11862 unallocated_encoding(s);
11863 return;
11864 }
11865 } else {
11866 if (size == 3) {
11867 unallocated_encoding(s);
11868 return;
11869 }
11870 }
11871 handle_simd_3same_pair(s, is_q, u, opcode, size, rn, rm, rd);
11872 break;
11873 }
11874 case 0x18 ... 0x31:
11875
11876 disas_simd_3same_float(s, insn);
11877 break;
11878 default:
11879 disas_simd_3same_int(s, insn);
11880 break;
11881 }
11882}
11883
11884
11885
11886
11887
11888
11889
11890
11891
11892
11893
11894
11895
/* AdvSIMD three same fp16 — half-precision forms of the three-same
 * floating point ops, gated on FEAT_FP16.
 */
static void disas_simd_three_reg_same_fp16(DisasContext *s, uint32_t insn)
{
    int opcode = extract32(insn, 11, 3);
    int u = extract32(insn, 29, 1);
    int a = extract32(insn, 23, 1);
    int is_q = extract32(insn, 30, 1);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    /*
     * For these floating point ops, the U, a and opcode bits
     * together indicate the operation.
     */
    int fpopcode = opcode | (a << 3) | (u << 4);
    int datasize = is_q ? 128 : 64;
    int elements = datasize / 16;
    bool pairwise;
    TCGv_ptr fpst;
    int pass;

    switch (fpopcode) {
    case 0x0: /* FMAXNM */
    case 0x1: /* FMLA */
    case 0x2: /* FADD */
    case 0x3: /* FMULX */
    case 0x4: /* FCMEQ */
    case 0x6: /* FMAX */
    case 0x7: /* FRECPS */
    case 0x8: /* FMINNM */
    case 0x9: /* FMLS */
    case 0xa: /* FSUB */
    case 0xe: /* FMIN */
    case 0xf: /* FRSQRTS */
    case 0x13: /* FMUL */
    case 0x14: /* FCMGE */
    case 0x15: /* FACGE */
    case 0x17: /* FDIV */
    case 0x1a: /* FABD */
    case 0x1c: /* FCMGT */
    case 0x1d: /* FACGT */
        pairwise = false;
        break;
    case 0x10: /* FMAXNMP */
    case 0x12: /* FADDP */
    case 0x16: /* FMAXP */
    case 0x18: /* FMINNMP */
    case 0x1e: /* FMINP */
        pairwise = true;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!dc_isar_feature(aa64_fp16, s)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    fpst = fpstatus_ptr(FPST_FPCR_F16);

    if (pairwise) {
        /* Pairwise ops operate on adjacent element pairs of the
         * concatenated rm:rn.
         */
        int maxpass = is_q ? 8 : 4;
        TCGv_i32 tcg_op1 = tcg_temp_new_i32();
        TCGv_i32 tcg_op2 = tcg_temp_new_i32();
        TCGv_i32 tcg_res[8];

        for (pass = 0; pass < maxpass; pass++) {
            int passreg = pass < (maxpass / 2) ? rn : rm;
            int passelt = (pass << 1) & (maxpass - 1);

            read_vec_element_i32(s, tcg_op1, passreg, passelt, MO_16);
            read_vec_element_i32(s, tcg_op2, passreg, passelt + 1, MO_16);
            tcg_res[pass] = tcg_temp_new_i32();

            switch (fpopcode) {
            case 0x10: /* FMAXNMP */
                gen_helper_advsimd_maxnumh(tcg_res[pass], tcg_op1, tcg_op2,
                                           fpst);
                break;
            case 0x12: /* FADDP */
                gen_helper_advsimd_addh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x16: /* FMAXP */
                gen_helper_advsimd_maxh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            case 0x18: /* FMINNMP */
                gen_helper_advsimd_minnumh(tcg_res[pass], tcg_op1, tcg_op2,
                                           fpst);
                break;
            case 0x1e: /* FMINP */
                gen_helper_advsimd_minh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }
        }

        /* Write back only after all inputs are read, as rd may
         * overlap rn or rm.
         */
        for (pass = 0; pass < maxpass; pass++) {
            write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_16);
            tcg_temp_free_i32(tcg_res[pass]);
        }

        tcg_temp_free_i32(tcg_op1);
        tcg_temp_free_i32(tcg_op2);

    } else {
        for (pass = 0; pass < elements; pass++) {
            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op1, rn, pass, MO_16);
            read_vec_element_i32(s, tcg_op2, rm, pass, MO_16);

            switch (fpopcode) {
            case 0x0: /* FMAXNM */
                gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1: /* FMLA */
                read_vec_element_i32(s, tcg_res, rd, pass, MO_16);
                gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_res,
                                           fpst);
                break;
            case 0x2: /* FADD */
                gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3: /* FMULX */
                gen_helper_advsimd_mulxh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x4: /* FCMEQ */
                gen_helper_advsimd_ceq_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x6: /* FMAX */
                gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7: /* FRECPS */
                gen_helper_recpsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x8: /* FMINNM */
                gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x9: /* FMLS */
                /* As usual for ARM, separate negation for fused multiply-add */
                tcg_gen_xori_i32(tcg_op1, tcg_op1, 0x8000);
                read_vec_element_i32(s, tcg_res, rd, pass, MO_16);
                gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_res,
                                           fpst);
                break;
            case 0xa: /* FSUB */
                gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xe: /* FMIN */
                gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xf: /* FRSQRTS */
                gen_helper_rsqrtsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x13: /* FMUL */
                gen_helper_advsimd_mulh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x14: /* FCMGE */
                gen_helper_advsimd_cge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x15: /* FACGE */
                gen_helper_advsimd_acge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x17: /* FDIV */
                gen_helper_advsimd_divh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1a: /* FABD: subtract then clear the sign bit */
                gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
                tcg_gen_andi_i32(tcg_res, tcg_res, 0x7fff);
                break;
            case 0x1c: /* FCMGT */
                gen_helper_advsimd_cgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1d: /* FACGT */
                gen_helper_advsimd_acgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            write_vec_element_i32(s, tcg_res, rd, pass, MO_16);
            tcg_temp_free_i32(tcg_res);
            tcg_temp_free_i32(tcg_op1);
            tcg_temp_free_i32(tcg_op2);
        }
    }

    tcg_temp_free_ptr(fpst);

    clear_vec_high(s, is_q, rd);
}
12095
12096
12097
12098
12099
12100
12101
/* AdvSIMD three same extra — later additions to the three-same group,
 * each individually gated on an ID-register feature bit.
 */
static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 4);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    bool u = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);
    bool feature;
    int rot;

    switch (u * 16 + opcode) {
    case 0x10: /* SQRDMLAH (vector) */
    case 0x11: /* SQRDMLSH (vector) */
        if (size != 1 && size != 2) {
            unallocated_encoding(s);
            return;
        }
        feature = dc_isar_feature(aa64_rdm, s);
        break;
    case 0x02: /* SDOT (vector) */
    case 0x12: /* UDOT (vector) */
        if (size != MO_32) {
            unallocated_encoding(s);
            return;
        }
        feature = dc_isar_feature(aa64_dp, s);
        break;
    case 0x03: /* USDOT */
        if (size != MO_32) {
            unallocated_encoding(s);
            return;
        }
        feature = dc_isar_feature(aa64_i8mm, s);
        break;
    case 0x04: /* SMMLA */
    case 0x14: /* UMMLA */
    case 0x05: /* USMMLA */
        if (!is_q || size != MO_32) {
            unallocated_encoding(s);
            return;
        }
        feature = dc_isar_feature(aa64_i8mm, s);
        break;
    case 0x18: /* FCMLA, #0 */
    case 0x19: /* FCMLA, #90 */
    case 0x1a: /* FCMLA, #180 */
    case 0x1b: /* FCMLA, #270 */
    case 0x1c: /* FCADD, #90 */
    case 0x1e: /* FCADD, #270 */
        if (size == 0
            || (size == 1 && !dc_isar_feature(aa64_fp16, s))
            || (size == 3 && !is_q)) {
            unallocated_encoding(s);
            return;
        }
        feature = dc_isar_feature(aa64_fcma, s);
        break;
    case 0x1d: /* BFMMLA */
        if (size != MO_16 || !is_q) {
            unallocated_encoding(s);
            return;
        }
        feature = dc_isar_feature(aa64_bf16, s);
        break;
    case 0x1f:
        switch (size) {
        case 1: /* BFDOT */
        case 3: /* BFMLAL{B,T} */
            feature = dc_isar_feature(aa64_bf16, s);
            break;
        default:
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }
    if (!feature) {
        unallocated_encoding(s);
        return;
    }
    if (!fp_access_check(s)) {
        return;
    }

    switch (opcode) {
    case 0x0: /* SQRDMLAH (vector) */
        gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sqrdmlah_qc, size);
        return;

    case 0x1: /* SQRDMLSH (vector) */
        gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sqrdmlsh_qc, size);
        return;

    case 0x2: /* SDOT / UDOT */
        gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0,
                         u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b);
        return;

    case 0x3: /* USDOT */
        gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0, gen_helper_gvec_usdot_b);
        return;

    case 0x04: /* SMMLA, UMMLA */
        gen_gvec_op4_ool(s, 1, rd, rn, rm, rd, 0,
                         u ? gen_helper_gvec_ummla_b
                         : gen_helper_gvec_smmla_b);
        return;
    case 0x05: /* USMMLA */
        gen_gvec_op4_ool(s, 1, rd, rn, rm, rd, 0, gen_helper_gvec_usmmla_b);
        return;

    case 0x8: /* FCMLA, #0 */
    case 0x9: /* FCMLA, #90 */
    case 0xa: /* FCMLA, #180 */
    case 0xb: /* FCMLA, #270 */
        rot = extract32(opcode, 0, 2);
        switch (size) {
        case 1:
            gen_gvec_op4_fpst(s, is_q, rd, rn, rm, rd, true, rot,
                              gen_helper_gvec_fcmlah);
            break;
        case 2:
            gen_gvec_op4_fpst(s, is_q, rd, rn, rm, rd, false, rot,
                              gen_helper_gvec_fcmlas);
            break;
        case 3:
            gen_gvec_op4_fpst(s, is_q, rd, rn, rm, rd, false, rot,
                              gen_helper_gvec_fcmlad);
            break;
        default:
            g_assert_not_reached();
        }
        return;

    case 0xc: /* FCADD, #90 */
    case 0xe: /* FCADD, #270 */
        rot = extract32(opcode, 1, 1);
        switch (size) {
        case 1:
            gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
                              gen_helper_gvec_fcaddh);
            break;
        case 2:
            gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
                              gen_helper_gvec_fcadds);
            break;
        case 3:
            gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
                              gen_helper_gvec_fcaddd);
            break;
        default:
            g_assert_not_reached();
        }
        return;

    case 0xd: /* BFMMLA */
        gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0, gen_helper_gvec_bfmmla);
        return;
    case 0xf:
        switch (size) {
        case 1: /* BFDOT */
            gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0, gen_helper_gvec_bfdot);
            break;
        case 3: /* BFMLAL{B,T} */
            gen_gvec_op4_fpst(s, 1, rd, rn, rm, rd, false, is_q,
                              gen_helper_gvec_bfmlal);
            break;
        default:
            g_assert_not_reached();
        }
        return;

    default:
        g_assert_not_reached();
    }
}
12283
12284static void handle_2misc_widening(DisasContext *s, int opcode, bool is_q,
12285 int size, int rn, int rd)
12286{
12287
12288
12289
12290
12291 int pass;
12292
12293 if (size == 3) {
12294
12295 TCGv_i64 tcg_res[2];
12296 int srcelt = is_q ? 2 : 0;
12297
12298 for (pass = 0; pass < 2; pass++) {
12299 TCGv_i32 tcg_op = tcg_temp_new_i32();
12300 tcg_res[pass] = tcg_temp_new_i64();
12301
12302 read_vec_element_i32(s, tcg_op, rn, srcelt + pass, MO_32);
12303 gen_helper_vfp_fcvtds(tcg_res[pass], tcg_op, cpu_env);
12304 tcg_temp_free_i32(tcg_op);
12305 }
12306 for (pass = 0; pass < 2; pass++) {
12307 write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
12308 tcg_temp_free_i64(tcg_res[pass]);
12309 }
12310 } else {
12311
12312 int srcelt = is_q ? 4 : 0;
12313 TCGv_i32 tcg_res[4];
12314 TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
12315 TCGv_i32 ahp = get_ahp_flag();
12316
12317 for (pass = 0; pass < 4; pass++) {
12318 tcg_res[pass] = tcg_temp_new_i32();
12319
12320 read_vec_element_i32(s, tcg_res[pass], rn, srcelt + pass, MO_16);
12321 gen_helper_vfp_fcvt_f16_to_f32(tcg_res[pass], tcg_res[pass],
12322 fpst, ahp);
12323 }
12324 for (pass = 0; pass < 4; pass++) {
12325 write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32);
12326 tcg_temp_free_i32(tcg_res[pass]);
12327 }
12328
12329 tcg_temp_free_ptr(fpst);
12330 tcg_temp_free_i32(ahp);
12331 }
12332}
12333
/* Element-reverse ops (REV16/REV32/REV64 family): reverse the order of
 * the size-sized elements within each larger container.  Which insn
 * this is depends on the opcode/u combination passed in by the caller.
 */
static void handle_rev(DisasContext *s, int opcode, bool u,
                       bool is_q, int size, int rn, int rd)
{
    int op = (opcode << 1) | u;
    int opsz = op + size;
    int grp_size = 3 - opsz; /* log2 of the container size in bytes */
    int dsize = is_q ? 128 : 64;
    int i;

    /* The element must be strictly smaller than the container */
    if (opsz >= 3) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (size == 0) {
        /* Special case bytes, use bswap op on each group of elements */
        int groups = dsize / (8 << grp_size);

        for (i = 0; i < groups; i++) {
            TCGv_i64 tcg_tmp = tcg_temp_new_i64();

            read_vec_element(s, tcg_tmp, rn, i, grp_size);
            switch (grp_size) {
            case MO_16:
                tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp, TCG_BSWAP_IZ);
                break;
            case MO_32:
                tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp, TCG_BSWAP_IZ);
                break;
            case MO_64:
                tcg_gen_bswap64_i64(tcg_tmp, tcg_tmp);
                break;
            default:
                g_assert_not_reached();
            }
            write_vec_element(s, tcg_tmp, rd, i, grp_size);
            tcg_temp_free_i64(tcg_tmp);
        }
        clear_vec_high(s, is_q, rd);
    } else {
        /* General case: deposit each source element into its reversed
         * position in a zeroed 128-bit accumulator, then write back.
         */
        int revmask = (1 << grp_size) - 1; /* flips within-group index bits */
        int esize = 8 << size;
        int elements = dsize / esize;
        TCGv_i64 tcg_rn = tcg_temp_new_i64();
        TCGv_i64 tcg_rd = tcg_const_i64(0);
        TCGv_i64 tcg_rd_hi = tcg_const_i64(0);

        for (i = 0; i < elements; i++) {
            int e_rev = (i & 0xf) ^ revmask; /* destination element index */
            int off = e_rev * esize;         /* destination bit offset */
            read_vec_element(s, tcg_rn, rn, i, size);
            if (off >= 64) {
                tcg_gen_deposit_i64(tcg_rd_hi, tcg_rd_hi,
                                    tcg_rn, off - 64, esize);
            } else {
                tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, off, esize);
            }
        }
        write_vec_element(s, tcg_rd, rd, 0, MO_64);
        write_vec_element(s, tcg_rd_hi, rd, 1, MO_64);

        tcg_temp_free_i64(tcg_rd_hi);
        tcg_temp_free_i64(tcg_rd);
        tcg_temp_free_i64(tcg_rn);
    }
}
12404
static void handle_2misc_pairwise(DisasContext *s, int opcode, bool u,
                                  bool is_q, int size, int rn, int rd)
{
    /* Implement the pairwise operations from 2-misc:
     * SADDLP, UADDLP, SADALP, UADALP.
     * These all add pairs of elements in the input to produce a
     * double-width result element in the output (possibly accumulating).
     */
    bool accum = (opcode == 0x6); /* SADALP/UADALP accumulate into rd */
    int maxpass = is_q ? 2 : 1;
    int pass;
    TCGv_i64 tcg_res[2];

    if (size == 2) {
        /* 32 + 32 -> 64 op */
        MemOp memop = size + (u ? 0 : MO_SIGN);

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();

            tcg_res[pass] = tcg_temp_new_i64();

            read_vec_element(s, tcg_op1, rn, pass * 2, memop);
            read_vec_element(s, tcg_op2, rn, pass * 2 + 1, memop);
            tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2);
            if (accum) {
                read_vec_element(s, tcg_op1, rd, pass, MO_64);
                tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
            }

            tcg_temp_free_i64(tcg_op1);
            tcg_temp_free_i64(tcg_op2);
        }
    } else {
        /* 8- and 16-bit elements: helpers pairwise-add a whole 64-bit
         * lane at once.
         */
        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();
            NeonGenOne64OpFn *genfn;
            static NeonGenOne64OpFn * const fns[2][2] = {
                { gen_helper_neon_addlp_s8, gen_helper_neon_addlp_u8 },
                { gen_helper_neon_addlp_s16, gen_helper_neon_addlp_u16 },
            };

            genfn = fns[size][u];

            tcg_res[pass] = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);
            genfn(tcg_res[pass], tcg_op);

            if (accum) {
                read_vec_element(s, tcg_op, rd, pass, MO_64);
                if (size == 0) {
                    gen_helper_neon_addl_u16(tcg_res[pass],
                                             tcg_res[pass], tcg_op);
                } else {
                    gen_helper_neon_addl_u32(tcg_res[pass],
                                             tcg_res[pass], tcg_op);
                }
            }
            tcg_temp_free_i64(tcg_op);
        }
    }
    if (!is_q) {
        /* 64-bit form: the high half of the result register is zeroed */
        tcg_res[1] = tcg_const_i64(0);
    }
    for (pass = 0; pass < 2; pass++) {
        write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
        tcg_temp_free_i64(tcg_res[pass]);
    }
}
12476
12477static void handle_shll(DisasContext *s, bool is_q, int size, int rn, int rd)
12478{
12479
12480 int pass;
12481 int part = is_q ? 2 : 0;
12482 TCGv_i64 tcg_res[2];
12483
12484 for (pass = 0; pass < 2; pass++) {
12485 static NeonGenWidenFn * const widenfns[3] = {
12486 gen_helper_neon_widen_u8,
12487 gen_helper_neon_widen_u16,
12488 tcg_gen_extu_i32_i64,
12489 };
12490 NeonGenWidenFn *widenfn = widenfns[size];
12491 TCGv_i32 tcg_op = tcg_temp_new_i32();
12492
12493 read_vec_element_i32(s, tcg_op, rn, part + pass, MO_32);
12494 tcg_res[pass] = tcg_temp_new_i64();
12495 widenfn(tcg_res[pass], tcg_op);
12496 tcg_gen_shli_i64(tcg_res[pass], tcg_res[pass], 8 << size);
12497
12498 tcg_temp_free_i32(tcg_op);
12499 }
12500
12501 for (pass = 0; pass < 2; pass++) {
12502 write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
12503 tcg_temp_free_i64(tcg_res[pass]);
12504 }
12505}
12506
12507
12508
12509
12510
12511
12512
/* AdvSIMD two reg misc
 *   31  30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
 * | 0 | Q | U | 0 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
 */
static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
{
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    bool u = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    bool need_fpstatus = false;
    bool need_rmode = false;
    int rmode = -1;
    TCGv_i32 tcg_rmode;
    TCGv_ptr tcg_fpstatus;

    switch (opcode) {
    case 0x0: /* REV64, REV32 */
    case 0x1: /* REV16 */
        handle_rev(s, opcode, u, is_q, size, rn, rd);
        return;
    case 0x5: /* CNT, NOT, RBIT */
        if (u && size == 0) {
            /* NOT */
            break;
        } else if (u && size == 1) {
            /* RBIT */
            break;
        } else if (!u && size == 0) {
            /* CNT */
            break;
        }
        unallocated_encoding(s);
        return;
    case 0x12: /* XTN, XTN2, SQXTUN, SQXTUN2 */
    case 0x14: /* SQXTN, SQXTN2, UQXTN, UQXTN2 */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        /* Narrowing 2*size -> size operations */
        handle_2misc_narrow(s, false, opcode, u, is_q, size, rn, rd);
        return;
    case 0x4: /* CLS, CLZ */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x2: /* SADDLP, UADDLP */
    case 0x6: /* SADALP, UADALP */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_2misc_pairwise(s, opcode, u, is_q, size, rn, rd);
        return;
    case 0x13: /* SHLL, SHLL2 */
        if (u == 0 || size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_shll(s, is_q, size, rn, rd);
        return;
    case 0xa: /* CMLT */
        if (u == 1) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x8: /* CMGT, CMGE */
    case 0x9: /* CMEQ, CMLE */
    case 0xb: /* ABS, NEG */
        if (size == 3 && !is_q) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x3: /* SUQADD, USQADD */
        if (size == 3 && !is_q) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_2misc_satacc(s, false, u, is_q, size, rn, rd);
        return;
    case 0x7: /* SQABS, SQNEG */
        if (size == 3 && !is_q) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0xc ... 0xf:
    case 0x16 ... 0x1f:
    {
        /* Floating point: U, size[1] and opcode indicate operation;
         * size[0] indicates single or double precision.  Fold U and
         * size[1] into the opcode so one switch covers the whole space.
         */
        int is_double = extract32(size, 0, 1);
        opcode |= (extract32(size, 1, 1) << 5) | (u << 6);
        size = is_double ? 3 : 2;
        switch (opcode) {
        case 0x2f: /* FABS */
        case 0x6f: /* FNEG */
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x1d: /* SCVTF */
        case 0x5d: /* UCVTF */
        {
            bool is_signed = (opcode == 0x1d) ? true : false;
            int elements = is_double ? 2 : is_q ? 4 : 2;
            if (is_double && !is_q) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_simd_intfp_conv(s, rd, rn, elements, is_signed, 0, size);
            return;
        }
        case 0x2c: /* FCMGT (zero) */
        case 0x2d: /* FCMEQ (zero) */
        case 0x2e: /* FCMLT (zero) */
        case 0x6c: /* FCMGE (zero) */
        case 0x6d: /* FCMLE (zero) */
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            handle_2misc_fcmp_zero(s, opcode, false, u, is_q, size, rn, rd);
            return;
        case 0x7f: /* FSQRT */
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x1a: /* FCVTNS */
        case 0x1b: /* FCVTMS */
        case 0x3a: /* FCVTPS */
        case 0x3b: /* FCVTZS */
        case 0x5a: /* FCVTNU */
        case 0x5b: /* FCVTMU */
        case 0x7a: /* FCVTPU */
        case 0x7b: /* FCVTZU */
            need_fpstatus = true;
            need_rmode = true;
            /* Rounding mode is encoded in opcode bits [5] and [0] */
            rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x5c: /* FCVTAU */
        case 0x1c: /* FCVTAS */
            need_fpstatus = true;
            need_rmode = true;
            rmode = FPROUNDING_TIEAWAY;
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x3c: /* URECPE */
            if (size == 3) {
                unallocated_encoding(s);
                return;
            }
            /* fall through */
        case 0x3d: /* FRECPE */
        case 0x7d: /* FRSQRTE */
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_reciprocal(s, opcode, false, u, is_q, size, rn, rd);
            return;
        case 0x56: /* FCVTXN, FCVTXN2 */
            if (size == 2) {
                unallocated_encoding(s);
                return;
            }
            /* fall through */
        case 0x16: /* FCVTN, FCVTN2 */
            /* handle_2misc_narrow does a 2*size -> size operation, but
             * these instructions encode the source size rather than the
             * destination size, hence the "size - 1" below.
             */
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_narrow(s, false, opcode, 0, is_q, size - 1, rn, rd);
            return;
        case 0x36: /* BFCVTN, BFCVTN2 */
            if (!dc_isar_feature(aa64_bf16, s) || size != 2) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_narrow(s, false, opcode, 0, is_q, size - 1, rn, rd);
            return;
        case 0x17: /* FCVTL, FCVTL2 */
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_widening(s, opcode, is_q, size, rn, rd);
            return;
        case 0x18: /* FRINTN */
        case 0x19: /* FRINTM */
        case 0x38: /* FRINTP */
        case 0x39: /* FRINTZ */
            need_rmode = true;
            rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
            /* fall through */
        case 0x59: /* FRINTX */
        case 0x79: /* FRINTI */
            need_fpstatus = true;
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x58: /* FRINTA */
            need_rmode = true;
            rmode = FPROUNDING_TIEAWAY;
            need_fpstatus = true;
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x7c: /* URSQRTE */
            if (size == 3) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x1e: /* FRINT32Z */
        case 0x1f: /* FRINT64Z */
            need_rmode = true;
            rmode = FPROUNDING_ZERO;
            /* fall through */
        case 0x5e: /* FRINT32X */
        case 0x5f: /* FRINT64X */
            need_fpstatus = true;
            if ((size == 3 && !is_q) || !dc_isar_feature(aa64_frint, s)) {
                unallocated_encoding(s);
                return;
            }
            break;
        default:
            unallocated_encoding(s);
            return;
        }
        break;
    }
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (need_fpstatus || need_rmode) {
        tcg_fpstatus = fpstatus_ptr(FPST_FPCR);
    } else {
        tcg_fpstatus = NULL;
    }
    if (need_rmode) {
        /* Swap in the requested rounding mode; the old one comes back
         * in tcg_rmode and is restored at the end of the function.
         */
        tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
    } else {
        tcg_rmode = NULL;
    }

    /* These ops have whole-vector (gvec) expansions */
    switch (opcode) {
    case 0x5:
        if (u && size == 0) { /* NOT */
            gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_not, 0);
            return;
        }
        break;
    case 0x8: /* CMGT, CMGE */
        if (u) {
            gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cge0, size);
        } else {
            gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cgt0, size);
        }
        return;
    case 0x9: /* CMEQ, CMLE */
        if (u) {
            gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cle0, size);
        } else {
            gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_ceq0, size);
        }
        return;
    case 0xa: /* CMLT */
        gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_clt0, size);
        return;
    case 0xb: /* ABS, NEG */
        if (u) {
            gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_neg, size);
        } else {
            gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_abs, size);
        }
        return;
    }

    if (size == 3) {
        /* All 64-bit element operations can be shared with scalar 2misc */
        int pass;

        /* Coverage of (size == 3, !is_q) cases was already rejected in
         * the decode switch above, so only full-width ops reach here.
         */
        tcg_debug_assert(is_q);
        for (pass = 0; pass < 2; pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();
            TCGv_i64 tcg_res = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);

            handle_2misc_64(s, opcode, u, tcg_res, tcg_op,
                            tcg_rmode, tcg_fpstatus);

            write_vec_element(s, tcg_res, rd, pass, MO_64);

            tcg_temp_free_i64(tcg_res);
            tcg_temp_free_i64(tcg_op);
        }
    } else {
        int pass;

        for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op, rn, pass, MO_32);

            if (size == 2) {
                /* Special cases for 32 bit elements */
                switch (opcode) {
                case 0x4: /* CLS, CLZ */
                    if (u) {
                        tcg_gen_clzi_i32(tcg_res, tcg_op, 32);
                    } else {
                        tcg_gen_clrsb_i32(tcg_res, tcg_op);
                    }
                    break;
                case 0x7: /* SQABS, SQNEG */
                    if (u) {
                        gen_helper_neon_qneg_s32(tcg_res, cpu_env, tcg_op);
                    } else {
                        gen_helper_neon_qabs_s32(tcg_res, cpu_env, tcg_op);
                    }
                    break;
                case 0x2f: /* FABS */
                    gen_helper_vfp_abss(tcg_res, tcg_op);
                    break;
                case 0x6f: /* FNEG */
                    gen_helper_vfp_negs(tcg_res, tcg_op);
                    break;
                case 0x7f: /* FSQRT */
                    gen_helper_vfp_sqrts(tcg_res, tcg_op, cpu_env);
                    break;
                case 0x1a: /* FCVTNS */
                case 0x1b: /* FCVTMS */
                case 0x1c: /* FCVTAS */
                case 0x3a: /* FCVTPS */
                case 0x3b: /* FCVTZS */
                {
                    TCGv_i32 tcg_shift = tcg_const_i32(0);
                    gen_helper_vfp_tosls(tcg_res, tcg_op,
                                         tcg_shift, tcg_fpstatus);
                    tcg_temp_free_i32(tcg_shift);
                    break;
                }
                case 0x5a: /* FCVTNU */
                case 0x5b: /* FCVTMU */
                case 0x5c: /* FCVTAU */
                case 0x7a: /* FCVTPU */
                case 0x7b: /* FCVTZU */
                {
                    TCGv_i32 tcg_shift = tcg_const_i32(0);
                    gen_helper_vfp_touls(tcg_res, tcg_op,
                                         tcg_shift, tcg_fpstatus);
                    tcg_temp_free_i32(tcg_shift);
                    break;
                }
                case 0x18: /* FRINTN */
                case 0x19: /* FRINTM */
                case 0x38: /* FRINTP */
                case 0x39: /* FRINTZ */
                case 0x58: /* FRINTA */
                case 0x79: /* FRINTI */
                    gen_helper_rints(tcg_res, tcg_op, tcg_fpstatus);
                    break;
                case 0x59: /* FRINTX */
                    gen_helper_rints_exact(tcg_res, tcg_op, tcg_fpstatus);
                    break;
                case 0x7c: /* URSQRTE */
                    gen_helper_rsqrte_u32(tcg_res, tcg_op);
                    break;
                case 0x1e: /* FRINT32Z */
                case 0x5e: /* FRINT32X */
                    gen_helper_frint32_s(tcg_res, tcg_op, tcg_fpstatus);
                    break;
                case 0x1f: /* FRINT64Z */
                case 0x5f: /* FRINT64X */
                    gen_helper_frint64_s(tcg_res, tcg_op, tcg_fpstatus);
                    break;
                default:
                    g_assert_not_reached();
                }
            } else {
                /* Use helpers for 8 and 16 bit elements */
                switch (opcode) {
                case 0x5: /* CNT, RBIT */
                    /* For these two insns size is part of the opcode
                     * space; NOT (u && size == 0) was already emitted
                     * as a gvec op above, so only size 0 reaches here.
                     */
                    if (u) {
                        gen_helper_neon_rbit_u8(tcg_res, tcg_op);
                    } else {
                        gen_helper_neon_cnt_u8(tcg_res, tcg_op);
                    }
                    break;
                case 0x7: /* SQABS, SQNEG */
                {
                    NeonGenOneOpEnvFn *genfn;
                    static NeonGenOneOpEnvFn * const fns[2][2] = {
                        { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 },
                        { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 },
                    };
                    genfn = fns[size][u];
                    genfn(tcg_res, cpu_env, tcg_op);
                    break;
                }
                case 0x4: /* CLS, CLZ */
                    if (u) {
                        if (size == 0) {
                            gen_helper_neon_clz_u8(tcg_res, tcg_op);
                        } else {
                            gen_helper_neon_clz_u16(tcg_res, tcg_op);
                        }
                    } else {
                        if (size == 0) {
                            gen_helper_neon_cls_s8(tcg_res, tcg_op);
                        } else {
                            gen_helper_neon_cls_s16(tcg_res, tcg_op);
                        }
                    }
                    break;
                default:
                    g_assert_not_reached();
                }
            }

            write_vec_element_i32(s, tcg_res, rd, pass, MO_32);

            tcg_temp_free_i32(tcg_res);
            tcg_temp_free_i32(tcg_op);
        }
    }
    clear_vec_high(s, is_q, rd);

    if (need_rmode) {
        /* Restore the rounding mode we saved above */
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
        tcg_temp_free_i32(tcg_rmode);
    }
    if (need_fpstatus) {
        tcg_temp_free_ptr(tcg_fpstatus);
    }
}
13006
13007
13008
13009
13010
13011
13012
13013
13014
13015
13016
13017
13018
13019
13020
/* AdvSIMD [scalar] two register miscellaneous (FP16)
 *
 * The a (bit 23) and U (bit 29) fields are folded into the opcode
 * to form fpop, which then selects the operation.
 */
static void disas_simd_two_reg_misc_fp16(DisasContext *s, uint32_t insn)
{
    int fpop, opcode, a, u;
    int rn, rd;
    bool is_q;
    bool is_scalar;
    bool only_in_vector = false;

    int pass;
    TCGv_i32 tcg_rmode = NULL;
    TCGv_ptr tcg_fpstatus = NULL;
    bool need_rmode = false;
    bool need_fpst = true;
    int rmode;

    if (!dc_isar_feature(aa64_fp16, s)) {
        unallocated_encoding(s);
        return;
    }

    rd = extract32(insn, 0, 5);
    rn = extract32(insn, 5, 5);

    a = extract32(insn, 23, 1);
    u = extract32(insn, 29, 1);
    is_scalar = extract32(insn, 28, 1);
    is_q = extract32(insn, 30, 1);

    opcode = extract32(insn, 12, 5);
    /* Fold a and u into the opcode to get a single switch value */
    fpop = deposit32(opcode, 5, 1, a);
    fpop = deposit32(fpop, 6, 1, u);

    switch (fpop) {
    case 0x1d: /* SCVTF */
    case 0x5d: /* UCVTF */
    {
        int elements;

        if (is_scalar) {
            elements = 1;
        } else {
            elements = (is_q ? 8 : 4);
        }

        if (!fp_access_check(s)) {
            return;
        }
        handle_simd_intfp_conv(s, rd, rn, elements, !u, 0, MO_16);
        return;
    }
    break;
    case 0x2c: /* FCMGT (zero) */
    case 0x2d: /* FCMEQ (zero) */
    case 0x2e: /* FCMLT (zero) */
    case 0x6c: /* FCMGE (zero) */
    case 0x6d: /* FCMLE (zero) */
        handle_2misc_fcmp_zero(s, fpop, is_scalar, 0, is_q, MO_16, rn, rd);
        return;
    case 0x3d: /* FRECPE */
    case 0x3f: /* FRECPX */
        break;
    case 0x18: /* FRINTN */
        need_rmode = true;
        only_in_vector = true;
        rmode = FPROUNDING_TIEEVEN;
        break;
    case 0x19: /* FRINTM */
        need_rmode = true;
        only_in_vector = true;
        rmode = FPROUNDING_NEGINF;
        break;
    case 0x38: /* FRINTP */
        need_rmode = true;
        only_in_vector = true;
        rmode = FPROUNDING_POSINF;
        break;
    case 0x39: /* FRINTZ */
        need_rmode = true;
        only_in_vector = true;
        rmode = FPROUNDING_ZERO;
        break;
    case 0x58: /* FRINTA */
        need_rmode = true;
        only_in_vector = true;
        rmode = FPROUNDING_TIEAWAY;
        break;
    case 0x59: /* FRINTX */
    case 0x79: /* FRINTI */
        only_in_vector = true;
        /* current rounding mode */
        break;
    case 0x1a: /* FCVTNS */
        need_rmode = true;
        rmode = FPROUNDING_TIEEVEN;
        break;
    case 0x1b: /* FCVTMS */
        need_rmode = true;
        rmode = FPROUNDING_NEGINF;
        break;
    case 0x1c: /* FCVTAS */
        need_rmode = true;
        rmode = FPROUNDING_TIEAWAY;
        break;
    case 0x3a: /* FCVTPS */
        need_rmode = true;
        rmode = FPROUNDING_POSINF;
        break;
    case 0x3b: /* FCVTZS */
        need_rmode = true;
        rmode = FPROUNDING_ZERO;
        break;
    case 0x5a: /* FCVTNU */
        need_rmode = true;
        rmode = FPROUNDING_TIEEVEN;
        break;
    case 0x5b: /* FCVTMU */
        need_rmode = true;
        rmode = FPROUNDING_NEGINF;
        break;
    case 0x5c: /* FCVTAU */
        need_rmode = true;
        rmode = FPROUNDING_TIEAWAY;
        break;
    case 0x7a: /* FCVTPU */
        need_rmode = true;
        rmode = FPROUNDING_POSINF;
        break;
    case 0x7b: /* FCVTZU */
        need_rmode = true;
        rmode = FPROUNDING_ZERO;
        break;
    case 0x2f: /* FABS */
    case 0x6f: /* FNEG */
        need_fpst = false;
        break;
    case 0x7d: /* FRSQRTE */
    case 0x7f: /* FSQRT (vector) */
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    /* Check additional constraints for the scalar encoding */
    if (is_scalar) {
        if (!is_q) {
            unallocated_encoding(s);
            return;
        }
        /* FRINTxx is only in the vector form */
        if (only_in_vector) {
            unallocated_encoding(s);
            return;
        }
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (need_rmode || need_fpst) {
        tcg_fpstatus = fpstatus_ptr(FPST_FPCR_F16);
    }

    if (need_rmode) {
        /* Swap in the desired rounding mode; restored at the bottom */
        tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
    }

    if (is_scalar) {
        TCGv_i32 tcg_op = read_fp_hreg(s, rn);
        TCGv_i32 tcg_res = tcg_temp_new_i32();

        switch (fpop) {
        case 0x1a: /* FCVTNS */
        case 0x1b: /* FCVTMS */
        case 0x1c: /* FCVTAS */
        case 0x3a: /* FCVTPS */
        case 0x3b: /* FCVTZS */
            gen_helper_advsimd_f16tosinth(tcg_res, tcg_op, tcg_fpstatus);
            break;
        case 0x3d: /* FRECPE */
            gen_helper_recpe_f16(tcg_res, tcg_op, tcg_fpstatus);
            break;
        case 0x3f: /* FRECPX */
            gen_helper_frecpx_f16(tcg_res, tcg_op, tcg_fpstatus);
            break;
        case 0x5a: /* FCVTNU */
        case 0x5b: /* FCVTMU */
        case 0x5c: /* FCVTAU */
        case 0x7a: /* FCVTPU */
        case 0x7b: /* FCVTZU */
            gen_helper_advsimd_f16touinth(tcg_res, tcg_op, tcg_fpstatus);
            break;
        case 0x6f: /* FNEG */
            tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
            break;
        case 0x7d: /* FRSQRTE */
            gen_helper_rsqrte_f16(tcg_res, tcg_op, tcg_fpstatus);
            break;
        default:
            g_assert_not_reached();
        }

        /* limit any sign extension going on */
        tcg_gen_andi_i32(tcg_res, tcg_res, 0xffff);
        write_fp_sreg(s, rd, tcg_res);

        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_op);
    } else {
        for (pass = 0; pass < (is_q ? 8 : 4); pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op, rn, pass, MO_16);

            switch (fpop) {
            case 0x1a: /* FCVTNS */
            case 0x1b: /* FCVTMS */
            case 0x1c: /* FCVTAS */
            case 0x3a: /* FCVTPS */
            case 0x3b: /* FCVTZS */
                gen_helper_advsimd_f16tosinth(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x3d: /* FRECPE */
                gen_helper_recpe_f16(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x5a: /* FCVTNU */
            case 0x5b: /* FCVTMU */
            case 0x5c: /* FCVTAU */
            case 0x7a: /* FCVTPU */
            case 0x7b: /* FCVTZU */
                gen_helper_advsimd_f16touinth(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x18: /* FRINTN */
            case 0x19: /* FRINTM */
            case 0x38: /* FRINTP */
            case 0x39: /* FRINTZ */
            case 0x58: /* FRINTA */
            case 0x79: /* FRINTI */
                gen_helper_advsimd_rinth(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x59: /* FRINTX */
                gen_helper_advsimd_rinth_exact(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x2f: /* FABS */
                tcg_gen_andi_i32(tcg_res, tcg_op, 0x7fff);
                break;
            case 0x6f: /* FNEG */
                tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
                break;
            case 0x7d: /* FRSQRTE */
                gen_helper_rsqrte_f16(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x7f: /* FSQRT */
                gen_helper_sqrt_f16(tcg_res, tcg_op, tcg_fpstatus);
                break;
            default:
                g_assert_not_reached();
            }

            write_vec_element_i32(s, tcg_res, rd, pass, MO_16);

            tcg_temp_free_i32(tcg_res);
            tcg_temp_free_i32(tcg_op);
        }

        clear_vec_high(s, is_q, rd);
    }

    if (tcg_rmode) {
        /* Restore the rounding mode saved above */
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
        tcg_temp_free_i32(tcg_rmode);
    }

    if (tcg_fpstatus) {
        tcg_temp_free_ptr(tcg_fpstatus);
    }
}
13302
13303
13304
13305
13306
13307
13308
13309
13310
13311
13312
13313
13314static void disas_simd_indexed(DisasContext *s, uint32_t insn)
13315{
13316
13317
13318
13319
13320
13321
13322
13323 bool is_scalar = extract32(insn, 28, 1);
13324 bool is_q = extract32(insn, 30, 1);
13325 bool u = extract32(insn, 29, 1);
13326 int size = extract32(insn, 22, 2);
13327 int l = extract32(insn, 21, 1);
13328 int m = extract32(insn, 20, 1);
13329
13330 int rm = extract32(insn, 16, 4);
13331 int opcode = extract32(insn, 12, 4);
13332 int h = extract32(insn, 11, 1);
13333 int rn = extract32(insn, 5, 5);
13334 int rd = extract32(insn, 0, 5);
13335 bool is_long = false;
13336 int is_fp = 0;
13337 bool is_fp16 = false;
13338 int index;
13339 TCGv_ptr fpst;
13340
13341 switch (16 * u + opcode) {
13342 case 0x08:
13343 case 0x10:
13344 case 0x14:
13345 if (is_scalar) {
13346 unallocated_encoding(s);
13347 return;
13348 }
13349 break;
13350 case 0x02:
13351 case 0x12:
13352 case 0x06:
13353 case 0x16:
13354 case 0x0a:
13355 case 0x1a:
13356 if (is_scalar) {
13357 unallocated_encoding(s);
13358 return;
13359 }
13360 is_long = true;
13361 break;
13362 case 0x03:
13363 case 0x07:
13364 case 0x0b:
13365 is_long = true;
13366 break;
13367 case 0x0c:
13368 case 0x0d:
13369 break;
13370 case 0x01:
13371 case 0x05:
13372 case 0x09:
13373 case 0x19:
13374 is_fp = 1;
13375 break;
13376 case 0x1d:
13377 case 0x1f:
13378 if (!dc_isar_feature(aa64_rdm, s)) {
13379 unallocated_encoding(s);
13380 return;
13381 }
13382 break;
13383 case 0x0e:
13384 case 0x1e:
13385 if (is_scalar || size != MO_32 || !dc_isar_feature(aa64_dp, s)) {
13386 unallocated_encoding(s);
13387 return;
13388 }
13389 break;
13390 case 0x0f:
13391 switch (size) {
13392 case 0:
13393 case 2:
13394 if (is_scalar || !dc_isar_feature(aa64_i8mm, s)) {
13395 unallocated_encoding(s);
13396 return;
13397 }
13398 size = MO_32;
13399 break;
13400 case 1:
13401 if (is_scalar || !dc_isar_feature(aa64_bf16, s)) {
13402 unallocated_encoding(s);
13403 return;
13404 }
13405 size = MO_32;
13406 break;
13407 case 3:
13408 if (is_scalar || !dc_isar_feature(aa64_bf16, s)) {
13409 unallocated_encoding(s);
13410 return;
13411 }
13412
13413 size = MO_16;
13414 break;
13415 default:
13416 unallocated_encoding(s);
13417 return;
13418 }
13419 break;
13420 case 0x11:
13421 case 0x13:
13422 case 0x15:
13423 case 0x17:
13424 if (is_scalar || !dc_isar_feature(aa64_fcma, s)) {
13425 unallocated_encoding(s);
13426 return;
13427 }
13428 is_fp = 2;
13429 break;
13430 case 0x00:
13431 case 0x04:
13432 case 0x18:
13433 case 0x1c:
13434 if (is_scalar || size != MO_32 || !dc_isar_feature(aa64_fhm, s)) {
13435 unallocated_encoding(s);
13436 return;
13437 }
13438 size = MO_16;
13439
13440 break;
13441 default:
13442 unallocated_encoding(s);
13443 return;
13444 }
13445
13446 switch (is_fp) {
13447 case 1:
13448
13449 switch (size) {
13450 case 0:
13451 size = MO_16;
13452 is_fp16 = true;
13453 break;
13454 case MO_32:
13455 case MO_64:
13456 break;
13457 default:
13458 unallocated_encoding(s);
13459 return;
13460 }
13461 break;
13462
13463 case 2:
13464
13465 size += 1;
13466 switch (size) {
13467 case MO_32:
13468 if (h && !is_q) {
13469 unallocated_encoding(s);
13470 return;
13471 }
13472 is_fp16 = true;
13473 break;
13474 case MO_64:
13475 break;
13476 default:
13477 unallocated_encoding(s);
13478 return;
13479 }
13480 break;
13481
13482 default:
13483 switch (size) {
13484 case MO_8:
13485 case MO_64:
13486 unallocated_encoding(s);
13487 return;
13488 }
13489 break;
13490 }
13491 if (is_fp16 && !dc_isar_feature(aa64_fp16, s)) {
13492 unallocated_encoding(s);
13493 return;
13494 }
13495
13496
13497 switch (size) {
13498 case MO_16:
13499 index = h << 2 | l << 1 | m;
13500 break;
13501 case MO_32:
13502 index = h << 1 | l;
13503 rm |= m << 4;
13504 break;
13505 case MO_64:
13506 if (l || !is_q) {
13507 unallocated_encoding(s);
13508 return;
13509 }
13510 index = h;
13511 rm |= m << 4;
13512 break;
13513 default:
13514 g_assert_not_reached();
13515 }
13516
13517 if (!fp_access_check(s)) {
13518 return;
13519 }
13520
13521 if (is_fp) {
13522 fpst = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
13523 } else {
13524 fpst = NULL;
13525 }
13526
13527 switch (16 * u + opcode) {
13528 case 0x0e:
13529 case 0x1e:
13530 gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
13531 u ? gen_helper_gvec_udot_idx_b
13532 : gen_helper_gvec_sdot_idx_b);
13533 return;
13534 case 0x0f:
13535 switch (extract32(insn, 22, 2)) {
13536 case 0:
13537 gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
13538 gen_helper_gvec_sudot_idx_b);
13539 return;
13540 case 1:
13541 gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
13542 gen_helper_gvec_bfdot_idx);
13543 return;
13544 case 2:
13545 gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
13546 gen_helper_gvec_usdot_idx_b);
13547 return;
13548 case 3:
13549 gen_gvec_op4_fpst(s, 1, rd, rn, rm, rd, 0, (index << 1) | is_q,
13550 gen_helper_gvec_bfmlal_idx);
13551 return;
13552 }
13553 g_assert_not_reached();
13554 case 0x11:
13555 case 0x13:
13556 case 0x15:
13557 case 0x17:
13558 {
13559 int rot = extract32(insn, 13, 2);
13560 int data = (index << 2) | rot;
13561 tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, rd),
13562 vec_full_reg_offset(s, rn),
13563 vec_full_reg_offset(s, rm),
13564 vec_full_reg_offset(s, rd), fpst,
13565 is_q ? 16 : 8, vec_full_reg_size(s), data,
13566 size == MO_64
13567 ? gen_helper_gvec_fcmlas_idx
13568 : gen_helper_gvec_fcmlah_idx);
13569 tcg_temp_free_ptr(fpst);
13570 }
13571 return;
13572
13573 case 0x00:
13574 case 0x04:
13575 case 0x18:
13576 case 0x1c:
13577 {
13578 int is_s = extract32(opcode, 2, 1);
13579 int is_2 = u;
13580 int data = (index << 2) | (is_2 << 1) | is_s;
13581 tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
13582 vec_full_reg_offset(s, rn),
13583 vec_full_reg_offset(s, rm), cpu_env,
13584 is_q ? 16 : 8, vec_full_reg_size(s),
13585 data, gen_helper_gvec_fmlal_idx_a64);
13586 }
13587 return;
13588
13589 case 0x08:
13590 if (!is_long && !is_scalar) {
13591 static gen_helper_gvec_3 * const fns[3] = {
13592 gen_helper_gvec_mul_idx_h,
13593 gen_helper_gvec_mul_idx_s,
13594 gen_helper_gvec_mul_idx_d,
13595 };
13596 tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd),
13597 vec_full_reg_offset(s, rn),
13598 vec_full_reg_offset(s, rm),
13599 is_q ? 16 : 8, vec_full_reg_size(s),
13600 index, fns[size - 1]);
13601 return;
13602 }
13603 break;
13604
13605 case 0x10:
13606 if (!is_long && !is_scalar) {
13607 static gen_helper_gvec_4 * const fns[3] = {
13608 gen_helper_gvec_mla_idx_h,
13609 gen_helper_gvec_mla_idx_s,
13610 gen_helper_gvec_mla_idx_d,
13611 };
13612 tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
13613 vec_full_reg_offset(s, rn),
13614 vec_full_reg_offset(s, rm),
13615 vec_full_reg_offset(s, rd),
13616 is_q ? 16 : 8, vec_full_reg_size(s),
13617 index, fns[size - 1]);
13618 return;
13619 }
13620 break;
13621
13622 case 0x14:
13623 if (!is_long && !is_scalar) {
13624 static gen_helper_gvec_4 * const fns[3] = {
13625 gen_helper_gvec_mls_idx_h,
13626 gen_helper_gvec_mls_idx_s,
13627 gen_helper_gvec_mls_idx_d,
13628 };
13629 tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
13630 vec_full_reg_offset(s, rn),
13631 vec_full_reg_offset(s, rm),
13632 vec_full_reg_offset(s, rd),
13633 is_q ? 16 : 8, vec_full_reg_size(s),
13634 index, fns[size - 1]);
13635 return;
13636 }
13637 break;
13638 }
13639
13640 if (size == 3) {
13641 TCGv_i64 tcg_idx = tcg_temp_new_i64();
13642 int pass;
13643
13644 assert(is_fp && is_q && !is_long);
13645
13646 read_vec_element(s, tcg_idx, rm, index, MO_64);
13647
13648 for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
13649 TCGv_i64 tcg_op = tcg_temp_new_i64();
13650 TCGv_i64 tcg_res = tcg_temp_new_i64();
13651
13652 read_vec_element(s, tcg_op, rn, pass, MO_64);
13653
13654 switch (16 * u + opcode) {
13655 case 0x05:
13656
13657 gen_helper_vfp_negd(tcg_op, tcg_op);
13658
13659 case 0x01:
13660 read_vec_element(s, tcg_res, rd, pass, MO_64);
13661 gen_helper_vfp_muladdd(tcg_res, tcg_op, tcg_idx, tcg_res, fpst);
13662 break;
13663 case 0x09:
13664 gen_helper_vfp_muld(tcg_res, tcg_op, tcg_idx, fpst);
13665 break;
13666 case 0x19:
13667 gen_helper_vfp_mulxd(tcg_res, tcg_op, tcg_idx, fpst);
13668 break;
13669 default:
13670 g_assert_not_reached();
13671 }
13672
13673 write_vec_element(s, tcg_res, rd, pass, MO_64);
13674 tcg_temp_free_i64(tcg_op);
13675 tcg_temp_free_i64(tcg_res);
13676 }
13677
13678 tcg_temp_free_i64(tcg_idx);
13679 clear_vec_high(s, !is_scalar, rd);
13680 } else if (!is_long) {
13681
13682
13683
13684
13685 TCGv_i32 tcg_idx = tcg_temp_new_i32();
13686 int pass, maxpasses;
13687
13688 if (is_scalar) {
13689 maxpasses = 1;
13690 } else {
13691 maxpasses = is_q ? 4 : 2;
13692 }
13693
13694 read_vec_element_i32(s, tcg_idx, rm, index, size);
13695
13696 if (size == 1 && !is_scalar) {
13697
13698
13699
13700
13701 tcg_gen_deposit_i32(tcg_idx, tcg_idx, tcg_idx, 16, 16);
13702 }
13703
13704 for (pass = 0; pass < maxpasses; pass++) {
13705 TCGv_i32 tcg_op = tcg_temp_new_i32();
13706 TCGv_i32 tcg_res = tcg_temp_new_i32();
13707
13708 read_vec_element_i32(s, tcg_op, rn, pass, is_scalar ? size : MO_32);
13709
13710 switch (16 * u + opcode) {
13711 case 0x08:
13712 case 0x10:
13713 case 0x14:
13714 {
13715 static NeonGenTwoOpFn * const fns[2][2] = {
13716 { gen_helper_neon_add_u16, gen_helper_neon_sub_u16 },
13717 { tcg_gen_add_i32, tcg_gen_sub_i32 },
13718 };
13719 NeonGenTwoOpFn *genfn;
13720 bool is_sub = opcode == 0x4;
13721
13722 if (size == 1) {
13723 gen_helper_neon_mul_u16(tcg_res, tcg_op, tcg_idx);
13724 } else {
13725 tcg_gen_mul_i32(tcg_res, tcg_op, tcg_idx);
13726 }
13727 if (opcode == 0x8) {
13728 break;
13729 }
13730 read_vec_element_i32(s, tcg_op, rd, pass, MO_32);
13731 genfn = fns[size - 1][is_sub];
13732 genfn(tcg_res, tcg_op, tcg_res);
13733 break;
13734 }
13735 case 0x05:
13736 case 0x01:
13737 read_vec_element_i32(s, tcg_res, rd, pass,
13738 is_scalar ? size : MO_32);
13739 switch (size) {
13740 case 1:
13741 if (opcode == 0x5) {
13742
13743
13744 tcg_gen_xori_i32(tcg_op, tcg_op, 0x80008000);
13745 }
13746 if (is_scalar) {
13747 gen_helper_advsimd_muladdh(tcg_res, tcg_op, tcg_idx,
13748 tcg_res, fpst);
13749 } else {
13750 gen_helper_advsimd_muladd2h(tcg_res, tcg_op, tcg_idx,
13751 tcg_res, fpst);
13752 }
13753 break;
13754 case 2:
13755 if (opcode == 0x5) {
13756
13757
13758 tcg_gen_xori_i32(tcg_op, tcg_op, 0x80000000);
13759 }
13760 gen_helper_vfp_muladds(tcg_res, tcg_op, tcg_idx,
13761 tcg_res, fpst);
13762 break;
13763 default:
13764 g_assert_not_reached();
13765 }
13766 break;
13767 case 0x09:
13768 switch (size) {
13769 case 1:
13770 if (is_scalar) {
13771 gen_helper_advsimd_mulh(tcg_res, tcg_op,
13772 tcg_idx, fpst);
13773 } else {
13774 gen_helper_advsimd_mul2h(tcg_res, tcg_op,
13775 tcg_idx, fpst);
13776 }
13777 break;
13778 case 2:
13779 gen_helper_vfp_muls(tcg_res, tcg_op, tcg_idx, fpst);
13780 break;
13781 default:
13782 g_assert_not_reached();
13783 }
13784 break;
13785 case 0x19:
13786 switch (size) {
13787 case 1:
13788 if (is_scalar) {
13789 gen_helper_advsimd_mulxh(tcg_res, tcg_op,
13790 tcg_idx, fpst);
13791 } else {
13792 gen_helper_advsimd_mulx2h(tcg_res, tcg_op,
13793 tcg_idx, fpst);
13794 }
13795 break;
13796 case 2:
13797 gen_helper_vfp_mulxs(tcg_res, tcg_op, tcg_idx, fpst);
13798 break;
13799 default:
13800 g_assert_not_reached();
13801 }
13802 break;
13803 case 0x0c:
13804 if (size == 1) {
13805 gen_helper_neon_qdmulh_s16(tcg_res, cpu_env,
13806 tcg_op, tcg_idx);
13807 } else {
13808 gen_helper_neon_qdmulh_s32(tcg_res, cpu_env,
13809 tcg_op, tcg_idx);
13810 }
13811 break;
13812 case 0x0d:
13813 if (size == 1) {
13814 gen_helper_neon_qrdmulh_s16(tcg_res, cpu_env,
13815 tcg_op, tcg_idx);
13816 } else {
13817 gen_helper_neon_qrdmulh_s32(tcg_res, cpu_env,
13818 tcg_op, tcg_idx);
13819 }
13820 break;
13821 case 0x1d:
13822 read_vec_element_i32(s, tcg_res, rd, pass,
13823 is_scalar ? size : MO_32);
13824 if (size == 1) {
13825 gen_helper_neon_qrdmlah_s16(tcg_res, cpu_env,
13826 tcg_op, tcg_idx, tcg_res);
13827 } else {
13828 gen_helper_neon_qrdmlah_s32(tcg_res, cpu_env,
13829 tcg_op, tcg_idx, tcg_res);
13830 }
13831 break;
13832 case 0x1f:
13833 read_vec_element_i32(s, tcg_res, rd, pass,
13834 is_scalar ? size : MO_32);
13835 if (size == 1) {
13836 gen_helper_neon_qrdmlsh_s16(tcg_res, cpu_env,
13837 tcg_op, tcg_idx, tcg_res);
13838 } else {
13839 gen_helper_neon_qrdmlsh_s32(tcg_res, cpu_env,
13840 tcg_op, tcg_idx, tcg_res);
13841 }
13842 break;
13843 default:
13844 g_assert_not_reached();
13845 }
13846
13847 if (is_scalar) {
13848 write_fp_sreg(s, rd, tcg_res);
13849 } else {
13850 write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
13851 }
13852
13853 tcg_temp_free_i32(tcg_op);
13854 tcg_temp_free_i32(tcg_res);
13855 }
13856
13857 tcg_temp_free_i32(tcg_idx);
13858 clear_vec_high(s, is_q, rd);
13859 } else {
13860
13861 TCGv_i64 tcg_res[2];
13862 int pass;
13863 bool satop = extract32(opcode, 0, 1);
13864 MemOp memop = MO_32;
13865
13866 if (satop || !u) {
13867 memop |= MO_SIGN;
13868 }
13869
13870 if (size == 2) {
13871 TCGv_i64 tcg_idx = tcg_temp_new_i64();
13872
13873 read_vec_element(s, tcg_idx, rm, index, memop);
13874
13875 for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
13876 TCGv_i64 tcg_op = tcg_temp_new_i64();
13877 TCGv_i64 tcg_passres;
13878 int passelt;
13879
13880 if (is_scalar) {
13881 passelt = 0;
13882 } else {
13883 passelt = pass + (is_q * 2);
13884 }
13885
13886 read_vec_element(s, tcg_op, rn, passelt, memop);
13887
13888 tcg_res[pass] = tcg_temp_new_i64();
13889
13890 if (opcode == 0xa || opcode == 0xb) {
13891
13892 tcg_passres = tcg_res[pass];
13893 } else {
13894 tcg_passres = tcg_temp_new_i64();
13895 }
13896
13897 tcg_gen_mul_i64(tcg_passres, tcg_op, tcg_idx);
13898 tcg_temp_free_i64(tcg_op);
13899
13900 if (satop) {
13901
13902 gen_helper_neon_addl_saturate_s64(tcg_passres, cpu_env,
13903 tcg_passres, tcg_passres);
13904 }
13905
13906 if (opcode == 0xa || opcode == 0xb) {
13907 continue;
13908 }
13909
13910
13911 read_vec_element(s, tcg_res[pass], rd, pass, MO_64);
13912
13913 switch (opcode) {
13914 case 0x2:
13915 tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
13916 break;
13917 case 0x6:
13918 tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
13919 break;
13920 case 0x7:
13921 tcg_gen_neg_i64(tcg_passres, tcg_passres);
13922
13923 case 0x3:
13924 gen_helper_neon_addl_saturate_s64(tcg_res[pass], cpu_env,
13925 tcg_res[pass],
13926 tcg_passres);
13927 break;
13928 default:
13929 g_assert_not_reached();
13930 }
13931 tcg_temp_free_i64(tcg_passres);
13932 }
13933 tcg_temp_free_i64(tcg_idx);
13934
13935 clear_vec_high(s, !is_scalar, rd);
13936 } else {
13937 TCGv_i32 tcg_idx = tcg_temp_new_i32();
13938
13939 assert(size == 1);
13940 read_vec_element_i32(s, tcg_idx, rm, index, size);
13941
13942 if (!is_scalar) {
13943
13944
13945
13946
13947 tcg_gen_deposit_i32(tcg_idx, tcg_idx, tcg_idx, 16, 16);
13948 }
13949
13950 for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
13951 TCGv_i32 tcg_op = tcg_temp_new_i32();
13952 TCGv_i64 tcg_passres;
13953
13954 if (is_scalar) {
13955 read_vec_element_i32(s, tcg_op, rn, pass, size);
13956 } else {
13957 read_vec_element_i32(s, tcg_op, rn,
13958 pass + (is_q * 2), MO_32);
13959 }
13960
13961 tcg_res[pass] = tcg_temp_new_i64();
13962
13963 if (opcode == 0xa || opcode == 0xb) {
13964
13965 tcg_passres = tcg_res[pass];
13966 } else {
13967 tcg_passres = tcg_temp_new_i64();
13968 }
13969
13970 if (memop & MO_SIGN) {
13971 gen_helper_neon_mull_s16(tcg_passres, tcg_op, tcg_idx);
13972 } else {
13973 gen_helper_neon_mull_u16(tcg_passres, tcg_op, tcg_idx);
13974 }
13975 if (satop) {
13976 gen_helper_neon_addl_saturate_s32(tcg_passres, cpu_env,
13977 tcg_passres, tcg_passres);
13978 }
13979 tcg_temp_free_i32(tcg_op);
13980
13981 if (opcode == 0xa || opcode == 0xb) {
13982 continue;
13983 }
13984
13985
13986 read_vec_element(s, tcg_res[pass], rd, pass, MO_64);
13987
13988 switch (opcode) {
13989 case 0x2:
13990 gen_helper_neon_addl_u32(tcg_res[pass], tcg_res[pass],
13991 tcg_passres);
13992 break;
13993 case 0x6:
13994 gen_helper_neon_subl_u32(tcg_res[pass], tcg_res[pass],
13995 tcg_passres);
13996 break;
13997 case 0x7:
13998 gen_helper_neon_negl_u32(tcg_passres, tcg_passres);
13999
14000 case 0x3:
14001 gen_helper_neon_addl_saturate_s32(tcg_res[pass], cpu_env,
14002 tcg_res[pass],
14003 tcg_passres);
14004 break;
14005 default:
14006 g_assert_not_reached();
14007 }
14008 tcg_temp_free_i64(tcg_passres);
14009 }
14010 tcg_temp_free_i32(tcg_idx);
14011
14012 if (is_scalar) {
14013 tcg_gen_ext32u_i64(tcg_res[0], tcg_res[0]);
14014 }
14015 }
14016
14017 if (is_scalar) {
14018 tcg_res[1] = tcg_const_i64(0);
14019 }
14020
14021 for (pass = 0; pass < 2; pass++) {
14022 write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
14023 tcg_temp_free_i64(tcg_res[pass]);
14024 }
14025 }
14026
14027 if (fpst) {
14028 tcg_temp_free_ptr(fpst);
14029 }
14030}
14031
14032
14033
14034
14035
14036
14037
14038static void disas_crypto_aes(DisasContext *s, uint32_t insn)
14039{
14040 int size = extract32(insn, 22, 2);
14041 int opcode = extract32(insn, 12, 5);
14042 int rn = extract32(insn, 5, 5);
14043 int rd = extract32(insn, 0, 5);
14044 int decrypt;
14045 gen_helper_gvec_2 *genfn2 = NULL;
14046 gen_helper_gvec_3 *genfn3 = NULL;
14047
14048 if (!dc_isar_feature(aa64_aes, s) || size != 0) {
14049 unallocated_encoding(s);
14050 return;
14051 }
14052
14053 switch (opcode) {
14054 case 0x4:
14055 decrypt = 0;
14056 genfn3 = gen_helper_crypto_aese;
14057 break;
14058 case 0x6:
14059 decrypt = 0;
14060 genfn2 = gen_helper_crypto_aesmc;
14061 break;
14062 case 0x5:
14063 decrypt = 1;
14064 genfn3 = gen_helper_crypto_aese;
14065 break;
14066 case 0x7:
14067 decrypt = 1;
14068 genfn2 = gen_helper_crypto_aesmc;
14069 break;
14070 default:
14071 unallocated_encoding(s);
14072 return;
14073 }
14074
14075 if (!fp_access_check(s)) {
14076 return;
14077 }
14078 if (genfn2) {
14079 gen_gvec_op2_ool(s, true, rd, rn, decrypt, genfn2);
14080 } else {
14081 gen_gvec_op3_ool(s, true, rd, rd, rn, decrypt, genfn3);
14082 }
14083}
14084
14085
14086
14087
14088
14089
14090
14091static void disas_crypto_three_reg_sha(DisasContext *s, uint32_t insn)
14092{
14093 int size = extract32(insn, 22, 2);
14094 int opcode = extract32(insn, 12, 3);
14095 int rm = extract32(insn, 16, 5);
14096 int rn = extract32(insn, 5, 5);
14097 int rd = extract32(insn, 0, 5);
14098 gen_helper_gvec_3 *genfn;
14099 bool feature;
14100
14101 if (size != 0) {
14102 unallocated_encoding(s);
14103 return;
14104 }
14105
14106 switch (opcode) {
14107 case 0:
14108 genfn = gen_helper_crypto_sha1c;
14109 feature = dc_isar_feature(aa64_sha1, s);
14110 break;
14111 case 1:
14112 genfn = gen_helper_crypto_sha1p;
14113 feature = dc_isar_feature(aa64_sha1, s);
14114 break;
14115 case 2:
14116 genfn = gen_helper_crypto_sha1m;
14117 feature = dc_isar_feature(aa64_sha1, s);
14118 break;
14119 case 3:
14120 genfn = gen_helper_crypto_sha1su0;
14121 feature = dc_isar_feature(aa64_sha1, s);
14122 break;
14123 case 4:
14124 genfn = gen_helper_crypto_sha256h;
14125 feature = dc_isar_feature(aa64_sha256, s);
14126 break;
14127 case 5:
14128 genfn = gen_helper_crypto_sha256h2;
14129 feature = dc_isar_feature(aa64_sha256, s);
14130 break;
14131 case 6:
14132 genfn = gen_helper_crypto_sha256su1;
14133 feature = dc_isar_feature(aa64_sha256, s);
14134 break;
14135 default:
14136 unallocated_encoding(s);
14137 return;
14138 }
14139
14140 if (!feature) {
14141 unallocated_encoding(s);
14142 return;
14143 }
14144
14145 if (!fp_access_check(s)) {
14146 return;
14147 }
14148 gen_gvec_op3_ool(s, true, rd, rn, rm, 0, genfn);
14149}
14150
14151
14152
14153
14154
14155
14156
14157static void disas_crypto_two_reg_sha(DisasContext *s, uint32_t insn)
14158{
14159 int size = extract32(insn, 22, 2);
14160 int opcode = extract32(insn, 12, 5);
14161 int rn = extract32(insn, 5, 5);
14162 int rd = extract32(insn, 0, 5);
14163 gen_helper_gvec_2 *genfn;
14164 bool feature;
14165
14166 if (size != 0) {
14167 unallocated_encoding(s);
14168 return;
14169 }
14170
14171 switch (opcode) {
14172 case 0:
14173 feature = dc_isar_feature(aa64_sha1, s);
14174 genfn = gen_helper_crypto_sha1h;
14175 break;
14176 case 1:
14177 feature = dc_isar_feature(aa64_sha1, s);
14178 genfn = gen_helper_crypto_sha1su1;
14179 break;
14180 case 2:
14181 feature = dc_isar_feature(aa64_sha256, s);
14182 genfn = gen_helper_crypto_sha256su0;
14183 break;
14184 default:
14185 unallocated_encoding(s);
14186 return;
14187 }
14188
14189 if (!feature) {
14190 unallocated_encoding(s);
14191 return;
14192 }
14193
14194 if (!fp_access_check(s)) {
14195 return;
14196 }
14197 gen_gvec_op2_ool(s, true, rd, rn, 0, genfn);
14198}
14199
/* RAX1 (SHA3), one 64-bit lane: d = n ^ rol64(m, 1). */
static void gen_rax1_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m)
{
    tcg_gen_rotli_i64(d, m, 1);
    tcg_gen_xor_i64(d, d, n);
}
14205
/* RAX1, vector form: per-element d = n ^ rol(m, 1) (vece is MO_64 here). */
static void gen_rax1_vec(unsigned vece, TCGv_vec d, TCGv_vec n, TCGv_vec m)
{
    tcg_gen_rotli_vec(vece, d, m, 1);
    tcg_gen_xor_vec(vece, d, d, n);
}
14211
/*
 * Expand RAX1 across a full vector register: rd = rn ^ rol64(rm, 1)
 * per 64-bit element.  Uses the vector rotate when the TCG backend
 * supports INDEX_op_rotli_vec, otherwise the i64 expansion, otherwise
 * the out-of-line helper.
 */
void gen_gvec_rax1(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_rotli_vec, 0 };
    static const GVecGen3 op = {
        .fni8 = gen_rax1_i64,
        .fniv = gen_rax1_vec,
        .opt_opc = vecop_list,
        .fno = gen_helper_crypto_rax1,
        .vece = MO_64,
    };
    /* Note: the vece argument is unused; RAX1 is always MO_64. */
    tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &op);
}
14225
14226
14227
14228
14229
14230
14231
/*
 * Crypto three-reg SHA512/SM group.  The O bit (insn[14]) selects the
 * SHA512/SHA3 half (O == 0) or the SM3/SM4 half (O == 1); each feature
 * is gated independently.
 */
static void disas_crypto_three_reg_sha512(DisasContext *s, uint32_t insn)
{
    int opcode = extract32(insn, 10, 2);
    int o = extract32(insn, 14, 1);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    bool feature;
    gen_helper_gvec_3 *oolfn = NULL;
    GVecGen3Fn *gvecfn = NULL;

    if (o == 0) {
        switch (opcode) {
        case 0: /* SHA512H */
            feature = dc_isar_feature(aa64_sha512, s);
            oolfn = gen_helper_crypto_sha512h;
            break;
        case 1: /* SHA512H2 */
            feature = dc_isar_feature(aa64_sha512, s);
            oolfn = gen_helper_crypto_sha512h2;
            break;
        case 2: /* SHA512SU1 */
            feature = dc_isar_feature(aa64_sha512, s);
            oolfn = gen_helper_crypto_sha512su1;
            break;
        case 3: /* RAX1 */
            feature = dc_isar_feature(aa64_sha3, s);
            gvecfn = gen_gvec_rax1;
            break;
        default:
            /* opcode is only 2 bits, so all values are covered above */
            g_assert_not_reached();
        }
    } else {
        switch (opcode) {
        case 0: /* SM3PARTW1 */
            feature = dc_isar_feature(aa64_sm3, s);
            oolfn = gen_helper_crypto_sm3partw1;
            break;
        case 1: /* SM3PARTW2 */
            feature = dc_isar_feature(aa64_sm3, s);
            oolfn = gen_helper_crypto_sm3partw2;
            break;
        case 2: /* SM4EKEY */
            feature = dc_isar_feature(aa64_sm4, s);
            oolfn = gen_helper_crypto_sm4ekey;
            break;
        default:
            unallocated_encoding(s);
            return;
        }
    }

    if (!feature) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (oolfn) {
        gen_gvec_op3_ool(s, true, rd, rn, rm, 0, oolfn);
    } else {
        /* RAX1 expands inline via the gvec expander. */
        gen_gvec_fn3(s, true, rd, rn, rm, gvecfn, MO_64);
    }
}
14299
14300
14301
14302
14303
14304
14305
14306static void disas_crypto_two_reg_sha512(DisasContext *s, uint32_t insn)
14307{
14308 int opcode = extract32(insn, 10, 2);
14309 int rn = extract32(insn, 5, 5);
14310 int rd = extract32(insn, 0, 5);
14311 bool feature;
14312
14313 switch (opcode) {
14314 case 0:
14315 feature = dc_isar_feature(aa64_sha512, s);
14316 break;
14317 case 1:
14318 feature = dc_isar_feature(aa64_sm4, s);
14319 break;
14320 default:
14321 unallocated_encoding(s);
14322 return;
14323 }
14324
14325 if (!feature) {
14326 unallocated_encoding(s);
14327 return;
14328 }
14329
14330 if (!fp_access_check(s)) {
14331 return;
14332 }
14333
14334 switch (opcode) {
14335 case 0:
14336 gen_gvec_op2_ool(s, true, rd, rn, 0, gen_helper_crypto_sha512su0);
14337 break;
14338 case 1:
14339 gen_gvec_op3_ool(s, true, rd, rd, rn, 0, gen_helper_crypto_sm4e);
14340 break;
14341 default:
14342 g_assert_not_reached();
14343 }
14344}
14345
14346
14347
14348
14349
14350
14351
/*
 * Crypto four-register group: EOR3 (op0 == 0) and BCAX (op0 == 1),
 * both SHA3 three-source bitwise ops over the full 128-bit vector,
 * and SM3SS1 (op0 == 2), which operates on the top 32-bit elements.
 */
static void disas_crypto_four_reg(DisasContext *s, uint32_t insn)
{
    int op0 = extract32(insn, 21, 2);
    int rm = extract32(insn, 16, 5);
    int ra = extract32(insn, 10, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    bool feature;

    switch (op0) {
    case 0: /* EOR3 */
    case 1: /* BCAX */
        feature = dc_isar_feature(aa64_sha3, s);
        break;
    case 2: /* SM3SS1 */
        feature = dc_isar_feature(aa64_sm3, s);
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!feature) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (op0 < 2) {
        /* EOR3 / BCAX: Vd = Vn ^ (Vm op Va), one pass per 64-bit lane. */
        TCGv_i64 tcg_op1, tcg_op2, tcg_op3, tcg_res[2];
        int pass;

        tcg_op1 = tcg_temp_new_i64();
        tcg_op2 = tcg_temp_new_i64();
        tcg_op3 = tcg_temp_new_i64();
        tcg_res[0] = tcg_temp_new_i64();
        tcg_res[1] = tcg_temp_new_i64();

        for (pass = 0; pass < 2; pass++) {
            read_vec_element(s, tcg_op1, rn, pass, MO_64);
            read_vec_element(s, tcg_op2, rm, pass, MO_64);
            read_vec_element(s, tcg_op3, ra, pass, MO_64);

            if (op0 == 0) {
                /* EOR3 */
                tcg_gen_xor_i64(tcg_res[pass], tcg_op2, tcg_op3);
            } else {
                /* BCAX */
                tcg_gen_andc_i64(tcg_res[pass], tcg_op2, tcg_op3);
            }
            tcg_gen_xor_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
        }
        /* Write back only after both passes, in case rd overlaps a source. */
        write_vec_element(s, tcg_res[0], rd, 0, MO_64);
        write_vec_element(s, tcg_res[1], rd, 1, MO_64);

        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2);
        tcg_temp_free_i64(tcg_op3);
        tcg_temp_free_i64(tcg_res[0]);
        tcg_temp_free_i64(tcg_res[1]);
    } else {
        /* SM3SS1: rotate/add chain over element 3 of each source. */
        TCGv_i32 tcg_op1, tcg_op2, tcg_op3, tcg_res, tcg_zero;

        tcg_op1 = tcg_temp_new_i32();
        tcg_op2 = tcg_temp_new_i32();
        tcg_op3 = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        tcg_zero = tcg_const_i32(0);

        read_vec_element_i32(s, tcg_op1, rn, 3, MO_32);
        read_vec_element_i32(s, tcg_op2, rm, 3, MO_32);
        read_vec_element_i32(s, tcg_op3, ra, 3, MO_32);

        tcg_gen_rotri_i32(tcg_res, tcg_op1, 20);
        tcg_gen_add_i32(tcg_res, tcg_res, tcg_op2);
        tcg_gen_add_i32(tcg_res, tcg_res, tcg_op3);
        tcg_gen_rotri_i32(tcg_res, tcg_res, 25);

        /* The result lands in element 3; the other lanes are zeroed. */
        write_vec_element_i32(s, tcg_zero, rd, 0, MO_32);
        write_vec_element_i32(s, tcg_zero, rd, 1, MO_32);
        write_vec_element_i32(s, tcg_zero, rd, 2, MO_32);
        write_vec_element_i32(s, tcg_res, rd, 3, MO_32);

        tcg_temp_free_i32(tcg_op1);
        tcg_temp_free_i32(tcg_op2);
        tcg_temp_free_i32(tcg_op3);
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_zero);
    }
}
14445
14446
14447
14448
14449
14450
14451
14452static void disas_crypto_xar(DisasContext *s, uint32_t insn)
14453{
14454 int rm = extract32(insn, 16, 5);
14455 int imm6 = extract32(insn, 10, 6);
14456 int rn = extract32(insn, 5, 5);
14457 int rd = extract32(insn, 0, 5);
14458
14459 if (!dc_isar_feature(aa64_sha3, s)) {
14460 unallocated_encoding(s);
14461 return;
14462 }
14463
14464 if (!fp_access_check(s)) {
14465 return;
14466 }
14467
14468 gen_gvec_xar(MO_64, vec_full_reg_offset(s, rd),
14469 vec_full_reg_offset(s, rn),
14470 vec_full_reg_offset(s, rm), imm6, 16,
14471 vec_full_reg_size(s));
14472}
14473
14474
14475
14476
14477
14478
14479
/*
 * Crypto three-reg imm2 group: SM3TT1A, SM3TT1B, SM3TT2A, SM3TT2B.
 * The 2-bit opcode selects the helper; imm2 (the source element index)
 * is passed through as the out-of-line helper's immediate data.
 */
static void disas_crypto_three_reg_imm2(DisasContext *s, uint32_t insn)
{
    static gen_helper_gvec_3 * const fns[4] = {
        gen_helper_crypto_sm3tt1a, gen_helper_crypto_sm3tt1b,
        gen_helper_crypto_sm3tt2a, gen_helper_crypto_sm3tt2b,
    };
    int opcode = extract32(insn, 10, 2);
    int imm2 = extract32(insn, 12, 2);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    /* All four SM3TT insns require FEAT_SM3. */
    if (!dc_isar_feature(aa64_sm3, s)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    gen_gvec_op3_ool(s, true, rd, rn, rm, imm2, fns[opcode]);
}
14503
14504
14505
14506
14507
14508
/*
 * Data processing - SIMD, inc Crypto.
 *
 * As the decode gets a little complex we use a table based approach
 * for this part of the decode.  Entries are matched in order, so a
 * pattern that is a subset of another must precede it (see the
 * mod_imm/shift_imm pair below).
 */
static const AArch64DecodeTable data_proc_simd[] = {
    /* pattern  ,  mask     ,  fn                        */
    { 0x0e200400, 0x9f200400, disas_simd_three_reg_same },
    { 0x0e008400, 0x9f208400, disas_simd_three_reg_same_extra },
    { 0x0e200000, 0x9f200c00, disas_simd_three_reg_diff },
    { 0x0e200800, 0x9f3e0c00, disas_simd_two_reg_misc },
    { 0x0e300800, 0x9f3e0c00, disas_simd_across_lanes },
    { 0x0e000400, 0x9fe08400, disas_simd_copy },
    { 0x0f000000, 0x9f000400, disas_simd_indexed }, /* vector indexed */
    /* simd_mod_imm decode is a subset of simd_shift_imm, so must precede it */
    { 0x0f000400, 0x9ff80400, disas_simd_mod_imm },
    { 0x0f000400, 0x9f800400, disas_simd_shift_imm },
    { 0x0e000000, 0xbf208c00, disas_simd_tb },
    { 0x0e000800, 0xbf208c00, disas_simd_zip_trn },
    { 0x2e000000, 0xbf208400, disas_simd_ext },
    { 0x5e200400, 0xdf200400, disas_simd_scalar_three_reg_same },
    { 0x5e008400, 0xdf208400, disas_simd_scalar_three_reg_same_extra },
    { 0x5e200000, 0xdf200c00, disas_simd_scalar_three_reg_diff },
    { 0x5e200800, 0xdf3e0c00, disas_simd_scalar_two_reg_misc },
    { 0x5e300800, 0xdf3e0c00, disas_simd_scalar_pairwise },
    { 0x5e000400, 0xdfe08400, disas_simd_scalar_copy },
    { 0x5f000000, 0xdf000400, disas_simd_indexed }, /* scalar indexed */
    { 0x5f000400, 0xdf800400, disas_simd_scalar_shift_imm },
    { 0x4e280800, 0xff3e0c00, disas_crypto_aes },
    { 0x5e000000, 0xff208c00, disas_crypto_three_reg_sha },
    { 0x5e280800, 0xff3e0c00, disas_crypto_two_reg_sha },
    { 0xce608000, 0xffe0b000, disas_crypto_three_reg_sha512 },
    { 0xcec08000, 0xfffff000, disas_crypto_two_reg_sha512 },
    { 0xce000000, 0xff808000, disas_crypto_four_reg },
    { 0xce800000, 0xffe00000, disas_crypto_xar },
    { 0xce408000, 0xffe0c000, disas_crypto_three_reg_imm2 },
    { 0x0e400400, 0x9f60c400, disas_simd_three_reg_same_fp16 },
    { 0x0e780800, 0x8f7e0c00, disas_simd_two_reg_misc_fp16 },
    { 0x5e400400, 0xdf60c400, disas_simd_scalar_three_reg_same_fp16 },
    { 0x00000000, 0x00000000, NULL } /* terminator */
};
14545
14546static void disas_data_proc_simd(DisasContext *s, uint32_t insn)
14547{
14548
14549
14550
14551
14552 AArch64DecodeFn *fn = lookup_disas_fn(&data_proc_simd[0], insn);
14553 if (fn) {
14554 fn(s, insn);
14555 } else {
14556 unallocated_encoding(s);
14557 }
14558}
14559
14560
14561static void disas_data_proc_simd_fp(DisasContext *s, uint32_t insn)
14562{
14563 if (extract32(insn, 28, 1) == 1 && extract32(insn, 30, 1) == 0) {
14564 disas_data_proc_fp(s, insn);
14565 } else {
14566
14567 disas_data_proc_simd(s, insn);
14568 }
14569}
14570
14571
14572
14573
14574
14575
14576
14577
/*
 * Return true if the page containing the TB's first insn is marked as
 * a guarded page (which enables BTI checks for branches into it).
 */
static bool is_guarded_page(CPUARMState *env, DisasContext *s)
{
    uint64_t addr = s->base.pc_first;
#ifdef CONFIG_USER_ONLY
    return page_get_flags(addr) & PAGE_BTI;
#else
    int mmu_idx = arm_to_core_mmu_idx(s->mmu_idx);
    unsigned int index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);

    /*
     * We test this immediately after reading an insn, which means
     * that any normal page must be in the TLB.  The only exception
     * would be for executing from flash or device memory, which
     * does not retain the TLB entry.
     *
     * FIXME: Assume false for those, for now.  We could use
     * arm_cpu_get_phys_page_attrs_debug to re-read the page
     * table entry even for that case.
     */
    return (tlb_hit(entry->addr_code, addr) &&
            arm_tlb_bti_gp(&env_tlb(env)->d[mmu_idx].iotlb[index].attrs));
#endif
}
14602
14603
14604
14605
14606
14607
14608
14609
14610
14611
14612
14613
14614
14615
14616
14617
14618
/*
 * Return true if this insn is a legal landing pad for an indirect
 * branch with the given BTYPE, i.e. executing it on a guarded page
 * will not raise a Branch Target exception.  "bt" is SCTLR_ELx.BT.
 */
static bool btype_destination_ok(uint32_t insn, bool bt, int btype)
{
    if ((insn & 0xfffff01fu) == 0xd503201fu) {
        /* HINT space */
        switch (extract32(insn, 5, 7)) {
        case 0b011001: /* PACIASP */
        case 0b011011: /* PACIBSP */
            /*
             * If SCTLR_ELx.BT, then PACI*SP are not compatible
             * with btype == 3.  Otherwise all btype are ok.
             */
            return !bt || btype != 3;
        case 0b100000: /* BTI */
            /* Not compatible with any btype.  */
            return false;
        case 0b100010: /* BTI c */
            /* Not compatible with btype == 3 */
            return btype != 3;
        case 0b100100: /* BTI j */
            /* Not compatible with btype == 2 */
            return btype != 2;
        case 0b100110: /* BTI jc */
            /* Compatible with any btype.  */
            return true;
        }
    } else {
        switch (insn & 0xffe0001fu) {
        case 0xd4200000u: /* BRK */
        case 0xd4400000u: /* HLT */
            /* Give priority to the breakpoint exception.  */
            return true;
        }
    }
    return false;
}
14654
/*
 * Set up the DisasContext for translating one TB, latching all the
 * translate-time state (MMU index, features, BTI/MTE/SVE flags,
 * single-step state) from the TB's flags and the CPU.
 */
static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
                                          CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;
    ARMCPU *arm_cpu = env_archcpu(env);
    CPUARMTBFlags tb_flags = arm_tbflags_from_tb(dc->base.tb);
    int bound, core_mmu_idx;

    dc->isar = &arm_cpu->isar;
    dc->condjmp = 0;

    dc->aarch64 = 1;
    /*
     * If we are coming from secure EL0 in a system with a 32-bit EL3, then
     * there is no secure EL1, so we route exceptions to EL3.
     */
    dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
                               !arm_el_is_aa64(env, 3);
    dc->thumb = 0;
    dc->sctlr_b = 0;
    dc->be_data = EX_TBFLAG_ANY(tb_flags, BE_DATA) ? MO_BE : MO_LE;
    dc->condexec_mask = 0;
    dc->condexec_cond = 0;
    core_mmu_idx = EX_TBFLAG_ANY(tb_flags, MMUIDX);
    dc->mmu_idx = core_to_aa64_mmu_idx(core_mmu_idx);
    dc->tbii = EX_TBFLAG_A64(tb_flags, TBII);
    dc->tbid = EX_TBFLAG_A64(tb_flags, TBID);
    dc->tcma = EX_TBFLAG_A64(tb_flags, TCMA);
    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
    dc->user = (dc->current_el == 0);
#endif
    dc->fp_excp_el = EX_TBFLAG_ANY(tb_flags, FPEXC_EL);
    dc->align_mem = EX_TBFLAG_ANY(tb_flags, ALIGN_MEM);
    dc->pstate_il = EX_TBFLAG_ANY(tb_flags, PSTATE__IL);
    dc->sve_excp_el = EX_TBFLAG_A64(tb_flags, SVEEXC_EL);
    dc->sve_len = (EX_TBFLAG_A64(tb_flags, ZCR_LEN) + 1) * 16;
    dc->pauth_active = EX_TBFLAG_A64(tb_flags, PAUTH_ACTIVE);
    dc->bt = EX_TBFLAG_A64(tb_flags, BT);
    dc->btype = EX_TBFLAG_A64(tb_flags, BTYPE);
    dc->unpriv = EX_TBFLAG_A64(tb_flags, UNPRIV);
    dc->ata = EX_TBFLAG_A64(tb_flags, ATA);
    dc->mte_active[0] = EX_TBFLAG_A64(tb_flags, MTE_ACTIVE);
    dc->mte_active[1] = EX_TBFLAG_A64(tb_flags, MTE0_ACTIVE);
    dc->vec_len = 0;
    dc->vec_stride = 0;
    dc->cp_regs = arm_cpu->cp_regs;
    dc->features = env->features;
    dc->dcz_blocksize = arm_cpu->dcz_blocksize;

#ifdef CONFIG_USER_ONLY
    /* User-mode relies on TBI being enabled -- NOTE(review): confirm. */
    tcg_debug_assert(dc->tbid & 1);
#endif

    /*
     * Single step state.  The code-generation logic here is:
     *  SS_ACTIVE == 0:
     *   generate code with no special handling for single-stepping (except
     *   that anything that can make us go to SS_ACTIVE == 1 must end the TB;
     *   this happens anyway because those changes are all system register or
     *   PSTATE writes).
     *  SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
     *   emit code for one insn
     *   emit code to clear PSTATE.SS
     *   emit code to generate software step exception for completed step
     *   end TB (as usual for having generated an exception)
     *  SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
     *   emit code to generate a software step exception
     *   end the TB
     */
    dc->ss_active = EX_TBFLAG_ANY(tb_flags, SS_ACTIVE);
    dc->pstate_ss = EX_TBFLAG_ANY(tb_flags, PSTATE__SS);
    dc->is_ldex = false;
    dc->debug_target_el = EX_TBFLAG_ANY(tb_flags, DEBUG_TARGET_EL);

    /* Bound the number of insns to execute to those left on the page. */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;

    /* If architectural single step active, limit to 1. */
    if (dc->ss_active) {
        bound = 1;
    }
    dc->base.max_insns = MIN(dc->base.max_insns, bound);

    init_tmp_a64_array(dc);
}
14741
/* No per-TB setup is required before translating AArch64 code. */
static void aarch64_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}
14745
/*
 * Mark the start of each guest insn: emit the insn_start op with the
 * pc and two zero operand slots, and remember the op so its extra
 * operands can be back-patched later during translation.
 */
static void aarch64_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(dc->base.pc_next, 0, 0);
    dc->insn_start = tcg_last_op();
}
14753
/*
 * Translate one AArch64 instruction.  The early-out checks are in
 * architectural priority order: pending single-step, PC alignment,
 * illegal execution state, Branch Target exception, then decode.
 */
static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *s = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;
    uint64_t pc = s->base.pc_next;
    uint32_t insn;

    if (s->ss_active && !s->pstate_ss) {
        /*
         * Singlestep state is Active-pending.
         * If we're in this state at the start of a TB then either
         *  a) we just took an exception to an EL which is being debugged
         *     and this is the first insn in the exception handler
         *  b) debug exceptions were masked and we just unmasked them
         *     without changing EL (eg by clearing PSTATE.D)
         * In either case we're going to take a swstep exception in the
         * "did not step an insn" case, and so the syndrome ISV and EX
         * bits should be set to 0.
         */
        assert(s->base.num_insns == 1);
        gen_swstep_exception(s, 0, 0);
        s->base.is_jmp = DISAS_NORETURN;
        s->base.pc_next = pc + 4;
        return;
    }

    if (pc & 3) {
        /*
         * PC alignment fault.  This has priority over the instruction
         * abort that we would receive from a translation fault via
         * arm_ldl_code.  This should only be possible after an indirect
         * branch, at the start of the TB.
         */
        assert(s->base.num_insns == 1);
        gen_helper_exception_pc_alignment(cpu_env, tcg_constant_tl(pc));
        s->base.is_jmp = DISAS_NORETURN;
        s->base.pc_next = QEMU_ALIGN_UP(pc, 4);
        return;
    }

    s->pc_curr = pc;
    insn = arm_ldl_code(env, &s->base, pc, s->sctlr_b);
    s->insn = insn;
    s->base.pc_next = pc + 4;

    s->fp_access_checked = false;
    s->sve_access_checked = false;

    if (s->pstate_il) {
        /*
         * Illegal execution state.  This has priority over BTI
         * exceptions, but comes after instruction abort exceptions.
         */
        gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
                           syn_illegalstate(), default_exception_el(s));
        return;
    }

    if (dc_isar_feature(aa64_bti, s)) {
        if (s->base.num_insns == 1) {
            /*
             * At the first insn of the TB, compute s->guarded_page.
             * We delayed computing this until successfully reading
             * the first insn of the TB, above.  This (mostly) ensures
             * that the softmmu tlb entry has been populated, and the
             * page table GP bit is available.
             */
            s->guarded_page = is_guarded_page(env, s);

            /* First insn can have btype set to non-zero.  */
            tcg_debug_assert(s->btype >= 0);

            /*
             * Note that the Branch Target Exception has fairly high
             * priority -- below debugging exceptions but above most
             * everything else.  This allows us to handle this now
             * instead of waiting until the insn is otherwise done.
             */
            if (s->btype != 0
                && s->guarded_page
                && !btype_destination_ok(insn, s->bt, s->btype)) {
                gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
                                   syn_btitrap(s->btype),
                                   default_exception_el(s));
                return;
            }
        } else {
            /* Not the first insn: btype must be 0.  */
            tcg_debug_assert(s->btype == 0);
        }
    }

    /* Decode on the major op group, insn bits [28:25]. */
    switch (extract32(insn, 25, 4)) {
    case 0x0: case 0x1: case 0x3: /* UNALLOCATED */
        unallocated_encoding(s);
        break;
    case 0x2: /* SVE encodings */
        if (!dc_isar_feature(aa64_sve, s) || !disas_sve(s, insn)) {
            unallocated_encoding(s);
        }
        break;
    case 0x8: case 0x9: /* Data processing - immediate */
        disas_data_proc_imm(s, insn);
        break;
    case 0xa: case 0xb: /* Branch, exception generation and system insns */
        disas_b_exc_sys(s, insn);
        break;
    case 0x4:
    case 0x6:
    case 0xc:
    case 0xe:      /* Loads and stores */
        disas_ldst(s, insn);
        break;
    case 0x5:
    case 0xd:      /* Data processing - register */
        disas_data_proc_reg(s, insn);
        break;
    case 0x7:
    case 0xf:      /* Data processing - SIMD and floating point */
        disas_data_proc_simd_fp(s, insn);
        break;
    default:
        assert(FALSE); /* all 15 cases should be handled above */
        break;
    }

    /* If we allocated any temporaries, free them here. */
    free_tmp_a64(s);

    /*
     * After execution of most insns, btype is reset to 0.
     * Note that we set btype == -1 when the insn sets btype.
     */
    if (s->btype > 0 && s->base.is_jmp != DISAS_NORETURN) {
        reset_btype(s);
    }

    translator_loop_temp_check(&s->base);
}
14897
/* Emit the end-of-TB code, dispatching on how translation stopped. */
static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (unlikely(dc->ss_active)) {
        /*
         * Note that this means single stepping WFI doesn't halt the CPU.
         * For conditional branch insns this is harmless unreachable code as
         * gen_goto_tb() has already handled emitting the debug exception
         * (and thus a tb-jump is not possible when singlestepping).
         */
        switch (dc->base.is_jmp) {
        default:
            gen_a64_set_pc_im(dc->base.pc_next);
            /* fall through */
        case DISAS_EXIT:
        case DISAS_JUMP:
            gen_step_complete_exception(dc);
            break;
        case DISAS_NORETURN:
            break;
        }
    } else {
        switch (dc->base.is_jmp) {
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
            gen_goto_tb(dc, 1, dc->base.pc_next);
            break;
        default:
        case DISAS_UPDATE_EXIT:
            gen_a64_set_pc_im(dc->base.pc_next);
            /* fall through */
        case DISAS_EXIT:
            tcg_gen_exit_tb(NULL, 0);
            break;
        case DISAS_UPDATE_NOCHAIN:
            gen_a64_set_pc_im(dc->base.pc_next);
            /* fall through */
        case DISAS_JUMP:
            tcg_gen_lookup_and_goto_ptr();
            break;
        case DISAS_NORETURN:
        case DISAS_SWI:
            break;
        case DISAS_WFE:
            gen_a64_set_pc_im(dc->base.pc_next);
            gen_helper_wfe(cpu_env);
            break;
        case DISAS_YIELD:
            gen_a64_set_pc_im(dc->base.pc_next);
            gen_helper_yield(cpu_env);
            break;
        case DISAS_WFI:
        {
            /*
             * This is a special case because we don't want to just
             * halt the CPU if trying to debug across a WFI.
             */
            TCGv_i32 tmp = tcg_const_i32(4); /* insn length, for the helper */

            gen_a64_set_pc_im(dc->base.pc_next);
            gen_helper_wfi(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            /*
             * The helper doesn't necessarily throw an exception, but we
             * must go back to the main loop to check for interrupts anyway.
             */
            tcg_gen_exit_tb(NULL, 0);
            break;
        }
        }
    }
}
14968
/* Log the disassembly of the translated block (the -d in_asm output). */
static void aarch64_tr_disas_log(const DisasContextBase *dcbase,
                                 CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
    log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size);
}
14977
/* Hooks invoked by the generic translator loop for AArch64 TBs. */
const TranslatorOps aarch64_translator_ops = {
    .init_disas_context = aarch64_tr_init_disas_context,
    .tb_start           = aarch64_tr_tb_start,
    .insn_start         = aarch64_tr_insn_start,
    .translate_insn     = aarch64_tr_translate_insn,
    .tb_stop            = aarch64_tr_tb_stop,
    .disas_log          = aarch64_tr_disas_log,
};
14986