1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19#include "qemu/osdep.h"
20
21#include "cpu.h"
22#include "exec/exec-all.h"
23#include "tcg/tcg-op.h"
24#include "tcg/tcg-op-gvec.h"
25#include "qemu/log.h"
26#include "arm_ldst.h"
27#include "translate.h"
28#include "internals.h"
29#include "qemu/host-utils.h"
30
31#include "semihosting/semihost.h"
32#include "exec/gen-icount.h"
33
34#include "exec/helper-proto.h"
35#include "exec/helper-gen.h"
36#include "exec/log.h"
37
38#include "translate-a64.h"
39#include "qemu/atomic128.h"
40
/* TCG globals aliasing the AArch64 state in CPUARMState. */
static TCGv_i64 cpu_X[32];
static TCGv_i64 cpu_pc;

/* High half of a 128-bit value tracked by load/store-exclusive. */
static TCGv_i64 cpu_exclusive_high;

/* Register names for the TCG globals; x30 is the link register, x31 is SP. */
static const char *regnames[] = {
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
    "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
    "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23",
    "x24", "x25", "x26", "x27", "x28", "x29", "lr", "sp"
};
53
/* Shift kinds for shifted-register operands; values match the encoding. */
enum a64_shift_type {
    A64_SHIFT_TYPE_LSL = 0,
    A64_SHIFT_TYPE_LSR = 1,
    A64_SHIFT_TYPE_ASR = 2,
    A64_SHIFT_TYPE_ROR = 3
};
60
61
62
63
/* Signature of a per-instruction-group decode/translate function. */
typedef void AArch64DecodeFn(DisasContext *s, uint32_t insn);

/* Table entry: dispatch to disas_fn when (insn & mask) == pattern. */
typedef struct AArch64DecodeTable {
    uint32_t pattern;
    uint32_t mask;
    AArch64DecodeFn *disas_fn;
} AArch64DecodeTable;
71
72
/*
 * Initialize the TCG globals (pc, x0..x30/sp, exclusive_high) that
 * mirror fields of CPUARMState.  Called once at translator setup.
 */
void a64_translate_init(void)
{
    int i;

    cpu_pc = tcg_global_mem_new_i64(cpu_env,
                                    offsetof(CPUARMState, pc),
                                    "pc");
    for (i = 0; i < 32; i++) {
        cpu_X[i] = tcg_global_mem_new_i64(cpu_env,
                                          offsetof(CPUARMState, xregs[i]),
                                          regnames[i]);
    }

    cpu_exclusive_high = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_high), "exclusive_high");
}
89
90
91
92
static int get_a64_user_mem_index(DisasContext *s)
{
    /*
     * Return the core mmu_idx to use for A64 unprivileged
     * (LDTR/STTR-style) load/store accesses: normally the current
     * mmu_idx, but downgraded to the matching EL0 regime when the
     * access is marked unprivileged.
     */
    ARMMMUIdx useridx = s->mmu_idx;

    if (s->unpriv) {
        /*
         * We have pre-computed the condition for AccType_UNPRIV;
         * therefore we should never get here with an mmu_idx for
         * which we do not know the corresponding user mmu_idx.
         */
        switch (useridx) {
        case ARMMMUIdx_E10_1:
        case ARMMMUIdx_E10_1_PAN:
            useridx = ARMMMUIdx_E10_0;
            break;
        case ARMMMUIdx_E20_2:
        case ARMMMUIdx_E20_2_PAN:
            useridx = ARMMMUIdx_E20_0;
            break;
        case ARMMMUIdx_SE10_1:
        case ARMMMUIdx_SE10_1_PAN:
            useridx = ARMMMUIdx_SE10_0;
            break;
        case ARMMMUIdx_SE20_2:
        case ARMMMUIdx_SE20_2_PAN:
            useridx = ARMMMUIdx_SE20_0;
            break;
        default:
            g_assert_not_reached();
        }
    }
    return arm_to_core_mmu_idx(useridx);
}
130
131static void reset_btype(DisasContext *s)
132{
133 if (s->btype != 0) {
134 TCGv_i32 zero = tcg_const_i32(0);
135 tcg_gen_st_i32(zero, cpu_env, offsetof(CPUARMState, btype));
136 tcg_temp_free_i32(zero);
137 s->btype = 0;
138 }
139}
140
/* Emit code to store a new PSTATE.BTYPE value. */
static void set_btype(DisasContext *s, int val)
{
    TCGv_i32 tcg_val;

    /* BTYPE is a 2-bit field, and 0 should be done with reset_btype.  */
    tcg_debug_assert(val >= 1 && val <= 3);

    tcg_val = tcg_const_i32(val);
    tcg_gen_st_i32(tcg_val, cpu_env, offsetof(CPUARMState, btype));
    tcg_temp_free_i32(tcg_val);
    /* -1 records that this insn wrote btype — presumably consulted at
     * tb end so the value is not cleared again; TODO confirm caller. */
    s->btype = -1;
}
153
/* Set the guest PC to an immediate value. */
void gen_a64_set_pc_im(uint64_t val)
{
    tcg_gen_movi_i64(cpu_pc, val);
}
158
159
160
161
162
163
164
165
166
167
168
169
170
171
/*
 * Apply Top Byte Ignore: produce in dst the address src with the top
 * byte resolved according to the 2-bit tbi field (bit 0 applies when
 * address bit 55 is clear, bit 1 when it is set).
 */
static void gen_top_byte_ignore(DisasContext *s, TCGv_i64 dst,
                                TCGv_i64 src, int tbi)
{
    if (tbi == 0) {
        /* Load unmodified address. */
        tcg_gen_mov_i64(dst, src);
    } else if (!regime_has_2_ranges(s->mmu_idx)) {
        /* Single address range: force the tag byte to all zero. */
        tcg_gen_extract_i64(dst, src, 0, 56);
    } else {
        /* Sign-extend from bit 55 to synthesize the "tbi applied" form. */
        tcg_gen_sextract_i64(dst, src, 0, 56);

        switch (tbi) {
        case 1:
            /* tbi0 but !tbi1: only use the extension if positive. */
            tcg_gen_and_i64(dst, dst, src);
            break;
        case 2:
            /* !tbi0 but tbi1: only use the extension if negative. */
            tcg_gen_or_i64(dst, dst, src);
            break;
        case 3:
            /* tbi0 and tbi1: always use the extension. */
            break;
        default:
            g_assert_not_reached();
        }
    }
}
202
static void gen_a64_set_pc(DisasContext *s, TCGv_i64 src)
{
    /*
     * If address tagging is enabled for instructions via the TCR TBI
     * bits, then loading an address into the PC will clear out any tag.
     */
    gen_top_byte_ignore(s, cpu_pc, src, s->tbii);
}
211
212
213
214
215
216
217
218
219
220
221
222
223
/*
 * Return a "clean" address for ADDR according to TBID.
 * This is always a fresh temporary, so the caller can increment it
 * independently of any dirty write-back address.
 */
TCGv_i64 clean_data_tbi(DisasContext *s, TCGv_i64 addr)
{
    TCGv_i64 clean = new_tmp_a64(s);
#ifdef CONFIG_USER_ONLY
    gen_top_byte_ignore(s, clean, addr, s->tbid);
#else
    /* NOTE(review): in system mode the address is passed through —
     * presumably TBI is handled by the softmmu translation; confirm. */
    tcg_gen_mov_i64(clean, addr);
#endif
    return clean;
}
234
235
/* Insert a zero allocation tag (clear bits [59:56]) of src into dst. */
static void gen_address_with_allocation_tag0(TCGv_i64 dst, TCGv_i64 src)
{
    tcg_gen_andi_i64(dst, src, ~MAKE_64BIT_MASK(56, 4));
}
240
241static void gen_probe_access(DisasContext *s, TCGv_i64 ptr,
242 MMUAccessType acc, int log2_size)
243{
244 TCGv_i32 t_acc = tcg_const_i32(acc);
245 TCGv_i32 t_idx = tcg_const_i32(get_mem_index(s));
246 TCGv_i32 t_size = tcg_const_i32(1 << log2_size);
247
248 gen_helper_probe_access(cpu_env, ptr, t_acc, t_idx, t_size);
249 tcg_temp_free_i32(t_acc);
250 tcg_temp_free_i32(t_idx);
251 tcg_temp_free_i32(t_size);
252}
253
254
255
256
257
258
259
/*
 * For MTE, check a single logical or atomic access of 1 << log2_size
 * bytes.  When tag checking applies, call the mte_check helper (which
 * returns the usable address); otherwise fall back to TBI cleaning.
 */
static TCGv_i64 gen_mte_check1_mmuidx(DisasContext *s, TCGv_i64 addr,
                                      bool is_write, bool tag_checked,
                                      int log2_size, bool is_unpriv,
                                      int core_idx)
{
    if (tag_checked && s->mte_active[is_unpriv]) {
        TCGv_i32 tcg_desc;
        TCGv_i64 ret;
        int desc = 0;

        /* Pack the access description into an MTEDESC word. */
        desc = FIELD_DP32(desc, MTEDESC, MIDX, core_idx);
        desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
        desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
        desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
        desc = FIELD_DP32(desc, MTEDESC, SIZEM1, (1 << log2_size) - 1);
        tcg_desc = tcg_const_i32(desc);

        ret = new_tmp_a64(s);
        gen_helper_mte_check(ret, cpu_env, tcg_desc, addr);
        tcg_temp_free_i32(tcg_desc);

        return ret;
    }
    return clean_data_tbi(s, addr);
}
285
/* MTE check for a single access using the current (privileged) mem index. */
TCGv_i64 gen_mte_check1(DisasContext *s, TCGv_i64 addr, bool is_write,
                        bool tag_checked, int log2_size)
{
    return gen_mte_check1_mmuidx(s, addr, is_write, tag_checked, log2_size,
                                 false, get_mem_index(s));
}
292
293
294
295
/*
 * For MTE, check multiple logical sequential accesses covering
 * 'size' bytes in total, starting at addr.
 */
TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write,
                        bool tag_checked, int size)
{
    if (tag_checked && s->mte_active[0]) {
        TCGv_i32 tcg_desc;
        TCGv_i64 ret;
        int desc = 0;

        /* Pack the access description into an MTEDESC word. */
        desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s));
        desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
        desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
        desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
        desc = FIELD_DP32(desc, MTEDESC, SIZEM1, size - 1);
        tcg_desc = tcg_const_i32(desc);

        ret = new_tmp_a64(s);
        gen_helper_mte_check(ret, cpu_env, tcg_desc, addr);
        tcg_temp_free_i32(tcg_desc);

        return ret;
    }
    return clean_data_tbi(s, addr);
}
319
/* As DisasCompare, but with the comparison value widened to 64 bits. */
typedef struct DisasCompare64 {
    TCGCond cond;
    TCGv_i64 value;
} DisasCompare64;
324
/* Build a 64-bit condition test for A64 condition code cc. */
static void a64_test_cc(DisasCompare64 *c64, int cc)
{
    DisasCompare c32;

    arm_test_cc(&c32, cc);

    /*
     * Sign-extend the 32-bit comparison value to 64 bits so that
     * signed comparisons against zero remain valid.
     */
    c64->cond = c32.cond;
    c64->value = tcg_temp_new_i64();
    tcg_gen_ext_i32_i64(c64->value, c32.value);

    arm_free_cc(&c32);
}
339
/* Release the temporary allocated by a64_test_cc(). */
static void a64_free_cc(DisasCompare64 *c64)
{
    tcg_temp_free_i64(c64->value);
}
344
345static void gen_exception_internal(int excp)
346{
347 TCGv_i32 tcg_excp = tcg_const_i32(excp);
348
349 assert(excp_is_internal(excp));
350 gen_helper_exception_internal(cpu_env, tcg_excp);
351 tcg_temp_free_i32(tcg_excp);
352}
353
/* Raise an internal exception with the PC set to pc; ends the TB. */
static void gen_exception_internal_insn(DisasContext *s, uint64_t pc, int excp)
{
    gen_a64_set_pc_im(pc);
    gen_exception_internal(excp);
    s->base.is_jmp = DISAS_NORETURN;
}
360
/* Raise a BKPT exception with the given syndrome at the current insn. */
static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syndrome)
{
    TCGv_i32 tcg_syn;

    gen_a64_set_pc_im(s->pc_curr);
    tcg_syn = tcg_const_i32(syndrome);
    gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
    tcg_temp_free_i32(tcg_syn);
    s->base.is_jmp = DISAS_NORETURN;
}
371
static void gen_step_complete_exception(DisasContext *s)
{
    /*
     * We just completed step of an insn.  Move from Active-not-pending
     * to Active-pending, and then take the software-step exception.
     * This corresponds to the (IMPDEF) choice to prioritize swstep
     * exceptions over asynchronous exceptions taken to an exception
     * level where debug is disabled, which keeps the syndrome state
     * correct without extra bookkeeping between step completion and
     * exception generation.
     */
    gen_ss_advance(s);
    gen_swstep_exception(s, 1, s->is_ldex);
    s->base.is_jmp = DISAS_NORETURN;
}
387
388static inline bool use_goto_tb(DisasContext *s, uint64_t dest)
389{
390 if (s->ss_active) {
391 return false;
392 }
393 return translator_use_goto_tb(&s->base, dest);
394}
395
/* End the TB with a (possibly chained) jump to guest address dest. */
static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest)
{
    if (use_goto_tb(s, dest)) {
        /* Direct chaining via exit slot n. */
        tcg_gen_goto_tb(n);
        gen_a64_set_pc_im(dest);
        tcg_gen_exit_tb(s->base.tb, n);
        s->base.is_jmp = DISAS_NORETURN;
    } else {
        gen_a64_set_pc_im(dest);
        if (s->ss_active) {
            /* Single-step: deliver the step-complete exception instead. */
            gen_step_complete_exception(s);
        } else {
            tcg_gen_lookup_and_goto_ptr();
            s->base.is_jmp = DISAS_NORETURN;
        }
    }
}
413
/* Reset the per-insn scratch-temp bookkeeping. */
static void init_tmp_a64_array(DisasContext *s)
{
#ifdef CONFIG_DEBUG_TCG
    /* Clear stale handles so debug builds catch use of freed temps. */
    memset(s->tmp_a64, 0, sizeof(s->tmp_a64));
#endif
    s->tmp_a64_count = 0;
}
421
422static void free_tmp_a64(DisasContext *s)
423{
424 int i;
425 for (i = 0; i < s->tmp_a64_count; i++) {
426 tcg_temp_free_i64(s->tmp_a64[i]);
427 }
428 init_tmp_a64_array(s);
429}
430
/* Allocate a new i64 temp, tracked for bulk release via free_tmp_a64(). */
TCGv_i64 new_tmp_a64(DisasContext *s)
{
    assert(s->tmp_a64_count < TMP_A64_MAX);
    return s->tmp_a64[s->tmp_a64_count++] = tcg_temp_new_i64();
}
436
/* As new_tmp_a64, but a local temp (value survives TCG branches). */
TCGv_i64 new_tmp_a64_local(DisasContext *s)
{
    assert(s->tmp_a64_count < TMP_A64_MAX);
    return s->tmp_a64[s->tmp_a64_count++] = tcg_temp_local_new_i64();
}
442
443TCGv_i64 new_tmp_a64_zero(DisasContext *s)
444{
445 TCGv_i64 t = new_tmp_a64(s);
446 tcg_gen_movi_i64(t, 0);
447 return t;
448}
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465TCGv_i64 cpu_reg(DisasContext *s, int reg)
466{
467 if (reg == 31) {
468 return new_tmp_a64_zero(s);
469 } else {
470 return cpu_X[reg];
471 }
472}
473
474
/* As cpu_reg, but register 31 maps to SP rather than ZR. */
TCGv_i64 cpu_reg_sp(DisasContext *s, int reg)
{
    return cpu_X[reg];
}
479
480
481
482
483
484TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf)
485{
486 TCGv_i64 v = new_tmp_a64(s);
487 if (reg != 31) {
488 if (sf) {
489 tcg_gen_mov_i64(v, cpu_X[reg]);
490 } else {
491 tcg_gen_ext32u_i64(v, cpu_X[reg]);
492 }
493 } else {
494 tcg_gen_movi_i64(v, 0);
495 }
496 return v;
497}
498
499TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf)
500{
501 TCGv_i64 v = new_tmp_a64(s);
502 if (sf) {
503 tcg_gen_mov_i64(v, cpu_X[reg]);
504 } else {
505 tcg_gen_ext32u_i64(v, cpu_X[reg]);
506 }
507 return v;
508}
509
510
511
512
513
514
/* CPUARMState offset of element 0 (size 'size') of FP/SIMD reg regno. */
static inline int fp_reg_offset(DisasContext *s, int regno, MemOp size)
{
    return vec_reg_offset(s, regno, 0, size);
}
519
520
/* CPUARMState offset of the high doubleword (bits [127:64]) of Qn. */
static inline int fp_reg_hi_offset(DisasContext *s, int regno)
{
    return vec_reg_offset(s, regno, 1, MO_64);
}
525
526
527
528
529
530
531
532static TCGv_i64 read_fp_dreg(DisasContext *s, int reg)
533{
534 TCGv_i64 v = tcg_temp_new_i64();
535
536 tcg_gen_ld_i64(v, cpu_env, fp_reg_offset(s, reg, MO_64));
537 return v;
538}
539
540static TCGv_i32 read_fp_sreg(DisasContext *s, int reg)
541{
542 TCGv_i32 v = tcg_temp_new_i32();
543
544 tcg_gen_ld_i32(v, cpu_env, fp_reg_offset(s, reg, MO_32));
545 return v;
546}
547
548static TCGv_i32 read_fp_hreg(DisasContext *s, int reg)
549{
550 TCGv_i32 v = tcg_temp_new_i32();
551
552 tcg_gen_ld16u_i32(v, cpu_env, fp_reg_offset(s, reg, MO_16));
553 return v;
554}
555
556
557
558
/*
 * Clear the bits above an N-bit vector, for N = (is_q ? 128 : 64).
 * If SVE is enabled the full vector may be wider than 128 bits.
 */
static void clear_vec_high(DisasContext *s, bool is_q, int rd)
{
    unsigned ofs = fp_reg_offset(s, rd, MO_64);
    unsigned vsz = vec_full_reg_size(s);

    /* Nop move, with the side effect of clearing the tail. */
    tcg_gen_gvec_mov(MO_64, ofs, ofs, is_q ? 16 : 8, vsz);
}
567
/* Write v to the low 64 bits of Vd, zeroing the rest of the vector. */
void write_fp_dreg(DisasContext *s, int reg, TCGv_i64 v)
{
    unsigned ofs = fp_reg_offset(s, reg, MO_64);

    tcg_gen_st_i64(v, cpu_env, ofs);
    clear_vec_high(s, false, reg);
}
575
576static void write_fp_sreg(DisasContext *s, int reg, TCGv_i32 v)
577{
578 TCGv_i64 tmp = tcg_temp_new_i64();
579
580 tcg_gen_extu_i32_i64(tmp, v);
581 write_fp_dreg(s, reg, tmp);
582 tcg_temp_free_i64(tmp);
583}
584
585
/* Expand a 2-operand AdvSIMD vector op via a gvec expander function. */
static void gen_gvec_fn2(DisasContext *s, bool is_q, int rd, int rn,
                         GVecGen2Fn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            is_q ? 16 : 8, vec_full_reg_size(s));
}
592
593
594
595
/* Expand a 2-operand + immediate AdvSIMD vector op via a gvec expander. */
static void gen_gvec_fn2i(DisasContext *s, bool is_q, int rd, int rn,
                          int64_t imm, GVecGen2iFn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            imm, is_q ? 16 : 8, vec_full_reg_size(s));
}
602
603
/* Expand a 3-operand AdvSIMD vector op via a gvec expander function. */
static void gen_gvec_fn3(DisasContext *s, bool is_q, int rd, int rn, int rm,
                         GVecGen3Fn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            vec_full_reg_offset(s, rm), is_q ? 16 : 8, vec_full_reg_size(s));
}
610
611
/* Expand a 4-operand AdvSIMD vector op via a gvec expander function. */
static void gen_gvec_fn4(DisasContext *s, bool is_q, int rd, int rn, int rm,
                         int rx, GVecGen4Fn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            vec_full_reg_offset(s, rm), vec_full_reg_offset(s, rx),
            is_q ? 16 : 8, vec_full_reg_size(s));
}
619
620
/* Expand a 2-operand AdvSIMD vector op using an out-of-line helper. */
static void gen_gvec_op2_ool(DisasContext *s, bool is_q, int rd,
                             int rn, int data, gen_helper_gvec_2 *fn)
{
    tcg_gen_gvec_2_ool(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
}
628
629
/* Expand a 3-operand AdvSIMD vector op using an out-of-line helper. */
static void gen_gvec_op3_ool(DisasContext *s, bool is_q, int rd,
                             int rn, int rm, int data, gen_helper_gvec_3 *fn)
{
    tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm),
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
}
638
639
640
641
/*
 * Expand a 3-operand + fpstatus pointer + simd data value operation
 * using an out-of-line helper.
 */
static void gen_gvec_op3_fpst(DisasContext *s, bool is_q, int rd, int rn,
                              int rm, bool is_fp16, int data,
                              gen_helper_gvec_3_ptr *fn)
{
    TCGv_ptr fpst = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
    tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm), fpst,
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
    tcg_temp_free_ptr(fpst);
}
653
654
/*
 * Expand a 3-operand operation that writes the saturation flag (QC)
 * using an out-of-line helper; passes &env->vfp.qc as the ptr arg.
 */
static void gen_gvec_op3_qc(DisasContext *s, bool is_q, int rd, int rn,
                            int rm, gen_helper_gvec_3_ptr *fn)
{
    TCGv_ptr qc_ptr = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(qc_ptr, cpu_env, offsetof(CPUARMState, vfp.qc));
    tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm), qc_ptr,
                       is_q ? 16 : 8, vec_full_reg_size(s), 0, fn);
    tcg_temp_free_ptr(qc_ptr);
}
667
668
/* Expand a 4-operand AdvSIMD vector op using an out-of-line helper. */
static void gen_gvec_op4_ool(DisasContext *s, bool is_q, int rd, int rn,
                             int rm, int ra, int data, gen_helper_gvec_4 *fn)
{
    tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm),
                       vec_full_reg_offset(s, ra),
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
}
678
679
680
681
682
/*
 * Expand a 4-operand + fpstatus pointer + simd data value operation
 * using an out-of-line helper.
 */
static void gen_gvec_op4_fpst(DisasContext *s, bool is_q, int rd, int rn,
                              int rm, int ra, bool is_fp16, int data,
                              gen_helper_gvec_4_ptr *fn)
{
    TCGv_ptr fpst = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
    tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm),
                       vec_full_reg_offset(s, ra), fpst,
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
    tcg_temp_free_ptr(fpst);
}
695
696
697
698
/*
 * Set ZF and NF from a 64-bit result.  The flags are kept as 32-bit
 * values: NF gets the high half (its sign bit is the result's sign),
 * and ZF gets low|high, which is nonzero iff the result is nonzero.
 */
static inline void gen_set_NZ64(TCGv_i64 result)
{
    tcg_gen_extr_i64_i32(cpu_ZF, cpu_NF, result);
    tcg_gen_or_i32(cpu_ZF, cpu_ZF, cpu_NF);
}
704
705
/* Set NZCV from the result of a logical op: NZ from result, C=V=0. */
static inline void gen_logic_CC(int sf, TCGv_i64 result)
{
    if (sf) {
        gen_set_NZ64(result);
    } else {
        tcg_gen_extrl_i64_i32(cpu_ZF, result);
        tcg_gen_mov_i32(cpu_NF, cpu_ZF);
    }
    tcg_gen_movi_i32(cpu_CF, 0);
    tcg_gen_movi_i32(cpu_VF, 0);
}
717
718
/* dest = T0 + T1; compute NZCV for the sf-sized (32/64-bit) addition. */
static void gen_add_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        TCGv_i64 result, flag, tmp;
        result = tcg_temp_new_i64();
        flag = tcg_temp_new_i64();
        tmp = tcg_temp_new_i64();

        /* 128-bit add of (t0,0)+(t1,0): 'flag' receives the carry-out. */
        tcg_gen_movi_i64(tmp, 0);
        tcg_gen_add2_i64(result, flag, t0, tmp, t1, tmp);

        tcg_gen_extrl_i64_i32(cpu_CF, flag);

        gen_set_NZ64(result);

        /* V = (result ^ t0) & ~(t0 ^ t1): overflow iff operands agree
         * in sign and the result disagrees; sign bit extracted below. */
        tcg_gen_xor_i64(flag, result, t0);
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_andc_i64(flag, flag, tmp);
        tcg_temp_free_i64(tmp);
        tcg_gen_extrh_i64_i32(cpu_VF, flag);

        tcg_gen_mov_i64(dest, result);
        tcg_temp_free_i64(result);
        tcg_temp_free_i64(flag);
    } else {
        /* 32-bit: do the add directly in the NF flag register. */
        TCGv_i32 t0_32 = tcg_temp_new_i32();
        TCGv_i32 t1_32 = tcg_temp_new_i32();
        TCGv_i32 tmp = tcg_temp_new_i32();

        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_extrl_i64_i32(t0_32, t0);
        tcg_gen_extrl_i64_i32(t1_32, t1);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, t1_32, tmp);
        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
        tcg_gen_extu_i32_i64(dest, cpu_NF);

        tcg_temp_free_i32(tmp);
        tcg_temp_free_i32(t0_32);
        tcg_temp_free_i32(t1_32);
    }
}
764
765
/* dest = T0 - T1; compute NZCV for the sf-sized (32/64-bit) subtract. */
static void gen_sub_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        /* 64 bit arithmetic */
        TCGv_i64 result, flag, tmp;

        result = tcg_temp_new_i64();
        flag = tcg_temp_new_i64();
        tcg_gen_sub_i64(result, t0, t1);

        gen_set_NZ64(result);

        /* C is set when no borrow occurs, i.e. t0 >= t1 unsigned. */
        tcg_gen_setcond_i64(TCG_COND_GEU, flag, t0, t1);
        tcg_gen_extrl_i64_i32(cpu_CF, flag);

        /* V = (result ^ t0) & (t0 ^ t1): overflow iff operands differ
         * in sign and the result's sign differs from t0's. */
        tcg_gen_xor_i64(flag, result, t0);
        tmp = tcg_temp_new_i64();
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_and_i64(flag, flag, tmp);
        tcg_temp_free_i64(tmp);
        tcg_gen_extrh_i64_i32(cpu_VF, flag);
        tcg_gen_mov_i64(dest, result);
        tcg_temp_free_i64(flag);
        tcg_temp_free_i64(result);
    } else {
        /* 32 bit arithmetic */
        TCGv_i32 t0_32 = tcg_temp_new_i32();
        TCGv_i32 t1_32 = tcg_temp_new_i32();
        TCGv_i32 tmp;

        tcg_gen_extrl_i64_i32(t0_32, t0);
        tcg_gen_extrl_i64_i32(t1_32, t1);
        tcg_gen_sub_i32(cpu_NF, t0_32, t1_32);
        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0_32, t1_32);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_temp_free_i32(t0_32);
        tcg_temp_free_i32(t1_32);
        tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
        tcg_temp_free_i32(tmp);
        tcg_gen_extu_i32_i64(dest, cpu_NF);
    }
}
811
812
813static void gen_adc(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
814{
815 TCGv_i64 flag = tcg_temp_new_i64();
816 tcg_gen_extu_i32_i64(flag, cpu_CF);
817 tcg_gen_add_i64(dest, t0, t1);
818 tcg_gen_add_i64(dest, dest, flag);
819 tcg_temp_free_i64(flag);
820
821 if (!sf) {
822 tcg_gen_ext32u_i64(dest, dest);
823 }
824}
825
826
/* dest = T0 + T1 + CF; compute NZCV for the sf-sized addition. */
static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        TCGv_i64 result, cf_64, vf_64, tmp;
        result = tcg_temp_new_i64();
        cf_64 = tcg_temp_new_i64();
        vf_64 = tcg_temp_new_i64();
        tmp = tcg_const_i64(0);

        /* Two wide adds accumulate the carry-out into cf_64. */
        tcg_gen_extu_i32_i64(cf_64, cpu_CF);
        tcg_gen_add2_i64(result, cf_64, t0, tmp, cf_64, tmp);
        tcg_gen_add2_i64(result, cf_64, result, cf_64, t1, tmp);
        tcg_gen_extrl_i64_i32(cpu_CF, cf_64);
        gen_set_NZ64(result);

        /* V = (result ^ t0) & ~(t0 ^ t1), as for gen_add_CC. */
        tcg_gen_xor_i64(vf_64, result, t0);
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_andc_i64(vf_64, vf_64, tmp);
        tcg_gen_extrh_i64_i32(cpu_VF, vf_64);

        tcg_gen_mov_i64(dest, result);

        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(vf_64);
        tcg_temp_free_i64(cf_64);
        tcg_temp_free_i64(result);
    } else {
        TCGv_i32 t0_32, t1_32, tmp;
        t0_32 = tcg_temp_new_i32();
        t1_32 = tcg_temp_new_i32();
        tmp = tcg_const_i32(0);

        tcg_gen_extrl_i64_i32(t0_32, t0);
        tcg_gen_extrl_i64_i32(t1_32, t1);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1_32, tmp);

        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
        tcg_gen_extu_i32_i64(dest, cpu_NF);

        tcg_temp_free_i32(tmp);
        tcg_temp_free_i32(t1_32);
        tcg_temp_free_i32(t0_32);
    }
}
875
876
877
878
879
880
881
882
/*
 * Store from a GPR to memory using mmu index 'memidx'.  If iss_valid,
 * also record ISS (instruction-specific syndrome) information for use
 * in the syndrome register should a data abort be taken.
 */
static void do_gpr_st_memidx(DisasContext *s, TCGv_i64 source,
                             TCGv_i64 tcg_addr, MemOp memop, int memidx,
                             bool iss_valid,
                             unsigned int iss_srt,
                             bool iss_sf, bool iss_ar)
{
    memop = finalize_memop(s, memop);
    tcg_gen_qemu_st_i64(source, tcg_addr, memidx, memop);

    if (iss_valid) {
        uint32_t syn;

        syn = syn_data_abort_with_iss(0,
                                      (memop & MO_SIZE),
                                      false,
                                      iss_srt,
                                      iss_sf,
                                      iss_ar,
                                      0, 0, 0, 0, 0, false);
        disas_set_insn_syndrome(s, syn);
    }
}
905
/* As do_gpr_st_memidx, using the current (privileged) memory index. */
static void do_gpr_st(DisasContext *s, TCGv_i64 source,
                      TCGv_i64 tcg_addr, MemOp memop,
                      bool iss_valid,
                      unsigned int iss_srt,
                      bool iss_sf, bool iss_ar)
{
    do_gpr_st_memidx(s, source, tcg_addr, memop, get_mem_index(s),
                     iss_valid, iss_srt, iss_sf, iss_ar);
}
915
916
917
918
/*
 * Load from memory into a GPR using mmu index 'memidx'.  'extend'
 * requests W-register semantics: a sign-extended sub-64-bit value is
 * re-zero-extended into the upper 32 bits.  Optionally records ISS
 * syndrome info, as for do_gpr_st_memidx.
 */
static void do_gpr_ld_memidx(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
                             MemOp memop, bool extend, int memidx,
                             bool iss_valid, unsigned int iss_srt,
                             bool iss_sf, bool iss_ar)
{
    memop = finalize_memop(s, memop);
    tcg_gen_qemu_ld_i64(dest, tcg_addr, memidx, memop);

    if (extend && (memop & MO_SIGN)) {
        /* Zero-extend the sign-extended 32-bit value to 64 bits. */
        g_assert((memop & MO_SIZE) <= MO_32);
        tcg_gen_ext32u_i64(dest, dest);
    }

    if (iss_valid) {
        uint32_t syn;

        syn = syn_data_abort_with_iss(0,
                                      (memop & MO_SIZE),
                                      (memop & MO_SIGN) != 0,
                                      iss_srt,
                                      iss_sf,
                                      iss_ar,
                                      0, 0, 0, 0, 0, false);
        disas_set_insn_syndrome(s, syn);
    }
}
945
/* As do_gpr_ld_memidx, using the current (privileged) memory index. */
static void do_gpr_ld(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
                      MemOp memop, bool extend,
                      bool iss_valid, unsigned int iss_srt,
                      bool iss_sf, bool iss_ar)
{
    do_gpr_ld_memidx(s, dest, tcg_addr, memop, extend, get_mem_index(s),
                     iss_valid, iss_srt, iss_sf, iss_ar);
}
954
955
956
957
/* Store from an FP/SIMD register to memory; size is log2(bytes). */
static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, int size)
{
    /* This always writes the bottom N bits of the register from memory. */
    TCGv_i64 tmplo = tcg_temp_new_i64();
    MemOp mop;

    tcg_gen_ld_i64(tmplo, cpu_env, fp_reg_offset(s, srcidx, MO_64));

    if (size < 4) {
        /* Up to 8 bytes: single store of the low doubleword. */
        mop = finalize_memop(s, size);
        tcg_gen_qemu_st_i64(tmplo, tcg_addr, get_mem_index(s), mop);
    } else {
        /*
         * 16 bytes: two 8-byte stores, ordered per endianness; only
         * the first carries the 16-byte alignment check.
         */
        bool be = s->be_data == MO_BE;
        TCGv_i64 tcg_hiaddr = tcg_temp_new_i64();
        TCGv_i64 tmphi = tcg_temp_new_i64();

        tcg_gen_ld_i64(tmphi, cpu_env, fp_reg_hi_offset(s, srcidx));

        mop = s->be_data | MO_Q;
        tcg_gen_qemu_st_i64(be ? tmphi : tmplo, tcg_addr, get_mem_index(s),
                            mop | (s->align_mem ? MO_ALIGN_16 : 0));
        tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
        tcg_gen_qemu_st_i64(be ? tmplo : tmphi, tcg_hiaddr,
                            get_mem_index(s), mop);

        tcg_temp_free_i64(tcg_hiaddr);
        tcg_temp_free_i64(tmphi);
    }

    tcg_temp_free_i64(tmplo);
}
989
990
991
992
/* Load from memory into an FP/SIMD register; size is log2(bytes). */
static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size)
{
    /* This always zero-extends and writes to a full 128-bit wide reg. */
    TCGv_i64 tmplo = tcg_temp_new_i64();
    TCGv_i64 tmphi = NULL;
    MemOp mop;

    if (size < 4) {
        /* Up to 8 bytes: single load into the low doubleword. */
        mop = finalize_memop(s, size);
        tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), mop);
    } else {
        /*
         * 16 bytes: two 8-byte loads, ordered per endianness; only
         * the first carries the 16-byte alignment check.
         */
        bool be = s->be_data == MO_BE;
        TCGv_i64 tcg_hiaddr;

        tmphi = tcg_temp_new_i64();
        tcg_hiaddr = tcg_temp_new_i64();

        mop = s->be_data | MO_Q;
        tcg_gen_qemu_ld_i64(be ? tmphi : tmplo, tcg_addr, get_mem_index(s),
                            mop | (s->align_mem ? MO_ALIGN_16 : 0));
        tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
        tcg_gen_qemu_ld_i64(be ? tmplo : tmphi, tcg_hiaddr,
                            get_mem_index(s), mop);
        tcg_temp_free_i64(tcg_hiaddr);
    }

    tcg_gen_st_i64(tmplo, cpu_env, fp_reg_offset(s, destidx, MO_64));
    tcg_temp_free_i64(tmplo);

    if (tmphi) {
        tcg_gen_st_i64(tmphi, cpu_env, fp_reg_hi_offset(s, destidx));
        tcg_temp_free_i64(tmphi);
    }
    /* Zero the rest of the vector beyond the loaded 64 or 128 bits. */
    clear_vec_high(s, tmphi != NULL, destidx);
}
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
/*
 * Read element 'element' of vector register 'srcidx' into tcg_dest,
 * extending (signed if MO_SIGN) an element narrower than 64 bits.
 */
static void read_vec_element(DisasContext *s, TCGv_i64 tcg_dest, int srcidx,
                             int element, MemOp memop)
{
    int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
    switch ((unsigned)memop) {
    case MO_8:
        tcg_gen_ld8u_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_ld16u_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_32:
        tcg_gen_ld32u_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_8|MO_SIGN:
        tcg_gen_ld8s_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16|MO_SIGN:
        tcg_gen_ld16s_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_32|MO_SIGN:
        tcg_gen_ld32s_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_64:
    case MO_64|MO_SIGN:
        tcg_gen_ld_i64(tcg_dest, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}
1073
/* As read_vec_element, but for an i32 destination (max element MO_32). */
static void read_vec_element_i32(DisasContext *s, TCGv_i32 tcg_dest, int srcidx,
                                 int element, MemOp memop)
{
    int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_ld8u_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_ld16u_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_8|MO_SIGN:
        tcg_gen_ld8s_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16|MO_SIGN:
        tcg_gen_ld16s_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_32:
    case MO_32|MO_SIGN:
        tcg_gen_ld_i32(tcg_dest, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}
1099
1100
/* Write the low (1 << (memop & MO_SIZE)) bytes of tcg_src to element. */
static void write_vec_element(DisasContext *s, TCGv_i64 tcg_src, int destidx,
                              int element, MemOp memop)
{
    int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_st8_i64(tcg_src, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_st16_i64(tcg_src, cpu_env, vect_off);
        break;
    case MO_32:
        tcg_gen_st32_i64(tcg_src, cpu_env, vect_off);
        break;
    case MO_64:
        tcg_gen_st_i64(tcg_src, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}
1122
/* As write_vec_element, but for an i32 source (max element MO_32). */
static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src,
                                  int destidx, int element, MemOp memop)
{
    int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_st8_i32(tcg_src, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_st16_i32(tcg_src, cpu_env, vect_off);
        break;
    case MO_32:
        tcg_gen_st_i32(tcg_src, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}
1141
1142
1143static void do_vec_st(DisasContext *s, int srcidx, int element,
1144 TCGv_i64 tcg_addr, MemOp mop)
1145{
1146 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
1147
1148 read_vec_element(s, tcg_tmp, srcidx, element, mop & MO_SIZE);
1149 tcg_gen_qemu_st_i64(tcg_tmp, tcg_addr, get_mem_index(s), mop);
1150
1151 tcg_temp_free_i64(tcg_tmp);
1152}
1153
1154
1155static void do_vec_ld(DisasContext *s, int destidx, int element,
1156 TCGv_i64 tcg_addr, MemOp mop)
1157{
1158 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
1159
1160 tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr, get_mem_index(s), mop);
1161 write_vec_element(s, tcg_tmp, destidx, element, mop & MO_SIZE);
1162
1163 tcg_temp_free_i64(tcg_tmp);
1164}
1165
1166
1167
1168
1169
1170
1171
1172
/*
 * Check that SIMD/FP access is enabled.  If not, emit the FP access
 * trap and return false; the caller must then generate no further
 * code for this instruction.  Asserts that at most one check is made
 * per instruction.
 */
static bool fp_access_check(DisasContext *s)
{
    if (s->fp_excp_el) {
        assert(!s->fp_access_checked);
        s->fp_access_checked = true;

        gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
                           syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
        return false;
    }
    s->fp_access_checked = true;
    return true;
}
1186
1187
1188
1189
/*
 * Check that SVE access is enabled.  If not, emit the SVE access trap
 * and return false.  On success this also performs the FP access
 * check, since SVE implies SIMD/FP.
 */
bool sve_access_check(DisasContext *s)
{
    if (s->sve_excp_el) {
        assert(!s->sve_access_checked);
        s->sve_access_checked = true;

        gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
                           syn_sve_access_trap(), s->sve_excp_el);
        return false;
    }
    s->sve_access_checked = true;
    return fp_access_check(s);
}
1203
1204
1205
1206
1207
1208
/*
 * Extended-register operand: extend tcg_in per 'option' (bits [1:0]
 * select width 8/16/32/64, bit 2 selects signed), then shift left by
 * 'shift' (0..4 per the encoding; not re-checked here).
 */
static void ext_and_shift_reg(TCGv_i64 tcg_out, TCGv_i64 tcg_in,
                              int option, unsigned int shift)
{
    int extsize = extract32(option, 0, 2);
    bool is_signed = extract32(option, 2, 1);

    if (is_signed) {
        switch (extsize) {
        case 0:
            tcg_gen_ext8s_i64(tcg_out, tcg_in);
            break;
        case 1:
            tcg_gen_ext16s_i64(tcg_out, tcg_in);
            break;
        case 2:
            tcg_gen_ext32s_i64(tcg_out, tcg_in);
            break;
        case 3:
            tcg_gen_mov_i64(tcg_out, tcg_in);
            break;
        }
    } else {
        switch (extsize) {
        case 0:
            tcg_gen_ext8u_i64(tcg_out, tcg_in);
            break;
        case 1:
            tcg_gen_ext16u_i64(tcg_out, tcg_in);
            break;
        case 2:
            tcg_gen_ext32u_i64(tcg_out, tcg_in);
            break;
        case 3:
            tcg_gen_mov_i64(tcg_out, tcg_in);
            break;
        }
    }

    if (shift) {
        tcg_gen_shli_i64(tcg_out, tcg_out, shift);
    }
}
1251
static inline void gen_check_sp_alignment(DisasContext *s)
{
    /*
     * The AArch64 architecture mandates that (if enabled via PSTATE
     * or SCTLR bits) there is a check that SP is 16-aligned on every
     * SP-relative load or store instruction.  This is not implemented
     * here, so this function is deliberately empty; it exists to mark
     * the places where such a check would be performed.
     */
}
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276static inline AArch64DecodeFn *lookup_disas_fn(const AArch64DecodeTable *table,
1277 uint32_t insn)
1278{
1279 const AArch64DecodeTable *tptr = table;
1280
1281 while (tptr->mask) {
1282 if ((insn & tptr->mask) == tptr->pattern) {
1283 return tptr->disas_fn;
1284 }
1285 tptr++;
1286 }
1287 return NULL;
1288}
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
/*
 * Unconditional branch (immediate): B / BL
 *   31  30       26 25                                  0
 * +----+-----------+-------------------------------------+
 * | op | 0 0 1 0 1 |                 imm26               |
 * +----+-----------+-------------------------------------+
 */
static void disas_uncond_b_imm(DisasContext *s, uint32_t insn)
{
    uint64_t addr = s->pc_curr + sextract32(insn, 0, 26) * 4;

    if (insn & (1U << 31)) {
        /* BL: Branch with link — save return address in X30. */
        tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next);
    }

    /* B: branch; BTYPE is cleared by any direct branch. */
    reset_btype(s);
    gen_goto_tb(s, 0, addr);
}
1317
1318
1319
1320
1321
1322
1323
/*
 * Compare and branch (immediate): CBZ / CBNZ
 *   31  30         25  24  23                  5 4      0
 * +----+-------------+----+---------------------+--------+
 * | sf | 0 1 1 0 1 0 | op |         imm19       |   Rt   |
 * +----+-------------+----+---------------------+--------+
 * op == 0: CBZ (branch if zero); op == 1: CBNZ.
 */
static void disas_comp_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int sf, op, rt;
    uint64_t addr;
    TCGLabel *label_match;
    TCGv_i64 tcg_cmp;

    sf = extract32(insn, 31, 1);
    op = extract32(insn, 24, 1);
    rt = extract32(insn, 0, 5);
    addr = s->pc_curr + sextract32(insn, 5, 19) * 4;

    tcg_cmp = read_cpu_reg(s, rt, sf);
    label_match = gen_new_label();

    reset_btype(s);
    tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
                        tcg_cmp, 0, label_match);

    /* Fall-through path first, then the taken-branch target. */
    gen_goto_tb(s, 0, s->base.pc_next);
    gen_set_label(label_match);
    gen_goto_tb(s, 1, addr);
}
1347
1348
1349
1350
1351
1352
1353
/*
 * Test and branch (immediate): TBZ / TBNZ
 *   31  30         25  24  23   19 18          5 4    0
 * +----+-------------+----+-------+-------------+------+
 * | b5 | 0 1 1 0 1 1 | op |  b40  |    imm14    |  Rt  |
 * +----+-------------+----+-------+-------------+------+
 * Bit position = b5:b40; op == 0: TBZ, op == 1: TBNZ.
 */
static void disas_test_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int bit_pos, op, rt;
    uint64_t addr;
    TCGLabel *label_match;
    TCGv_i64 tcg_cmp;

    bit_pos = (extract32(insn, 31, 1) << 5) | extract32(insn, 19, 5);
    op = extract32(insn, 24, 1);
    addr = s->pc_curr + sextract32(insn, 5, 14) * 4;
    rt = extract32(insn, 0, 5);

    tcg_cmp = tcg_temp_new_i64();
    tcg_gen_andi_i64(tcg_cmp, cpu_reg(s, rt), (1ULL << bit_pos));
    label_match = gen_new_label();

    reset_btype(s);
    tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
                        tcg_cmp, 0, label_match);
    tcg_temp_free_i64(tcg_cmp);
    /* Fall-through path first, then the taken-branch target. */
    gen_goto_tb(s, 0, s->base.pc_next);
    gen_set_label(label_match);
    gen_goto_tb(s, 1, addr);
}
1378
1379
1380
1381
1382
1383
1384
/*
 * Conditional branch (immediate): B.cond
 *   31           25  24  23                  5   4  3    0
 * +---------------+----+---------------------+----+------+
 * | 0 1 0 1 0 1 0 | o1 |         imm19       | o0 | cond |
 * +---------------+----+---------------------+----+------+
 * o0 and o1 must both be zero.
 */
static void disas_cond_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int cond;
    uint64_t addr;

    if ((insn & (1 << 4)) || (insn & (1 << 24))) {
        unallocated_encoding(s);
        return;
    }
    addr = s->pc_curr + sextract32(insn, 5, 19) * 4;
    cond = extract32(insn, 0, 4);

    reset_btype(s);
    if (cond < 0x0e) {
        /* Genuinely conditional branch. */
        TCGLabel *label_match = gen_new_label();
        arm_gen_test_cc(cond, label_match);
        gen_goto_tb(s, 0, s->base.pc_next);
        gen_set_label(label_match);
        gen_goto_tb(s, 1, addr);
    } else {
        /* 0xe and 0xf are both "always": treat as unconditional. */
        gen_goto_tb(s, 0, addr);
    }
}
1410
1411
1412static void handle_hint(DisasContext *s, uint32_t insn,
1413 unsigned int op1, unsigned int op2, unsigned int crm)
1414{
1415 unsigned int selector = crm << 3 | op2;
1416
1417 if (op1 != 3) {
1418 unallocated_encoding(s);
1419 return;
1420 }
1421
1422 switch (selector) {
1423 case 0b00000:
1424 break;
1425 case 0b00011:
1426 s->base.is_jmp = DISAS_WFI;
1427 break;
1428 case 0b00001:
1429
1430
1431
1432
1433
1434 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
1435 s->base.is_jmp = DISAS_YIELD;
1436 }
1437 break;
1438 case 0b00010:
1439 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
1440 s->base.is_jmp = DISAS_WFE;
1441 }
1442 break;
1443 case 0b00100:
1444 case 0b00101:
1445
1446 break;
1447 case 0b00111:
1448 if (s->pauth_active) {
1449 gen_helper_xpaci(cpu_X[30], cpu_env, cpu_X[30]);
1450 }
1451 break;
1452 case 0b01000:
1453 if (s->pauth_active) {
1454 gen_helper_pacia(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
1455 }
1456 break;
1457 case 0b01010:
1458 if (s->pauth_active) {
1459 gen_helper_pacib(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
1460 }
1461 break;
1462 case 0b01100:
1463 if (s->pauth_active) {
1464 gen_helper_autia(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
1465 }
1466 break;
1467 case 0b01110:
1468 if (s->pauth_active) {
1469 gen_helper_autib(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
1470 }
1471 break;
1472 case 0b11000:
1473 if (s->pauth_active) {
1474 gen_helper_pacia(cpu_X[30], cpu_env, cpu_X[30],
1475 new_tmp_a64_zero(s));
1476 }
1477 break;
1478 case 0b11001:
1479 if (s->pauth_active) {
1480 gen_helper_pacia(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
1481 }
1482 break;
1483 case 0b11010:
1484 if (s->pauth_active) {
1485 gen_helper_pacib(cpu_X[30], cpu_env, cpu_X[30],
1486 new_tmp_a64_zero(s));
1487 }
1488 break;
1489 case 0b11011:
1490 if (s->pauth_active) {
1491 gen_helper_pacib(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
1492 }
1493 break;
1494 case 0b11100:
1495 if (s->pauth_active) {
1496 gen_helper_autia(cpu_X[30], cpu_env, cpu_X[30],
1497 new_tmp_a64_zero(s));
1498 }
1499 break;
1500 case 0b11101:
1501 if (s->pauth_active) {
1502 gen_helper_autia(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
1503 }
1504 break;
1505 case 0b11110:
1506 if (s->pauth_active) {
1507 gen_helper_autib(cpu_X[30], cpu_env, cpu_X[30],
1508 new_tmp_a64_zero(s));
1509 }
1510 break;
1511 case 0b11111:
1512 if (s->pauth_active) {
1513 gen_helper_autib(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
1514 }
1515 break;
1516 default:
1517
1518 break;
1519 }
1520}
1521
/* CLREX: invalidate the local exclusive monitor by setting the tracked
 * exclusive address to -1, which can never match a real address.
 */
static void gen_clrex(DisasContext *s, uint32_t insn)
{
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
1526
1527
1528static void handle_sync(DisasContext *s, uint32_t insn,
1529 unsigned int op1, unsigned int op2, unsigned int crm)
1530{
1531 TCGBar bar;
1532
1533 if (op1 != 3) {
1534 unallocated_encoding(s);
1535 return;
1536 }
1537
1538 switch (op2) {
1539 case 2:
1540 gen_clrex(s, insn);
1541 return;
1542 case 4:
1543 case 5:
1544 switch (crm & 3) {
1545 case 1:
1546 bar = TCG_BAR_SC | TCG_MO_LD_LD | TCG_MO_LD_ST;
1547 break;
1548 case 2:
1549 bar = TCG_BAR_SC | TCG_MO_ST_ST;
1550 break;
1551 default:
1552 bar = TCG_BAR_SC | TCG_MO_ALL;
1553 break;
1554 }
1555 tcg_gen_mb(bar);
1556 return;
1557 case 6:
1558
1559
1560
1561
1562 reset_btype(s);
1563 gen_goto_tb(s, 0, s->base.pc_next);
1564 return;
1565
1566 case 7:
1567 if (crm != 0 || !dc_isar_feature(aa64_sb, s)) {
1568 goto do_unallocated;
1569 }
1570
1571
1572
1573
1574 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1575 gen_goto_tb(s, 0, s->base.pc_next);
1576 return;
1577
1578 default:
1579 do_unallocated:
1580 unallocated_encoding(s);
1581 return;
1582 }
1583}
1584
/* XAFLAG: convert the "external" (FPSCR-style) representation of the
 * flags back to the native AArch64 NZCV encoding.
 * Operates directly on the cached cpu_NF/ZF/CF/VF TCG globals.
 */
static void gen_xaflag(void)
{
    TCGv_i32 z = tcg_temp_new_i32();

    /* z = (ZF == 0), i.e. the architectural Z flag as 0/1. */
    tcg_gen_setcondi_i32(TCG_COND_EQ, z, cpu_ZF, 0);

    /*
     * N flag: (!C & !Z).  Derivation of the branch-free form:
     * (!C & !Z) << 31
     * (!(C | Z)) << 31
     * ~((C | Z) << 31)
     * ~-(C | Z)
     * (C | Z) - 1
     */
    tcg_gen_or_i32(cpu_NF, cpu_CF, z);
    tcg_gen_subi_i32(cpu_NF, cpu_NF, 1);

    /* Z flag: !(Z & C) */
    tcg_gen_and_i32(cpu_ZF, z, cpu_CF);
    tcg_gen_xori_i32(cpu_ZF, cpu_ZF, 1);

    /* V flag: (!C & Z), stored as -(Z & ~C) to put it in bit 31. */
    tcg_gen_andc_i32(cpu_VF, z, cpu_CF);
    tcg_gen_neg_i32(cpu_VF, cpu_VF);

    /* C flag: C | Z */
    tcg_gen_or_i32(cpu_CF, cpu_CF, z);

    tcg_temp_free_i32(z);
}
1614
/* AXFLAG: convert the native NZCV flags into the "external"
 * (FPSCR-style) representation: C := C & !V, Z := Z & !V, N := 0, V := 0.
 */
static void gen_axflag(void)
{
    /* Replicate V's sign bit across the word so it can be used as a mask. */
    tcg_gen_sari_i32(cpu_VF, cpu_VF, 31);         /* V ? -1 : 0 */
    tcg_gen_andc_i32(cpu_CF, cpu_CF, cpu_VF);     /* C & !V */

    /* !(Z | V) -> !Z & !V; ZF is "zero means set", so clearing on V works. */
    tcg_gen_andc_i32(cpu_ZF, cpu_ZF, cpu_VF);

    tcg_gen_movi_i32(cpu_NF, 0);
    tcg_gen_movi_i32(cpu_VF, 0);
}
1626
1627
1628static void handle_msr_i(DisasContext *s, uint32_t insn,
1629 unsigned int op1, unsigned int op2, unsigned int crm)
1630{
1631 TCGv_i32 t1;
1632 int op = op1 << 3 | op2;
1633
1634
1635 s->base.is_jmp = DISAS_TOO_MANY;
1636
1637 switch (op) {
1638 case 0x00:
1639 if (crm != 0 || !dc_isar_feature(aa64_condm_4, s)) {
1640 goto do_unallocated;
1641 }
1642 tcg_gen_xori_i32(cpu_CF, cpu_CF, 1);
1643 s->base.is_jmp = DISAS_NEXT;
1644 break;
1645
1646 case 0x01:
1647 if (crm != 0 || !dc_isar_feature(aa64_condm_5, s)) {
1648 goto do_unallocated;
1649 }
1650 gen_xaflag();
1651 s->base.is_jmp = DISAS_NEXT;
1652 break;
1653
1654 case 0x02:
1655 if (crm != 0 || !dc_isar_feature(aa64_condm_5, s)) {
1656 goto do_unallocated;
1657 }
1658 gen_axflag();
1659 s->base.is_jmp = DISAS_NEXT;
1660 break;
1661
1662 case 0x03:
1663 if (!dc_isar_feature(aa64_uao, s) || s->current_el == 0) {
1664 goto do_unallocated;
1665 }
1666 if (crm & 1) {
1667 set_pstate_bits(PSTATE_UAO);
1668 } else {
1669 clear_pstate_bits(PSTATE_UAO);
1670 }
1671 t1 = tcg_const_i32(s->current_el);
1672 gen_helper_rebuild_hflags_a64(cpu_env, t1);
1673 tcg_temp_free_i32(t1);
1674 break;
1675
1676 case 0x04:
1677 if (!dc_isar_feature(aa64_pan, s) || s->current_el == 0) {
1678 goto do_unallocated;
1679 }
1680 if (crm & 1) {
1681 set_pstate_bits(PSTATE_PAN);
1682 } else {
1683 clear_pstate_bits(PSTATE_PAN);
1684 }
1685 t1 = tcg_const_i32(s->current_el);
1686 gen_helper_rebuild_hflags_a64(cpu_env, t1);
1687 tcg_temp_free_i32(t1);
1688 break;
1689
1690 case 0x05:
1691 if (s->current_el == 0) {
1692 goto do_unallocated;
1693 }
1694 t1 = tcg_const_i32(crm & PSTATE_SP);
1695 gen_helper_msr_i_spsel(cpu_env, t1);
1696 tcg_temp_free_i32(t1);
1697 break;
1698
1699 case 0x19:
1700 if (!dc_isar_feature(aa64_ssbs, s)) {
1701 goto do_unallocated;
1702 }
1703 if (crm & 1) {
1704 set_pstate_bits(PSTATE_SSBS);
1705 } else {
1706 clear_pstate_bits(PSTATE_SSBS);
1707 }
1708
1709 break;
1710
1711 case 0x1a:
1712 if (!dc_isar_feature(aa64_dit, s)) {
1713 goto do_unallocated;
1714 }
1715 if (crm & 1) {
1716 set_pstate_bits(PSTATE_DIT);
1717 } else {
1718 clear_pstate_bits(PSTATE_DIT);
1719 }
1720
1721 break;
1722
1723 case 0x1e:
1724 t1 = tcg_const_i32(crm);
1725 gen_helper_msr_i_daifset(cpu_env, t1);
1726 tcg_temp_free_i32(t1);
1727 break;
1728
1729 case 0x1f:
1730 t1 = tcg_const_i32(crm);
1731 gen_helper_msr_i_daifclear(cpu_env, t1);
1732 tcg_temp_free_i32(t1);
1733
1734 s->base.is_jmp = DISAS_UPDATE_EXIT;
1735 break;
1736
1737 case 0x1c:
1738 if (dc_isar_feature(aa64_mte, s)) {
1739
1740 if (crm & 1) {
1741 set_pstate_bits(PSTATE_TCO);
1742 } else {
1743 clear_pstate_bits(PSTATE_TCO);
1744 }
1745 t1 = tcg_const_i32(s->current_el);
1746 gen_helper_rebuild_hflags_a64(cpu_env, t1);
1747 tcg_temp_free_i32(t1);
1748
1749 s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
1750 } else if (dc_isar_feature(aa64_mte_insn_reg, s)) {
1751
1752 s->base.is_jmp = DISAS_NEXT;
1753 } else {
1754 goto do_unallocated;
1755 }
1756 break;
1757
1758 default:
1759 do_unallocated:
1760 unallocated_encoding(s);
1761 return;
1762 }
1763}
1764
/* Assemble the architectural NZCV value (bits 31..28) from the
 * cached flag variables and write it, zero-extended, into tcg_rt.
 */
static void gen_get_nzcv(TCGv_i64 tcg_rt)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    TCGv_i32 nzcv = tcg_temp_new_i32();

    /* build bit 31, N: cpu_NF already holds N in its sign bit. */
    tcg_gen_andi_i32(nzcv, cpu_NF, (1U << 31));
    /* build bit 30, Z: cpu_ZF is "zero means Z set". */
    tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_ZF, 0);
    tcg_gen_deposit_i32(nzcv, nzcv, tmp, 30, 1);
    /* build bit 29, C: cpu_CF holds C as 0/1. */
    tcg_gen_deposit_i32(nzcv, nzcv, cpu_CF, 29, 1);
    /* build bit 28, V: cpu_VF holds V in its sign bit. */
    tcg_gen_shri_i32(tmp, cpu_VF, 31);
    tcg_gen_deposit_i32(nzcv, nzcv, tmp, 28, 1);
    /* generate result */
    tcg_gen_extu_i32_i64(tcg_rt, nzcv);

    tcg_temp_free_i32(nzcv);
    tcg_temp_free_i32(tmp);
}
1786
/* Unpack an architectural NZCV value (bits 31..28 of tcg_rt) into the
 * cached per-flag variables cpu_NF/ZF/CF/VF.
 */
static void gen_set_nzcv(TCGv_i64 tcg_rt)
{
    TCGv_i32 nzcv = tcg_temp_new_i32();

    /* take NZCV from R[t] */
    tcg_gen_extrl_i64_i32(nzcv, tcg_rt);

    /* bit 31, N: NF keeps N in its sign bit. */
    tcg_gen_andi_i32(cpu_NF, nzcv, (1U << 31));
    /* bit 30, Z: ZF uses "zero means Z set", so invert the sense. */
    tcg_gen_andi_i32(cpu_ZF, nzcv, (1 << 30));
    tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_ZF, cpu_ZF, 0);
    /* bit 29, C: CF holds C as 0/1. */
    tcg_gen_andi_i32(cpu_CF, nzcv, (1 << 29));
    tcg_gen_shri_i32(cpu_CF, cpu_CF, 29);
    /* bit 28, V: shift bit 28 up to the sign bit for VF. */
    tcg_gen_andi_i32(cpu_VF, nzcv, (1 << 28));
    tcg_gen_shli_i32(cpu_VF, cpu_VF, 3);
    tcg_temp_free_i32(nzcv);
}
1807
1808
1809
1810
1811
1812
1813
1814
1815static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
1816 unsigned int op0, unsigned int op1, unsigned int op2,
1817 unsigned int crn, unsigned int crm, unsigned int rt)
1818{
1819 const ARMCPRegInfo *ri;
1820 TCGv_i64 tcg_rt;
1821
1822 ri = get_arm_cp_reginfo(s->cp_regs,
1823 ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP,
1824 crn, crm, op0, op1, op2));
1825
1826 if (!ri) {
1827
1828
1829
1830 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch64 "
1831 "system register op0:%d op1:%d crn:%d crm:%d op2:%d\n",
1832 isread ? "read" : "write", op0, op1, crn, crm, op2);
1833 unallocated_encoding(s);
1834 return;
1835 }
1836
1837
1838 if (!cp_access_ok(s->current_el, ri, isread)) {
1839 unallocated_encoding(s);
1840 return;
1841 }
1842
1843 if (ri->accessfn) {
1844
1845
1846
1847 TCGv_ptr tmpptr;
1848 TCGv_i32 tcg_syn, tcg_isread;
1849 uint32_t syndrome;
1850
1851 gen_a64_set_pc_im(s->pc_curr);
1852 tmpptr = tcg_const_ptr(ri);
1853 syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
1854 tcg_syn = tcg_const_i32(syndrome);
1855 tcg_isread = tcg_const_i32(isread);
1856 gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn, tcg_isread);
1857 tcg_temp_free_ptr(tmpptr);
1858 tcg_temp_free_i32(tcg_syn);
1859 tcg_temp_free_i32(tcg_isread);
1860 } else if (ri->type & ARM_CP_RAISES_EXC) {
1861
1862
1863
1864
1865 gen_a64_set_pc_im(s->pc_curr);
1866 }
1867
1868
1869 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
1870 case ARM_CP_NOP:
1871 return;
1872 case ARM_CP_NZCV:
1873 tcg_rt = cpu_reg(s, rt);
1874 if (isread) {
1875 gen_get_nzcv(tcg_rt);
1876 } else {
1877 gen_set_nzcv(tcg_rt);
1878 }
1879 return;
1880 case ARM_CP_CURRENTEL:
1881
1882
1883
1884 tcg_rt = cpu_reg(s, rt);
1885 tcg_gen_movi_i64(tcg_rt, s->current_el << 2);
1886 return;
1887 case ARM_CP_DC_ZVA:
1888
1889 if (s->mte_active[0]) {
1890 TCGv_i32 t_desc;
1891 int desc = 0;
1892
1893 desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s));
1894 desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
1895 desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
1896 t_desc = tcg_const_i32(desc);
1897
1898 tcg_rt = new_tmp_a64(s);
1899 gen_helper_mte_check_zva(tcg_rt, cpu_env, t_desc, cpu_reg(s, rt));
1900 tcg_temp_free_i32(t_desc);
1901 } else {
1902 tcg_rt = clean_data_tbi(s, cpu_reg(s, rt));
1903 }
1904 gen_helper_dc_zva(cpu_env, tcg_rt);
1905 return;
1906 case ARM_CP_DC_GVA:
1907 {
1908 TCGv_i64 clean_addr, tag;
1909
1910
1911
1912
1913
1914 tcg_rt = cpu_reg(s, rt);
1915 clean_addr = clean_data_tbi(s, tcg_rt);
1916 gen_probe_access(s, clean_addr, MMU_DATA_STORE, MO_8);
1917
1918 if (s->ata) {
1919
1920 tag = tcg_temp_new_i64();
1921 tcg_gen_shri_i64(tag, tcg_rt, 56);
1922 gen_helper_stzgm_tags(cpu_env, clean_addr, tag);
1923 tcg_temp_free_i64(tag);
1924 }
1925 }
1926 return;
1927 case ARM_CP_DC_GZVA:
1928 {
1929 TCGv_i64 clean_addr, tag;
1930
1931
1932 tcg_rt = cpu_reg(s, rt);
1933 clean_addr = clean_data_tbi(s, tcg_rt);
1934 gen_helper_dc_zva(cpu_env, clean_addr);
1935
1936 if (s->ata) {
1937
1938 tag = tcg_temp_new_i64();
1939 tcg_gen_shri_i64(tag, tcg_rt, 56);
1940 gen_helper_stzgm_tags(cpu_env, clean_addr, tag);
1941 tcg_temp_free_i64(tag);
1942 }
1943 }
1944 return;
1945 default:
1946 break;
1947 }
1948 if ((ri->type & ARM_CP_FPU) && !fp_access_check(s)) {
1949 return;
1950 } else if ((ri->type & ARM_CP_SVE) && !sve_access_check(s)) {
1951 return;
1952 }
1953
1954 if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
1955 gen_io_start();
1956 }
1957
1958 tcg_rt = cpu_reg(s, rt);
1959
1960 if (isread) {
1961 if (ri->type & ARM_CP_CONST) {
1962 tcg_gen_movi_i64(tcg_rt, ri->resetvalue);
1963 } else if (ri->readfn) {
1964 TCGv_ptr tmpptr;
1965 tmpptr = tcg_const_ptr(ri);
1966 gen_helper_get_cp_reg64(tcg_rt, cpu_env, tmpptr);
1967 tcg_temp_free_ptr(tmpptr);
1968 } else {
1969 tcg_gen_ld_i64(tcg_rt, cpu_env, ri->fieldoffset);
1970 }
1971 } else {
1972 if (ri->type & ARM_CP_CONST) {
1973
1974 return;
1975 } else if (ri->writefn) {
1976 TCGv_ptr tmpptr;
1977 tmpptr = tcg_const_ptr(ri);
1978 gen_helper_set_cp_reg64(cpu_env, tmpptr, tcg_rt);
1979 tcg_temp_free_ptr(tmpptr);
1980 } else {
1981 tcg_gen_st_i64(tcg_rt, cpu_env, ri->fieldoffset);
1982 }
1983 }
1984
1985 if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
1986
1987 s->base.is_jmp = DISAS_UPDATE_EXIT;
1988 }
1989 if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
1990
1991
1992
1993
1994 TCGv_i32 tcg_el = tcg_const_i32(s->current_el);
1995 gen_helper_rebuild_hflags_a64(cpu_env, tcg_el);
1996 tcg_temp_free_i32(tcg_el);
1997
1998
1999
2000
2001
2002 s->base.is_jmp = DISAS_UPDATE_EXIT;
2003 }
2004}
2005
2006
2007
2008
2009
2010
2011
2012static void disas_system(DisasContext *s, uint32_t insn)
2013{
2014 unsigned int l, op0, op1, crn, crm, op2, rt;
2015 l = extract32(insn, 21, 1);
2016 op0 = extract32(insn, 19, 2);
2017 op1 = extract32(insn, 16, 3);
2018 crn = extract32(insn, 12, 4);
2019 crm = extract32(insn, 8, 4);
2020 op2 = extract32(insn, 5, 3);
2021 rt = extract32(insn, 0, 5);
2022
2023 if (op0 == 0) {
2024 if (l || rt != 31) {
2025 unallocated_encoding(s);
2026 return;
2027 }
2028 switch (crn) {
2029 case 2:
2030 handle_hint(s, insn, op1, op2, crm);
2031 break;
2032 case 3:
2033 handle_sync(s, insn, op1, op2, crm);
2034 break;
2035 case 4:
2036 handle_msr_i(s, insn, op1, op2, crm);
2037 break;
2038 default:
2039 unallocated_encoding(s);
2040 break;
2041 }
2042 return;
2043 }
2044 handle_sys(s, insn, l, op0, op1, op2, crn, crm, rt);
2045}
2046
2047
2048
2049
2050
2051
2052
2053
2054static void disas_exc(DisasContext *s, uint32_t insn)
2055{
2056 int opc = extract32(insn, 21, 3);
2057 int op2_ll = extract32(insn, 0, 5);
2058 int imm16 = extract32(insn, 5, 16);
2059 TCGv_i32 tmp;
2060
2061 switch (opc) {
2062 case 0:
2063
2064
2065
2066
2067
2068 switch (op2_ll) {
2069 case 1:
2070 gen_ss_advance(s);
2071 gen_exception_insn(s, s->base.pc_next, EXCP_SWI,
2072 syn_aa64_svc(imm16), default_exception_el(s));
2073 break;
2074 case 2:
2075 if (s->current_el == 0) {
2076 unallocated_encoding(s);
2077 break;
2078 }
2079
2080
2081
2082 gen_a64_set_pc_im(s->pc_curr);
2083 gen_helper_pre_hvc(cpu_env);
2084 gen_ss_advance(s);
2085 gen_exception_insn(s, s->base.pc_next, EXCP_HVC,
2086 syn_aa64_hvc(imm16), 2);
2087 break;
2088 case 3:
2089 if (s->current_el == 0) {
2090 unallocated_encoding(s);
2091 break;
2092 }
2093 gen_a64_set_pc_im(s->pc_curr);
2094 tmp = tcg_const_i32(syn_aa64_smc(imm16));
2095 gen_helper_pre_smc(cpu_env, tmp);
2096 tcg_temp_free_i32(tmp);
2097 gen_ss_advance(s);
2098 gen_exception_insn(s, s->base.pc_next, EXCP_SMC,
2099 syn_aa64_smc(imm16), 3);
2100 break;
2101 default:
2102 unallocated_encoding(s);
2103 break;
2104 }
2105 break;
2106 case 1:
2107 if (op2_ll != 0) {
2108 unallocated_encoding(s);
2109 break;
2110 }
2111
2112 gen_exception_bkpt_insn(s, syn_aa64_bkpt(imm16));
2113 break;
2114 case 2:
2115 if (op2_ll != 0) {
2116 unallocated_encoding(s);
2117 break;
2118 }
2119
2120
2121
2122
2123
2124
2125 if (semihosting_enabled() && imm16 == 0xf000) {
2126#ifndef CONFIG_USER_ONLY
2127
2128
2129
2130
2131 if (s->current_el == 0) {
2132 unsupported_encoding(s, insn);
2133 break;
2134 }
2135#endif
2136 gen_exception_internal_insn(s, s->pc_curr, EXCP_SEMIHOST);
2137 } else {
2138 unsupported_encoding(s, insn);
2139 }
2140 break;
2141 case 5:
2142 if (op2_ll < 1 || op2_ll > 3) {
2143 unallocated_encoding(s);
2144 break;
2145 }
2146
2147 unsupported_encoding(s, insn);
2148 break;
2149 default:
2150 unallocated_encoding(s);
2151 break;
2152 }
2153}
2154
2155
2156
2157
2158
2159
2160
2161static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
2162{
2163 unsigned int opc, op2, op3, rn, op4;
2164 unsigned btype_mod = 2;
2165 TCGv_i64 dst;
2166 TCGv_i64 modifier;
2167
2168 opc = extract32(insn, 21, 4);
2169 op2 = extract32(insn, 16, 5);
2170 op3 = extract32(insn, 10, 6);
2171 rn = extract32(insn, 5, 5);
2172 op4 = extract32(insn, 0, 5);
2173
2174 if (op2 != 0x1f) {
2175 goto do_unallocated;
2176 }
2177
2178 switch (opc) {
2179 case 0:
2180 case 1:
2181 case 2:
2182 btype_mod = opc;
2183 switch (op3) {
2184 case 0:
2185
2186 if (op4 != 0) {
2187 goto do_unallocated;
2188 }
2189 dst = cpu_reg(s, rn);
2190 break;
2191
2192 case 2:
2193 case 3:
2194 if (!dc_isar_feature(aa64_pauth, s)) {
2195 goto do_unallocated;
2196 }
2197 if (opc == 2) {
2198
2199 if (rn != 0x1f || op4 != 0x1f) {
2200 goto do_unallocated;
2201 }
2202 rn = 30;
2203 modifier = cpu_X[31];
2204 } else {
2205
2206 if (op4 != 0x1f) {
2207 goto do_unallocated;
2208 }
2209 modifier = new_tmp_a64_zero(s);
2210 }
2211 if (s->pauth_active) {
2212 dst = new_tmp_a64(s);
2213 if (op3 == 2) {
2214 gen_helper_autia(dst, cpu_env, cpu_reg(s, rn), modifier);
2215 } else {
2216 gen_helper_autib(dst, cpu_env, cpu_reg(s, rn), modifier);
2217 }
2218 } else {
2219 dst = cpu_reg(s, rn);
2220 }
2221 break;
2222
2223 default:
2224 goto do_unallocated;
2225 }
2226 gen_a64_set_pc(s, dst);
2227
2228 if (opc == 1) {
2229 tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next);
2230 }
2231 break;
2232
2233 case 8:
2234 case 9:
2235 if (!dc_isar_feature(aa64_pauth, s)) {
2236 goto do_unallocated;
2237 }
2238 if ((op3 & ~1) != 2) {
2239 goto do_unallocated;
2240 }
2241 btype_mod = opc & 1;
2242 if (s->pauth_active) {
2243 dst = new_tmp_a64(s);
2244 modifier = cpu_reg_sp(s, op4);
2245 if (op3 == 2) {
2246 gen_helper_autia(dst, cpu_env, cpu_reg(s, rn), modifier);
2247 } else {
2248 gen_helper_autib(dst, cpu_env, cpu_reg(s, rn), modifier);
2249 }
2250 } else {
2251 dst = cpu_reg(s, rn);
2252 }
2253 gen_a64_set_pc(s, dst);
2254
2255 if (opc == 9) {
2256 tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next);
2257 }
2258 break;
2259
2260 case 4:
2261 if (s->current_el == 0) {
2262 goto do_unallocated;
2263 }
2264 switch (op3) {
2265 case 0:
2266 if (op4 != 0) {
2267 goto do_unallocated;
2268 }
2269 dst = tcg_temp_new_i64();
2270 tcg_gen_ld_i64(dst, cpu_env,
2271 offsetof(CPUARMState, elr_el[s->current_el]));
2272 break;
2273
2274 case 2:
2275 case 3:
2276 if (!dc_isar_feature(aa64_pauth, s)) {
2277 goto do_unallocated;
2278 }
2279 if (rn != 0x1f || op4 != 0x1f) {
2280 goto do_unallocated;
2281 }
2282 dst = tcg_temp_new_i64();
2283 tcg_gen_ld_i64(dst, cpu_env,
2284 offsetof(CPUARMState, elr_el[s->current_el]));
2285 if (s->pauth_active) {
2286 modifier = cpu_X[31];
2287 if (op3 == 2) {
2288 gen_helper_autia(dst, cpu_env, dst, modifier);
2289 } else {
2290 gen_helper_autib(dst, cpu_env, dst, modifier);
2291 }
2292 }
2293 break;
2294
2295 default:
2296 goto do_unallocated;
2297 }
2298 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
2299 gen_io_start();
2300 }
2301
2302 gen_helper_exception_return(cpu_env, dst);
2303 tcg_temp_free_i64(dst);
2304
2305 s->base.is_jmp = DISAS_EXIT;
2306 return;
2307
2308 case 5:
2309 if (op3 != 0 || op4 != 0 || rn != 0x1f) {
2310 goto do_unallocated;
2311 } else {
2312 unsupported_encoding(s, insn);
2313 }
2314 return;
2315
2316 default:
2317 do_unallocated:
2318 unallocated_encoding(s);
2319 return;
2320 }
2321
2322 switch (btype_mod) {
2323 case 0:
2324 if (dc_isar_feature(aa64_bti, s)) {
2325
2326 set_btype(s, rn == 16 || rn == 17 || !s->guarded_page ? 1 : 3);
2327 }
2328 break;
2329
2330 case 1:
2331 if (dc_isar_feature(aa64_bti, s)) {
2332
2333 set_btype(s, 2);
2334 }
2335 break;
2336
2337 default:
2338
2339 break;
2340 }
2341
2342 s->base.is_jmp = DISAS_JUMP;
2343}
2344
2345
2346static void disas_b_exc_sys(DisasContext *s, uint32_t insn)
2347{
2348 switch (extract32(insn, 25, 7)) {
2349 case 0x0a: case 0x0b:
2350 case 0x4a: case 0x4b:
2351 disas_uncond_b_imm(s, insn);
2352 break;
2353 case 0x1a: case 0x5a:
2354 disas_comp_b_imm(s, insn);
2355 break;
2356 case 0x1b: case 0x5b:
2357 disas_test_b_imm(s, insn);
2358 break;
2359 case 0x2a:
2360 disas_cond_b_imm(s, insn);
2361 break;
2362 case 0x6a:
2363 if (insn & (1 << 24)) {
2364 if (extract32(insn, 22, 2) == 0) {
2365 disas_system(s, insn);
2366 } else {
2367 unallocated_encoding(s);
2368 }
2369 } else {
2370 disas_exc(s, insn);
2371 }
2372 break;
2373 case 0x6b:
2374 disas_uncond_b_reg(s, insn);
2375 break;
2376 default:
2377 unallocated_encoding(s);
2378 break;
2379 }
2380}
2381
2382
2383
2384
2385
2386
2387
2388
2389
2390
2391
2392
2393static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
2394 TCGv_i64 addr, int size, bool is_pair)
2395{
2396 int idx = get_mem_index(s);
2397 MemOp memop = s->be_data;
2398
2399 g_assert(size <= 3);
2400 if (is_pair) {
2401 g_assert(size >= 2);
2402 if (size == 2) {
2403
2404 memop |= MO_64 | MO_ALIGN;
2405 tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx, memop);
2406 if (s->be_data == MO_LE) {
2407 tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 0, 32);
2408 tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 32, 32);
2409 } else {
2410 tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 32, 32);
2411 tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 0, 32);
2412 }
2413 } else {
2414
2415
2416 memop |= MO_64;
2417 tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx,
2418 memop | MO_ALIGN_16);
2419
2420 TCGv_i64 addr2 = tcg_temp_new_i64();
2421 tcg_gen_addi_i64(addr2, addr, 8);
2422 tcg_gen_qemu_ld_i64(cpu_exclusive_high, addr2, idx, memop);
2423 tcg_temp_free_i64(addr2);
2424
2425 tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val);
2426 tcg_gen_mov_i64(cpu_reg(s, rt2), cpu_exclusive_high);
2427 }
2428 } else {
2429 memop |= size | MO_ALIGN;
2430 tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx, memop);
2431 tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val);
2432 }
2433 tcg_gen_mov_i64(cpu_exclusive_addr, addr);
2434}
2435
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i64 addr, int size, int is_pair)
{
    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]
     *     && (!is_pair || env->exclusive_high == [addr + datasize])) {
     *     [addr] = {Rt};
     *     if (is_pair) {
     *         [addr + datasize] = {Rt2};
     *     }
     *     {Rd} = 0;
     * } else {
     *     {Rd} = 1;
     * }
     * env->exclusive_addr = -1;
     */
    TCGLabel *fail_label = gen_new_label();
    TCGLabel *done_label = gen_new_label();
    TCGv_i64 tmp;

    tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);

    tmp = tcg_temp_new_i64();
    if (is_pair) {
        if (size == 2) {
            /* 32-bit pair: combine into one 64-bit cmpxchg. */
            if (s->be_data == MO_LE) {
                tcg_gen_concat32_i64(tmp, cpu_reg(s, rt), cpu_reg(s, rt2));
            } else {
                tcg_gen_concat32_i64(tmp, cpu_reg(s, rt2), cpu_reg(s, rt));
            }
            tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr,
                                       cpu_exclusive_val, tmp,
                                       get_mem_index(s),
                                       MO_64 | MO_ALIGN | s->be_data);
            tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
        } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
            /* 64-bit pair needs a 128-bit cmpxchg when run in parallel. */
            if (!HAVE_CMPXCHG128) {
                gen_helper_exit_atomic(cpu_env);
                s->base.is_jmp = DISAS_NORETURN;
            } else if (s->be_data == MO_LE) {
                gen_helper_paired_cmpxchg64_le_parallel(tmp, cpu_env,
                                                        cpu_exclusive_addr,
                                                        cpu_reg(s, rt),
                                                        cpu_reg(s, rt2));
            } else {
                gen_helper_paired_cmpxchg64_be_parallel(tmp, cpu_env,
                                                        cpu_exclusive_addr,
                                                        cpu_reg(s, rt),
                                                        cpu_reg(s, rt2));
            }
        } else if (s->be_data == MO_LE) {
            gen_helper_paired_cmpxchg64_le(tmp, cpu_env, cpu_exclusive_addr,
                                           cpu_reg(s, rt), cpu_reg(s, rt2));
        } else {
            gen_helper_paired_cmpxchg64_be(tmp, cpu_env, cpu_exclusive_addr,
                                           cpu_reg(s, rt), cpu_reg(s, rt2));
        }
    } else {
        tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr, cpu_exclusive_val,
                                   cpu_reg(s, rt), get_mem_index(s),
                                   size | MO_ALIGN | s->be_data);
        tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
    }
    tcg_gen_mov_i64(cpu_reg(s, rd), tmp);
    tcg_temp_free_i64(tmp);
    tcg_gen_br(done_label);

    gen_set_label(fail_label);
    tcg_gen_movi_i64(cpu_reg(s, rd), 1);
    gen_set_label(done_label);
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
2507
/* CAS family (single register): atomically compare Rs against memory at
 * Rn and, on match, store Rt; Rs receives the old memory value.
 */
static void gen_compare_and_swap(DisasContext *s, int rs, int rt,
                                 int rn, int size)
{
    TCGv_i64 tcg_rs = cpu_reg(s, rs);
    TCGv_i64 tcg_rt = cpu_reg(s, rt);
    int memidx = get_mem_index(s);
    TCGv_i64 clean_addr;

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), true, rn != 31, size);
    tcg_gen_atomic_cmpxchg_i64(tcg_rs, clean_addr, tcg_rs, tcg_rt, memidx,
                               size | MO_ALIGN | s->be_data);
}
2523
/* CASP family: compare-and-swap a register pair {Rs,Rs+1} against memory
 * at Rn, storing {Rt,Rt+1} on match; {Rs,Rs+1} receive the old values.
 */
static void gen_compare_and_swap_pair(DisasContext *s, int rs, int rt,
                                      int rn, int size)
{
    TCGv_i64 s1 = cpu_reg(s, rs);
    TCGv_i64 s2 = cpu_reg(s, rs + 1);
    TCGv_i64 t1 = cpu_reg(s, rt);
    TCGv_i64 t2 = cpu_reg(s, rt + 1);
    TCGv_i64 clean_addr;
    int memidx = get_mem_index(s);

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    /* This is a single atomic access, despite the "pair". */
    clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), true, rn != 31, size + 1);

    if (size == 2) {
        /* 32-bit pair: combine into one 64-bit cmpxchg. */
        TCGv_i64 cmp = tcg_temp_new_i64();
        TCGv_i64 val = tcg_temp_new_i64();

        if (s->be_data == MO_LE) {
            tcg_gen_concat32_i64(val, t1, t2);
            tcg_gen_concat32_i64(cmp, s1, s2);
        } else {
            tcg_gen_concat32_i64(val, t2, t1);
            tcg_gen_concat32_i64(cmp, s2, s1);
        }

        tcg_gen_atomic_cmpxchg_i64(cmp, clean_addr, cmp, val, memidx,
                                   MO_64 | MO_ALIGN | s->be_data);
        tcg_temp_free_i64(val);

        /* Split the old memory value back into {Rs, Rs+1}. */
        if (s->be_data == MO_LE) {
            tcg_gen_extr32_i64(s1, s2, cmp);
        } else {
            tcg_gen_extr32_i64(s2, s1, cmp);
        }
        tcg_temp_free_i64(cmp);
    } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        /* 64-bit pair needs a real 128-bit cmpxchg when run in parallel. */
        if (HAVE_CMPXCHG128) {
            TCGv_i32 tcg_rs = tcg_const_i32(rs);
            if (s->be_data == MO_LE) {
                gen_helper_casp_le_parallel(cpu_env, tcg_rs,
                                            clean_addr, t1, t2);
            } else {
                gen_helper_casp_be_parallel(cpu_env, tcg_rs,
                                            clean_addr, t1, t2);
            }
            tcg_temp_free_i32(tcg_rs);
        } else {
            gen_helper_exit_atomic(cpu_env);
            s->base.is_jmp = DISAS_NORETURN;
        }
    } else {
        TCGv_i64 d1 = tcg_temp_new_i64();
        TCGv_i64 d2 = tcg_temp_new_i64();
        TCGv_i64 a2 = tcg_temp_new_i64();
        TCGv_i64 c1 = tcg_temp_new_i64();
        TCGv_i64 c2 = tcg_temp_new_i64();
        TCGv_i64 zero = tcg_const_i64(0);

        /* Load the two words, in memory order.  */
        tcg_gen_qemu_ld_i64(d1, clean_addr, memidx,
                            MO_64 | MO_ALIGN_16 | s->be_data);
        tcg_gen_addi_i64(a2, clean_addr, 8);
        tcg_gen_qemu_ld_i64(d2, a2, memidx, MO_64 | s->be_data);

        /* Compare the two words, also in memory order.  */
        tcg_gen_setcond_i64(TCG_COND_EQ, c1, d1, s1);
        tcg_gen_setcond_i64(TCG_COND_EQ, c2, d2, s2);
        tcg_gen_and_i64(c2, c2, c1);

        /* If compare equal, write back new data, else write back old data. */
        tcg_gen_movcond_i64(TCG_COND_NE, c1, c2, zero, t1, d1);
        tcg_gen_movcond_i64(TCG_COND_NE, c2, c2, zero, t2, d2);
        tcg_gen_qemu_st_i64(c1, clean_addr, memidx, MO_64 | s->be_data);
        tcg_gen_qemu_st_i64(c2, a2, memidx, MO_64 | s->be_data);
        tcg_temp_free_i64(a2);
        tcg_temp_free_i64(c1);
        tcg_temp_free_i64(c2);
        tcg_temp_free_i64(zero);

        /* Write back the old data from memory to Rs.  */
        tcg_gen_mov_i64(s1, d1);
        tcg_gen_mov_i64(s2, d2);
        tcg_temp_free_i64(d1);
        tcg_temp_free_i64(d2);
    }
}
2614
2615
2616
2617
2618static bool disas_ldst_compute_iss_sf(int size, bool is_signed, int opc)
2619{
2620 int opc0 = extract32(opc, 0, 1);
2621 int regsize;
2622
2623 if (is_signed) {
2624 regsize = opc0 ? 32 : 64;
2625 } else {
2626 regsize = size == 3 ? 64 : 32;
2627 }
2628 return regsize == 64;
2629}
2630
2631
2632
2633
2634
2635
2636
2637
2638
2639
2640
2641
2642
2643
2644static void disas_ldst_excl(DisasContext *s, uint32_t insn)
2645{
2646 int rt = extract32(insn, 0, 5);
2647 int rn = extract32(insn, 5, 5);
2648 int rt2 = extract32(insn, 10, 5);
2649 int rs = extract32(insn, 16, 5);
2650 int is_lasr = extract32(insn, 15, 1);
2651 int o2_L_o1_o0 = extract32(insn, 21, 3) * 2 | is_lasr;
2652 int size = extract32(insn, 30, 2);
2653 TCGv_i64 clean_addr;
2654
2655 switch (o2_L_o1_o0) {
2656 case 0x0:
2657 case 0x1:
2658 if (rn == 31) {
2659 gen_check_sp_alignment(s);
2660 }
2661 if (is_lasr) {
2662 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
2663 }
2664 clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
2665 true, rn != 31, size);
2666 gen_store_exclusive(s, rs, rt, rt2, clean_addr, size, false);
2667 return;
2668
2669 case 0x4:
2670 case 0x5:
2671 if (rn == 31) {
2672 gen_check_sp_alignment(s);
2673 }
2674 clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
2675 false, rn != 31, size);
2676 s->is_ldex = true;
2677 gen_load_exclusive(s, rt, rt2, clean_addr, size, false);
2678 if (is_lasr) {
2679 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
2680 }
2681 return;
2682
2683 case 0x8:
2684 if (!dc_isar_feature(aa64_lor, s)) {
2685 break;
2686 }
2687
2688
2689 case 0x9:
2690
2691 if (rn == 31) {
2692 gen_check_sp_alignment(s);
2693 }
2694 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
2695 clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
2696 true, rn != 31, size);
2697
2698 do_gpr_st(s, cpu_reg(s, rt), clean_addr, size | MO_ALIGN, true, rt,
2699 disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
2700 return;
2701
2702 case 0xc:
2703 if (!dc_isar_feature(aa64_lor, s)) {
2704 break;
2705 }
2706
2707
2708 case 0xd:
2709
2710 if (rn == 31) {
2711 gen_check_sp_alignment(s);
2712 }
2713 clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
2714 false, rn != 31, size);
2715
2716 do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size | MO_ALIGN, false, true,
2717 rt, disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
2718 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
2719 return;
2720
2721 case 0x2: case 0x3:
2722 if (size & 2) {
2723 if (rn == 31) {
2724 gen_check_sp_alignment(s);
2725 }
2726 if (is_lasr) {
2727 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
2728 }
2729 clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
2730 true, rn != 31, size);
2731 gen_store_exclusive(s, rs, rt, rt2, clean_addr, size, true);
2732 return;
2733 }
2734 if (rt2 == 31
2735 && ((rt | rs) & 1) == 0
2736 && dc_isar_feature(aa64_atomics, s)) {
2737
2738 gen_compare_and_swap_pair(s, rs, rt, rn, size | 2);
2739 return;
2740 }
2741 break;
2742
2743 case 0x6: case 0x7:
2744 if (size & 2) {
2745 if (rn == 31) {
2746 gen_check_sp_alignment(s);
2747 }
2748 clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
2749 false, rn != 31, size);
2750 s->is_ldex = true;
2751 gen_load_exclusive(s, rt, rt2, clean_addr, size, true);
2752 if (is_lasr) {
2753 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
2754 }
2755 return;
2756 }
2757 if (rt2 == 31
2758 && ((rt | rs) & 1) == 0
2759 && dc_isar_feature(aa64_atomics, s)) {
2760
2761 gen_compare_and_swap_pair(s, rs, rt, rn, size | 2);
2762 return;
2763 }
2764 break;
2765
2766 case 0xa:
2767 case 0xb:
2768 case 0xe:
2769 case 0xf:
2770 if (rt2 == 31 && dc_isar_feature(aa64_atomics, s)) {
2771 gen_compare_and_swap(s, rs, rt, rn, size);
2772 return;
2773 }
2774 break;
2775 }
2776 unallocated_encoding(s);
2777}
2778
2779
2780
2781
2782
2783
2784
2785
2786
2787
2788
2789
2790
2791
2792static void disas_ld_lit(DisasContext *s, uint32_t insn)
2793{
2794 int rt = extract32(insn, 0, 5);
2795 int64_t imm = sextract32(insn, 5, 19) << 2;
2796 bool is_vector = extract32(insn, 26, 1);
2797 int opc = extract32(insn, 30, 2);
2798 bool is_signed = false;
2799 int size = 2;
2800 TCGv_i64 tcg_rt, clean_addr;
2801
2802 if (is_vector) {
2803 if (opc == 3) {
2804 unallocated_encoding(s);
2805 return;
2806 }
2807 size = 2 + opc;
2808 if (!fp_access_check(s)) {
2809 return;
2810 }
2811 } else {
2812 if (opc == 3) {
2813
2814 return;
2815 }
2816 size = 2 + extract32(opc, 0, 1);
2817 is_signed = extract32(opc, 1, 1);
2818 }
2819
2820 tcg_rt = cpu_reg(s, rt);
2821
2822 clean_addr = tcg_const_i64(s->pc_curr + imm);
2823 if (is_vector) {
2824 do_fp_ld(s, rt, clean_addr, size);
2825 } else {
2826
2827 bool iss_sf = opc != 0;
2828
2829 do_gpr_ld(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN,
2830 false, true, rt, iss_sf, false);
2831 }
2832 tcg_temp_free_i64(clean_addr);
2833}
2834
2835
2836
2837
2838
2839
2840
2841
2842
2843
2844
2845
2846
2847
2848
2849
2850
2851
2852
2853
2854
2855
2856
2857
2858
2859
2860
2861
2862
/*
 * Load/store pair (all forms): LDP/STP, LDNP/STNP, LDPSW, STGP.
 *
 *  31 30 29   27  26  25 24   23  22 21   15 14   10 9    5 4    0
 * +-----+-------+---+---+-------+---+-------+-------+------+------+
 * | opc | 1 0 1 | V | 0 | index | L | imm7  |  Rt2  |  Rn  |  Rt  |
 * +-----+-------+---+---+-------+---+-------+-------+------+------+
 *
 * opc: LDP/STP/LDNP/STNP        00 -> 32 bit, 10 -> 64 bit
 *      LDPSW/STGP               01
 *      LDP/STP/LDNP/STNP (SIMD) 00 -> 32 bit, 01 -> 64 bit, 10 -> 128 bit
 *   V: 0 -> GPR, 1 -> Vector
 * idx: 00 -> signed offset with non-temporal hint, 01 -> post-index,
 *      10 -> signed offset, 11 -> pre-index
 *   L: 0 -> Store, 1 -> Load
 *
 * Rt, Rt2 = GPR or SIMD registers to be transferred
 * Rn = base address register (or SP)
 * imm7 = signed offset, scaled by the access size
 */
static void disas_ldst_pair(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rt2 = extract32(insn, 10, 5);
    uint64_t offset = sextract64(insn, 15, 7);
    int index = extract32(insn, 23, 2);
    bool is_vector = extract32(insn, 26, 1);
    bool is_load = extract32(insn, 22, 1);
    int opc = extract32(insn, 30, 2);

    bool is_signed = false;
    bool postindex = false;
    bool wback = false;
    bool set_tag = false;

    TCGv_i64 clean_addr, dirty_addr;

    int size;

    if (opc == 3) {
        unallocated_encoding(s);
        return;
    }

    if (is_vector) {
        size = 2 + opc;
    } else if (opc == 1 && !is_load) {
        /* STGP: store pair of 64-bit values plus the allocation tag */
        if (!dc_isar_feature(aa64_mte_insn_reg, s) || index == 0) {
            unallocated_encoding(s);
            return;
        }
        size = 3;
        set_tag = true;
    } else {
        size = 2 + extract32(opc, 1, 1);
        is_signed = extract32(opc, 0, 1);
        if (!is_load && is_signed) {
            unallocated_encoding(s);
            return;
        }
    }

    switch (index) {
    case 1: /* post-index */
        postindex = true;
        wback = true;
        break;
    case 0:
        /*
         * Signed offset with "non-temporal" hint: since we do not
         * emulate caches we can ignore the hint and treat this exactly
         * like the normal signed-offset form.
         */
        if (is_signed) {
            /* There is no non-temporal-hint version of LDPSW */
            unallocated_encoding(s);
            return;
        }
        postindex = false;
        break;
    case 2: /* signed offset, rn not updated */
        postindex = false;
        break;
    case 3: /* pre-index */
        postindex = false;
        wback = true;
        break;
    }

    if (is_vector && !fp_access_check(s)) {
        return;
    }

    /* imm7 is scaled by the access size (tag granule for STGP) */
    offset <<= (set_tag ? LOG2_TAG_GRANULE : size);

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    dirty_addr = read_cpu_reg_sp(s, rn, 1);
    if (!postindex) {
        tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
    }

    if (set_tag) {
        if (!s->ata) {
            /*
             * Tag access disabled: use the stub helper, which still
             * performs the store's checks without writing a tag.
             */
            gen_helper_stg_stub(cpu_env, dirty_addr);
        } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
            gen_helper_stg_parallel(cpu_env, dirty_addr, dirty_addr);
        } else {
            gen_helper_stg(cpu_env, dirty_addr, dirty_addr);
        }
    }

    /* MTE check covers both halves of the pair (2 << size bytes),
     * skipped for STGP whose tag write was handled above. */
    clean_addr = gen_mte_checkN(s, dirty_addr, !is_load,
                                (wback || rn != 31) && !set_tag, 2 << size);

    if (is_vector) {
        if (is_load) {
            do_fp_ld(s, rt, clean_addr, size);
        } else {
            do_fp_st(s, rt, clean_addr, size);
        }
        tcg_gen_addi_i64(clean_addr, clean_addr, 1 << size);
        if (is_load) {
            do_fp_ld(s, rt2, clean_addr, size);
        } else {
            do_fp_st(s, rt2, clean_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        TCGv_i64 tcg_rt2 = cpu_reg(s, rt2);

        if (is_load) {
            TCGv_i64 tmp = tcg_temp_new_i64();

            /*
             * Do not modify tcg_rt until we have recognized any
             * exception from the second load: the first value is
             * staged in a temporary.
             */
            do_gpr_ld(s, tmp, clean_addr, size + is_signed * MO_SIGN,
                      false, false, 0, false, false);
            tcg_gen_addi_i64(clean_addr, clean_addr, 1 << size);
            do_gpr_ld(s, tcg_rt2, clean_addr, size + is_signed * MO_SIGN,
                      false, false, 0, false, false);

            tcg_gen_mov_i64(tcg_rt, tmp);
            tcg_temp_free_i64(tmp);
        } else {
            do_gpr_st(s, tcg_rt, clean_addr, size,
                      false, 0, false, false);
            tcg_gen_addi_i64(clean_addr, clean_addr, 1 << size);
            do_gpr_st(s, tcg_rt2, clean_addr, size,
                      false, 0, false, false);
        }
    }

    if (wback) {
        if (postindex) {
            tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
        }
        tcg_gen_mov_i64(cpu_reg_sp(s, rn), dirty_addr);
    }
}
3012
3013
3014
3015
3016
3017
3018
3019
3020
3021
3022
3023
3024
3025
3026
3027
3028
/*
 * Load/store (immediate post-indexed / immediate pre-indexed /
 * unscaled immediate / unprivileged)
 *
 * 31 30 29   27  26 25 24 23 22 21  20    12 11 10 9    5 4    0
 * +----+-------+---+-----+-----+---+--------+-----+------+------+
 * |size| 1 1 1 | V | 0 0 | opc | 0 |  imm9  | idx |  Rn  |  Rt  |
 * +----+-------+---+-----+-----+---+--------+-----+------+------+
 *
 * idx = 01 -> post-indexed, 11 -> pre-indexed,
 *       00 -> unscaled imm (no writeback), 10 -> unprivileged
 * V = 0 -> non-vector
 * size: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64 bit
 * opc: 00 -> store, 01 -> load-unsigned,
 *      10 -> load-signed-to-64, 11 -> load-signed-to-32
 */
static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
                                int opc,
                                int size,
                                int rt,
                                bool is_vector)
{
    int rn = extract32(insn, 5, 5);
    int imm9 = sextract32(insn, 12, 9);
    int idx = extract32(insn, 10, 2);
    bool is_signed = false;
    bool is_store = false;
    bool is_extended = false;
    bool is_unpriv = (idx == 2);
    bool iss_valid = !is_vector;
    bool post_index;
    bool writeback;
    int memidx;

    TCGv_i64 clean_addr, dirty_addr;

    if (is_vector) {
        size |= (opc & 2) << 1;  /* opc<1> is size<2> for SIMD/FP */
        if (size > 4 || is_unpriv) {
            unallocated_encoding(s);
            return;
        }
        is_store = ((opc & 1) == 0);
        if (!fp_access_check(s)) {
            return;
        }
    } else {
        if (size == 3 && opc == 2) {
            /* PRFM - prefetch; only the idx == 0 form is allocated */
            if (idx != 0) {
                unallocated_encoding(s);
                return;
            }
            /* Prefetch is a hint: nothing to do */
            return;
        }
        if (opc == 3 && size > 1) {
            unallocated_encoding(s);
            return;
        }
        is_store = (opc == 0);
        is_signed = extract32(opc, 1, 1);
        /* is_extended: signed load into a 32-bit register */
        is_extended = (size < 3) && extract32(opc, 0, 1);
    }

    switch (idx) {
    case 0:
    case 2:
        post_index = false;
        writeback = false;
        break;
    case 1:
        post_index = true;
        writeback = true;
        break;
    case 3:
        post_index = false;
        writeback = true;
        break;
    default:
        g_assert_not_reached();
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    dirty_addr = read_cpu_reg_sp(s, rn, 1);
    if (!post_index) {
        tcg_gen_addi_i64(dirty_addr, dirty_addr, imm9);
    }

    /* Unprivileged (LDTR/STTR) forms use the EL0 mmu index */
    memidx = is_unpriv ? get_a64_user_mem_index(s) : get_mem_index(s);
    clean_addr = gen_mte_check1_mmuidx(s, dirty_addr, is_store,
                                       writeback || rn != 31,
                                       size, is_unpriv, memidx);

    if (is_vector) {
        if (is_store) {
            do_fp_st(s, rt, clean_addr, size);
        } else {
            do_fp_ld(s, rt, clean_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);

        if (is_store) {
            do_gpr_st_memidx(s, tcg_rt, clean_addr, size, memidx,
                             iss_valid, rt, iss_sf, false);
        } else {
            do_gpr_ld_memidx(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN,
                             is_extended, memidx,
                             iss_valid, rt, iss_sf, false);
        }
    }

    if (writeback) {
        TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
        if (post_index) {
            tcg_gen_addi_i64(dirty_addr, dirty_addr, imm9);
        }
        tcg_gen_mov_i64(tcg_rn, dirty_addr);
    }
}
3137
3138
3139
3140
3141
3142
3143
3144
3145
3146
3147
3148
3149
3150
3151
3152
3153
3154
3155
3156
3157
3158
/*
 * Load/store (register offset)
 *
 * 31 30 29   27  26 25 24 23 22 21  20  16 15 13 12 11 10 9  5 4  0
 * +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+
 * |size| 1 1 1 | V | 0 0 | opc | 1 |  Rm  | opt | S| 1 0 | Rn | Rt |
 * +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+
 *
 * For non-vector:
 *   size: 00 -> byte, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64 bit
 *   opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
 * For vector:
 *   size is opc<1>:size<1:0>, so 100 -> 128 bit; 110 and 111 unallocated
 *   opc<0>: 0 -> store, 1 -> load
 * V: 1 -> vector/simd
 * opt: extend encoding (see ext_and_shift_reg)
 * S: if S=1 then scale the offset by the access size
 * Rt: register to transfer into/out of
 * Rn: base address register or SP
 * Rm: offset register
 */
static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn,
                                   int opc,
                                   int size,
                                   int rt,
                                   bool is_vector)
{
    int rn = extract32(insn, 5, 5);
    int shift = extract32(insn, 12, 1);
    int rm = extract32(insn, 16, 5);
    int opt = extract32(insn, 13, 3);
    bool is_signed = false;
    bool is_store = false;
    bool is_extended = false;

    TCGv_i64 tcg_rm, clean_addr, dirty_addr;

    if (extract32(opt, 1, 1) == 0) {
        /* option<1> must be set: only UXTW/LSL/SXTW/SXTX are valid */
        unallocated_encoding(s);
        return;
    }

    if (is_vector) {
        size |= (opc & 2) << 1;  /* opc<1> is size<2> for SIMD/FP */
        if (size > 4) {
            unallocated_encoding(s);
            return;
        }
        is_store = !extract32(opc, 0, 1);
        if (!fp_access_check(s)) {
            return;
        }
    } else {
        if (size == 3 && opc == 2) {
            /* PRFM - prefetch hint, nothing to do */
            return;
        }
        if (opc == 3 && size > 1) {
            unallocated_encoding(s);
            return;
        }
        is_store = (opc == 0);
        is_signed = extract32(opc, 1, 1);
        /* is_extended: signed load into a 32-bit register */
        is_extended = (size < 3) && extract32(opc, 0, 1);
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    dirty_addr = read_cpu_reg_sp(s, rn, 1);

    /* Extend/shift the offset register per opt, scaling by size if S set */
    tcg_rm = read_cpu_reg(s, rm, 1);
    ext_and_shift_reg(tcg_rm, tcg_rm, opt, shift ? size : 0);

    tcg_gen_add_i64(dirty_addr, dirty_addr, tcg_rm);
    clean_addr = gen_mte_check1(s, dirty_addr, is_store, true, size);

    if (is_vector) {
        if (is_store) {
            do_fp_st(s, rt, clean_addr, size);
        } else {
            do_fp_ld(s, rt, clean_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
        if (is_store) {
            do_gpr_st(s, tcg_rt, clean_addr, size,
                      true, rt, iss_sf, false);
        } else {
            do_gpr_ld(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN,
                      is_extended, true, rt, iss_sf, false);
        }
    }
}
3233
3234
3235
3236
3237
3238
3239
3240
3241
3242
3243
3244
3245
3246
3247
3248
3249
3250
/*
 * Load/store (unsigned immediate)
 *
 * 31 30 29   27  26 25 24 23 22 21        10 9     5 4    0
 * +----+-------+---+-----+-----+------------+-------+------+
 * |size| 1 1 1 | V | 0 1 | opc |   imm12    |  Rn   |  Rt  |
 * +----+-------+---+-----+-----+------------+-------+------+
 *
 * For non-vector:
 *   size: 00 -> byte, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64 bit
 *   opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
 * For vector:
 *   size is opc<1>:size<1:0>, so 100 -> 128 bit; 110 and 111 unallocated
 *   opc<0>: 0 -> store, 1 -> load
 * Rn: base address register (inc SP)
 * Rt: target register
 * imm12: unsigned offset, scaled by the access size
 */
static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn,
                                        int opc,
                                        int size,
                                        int rt,
                                        bool is_vector)
{
    int rn = extract32(insn, 5, 5);
    unsigned int imm12 = extract32(insn, 10, 12);
    unsigned int offset;

    TCGv_i64 clean_addr, dirty_addr;

    bool is_store;
    bool is_signed = false;
    bool is_extended = false;

    if (is_vector) {
        size |= (opc & 2) << 1;  /* opc<1> is size<2> for SIMD/FP */
        if (size > 4) {
            unallocated_encoding(s);
            return;
        }
        is_store = !extract32(opc, 0, 1);
        if (!fp_access_check(s)) {
            return;
        }
    } else {
        if (size == 3 && opc == 2) {
            /* PRFM - prefetch hint, nothing to do */
            return;
        }
        if (opc == 3 && size > 1) {
            unallocated_encoding(s);
            return;
        }
        is_store = (opc == 0);
        is_signed = extract32(opc, 1, 1);
        /* is_extended: signed load into a 32-bit register */
        is_extended = (size < 3) && extract32(opc, 0, 1);
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    dirty_addr = read_cpu_reg_sp(s, rn, 1);
    offset = imm12 << size;
    tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
    clean_addr = gen_mte_check1(s, dirty_addr, is_store, rn != 31, size);

    if (is_vector) {
        if (is_store) {
            do_fp_st(s, rt, clean_addr, size);
        } else {
            do_fp_ld(s, rt, clean_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
        if (is_store) {
            do_gpr_st(s, tcg_rt, clean_addr, size,
                      true, rt, iss_sf, false);
        } else {
            do_gpr_ld(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN,
                      is_extended, true, rt, iss_sf, false);
        }
    }
}
3317
3318
3319
3320
3321
3322
3323
3324
3325
3326
3327
3328
3329
3330
3331
/*
 * Atomic memory operations
 *
 *  31  30      27  26    24    22  21   16   15    12    10    5     0
 * +------+-------+---+-----+-----+---+----+----+-----+-----+----+-----+
 * | size | 1 1 1 | V | 0 0 | A R | 1 | Rs | o3 | opc | 0 0 | Rn |  Rt |
 * +------+-------+---+-----+-----+---+----+----+-----+-----+----+-----+
 *
 * Rt: the result register
 * Rn: base address or SP
 * Rs: the source register for the operation
 * V: vector flag (always 0 as of v8.3)
 * A: acquire flag
 * R: release flag
 */
static void disas_ldst_atomic(DisasContext *s, uint32_t insn,
                              int size, int rt, bool is_vector)
{
    int rs = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int o3_opc = extract32(insn, 12, 4);
    bool r = extract32(insn, 22, 1);
    bool a = extract32(insn, 23, 1);
    TCGv_i64 tcg_rs, tcg_rt, clean_addr;
    AtomicThreeOpFn *fn = NULL;
    MemOp mop = s->be_data | size | MO_ALIGN;

    if (is_vector || !dc_isar_feature(aa64_atomics, s)) {
        unallocated_encoding(s);
        return;
    }
    /* Note that the case labels below are octal, matching o3:opc. */
    switch (o3_opc) {
    case 000: /* LDADD */
        fn = tcg_gen_atomic_fetch_add_i64;
        break;
    case 001: /* LDCLR */
        fn = tcg_gen_atomic_fetch_and_i64;
        break;
    case 002: /* LDEOR */
        fn = tcg_gen_atomic_fetch_xor_i64;
        break;
    case 003: /* LDSET */
        fn = tcg_gen_atomic_fetch_or_i64;
        break;
    case 004: /* LDSMAX */
        fn = tcg_gen_atomic_fetch_smax_i64;
        mop |= MO_SIGN;
        break;
    case 005: /* LDSMIN */
        fn = tcg_gen_atomic_fetch_smin_i64;
        mop |= MO_SIGN;
        break;
    case 006: /* LDUMAX */
        fn = tcg_gen_atomic_fetch_umax_i64;
        break;
    case 007: /* LDUMIN */
        fn = tcg_gen_atomic_fetch_umin_i64;
        break;
    case 010: /* SWP */
        fn = tcg_gen_atomic_xchg_i64;
        break;
    case 014: /* LDAPR, LDAPRH, LDAPRB */
        if (!dc_isar_feature(aa64_rcpc_8_3, s) ||
            rs != 31 || a != 1 || r != 0) {
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), false, rn != 31, size);

    if (o3_opc == 014) {
        /*
         * LDAPR* are a special case because they are a simple load, not a
         * fetch-and-do-something op.
         * The architectural consistency requirement here is weaker than
         * full load-acquire (we only need "load-acquire processor
         * consistent"), but we choose to implement it as a full LDAQ.
         */
        do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, false,
                  true, rt, disas_ldst_compute_iss_sf(size, false, 0), true);
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
        return;
    }

    tcg_rs = read_cpu_reg(s, rs, true);
    tcg_rt = cpu_reg(s, rt);

    if (o3_opc == 1) { /* LDCLR: and with the complement of Rs */
        tcg_gen_not_i64(tcg_rs, tcg_rs);
    }

    /*
     * The tcg atomic primitives are all full barriers.  Therefore we
     * can ignore the Acquire and Release bits of this instruction.
     */
    fn(tcg_rt, clean_addr, tcg_rs, get_mem_index(s), mop);

    if ((mop & MO_SIGN) && size != MO_64) {
        /*
         * LDSMAX/LDSMIN used MO_SIGN for a correct comparison; the
         * architectural result in Wt must still be zero-extended.
         */
        tcg_gen_ext32u_i64(tcg_rt, tcg_rt);
    }
}
3425
3426
3427
3428
3429
3430
3431
3432
3433
3434
3435
3436
3437
3438
3439
3440
/*
 * PAC memory operations (LDRAA/LDRAB): authenticate the base address
 * with a PAC data key, then load with an optional pre-index writeback.
 *
 *  31  30      27  26    24    22  21       12  11  10    5     0
 * +------+-------+---+-----+-----+---+----------+---+---+----+-----+
 * | size | 1 1 1 | V | 0 0 | M S | 1 |  imm9    | W | 1 | Rn |  Rt |
 * +------+-------+---+-----+-----+---+----------+---+---+----+-----+
 *
 * Rt: the result register
 * Rn: base address or SP
 * V: vector flag (always 0 as of v8.3)
 * M: clear for key DA, set for key DB
 * W: pre-indexing flag
 * S: sign for imm9
 */
static void disas_ldst_pac(DisasContext *s, uint32_t insn,
                           int size, int rt, bool is_vector)
{
    int rn = extract32(insn, 5, 5);
    bool is_wback = extract32(insn, 11, 1);
    bool use_key_a = !extract32(insn, 23, 1);
    int offset;
    TCGv_i64 clean_addr, dirty_addr, tcg_rt;

    if (size != 3 || is_vector || !dc_isar_feature(aa64_pauth, s)) {
        unallocated_encoding(s);
        return;
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    dirty_addr = read_cpu_reg_sp(s, rn, 1);

    if (s->pauth_active) {
        /* Authenticate the address with key DA or DB, modifier zero */
        if (use_key_a) {
            gen_helper_autda(dirty_addr, cpu_env, dirty_addr,
                             new_tmp_a64_zero(s));
        } else {
            gen_helper_autdb(dirty_addr, cpu_env, dirty_addr,
                             new_tmp_a64_zero(s));
        }
    }

    /* Form the 10-bit signed, size-scaled offset from S:imm9.  */
    offset = (extract32(insn, 22, 1) << 9) | extract32(insn, 12, 9);
    offset = sextract32(offset << size, 0, 10 + size);
    tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);

    /* Note that "clean" and "dirty" here refer to TBI, not PAC.  */
    clean_addr = gen_mte_check1(s, dirty_addr, false,
                                is_wback || rn != 31, size);

    tcg_rt = cpu_reg(s, rt);
    do_gpr_ld(s, tcg_rt, clean_addr, size,
              /* extend */ false, /* iss_valid */ !is_wback,
              /* iss_srt */ rt, /* iss_sf */ true, /* iss_ar */ false);

    if (is_wback) {
        tcg_gen_mov_i64(cpu_reg_sp(s, rn), dirty_addr);
    }
}
3488
3489
3490
3491
3492
3493
3494
3495
3496
3497
3498
3499
3500
3501
3502
/*
 * LDAPR/STLR (unscaled immediate)
 *
 *  31  30            24    22  21       12    10    5     0
 * +------+-------------+-----+---+--------+-----+----+-----+
 * | size | 0 1 1 0 0 1 | opc | 0 |  imm9  | 0 0 | Rn |  Rt |
 * +------+-------------+-----+---+--------+-----+----+-----+
 *
 * Rt: source or destination register
 * Rn: base register
 * imm9: unscaled immediate offset
 * opc: 00 -> STLUR*, 01/10/11 -> various LDAPUR*
 * size: size of load/store
 */
static void disas_ldst_ldapr_stlr(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int offset = sextract32(insn, 12, 9);
    int opc = extract32(insn, 22, 2);
    int size = extract32(insn, 30, 2);
    TCGv_i64 clean_addr, dirty_addr;
    bool is_store = false;
    bool extend = false;
    bool iss_sf;
    MemOp mop;

    if (!dc_isar_feature(aa64_rcpc_8_4, s)) {
        unallocated_encoding(s);
        return;
    }

    /* All of these require an aligned access */
    mop = size | MO_ALIGN;

    switch (opc) {
    case 0: /* STLURB/STLURH/STLUR */
        is_store = true;
        break;
    case 1: /* LDAPUR* (zero-extending) */
        break;
    case 2: /* LDAPURS* 64-bit variant */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        mop |= MO_SIGN;
        break;
    case 3: /* LDAPURS* 32-bit variant */
        if (size > 1) {
            unallocated_encoding(s);
            return;
        }
        mop |= MO_SIGN;
        extend = true; /* zero-extend 32->64 after signed load */
        break;
    default:
        g_assert_not_reached();
    }

    iss_sf = disas_ldst_compute_iss_sf(size, (mop & MO_SIGN) != 0, opc);

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    dirty_addr = read_cpu_reg_sp(s, rn, 1);
    tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
    clean_addr = clean_data_tbi(s, dirty_addr);

    if (is_store) {
        /* Store-Release semantics */
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
        do_gpr_st(s, cpu_reg(s, rt), clean_addr, mop, true, rt, iss_sf, true);
    } else {
        /*
         * Load-AcquirePC semantics; we implement as the slightly more
         * restrictive Load-Acquire.
         */
        do_gpr_ld(s, cpu_reg(s, rt), clean_addr, mop,
                  extend, true, rt, iss_sf, true);
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
    }
}
3573
3574
3575static void disas_ldst_reg(DisasContext *s, uint32_t insn)
3576{
3577 int rt = extract32(insn, 0, 5);
3578 int opc = extract32(insn, 22, 2);
3579 bool is_vector = extract32(insn, 26, 1);
3580 int size = extract32(insn, 30, 2);
3581
3582 switch (extract32(insn, 24, 2)) {
3583 case 0:
3584 if (extract32(insn, 21, 1) == 0) {
3585
3586
3587
3588
3589 disas_ldst_reg_imm9(s, insn, opc, size, rt, is_vector);
3590 return;
3591 }
3592 switch (extract32(insn, 10, 2)) {
3593 case 0:
3594 disas_ldst_atomic(s, insn, size, rt, is_vector);
3595 return;
3596 case 2:
3597 disas_ldst_reg_roffset(s, insn, opc, size, rt, is_vector);
3598 return;
3599 default:
3600 disas_ldst_pac(s, insn, size, rt, is_vector);
3601 return;
3602 }
3603 break;
3604 case 1:
3605 disas_ldst_reg_unsigned_imm(s, insn, opc, size, rt, is_vector);
3606 return;
3607 }
3608 unallocated_encoding(s);
3609}
3610
3611
3612
3613
3614
3615
3616
3617
3618
3619
3620
3621
3622
3623
3624
3625
3626
3627
3628
/*
 * AdvSIMD load/store multiple structures
 *
 *  31  30  29           23 22  21         16 15    12 11  10 9    5 4    0
 * +---+---+---------------+---+-------------+--------+------+------+------+
 * | 0 | Q | 0 0 1 1 0 0 0 | L | 0 0 0 0 0 0 | opcode | size |  Rn  |  Rt  |
 * +---+---+---------------+---+-------------+--------+------+------+------+
 *
 * AdvSIMD load/store multiple structures (post-indexed)
 *
 *  31  30  29           23 22  21  20     16 15    12 11  10 9    5 4    0
 * +---+---+---------------+---+---+---------+--------+------+------+------+
 * | 0 | Q | 0 0 1 1 0 0 1 | L | 0 |   Rm    | opcode | size |  Rn  |  Rt  |
 * +---+---+---------------+---+---+---------+--------+------+------+------+
 *
 * Rt: first (or only) SIMD&FP register to be transferred
 * Rn: base address or SP
 * Rm (post-index only): post-index register (when !31) or size-dependent #imm
 */
static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 10, 2);
    int opcode = extract32(insn, 12, 4);
    bool is_store = !extract32(insn, 22, 1);
    bool is_postidx = extract32(insn, 23, 1);
    bool is_q = extract32(insn, 30, 1);
    TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
    MemOp endian, align, mop;

    int total;    /* total bytes transferred */
    int elements; /* elements per vector register */
    int rpt;      /* num iterations (register count) */
    int selem;    /* structure elements per access */
    int r;

    if (extract32(insn, 31, 1) || extract32(insn, 21, 1)) {
        unallocated_encoding(s);
        return;
    }

    if (!is_postidx && rm != 0) {
        unallocated_encoding(s);
        return;
    }

    /* From the shared decode logic */
    switch (opcode) {
    case 0x0: /* LD4/ST4 (4 registers) */
        rpt = 1;
        selem = 4;
        break;
    case 0x2: /* LD1/ST1 (4 registers) */
        rpt = 4;
        selem = 1;
        break;
    case 0x4: /* LD3/ST3 (3 registers) */
        rpt = 1;
        selem = 3;
        break;
    case 0x6: /* LD1/ST1 (3 registers) */
        rpt = 3;
        selem = 1;
        break;
    case 0x7: /* LD1/ST1 (1 register) */
        rpt = 1;
        selem = 1;
        break;
    case 0x8: /* LD2/ST2 (2 registers) */
        rpt = 1;
        selem = 2;
        break;
    case 0xa: /* LD1/ST1 (2 registers) */
        rpt = 2;
        selem = 1;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (size == 3 && !is_q && selem != 1) {
        /* reserved: .1d structures with more than one element */
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    /* For our purposes, bytes are always little-endian.  */
    endian = s->be_data;
    if (size == 0) {
        endian = MO_LE;
    }

    total = rpt * selem * (is_q ? 16 : 8);
    tcg_rn = cpu_reg_sp(s, rn);

    /*
     * Issue the MTE check vs the logical repeat count, before we
     * promote consecutive little-endian elements below.
     */
    clean_addr = gen_mte_checkN(s, tcg_rn, is_store, is_postidx || rn != 31,
                                total);

    /*
     * Consecutive little-endian elements from a single register
     * can be promoted to a larger little-endian operation.
     */
    align = MO_ALIGN;
    if (selem == 1 && endian == MO_LE) {
        align = pow2_align(size);
        size = 3;
    }
    if (!s->align_mem) {
        align = 0;
    }
    mop = endian | size | align;

    elements = (is_q ? 16 : 8) >> size;
    tcg_ebytes = tcg_const_i64(1 << size);
    for (r = 0; r < rpt; r++) {
        int e;
        for (e = 0; e < elements; e++) {
            int xs;
            for (xs = 0; xs < selem; xs++) {
                int tt = (rt + r + xs) % 32;  /* register numbers wrap */
                if (is_store) {
                    do_vec_st(s, tt, e, clean_addr, mop);
                } else {
                    do_vec_ld(s, tt, e, clean_addr, mop);
                }
                tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
            }
        }
    }
    tcg_temp_free_i64(tcg_ebytes);

    if (!is_store) {
        /*
         * For non-quad operations, setting a slice of the low 64 bits of
         * the register clears the high 64 bits (in the ARM ARM pseudocode
         * this is implicit in the fact that 'rval' is a 64 bit wide
         * variable).  For quad operations, we might still need to zero
         * the high bits of SVE.
         */
        for (r = 0; r < rpt * selem; r++) {
            int tt = (rt + r) % 32;
            clear_vec_high(s, is_q, tt);
        }
    }

    if (is_postidx) {
        if (rm == 31) {
            /* Rm == 31: post-index by the immediate (total bytes) */
            tcg_gen_addi_i64(tcg_rn, tcg_rn, total);
        } else {
            tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
        }
    }
}
3778
3779
3780
3781
3782
3783
3784
3785
3786
3787
3788
3789
3790
3791
3792
3793
3794
3795
3796
3797
3798
3799
3800
/*
 * AdvSIMD load/store single structure
 *
 *  31  30  29           23 22 21 20       16 15 13 12  11 10 9    5 4    0
 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
 * | 0 | Q | 0 0 1 1 0 1 0 | L R | 0 0 0 0 0 | opc | S | size |  Rn  |  Rt  |
 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
 *
 * AdvSIMD load/store single structure (post-indexed)
 *
 *  31  30  29           23 22 21 20       16 15 13 12  11 10 9    5 4    0
 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
 * | 0 | Q | 0 0 1 1 0 1 1 | L R |     Rm    | opc | S | size |  Rn  |  Rt  |
 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
 *
 * Rt: first (or only) SIMD&FP register to be transferred
 * Rn: base address or SP
 * Rm (post-index only): post-index register (when !31) or size-dependent #imm
 * index = encoded in Q:S:size dependent on size
 *
 * lane_size = encoded in R, opc
 * transfer width = encoded in opc, S, size
 */
static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 10, 2);
    int S = extract32(insn, 12, 1);
    int opc = extract32(insn, 13, 3);
    int R = extract32(insn, 21, 1);
    int is_load = extract32(insn, 22, 1);
    int is_postidx = extract32(insn, 23, 1);
    int is_q = extract32(insn, 30, 1);

    int scale = extract32(opc, 1, 2);   /* log2 of the element size */
    int selem = (extract32(opc, 0, 1) << 1 | R) + 1;
    bool replicate = false;
    int index = is_q << 3 | S << 2 | size;
    int xs, total;
    TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
    MemOp mop;

    if (extract32(insn, 31, 1)) {
        unallocated_encoding(s);
        return;
    }
    if (!is_postidx && rm != 0) {
        unallocated_encoding(s);
        return;
    }

    switch (scale) {
    case 3:
        /* LD1R..LD4R: load and replicate; must be a load with S == 0 */
        if (!is_load || S) {
            unallocated_encoding(s);
            return;
        }
        scale = size;  /* element size comes from the size field */
        replicate = true;
        break;
    case 0:
        /* byte element: index is Q:S:size directly */
        break;
    case 1:
        /* 16-bit element: size<0> must be zero */
        if (extract32(size, 0, 1)) {
            unallocated_encoding(s);
            return;
        }
        index >>= 1;
        break;
    case 2:
        /* 32-bit element, or 64-bit when size<0> is set */
        if (extract32(size, 1, 1)) {
            unallocated_encoding(s);
            return;
        }
        if (!extract32(size, 0, 1)) {
            index >>= 2;
        } else {
            if (S) {
                unallocated_encoding(s);
                return;
            }
            index >>= 3;
            scale = 3;
        }
        break;
    default:
        g_assert_not_reached();
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    total = selem << scale;  /* total bytes transferred */
    tcg_rn = cpu_reg_sp(s, rn);

    clean_addr = gen_mte_checkN(s, tcg_rn, !is_load, is_postidx || rn != 31,
                                total);
    mop = finalize_memop(s, scale);

    tcg_ebytes = tcg_const_i64(1 << scale);
    for (xs = 0; xs < selem; xs++) {
        if (replicate) {
            /* Load and replicate to all elements */
            TCGv_i64 tcg_tmp = tcg_temp_new_i64();

            tcg_gen_qemu_ld_i64(tcg_tmp, clean_addr, get_mem_index(s), mop);
            tcg_gen_gvec_dup_i64(scale, vec_full_reg_offset(s, rt),
                                 (is_q + 1) * 8, vec_full_reg_size(s),
                                 tcg_tmp);
            tcg_temp_free_i64(tcg_tmp);
        } else {
            /* Load/store one element per register */
            if (is_load) {
                do_vec_ld(s, rt, index, clean_addr, mop);
            } else {
                do_vec_st(s, rt, index, clean_addr, mop);
            }
        }
        tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
        rt = (rt + 1) % 32;  /* register numbers wrap */
    }
    tcg_temp_free_i64(tcg_ebytes);

    if (is_postidx) {
        if (rm == 31) {
            /* Rm == 31: post-index by the immediate (total bytes) */
            tcg_gen_addi_i64(tcg_rn, tcg_rn, total);
        } else {
            tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
        }
    }
}
3916
3917
3918
3919
3920
3921
3922
3923
3924
/*
 * Load/store memory tags (MTE): STG, STZG, ST2G, STZ2G, LDG, and the
 * multiple-tag forms STGM, STZGM, LDGM.
 *
 *  31 30 29         24     22  21     12    10      5      0
 * +-----+-------------+-----+---+------+-----+------+------+
 * | 1 1 | 0 1 1 0 0 1 | op1 | 1 | imm9 | op2 |  Rn  |  Rt  |
 * +-----+-------------+-----+---+------+-----+------+------+
 */
static void disas_ldst_tag(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    uint64_t offset = sextract64(insn, 12, 9) << LOG2_TAG_GRANULE;
    int op2 = extract32(insn, 10, 2);
    int op1 = extract32(insn, 22, 2);
    bool is_load = false, is_pair = false, is_zero = false, is_mult = false;
    int index = 0;
    TCGv_i64 addr, clean_addr, tcg_rt;

    /* Bits [29:24,21] were checked by the caller; require size == 3. */
    if (extract32(insn, 30, 2) != 3) {
        goto do_unallocated;
    }

    /*
     * Decode op1:op2:
     *   op1  op2 != 0   op2 == 0 (imm9 must be 0, EL > 0 only)
     *    0   STG        STZGM
     *    1   STZG       LDG
     *    2   ST2G       STGM
     *    3   STZ2G      LDGM
     * For op2 != 0, op2 encodes the addressing mode via index = op2 - 2:
     *   < 0: post-index with writeback, 0: signed offset, > 0: pre-index.
     */
    switch (op1) {
    case 0:
        if (op2 != 0) {
            /* STG */
            index = op2 - 2;
        } else {
            /* STZGM */
            if (s->current_el == 0 || offset != 0) {
                goto do_unallocated;
            }
            is_mult = is_zero = true;
        }
        break;
    case 1:
        if (op2 != 0) {
            /* STZG */
            is_zero = true;
            index = op2 - 2;
        } else {
            /* LDG */
            is_load = true;
        }
        break;
    case 2:
        if (op2 != 0) {
            /* ST2G */
            is_pair = true;
            index = op2 - 2;
        } else {
            /* STGM */
            if (s->current_el == 0 || offset != 0) {
                goto do_unallocated;
            }
            is_mult = true;
        }
        break;
    case 3:
        if (op2 != 0) {
            /* STZ2G */
            is_pair = is_zero = true;
            index = op2 - 2;
        } else {
            /* LDGM */
            if (s->current_el == 0 || offset != 0) {
                goto do_unallocated;
            }
            is_mult = is_load = true;
        }
        break;

    default:
    do_unallocated:
        unallocated_encoding(s);
        return;
    }

    /* Multiple-tag forms need full MTE; the rest only the insn subset */
    if (is_mult
        ? !dc_isar_feature(aa64_mte, s)
        : !dc_isar_feature(aa64_mte_insn_reg, s)) {
        goto do_unallocated;
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    addr = read_cpu_reg_sp(s, rn, true);
    if (index >= 0) {
        /* signed offset or pre-index: apply the offset before the access */
        tcg_gen_addi_i64(addr, addr, offset);
    }

    if (is_mult) {
        tcg_rt = cpu_reg(s, rt);

        if (is_zero) {
            int size = 4 << s->dcz_blocksize;

            if (s->ata) {
                gen_helper_stzgm_tags(cpu_env, addr, tcg_rt);
            }
            /*
             * The non-tags portion of STZGM is mostly like DC_ZVA,
             * except the alignment happens before the access.
             */
            clean_addr = clean_data_tbi(s, addr);
            tcg_gen_andi_i64(clean_addr, clean_addr, -size);
            gen_helper_dc_zva(cpu_env, clean_addr);
        } else if (s->ata) {
            if (is_load) {
                gen_helper_ldgm(tcg_rt, cpu_env, addr);
            } else {
                gen_helper_stgm(cpu_env, addr, tcg_rt);
            }
        } else {
            MMUAccessType acc = is_load ? MMU_DATA_LOAD : MMU_DATA_STORE;
            int size = 4 << GMID_EL1_BS;

            /* Tag access disabled: still probe the data access for faults */
            clean_addr = clean_data_tbi(s, addr);
            tcg_gen_andi_i64(clean_addr, clean_addr, -size);
            gen_probe_access(s, clean_addr, acc, size);

            if (is_load) {
                /* The result tags are zeros.  */
                tcg_gen_movi_i64(tcg_rt, 0);
            }
        }
        return;
    }

    if (is_load) {
        /* LDG reads from a tag-granule-aligned address */
        tcg_gen_andi_i64(addr, addr, -TAG_GRANULE);
        tcg_rt = cpu_reg(s, rt);
        if (s->ata) {
            gen_helper_ldg(tcg_rt, cpu_env, addr, tcg_rt);
        } else {
            /*
             * Tag access disabled: probe for a fault on the load, then
             * insert a zero tag into the result address.
             */
            clean_addr = clean_data_tbi(s, addr);
            gen_probe_access(s, clean_addr, MMU_DATA_LOAD, MO_8);
            gen_address_with_allocation_tag0(tcg_rt, addr);
        }
    } else {
        tcg_rt = cpu_reg_sp(s, rt);
        if (!s->ata) {
            /*
             * Tag access disabled: the stub helpers still perform the
             * alignment check and memory probe without writing a tag.
             */
            if (is_pair) {
                gen_helper_st2g_stub(cpu_env, addr);
            } else {
                gen_helper_stg_stub(cpu_env, addr);
            }
        } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
            if (is_pair) {
                gen_helper_st2g_parallel(cpu_env, addr, tcg_rt);
            } else {
                gen_helper_stg_parallel(cpu_env, addr, tcg_rt);
            }
        } else {
            if (is_pair) {
                gen_helper_st2g(cpu_env, addr, tcg_rt);
            } else {
                gen_helper_stg(cpu_env, addr, tcg_rt);
            }
        }
    }

    if (is_zero) {
        TCGv_i64 clean_addr = clean_data_tbi(s, addr);
        TCGv_i64 tcg_zero = tcg_const_i64(0);
        int mem_index = get_mem_index(s);
        int i, n = (1 + is_pair) << LOG2_TAG_GRANULE;

        /* Zero the data portion of the granule(s), 8 bytes at a time */
        tcg_gen_qemu_st_i64(tcg_zero, clean_addr, mem_index,
                            MO_Q | MO_ALIGN_16);
        for (i = 8; i < n; i += 8) {
            tcg_gen_addi_i64(clean_addr, clean_addr, 8);
            tcg_gen_qemu_st_i64(tcg_zero, clean_addr, mem_index, MO_Q);
        }
        tcg_temp_free_i64(tcg_zero);
    }

    if (index != 0) {
        /* pre-index or post-index: write back the updated base */
        if (index < 0) {
            /* post-index: the offset is applied after the access */
            tcg_gen_addi_i64(addr, addr, offset);
        }
        tcg_gen_mov_i64(cpu_reg_sp(s, rn), addr);
    }
}
4119
4120
4121static void disas_ldst(DisasContext *s, uint32_t insn)
4122{
4123 switch (extract32(insn, 24, 6)) {
4124 case 0x08:
4125 disas_ldst_excl(s, insn);
4126 break;
4127 case 0x18: case 0x1c:
4128 disas_ld_lit(s, insn);
4129 break;
4130 case 0x28: case 0x29:
4131 case 0x2c: case 0x2d:
4132 disas_ldst_pair(s, insn);
4133 break;
4134 case 0x38: case 0x39:
4135 case 0x3c: case 0x3d:
4136 disas_ldst_reg(s, insn);
4137 break;
4138 case 0x0c:
4139 disas_ldst_multiple_struct(s, insn);
4140 break;
4141 case 0x0d:
4142 disas_ldst_single_struct(s, insn);
4143 break;
4144 case 0x19:
4145 if (extract32(insn, 21, 1) != 0) {
4146 disas_ldst_tag(s, insn);
4147 } else if (extract32(insn, 10, 2) == 0) {
4148 disas_ldst_ldapr_stlr(s, insn);
4149 } else {
4150 unallocated_encoding(s);
4151 }
4152 break;
4153 default:
4154 unallocated_encoding(s);
4155 break;
4156 }
4157}
4158
4159
4160
4161
4162
4163
4164
4165static void disas_pc_rel_adr(DisasContext *s, uint32_t insn)
4166{
4167 unsigned int page, rd;
4168 uint64_t base;
4169 uint64_t offset;
4170
4171 page = extract32(insn, 31, 1);
4172
4173 offset = sextract64(insn, 5, 19);
4174 offset = offset << 2 | extract32(insn, 29, 2);
4175 rd = extract32(insn, 0, 5);
4176 base = s->pc_curr;
4177
4178 if (page) {
4179
4180 base &= ~0xfff;
4181 offset <<= 12;
4182 }
4183
4184 tcg_gen_movi_i64(cpu_reg(s, rd), base + offset);
4185}
4186
4187
4188
4189
4190
4191
4192
4193
4194
4195
4196
4197
4198
4199
/*
 * Add/subtract (immediate)
 *
 *  31 30 29 28         23 22 21         10 9   5 4   0
 * +--+--+--+-------------+--+-------------+-----+-----+
 * |sf|op| S| 1 0 0 0 1 0 |sh|    imm12    |  Rn | Rd  |
 * +--+--+--+-------------+--+-------------+-----+-----+
 *
 *    sf: 0 -> 32bit, 1 -> 64bit
 *    op: 0 -> add  , 1 -> sub
 *     S: 1 -> set flags
 *    sh: 1 -> LSL imm by 12
 */
static void disas_add_sub_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    uint64_t imm = extract32(insn, 10, 12);
    bool shift = extract32(insn, 22, 1);
    bool setflags = extract32(insn, 29, 1);
    bool sub_op = extract32(insn, 30, 1);
    bool is_64bit = extract32(insn, 31, 1);

    /* Rn may be SP; Rd is SP only for the non-flag-setting forms */
    TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
    TCGv_i64 tcg_rd = setflags ? cpu_reg(s, rd) : cpu_reg_sp(s, rd);
    TCGv_i64 tcg_result;

    if (shift) {
        imm <<= 12;
    }

    tcg_result = tcg_temp_new_i64();
    if (!setflags) {
        if (sub_op) {
            tcg_gen_subi_i64(tcg_result, tcg_rn, imm);
        } else {
            tcg_gen_addi_i64(tcg_result, tcg_rn, imm);
        }
    } else {
        /* Flag-setting forms go through the NZCV-computing helpers */
        TCGv_i64 tcg_imm = tcg_const_i64(imm);
        if (sub_op) {
            gen_sub_CC(is_64bit, tcg_result, tcg_rn, tcg_imm);
        } else {
            gen_add_CC(is_64bit, tcg_result, tcg_rn, tcg_imm);
        }
        tcg_temp_free_i64(tcg_imm);
    }

    if (is_64bit) {
        tcg_gen_mov_i64(tcg_rd, tcg_result);
    } else {
        /* 32-bit form: result is zero-extended into the X register */
        tcg_gen_ext32u_i64(tcg_rd, tcg_result);
    }

    tcg_temp_free_i64(tcg_result);
}
4243
4244
4245
4246
4247
4248
4249
4250
4251
4252
4253
/*
 * Add/subtract (immediate, with tags): ADDG, SUBG (MTE).
 *
 *  31 30 29 28         23 22 21     16 14      10 9  5 4  0
 * +--+--+--+-------------+--+---------+--+---------+----+----+
 * |sf|op| S| 1 0 0 0 1 1 |o2|  uimm6  |o3|  uimm4  | Rn | Rd |
 * +--+--+--+-------------+--+---------+--+---------+----+----+
 *
 * op: 0 -> add, 1 -> sub
 * uimm6: address offset in tag granules
 * uimm4: tag offset
 */
static void disas_add_sub_imm_with_tags(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int uimm4 = extract32(insn, 10, 4);
    int uimm6 = extract32(insn, 16, 6);
    bool sub_op = extract32(insn, 30, 1);
    TCGv_i64 tcg_rn, tcg_rd;
    int imm;

    /* Test all of sf=1, S=0, o2=0, o3=0.  */
    if ((insn & 0xa040c000u) != 0x80000000u ||
        !dc_isar_feature(aa64_mte_insn_reg, s)) {
        unallocated_encoding(s);
        return;
    }

    imm = uimm6 << LOG2_TAG_GRANULE;
    if (sub_op) {
        imm = -imm;
    }

    tcg_rn = cpu_reg_sp(s, rn);
    tcg_rd = cpu_reg_sp(s, rd);

    if (s->ata) {
        /* Tag access enabled: the helper also computes the new tag */
        TCGv_i32 offset = tcg_const_i32(imm);
        TCGv_i32 tag_offset = tcg_const_i32(uimm4);

        gen_helper_addsubg(tcg_rd, cpu_env, tcg_rn, offset, tag_offset);
        tcg_temp_free_i32(tag_offset);
        tcg_temp_free_i32(offset);
    } else {
        /* Tag access disabled: add the offset and force a zero tag */
        tcg_gen_addi_i64(tcg_rd, tcg_rn, imm);
        gen_address_with_allocation_tag0(tcg_rd, tcg_rd);
    }
}
4291
4292
4293
4294
4295
/*
 * Replicate an element of width @e bits across all 64 bits.
 * The low @e bits of @mask are OR-folded into every @e-bit lane;
 * @e must be non-zero (and, as used here, a power of two).
 */
static uint64_t bitfield_replicate(uint64_t mask, unsigned int e)
{
    unsigned int span;

    assert(e != 0);
    for (span = e; span < 64; span *= 2) {
        mask |= mask << span;
    }
    return mask;
}
4305
4306
/* Return a value with the bottom @length bits set (1 <= length <= 64). */
static inline uint64_t bitmask64(unsigned int length)
{
    assert(length > 0 && length <= 64);
    /* Avoid the undefined 64-bit shift by special-casing length == 64 */
    return length == 64 ? ~0ULL : (1ULL << length) - 1;
}
4312
4313
4314
4315
4316
4317
/*
 * Simplified variant of pseudocode DecodeBitMasks() for the case where
 * we only require the wmask.  Returns false if the imms/immr/immn are a
 * reserved value (i.e. should cause a guest UNDEF exception), and true
 * if they are valid, in which case the decoded bit pattern is written
 * to *result.
 */
bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn,
                            unsigned int imms, unsigned int immr)
{
    uint64_t mask;
    unsigned e, levels, s, r;
    int len;

    assert(immn < 2 && imms < 64 && immr < 64);

    /*
     * The bit patterns we create here are 64 bit patterns which
     * are vectors of identical elements of size e = 2, 4, 8, 16, 32 or
     * 64 bits each.  Each element contains the same value: a run
     * of between 1 and e-1 non-zero bits, rotated within the
     * element by between 0 and e-1 bits.
     *
     * The element size and run length are encoded into immn (1 bit)
     * and imms (6 bits) as follows:
     * 64 bit elements: immn = 1, imms = <length of run - 1>
     * 32 bit elements: immn = 0, imms = 0 : <length of run - 1>
     * 16 bit elements: immn = 0, imms = 10 : <length of run - 1>
     *  8 bit elements: immn = 0, imms = 110 : <length of run - 1>
     *  4 bit elements: immn = 0, imms = 1110 : <length of run - 1>
     *  2 bit elements: immn = 0, imms = 11110 : <length of run - 1>
     * Notice that immn = 0, imms = 11111x is the only combination
     * not covered by one of the above options; this is reserved.
     * Further, <length of run - 1> all-ones is a reserved pattern.
     *
     * In all cases the rotation is by immr % e (and immr is 6 bits).
     */

    /* First determine the element size from the leading-one position */
    len = 31 - clz32((immn << 6) | (~imms & 0x3f));
    if (len < 1) {
        /* This is the immn == 0, imms == 0x11111x case */
        return false;
    }
    e = 1 << len;

    levels = e - 1;
    s = imms & levels;
    r = immr & levels;

    if (s == levels) {
        /* <length of run - 1> mustn't be all-ones. */
        return false;
    }

    /*
     * Create the run of set bits, then rotate it right within the
     * element (a right rotate by r of an e-bit value).
     */
    mask = bitmask64(s + 1);
    if (r) {
        mask = (mask >> r) | (mask << (e - r));
        mask &= bitmask64(e);
    }
    /* ...then replicate the element over the whole 64 bit value */
    mask = bitfield_replicate(mask, e);
    *result = mask;
    return true;
}
4378
4379
4380
4381
4382
4383
4384
4385static void disas_logic_imm(DisasContext *s, uint32_t insn)
4386{
4387 unsigned int sf, opc, is_n, immr, imms, rn, rd;
4388 TCGv_i64 tcg_rd, tcg_rn;
4389 uint64_t wmask;
4390 bool is_and = false;
4391
4392 sf = extract32(insn, 31, 1);
4393 opc = extract32(insn, 29, 2);
4394 is_n = extract32(insn, 22, 1);
4395 immr = extract32(insn, 16, 6);
4396 imms = extract32(insn, 10, 6);
4397 rn = extract32(insn, 5, 5);
4398 rd = extract32(insn, 0, 5);
4399
4400 if (!sf && is_n) {
4401 unallocated_encoding(s);
4402 return;
4403 }
4404
4405 if (opc == 0x3) {
4406 tcg_rd = cpu_reg(s, rd);
4407 } else {
4408 tcg_rd = cpu_reg_sp(s, rd);
4409 }
4410 tcg_rn = cpu_reg(s, rn);
4411
4412 if (!logic_imm_decode_wmask(&wmask, is_n, imms, immr)) {
4413
4414 unallocated_encoding(s);
4415 return;
4416 }
4417
4418 if (!sf) {
4419 wmask &= 0xffffffff;
4420 }
4421
4422 switch (opc) {
4423 case 0x3:
4424 case 0x0:
4425 tcg_gen_andi_i64(tcg_rd, tcg_rn, wmask);
4426 is_and = true;
4427 break;
4428 case 0x1:
4429 tcg_gen_ori_i64(tcg_rd, tcg_rn, wmask);
4430 break;
4431 case 0x2:
4432 tcg_gen_xori_i64(tcg_rd, tcg_rn, wmask);
4433 break;
4434 default:
4435 assert(FALSE);
4436 break;
4437 }
4438
4439 if (!sf && !is_and) {
4440
4441
4442
4443 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
4444 }
4445
4446 if (opc == 3) {
4447 gen_logic_CC(sf, tcg_rd);
4448 }
4449}
4450
4451
4452
4453
4454
4455
4456
4457
4458
4459
4460
4461
4462
/*
 * Move wide (immediate)
 *
 *   31 30 29 28         23 22 21 20             5 4    0
 * +--+-----+-------------+-----+----------------+------+
 * |sf| opc | 1 0 0 1 0 1 |  hw |      imm16     |  Rd  |
 * +--+-----+-------------+-----+----------------+------+
 *
 * sf: 0 -> 32 bit, 1 -> 64 bit
 * opc: 00 -> N (MOVN), 10 -> Z (MOVZ), 11 -> K (MOVK); 01 is reserved
 * hw: halfword index; shift amount is hw * 16
 */
static void disas_movw_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    uint64_t imm = extract32(insn, 5, 16);
    int sf = extract32(insn, 31, 1);
    int opc = extract32(insn, 29, 2);
    int pos = extract32(insn, 21, 2) << 4; /* bit position of imm16 */
    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_imm;

    if (!sf && (pos >= 32)) {
        /* 32-bit forms may only place the immediate in the low word */
        unallocated_encoding(s);
        return;
    }

    switch (opc) {
    case 0: /* MOVN: move inverted shifted immediate */
    case 2: /* MOVZ: move shifted immediate, zeroing other bits */
        imm <<= pos;
        if (opc == 0) {
            imm = ~imm;
        }
        if (!sf) {
            imm &= 0xffffffffu;
        }
        tcg_gen_movi_i64(tcg_rd, imm);
        break;
    case 3: /* MOVK: insert imm16 into Rd, preserving the other bits */
        tcg_imm = tcg_const_i64(imm);
        tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_imm, pos, 16);
        tcg_temp_free_i64(tcg_imm);
        if (!sf) {
            tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
        }
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
4503
4504
4505
4506
4507
4508
4509
/* Bitfield
 *   31  30 29 28         23 22  21  16 15  10 9    5 4    0
 * +----+-----+-------------+---+------+------+------+------+
 * | sf | opc | 1 0 0 1 1 0 | N | immr | imms |  Rn  |  Rd  |
 * +----+-----+-------------+---+------+------+------+------+
 *
 * opc: 00 SBFM, 01 BFM, 10 UBFM (11 reserved)
 */
static void disas_bitfield(DisasContext *s, uint32_t insn)
{
    unsigned int sf, n, opc, ri, si, rn, rd, bitsize, pos, len;
    TCGv_i64 tcg_rd, tcg_tmp;

    sf = extract32(insn, 31, 1);
    opc = extract32(insn, 29, 2);
    n = extract32(insn, 22, 1);
    ri = extract32(insn, 16, 6); /* immr: rotate / source lsb */
    si = extract32(insn, 10, 6); /* imms: source msb */
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);
    bitsize = sf ? 64 : 32;

    if (sf != n || ri >= bitsize || si >= bitsize || opc > 2) {
        unallocated_encoding(s);
        return;
    }

    tcg_rd = cpu_reg(s, rd);

    /* Suppress the zero-extend for !sf.  Since RI and SI are constrained
     * to be smaller than bitsize, we'll never reference data outside the
     * low 32-bits anyway.
     */
    tcg_tmp = read_cpu_reg(s, rn, 1);

    /* Recognize simple(r) extractions.  */
    if (si >= ri) {
        /* Wd<s-r:0> = Wn<s:r> */
        len = (si - ri) + 1;
        if (opc == 0) { /* SBFM: sign-extended extract */
            tcg_gen_sextract_i64(tcg_rd, tcg_tmp, ri, len);
            goto done;
        } else if (opc == 2) { /* UBFM: zero-extended extract */
            tcg_gen_extract_i64(tcg_rd, tcg_tmp, ri, len);
            return;
        }
        /* opc == 1 (BFXIL): fall through to the deposit below */
        tcg_gen_shri_i64(tcg_tmp, tcg_tmp, ri);
        pos = 0;
    } else {
        /* Handle the ri > si case with a deposit:
         * Wd<32+s-r,32-r> = Wn<s:0>
         */
        len = si + 1;
        pos = (bitsize - ri) & (bitsize - 1);
    }

    if (opc == 0 && len < ri) {
        /* SBFM: sign extend the destination field from len to fill
         * the balance of the word.  Let the deposit below insert all
         * of those sign bits.
         */
        tcg_gen_sextract_i64(tcg_tmp, tcg_tmp, 0, len);
        len = ri;
    }

    if (opc == 1) { /* BFM/BFXIL: merge into existing Rd bits */
        tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, pos, len);
    } else {
        /* SBFM or UBFM: deposit into a zero base; no bits outside
         * bitsize were modified, so the zero-extension below is
         * unneeded.
         */
        tcg_gen_deposit_z_i64(tcg_rd, tcg_tmp, pos, len);
        return;
    }

 done:
    if (!sf) { /* zero extend final result */
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
}
4581
4582
4583
4584
4585
4586
4587
/* Extract (EXTR; the Rn == Rm form is the ROR-immediate alias)
 *   31  30  29 28         23 22   21  20  16 15    10 9    5 4    0
 * +----+------+-------------+---+----+------+--------+------+------+
 * | sf | op21 | 1 0 0 1 1 1 | N | o0 |  Rm  |  imms  |  Rn  |  Rd  |
 * +----+------+-------------+---+----+------+--------+------+------+
 */
static void disas_extract(DisasContext *s, uint32_t insn)
{
    unsigned int sf, n, rm, imm, rn, rd, bitsize, op21, op0;

    sf = extract32(insn, 31, 1);
    n = extract32(insn, 22, 1);
    rm = extract32(insn, 16, 5);
    imm = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);
    op21 = extract32(insn, 29, 2);
    op0 = extract32(insn, 21, 1);
    bitsize = sf ? 64 : 32;

    if (sf != n || op21 || op0 || imm >= bitsize) {
        unallocated_encoding(s);
    } else {
        TCGv_i64 tcg_rd, tcg_rm, tcg_rn;

        tcg_rd = cpu_reg(s, rd);

        if (unlikely(imm == 0)) {
            /* An extract from bit 0 is a degenerate case: the result
             * is simply Rm (zero-extended for the 32-bit form), which
             * also avoids a shift by the full register width below.
             */
            if (sf) {
                tcg_gen_mov_i64(tcg_rd, cpu_reg(s, rm));
            } else {
                tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, rm));
            }
        } else {
            tcg_rm = cpu_reg(s, rm);
            tcg_rn = cpu_reg(s, rn);

            if (sf) {
                /* extract2 takes the low part from Rm and high from Rn */
                tcg_gen_extract2_i64(tcg_rd, tcg_rm, tcg_rn, imm);
            } else {
                /* 32-bit form: work in i32 then zero-extend into Xd */
                TCGv_i32 t0 = tcg_temp_new_i32();

                tcg_gen_extrl_i64_i32(t0, tcg_rm);
                if (rm == rn) {
                    /* Rn == Rm: EXTR degenerates to a rotate right */
                    tcg_gen_rotri_i32(t0, t0, imm);
                } else {
                    TCGv_i32 t1 = tcg_temp_new_i32();
                    tcg_gen_extrl_i64_i32(t1, tcg_rn);
                    tcg_gen_extract2_i32(t0, t0, t1, imm);
                    tcg_temp_free_i32(t1);
                }
                tcg_gen_extu_i32_i64(tcg_rd, t0);
                tcg_temp_free_i32(t0);
            }
        }
    }
}
4643
4644
/* Data processing - immediate: dispatch on insn bits [28:23] */
static void disas_data_proc_imm(DisasContext *s, uint32_t insn)
{
    switch (extract32(insn, 23, 6)) {
    case 0x20: case 0x21: /* PC-rel. addressing */
        disas_pc_rel_adr(s, insn);
        break;
    case 0x22: /* Add/subtract (immediate) */
        disas_add_sub_imm(s, insn);
        break;
    case 0x23: /* Add/subtract (immediate, with tags) */
        disas_add_sub_imm_with_tags(s, insn);
        break;
    case 0x24: /* Logical (immediate) */
        disas_logic_imm(s, insn);
        break;
    case 0x25: /* Move wide (immediate) */
        disas_movw_imm(s, insn);
        break;
    case 0x26: /* Bitfield */
        disas_bitfield(s, insn);
        break;
    case 0x27: /* Extract */
        disas_extract(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
4674
4675
4676
4677
4678
4679
4680static void shift_reg(TCGv_i64 dst, TCGv_i64 src, int sf,
4681 enum a64_shift_type shift_type, TCGv_i64 shift_amount)
4682{
4683 switch (shift_type) {
4684 case A64_SHIFT_TYPE_LSL:
4685 tcg_gen_shl_i64(dst, src, shift_amount);
4686 break;
4687 case A64_SHIFT_TYPE_LSR:
4688 tcg_gen_shr_i64(dst, src, shift_amount);
4689 break;
4690 case A64_SHIFT_TYPE_ASR:
4691 if (!sf) {
4692 tcg_gen_ext32s_i64(dst, src);
4693 }
4694 tcg_gen_sar_i64(dst, sf ? src : dst, shift_amount);
4695 break;
4696 case A64_SHIFT_TYPE_ROR:
4697 if (sf) {
4698 tcg_gen_rotr_i64(dst, src, shift_amount);
4699 } else {
4700 TCGv_i32 t0, t1;
4701 t0 = tcg_temp_new_i32();
4702 t1 = tcg_temp_new_i32();
4703 tcg_gen_extrl_i64_i32(t0, src);
4704 tcg_gen_extrl_i64_i32(t1, shift_amount);
4705 tcg_gen_rotr_i32(t0, t0, t1);
4706 tcg_gen_extu_i32_i64(dst, t0);
4707 tcg_temp_free_i32(t0);
4708 tcg_temp_free_i32(t1);
4709 }
4710 break;
4711 default:
4712 assert(FALSE);
4713 break;
4714 }
4715
4716 if (!sf) {
4717 tcg_gen_ext32u_i64(dst, dst);
4718 }
4719}
4720
4721
4722
4723
4724
4725static void shift_reg_imm(TCGv_i64 dst, TCGv_i64 src, int sf,
4726 enum a64_shift_type shift_type, unsigned int shift_i)
4727{
4728 assert(shift_i < (sf ? 64 : 32));
4729
4730 if (shift_i == 0) {
4731 tcg_gen_mov_i64(dst, src);
4732 } else {
4733 TCGv_i64 shift_const;
4734
4735 shift_const = tcg_const_i64(shift_i);
4736 shift_reg(dst, src, sf, shift_type, shift_const);
4737 tcg_temp_free_i64(shift_const);
4738 }
4739}
4740
4741
4742
4743
4744
4745
4746
4747static void disas_logic_reg(DisasContext *s, uint32_t insn)
4748{
4749 TCGv_i64 tcg_rd, tcg_rn, tcg_rm;
4750 unsigned int sf, opc, shift_type, invert, rm, shift_amount, rn, rd;
4751
4752 sf = extract32(insn, 31, 1);
4753 opc = extract32(insn, 29, 2);
4754 shift_type = extract32(insn, 22, 2);
4755 invert = extract32(insn, 21, 1);
4756 rm = extract32(insn, 16, 5);
4757 shift_amount = extract32(insn, 10, 6);
4758 rn = extract32(insn, 5, 5);
4759 rd = extract32(insn, 0, 5);
4760
4761 if (!sf && (shift_amount & (1 << 5))) {
4762 unallocated_encoding(s);
4763 return;
4764 }
4765
4766 tcg_rd = cpu_reg(s, rd);
4767
4768 if (opc == 1 && shift_amount == 0 && shift_type == 0 && rn == 31) {
4769
4770
4771
4772 tcg_rm = cpu_reg(s, rm);
4773 if (invert) {
4774 tcg_gen_not_i64(tcg_rd, tcg_rm);
4775 if (!sf) {
4776 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
4777 }
4778 } else {
4779 if (sf) {
4780 tcg_gen_mov_i64(tcg_rd, tcg_rm);
4781 } else {
4782 tcg_gen_ext32u_i64(tcg_rd, tcg_rm);
4783 }
4784 }
4785 return;
4786 }
4787
4788 tcg_rm = read_cpu_reg(s, rm, sf);
4789
4790 if (shift_amount) {
4791 shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, shift_amount);
4792 }
4793
4794 tcg_rn = cpu_reg(s, rn);
4795
4796 switch (opc | (invert << 2)) {
4797 case 0:
4798 case 3:
4799 tcg_gen_and_i64(tcg_rd, tcg_rn, tcg_rm);
4800 break;
4801 case 1:
4802 tcg_gen_or_i64(tcg_rd, tcg_rn, tcg_rm);
4803 break;
4804 case 2:
4805 tcg_gen_xor_i64(tcg_rd, tcg_rn, tcg_rm);
4806 break;
4807 case 4:
4808 case 7:
4809 tcg_gen_andc_i64(tcg_rd, tcg_rn, tcg_rm);
4810 break;
4811 case 5:
4812 tcg_gen_orc_i64(tcg_rd, tcg_rn, tcg_rm);
4813 break;
4814 case 6:
4815 tcg_gen_eqv_i64(tcg_rd, tcg_rn, tcg_rm);
4816 break;
4817 default:
4818 assert(FALSE);
4819 break;
4820 }
4821
4822 if (!sf) {
4823 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
4824 }
4825
4826 if (opc == 3) {
4827 gen_logic_CC(sf, tcg_rd);
4828 }
4829}
4830
4831
4832
4833
4834
4835
4836
4837
4838
4839
4840
4841
4842
4843
4844
4845
4846
4847
/*
 * Add/subtract (extended register)
 *
 *  31|30|29|28       24|23 22|21|20   16|15  13|12  10|9  5|4  0|
 * +--+--+--+-----------+-----+--+-------+------+------+----+----+
 * |sf|op| S| 0 1 0 1 1 | opt | 1|  Rm   |option| imm3 | Rn | Rd |
 * +--+--+--+-----------+-----+--+-------+------+------+----+----+
 *
 *  sf: 0 -> 32bit, 1 -> 64bit
 *  op: 0 -> add  , 1 -> sub
 *   S: 1 -> set flags
 * opt: 00 (all other values reserved)
 * option: extension type applied to Rm (see ext_and_shift_reg)
 * imm3: left shift applied to the extended Rm (0-4)
 *
 * Rd = Rn +/- LSL(extend(Rm), imm3)
 */
static void disas_add_sub_ext_reg(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int imm3 = extract32(insn, 10, 3);
    int option = extract32(insn, 13, 3);
    int rm = extract32(insn, 16, 5);
    int opt = extract32(insn, 22, 2);
    bool setflags = extract32(insn, 29, 1);
    bool sub_op = extract32(insn, 30, 1);
    bool sf = extract32(insn, 31, 1);

    TCGv_i64 tcg_rm, tcg_rn;
    TCGv_i64 tcg_rd;
    TCGv_i64 tcg_result;

    if (imm3 > 4 || opt != 0) {
        /* shift amounts > 4 and non-zero opt are reserved */
        unallocated_encoding(s);
        return;
    }

    /* non-flag-setting ops may use SP as the destination */
    if (!setflags) {
        tcg_rd = cpu_reg_sp(s, rd);
    } else {
        tcg_rd = cpu_reg(s, rd);
    }
    /* Rn is always read as SP-or-register in this form */
    tcg_rn = read_cpu_reg_sp(s, rn, sf);

    tcg_rm = read_cpu_reg(s, rm, sf);
    ext_and_shift_reg(tcg_rm, tcg_rm, option, imm3);

    tcg_result = tcg_temp_new_i64();

    if (!setflags) {
        if (sub_op) {
            tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
        } else {
            tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
        }
    } else {
        if (sub_op) {
            gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
        } else {
            gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
        }
    }

    if (sf) {
        tcg_gen_mov_i64(tcg_rd, tcg_result);
    } else {
        /* 32-bit result is zero-extended into the X register */
        tcg_gen_ext32u_i64(tcg_rd, tcg_result);
    }

    tcg_temp_free_i64(tcg_result);
}
4904
4905
4906
4907
4908
4909
4910
4911
4912
4913
4914
4915
4916
4917
4918
/*
 * Add/subtract (shifted register)
 *
 *  31 30 29 28       24 23 22 21 20   16 15     10 9    5 4    0
 * +--+--+--+-----------+-----+--+-------+---------+------+------+
 * |sf|op| S| 0 1 0 1 1 |shift| 0|  Rm   |  imm6   |  Rn  |  Rd  |
 * +--+--+--+-----------+-----+--+-------+---------+------+------+
 *
 *  sf: 0 -> 32bit, 1 -> 64bit
 *  op: 0 -> add  , 1 -> sub
 *   S: 1 -> set flags
 * shift: 00 -> LSL, 01 -> LSR, 10 -> ASR, 11 -> RESERVED
 * imm6: shift amount applied to Rm before the add/sub
 */
static void disas_add_sub_reg(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int imm6 = extract32(insn, 10, 6);
    int rm = extract32(insn, 16, 5);
    int shift_type = extract32(insn, 22, 2);
    bool setflags = extract32(insn, 29, 1);
    bool sub_op = extract32(insn, 30, 1);
    bool sf = extract32(insn, 31, 1);

    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_rn, tcg_rm;
    TCGv_i64 tcg_result;

    if ((shift_type == 3) || (!sf && (imm6 > 31))) {
        /* ROR shifts and 32-bit shift amounts >= 32 are reserved */
        unallocated_encoding(s);
        return;
    }

    tcg_rn = read_cpu_reg(s, rn, sf);
    tcg_rm = read_cpu_reg(s, rm, sf);

    shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, imm6);

    tcg_result = tcg_temp_new_i64();

    if (!setflags) {
        if (sub_op) {
            tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
        } else {
            tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
        }
    } else {
        if (sub_op) {
            gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
        } else {
            gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
        }
    }

    if (sf) {
        tcg_gen_mov_i64(tcg_rd, tcg_result);
    } else {
        /* 32-bit result is zero-extended into the X register */
        tcg_gen_ext32u_i64(tcg_rd, tcg_result);
    }

    tcg_temp_free_i64(tcg_result);
}
4968
4969
4970
4971
4972
4973
4974
4975
/* Data-processing (3 source)
 *   31 30  29 28       24 23 21  20  16  15  14  10 9    5 4    0
 *  +--+------+-----------+------+------+----+------+------+------+
 *  |sf| op54 | 1 1 0 1 1 | op31 |  Rm  | o0 |  Ra  |  Rn  |  Rd  |
 *  +--+------+-----------+------+------+----+------+------+------+
 */
static void disas_data_proc_3src(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int ra = extract32(insn, 10, 5);
    int rm = extract32(insn, 16, 5);
    int op_id = (extract32(insn, 29, 3) << 4) |
        (extract32(insn, 21, 3) << 1) |
        extract32(insn, 15, 1);
    bool sf = extract32(insn, 31, 1);
    bool is_sub = extract32(op_id, 0, 1);
    bool is_high = extract32(op_id, 2, 1);
    bool is_signed = false;
    TCGv_i64 tcg_op1;
    TCGv_i64 tcg_op2;
    TCGv_i64 tcg_tmp;

    /* Note that op_id is sf:op54:op31:o0 so it includes the 32/64 size flag */
    switch (op_id) {
    case 0x42: /* SMADDL */
    case 0x43: /* SMSUBL */
    case 0x44: /* SMULH */
        is_signed = true;
        break;
    case 0x0: /* MADD (32bit) */
    case 0x1: /* MSUB (32bit) */
    case 0x40: /* MADD (64bit) */
    case 0x41: /* MSUB (64bit) */
    case 0x4a: /* UMADDL */
    case 0x4b: /* UMSUBL */
    case 0x4c: /* UMULH */
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (is_high) {
        /* ?MULH: keep only the high half of the 128-bit product */
        TCGv_i64 low_bits = tcg_temp_new_i64(); /* low bits discarded */
        TCGv_i64 tcg_rd = cpu_reg(s, rd);
        TCGv_i64 tcg_rn = cpu_reg(s, rn);
        TCGv_i64 tcg_rm = cpu_reg(s, rm);

        if (is_signed) {
            tcg_gen_muls2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
        } else {
            tcg_gen_mulu2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
        }

        tcg_temp_free_i64(low_bits);
        return;
    }

    tcg_op1 = tcg_temp_new_i64();
    tcg_op2 = tcg_temp_new_i64();
    tcg_tmp = tcg_temp_new_i64();

    if (op_id < 0x42) {
        tcg_gen_mov_i64(tcg_op1, cpu_reg(s, rn));
        tcg_gen_mov_i64(tcg_op2, cpu_reg(s, rm));
    } else {
        /* ?MADDL/?MSUBL: widen the 32-bit operands to 64 bits first */
        if (is_signed) {
            tcg_gen_ext32s_i64(tcg_op1, cpu_reg(s, rn));
            tcg_gen_ext32s_i64(tcg_op2, cpu_reg(s, rm));
        } else {
            tcg_gen_ext32u_i64(tcg_op1, cpu_reg(s, rn));
            tcg_gen_ext32u_i64(tcg_op2, cpu_reg(s, rm));
        }
    }

    if (ra == 31 && !is_sub) {
        /* Special-case MADD with rA == XZR; it is the standard MUL alias */
        tcg_gen_mul_i64(cpu_reg(s, rd), tcg_op1, tcg_op2);
    } else {
        tcg_gen_mul_i64(tcg_tmp, tcg_op1, tcg_op2);
        if (is_sub) {
            tcg_gen_sub_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
        } else {
            tcg_gen_add_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
        }
    }

    if (!sf) { /* zero extend final result */
        tcg_gen_ext32u_i64(cpu_reg(s, rd), cpu_reg(s, rd));
    }

    tcg_temp_free_i64(tcg_op1);
    tcg_temp_free_i64(tcg_op2);
    tcg_temp_free_i64(tcg_tmp);
}
5066
5067
5068
5069
5070
5071
5072
5073
5074static void disas_adc_sbc(DisasContext *s, uint32_t insn)
5075{
5076 unsigned int sf, op, setflags, rm, rn, rd;
5077 TCGv_i64 tcg_y, tcg_rn, tcg_rd;
5078
5079 sf = extract32(insn, 31, 1);
5080 op = extract32(insn, 30, 1);
5081 setflags = extract32(insn, 29, 1);
5082 rm = extract32(insn, 16, 5);
5083 rn = extract32(insn, 5, 5);
5084 rd = extract32(insn, 0, 5);
5085
5086 tcg_rd = cpu_reg(s, rd);
5087 tcg_rn = cpu_reg(s, rn);
5088
5089 if (op) {
5090 tcg_y = new_tmp_a64(s);
5091 tcg_gen_not_i64(tcg_y, cpu_reg(s, rm));
5092 } else {
5093 tcg_y = cpu_reg(s, rm);
5094 }
5095
5096 if (setflags) {
5097 gen_adc_CC(sf, tcg_rd, tcg_rn, tcg_y);
5098 } else {
5099 gen_adc(sf, tcg_rd, tcg_rn, tcg_y);
5100 }
5101}
5102
5103
5104
5105
5106
5107
5108
5109
/*
 * Rotate right into flags (RMIF): rotate Rn right by imm6 and move
 * the low four bits of the result into the NZCV flags selected by mask.
 *
 *  31 30 29                21 20    16 15          10 9    5 4  3    0
 * +--+--+--+-----------------+--------+-------------+------+--+------+
 * |sf|op| S| 1 1 0 1 0 0 0 0 |  imm6  | 0 0 0 0 1   |  Rn  |o2| mask |
 * +--+--+--+-----------------+--------+-------------+------+--+------+
 */
static void disas_rotate_right_into_flags(DisasContext *s, uint32_t insn)
{
    int mask = extract32(insn, 0, 4);
    int o2 = extract32(insn, 4, 1);
    int rn = extract32(insn, 5, 5);
    int imm6 = extract32(insn, 15, 6);
    int sf_op_s = extract32(insn, 29, 3);
    TCGv_i64 tcg_rn;
    TCGv_i32 nzcv;

    if (sf_op_s != 5 || o2 != 0 || !dc_isar_feature(aa64_condm_4, s)) {
        /* sf:op:S must be 1:0:1, o2 must be 0, and CondM must be present */
        unallocated_encoding(s);
        return;
    }

    tcg_rn = read_cpu_reg(s, rn, 1);
    tcg_gen_rotri_i64(tcg_rn, tcg_rn, imm6);

    /* The low 4 bits of the rotated value are the new NZCV candidates */
    nzcv = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(nzcv, tcg_rn);

    if (mask & 8) { /* N: move bit 3 into bit 31 of NF */
        tcg_gen_shli_i32(cpu_NF, nzcv, 31 - 3);
    }
    if (mask & 4) { /* Z: ZF is zero iff Z is set, so invert bit 2 */
        tcg_gen_not_i32(cpu_ZF, nzcv);
        tcg_gen_andi_i32(cpu_ZF, cpu_ZF, 4);
    }
    if (mask & 2) { /* C: bit 1 into bit 0 of CF */
        tcg_gen_extract_i32(cpu_CF, nzcv, 1, 1);
    }
    if (mask & 1) { /* V: bit 0 into bit 31 of VF */
        tcg_gen_shli_i32(cpu_VF, nzcv, 31 - 0);
    }

    tcg_temp_free_i32(nzcv);
}
5147
5148
5149
5150
5151
5152
5153
5154
/*
 * Evaluate into flags (SETF8/SETF16): set NZV as if comparing the low
 * byte (sz == 0) or halfword (sz == 1) of Rn with zero; C is unchanged.
 *
 *  31 30 29                21 20 15   14      10 9    5 4      0
 * +--+--+--+-----------------+--------+----+----+------+--------+
 * |sf|op| S| 1 1 0 1 0 0 0 0 | opcode2| sz |....|  Rn  |o3|mask |
 * +--+--+--+-----------------+--------+----+----+------+--------+
 */
static void disas_evaluate_into_flags(DisasContext *s, uint32_t insn)
{
    int o3_mask = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int o2 = extract32(insn, 15, 6);
    int sz = extract32(insn, 14, 1);
    int sf_op_s = extract32(insn, 29, 3);
    TCGv_i32 tmp;
    int shift;

    if (sf_op_s != 1 || o2 != 0 || o3_mask != 0xd ||
        !dc_isar_feature(aa64_condm_4, s)) {
        unallocated_encoding(s);
        return;
    }
    shift = sz ? 16 : 24; /* SETF16 / SETF8 */

    tmp = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(tmp, cpu_reg(s, rn));
    /* N: sign bit of the byte/halfword shifted up into bit 31 */
    tcg_gen_shli_i32(cpu_NF, tmp, shift);
    tcg_gen_shli_i32(cpu_VF, tmp, shift - 1);
    /* Z: ZF == 0 means Z set, so NF (== value << shift) works directly */
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    /* V: bit <shift-1> XOR bit <shift> of Rn, left in bit 31 of VF */
    tcg_gen_xor_i32(cpu_VF, cpu_VF, cpu_NF);
    tcg_temp_free_i32(tmp);
}
5180
5181
5182
5183
5184
5185
5186
5187
/* Conditional compare (immediate / register): CCMN, CCMP
 *  31 30 29 28 27 26 25 24 23 22 21  20    16 15  12  11  10  9   5  4 3   0
 * +--+--+--+------------------------+--------+------+----+--+------+--+-----+
 * |sf|op| S| 1  1  0  1  0  0  1  0 |imm5/rm | cond |i/r |o2|  Rn  |o3|nzcv |
 * +--+--+--+------------------------+--------+------+----+--+------+--+-----+
 */
static void disas_cc(DisasContext *s, uint32_t insn)
{
    unsigned int sf, op, y, cond, rn, nzcv, is_imm;
    TCGv_i32 tcg_t0, tcg_t1, tcg_t2;
    TCGv_i64 tcg_tmp, tcg_y, tcg_rn;
    DisasCompare c;

    if (!extract32(insn, 29, 1)) {
        /* S == 0 -> unallocated */
        unallocated_encoding(s);
        return;
    }
    if (insn & (1 << 10 | 1 << 4)) {
        /* o2 or o3 set -> unallocated */
        unallocated_encoding(s);
        return;
    }
    sf = extract32(insn, 31, 1);
    op = extract32(insn, 30, 1);    /* 0 -> CCMN (add), 1 -> CCMP (sub) */
    is_imm = extract32(insn, 11, 1);
    y = extract32(insn, 16, 5);     /* y = rm (reg) or imm5 (imm) */
    cond = extract32(insn, 12, 4);
    rn = extract32(insn, 5, 5);
    nzcv = extract32(insn, 0, 4);

    /* Set T0 = !COND.  */
    tcg_t0 = tcg_temp_new_i32();
    arm_test_cc(&c, cond);
    tcg_gen_setcondi_i32(tcg_invert_cond(c.cond), tcg_t0, c.value, 0);
    arm_free_cc(&c);

    /* Load the arguments for the new comparison.  */
    if (is_imm) {
        tcg_y = new_tmp_a64(s);
        tcg_gen_movi_i64(tcg_y, y);
    } else {
        tcg_y = cpu_reg(s, y);
    }
    tcg_rn = cpu_reg(s, rn);

    /* Set the flags for the new comparison.  */
    tcg_tmp = tcg_temp_new_i64();
    if (op) {
        gen_sub_CC(sf, tcg_tmp, tcg_rn, tcg_y);
    } else {
        gen_add_CC(sf, tcg_tmp, tcg_rn, tcg_y);
    }
    tcg_temp_free_i64(tcg_tmp);

    /* If COND was false, force the flags to #nzcv.  Compute two masks
     * to help with this: T1 = (COND ? 0 : -1), T2 = (COND ? -1 : 0).
     * For tcg hosts that support ANDC, we can make do with just T1.
     * In either case, allow the tcg optimizer to delete any unused mask.
     */
    tcg_t1 = tcg_temp_new_i32();
    tcg_t2 = tcg_temp_new_i32();
    tcg_gen_neg_i32(tcg_t1, tcg_t0);
    tcg_gen_subi_i32(tcg_t2, tcg_t0, 1);

    if (nzcv & 8) { /* N */
        tcg_gen_or_i32(cpu_NF, cpu_NF, tcg_t1);
    } else {
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_NF, cpu_NF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_NF, cpu_NF, tcg_t2);
        }
    }
    if (nzcv & 4) { /* Z: note ZF == 0 means Z set */
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_ZF, cpu_ZF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_ZF, cpu_ZF, tcg_t2);
        }
    } else {
        tcg_gen_or_i32(cpu_ZF, cpu_ZF, tcg_t0);
    }
    if (nzcv & 2) { /* C */
        tcg_gen_or_i32(cpu_CF, cpu_CF, tcg_t0);
    } else {
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_CF, cpu_CF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_CF, cpu_CF, tcg_t2);
        }
    }
    if (nzcv & 1) { /* V */
        tcg_gen_or_i32(cpu_VF, cpu_VF, tcg_t1);
    } else {
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_VF, cpu_VF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_VF, cpu_VF, tcg_t2);
        }
    }
    tcg_temp_free_i32(tcg_t0);
    tcg_temp_free_i32(tcg_t1);
    tcg_temp_free_i32(tcg_t2);
}
5285
5286
5287
5288
5289
5290
5291
/* Conditional select: CSEL, CSINC, CSINV, CSNEG
 *   31   30  29  28             21 20  16 15  12 11 10 9    5 4    0
 * +----+----+---+-----------------+------+------+-----+------+------+
 * | sf | op | S | 1 1 0 1 0 1 0 0 |  Rm  | cond | op2 |  Rn  |  Rd  |
 * +----+----+---+-----------------+------+------+-----+------+------+
 */
static void disas_cond_select(DisasContext *s, uint32_t insn)
{
    unsigned int sf, else_inv, rm, cond, else_inc, rn, rd;
    TCGv_i64 tcg_rd, zero;
    DisasCompare64 c;

    if (extract32(insn, 29, 1) || extract32(insn, 11, 1)) {
        /* S == 1 or op2<1> == 1 */
        unallocated_encoding(s);
        return;
    }
    sf = extract32(insn, 31, 1);
    else_inv = extract32(insn, 30, 1);  /* CSINV/CSNEG invert the false case */
    rm = extract32(insn, 16, 5);
    cond = extract32(insn, 12, 4);
    else_inc = extract32(insn, 10, 1);  /* CSINC/CSNEG increment the false case */
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    tcg_rd = cpu_reg(s, rd);

    a64_test_cc(&c, cond);
    zero = tcg_const_i64(0);

    if (rn == 31 && rm == 31 && (else_inc ^ else_inv)) {
        /* CSET (0/1) and CSETM (0/-1) aliases */
        tcg_gen_setcond_i64(tcg_invert_cond(c.cond), tcg_rd, c.value, zero);
        if (else_inv) {
            tcg_gen_neg_i64(tcg_rd, tcg_rd);
        }
    } else {
        TCGv_i64 t_true = cpu_reg(s, rn);
        TCGv_i64 t_false = read_cpu_reg(s, rm, 1);
        if (else_inv && else_inc) { /* CSNEG */
            tcg_gen_neg_i64(t_false, t_false);
        } else if (else_inv) { /* CSINV */
            tcg_gen_not_i64(t_false, t_false);
        } else if (else_inc) { /* CSINC */
            tcg_gen_addi_i64(t_false, t_false, 1);
        }
        tcg_gen_movcond_i64(c.cond, tcg_rd, c.value, zero, t_true, t_false);
    }

    tcg_temp_free_i64(zero);
    a64_free_cc(&c);

    if (!sf) { /* zero extend final result */
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
}
5342
5343static void handle_clz(DisasContext *s, unsigned int sf,
5344 unsigned int rn, unsigned int rd)
5345{
5346 TCGv_i64 tcg_rd, tcg_rn;
5347 tcg_rd = cpu_reg(s, rd);
5348 tcg_rn = cpu_reg(s, rn);
5349
5350 if (sf) {
5351 tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64);
5352 } else {
5353 TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
5354 tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
5355 tcg_gen_clzi_i32(tcg_tmp32, tcg_tmp32, 32);
5356 tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
5357 tcg_temp_free_i32(tcg_tmp32);
5358 }
5359}
5360
5361static void handle_cls(DisasContext *s, unsigned int sf,
5362 unsigned int rn, unsigned int rd)
5363{
5364 TCGv_i64 tcg_rd, tcg_rn;
5365 tcg_rd = cpu_reg(s, rd);
5366 tcg_rn = cpu_reg(s, rn);
5367
5368 if (sf) {
5369 tcg_gen_clrsb_i64(tcg_rd, tcg_rn);
5370 } else {
5371 TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
5372 tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
5373 tcg_gen_clrsb_i32(tcg_tmp32, tcg_tmp32);
5374 tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
5375 tcg_temp_free_i32(tcg_tmp32);
5376 }
5377}
5378
5379static void handle_rbit(DisasContext *s, unsigned int sf,
5380 unsigned int rn, unsigned int rd)
5381{
5382 TCGv_i64 tcg_rd, tcg_rn;
5383 tcg_rd = cpu_reg(s, rd);
5384 tcg_rn = cpu_reg(s, rn);
5385
5386 if (sf) {
5387 gen_helper_rbit64(tcg_rd, tcg_rn);
5388 } else {
5389 TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
5390 tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
5391 gen_helper_rbit(tcg_tmp32, tcg_tmp32);
5392 tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
5393 tcg_temp_free_i32(tcg_tmp32);
5394 }
5395}
5396
5397
5398static void handle_rev64(DisasContext *s, unsigned int sf,
5399 unsigned int rn, unsigned int rd)
5400{
5401 if (!sf) {
5402 unallocated_encoding(s);
5403 return;
5404 }
5405 tcg_gen_bswap64_i64(cpu_reg(s, rd), cpu_reg(s, rn));
5406}
5407
5408
5409
5410
5411static void handle_rev32(DisasContext *s, unsigned int sf,
5412 unsigned int rn, unsigned int rd)
5413{
5414 TCGv_i64 tcg_rd = cpu_reg(s, rd);
5415 TCGv_i64 tcg_rn = cpu_reg(s, rn);
5416
5417 if (sf) {
5418 tcg_gen_bswap64_i64(tcg_rd, tcg_rn);
5419 tcg_gen_rotri_i64(tcg_rd, tcg_rd, 32);
5420 } else {
5421 tcg_gen_bswap32_i64(tcg_rd, tcg_rn, TCG_BSWAP_OZ);
5422 }
5423}
5424
5425
/* REV16: byte-swap each 16-bit halfword of the register */
static void handle_rev16(DisasContext *s, unsigned int sf,
                         unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();
    TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
    /* mask selecting the low byte of each halfword */
    TCGv_i64 mask = tcg_const_i64(sf ? 0x00ff00ff00ff00ffull : 0x00ff00ff);

    /* result = ((x >> 8) & mask) | ((x & mask) << 8) */
    tcg_gen_shri_i64(tcg_tmp, tcg_rn, 8);
    tcg_gen_and_i64(tcg_rd, tcg_rn, mask);
    tcg_gen_and_i64(tcg_tmp, tcg_tmp, mask);
    tcg_gen_shli_i64(tcg_rd, tcg_rd, 8);
    tcg_gen_or_i64(tcg_rd, tcg_rd, tcg_tmp);

    tcg_temp_free_i64(mask);
    tcg_temp_free_i64(tcg_tmp);
}
5443
5444
5445
5446
5447
5448
5449
/* Data-processing (1 source)
 *   31  30  29  28             21 20     16 15    10 9    5 4    0
 * +----+---+---+-----------------+---------+--------+------+------+
 * | sf | 1 | S | 1 1 0 1 0 1 1 0 | opcode2 | opcode |  Rn  |  Rd  |
 * +----+---+---+-----------------+---------+--------+------+------+
 */
static void disas_data_proc_1src(DisasContext *s, uint32_t insn)
{
    unsigned int sf, opcode, opcode2, rn, rd;
    TCGv_i64 tcg_rd;

    if (extract32(insn, 29, 1)) {
        /* S == 1 is unallocated in this class */
        unallocated_encoding(s);
        return;
    }

    sf = extract32(insn, 31, 1);
    opcode = extract32(insn, 10, 6);
    opcode2 = extract32(insn, 16, 5);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

/* Combine sf, opcode2 and opcode into one switchable value */
#define MAP(SF, O2, O1) ((SF) | (O1 << 1) | (O2 << 7))

    /* Note on the pointer-auth cases below: when the pauth feature is
     * implemented but currently disabled (!s->pauth_active), these
     * instructions execute as NOPs rather than trapping.
     */
    switch (MAP(sf, opcode2, opcode)) {
    case MAP(0, 0x00, 0x00): /* RBIT */
    case MAP(1, 0x00, 0x00):
        handle_rbit(s, sf, rn, rd);
        break;
    case MAP(0, 0x00, 0x01): /* REV16 */
    case MAP(1, 0x00, 0x01):
        handle_rev16(s, sf, rn, rd);
        break;
    case MAP(0, 0x00, 0x02): /* REV/REV32 */
    case MAP(1, 0x00, 0x02):
        handle_rev32(s, sf, rn, rd);
        break;
    case MAP(1, 0x00, 0x03): /* REV64 */
        handle_rev64(s, sf, rn, rd);
        break;
    case MAP(0, 0x00, 0x04): /* CLZ */
    case MAP(1, 0x00, 0x04):
        handle_clz(s, sf, rn, rd);
        break;
    case MAP(0, 0x00, 0x05): /* CLS */
    case MAP(1, 0x00, 0x05):
        handle_cls(s, sf, rn, rd);
        break;
    case MAP(1, 0x01, 0x00): /* PACIA */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacia(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x01): /* PACIB */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacib(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x02): /* PACDA */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacda(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x03): /* PACDB */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacdb(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x04): /* AUTIA */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autia(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x05): /* AUTIB */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autib(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x06): /* AUTDA */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autda(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x07): /* AUTDB */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autdb(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x08): /* PACIZA: PACIA with a zero modifier, Rn must be 31 */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacia(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
        }
        break;
    case MAP(1, 0x01, 0x09): /* PACIZB */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacib(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
        }
        break;
    case MAP(1, 0x01, 0x0a): /* PACDZA */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacda(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
        }
        break;
    case MAP(1, 0x01, 0x0b): /* PACDZB */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacdb(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
        }
        break;
    case MAP(1, 0x01, 0x0c): /* AUTIZA */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autia(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
        }
        break;
    case MAP(1, 0x01, 0x0d): /* AUTIZB */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autib(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
        }
        break;
    case MAP(1, 0x01, 0x0e): /* AUTDZA */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autda(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
        }
        break;
    case MAP(1, 0x01, 0x0f): /* AUTDZB */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autdb(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
        }
        break;
    case MAP(1, 0x01, 0x10): /* XPACI: strip the authentication code */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_xpaci(tcg_rd, cpu_env, tcg_rd);
        }
        break;
    case MAP(1, 0x01, 0x11): /* XPACD */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_xpacd(tcg_rd, cpu_env, tcg_rd);
        }
        break;
    default:
    do_unallocated:
        unallocated_encoding(s);
        break;
    }

#undef MAP
}
5644
5645static void handle_div(DisasContext *s, bool is_signed, unsigned int sf,
5646 unsigned int rm, unsigned int rn, unsigned int rd)
5647{
5648 TCGv_i64 tcg_n, tcg_m, tcg_rd;
5649 tcg_rd = cpu_reg(s, rd);
5650
5651 if (!sf && is_signed) {
5652 tcg_n = new_tmp_a64(s);
5653 tcg_m = new_tmp_a64(s);
5654 tcg_gen_ext32s_i64(tcg_n, cpu_reg(s, rn));
5655 tcg_gen_ext32s_i64(tcg_m, cpu_reg(s, rm));
5656 } else {
5657 tcg_n = read_cpu_reg(s, rn, sf);
5658 tcg_m = read_cpu_reg(s, rm, sf);
5659 }
5660
5661 if (is_signed) {
5662 gen_helper_sdiv64(tcg_rd, tcg_n, tcg_m);
5663 } else {
5664 gen_helper_udiv64(tcg_rd, tcg_n, tcg_m);
5665 }
5666
5667 if (!sf) {
5668 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
5669 }
5670}
5671
5672
5673static void handle_shift_reg(DisasContext *s,
5674 enum a64_shift_type shift_type, unsigned int sf,
5675 unsigned int rm, unsigned int rn, unsigned int rd)
5676{
5677 TCGv_i64 tcg_shift = tcg_temp_new_i64();
5678 TCGv_i64 tcg_rd = cpu_reg(s, rd);
5679 TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
5680
5681 tcg_gen_andi_i64(tcg_shift, cpu_reg(s, rm), sf ? 63 : 31);
5682 shift_reg(tcg_rd, tcg_rn, sf, shift_type, tcg_shift);
5683 tcg_temp_free_i64(tcg_shift);
5684}
5685
5686
/* CRC32 / CRC32C: accumulate a CRC over 1 << sz bytes of Rm into Rd */
static void handle_crc32(DisasContext *s,
                         unsigned int sf, unsigned int sz, bool crc32c,
                         unsigned int rm, unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_acc, tcg_val;
    TCGv_i32 tcg_bytes;

    if (!dc_isar_feature(aa64_crc32, s)
        || (sf == 1 && sz != 3)
        || (sf == 0 && sz == 3)) {
        /* only the 64-bit form takes a doubleword; other combos reserved */
        unallocated_encoding(s);
        return;
    }

    if (sz == 3) {
        tcg_val = cpu_reg(s, rm);
    } else {
        /* mask Rm down to the byte/halfword/word actually consumed */
        uint64_t mask;
        switch (sz) {
        case 0:
            mask = 0xFF;
            break;
        case 1:
            mask = 0xFFFF;
            break;
        case 2:
            mask = 0xFFFFFFFF;
            break;
        default:
            g_assert_not_reached();
        }
        tcg_val = new_tmp_a64(s);
        tcg_gen_andi_i64(tcg_val, cpu_reg(s, rm), mask);
    }

    tcg_acc = cpu_reg(s, rn);
    tcg_bytes = tcg_const_i32(1 << sz); /* number of input bytes */

    if (crc32c) {
        gen_helper_crc32c_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes);
    } else {
        gen_helper_crc32_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes);
    }

    tcg_temp_free_i32(tcg_bytes);
}
5733
5734
5735
5736
5737
5738
5739
/*
 * Data-processing (2 source): decode and emit code for UDIV/SDIV, the
 * variable shifts, CRC32*, and the MTE/PAuth two-source instructions.
 * Fields: sf [31], S [29], Rm [20:16], opcode [15:10], Rn [9:5], Rd [4:0].
 */
static void disas_data_proc_2src(DisasContext *s, uint32_t insn)
{
    unsigned int sf, rm, opcode, rn, rd, setflag;
    sf = extract32(insn, 31, 1);
    setflag = extract32(insn, 29, 1);
    rm = extract32(insn, 16, 5);
    opcode = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    /* The S (set flags) bit is only valid with opcode 0 (SUBP/SUBPS). */
    if (setflag && opcode != 0) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0: /* SUBP(S) -- MTE pointer subtract, 64-bit only */
        if (sf == 0 || !dc_isar_feature(aa64_mte_insn_reg, s)) {
            goto do_unallocated;
        } else {
            TCGv_i64 tcg_n, tcg_m, tcg_d;

            /*
             * Operands may be SP; strip the tag byte by sign-extending
             * the 56-bit address part before subtracting.
             */
            tcg_n = read_cpu_reg_sp(s, rn, true);
            tcg_m = read_cpu_reg_sp(s, rm, true);
            tcg_gen_sextract_i64(tcg_n, tcg_n, 0, 56);
            tcg_gen_sextract_i64(tcg_m, tcg_m, 0, 56);
            tcg_d = cpu_reg(s, rd);

            if (setflag) {
                gen_sub_CC(true, tcg_d, tcg_n, tcg_m);
            } else {
                tcg_gen_sub_i64(tcg_d, tcg_n, tcg_m);
            }
        }
        break;
    case 2: /* UDIV */
        handle_div(s, false, sf, rm, rn, rd);
        break;
    case 3: /* SDIV */
        handle_div(s, true, sf, rm, rn, rd);
        break;
    case 4: /* IRG -- insert random tag (MTE) */
        if (sf == 0 || !dc_isar_feature(aa64_mte_insn_reg, s)) {
            goto do_unallocated;
        }
        if (s->ata) {
            gen_helper_irg(cpu_reg_sp(s, rd), cpu_env,
                           cpu_reg_sp(s, rn), cpu_reg(s, rm));
        } else {
            /* Tag generation disabled: just clear the tag field of Rn. */
            gen_address_with_allocation_tag0(cpu_reg_sp(s, rd),
                                             cpu_reg_sp(s, rn));
        }
        break;
    case 5: /* GMI -- tag mask insert (MTE) */
        if (sf == 0 || !dc_isar_feature(aa64_mte_insn_reg, s)) {
            goto do_unallocated;
        } else {
            TCGv_i64 t1 = tcg_const_i64(1);
            TCGv_i64 t2 = tcg_temp_new_i64();

            /* OR (1 << tag-of-Xn) into the mask held in Xm. */
            tcg_gen_extract_i64(t2, cpu_reg_sp(s, rn), 56, 4);
            tcg_gen_shl_i64(t1, t1, t2);
            tcg_gen_or_i64(cpu_reg(s, rd), cpu_reg(s, rm), t1);

            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(t2);
        }
        break;
    case 8: /* LSLV */
        handle_shift_reg(s, A64_SHIFT_TYPE_LSL, sf, rm, rn, rd);
        break;
    case 9: /* LSRV */
        handle_shift_reg(s, A64_SHIFT_TYPE_LSR, sf, rm, rn, rd);
        break;
    case 10: /* ASRV */
        handle_shift_reg(s, A64_SHIFT_TYPE_ASR, sf, rm, rn, rd);
        break;
    case 11: /* RORV */
        handle_shift_reg(s, A64_SHIFT_TYPE_ROR, sf, rm, rn, rd);
        break;
    case 12: /* PACGA -- pointer-authentication generic hash */
        if (sf == 0 || !dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        gen_helper_pacga(cpu_reg(s, rd), cpu_env,
                         cpu_reg(s, rn), cpu_reg_sp(s, rm));
        break;
    case 16:
    case 17:
    case 18:
    case 19:
    case 20:
    case 21:
    case 22:
    case 23: /* CRC32 / CRC32C family */
    {
        int sz = extract32(opcode, 0, 2);
        bool crc32c = extract32(opcode, 2, 1);
        handle_crc32(s, sf, sz, crc32c, rm, rn, rd);
        break;
    }
    default:
    do_unallocated:
        unallocated_encoding(s);
        break;
    }
}
5847
5848
5849
5850
5851
5852
5853
5854
/*
 * Data processing (register): top-level dispatcher for the register-operand
 * data-processing instruction group, keyed on op0 [30], op1 [28],
 * op2 [24:21] and op3 [15:10].
 */
static void disas_data_proc_reg(DisasContext *s, uint32_t insn)
{
    int op0 = extract32(insn, 30, 1);
    int op1 = extract32(insn, 28, 1);
    int op2 = extract32(insn, 21, 4);
    int op3 = extract32(insn, 10, 6);

    if (!op1) {
        if (op2 & 8) {
            if (op2 & 1) {
                /* Add/subtract (extended register) */
                disas_add_sub_ext_reg(s, insn);
            } else {
                /* Add/subtract (shifted register) */
                disas_add_sub_reg(s, insn);
            }
        } else {
            /* Logical (shifted register) */
            disas_logic_reg(s, insn);
        }
        return;
    }

    switch (op2) {
    case 0x0:
        switch (op3) {
        case 0x00: /* Add/subtract with carry */
            disas_adc_sbc(s, insn);
            break;

        case 0x01: /* Rotate right into flags */
        case 0x21:
            disas_rotate_right_into_flags(s, insn);
            break;

        case 0x02: /* Evaluate into flags */
        case 0x12:
        case 0x22:
        case 0x32:
            disas_evaluate_into_flags(s, insn);
            break;

        default:
            goto do_unallocated;
        }
        break;

    case 0x2: /* Conditional compare */
        disas_cc(s, insn);
        break;

    case 0x4: /* Conditional select */
        disas_cond_select(s, insn);
        break;

    case 0x6: /* Data-processing, 1 or 2 source (by op0) */
        if (op0) {
            disas_data_proc_1src(s, insn);
        } else {
            disas_data_proc_2src(s, insn);
        }
        break;
    case 0x8 ... 0xf: /* Data-processing (3 source) */
        disas_data_proc_3src(s, insn);
        break;

    default:
    do_unallocated:
        unallocated_encoding(s);
        break;
    }
}
5927
/*
 * Emit a floating-point compare of Vn against Vm (or against zero) and
 * set NZCV from the result.  size selects half/single/double precision;
 * signal_all_nans selects the signalling (FCMPE-style) helper variants.
 */
static void handle_fp_compare(DisasContext *s, int size,
                              unsigned int rn, unsigned int rm,
                              bool cmp_with_zero, bool signal_all_nans)
{
    TCGv_i64 tcg_flags = tcg_temp_new_i64();
    /* Half precision uses the FP16-specific FPCR status. */
    TCGv_ptr fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);

    if (size == MO_64) {
        TCGv_i64 tcg_vn, tcg_vm;

        tcg_vn = read_fp_dreg(s, rn);
        if (cmp_with_zero) {
            tcg_vm = tcg_const_i64(0);
        } else {
            tcg_vm = read_fp_dreg(s, rm);
        }
        if (signal_all_nans) {
            gen_helper_vfp_cmped_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
        } else {
            gen_helper_vfp_cmpd_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
        }
        tcg_temp_free_i64(tcg_vn);
        tcg_temp_free_i64(tcg_vm);
    } else {
        /* 16- and 32-bit compares both operate on i32 values. */
        TCGv_i32 tcg_vn = tcg_temp_new_i32();
        TCGv_i32 tcg_vm = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_vn, rn, 0, size);
        if (cmp_with_zero) {
            tcg_gen_movi_i32(tcg_vm, 0);
        } else {
            read_vec_element_i32(s, tcg_vm, rm, 0, size);
        }

        switch (size) {
        case MO_32:
            if (signal_all_nans) {
                gen_helper_vfp_cmpes_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
            } else {
                gen_helper_vfp_cmps_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
            }
            break;
        case MO_16:
            if (signal_all_nans) {
                gen_helper_vfp_cmpeh_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
            } else {
                gen_helper_vfp_cmph_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
            }
            break;
        default:
            g_assert_not_reached();
        }

        tcg_temp_free_i32(tcg_vn);
        tcg_temp_free_i32(tcg_vm);
    }

    tcg_temp_free_ptr(fpst);

    /* Helpers return the flags packed in NZCV layout; install them. */
    gen_set_nzcv(tcg_flags);

    tcg_temp_free_i64(tcg_flags);
}
5991
5992
5993
5994
5995
5996
5997
5998static void disas_fp_compare(DisasContext *s, uint32_t insn)
5999{
6000 unsigned int mos, type, rm, op, rn, opc, op2r;
6001 int size;
6002
6003 mos = extract32(insn, 29, 3);
6004 type = extract32(insn, 22, 2);
6005 rm = extract32(insn, 16, 5);
6006 op = extract32(insn, 14, 2);
6007 rn = extract32(insn, 5, 5);
6008 opc = extract32(insn, 3, 2);
6009 op2r = extract32(insn, 0, 3);
6010
6011 if (mos || op || op2r) {
6012 unallocated_encoding(s);
6013 return;
6014 }
6015
6016 switch (type) {
6017 case 0:
6018 size = MO_32;
6019 break;
6020 case 1:
6021 size = MO_64;
6022 break;
6023 case 3:
6024 size = MO_16;
6025 if (dc_isar_feature(aa64_fp16, s)) {
6026 break;
6027 }
6028
6029 default:
6030 unallocated_encoding(s);
6031 return;
6032 }
6033
6034 if (!fp_access_check(s)) {
6035 return;
6036 }
6037
6038 handle_fp_compare(s, size, rn, rm, opc & 1, opc & 2);
6039}
6040
6041
6042
6043
6044
6045
6046
/*
 * Floating point conditional compare (FCCMP / FCCMPE): if the condition
 * holds, perform the FP compare; otherwise set NZCV to the immediate nzcv.
 */
static void disas_fp_ccomp(DisasContext *s, uint32_t insn)
{
    unsigned int mos, type, rm, cond, rn, op, nzcv;
    TCGv_i64 tcg_flags;
    TCGLabel *label_continue = NULL;
    int size;

    mos = extract32(insn, 29, 3);
    type = extract32(insn, 22, 2);
    rm = extract32(insn, 16, 5);
    cond = extract32(insn, 12, 4);
    rn = extract32(insn, 5, 5);
    op = extract32(insn, 4, 1);
    nzcv = extract32(insn, 0, 4);

    if (mos) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0:
        size = MO_32;
        break;
    case 1:
        size = MO_64;
        break;
    case 3:
        size = MO_16;
        if (dc_isar_feature(aa64_fp16, s)) {
            break;
        }
        /* fall through: FP16 not implemented */
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* Conditions 0xe/0xf always pass, so no branch is needed for them. */
    if (cond < 0x0e) {
        TCGLabel *label_match = gen_new_label();
        label_continue = gen_new_label();
        arm_gen_test_cc(cond, label_match);
        /* Condition false: load the immediate nzcv and skip the compare. */
        tcg_flags = tcg_const_i64(nzcv << 28);
        gen_set_nzcv(tcg_flags);
        tcg_temp_free_i64(tcg_flags);
        tcg_gen_br(label_continue);
        gen_set_label(label_match);
    }

    /* op selects the signalling (FCCMPE) variant. */
    handle_fp_compare(s, size, rn, rm, false, op);

    if (cond < 0x0e) {
        gen_set_label(label_continue);
    }
}
6107
6108
6109
6110
6111
6112
6113
/*
 * Floating point conditional select (FCSEL): Vd = cond ? Vn : Vm.
 */
static void disas_fp_csel(DisasContext *s, uint32_t insn)
{
    unsigned int mos, type, rm, cond, rn, rd;
    TCGv_i64 t_true, t_false, t_zero;
    DisasCompare64 c;
    MemOp sz;

    mos = extract32(insn, 29, 3);
    type = extract32(insn, 22, 2);
    rm = extract32(insn, 16, 5);
    cond = extract32(insn, 12, 4);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    if (mos) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0:
        sz = MO_32;
        break;
    case 1:
        sz = MO_64;
        break;
    case 3:
        sz = MO_16;
        if (dc_isar_feature(aa64_fp16, s)) {
            break;
        }
        /* fall through: FP16 not implemented */
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* Read both inputs zero-extended into 64-bit temps up front. */
    t_true = tcg_temp_new_i64();
    t_false = tcg_temp_new_i64();
    read_vec_element(s, t_true, rn, 0, sz);
    read_vec_element(s, t_false, rm, 0, sz);

    a64_test_cc(&c, cond);
    t_zero = tcg_const_i64(0);
    tcg_gen_movcond_i64(c.cond, t_true, c.value, t_zero, t_true, t_false);
    tcg_temp_free_i64(t_zero);
    tcg_temp_free_i64(t_false);
    a64_free_cc(&c);

    /* Writing as a dreg zeroes the high bits; narrower inputs were
     * already zero-extended by read_vec_element above. */
    write_fp_dreg(s, rd, t_true);
    tcg_temp_free_i64(t_true);
}
6173
6174
/* Floating-point data-processing (1 source) - half precision */
static void handle_fp_1src_half(DisasContext *s, int opcode, int rd, int rn)
{
    TCGv_ptr fpst = NULL;
    TCGv_i32 tcg_op = read_fp_hreg(s, rn);
    TCGv_i32 tcg_res = tcg_temp_new_i32();

    switch (opcode) {
    case 0x0: /* FMOV */
        tcg_gen_mov_i32(tcg_res, tcg_op);
        break;
    case 0x1: /* FABS: clear the sign bit */
        tcg_gen_andi_i32(tcg_res, tcg_op, 0x7fff);
        break;
    case 0x2: /* FNEG: flip the sign bit */
        tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
        break;
    case 0x3: /* FSQRT */
        fpst = fpstatus_ptr(FPST_FPCR_F16);
        gen_helper_sqrt_f16(tcg_res, tcg_op, fpst);
        break;
    case 0x8: /* FRINTN */
    case 0x9: /* FRINTP */
    case 0xa: /* FRINTM */
    case 0xb: /* FRINTZ */
    case 0xc: /* FRINTA */
    {
        /* Temporarily force the rounding mode, round, then restore. */
        TCGv_i32 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(opcode & 7));
        fpst = fpstatus_ptr(FPST_FPCR_F16);

        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
        gen_helper_advsimd_rinth(tcg_res, tcg_op, fpst);
        /* set_rmode returns the old mode, so this restores it. */
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
        tcg_temp_free_i32(tcg_rmode);
        break;
    }
    case 0xe: /* FRINTX: round, signalling inexact */
        fpst = fpstatus_ptr(FPST_FPCR_F16);
        gen_helper_advsimd_rinth_exact(tcg_res, tcg_op, fpst);
        break;
    case 0xf: /* FRINTI: round using the current FPCR mode */
        fpst = fpstatus_ptr(FPST_FPCR_F16);
        gen_helper_advsimd_rinth(tcg_res, tcg_op, fpst);
        break;
    default:
        abort();
    }

    write_fp_sreg(s, rd, tcg_res);

    if (fpst) {
        tcg_temp_free_ptr(fpst);
    }
    tcg_temp_free_i32(tcg_op);
    tcg_temp_free_i32(tcg_res);
}
6231
6232
/* Floating-point data-processing (1 source) - single precision */
static void handle_fp_1src_single(DisasContext *s, int opcode, int rd, int rn)
{
    void (*gen_fpst)(TCGv_i32, TCGv_i32, TCGv_ptr);
    TCGv_i32 tcg_op, tcg_res;
    TCGv_ptr fpst;
    int rmode = -1;  /* >= 0 means "force this rounding mode" */

    tcg_op = read_fp_sreg(s, rn);
    tcg_res = tcg_temp_new_i32();

    switch (opcode) {
    case 0x0: /* FMOV */
        tcg_gen_mov_i32(tcg_res, tcg_op);
        goto done;
    case 0x1: /* FABS */
        gen_helper_vfp_abss(tcg_res, tcg_op);
        goto done;
    case 0x2: /* FNEG */
        gen_helper_vfp_negs(tcg_res, tcg_op);
        goto done;
    case 0x3: /* FSQRT */
        gen_helper_vfp_sqrts(tcg_res, tcg_op, cpu_env);
        goto done;
    case 0x6: /* BFCVT */
        gen_fpst = gen_helper_bfcvt;
        break;
    case 0x8: /* FRINTN */
    case 0x9: /* FRINTP */
    case 0xa: /* FRINTM */
    case 0xb: /* FRINTZ */
    case 0xc: /* FRINTA */
        rmode = arm_rmode_to_sf(opcode & 7);
        gen_fpst = gen_helper_rints;
        break;
    case 0xe: /* FRINTX */
        gen_fpst = gen_helper_rints_exact;
        break;
    case 0xf: /* FRINTI: current rounding mode */
        gen_fpst = gen_helper_rints;
        break;
    case 0x10: /* FRINT32Z */
        rmode = float_round_to_zero;
        gen_fpst = gen_helper_frint32_s;
        break;
    case 0x11: /* FRINT32X */
        gen_fpst = gen_helper_frint32_s;
        break;
    case 0x12: /* FRINT64Z */
        rmode = float_round_to_zero;
        gen_fpst = gen_helper_frint64_s;
        break;
    case 0x13: /* FRINT64X */
        gen_fpst = gen_helper_frint64_s;
        break;
    default:
        g_assert_not_reached();
    }

    fpst = fpstatus_ptr(FPST_FPCR);
    if (rmode >= 0) {
        /* Override rounding mode around the call, then restore it. */
        TCGv_i32 tcg_rmode = tcg_const_i32(rmode);
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
        gen_fpst(tcg_res, tcg_op, fpst);
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
        tcg_temp_free_i32(tcg_rmode);
    } else {
        gen_fpst(tcg_res, tcg_op, fpst);
    }
    tcg_temp_free_ptr(fpst);

 done:
    write_fp_sreg(s, rd, tcg_res);
    tcg_temp_free_i32(tcg_op);
    tcg_temp_free_i32(tcg_res);
}
6308
6309
/* Floating-point data-processing (1 source) - double precision */
static void handle_fp_1src_double(DisasContext *s, int opcode, int rd, int rn)
{
    void (*gen_fpst)(TCGv_i64, TCGv_i64, TCGv_ptr);
    TCGv_i64 tcg_op, tcg_res;
    TCGv_ptr fpst;
    int rmode = -1;  /* >= 0 means "force this rounding mode" */

    switch (opcode) {
    case 0x0: /* FMOV: plain vector move, clears the high bits of Vd */
        gen_gvec_fn2(s, false, rd, rn, tcg_gen_gvec_mov, 0);
        return;
    }

    tcg_op = read_fp_dreg(s, rn);
    tcg_res = tcg_temp_new_i64();

    switch (opcode) {
    case 0x1: /* FABS */
        gen_helper_vfp_absd(tcg_res, tcg_op);
        goto done;
    case 0x2: /* FNEG */
        gen_helper_vfp_negd(tcg_res, tcg_op);
        goto done;
    case 0x3: /* FSQRT */
        gen_helper_vfp_sqrtd(tcg_res, tcg_op, cpu_env);
        goto done;
    case 0x8: /* FRINTN */
    case 0x9: /* FRINTP */
    case 0xa: /* FRINTM */
    case 0xb: /* FRINTZ */
    case 0xc: /* FRINTA */
        rmode = arm_rmode_to_sf(opcode & 7);
        gen_fpst = gen_helper_rintd;
        break;
    case 0xe: /* FRINTX */
        gen_fpst = gen_helper_rintd_exact;
        break;
    case 0xf: /* FRINTI: current rounding mode */
        gen_fpst = gen_helper_rintd;
        break;
    case 0x10: /* FRINT32Z */
        rmode = float_round_to_zero;
        gen_fpst = gen_helper_frint32_d;
        break;
    case 0x11: /* FRINT32X */
        gen_fpst = gen_helper_frint32_d;
        break;
    case 0x12: /* FRINT64Z */
        rmode = float_round_to_zero;
        gen_fpst = gen_helper_frint64_d;
        break;
    case 0x13: /* FRINT64X */
        gen_fpst = gen_helper_frint64_d;
        break;
    default:
        g_assert_not_reached();
    }

    fpst = fpstatus_ptr(FPST_FPCR);
    if (rmode >= 0) {
        /* Override rounding mode around the call, then restore it. */
        TCGv_i32 tcg_rmode = tcg_const_i32(rmode);
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
        gen_fpst(tcg_res, tcg_op, fpst);
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
        tcg_temp_free_i32(tcg_rmode);
    } else {
        gen_fpst(tcg_res, tcg_op, fpst);
    }
    tcg_temp_free_ptr(fpst);

 done:
    write_fp_dreg(s, rd, tcg_res);
    tcg_temp_free_i64(tcg_op);
    tcg_temp_free_i64(tcg_res);
}
6385
/*
 * FCVT between floating-point precisions.  ntype is the source type and
 * dtype the destination type (0 = single, 1 = double, 3 = half); the
 * source-equals-destination combinations are rejected by the caller.
 */
static void handle_fp_fcvt(DisasContext *s, int opcode,
                           int rd, int rn, int dtype, int ntype)
{
    switch (ntype) {
    case 0x0: /* source: single precision */
    {
        TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
        if (dtype == 1) {
            /* single -> double */
            TCGv_i64 tcg_rd = tcg_temp_new_i64();
            gen_helper_vfp_fcvtds(tcg_rd, tcg_rn, cpu_env);
            write_fp_dreg(s, rd, tcg_rd);
            tcg_temp_free_i64(tcg_rd);
        } else {
            /* single -> half, honouring the FPCR AHP (alt half) flag */
            TCGv_i32 tcg_rd = tcg_temp_new_i32();
            TCGv_i32 ahp = get_ahp_flag();
            TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);

            gen_helper_vfp_fcvt_f32_to_f16(tcg_rd, tcg_rn, fpst, ahp);

            write_fp_sreg(s, rd, tcg_rd);
            tcg_temp_free_i32(tcg_rd);
            tcg_temp_free_i32(ahp);
            tcg_temp_free_ptr(fpst);
        }
        tcg_temp_free_i32(tcg_rn);
        break;
    }
    case 0x1: /* source: double precision */
    {
        TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
        TCGv_i32 tcg_rd = tcg_temp_new_i32();
        if (dtype == 0) {
            /* double -> single */
            gen_helper_vfp_fcvtsd(tcg_rd, tcg_rn, cpu_env);
        } else {
            /* double -> half */
            TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
            TCGv_i32 ahp = get_ahp_flag();

            gen_helper_vfp_fcvt_f64_to_f16(tcg_rd, tcg_rn, fpst, ahp);

            tcg_temp_free_ptr(fpst);
            tcg_temp_free_i32(ahp);
        }
        write_fp_sreg(s, rd, tcg_rd);
        tcg_temp_free_i32(tcg_rd);
        tcg_temp_free_i64(tcg_rn);
        break;
    }
    case 0x3: /* source: half precision */
    {
        TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
        TCGv_ptr tcg_fpst = fpstatus_ptr(FPST_FPCR);
        TCGv_i32 tcg_ahp = get_ahp_flag();
        /* Only the low 16 bits of the source register are meaningful. */
        tcg_gen_ext16u_i32(tcg_rn, tcg_rn);
        if (dtype == 0) {
            /* half -> single */
            TCGv_i32 tcg_rd = tcg_temp_new_i32();
            gen_helper_vfp_fcvt_f16_to_f32(tcg_rd, tcg_rn, tcg_fpst, tcg_ahp);
            write_fp_sreg(s, rd, tcg_rd);
            tcg_temp_free_i32(tcg_rd);
        } else {
            /* half -> double */
            TCGv_i64 tcg_rd = tcg_temp_new_i64();
            gen_helper_vfp_fcvt_f16_to_f64(tcg_rd, tcg_rn, tcg_fpst, tcg_ahp);
            write_fp_dreg(s, rd, tcg_rd);
            tcg_temp_free_i64(tcg_rd);
        }
        tcg_temp_free_i32(tcg_rn);
        tcg_temp_free_ptr(tcg_fpst);
        tcg_temp_free_i32(tcg_ahp);
        break;
    }
    default:
        abort();
    }
}
6464
6465
6466
6467
6468
6469
6470
/*
 * Floating point data-processing (1 source): decode opcode/type and
 * dispatch to the precision-specific handler or to FCVT.
 */
static void disas_fp_1src(DisasContext *s, uint32_t insn)
{
    int mos = extract32(insn, 29, 3);
    int type = extract32(insn, 22, 2);
    int opcode = extract32(insn, 15, 6);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    if (mos) {
        goto do_unallocated;
    }

    switch (opcode) {
    case 0x4: case 0x5: case 0x7:
    {
        /* FCVT between half, single and double precision.
         * dtype == type (same-precision) is reserved. */
        int dtype = extract32(opcode, 0, 2);
        if (type == 2 || dtype == type) {
            goto do_unallocated;
        }
        if (!fp_access_check(s)) {
            return;
        }

        handle_fp_fcvt(s, opcode, rd, rn, dtype, type);
        break;
    }

    case 0x10 ... 0x13: /* FRINT{32,64}{X,Z}: requires FEAT_FRINTTS */
        if (type > 1 || !dc_isar_feature(aa64_frint, s)) {
            goto do_unallocated;
        }
        /* fall through to the shared per-type dispatch below */
    case 0x0 ... 0x3:
    case 0x8 ... 0xc:
    case 0xe ... 0xf:
        /* 32-to-32, 64-to-64 and 16-to-16 single-source ops */
        switch (type) {
        case 0:
            if (!fp_access_check(s)) {
                return;
            }
            handle_fp_1src_single(s, opcode, rd, rn);
            break;
        case 1:
            if (!fp_access_check(s)) {
                return;
            }
            handle_fp_1src_double(s, opcode, rd, rn);
            break;
        case 3:
            if (!dc_isar_feature(aa64_fp16, s)) {
                goto do_unallocated;
            }

            if (!fp_access_check(s)) {
                return;
            }
            handle_fp_1src_half(s, opcode, rd, rn);
            break;
        default:
            goto do_unallocated;
        }
        break;

    case 0x6: /* BFCVT: single source only, requires FEAT_BF16 */
        switch (type) {
        case 1:
            if (!dc_isar_feature(aa64_bf16, s)) {
                goto do_unallocated;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_fp_1src_single(s, opcode, rd, rn);
            break;
        default:
            goto do_unallocated;
        }
        break;

    default:
    do_unallocated:
        unallocated_encoding(s);
        break;
    }
}
6558
6559
6560static void handle_fp_2src_single(DisasContext *s, int opcode,
6561 int rd, int rn, int rm)
6562{
6563 TCGv_i32 tcg_op1;
6564 TCGv_i32 tcg_op2;
6565 TCGv_i32 tcg_res;
6566 TCGv_ptr fpst;
6567
6568 tcg_res = tcg_temp_new_i32();
6569 fpst = fpstatus_ptr(FPST_FPCR);
6570 tcg_op1 = read_fp_sreg(s, rn);
6571 tcg_op2 = read_fp_sreg(s, rm);
6572
6573 switch (opcode) {
6574 case 0x0:
6575 gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
6576 break;
6577 case 0x1:
6578 gen_helper_vfp_divs(tcg_res, tcg_op1, tcg_op2, fpst);
6579 break;
6580 case 0x2:
6581 gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
6582 break;
6583 case 0x3:
6584 gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
6585 break;
6586 case 0x4:
6587 gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
6588 break;
6589 case 0x5:
6590 gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
6591 break;
6592 case 0x6:
6593 gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
6594 break;
6595 case 0x7:
6596 gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
6597 break;
6598 case 0x8:
6599 gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
6600 gen_helper_vfp_negs(tcg_res, tcg_res);
6601 break;
6602 }
6603
6604 write_fp_sreg(s, rd, tcg_res);
6605
6606 tcg_temp_free_ptr(fpst);
6607 tcg_temp_free_i32(tcg_op1);
6608 tcg_temp_free_i32(tcg_op2);
6609 tcg_temp_free_i32(tcg_res);
6610}
6611
6612
6613static void handle_fp_2src_double(DisasContext *s, int opcode,
6614 int rd, int rn, int rm)
6615{
6616 TCGv_i64 tcg_op1;
6617 TCGv_i64 tcg_op2;
6618 TCGv_i64 tcg_res;
6619 TCGv_ptr fpst;
6620
6621 tcg_res = tcg_temp_new_i64();
6622 fpst = fpstatus_ptr(FPST_FPCR);
6623 tcg_op1 = read_fp_dreg(s, rn);
6624 tcg_op2 = read_fp_dreg(s, rm);
6625
6626 switch (opcode) {
6627 case 0x0:
6628 gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
6629 break;
6630 case 0x1:
6631 gen_helper_vfp_divd(tcg_res, tcg_op1, tcg_op2, fpst);
6632 break;
6633 case 0x2:
6634 gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
6635 break;
6636 case 0x3:
6637 gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
6638 break;
6639 case 0x4:
6640 gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
6641 break;
6642 case 0x5:
6643 gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
6644 break;
6645 case 0x6:
6646 gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
6647 break;
6648 case 0x7:
6649 gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
6650 break;
6651 case 0x8:
6652 gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
6653 gen_helper_vfp_negd(tcg_res, tcg_res);
6654 break;
6655 }
6656
6657 write_fp_dreg(s, rd, tcg_res);
6658
6659 tcg_temp_free_ptr(fpst);
6660 tcg_temp_free_i64(tcg_op1);
6661 tcg_temp_free_i64(tcg_op2);
6662 tcg_temp_free_i64(tcg_res);
6663}
6664
6665
/* Floating-point data-processing (2 source) - half precision */
static void handle_fp_2src_half(DisasContext *s, int opcode,
                                int rd, int rn, int rm)
{
    TCGv_i32 tcg_op1;
    TCGv_i32 tcg_op2;
    TCGv_i32 tcg_res;
    TCGv_ptr fpst;

    tcg_res = tcg_temp_new_i32();
    fpst = fpstatus_ptr(FPST_FPCR_F16);
    tcg_op1 = read_fp_hreg(s, rn);
    tcg_op2 = read_fp_hreg(s, rm);

    switch (opcode) {
    case 0x0: /* FMUL */
        gen_helper_advsimd_mulh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x1: /* FDIV */
        gen_helper_advsimd_divh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x2: /* FADD */
        gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x3: /* FSUB */
        gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x4: /* FMAX */
        gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x5: /* FMIN */
        gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x6: /* FMAXNM */
        gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x7: /* FMINNM */
        gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x8: /* FNMUL: multiply, then flip the f16 sign bit */
        gen_helper_advsimd_mulh(tcg_res, tcg_op1, tcg_op2, fpst);
        tcg_gen_xori_i32(tcg_res, tcg_res, 0x8000);
        break;
    default:
        g_assert_not_reached();
    }

    write_fp_sreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tcg_op1);
    tcg_temp_free_i32(tcg_op2);
    tcg_temp_free_i32(tcg_res);
}
6719
6720
6721
6722
6723
6724
6725
6726static void disas_fp_2src(DisasContext *s, uint32_t insn)
6727{
6728 int mos = extract32(insn, 29, 3);
6729 int type = extract32(insn, 22, 2);
6730 int rd = extract32(insn, 0, 5);
6731 int rn = extract32(insn, 5, 5);
6732 int rm = extract32(insn, 16, 5);
6733 int opcode = extract32(insn, 12, 4);
6734
6735 if (opcode > 8 || mos) {
6736 unallocated_encoding(s);
6737 return;
6738 }
6739
6740 switch (type) {
6741 case 0:
6742 if (!fp_access_check(s)) {
6743 return;
6744 }
6745 handle_fp_2src_single(s, opcode, rd, rn, rm);
6746 break;
6747 case 1:
6748 if (!fp_access_check(s)) {
6749 return;
6750 }
6751 handle_fp_2src_double(s, opcode, rd, rn, rm);
6752 break;
6753 case 3:
6754 if (!dc_isar_feature(aa64_fp16, s)) {
6755 unallocated_encoding(s);
6756 return;
6757 }
6758 if (!fp_access_check(s)) {
6759 return;
6760 }
6761 handle_fp_2src_half(s, opcode, rd, rn, rm);
6762 break;
6763 default:
6764 unallocated_encoding(s);
6765 }
6766}
6767
6768
/* Floating-point data-processing (3 source) - single precision */
static void handle_fp_3src_single(DisasContext *s, bool o0, bool o1,
                                  int rd, int rn, int rm, int ra)
{
    TCGv_i32 tcg_op1, tcg_op2, tcg_op3;
    TCGv_i32 tcg_res = tcg_temp_new_i32();
    TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);

    tcg_op1 = read_fp_sreg(s, rn);
    tcg_op2 = read_fp_sreg(s, rm);
    tcg_op3 = read_fp_sreg(s, ra);

    /*
     * All variants are implemented on top of the fused muladd helper
     * by negating inputs: o1 negates the addend (ra); o0 != o1 negates
     * the first multiplicand (rn), flipping the sign of the product.
     * Negation here is exact (sign-bit flip), not an FP operation.
     */
    if (o1 == true) {
        gen_helper_vfp_negs(tcg_op3, tcg_op3);
    }

    if (o0 != o1) {
        gen_helper_vfp_negs(tcg_op1, tcg_op1);
    }

    gen_helper_vfp_muladds(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);

    write_fp_sreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tcg_op1);
    tcg_temp_free_i32(tcg_op2);
    tcg_temp_free_i32(tcg_op3);
    tcg_temp_free_i32(tcg_res);
}
6805
6806
/* Floating-point data-processing (3 source) - double precision */
static void handle_fp_3src_double(DisasContext *s, bool o0, bool o1,
                                  int rd, int rn, int rm, int ra)
{
    TCGv_i64 tcg_op1, tcg_op2, tcg_op3;
    TCGv_i64 tcg_res = tcg_temp_new_i64();
    TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);

    tcg_op1 = read_fp_dreg(s, rn);
    tcg_op2 = read_fp_dreg(s, rm);
    tcg_op3 = read_fp_dreg(s, ra);

    /*
     * All variants are implemented on top of the fused muladd helper
     * by negating inputs: o1 negates the addend (ra); o0 != o1 negates
     * the first multiplicand (rn), flipping the sign of the product.
     * Negation here is exact (sign-bit flip), not an FP operation.
     */
    if (o1 == true) {
        gen_helper_vfp_negd(tcg_op3, tcg_op3);
    }

    if (o0 != o1) {
        gen_helper_vfp_negd(tcg_op1, tcg_op1);
    }

    gen_helper_vfp_muladdd(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);

    write_fp_dreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(tcg_op1);
    tcg_temp_free_i64(tcg_op2);
    tcg_temp_free_i64(tcg_op3);
    tcg_temp_free_i64(tcg_res);
}
6843
6844
/* Floating-point data-processing (3 source) - half precision */
static void handle_fp_3src_half(DisasContext *s, bool o0, bool o1,
                                int rd, int rn, int rm, int ra)
{
    TCGv_i32 tcg_op1, tcg_op2, tcg_op3;
    TCGv_i32 tcg_res = tcg_temp_new_i32();
    TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR_F16);

    tcg_op1 = read_fp_hreg(s, rn);
    tcg_op2 = read_fp_hreg(s, rm);
    tcg_op3 = read_fp_hreg(s, ra);

    /*
     * All variants are implemented on top of the fused muladd helper
     * by negating inputs: o1 negates the addend (ra); o0 != o1 negates
     * the first multiplicand (rn).  Negation is an exact sign-bit flip
     * (XOR with the f16 sign bit), not an FP operation.
     */
    if (o1 == true) {
        tcg_gen_xori_i32(tcg_op3, tcg_op3, 0x8000);
    }

    if (o0 != o1) {
        tcg_gen_xori_i32(tcg_op1, tcg_op1, 0x8000);
    }

    gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);

    write_fp_sreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tcg_op1);
    tcg_temp_free_i32(tcg_op2);
    tcg_temp_free_i32(tcg_op3);
    tcg_temp_free_i32(tcg_res);
}
6881
6882
6883
6884
6885
6886
6887
6888static void disas_fp_3src(DisasContext *s, uint32_t insn)
6889{
6890 int mos = extract32(insn, 29, 3);
6891 int type = extract32(insn, 22, 2);
6892 int rd = extract32(insn, 0, 5);
6893 int rn = extract32(insn, 5, 5);
6894 int ra = extract32(insn, 10, 5);
6895 int rm = extract32(insn, 16, 5);
6896 bool o0 = extract32(insn, 15, 1);
6897 bool o1 = extract32(insn, 21, 1);
6898
6899 if (mos) {
6900 unallocated_encoding(s);
6901 return;
6902 }
6903
6904 switch (type) {
6905 case 0:
6906 if (!fp_access_check(s)) {
6907 return;
6908 }
6909 handle_fp_3src_single(s, o0, o1, rd, rn, rm, ra);
6910 break;
6911 case 1:
6912 if (!fp_access_check(s)) {
6913 return;
6914 }
6915 handle_fp_3src_double(s, o0, o1, rd, rn, rm, ra);
6916 break;
6917 case 3:
6918 if (!dc_isar_feature(aa64_fp16, s)) {
6919 unallocated_encoding(s);
6920 return;
6921 }
6922 if (!fp_access_check(s)) {
6923 return;
6924 }
6925 handle_fp_3src_half(s, o0, o1, rd, rn, rm, ra);
6926 break;
6927 default:
6928 unallocated_encoding(s);
6929 }
6930}
6931
6932
6933
6934
6935
6936
6937
/*
 * Floating point immediate (FMOV Vd, #imm): expand the 8-bit encoded
 * immediate to the selected precision and write it to Vd.
 */
static void disas_fp_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int imm5 = extract32(insn, 5, 5);
    int imm8 = extract32(insn, 13, 8);
    int type = extract32(insn, 22, 2);
    int mos = extract32(insn, 29, 3);
    uint64_t imm;
    TCGv_i64 tcg_res;
    MemOp sz;

    /* mos and imm5 are must-be-zero fields. */
    if (mos || imm5) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0:
        sz = MO_32;
        break;
    case 1:
        sz = MO_64;
        break;
    case 3:
        sz = MO_16;
        if (dc_isar_feature(aa64_fp16, s)) {
            break;
        }
        /* fall through: FP16 not implemented */
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    imm = vfp_expand_imm(sz, imm8);

    /* Writing as a dreg zeroes the untouched high bits of the vector. */
    tcg_res = tcg_const_i64(imm);
    write_fp_dreg(s, rd, tcg_res);
    tcg_temp_free_i64(tcg_res);
}
6982
6983
6984
6985
6986
6987
/*
 * Conversions between integer and floating point, shared by the
 * fixed-point and plain integer conversion decoders.  itof selects the
 * direction (integer to float when true); rmode is the rounding mode
 * for float-to-int; 64 - scale is the fixed-point shift (scale == 64
 * gives a shift of 0, i.e. an ordinary conversion); sf selects 32- vs
 * 64-bit integers and type the FP precision (0 single, 1 double,
 * 3 half).  opcode bit 0 clear means signed.
 */
static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode,
                           bool itof, int rmode, int scale, int sf, int type)
{
    bool is_signed = !(opcode & 1);
    TCGv_ptr tcg_fpstatus;
    TCGv_i32 tcg_shift, tcg_single;
    TCGv_i64 tcg_double;

    tcg_fpstatus = fpstatus_ptr(type == 3 ? FPST_FPCR_F16 : FPST_FPCR);

    tcg_shift = tcg_const_i32(64 - scale);

    if (itof) {
        /* Integer to floating point. */
        TCGv_i64 tcg_int = cpu_reg(s, rn);
        if (!sf) {
            /* 32-bit source: widen to 64 bits per signedness first. */
            TCGv_i64 tcg_extend = new_tmp_a64(s);

            if (is_signed) {
                tcg_gen_ext32s_i64(tcg_extend, tcg_int);
            } else {
                tcg_gen_ext32u_i64(tcg_extend, tcg_int);
            }

            tcg_int = tcg_extend;
        }

        switch (type) {
        case 1: /* -> double */
            tcg_double = tcg_temp_new_i64();
            if (is_signed) {
                gen_helper_vfp_sqtod(tcg_double, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_uqtod(tcg_double, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            }
            write_fp_dreg(s, rd, tcg_double);
            tcg_temp_free_i64(tcg_double);
            break;

        case 0: /* -> single */
            tcg_single = tcg_temp_new_i32();
            if (is_signed) {
                gen_helper_vfp_sqtos(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_uqtos(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            }
            write_fp_sreg(s, rd, tcg_single);
            tcg_temp_free_i32(tcg_single);
            break;

        case 3: /* -> half */
            tcg_single = tcg_temp_new_i32();
            if (is_signed) {
                gen_helper_vfp_sqtoh(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_uqtoh(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            }
            write_fp_sreg(s, rd, tcg_single);
            tcg_temp_free_i32(tcg_single);
            break;

        default:
            g_assert_not_reached();
        }
    } else {
        /* Floating point to integer. */
        TCGv_i64 tcg_int = cpu_reg(s, rd);
        TCGv_i32 tcg_rmode;

        if (extract32(opcode, 2, 1)) {
            /* FCVTA* variants: round-to-nearest-with-ties-away,
             * which does not fit in the rmode field. */
            rmode = FPROUNDING_TIEAWAY;
        }

        tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));

        /* Force the rounding mode; restored again after the convert. */
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);

        switch (type) {
        case 1: /* from double */
            tcg_double = read_fp_dreg(s, rn);
            if (is_signed) {
                if (!sf) {
                    gen_helper_vfp_tosld(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_tosqd(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                }
            } else {
                if (!sf) {
                    gen_helper_vfp_tould(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touqd(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                }
            }
            if (!sf) {
                /* 32-bit result: clear the high half of Xd. */
                tcg_gen_ext32u_i64(tcg_int, tcg_int);
            }
            tcg_temp_free_i64(tcg_double);
            break;

        case 0: /* from single */
            tcg_single = read_fp_sreg(s, rn);
            if (sf) {
                if (is_signed) {
                    gen_helper_vfp_tosqs(tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touqs(tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
            } else {
                TCGv_i32 tcg_dest = tcg_temp_new_i32();
                if (is_signed) {
                    gen_helper_vfp_tosls(tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touls(tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
                tcg_gen_extu_i32_i64(tcg_int, tcg_dest);
                tcg_temp_free_i32(tcg_dest);
            }
            tcg_temp_free_i32(tcg_single);
            break;

        case 3: /* from half */
            tcg_single = read_fp_sreg(s, rn);
            if (sf) {
                if (is_signed) {
                    gen_helper_vfp_tosqh(tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touqh(tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
            } else {
                TCGv_i32 tcg_dest = tcg_temp_new_i32();
                if (is_signed) {
                    gen_helper_vfp_toslh(tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_toulh(tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
                tcg_gen_extu_i32_i64(tcg_int, tcg_dest);
                tcg_temp_free_i32(tcg_dest);
            }
            tcg_temp_free_i32(tcg_single);
            break;

        default:
            g_assert_not_reached();
        }

        /* Restore the original rounding mode. */
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
        tcg_temp_free_i32(tcg_rmode);
    }

    tcg_temp_free_ptr(tcg_fpstatus);
    tcg_temp_free_i32(tcg_shift);
}
7159
7160
7161
7162
7163
7164
7165
/* Floating point <-> fixed point conversions (SCVTF, UCVTF, FCVTZS, FCVTZU
 * with a 'scale' fixed-point position).  Decodes the fields, rejects
 * unallocated encodings and defers to handle_fpfpcvt() with round-to-zero.
 */
static void disas_fp_fixed_conv(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int scale = extract32(insn, 10, 6);
    int opcode = extract32(insn, 16, 3);
    int rmode = extract32(insn, 19, 2);
    int type = extract32(insn, 22, 2);
    bool sbit = extract32(insn, 29, 1);
    bool sf = extract32(insn, 31, 1);
    bool itof;

    /* S must be 0; for the 32-bit form (sf == 0) scale must be >= 32 */
    if (sbit || (!sf && scale < 32)) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0: /* single precision */
    case 1: /* double precision */
        break;
    case 3: /* half precision, only with FEAT_FP16 */
        if (dc_isar_feature(aa64_fp16, s)) {
            break;
        }
        /* fallthrough */
    default:
        unallocated_encoding(s);
        return;
    }

    switch ((rmode << 3) | opcode) {
    case 0x2: /* SCVTF */
    case 0x3: /* UCVTF */
        itof = true;
        break;
    case 0x18: /* FCVTZS */
    case 0x19: /* FCVTZU */
        itof = false;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    handle_fpfpcvt(s, rd, rn, opcode, itof, FPROUNDING_ZERO, scale, sf, type);
}
7217
static void handle_fmov(DisasContext *s, int rd, int rn, int type, bool itof)
{
    /* FMOV: gpr to or from fp reg, moving the raw bit pattern without
     * any numeric conversion.  'type' selects the width (and which half
     * of a 128-bit vector register), 'itof' the direction
     * (integer-to-fp when true).
     */
    if (itof) {
        TCGv_i64 tcg_rn = cpu_reg(s, rn);
        TCGv_i64 tmp;

        switch (type) {
        case 0:
            /* 32 bit: zero-extend Wn into the low 64 bits of Vd */
            tmp = tcg_temp_new_i64();
            tcg_gen_ext32u_i64(tmp, tcg_rn);
            write_fp_dreg(s, rd, tmp);
            tcg_temp_free_i64(tmp);
            break;
        case 1:
            /* 64 bit */
            write_fp_dreg(s, rd, tcg_rn);
            break;
        case 2:
            /* 64 bit to top half of the 128-bit vector register */
            tcg_gen_st_i64(tcg_rn, cpu_env, fp_reg_hi_offset(s, rd));
            clear_vec_high(s, true, rd);
            break;
        case 3:
            /* 16 bit: zero-extend the half-precision bit pattern */
            tmp = tcg_temp_new_i64();
            tcg_gen_ext16u_i64(tmp, tcg_rn);
            write_fp_dreg(s, rd, tmp);
            tcg_temp_free_i64(tmp);
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        TCGv_i64 tcg_rd = cpu_reg(s, rd);

        switch (type) {
        case 0:
            /* 32 bit */
            tcg_gen_ld32u_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_32));
            break;
        case 1:
            /* 64 bit */
            tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_64));
            break;
        case 2:
            /* 64 bits from top half of the 128-bit vector register */
            tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_hi_offset(s, rn));
            break;
        case 3:
            /* 16 bit */
            tcg_gen_ld16u_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_16));
            break;
        default:
            g_assert_not_reached();
        }
    }
}
7280
7281static void handle_fjcvtzs(DisasContext *s, int rd, int rn)
7282{
7283 TCGv_i64 t = read_fp_dreg(s, rn);
7284 TCGv_ptr fpstatus = fpstatus_ptr(FPST_FPCR);
7285
7286 gen_helper_fjcvtzs(t, t, fpstatus);
7287
7288 tcg_temp_free_ptr(fpstatus);
7289
7290 tcg_gen_ext32u_i64(cpu_reg(s, rd), t);
7291 tcg_gen_extrh_i64_i32(cpu_ZF, t);
7292 tcg_gen_movi_i32(cpu_CF, 0);
7293 tcg_gen_movi_i32(cpu_NF, 0);
7294 tcg_gen_movi_i32(cpu_VF, 0);
7295
7296 tcg_temp_free_i64(t);
7297}
7298
7299
7300
7301
7302
7303
7304
/* Floating point <-> integer conversions: [SU]CVTF, FCVT*[SU],
 * FMOV (register <-> fp/vector element) and FJCVTZS.
 */
static void disas_fp_int_conv(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 16, 3);
    int rmode = extract32(insn, 19, 2);
    int type = extract32(insn, 22, 2);
    bool sbit = extract32(insn, 29, 1);
    bool sf = extract32(insn, 31, 1);
    bool itof = false;

    if (sbit) {
        goto do_unallocated;
    }

    switch (opcode) {
    case 2: /* SCVTF */
    case 3: /* UCVTF */
        itof = true;
        /* fallthrough */
    case 4: /* FCVTAS */
    case 5: /* FCVTAU */
        if (rmode != 0) {
            goto do_unallocated;
        }
        /* fallthrough */
    case 0: /* FCVT[NPMZ]S */
    case 1: /* FCVT[NPMZ]U */
        switch (type) {
        case 0: /* single precision */
        case 1: /* double precision */
            break;
        case 3: /* half precision, only with FEAT_FP16 */
            if (!dc_isar_feature(aa64_fp16, s)) {
                goto do_unallocated;
            }
            break;
        default:
            goto do_unallocated;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_fpfpcvt(s, rd, rn, opcode, itof, rmode, 64, sf, type);
        break;

    default:
        /* Remaining encodings: dispatch on the full sf:type:rmode:opcode */
        switch (sf << 7 | type << 5 | rmode << 3 | opcode) {
        case 0b01100110: /* FMOV half <-> 32-bit int */
        case 0b01100111:
        case 0b11100110: /* FMOV half <-> 64-bit int */
        case 0b11100111:
            if (!dc_isar_feature(aa64_fp16, s)) {
                goto do_unallocated;
            }
            /* fallthrough */
        case 0b00000110: /* FMOV 32-bit */
        case 0b00000111:
        case 0b10100110: /* FMOV 64-bit */
        case 0b10100111:
        case 0b11001110: /* FMOV top half of 128-bit */
        case 0b11001111:
            if (!fp_access_check(s)) {
                return;
            }
            /* opcode bit 0 distinguishes int->fp from fp->int direction */
            itof = opcode & 1;
            handle_fmov(s, rd, rn, type, itof);
            break;

        case 0b00111110: /* FJCVTZS */
            if (!dc_isar_feature(aa64_jscvt, s)) {
                goto do_unallocated;
            } else if (fp_access_check(s)) {
                handle_fjcvtzs(s, rd, rn);
            }
            break;

        default:
        do_unallocated:
            unallocated_encoding(s);
            return;
        }
        break;
    }
}
7390
7391
7392
7393
7394
7395
7396
/* Top-level decode for the floating-point data-processing groups,
 * dispatching on instruction bits [24], [21] and [11:10]/[15:12].
 */
static void disas_data_proc_fp(DisasContext *s, uint32_t insn)
{
    if (extract32(insn, 24, 1)) {
        /* Floating point data-processing (3 source) */
        disas_fp_3src(s, insn);
    } else if (extract32(insn, 21, 1) == 0) {
        /* Floating point to fixed point conversions */
        disas_fp_fixed_conv(s, insn);
    } else {
        switch (extract32(insn, 10, 2)) {
        case 1:
            /* Floating point conditional compare */
            disas_fp_ccomp(s, insn);
            break;
        case 2:
            /* Floating point data-processing (2 source) */
            disas_fp_2src(s, insn);
            break;
        case 3:
            /* Floating point conditional select */
            disas_fp_csel(s, insn);
            break;
        case 0:
            /* Dispatch on the position of the lowest set bit of [15:12] */
            switch (ctz32(extract32(insn, 12, 4))) {
            case 0: /* [15:12] == xxx1 */
                /* Floating point immediate */
                disas_fp_imm(s, insn);
                break;
            case 1: /* [15:12] == xx10 */
                /* Floating point compare */
                disas_fp_compare(s, insn);
                break;
            case 2: /* [15:12] == x100 */
                /* Floating point data-processing (1 source) */
                disas_fp_1src(s, insn);
                break;
            case 3: /* [15:12] == 1000 */
                unallocated_encoding(s);
                break;
            default: /* [15:12] == 0000: ctz32(0) is 32 */
                /* Floating point <-> integer conversions */
                disas_fp_int_conv(s, insn);
                break;
            }
            break;
        }
    }
}
7445
7446static void do_ext64(DisasContext *s, TCGv_i64 tcg_left, TCGv_i64 tcg_right,
7447 int pos)
7448{
7449
7450
7451
7452
7453
7454
7455 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
7456 assert(pos > 0 && pos < 64);
7457
7458 tcg_gen_shri_i64(tcg_right, tcg_right, pos);
7459 tcg_gen_shli_i64(tcg_tmp, tcg_left, 64 - pos);
7460 tcg_gen_or_i64(tcg_right, tcg_right, tcg_tmp);
7461
7462 tcg_temp_free_i64(tcg_tmp);
7463}
7464
7465
7466
7467
7468
7469
7470
/* EXT: extract a contiguous run of bytes from the concatenation Vm:Vn,
 * starting at byte position imm4, into Vd.
 */
static void disas_simd_ext(DisasContext *s, uint32_t insn)
{
    int is_q = extract32(insn, 30, 1);
    int op2 = extract32(insn, 22, 2);
    int imm4 = extract32(insn, 11, 4);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    int pos = imm4 << 3; /* extraction start position in bits */
    TCGv_i64 tcg_resl, tcg_resh;

    /* op2 must be zero; imm4<3> must be zero for the 64-bit variant */
    if (op2 != 0 || (!is_q && extract32(imm4, 3, 1))) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_resh = tcg_temp_new_i64();
    tcg_resl = tcg_temp_new_i64();

    /* The result is built 64 bits at a time by combining adjacent
     * source doublewords with do_ext64().
     */
    if (!is_q) {
        read_vec_element(s, tcg_resl, rn, 0, MO_64);
        if (pos != 0) {
            read_vec_element(s, tcg_resh, rm, 0, MO_64);
            do_ext64(s, tcg_resh, tcg_resl, pos);
        }
    } else {
        TCGv_i64 tcg_hh;
        typedef struct {
            int reg;
            int elt;
        } EltPosns;
        /* Source doublewords of Vm:Vn in ascending significance order */
        EltPosns eltposns[] = { {rn, 0}, {rn, 1}, {rm, 0}, {rm, 1} };
        EltPosns *elt = eltposns;

        if (pos >= 64) {
            /* Skip a whole doubleword of the concatenation */
            elt++;
            pos -= 64;
        }

        read_vec_element(s, tcg_resl, elt->reg, elt->elt, MO_64);
        elt++;
        read_vec_element(s, tcg_resh, elt->reg, elt->elt, MO_64);
        elt++;
        if (pos != 0) {
            do_ext64(s, tcg_resh, tcg_resl, pos);
            tcg_hh = tcg_temp_new_i64();
            read_vec_element(s, tcg_hh, elt->reg, elt->elt, MO_64);
            do_ext64(s, tcg_hh, tcg_resh, pos);
            tcg_temp_free_i64(tcg_hh);
        }
    }

    write_vec_element(s, tcg_resl, rd, 0, MO_64);
    tcg_temp_free_i64(tcg_resl);
    if (is_q) {
        write_vec_element(s, tcg_resh, rd, 1, MO_64);
    }
    tcg_temp_free_i64(tcg_resh);
    clear_vec_high(s, is_q, rd);
}
7539
7540
7541
7542
7543
7544
7545
7546static void disas_simd_tb(DisasContext *s, uint32_t insn)
7547{
7548 int op2 = extract32(insn, 22, 2);
7549 int is_q = extract32(insn, 30, 1);
7550 int rm = extract32(insn, 16, 5);
7551 int rn = extract32(insn, 5, 5);
7552 int rd = extract32(insn, 0, 5);
7553 int is_tbx = extract32(insn, 12, 1);
7554 int len = (extract32(insn, 13, 2) + 1) * 16;
7555
7556 if (op2 != 0) {
7557 unallocated_encoding(s);
7558 return;
7559 }
7560
7561 if (!fp_access_check(s)) {
7562 return;
7563 }
7564
7565 tcg_gen_gvec_2_ptr(vec_full_reg_offset(s, rd),
7566 vec_full_reg_offset(s, rm), cpu_env,
7567 is_q ? 16 : 8, vec_full_reg_size(s),
7568 (len << 6) | (is_tbx << 5) | rn,
7569 gen_helper_simd_tblx);
7570}
7571
7572
7573
7574
7575
7576
7577
/* AdvSIMD permute group: UZP1/2, TRN1/2, ZIP1/2. */
static void disas_simd_zip_trn(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    /* opc field bits [13:12] indicate UZP/TRN/ZIP;
     * bit [14] selects the 1 vs 2 variant of the insn.
     */
    int opcode = extract32(insn, 12, 2);
    bool part = extract32(insn, 14, 1);
    bool is_q = extract32(insn, 30, 1);
    int esize = 8 << size;
    int i, ofs;
    int datasize = is_q ? 128 : 64;
    int elements = datasize / esize;
    TCGv_i64 tcg_res, tcg_resl, tcg_resh;

    /* opcode 0 is unallocated; 64-bit elements require the Q form */
    if (opcode == 0 || (size == 3 && !is_q)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_resl = tcg_const_i64(0);
    tcg_resh = is_q ? tcg_const_i64(0) : NULL;
    tcg_res = tcg_temp_new_i64();

    /* Build the result element by element, ORing each into the
     * appropriate 64-bit accumulator.
     */
    for (i = 0; i < elements; i++) {
        switch (opcode) {
        case 1: /* UZP1/2 */
        {
            int midpoint = elements / 2;
            if (i < midpoint) {
                read_vec_element(s, tcg_res, rn, 2 * i + part, size);
            } else {
                read_vec_element(s, tcg_res, rm,
                                 2 * (i - midpoint) + part, size);
            }
            break;
        }
        case 2: /* TRN1/2 */
            if (i & 1) {
                read_vec_element(s, tcg_res, rm, (i & ~1) + part, size);
            } else {
                read_vec_element(s, tcg_res, rn, (i & ~1) + part, size);
            }
            break;
        case 3: /* ZIP1/2 */
        {
            int base = part * elements / 2;
            if (i & 1) {
                read_vec_element(s, tcg_res, rm, base + (i >> 1), size);
            } else {
                read_vec_element(s, tcg_res, rn, base + (i >> 1), size);
            }
            break;
        }
        default:
            g_assert_not_reached();
        }

        ofs = i * esize;
        if (ofs < 64) {
            tcg_gen_shli_i64(tcg_res, tcg_res, ofs);
            tcg_gen_or_i64(tcg_resl, tcg_resl, tcg_res);
        } else {
            tcg_gen_shli_i64(tcg_res, tcg_res, ofs - 64);
            tcg_gen_or_i64(tcg_resh, tcg_resh, tcg_res);
        }
    }

    tcg_temp_free_i64(tcg_res);

    write_vec_element(s, tcg_resl, rd, 0, MO_64);
    tcg_temp_free_i64(tcg_resl);

    if (is_q) {
        write_vec_element(s, tcg_resh, rd, 1, MO_64);
        tcg_temp_free_i64(tcg_resh);
    }
    clear_vec_high(s, is_q, rd);
}
7664
7665
7666
7667
7668
7669
7670
7671
7672
7673
7674
/* Recursive pairwise FP reduction over the elements selected by 'vmap'
 * (a bitmap of element indices within Vn).  Each level splits the map
 * in half, reduces the halves, then combines them with the operation
 * selected by fpopcode.  NOTE(review): the tree combination order here
 * appears to mirror the Arm ARM Reduce() pseudocode, which matters for
 * NaN propagation in the min/max ops — confirm against the pseudocode.
 */
static TCGv_i32 do_reduction_op(DisasContext *s, int fpopcode, int rn,
                                int esize, int size, int vmap, TCGv_ptr fpst)
{
    if (esize == size) {
        int element;
        MemOp msize = esize == 16 ? MO_16 : MO_32;
        TCGv_i32 tcg_elem;

        /* Base case: exactly one element left in the map */
        assert(ctpop8(vmap) == 1);
        element = ctz32(vmap);
        assert(element < 8);

        tcg_elem = tcg_temp_new_i32();
        read_vec_element_i32(s, tcg_elem, rn, element, msize);
        return tcg_elem;
    } else {
        /* Split the map into a low half and a high half of equal weight */
        int bits = size / 2;
        int shift = ctpop8(vmap) / 2;
        int vmap_lo = (vmap >> shift) & vmap;
        int vmap_hi = (vmap & ~vmap_lo);
        TCGv_i32 tcg_hi, tcg_lo, tcg_res;

        tcg_hi = do_reduction_op(s, fpopcode, rn, esize, bits, vmap_hi, fpst);
        tcg_lo = do_reduction_op(s, fpopcode, rn, esize, bits, vmap_lo, fpst);
        tcg_res = tcg_temp_new_i32();

        switch (fpopcode) {
        case 0x0c: /* fmaxnmv half-precision */
            gen_helper_advsimd_maxnumh(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x0f: /* fmaxv half-precision */
            gen_helper_advsimd_maxh(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x1c: /* fminnmv half-precision */
            gen_helper_advsimd_minnumh(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x1f: /* fminv half-precision */
            gen_helper_advsimd_minh(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x2c: /* fmaxnmv */
            gen_helper_vfp_maxnums(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x2f: /* fmaxv */
            gen_helper_vfp_maxs(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x3c: /* fminnmv */
            gen_helper_vfp_minnums(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x3f: /* fminv */
            gen_helper_vfp_mins(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        default:
            g_assert_not_reached();
        }

        tcg_temp_free_i32(tcg_hi);
        tcg_temp_free_i32(tcg_lo);
        return tcg_res;
    }
}
7736
7737
7738
7739
7740
7741
7742
/* AdvSIMD across lanes: reduce all elements of Vn to a single scalar
 * in Vd (ADDV, [SU]ADDLV, [SU]MAXV, [SU]MINV and the FP min/max forms).
 */
static void disas_simd_across_lanes(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    bool is_q = extract32(insn, 30, 1);
    bool is_u = extract32(insn, 29, 1);
    bool is_fp = false;
    bool is_min = false;
    int esize;
    int elements;
    int i;
    TCGv_i64 tcg_res, tcg_elt;

    switch (opcode) {
    case 0x1b: /* ADDV */
        if (is_u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x3: /* SADDLV, UADDLV */
    case 0xa: /* SMAXV, UMAXV */
    case 0x1a: /* SMINV, UMINV */
        if (size == 3 || (size == 2 && !is_q)) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0xc: /* FMAXNMV, FMINNMV */
    case 0xf: /* FMAXV, FMINV */
        /* Bit 1 of the size field encodes min vs max; the element size
         * is half-precision for the !is_u forms (needs FEAT_FP16) and
         * single-precision (Q form only) otherwise.
         */
        is_min = extract32(size, 1, 1);
        is_fp = true;
        if (!is_u && dc_isar_feature(aa64_fp16, s)) {
            size = 1;
        } else if (!is_u || !is_q || extract32(size, 0, 1)) {
            unallocated_encoding(s);
            return;
        } else {
            size = 2;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    esize = 8 << size;
    elements = (is_q ? 128 : 64) / esize;

    tcg_res = tcg_temp_new_i64();
    tcg_elt = tcg_temp_new_i64();

    /* These instructions operate across all lanes of a vector to
     * produce a single result.  A 64-bit intermediate is sufficient
     * for the integer ops (source elements are at most 32 bits), so
     * for simplicity they always work at 64 bits and truncate at the
     * end.  The FP ops must be done at the operation size, handled by
     * do_reduction_op().
     */
    if (!is_fp) {
        read_vec_element(s, tcg_res, rn, 0, size | (is_u ? 0 : MO_SIGN));

        for (i = 1; i < elements; i++) {
            read_vec_element(s, tcg_elt, rn, i, size | (is_u ? 0 : MO_SIGN));

            switch (opcode) {
            case 0x03: /* SADDLV / UADDLV */
            case 0x1b: /* ADDV */
                tcg_gen_add_i64(tcg_res, tcg_res, tcg_elt);
                break;
            case 0x0a: /* SMAXV / UMAXV */
                if (is_u) {
                    tcg_gen_umax_i64(tcg_res, tcg_res, tcg_elt);
                } else {
                    tcg_gen_smax_i64(tcg_res, tcg_res, tcg_elt);
                }
                break;
            case 0x1a: /* SMINV / UMINV */
                if (is_u) {
                    tcg_gen_umin_i64(tcg_res, tcg_res, tcg_elt);
                } else {
                    tcg_gen_smin_i64(tcg_res, tcg_res, tcg_elt);
                }
                break;
            default:
                g_assert_not_reached();
            }

        }
    } else {
        /* Floating point reduction across 32-bit (single) or 16-bit
         * (half-precision) intermediates, via the recursive helper.
         */
        TCGv_ptr fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
        int fpopcode = opcode | is_min << 4 | is_u << 5;
        int vmap = (1 << elements) - 1;
        TCGv_i32 tcg_res32 = do_reduction_op(s, fpopcode, rn, esize,
                                             (is_q ? 128 : 64), vmap, fpst);
        tcg_gen_extu_i32_i64(tcg_res, tcg_res32);
        tcg_temp_free_i32(tcg_res32);
        tcg_temp_free_ptr(fpst);
    }

    tcg_temp_free_i64(tcg_elt);

    /* Now truncate the result to the width required for the output */
    if (opcode == 0x03) {
        /* SADDLV, UADDLV: result is double the element size */
        size++;
    }

    switch (size) {
    case 0:
        tcg_gen_ext8u_i64(tcg_res, tcg_res);
        break;
    case 1:
        tcg_gen_ext16u_i64(tcg_res, tcg_res);
        break;
    case 2:
        tcg_gen_ext32u_i64(tcg_res, tcg_res);
        break;
    case 3:
        break;
    default:
        g_assert_not_reached();
    }

    write_fp_dreg(s, rd, tcg_res);
    tcg_temp_free_i64(tcg_res);
}
7891
7892
7893
7894
7895
7896
7897
7898
7899
7900
7901static void handle_simd_dupe(DisasContext *s, int is_q, int rd, int rn,
7902 int imm5)
7903{
7904 int size = ctz32(imm5);
7905 int index;
7906
7907 if (size > 3 || (size == 3 && !is_q)) {
7908 unallocated_encoding(s);
7909 return;
7910 }
7911
7912 if (!fp_access_check(s)) {
7913 return;
7914 }
7915
7916 index = imm5 >> (size + 1);
7917 tcg_gen_gvec_dup_mem(size, vec_full_reg_offset(s, rd),
7918 vec_reg_offset(s, rn, index, size),
7919 is_q ? 16 : 8, vec_full_reg_size(s));
7920}
7921
7922
7923
7924
7925
7926
7927
7928static void handle_simd_dupes(DisasContext *s, int rd, int rn,
7929 int imm5)
7930{
7931 int size = ctz32(imm5);
7932 int index;
7933 TCGv_i64 tmp;
7934
7935 if (size > 3) {
7936 unallocated_encoding(s);
7937 return;
7938 }
7939
7940 if (!fp_access_check(s)) {
7941 return;
7942 }
7943
7944 index = imm5 >> (size + 1);
7945
7946
7947
7948
7949 tmp = tcg_temp_new_i64();
7950 read_vec_element(s, tmp, rn, index, size);
7951 write_fp_dreg(s, rd, tmp);
7952 tcg_temp_free_i64(tmp);
7953}
7954
7955
7956
7957
7958
7959
7960
7961
7962
7963
7964static void handle_simd_dupg(DisasContext *s, int is_q, int rd, int rn,
7965 int imm5)
7966{
7967 int size = ctz32(imm5);
7968 uint32_t dofs, oprsz, maxsz;
7969
7970 if (size > 3 || ((size == 3) && !is_q)) {
7971 unallocated_encoding(s);
7972 return;
7973 }
7974
7975 if (!fp_access_check(s)) {
7976 return;
7977 }
7978
7979 dofs = vec_full_reg_offset(s, rd);
7980 oprsz = is_q ? 16 : 8;
7981 maxsz = vec_full_reg_size(s);
7982
7983 tcg_gen_gvec_dup_i64(size, dofs, oprsz, maxsz, cpu_reg(s, rn));
7984}
7985
7986
7987
7988
7989
7990
7991
7992
7993
7994
7995
7996static void handle_simd_inse(DisasContext *s, int rd, int rn,
7997 int imm4, int imm5)
7998{
7999 int size = ctz32(imm5);
8000 int src_index, dst_index;
8001 TCGv_i64 tmp;
8002
8003 if (size > 3) {
8004 unallocated_encoding(s);
8005 return;
8006 }
8007
8008 if (!fp_access_check(s)) {
8009 return;
8010 }
8011
8012 dst_index = extract32(imm5, 1+size, 5);
8013 src_index = extract32(imm4, size, 4);
8014
8015 tmp = tcg_temp_new_i64();
8016
8017 read_vec_element(s, tmp, rn, src_index, size);
8018 write_vec_element(s, tmp, rd, dst_index, size);
8019
8020 tcg_temp_free_i64(tmp);
8021
8022
8023 clear_vec_high(s, true, rd);
8024}
8025
8026
8027
8028
8029
8030
8031
8032
8033
8034
8035
8036
8037static void handle_simd_insg(DisasContext *s, int rd, int rn, int imm5)
8038{
8039 int size = ctz32(imm5);
8040 int idx;
8041
8042 if (size > 3) {
8043 unallocated_encoding(s);
8044 return;
8045 }
8046
8047 if (!fp_access_check(s)) {
8048 return;
8049 }
8050
8051 idx = extract32(imm5, 1 + size, 4 - size);
8052 write_vec_element(s, cpu_reg(s, rn), rd, idx, size);
8053
8054
8055 clear_vec_high(s, true, rd);
8056}
8057
8058
8059
8060
8061
8062
8063
8064
8065
8066
8067
8068
8069
/* UMOV/SMOV: move a vector element to a general-purpose register,
 * sign-extending for SMOV.
 */
static void handle_simd_umov_smov(DisasContext *s, int is_q, int is_signed,
                                  int rn, int rd, int imm5)
{
    int size = ctz32(imm5);
    int element;
    TCGv_i64 tcg_rd;

    /* Check for UnallocatedEncodings */
    if (is_signed) {
        /* SMOV: 8/16-bit elements to W or X; 32-bit only to X (is_q) */
        if (size > 2 || (size == 2 && !is_q)) {
            unallocated_encoding(s);
            return;
        }
    } else {
        /* UMOV: 8/16/32-bit elements to W (!is_q); 64-bit to X (is_q) */
        if (size > 3
            || (size < 3 && is_q)
            || (size == 3 && !is_q)) {
            unallocated_encoding(s);
            return;
        }
    }

    if (!fp_access_check(s)) {
        return;
    }

    element = extract32(imm5, 1+size, 4);

    tcg_rd = cpu_reg(s, rd);
    read_vec_element(s, tcg_rd, rn, element, size | (is_signed ? MO_SIGN : 0));
    if (is_signed && !is_q) {
        /* W-form SMOV: the sign-extended value is truncated to 32 bits */
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
}
8104
8105
8106
8107
8108
8109
8110
8111static void disas_simd_copy(DisasContext *s, uint32_t insn)
8112{
8113 int rd = extract32(insn, 0, 5);
8114 int rn = extract32(insn, 5, 5);
8115 int imm4 = extract32(insn, 11, 4);
8116 int op = extract32(insn, 29, 1);
8117 int is_q = extract32(insn, 30, 1);
8118 int imm5 = extract32(insn, 16, 5);
8119
8120 if (op) {
8121 if (is_q) {
8122
8123 handle_simd_inse(s, rd, rn, imm4, imm5);
8124 } else {
8125 unallocated_encoding(s);
8126 }
8127 } else {
8128 switch (imm4) {
8129 case 0:
8130
8131 handle_simd_dupe(s, is_q, rd, rn, imm5);
8132 break;
8133 case 1:
8134
8135 handle_simd_dupg(s, is_q, rd, rn, imm5);
8136 break;
8137 case 3:
8138 if (is_q) {
8139
8140 handle_simd_insg(s, rd, rn, imm5);
8141 } else {
8142 unallocated_encoding(s);
8143 }
8144 break;
8145 case 5:
8146 case 7:
8147
8148 handle_simd_umov_smov(s, is_q, (imm4 == 5), rn, rd, imm5);
8149 break;
8150 default:
8151 unallocated_encoding(s);
8152 break;
8153 }
8154 }
8155}
8156
8157
8158
8159
8160
8161
8162
8163
8164
8165
8166
8167
8168
8169
8170
/* AdvSIMD modified immediate: MOVI, MVNI, ORR, BIC and FMOV (vector,
 * immediate).  The abcdefgh immediate byte is assembled from
 * insn[18:16]:insn[9:5].
 */
static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int cmode = extract32(insn, 12, 4);
    int o2 = extract32(insn, 11, 1);
    uint64_t abcdefgh = extract32(insn, 5, 5) | (extract32(insn, 16, 3) << 5);
    bool is_neg = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);
    uint64_t imm = 0;

    if (o2 != 0 || ((cmode == 0xf) && is_neg && !is_q)) {
        /* Check for FMOV (vector, immediate) - half-precision */
        if (!(dc_isar_feature(aa64_fp16, s) && o2 && cmode == 0xf)) {
            unallocated_encoding(s);
            return;
        }
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (cmode == 15 && o2 && !is_neg) {
        /* FMOV (vector, immediate) - half-precision */
        imm = vfp_expand_imm(MO_16, abcdefgh);
        /* now duplicate across the lanes */
        imm = dup_const(MO_16, imm);
    } else {
        imm = asimd_imm_const(abcdefgh, cmode, is_neg);
    }

    if (!((cmode & 0x9) == 0x1 || (cmode & 0xd) == 0x9)) {
        /* MOVI or MVNI, with MVNI negation handled above */
        tcg_gen_gvec_dup_imm(MO_64, vec_full_reg_offset(s, rd), is_q ? 16 : 8,
                             vec_full_reg_size(s), imm);
    } else {
        /* ORR or BIC, with BIC negation to AND handled above */
        if (is_neg) {
            gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_andi, MO_64);
        } else {
            gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_ori, MO_64);
        }
    }
}
8215
8216
8217
8218
8219
8220
8221
8222static void disas_simd_scalar_copy(DisasContext *s, uint32_t insn)
8223{
8224 int rd = extract32(insn, 0, 5);
8225 int rn = extract32(insn, 5, 5);
8226 int imm4 = extract32(insn, 11, 4);
8227 int imm5 = extract32(insn, 16, 5);
8228 int op = extract32(insn, 29, 1);
8229
8230 if (op != 0 || imm4 != 0) {
8231 unallocated_encoding(s);
8232 return;
8233 }
8234
8235
8236 handle_simd_dupes(s, rd, rn, imm5);
8237}
8238
8239
8240
8241
8242
8243
8244
/* AdvSIMD scalar pairwise: combine elements 0 and 1 of Vn into a single
 * scalar result (ADDP and the FP pairwise add/min/max ops).
 */
static void disas_simd_scalar_pairwise(DisasContext *s, uint32_t insn)
{
    int u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    TCGv_ptr fpst;

    /* For some ops (the FP ones), size[1] is part of the encoding.
     * For ADDP strictly it is not but size[1] is always 1 for valid
     * encodings, so we don't conflict.
     */
    opcode |= (extract32(size, 1, 1) << 5);

    switch (opcode) {
    case 0x3b: /* ADDP */
        if (u || size != 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }

        fpst = NULL;
        break;
    case 0xc: /* FMAXNMP */
    case 0xd: /* FADDP */
    case 0xf: /* FMAXP */
    case 0x2c: /* FMINNMP */
    case 0x2f: /* FMINP */
        /* FP op: u == 0 is half-precision (needs FEAT_FP16),
         * otherwise size[0] selects 32 vs 64 bit.
         */
        if (!u) {
            if (!dc_isar_feature(aa64_fp16, s)) {
                unallocated_encoding(s);
                return;
            } else {
                size = MO_16;
            }
        } else {
            size = extract32(size, 0, 1) ? MO_64 : MO_32;
        }

        if (!fp_access_check(s)) {
            return;
        }

        fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (size == MO_64) {
        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
        TCGv_i64 tcg_op2 = tcg_temp_new_i64();
        TCGv_i64 tcg_res = tcg_temp_new_i64();

        read_vec_element(s, tcg_op1, rn, 0, MO_64);
        read_vec_element(s, tcg_op2, rn, 1, MO_64);

        switch (opcode) {
        case 0x3b: /* ADDP */
            tcg_gen_add_i64(tcg_res, tcg_op1, tcg_op2);
            break;
        case 0xc: /* FMAXNMP */
            gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0xd: /* FADDP */
            gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0xf: /* FMAXP */
            gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0x2c: /* FMINNMP */
            gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0x2f: /* FMINP */
            gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        default:
            g_assert_not_reached();
        }

        write_fp_dreg(s, rd, tcg_res);

        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2);
        tcg_temp_free_i64(tcg_res);
    } else {
        TCGv_i32 tcg_op1 = tcg_temp_new_i32();
        TCGv_i32 tcg_op2 = tcg_temp_new_i32();
        TCGv_i32 tcg_res = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_op1, rn, 0, size);
        read_vec_element_i32(s, tcg_op2, rn, 1, size);

        if (size == MO_16) {
            switch (opcode) {
            case 0xc: /* FMAXNMP */
                gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xd: /* FADDP */
                gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xf: /* FMAXP */
                gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x2c: /* FMINNMP */
                gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x2f: /* FMINP */
                gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            switch (opcode) {
            case 0xc: /* FMAXNMP */
                gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xd: /* FADDP */
                gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xf: /* FMAXP */
                gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x2c: /* FMINNMP */
                gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x2f: /* FMINP */
                gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }
        }

        write_fp_sreg(s, rd, tcg_res);

        tcg_temp_free_i32(tcg_op1);
        tcg_temp_free_i32(tcg_op2);
        tcg_temp_free_i32(tcg_res);
    }

    if (fpst) {
        tcg_temp_free_ptr(fpst);
    }
}
8397
8398
8399
8400
8401
8402
8403
/* Shift tcg_src right by 'shift' with optional rounding (tcg_rnd holds
 * the rounding constant, or is NULL) and optional accumulation into
 * tcg_res.  For rounded 64-bit shifts the addition of the rounding
 * constant can carry out, so a 128-bit intermediate (tcg_src:tcg_src_hi)
 * is used in that case.
 */
static void handle_shri_with_rndacc(TCGv_i64 tcg_res, TCGv_i64 tcg_src,
                                    TCGv_i64 tcg_rnd, bool accumulate,
                                    bool is_u, int size, int shift)
{
    bool extended_result = false;
    bool round = tcg_rnd != NULL;
    int ext_lshift = 0;
    TCGv_i64 tcg_src_hi;

    if (round && size == 3) {
        extended_result = true;
        ext_lshift = 64 - shift;
        tcg_src_hi = tcg_temp_new_i64();
    } else if (shift == 64) {
        if (!accumulate && is_u) {
            /* result is zero */
            tcg_gen_movi_i64(tcg_res, 0);
            return;
        }
    }

    /* Deal with the rounding step */
    if (round) {
        if (extended_result) {
            TCGv_i64 tcg_zero = tcg_const_i64(0);
            if (!is_u) {
                /* take care of sign extending tcg_src into the high half */
                tcg_gen_sari_i64(tcg_src_hi, tcg_src, 63);
                tcg_gen_add2_i64(tcg_src, tcg_src_hi,
                                 tcg_src, tcg_src_hi,
                                 tcg_rnd, tcg_zero);
            } else {
                tcg_gen_add2_i64(tcg_src, tcg_src_hi,
                                 tcg_src, tcg_zero,
                                 tcg_rnd, tcg_zero);
            }
            tcg_temp_free_i64(tcg_zero);
        } else {
            tcg_gen_add_i64(tcg_src, tcg_src, tcg_rnd);
        }
    }

    /* Now do the shift right */
    if (round && extended_result) {
        /* extended case, >64 bit precision required */
        if (ext_lshift == 0) {
            /* special case, only high bits matter */
            tcg_gen_mov_i64(tcg_src, tcg_src_hi);
        } else {
            tcg_gen_shri_i64(tcg_src, tcg_src, shift);
            tcg_gen_shli_i64(tcg_src_hi, tcg_src_hi, ext_lshift);
            tcg_gen_or_i64(tcg_src, tcg_src, tcg_src_hi);
        }
    } else {
        if (is_u) {
            if (shift == 64) {
                /* essentially shifting in 64 zeros */
                tcg_gen_movi_i64(tcg_src, 0);
            } else {
                tcg_gen_shri_i64(tcg_src, tcg_src, shift);
            }
        } else {
            if (shift == 64) {
                /* effectively extending the sign-bit */
                tcg_gen_sari_i64(tcg_src, tcg_src, 63);
            } else {
                tcg_gen_sari_i64(tcg_src, tcg_src, shift);
            }
        }
    }

    if (accumulate) {
        tcg_gen_add_i64(tcg_res, tcg_res, tcg_src);
    } else {
        tcg_gen_mov_i64(tcg_res, tcg_src);
    }

    if (extended_result) {
        tcg_temp_free_i64(tcg_src_hi);
    }
}
8485
8486
/* Scalar shift right (immediate): SSHR/USHR plus the accumulating,
 * rounding and insert variants selected by 'opcode'.
 */
static void handle_scalar_simd_shri(DisasContext *s,
                                    bool is_u, int immh, int immb,
                                    int opcode, int rn, int rd)
{
    const int size = 3; /* scalar forms here are always 64-bit */
    int immhb = immh << 3 | immb;
    int shift = 2 * (8 << size) - immhb;
    bool accumulate = false;
    bool round = false;
    bool insert = false;
    TCGv_i64 tcg_rn;
    TCGv_i64 tcg_rd;
    TCGv_i64 tcg_round;

    /* immh<3> must be set: only the 64-bit element size is allocated */
    if (!extract32(immh, 3, 1)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    switch (opcode) {
    case 0x02: /* SSRA / USRA (accumulate) */
        accumulate = true;
        break;
    case 0x04: /* SRSHR / URSHR (rounding) */
        round = true;
        break;
    case 0x06: /* SRSRA / URSRA (accum + rounding) */
        accumulate = round = true;
        break;
    case 0x08: /* SRI */
        insert = true;
        break;
    }

    if (round) {
        uint64_t round_const = 1ULL << (shift - 1);
        tcg_round = tcg_const_i64(round_const);
    } else {
        tcg_round = NULL;
    }

    tcg_rn = read_fp_dreg(s, rn);
    tcg_rd = (accumulate || insert) ? read_fp_dreg(s, rd) : tcg_temp_new_i64();

    if (insert) {
        /* shift count same as element size is valid but does nothing;
         * special case to avoid potential shift by 64.
         */
        int esize = 8 << size;
        if (shift != esize) {
            tcg_gen_shri_i64(tcg_rn, tcg_rn, shift);
            tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, 0, esize - shift);
        }
    } else {
        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
                                accumulate, is_u, size, shift);
    }

    write_fp_dreg(s, rd, tcg_rd);

    tcg_temp_free_i64(tcg_rn);
    tcg_temp_free_i64(tcg_rd);
    if (round) {
        tcg_temp_free_i64(tcg_round);
    }
}
8557
8558
8559static void handle_scalar_simd_shli(DisasContext *s, bool insert,
8560 int immh, int immb, int opcode,
8561 int rn, int rd)
8562{
8563 int size = 32 - clz32(immh) - 1;
8564 int immhb = immh << 3 | immb;
8565 int shift = immhb - (8 << size);
8566 TCGv_i64 tcg_rn;
8567 TCGv_i64 tcg_rd;
8568
8569 if (!extract32(immh, 3, 1)) {
8570 unallocated_encoding(s);
8571 return;
8572 }
8573
8574 if (!fp_access_check(s)) {
8575 return;
8576 }
8577
8578 tcg_rn = read_fp_dreg(s, rn);
8579 tcg_rd = insert ? read_fp_dreg(s, rd) : tcg_temp_new_i64();
8580
8581 if (insert) {
8582 tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, shift, 64 - shift);
8583 } else {
8584 tcg_gen_shli_i64(tcg_rd, tcg_rn, shift);
8585 }
8586
8587 write_fp_dreg(s, rd, tcg_rd);
8588
8589 tcg_temp_free_i64(tcg_rn);
8590 tcg_temp_free_i64(tcg_rd);
8591}
8592
8593
8594
/* Saturating shift right with narrowing (scalar and vector forms):
 * the source elements are shifted right (optionally with rounding)
 * and then saturate-narrowed to half their width.
 */
static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q,
                                   bool is_u_shift, bool is_u_narrow,
                                   int immh, int immb, int opcode,
                                   int rn, int rd)
{
    int immhb = immh << 3 | immb;
    int size = 32 - clz32(immh) - 1;
    int esize = 8 << size; /* narrowed (destination) element size */
    int shift = (2 * esize) - immhb;
    int elements = is_scalar ? 1 : (64 / esize);
    bool round = extract32(opcode, 0, 1);
    /* source elements are read at double the destination width */
    MemOp ldop = (size + 1) | (is_u_shift ? 0 : MO_SIGN);
    TCGv_i64 tcg_rn, tcg_rd, tcg_round;
    TCGv_i32 tcg_rd_narrowed;
    TCGv_i64 tcg_final;

    static NeonGenNarrowEnvFn * const signed_narrow_fns[4][2] = {
        { gen_helper_neon_narrow_sat_s8,
          gen_helper_neon_unarrow_sat8 },
        { gen_helper_neon_narrow_sat_s16,
          gen_helper_neon_unarrow_sat16 },
        { gen_helper_neon_narrow_sat_s32,
          gen_helper_neon_unarrow_sat32 },
        { NULL, NULL },
    };
    static NeonGenNarrowEnvFn * const unsigned_narrow_fns[4] = {
        gen_helper_neon_narrow_sat_u8,
        gen_helper_neon_narrow_sat_u16,
        gen_helper_neon_narrow_sat_u32,
        NULL
    };
    NeonGenNarrowEnvFn *narrowfn;

    int i;

    assert(size < 4);

    /* immh<3> set would imply a 64-bit source: unallocated here */
    if (extract32(immh, 3, 1)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (is_u_shift) {
        narrowfn = unsigned_narrow_fns[size];
    } else {
        /* is_u_narrow selects signed->unsigned ("unarrow") saturation */
        narrowfn = signed_narrow_fns[size][is_u_narrow ? 1 : 0];
    }

    tcg_rn = tcg_temp_new_i64();
    tcg_rd = tcg_temp_new_i64();
    tcg_rd_narrowed = tcg_temp_new_i32();
    tcg_final = tcg_const_i64(0);

    if (round) {
        uint64_t round_const = 1ULL << (shift - 1);
        tcg_round = tcg_const_i64(round_const);
    } else {
        tcg_round = NULL;
    }

    /* Shift, saturate-narrow, and deposit each element into tcg_final */
    for (i = 0; i < elements; i++) {
        read_vec_element(s, tcg_rn, rn, i, ldop);
        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
                                false, is_u_shift, size+1, shift);
        narrowfn(tcg_rd_narrowed, cpu_env, tcg_rd);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_rd_narrowed);
        tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
    }

    /* The narrowed results go into the low doubleword of Vd, or the
     * high doubleword when is_q is set.
     */
    if (!is_q) {
        write_vec_element(s, tcg_final, rd, 0, MO_64);
    } else {
        write_vec_element(s, tcg_final, rd, 1, MO_64);
    }

    if (round) {
        tcg_temp_free_i64(tcg_round);
    }
    tcg_temp_free_i64(tcg_rn);
    tcg_temp_free_i64(tcg_rd);
    tcg_temp_free_i32(tcg_rd_narrowed);
    tcg_temp_free_i64(tcg_final);

    clear_vec_high(s, is_q, rd);
}
8684
8685
/*
 * SQSHL/UQSHL/SQSHLU with immediate shift amount, scalar and vector
 * forms: left-shift each element by an immediate with saturation.
 */
static void handle_simd_qshl(DisasContext *s, bool scalar, bool is_q,
                             bool src_unsigned, bool dst_unsigned,
                             int immh, int immb, int rn, int rd)
{
    int immhb = immh << 3 | immb;
    int size = 32 - clz32(immh) - 1;    /* element size log2 */
    int shift = immhb - (8 << size);    /* left shift amount, 0..esize-1 */
    int pass;

    assert(immh != 0);
    assert(!(scalar && is_q));

    if (!scalar) {
        if (!is_q && extract32(immh, 3, 1)) {
            unallocated_encoding(s);
            return;
        }

        /*
         * The 8- and 16-bit vector helpers operate on 32-bit values
         * holding multiple packed elements and take a per-lane shift
         * count, so replicate the shift into every lane here.
         */
        switch (size) {
        case 0:
            shift |= shift << 8;
            /* fall through */
        case 1:
            shift |= shift << 16;
            break;
        case 2:
        case 3:
            break;
        default:
            g_assert_not_reached();
        }
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (size == 3) {
        TCGv_i64 tcg_shift = tcg_const_i64(shift);
        /* Indexed by [src_unsigned][dst_unsigned]:
         * signed->signed is SQSHL, signed->unsigned is SQSHLU,
         * unsigned->unsigned is UQSHL; unsigned->signed doesn't exist.
         */
        static NeonGenTwo64OpEnvFn * const fns[2][2] = {
            { gen_helper_neon_qshl_s64, gen_helper_neon_qshlu_s64 },
            { NULL, gen_helper_neon_qshl_u64 },
        };
        NeonGenTwo64OpEnvFn *genfn = fns[src_unsigned][dst_unsigned];
        int maxpass = is_q ? 2 : 1;

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);
            genfn(tcg_op, cpu_env, tcg_op, tcg_shift);
            write_vec_element(s, tcg_op, rd, pass, MO_64);

            tcg_temp_free_i64(tcg_op);
        }
        tcg_temp_free_i64(tcg_shift);
        clear_vec_high(s, is_q, rd);
    } else {
        TCGv_i32 tcg_shift = tcg_const_i32(shift);
        /* Same [src_unsigned][dst_unsigned] layout as above, per size */
        static NeonGenTwoOpEnvFn * const fns[2][2][3] = {
            {
                { gen_helper_neon_qshl_s8,
                  gen_helper_neon_qshl_s16,
                  gen_helper_neon_qshl_s32 },
                { gen_helper_neon_qshlu_s8,
                  gen_helper_neon_qshlu_s16,
                  gen_helper_neon_qshlu_s32 }
            }, {
                { NULL, NULL, NULL },
                { gen_helper_neon_qshl_u8,
                  gen_helper_neon_qshl_u16,
                  gen_helper_neon_qshl_u32 }
            }
        };
        NeonGenTwoOpEnvFn *genfn = fns[src_unsigned][dst_unsigned][size];
        MemOp memop = scalar ? size : MO_32;
        int maxpass = scalar ? 1 : is_q ? 4 : 2;

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op, rn, pass, memop);
            genfn(tcg_op, cpu_env, tcg_op, tcg_shift);
            if (scalar) {
                /* Zero-extend the narrow scalar result before writing */
                switch (size) {
                case 0:
                    tcg_gen_ext8u_i32(tcg_op, tcg_op);
                    break;
                case 1:
                    tcg_gen_ext16u_i32(tcg_op, tcg_op);
                    break;
                case 2:
                    break;
                default:
                    g_assert_not_reached();
                }
                write_fp_sreg(s, rd, tcg_op);
            } else {
                write_vec_element_i32(s, tcg_op, rd, pass, MO_32);
            }

            tcg_temp_free_i32(tcg_op);
        }
        tcg_temp_free_i32(tcg_shift);

        if (!scalar) {
            clear_vec_high(s, is_q, rd);
        }
    }
}
8800
8801
/*
 * Convert integer elements to floating point (SCVTF/UCVTF), scalar and
 * vector forms, with an optional fixed-point fraction (fracbits != 0).
 */
static void handle_simd_intfp_conv(DisasContext *s, int rd, int rn,
                                   int elements, int is_signed,
                                   int fracbits, int size)
{
    TCGv_ptr tcg_fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
    TCGv_i32 tcg_shift = NULL;

    MemOp mop = size | (is_signed ? MO_SIGN : 0);
    int pass;

    /* The 64-bit helpers always take a shift argument, so the shift
     * temp is needed for MO_64 even when fracbits is zero.
     */
    if (fracbits || size == MO_64) {
        tcg_shift = tcg_const_i32(fracbits);
    }

    if (size == MO_64) {
        TCGv_i64 tcg_int64 = tcg_temp_new_i64();
        TCGv_i64 tcg_double = tcg_temp_new_i64();

        for (pass = 0; pass < elements; pass++) {
            read_vec_element(s, tcg_int64, rn, pass, mop);

            if (is_signed) {
                gen_helper_vfp_sqtod(tcg_double, tcg_int64,
                                     tcg_shift, tcg_fpst);
            } else {
                gen_helper_vfp_uqtod(tcg_double, tcg_int64,
                                     tcg_shift, tcg_fpst);
            }
            if (elements == 1) {
                /* Scalar: write_fp_dreg also zeroes the high bits of Vd */
                write_fp_dreg(s, rd, tcg_double);
            } else {
                write_vec_element(s, tcg_double, rd, pass, MO_64);
            }
        }

        tcg_temp_free_i64(tcg_int64);
        tcg_temp_free_i64(tcg_double);

    } else {
        TCGv_i32 tcg_int32 = tcg_temp_new_i32();
        TCGv_i32 tcg_float = tcg_temp_new_i32();

        for (pass = 0; pass < elements; pass++) {
            read_vec_element_i32(s, tcg_int32, rn, pass, mop);

            switch (size) {
            case MO_32:
                if (fracbits) {
                    if (is_signed) {
                        gen_helper_vfp_sltos(tcg_float, tcg_int32,
                                             tcg_shift, tcg_fpst);
                    } else {
                        gen_helper_vfp_ultos(tcg_float, tcg_int32,
                                             tcg_shift, tcg_fpst);
                    }
                } else {
                    if (is_signed) {
                        gen_helper_vfp_sitos(tcg_float, tcg_int32, tcg_fpst);
                    } else {
                        gen_helper_vfp_uitos(tcg_float, tcg_int32, tcg_fpst);
                    }
                }
                break;
            case MO_16:
                if (fracbits) {
                    if (is_signed) {
                        gen_helper_vfp_sltoh(tcg_float, tcg_int32,
                                             tcg_shift, tcg_fpst);
                    } else {
                        gen_helper_vfp_ultoh(tcg_float, tcg_int32,
                                             tcg_shift, tcg_fpst);
                    }
                } else {
                    if (is_signed) {
                        gen_helper_vfp_sitoh(tcg_float, tcg_int32, tcg_fpst);
                    } else {
                        gen_helper_vfp_uitoh(tcg_float, tcg_int32, tcg_fpst);
                    }
                }
                break;
            default:
                g_assert_not_reached();
            }

            if (elements == 1) {
                write_fp_sreg(s, rd, tcg_float);
            } else {
                write_vec_element_i32(s, tcg_float, rd, pass, size);
            }
        }

        tcg_temp_free_i32(tcg_int32);
        tcg_temp_free_i32(tcg_float);
    }

    tcg_temp_free_ptr(tcg_fpst);
    if (tcg_shift) {
        tcg_temp_free_i32(tcg_shift);
    }

    /* (elements << size) is the number of bytes written; 16 means a
     * full quad-width op, which must not have its high half cleared.
     */
    clear_vec_high(s, elements << size == 16, rd);
}
8904
8905
8906static void handle_simd_shift_intfp_conv(DisasContext *s, bool is_scalar,
8907 bool is_q, bool is_u,
8908 int immh, int immb, int opcode,
8909 int rn, int rd)
8910{
8911 int size, elements, fracbits;
8912 int immhb = immh << 3 | immb;
8913
8914 if (immh & 8) {
8915 size = MO_64;
8916 if (!is_scalar && !is_q) {
8917 unallocated_encoding(s);
8918 return;
8919 }
8920 } else if (immh & 4) {
8921 size = MO_32;
8922 } else if (immh & 2) {
8923 size = MO_16;
8924 if (!dc_isar_feature(aa64_fp16, s)) {
8925 unallocated_encoding(s);
8926 return;
8927 }
8928 } else {
8929
8930 g_assert(immh == 1);
8931 unallocated_encoding(s);
8932 return;
8933 }
8934
8935 if (is_scalar) {
8936 elements = 1;
8937 } else {
8938 elements = (8 << is_q) >> size;
8939 }
8940 fracbits = (16 << size) - immhb;
8941
8942 if (!fp_access_check(s)) {
8943 return;
8944 }
8945
8946 handle_simd_intfp_conv(s, rd, rn, elements, !is_u, fracbits, size);
8947}
8948
8949
8950static void handle_simd_shift_fpint_conv(DisasContext *s, bool is_scalar,
8951 bool is_q, bool is_u,
8952 int immh, int immb, int rn, int rd)
8953{
8954 int immhb = immh << 3 | immb;
8955 int pass, size, fracbits;
8956 TCGv_ptr tcg_fpstatus;
8957 TCGv_i32 tcg_rmode, tcg_shift;
8958
8959 if (immh & 0x8) {
8960 size = MO_64;
8961 if (!is_scalar && !is_q) {
8962 unallocated_encoding(s);
8963 return;
8964 }
8965 } else if (immh & 0x4) {
8966 size = MO_32;
8967 } else if (immh & 0x2) {
8968 size = MO_16;
8969 if (!dc_isar_feature(aa64_fp16, s)) {
8970 unallocated_encoding(s);
8971 return;
8972 }
8973 } else {
8974
8975 assert(immh == 1);
8976 unallocated_encoding(s);
8977 return;
8978 }
8979
8980 if (!fp_access_check(s)) {
8981 return;
8982 }
8983
8984 assert(!(is_scalar && is_q));
8985
8986 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(FPROUNDING_ZERO));
8987 tcg_fpstatus = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
8988 gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
8989 fracbits = (16 << size) - immhb;
8990 tcg_shift = tcg_const_i32(fracbits);
8991
8992 if (size == MO_64) {
8993 int maxpass = is_scalar ? 1 : 2;
8994
8995 for (pass = 0; pass < maxpass; pass++) {
8996 TCGv_i64 tcg_op = tcg_temp_new_i64();
8997
8998 read_vec_element(s, tcg_op, rn, pass, MO_64);
8999 if (is_u) {
9000 gen_helper_vfp_touqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
9001 } else {
9002 gen_helper_vfp_tosqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
9003 }
9004 write_vec_element(s, tcg_op, rd, pass, MO_64);
9005 tcg_temp_free_i64(tcg_op);
9006 }
9007 clear_vec_high(s, is_q, rd);
9008 } else {
9009 void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
9010 int maxpass = is_scalar ? 1 : ((8 << is_q) >> size);
9011
9012 switch (size) {
9013 case MO_16:
9014 if (is_u) {
9015 fn = gen_helper_vfp_touhh;
9016 } else {
9017 fn = gen_helper_vfp_toshh;
9018 }
9019 break;
9020 case MO_32:
9021 if (is_u) {
9022 fn = gen_helper_vfp_touls;
9023 } else {
9024 fn = gen_helper_vfp_tosls;
9025 }
9026 break;
9027 default:
9028 g_assert_not_reached();
9029 }
9030
9031 for (pass = 0; pass < maxpass; pass++) {
9032 TCGv_i32 tcg_op = tcg_temp_new_i32();
9033
9034 read_vec_element_i32(s, tcg_op, rn, pass, size);
9035 fn(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
9036 if (is_scalar) {
9037 write_fp_sreg(s, rd, tcg_op);
9038 } else {
9039 write_vec_element_i32(s, tcg_op, rd, pass, size);
9040 }
9041 tcg_temp_free_i32(tcg_op);
9042 }
9043 if (!is_scalar) {
9044 clear_vec_high(s, is_q, rd);
9045 }
9046 }
9047
9048 tcg_temp_free_ptr(tcg_fpstatus);
9049 tcg_temp_free_i32(tcg_shift);
9050 gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
9051 tcg_temp_free_i32(tcg_rmode);
9052}
9053
9054
9055
9056
9057
9058
9059
9060
9061
/* AdvSIMD scalar shift by immediate: dispatch on the opcode field.
 *  31 30 29           23 22    19 18  16 15   11 10  9  5 4  0
 *  0  1  U  1 1 1 1 1 0  immh     immb  opcode  1   Rn   Rd
 * (field positions per the extract32 calls below)
 */
static void disas_simd_scalar_shift_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 5);
    int immb = extract32(insn, 16, 3);
    int immh = extract32(insn, 19, 4);
    bool is_u = extract32(insn, 29, 1);

    /* immh == 0 is reserved for all of these encodings */
    if (immh == 0) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0x08: /* SRI: unsigned form only */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x00: /* SSHR / USHR */
    case 0x02: /* SSRA / USRA */
    case 0x04: /* SRSHR / URSHR */
    case 0x06: /* SRSRA / URSRA */
        handle_scalar_simd_shri(s, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x0a: /* SHL / SLI */
        handle_scalar_simd_shli(s, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x1c: /* SCVTF / UCVTF (fixed-point) */
        handle_simd_shift_intfp_conv(s, true, false, is_u, immh, immb,
                                     opcode, rn, rd);
        break;
    case 0x10: /* SQSHRUN */
    case 0x11: /* SQRSHRUN: U == 1 only */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        handle_vec_simd_sqshrn(s, true, false, false, true,
                               immh, immb, opcode, rn, rd);
        break;
    case 0x12: /* SQSHRN / UQSHRN */
    case 0x13: /* SQRSHRN / UQRSHRN */
        handle_vec_simd_sqshrn(s, true, false, is_u, is_u,
                               immh, immb, opcode, rn, rd);
        break;
    case 0xc: /* SQSHLU: U == 1 only */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        handle_simd_qshl(s, true, false, false, true, immh, immb, rn, rd);
        break;
    case 0xe: /* SQSHL / UQSHL */
        handle_simd_qshl(s, true, false, is_u, is_u, immh, immb, rn, rd);
        break;
    case 0x1f: /* FCVTZS / FCVTZU (fixed-point) */
        handle_simd_shift_fpint_conv(s, true, false, is_u, immh, immb, rn, rd);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
9128
9129
9130
9131
9132
9133
9134
/*
 * AdvSIMD scalar three different: SQDMLAL (0x9), SQDMLSL (0xb),
 * SQDMULL (0xd).  Only the signed (U == 0) forms exist, with 16- or
 * 32-bit source elements.
 */
static void disas_simd_scalar_three_reg_diff(DisasContext *s, uint32_t insn)
{
    bool is_u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 4);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    if (is_u) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0x9: /* SQDMLAL(2) */
    case 0xb: /* SQDMLSL(2) */
    case 0xd: /* SQDMULL(2) */
        if (size == 0 || size == 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (size == 2) {
        /* 32 x 32 -> 64: multiply then double-with-saturation by
         * saturating-adding the product to itself.
         */
        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
        TCGv_i64 tcg_op2 = tcg_temp_new_i64();
        TCGv_i64 tcg_res = tcg_temp_new_i64();

        read_vec_element(s, tcg_op1, rn, 0, MO_32 | MO_SIGN);
        read_vec_element(s, tcg_op2, rm, 0, MO_32 | MO_SIGN);

        tcg_gen_mul_i64(tcg_res, tcg_op1, tcg_op2);
        gen_helper_neon_addl_saturate_s64(tcg_res, cpu_env, tcg_res, tcg_res);

        switch (opcode) {
        case 0xd: /* SQDMULL: product only */
            break;
        case 0xb: /* SQDMLSL: subtract by negating then accumulating */
            tcg_gen_neg_i64(tcg_res, tcg_res);
            /* fall through */
        case 0x9: /* SQDMLAL: saturating accumulate into Rd */
            read_vec_element(s, tcg_op1, rd, 0, MO_64);
            gen_helper_neon_addl_saturate_s64(tcg_res, cpu_env,
                                              tcg_res, tcg_op1);
            break;
        default:
            g_assert_not_reached();
        }

        write_fp_dreg(s, rd, tcg_res);

        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2);
        tcg_temp_free_i64(tcg_res);
    } else {
        /* 16 x 16 -> 32 via the Neon widening-multiply helper */
        TCGv_i32 tcg_op1 = read_fp_hreg(s, rn);
        TCGv_i32 tcg_op2 = read_fp_hreg(s, rm);
        TCGv_i64 tcg_res = tcg_temp_new_i64();

        gen_helper_neon_mull_s16(tcg_res, tcg_op1, tcg_op2);
        gen_helper_neon_addl_saturate_s32(tcg_res, cpu_env, tcg_res, tcg_res);

        switch (opcode) {
        case 0xd: /* SQDMULL */
            break;
        case 0xb: /* SQDMLSL */
            gen_helper_neon_negl_u32(tcg_res, tcg_res);
            /* fall through */
        case 0x9: /* SQDMLAL */
        {
            TCGv_i64 tcg_op3 = tcg_temp_new_i64();
            read_vec_element(s, tcg_op3, rd, 0, MO_32);
            gen_helper_neon_addl_saturate_s32(tcg_res, cpu_env,
                                              tcg_res, tcg_op3);
            tcg_temp_free_i64(tcg_op3);
            break;
        }
        default:
            g_assert_not_reached();
        }

        tcg_gen_ext32u_i64(tcg_res, tcg_res);
        write_fp_dreg(s, rd, tcg_res);

        tcg_temp_free_i32(tcg_op1);
        tcg_temp_free_i32(tcg_op2);
        tcg_temp_free_i64(tcg_res);
    }
}
9233
static void handle_3same_64(DisasContext *s, int opcode, bool u,
                            TCGv_i64 tcg_rd, TCGv_i64 tcg_rn, TCGv_i64 tcg_rm)
{
    /*
     * Handle the 64 x 64 -> 64 opcodes that are shared between the
     * scalar and vector three-same groups (all integer ops for which
     * size == 3 is valid).  tcg_rd may alias tcg_rn/tcg_rm.
     */
    TCGCond cond;

    switch (opcode) {
    case 0x1: /* SQADD / UQADD */
        if (u) {
            gen_helper_neon_qadd_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qadd_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        }
        break;
    case 0x5: /* SQSUB / UQSUB */
        if (u) {
            gen_helper_neon_qsub_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qsub_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        }
        break;
    case 0x6: /* CMGT / CMHI */
        /* The comparison ops produce all-ones (-1) or all-zeroes:
         * setcond gives 0/1, which we negate to get 0/-1.
         */
        cond = u ? TCG_COND_GTU : TCG_COND_GT;
    do_cmop:
        tcg_gen_setcond_i64(cond, tcg_rd, tcg_rn, tcg_rm);
        tcg_gen_neg_i64(tcg_rd, tcg_rd);
        break;
    case 0x7: /* CMGE / CMHS */
        cond = u ? TCG_COND_GEU : TCG_COND_GE;
        goto do_cmop;
    case 0x11: /* CMTST / CMEQ */
        if (u) {
            cond = TCG_COND_EQ;
            goto do_cmop;
        }
        gen_cmtst_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 0x8: /* SSHL / USHL */
        if (u) {
            gen_ushl_i64(tcg_rd, tcg_rn, tcg_rm);
        } else {
            gen_sshl_i64(tcg_rd, tcg_rn, tcg_rm);
        }
        break;
    case 0x9: /* SQSHL / UQSHL */
        if (u) {
            gen_helper_neon_qshl_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qshl_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        }
        break;
    case 0xa: /* SRSHL / URSHL */
        if (u) {
            gen_helper_neon_rshl_u64(tcg_rd, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_rshl_s64(tcg_rd, tcg_rn, tcg_rm);
        }
        break;
    case 0xb: /* SQRSHL / UQRSHL */
        if (u) {
            gen_helper_neon_qrshl_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qrshl_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        }
        break;
    case 0x10: /* ADD / SUB */
        if (u) {
            tcg_gen_sub_i64(tcg_rd, tcg_rn, tcg_rm);
        } else {
            tcg_gen_add_i64(tcg_rd, tcg_rn, tcg_rm);
        }
        break;
    default:
        g_assert_not_reached();
    }
}
9317
9318
9319
9320
9321
/*
 * Floating-point three-same operations, shared between the scalar and
 * vector forms.  fpopcode is the opcode with size<1> and U folded into
 * bits 5 and 6; the mnemonic comments below follow the helper names.
 */
static void handle_3same_float(DisasContext *s, int size, int elements,
                               int fpopcode, int rd, int rn, int rm)
{
    int pass;
    TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);

    for (pass = 0; pass < elements; pass++) {
        if (size) {
            /* Double-precision elements */
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
            TCGv_i64 tcg_res = tcg_temp_new_i64();

            read_vec_element(s, tcg_op1, rn, pass, MO_64);
            read_vec_element(s, tcg_op2, rm, pass, MO_64);

            switch (fpopcode) {
            case 0x39: /* FMLS */
                /* As per the IEEE fused-multiply behaviour: negate the
                 * first operand, then do an FMLA.
                 */
                gen_helper_vfp_negd(tcg_op1, tcg_op1);
                /* fall through */
            case 0x19: /* FMLA */
                read_vec_element(s, tcg_res, rd, pass, MO_64);
                gen_helper_vfp_muladdd(tcg_res, tcg_op1, tcg_op2,
                                       tcg_res, fpst);
                break;
            case 0x18: /* FMAXNM */
                gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1a: /* FADD */
                gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1b: /* FMULX */
                gen_helper_vfp_mulxd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1c: /* FCMEQ */
                gen_helper_neon_ceq_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1e: /* FMAX */
                gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1f: /* FRECPS */
                gen_helper_recpsf_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x38: /* FMINNM */
                gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3a: /* FSUB */
                gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3e: /* FMIN */
                gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3f: /* FRSQRTS */
                gen_helper_rsqrtsf_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5b: /* FMUL */
                gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5c: /* FCMGE */
                gen_helper_neon_cge_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5d: /* FACGE */
                gen_helper_neon_acge_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5f: /* FDIV */
                gen_helper_vfp_divd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7a: /* FABD: subtract then clear the sign */
                gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
                gen_helper_vfp_absd(tcg_res, tcg_res);
                break;
            case 0x7c: /* FCMGT */
                gen_helper_neon_cgt_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7d: /* FACGT */
                gen_helper_neon_acgt_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            write_vec_element(s, tcg_res, rd, pass, MO_64);

            tcg_temp_free_i64(tcg_res);
            tcg_temp_free_i64(tcg_op1);
            tcg_temp_free_i64(tcg_op2);
        } else {
            /* Single-precision elements */
            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op1, rn, pass, MO_32);
            read_vec_element_i32(s, tcg_op2, rm, pass, MO_32);

            switch (fpopcode) {
            case 0x39: /* FMLS: negate op1 then FMLA, as above */
                gen_helper_vfp_negs(tcg_op1, tcg_op1);
                /* fall through */
            case 0x19: /* FMLA */
                read_vec_element_i32(s, tcg_res, rd, pass, MO_32);
                gen_helper_vfp_muladds(tcg_res, tcg_op1, tcg_op2,
                                       tcg_res, fpst);
                break;
            case 0x1a: /* FADD */
                gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1b: /* FMULX */
                gen_helper_vfp_mulxs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1c: /* FCMEQ */
                gen_helper_neon_ceq_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1e: /* FMAX */
                gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1f: /* FRECPS */
                gen_helper_recpsf_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x18: /* FMAXNM */
                gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x38: /* FMINNM */
                gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3a: /* FSUB */
                gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3e: /* FMIN */
                gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3f: /* FRSQRTS */
                gen_helper_rsqrtsf_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5b: /* FMUL */
                gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5c: /* FCMGE */
                gen_helper_neon_cge_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5d: /* FACGE */
                gen_helper_neon_acge_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5f: /* FDIV */
                gen_helper_vfp_divs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7a: /* FABD: subtract then clear the sign */
                gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
                gen_helper_vfp_abss(tcg_res, tcg_res);
                break;
            case 0x7c: /* FCMGT */
                gen_helper_neon_cgt_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7d: /* FACGT */
                gen_helper_neon_acgt_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            if (elements == 1) {
                /* Scalar single: widen so the write zeroes bits 32..63 */
                TCGv_i64 tcg_tmp = tcg_temp_new_i64();

                tcg_gen_extu_i32_i64(tcg_tmp, tcg_res);
                write_vec_element(s, tcg_tmp, rd, pass, MO_64);
                tcg_temp_free_i64(tcg_tmp);
            } else {
                write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
            }

            tcg_temp_free_i32(tcg_res);
            tcg_temp_free_i32(tcg_op1);
            tcg_temp_free_i32(tcg_op2);
        }
    }

    tcg_temp_free_ptr(fpst);

    /* More than 8 bytes written means a full quad op */
    clear_vec_high(s, elements * (size ? 8 : 4) > 8, rd);
}
9505
9506
9507
9508
9509
9510
9511
/* AdvSIMD scalar three same: integer ops plus the FP subgroup
 * (opcode >= 0x18), for which size<1> and U are folded into fpopcode.
 */
static void disas_simd_scalar_three_reg_same(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 5);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    bool u = extract32(insn, 29, 1);
    TCGv_i64 tcg_rd;

    if (opcode >= 0x18) {
        /* Floating point: U, size<1> and opcode -> fpopcode */
        int fpopcode = opcode | (extract32(size, 1, 1) << 5) | (u << 6);
        switch (fpopcode) {
        case 0x1b: /* FMULX */
        case 0x1f: /* FRECPS */
        case 0x3f: /* FRSQRTS */
        case 0x5d: /* FACGE */
        case 0x7d: /* FACGT */
        case 0x1c: /* FCMEQ */
        case 0x5c: /* FCMGE */
        case 0x7c: /* FCMGT */
        case 0x7a: /* FABD */
            break;
        default:
            unallocated_encoding(s);
            return;
        }

        if (!fp_access_check(s)) {
            return;
        }

        handle_3same_float(s, extract32(size, 0, 1), 1, fpopcode, rd, rn, rm);
        return;
    }

    switch (opcode) {
    case 0x1: /* SQADD, UQADD */
    case 0x5: /* SQSUB, UQSUB */
    case 0x9: /* SQSHL, UQSHL */
    case 0xb: /* SQRSHL, UQRSHL */
        break;
    case 0x8: /* SSHL, USHL */
    case 0xa: /* SRSHL, URSHL */
    case 0x6: /* CMGT, CMHI */
    case 0x7: /* CMGE, CMHS */
    case 0x11: /* CMTST, CMEQ */
    case 0x10: /* ADD, SUB (vector) */
        /* These exist only for 64-bit scalars */
        if (size != 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x16: /* SQDMULH, SQRDMULH: 16- and 32-bit elements only */
        if (size != 1 && size != 2) {
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rd = tcg_temp_new_i64();

    if (size == 3) {
        TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
        TCGv_i64 tcg_rm = read_fp_dreg(s, rm);

        handle_3same_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rm);
        tcg_temp_free_i64(tcg_rn);
        tcg_temp_free_i64(tcg_rm);
    } else {
        /*
         * Do a single operation on the lowest element in the vector.
         * We use the standard Neon helpers and rely on the saturating
         * helpers reading only the bits of the requested element size.
         */
        NeonGenTwoOpEnvFn *genenvfn;
        TCGv_i32 tcg_rn = tcg_temp_new_i32();
        TCGv_i32 tcg_rm = tcg_temp_new_i32();
        TCGv_i32 tcg_rd32 = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_rn, rn, 0, size);
        read_vec_element_i32(s, tcg_rm, rm, 0, size);

        switch (opcode) {
        case 0x1: /* SQADD, UQADD */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qadd_s8, gen_helper_neon_qadd_u8 },
                { gen_helper_neon_qadd_s16, gen_helper_neon_qadd_u16 },
                { gen_helper_neon_qadd_s32, gen_helper_neon_qadd_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x5: /* SQSUB, UQSUB */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qsub_s8, gen_helper_neon_qsub_u8 },
                { gen_helper_neon_qsub_s16, gen_helper_neon_qsub_u16 },
                { gen_helper_neon_qsub_s32, gen_helper_neon_qsub_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x9: /* SQSHL, UQSHL */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 },
                { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 },
                { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0xb: /* SQRSHL, UQRSHL */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 },
                { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 },
                { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x16: /* SQDMULH, SQRDMULH */
        {
            static NeonGenTwoOpEnvFn * const fns[2][2] = {
                { gen_helper_neon_qdmulh_s16, gen_helper_neon_qrdmulh_s16 },
                { gen_helper_neon_qdmulh_s32, gen_helper_neon_qrdmulh_s32 },
            };
            assert(size == 1 || size == 2);
            genenvfn = fns[size - 1][u];
            break;
        }
        default:
            g_assert_not_reached();
        }

        genenvfn(tcg_rd32, cpu_env, tcg_rn, tcg_rm);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_rd32);
        tcg_temp_free_i32(tcg_rd32);
        tcg_temp_free_i32(tcg_rn);
        tcg_temp_free_i32(tcg_rm);
    }

    /* write_fp_dreg zeroes the high bits of the destination */
    write_fp_dreg(s, rd, tcg_rd);

    tcg_temp_free_i64(tcg_rd);
}
9671
9672
9673
9674
9675
9676
9677
9678
9679
9680static void disas_simd_scalar_three_reg_same_fp16(DisasContext *s,
9681 uint32_t insn)
9682{
9683 int rd = extract32(insn, 0, 5);
9684 int rn = extract32(insn, 5, 5);
9685 int opcode = extract32(insn, 11, 3);
9686 int rm = extract32(insn, 16, 5);
9687 bool u = extract32(insn, 29, 1);
9688 bool a = extract32(insn, 23, 1);
9689 int fpopcode = opcode | (a << 3) | (u << 4);
9690 TCGv_ptr fpst;
9691 TCGv_i32 tcg_op1;
9692 TCGv_i32 tcg_op2;
9693 TCGv_i32 tcg_res;
9694
9695 switch (fpopcode) {
9696 case 0x03:
9697 case 0x04:
9698 case 0x07:
9699 case 0x0f:
9700 case 0x14:
9701 case 0x15:
9702 case 0x1a:
9703 case 0x1c:
9704 case 0x1d:
9705 break;
9706 default:
9707 unallocated_encoding(s);
9708 return;
9709 }
9710
9711 if (!dc_isar_feature(aa64_fp16, s)) {
9712 unallocated_encoding(s);
9713 }
9714
9715 if (!fp_access_check(s)) {
9716 return;
9717 }
9718
9719 fpst = fpstatus_ptr(FPST_FPCR_F16);
9720
9721 tcg_op1 = read_fp_hreg(s, rn);
9722 tcg_op2 = read_fp_hreg(s, rm);
9723 tcg_res = tcg_temp_new_i32();
9724
9725 switch (fpopcode) {
9726 case 0x03:
9727 gen_helper_advsimd_mulxh(tcg_res, tcg_op1, tcg_op2, fpst);
9728 break;
9729 case 0x04:
9730 gen_helper_advsimd_ceq_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9731 break;
9732 case 0x07:
9733 gen_helper_recpsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9734 break;
9735 case 0x0f:
9736 gen_helper_rsqrtsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9737 break;
9738 case 0x14:
9739 gen_helper_advsimd_cge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9740 break;
9741 case 0x15:
9742 gen_helper_advsimd_acge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9743 break;
9744 case 0x1a:
9745 gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
9746 tcg_gen_andi_i32(tcg_res, tcg_res, 0x7fff);
9747 break;
9748 case 0x1c:
9749 gen_helper_advsimd_cgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9750 break;
9751 case 0x1d:
9752 gen_helper_advsimd_acgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9753 break;
9754 default:
9755 g_assert_not_reached();
9756 }
9757
9758 write_fp_sreg(s, rd, tcg_res);
9759
9760
9761 tcg_temp_free_i32(tcg_res);
9762 tcg_temp_free_i32(tcg_op1);
9763 tcg_temp_free_i32(tcg_op2);
9764 tcg_temp_free_ptr(fpst);
9765}
9766
9767
9768
9769
9770
9771
9772
9773static void disas_simd_scalar_three_reg_same_extra(DisasContext *s,
9774 uint32_t insn)
9775{
9776 int rd = extract32(insn, 0, 5);
9777 int rn = extract32(insn, 5, 5);
9778 int opcode = extract32(insn, 11, 4);
9779 int rm = extract32(insn, 16, 5);
9780 int size = extract32(insn, 22, 2);
9781 bool u = extract32(insn, 29, 1);
9782 TCGv_i32 ele1, ele2, ele3;
9783 TCGv_i64 res;
9784 bool feature;
9785
9786 switch (u * 16 + opcode) {
9787 case 0x10:
9788 case 0x11:
9789 if (size != 1 && size != 2) {
9790 unallocated_encoding(s);
9791 return;
9792 }
9793 feature = dc_isar_feature(aa64_rdm, s);
9794 break;
9795 default:
9796 unallocated_encoding(s);
9797 return;
9798 }
9799 if (!feature) {
9800 unallocated_encoding(s);
9801 return;
9802 }
9803 if (!fp_access_check(s)) {
9804 return;
9805 }
9806
9807
9808
9809
9810
9811
9812
9813 ele1 = tcg_temp_new_i32();
9814 ele2 = tcg_temp_new_i32();
9815 ele3 = tcg_temp_new_i32();
9816
9817 read_vec_element_i32(s, ele1, rn, 0, size);
9818 read_vec_element_i32(s, ele2, rm, 0, size);
9819 read_vec_element_i32(s, ele3, rd, 0, size);
9820
9821 switch (opcode) {
9822 case 0x0:
9823 if (size == 1) {
9824 gen_helper_neon_qrdmlah_s16(ele3, cpu_env, ele1, ele2, ele3);
9825 } else {
9826 gen_helper_neon_qrdmlah_s32(ele3, cpu_env, ele1, ele2, ele3);
9827 }
9828 break;
9829 case 0x1:
9830 if (size == 1) {
9831 gen_helper_neon_qrdmlsh_s16(ele3, cpu_env, ele1, ele2, ele3);
9832 } else {
9833 gen_helper_neon_qrdmlsh_s32(ele3, cpu_env, ele1, ele2, ele3);
9834 }
9835 break;
9836 default:
9837 g_assert_not_reached();
9838 }
9839 tcg_temp_free_i32(ele1);
9840 tcg_temp_free_i32(ele2);
9841
9842 res = tcg_temp_new_i64();
9843 tcg_gen_extu_i32_i64(res, ele3);
9844 tcg_temp_free_i32(ele3);
9845
9846 write_fp_dreg(s, rd, res);
9847 tcg_temp_free_i64(res);
9848}
9849
static void handle_2misc_64(DisasContext *s, int opcode, bool u,
                            TCGv_i64 tcg_rd, TCGv_i64 tcg_rn,
                            TCGv_i32 tcg_rmode, TCGv_ptr tcg_fpstatus)
{
    /*
     * Handle 64 -> 64 two-misc operations shared between the scalar
     * and vector groups.  For the FP conversion/rounding cases the
     * caller has already selected the rounding mode (tcg_rmode) and
     * supplies the fpstatus pointer.
     */
    TCGCond cond;

    switch (opcode) {
    case 0x4: /* CLS / CLZ */
        if (u) {
            tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64);
        } else {
            tcg_gen_clrsb_i64(tcg_rd, tcg_rn);
        }
        break;
    case 0x5: /* NOT */
        /* This opcode slot is shared with CNT and RBIT, but those do
         * not exist at size 3, so only NOT is handled here.
         */
        tcg_gen_not_i64(tcg_rd, tcg_rn);
        break;
    case 0x7: /* SQABS / SQNEG */
        if (u) {
            gen_helper_neon_qneg_s64(tcg_rd, cpu_env, tcg_rn);
        } else {
            gen_helper_neon_qabs_s64(tcg_rd, cpu_env, tcg_rn);
        }
        break;
    case 0xa: /* CMLT #0 */
        /* The comparison-with-zero ops produce all-ones or all-zeroes:
         * setcondi gives 0/1, negated to 0/-1.
         */
        cond = TCG_COND_LT;
    do_cmop:
        tcg_gen_setcondi_i64(cond, tcg_rd, tcg_rn, 0);
        tcg_gen_neg_i64(tcg_rd, tcg_rd);
        break;
    case 0x8: /* CMGT #0 / CMGE #0 */
        cond = u ? TCG_COND_GE : TCG_COND_GT;
        goto do_cmop;
    case 0x9: /* CMEQ #0 / CMLE #0 */
        cond = u ? TCG_COND_LE : TCG_COND_EQ;
        goto do_cmop;
    case 0xb: /* ABS / NEG */
        if (u) {
            tcg_gen_neg_i64(tcg_rd, tcg_rn);
        } else {
            tcg_gen_abs_i64(tcg_rd, tcg_rn);
        }
        break;
    case 0x2f: /* FABS */
        gen_helper_vfp_absd(tcg_rd, tcg_rn);
        break;
    case 0x6f: /* FNEG */
        gen_helper_vfp_negd(tcg_rd, tcg_rn);
        break;
    case 0x7f: /* FSQRT */
        gen_helper_vfp_sqrtd(tcg_rd, tcg_rn, cpu_env);
        break;
    case 0x1a: /* FCVT* (double to signed int64); the rounding mode
                * distinguishing these was set up by the caller.
                */
    case 0x1b:
    case 0x1c:
    case 0x3a:
    case 0x3b:
    {
        TCGv_i32 tcg_shift = tcg_const_i32(0);
        gen_helper_vfp_tosqd(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
        tcg_temp_free_i32(tcg_shift);
        break;
    }
    case 0x5a: /* FCVT* (double to unsigned int64), as above */
    case 0x5b:
    case 0x5c:
    case 0x7a:
    case 0x7b:
    {
        TCGv_i32 tcg_shift = tcg_const_i32(0);
        gen_helper_vfp_touqd(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
        tcg_temp_free_i32(tcg_shift);
        break;
    }
    case 0x18: /* FRINT* family; rounding mode chosen by the caller */
    case 0x19:
    case 0x38:
    case 0x39:
    case 0x58:
    case 0x79:
        gen_helper_rintd(tcg_rd, tcg_rn, tcg_fpstatus);
        break;
    case 0x59: /* FRINTX: the inexact-signalling variant */
        gen_helper_rintd_exact(tcg_rd, tcg_rn, tcg_fpstatus);
        break;
    case 0x1e: /* FRINT32Z / FRINT32X */
    case 0x5e:
        gen_helper_frint32_d(tcg_rd, tcg_rn, tcg_fpstatus);
        break;
    case 0x1f: /* FRINT64Z / FRINT64X */
    case 0x5f:
        gen_helper_frint64_d(tcg_rd, tcg_rn, tcg_fpstatus);
        break;
    default:
        g_assert_not_reached();
    }
}
9960
/* Floating point compare against zero: FCMGT/FCMGE/FCMEQ/FCMLE/FCMLT (zero).
 * The "less than / less or equal" forms are implemented by swapping the
 * operands of the >/>= helpers, so only three helpers per size are needed.
 */
static void handle_2misc_fcmp_zero(DisasContext *s, int opcode,
                                   bool is_scalar, bool is_u, bool is_q,
                                   int size, int rn, int rd)
{
    bool is_double = (size == MO_64);
    TCGv_ptr fpst;

    if (!fp_access_check(s)) {
        return;
    }

    /* Half-precision ops use the FP16 flavour of the FP status flags */
    fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);

    if (is_double) {
        TCGv_i64 tcg_op = tcg_temp_new_i64();
        TCGv_i64 tcg_zero = tcg_const_i64(0);
        TCGv_i64 tcg_res = tcg_temp_new_i64();
        NeonGenTwoDoubleOpFn *genfn;
        bool swap = false;
        int pass;

        switch (opcode) {
        case 0x2e: /* FCMLT (zero) */
            swap = true;
            /* fall through */
        case 0x2c: /* FCMGT (zero) */
            genfn = gen_helper_neon_cgt_f64;
            break;
        case 0x2d: /* FCMEQ (zero) */
            genfn = gen_helper_neon_ceq_f64;
            break;
        case 0x6d: /* FCMLE (zero) */
            swap = true;
            /* fall through */
        case 0x6c: /* FCMGE (zero) */
            genfn = gen_helper_neon_cge_f64;
            break;
        default:
            g_assert_not_reached();
        }

        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
            read_vec_element(s, tcg_op, rn, pass, MO_64);
            if (swap) {
                genfn(tcg_res, tcg_zero, tcg_op, fpst);
            } else {
                genfn(tcg_res, tcg_op, tcg_zero, fpst);
            }
            write_vec_element(s, tcg_res, rd, pass, MO_64);
        }
        tcg_temp_free_i64(tcg_res);
        tcg_temp_free_i64(tcg_zero);
        tcg_temp_free_i64(tcg_op);

        clear_vec_high(s, !is_scalar, rd);
    } else {
        TCGv_i32 tcg_op = tcg_temp_new_i32();
        TCGv_i32 tcg_zero = tcg_const_i32(0);
        TCGv_i32 tcg_res = tcg_temp_new_i32();
        NeonGenTwoSingleOpFn *genfn;
        bool swap = false;
        int pass, maxpasses;

        if (size == MO_16) {
            switch (opcode) {
            case 0x2e: /* FCMLT (zero) */
                swap = true;
                /* fall through */
            case 0x2c: /* FCMGT (zero) */
                genfn = gen_helper_advsimd_cgt_f16;
                break;
            case 0x2d: /* FCMEQ (zero) */
                genfn = gen_helper_advsimd_ceq_f16;
                break;
            case 0x6d: /* FCMLE (zero) */
                swap = true;
                /* fall through */
            case 0x6c: /* FCMGE (zero) */
                genfn = gen_helper_advsimd_cge_f16;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            switch (opcode) {
            case 0x2e: /* FCMLT (zero) */
                swap = true;
                /* fall through */
            case 0x2c: /* FCMGT (zero) */
                genfn = gen_helper_neon_cgt_f32;
                break;
            case 0x2d: /* FCMEQ (zero) */
                genfn = gen_helper_neon_ceq_f32;
                break;
            case 0x6d: /* FCMLE (zero) */
                swap = true;
                /* fall through */
            case 0x6c: /* FCMGE (zero) */
                genfn = gen_helper_neon_cge_f32;
                break;
            default:
                g_assert_not_reached();
            }
        }

        if (is_scalar) {
            maxpasses = 1;
        } else {
            /* 8 or 16 byte vector, divided by the element size */
            int vector_size = 8 << is_q;
            maxpasses = vector_size >> size;
        }

        for (pass = 0; pass < maxpasses; pass++) {
            read_vec_element_i32(s, tcg_op, rn, pass, size);
            if (swap) {
                genfn(tcg_res, tcg_zero, tcg_op, fpst);
            } else {
                genfn(tcg_res, tcg_op, tcg_zero, fpst);
            }
            if (is_scalar) {
                write_fp_sreg(s, rd, tcg_res);
            } else {
                write_vec_element_i32(s, tcg_res, rd, pass, size);
            }
        }
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_zero);
        tcg_temp_free_i32(tcg_op);
        if (!is_scalar) {
            clear_vec_high(s, is_q, rd);
        }
    }

    tcg_temp_free_ptr(fpst);
}
10096
/* Reciprocal estimate family of two-reg-misc ops:
 * FRECPE, FRECPX, FRSQRTE (single/double FP) and URECPE (unsigned int),
 * in both scalar and vector forms.
 */
static void handle_2misc_reciprocal(DisasContext *s, int opcode,
                                    bool is_scalar, bool is_u, bool is_q,
                                    int size, int rn, int rd)
{
    bool is_double = (size == 3);
    TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);

    if (is_double) {
        TCGv_i64 tcg_op = tcg_temp_new_i64();
        TCGv_i64 tcg_res = tcg_temp_new_i64();
        int pass;

        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
            read_vec_element(s, tcg_op, rn, pass, MO_64);
            switch (opcode) {
            case 0x3d: /* FRECPE */
                gen_helper_recpe_f64(tcg_res, tcg_op, fpst);
                break;
            case 0x3f: /* FRECPX */
                gen_helper_frecpx_f64(tcg_res, tcg_op, fpst);
                break;
            case 0x7d: /* FRSQRTE */
                gen_helper_rsqrte_f64(tcg_res, tcg_op, fpst);
                break;
            default:
                g_assert_not_reached();
            }
            write_vec_element(s, tcg_res, rd, pass, MO_64);
        }
        tcg_temp_free_i64(tcg_res);
        tcg_temp_free_i64(tcg_op);
        clear_vec_high(s, !is_scalar, rd);
    } else {
        TCGv_i32 tcg_op = tcg_temp_new_i32();
        TCGv_i32 tcg_res = tcg_temp_new_i32();
        int pass, maxpasses;

        if (is_scalar) {
            maxpasses = 1;
        } else {
            maxpasses = is_q ? 4 : 2;
        }

        for (pass = 0; pass < maxpasses; pass++) {
            read_vec_element_i32(s, tcg_op, rn, pass, MO_32);

            switch (opcode) {
            case 0x3c: /* URECPE */
                gen_helper_recpe_u32(tcg_res, tcg_op);
                break;
            case 0x3d: /* FRECPE */
                gen_helper_recpe_f32(tcg_res, tcg_op, fpst);
                break;
            case 0x3f: /* FRECPX */
                gen_helper_frecpx_f32(tcg_res, tcg_op, fpst);
                break;
            case 0x7d: /* FRSQRTE */
                gen_helper_rsqrte_f32(tcg_res, tcg_op, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            if (is_scalar) {
                write_fp_sreg(s, rd, tcg_res);
            } else {
                write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
            }
        }
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_op);
        if (!is_scalar) {
            clear_vec_high(s, is_q, rd);
        }
    }
    tcg_temp_free_ptr(fpst);
}
10174
static void handle_2misc_narrow(DisasContext *s, bool scalar,
                                int opcode, bool u, bool is_q,
                                int size, int rn, int rd)
{
    /* Handle 2-reg-misc ops which are narrowing (so each 2*size element
     * in the source becomes a size element in the destination).
     */
    int pass;
    TCGv_i32 tcg_res[2];
    int destelt = is_q ? 2 : 0;
    int passes = scalar ? 1 : 2;

    if (scalar) {
        /* Scalar form only produces one result; zero the other half */
        tcg_res[1] = tcg_const_i32(0);
    }

    for (pass = 0; pass < passes; pass++) {
        TCGv_i64 tcg_op = tcg_temp_new_i64();
        NeonGenNarrowFn *genfn = NULL;
        NeonGenNarrowEnvFn *genenvfn = NULL;

        if (scalar) {
            read_vec_element(s, tcg_op, rn, pass, size + 1);
        } else {
            read_vec_element(s, tcg_op, rn, pass, MO_64);
        }
        tcg_res[pass] = tcg_temp_new_i32();

        switch (opcode) {
        case 0x12: /* XTN, XTN2 / SQXTUN, SQXTUN2 */
        {
            static NeonGenNarrowFn * const xtnfns[3] = {
                gen_helper_neon_narrow_u8,
                gen_helper_neon_narrow_u16,
                tcg_gen_extrl_i64_i32,
            };
            static NeonGenNarrowEnvFn * const sqxtunfns[3] = {
                gen_helper_neon_unarrow_sat8,
                gen_helper_neon_unarrow_sat16,
                gen_helper_neon_unarrow_sat32,
            };
            if (u) {
                genenvfn = sqxtunfns[size];
            } else {
                genfn = xtnfns[size];
            }
            break;
        }
        case 0x14: /* SQXTN, SQXTN2 / UQXTN, UQXTN2 */
        {
            static NeonGenNarrowEnvFn * const fns[3][2] = {
                { gen_helper_neon_narrow_sat_s8,
                  gen_helper_neon_narrow_sat_u8 },
                { gen_helper_neon_narrow_sat_s16,
                  gen_helper_neon_narrow_sat_u16 },
                { gen_helper_neon_narrow_sat_s32,
                  gen_helper_neon_narrow_sat_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x16: /* FCVTN, FCVTN2 */
            /* 32 bit to 16 bit or 64 bit to 32 bit float conversion */
            if (size == 2) {
                gen_helper_vfp_fcvtsd(tcg_res[pass], tcg_op, cpu_env);
            } else {
                TCGv_i32 tcg_lo = tcg_temp_new_i32();
                TCGv_i32 tcg_hi = tcg_temp_new_i32();
                TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
                TCGv_i32 ahp = get_ahp_flag();

                tcg_gen_extr_i64_i32(tcg_lo, tcg_hi, tcg_op);
                gen_helper_vfp_fcvt_f32_to_f16(tcg_lo, tcg_lo, fpst, ahp);
                gen_helper_vfp_fcvt_f32_to_f16(tcg_hi, tcg_hi, fpst, ahp);
                tcg_gen_deposit_i32(tcg_res[pass], tcg_lo, tcg_hi, 16, 16);
                tcg_temp_free_i32(tcg_lo);
                tcg_temp_free_i32(tcg_hi);
                tcg_temp_free_ptr(fpst);
                tcg_temp_free_i32(ahp);
            }
            break;
        case 0x36: /* BFCVTN, BFCVTN2 */
        {
            TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
            gen_helper_bfcvt_pair(tcg_res[pass], tcg_op, fpst);
            tcg_temp_free_ptr(fpst);
        }
            break;
        case 0x56: /* FCVTXN, FCVTXN2 */
            /* 64 bit to 32 bit float conversion
             * with von Neumann rounding (round to odd)
             */
            assert(size == 2);
            gen_helper_fcvtx_f64_to_f32(tcg_res[pass], tcg_op, cpu_env);
            break;
        default:
            g_assert_not_reached();
        }

        if (genfn) {
            genfn(tcg_res[pass], tcg_op);
        } else if (genenvfn) {
            genenvfn(tcg_res[pass], cpu_env, tcg_op);
        }

        tcg_temp_free_i64(tcg_op);
    }

    for (pass = 0; pass < 2; pass++) {
        write_vec_element_i32(s, tcg_res[pass], rd, destelt + pass, MO_32);
        tcg_temp_free_i32(tcg_res[pass]);
    }
    clear_vec_high(s, is_q, rd);
}
10289
10290
/* Remaining saturating accumulating ops: SUQADD and USQADD
 * (signed/unsigned saturating accumulate of the "other" signedness input).
 */
static void handle_2misc_satacc(DisasContext *s, bool is_scalar, bool is_u,
                                bool is_q, int size, int rn, int rd)
{
    bool is_double = (size == 3);

    if (is_double) {
        TCGv_i64 tcg_rn = tcg_temp_new_i64();
        TCGv_i64 tcg_rd = tcg_temp_new_i64();
        int pass;

        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
            read_vec_element(s, tcg_rn, rn, pass, MO_64);
            read_vec_element(s, tcg_rd, rd, pass, MO_64);

            if (is_u) { /* USQADD */
                gen_helper_neon_uqadd_s64(tcg_rd, cpu_env, tcg_rn, tcg_rd);
            } else { /* SUQADD */
                gen_helper_neon_sqadd_u64(tcg_rd, cpu_env, tcg_rn, tcg_rd);
            }
            write_vec_element(s, tcg_rd, rd, pass, MO_64);
        }
        tcg_temp_free_i64(tcg_rd);
        tcg_temp_free_i64(tcg_rn);
        clear_vec_high(s, !is_scalar, rd);
    } else {
        TCGv_i32 tcg_rn = tcg_temp_new_i32();
        TCGv_i32 tcg_rd = tcg_temp_new_i32();
        int pass, maxpasses;

        if (is_scalar) {
            maxpasses = 1;
        } else {
            maxpasses = is_q ? 4 : 2;
        }

        for (pass = 0; pass < maxpasses; pass++) {
            if (is_scalar) {
                read_vec_element_i32(s, tcg_rn, rn, pass, size);
                read_vec_element_i32(s, tcg_rd, rd, pass, size);
            } else {
                read_vec_element_i32(s, tcg_rn, rn, pass, MO_32);
                read_vec_element_i32(s, tcg_rd, rd, pass, MO_32);
            }

            if (is_u) { /* USQADD */
                switch (size) {
                case 0:
                    gen_helper_neon_uqadd_s8(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                case 1:
                    gen_helper_neon_uqadd_s16(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                case 2:
                    gen_helper_neon_uqadd_s32(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                default:
                    g_assert_not_reached();
                }
            } else { /* SUQADD */
                switch (size) {
                case 0:
                    gen_helper_neon_sqadd_u8(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                case 1:
                    gen_helper_neon_sqadd_u16(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                case 2:
                    gen_helper_neon_sqadd_u32(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                default:
                    g_assert_not_reached();
                }
            }

            if (is_scalar) {
                /* Scalar: zero the whole low 64 bits before inserting
                 * the (at most 32-bit) result below.
                 */
                TCGv_i64 tcg_zero = tcg_const_i64(0);
                write_vec_element(s, tcg_zero, rd, 0, MO_64);
                tcg_temp_free_i64(tcg_zero);
            }
            write_vec_element_i32(s, tcg_rd, rd, pass, MO_32);
        }
        tcg_temp_free_i32(tcg_rd);
        tcg_temp_free_i32(tcg_rn);
        clear_vec_high(s, is_q, rd);
    }
}
10377
10378
10379
10380
10381
10382
10383
/* AdvSIMD scalar two reg misc
 *  31 30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
 * +-----+---+-----------+------+-----------+--------+-----+------+------+
 * | 0 1 | U | 1 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +-----+---+-----------+------+-----------+--------+-----+------+------+
 */
static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 12, 5);
    int size = extract32(insn, 22, 2);
    bool u = extract32(insn, 29, 1);
    bool is_fcvt = false;
    int rmode;
    TCGv_i32 tcg_rmode;
    TCGv_ptr tcg_fpstatus;

    switch (opcode) {
    case 0x3: /* USQADD / SUQADD */
        if (!fp_access_check(s)) {
            return;
        }
        handle_2misc_satacc(s, true, u, false, size, rn, rd);
        return;
    case 0x7: /* SQABS / SQNEG */
        break;
    case 0xa: /* CMLT */
        if (u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x8: /* CMGT, CMGE */
    case 0x9: /* CMEQ, CMLE */
    case 0xb: /* ABS, NEG */
        if (size != 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x12: /* SQXTUN */
        if (!u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x14: /* SQXTN, UQXTN */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_2misc_narrow(s, true, opcode, u, false, size, rn, rd);
        return;
    case 0xc ... 0xf:
    case 0x16 ... 0x1d:
    case 0x1f:
        /* Floating point: U, size[1] and opcode indicate operation;
         * size[0] indicates single or double precision.
         */
        opcode |= (extract32(size, 1, 1) << 5) | (u << 6);
        size = extract32(size, 0, 1) ? 3 : 2;
        switch (opcode) {
        case 0x2c: /* FCMGT (zero) */
        case 0x2d: /* FCMEQ (zero) */
        case 0x2e: /* FCMLT (zero) */
        case 0x6c: /* FCMGE (zero) */
        case 0x6d: /* FCMLE (zero) */
            handle_2misc_fcmp_zero(s, opcode, true, u, true, size, rn, rd);
            return;
        case 0x1d: /* SCVTF */
        case 0x5d: /* UCVTF */
        {
            bool is_signed = (opcode == 0x1d);
            if (!fp_access_check(s)) {
                return;
            }
            handle_simd_intfp_conv(s, rd, rn, 1, is_signed, 0, size);
            return;
        }
        case 0x3d: /* FRECPE */
        case 0x3f: /* FRECPX */
        case 0x7d: /* FRSQRTE */
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_reciprocal(s, opcode, true, u, true, size, rn, rd);
            return;
        case 0x1a: /* FCVTNS */
        case 0x1b: /* FCVTMS */
        case 0x3a: /* FCVTPS */
        case 0x3b: /* FCVTZS */
        case 0x5a: /* FCVTNU */
        case 0x5b: /* FCVTMU */
        case 0x7a: /* FCVTPU */
        case 0x7b: /* FCVTZU */
            is_fcvt = true;
            /* Rounding mode is encoded in opcode bits 5 and 0 */
            rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
            break;
        case 0x1c: /* FCVTAS */
        case 0x5c: /* FCVTAU */
            /* TIEAWAY doesn't fit in the usual rounding mode encoding */
            is_fcvt = true;
            rmode = FPROUNDING_TIEAWAY;
            break;
        case 0x56: /* FCVTXN, FCVTXN2 */
            if (size == 2) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_narrow(s, true, opcode, u, false, size - 1, rn, rd);
            return;
        default:
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (is_fcvt) {
        tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
        tcg_fpstatus = fpstatus_ptr(FPST_FPCR);
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
    } else {
        tcg_rmode = NULL;
        tcg_fpstatus = NULL;
    }

    if (size == 3) {
        TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
        TCGv_i64 tcg_rd = tcg_temp_new_i64();

        handle_2misc_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rmode, tcg_fpstatus);
        write_fp_dreg(s, rd, tcg_rd);
        tcg_temp_free_i64(tcg_rd);
        tcg_temp_free_i64(tcg_rn);
    } else {
        TCGv_i32 tcg_rn = tcg_temp_new_i32();
        TCGv_i32 tcg_rd = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_rn, rn, 0, size);

        switch (opcode) {
        case 0x7: /* SQABS, SQNEG */
        {
            NeonGenOneOpEnvFn *genfn;
            static NeonGenOneOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 },
                { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 },
                { gen_helper_neon_qabs_s32, gen_helper_neon_qneg_s32 },
            };
            genfn = fns[size][u];
            genfn(tcg_rd, cpu_env, tcg_rn);
            break;
        }
        case 0x1a: /* FCVTNS */
        case 0x1b: /* FCVTMS */
        case 0x1c: /* FCVTAS */
        case 0x3a: /* FCVTPS */
        case 0x3b: /* FCVTZS */
        {
            TCGv_i32 tcg_shift = tcg_const_i32(0);
            gen_helper_vfp_tosls(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
            tcg_temp_free_i32(tcg_shift);
            break;
        }
        case 0x5a: /* FCVTNU */
        case 0x5b: /* FCVTMU */
        case 0x5c: /* FCVTAU */
        case 0x7a: /* FCVTPU */
        case 0x7b: /* FCVTZU */
        {
            TCGv_i32 tcg_shift = tcg_const_i32(0);
            gen_helper_vfp_touls(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
            tcg_temp_free_i32(tcg_shift);
            break;
        }
        default:
            g_assert_not_reached();
        }

        write_fp_sreg(s, rd, tcg_rd);
        tcg_temp_free_i32(tcg_rd);
        tcg_temp_free_i32(tcg_rn);
    }

    if (is_fcvt) {
        /* Restore the original rounding mode */
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
        tcg_temp_free_i32(tcg_rmode);
        tcg_temp_free_ptr(tcg_fpstatus);
    }
}
10583
10584
/* SSHR[RA]/USHR[RA] - Vector shift right (optional rounding/accumulate) */
static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
                                 int immh, int immb, int opcode, int rn, int rd)
{
    int size = 32 - clz32(immh) - 1;
    int immhb = immh << 3 | immb;
    int shift = 2 * (8 << size) - immhb;
    GVecGen2iFn *gvec_fn;

    if (extract32(immh, 3, 1) && !is_q) {
        unallocated_encoding(s);
        return;
    }
    /* immh is non-zero by decode, so size is in range 0..3 */
    tcg_debug_assert(size <= 3);

    if (!fp_access_check(s)) {
        return;
    }

    switch (opcode) {
    case 0x02: /* SSRA / USRA (accumulate) */
        gvec_fn = is_u ? gen_gvec_usra : gen_gvec_ssra;
        break;

    case 0x08: /* SRI */
        gvec_fn = gen_gvec_sri;
        break;

    case 0x00: /* SSHR / USHR */
        if (is_u) {
            if (shift == 8 << size) {
                /* Shift count the same size as element size produces zero. */
                tcg_gen_gvec_dup_imm(size, vec_full_reg_offset(s, rd),
                                     is_q ? 16 : 8, vec_full_reg_size(s), 0);
                return;
            }
            gvec_fn = tcg_gen_gvec_shri;
        } else {
            /* Shift count the same size as element size produces all sign. */
            if (shift == 8 << size) {
                shift -= 1;
            }
            gvec_fn = tcg_gen_gvec_sari;
        }
        break;

    case 0x04: /* SRSHR / URSHR (rounding) */
        gvec_fn = is_u ? gen_gvec_urshr : gen_gvec_srshr;
        break;

    case 0x06: /* SRSRA / URSRA (accum + rounding) */
        gvec_fn = is_u ? gen_gvec_ursra : gen_gvec_srsra;
        break;

    default:
        g_assert_not_reached();
    }

    gen_gvec_fn2i(s, is_q, rd, rn, shift, gvec_fn, size);
}
10644
10645
/* SHL/SLI - Vector shift left */
static void handle_vec_simd_shli(DisasContext *s, bool is_q, bool insert,
                                 int immh, int immb, int opcode, int rn, int rd)
{
    int size = 32 - clz32(immh) - 1;
    int immhb = immh << 3 | immb;
    int shift = immhb - (8 << size);

    /* Range of size is limited by decode: immh is a non-zero 4 bit field */
    assert(size >= 0 && size <= 3);

    if (extract32(immh, 3, 1) && !is_q) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (insert) { /* SLI: shift left and insert */
        gen_gvec_fn2i(s, is_q, rd, rn, shift, gen_gvec_sli, size);
    } else { /* SHL */
        gen_gvec_fn2i(s, is_q, rd, rn, shift, tcg_gen_gvec_shli, size);
    }
}
10671
10672
/* USHLL/SHLL - Vector shift left with widening */
static void handle_vec_simd_wshli(DisasContext *s, bool is_q, bool is_u,
                                 int immh, int immb, int opcode, int rn, int rd)
{
    int size = 32 - clz32(immh) - 1;
    int immhb = immh << 3 | immb;
    int shift = immhb - (8 << size);
    int dsize = 64;
    int esize = 8 << size;
    int elements = dsize/esize;
    TCGv_i64 tcg_rn = new_tmp_a64(s);
    TCGv_i64 tcg_rd = new_tmp_a64(s);
    int i;

    if (size >= 3) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* For the LL variants the store is larger than the load,
     * so if rd == rn we would overwrite parts of our input.
     * So load everything right now and use shifts in the main loop.
     */
    read_vec_element(s, tcg_rn, rn, is_q ? 1 : 0, MO_64);

    for (i = 0; i < elements; i++) {
        tcg_gen_shri_i64(tcg_rd, tcg_rn, i * esize);
        ext_and_shift_reg(tcg_rd, tcg_rd, size | (!is_u << 2), 0);
        tcg_gen_shli_i64(tcg_rd, tcg_rd, shift);
        write_vec_element(s, tcg_rd, rd, i, size + 1);
    }
}
10708
10709
/* SHRN/RSHRN - Shift right with narrowing (and potential rounding) */
static void handle_vec_simd_shrn(DisasContext *s, bool is_q,
                                 int immh, int immb, int opcode, int rn, int rd)
{
    int immhb = immh << 3 | immb;
    int size = 32 - clz32(immh) - 1;
    int dsize = 64;
    int esize = 8 << size;
    int elements = dsize/esize;
    int shift = (2 * esize) - immhb;
    /* opcode bit 0 distinguishes RSHRN (rounding) from SHRN */
    bool round = extract32(opcode, 0, 1);
    TCGv_i64 tcg_rn, tcg_rd, tcg_final;
    TCGv_i64 tcg_round;
    int i;

    if (extract32(immh, 3, 1)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rn = tcg_temp_new_i64();
    tcg_rd = tcg_temp_new_i64();
    tcg_final = tcg_temp_new_i64();
    /* Preload the untouched half of Rd so we can deposit into it */
    read_vec_element(s, tcg_final, rd, is_q ? 1 : 0, MO_64);

    if (round) {
        uint64_t round_const = 1ULL << (shift - 1);
        tcg_round = tcg_const_i64(round_const);
    } else {
        tcg_round = NULL;
    }

    for (i = 0; i < elements; i++) {
        read_vec_element(s, tcg_rn, rn, i, size+1);
        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
                                false, true, size+1, shift);

        tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
    }

    if (!is_q) {
        write_vec_element(s, tcg_final, rd, 0, MO_64);
    } else {
        write_vec_element(s, tcg_final, rd, 1, MO_64);
    }
    if (round) {
        tcg_temp_free_i64(tcg_round);
    }
    tcg_temp_free_i64(tcg_rn);
    tcg_temp_free_i64(tcg_rd);
    tcg_temp_free_i64(tcg_final);

    clear_vec_high(s, is_q, rd);
}
10767
10768
10769
10770
10771
10772
10773
10774
/* AdvSIMD shift by immediate
 *  31  30   29 28         23 22  19 18  16 15    11  10 9    5 4    0
 * +---+---+---+-------------+------+------+--------+---+------+------+
 * | 0 | Q | U | 0 1 1 1 1 0 | immh | immb | opcode | 1 |  Rn  |  Rd  |
 * +---+---+---+-------------+------+------+--------+---+------+------+
 */
static void disas_simd_shift_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 5);
    int immb = extract32(insn, 16, 3);
    int immh = extract32(insn, 19, 4);
    bool is_u = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);

    /* immh == 0 would be a modified-immediate encoding, routed elsewhere */
    assert(immh != 0);

    switch (opcode) {
    case 0x08: /* SRI */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x00: /* SSHR / USHR */
    case 0x02: /* SSRA / USRA (accumulate) */
    case 0x04: /* SRSHR / URSHR (rounding) */
    case 0x06: /* SRSRA / URSRA (accum + rounding) */
        handle_vec_simd_shri(s, is_q, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x0a: /* SHL / SLI */
        handle_vec_simd_shli(s, is_q, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x10: /* SHRN / SQSHRUN */
    case 0x11: /* RSHRN / SQRSHRUN */
        if (is_u) {
            handle_vec_simd_sqshrn(s, false, is_q, false, true, immh, immb,
                                   opcode, rn, rd);
        } else {
            handle_vec_simd_shrn(s, is_q, immh, immb, opcode, rn, rd);
        }
        break;
    case 0x12: /* SQSHRN / UQSHRN */
    case 0x13: /* SQRSHRN / UQRSHRN */
        handle_vec_simd_sqshrn(s, false, is_q, is_u, is_u, immh, immb,
                               opcode, rn, rd);
        break;
    case 0x14: /* SSHLL / USHLL */
        handle_vec_simd_wshli(s, is_q, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x1c: /* SCVTF / UCVTF (fixed-point) */
        handle_simd_shift_intfp_conv(s, false, is_q, is_u, immh, immb,
                                     opcode, rn, rd);
        break;
    case 0xc: /* SQSHLU */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        handle_simd_qshl(s, false, is_q, false, true, immh, immb, rn, rd);
        break;
    case 0xe: /* SQSHL, UQSHL */
        handle_simd_qshl(s, false, is_q, is_u, is_u, immh, immb, rn, rd);
        break;
    case 0x1f: /* FCVTZS / FCVTZU (fixed-point) */
        handle_simd_shift_fpint_conv(s, false, is_q, is_u, immh, immb, rn, rd);
        return;
    default:
        unallocated_encoding(s);
        return;
    }
}
10843
10844
10845
10846
10847static void gen_neon_addl(int size, bool is_sub, TCGv_i64 tcg_res,
10848 TCGv_i64 tcg_op1, TCGv_i64 tcg_op2)
10849{
10850 static NeonGenTwo64OpFn * const fns[3][2] = {
10851 { gen_helper_neon_addl_u16, gen_helper_neon_subl_u16 },
10852 { gen_helper_neon_addl_u32, gen_helper_neon_subl_u32 },
10853 { tcg_gen_add_i64, tcg_gen_sub_i64 },
10854 };
10855 NeonGenTwo64OpFn *genfn;
10856 assert(size < 3);
10857
10858 genfn = fns[size][is_sub];
10859 genfn(tcg_res, tcg_op1, tcg_op2);
10860}
10861
static void handle_3rd_widening(DisasContext *s, int is_q, int is_u, int size,
                                int opcode, int rd, int rn, int rm)
{
    /* 3-reg-different widening insns: 64 x 64 -> 128 */
    TCGv_i64 tcg_res[2];
    int pass, accop;

    tcg_res[0] = tcg_temp_new_i64();
    tcg_res[1] = tcg_temp_new_i64();

    /* Does this op do an adding accumulate, a subtracting accumulate,
     * or no accumulate at all?
     */
    switch (opcode) {
    case 5:  /* SABAL, SABAL2, UABAL, UABAL2 */
    case 8:  /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
    case 9:  /* SQDMLAL, SQDMLAL2 */
        accop = 1;
        break;
    case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
    case 11: /* SQDMLSL, SQDMLSL2 */
        accop = -1;
        break;
    default:
        accop = 0;
        break;
    }

    if (accop != 0) {
        read_vec_element(s, tcg_res[0], rd, 0, MO_64);
        read_vec_element(s, tcg_res[1], rd, 1, MO_64);
    }

    /* size == 2 means two 32x32->64 operations; this is worth special
     * casing because we can generally handle it inline.
     */
    if (size == 2) {
        for (pass = 0; pass < 2; pass++) {
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
            TCGv_i64 tcg_passres;
            MemOp memop = MO_32 | (is_u ? 0 : MO_SIGN);

            int elt = pass + is_q * 2;

            read_vec_element(s, tcg_op1, rn, elt, memop);
            read_vec_element(s, tcg_op2, rm, elt, memop);

            if (accop == 0) {
                tcg_passres = tcg_res[pass];
            } else {
                tcg_passres = tcg_temp_new_i64();
            }

            switch (opcode) {
            case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
                tcg_gen_add_i64(tcg_passres, tcg_op1, tcg_op2);
                break;
            case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
                tcg_gen_sub_i64(tcg_passres, tcg_op1, tcg_op2);
                break;
            case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
            case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
            {
                /* abs(op1 - op2) via movcond on both orderings */
                TCGv_i64 tcg_tmp1 = tcg_temp_new_i64();
                TCGv_i64 tcg_tmp2 = tcg_temp_new_i64();

                tcg_gen_sub_i64(tcg_tmp1, tcg_op1, tcg_op2);
                tcg_gen_sub_i64(tcg_tmp2, tcg_op2, tcg_op1);
                tcg_gen_movcond_i64(is_u ? TCG_COND_GEU : TCG_COND_GE,
                                    tcg_passres,
                                    tcg_op1, tcg_op2, tcg_tmp1, tcg_tmp2);
                tcg_temp_free_i64(tcg_tmp1);
                tcg_temp_free_i64(tcg_tmp2);
                break;
            }
            case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
            case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
            case 12: /* SMULL, SMULL2, UMULL, UMULL2 */
                tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2);
                break;
            case 9: /* SQDMLAL, SQDMLAL2 */
            case 11: /* SQDMLSL, SQDMLSL2 */
            case 13: /* SQDMULL, SQDMULL2 */
                tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2);
                /* Saturating doubling: add the product to itself */
                gen_helper_neon_addl_saturate_s64(tcg_passres, cpu_env,
                                                  tcg_passres, tcg_passres);
                break;
            default:
                g_assert_not_reached();
            }

            if (opcode == 9 || opcode == 11) {
                /* saturating accumulate ops */
                if (accop < 0) {
                    tcg_gen_neg_i64(tcg_passres, tcg_passres);
                }
                gen_helper_neon_addl_saturate_s64(tcg_res[pass], cpu_env,
                                                  tcg_res[pass], tcg_passres);
            } else if (accop > 0) {
                tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
            } else if (accop < 0) {
                tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
            }

            if (accop != 0) {
                tcg_temp_free_i64(tcg_passres);
            }

            tcg_temp_free_i64(tcg_op1);
            tcg_temp_free_i64(tcg_op2);
        }
    } else {
        /* size 0 or 1, generally helper functions */
        for (pass = 0; pass < 2; pass++) {
            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
            TCGv_i64 tcg_passres;
            int elt = pass + is_q * 2;

            read_vec_element_i32(s, tcg_op1, rn, elt, MO_32);
            read_vec_element_i32(s, tcg_op2, rm, elt, MO_32);

            if (accop == 0) {
                tcg_passres = tcg_res[pass];
            } else {
                tcg_passres = tcg_temp_new_i64();
            }

            switch (opcode) {
            case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
            case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
            {
                TCGv_i64 tcg_op2_64 = tcg_temp_new_i64();
                static NeonGenWidenFn * const widenfns[2][2] = {
                    { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 },
                    { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 },
                };
                NeonGenWidenFn *widenfn = widenfns[size][is_u];

                widenfn(tcg_op2_64, tcg_op2);
                widenfn(tcg_passres, tcg_op1);
                gen_neon_addl(size, (opcode == 2), tcg_passres,
                              tcg_passres, tcg_op2_64);
                tcg_temp_free_i64(tcg_op2_64);
                break;
            }
            case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
            case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
                if (size == 0) {
                    if (is_u) {
                        gen_helper_neon_abdl_u16(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_abdl_s16(tcg_passres, tcg_op1, tcg_op2);
                    }
                } else {
                    if (is_u) {
                        gen_helper_neon_abdl_u32(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_abdl_s32(tcg_passres, tcg_op1, tcg_op2);
                    }
                }
                break;
            case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
            case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
            case 12: /* SMULL, SMULL2, UMULL, UMULL2 */
                if (size == 0) {
                    if (is_u) {
                        gen_helper_neon_mull_u8(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_mull_s8(tcg_passres, tcg_op1, tcg_op2);
                    }
                } else {
                    if (is_u) {
                        gen_helper_neon_mull_u16(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2);
                    }
                }
                break;
            case 9: /* SQDMLAL, SQDMLAL2 */
            case 11: /* SQDMLSL, SQDMLSL2 */
            case 13: /* SQDMULL, SQDMULL2 */
                assert(size == 1);
                gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2);
                /* Saturating doubling: add the product to itself */
                gen_helper_neon_addl_saturate_s32(tcg_passres, cpu_env,
                                                  tcg_passres, tcg_passres);
                break;
            default:
                g_assert_not_reached();
            }
            tcg_temp_free_i32(tcg_op1);
            tcg_temp_free_i32(tcg_op2);

            if (accop != 0) {
                if (opcode == 9 || opcode == 11) {
                    /* saturating accumulate ops */
                    if (accop < 0) {
                        gen_helper_neon_negl_u32(tcg_passres, tcg_passres);
                    }
                    gen_helper_neon_addl_saturate_s32(tcg_res[pass], cpu_env,
                                                      tcg_res[pass],
                                                      tcg_passres);
                } else {
                    gen_neon_addl(size, (accop < 0), tcg_res[pass],
                                  tcg_res[pass], tcg_passres);
                }
                tcg_temp_free_i64(tcg_passres);
            }
        }
    }

    write_vec_element(s, tcg_res[0], rd, 0, MO_64);
    write_vec_element(s, tcg_res[1], rd, 1, MO_64);
    tcg_temp_free_i64(tcg_res[0]);
    tcg_temp_free_i64(tcg_res[1]);
}
11079
/* 3-reg-different "wide" insns (64 x 128 -> 128): the Rm operand is
 * widened to match Rn's element size, then added (opcode 1) or
 * subtracted (opcode 3).
 */
static void handle_3rd_wide(DisasContext *s, int is_q, int is_u, int size,
                            int opcode, int rd, int rn, int rm)
{
    TCGv_i64 tcg_res[2];
    /* Q selects the upper (…2 variant) or lower half of Rm */
    int part = is_q ? 2 : 0;
    int pass;

    for (pass = 0; pass < 2; pass++) {
        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
        TCGv_i32 tcg_op2 = tcg_temp_new_i32();
        TCGv_i64 tcg_op2_wide = tcg_temp_new_i64();
        static NeonGenWidenFn * const widenfns[3][2] = {
            { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 },
            { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 },
            { tcg_gen_ext_i32_i64, tcg_gen_extu_i32_i64 },
        };
        NeonGenWidenFn *widenfn = widenfns[size][is_u];

        read_vec_element(s, tcg_op1, rn, pass, MO_64);
        read_vec_element_i32(s, tcg_op2, rm, part + pass, MO_32);
        widenfn(tcg_op2_wide, tcg_op2);
        tcg_temp_free_i32(tcg_op2);
        tcg_res[pass] = tcg_temp_new_i64();
        gen_neon_addl(size, (opcode == 3),
                      tcg_res[pass], tcg_op1, tcg_op2_wide);
        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2_wide);
    }

    for (pass = 0; pass < 2; pass++) {
        write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
        tcg_temp_free_i64(tcg_res[pass]);
    }
}
11114
/* Rounding narrow of the high half of a 64-bit value: add the rounding
 * constant (1 << 31) then take the top 32 bits.  Note that this modifies
 * the input in place.
 */
static void do_narrow_round_high_u32(TCGv_i32 res, TCGv_i64 in)
{
    tcg_gen_addi_i64(in, in, 1U << 31);
    tcg_gen_extrh_i64_i32(res, in);
}
11120
/* 3-reg-different narrowing insns (128 x 128 -> 64):
 * ADDHN/RADDHN (opcode 4) and SUBHN/RSUBHN (opcode 6); is_u selects
 * the rounding (R…) variant.
 */
static void handle_3rd_narrowing(DisasContext *s, int is_q, int is_u, int size,
                                 int opcode, int rd, int rn, int rm)
{
    TCGv_i32 tcg_res[2];
    /* Q selects writing the upper (…2 variant) or lower half of Rd */
    int part = is_q ? 2 : 0;
    int pass;

    for (pass = 0; pass < 2; pass++) {
        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
        TCGv_i64 tcg_op2 = tcg_temp_new_i64();
        TCGv_i64 tcg_wideres = tcg_temp_new_i64();
        static NeonGenNarrowFn * const narrowfns[3][2] = {
            { gen_helper_neon_narrow_high_u8,
              gen_helper_neon_narrow_round_high_u8 },
            { gen_helper_neon_narrow_high_u16,
              gen_helper_neon_narrow_round_high_u16 },
            { tcg_gen_extrh_i64_i32, do_narrow_round_high_u32 },
        };
        NeonGenNarrowFn *gennarrow = narrowfns[size][is_u];

        read_vec_element(s, tcg_op1, rn, pass, MO_64);
        read_vec_element(s, tcg_op2, rm, pass, MO_64);

        gen_neon_addl(size, (opcode == 6), tcg_wideres, tcg_op1, tcg_op2);

        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2);

        tcg_res[pass] = tcg_temp_new_i32();
        gennarrow(tcg_res[pass], tcg_wideres);
        tcg_temp_free_i64(tcg_wideres);
    }

    for (pass = 0; pass < 2; pass++) {
        write_vec_element_i32(s, tcg_res[pass], rd, pass + part, MO_32);
        tcg_temp_free_i32(tcg_res[pass]);
    }
    clear_vec_high(s, is_q, rd);
}
11160
11161
11162
11163
11164
11165
11166
/* AdvSIMD three different
 *   31  30  29 28       24 23  22  21 20  16 15    12 11 10 9    5 4    0
 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
 * | 0 | Q | U | 0 1 1 1 0 | size | 1 |  Rm  | opcode | 0 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
 */
static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn)
{
    /* Instructions in this group fall into three basic classes
     * (in each case with the operation working on each element in
     * the input vectors):
     * (1) widening 64 x 64 -> 128 (with possibly Vd as an extra
     *     128 bit input)
     * (2) wide 64 x 128 -> 128
     * (3) narrowing 128 x 128 -> 64
     * Here we do initial decode, catch unallocated cases and
     * dispatch to separate functions which do the actual work.
     */
    int is_q = extract32(insn, 30, 1);
    int is_u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 4);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    switch (opcode) {
    case 1: /* SADDW, SADDW2, UADDW, UADDW2 */
    case 3: /* SSUBW, SSUBW2, USUBW, USUBW2 */
        /* 64 x 128 -> 128 */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_3rd_wide(s, is_q, is_u, size, opcode, rd, rn, rm);
        break;
    case 4: /* ADDHN, ADDHN2, RADDHN, RADDHN2 */
    case 6: /* SUBHN, SUBHN2, RSUBHN, RSUBHN2 */
        /* 128 x 128 -> 64 */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_3rd_narrowing(s, is_q, is_u, size, opcode, rd, rn, rm);
        break;
    case 14: /* PMULL, PMULL2 */
        if (is_u) {
            unallocated_encoding(s);
            return;
        }
        switch (size) {
        case 0: /* PMULL.P8 */
            if (!fp_access_check(s)) {
                return;
            }
            /* The Q field specifies lo/hi half input for this insn.  */
            gen_gvec_op3_ool(s, true, rd, rn, rm, is_q,
                             gen_helper_neon_pmull_h);
            break;

        case 3: /* PMULL.P64 */
            if (!dc_isar_feature(aa64_pmull, s)) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            /* The Q field specifies lo/hi half input for this insn.  */
            gen_gvec_op3_ool(s, true, rd, rn, rm, is_q,
                             gen_helper_gvec_pmull_q);
            break;

        default:
            unallocated_encoding(s);
            break;
        }
        return;
    case 9: /* SQDMLAL, SQDMLAL2 */
    case 11: /* SQDMLSL, SQDMLSL2 */
    case 13: /* SQDMULL, SQDMULL2 */
        if (is_u || size == 0) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
    case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
    case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
    case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
    case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
    case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
    case 12: /* SMULL, SMULL2, UMULL, UMULL2 */
        /* 64 x 64 -> 128 */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_3rd_widening(s, is_q, is_u, size, opcode, rd, rn, rm);
        break;
    default:
        /* opcode 15 not allocated */
        unallocated_encoding(s);
        break;
    }
}
11277
11278
11279static void disas_simd_3same_logic(DisasContext *s, uint32_t insn)
11280{
11281 int rd = extract32(insn, 0, 5);
11282 int rn = extract32(insn, 5, 5);
11283 int rm = extract32(insn, 16, 5);
11284 int size = extract32(insn, 22, 2);
11285 bool is_u = extract32(insn, 29, 1);
11286 bool is_q = extract32(insn, 30, 1);
11287
11288 if (!fp_access_check(s)) {
11289 return;
11290 }
11291
11292 switch (size + 4 * is_u) {
11293 case 0:
11294 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_and, 0);
11295 return;
11296 case 1:
11297 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_andc, 0);
11298 return;
11299 case 2:
11300 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_or, 0);
11301 return;
11302 case 3:
11303 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_orc, 0);
11304 return;
11305 case 4:
11306 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_xor, 0);
11307 return;
11308
11309 case 5:
11310 gen_gvec_fn4(s, is_q, rd, rd, rn, rm, tcg_gen_gvec_bitsel, 0);
11311 return;
11312 case 6:
11313 gen_gvec_fn4(s, is_q, rd, rm, rn, rd, tcg_gen_gvec_bitsel, 0);
11314 return;
11315 case 7:
11316 gen_gvec_fn4(s, is_q, rd, rm, rd, rn, tcg_gen_gvec_bitsel, 0);
11317 return;
11318
11319 default:
11320 g_assert_not_reached();
11321 }
11322}
11323
11324
11325
11326
11327
11328
11329static void handle_simd_3same_pair(DisasContext *s, int is_q, int u, int opcode,
11330 int size, int rn, int rm, int rd)
11331{
11332 TCGv_ptr fpst;
11333 int pass;
11334
11335
11336 if (opcode >= 0x58) {
11337 fpst = fpstatus_ptr(FPST_FPCR);
11338 } else {
11339 fpst = NULL;
11340 }
11341
11342 if (!fp_access_check(s)) {
11343 return;
11344 }
11345
11346
11347
11348
11349 if (size == 3) {
11350 TCGv_i64 tcg_res[2];
11351
11352 for (pass = 0; pass < 2; pass++) {
11353 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
11354 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
11355 int passreg = (pass == 0) ? rn : rm;
11356
11357 read_vec_element(s, tcg_op1, passreg, 0, MO_64);
11358 read_vec_element(s, tcg_op2, passreg, 1, MO_64);
11359 tcg_res[pass] = tcg_temp_new_i64();
11360
11361 switch (opcode) {
11362 case 0x17:
11363 tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2);
11364 break;
11365 case 0x58:
11366 gen_helper_vfp_maxnumd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11367 break;
11368 case 0x5a:
11369 gen_helper_vfp_addd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11370 break;
11371 case 0x5e:
11372 gen_helper_vfp_maxd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11373 break;
11374 case 0x78:
11375 gen_helper_vfp_minnumd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11376 break;
11377 case 0x7e:
11378 gen_helper_vfp_mind(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11379 break;
11380 default:
11381 g_assert_not_reached();
11382 }
11383
11384 tcg_temp_free_i64(tcg_op1);
11385 tcg_temp_free_i64(tcg_op2);
11386 }
11387
11388 for (pass = 0; pass < 2; pass++) {
11389 write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
11390 tcg_temp_free_i64(tcg_res[pass]);
11391 }
11392 } else {
11393 int maxpass = is_q ? 4 : 2;
11394 TCGv_i32 tcg_res[4];
11395
11396 for (pass = 0; pass < maxpass; pass++) {
11397 TCGv_i32 tcg_op1 = tcg_temp_new_i32();
11398 TCGv_i32 tcg_op2 = tcg_temp_new_i32();
11399 NeonGenTwoOpFn *genfn = NULL;
11400 int passreg = pass < (maxpass / 2) ? rn : rm;
11401 int passelt = (is_q && (pass & 1)) ? 2 : 0;
11402
11403 read_vec_element_i32(s, tcg_op1, passreg, passelt, MO_32);
11404 read_vec_element_i32(s, tcg_op2, passreg, passelt + 1, MO_32);
11405 tcg_res[pass] = tcg_temp_new_i32();
11406
11407 switch (opcode) {
11408 case 0x17:
11409 {
11410 static NeonGenTwoOpFn * const fns[3] = {
11411 gen_helper_neon_padd_u8,
11412 gen_helper_neon_padd_u16,
11413 tcg_gen_add_i32,
11414 };
11415 genfn = fns[size];
11416 break;
11417 }
11418 case 0x14:
11419 {
11420 static NeonGenTwoOpFn * const fns[3][2] = {
11421 { gen_helper_neon_pmax_s8, gen_helper_neon_pmax_u8 },
11422 { gen_helper_neon_pmax_s16, gen_helper_neon_pmax_u16 },
11423 { tcg_gen_smax_i32, tcg_gen_umax_i32 },
11424 };
11425 genfn = fns[size][u];
11426 break;
11427 }
11428 case 0x15:
11429 {
11430 static NeonGenTwoOpFn * const fns[3][2] = {
11431 { gen_helper_neon_pmin_s8, gen_helper_neon_pmin_u8 },
11432 { gen_helper_neon_pmin_s16, gen_helper_neon_pmin_u16 },
11433 { tcg_gen_smin_i32, tcg_gen_umin_i32 },
11434 };
11435 genfn = fns[size][u];
11436 break;
11437 }
11438
11439 case 0x58:
11440 gen_helper_vfp_maxnums(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11441 break;
11442 case 0x5a:
11443 gen_helper_vfp_adds(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11444 break;
11445 case 0x5e:
11446 gen_helper_vfp_maxs(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11447 break;
11448 case 0x78:
11449 gen_helper_vfp_minnums(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11450 break;
11451 case 0x7e:
11452 gen_helper_vfp_mins(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11453 break;
11454 default:
11455 g_assert_not_reached();
11456 }
11457
11458
11459 if (genfn) {
11460 genfn(tcg_res[pass], tcg_op1, tcg_op2);
11461 }
11462
11463 tcg_temp_free_i32(tcg_op1);
11464 tcg_temp_free_i32(tcg_op2);
11465 }
11466
11467 for (pass = 0; pass < maxpass; pass++) {
11468 write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32);
11469 tcg_temp_free_i32(tcg_res[pass]);
11470 }
11471 clear_vec_high(s, is_q, rd);
11472 }
11473
11474 if (fpst) {
11475 tcg_temp_free_ptr(fpst);
11476 }
11477}
11478
11479
11480static void disas_simd_3same_float(DisasContext *s, uint32_t insn)
11481{
11482
11483
11484
11485
11486 int fpopcode = extract32(insn, 11, 5)
11487 | (extract32(insn, 23, 1) << 5)
11488 | (extract32(insn, 29, 1) << 6);
11489 int is_q = extract32(insn, 30, 1);
11490 int size = extract32(insn, 22, 1);
11491 int rm = extract32(insn, 16, 5);
11492 int rn = extract32(insn, 5, 5);
11493 int rd = extract32(insn, 0, 5);
11494
11495 int datasize = is_q ? 128 : 64;
11496 int esize = 32 << size;
11497 int elements = datasize / esize;
11498
11499 if (size == 1 && !is_q) {
11500 unallocated_encoding(s);
11501 return;
11502 }
11503
11504 switch (fpopcode) {
11505 case 0x58:
11506 case 0x5a:
11507 case 0x5e:
11508 case 0x78:
11509 case 0x7e:
11510 if (size && !is_q) {
11511 unallocated_encoding(s);
11512 return;
11513 }
11514 handle_simd_3same_pair(s, is_q, 0, fpopcode, size ? MO_64 : MO_32,
11515 rn, rm, rd);
11516 return;
11517 case 0x1b:
11518 case 0x1f:
11519 case 0x3f:
11520 case 0x5d:
11521 case 0x7d:
11522 case 0x19:
11523 case 0x39:
11524 case 0x18:
11525 case 0x1a:
11526 case 0x1c:
11527 case 0x1e:
11528 case 0x38:
11529 case 0x3a:
11530 case 0x3e:
11531 case 0x5b:
11532 case 0x5c:
11533 case 0x5f:
11534 case 0x7a:
11535 case 0x7c:
11536 if (!fp_access_check(s)) {
11537 return;
11538 }
11539 handle_3same_float(s, size, elements, fpopcode, rd, rn, rm);
11540 return;
11541
11542 case 0x1d:
11543 case 0x3d:
11544 case 0x59:
11545 case 0x79:
11546 if (size & 1 || !dc_isar_feature(aa64_fhm, s)) {
11547 unallocated_encoding(s);
11548 return;
11549 }
11550 if (fp_access_check(s)) {
11551 int is_s = extract32(insn, 23, 1);
11552 int is_2 = extract32(insn, 29, 1);
11553 int data = (is_2 << 1) | is_s;
11554 tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
11555 vec_full_reg_offset(s, rn),
11556 vec_full_reg_offset(s, rm), cpu_env,
11557 is_q ? 16 : 8, vec_full_reg_size(s),
11558 data, gen_helper_gvec_fmlal_a64);
11559 }
11560 return;
11561
11562 default:
11563 unallocated_encoding(s);
11564 return;
11565 }
11566}
11567
11568
11569static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
11570{
11571 int is_q = extract32(insn, 30, 1);
11572 int u = extract32(insn, 29, 1);
11573 int size = extract32(insn, 22, 2);
11574 int opcode = extract32(insn, 11, 5);
11575 int rm = extract32(insn, 16, 5);
11576 int rn = extract32(insn, 5, 5);
11577 int rd = extract32(insn, 0, 5);
11578 int pass;
11579 TCGCond cond;
11580
11581 switch (opcode) {
11582 case 0x13:
11583 if (u && size != 0) {
11584 unallocated_encoding(s);
11585 return;
11586 }
11587
11588 case 0x0:
11589 case 0x2:
11590 case 0x4:
11591 case 0xc:
11592 case 0xd:
11593 case 0xe:
11594 case 0xf:
11595 case 0x12:
11596 if (size == 3) {
11597 unallocated_encoding(s);
11598 return;
11599 }
11600 break;
11601 case 0x16:
11602 if (size == 0 || size == 3) {
11603 unallocated_encoding(s);
11604 return;
11605 }
11606 break;
11607 default:
11608 if (size == 3 && !is_q) {
11609 unallocated_encoding(s);
11610 return;
11611 }
11612 break;
11613 }
11614
11615 if (!fp_access_check(s)) {
11616 return;
11617 }
11618
11619 switch (opcode) {
11620 case 0x01:
11621 if (u) {
11622 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_uqadd_qc, size);
11623 } else {
11624 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sqadd_qc, size);
11625 }
11626 return;
11627 case 0x05:
11628 if (u) {
11629 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_uqsub_qc, size);
11630 } else {
11631 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sqsub_qc, size);
11632 }
11633 return;
11634 case 0x08:
11635 if (u) {
11636 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_ushl, size);
11637 } else {
11638 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sshl, size);
11639 }
11640 return;
11641 case 0x0c:
11642 if (u) {
11643 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_umax, size);
11644 } else {
11645 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_smax, size);
11646 }
11647 return;
11648 case 0x0d:
11649 if (u) {
11650 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_umin, size);
11651 } else {
11652 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_smin, size);
11653 }
11654 return;
11655 case 0xe:
11656 if (u) {
11657 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_uabd, size);
11658 } else {
11659 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sabd, size);
11660 }
11661 return;
11662 case 0xf:
11663 if (u) {
11664 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_uaba, size);
11665 } else {
11666 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_saba, size);
11667 }
11668 return;
11669 case 0x10:
11670 if (u) {
11671 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_sub, size);
11672 } else {
11673 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_add, size);
11674 }
11675 return;
11676 case 0x13:
11677 if (!u) {
11678 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_mul, size);
11679 } else {
11680 gen_gvec_op3_ool(s, is_q, rd, rn, rm, 0, gen_helper_gvec_pmul_b);
11681 }
11682 return;
11683 case 0x12:
11684 if (u) {
11685 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_mls, size);
11686 } else {
11687 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_mla, size);
11688 }
11689 return;
11690 case 0x16:
11691 {
11692 static gen_helper_gvec_3_ptr * const fns[2][2] = {
11693 { gen_helper_neon_sqdmulh_h, gen_helper_neon_sqrdmulh_h },
11694 { gen_helper_neon_sqdmulh_s, gen_helper_neon_sqrdmulh_s },
11695 };
11696 gen_gvec_op3_qc(s, is_q, rd, rn, rm, fns[size - 1][u]);
11697 }
11698 return;
11699 case 0x11:
11700 if (!u) {
11701 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_cmtst, size);
11702 return;
11703 }
11704
11705 cond = TCG_COND_EQ;
11706 goto do_gvec_cmp;
11707 case 0x06:
11708 cond = u ? TCG_COND_GTU : TCG_COND_GT;
11709 goto do_gvec_cmp;
11710 case 0x07:
11711 cond = u ? TCG_COND_GEU : TCG_COND_GE;
11712 do_gvec_cmp:
11713 tcg_gen_gvec_cmp(cond, size, vec_full_reg_offset(s, rd),
11714 vec_full_reg_offset(s, rn),
11715 vec_full_reg_offset(s, rm),
11716 is_q ? 16 : 8, vec_full_reg_size(s));
11717 return;
11718 }
11719
11720 if (size == 3) {
11721 assert(is_q);
11722 for (pass = 0; pass < 2; pass++) {
11723 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
11724 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
11725 TCGv_i64 tcg_res = tcg_temp_new_i64();
11726
11727 read_vec_element(s, tcg_op1, rn, pass, MO_64);
11728 read_vec_element(s, tcg_op2, rm, pass, MO_64);
11729
11730 handle_3same_64(s, opcode, u, tcg_res, tcg_op1, tcg_op2);
11731
11732 write_vec_element(s, tcg_res, rd, pass, MO_64);
11733
11734 tcg_temp_free_i64(tcg_res);
11735 tcg_temp_free_i64(tcg_op1);
11736 tcg_temp_free_i64(tcg_op2);
11737 }
11738 } else {
11739 for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
11740 TCGv_i32 tcg_op1 = tcg_temp_new_i32();
11741 TCGv_i32 tcg_op2 = tcg_temp_new_i32();
11742 TCGv_i32 tcg_res = tcg_temp_new_i32();
11743 NeonGenTwoOpFn *genfn = NULL;
11744 NeonGenTwoOpEnvFn *genenvfn = NULL;
11745
11746 read_vec_element_i32(s, tcg_op1, rn, pass, MO_32);
11747 read_vec_element_i32(s, tcg_op2, rm, pass, MO_32);
11748
11749 switch (opcode) {
11750 case 0x0:
11751 {
11752 static NeonGenTwoOpFn * const fns[3][2] = {
11753 { gen_helper_neon_hadd_s8, gen_helper_neon_hadd_u8 },
11754 { gen_helper_neon_hadd_s16, gen_helper_neon_hadd_u16 },
11755 { gen_helper_neon_hadd_s32, gen_helper_neon_hadd_u32 },
11756 };
11757 genfn = fns[size][u];
11758 break;
11759 }
11760 case 0x2:
11761 {
11762 static NeonGenTwoOpFn * const fns[3][2] = {
11763 { gen_helper_neon_rhadd_s8, gen_helper_neon_rhadd_u8 },
11764 { gen_helper_neon_rhadd_s16, gen_helper_neon_rhadd_u16 },
11765 { gen_helper_neon_rhadd_s32, gen_helper_neon_rhadd_u32 },
11766 };
11767 genfn = fns[size][u];
11768 break;
11769 }
11770 case 0x4:
11771 {
11772 static NeonGenTwoOpFn * const fns[3][2] = {
11773 { gen_helper_neon_hsub_s8, gen_helper_neon_hsub_u8 },
11774 { gen_helper_neon_hsub_s16, gen_helper_neon_hsub_u16 },
11775 { gen_helper_neon_hsub_s32, gen_helper_neon_hsub_u32 },
11776 };
11777 genfn = fns[size][u];
11778 break;
11779 }
11780 case 0x9:
11781 {
11782 static NeonGenTwoOpEnvFn * const fns[3][2] = {
11783 { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 },
11784 { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 },
11785 { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 },
11786 };
11787 genenvfn = fns[size][u];
11788 break;
11789 }
11790 case 0xa:
11791 {
11792 static NeonGenTwoOpFn * const fns[3][2] = {
11793 { gen_helper_neon_rshl_s8, gen_helper_neon_rshl_u8 },
11794 { gen_helper_neon_rshl_s16, gen_helper_neon_rshl_u16 },
11795 { gen_helper_neon_rshl_s32, gen_helper_neon_rshl_u32 },
11796 };
11797 genfn = fns[size][u];
11798 break;
11799 }
11800 case 0xb:
11801 {
11802 static NeonGenTwoOpEnvFn * const fns[3][2] = {
11803 { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 },
11804 { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 },
11805 { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 },
11806 };
11807 genenvfn = fns[size][u];
11808 break;
11809 }
11810 default:
11811 g_assert_not_reached();
11812 }
11813
11814 if (genenvfn) {
11815 genenvfn(tcg_res, cpu_env, tcg_op1, tcg_op2);
11816 } else {
11817 genfn(tcg_res, tcg_op1, tcg_op2);
11818 }
11819
11820 write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
11821
11822 tcg_temp_free_i32(tcg_res);
11823 tcg_temp_free_i32(tcg_op1);
11824 tcg_temp_free_i32(tcg_op2);
11825 }
11826 }
11827 clear_vec_high(s, is_q, rd);
11828}
11829
11830
11831
11832
11833
11834
11835
11836static void disas_simd_three_reg_same(DisasContext *s, uint32_t insn)
11837{
11838 int opcode = extract32(insn, 11, 5);
11839
11840 switch (opcode) {
11841 case 0x3:
11842 disas_simd_3same_logic(s, insn);
11843 break;
11844 case 0x17:
11845 case 0x14:
11846 case 0x15:
11847 {
11848
11849 int is_q = extract32(insn, 30, 1);
11850 int u = extract32(insn, 29, 1);
11851 int size = extract32(insn, 22, 2);
11852 int rm = extract32(insn, 16, 5);
11853 int rn = extract32(insn, 5, 5);
11854 int rd = extract32(insn, 0, 5);
11855 if (opcode == 0x17) {
11856 if (u || (size == 3 && !is_q)) {
11857 unallocated_encoding(s);
11858 return;
11859 }
11860 } else {
11861 if (size == 3) {
11862 unallocated_encoding(s);
11863 return;
11864 }
11865 }
11866 handle_simd_3same_pair(s, is_q, u, opcode, size, rn, rm, rd);
11867 break;
11868 }
11869 case 0x18 ... 0x31:
11870
11871 disas_simd_3same_float(s, insn);
11872 break;
11873 default:
11874 disas_simd_3same_int(s, insn);
11875 break;
11876 }
11877}
11878
11879
11880
11881
11882
11883
11884
11885
11886
11887
11888
11889
11890
11891static void disas_simd_three_reg_same_fp16(DisasContext *s, uint32_t insn)
11892{
11893 int opcode = extract32(insn, 11, 3);
11894 int u = extract32(insn, 29, 1);
11895 int a = extract32(insn, 23, 1);
11896 int is_q = extract32(insn, 30, 1);
11897 int rm = extract32(insn, 16, 5);
11898 int rn = extract32(insn, 5, 5);
11899 int rd = extract32(insn, 0, 5);
11900
11901
11902
11903
11904 int fpopcode = opcode | (a << 3) | (u << 4);
11905 int datasize = is_q ? 128 : 64;
11906 int elements = datasize / 16;
11907 bool pairwise;
11908 TCGv_ptr fpst;
11909 int pass;
11910
11911 switch (fpopcode) {
11912 case 0x0:
11913 case 0x1:
11914 case 0x2:
11915 case 0x3:
11916 case 0x4:
11917 case 0x6:
11918 case 0x7:
11919 case 0x8:
11920 case 0x9:
11921 case 0xa:
11922 case 0xe:
11923 case 0xf:
11924 case 0x13:
11925 case 0x14:
11926 case 0x15:
11927 case 0x17:
11928 case 0x1a:
11929 case 0x1c:
11930 case 0x1d:
11931 pairwise = false;
11932 break;
11933 case 0x10:
11934 case 0x12:
11935 case 0x16:
11936 case 0x18:
11937 case 0x1e:
11938 pairwise = true;
11939 break;
11940 default:
11941 unallocated_encoding(s);
11942 return;
11943 }
11944
11945 if (!dc_isar_feature(aa64_fp16, s)) {
11946 unallocated_encoding(s);
11947 return;
11948 }
11949
11950 if (!fp_access_check(s)) {
11951 return;
11952 }
11953
11954 fpst = fpstatus_ptr(FPST_FPCR_F16);
11955
11956 if (pairwise) {
11957 int maxpass = is_q ? 8 : 4;
11958 TCGv_i32 tcg_op1 = tcg_temp_new_i32();
11959 TCGv_i32 tcg_op2 = tcg_temp_new_i32();
11960 TCGv_i32 tcg_res[8];
11961
11962 for (pass = 0; pass < maxpass; pass++) {
11963 int passreg = pass < (maxpass / 2) ? rn : rm;
11964 int passelt = (pass << 1) & (maxpass - 1);
11965
11966 read_vec_element_i32(s, tcg_op1, passreg, passelt, MO_16);
11967 read_vec_element_i32(s, tcg_op2, passreg, passelt + 1, MO_16);
11968 tcg_res[pass] = tcg_temp_new_i32();
11969
11970 switch (fpopcode) {
11971 case 0x10:
11972 gen_helper_advsimd_maxnumh(tcg_res[pass], tcg_op1, tcg_op2,
11973 fpst);
11974 break;
11975 case 0x12:
11976 gen_helper_advsimd_addh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11977 break;
11978 case 0x16:
11979 gen_helper_advsimd_maxh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11980 break;
11981 case 0x18:
11982 gen_helper_advsimd_minnumh(tcg_res[pass], tcg_op1, tcg_op2,
11983 fpst);
11984 break;
11985 case 0x1e:
11986 gen_helper_advsimd_minh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11987 break;
11988 default:
11989 g_assert_not_reached();
11990 }
11991 }
11992
11993 for (pass = 0; pass < maxpass; pass++) {
11994 write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_16);
11995 tcg_temp_free_i32(tcg_res[pass]);
11996 }
11997
11998 tcg_temp_free_i32(tcg_op1);
11999 tcg_temp_free_i32(tcg_op2);
12000
12001 } else {
12002 for (pass = 0; pass < elements; pass++) {
12003 TCGv_i32 tcg_op1 = tcg_temp_new_i32();
12004 TCGv_i32 tcg_op2 = tcg_temp_new_i32();
12005 TCGv_i32 tcg_res = tcg_temp_new_i32();
12006
12007 read_vec_element_i32(s, tcg_op1, rn, pass, MO_16);
12008 read_vec_element_i32(s, tcg_op2, rm, pass, MO_16);
12009
12010 switch (fpopcode) {
12011 case 0x0:
12012 gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
12013 break;
12014 case 0x1:
12015 read_vec_element_i32(s, tcg_res, rd, pass, MO_16);
12016 gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_res,
12017 fpst);
12018 break;
12019 case 0x2:
12020 gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
12021 break;
12022 case 0x3:
12023 gen_helper_advsimd_mulxh(tcg_res, tcg_op1, tcg_op2, fpst);
12024 break;
12025 case 0x4:
12026 gen_helper_advsimd_ceq_f16(tcg_res, tcg_op1, tcg_op2, fpst);
12027 break;
12028 case 0x6:
12029 gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
12030 break;
12031 case 0x7:
12032 gen_helper_recpsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
12033 break;
12034 case 0x8:
12035 gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
12036 break;
12037 case 0x9:
12038
12039 tcg_gen_xori_i32(tcg_op1, tcg_op1, 0x8000);
12040 read_vec_element_i32(s, tcg_res, rd, pass, MO_16);
12041 gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_res,
12042 fpst);
12043 break;
12044 case 0xa:
12045 gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
12046 break;
12047 case 0xe:
12048 gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
12049 break;
12050 case 0xf:
12051 gen_helper_rsqrtsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
12052 break;
12053 case 0x13:
12054 gen_helper_advsimd_mulh(tcg_res, tcg_op1, tcg_op2, fpst);
12055 break;
12056 case 0x14:
12057 gen_helper_advsimd_cge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
12058 break;
12059 case 0x15:
12060 gen_helper_advsimd_acge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
12061 break;
12062 case 0x17:
12063 gen_helper_advsimd_divh(tcg_res, tcg_op1, tcg_op2, fpst);
12064 break;
12065 case 0x1a:
12066 gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
12067 tcg_gen_andi_i32(tcg_res, tcg_res, 0x7fff);
12068 break;
12069 case 0x1c:
12070 gen_helper_advsimd_cgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
12071 break;
12072 case 0x1d:
12073 gen_helper_advsimd_acgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
12074 break;
12075 default:
12076 g_assert_not_reached();
12077 }
12078
12079 write_vec_element_i32(s, tcg_res, rd, pass, MO_16);
12080 tcg_temp_free_i32(tcg_res);
12081 tcg_temp_free_i32(tcg_op1);
12082 tcg_temp_free_i32(tcg_op2);
12083 }
12084 }
12085
12086 tcg_temp_free_ptr(fpst);
12087
12088 clear_vec_high(s, is_q, rd);
12089}
12090
12091
12092
12093
12094
12095
12096
12097static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn)
12098{
12099 int rd = extract32(insn, 0, 5);
12100 int rn = extract32(insn, 5, 5);
12101 int opcode = extract32(insn, 11, 4);
12102 int rm = extract32(insn, 16, 5);
12103 int size = extract32(insn, 22, 2);
12104 bool u = extract32(insn, 29, 1);
12105 bool is_q = extract32(insn, 30, 1);
12106 bool feature;
12107 int rot;
12108
12109 switch (u * 16 + opcode) {
12110 case 0x10:
12111 case 0x11:
12112 if (size != 1 && size != 2) {
12113 unallocated_encoding(s);
12114 return;
12115 }
12116 feature = dc_isar_feature(aa64_rdm, s);
12117 break;
12118 case 0x02:
12119 case 0x12:
12120 if (size != MO_32) {
12121 unallocated_encoding(s);
12122 return;
12123 }
12124 feature = dc_isar_feature(aa64_dp, s);
12125 break;
12126 case 0x03:
12127 if (size != MO_32) {
12128 unallocated_encoding(s);
12129 return;
12130 }
12131 feature = dc_isar_feature(aa64_i8mm, s);
12132 break;
12133 case 0x04:
12134 case 0x14:
12135 case 0x05:
12136 if (!is_q || size != MO_32) {
12137 unallocated_encoding(s);
12138 return;
12139 }
12140 feature = dc_isar_feature(aa64_i8mm, s);
12141 break;
12142 case 0x18:
12143 case 0x19:
12144 case 0x1a:
12145 case 0x1b:
12146 case 0x1c:
12147 case 0x1e:
12148 if (size == 0
12149 || (size == 1 && !dc_isar_feature(aa64_fp16, s))
12150 || (size == 3 && !is_q)) {
12151 unallocated_encoding(s);
12152 return;
12153 }
12154 feature = dc_isar_feature(aa64_fcma, s);
12155 break;
12156 case 0x1d:
12157 if (size != MO_16 || !is_q) {
12158 unallocated_encoding(s);
12159 return;
12160 }
12161 feature = dc_isar_feature(aa64_bf16, s);
12162 break;
12163 case 0x1f:
12164 switch (size) {
12165 case 1:
12166 case 3:
12167 feature = dc_isar_feature(aa64_bf16, s);
12168 break;
12169 default:
12170 unallocated_encoding(s);
12171 return;
12172 }
12173 break;
12174 default:
12175 unallocated_encoding(s);
12176 return;
12177 }
12178 if (!feature) {
12179 unallocated_encoding(s);
12180 return;
12181 }
12182 if (!fp_access_check(s)) {
12183 return;
12184 }
12185
12186 switch (opcode) {
12187 case 0x0:
12188 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sqrdmlah_qc, size);
12189 return;
12190
12191 case 0x1:
12192 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sqrdmlsh_qc, size);
12193 return;
12194
12195 case 0x2:
12196 gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0,
12197 u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b);
12198 return;
12199
12200 case 0x3:
12201 gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0, gen_helper_gvec_usdot_b);
12202 return;
12203
12204 case 0x04:
12205 gen_gvec_op4_ool(s, 1, rd, rn, rm, rd, 0,
12206 u ? gen_helper_gvec_ummla_b
12207 : gen_helper_gvec_smmla_b);
12208 return;
12209 case 0x05:
12210 gen_gvec_op4_ool(s, 1, rd, rn, rm, rd, 0, gen_helper_gvec_usmmla_b);
12211 return;
12212
12213 case 0x8:
12214 case 0x9:
12215 case 0xa:
12216 case 0xb:
12217 rot = extract32(opcode, 0, 2);
12218 switch (size) {
12219 case 1:
12220 gen_gvec_op4_fpst(s, is_q, rd, rn, rm, rd, true, rot,
12221 gen_helper_gvec_fcmlah);
12222 break;
12223 case 2:
12224 gen_gvec_op4_fpst(s, is_q, rd, rn, rm, rd, false, rot,
12225 gen_helper_gvec_fcmlas);
12226 break;
12227 case 3:
12228 gen_gvec_op4_fpst(s, is_q, rd, rn, rm, rd, false, rot,
12229 gen_helper_gvec_fcmlad);
12230 break;
12231 default:
12232 g_assert_not_reached();
12233 }
12234 return;
12235
12236 case 0xc:
12237 case 0xe:
12238 rot = extract32(opcode, 1, 1);
12239 switch (size) {
12240 case 1:
12241 gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
12242 gen_helper_gvec_fcaddh);
12243 break;
12244 case 2:
12245 gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
12246 gen_helper_gvec_fcadds);
12247 break;
12248 case 3:
12249 gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
12250 gen_helper_gvec_fcaddd);
12251 break;
12252 default:
12253 g_assert_not_reached();
12254 }
12255 return;
12256
12257 case 0xd:
12258 gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0, gen_helper_gvec_bfmmla);
12259 return;
12260 case 0xf:
12261 switch (size) {
12262 case 1:
12263 gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0, gen_helper_gvec_bfdot);
12264 break;
12265 case 3:
12266 gen_gvec_op4_fpst(s, 1, rd, rn, rm, rd, false, is_q,
12267 gen_helper_gvec_bfmlal);
12268 break;
12269 default:
12270 g_assert_not_reached();
12271 }
12272 return;
12273
12274 default:
12275 g_assert_not_reached();
12276 }
12277}
12278
static void handle_2misc_widening(DisasContext *s, int opcode, bool is_q,
                                  int size, int rn, int rd)
{
    /* Handle 2-reg-misc ops which are widening (so each element in
     * the input becomes a 2*size element in the output): FCVTL.
     * The Q bit (is_q) selects the low or high half of the source.
     */
    int pass;

    if (size == 3) {
        /* 32 -> 64 bit fp conversion */
        TCGv_i64 tcg_res[2];
        int srcelt = is_q ? 2 : 0;

        for (pass = 0; pass < 2; pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();
            tcg_res[pass] = tcg_temp_new_i64();

            read_vec_element_i32(s, tcg_op, rn, srcelt + pass, MO_32);
            gen_helper_vfp_fcvtds(tcg_res[pass], tcg_op, cpu_env);
            tcg_temp_free_i32(tcg_op);
        }
        /* Writeback deferred until all reads are done (rd may equal rn). */
        for (pass = 0; pass < 2; pass++) {
            write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
            tcg_temp_free_i64(tcg_res[pass]);
        }
    } else {
        /* 16 -> 32 bit fp conversion */
        int srcelt = is_q ? 4 : 0;
        TCGv_i32 tcg_res[4];
        TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
        TCGv_i32 ahp = get_ahp_flag();

        for (pass = 0; pass < 4; pass++) {
            tcg_res[pass] = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_res[pass], rn, srcelt + pass, MO_16);
            gen_helper_vfp_fcvt_f16_to_f32(tcg_res[pass], tcg_res[pass],
                                           fpst, ahp);
        }
        for (pass = 0; pass < 4; pass++) {
            write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32);
            tcg_temp_free_i32(tcg_res[pass]);
        }

        tcg_temp_free_ptr(fpst);
        tcg_temp_free_i32(ahp);
    }
}
12328
static void handle_rev(DisasContext *s, int opcode, bool u,
                       bool is_q, int size, int rn, int rd)
{
    /* Element reversal: REV64, REV32, REV16.
     * op/size together determine the group size within which element
     * order is reversed.
     */
    int op = (opcode << 1) | u;
    int opsz = op + size;
    int grp_size = 3 - opsz; /* 1 means 16, 2 means 32, 3 means 64 */
    int dsize = is_q ? 128 : 64;
    int i;

    if (opsz >= 3) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (size == 0) {
        /* Special case bytes: reverse with a byte-swap op per group */
        int groups = dsize / (8 << grp_size);

        for (i = 0; i < groups; i++) {
            TCGv_i64 tcg_tmp = tcg_temp_new_i64();

            read_vec_element(s, tcg_tmp, rn, i, grp_size);
            switch (grp_size) {
            case MO_16:
                tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp, TCG_BSWAP_IZ);
                break;
            case MO_32:
                tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp, TCG_BSWAP_IZ);
                break;
            case MO_64:
                tcg_gen_bswap64_i64(tcg_tmp, tcg_tmp);
                break;
            default:
                g_assert_not_reached();
            }
            write_vec_element(s, tcg_tmp, rd, i, grp_size);
            tcg_temp_free_i64(tcg_tmp);
        }
        clear_vec_high(s, is_q, rd);
    } else {
        /* Larger elements: deposit each source element at its
         * reversed position, accumulating the low and high 64 bits
         * of the result separately before writeback.
         */
        int revmask = (1 << grp_size) - 1;
        int esize = 8 << size;
        int elements = dsize / esize;
        TCGv_i64 tcg_rn = tcg_temp_new_i64();
        TCGv_i64 tcg_rd = tcg_const_i64(0);
        TCGv_i64 tcg_rd_hi = tcg_const_i64(0);

        for (i = 0; i < elements; i++) {
            /* XOR with revmask flips an element's index within its group */
            int e_rev = (i & 0xf) ^ revmask;
            int off = e_rev * esize;
            read_vec_element(s, tcg_rn, rn, i, size);
            if (off >= 64) {
                tcg_gen_deposit_i64(tcg_rd_hi, tcg_rd_hi,
                                    tcg_rn, off - 64, esize);
            } else {
                tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, off, esize);
            }
        }
        write_vec_element(s, tcg_rd, rd, 0, MO_64);
        write_vec_element(s, tcg_rd_hi, rd, 1, MO_64);

        tcg_temp_free_i64(tcg_rd_hi);
        tcg_temp_free_i64(tcg_rd);
        tcg_temp_free_i64(tcg_rn);
    }
}
12399
static void handle_2misc_pairwise(DisasContext *s, int opcode, bool u,
                                  bool is_q, int size, int rn, int rd)
{
    /* Implement the pairwise operations from 2-misc:
     * SADDLP, UADDLP, SADALP, UADALP.
     * These all add pairs of elements in the input to produce a
     * double-width result element in the output (possibly accumulating).
     */
    bool accum = (opcode == 0x6);
    int maxpass = is_q ? 2 : 1;
    int pass;
    TCGv_i64 tcg_res[2];

    if (size == 2) {
        /* 32 + 32 -> 64 op */
        MemOp memop = size + (u ? 0 : MO_SIGN);

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();

            tcg_res[pass] = tcg_temp_new_i64();

            read_vec_element(s, tcg_op1, rn, pass * 2, memop);
            read_vec_element(s, tcg_op2, rn, pass * 2 + 1, memop);
            tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2);
            if (accum) {
                read_vec_element(s, tcg_op1, rd, pass, MO_64);
                tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
            }

            tcg_temp_free_i64(tcg_op1);
            tcg_temp_free_i64(tcg_op2);
        }
    } else {
        /* 8- and 16-bit elements: helpers do the whole 64-bit lane */
        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();
            NeonGenOne64OpFn *genfn;
            static NeonGenOne64OpFn * const fns[2][2] = {
                { gen_helper_neon_addlp_s8,  gen_helper_neon_addlp_u8 },
                { gen_helper_neon_addlp_s16, gen_helper_neon_addlp_u16 },
            };

            genfn = fns[size][u];

            tcg_res[pass] = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);
            genfn(tcg_res[pass], tcg_op);

            if (accum) {
                read_vec_element(s, tcg_op, rd, pass, MO_64);
                if (size == 0) {
                    gen_helper_neon_addl_u16(tcg_res[pass],
                                             tcg_res[pass], tcg_op);
                } else {
                    gen_helper_neon_addl_u32(tcg_res[pass],
                                             tcg_res[pass], tcg_op);
                }
            }
            tcg_temp_free_i64(tcg_op);
        }
    }
    if (!is_q) {
        /* Non-Q ops zero the high half of the destination */
        tcg_res[1] = tcg_const_i64(0);
    }
    for (pass = 0; pass < 2; pass++) {
        write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
        tcg_temp_free_i64(tcg_res[pass]);
    }
}
12471
12472static void handle_shll(DisasContext *s, bool is_q, int size, int rn, int rd)
12473{
12474
12475 int pass;
12476 int part = is_q ? 2 : 0;
12477 TCGv_i64 tcg_res[2];
12478
12479 for (pass = 0; pass < 2; pass++) {
12480 static NeonGenWidenFn * const widenfns[3] = {
12481 gen_helper_neon_widen_u8,
12482 gen_helper_neon_widen_u16,
12483 tcg_gen_extu_i32_i64,
12484 };
12485 NeonGenWidenFn *widenfn = widenfns[size];
12486 TCGv_i32 tcg_op = tcg_temp_new_i32();
12487
12488 read_vec_element_i32(s, tcg_op, rn, part + pass, MO_32);
12489 tcg_res[pass] = tcg_temp_new_i64();
12490 widenfn(tcg_res[pass], tcg_op);
12491 tcg_gen_shli_i64(tcg_res[pass], tcg_res[pass], 8 << size);
12492
12493 tcg_temp_free_i32(tcg_op);
12494 }
12495
12496 for (pass = 0; pass < 2; pass++) {
12497 write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
12498 tcg_temp_free_i64(tcg_res[pass]);
12499 }
12500}
12501
12502
12503
12504
12505
12506
12507
/* AdvSIMD two reg misc
 *   31  30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
 * | 0 | Q | U | 0 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
 */
static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
{
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    bool u = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    bool need_fpstatus = false;
    bool need_rmode = false;
    int rmode = -1;
    TCGv_i32 tcg_rmode;
    TCGv_ptr tcg_fpstatus;

    switch (opcode) {
    case 0x0: /* REV64, REV32 */
    case 0x1: /* REV16 */
        handle_rev(s, opcode, u, is_q, size, rn, rd);
        return;
    case 0x5: /* CNT, NOT, RBIT */
        if (u && size == 0) {
            /* NOT */
            break;
        } else if (u && size == 1) {
            /* RBIT */
            break;
        } else if (!u && size == 0) {
            /* CNT */
            break;
        }
        unallocated_encoding(s);
        return;
    case 0x12: /* XTN, XTN2, SQXTUN, SQXTUN2 */
    case 0x14: /* SQXTN, SQXTN2, UQXTN, UQXTN2 */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        /* Narrowing ops: result elements are half the source width. */
        handle_2misc_narrow(s, false, opcode, u, is_q, size, rn, rd);
        return;
    case 0x4: /* CLS, CLZ */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x2: /* SADDLP, UADDLP */
    case 0x6: /* SADALP, UADALP */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_2misc_pairwise(s, opcode, u, is_q, size, rn, rd);
        return;
    case 0x13: /* SHLL, SHLL2 */
        if (u == 0 || size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_shll(s, is_q, size, rn, rd);
        return;
    case 0xa: /* CMLT */
        if (u == 1) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x8: /* CMGT, CMGE */
    case 0x9: /* CMEQ, CMLE */
    case 0xb: /* ABS, NEG */
        if (size == 3 && !is_q) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x3: /* SUQADD, USQADD */
        if (size == 3 && !is_q) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_2misc_satacc(s, false, u, is_q, size, rn, rd);
        return;
    case 0x7: /* SQABS, SQNEG */
        if (size == 3 && !is_q) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0xc ... 0xf:
    case 0x16 ... 0x1f:
    {
        /* Floating point: U, size[1] and opcode indicate operation;
         * size[0] indicates single or double precision.  Re-encode the
         * opcode so the inner switch below can dispatch on one value.
         */
        int is_double = extract32(size, 0, 1);
        opcode |= (extract32(size, 1, 1) << 5) | (u << 6);
        size = is_double ? 3 : 2;
        switch (opcode) {
        case 0x2f: /* FABS */
        case 0x6f: /* FNEG */
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x1d: /* SCVTF */
        case 0x5d: /* UCVTF */
        {
            bool is_signed = (opcode == 0x1d) ? true : false;
            int elements = is_double ? 2 : is_q ? 4 : 2;
            if (is_double && !is_q) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_simd_intfp_conv(s, rd, rn, elements, is_signed, 0, size);
            return;
        }
        case 0x2c: /* FCMGT (zero) */
        case 0x2d: /* FCMEQ (zero) */
        case 0x2e: /* FCMLT (zero) */
        case 0x6c: /* FCMGE (zero) */
        case 0x6d: /* FCMLE (zero) */
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            handle_2misc_fcmp_zero(s, opcode, false, u, is_q, size, rn, rd);
            return;
        case 0x7f: /* FSQRT */
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x1a: /* FCVTNS */
        case 0x1b: /* FCVTMS */
        case 0x3a: /* FCVTPS */
        case 0x3b: /* FCVTZS */
        case 0x5a: /* FCVTNU */
        case 0x5b: /* FCVTMU */
        case 0x7a: /* FCVTPU */
        case 0x7b: /* FCVTZU */
            need_fpstatus = true;
            need_rmode = true;
            /* Rounding mode is encoded in opcode bits 5 and 0. */
            rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x5c: /* FCVTAU */
        case 0x1c: /* FCVTAS */
            need_fpstatus = true;
            need_rmode = true;
            rmode = FPROUNDING_TIEAWAY;
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x3c: /* URECPE */
            if (size == 3) {
                unallocated_encoding(s);
                return;
            }
            /* fall through */
        case 0x3d: /* FRECPE */
        case 0x7d: /* FRSQRTE */
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_reciprocal(s, opcode, false, u, is_q, size, rn, rd);
            return;
        case 0x56: /* FCVTXN, FCVTXN2 */
            if (size == 2) {
                unallocated_encoding(s);
                return;
            }
            /* fall through */
        case 0x16: /* FCVTN, FCVTN2 */
            /* handle_2misc_narrow does a 2*size -> size operation,
             * but these instructions encode the source size rather
             * than the destination size, hence size - 1 below.
             */
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_narrow(s, false, opcode, 0, is_q, size - 1, rn, rd);
            return;
        case 0x36: /* BFCVTN, BFCVTN2 */
            if (!dc_isar_feature(aa64_bf16, s) || size != 2) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_narrow(s, false, opcode, 0, is_q, size - 1, rn, rd);
            return;
        case 0x17: /* FCVTL, FCVTL2 */
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_widening(s, opcode, is_q, size, rn, rd);
            return;
        case 0x18: /* FRINTN */
        case 0x19: /* FRINTM */
        case 0x38: /* FRINTP */
        case 0x39: /* FRINTZ */
            need_rmode = true;
            rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
            /* fall through */
        case 0x59: /* FRINTX */
        case 0x79: /* FRINTI */
            need_fpstatus = true;
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x58: /* FRINTA */
            need_rmode = true;
            rmode = FPROUNDING_TIEAWAY;
            need_fpstatus = true;
            if (size == 3 && !is_q) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x7c: /* URSQRTE */
            if (size == 3) {
                unallocated_encoding(s);
                return;
            }
            break;
        case 0x1e: /* FRINT32Z */
        case 0x1f: /* FRINT64Z */
            need_rmode = true;
            rmode = FPROUNDING_ZERO;
            /* fall through */
        case 0x5e: /* FRINT32X */
        case 0x5f: /* FRINT64X */
            need_fpstatus = true;
            if ((size == 3 && !is_q) || !dc_isar_feature(aa64_frint, s)) {
                unallocated_encoding(s);
                return;
            }
            break;
        default:
            unallocated_encoding(s);
            return;
        }
        break;
    }
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (need_fpstatus || need_rmode) {
        tcg_fpstatus = fpstatus_ptr(FPST_FPCR);
    } else {
        tcg_fpstatus = NULL;
    }
    if (need_rmode) {
        /* Swap in the requested rounding mode; tcg_rmode then holds the
         * old mode so it can be restored at the end of the function.
         */
        tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
    } else {
        tcg_rmode = NULL;
    }

    /* Integer ops with a whole-vector (gvec) expansion. */
    switch (opcode) {
    case 0x5:
        if (u && size == 0) { /* NOT */
            gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_not, 0);
            return;
        }
        break;
    case 0x8: /* CMGT, CMGE */
        if (u) {
            gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cge0, size);
        } else {
            gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cgt0, size);
        }
        return;
    case 0x9: /* CMEQ, CMLE */
        if (u) {
            gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cle0, size);
        } else {
            gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_ceq0, size);
        }
        return;
    case 0xa: /* CMLT */
        gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_clt0, size);
        return;
    case 0xb: /* ABS, NEG */
        if (u) {
            gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_neg, size);
        } else {
            gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_abs, size);
        }
        return;
    }

    if (size == 3) {
        /* 64-bit elements: share the per-element code with scalar 2misc. */
        int pass;

        /* All the size==3 cases that survive decode above require Q=1
         * (the 64x1 forms were rejected as unallocated), so exactly
         * two passes are needed here.
         */
        tcg_debug_assert(is_q);
        for (pass = 0; pass < 2; pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();
            TCGv_i64 tcg_res = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);

            handle_2misc_64(s, opcode, u, tcg_res, tcg_op,
                            tcg_rmode, tcg_fpstatus);

            write_vec_element(s, tcg_res, rd, pass, MO_64);

            tcg_temp_free_i64(tcg_res);
            tcg_temp_free_i64(tcg_op);
        }
    } else {
        int pass;

        for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op, rn, pass, MO_32);

            if (size == 2) {
                /* Special cases for 32 bit elements */
                switch (opcode) {
                case 0x4: /* CLS (signed), CLZ (unsigned) */
                    if (u) {
                        tcg_gen_clzi_i32(tcg_res, tcg_op, 32);
                    } else {
                        tcg_gen_clrsb_i32(tcg_res, tcg_op);
                    }
                    break;
                case 0x7: /* SQABS, SQNEG */
                    if (u) {
                        gen_helper_neon_qneg_s32(tcg_res, cpu_env, tcg_op);
                    } else {
                        gen_helper_neon_qabs_s32(tcg_res, cpu_env, tcg_op);
                    }
                    break;
                case 0x2f: /* FABS */
                    gen_helper_vfp_abss(tcg_res, tcg_op);
                    break;
                case 0x6f: /* FNEG */
                    gen_helper_vfp_negs(tcg_res, tcg_op);
                    break;
                case 0x7f: /* FSQRT */
                    gen_helper_vfp_sqrts(tcg_res, tcg_op, cpu_env);
                    break;
                case 0x1a: /* FCVTNS */
                case 0x1b: /* FCVTMS */
                case 0x1c: /* FCVTAS */
                case 0x3a: /* FCVTPS */
                case 0x3b: /* FCVTZS */
                {
                    /* fp -> signed int; rounding mode was set above */
                    TCGv_i32 tcg_shift = tcg_const_i32(0);
                    gen_helper_vfp_tosls(tcg_res, tcg_op,
                                         tcg_shift, tcg_fpstatus);
                    tcg_temp_free_i32(tcg_shift);
                    break;
                }
                case 0x5a: /* FCVTNU */
                case 0x5b: /* FCVTMU */
                case 0x5c: /* FCVTAU */
                case 0x7a: /* FCVTPU */
                case 0x7b: /* FCVTZU */
                {
                    /* fp -> unsigned int; rounding mode was set above */
                    TCGv_i32 tcg_shift = tcg_const_i32(0);
                    gen_helper_vfp_touls(tcg_res, tcg_op,
                                         tcg_shift, tcg_fpstatus);
                    tcg_temp_free_i32(tcg_shift);
                    break;
                }
                case 0x18: /* FRINTN */
                case 0x19: /* FRINTM */
                case 0x38: /* FRINTP */
                case 0x39: /* FRINTZ */
                case 0x58: /* FRINTA */
                case 0x79: /* FRINTI */
                    gen_helper_rints(tcg_res, tcg_op, tcg_fpstatus);
                    break;
                case 0x59: /* FRINTX */
                    gen_helper_rints_exact(tcg_res, tcg_op, tcg_fpstatus);
                    break;
                case 0x7c: /* URSQRTE */
                    gen_helper_rsqrte_u32(tcg_res, tcg_op);
                    break;
                case 0x1e: /* FRINT32Z */
                case 0x5e: /* FRINT32X */
                    gen_helper_frint32_s(tcg_res, tcg_op, tcg_fpstatus);
                    break;
                case 0x1f: /* FRINT64Z */
                case 0x5f: /* FRINT64X */
                    gen_helper_frint64_s(tcg_res, tcg_op, tcg_fpstatus);
                    break;
                default:
                    g_assert_not_reached();
                }
            } else {
                /* Use helpers for 8 and 16 bit elements */
                switch (opcode) {
                case 0x5: /* CNT, RBIT */
                    /* These are byte-wide ops, so they work on the whole
                     * 32-bit chunk regardless of element size.
                     */
                    if (u) {
                        gen_helper_neon_rbit_u8(tcg_res, tcg_op);
                    } else {
                        gen_helper_neon_cnt_u8(tcg_res, tcg_op);
                    }
                    break;
                case 0x7: /* SQABS, SQNEG */
                {
                    NeonGenOneOpEnvFn *genfn;
                    static NeonGenOneOpEnvFn * const fns[2][2] = {
                        { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 },
                        { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 },
                    };
                    genfn = fns[size][u];
                    genfn(tcg_res, cpu_env, tcg_op);
                    break;
                }
                case 0x4: /* CLS, CLZ */
                    if (u) {
                        if (size == 0) {
                            gen_helper_neon_clz_u8(tcg_res, tcg_op);
                        } else {
                            gen_helper_neon_clz_u16(tcg_res, tcg_op);
                        }
                    } else {
                        if (size == 0) {
                            gen_helper_neon_cls_s8(tcg_res, tcg_op);
                        } else {
                            gen_helper_neon_cls_s16(tcg_res, tcg_op);
                        }
                    }
                    break;
                default:
                    g_assert_not_reached();
                }
            }

            write_vec_element_i32(s, tcg_res, rd, pass, MO_32);

            tcg_temp_free_i32(tcg_res);
            tcg_temp_free_i32(tcg_op);
        }
    }
    clear_vec_high(s, is_q, rd);

    if (need_rmode) {
        /* Restore the rounding mode saved above. */
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
        tcg_temp_free_i32(tcg_rmode);
    }
    if (need_fpstatus) {
        tcg_temp_free_ptr(tcg_fpstatus);
    }
}
13001
13002
13003
13004
13005
13006
13007
13008
13009
13010
13011
13012
13013
13014
13015
/* AdvSIMD [scalar] two register miscellaneous (FP16)
 *
 *   31 30 29 28  27     24  23 22 21       17 16    12 11 10 9    5 4    0
 * +---+---+---+---+---------+---+-------------+--------+-----+------+------+
 * | 0 | Q | U | S | 1 1 1 0 | a | 1 1 1 1 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +---+---+---+---+---------+---+-------------+--------+-----+------+------+
 *
 * fpop = deposit(opcode, 5:6, a:u) gives a single switchable value.
 */
static void disas_simd_two_reg_misc_fp16(DisasContext *s, uint32_t insn)
{
    int fpop, opcode, a, u;
    int rn, rd;
    bool is_q;
    bool is_scalar;
    bool only_in_vector = false;

    int pass;
    TCGv_i32 tcg_rmode = NULL;
    TCGv_ptr tcg_fpstatus = NULL;
    bool need_rmode = false;
    bool need_fpst = true;
    int rmode;

    if (!dc_isar_feature(aa64_fp16, s)) {
        unallocated_encoding(s);
        return;
    }

    rd = extract32(insn, 0, 5);
    rn = extract32(insn, 5, 5);

    a = extract32(insn, 23, 1);
    u = extract32(insn, 29, 1);
    is_scalar = extract32(insn, 28, 1);
    is_q = extract32(insn, 30, 1);

    opcode = extract32(insn, 12, 5);
    /* Fold the a and u bits into the opcode for a single dispatch value. */
    fpop = deposit32(opcode, 5, 1, a);
    fpop = deposit32(fpop, 6, 1, u);

    switch (fpop) {
    case 0x1d: /* SCVTF */
    case 0x5d: /* UCVTF */
    {
        int elements;

        if (is_scalar) {
            elements = 1;
        } else {
            elements = (is_q ? 8 : 4);
        }

        if (!fp_access_check(s)) {
            return;
        }
        handle_simd_intfp_conv(s, rd, rn, elements, !u, 0, MO_16);
        return;
    }
    break;
    case 0x2c: /* FCMGT (zero) */
    case 0x2d: /* FCMEQ (zero) */
    case 0x2e: /* FCMLT (zero) */
    case 0x6c: /* FCMGE (zero) */
    case 0x6d: /* FCMLE (zero) */
        handle_2misc_fcmp_zero(s, fpop, is_scalar, 0, is_q, MO_16, rn, rd);
        return;
    case 0x3d: /* FRECPE */
    case 0x3f: /* FRECPX */
        break;
    case 0x18: /* FRINTN */
        need_rmode = true;
        only_in_vector = true;
        rmode = FPROUNDING_TIEEVEN;
        break;
    case 0x19: /* FRINTM */
        need_rmode = true;
        only_in_vector = true;
        rmode = FPROUNDING_NEGINF;
        break;
    case 0x38: /* FRINTP */
        need_rmode = true;
        only_in_vector = true;
        rmode = FPROUNDING_POSINF;
        break;
    case 0x39: /* FRINTZ */
        need_rmode = true;
        only_in_vector = true;
        rmode = FPROUNDING_ZERO;
        break;
    case 0x58: /* FRINTA */
        need_rmode = true;
        only_in_vector = true;
        rmode = FPROUNDING_TIEAWAY;
        break;
    case 0x59: /* FRINTX */
    case 0x79: /* FRINTI */
        only_in_vector = true;
        /* current rounding mode */
        break;
    case 0x1a: /* FCVTNS */
        need_rmode = true;
        rmode = FPROUNDING_TIEEVEN;
        break;
    case 0x1b: /* FCVTMS */
        need_rmode = true;
        rmode = FPROUNDING_NEGINF;
        break;
    case 0x1c: /* FCVTAS */
        need_rmode = true;
        rmode = FPROUNDING_TIEAWAY;
        break;
    case 0x3a: /* FCVTPS */
        need_rmode = true;
        rmode = FPROUNDING_POSINF;
        break;
    case 0x3b: /* FCVTZS */
        need_rmode = true;
        rmode = FPROUNDING_ZERO;
        break;
    case 0x5a: /* FCVTNU */
        need_rmode = true;
        rmode = FPROUNDING_TIEEVEN;
        break;
    case 0x5b: /* FCVTMU */
        need_rmode = true;
        rmode = FPROUNDING_NEGINF;
        break;
    case 0x5c: /* FCVTAU */
        need_rmode = true;
        rmode = FPROUNDING_TIEAWAY;
        break;
    case 0x7a: /* FCVTPU */
        need_rmode = true;
        rmode = FPROUNDING_POSINF;
        break;
    case 0x7b: /* FCVTZU */
        need_rmode = true;
        rmode = FPROUNDING_ZERO;
        break;
    case 0x2f: /* FABS */
    case 0x6f: /* FNEG */
        /* Pure bit manipulation below; no FP status needed. */
        need_fpst = false;
        break;
    case 0x7d: /* FRSQRTE */
    case 0x7f: /* FSQRT (vector) */
        break;
    default:
        unallocated_encoding(s);
        return;
    }


    /* Check additional constraints for the scalar encoding */
    if (is_scalar) {
        if (!is_q) {
            unallocated_encoding(s);
            return;
        }
        /* FRINTxx is only in the vector form */
        if (only_in_vector) {
            unallocated_encoding(s);
            return;
        }
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (need_rmode || need_fpst) {
        tcg_fpstatus = fpstatus_ptr(FPST_FPCR_F16);
    }

    if (need_rmode) {
        /* Swap in the requested rounding mode; tcg_rmode keeps the old
         * one so it can be restored at the end of the function.
         */
        tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
    }

    if (is_scalar) {
        TCGv_i32 tcg_op = read_fp_hreg(s, rn);
        TCGv_i32 tcg_res = tcg_temp_new_i32();

        switch (fpop) {
        case 0x1a: /* FCVTNS */
        case 0x1b: /* FCVTMS */
        case 0x1c: /* FCVTAS */
        case 0x3a: /* FCVTPS */
        case 0x3b: /* FCVTZS */
            gen_helper_advsimd_f16tosinth(tcg_res, tcg_op, tcg_fpstatus);
            break;
        case 0x3d: /* FRECPE */
            gen_helper_recpe_f16(tcg_res, tcg_op, tcg_fpstatus);
            break;
        case 0x3f: /* FRECPX */
            gen_helper_frecpx_f16(tcg_res, tcg_op, tcg_fpstatus);
            break;
        case 0x5a: /* FCVTNU */
        case 0x5b: /* FCVTMU */
        case 0x5c: /* FCVTAU */
        case 0x7a: /* FCVTPU */
        case 0x7b: /* FCVTZU */
            gen_helper_advsimd_f16touinth(tcg_res, tcg_op, tcg_fpstatus);
            break;
        case 0x6f: /* FNEG */
            tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
            break;
        case 0x7d: /* FRSQRTE */
            gen_helper_rsqrte_f16(tcg_res, tcg_op, tcg_fpstatus);
            break;
        default:
            g_assert_not_reached();
        }

        /* limit any sign extension going on */
        tcg_gen_andi_i32(tcg_res, tcg_res, 0xffff);
        write_fp_sreg(s, rd, tcg_res);

        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_op);
    } else {
        for (pass = 0; pass < (is_q ? 8 : 4); pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op, rn, pass, MO_16);

            switch (fpop) {
            case 0x1a: /* FCVTNS */
            case 0x1b: /* FCVTMS */
            case 0x1c: /* FCVTAS */
            case 0x3a: /* FCVTPS */
            case 0x3b: /* FCVTZS */
                gen_helper_advsimd_f16tosinth(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x3d: /* FRECPE */
                gen_helper_recpe_f16(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x5a: /* FCVTNU */
            case 0x5b: /* FCVTMU */
            case 0x5c: /* FCVTAU */
            case 0x7a: /* FCVTPU */
            case 0x7b: /* FCVTZU */
                gen_helper_advsimd_f16touinth(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x18: /* FRINTN */
            case 0x19: /* FRINTM */
            case 0x38: /* FRINTP */
            case 0x39: /* FRINTZ */
            case 0x58: /* FRINTA */
            case 0x79: /* FRINTI */
                gen_helper_advsimd_rinth(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x59: /* FRINTX */
                gen_helper_advsimd_rinth_exact(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x2f: /* FABS */
                tcg_gen_andi_i32(tcg_res, tcg_op, 0x7fff);
                break;
            case 0x6f: /* FNEG */
                tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
                break;
            case 0x7d: /* FRSQRTE */
                gen_helper_rsqrte_f16(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x7f: /* FSQRT */
                gen_helper_sqrt_f16(tcg_res, tcg_op, tcg_fpstatus);
                break;
            default:
                g_assert_not_reached();
            }

            write_vec_element_i32(s, tcg_res, rd, pass, MO_16);

            tcg_temp_free_i32(tcg_res);
            tcg_temp_free_i32(tcg_op);
        }

        clear_vec_high(s, is_q, rd);
    }

    if (tcg_rmode) {
        /* Restore the rounding mode saved above. */
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
        tcg_temp_free_i32(tcg_rmode);
    }

    if (tcg_fpstatus) {
        tcg_temp_free_ptr(tcg_fpstatus);
    }
}
13297
13298
13299
13300
13301
13302
13303
13304
13305
13306
13307
13308
13309static void disas_simd_indexed(DisasContext *s, uint32_t insn)
13310{
13311
13312
13313
13314
13315
13316
13317
13318 bool is_scalar = extract32(insn, 28, 1);
13319 bool is_q = extract32(insn, 30, 1);
13320 bool u = extract32(insn, 29, 1);
13321 int size = extract32(insn, 22, 2);
13322 int l = extract32(insn, 21, 1);
13323 int m = extract32(insn, 20, 1);
13324
13325 int rm = extract32(insn, 16, 4);
13326 int opcode = extract32(insn, 12, 4);
13327 int h = extract32(insn, 11, 1);
13328 int rn = extract32(insn, 5, 5);
13329 int rd = extract32(insn, 0, 5);
13330 bool is_long = false;
13331 int is_fp = 0;
13332 bool is_fp16 = false;
13333 int index;
13334 TCGv_ptr fpst;
13335
13336 switch (16 * u + opcode) {
13337 case 0x08:
13338 case 0x10:
13339 case 0x14:
13340 if (is_scalar) {
13341 unallocated_encoding(s);
13342 return;
13343 }
13344 break;
13345 case 0x02:
13346 case 0x12:
13347 case 0x06:
13348 case 0x16:
13349 case 0x0a:
13350 case 0x1a:
13351 if (is_scalar) {
13352 unallocated_encoding(s);
13353 return;
13354 }
13355 is_long = true;
13356 break;
13357 case 0x03:
13358 case 0x07:
13359 case 0x0b:
13360 is_long = true;
13361 break;
13362 case 0x0c:
13363 case 0x0d:
13364 break;
13365 case 0x01:
13366 case 0x05:
13367 case 0x09:
13368 case 0x19:
13369 is_fp = 1;
13370 break;
13371 case 0x1d:
13372 case 0x1f:
13373 if (!dc_isar_feature(aa64_rdm, s)) {
13374 unallocated_encoding(s);
13375 return;
13376 }
13377 break;
13378 case 0x0e:
13379 case 0x1e:
13380 if (is_scalar || size != MO_32 || !dc_isar_feature(aa64_dp, s)) {
13381 unallocated_encoding(s);
13382 return;
13383 }
13384 break;
13385 case 0x0f:
13386 switch (size) {
13387 case 0:
13388 case 2:
13389 if (is_scalar || !dc_isar_feature(aa64_i8mm, s)) {
13390 unallocated_encoding(s);
13391 return;
13392 }
13393 size = MO_32;
13394 break;
13395 case 1:
13396 if (is_scalar || !dc_isar_feature(aa64_bf16, s)) {
13397 unallocated_encoding(s);
13398 return;
13399 }
13400 size = MO_32;
13401 break;
13402 case 3:
13403 if (is_scalar || !dc_isar_feature(aa64_bf16, s)) {
13404 unallocated_encoding(s);
13405 return;
13406 }
13407
13408 size = MO_16;
13409 break;
13410 default:
13411 unallocated_encoding(s);
13412 return;
13413 }
13414 break;
13415 case 0x11:
13416 case 0x13:
13417 case 0x15:
13418 case 0x17:
13419 if (is_scalar || !dc_isar_feature(aa64_fcma, s)) {
13420 unallocated_encoding(s);
13421 return;
13422 }
13423 is_fp = 2;
13424 break;
13425 case 0x00:
13426 case 0x04:
13427 case 0x18:
13428 case 0x1c:
13429 if (is_scalar || size != MO_32 || !dc_isar_feature(aa64_fhm, s)) {
13430 unallocated_encoding(s);
13431 return;
13432 }
13433 size = MO_16;
13434
13435 break;
13436 default:
13437 unallocated_encoding(s);
13438 return;
13439 }
13440
13441 switch (is_fp) {
13442 case 1:
13443
13444 switch (size) {
13445 case 0:
13446 size = MO_16;
13447 is_fp16 = true;
13448 break;
13449 case MO_32:
13450 case MO_64:
13451 break;
13452 default:
13453 unallocated_encoding(s);
13454 return;
13455 }
13456 break;
13457
13458 case 2:
13459
13460 size += 1;
13461 switch (size) {
13462 case MO_32:
13463 if (h && !is_q) {
13464 unallocated_encoding(s);
13465 return;
13466 }
13467 is_fp16 = true;
13468 break;
13469 case MO_64:
13470 break;
13471 default:
13472 unallocated_encoding(s);
13473 return;
13474 }
13475 break;
13476
13477 default:
13478 switch (size) {
13479 case MO_8:
13480 case MO_64:
13481 unallocated_encoding(s);
13482 return;
13483 }
13484 break;
13485 }
13486 if (is_fp16 && !dc_isar_feature(aa64_fp16, s)) {
13487 unallocated_encoding(s);
13488 return;
13489 }
13490
13491
13492 switch (size) {
13493 case MO_16:
13494 index = h << 2 | l << 1 | m;
13495 break;
13496 case MO_32:
13497 index = h << 1 | l;
13498 rm |= m << 4;
13499 break;
13500 case MO_64:
13501 if (l || !is_q) {
13502 unallocated_encoding(s);
13503 return;
13504 }
13505 index = h;
13506 rm |= m << 4;
13507 break;
13508 default:
13509 g_assert_not_reached();
13510 }
13511
13512 if (!fp_access_check(s)) {
13513 return;
13514 }
13515
13516 if (is_fp) {
13517 fpst = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
13518 } else {
13519 fpst = NULL;
13520 }
13521
13522 switch (16 * u + opcode) {
13523 case 0x0e:
13524 case 0x1e:
13525 gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
13526 u ? gen_helper_gvec_udot_idx_b
13527 : gen_helper_gvec_sdot_idx_b);
13528 return;
13529 case 0x0f:
13530 switch (extract32(insn, 22, 2)) {
13531 case 0:
13532 gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
13533 gen_helper_gvec_sudot_idx_b);
13534 return;
13535 case 1:
13536 gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
13537 gen_helper_gvec_bfdot_idx);
13538 return;
13539 case 2:
13540 gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
13541 gen_helper_gvec_usdot_idx_b);
13542 return;
13543 case 3:
13544 gen_gvec_op4_fpst(s, 1, rd, rn, rm, rd, 0, (index << 1) | is_q,
13545 gen_helper_gvec_bfmlal_idx);
13546 return;
13547 }
13548 g_assert_not_reached();
13549 case 0x11:
13550 case 0x13:
13551 case 0x15:
13552 case 0x17:
13553 {
13554 int rot = extract32(insn, 13, 2);
13555 int data = (index << 2) | rot;
13556 tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, rd),
13557 vec_full_reg_offset(s, rn),
13558 vec_full_reg_offset(s, rm),
13559 vec_full_reg_offset(s, rd), fpst,
13560 is_q ? 16 : 8, vec_full_reg_size(s), data,
13561 size == MO_64
13562 ? gen_helper_gvec_fcmlas_idx
13563 : gen_helper_gvec_fcmlah_idx);
13564 tcg_temp_free_ptr(fpst);
13565 }
13566 return;
13567
13568 case 0x00:
13569 case 0x04:
13570 case 0x18:
13571 case 0x1c:
13572 {
13573 int is_s = extract32(opcode, 2, 1);
13574 int is_2 = u;
13575 int data = (index << 2) | (is_2 << 1) | is_s;
13576 tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
13577 vec_full_reg_offset(s, rn),
13578 vec_full_reg_offset(s, rm), cpu_env,
13579 is_q ? 16 : 8, vec_full_reg_size(s),
13580 data, gen_helper_gvec_fmlal_idx_a64);
13581 }
13582 return;
13583
13584 case 0x08:
13585 if (!is_long && !is_scalar) {
13586 static gen_helper_gvec_3 * const fns[3] = {
13587 gen_helper_gvec_mul_idx_h,
13588 gen_helper_gvec_mul_idx_s,
13589 gen_helper_gvec_mul_idx_d,
13590 };
13591 tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd),
13592 vec_full_reg_offset(s, rn),
13593 vec_full_reg_offset(s, rm),
13594 is_q ? 16 : 8, vec_full_reg_size(s),
13595 index, fns[size - 1]);
13596 return;
13597 }
13598 break;
13599
13600 case 0x10:
13601 if (!is_long && !is_scalar) {
13602 static gen_helper_gvec_4 * const fns[3] = {
13603 gen_helper_gvec_mla_idx_h,
13604 gen_helper_gvec_mla_idx_s,
13605 gen_helper_gvec_mla_idx_d,
13606 };
13607 tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
13608 vec_full_reg_offset(s, rn),
13609 vec_full_reg_offset(s, rm),
13610 vec_full_reg_offset(s, rd),
13611 is_q ? 16 : 8, vec_full_reg_size(s),
13612 index, fns[size - 1]);
13613 return;
13614 }
13615 break;
13616
13617 case 0x14:
13618 if (!is_long && !is_scalar) {
13619 static gen_helper_gvec_4 * const fns[3] = {
13620 gen_helper_gvec_mls_idx_h,
13621 gen_helper_gvec_mls_idx_s,
13622 gen_helper_gvec_mls_idx_d,
13623 };
13624 tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
13625 vec_full_reg_offset(s, rn),
13626 vec_full_reg_offset(s, rm),
13627 vec_full_reg_offset(s, rd),
13628 is_q ? 16 : 8, vec_full_reg_size(s),
13629 index, fns[size - 1]);
13630 return;
13631 }
13632 break;
13633 }
13634
13635 if (size == 3) {
13636 TCGv_i64 tcg_idx = tcg_temp_new_i64();
13637 int pass;
13638
13639 assert(is_fp && is_q && !is_long);
13640
13641 read_vec_element(s, tcg_idx, rm, index, MO_64);
13642
13643 for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
13644 TCGv_i64 tcg_op = tcg_temp_new_i64();
13645 TCGv_i64 tcg_res = tcg_temp_new_i64();
13646
13647 read_vec_element(s, tcg_op, rn, pass, MO_64);
13648
13649 switch (16 * u + opcode) {
13650 case 0x05:
13651
13652 gen_helper_vfp_negd(tcg_op, tcg_op);
13653
13654 case 0x01:
13655 read_vec_element(s, tcg_res, rd, pass, MO_64);
13656 gen_helper_vfp_muladdd(tcg_res, tcg_op, tcg_idx, tcg_res, fpst);
13657 break;
13658 case 0x09:
13659 gen_helper_vfp_muld(tcg_res, tcg_op, tcg_idx, fpst);
13660 break;
13661 case 0x19:
13662 gen_helper_vfp_mulxd(tcg_res, tcg_op, tcg_idx, fpst);
13663 break;
13664 default:
13665 g_assert_not_reached();
13666 }
13667
13668 write_vec_element(s, tcg_res, rd, pass, MO_64);
13669 tcg_temp_free_i64(tcg_op);
13670 tcg_temp_free_i64(tcg_res);
13671 }
13672
13673 tcg_temp_free_i64(tcg_idx);
13674 clear_vec_high(s, !is_scalar, rd);
13675 } else if (!is_long) {
13676
13677
13678
13679
13680 TCGv_i32 tcg_idx = tcg_temp_new_i32();
13681 int pass, maxpasses;
13682
13683 if (is_scalar) {
13684 maxpasses = 1;
13685 } else {
13686 maxpasses = is_q ? 4 : 2;
13687 }
13688
13689 read_vec_element_i32(s, tcg_idx, rm, index, size);
13690
13691 if (size == 1 && !is_scalar) {
13692
13693
13694
13695
13696 tcg_gen_deposit_i32(tcg_idx, tcg_idx, tcg_idx, 16, 16);
13697 }
13698
13699 for (pass = 0; pass < maxpasses; pass++) {
13700 TCGv_i32 tcg_op = tcg_temp_new_i32();
13701 TCGv_i32 tcg_res = tcg_temp_new_i32();
13702
13703 read_vec_element_i32(s, tcg_op, rn, pass, is_scalar ? size : MO_32);
13704
13705 switch (16 * u + opcode) {
13706 case 0x08:
13707 case 0x10:
13708 case 0x14:
13709 {
13710 static NeonGenTwoOpFn * const fns[2][2] = {
13711 { gen_helper_neon_add_u16, gen_helper_neon_sub_u16 },
13712 { tcg_gen_add_i32, tcg_gen_sub_i32 },
13713 };
13714 NeonGenTwoOpFn *genfn;
13715 bool is_sub = opcode == 0x4;
13716
13717 if (size == 1) {
13718 gen_helper_neon_mul_u16(tcg_res, tcg_op, tcg_idx);
13719 } else {
13720 tcg_gen_mul_i32(tcg_res, tcg_op, tcg_idx);
13721 }
13722 if (opcode == 0x8) {
13723 break;
13724 }
13725 read_vec_element_i32(s, tcg_op, rd, pass, MO_32);
13726 genfn = fns[size - 1][is_sub];
13727 genfn(tcg_res, tcg_op, tcg_res);
13728 break;
13729 }
13730 case 0x05:
13731 case 0x01:
13732 read_vec_element_i32(s, tcg_res, rd, pass,
13733 is_scalar ? size : MO_32);
13734 switch (size) {
13735 case 1:
13736 if (opcode == 0x5) {
13737
13738
13739 tcg_gen_xori_i32(tcg_op, tcg_op, 0x80008000);
13740 }
13741 if (is_scalar) {
13742 gen_helper_advsimd_muladdh(tcg_res, tcg_op, tcg_idx,
13743 tcg_res, fpst);
13744 } else {
13745 gen_helper_advsimd_muladd2h(tcg_res, tcg_op, tcg_idx,
13746 tcg_res, fpst);
13747 }
13748 break;
13749 case 2:
13750 if (opcode == 0x5) {
13751
13752
13753 tcg_gen_xori_i32(tcg_op, tcg_op, 0x80000000);
13754 }
13755 gen_helper_vfp_muladds(tcg_res, tcg_op, tcg_idx,
13756 tcg_res, fpst);
13757 break;
13758 default:
13759 g_assert_not_reached();
13760 }
13761 break;
13762 case 0x09:
13763 switch (size) {
13764 case 1:
13765 if (is_scalar) {
13766 gen_helper_advsimd_mulh(tcg_res, tcg_op,
13767 tcg_idx, fpst);
13768 } else {
13769 gen_helper_advsimd_mul2h(tcg_res, tcg_op,
13770 tcg_idx, fpst);
13771 }
13772 break;
13773 case 2:
13774 gen_helper_vfp_muls(tcg_res, tcg_op, tcg_idx, fpst);
13775 break;
13776 default:
13777 g_assert_not_reached();
13778 }
13779 break;
13780 case 0x19:
13781 switch (size) {
13782 case 1:
13783 if (is_scalar) {
13784 gen_helper_advsimd_mulxh(tcg_res, tcg_op,
13785 tcg_idx, fpst);
13786 } else {
13787 gen_helper_advsimd_mulx2h(tcg_res, tcg_op,
13788 tcg_idx, fpst);
13789 }
13790 break;
13791 case 2:
13792 gen_helper_vfp_mulxs(tcg_res, tcg_op, tcg_idx, fpst);
13793 break;
13794 default:
13795 g_assert_not_reached();
13796 }
13797 break;
13798 case 0x0c:
13799 if (size == 1) {
13800 gen_helper_neon_qdmulh_s16(tcg_res, cpu_env,
13801 tcg_op, tcg_idx);
13802 } else {
13803 gen_helper_neon_qdmulh_s32(tcg_res, cpu_env,
13804 tcg_op, tcg_idx);
13805 }
13806 break;
13807 case 0x0d:
13808 if (size == 1) {
13809 gen_helper_neon_qrdmulh_s16(tcg_res, cpu_env,
13810 tcg_op, tcg_idx);
13811 } else {
13812 gen_helper_neon_qrdmulh_s32(tcg_res, cpu_env,
13813 tcg_op, tcg_idx);
13814 }
13815 break;
13816 case 0x1d:
13817 read_vec_element_i32(s, tcg_res, rd, pass,
13818 is_scalar ? size : MO_32);
13819 if (size == 1) {
13820 gen_helper_neon_qrdmlah_s16(tcg_res, cpu_env,
13821 tcg_op, tcg_idx, tcg_res);
13822 } else {
13823 gen_helper_neon_qrdmlah_s32(tcg_res, cpu_env,
13824 tcg_op, tcg_idx, tcg_res);
13825 }
13826 break;
13827 case 0x1f:
13828 read_vec_element_i32(s, tcg_res, rd, pass,
13829 is_scalar ? size : MO_32);
13830 if (size == 1) {
13831 gen_helper_neon_qrdmlsh_s16(tcg_res, cpu_env,
13832 tcg_op, tcg_idx, tcg_res);
13833 } else {
13834 gen_helper_neon_qrdmlsh_s32(tcg_res, cpu_env,
13835 tcg_op, tcg_idx, tcg_res);
13836 }
13837 break;
13838 default:
13839 g_assert_not_reached();
13840 }
13841
13842 if (is_scalar) {
13843 write_fp_sreg(s, rd, tcg_res);
13844 } else {
13845 write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
13846 }
13847
13848 tcg_temp_free_i32(tcg_op);
13849 tcg_temp_free_i32(tcg_res);
13850 }
13851
13852 tcg_temp_free_i32(tcg_idx);
13853 clear_vec_high(s, is_q, rd);
13854 } else {
13855
13856 TCGv_i64 tcg_res[2];
13857 int pass;
13858 bool satop = extract32(opcode, 0, 1);
13859 MemOp memop = MO_32;
13860
13861 if (satop || !u) {
13862 memop |= MO_SIGN;
13863 }
13864
13865 if (size == 2) {
13866 TCGv_i64 tcg_idx = tcg_temp_new_i64();
13867
13868 read_vec_element(s, tcg_idx, rm, index, memop);
13869
13870 for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
13871 TCGv_i64 tcg_op = tcg_temp_new_i64();
13872 TCGv_i64 tcg_passres;
13873 int passelt;
13874
13875 if (is_scalar) {
13876 passelt = 0;
13877 } else {
13878 passelt = pass + (is_q * 2);
13879 }
13880
13881 read_vec_element(s, tcg_op, rn, passelt, memop);
13882
13883 tcg_res[pass] = tcg_temp_new_i64();
13884
13885 if (opcode == 0xa || opcode == 0xb) {
13886
13887 tcg_passres = tcg_res[pass];
13888 } else {
13889 tcg_passres = tcg_temp_new_i64();
13890 }
13891
13892 tcg_gen_mul_i64(tcg_passres, tcg_op, tcg_idx);
13893 tcg_temp_free_i64(tcg_op);
13894
13895 if (satop) {
13896
13897 gen_helper_neon_addl_saturate_s64(tcg_passres, cpu_env,
13898 tcg_passres, tcg_passres);
13899 }
13900
13901 if (opcode == 0xa || opcode == 0xb) {
13902 continue;
13903 }
13904
13905
13906 read_vec_element(s, tcg_res[pass], rd, pass, MO_64);
13907
13908 switch (opcode) {
13909 case 0x2:
13910 tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
13911 break;
13912 case 0x6:
13913 tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
13914 break;
13915 case 0x7:
13916 tcg_gen_neg_i64(tcg_passres, tcg_passres);
13917
13918 case 0x3:
13919 gen_helper_neon_addl_saturate_s64(tcg_res[pass], cpu_env,
13920 tcg_res[pass],
13921 tcg_passres);
13922 break;
13923 default:
13924 g_assert_not_reached();
13925 }
13926 tcg_temp_free_i64(tcg_passres);
13927 }
13928 tcg_temp_free_i64(tcg_idx);
13929
13930 clear_vec_high(s, !is_scalar, rd);
13931 } else {
13932 TCGv_i32 tcg_idx = tcg_temp_new_i32();
13933
13934 assert(size == 1);
13935 read_vec_element_i32(s, tcg_idx, rm, index, size);
13936
13937 if (!is_scalar) {
13938
13939
13940
13941
13942 tcg_gen_deposit_i32(tcg_idx, tcg_idx, tcg_idx, 16, 16);
13943 }
13944
13945 for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
13946 TCGv_i32 tcg_op = tcg_temp_new_i32();
13947 TCGv_i64 tcg_passres;
13948
13949 if (is_scalar) {
13950 read_vec_element_i32(s, tcg_op, rn, pass, size);
13951 } else {
13952 read_vec_element_i32(s, tcg_op, rn,
13953 pass + (is_q * 2), MO_32);
13954 }
13955
13956 tcg_res[pass] = tcg_temp_new_i64();
13957
13958 if (opcode == 0xa || opcode == 0xb) {
13959
13960 tcg_passres = tcg_res[pass];
13961 } else {
13962 tcg_passres = tcg_temp_new_i64();
13963 }
13964
13965 if (memop & MO_SIGN) {
13966 gen_helper_neon_mull_s16(tcg_passres, tcg_op, tcg_idx);
13967 } else {
13968 gen_helper_neon_mull_u16(tcg_passres, tcg_op, tcg_idx);
13969 }
13970 if (satop) {
13971 gen_helper_neon_addl_saturate_s32(tcg_passres, cpu_env,
13972 tcg_passres, tcg_passres);
13973 }
13974 tcg_temp_free_i32(tcg_op);
13975
13976 if (opcode == 0xa || opcode == 0xb) {
13977 continue;
13978 }
13979
13980
13981 read_vec_element(s, tcg_res[pass], rd, pass, MO_64);
13982
13983 switch (opcode) {
13984 case 0x2:
13985 gen_helper_neon_addl_u32(tcg_res[pass], tcg_res[pass],
13986 tcg_passres);
13987 break;
13988 case 0x6:
13989 gen_helper_neon_subl_u32(tcg_res[pass], tcg_res[pass],
13990 tcg_passres);
13991 break;
13992 case 0x7:
13993 gen_helper_neon_negl_u32(tcg_passres, tcg_passres);
13994
13995 case 0x3:
13996 gen_helper_neon_addl_saturate_s32(tcg_res[pass], cpu_env,
13997 tcg_res[pass],
13998 tcg_passres);
13999 break;
14000 default:
14001 g_assert_not_reached();
14002 }
14003 tcg_temp_free_i64(tcg_passres);
14004 }
14005 tcg_temp_free_i32(tcg_idx);
14006
14007 if (is_scalar) {
14008 tcg_gen_ext32u_i64(tcg_res[0], tcg_res[0]);
14009 }
14010 }
14011
14012 if (is_scalar) {
14013 tcg_res[1] = tcg_const_i64(0);
14014 }
14015
14016 for (pass = 0; pass < 2; pass++) {
14017 write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
14018 tcg_temp_free_i64(tcg_res[pass]);
14019 }
14020 }
14021
14022 if (fpst) {
14023 tcg_temp_free_ptr(fpst);
14024 }
14025}
14026
14027
14028
14029
14030
14031
14032
14033static void disas_crypto_aes(DisasContext *s, uint32_t insn)
14034{
14035 int size = extract32(insn, 22, 2);
14036 int opcode = extract32(insn, 12, 5);
14037 int rn = extract32(insn, 5, 5);
14038 int rd = extract32(insn, 0, 5);
14039 int decrypt;
14040 gen_helper_gvec_2 *genfn2 = NULL;
14041 gen_helper_gvec_3 *genfn3 = NULL;
14042
14043 if (!dc_isar_feature(aa64_aes, s) || size != 0) {
14044 unallocated_encoding(s);
14045 return;
14046 }
14047
14048 switch (opcode) {
14049 case 0x4:
14050 decrypt = 0;
14051 genfn3 = gen_helper_crypto_aese;
14052 break;
14053 case 0x6:
14054 decrypt = 0;
14055 genfn2 = gen_helper_crypto_aesmc;
14056 break;
14057 case 0x5:
14058 decrypt = 1;
14059 genfn3 = gen_helper_crypto_aese;
14060 break;
14061 case 0x7:
14062 decrypt = 1;
14063 genfn2 = gen_helper_crypto_aesmc;
14064 break;
14065 default:
14066 unallocated_encoding(s);
14067 return;
14068 }
14069
14070 if (!fp_access_check(s)) {
14071 return;
14072 }
14073 if (genfn2) {
14074 gen_gvec_op2_ool(s, true, rd, rn, decrypt, genfn2);
14075 } else {
14076 gen_gvec_op3_ool(s, true, rd, rd, rn, decrypt, genfn3);
14077 }
14078}
14079
14080
14081
14082
14083
14084
14085
14086static void disas_crypto_three_reg_sha(DisasContext *s, uint32_t insn)
14087{
14088 int size = extract32(insn, 22, 2);
14089 int opcode = extract32(insn, 12, 3);
14090 int rm = extract32(insn, 16, 5);
14091 int rn = extract32(insn, 5, 5);
14092 int rd = extract32(insn, 0, 5);
14093 gen_helper_gvec_3 *genfn;
14094 bool feature;
14095
14096 if (size != 0) {
14097 unallocated_encoding(s);
14098 return;
14099 }
14100
14101 switch (opcode) {
14102 case 0:
14103 genfn = gen_helper_crypto_sha1c;
14104 feature = dc_isar_feature(aa64_sha1, s);
14105 break;
14106 case 1:
14107 genfn = gen_helper_crypto_sha1p;
14108 feature = dc_isar_feature(aa64_sha1, s);
14109 break;
14110 case 2:
14111 genfn = gen_helper_crypto_sha1m;
14112 feature = dc_isar_feature(aa64_sha1, s);
14113 break;
14114 case 3:
14115 genfn = gen_helper_crypto_sha1su0;
14116 feature = dc_isar_feature(aa64_sha1, s);
14117 break;
14118 case 4:
14119 genfn = gen_helper_crypto_sha256h;
14120 feature = dc_isar_feature(aa64_sha256, s);
14121 break;
14122 case 5:
14123 genfn = gen_helper_crypto_sha256h2;
14124 feature = dc_isar_feature(aa64_sha256, s);
14125 break;
14126 case 6:
14127 genfn = gen_helper_crypto_sha256su1;
14128 feature = dc_isar_feature(aa64_sha256, s);
14129 break;
14130 default:
14131 unallocated_encoding(s);
14132 return;
14133 }
14134
14135 if (!feature) {
14136 unallocated_encoding(s);
14137 return;
14138 }
14139
14140 if (!fp_access_check(s)) {
14141 return;
14142 }
14143 gen_gvec_op3_ool(s, true, rd, rn, rm, 0, genfn);
14144}
14145
14146
14147
14148
14149
14150
14151
14152static void disas_crypto_two_reg_sha(DisasContext *s, uint32_t insn)
14153{
14154 int size = extract32(insn, 22, 2);
14155 int opcode = extract32(insn, 12, 5);
14156 int rn = extract32(insn, 5, 5);
14157 int rd = extract32(insn, 0, 5);
14158 gen_helper_gvec_2 *genfn;
14159 bool feature;
14160
14161 if (size != 0) {
14162 unallocated_encoding(s);
14163 return;
14164 }
14165
14166 switch (opcode) {
14167 case 0:
14168 feature = dc_isar_feature(aa64_sha1, s);
14169 genfn = gen_helper_crypto_sha1h;
14170 break;
14171 case 1:
14172 feature = dc_isar_feature(aa64_sha1, s);
14173 genfn = gen_helper_crypto_sha1su1;
14174 break;
14175 case 2:
14176 feature = dc_isar_feature(aa64_sha256, s);
14177 genfn = gen_helper_crypto_sha256su0;
14178 break;
14179 default:
14180 unallocated_encoding(s);
14181 return;
14182 }
14183
14184 if (!feature) {
14185 unallocated_encoding(s);
14186 return;
14187 }
14188
14189 if (!fp_access_check(s)) {
14190 return;
14191 }
14192 gen_gvec_op2_ool(s, true, rd, rn, 0, genfn);
14193}
14194
/* RAX1 (SHA3), 64-bit scalar form: d = n ^ rol64(m, 1). */
static void gen_rax1_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m)
{
    tcg_gen_rotli_i64(d, m, 1);
    tcg_gen_xor_i64(d, d, n);
}
14200
/* RAX1 (SHA3), host-vector form: each lane d = n ^ rol(m, 1). */
static void gen_rax1_vec(unsigned vece, TCGv_vec d, TCGv_vec n, TCGv_vec m)
{
    tcg_gen_rotli_vec(vece, d, m, 1);
    tcg_gen_xor_vec(vece, d, d, n);
}
14206
/*
 * Expand RAX1 as a gvec operation over 64-bit lanes.  The three
 * expanders below implement the same d = n ^ rol64(m, 1) operation.
 */
void gen_gvec_rax1(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
{
    /* Only the rotli vector opcode is needed beyond the mandatory set. */
    static const TCGOpcode vecop_list[] = { INDEX_op_rotli_vec, 0 };
    static const GVecGen3 op = {
        .fni8 = gen_rax1_i64,          /* per-element i64 expansion */
        .fniv = gen_rax1_vec,          /* host vector expansion */
        .opt_opc = vecop_list,
        .fno = gen_helper_crypto_rax1, /* out-of-line helper fallback */
        .vece = MO_64,
    };
    tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &op);
}
14220
14221
14222
14223
14224
14225
14226
/*
 * Crypto three-reg SHA512 group.  The 'o' bit splits the space into
 * SHA512/SHA3 ops (o == 0) and SM3/SM4 ops (o == 1).  RAX1 is expanded
 * inline via gvec; everything else goes out-of-line.
 */
static void disas_crypto_three_reg_sha512(DisasContext *s, uint32_t insn)
{
    int opcode = extract32(insn, 10, 2);
    int o = extract32(insn, 14, 1);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    bool feature;
    gen_helper_gvec_3 *oolfn = NULL;
    GVecGen3Fn *gvecfn = NULL;

    if (o == 0) {
        switch (opcode) {
        case 0: /* SHA512H */
            feature = dc_isar_feature(aa64_sha512, s);
            oolfn = gen_helper_crypto_sha512h;
            break;
        case 1: /* SHA512H2 */
            feature = dc_isar_feature(aa64_sha512, s);
            oolfn = gen_helper_crypto_sha512h2;
            break;
        case 2: /* SHA512SU1 */
            feature = dc_isar_feature(aa64_sha512, s);
            oolfn = gen_helper_crypto_sha512su1;
            break;
        case 3: /* RAX1 */
            feature = dc_isar_feature(aa64_sha3, s);
            gvecfn = gen_gvec_rax1;
            break;
        default:
            /* opcode is only two bits wide, so all values are covered */
            g_assert_not_reached();
        }
    } else {
        switch (opcode) {
        case 0: /* SM3PARTW1 */
            feature = dc_isar_feature(aa64_sm3, s);
            oolfn = gen_helper_crypto_sm3partw1;
            break;
        case 1: /* SM3PARTW2 */
            feature = dc_isar_feature(aa64_sm3, s);
            oolfn = gen_helper_crypto_sm3partw2;
            break;
        case 2: /* SM4EKEY */
            feature = dc_isar_feature(aa64_sm4, s);
            oolfn = gen_helper_crypto_sm4ekey;
            break;
        default:
            unallocated_encoding(s);
            return;
        }
    }

    if (!feature) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (oolfn) {
        gen_gvec_op3_ool(s, true, rd, rn, rm, 0, oolfn);
    } else {
        gen_gvec_fn3(s, true, rd, rn, rm, gvecfn, MO_64);
    }
}
14294
14295
14296
14297
14298
14299
14300
14301static void disas_crypto_two_reg_sha512(DisasContext *s, uint32_t insn)
14302{
14303 int opcode = extract32(insn, 10, 2);
14304 int rn = extract32(insn, 5, 5);
14305 int rd = extract32(insn, 0, 5);
14306 bool feature;
14307
14308 switch (opcode) {
14309 case 0:
14310 feature = dc_isar_feature(aa64_sha512, s);
14311 break;
14312 case 1:
14313 feature = dc_isar_feature(aa64_sm4, s);
14314 break;
14315 default:
14316 unallocated_encoding(s);
14317 return;
14318 }
14319
14320 if (!feature) {
14321 unallocated_encoding(s);
14322 return;
14323 }
14324
14325 if (!fp_access_check(s)) {
14326 return;
14327 }
14328
14329 switch (opcode) {
14330 case 0:
14331 gen_gvec_op2_ool(s, true, rd, rn, 0, gen_helper_crypto_sha512su0);
14332 break;
14333 case 1:
14334 gen_gvec_op3_ool(s, true, rd, rd, rn, 0, gen_helper_crypto_sm4e);
14335 break;
14336 default:
14337 g_assert_not_reached();
14338 }
14339}
14340
14341
14342
14343
14344
14345
14346
/*
 * Crypto four-register group: EOR3 (op0 == 0), BCAX (op0 == 1) from
 * FEAT_SHA3, and SM3SS1 (op0 == 2) from FEAT_SM3.
 */
static void disas_crypto_four_reg(DisasContext *s, uint32_t insn)
{
    int op0 = extract32(insn, 21, 2);
    int rm = extract32(insn, 16, 5);
    int ra = extract32(insn, 10, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    bool feature;

    switch (op0) {
    case 0: /* EOR3 */
    case 1: /* BCAX */
        feature = dc_isar_feature(aa64_sha3, s);
        break;
    case 2: /* SM3SS1 */
        feature = dc_isar_feature(aa64_sm3, s);
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!feature) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (op0 < 2) {
        /* EOR3 / BCAX: full 128-bit op, done as two 64-bit passes. */
        TCGv_i64 tcg_op1, tcg_op2, tcg_op3, tcg_res[2];
        int pass;

        tcg_op1 = tcg_temp_new_i64();
        tcg_op2 = tcg_temp_new_i64();
        tcg_op3 = tcg_temp_new_i64();
        tcg_res[0] = tcg_temp_new_i64();
        tcg_res[1] = tcg_temp_new_i64();

        for (pass = 0; pass < 2; pass++) {
            read_vec_element(s, tcg_op1, rn, pass, MO_64);
            read_vec_element(s, tcg_op2, rm, pass, MO_64);
            read_vec_element(s, tcg_op3, ra, pass, MO_64);

            if (op0 == 0) {
                /* EOR3: rd = rn ^ rm ^ ra */
                tcg_gen_xor_i64(tcg_res[pass], tcg_op2, tcg_op3);
            } else {
                /* BCAX: rd = rn ^ (rm & ~ra) */
                tcg_gen_andc_i64(tcg_res[pass], tcg_op2, tcg_op3);
            }
            tcg_gen_xor_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
        }
        write_vec_element(s, tcg_res[0], rd, 0, MO_64);
        write_vec_element(s, tcg_res[1], rd, 1, MO_64);

        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2);
        tcg_temp_free_i64(tcg_op3);
        tcg_temp_free_i64(tcg_res[0]);
        tcg_temp_free_i64(tcg_res[1]);
    } else {
        /* SM3SS1: operates on the top 32-bit elements, zeroing the rest. */
        TCGv_i32 tcg_op1, tcg_op2, tcg_op3, tcg_res, tcg_zero;

        tcg_op1 = tcg_temp_new_i32();
        tcg_op2 = tcg_temp_new_i32();
        tcg_op3 = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        tcg_zero = tcg_const_i32(0);

        read_vec_element_i32(s, tcg_op1, rn, 3, MO_32);
        read_vec_element_i32(s, tcg_op2, rm, 3, MO_32);
        read_vec_element_i32(s, tcg_op3, ra, 3, MO_32);

        /* res = rol32(rol32(op1, 12) + op2 + op3, 7) expressed as rotr */
        tcg_gen_rotri_i32(tcg_res, tcg_op1, 20);
        tcg_gen_add_i32(tcg_res, tcg_res, tcg_op2);
        tcg_gen_add_i32(tcg_res, tcg_res, tcg_op3);
        tcg_gen_rotri_i32(tcg_res, tcg_res, 25);

        /* Result goes in element 3; elements 0..2 are zeroed. */
        write_vec_element_i32(s, tcg_zero, rd, 0, MO_32);
        write_vec_element_i32(s, tcg_zero, rd, 1, MO_32);
        write_vec_element_i32(s, tcg_zero, rd, 2, MO_32);
        write_vec_element_i32(s, tcg_res, rd, 3, MO_32);

        tcg_temp_free_i32(tcg_op1);
        tcg_temp_free_i32(tcg_op2);
        tcg_temp_free_i32(tcg_op3);
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_zero);
    }
}
14440
14441
14442
14443
14444
14445
14446
14447static void disas_crypto_xar(DisasContext *s, uint32_t insn)
14448{
14449 int rm = extract32(insn, 16, 5);
14450 int imm6 = extract32(insn, 10, 6);
14451 int rn = extract32(insn, 5, 5);
14452 int rd = extract32(insn, 0, 5);
14453
14454 if (!dc_isar_feature(aa64_sha3, s)) {
14455 unallocated_encoding(s);
14456 return;
14457 }
14458
14459 if (!fp_access_check(s)) {
14460 return;
14461 }
14462
14463 gen_gvec_xar(MO_64, vec_full_reg_offset(s, rd),
14464 vec_full_reg_offset(s, rn),
14465 vec_full_reg_offset(s, rm), imm6, 16,
14466 vec_full_reg_size(s));
14467}
14468
14469
14470
14471
14472
14473
14474
14475static void disas_crypto_three_reg_imm2(DisasContext *s, uint32_t insn)
14476{
14477 static gen_helper_gvec_3 * const fns[4] = {
14478 gen_helper_crypto_sm3tt1a, gen_helper_crypto_sm3tt1b,
14479 gen_helper_crypto_sm3tt2a, gen_helper_crypto_sm3tt2b,
14480 };
14481 int opcode = extract32(insn, 10, 2);
14482 int imm2 = extract32(insn, 12, 2);
14483 int rm = extract32(insn, 16, 5);
14484 int rn = extract32(insn, 5, 5);
14485 int rd = extract32(insn, 0, 5);
14486
14487 if (!dc_isar_feature(aa64_sm3, s)) {
14488 unallocated_encoding(s);
14489 return;
14490 }
14491
14492 if (!fp_access_check(s)) {
14493 return;
14494 }
14495
14496 gen_gvec_op3_ool(s, true, rd, rn, rm, imm2, fns[opcode]);
14497}
14498
14499
14500
14501
14502
14503
/*
 * Decode table for the SIMD and crypto portion of the instruction set.
 * First matching entry (insn & mask) == pattern wins, so ordering matters
 * where one encoding is a subset of another.
 */
static const AArch64DecodeTable data_proc_simd[] = {
    /* pattern  ,  mask     ,  fn                        */
    { 0x0e200400, 0x9f200400, disas_simd_three_reg_same },
    { 0x0e008400, 0x9f208400, disas_simd_three_reg_same_extra },
    { 0x0e200000, 0x9f200c00, disas_simd_three_reg_diff },
    { 0x0e200800, 0x9f3e0c00, disas_simd_two_reg_misc },
    { 0x0e300800, 0x9f3e0c00, disas_simd_across_lanes },
    { 0x0e000400, 0x9fe08400, disas_simd_copy },
    { 0x0f000000, 0x9f000400, disas_simd_indexed }, /* vector indexed */
    /* simd_mod_imm decode is a subset of simd_shift_imm, so must precede it */
    { 0x0f000400, 0x9ff80400, disas_simd_mod_imm },
    { 0x0f000400, 0x9f800400, disas_simd_shift_imm },
    { 0x0e000000, 0xbf208c00, disas_simd_tb },
    { 0x0e000800, 0xbf208c00, disas_simd_zip_trn },
    { 0x2e000000, 0xbf208400, disas_simd_ext },
    { 0x5e200400, 0xdf200400, disas_simd_scalar_three_reg_same },
    { 0x5e008400, 0xdf208400, disas_simd_scalar_three_reg_same_extra },
    { 0x5e200000, 0xdf200c00, disas_simd_scalar_three_reg_diff },
    { 0x5e200800, 0xdf3e0c00, disas_simd_scalar_two_reg_misc },
    { 0x5e300800, 0xdf3e0c00, disas_simd_scalar_pairwise },
    { 0x5e000400, 0xdfe08400, disas_simd_scalar_copy },
    { 0x5f000000, 0xdf000400, disas_simd_indexed }, /* scalar indexed */
    { 0x5f000400, 0xdf800400, disas_simd_scalar_shift_imm },
    { 0x4e280800, 0xff3e0c00, disas_crypto_aes },
    { 0x5e000000, 0xff208c00, disas_crypto_three_reg_sha },
    { 0x5e280800, 0xff3e0c00, disas_crypto_two_reg_sha },
    { 0xce608000, 0xffe0b000, disas_crypto_three_reg_sha512 },
    { 0xcec08000, 0xfffff000, disas_crypto_two_reg_sha512 },
    { 0xce000000, 0xff808000, disas_crypto_four_reg },
    { 0xce800000, 0xffe00000, disas_crypto_xar },
    { 0xce408000, 0xffe0c000, disas_crypto_three_reg_imm2 },
    { 0x0e400400, 0x9f60c400, disas_simd_three_reg_same_fp16 },
    { 0x0e780800, 0x8f7e0c00, disas_simd_two_reg_misc_fp16 },
    { 0x5e400400, 0xdf60c400, disas_simd_scalar_three_reg_same_fp16 },
    { 0x00000000, 0x00000000, NULL } /* table terminator */
};
14540
14541static void disas_data_proc_simd(DisasContext *s, uint32_t insn)
14542{
14543
14544
14545
14546
14547 AArch64DecodeFn *fn = lookup_disas_fn(&data_proc_simd[0], insn);
14548 if (fn) {
14549 fn(s, insn);
14550 } else {
14551 unallocated_encoding(s);
14552 }
14553}
14554
14555
14556static void disas_data_proc_simd_fp(DisasContext *s, uint32_t insn)
14557{
14558 if (extract32(insn, 28, 1) == 1 && extract32(insn, 30, 1) == 0) {
14559 disas_data_proc_fp(s, insn);
14560 } else {
14561
14562 disas_data_proc_simd(s, insn);
14563 }
14564}
14565
14566
14567
14568
14569
14570
14571
14572
/*
 * Return true if the page containing this TB's first insn is marked as
 * a guarded page (for Branch Target Identification).
 */
static bool is_guarded_page(CPUARMState *env, DisasContext *s)
{
    uint64_t addr = s->base.pc_first;
#ifdef CONFIG_USER_ONLY
    /* user-only: the GP state is kept in the page flags */
    return page_get_flags(addr) & PAGE_BTI;
#else
    int mmu_idx = arm_to_core_mmu_idx(s->mmu_idx);
    unsigned int index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);

    /*
     * This is called just after fetching the TB's first insn, so the
     * code TLB entry for this page should normally be present; if it
     * hit, the BTI/GP attribute recorded in the iotlb entry tells us
     * whether the page is guarded.  If the TLB lookup misses we
     * conservatively report not-guarded.
     */
    return (tlb_hit(entry->addr_code, addr) &&
            arm_tlb_bti_gp(&env_tlb(env)->d[mmu_idx].iotlb[index].attrs));
#endif
}
14597
14598
14599
14600
14601
14602
14603
14604
14605
14606
14607
14608
14609
14610
14611
14612
14613
/*
 * Return true if this instruction is a valid target for an indirect
 * branch arriving with the given PSTATE.BTYPE value, i.e. it does not
 * raise a Branch Target exception.  'bt' is SCTLR_ELx.BT for the
 * current EL, which tightens the PACIASP/PACIBSP case.
 */
static bool btype_destination_ok(uint32_t insn, bool bt, int btype)
{
    uint32_t sys_mask = insn & 0xffe0001fu;

    if ((insn & 0xfffff01fu) == 0xd503201fu) {
        /* HINT space: CRm:op2 (bits [11:5]) selects the specific hint. */
        switch ((insn >> 5) & 0x7f) {
        case 0x19: /* PACIASP */
        case 0x1b: /* PACIBSP */
            /* Not compatible with btype == 3 when SCTLR_ELx.BT is set. */
            return !bt || btype != 3;
        case 0x20: /* BTI (no operand) */
            /* Not compatible with any btype. */
            return false;
        case 0x22: /* BTI c */
            return btype != 3;
        case 0x24: /* BTI j */
            return btype != 2;
        case 0x26: /* BTI jc */
            /* Compatible with any btype. */
            return true;
        }
        /* Any other hint is not a valid branch target. */
        return false;
    }

    /* BRK and HLT are always allowed as branch targets. */
    return sys_mask == 0xd4200000u || sys_mask == 0xd4400000u;
}
14649
/*
 * Initialize the AArch64 DisasContext from the TB flags at the start
 * of translation of a new translation block.
 */
static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
                                          CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;
    ARMCPU *arm_cpu = env_archcpu(env);
    CPUARMTBFlags tb_flags = arm_tbflags_from_tb(dc->base.tb);
    int bound, core_mmu_idx;

    dc->isar = &arm_cpu->isar;
    dc->condjmp = 0;

    dc->aarch64 = 1;
    /*
     * With EL3 present but 32-bit, there is no AArch64 EL3, so
     * exceptions from Secure state are routed to EL3.
     */
    dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
                               !arm_el_is_aa64(env, 3);
    /* A64 has no Thumb mode or IT-block conditional execution. */
    dc->thumb = 0;
    dc->sctlr_b = 0;
    dc->be_data = EX_TBFLAG_ANY(tb_flags, BE_DATA) ? MO_BE : MO_LE;
    dc->condexec_mask = 0;
    dc->condexec_cond = 0;
    core_mmu_idx = EX_TBFLAG_ANY(tb_flags, MMUIDX);
    dc->mmu_idx = core_to_aa64_mmu_idx(core_mmu_idx);
    /* Top-byte-ignore and tag-check state for address handling. */
    dc->tbii = EX_TBFLAG_A64(tb_flags, TBII);
    dc->tbid = EX_TBFLAG_A64(tb_flags, TBID);
    dc->tcma = EX_TBFLAG_A64(tb_flags, TCMA);
    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
    dc->user = (dc->current_el == 0);
#endif
    dc->fp_excp_el = EX_TBFLAG_ANY(tb_flags, FPEXC_EL);
    dc->align_mem = EX_TBFLAG_ANY(tb_flags, ALIGN_MEM);
    dc->pstate_il = EX_TBFLAG_ANY(tb_flags, PSTATE__IL);
    dc->sve_excp_el = EX_TBFLAG_A64(tb_flags, SVEEXC_EL);
    /* ZCR_LEN is encoded as (vector length / 16) - 1. */
    dc->sve_len = (EX_TBFLAG_A64(tb_flags, ZCR_LEN) + 1) * 16;
    dc->pauth_active = EX_TBFLAG_A64(tb_flags, PAUTH_ACTIVE);
    /* BTI state: SCTLR.BT bit and current PSTATE.BTYPE. */
    dc->bt = EX_TBFLAG_A64(tb_flags, BT);
    dc->btype = EX_TBFLAG_A64(tb_flags, BTYPE);
    dc->unpriv = EX_TBFLAG_A64(tb_flags, UNPRIV);
    /* MTE state for normal and unprivileged (LDTR/STTR) accesses. */
    dc->ata = EX_TBFLAG_A64(tb_flags, ATA);
    dc->mte_active[0] = EX_TBFLAG_A64(tb_flags, MTE_ACTIVE);
    dc->mte_active[1] = EX_TBFLAG_A64(tb_flags, MTE0_ACTIVE);
    dc->vec_len = 0;
    dc->vec_stride = 0;
    dc->cp_regs = arm_cpu->cp_regs;
    dc->features = env->features;
    dc->dcz_blocksize = arm_cpu->dcz_blocksize;

#ifdef CONFIG_USER_ONLY
    /* User-only code relies on TBI being enabled for EL0 accesses. */
    tcg_debug_assert(dc->tbid & 1);
#endif

    /*
     * Single step state.  The code-generation logic here is:
     *  SS_ACTIVE == 0:
     *   generate code with no special handling for single-stepping.
     *  SS_ACTIVE == 1, PSTATE.SS == 1 (active-not-pending):
     *   emit code for one insn, then the software step exception
     *   (see the max_insns bound of 1 below and the translate_insn hook).
     *  SS_ACTIVE == 1, PSTATE.SS == 0 (active-pending):
     *   emit only the software step exception and end the TB.
     */
    dc->ss_active = EX_TBFLAG_ANY(tb_flags, SS_ACTIVE);
    dc->pstate_ss = EX_TBFLAG_ANY(tb_flags, PSTATE__SS);
    dc->is_ldex = false;
    dc->debug_target_el = EX_TBFLAG_ANY(tb_flags, DEBUG_TARGET_EL);

    /* Bound the number of insns to execute to those left on the page.  */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;

    /* If architectural single step active, limit to 1.  */
    if (dc->ss_active) {
        bound = 1;
    }
    dc->base.max_insns = MIN(dc->base.max_insns, bound);

    init_tmp_a64_array(dc);
}
14736
/* Translator hook: nothing to do at the start of a TB for AArch64.  */
static void aarch64_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}
14740
/*
 * Translator hook: record the start of each insn.  The two trailing
 * zeros fill the remaining insn_start slots, which carry no useful
 * state for A64 (presumably AArch32 conditional-execution fields;
 * confirm against tcg_gen_insn_start usage in translate.c).
 */
static void aarch64_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(dc->base.pc_next, 0, 0);
    /* Remember the op so exception paths can amend the insn_start args. */
    dc->insn_start = tcg_last_op();
}
14748
14749static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
14750{
14751 DisasContext *s = container_of(dcbase, DisasContext, base);
14752 CPUARMState *env = cpu->env_ptr;
14753 uint32_t insn;
14754
14755 if (s->ss_active && !s->pstate_ss) {
14756
14757
14758
14759
14760
14761
14762
14763
14764
14765
14766 assert(s->base.num_insns == 1);
14767 gen_swstep_exception(s, 0, 0);
14768 s->base.is_jmp = DISAS_NORETURN;
14769 return;
14770 }
14771
14772 s->pc_curr = s->base.pc_next;
14773 insn = arm_ldl_code(env, &s->base, s->base.pc_next, s->sctlr_b);
14774 s->insn = insn;
14775 s->base.pc_next += 4;
14776
14777 s->fp_access_checked = false;
14778 s->sve_access_checked = false;
14779
14780 if (s->pstate_il) {
14781
14782
14783
14784
14785 gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
14786 syn_illegalstate(), default_exception_el(s));
14787 return;
14788 }
14789
14790 if (dc_isar_feature(aa64_bti, s)) {
14791 if (s->base.num_insns == 1) {
14792
14793
14794
14795
14796
14797
14798
14799
14800
14801
14802
14803 s->guarded_page = is_guarded_page(env, s);
14804
14805
14806 tcg_debug_assert(s->btype >= 0);
14807
14808
14809
14810
14811
14812
14813
14814 if (s->btype != 0
14815 && s->guarded_page
14816 && !btype_destination_ok(insn, s->bt, s->btype)) {
14817 gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
14818 syn_btitrap(s->btype),
14819 default_exception_el(s));
14820 return;
14821 }
14822 } else {
14823
14824 tcg_debug_assert(s->btype == 0);
14825 }
14826 }
14827
14828 switch (extract32(insn, 25, 4)) {
14829 case 0x0: case 0x1: case 0x3:
14830 unallocated_encoding(s);
14831 break;
14832 case 0x2:
14833 if (!dc_isar_feature(aa64_sve, s) || !disas_sve(s, insn)) {
14834 unallocated_encoding(s);
14835 }
14836 break;
14837 case 0x8: case 0x9:
14838 disas_data_proc_imm(s, insn);
14839 break;
14840 case 0xa: case 0xb:
14841 disas_b_exc_sys(s, insn);
14842 break;
14843 case 0x4:
14844 case 0x6:
14845 case 0xc:
14846 case 0xe:
14847 disas_ldst(s, insn);
14848 break;
14849 case 0x5:
14850 case 0xd:
14851 disas_data_proc_reg(s, insn);
14852 break;
14853 case 0x7:
14854 case 0xf:
14855 disas_data_proc_simd_fp(s, insn);
14856 break;
14857 default:
14858 assert(FALSE);
14859 break;
14860 }
14861
14862
14863 free_tmp_a64(s);
14864
14865
14866
14867
14868
14869 if (s->btype > 0 && s->base.is_jmp != DISAS_NORETURN) {
14870 reset_btype(s);
14871 }
14872
14873 translator_loop_temp_check(&s->base);
14874}
14875
/*
 * Translator hook: emit the code that ends a TB, according to how
 * translation stopped (dc->base.is_jmp).
 */
static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (unlikely(dc->ss_active)) {
        /*
         * Architectural single-step: every TB ends by raising the
         * step-complete exception (except where nothing can run).
         */
        switch (dc->base.is_jmp) {
        default:
            gen_a64_set_pc_im(dc->base.pc_next);
            /* fall through */
        case DISAS_EXIT:
        case DISAS_JUMP:
            gen_step_complete_exception(dc);
            break;
        case DISAS_NORETURN:
            break;
        }
    } else {
        switch (dc->base.is_jmp) {
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
            /* Chain to the next TB. */
            gen_goto_tb(dc, 1, dc->base.pc_next);
            break;
        default:
        case DISAS_UPDATE_EXIT:
            gen_a64_set_pc_im(dc->base.pc_next);
            /* fall through */
        case DISAS_EXIT:
            tcg_gen_exit_tb(NULL, 0);
            break;
        case DISAS_UPDATE_NOCHAIN:
            gen_a64_set_pc_im(dc->base.pc_next);
            /* fall through */
        case DISAS_JUMP:
            tcg_gen_lookup_and_goto_ptr();
            break;
        case DISAS_NORETURN:
        case DISAS_SWI:
            break;
        case DISAS_WFE:
            gen_a64_set_pc_im(dc->base.pc_next);
            gen_helper_wfe(cpu_env);
            break;
        case DISAS_YIELD:
            gen_a64_set_pc_im(dc->base.pc_next);
            gen_helper_yield(cpu_env);
            break;
        case DISAS_WFI:
        {
            /*
             * WFI is a special case because we don't want to just
             * halt the CPU if trying to debug across a WFI.
             * NOTE(review): the constant 4 passed to the helper looks
             * like the insn length (for syndrome construction) --
             * confirm against helper_wfi's signature.
             */
            TCGv_i32 tmp = tcg_const_i32(4);

            gen_a64_set_pc_im(dc->base.pc_next);
            gen_helper_wfi(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            /*
             * The helper doesn't necessarily throw an exception, but
             * we must go back to the main loop to check for
             * interrupts anyway.
             */
            tcg_gen_exit_tb(NULL, 0);
            break;
        }
        }
    }
}
14946
/* Translator hook: log the guest disassembly of the just-translated TB.  */
static void aarch64_tr_disas_log(const DisasContextBase *dcbase,
                                 CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
    log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size);
}
14955
/* Hooks used by the generic translator loop for AArch64.  */
const TranslatorOps aarch64_translator_ops = {
    .init_disas_context = aarch64_tr_init_disas_context,
    .tb_start           = aarch64_tr_tb_start,
    .insn_start         = aarch64_tr_insn_start,
    .translate_insn     = aarch64_tr_translate_insn,
    .tb_stop            = aarch64_tr_tb_stop,
    .disas_log          = aarch64_tr_disas_log,
};
14964