1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21#include "qemu/osdep.h"
22
23#include "cpu.h"
24#include "internals.h"
25#include "disas/disas.h"
26#include "exec/exec-all.h"
27#include "tcg-op.h"
28#include "tcg-op-gvec.h"
29#include "qemu/log.h"
30#include "qemu/bitops.h"
31#include "arm_ldst.h"
32#include "hw/semihosting/semihost.h"
33
34#include "exec/helper-proto.h"
35#include "exec/helper-gen.h"
36
37#include "trace-tcg.h"
38#include "exec/log.h"
39
40
/*
 * Architecture-feature test macros: each expands to a boolean that is
 * true when the DisasContext 's' in the enclosing function has the
 * named feature.
 */
#define ENABLE_ARCH_4T arm_dc_feature(s, ARM_FEATURE_V4T)
#define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5)
/* 5TE is keyed off plain V5 here; the TE distinction is not tracked. */
#define ENABLE_ARCH_5TE arm_dc_feature(s, ARM_FEATURE_V5)
#define ENABLE_ARCH_5J dc_isar_feature(jazelle, s)
#define ENABLE_ARCH_6 arm_dc_feature(s, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K arm_dc_feature(s, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2 arm_dc_feature(s, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7 arm_dc_feature(s, ARM_FEATURE_V7)
#define ENABLE_ARCH_8 arm_dc_feature(s, ARM_FEATURE_V8)

/*
 * Bail out to the translator's illegal-instruction path when the
 * required architecture level is missing.  Relies on an 'illegal_op'
 * label being in scope at the use site.
 */
#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)

#include "translate.h"

/* In user-only emulation all guest code runs unprivileged. */
#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif
61
62
/* Scratch 64-bit values used by the Neon/iwMMXt translation code. */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
/* TCG globals mapping the 16 AArch32 core registers. */
static TCGv_i32 cpu_R[16];
/* Condition flags, each held in its own CPUARMState field. */
TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
/* Exclusive-monitor state for load/store-exclusive emulation. */
TCGv_i64 cpu_exclusive_addr;
TCGv_i64 cpu_exclusive_val;

#include "exec/gen-icount.h"

/* Names used when registering the core registers as TCG globals. */
static const char * const regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };

/* Function prototypes for gen_ functions calling Neon helpers. */
typedef void NeonGenThreeOpEnvFn(TCGv_i32, TCGv_env, TCGv_i32,
                                 TCGv_i32, TCGv_i32);

/* Function prototype for gen_ functions for calling VFP fixed-point
 * conversion helpers.
 */
typedef void VFPGenFixPointFn(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
80
81
82void arm_translate_init(void)
83{
84 int i;
85
86 for (i = 0; i < 16; i++) {
87 cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
88 offsetof(CPUARMState, regs[i]),
89 regnames[i]);
90 }
91 cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
92 cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
93 cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
94 cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");
95
96 cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
97 offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
98 cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
99 offsetof(CPUARMState, exclusive_val), "exclusive_val");
100
101 a64_translate_init();
102}
103
104
105
106
/*
 * Flags describing a load/store for ISS (Instruction Specific
 * Syndrome) reporting on a data abort.
 */
typedef enum ISSInfo {
    ISSNone = 0,
    ISSRegMask = 0x1f,      /* low 5 bits hold the transfer register */
    ISSInvalid = (1 << 5),  /* no valid ISS can be reported */
    ISSIsAcqRel = (1 << 6), /* load-acquire / store-release access */
    ISSIsWrite = (1 << 7),  /* store rather than load */
    ISSIs16Bit = (1 << 8),  /* 16-bit (Thumb) instruction */
} ISSInfo;

/* Save the syndrome information for a Data Abort on this instruction. */
static void disas_set_da_iss(DisasContext *s, TCGMemOp memop, ISSInfo issinfo)
{
    uint32_t syn;
    int sas = memop & MO_SIZE;
    bool sse = memop & MO_SIGN;
    bool is_acqrel = issinfo & ISSIsAcqRel;
    bool is_write = issinfo & ISSIsWrite;
    bool is_16bit = issinfo & ISSIs16Bit;
    int srt = issinfo & ISSRegMask;

    if (issinfo & ISSInvalid) {
        /* The caller asked for no ISS to be reported (e.g. a case
         * where the insn cannot be described by the syndrome format);
         * record nothing.
         */
        return;
    }

    if (srt == 15) {
        /* Accesses with r15 as the transfer register never report
         * ISS information.
         */
        return;
    }

    syn = syn_data_abort_with_iss(0, sas, sse, srt, 0, is_acqrel,
                                  0, 0, 0, is_write, 0, is_16bit);
    disas_set_insn_syndrome(s, syn);
}
146
/*
 * Return the core MMU index to use for A32/T32 "unprivileged
 * load/store" instructions (LDRT/STRT and friends): each privileged
 * translation regime is mapped to the corresponding user-mode index.
 * Regimes with no such variant are unreachable here.
 */
static inline int get_a32_user_mem_index(DisasContext *s)
{
    switch (s->mmu_idx) {
    case ARMMMUIdx_S1E2:        /* NOTE(review): E2 mapped to NS EL0 */
    case ARMMMUIdx_S12NSE0:
    case ARMMMUIdx_S12NSE1:
        return arm_to_core_mmu_idx(ARMMMUIdx_S12NSE0);
    case ARMMMUIdx_S1E3:
    case ARMMMUIdx_S1SE0:
    case ARMMMUIdx_S1SE1:
        return arm_to_core_mmu_idx(ARMMMUIdx_S1SE0);
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MPriv:
        return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPrivNegPri:
        return arm_to_core_mmu_idx(ARMMMUIdx_MUserNegPri);
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MSPriv:
        return arm_to_core_mmu_idx(ARMMMUIdx_MSUser);
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPrivNegPri:
        return arm_to_core_mmu_idx(ARMMMUIdx_MSUserNegPri);
    case ARMMMUIdx_S2NS:
    default:
        g_assert_not_reached();
    }
}

/* Load a CPUARMState field at 'offset' into a fresh 32-bit temp. */
static inline TCGv_i32 load_cpu_offset(int offset)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))

/* Store 'var' to the CPUARMState field at 'offset' and free it. */
static inline void store_cpu_offset(TCGv_i32 var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUARMState, name))
198
199
200static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
201{
202 if (reg == 15) {
203 uint32_t addr;
204
205 if (s->thumb)
206 addr = (long)s->pc + 2;
207 else
208 addr = (long)s->pc + 4;
209 tcg_gen_movi_i32(var, addr);
210 } else {
211 tcg_gen_mov_i32(var, cpu_R[reg]);
212 }
213}
214
215
216static inline TCGv_i32 load_reg(DisasContext *s, int reg)
217{
218 TCGv_i32 tmp = tcg_temp_new_i32();
219 load_reg_var(s, tmp, reg);
220 return tmp;
221}
222
223
224
/* Set register 'reg' to 'var' and mark it dead (frees the temp). */
static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15) {
        /*
         * Writes to the PC end the TB.  Bit 0 (Thumb) respectively
         * bits [1:0] (ARM) are architecturally ignored for a plain
         * PC write, so mask them here.
         */
        tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
        s->base.is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}

/*
 * Variant of store_reg for writes to SP (r13): on M-profile CPUs with
 * stack-limit checking enabled, validate the new SP value first via
 * the v8m_stackcheck helper (system emulation only).
 */
static void store_sp_checked(DisasContext *s, TCGv_i32 var)
{
#ifndef CONFIG_USER_ONLY
    if (s->v8m_stackcheck) {
        gen_helper_v8m_stackcheck(cpu_env, var);
    }
#endif
    store_reg(s, 13, var);
}

/* In-place zero/sign extension of the low byte or halfword of 'var'. */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

/* Dual byte extension within each halfword, done by helper. */
#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
265
266
267static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
268{
269 TCGv_i32 tmp_mask = tcg_const_i32(mask);
270 gen_helper_cpsr_write(cpu_env, var, tmp_mask);
271 tcg_temp_free_i32(tmp_mask);
272}
273
274#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
275
276static void gen_exception_internal(int excp)
277{
278 TCGv_i32 tcg_excp = tcg_const_i32(excp);
279
280 assert(excp_is_internal(excp));
281 gen_helper_exception_internal(cpu_env, tcg_excp);
282 tcg_temp_free_i32(tcg_excp);
283}
284
285static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
286{
287 TCGv_i32 tcg_excp = tcg_const_i32(excp);
288 TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
289 TCGv_i32 tcg_el = tcg_const_i32(target_el);
290
291 gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
292 tcg_syn, tcg_el);
293
294 tcg_temp_free_i32(tcg_el);
295 tcg_temp_free_i32(tcg_syn);
296 tcg_temp_free_i32(tcg_excp);
297}
298
/*
 * An instruction has just been stepped with architectural single-step
 * active: advance the single-step state machine and raise the
 * software-step exception, then stop translation.
 */
static void gen_step_complete_exception(DisasContext *s)
{
    gen_ss_advance(s);
    gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
                  default_exception_el(s));
    s->base.is_jmp = DISAS_NORETURN;
}

/*
 * Raise the right kind of single-step exception: the architectural
 * SS exception when SS is active, else the gdbstub debug exception.
 */
static void gen_singlestep_exception(DisasContext *s)
{
    if (s->ss_active) {
        gen_step_complete_exception(s);
    } else {
        gen_exception_internal(EXCP_DEBUG);
    }
}

/*
 * True if any form of single-stepping is in effect: either gdbstub
 * (base.singlestep_enabled) or architectural (ss_active).
 */
static inline bool is_singlestepping(DisasContext *s)
{
    return s->base.singlestep_enabled || s->ss_active;
}

/*
 * Dual signed 16x16->32 multiply: on return, a = lowhalf(a)*lowhalf(b)
 * and b = highhalf(a)*highhalf(b), both as signed products.
 */
static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 tmp1 = tcg_temp_new_i32();
    TCGv_i32 tmp2 = tcg_temp_new_i32();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}
354
355
356static void gen_rev16(TCGv_i32 var)
357{
358 TCGv_i32 tmp = tcg_temp_new_i32();
359 TCGv_i32 mask = tcg_const_i32(0x00ff00ff);
360 tcg_gen_shri_i32(tmp, var, 8);
361 tcg_gen_and_i32(tmp, tmp, mask);
362 tcg_gen_and_i32(var, var, mask);
363 tcg_gen_shli_i32(var, var, 8);
364 tcg_gen_or_i32(var, var, tmp);
365 tcg_temp_free_i32(mask);
366 tcg_temp_free_i32(tmp);
367}
368
369
370static void gen_revsh(TCGv_i32 var)
371{
372 tcg_gen_ext16u_i32(var, var);
373 tcg_gen_bswap16_i32(var, var);
374 tcg_gen_ext16s_i32(var, var);
375}
376
377
378static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
379{
380 TCGv_i64 tmp64 = tcg_temp_new_i64();
381
382 tcg_gen_extu_i32_i64(tmp64, b);
383 tcg_temp_free_i32(b);
384 tcg_gen_shli_i64(tmp64, tmp64, 32);
385 tcg_gen_add_i64(a, tmp64, a);
386
387 tcg_temp_free_i64(tmp64);
388 return a;
389}
390
391
392static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
393{
394 TCGv_i64 tmp64 = tcg_temp_new_i64();
395
396 tcg_gen_extu_i32_i64(tmp64, b);
397 tcg_temp_free_i32(b);
398 tcg_gen_shli_i64(tmp64, tmp64, 32);
399 tcg_gen_sub_i64(a, tmp64, a);
400
401 tcg_temp_free_i64(tmp64);
402 return a;
403}
404
405
406static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
407{
408 TCGv_i32 lo = tcg_temp_new_i32();
409 TCGv_i32 hi = tcg_temp_new_i32();
410 TCGv_i64 ret;
411
412 tcg_gen_mulu2_i32(lo, hi, a, b);
413 tcg_temp_free_i32(a);
414 tcg_temp_free_i32(b);
415
416 ret = tcg_temp_new_i64();
417 tcg_gen_concat_i32_i64(ret, lo, hi);
418 tcg_temp_free_i32(lo);
419 tcg_temp_free_i32(hi);
420
421 return ret;
422}
423
424static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
425{
426 TCGv_i32 lo = tcg_temp_new_i32();
427 TCGv_i32 hi = tcg_temp_new_i32();
428 TCGv_i64 ret;
429
430 tcg_gen_muls2_i32(lo, hi, a, b);
431 tcg_temp_free_i32(a);
432 tcg_temp_free_i32(b);
433
434 ret = tcg_temp_new_i64();
435 tcg_gen_concat_i32_i64(ret, lo, hi);
436 tcg_temp_free_i32(lo);
437 tcg_temp_free_i32(hi);
438
439 return ret;
440}
441
442
443static void gen_swap_half(TCGv_i32 var)
444{
445 TCGv_i32 tmp = tcg_temp_new_i32();
446 tcg_gen_shri_i32(tmp, var, 16);
447 tcg_gen_shli_i32(var, var, 16);
448 tcg_gen_or_i32(var, var, tmp);
449 tcg_temp_free_i32(tmp);
450}
451
452
453
454
455
456
457
458
/*
 * Dual 16-bit add: t0 = (t0.lo + t1.lo) : (t0.hi + t1.hi) with no
 * carry propagating between the halves.  Clearing bit 15 of both
 * operands stops any carry crossing the halfword boundary; the saved
 * XOR of the original bit-15s then restores the carry-less sum bit.
 * Consumes t1.
 */
static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(t1);
}

/* Set the carry flag from bit 31 of 'var'. */
static void gen_set_CF_bit31(TCGv_i32 var)
{
    tcg_gen_shri_i32(cpu_CF, var, 31);
}

/* Set the N and Z flags from 'var' (C and V unchanged). */
static inline void gen_logic_CC(TCGv_i32 var)
{
    tcg_gen_mov_i32(cpu_NF, var);
    tcg_gen_mov_i32(cpu_ZF, var);
}

/* t0 += t1 + CF (flags not updated). */
static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_add_i32(t0, t0, cpu_CF);
}

/* dest = t0 + t1 + CF (flags not updated). */
static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
}

/* dest = t0 - t1 + CF - 1, i.e. subtract with borrow (flags not updated). */
static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_sub_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
    tcg_gen_subi_i32(dest, dest, 1);
}
506
507
/* dest = t0 + t1, computing the NZCV flags. */
static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, 0);
    /* NF:CF = t0 + t1 as a 33-bit result. */
    tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    /* V = (result ^ t0) & ~(t0 ^ t1): overflow iff operands had the
     * same sign and the result differs.
     */
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = t0 + t1 + CF, computing the NZCV flags. */
static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    if (TCG_TARGET_HAS_add2_i32) {
        /* Two chained 33-bit adds: first t0 + CF, then + t1. */
        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
    } else {
        /* Fallback: widen to 64 bits and split result into NF:CF. */
        TCGv_i64 q0 = tcg_temp_new_i64();
        TCGv_i64 q1 = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(q0, t0);
        tcg_gen_extu_i32_i64(q1, t1);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extu_i32_i64(q1, cpu_CF);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
        tcg_temp_free_i64(q0);
        tcg_temp_free_i64(q1);
    }
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    /* Same overflow rule as gen_add_CC. */
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = t0 - t1, computing the NZCV flags. */
static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp;
    tcg_gen_sub_i32(cpu_NF, t0, t1);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    /* ARM subtraction carry = NOT borrow: set iff t0 >= t1 unsigned. */
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
    /* V = (result ^ t0) & (t0 ^ t1) for subtraction. */
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}
563
564
565static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
566{
567 TCGv_i32 tmp = tcg_temp_new_i32();
568 tcg_gen_not_i32(tmp, t1);
569 gen_adc_CC(dest, t0, tmp);
570 tcg_temp_free_i32(tmp);
571}
572
/*
 * Variable shifts with ARM semantics: the count is the low byte of
 * t1; counts of 32..255 produce 0.  The movcond selects a zero source
 * operand when (t1 & 0xff) > 31, then shifts by (count & 0x1f).
 */
#define GEN_SHIFT(name)                                               \
static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)       \
{                                                                     \
    TCGv_i32 tmp1, tmp2, tmp3;                                        \
    tmp1 = tcg_temp_new_i32();                                        \
    tcg_gen_andi_i32(tmp1, t1, 0xff);                                 \
    tmp2 = tcg_const_i32(0);                                          \
    tmp3 = tcg_const_i32(0x1f);                                       \
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0);    \
    tcg_temp_free_i32(tmp3);                                          \
    tcg_gen_andi_i32(tmp1, tmp1, 0x1f);                               \
    tcg_gen_##name##_i32(dest, tmp2, tmp1);                           \
    tcg_temp_free_i32(tmp2);                                          \
    tcg_temp_free_i32(tmp1);                                          \
}
GEN_SHIFT(shl)
GEN_SHIFT(shr)
#undef GEN_SHIFT

/*
 * Arithmetic shift right by a variable count: counts >= 32 are
 * clamped to 31, which for ASR yields the same result (all sign bits).
 */
static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp1, tmp2;
    tmp1 = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp1, t1, 0xff);
    tmp2 = tcg_const_i32(0x1f);
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sar_i32(dest, t0, tmp1);
    tcg_temp_free_i32(tmp1);
}

/* Set CF to bit 'shift' of 'var' (the last bit shifted out). */
static void shifter_out_im(TCGv_i32 var, int shift)
{
    if (shift == 0) {
        tcg_gen_andi_i32(cpu_CF, var, 1);
    } else {
        tcg_gen_shri_i32(cpu_CF, var, shift);
        if (shift != 31) {
            tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
        }
    }
}
615
616
617static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
618 int shift, int flags)
619{
620 switch (shiftop) {
621 case 0:
622 if (shift != 0) {
623 if (flags)
624 shifter_out_im(var, 32 - shift);
625 tcg_gen_shli_i32(var, var, shift);
626 }
627 break;
628 case 1:
629 if (shift == 0) {
630 if (flags) {
631 tcg_gen_shri_i32(cpu_CF, var, 31);
632 }
633 tcg_gen_movi_i32(var, 0);
634 } else {
635 if (flags)
636 shifter_out_im(var, shift - 1);
637 tcg_gen_shri_i32(var, var, shift);
638 }
639 break;
640 case 2:
641 if (shift == 0)
642 shift = 32;
643 if (flags)
644 shifter_out_im(var, shift - 1);
645 if (shift == 32)
646 shift = 31;
647 tcg_gen_sari_i32(var, var, shift);
648 break;
649 case 3:
650 if (shift != 0) {
651 if (flags)
652 shifter_out_im(var, shift - 1);
653 tcg_gen_rotri_i32(var, var, shift); break;
654 } else {
655 TCGv_i32 tmp = tcg_temp_new_i32();
656 tcg_gen_shli_i32(tmp, cpu_CF, 31);
657 if (flags)
658 shifter_out_im(var, 0);
659 tcg_gen_shri_i32(var, var, 1);
660 tcg_gen_or_i32(var, var, tmp);
661 tcg_temp_free_i32(tmp);
662 }
663 }
664};
665
/*
 * Shift 'var' in place by the register-held count 'shift' (consumed),
 * shiftop as in gen_arm_shift_im.  When flags are needed the work is
 * done by helpers which also update CF; otherwise the inline
 * variable-shift generators are used (ROR masks the count to 5 bits).
 */
static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
                                     TCGv_i32 shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
        case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
        case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
        case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0:
            gen_shl(var, var, shift);
            break;
        case 1:
            gen_shr(var, var, shift);
            break;
        case 2:
            gen_sar(var, var, shift);
            break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
                tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    tcg_temp_free_i32(shift);
}
693
/*
 * Dispatch table over the op2 field for ARM parallel add/sub insns;
 * 'gen_pas_helper' is redefined below depending on whether the helper
 * takes a GE-flags pointer argument.
 */
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
/*
 * Generate code for an ARM-encoding parallel add/subtract (op1 selects
 * the s/u/q/sh/uq/uh variant, op2 the operation).  The signed and
 * unsigned (GE-setting) variants pass a pointer to env->GE; the
 * saturating/halving variants do not.  Result is left in 'a'.
 */
static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1: /* signed, sets GE flags */
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5: /* unsigned, sets GE flags */
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2: /* signed saturating */
        PAS_OP(q);
        break;
    case 3: /* signed halving */
        PAS_OP(sh);
        break;
    case 6: /* unsigned saturating */
        PAS_OP(uq);
        break;
    case 7: /* unsigned halving */
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
739
740
/*
 * As above, but with the Thumb-2 encoding: here op1 selects the
 * operation and op2 selects the variant (the field roles are swapped
 * relative to the ARM encoding).
 */
#define PAS_OP(pfx) \
    switch (op1) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
/* Generate code for a Thumb-2 encoding parallel add/subtract. */
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op2) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0: /* signed, sets GE flags */
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4: /* unsigned, sets GE flags */
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1: /* signed saturating */
        PAS_OP(q);
        break;
    case 2: /* signed halving */
        PAS_OP(sh);
        break;
    case 5: /* unsigned saturating */
        PAS_OP(uq);
        break;
    case 6: /* unsigned halving */
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
786
787
788
789
790
/*
 * Decode ARM condition code 'cc' (the 4-bit field) into a DisasCompare:
 * a TCG condition plus the value to compare against zero.  Even codes
 * are tested directly; odd codes are the inversion of the preceding
 * even one (handled by tcg_invert_cond at the end).  'value_global'
 * records whether 'value' is a flag global (not to be freed) or a
 * fresh temporary (freed by arm_free_cc).
 */
void arm_test_cc(DisasCompare *cmp, int cc)
{
    TCGv_i32 value;
    TCGCond cond;
    bool global = true;

    switch (cc) {
    case 0: /* eq: Z */
    case 1: /* ne: !Z */
        cond = TCG_COND_EQ;
        value = cpu_ZF;
        break;

    case 2: /* cs: C */
    case 3: /* cc: !C */
        cond = TCG_COND_NE;
        value = cpu_CF;
        break;

    case 4: /* mi: N */
    case 5: /* pl: !N */
        cond = TCG_COND_LT;
        value = cpu_NF;
        break;

    case 6: /* vs: V */
    case 7: /* vc: !V */
        cond = TCG_COND_LT;
        value = cpu_VF;
        break;

    case 8: /* hi: C && !Z */
    case 9: /* ls: !C || Z */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* CF is 0 or 1; negating gives an all-zeros/all-ones mask,
         * so (-CF & ZF) is nonzero iff C is set and Z is clear.
         */
        tcg_gen_neg_i32(value, cpu_CF);
        tcg_gen_and_i32(value, value, cpu_ZF);
        break;

    case 10: /* ge: N == V -> N ^ V == 0 */
    case 11: /* lt: N != V -> N ^ V != 0 */
        /* Since we're only interested in the sign bit, == 0 is >= 0.  */
        cond = TCG_COND_GE;
        value = tcg_temp_new_i32();
        global = false;
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        break;

    case 12: /* gt: !Z && N == V */
    case 13: /* le: Z || N != V */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* Broadcast the sign of (N ^ V) to a mask, then AND with ~mask
         * and ZF: nonzero iff Z clear and N == V.
         */
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        tcg_gen_sari_i32(value, value, 31);
        tcg_gen_andc_i32(value, cpu_ZF, value);
        break;

    case 14: /* always */
    case 15: /* always */
        /* Use the ALWAYS condition, which will fold early.
         * The value chosen here is arbitrary.
         */
        cond = TCG_COND_ALWAYS;
        value = cpu_ZF;
        goto no_invert;

    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }

    if (cc & 1) {
        cond = tcg_invert_cond(cond);
    }

 no_invert:
    cmp->cond = cond;
    cmp->value = value;
    cmp->value_global = global;
}
876
877void arm_free_cc(DisasCompare *cmp)
878{
879 if (!cmp->value_global) {
880 tcg_temp_free_i32(cmp->value);
881 }
882}
883
884void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
885{
886 tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
887}
888
889void arm_gen_test_cc(int cc, TCGLabel *label)
890{
891 DisasCompare cmp;
892 arm_test_cc(&cmp, cc);
893 arm_jump_cc(&cmp, label);
894 arm_free_cc(&cmp);
895}
896
/*
 * Indexed by the data-processing opcode: 1 when the op is a logical
 * operation (so flag-setting variants take C from the shifter rather
 * than from an arithmetic result).
 */
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl (tst) */
    1, /* xorl (teq) */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};

/*
 * Write the current IT-block state back to env->condexec_bits so a
 * mid-block exception resumes with the correct predication state.
 */
static inline void gen_set_condexec(DisasContext *s)
{
    if (s->condexec_mask) {
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}

/* Set the PC (r15) to the immediate value 'val'. */
static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}
930
931
/*
 * Branch-exchange to an immediate address: bit 0 selects the new
 * Thumb state (only written if it actually changes), remaining bits
 * form the target PC.  Ends the TB.
 */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv_i32 tmp;

    s->base.is_jmp = DISAS_JUMP;
    if (s->thumb != (addr & 1)) {
        tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}

/*
 * Branch-exchange to the address in 'var' (consumed): bit 0 becomes
 * the new Thumb state, the rest the PC.  Ends the TB.
 */
static inline void gen_bx(DisasContext *s, TCGv_i32 var)
{
    s->base.is_jmp = DISAS_JUMP;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
/*
 * Branch-exchange that may also be an M-profile exception return or
 * (with the Security extension) a function return: does a normal
 * gen_bx, then in system emulation flags the TB for the extra
 * magic-value check (gen_bx_excret_final_code) when the CPU could be
 * returning via a magic PC value.
 */
static inline void gen_bx_excret(DisasContext *s, TCGv_i32 var)
{
    gen_bx(s, var);
#ifndef CONFIG_USER_ONLY
    if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY) ||
        (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M))) {
        s->base.is_jmp = DISAS_BX_EXCRET;
    }
#endif
}

/*
 * Emitted at the end of a TB flagged DISAS_BX_EXCRET: if the new PC
 * is below the magic range, exit the TB normally (or raise the
 * single-step exception); otherwise raise EXCP_EXCEPTION_EXIT so the
 * exception-return/function-return is handled outside translated code.
 */
static inline void gen_bx_excret_final_code(DisasContext *s)
{
    /* Generate the code to finish possible exception return and end the TB */
    TCGLabel *excret_label = gen_new_label();
    uint32_t min_magic;

    if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY)) {
        /* Covers FNC_RETURN and EXC_RETURN magic values. */
        min_magic = FNC_RETURN_MIN_MAGIC;
    } else {
        /* Only EXC_RETURN magic values are possible. */
        min_magic = EXC_RETURN_MIN_MAGIC;
    }

    /* Is the new PC value in the magic range indicating exception return? */
    tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], min_magic, excret_label);
    /* No: end the TB as we would for a DISAS_JMP */
    if (is_singlestepping(s)) {
        gen_singlestep_exception(s);
    } else {
        tcg_gen_exit_tb(NULL, 0);
    }
    gen_set_label(excret_label);
    /*
     * Yes: advance any pending architectural single-step state first
     * (the exception-exit helper never returns here), then take the
     * internal exception that performs the return.
     */
    gen_ss_advance(s);
    gen_exception_internal(EXCP_EXCEPTION_EXIT);
}
1028
/*
 * M-profile BXNS (branch exchange to Non-secure): all the work,
 * including the security-state checks and PC/state update, is done by
 * the v7m_bxns helper; the TB ends with an EXIT so the new state is
 * looked up afresh.
 */
static inline void gen_bxns(DisasContext *s, int rm)
{
    TCGv_i32 var = load_reg(s, rm);

    gen_helper_v7m_bxns(cpu_env, var);
    tcg_temp_free_i32(var);
    s->base.is_jmp = DISAS_EXIT;
}

/*
 * M-profile BLXNS: as BXNS but a branch-with-link; the PC is first
 * set to the next insn so the helper can compute the return address.
 */
static inline void gen_blxns(DisasContext *s, int rm)
{
    TCGv_i32 var = load_reg(s, rm);

    /* We don't need to sync condexec state, for the same reason as bxns.
     * We do however need to set the PC, because the blxns helper reads it.
     */
    gen_set_pc_im(s, s->pc);
    gen_helper_v7m_blxns(cpu_env, var);
    tcg_temp_free_i32(var);
    s->base.is_jmp = DISAS_EXIT;
}
1061
1062
1063
1064
/*
 * Variant of store_reg which uses branch&exchange logic when storing
 * to r15 in ARM architecture v7 and above.  The source must be a
 * temporary and will be marked as dead.
 */
static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

/*
 * Variant of store_reg which uses branch&exchange logic when storing
 * to r15 in ARM architecture v5T and above.  This is used for storing
 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
 * in the ARM ARM which use the LoadWritePC() pseudocode function.
 */
static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_5) {
        gen_bx_excret(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

/* Compile-time constant form of IS_USER, for use in C expressions. */
#ifdef CONFIG_USER_ONLY
#define IS_USER_ONLY 1
#else
#define IS_USER_ONLY 0
#endif
1092
1093
1094
1095
1096
1097
1098
1099
1100
/*
 * Widen the 32-bit guest address 'a32' to a target address temp.
 * When SCTLR.B is set (legacy BE32 mode, system emulation only),
 * sub-word accesses have their address XORed so that byte/halfword
 * loads pick up the correct bytes from a word-invariant memory image.
 */
static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, TCGMemOp op)
{
    TCGv addr = tcg_temp_new();
    tcg_gen_extu_i32_tl(addr, a32);

    /* Not needed for user-mode BE32, where we use MO_BE instead.  */
    if (!IS_USER_ONLY && s->sctlr_b && (op & MO_SIZE) < MO_32) {
        tcg_gen_xori_tl(addr, addr, 4 - (1 << (op & MO_SIZE)));
    }
    return addr;
}

/*
 * 32-bit guest load.  Baseline/mainline-less M-profile CPUs require
 * alignment on all accesses, so force MO_ALIGN there.
 */
static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr;

    if (arm_dc_feature(s, ARM_FEATURE_M) &&
        !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
        opc |= MO_ALIGN;
    }

    addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_ld_i32(val, addr, index, opc);
    tcg_temp_free(addr);
}

/* 32-bit guest store; same alignment rule as gen_aa32_ld_i32. */
static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr;

    if (arm_dc_feature(s, ARM_FEATURE_M) &&
        !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
        opc |= MO_ALIGN;
    }

    addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_st_i32(val, addr, index, opc);
    tcg_temp_free(addr);
}
1142
/*
 * Generate a sized load accessor gen_aa32_ldSUFF plus an _iss variant
 * that additionally records syndrome info for the access.  The
 * endianness from s->be_data is folded into the memop.
 */
#define DO_GEN_LD(SUFF, OPC)                                             \
static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val,      \
                                     TCGv_i32 a32, int index)            \
{                                                                        \
    gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data);               \
}                                                                        \
static inline void gen_aa32_ld##SUFF##_iss(DisasContext *s,              \
                                           TCGv_i32 val,                 \
                                           TCGv_i32 a32, int index,      \
                                           ISSInfo issinfo)              \
{                                                                        \
    gen_aa32_ld##SUFF(s, val, a32, index);                               \
    disas_set_da_iss(s, OPC, issinfo);                                   \
}

/* Store counterpart of DO_GEN_LD; the _iss variant forces ISSIsWrite. */
#define DO_GEN_ST(SUFF, OPC)                                             \
static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val,      \
                                     TCGv_i32 a32, int index)            \
{                                                                        \
    gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data);               \
}                                                                        \
static inline void gen_aa32_st##SUFF##_iss(DisasContext *s,              \
                                           TCGv_i32 val,                 \
                                           TCGv_i32 a32, int index,      \
                                           ISSInfo issinfo)              \
{                                                                        \
    gen_aa32_st##SUFF(s, val, a32, index);                               \
    disas_set_da_iss(s, OPC, issinfo | ISSIsWrite);                      \
}
1172
/*
 * In BE32 system mode a 64-bit access is really two word accesses,
 * so swap the two words of the loaded value to match.
 */
static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val)
{
    /* Not needed for user-mode BE32, where we use MO_BE instead.  */
    if (!IS_USER_ONLY && s->sctlr_b) {
        tcg_gen_rotri_i64(val, val, 32);
    }
}

/* 64-bit guest load with BE32 word-swap fixup. */
static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_ld_i64(val, addr, index, opc);
    gen_aa32_frob64(s, val);
    tcg_temp_free(addr);
}

/* 64-bit load at the translation's data endianness. */
static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
                                 TCGv_i32 a32, int index)
{
    gen_aa32_ld_i64(s, val, a32, index, MO_Q | s->be_data);
}

/* 64-bit guest store; in BE32 system mode store a word-swapped copy. */
static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr = gen_aa32_addr(s, a32, opc);

    /* Not needed for user-mode BE32, where we use MO_BE instead.  */
    if (!IS_USER_ONLY && s->sctlr_b) {
        TCGv_i64 tmp = tcg_temp_new_i64();
        tcg_gen_rotri_i64(tmp, val, 32);
        tcg_gen_qemu_st_i64(tmp, addr, index, opc);
        tcg_temp_free_i64(tmp);
    } else {
        tcg_gen_qemu_st_i64(val, addr, index, opc);
    }
    tcg_temp_free(addr);
}

/* 64-bit store at the translation's data endianness. */
static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
                                 TCGv_i32 a32, int index)
{
    gen_aa32_st_i64(s, val, a32, index, MO_Q | s->be_data);
}

/* Instantiate the sized load/store accessors. */
DO_GEN_LD(8s, MO_SB)
DO_GEN_LD(8u, MO_UB)
DO_GEN_LD(16s, MO_SW)
DO_GEN_LD(16u, MO_UW)
DO_GEN_LD(32u, MO_UL)
DO_GEN_ST(8, MO_UB)
DO_GEN_ST(16, MO_UW)
DO_GEN_ST(32, MO_UL)
1227
/* Generate an HVC (hypervisor call) instruction with imm16. */
static inline void gen_hvc(DisasContext *s, int imm16)
{
    /*
     * The pre-HVC helper handles cases when HVC gets trapped as an
     * undefined insn; it wants the PC of the HVC itself.
     */
    gen_set_pc_im(s, s->pc - 4);
    gen_helper_pre_hvc(cpu_env);
    /*
     * Otherwise the exception is raised at end-of-TB (DISAS_HVC), with
     * the PC already pointing at the following instruction; stash the
     * immediate for the syndrome.
     */
    s->svc_imm = imm16;
    gen_set_pc_im(s, s->pc);
    s->base.is_jmp = DISAS_HVC;
}

/* Generate an SMC (secure monitor call) instruction. */
static inline void gen_smc(DisasContext *s)
{
    /*
     * As with HVC, the pre-SMC helper handles trapped/denied SMC and
     * needs the PC of the SMC itself; the exception proper is raised
     * at end-of-TB (DISAS_SMC) with the PC on the next insn.
     */
    TCGv_i32 tmp;

    gen_set_pc_im(s, s->pc - 4);
    tmp = tcg_const_i32(syn_aa32_smc());
    gen_helper_pre_smc(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    gen_set_pc_im(s, s->pc);
    s->base.is_jmp = DISAS_SMC;
}
1260
/*
 * Raise a QEMU-internal exception for the current instruction: sync
 * the IT state, rewind the PC by 'offset' to point at the insn, then
 * raise 'excp' and end the TB.
 */
static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception_internal(excp);
    s->base.is_jmp = DISAS_NORETURN;
}

/* As above, but an architectural exception with syndrome and target EL. */
static void gen_exception_insn(DisasContext *s, int offset, int excp,
                               int syn, uint32_t target_el)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception(excp, syn, target_el);
    s->base.is_jmp = DISAS_NORETURN;
}

/* Raise a BKPT exception with the given syndrome for this insn. */
static void gen_exception_bkpt_insn(DisasContext *s, int offset, uint32_t syn)
{
    TCGv_i32 tcg_syn;

    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    tcg_syn = tcg_const_i32(syn);
    gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
    tcg_temp_free_i32(tcg_syn);
    s->base.is_jmp = DISAS_NORETURN;
}

/* Force a TB lookup after one instruction (used for CPU state changes). */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->base.is_jmp = DISAS_EXIT;
}
1296
/*
 * HLT: when semihosting is enabled and the immediate is the
 * semihosting magic (0x3c in Thumb, 0xf000 in ARM), trap to the
 * semihosting handler.  In system emulation this is additionally
 * refused at EL0.  All other HLT immediates are treated as UNDEF
 * (this translator does not implement the v8 HLT debug halt).
 */
static inline void gen_hlt(DisasContext *s, int imm)
{
    if (semihosting_enabled() &&
#ifndef CONFIG_USER_ONLY
        s->current_el != 0 &&
#endif
        (imm == (s->thumb ? 0x3c : 0xf000))) {
        gen_exception_internal_insn(s, 0, EXCP_SEMIHOST);
        return;
    }

    gen_exception_insn(s, s->thumb ? 2 : 4, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
}
1323
/*
 * Apply the addressing-mode offset of an ARM single data transfer
 * insn to the address in 'var': either a 12-bit immediate (bit 25
 * clear) or a shifted register (bit 25 set); bit 23 selects add vs
 * subtract.
 */
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv_i32 var)
{
    int val, rm, shift, shiftop;
    TCGv_i32 offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}

/*
 * As above for halfword/doubleword transfers: split 8-bit immediate
 * (bit 22 set) or plain register offset, plus an 'extra' adjustment
 * applied unconditionally.
 */
static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv_i32 var)
{
    int val, rm;
    TCGv_i32 offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = (insn) & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}
1379
1380static TCGv_ptr get_fpstatus_ptr(int neon)
1381{
1382 TCGv_ptr statusptr = tcg_temp_new_ptr();
1383 int offset;
1384 if (neon) {
1385 offset = offsetof(CPUARMState, vfp.standard_fp_status);
1386 } else {
1387 offset = offsetof(CPUARMState, vfp.fp_status);
1388 }
1389 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
1390 return statusptr;
1391}
1392
/*
 * Byte offset within CPUARMState of VFP register 'reg': a D register
 * when 'dp', else an S register (stored as halves of the zregs D
 * words, with the odd S register in the upper word).
 */
static inline long vfp_reg_offset(bool dp, unsigned reg)
{
    if (dp) {
        return offsetof(CPUARMState, vfp.zregs[reg >> 1].d[reg & 1]);
    } else {
        long ofs = offsetof(CPUARMState, vfp.zregs[reg >> 2].d[(reg >> 1) & 1]);
        if (reg & 1) {
            ofs += offsetof(CPU_DoubleU, l.upper);
        } else {
            ofs += offsetof(CPU_DoubleU, l.lower);
        }
        return ofs;
    }
}

/*
 * Return the offset of a 32-bit piece of a NEON register.
 * zero is the least significant end of the register.
 */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}
1417
1418
1419
1420
/*
 * Return the CPUARMState offset of element 'element' (of size
 * 1 << size bytes) within NEON register 'reg'.
 */
static inline long
neon_element_offset(int reg, int element, TCGMemOp size)
{
    int element_size = 1 << size;
    int ofs = element * element_size;
#ifdef HOST_WORDS_BIGENDIAN
    /*
     * The offset computed above assumes a little-endian host; on a
     * big-endian host, sub-doubleword elements sit at the opposite
     * end of each 8-byte unit, so flip the offset within the unit.
     */
    if (element_size < 8) {
        ofs ^= 8 - element_size;
    }
#endif
    return neon_reg_offset(reg, 0) + ofs;
}
1436
1437static TCGv_i32 neon_load_reg(int reg, int pass)
1438{
1439 TCGv_i32 tmp = tcg_temp_new_i32();
1440 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1441 return tmp;
1442}
1443
/*
 * Load a single NEON vector element into 'var', zero-extending
 * sub-word elements to 32 bits.  'mop' gives both the element size
 * (used for the offset) and the load width.
 */
static void neon_load_element(TCGv_i32 var, int reg, int ele, TCGMemOp mop)
{
    long offset = neon_element_offset(reg, ele, mop & MO_SIZE);

    switch (mop) {
    case MO_UB:
        tcg_gen_ld8u_i32(var, cpu_env, offset);
        break;
    case MO_UW:
        tcg_gen_ld16u_i32(var, cpu_env, offset);
        break;
    case MO_UL:
        tcg_gen_ld_i32(var, cpu_env, offset);
        break;
    default:
        /* Signed or 64-bit memops are not valid here. */
        g_assert_not_reached();
    }
}
1462
/*
 * Load a single NEON vector element into the 64-bit 'var',
 * zero-extending sub-doubleword elements.  'mop' gives both the
 * element size (used for the offset) and the load width.
 */
static void neon_load_element64(TCGv_i64 var, int reg, int ele, TCGMemOp mop)
{
    long offset = neon_element_offset(reg, ele, mop & MO_SIZE);

    switch (mop) {
    case MO_UB:
        tcg_gen_ld8u_i64(var, cpu_env, offset);
        break;
    case MO_UW:
        tcg_gen_ld16u_i64(var, cpu_env, offset);
        break;
    case MO_UL:
        tcg_gen_ld32u_i64(var, cpu_env, offset);
        break;
    case MO_Q:
        tcg_gen_ld_i64(var, cpu_env, offset);
        break;
    default:
        /* Signed memops are not valid here. */
        g_assert_not_reached();
    }
}
1484
/*
 * Store 'var' to 32-bit pass 'pass' of NEON register 'reg'.
 * Consumes (frees) 'var'.
 */
static void neon_store_reg(int reg, int pass, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}
1490
/*
 * Store the low 'size' bytes of 'var' to a single NEON vector
 * element.  Does not free 'var'.
 */
static void neon_store_element(int reg, int ele, TCGMemOp size, TCGv_i32 var)
{
    long offset = neon_element_offset(reg, ele, size);

    switch (size) {
    case MO_8:
        tcg_gen_st8_i32(var, cpu_env, offset);
        break;
    case MO_16:
        tcg_gen_st16_i32(var, cpu_env, offset);
        break;
    case MO_32:
        tcg_gen_st_i32(var, cpu_env, offset);
        break;
    default:
        /* 64-bit elements take the _i64 variant below. */
        g_assert_not_reached();
    }
}
1509
/*
 * Store the low 'size' bytes of the 64-bit 'var' to a single NEON
 * vector element.  Does not free 'var'.
 */
static void neon_store_element64(int reg, int ele, TCGMemOp size, TCGv_i64 var)
{
    long offset = neon_element_offset(reg, ele, size);

    switch (size) {
    case MO_8:
        tcg_gen_st8_i64(var, cpu_env, offset);
        break;
    case MO_16:
        tcg_gen_st16_i64(var, cpu_env, offset);
        break;
    case MO_32:
        tcg_gen_st32_i64(var, cpu_env, offset);
        break;
    case MO_64:
        tcg_gen_st_i64(var, cpu_env, offset);
        break;
    default:
        g_assert_not_reached();
    }
}
1531
/* Load NEON/VFP D register 'reg' into the 64-bit temp 'var'. */
static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
1536
/* Store the 64-bit temp 'var' to NEON/VFP D register 'reg'. */
static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
1541
/* Load VFP S register 'reg' into the 32-bit temp 'var'. */
static inline void neon_load_reg32(TCGv_i32 var, int reg)
{
    tcg_gen_ld_i32(var, cpu_env, vfp_reg_offset(false, reg));
}
1546
/* Store the 32-bit temp 'var' to VFP S register 'reg'. */
static inline void neon_store_reg32(TCGv_i32 var, int reg)
{
    tcg_gen_st_i32(var, cpu_env, vfp_reg_offset(false, reg));
}
1551
1552static TCGv_ptr vfp_reg_ptr(bool dp, int reg)
1553{
1554 TCGv_ptr ret = tcg_temp_new_ptr();
1555 tcg_gen_addi_ptr(ret, cpu_env, vfp_reg_offset(dp, reg));
1556 return ret;
1557}
1558
1559#define ARM_CP_RW_BIT (1 << 20)
1560
1561
1562#include "translate-vfp.inc.c"
1563
/* Load iwMMXt data register wRn into the 64-bit temp 'var'. */
static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}
1568
/* Store the 64-bit temp 'var' to iwMMXt data register wRn. */
static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}
1573
/*
 * Load iwMMXt control register wCx into a newly allocated 32-bit
 * temp, which the caller must free.
 */
static inline TCGv_i32 iwmmxt_load_creg(int reg)
{
    TCGv_i32 var = tcg_temp_new_i32();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    return var;
}
1580
/* Store 'var' to iwMMXt control register wCx.  Consumes (frees) 'var'. */
static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}
1586
/* Write the M0 accumulator back to iwMMXt register wRn. */
static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}
1591
/* Load iwMMXt register wRn into the M0 accumulator. */
static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}
1596
/* M0 |= wRn (bitwise OR into the accumulator, clobbers cpu_V1). */
static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}
1602
/* M0 &= wRn (bitwise AND into the accumulator, clobbers cpu_V1). */
static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}
1608
/* M0 ^= wRn (bitwise XOR into the accumulator, clobbers cpu_V1). */
static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}
1614
/*
 * Emit gen_op_iwmmxt_<name>_M0_wRn(): load wRn into cpu_V1 and apply
 * the two-operand helper into the M0 accumulator.
 */
#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

/* As IWMMXT_OP, but for helpers that also take cpu_env. */
#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

/* Instantiate byte/word/long variants of an env-taking op. */
#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

/* Single-operand env-taking op on the M0 accumulator only. */
#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}

/* Multiply/accumulate and sum-of-absolute-differences ops. */
IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

/* Two-register unpack (interleave low/high halves). */
IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

/* Single-register unpack with unsigned/signed extension. */
IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

/* Element-wise compares. */
IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

/* Element-wise min/max. */
IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

/* Element-wise add/subtract (no-saturate, unsigned- and signed-saturate). */
IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

/* Averaging ops (round bit 0 or 1). */
IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

/* Pack with unsigned/signed saturation. */
IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)
1694
1695static void gen_op_iwmmxt_set_mup(void)
1696{
1697 TCGv_i32 tmp;
1698 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1699 tcg_gen_ori_i32(tmp, tmp, 2);
1700 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1701}
1702
1703static void gen_op_iwmmxt_set_cup(void)
1704{
1705 TCGv_i32 tmp;
1706 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1707 tcg_gen_ori_i32(tmp, tmp, 1);
1708 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1709}
1710
/* Compute SIMD N/Z flags from M0 via helper and store them to wCASF. */
static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}
1717
/* M0 += zero-extended low 32 bits of wRn (clobbers cpu_V1). */
static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}
1724
1725static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
1726 TCGv_i32 dest)
1727{
1728 int rd;
1729 uint32_t offset;
1730 TCGv_i32 tmp;
1731
1732 rd = (insn >> 16) & 0xf;
1733 tmp = load_reg(s, rd);
1734
1735 offset = (insn & 0xff) << ((insn >> 7) & 2);
1736 if (insn & (1 << 24)) {
1737
1738 if (insn & (1 << 23))
1739 tcg_gen_addi_i32(tmp, tmp, offset);
1740 else
1741 tcg_gen_addi_i32(tmp, tmp, -offset);
1742 tcg_gen_mov_i32(dest, tmp);
1743 if (insn & (1 << 21))
1744 store_reg(s, rd, tmp);
1745 else
1746 tcg_temp_free_i32(tmp);
1747 } else if (insn & (1 << 21)) {
1748
1749 tcg_gen_mov_i32(dest, tmp);
1750 if (insn & (1 << 23))
1751 tcg_gen_addi_i32(tmp, tmp, offset);
1752 else
1753 tcg_gen_addi_i32(tmp, tmp, -offset);
1754 store_reg(s, rd, tmp);
1755 } else if (!(insn & (1 << 23)))
1756 return 1;
1757 return 0;
1758}
1759
1760static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
1761{
1762 int rd = (insn >> 0) & 0xf;
1763 TCGv_i32 tmp;
1764
1765 if (insn & (1 << 8)) {
1766 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
1767 return 1;
1768 } else {
1769 tmp = iwmmxt_load_creg(rd);
1770 }
1771 } else {
1772 tmp = tcg_temp_new_i32();
1773 iwmmxt_load_reg(cpu_V0, rd);
1774 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
1775 }
1776 tcg_gen_andi_i32(tmp, tmp, mask);
1777 tcg_gen_mov_i32(dest, tmp);
1778 tcg_temp_free_i32(tmp);
1779 return 0;
1780}
1781
1782
1783
1784static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
1785{
1786 int rd, wrd;
1787 int rdhi, rdlo, rd0, rd1, i;
1788 TCGv_i32 addr;
1789 TCGv_i32 tmp, tmp2, tmp3;
1790
1791 if ((insn & 0x0e000e00) == 0x0c000000) {
1792 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1793 wrd = insn & 0xf;
1794 rdlo = (insn >> 12) & 0xf;
1795 rdhi = (insn >> 16) & 0xf;
1796 if (insn & ARM_CP_RW_BIT) {
1797 iwmmxt_load_reg(cpu_V0, wrd);
1798 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
1799 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1800 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
1801 } else {
1802 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1803 iwmmxt_store_reg(cpu_V0, wrd);
1804 gen_op_iwmmxt_set_mup();
1805 }
1806 return 0;
1807 }
1808
1809 wrd = (insn >> 12) & 0xf;
1810 addr = tcg_temp_new_i32();
1811 if (gen_iwmmxt_address(s, insn, addr)) {
1812 tcg_temp_free_i32(addr);
1813 return 1;
1814 }
1815 if (insn & ARM_CP_RW_BIT) {
1816 if ((insn >> 28) == 0xf) {
1817 tmp = tcg_temp_new_i32();
1818 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
1819 iwmmxt_store_creg(wrd, tmp);
1820 } else {
1821 i = 1;
1822 if (insn & (1 << 8)) {
1823 if (insn & (1 << 22)) {
1824 gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s));
1825 i = 0;
1826 } else {
1827 tmp = tcg_temp_new_i32();
1828 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
1829 }
1830 } else {
1831 tmp = tcg_temp_new_i32();
1832 if (insn & (1 << 22)) {
1833 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
1834 } else {
1835 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
1836 }
1837 }
1838 if (i) {
1839 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1840 tcg_temp_free_i32(tmp);
1841 }
1842 gen_op_iwmmxt_movq_wRn_M0(wrd);
1843 }
1844 } else {
1845 if ((insn >> 28) == 0xf) {
1846 tmp = iwmmxt_load_creg(wrd);
1847 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
1848 } else {
1849 gen_op_iwmmxt_movq_M0_wRn(wrd);
1850 tmp = tcg_temp_new_i32();
1851 if (insn & (1 << 8)) {
1852 if (insn & (1 << 22)) {
1853 gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s));
1854 } else {
1855 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1856 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
1857 }
1858 } else {
1859 if (insn & (1 << 22)) {
1860 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1861 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
1862 } else {
1863 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
1864 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
1865 }
1866 }
1867 }
1868 tcg_temp_free_i32(tmp);
1869 }
1870 tcg_temp_free_i32(addr);
1871 return 0;
1872 }
1873
1874 if ((insn & 0x0f000000) != 0x0e000000)
1875 return 1;
1876
1877 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1878 case 0x000:
1879 wrd = (insn >> 12) & 0xf;
1880 rd0 = (insn >> 0) & 0xf;
1881 rd1 = (insn >> 16) & 0xf;
1882 gen_op_iwmmxt_movq_M0_wRn(rd0);
1883 gen_op_iwmmxt_orq_M0_wRn(rd1);
1884 gen_op_iwmmxt_setpsr_nz();
1885 gen_op_iwmmxt_movq_wRn_M0(wrd);
1886 gen_op_iwmmxt_set_mup();
1887 gen_op_iwmmxt_set_cup();
1888 break;
1889 case 0x011:
1890 if (insn & 0xf)
1891 return 1;
1892 rd = (insn >> 12) & 0xf;
1893 wrd = (insn >> 16) & 0xf;
1894 switch (wrd) {
1895 case ARM_IWMMXT_wCID:
1896 case ARM_IWMMXT_wCASF:
1897 break;
1898 case ARM_IWMMXT_wCon:
1899 gen_op_iwmmxt_set_cup();
1900
1901 case ARM_IWMMXT_wCSSF:
1902 tmp = iwmmxt_load_creg(wrd);
1903 tmp2 = load_reg(s, rd);
1904 tcg_gen_andc_i32(tmp, tmp, tmp2);
1905 tcg_temp_free_i32(tmp2);
1906 iwmmxt_store_creg(wrd, tmp);
1907 break;
1908 case ARM_IWMMXT_wCGR0:
1909 case ARM_IWMMXT_wCGR1:
1910 case ARM_IWMMXT_wCGR2:
1911 case ARM_IWMMXT_wCGR3:
1912 gen_op_iwmmxt_set_cup();
1913 tmp = load_reg(s, rd);
1914 iwmmxt_store_creg(wrd, tmp);
1915 break;
1916 default:
1917 return 1;
1918 }
1919 break;
1920 case 0x100:
1921 wrd = (insn >> 12) & 0xf;
1922 rd0 = (insn >> 0) & 0xf;
1923 rd1 = (insn >> 16) & 0xf;
1924 gen_op_iwmmxt_movq_M0_wRn(rd0);
1925 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1926 gen_op_iwmmxt_setpsr_nz();
1927 gen_op_iwmmxt_movq_wRn_M0(wrd);
1928 gen_op_iwmmxt_set_mup();
1929 gen_op_iwmmxt_set_cup();
1930 break;
1931 case 0x111:
1932 if (insn & 0xf)
1933 return 1;
1934 rd = (insn >> 12) & 0xf;
1935 wrd = (insn >> 16) & 0xf;
1936 tmp = iwmmxt_load_creg(wrd);
1937 store_reg(s, rd, tmp);
1938 break;
1939 case 0x300:
1940 wrd = (insn >> 12) & 0xf;
1941 rd0 = (insn >> 0) & 0xf;
1942 rd1 = (insn >> 16) & 0xf;
1943 gen_op_iwmmxt_movq_M0_wRn(rd0);
1944 tcg_gen_neg_i64(cpu_M0, cpu_M0);
1945 gen_op_iwmmxt_andq_M0_wRn(rd1);
1946 gen_op_iwmmxt_setpsr_nz();
1947 gen_op_iwmmxt_movq_wRn_M0(wrd);
1948 gen_op_iwmmxt_set_mup();
1949 gen_op_iwmmxt_set_cup();
1950 break;
1951 case 0x200:
1952 wrd = (insn >> 12) & 0xf;
1953 rd0 = (insn >> 0) & 0xf;
1954 rd1 = (insn >> 16) & 0xf;
1955 gen_op_iwmmxt_movq_M0_wRn(rd0);
1956 gen_op_iwmmxt_andq_M0_wRn(rd1);
1957 gen_op_iwmmxt_setpsr_nz();
1958 gen_op_iwmmxt_movq_wRn_M0(wrd);
1959 gen_op_iwmmxt_set_mup();
1960 gen_op_iwmmxt_set_cup();
1961 break;
1962 case 0x810: case 0xa10:
1963 wrd = (insn >> 12) & 0xf;
1964 rd0 = (insn >> 0) & 0xf;
1965 rd1 = (insn >> 16) & 0xf;
1966 gen_op_iwmmxt_movq_M0_wRn(rd0);
1967 if (insn & (1 << 21))
1968 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1969 else
1970 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1971 gen_op_iwmmxt_movq_wRn_M0(wrd);
1972 gen_op_iwmmxt_set_mup();
1973 break;
1974 case 0x10e: case 0x50e: case 0x90e: case 0xd0e:
1975 wrd = (insn >> 12) & 0xf;
1976 rd0 = (insn >> 16) & 0xf;
1977 rd1 = (insn >> 0) & 0xf;
1978 gen_op_iwmmxt_movq_M0_wRn(rd0);
1979 switch ((insn >> 22) & 3) {
1980 case 0:
1981 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1982 break;
1983 case 1:
1984 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1985 break;
1986 case 2:
1987 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1988 break;
1989 case 3:
1990 return 1;
1991 }
1992 gen_op_iwmmxt_movq_wRn_M0(wrd);
1993 gen_op_iwmmxt_set_mup();
1994 gen_op_iwmmxt_set_cup();
1995 break;
1996 case 0x10c: case 0x50c: case 0x90c: case 0xd0c:
1997 wrd = (insn >> 12) & 0xf;
1998 rd0 = (insn >> 16) & 0xf;
1999 rd1 = (insn >> 0) & 0xf;
2000 gen_op_iwmmxt_movq_M0_wRn(rd0);
2001 switch ((insn >> 22) & 3) {
2002 case 0:
2003 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
2004 break;
2005 case 1:
2006 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
2007 break;
2008 case 2:
2009 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
2010 break;
2011 case 3:
2012 return 1;
2013 }
2014 gen_op_iwmmxt_movq_wRn_M0(wrd);
2015 gen_op_iwmmxt_set_mup();
2016 gen_op_iwmmxt_set_cup();
2017 break;
2018 case 0x012: case 0x112: case 0x412: case 0x512:
2019 wrd = (insn >> 12) & 0xf;
2020 rd0 = (insn >> 16) & 0xf;
2021 rd1 = (insn >> 0) & 0xf;
2022 gen_op_iwmmxt_movq_M0_wRn(rd0);
2023 if (insn & (1 << 22))
2024 gen_op_iwmmxt_sadw_M0_wRn(rd1);
2025 else
2026 gen_op_iwmmxt_sadb_M0_wRn(rd1);
2027 if (!(insn & (1 << 20)))
2028 gen_op_iwmmxt_addl_M0_wRn(wrd);
2029 gen_op_iwmmxt_movq_wRn_M0(wrd);
2030 gen_op_iwmmxt_set_mup();
2031 break;
2032 case 0x010: case 0x110: case 0x210: case 0x310:
2033 wrd = (insn >> 12) & 0xf;
2034 rd0 = (insn >> 16) & 0xf;
2035 rd1 = (insn >> 0) & 0xf;
2036 gen_op_iwmmxt_movq_M0_wRn(rd0);
2037 if (insn & (1 << 21)) {
2038 if (insn & (1 << 20))
2039 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
2040 else
2041 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
2042 } else {
2043 if (insn & (1 << 20))
2044 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
2045 else
2046 gen_op_iwmmxt_mululw_M0_wRn(rd1);
2047 }
2048 gen_op_iwmmxt_movq_wRn_M0(wrd);
2049 gen_op_iwmmxt_set_mup();
2050 break;
2051 case 0x410: case 0x510: case 0x610: case 0x710:
2052 wrd = (insn >> 12) & 0xf;
2053 rd0 = (insn >> 16) & 0xf;
2054 rd1 = (insn >> 0) & 0xf;
2055 gen_op_iwmmxt_movq_M0_wRn(rd0);
2056 if (insn & (1 << 21))
2057 gen_op_iwmmxt_macsw_M0_wRn(rd1);
2058 else
2059 gen_op_iwmmxt_macuw_M0_wRn(rd1);
2060 if (!(insn & (1 << 20))) {
2061 iwmmxt_load_reg(cpu_V1, wrd);
2062 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
2063 }
2064 gen_op_iwmmxt_movq_wRn_M0(wrd);
2065 gen_op_iwmmxt_set_mup();
2066 break;
2067 case 0x006: case 0x406: case 0x806: case 0xc06:
2068 wrd = (insn >> 12) & 0xf;
2069 rd0 = (insn >> 16) & 0xf;
2070 rd1 = (insn >> 0) & 0xf;
2071 gen_op_iwmmxt_movq_M0_wRn(rd0);
2072 switch ((insn >> 22) & 3) {
2073 case 0:
2074 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
2075 break;
2076 case 1:
2077 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
2078 break;
2079 case 2:
2080 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
2081 break;
2082 case 3:
2083 return 1;
2084 }
2085 gen_op_iwmmxt_movq_wRn_M0(wrd);
2086 gen_op_iwmmxt_set_mup();
2087 gen_op_iwmmxt_set_cup();
2088 break;
2089 case 0x800: case 0x900: case 0xc00: case 0xd00:
2090 wrd = (insn >> 12) & 0xf;
2091 rd0 = (insn >> 16) & 0xf;
2092 rd1 = (insn >> 0) & 0xf;
2093 gen_op_iwmmxt_movq_M0_wRn(rd0);
2094 if (insn & (1 << 22)) {
2095 if (insn & (1 << 20))
2096 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
2097 else
2098 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
2099 } else {
2100 if (insn & (1 << 20))
2101 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
2102 else
2103 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
2104 }
2105 gen_op_iwmmxt_movq_wRn_M0(wrd);
2106 gen_op_iwmmxt_set_mup();
2107 gen_op_iwmmxt_set_cup();
2108 break;
2109 case 0x802: case 0x902: case 0xa02: case 0xb02:
2110 wrd = (insn >> 12) & 0xf;
2111 rd0 = (insn >> 16) & 0xf;
2112 rd1 = (insn >> 0) & 0xf;
2113 gen_op_iwmmxt_movq_M0_wRn(rd0);
2114 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
2115 tcg_gen_andi_i32(tmp, tmp, 7);
2116 iwmmxt_load_reg(cpu_V1, rd1);
2117 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2118 tcg_temp_free_i32(tmp);
2119 gen_op_iwmmxt_movq_wRn_M0(wrd);
2120 gen_op_iwmmxt_set_mup();
2121 break;
2122 case 0x601: case 0x605: case 0x609: case 0x60d:
2123 if (((insn >> 6) & 3) == 3)
2124 return 1;
2125 rd = (insn >> 12) & 0xf;
2126 wrd = (insn >> 16) & 0xf;
2127 tmp = load_reg(s, rd);
2128 gen_op_iwmmxt_movq_M0_wRn(wrd);
2129 switch ((insn >> 6) & 3) {
2130 case 0:
2131 tmp2 = tcg_const_i32(0xff);
2132 tmp3 = tcg_const_i32((insn & 7) << 3);
2133 break;
2134 case 1:
2135 tmp2 = tcg_const_i32(0xffff);
2136 tmp3 = tcg_const_i32((insn & 3) << 4);
2137 break;
2138 case 2:
2139 tmp2 = tcg_const_i32(0xffffffff);
2140 tmp3 = tcg_const_i32((insn & 1) << 5);
2141 break;
2142 default:
2143 tmp2 = NULL;
2144 tmp3 = NULL;
2145 }
2146 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
2147 tcg_temp_free_i32(tmp3);
2148 tcg_temp_free_i32(tmp2);
2149 tcg_temp_free_i32(tmp);
2150 gen_op_iwmmxt_movq_wRn_M0(wrd);
2151 gen_op_iwmmxt_set_mup();
2152 break;
2153 case 0x107: case 0x507: case 0x907: case 0xd07:
2154 rd = (insn >> 12) & 0xf;
2155 wrd = (insn >> 16) & 0xf;
2156 if (rd == 15 || ((insn >> 22) & 3) == 3)
2157 return 1;
2158 gen_op_iwmmxt_movq_M0_wRn(wrd);
2159 tmp = tcg_temp_new_i32();
2160 switch ((insn >> 22) & 3) {
2161 case 0:
2162 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
2163 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2164 if (insn & 8) {
2165 tcg_gen_ext8s_i32(tmp, tmp);
2166 } else {
2167 tcg_gen_andi_i32(tmp, tmp, 0xff);
2168 }
2169 break;
2170 case 1:
2171 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
2172 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2173 if (insn & 8) {
2174 tcg_gen_ext16s_i32(tmp, tmp);
2175 } else {
2176 tcg_gen_andi_i32(tmp, tmp, 0xffff);
2177 }
2178 break;
2179 case 2:
2180 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
2181 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
2182 break;
2183 }
2184 store_reg(s, rd, tmp);
2185 break;
2186 case 0x117: case 0x517: case 0x917: case 0xd17:
2187 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2188 return 1;
2189 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2190 switch ((insn >> 22) & 3) {
2191 case 0:
2192 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
2193 break;
2194 case 1:
2195 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
2196 break;
2197 case 2:
2198 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
2199 break;
2200 }
2201 tcg_gen_shli_i32(tmp, tmp, 28);
2202 gen_set_nzcv(tmp);
2203 tcg_temp_free_i32(tmp);
2204 break;
2205 case 0x401: case 0x405: case 0x409: case 0x40d:
2206 if (((insn >> 6) & 3) == 3)
2207 return 1;
2208 rd = (insn >> 12) & 0xf;
2209 wrd = (insn >> 16) & 0xf;
2210 tmp = load_reg(s, rd);
2211 switch ((insn >> 6) & 3) {
2212 case 0:
2213 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
2214 break;
2215 case 1:
2216 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
2217 break;
2218 case 2:
2219 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
2220 break;
2221 }
2222 tcg_temp_free_i32(tmp);
2223 gen_op_iwmmxt_movq_wRn_M0(wrd);
2224 gen_op_iwmmxt_set_mup();
2225 break;
2226 case 0x113: case 0x513: case 0x913: case 0xd13:
2227 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2228 return 1;
2229 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2230 tmp2 = tcg_temp_new_i32();
2231 tcg_gen_mov_i32(tmp2, tmp);
2232 switch ((insn >> 22) & 3) {
2233 case 0:
2234 for (i = 0; i < 7; i ++) {
2235 tcg_gen_shli_i32(tmp2, tmp2, 4);
2236 tcg_gen_and_i32(tmp, tmp, tmp2);
2237 }
2238 break;
2239 case 1:
2240 for (i = 0; i < 3; i ++) {
2241 tcg_gen_shli_i32(tmp2, tmp2, 8);
2242 tcg_gen_and_i32(tmp, tmp, tmp2);
2243 }
2244 break;
2245 case 2:
2246 tcg_gen_shli_i32(tmp2, tmp2, 16);
2247 tcg_gen_and_i32(tmp, tmp, tmp2);
2248 break;
2249 }
2250 gen_set_nzcv(tmp);
2251 tcg_temp_free_i32(tmp2);
2252 tcg_temp_free_i32(tmp);
2253 break;
2254 case 0x01c: case 0x41c: case 0x81c: case 0xc1c:
2255 wrd = (insn >> 12) & 0xf;
2256 rd0 = (insn >> 16) & 0xf;
2257 gen_op_iwmmxt_movq_M0_wRn(rd0);
2258 switch ((insn >> 22) & 3) {
2259 case 0:
2260 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
2261 break;
2262 case 1:
2263 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
2264 break;
2265 case 2:
2266 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
2267 break;
2268 case 3:
2269 return 1;
2270 }
2271 gen_op_iwmmxt_movq_wRn_M0(wrd);
2272 gen_op_iwmmxt_set_mup();
2273 break;
2274 case 0x115: case 0x515: case 0x915: case 0xd15:
2275 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2276 return 1;
2277 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2278 tmp2 = tcg_temp_new_i32();
2279 tcg_gen_mov_i32(tmp2, tmp);
2280 switch ((insn >> 22) & 3) {
2281 case 0:
2282 for (i = 0; i < 7; i ++) {
2283 tcg_gen_shli_i32(tmp2, tmp2, 4);
2284 tcg_gen_or_i32(tmp, tmp, tmp2);
2285 }
2286 break;
2287 case 1:
2288 for (i = 0; i < 3; i ++) {
2289 tcg_gen_shli_i32(tmp2, tmp2, 8);
2290 tcg_gen_or_i32(tmp, tmp, tmp2);
2291 }
2292 break;
2293 case 2:
2294 tcg_gen_shli_i32(tmp2, tmp2, 16);
2295 tcg_gen_or_i32(tmp, tmp, tmp2);
2296 break;
2297 }
2298 gen_set_nzcv(tmp);
2299 tcg_temp_free_i32(tmp2);
2300 tcg_temp_free_i32(tmp);
2301 break;
2302 case 0x103: case 0x503: case 0x903: case 0xd03:
2303 rd = (insn >> 12) & 0xf;
2304 rd0 = (insn >> 16) & 0xf;
2305 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
2306 return 1;
2307 gen_op_iwmmxt_movq_M0_wRn(rd0);
2308 tmp = tcg_temp_new_i32();
2309 switch ((insn >> 22) & 3) {
2310 case 0:
2311 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
2312 break;
2313 case 1:
2314 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
2315 break;
2316 case 2:
2317 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
2318 break;
2319 }
2320 store_reg(s, rd, tmp);
2321 break;
2322 case 0x106: case 0x306: case 0x506: case 0x706:
2323 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2324 wrd = (insn >> 12) & 0xf;
2325 rd0 = (insn >> 16) & 0xf;
2326 rd1 = (insn >> 0) & 0xf;
2327 gen_op_iwmmxt_movq_M0_wRn(rd0);
2328 switch ((insn >> 22) & 3) {
2329 case 0:
2330 if (insn & (1 << 21))
2331 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2332 else
2333 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2334 break;
2335 case 1:
2336 if (insn & (1 << 21))
2337 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2338 else
2339 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2340 break;
2341 case 2:
2342 if (insn & (1 << 21))
2343 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2344 else
2345 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2346 break;
2347 case 3:
2348 return 1;
2349 }
2350 gen_op_iwmmxt_movq_wRn_M0(wrd);
2351 gen_op_iwmmxt_set_mup();
2352 gen_op_iwmmxt_set_cup();
2353 break;
2354 case 0x00e: case 0x20e: case 0x40e: case 0x60e:
2355 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2356 wrd = (insn >> 12) & 0xf;
2357 rd0 = (insn >> 16) & 0xf;
2358 gen_op_iwmmxt_movq_M0_wRn(rd0);
2359 switch ((insn >> 22) & 3) {
2360 case 0:
2361 if (insn & (1 << 21))
2362 gen_op_iwmmxt_unpacklsb_M0();
2363 else
2364 gen_op_iwmmxt_unpacklub_M0();
2365 break;
2366 case 1:
2367 if (insn & (1 << 21))
2368 gen_op_iwmmxt_unpacklsw_M0();
2369 else
2370 gen_op_iwmmxt_unpackluw_M0();
2371 break;
2372 case 2:
2373 if (insn & (1 << 21))
2374 gen_op_iwmmxt_unpacklsl_M0();
2375 else
2376 gen_op_iwmmxt_unpacklul_M0();
2377 break;
2378 case 3:
2379 return 1;
2380 }
2381 gen_op_iwmmxt_movq_wRn_M0(wrd);
2382 gen_op_iwmmxt_set_mup();
2383 gen_op_iwmmxt_set_cup();
2384 break;
2385 case 0x00c: case 0x20c: case 0x40c: case 0x60c:
2386 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2387 wrd = (insn >> 12) & 0xf;
2388 rd0 = (insn >> 16) & 0xf;
2389 gen_op_iwmmxt_movq_M0_wRn(rd0);
2390 switch ((insn >> 22) & 3) {
2391 case 0:
2392 if (insn & (1 << 21))
2393 gen_op_iwmmxt_unpackhsb_M0();
2394 else
2395 gen_op_iwmmxt_unpackhub_M0();
2396 break;
2397 case 1:
2398 if (insn & (1 << 21))
2399 gen_op_iwmmxt_unpackhsw_M0();
2400 else
2401 gen_op_iwmmxt_unpackhuw_M0();
2402 break;
2403 case 2:
2404 if (insn & (1 << 21))
2405 gen_op_iwmmxt_unpackhsl_M0();
2406 else
2407 gen_op_iwmmxt_unpackhul_M0();
2408 break;
2409 case 3:
2410 return 1;
2411 }
2412 gen_op_iwmmxt_movq_wRn_M0(wrd);
2413 gen_op_iwmmxt_set_mup();
2414 gen_op_iwmmxt_set_cup();
2415 break;
2416 case 0x204: case 0x604: case 0xa04: case 0xe04:
2417 case 0x214: case 0x614: case 0xa14: case 0xe14:
2418 if (((insn >> 22) & 3) == 0)
2419 return 1;
2420 wrd = (insn >> 12) & 0xf;
2421 rd0 = (insn >> 16) & 0xf;
2422 gen_op_iwmmxt_movq_M0_wRn(rd0);
2423 tmp = tcg_temp_new_i32();
2424 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2425 tcg_temp_free_i32(tmp);
2426 return 1;
2427 }
2428 switch ((insn >> 22) & 3) {
2429 case 1:
2430 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
2431 break;
2432 case 2:
2433 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
2434 break;
2435 case 3:
2436 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
2437 break;
2438 }
2439 tcg_temp_free_i32(tmp);
2440 gen_op_iwmmxt_movq_wRn_M0(wrd);
2441 gen_op_iwmmxt_set_mup();
2442 gen_op_iwmmxt_set_cup();
2443 break;
2444 case 0x004: case 0x404: case 0x804: case 0xc04:
2445 case 0x014: case 0x414: case 0x814: case 0xc14:
2446 if (((insn >> 22) & 3) == 0)
2447 return 1;
2448 wrd = (insn >> 12) & 0xf;
2449 rd0 = (insn >> 16) & 0xf;
2450 gen_op_iwmmxt_movq_M0_wRn(rd0);
2451 tmp = tcg_temp_new_i32();
2452 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2453 tcg_temp_free_i32(tmp);
2454 return 1;
2455 }
2456 switch ((insn >> 22) & 3) {
2457 case 1:
2458 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
2459 break;
2460 case 2:
2461 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
2462 break;
2463 case 3:
2464 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
2465 break;
2466 }
2467 tcg_temp_free_i32(tmp);
2468 gen_op_iwmmxt_movq_wRn_M0(wrd);
2469 gen_op_iwmmxt_set_mup();
2470 gen_op_iwmmxt_set_cup();
2471 break;
2472 case 0x104: case 0x504: case 0x904: case 0xd04:
2473 case 0x114: case 0x514: case 0x914: case 0xd14:
2474 if (((insn >> 22) & 3) == 0)
2475 return 1;
2476 wrd = (insn >> 12) & 0xf;
2477 rd0 = (insn >> 16) & 0xf;
2478 gen_op_iwmmxt_movq_M0_wRn(rd0);
2479 tmp = tcg_temp_new_i32();
2480 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2481 tcg_temp_free_i32(tmp);
2482 return 1;
2483 }
2484 switch ((insn >> 22) & 3) {
2485 case 1:
2486 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
2487 break;
2488 case 2:
2489 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
2490 break;
2491 case 3:
2492 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
2493 break;
2494 }
2495 tcg_temp_free_i32(tmp);
2496 gen_op_iwmmxt_movq_wRn_M0(wrd);
2497 gen_op_iwmmxt_set_mup();
2498 gen_op_iwmmxt_set_cup();
2499 break;
2500 case 0x304: case 0x704: case 0xb04: case 0xf04:
2501 case 0x314: case 0x714: case 0xb14: case 0xf14:
2502 if (((insn >> 22) & 3) == 0)
2503 return 1;
2504 wrd = (insn >> 12) & 0xf;
2505 rd0 = (insn >> 16) & 0xf;
2506 gen_op_iwmmxt_movq_M0_wRn(rd0);
2507 tmp = tcg_temp_new_i32();
2508 switch ((insn >> 22) & 3) {
2509 case 1:
2510 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2511 tcg_temp_free_i32(tmp);
2512 return 1;
2513 }
2514 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
2515 break;
2516 case 2:
2517 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2518 tcg_temp_free_i32(tmp);
2519 return 1;
2520 }
2521 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
2522 break;
2523 case 3:
2524 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2525 tcg_temp_free_i32(tmp);
2526 return 1;
2527 }
2528 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
2529 break;
2530 }
2531 tcg_temp_free_i32(tmp);
2532 gen_op_iwmmxt_movq_wRn_M0(wrd);
2533 gen_op_iwmmxt_set_mup();
2534 gen_op_iwmmxt_set_cup();
2535 break;
2536 case 0x116: case 0x316: case 0x516: case 0x716:
2537 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2538 wrd = (insn >> 12) & 0xf;
2539 rd0 = (insn >> 16) & 0xf;
2540 rd1 = (insn >> 0) & 0xf;
2541 gen_op_iwmmxt_movq_M0_wRn(rd0);
2542 switch ((insn >> 22) & 3) {
2543 case 0:
2544 if (insn & (1 << 21))
2545 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2546 else
2547 gen_op_iwmmxt_minub_M0_wRn(rd1);
2548 break;
2549 case 1:
2550 if (insn & (1 << 21))
2551 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2552 else
2553 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2554 break;
2555 case 2:
2556 if (insn & (1 << 21))
2557 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2558 else
2559 gen_op_iwmmxt_minul_M0_wRn(rd1);
2560 break;
2561 case 3:
2562 return 1;
2563 }
2564 gen_op_iwmmxt_movq_wRn_M0(wrd);
2565 gen_op_iwmmxt_set_mup();
2566 break;
2567 case 0x016: case 0x216: case 0x416: case 0x616:
2568 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2569 wrd = (insn >> 12) & 0xf;
2570 rd0 = (insn >> 16) & 0xf;
2571 rd1 = (insn >> 0) & 0xf;
2572 gen_op_iwmmxt_movq_M0_wRn(rd0);
2573 switch ((insn >> 22) & 3) {
2574 case 0:
2575 if (insn & (1 << 21))
2576 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2577 else
2578 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2579 break;
2580 case 1:
2581 if (insn & (1 << 21))
2582 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2583 else
2584 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2585 break;
2586 case 2:
2587 if (insn & (1 << 21))
2588 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2589 else
2590 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2591 break;
2592 case 3:
2593 return 1;
2594 }
2595 gen_op_iwmmxt_movq_wRn_M0(wrd);
2596 gen_op_iwmmxt_set_mup();
2597 break;
2598 case 0x002: case 0x102: case 0x202: case 0x302:
2599 case 0x402: case 0x502: case 0x602: case 0x702:
2600 wrd = (insn >> 12) & 0xf;
2601 rd0 = (insn >> 16) & 0xf;
2602 rd1 = (insn >> 0) & 0xf;
2603 gen_op_iwmmxt_movq_M0_wRn(rd0);
2604 tmp = tcg_const_i32((insn >> 20) & 3);
2605 iwmmxt_load_reg(cpu_V1, rd1);
2606 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2607 tcg_temp_free_i32(tmp);
2608 gen_op_iwmmxt_movq_wRn_M0(wrd);
2609 gen_op_iwmmxt_set_mup();
2610 break;
2611 case 0x01a: case 0x11a: case 0x21a: case 0x31a:
2612 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2613 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2614 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2615 wrd = (insn >> 12) & 0xf;
2616 rd0 = (insn >> 16) & 0xf;
2617 rd1 = (insn >> 0) & 0xf;
2618 gen_op_iwmmxt_movq_M0_wRn(rd0);
2619 switch ((insn >> 20) & 0xf) {
2620 case 0x0:
2621 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2622 break;
2623 case 0x1:
2624 gen_op_iwmmxt_subub_M0_wRn(rd1);
2625 break;
2626 case 0x3:
2627 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2628 break;
2629 case 0x4:
2630 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2631 break;
2632 case 0x5:
2633 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2634 break;
2635 case 0x7:
2636 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2637 break;
2638 case 0x8:
2639 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2640 break;
2641 case 0x9:
2642 gen_op_iwmmxt_subul_M0_wRn(rd1);
2643 break;
2644 case 0xb:
2645 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2646 break;
2647 default:
2648 return 1;
2649 }
2650 gen_op_iwmmxt_movq_wRn_M0(wrd);
2651 gen_op_iwmmxt_set_mup();
2652 gen_op_iwmmxt_set_cup();
2653 break;
2654 case 0x01e: case 0x11e: case 0x21e: case 0x31e:
2655 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2656 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2657 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2658 wrd = (insn >> 12) & 0xf;
2659 rd0 = (insn >> 16) & 0xf;
2660 gen_op_iwmmxt_movq_M0_wRn(rd0);
2661 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2662 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
2663 tcg_temp_free_i32(tmp);
2664 gen_op_iwmmxt_movq_wRn_M0(wrd);
2665 gen_op_iwmmxt_set_mup();
2666 gen_op_iwmmxt_set_cup();
2667 break;
2668 case 0x018: case 0x118: case 0x218: case 0x318:
2669 case 0x418: case 0x518: case 0x618: case 0x718:
2670 case 0x818: case 0x918: case 0xa18: case 0xb18:
2671 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2672 wrd = (insn >> 12) & 0xf;
2673 rd0 = (insn >> 16) & 0xf;
2674 rd1 = (insn >> 0) & 0xf;
2675 gen_op_iwmmxt_movq_M0_wRn(rd0);
2676 switch ((insn >> 20) & 0xf) {
2677 case 0x0:
2678 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2679 break;
2680 case 0x1:
2681 gen_op_iwmmxt_addub_M0_wRn(rd1);
2682 break;
2683 case 0x3:
2684 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2685 break;
2686 case 0x4:
2687 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2688 break;
2689 case 0x5:
2690 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2691 break;
2692 case 0x7:
2693 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2694 break;
2695 case 0x8:
2696 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2697 break;
2698 case 0x9:
2699 gen_op_iwmmxt_addul_M0_wRn(rd1);
2700 break;
2701 case 0xb:
2702 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2703 break;
2704 default:
2705 return 1;
2706 }
2707 gen_op_iwmmxt_movq_wRn_M0(wrd);
2708 gen_op_iwmmxt_set_mup();
2709 gen_op_iwmmxt_set_cup();
2710 break;
2711 case 0x008: case 0x108: case 0x208: case 0x308:
2712 case 0x408: case 0x508: case 0x608: case 0x708:
2713 case 0x808: case 0x908: case 0xa08: case 0xb08:
2714 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2715 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2716 return 1;
2717 wrd = (insn >> 12) & 0xf;
2718 rd0 = (insn >> 16) & 0xf;
2719 rd1 = (insn >> 0) & 0xf;
2720 gen_op_iwmmxt_movq_M0_wRn(rd0);
2721 switch ((insn >> 22) & 3) {
2722 case 1:
2723 if (insn & (1 << 21))
2724 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2725 else
2726 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2727 break;
2728 case 2:
2729 if (insn & (1 << 21))
2730 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2731 else
2732 gen_op_iwmmxt_packul_M0_wRn(rd1);
2733 break;
2734 case 3:
2735 if (insn & (1 << 21))
2736 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2737 else
2738 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2739 break;
2740 }
2741 gen_op_iwmmxt_movq_wRn_M0(wrd);
2742 gen_op_iwmmxt_set_mup();
2743 gen_op_iwmmxt_set_cup();
2744 break;
2745 case 0x201: case 0x203: case 0x205: case 0x207:
2746 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2747 case 0x211: case 0x213: case 0x215: case 0x217:
2748 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2749 wrd = (insn >> 5) & 0xf;
2750 rd0 = (insn >> 12) & 0xf;
2751 rd1 = (insn >> 0) & 0xf;
2752 if (rd0 == 0xf || rd1 == 0xf)
2753 return 1;
2754 gen_op_iwmmxt_movq_M0_wRn(wrd);
2755 tmp = load_reg(s, rd0);
2756 tmp2 = load_reg(s, rd1);
2757 switch ((insn >> 16) & 0xf) {
2758 case 0x0:
2759 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2760 break;
2761 case 0x8:
2762 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2763 break;
2764 case 0xc: case 0xd: case 0xe: case 0xf:
2765 if (insn & (1 << 16))
2766 tcg_gen_shri_i32(tmp, tmp, 16);
2767 if (insn & (1 << 17))
2768 tcg_gen_shri_i32(tmp2, tmp2, 16);
2769 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2770 break;
2771 default:
2772 tcg_temp_free_i32(tmp2);
2773 tcg_temp_free_i32(tmp);
2774 return 1;
2775 }
2776 tcg_temp_free_i32(tmp2);
2777 tcg_temp_free_i32(tmp);
2778 gen_op_iwmmxt_movq_wRn_M0(wrd);
2779 gen_op_iwmmxt_set_mup();
2780 break;
2781 default:
2782 return 1;
2783 }
2784
2785 return 0;
2786}
2787
2788
2789
2790static int disas_dsp_insn(DisasContext *s, uint32_t insn)
2791{
2792 int acc, rd0, rd1, rdhi, rdlo;
2793 TCGv_i32 tmp, tmp2;
2794
2795 if ((insn & 0x0ff00f10) == 0x0e200010) {
2796
2797 rd0 = (insn >> 12) & 0xf;
2798 rd1 = insn & 0xf;
2799 acc = (insn >> 5) & 7;
2800
2801 if (acc != 0)
2802 return 1;
2803
2804 tmp = load_reg(s, rd0);
2805 tmp2 = load_reg(s, rd1);
2806 switch ((insn >> 16) & 0xf) {
2807 case 0x0:
2808 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2809 break;
2810 case 0x8:
2811 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2812 break;
2813 case 0xc:
2814 case 0xd:
2815 case 0xe:
2816 case 0xf:
2817 if (insn & (1 << 16))
2818 tcg_gen_shri_i32(tmp, tmp, 16);
2819 if (insn & (1 << 17))
2820 tcg_gen_shri_i32(tmp2, tmp2, 16);
2821 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2822 break;
2823 default:
2824 return 1;
2825 }
2826 tcg_temp_free_i32(tmp2);
2827 tcg_temp_free_i32(tmp);
2828
2829 gen_op_iwmmxt_movq_wRn_M0(acc);
2830 return 0;
2831 }
2832
2833 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2834
2835 rdhi = (insn >> 16) & 0xf;
2836 rdlo = (insn >> 12) & 0xf;
2837 acc = insn & 7;
2838
2839 if (acc != 0)
2840 return 1;
2841
2842 if (insn & ARM_CP_RW_BIT) {
2843 iwmmxt_load_reg(cpu_V0, acc);
2844 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
2845 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2846 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
2847 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
2848 } else {
2849 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2850 iwmmxt_store_reg(cpu_V0, acc);
2851 }
2852 return 0;
2853 }
2854
2855 return 1;
2856}
2857
/* Shift 'x' right by n bits, or left by -n bits when n is negative.
 * Used so one macro can handle register fields split either way.
 */
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
/* Extract a single-precision VFP register number: a 4-bit field at
 * 'bigbit' supplies bits [4:1] and the bit at 'smallbit' is bit 0.
 */
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
/* Extract a double-precision VFP register number into 'reg'.  With VFP3
 * the bit at 'smallbit' is bit 4 (32 D registers); without VFP3 that
 * bit must be zero, otherwise the insn is UNDEFINED (macro returns 1
 * from the enclosing decode function).
 */
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

/* Accessors for the Vd / Vn / Vm fields in their standard positions.  */
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2877
2878static void gen_neon_dup_low16(TCGv_i32 var)
2879{
2880 TCGv_i32 tmp = tcg_temp_new_i32();
2881 tcg_gen_ext16u_i32(var, var);
2882 tcg_gen_shli_i32(tmp, var, 16);
2883 tcg_gen_or_i32(var, var, tmp);
2884 tcg_temp_free_i32(tmp);
2885}
2886
2887static void gen_neon_dup_high16(TCGv_i32 var)
2888{
2889 TCGv_i32 tmp = tcg_temp_new_i32();
2890 tcg_gen_andi_i32(var, var, 0xffff0000);
2891 tcg_gen_shri_i32(tmp, var, 16);
2892 tcg_gen_or_i32(var, var, tmp);
2893 tcg_temp_free_i32(tmp);
2894}
2895
2896
2897
2898
2899
2900static int disas_vfp_insn(DisasContext *s, uint32_t insn)
2901{
2902 if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
2903 return 1;
2904 }
2905
2906
2907
2908
2909
2910
2911
2912 if (extract32(insn, 28, 4) == 0xf) {
2913 if (disas_vfp_uncond(s, insn)) {
2914 return 0;
2915 }
2916 } else {
2917 if (disas_vfp(s, insn)) {
2918 return 0;
2919 }
2920 }
2921
2922 return 1;
2923}
2924
2925static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
2926{
2927#ifndef CONFIG_USER_ONLY
2928 return (s->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
2929 ((s->pc - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
2930#else
2931 return true;
2932#endif
2933}
2934
/* Emit the indirect-jump op: look up a TB for the current CPU state
 * and chain to it if found, otherwise return to the main loop.
 */
static void gen_goto_ptr(void)
{
    tcg_gen_lookup_and_goto_ptr();
}
2939
2940
2941
2942
2943
/* Branch to 'dest', using direct TB chaining slot 'n' when the target
 * is reachable by a direct jump, else an indirect lookup.  Either way
 * the TB ends here (DISAS_NORETURN).
 */
static void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
{
    if (use_goto_tb(s, dest)) {
        /* The goto_tb op is emitted before the PC update so that state
         * is only committed on the direct-chained path.
         */
        tcg_gen_goto_tb(n);
        gen_set_pc_im(s, dest);
        tcg_gen_exit_tb(s->base.tb, n);
    } else {
        gen_set_pc_im(s, dest);
        gen_goto_ptr();
    }
    s->base.is_jmp = DISAS_NORETURN;
}
2956
2957static inline void gen_jmp (DisasContext *s, uint32_t dest)
2958{
2959 if (unlikely(is_singlestepping(s))) {
2960
2961 if (s->thumb)
2962 dest |= 1;
2963 gen_bx_im(s, dest);
2964 } else {
2965 gen_goto_tb(s, 0, dest);
2966 }
2967}
2968
2969static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
2970{
2971 if (x)
2972 tcg_gen_sari_i32(t0, t0, 16);
2973 else
2974 gen_sxth(t0);
2975 if (y)
2976 tcg_gen_sari_i32(t1, t1, 16);
2977 else
2978 gen_sxth(t1);
2979 tcg_gen_mul_i32(t0, t0, t1);
2980}
2981
2982
2983static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
2984{
2985 uint32_t mask;
2986
2987 mask = 0;
2988 if (flags & (1 << 0))
2989 mask |= 0xff;
2990 if (flags & (1 << 1))
2991 mask |= 0xff00;
2992 if (flags & (1 << 2))
2993 mask |= 0xff0000;
2994 if (flags & (1 << 3))
2995 mask |= 0xff000000;
2996
2997
2998 mask &= ~CPSR_RESERVED;
2999 if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
3000 mask &= ~CPSR_T;
3001 }
3002 if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
3003 mask &= ~CPSR_Q;
3004 }
3005 if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
3006 mask &= ~(CPSR_E | CPSR_GE);
3007 }
3008 if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
3009 mask &= ~CPSR_IT;
3010 }
3011
3012 if (!spsr) {
3013 mask &= ~(CPSR_EXEC | CPSR_RESERVED);
3014 }
3015
3016 if (IS_USER(s))
3017 mask &= CPSR_USER;
3018 return mask;
3019}
3020
3021
/* Copy the bits selected by 'mask' from t0 into the CPSR (spsr == 0)
 * or the current mode's SPSR (spsr != 0).  Frees t0 on success and
 * returns 0; returns 1 if the access is not permitted.
 * NOTE(review): t0 is not freed on the failure path — matches the
 * surrounding code's convention for UNDEF paths, but worth confirming.
 */
static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
{
    TCGv_i32 tmp;
    if (spsr) {
        /* SPSR writes are not permitted from user mode.  */
        if (IS_USER(s))
            return 1;

        /* Read-modify-write: keep the bits outside 'mask'.  */
        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, spsr);
    } else {
        gen_set_cpsr(t0, mask);
    }
    tcg_temp_free_i32(t0);
    /* CPSR/SPSR changes may affect how we translate; end the TB.  */
    gen_lookup_tb(s);
    return 0;
}
3042
3043
3044static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3045{
3046 TCGv_i32 tmp;
3047 tmp = tcg_temp_new_i32();
3048 tcg_gen_movi_i32(tmp, val);
3049 return gen_set_psr(s, mask, spsr, tmp);
3050}
3051
/* Decode the r:sysm fields of an MSR/MRS (banked register) access into
 * the owning CPU mode (*tgtmode) and a register index within that mode
 * (*regno): 8..14 for banked r8..r14, 16 for the SPSR, 17 for ELR_hyp.
 * Returns true if the access is permitted; otherwise generates an
 * UNDEF exception and returns false.
 */
static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
                                     int *tgtmode, int *regno)
{
    int exc_target = default_exception_el(s);

    /* These instructions are present only in ARMv8, or in ARMv7 with
     * the Virtualization Extensions.
     */
    if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
        !arm_dc_feature(s, ARM_FEATURE_EL2)) {
        goto undef;
    }

    if (IS_USER(s) || rn == 15) {
        goto undef;
    }

    if (r) {
        /* SPSRs for other modes */
        switch (sysm) {
        case 0xe:                   /* SPSR_fiq */
            *tgtmode = ARM_CPU_MODE_FIQ;
            break;
        case 0x10:                  /* SPSR_irq */
            *tgtmode = ARM_CPU_MODE_IRQ;
            break;
        case 0x12:                  /* SPSR_svc */
            *tgtmode = ARM_CPU_MODE_SVC;
            break;
        case 0x14:                  /* SPSR_abt */
            *tgtmode = ARM_CPU_MODE_ABT;
            break;
        case 0x16:                  /* SPSR_und */
            *tgtmode = ARM_CPU_MODE_UND;
            break;
        case 0x1c:                  /* SPSR_mon */
            *tgtmode = ARM_CPU_MODE_MON;
            break;
        case 0x1e:                  /* SPSR_hyp */
            *tgtmode = ARM_CPU_MODE_HYP;
            break;
        default:                    /* unallocated */
            goto undef;
        }
        /* We arbitrarily assign SPSR a register number of 16. */
        *regno = 16;
    } else {
        /* general purpose registers for other modes */
        switch (sysm) {
        case 0x0 ... 0x6:           /* r8_usr ... r14_usr */
            *tgtmode = ARM_CPU_MODE_USR;
            *regno = sysm + 8;
            break;
        case 0x8 ... 0xe:           /* r8_fiq ... r14_fiq */
            *tgtmode = ARM_CPU_MODE_FIQ;
            *regno = sysm;
            break;
        case 0x10 ... 0x11:         /* r14_irq, r13_irq */
            *tgtmode = ARM_CPU_MODE_IRQ;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x12 ... 0x13:         /* r14_svc, r13_svc */
            *tgtmode = ARM_CPU_MODE_SVC;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x14 ... 0x15:         /* r14_abt, r13_abt */
            *tgtmode = ARM_CPU_MODE_ABT;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x16 ... 0x17:         /* r14_und, r13_und */
            *tgtmode = ARM_CPU_MODE_UND;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x1c ... 0x1d:         /* r14_mon, r13_mon */
            *tgtmode = ARM_CPU_MODE_MON;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x1e ... 0x1f:         /* elr_hyp, r13_hyp */
            *tgtmode = ARM_CPU_MODE_HYP;
            /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
            *regno = sysm & 1 ? 13 : 17;
            break;
        default:                    /* unallocated */
            goto undef;
        }
    }

    /* Catch the 'accessing inaccessible register' cases we can detect
     * at translate time.
     */
    switch (*tgtmode) {
    case ARM_CPU_MODE_MON:
        if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->ns) {
            goto undef;
        }
        if (s->current_el == 1) {
            /* If we're in Secure EL1 then accesses to the Monitor
             * registers trap to EL3, not EL1.
             */
            exc_target = 3;
            goto undef;
        }
        break;
    case ARM_CPU_MODE_HYP:
        /* SPSR_hyp and r13_hyp can only be accessed from Monitor mode;
         * elr_hyp (regno 17) can also be accessed from Hyp mode, so
         * only forbid it from EL0/EL1.
         */
        if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 2 ||
            (s->current_el < 3 && *regno != 17)) {
            goto undef;
        }
        break;
    default:
        break;
    }

    return true;

undef:
    /* If we get here then some access check did not pass */
    gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), exc_target);
    return false;
}
3200
3201static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
3202{
3203 TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
3204 int tgtmode = 0, regno = 0;
3205
3206 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, ®no)) {
3207 return;
3208 }
3209
3210
3211 gen_set_condexec(s);
3212 gen_set_pc_im(s, s->pc - 4);
3213 tcg_reg = load_reg(s, rn);
3214 tcg_tgtmode = tcg_const_i32(tgtmode);
3215 tcg_regno = tcg_const_i32(regno);
3216 gen_helper_msr_banked(cpu_env, tcg_reg, tcg_tgtmode, tcg_regno);
3217 tcg_temp_free_i32(tcg_tgtmode);
3218 tcg_temp_free_i32(tcg_regno);
3219 tcg_temp_free_i32(tcg_reg);
3220 s->base.is_jmp = DISAS_UPDATE;
3221}
3222
3223static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
3224{
3225 TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
3226 int tgtmode = 0, regno = 0;
3227
3228 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, ®no)) {
3229 return;
3230 }
3231
3232
3233 gen_set_condexec(s);
3234 gen_set_pc_im(s, s->pc - 4);
3235 tcg_reg = tcg_temp_new_i32();
3236 tcg_tgtmode = tcg_const_i32(tgtmode);
3237 tcg_regno = tcg_const_i32(regno);
3238 gen_helper_mrs_banked(tcg_reg, cpu_env, tcg_tgtmode, tcg_regno);
3239 tcg_temp_free_i32(tcg_tgtmode);
3240 tcg_temp_free_i32(tcg_regno);
3241 store_reg(s, rn, tcg_reg);
3242 s->base.is_jmp = DISAS_UPDATE;
3243}
3244
3245
3246
3247
3248
/* Store value to PC as for an exception return, i.e. without masking
 * any low bits; the subsequent cpsr_write_eret helper call does the
 * masking based on the new Thumb bit.  Consumes 'pc'.
 */
static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc)
{
    tcg_gen_mov_i32(cpu_R[15], pc);
    tcg_temp_free_i32(pc);
}
3254
3255
/* Generate a v6 exception return: restore PC and CPSR together.
 * Consumes both 'pc' and 'cpsr'.
 */
static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
{
    store_pc_exc_ret(s, pc);
    /* The CPSR write can affect icount-sensitive state, so bracket it
     * with io_start/io_end when icount is in use.
     */
    if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_cpsr_write_eret(cpu_env, cpsr);
    if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
        gen_io_end();
    }
    tcg_temp_free_i32(cpsr);
    /* Must exit the execution loop so newly unmasked IRQs are checked.  */
    s->base.is_jmp = DISAS_EXIT;
}
3274
3275
/* Generate an old-style exception return: restore PC from 'pc' and the
 * CPSR from the current mode's SPSR.  Consumes 'pc'.
 */
static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
{
    gen_rfe(s, pc, load_cpu_field(spsr));
}
3280
3281
3282
3283
3284
3285
3286
3287
3288
3289static void gen_nop_hint(DisasContext *s, int val)
3290{
3291 switch (val) {
3292
3293
3294
3295
3296
3297 case 1:
3298 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
3299 gen_set_pc_im(s, s->pc);
3300 s->base.is_jmp = DISAS_YIELD;
3301 }
3302 break;
3303 case 3:
3304 gen_set_pc_im(s, s->pc);
3305 s->base.is_jmp = DISAS_WFI;
3306 break;
3307 case 2:
3308 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
3309 gen_set_pc_im(s, s->pc);
3310 s->base.is_jmp = DISAS_WFE;
3311 }
3312 break;
3313 case 4:
3314 case 5:
3315
3316 default:
3317 break;
3318 }
3319}
3320
/* Shorthand argument list: dest = V0, src1 = V0, src2 = V1.  */
#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
3322
3323static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
3324{
3325 switch (size) {
3326 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3327 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3328 case 2: tcg_gen_add_i32(t0, t0, t1); break;
3329 default: abort();
3330 }
3331}
3332
3333static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
3334{
3335 switch (size) {
3336 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3337 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3338 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
3339 default: return;
3340 }
3341}
3342
3343
/* The 32-bit pairwise min/max operations are plain integer min/max;
 * alias them to the generic TCG ops so the GEN_NEON_INTEGER_OP macros
 * can use a single naming scheme.
 */
#define gen_helper_neon_pmax_s32 tcg_gen_smax_i32
#define gen_helper_neon_pmax_u32 tcg_gen_umax_i32
#define gen_helper_neon_pmin_s32 tcg_gen_smin_i32
#define gen_helper_neon_pmin_u32 tcg_gen_umin_i32
3348
/* Dispatch a Neon integer helper that takes cpu_env, picking the
 * signed/unsigned 8/16/32-bit variant from the in-scope 'size' and 'u'
 * variables; operates on the in-scope temps 'tmp' and 'tmp2'.  Invalid
 * size/u combinations make the enclosing decode function return 1.
 */
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)

/* As GEN_NEON_INTEGER_OP_ENV, for helpers that do not take cpu_env.  */
#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
3394
3395static TCGv_i32 neon_load_scratch(int scratch)
3396{
3397 TCGv_i32 tmp = tcg_temp_new_i32();
3398 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3399 return tmp;
3400}
3401
3402static void neon_store_scratch(int scratch, TCGv_i32 var)
3403{
3404 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3405 tcg_temp_free_i32(var);
3406}
3407
3408static inline TCGv_i32 neon_get_scalar(int size, int reg)
3409{
3410 TCGv_i32 tmp;
3411 if (size == 1) {
3412 tmp = neon_load_reg(reg & 7, reg >> 4);
3413 if (reg & 8) {
3414 gen_neon_dup_high16(tmp);
3415 } else {
3416 gen_neon_dup_low16(tmp);
3417 }
3418 } else {
3419 tmp = neon_load_reg(reg & 15, reg >> 4);
3420 }
3421 return tmp;
3422}
3423
3424static int gen_neon_unzip(int rd, int rm, int size, int q)
3425{
3426 TCGv_ptr pd, pm;
3427
3428 if (!q && size == 2) {
3429 return 1;
3430 }
3431 pd = vfp_reg_ptr(true, rd);
3432 pm = vfp_reg_ptr(true, rm);
3433 if (q) {
3434 switch (size) {
3435 case 0:
3436 gen_helper_neon_qunzip8(pd, pm);
3437 break;
3438 case 1:
3439 gen_helper_neon_qunzip16(pd, pm);
3440 break;
3441 case 2:
3442 gen_helper_neon_qunzip32(pd, pm);
3443 break;
3444 default:
3445 abort();
3446 }
3447 } else {
3448 switch (size) {
3449 case 0:
3450 gen_helper_neon_unzip8(pd, pm);
3451 break;
3452 case 1:
3453 gen_helper_neon_unzip16(pd, pm);
3454 break;
3455 default:
3456 abort();
3457 }
3458 }
3459 tcg_temp_free_ptr(pd);
3460 tcg_temp_free_ptr(pm);
3461 return 0;
3462}
3463
3464static int gen_neon_zip(int rd, int rm, int size, int q)
3465{
3466 TCGv_ptr pd, pm;
3467
3468 if (!q && size == 2) {
3469 return 1;
3470 }
3471 pd = vfp_reg_ptr(true, rd);
3472 pm = vfp_reg_ptr(true, rm);
3473 if (q) {
3474 switch (size) {
3475 case 0:
3476 gen_helper_neon_qzip8(pd, pm);
3477 break;
3478 case 1:
3479 gen_helper_neon_qzip16(pd, pm);
3480 break;
3481 case 2:
3482 gen_helper_neon_qzip32(pd, pm);
3483 break;
3484 default:
3485 abort();
3486 }
3487 } else {
3488 switch (size) {
3489 case 0:
3490 gen_helper_neon_zip8(pd, pm);
3491 break;
3492 case 1:
3493 gen_helper_neon_zip16(pd, pm);
3494 break;
3495 default:
3496 abort();
3497 }
3498 }
3499 tcg_temp_free_ptr(pd);
3500 tcg_temp_free_ptr(pm);
3501 return 0;
3502}
3503
3504static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
3505{
3506 TCGv_i32 rd, tmp;
3507
3508 rd = tcg_temp_new_i32();
3509 tmp = tcg_temp_new_i32();
3510
3511 tcg_gen_shli_i32(rd, t0, 8);
3512 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3513 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3514 tcg_gen_or_i32(rd, rd, tmp);
3515
3516 tcg_gen_shri_i32(t1, t1, 8);
3517 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3518 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3519 tcg_gen_or_i32(t1, t1, tmp);
3520 tcg_gen_mov_i32(t0, rd);
3521
3522 tcg_temp_free_i32(tmp);
3523 tcg_temp_free_i32(rd);
3524}
3525
3526static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
3527{
3528 TCGv_i32 rd, tmp;
3529
3530 rd = tcg_temp_new_i32();
3531 tmp = tcg_temp_new_i32();
3532
3533 tcg_gen_shli_i32(rd, t0, 16);
3534 tcg_gen_andi_i32(tmp, t1, 0xffff);
3535 tcg_gen_or_i32(rd, rd, tmp);
3536 tcg_gen_shri_i32(t1, t1, 16);
3537 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3538 tcg_gen_or_i32(t1, t1, tmp);
3539 tcg_gen_mov_i32(t0, rd);
3540
3541 tcg_temp_free_i32(tmp);
3542 tcg_temp_free_i32(rd);
3543}
3544
3545
/* Per-'op' parameters for the Neon "load/store multiple structures"
 * encodings, as consumed by disas_neon_ls_insn: 'nregs' is the outer
 * register-pass count, 'interleave' the number of registers accessed
 * per element, and 'spacing' the D-register stride between them.
 */
static struct {
    int nregs;
    int interleave;
    int spacing;
} const neon_ls_element_type[11] = {
    {1, 4, 1},
    {1, 4, 2},
    {4, 1, 1},
    {2, 2, 2},
    {1, 3, 1},
    {1, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {1, 2, 1},
    {1, 2, 2},
    {2, 1, 1}
};
3563
3564
3565
/* Translate a NEON load/store element instruction.  Return nonzero if
 * the instruction is invalid.
 */
static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
{
    int rd, rn, rm;
    int op;
    int nregs;
    int interleave;
    int spacing;
    int stride;
    int size;
    int reg;
    int load;
    int n;
    int vec_size;
    int mmu_idx;
    TCGMemOp endian;
    TCGv_i32 addr;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;
    TCGv_i64 tmp64;

    /* FIXME: this access check should not take precedence over UNDEF
     * for invalid encodings; we will generate incorrect syndrome
     * information for attempts to execute invalid vfp/neon encodings
     * with FP disabled.
     */
    if (s->fp_excp_el) {
        gen_exception_insn(s, 4, EXCP_UDEF,
                           syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
        return 0;
    }

    if (!s->vfp_enabled)
      return 1;
    VFP_DREG_D(rd, insn);
    rn = (insn >> 16) & 0xf;
    rm = insn & 0xf;
    load = (insn & (1 << 21)) != 0;
    endian = s->be_data;
    mmu_idx = get_mem_index(s);
    if ((insn & (1 << 23)) == 0) {
        /* Load store all elements.  */
        op = (insn >> 8) & 0xf;
        size = (insn >> 6) & 3;
        if (op > 10)
            return 1;
        /* Catch UNDEF cases for bad values of align field */
        switch (op & 0xc) {
        case 4:
            if (((insn >> 5) & 1) == 1) {
                return 1;
            }
            break;
        case 8:
            if (((insn >> 4) & 3) == 3) {
                return 1;
            }
            break;
        default:
            break;
        }
        nregs = neon_ls_element_type[op].nregs;
        interleave = neon_ls_element_type[op].interleave;
        spacing = neon_ls_element_type[op].spacing;
        if (size == 3 && (interleave | spacing) != 1) {
            return 1;
        }
        /* For our purposes, bytes are always little-endian.  */
        if (size == 0) {
            endian = MO_LE;
        }
        /* Consecutive little-endian elements from a single register
         * can be promoted to a larger little-endian operation.
         */
        if (interleave == 1 && endian == MO_LE) {
            size = 3;
        }
        tmp64 = tcg_temp_new_i64();
        addr = tcg_temp_new_i32();
        tmp2 = tcg_const_i32(1 << size);
        load_reg_var(s, addr, rn);
        for (reg = 0; reg < nregs; reg++) {
            for (n = 0; n < 8 >> size; n++) {
                int xs;
                for (xs = 0; xs < interleave; xs++) {
                    int tt = rd + reg + spacing * xs;

                    if (load) {
                        gen_aa32_ld_i64(s, tmp64, addr, mmu_idx, endian | size);
                        neon_store_element64(tt, n, size, tmp64);
                    } else {
                        neon_load_element64(tmp64, tt, n, size);
                        gen_aa32_st_i64(s, tmp64, addr, mmu_idx, endian | size);
                    }
                    tcg_gen_add_i32(addr, addr, tmp2);
                }
            }
        }
        tcg_temp_free_i32(addr);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i64(tmp64);
        stride = nregs * interleave * 8;
    } else {
        size = (insn >> 10) & 3;
        if (size == 3) {
            /* Load single element to all lanes.  */
            int a = (insn >> 4) & 1;
            if (!load) {
                return 1;
            }
            size = (insn >> 6) & 3;
            nregs = ((insn >> 8) & 3) + 1;

            if (size == 3) {
                if (nregs != 4 || a == 0) {
                    return 1;
                }
                /* For VLD4 size == 3, a == 1 means 32 bits at 16 byte
                 * alignment.
                 */
                size = 2;
            }
            if (nregs == 1 && a == 1 && size == 0) {
                return 1;
            }
            if (nregs == 3 && a == 1) {
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);

            /* VLD1 to all lanes: bit 5 indicates how many Dregs to write;
             * VLD2/3/4 to all lanes: bit 5 indicates register stride.
             */
            stride = (insn & (1 << 5)) ? 2 : 1;
            vec_size = nregs == 1 ? stride * 8 : 8;

            tmp = tcg_temp_new_i32();
            for (reg = 0; reg < nregs; reg++) {
                gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
                                s->be_data | size);
                if ((rd & 1) && vec_size == 16) {
                    /* We cannot write 16 bytes at once because the
                     * destination is unaligned.
                     */
                    tcg_gen_gvec_dup_i32(size, neon_reg_offset(rd, 0),
                                         8, 8, tmp);
                    tcg_gen_gvec_mov(0, neon_reg_offset(rd + 1, 0),
                                     neon_reg_offset(rd, 0), 8, 8);
                } else {
                    tcg_gen_gvec_dup_i32(size, neon_reg_offset(rd, 0),
                                         vec_size, vec_size, tmp);
                }
                tcg_gen_addi_i32(addr, addr, 1 << size);
                rd += stride;
            }
            tcg_temp_free_i32(tmp);
            tcg_temp_free_i32(addr);
            stride = (1 << size) * nregs;
        } else {
            /* Single element.  */
            int idx = (insn >> 4) & 0xf;
            int reg_idx;
            switch (size) {
            case 0:
                reg_idx = (insn >> 5) & 7;
                stride = 1;
                break;
            case 1:
                reg_idx = (insn >> 6) & 3;
                stride = (insn & (1 << 5)) ? 2 : 1;
                break;
            case 2:
                reg_idx = (insn >> 7) & 1;
                stride = (insn & (1 << 6)) ? 2 : 1;
                break;
            default:
                abort();
            }
            nregs = ((insn >> 8) & 3) + 1;
            /* Catch the UNDEF cases. This is unavoidably a bit messy. */
            switch (nregs) {
            case 1:
                if (((idx & (1 << size)) != 0) ||
                    (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
                    return 1;
                }
                break;
            case 3:
                if ((idx & 1) != 0) {
                    return 1;
                }
                /* fall through */
            case 2:
                if (size == 2 && (idx & 2) != 0) {
                    return 1;
                }
                break;
            case 4:
                if ((size == 2) && ((idx & 3) == 3)) {
                    return 1;
                }
                break;
            default:
                abort();
            }
            if ((rd + stride * (nregs - 1)) > 31) {
                /* Attempts to write off the end of the register file
                 * are UNPREDICTABLE; we choose to UNDEF because otherwise
                 * the neon_load_reg() would write off the end of the array.
                 */
                return 1;
            }
            tmp = tcg_temp_new_i32();
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            for (reg = 0; reg < nregs; reg++) {
                if (load) {
                    gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
                                    s->be_data | size);
                    neon_store_element(rd, reg_idx, size, tmp);
                } else {
                    neon_load_element(tmp, rd, reg_idx, size);
                    gen_aa32_st_i32(s, tmp, addr, get_mem_index(s),
                                    s->be_data | size);
                }
                rd += stride;
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            tcg_temp_free_i32(addr);
            tcg_temp_free_i32(tmp);
            stride = nregs * (1 << size);
        }
    }
    /* Writeback: rm == 15 means none, rm == 13 post-index by the
     * transfer size, otherwise post-index by register rm.
     */
    if (rm != 15) {
        TCGv_i32 base;

        base = load_reg(s, rn);
        if (rm == 13) {
            tcg_gen_addi_i32(base, base, stride);
        } else {
            TCGv_i32 index;
            index = load_reg(s, rm);
            tcg_gen_add_i32(base, base, index);
            tcg_temp_free_i32(index);
        }
        store_reg(s, rn, base);
    }
    return 0;
}
3812
3813static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
3814{
3815 switch (size) {
3816 case 0: gen_helper_neon_narrow_u8(dest, src); break;
3817 case 1: gen_helper_neon_narrow_u16(dest, src); break;
3818 case 2: tcg_gen_extrl_i64_i32(dest, src); break;
3819 default: abort();
3820 }
3821}
3822
3823static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
3824{
3825 switch (size) {
3826 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
3827 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
3828 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
3829 default: abort();
3830 }
3831}
3832
3833static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
3834{
3835 switch (size) {
3836 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
3837 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
3838 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
3839 default: abort();
3840 }
3841}
3842
3843static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
3844{
3845 switch (size) {
3846 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
3847 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
3848 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
3849 default: abort();
3850 }
3851}
3852
3853static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
3854 int q, int u)
3855{
3856 if (q) {
3857 if (u) {
3858 switch (size) {
3859 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
3860 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
3861 default: abort();
3862 }
3863 } else {
3864 switch (size) {
3865 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
3866 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
3867 default: abort();
3868 }
3869 }
3870 } else {
3871 if (u) {
3872 switch (size) {
3873 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
3874 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
3875 default: abort();
3876 }
3877 } else {
3878 switch (size) {
3879 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
3880 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
3881 default: abort();
3882 }
3883 }
3884 }
3885}
3886
3887static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
3888{
3889 if (u) {
3890 switch (size) {
3891 case 0: gen_helper_neon_widen_u8(dest, src); break;
3892 case 1: gen_helper_neon_widen_u16(dest, src); break;
3893 case 2: tcg_gen_extu_i32_i64(dest, src); break;
3894 default: abort();
3895 }
3896 } else {
3897 switch (size) {
3898 case 0: gen_helper_neon_widen_s8(dest, src); break;
3899 case 1: gen_helper_neon_widen_s16(dest, src); break;
3900 case 2: tcg_gen_ext_i32_i64(dest, src); break;
3901 default: abort();
3902 }
3903 }
3904 tcg_temp_free_i32(src);
3905}
3906
3907static inline void gen_neon_addl(int size)
3908{
3909 switch (size) {
3910 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
3911 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
3912 case 2: tcg_gen_add_i64(CPU_V001); break;
3913 default: abort();
3914 }
3915}
3916
3917static inline void gen_neon_subl(int size)
3918{
3919 switch (size) {
3920 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
3921 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
3922 case 2: tcg_gen_sub_i64(CPU_V001); break;
3923 default: abort();
3924 }
3925}
3926
3927static inline void gen_neon_negl(TCGv_i64 var, int size)
3928{
3929 switch (size) {
3930 case 0: gen_helper_neon_negl_u16(var, var); break;
3931 case 1: gen_helper_neon_negl_u32(var, var); break;
3932 case 2:
3933 tcg_gen_neg_i64(var, var);
3934 break;
3935 default: abort();
3936 }
3937}
3938
3939static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
3940{
3941 switch (size) {
3942 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
3943 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
3944 default: abort();
3945 }
3946}
3947
/* Widening multiply: dest = a * b with double-width elements, picking
 * the signed/unsigned 8/16/32-bit variant from size and u.
 */
static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
                                 int size, int u)
{
    TCGv_i64 tmp;

    switch ((size << 1) | u) {
    case 0: gen_helper_neon_mull_s8(dest, a, b); break;
    case 1: gen_helper_neon_mull_u8(dest, a, b); break;
    case 2: gen_helper_neon_mull_s16(dest, a, b); break;
    case 3: gen_helper_neon_mull_u16(dest, a, b); break;
    case 4:
        tmp = gen_muls_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 5:
        tmp = gen_mulu_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    default: abort();
    }

    /* gen_helper_neon_mull_[su]{8|16} do not free their parameters,
     * so release them here (the 32-bit paths above consumed them).
     */
    if (size < 2) {
        tcg_temp_free_i32(a);
        tcg_temp_free_i32(b);
    }
}
3978
3979static void gen_neon_narrow_op(int op, int u, int size,
3980 TCGv_i32 dest, TCGv_i64 src)
3981{
3982 if (op) {
3983 if (u) {
3984 gen_neon_unarrow_sats(size, dest, src);
3985 } else {
3986 gen_neon_narrow(size, dest, src);
3987 }
3988 } else {
3989 if (u) {
3990 gen_neon_narrow_satu(size, dest, src);
3991 } else {
3992 gen_neon_narrow_sats(size, dest, src);
3993 }
3994 }
3995}
3996
3997
3998
3999
4000
/* Symbolic constants for the op field of Neon "3 registers, same length"
 * instructions, derived in disas_neon_data_insn from insn bits [11:8,4].
 * Entries whose names contain two mnemonics cover both encodings; the U
 * bit and/or size field select between them at decode time.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLS */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD_VQRDMLAH 23
#define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
#define NEON_3R_VFM_VQRDMLSH 25 /* VFMA, VFMS, VQRDMLSH */
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX, VPMIN, VPMAX */
#define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */
4033
/* For each NEON_3R_* op, a bitmask of the legal values of the size
 * field: bit n set means size value n is allowed.  So 0xf = all sizes,
 * 0x7 = 8/16/32-bit only, 0x6 = 16/32-bit only, 0x5 = the float
 * encodings (size bit 0 distinguishes sub-cases), 0x4 = 32-bit only.
 */
static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf,
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD_VQRDMLAH] = 0x7,
    [NEON_3R_SHA] = 0xf, /* size field encodes op type */
    [NEON_3R_VFM_VQRDMLSH] = 0x7, /* For VFM, size bit 1 encodes op */
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
};
4068
4069
4070
4071
4072
/* Symbolic constants for the op field of Neon "2 registers,
 * miscellaneous" instructions, derived in disas_neon_data_insn from
 * insn bits [17:16,10:7].  Gaps (3, 29) are unallocated encodings.
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_AESE 6 /* Includes AESD */
#define NEON_2RM_AESMC 7 /* Includes AESIMC */
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_SHA1H 21
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
#define NEON_2RM_VRINTN 40
#define NEON_2RM_VRINTX 41
#define NEON_2RM_VRINTA 42
#define NEON_2RM_VRINTZ 43
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VRINTM 45
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRINTP 47
#define NEON_2RM_VCVTAU 48
#define NEON_2RM_VCVTAS 49
#define NEON_2RM_VCVTNU 50
#define NEON_2RM_VCVTNS 51
#define NEON_2RM_VCVTPU 52
#define NEON_2RM_VCVTPS 53
#define NEON_2RM_VCVTMU 54
#define NEON_2RM_VCVTMS 55
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63
4135
4136static bool neon_2rm_is_v8_op(int op)
4137{
4138
4139 switch (op) {
4140 case NEON_2RM_VRINTN:
4141 case NEON_2RM_VRINTA:
4142 case NEON_2RM_VRINTM:
4143 case NEON_2RM_VRINTP:
4144 case NEON_2RM_VRINTZ:
4145 case NEON_2RM_VRINTX:
4146 case NEON_2RM_VCVTAU:
4147 case NEON_2RM_VCVTAS:
4148 case NEON_2RM_VCVTNU:
4149 case NEON_2RM_VCVTNS:
4150 case NEON_2RM_VCVTPU:
4151 case NEON_2RM_VCVTPS:
4152 case NEON_2RM_VCVTMU:
4153 case NEON_2RM_VCVTMS:
4154 return true;
4155 default:
4156 return false;
4157 }
4158}
4159
4160
4161
4162
4163
/* For each NEON_2RM_* op, a bitmask of the legal values of the size
 * field: bit n set means size value n is allowed (same convention as
 * neon_3r_sizes above).  0x4 = 32-bit only, 0x2 = 16-bit only,
 * 0x1 = 8-bit only.
 */
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_AESE] = 0x1,
    [NEON_2RM_AESMC] = 0x1,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_SHA1H] = 0x4,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_SHA1SU1] = 0x4,
    [NEON_2RM_VRINTN] = 0x4,
    [NEON_2RM_VRINTX] = 0x4,
    [NEON_2RM_VRINTA] = 0x4,
    [NEON_2RM_VRINTZ] = 0x4,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VRINTM] = 0x4,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRINTP] = 0x4,
    [NEON_2RM_VCVTAU] = 0x4,
    [NEON_2RM_VCVTAS] = 0x4,
    [NEON_2RM_VCVTNU] = 0x4,
    [NEON_2RM_VCVTNS] = 0x4,
    [NEON_2RM_VCVTPU] = 0x4,
    [NEON_2RM_VCVTPS] = 0x4,
    [NEON_2RM_VCVTMU] = 0x4,
    [NEON_2RM_VCVTMS] = 0x4,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};
4228
4229
4230
4231static int do_v81_helper(DisasContext *s, gen_helper_gvec_3_ptr *fn,
4232 int q, int rd, int rn, int rm)
4233{
4234 if (dc_isar_feature(aa32_rdm, s)) {
4235 int opr_sz = (1 + q) * 8;
4236 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
4237 vfp_reg_offset(1, rn),
4238 vfp_reg_offset(1, rm), cpu_env,
4239 opr_sz, opr_sz, 0, fn);
4240 return 0;
4241 }
4242 return 1;
4243}
4244
/* VSSRA: signed shift right by immediate and accumulate, d += a >> shift
 * per element.  The 8/16-bit _i64 forms operate on all lanes packed in
 * one i64 via the tcg_gen_vec_* helpers.  All forms clobber A.
 */
static void gen_ssra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_sar8i_i64(a, a, shift);
    tcg_gen_vec_add8_i64(d, d, a);
}

static void gen_ssra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_sar16i_i64(a, a, shift);
    tcg_gen_vec_add16_i64(d, d, a);
}

static void gen_ssra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_sari_i32(a, a, shift);
    tcg_gen_add_i32(d, d, a);
}

static void gen_ssra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_sari_i64(a, a, shift);
    tcg_gen_add_i64(d, d, a);
}

/* True-vector form, element size selected by VECE. */
static void gen_ssra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    tcg_gen_sari_vec(vece, a, a, sh);
    tcg_gen_add_vec(vece, d, d, a);
}

/* Vector opcodes the .fniv expansion requires of the backend. */
static const TCGOpcode vecop_list_ssra[] = {
    INDEX_op_sari_vec, INDEX_op_add_vec, 0
};

/* gvec expander set for VSSRA, indexed by element size MO_8..MO_64.
 * load_dest is set because the destination is an accumulator.
 */
const GVecGen2i ssra_op[4] = {
    { .fni8 = gen_ssra8_i64,
      .fniv = gen_ssra_vec,
      .load_dest = true,
      .opt_opc = vecop_list_ssra,
      .vece = MO_8 },
    { .fni8 = gen_ssra16_i64,
      .fniv = gen_ssra_vec,
      .load_dest = true,
      .opt_opc = vecop_list_ssra,
      .vece = MO_16 },
    { .fni4 = gen_ssra32_i32,
      .fniv = gen_ssra_vec,
      .load_dest = true,
      .opt_opc = vecop_list_ssra,
      .vece = MO_32 },
    { .fni8 = gen_ssra64_i64,
      .fniv = gen_ssra_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .opt_opc = vecop_list_ssra,
      .load_dest = true,
      .vece = MO_64 },
};
4302
/* VUSRA: unsigned shift right by immediate and accumulate,
 * d += a >> shift per element (logical shift).  Layout mirrors the
 * VSSRA helpers above; all forms clobber A.
 */
static void gen_usra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_shr8i_i64(a, a, shift);
    tcg_gen_vec_add8_i64(d, d, a);
}

static void gen_usra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_shr16i_i64(a, a, shift);
    tcg_gen_vec_add16_i64(d, d, a);
}

static void gen_usra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_shri_i32(a, a, shift);
    tcg_gen_add_i32(d, d, a);
}

static void gen_usra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_shri_i64(a, a, shift);
    tcg_gen_add_i64(d, d, a);
}

/* True-vector form, element size selected by VECE. */
static void gen_usra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    tcg_gen_shri_vec(vece, a, a, sh);
    tcg_gen_add_vec(vece, d, d, a);
}

/* Vector opcodes the .fniv expansion requires of the backend. */
static const TCGOpcode vecop_list_usra[] = {
    INDEX_op_shri_vec, INDEX_op_add_vec, 0
};

/* gvec expander set for VUSRA; load_dest because D accumulates. */
const GVecGen2i usra_op[4] = {
    { .fni8 = gen_usra8_i64,
      .fniv = gen_usra_vec,
      .load_dest = true,
      .opt_opc = vecop_list_usra,
      .vece = MO_8, },
    { .fni8 = gen_usra16_i64,
      .fniv = gen_usra_vec,
      .load_dest = true,
      .opt_opc = vecop_list_usra,
      .vece = MO_16, },
    { .fni4 = gen_usra32_i32,
      .fniv = gen_usra_vec,
      .load_dest = true,
      .opt_opc = vecop_list_usra,
      .vece = MO_32, },
    { .fni8 = gen_usra64_i64,
      .fniv = gen_usra_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opt_opc = vecop_list_usra,
      .vece = MO_64, },
};
4360
/* VSRI: shift right by immediate and insert.  Each lane of D keeps its
 * top SHIFT bits and takes the rest from A >> SHIFT.  For the packed
 * 8/16-bit _i64 forms the mask also discards bits shifted in from the
 * neighbouring lane.  A shift equal to the element size yields a zero
 * mask and leaves D unchanged.
 */
static void gen_shr8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_8, 0xff >> shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shri_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shr16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_16, 0xffff >> shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shri_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

/* 32-bit VSRI via deposit.  NOTE(review): shift == 32 would give a
 * zero-length deposit; presumably the decoder handles shift == esize
 * without reaching this function — confirm against the caller.
 */
static void gen_shr32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_shri_i32(a, a, shift);
    tcg_gen_deposit_i32(d, d, a, 0, 32 - shift);
}

/* 64-bit VSRI via deposit; same shift == esize caveat as above. */
static void gen_shr64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_shri_i64(a, a, shift);
    tcg_gen_deposit_i64(d, d, a, 0, 64 - shift);
}

/* True-vector VSRI: mask M selects the top SH bits of each lane, which
 * are kept from D; the rest come from A >> SH.  SH == 0 is a plain move.
 */
static void gen_shr_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    if (sh == 0) {
        tcg_gen_mov_vec(d, a);
    } else {
        TCGv_vec t = tcg_temp_new_vec_matching(d);
        TCGv_vec m = tcg_temp_new_vec_matching(d);

        tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK((8 << vece) - sh, sh));
        tcg_gen_shri_vec(vece, t, a, sh);
        tcg_gen_and_vec(vece, d, d, m);
        tcg_gen_or_vec(vece, d, d, t);

        tcg_temp_free_vec(t);
        tcg_temp_free_vec(m);
    }
}

/* Vector opcodes the .fniv expansion requires of the backend. */
static const TCGOpcode vecop_list_sri[] = { INDEX_op_shri_vec, 0 };

/* gvec expander set for VSRI; load_dest because D is partially kept. */
const GVecGen2i sri_op[4] = {
    { .fni8 = gen_shr8_ins_i64,
      .fniv = gen_shr_ins_vec,
      .load_dest = true,
      .opt_opc = vecop_list_sri,
      .vece = MO_8 },
    { .fni8 = gen_shr16_ins_i64,
      .fniv = gen_shr_ins_vec,
      .load_dest = true,
      .opt_opc = vecop_list_sri,
      .vece = MO_16 },
    { .fni4 = gen_shr32_ins_i32,
      .fniv = gen_shr_ins_vec,
      .load_dest = true,
      .opt_opc = vecop_list_sri,
      .vece = MO_32 },
    { .fni8 = gen_shr64_ins_i64,
      .fniv = gen_shr_ins_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opt_opc = vecop_list_sri,
      .vece = MO_64 },
};
4440
/* VSLI: shift left by immediate and insert.  Each lane of D keeps its
 * low SHIFT bits and takes the rest from A << SHIFT.  For the packed
 * 8/16-bit _i64 forms the mask also discards bits shifted in from the
 * neighbouring lane.
 */
static void gen_shl8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_8, 0xff << shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shl16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_16, 0xffff << shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

/* 32-bit VSLI: deposit A into D starting at bit SHIFT. */
static void gen_shl32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_deposit_i32(d, d, a, shift, 32 - shift);
}

/* 64-bit VSLI: deposit A into D starting at bit SHIFT. */
static void gen_shl64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_deposit_i64(d, d, a, shift, 64 - shift);
}

/* True-vector VSLI: mask M selects the low SH bits of each lane, which
 * are kept from D; the rest come from A << SH.  SH == 0 is a plain move.
 */
static void gen_shl_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    if (sh == 0) {
        tcg_gen_mov_vec(d, a);
    } else {
        TCGv_vec t = tcg_temp_new_vec_matching(d);
        TCGv_vec m = tcg_temp_new_vec_matching(d);

        tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK(0, sh));
        tcg_gen_shli_vec(vece, t, a, sh);
        tcg_gen_and_vec(vece, d, d, m);
        tcg_gen_or_vec(vece, d, d, t);

        tcg_temp_free_vec(t);
        tcg_temp_free_vec(m);
    }
}

/* Vector opcodes the .fniv expansion requires of the backend. */
static const TCGOpcode vecop_list_sli[] = { INDEX_op_shli_vec, 0 };

/* gvec expander set for VSLI; load_dest because D is partially kept. */
const GVecGen2i sli_op[4] = {
    { .fni8 = gen_shl8_ins_i64,
      .fniv = gen_shl_ins_vec,
      .load_dest = true,
      .opt_opc = vecop_list_sli,
      .vece = MO_8 },
    { .fni8 = gen_shl16_ins_i64,
      .fniv = gen_shl_ins_vec,
      .load_dest = true,
      .opt_opc = vecop_list_sli,
      .vece = MO_16 },
    { .fni4 = gen_shl32_ins_i32,
      .fniv = gen_shl_ins_vec,
      .load_dest = true,
      .opt_opc = vecop_list_sli,
      .vece = MO_32 },
    { .fni8 = gen_shl64_ins_i64,
      .fniv = gen_shl_ins_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opt_opc = vecop_list_sli,
      .vece = MO_64 },
};
4518
/* VMLA/VMLS per-element helpers: d += a * b (mla) or d -= a * b (mls).
 * The 8/16-bit forms operate on lanes packed in an i32 via neon
 * helpers; all forms clobber A as scratch for the product.
 */
static void gen_mla8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u8(a, a, b);
    gen_helper_neon_add_u8(d, d, a);
}

static void gen_mls8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u8(a, a, b);
    gen_helper_neon_sub_u8(d, d, a);
}

static void gen_mla16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u16(a, a, b);
    gen_helper_neon_add_u16(d, d, a);
}

static void gen_mls16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u16(a, a, b);
    gen_helper_neon_sub_u16(d, d, a);
}

static void gen_mla32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_mul_i32(a, a, b);
    tcg_gen_add_i32(d, d, a);
}

static void gen_mls32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_mul_i32(a, a, b);
    tcg_gen_sub_i32(d, d, a);
}

static void gen_mla64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_mul_i64(a, a, b);
    tcg_gen_add_i64(d, d, a);
}

static void gen_mls64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_mul_i64(a, a, b);
    tcg_gen_sub_i64(d, d, a);
}

/* True-vector multiply-accumulate, element size selected by VECE. */
static void gen_mla_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_mul_vec(vece, a, a, b);
    tcg_gen_add_vec(vece, d, d, a);
}

/* True-vector multiply-subtract, element size selected by VECE. */
static void gen_mls_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_mul_vec(vece, a, a, b);
    tcg_gen_sub_vec(vece, d, d, a);
}
4578
4579
4580
4581
4582
/* Vector opcodes required by the .fniv expansions below. */
static const TCGOpcode vecop_list_mla[] = {
    INDEX_op_mul_vec, INDEX_op_add_vec, 0
};

static const TCGOpcode vecop_list_mls[] = {
    INDEX_op_mul_vec, INDEX_op_sub_vec, 0
};

/* gvec expander sets for VMLA/VMLS, indexed by element size
 * MO_8..MO_64; load_dest because D accumulates.
 */
const GVecGen3 mla_op[4] = {
    { .fni4 = gen_mla8_i32,
      .fniv = gen_mla_vec,
      .load_dest = true,
      .opt_opc = vecop_list_mla,
      .vece = MO_8 },
    { .fni4 = gen_mla16_i32,
      .fniv = gen_mla_vec,
      .load_dest = true,
      .opt_opc = vecop_list_mla,
      .vece = MO_16 },
    { .fni4 = gen_mla32_i32,
      .fniv = gen_mla_vec,
      .load_dest = true,
      .opt_opc = vecop_list_mla,
      .vece = MO_32 },
    { .fni8 = gen_mla64_i64,
      .fniv = gen_mla_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opt_opc = vecop_list_mla,
      .vece = MO_64 },
};

const GVecGen3 mls_op[4] = {
    { .fni4 = gen_mls8_i32,
      .fniv = gen_mls_vec,
      .load_dest = true,
      .opt_opc = vecop_list_mls,
      .vece = MO_8 },
    { .fni4 = gen_mls16_i32,
      .fniv = gen_mls_vec,
      .load_dest = true,
      .opt_opc = vecop_list_mls,
      .vece = MO_16 },
    { .fni4 = gen_mls32_i32,
      .fniv = gen_mls_vec,
      .load_dest = true,
      .opt_opc = vecop_list_mls,
      .vece = MO_32 },
    { .fni8 = gen_mls64_i64,
      .fniv = gen_mls_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opt_opc = vecop_list_mls,
      .vece = MO_64 },
};
4638
4639
/* CMTST (VTST): d = (a & b) != 0 ? all-ones : 0, per element. */
static void gen_cmtst_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_and_i32(d, a, b);
    tcg_gen_setcondi_i32(TCG_COND_NE, d, d, 0);
    tcg_gen_neg_i32(d, d);        /* 0/1 -> 0/-1 */
}

/* Non-static: also used by the AArch64 translator. */
void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_and_i64(d, a, b);
    tcg_gen_setcondi_i64(TCG_COND_NE, d, d, 0);
    tcg_gen_neg_i64(d, d);        /* 0/1 -> 0/-1 */
}

/* Vector form; note it clobbers A (reused to hold the zero vector). */
static void gen_cmtst_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_and_vec(vece, d, a, b);
    tcg_gen_dupi_vec(vece, a, 0);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, d, d, a);
}

/* Vector opcodes the .fniv expansion requires of the backend. */
static const TCGOpcode vecop_list_cmtst[] = { INDEX_op_cmp_vec, 0 };

/* gvec expander set for VTST, indexed by element size. */
const GVecGen3 cmtst_op[4] = {
    { .fni4 = gen_helper_neon_tst_u8,
      .fniv = gen_cmtst_vec,
      .opt_opc = vecop_list_cmtst,
      .vece = MO_8 },
    { .fni4 = gen_helper_neon_tst_u16,
      .fniv = gen_cmtst_vec,
      .opt_opc = vecop_list_cmtst,
      .vece = MO_16 },
    { .fni4 = gen_cmtst_i32,
      .fniv = gen_cmtst_vec,
      .opt_opc = vecop_list_cmtst,
      .vece = MO_32 },
    { .fni8 = gen_cmtst_i64,
      .fniv = gen_cmtst_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .opt_opc = vecop_list_cmtst,
      .vece = MO_64 },
};
4682
/* VQADD (unsigned): t = usat(a + b).  X holds the wrapping sum; any
 * lane where it differs from the saturated result has saturated, and
 * those (all-ones) lanes are OR-ed into SAT — the vfp.qc accumulator,
 * wired in via .write_aofs by the caller (see NEON_3R_VQADD).
 */
static void gen_uqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);
    tcg_gen_add_vec(vece, x, a, b);
    tcg_gen_usadd_vec(vece, t, a, b);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
    tcg_gen_or_vec(vece, sat, sat, x);
    tcg_temp_free_vec(x);
}

/* Vector opcodes the .fniv expansion requires of the backend. */
static const TCGOpcode vecop_list_uqadd[] = {
    INDEX_op_usadd_vec, INDEX_op_cmp_vec, INDEX_op_add_vec, 0
};

/* gvec expander set for unsigned VQADD, indexed by element size.
 * write_aofs makes the expander pass the qc flag as the SAT operand.
 */
const GVecGen4 uqadd_op[4] = {
    { .fniv = gen_uqadd_vec,
      .fno = gen_helper_gvec_uqadd_b,
      .write_aofs = true,
      .opt_opc = vecop_list_uqadd,
      .vece = MO_8 },
    { .fniv = gen_uqadd_vec,
      .fno = gen_helper_gvec_uqadd_h,
      .write_aofs = true,
      .opt_opc = vecop_list_uqadd,
      .vece = MO_16 },
    { .fniv = gen_uqadd_vec,
      .fno = gen_helper_gvec_uqadd_s,
      .write_aofs = true,
      .opt_opc = vecop_list_uqadd,
      .vece = MO_32 },
    { .fniv = gen_uqadd_vec,
      .fno = gen_helper_gvec_uqadd_d,
      .write_aofs = true,
      .opt_opc = vecop_list_uqadd,
      .vece = MO_64 },
};
4720
/* VQADD (signed): as gen_uqadd_vec above, but with signed saturating
 * add; lanes that saturated are OR-ed into SAT (the qc accumulator).
 */
static void gen_sqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);
    tcg_gen_add_vec(vece, x, a, b);
    tcg_gen_ssadd_vec(vece, t, a, b);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
    tcg_gen_or_vec(vece, sat, sat, x);
    tcg_temp_free_vec(x);
}

/* Vector opcodes the .fniv expansion requires of the backend. */
static const TCGOpcode vecop_list_sqadd[] = {
    INDEX_op_ssadd_vec, INDEX_op_cmp_vec, INDEX_op_add_vec, 0
};

/* gvec expander set for signed VQADD, indexed by element size. */
const GVecGen4 sqadd_op[4] = {
    { .fniv = gen_sqadd_vec,
      .fno = gen_helper_gvec_sqadd_b,
      .opt_opc = vecop_list_sqadd,
      .write_aofs = true,
      .vece = MO_8 },
    { .fniv = gen_sqadd_vec,
      .fno = gen_helper_gvec_sqadd_h,
      .opt_opc = vecop_list_sqadd,
      .write_aofs = true,
      .vece = MO_16 },
    { .fniv = gen_sqadd_vec,
      .fno = gen_helper_gvec_sqadd_s,
      .opt_opc = vecop_list_sqadd,
      .write_aofs = true,
      .vece = MO_32 },
    { .fniv = gen_sqadd_vec,
      .fno = gen_helper_gvec_sqadd_d,
      .opt_opc = vecop_list_sqadd,
      .write_aofs = true,
      .vece = MO_64 },
};
4758
/* VQSUB (unsigned): t = usat(a - b); lanes where the wrapping
 * difference X differs from the saturated result are OR-ed into SAT
 * (the qc accumulator).
 */
static void gen_uqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);
    tcg_gen_sub_vec(vece, x, a, b);
    tcg_gen_ussub_vec(vece, t, a, b);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
    tcg_gen_or_vec(vece, sat, sat, x);
    tcg_temp_free_vec(x);
}

/* Vector opcodes the .fniv expansion requires of the backend. */
static const TCGOpcode vecop_list_uqsub[] = {
    INDEX_op_ussub_vec, INDEX_op_cmp_vec, INDEX_op_sub_vec, 0
};

/* gvec expander set for unsigned VQSUB, indexed by element size. */
const GVecGen4 uqsub_op[4] = {
    { .fniv = gen_uqsub_vec,
      .fno = gen_helper_gvec_uqsub_b,
      .opt_opc = vecop_list_uqsub,
      .write_aofs = true,
      .vece = MO_8 },
    { .fniv = gen_uqsub_vec,
      .fno = gen_helper_gvec_uqsub_h,
      .opt_opc = vecop_list_uqsub,
      .write_aofs = true,
      .vece = MO_16 },
    { .fniv = gen_uqsub_vec,
      .fno = gen_helper_gvec_uqsub_s,
      .opt_opc = vecop_list_uqsub,
      .write_aofs = true,
      .vece = MO_32 },
    { .fniv = gen_uqsub_vec,
      .fno = gen_helper_gvec_uqsub_d,
      .opt_opc = vecop_list_uqsub,
      .write_aofs = true,
      .vece = MO_64 },
};
4796
/* VQSUB (signed): as gen_uqsub_vec above, but with signed saturating
 * subtract; lanes that saturated are OR-ed into SAT (the qc
 * accumulator).
 */
static void gen_sqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);
    tcg_gen_sub_vec(vece, x, a, b);
    tcg_gen_sssub_vec(vece, t, a, b);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
    tcg_gen_or_vec(vece, sat, sat, x);
    tcg_temp_free_vec(x);
}

/* Vector opcodes the .fniv expansion requires of the backend. */
static const TCGOpcode vecop_list_sqsub[] = {
    INDEX_op_sssub_vec, INDEX_op_cmp_vec, INDEX_op_sub_vec, 0
};

/* gvec expander set for signed VQSUB, indexed by element size. */
const GVecGen4 sqsub_op[4] = {
    { .fniv = gen_sqsub_vec,
      .fno = gen_helper_gvec_sqsub_b,
      .opt_opc = vecop_list_sqsub,
      .write_aofs = true,
      .vece = MO_8 },
    { .fniv = gen_sqsub_vec,
      .fno = gen_helper_gvec_sqsub_h,
      .opt_opc = vecop_list_sqsub,
      .write_aofs = true,
      .vece = MO_16 },
    { .fniv = gen_sqsub_vec,
      .fno = gen_helper_gvec_sqsub_s,
      .opt_opc = vecop_list_sqsub,
      .write_aofs = true,
      .vece = MO_32 },
    { .fniv = gen_sqsub_vec,
      .fno = gen_helper_gvec_sqsub_d,
      .opt_opc = vecop_list_sqsub,
      .write_aofs = true,
      .vece = MO_64 },
};
4834
4835
4836
4837
4838
4839
4840static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
4841{
4842 int op;
4843 int q;
4844 int rd, rn, rm, rd_ofs, rn_ofs, rm_ofs;
4845 int size;
4846 int shift;
4847 int pass;
4848 int count;
4849 int pairwise;
4850 int u;
4851 int vec_size;
4852 uint32_t imm;
4853 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
4854 TCGv_ptr ptr1, ptr2, ptr3;
4855 TCGv_i64 tmp64;
4856
4857
4858
4859
4860
4861 if (s->fp_excp_el) {
4862 gen_exception_insn(s, 4, EXCP_UDEF,
4863 syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
4864 return 0;
4865 }
4866
4867 if (!s->vfp_enabled)
4868 return 1;
4869 q = (insn & (1 << 6)) != 0;
4870 u = (insn >> 24) & 1;
4871 VFP_DREG_D(rd, insn);
4872 VFP_DREG_N(rn, insn);
4873 VFP_DREG_M(rm, insn);
4874 size = (insn >> 20) & 3;
4875 vec_size = q ? 16 : 8;
4876 rd_ofs = neon_reg_offset(rd, 0);
4877 rn_ofs = neon_reg_offset(rn, 0);
4878 rm_ofs = neon_reg_offset(rm, 0);
4879
4880 if ((insn & (1 << 23)) == 0) {
4881
4882 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
4883
4884 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
4885 return 1;
4886 }
4887
4888
4889
4890 if (q && ((rd | rn | rm) & 1)) {
4891 return 1;
4892 }
4893 switch (op) {
4894 case NEON_3R_SHA:
4895
4896
4897
4898
4899
4900 if (!q) {
4901 return 1;
4902 }
4903 if (!u) {
4904 if (!dc_isar_feature(aa32_sha1, s)) {
4905 return 1;
4906 }
4907 ptr1 = vfp_reg_ptr(true, rd);
4908 ptr2 = vfp_reg_ptr(true, rn);
4909 ptr3 = vfp_reg_ptr(true, rm);
4910 tmp4 = tcg_const_i32(size);
4911 gen_helper_crypto_sha1_3reg(ptr1, ptr2, ptr3, tmp4);
4912 tcg_temp_free_i32(tmp4);
4913 } else {
4914 if (!dc_isar_feature(aa32_sha2, s) || size == 3) {
4915 return 1;
4916 }
4917 ptr1 = vfp_reg_ptr(true, rd);
4918 ptr2 = vfp_reg_ptr(true, rn);
4919 ptr3 = vfp_reg_ptr(true, rm);
4920 switch (size) {
4921 case 0:
4922 gen_helper_crypto_sha256h(ptr1, ptr2, ptr3);
4923 break;
4924 case 1:
4925 gen_helper_crypto_sha256h2(ptr1, ptr2, ptr3);
4926 break;
4927 case 2:
4928 gen_helper_crypto_sha256su1(ptr1, ptr2, ptr3);
4929 break;
4930 }
4931 }
4932 tcg_temp_free_ptr(ptr1);
4933 tcg_temp_free_ptr(ptr2);
4934 tcg_temp_free_ptr(ptr3);
4935 return 0;
4936
4937 case NEON_3R_VPADD_VQRDMLAH:
4938 if (!u) {
4939 break;
4940 }
4941
4942 switch (size) {
4943 case 1:
4944 return do_v81_helper(s, gen_helper_gvec_qrdmlah_s16,
4945 q, rd, rn, rm);
4946 case 2:
4947 return do_v81_helper(s, gen_helper_gvec_qrdmlah_s32,
4948 q, rd, rn, rm);
4949 }
4950 return 1;
4951
4952 case NEON_3R_VFM_VQRDMLSH:
4953 if (!u) {
4954
4955 if (size == 1) {
4956 return 1;
4957 }
4958 break;
4959 }
4960
4961 switch (size) {
4962 case 1:
4963 return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s16,
4964 q, rd, rn, rm);
4965 case 2:
4966 return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s32,
4967 q, rd, rn, rm);
4968 }
4969 return 1;
4970
4971 case NEON_3R_LOGIC:
4972 switch ((u << 2) | size) {
4973 case 0:
4974 tcg_gen_gvec_and(0, rd_ofs, rn_ofs, rm_ofs,
4975 vec_size, vec_size);
4976 break;
4977 case 1:
4978 tcg_gen_gvec_andc(0, rd_ofs, rn_ofs, rm_ofs,
4979 vec_size, vec_size);
4980 break;
4981 case 2:
4982 tcg_gen_gvec_or(0, rd_ofs, rn_ofs, rm_ofs,
4983 vec_size, vec_size);
4984 break;
4985 case 3:
4986 tcg_gen_gvec_orc(0, rd_ofs, rn_ofs, rm_ofs,
4987 vec_size, vec_size);
4988 break;
4989 case 4:
4990 tcg_gen_gvec_xor(0, rd_ofs, rn_ofs, rm_ofs,
4991 vec_size, vec_size);
4992 break;
4993 case 5:
4994 tcg_gen_gvec_bitsel(MO_8, rd_ofs, rd_ofs, rn_ofs, rm_ofs,
4995 vec_size, vec_size);
4996 break;
4997 case 6:
4998 tcg_gen_gvec_bitsel(MO_8, rd_ofs, rm_ofs, rn_ofs, rd_ofs,
4999 vec_size, vec_size);
5000 break;
5001 case 7:
5002 tcg_gen_gvec_bitsel(MO_8, rd_ofs, rm_ofs, rd_ofs, rn_ofs,
5003 vec_size, vec_size);
5004 break;
5005 }
5006 return 0;
5007
5008 case NEON_3R_VADD_VSUB:
5009 if (u) {
5010 tcg_gen_gvec_sub(size, rd_ofs, rn_ofs, rm_ofs,
5011 vec_size, vec_size);
5012 } else {
5013 tcg_gen_gvec_add(size, rd_ofs, rn_ofs, rm_ofs,
5014 vec_size, vec_size);
5015 }
5016 return 0;
5017
5018 case NEON_3R_VQADD:
5019 tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
5020 rn_ofs, rm_ofs, vec_size, vec_size,
5021 (u ? uqadd_op : sqadd_op) + size);
5022 return 0;
5023
5024 case NEON_3R_VQSUB:
5025 tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
5026 rn_ofs, rm_ofs, vec_size, vec_size,
5027 (u ? uqsub_op : sqsub_op) + size);
5028 return 0;
5029
5030 case NEON_3R_VMUL:
5031 if (u) {
5032
5033 if (size != 0) {
5034 return 1;
5035 }
5036 } else {
5037 tcg_gen_gvec_mul(size, rd_ofs, rn_ofs, rm_ofs,
5038 vec_size, vec_size);
5039 return 0;
5040 }
5041 break;
5042
5043 case NEON_3R_VML:
5044 tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size,
5045 u ? &mls_op[size] : &mla_op[size]);
5046 return 0;
5047
5048 case NEON_3R_VTST_VCEQ:
5049 if (u) {
5050 tcg_gen_gvec_cmp(TCG_COND_EQ, size, rd_ofs, rn_ofs, rm_ofs,
5051 vec_size, vec_size);
5052 } else {
5053 tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs,
5054 vec_size, vec_size, &cmtst_op[size]);
5055 }
5056 return 0;
5057
5058 case NEON_3R_VCGT:
5059 tcg_gen_gvec_cmp(u ? TCG_COND_GTU : TCG_COND_GT, size,
5060 rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size);
5061 return 0;
5062
5063 case NEON_3R_VCGE:
5064 tcg_gen_gvec_cmp(u ? TCG_COND_GEU : TCG_COND_GE, size,
5065 rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size);
5066 return 0;
5067
5068 case NEON_3R_VMAX:
5069 if (u) {
5070 tcg_gen_gvec_umax(size, rd_ofs, rn_ofs, rm_ofs,
5071 vec_size, vec_size);
5072 } else {
5073 tcg_gen_gvec_smax(size, rd_ofs, rn_ofs, rm_ofs,
5074 vec_size, vec_size);
5075 }
5076 return 0;
5077 case NEON_3R_VMIN:
5078 if (u) {
5079 tcg_gen_gvec_umin(size, rd_ofs, rn_ofs, rm_ofs,
5080 vec_size, vec_size);
5081 } else {
5082 tcg_gen_gvec_smin(size, rd_ofs, rn_ofs, rm_ofs,
5083 vec_size, vec_size);
5084 }
5085 return 0;
5086 }
5087
5088 if (size == 3) {
5089
5090 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5091 neon_load_reg64(cpu_V0, rn + pass);
5092 neon_load_reg64(cpu_V1, rm + pass);
5093 switch (op) {
5094 case NEON_3R_VSHL:
5095 if (u) {
5096 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
5097 } else {
5098 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
5099 }
5100 break;
5101 case NEON_3R_VQSHL:
5102 if (u) {
5103 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5104 cpu_V1, cpu_V0);
5105 } else {
5106 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5107 cpu_V1, cpu_V0);
5108 }
5109 break;
5110 case NEON_3R_VRSHL:
5111 if (u) {
5112 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
5113 } else {
5114 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
5115 }
5116 break;
5117 case NEON_3R_VQRSHL:
5118 if (u) {
5119 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
5120 cpu_V1, cpu_V0);
5121 } else {
5122 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
5123 cpu_V1, cpu_V0);
5124 }
5125 break;
5126 default:
5127 abort();
5128 }
5129 neon_store_reg64(cpu_V0, rd + pass);
5130 }
5131 return 0;
5132 }
5133 pairwise = 0;
5134 switch (op) {
5135 case NEON_3R_VSHL:
5136 case NEON_3R_VQSHL:
5137 case NEON_3R_VRSHL:
5138 case NEON_3R_VQRSHL:
5139 {
5140 int rtmp;
5141
5142 rtmp = rn;
5143 rn = rm;
5144 rm = rtmp;
5145 }
5146 break;
5147 case NEON_3R_VPADD_VQRDMLAH:
5148 case NEON_3R_VPMAX:
5149 case NEON_3R_VPMIN:
5150 pairwise = 1;
5151 break;
5152 case NEON_3R_FLOAT_ARITH:
5153 pairwise = (u && size < 2);
5154 break;
5155 case NEON_3R_FLOAT_MINMAX:
5156 pairwise = u;
5157 break;
5158 case NEON_3R_FLOAT_CMP:
5159 if (!u && size) {
5160
5161 return 1;
5162 }
5163 break;
5164 case NEON_3R_FLOAT_ACMP:
5165 if (!u) {
5166 return 1;
5167 }
5168 break;
5169 case NEON_3R_FLOAT_MISC:
5170
5171 if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) {
5172 return 1;
5173 }
5174 break;
5175 case NEON_3R_VFM_VQRDMLSH:
5176 if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
5177 return 1;
5178 }
5179 break;
5180 default:
5181 break;
5182 }
5183
5184 if (pairwise && q) {
5185
5186 return 1;
5187 }
5188
5189 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5190
5191 if (pairwise) {
5192
5193 if (pass < 1) {
5194 tmp = neon_load_reg(rn, 0);
5195 tmp2 = neon_load_reg(rn, 1);
5196 } else {
5197 tmp = neon_load_reg(rm, 0);
5198 tmp2 = neon_load_reg(rm, 1);
5199 }
5200 } else {
5201
5202 tmp = neon_load_reg(rn, pass);
5203 tmp2 = neon_load_reg(rm, pass);
5204 }
5205 switch (op) {
5206 case NEON_3R_VHADD:
5207 GEN_NEON_INTEGER_OP(hadd);
5208 break;
5209 case NEON_3R_VRHADD:
5210 GEN_NEON_INTEGER_OP(rhadd);
5211 break;
5212 case NEON_3R_VHSUB:
5213 GEN_NEON_INTEGER_OP(hsub);
5214 break;
5215 case NEON_3R_VSHL:
5216 GEN_NEON_INTEGER_OP(shl);
5217 break;
5218 case NEON_3R_VQSHL:
5219 GEN_NEON_INTEGER_OP_ENV(qshl);
5220 break;
5221 case NEON_3R_VRSHL:
5222 GEN_NEON_INTEGER_OP(rshl);
5223 break;
5224 case NEON_3R_VQRSHL:
5225 GEN_NEON_INTEGER_OP_ENV(qrshl);
5226 break;
5227 case NEON_3R_VABD:
5228 GEN_NEON_INTEGER_OP(abd);
5229 break;
5230 case NEON_3R_VABA:
5231 GEN_NEON_INTEGER_OP(abd);
5232 tcg_temp_free_i32(tmp2);
5233 tmp2 = neon_load_reg(rd, pass);
5234 gen_neon_add(size, tmp, tmp2);
5235 break;
5236 case NEON_3R_VMUL:
5237
5238 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
5239 break;
5240 case NEON_3R_VPMAX:
5241 GEN_NEON_INTEGER_OP(pmax);
5242 break;
5243 case NEON_3R_VPMIN:
5244 GEN_NEON_INTEGER_OP(pmin);
5245 break;
5246 case NEON_3R_VQDMULH_VQRDMULH:
5247 if (!u) {
5248 switch (size) {
5249 case 1:
5250 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5251 break;
5252 case 2:
5253 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5254 break;
5255 default: abort();
5256 }
5257 } else {
5258 switch (size) {
5259 case 1:
5260 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5261 break;
5262 case 2:
5263 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5264 break;
5265 default: abort();
5266 }
5267 }
5268 break;
5269 case NEON_3R_VPADD_VQRDMLAH:
5270 switch (size) {
5271 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
5272 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
5273 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
5274 default: abort();
5275 }
5276 break;
5277 case NEON_3R_FLOAT_ARITH:
5278 {
5279 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5280 switch ((u << 2) | size) {
5281 case 0:
5282 case 4:
5283 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5284 break;
5285 case 2:
5286 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
5287 break;
5288 case 6:
5289 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
5290 break;
5291 default:
5292 abort();
5293 }
5294 tcg_temp_free_ptr(fpstatus);
5295 break;
5296 }
5297 case NEON_3R_FLOAT_MULTIPLY:
5298 {
5299 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5300 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
5301 if (!u) {
5302 tcg_temp_free_i32(tmp2);
5303 tmp2 = neon_load_reg(rd, pass);
5304 if (size == 0) {
5305 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5306 } else {
5307 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
5308 }
5309 }
5310 tcg_temp_free_ptr(fpstatus);
5311 break;
5312 }
5313 case NEON_3R_FLOAT_CMP:
5314 {
5315 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5316 if (!u) {
5317 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
5318 } else {
5319 if (size == 0) {
5320 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
5321 } else {
5322 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
5323 }
5324 }
5325 tcg_temp_free_ptr(fpstatus);
5326 break;
5327 }
5328 case NEON_3R_FLOAT_ACMP:
5329 {
5330 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5331 if (size == 0) {
5332 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
5333 } else {
5334 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
5335 }
5336 tcg_temp_free_ptr(fpstatus);
5337 break;
5338 }
5339 case NEON_3R_FLOAT_MINMAX:
5340 {
5341 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5342 if (size == 0) {
5343 gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
5344 } else {
5345 gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
5346 }
5347 tcg_temp_free_ptr(fpstatus);
5348 break;
5349 }
5350 case NEON_3R_FLOAT_MISC:
5351 if (u) {
5352
5353 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5354 if (size == 0) {
5355 gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
5356 } else {
5357 gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
5358 }
5359 tcg_temp_free_ptr(fpstatus);
5360 } else {
5361 if (size == 0) {
5362 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
5363 } else {
5364 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
5365 }
5366 }
5367 break;
5368 case NEON_3R_VFM_VQRDMLSH:
5369 {
5370
5371 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5372 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
5373 if (size) {
5374
5375 gen_helper_vfp_negs(tmp, tmp);
5376 }
5377 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
5378 tcg_temp_free_i32(tmp3);
5379 tcg_temp_free_ptr(fpstatus);
5380 break;
5381 }
5382 default:
5383 abort();
5384 }
5385 tcg_temp_free_i32(tmp2);
5386
5387
5388
5389
5390 if (pairwise && rd == rm) {
5391 neon_store_scratch(pass, tmp);
5392 } else {
5393 neon_store_reg(rd, pass, tmp);
5394 }
5395
5396 }
5397 if (pairwise && rd == rm) {
5398 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5399 tmp = neon_load_scratch(pass);
5400 neon_store_reg(rd, pass, tmp);
5401 }
5402 }
5403
5404 } else if (insn & (1 << 4)) {
5405 if ((insn & 0x00380080) != 0) {
5406
5407 op = (insn >> 8) & 0xf;
5408 if (insn & (1 << 7)) {
5409
5410 if (op > 7) {
5411 return 1;
5412 }
5413 size = 3;
5414 } else {
5415 size = 2;
5416 while ((insn & (1 << (size + 19))) == 0)
5417 size--;
5418 }
5419 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
5420 if (op < 8) {
5421
5422
5423 if (q && ((rd | rm) & 1)) {
5424 return 1;
5425 }
5426 if (!u && (op == 4 || op == 6)) {
5427 return 1;
5428 }
5429
5430
5431 if (op <= 4) {
5432 shift = shift - (1 << (size + 3));
5433 }
5434
5435 switch (op) {
5436 case 0:
5437
5438 shift = -shift;
5439
5440
5441
5442
5443 if (!u) {
5444 tcg_gen_gvec_sari(size, rd_ofs, rm_ofs,
5445 MIN(shift, (8 << size) - 1),
5446 vec_size, vec_size);
5447 } else if (shift >= 8 << size) {
5448 tcg_gen_gvec_dup8i(rd_ofs, vec_size, vec_size, 0);
5449 } else {
5450 tcg_gen_gvec_shri(size, rd_ofs, rm_ofs, shift,
5451 vec_size, vec_size);
5452 }
5453 return 0;
5454
5455 case 1:
5456
5457 shift = -shift;
5458
5459
5460
5461
5462 if (!u) {
5463 tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
5464 MIN(shift, (8 << size) - 1),
5465 &ssra_op[size]);
5466 } else if (shift >= 8 << size) {
5467
5468 } else {
5469 tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
5470 shift, &usra_op[size]);
5471 }
5472 return 0;
5473
5474 case 4:
5475 if (!u) {
5476 return 1;
5477 }
5478
5479 shift = -shift;
5480
5481 if (shift < 8 << size) {
5482 tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
5483 shift, &sri_op[size]);
5484 }
5485 return 0;
5486
5487 case 5:
5488 if (u) {
5489
5490 if (shift < 8 << size) {
5491 tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size,
5492 vec_size, shift, &sli_op[size]);
5493 }
5494 } else {
5495
5496
5497
5498 if (shift >= 8 << size) {
5499 tcg_gen_gvec_dup8i(rd_ofs, vec_size, vec_size, 0);
5500 } else {
5501 tcg_gen_gvec_shli(size, rd_ofs, rm_ofs, shift,
5502 vec_size, vec_size);
5503 }
5504 }
5505 return 0;
5506 }
5507
5508 if (size == 3) {
5509 count = q + 1;
5510 } else {
5511 count = q ? 4: 2;
5512 }
5513
5514
5515
5516
5517 imm = dup_const(size, shift);
5518
5519 for (pass = 0; pass < count; pass++) {
5520 if (size == 3) {
5521 neon_load_reg64(cpu_V0, rm + pass);
5522 tcg_gen_movi_i64(cpu_V1, imm);
5523 switch (op) {
5524 case 2:
5525 case 3:
5526 if (u)
5527 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
5528 else
5529 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
5530 break;
5531 case 6:
5532 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
5533 cpu_V0, cpu_V1);
5534 break;
5535 case 7:
5536 if (u) {
5537 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5538 cpu_V0, cpu_V1);
5539 } else {
5540 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5541 cpu_V0, cpu_V1);
5542 }
5543 break;
5544 default:
5545 g_assert_not_reached();
5546 }
5547 if (op == 3) {
5548
5549 neon_load_reg64(cpu_V1, rd + pass);
5550 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
5551 }
5552 neon_store_reg64(cpu_V0, rd + pass);
5553 } else {
5554
5555 tmp = neon_load_reg(rm, pass);
5556 tmp2 = tcg_temp_new_i32();
5557 tcg_gen_movi_i32(tmp2, imm);
5558 switch (op) {
5559 case 2:
5560 case 3:
5561 GEN_NEON_INTEGER_OP(rshl);
5562 break;
5563 case 6:
5564 switch (size) {
5565 case 0:
5566 gen_helper_neon_qshlu_s8(tmp, cpu_env,
5567 tmp, tmp2);
5568 break;
5569 case 1:
5570 gen_helper_neon_qshlu_s16(tmp, cpu_env,
5571 tmp, tmp2);
5572 break;
5573 case 2:
5574 gen_helper_neon_qshlu_s32(tmp, cpu_env,
5575 tmp, tmp2);
5576 break;
5577 default:
5578 abort();
5579 }
5580 break;
5581 case 7:
5582 GEN_NEON_INTEGER_OP_ENV(qshl);
5583 break;
5584 default:
5585 g_assert_not_reached();
5586 }
5587 tcg_temp_free_i32(tmp2);
5588
5589 if (op == 3) {
5590
5591 tmp2 = neon_load_reg(rd, pass);
5592 gen_neon_add(size, tmp, tmp2);
5593 tcg_temp_free_i32(tmp2);
5594 }
5595 neon_store_reg(rd, pass, tmp);
5596 }
5597 }
5598 } else if (op < 10) {
5599
5600
5601 int input_unsigned = (op == 8) ? !u : u;
5602 if (rm & 1) {
5603 return 1;
5604 }
5605 shift = shift - (1 << (size + 3));
5606 size++;
5607 if (size == 3) {
5608 tmp64 = tcg_const_i64(shift);
5609 neon_load_reg64(cpu_V0, rm);
5610 neon_load_reg64(cpu_V1, rm + 1);
5611 for (pass = 0; pass < 2; pass++) {
5612 TCGv_i64 in;
5613 if (pass == 0) {
5614 in = cpu_V0;
5615 } else {
5616 in = cpu_V1;
5617 }
5618 if (q) {
5619 if (input_unsigned) {
5620 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
5621 } else {
5622 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
5623 }
5624 } else {
5625 if (input_unsigned) {
5626 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
5627 } else {
5628 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
5629 }
5630 }
5631 tmp = tcg_temp_new_i32();
5632 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5633 neon_store_reg(rd, pass, tmp);
5634 }
5635 tcg_temp_free_i64(tmp64);
5636 } else {
5637 if (size == 1) {
5638 imm = (uint16_t)shift;
5639 imm |= imm << 16;
5640 } else {
5641
5642 imm = (uint32_t)shift;
5643 }
5644 tmp2 = tcg_const_i32(imm);
5645 tmp4 = neon_load_reg(rm + 1, 0);
5646 tmp5 = neon_load_reg(rm + 1, 1);
5647 for (pass = 0; pass < 2; pass++) {
5648 if (pass == 0) {
5649 tmp = neon_load_reg(rm, 0);
5650 } else {
5651 tmp = tmp4;
5652 }
5653 gen_neon_shift_narrow(size, tmp, tmp2, q,
5654 input_unsigned);
5655 if (pass == 0) {
5656 tmp3 = neon_load_reg(rm, 1);
5657 } else {
5658 tmp3 = tmp5;
5659 }
5660 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5661 input_unsigned);
5662 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
5663 tcg_temp_free_i32(tmp);
5664 tcg_temp_free_i32(tmp3);
5665 tmp = tcg_temp_new_i32();
5666 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5667 neon_store_reg(rd, pass, tmp);
5668 }
5669 tcg_temp_free_i32(tmp2);
5670 }
5671 } else if (op == 10) {
5672
5673 if (q || (rd & 1)) {
5674 return 1;
5675 }
5676 tmp = neon_load_reg(rm, 0);
5677 tmp2 = neon_load_reg(rm, 1);
5678 for (pass = 0; pass < 2; pass++) {
5679 if (pass == 1)
5680 tmp = tmp2;
5681
5682 gen_neon_widen(cpu_V0, tmp, size, u);
5683
5684 if (shift != 0) {
5685
5686
5687 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
5688
5689
5690
5691
5692
5693 if (size < 2 || !u) {
5694 uint64_t imm64;
5695 if (size == 0) {
5696 imm = (0xffu >> (8 - shift));
5697 imm |= imm << 16;
5698 } else if (size == 1) {
5699 imm = 0xffff >> (16 - shift);
5700 } else {
5701
5702 imm = 0xffffffff >> (32 - shift);
5703 }
5704 if (size < 2) {
5705 imm64 = imm | (((uint64_t)imm) << 32);
5706 } else {
5707 imm64 = imm;
5708 }
5709 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
5710 }
5711 }
5712 neon_store_reg64(cpu_V0, rd + pass);
5713 }
5714 } else if (op >= 14) {
5715
5716 TCGv_ptr fpst;
5717 TCGv_i32 shiftv;
5718 VFPGenFixPointFn *fn;
5719
5720 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5721 return 1;
5722 }
5723
5724 if (!(op & 1)) {
5725 if (u) {
5726 fn = gen_helper_vfp_ultos;
5727 } else {
5728 fn = gen_helper_vfp_sltos;
5729 }
5730 } else {
5731 if (u) {
5732 fn = gen_helper_vfp_touls_round_to_zero;
5733 } else {
5734 fn = gen_helper_vfp_tosls_round_to_zero;
5735 }
5736 }
5737
5738
5739
5740
5741 shift = 32 - shift;
5742 fpst = get_fpstatus_ptr(1);
5743 shiftv = tcg_const_i32(shift);
5744 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5745 TCGv_i32 tmpf = neon_load_reg(rm, pass);
5746 fn(tmpf, tmpf, shiftv, fpst);
5747 neon_store_reg(rd, pass, tmpf);
5748 }
5749 tcg_temp_free_ptr(fpst);
5750 tcg_temp_free_i32(shiftv);
5751 } else {
5752 return 1;
5753 }
5754 } else {
5755 int invert, reg_ofs, vec_size;
5756
5757 if (q && (rd & 1)) {
5758 return 1;
5759 }
5760
5761 op = (insn >> 8) & 0xf;
5762
5763 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
5764 invert = (insn & (1 << 5)) != 0;
5765
5766
5767
5768
5769 switch (op) {
5770 case 0: case 1:
5771
5772 break;
5773 case 2: case 3:
5774 imm <<= 8;
5775 break;
5776 case 4: case 5:
5777 imm <<= 16;
5778 break;
5779 case 6: case 7:
5780 imm <<= 24;
5781 break;
5782 case 8: case 9:
5783 imm |= imm << 16;
5784 break;
5785 case 10: case 11:
5786 imm = (imm << 8) | (imm << 24);
5787 break;
5788 case 12:
5789 imm = (imm << 8) | 0xff;
5790 break;
5791 case 13:
5792 imm = (imm << 16) | 0xffff;
5793 break;
5794 case 14:
5795 imm |= (imm << 8) | (imm << 16) | (imm << 24);
5796 if (invert) {
5797 imm = ~imm;
5798 }
5799 break;
5800 case 15:
5801 if (invert) {
5802 return 1;
5803 }
5804 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5805 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5806 break;
5807 }
5808 if (invert) {
5809 imm = ~imm;
5810 }
5811
5812 reg_ofs = neon_reg_offset(rd, 0);
5813 vec_size = q ? 16 : 8;
5814
5815 if (op & 1 && op < 12) {
5816 if (invert) {
5817
5818
5819
5820 tcg_gen_gvec_andi(MO_32, reg_ofs, reg_ofs, imm,
5821 vec_size, vec_size);
5822 } else {
5823 tcg_gen_gvec_ori(MO_32, reg_ofs, reg_ofs, imm,
5824 vec_size, vec_size);
5825 }
5826 } else {
5827
5828 if (op == 14 && invert) {
5829 TCGv_i64 t64 = tcg_temp_new_i64();
5830
5831 for (pass = 0; pass <= q; ++pass) {
5832 uint64_t val = 0;
5833 int n;
5834
5835 for (n = 0; n < 8; n++) {
5836 if (imm & (1 << (n + pass * 8))) {
5837 val |= 0xffull << (n * 8);
5838 }
5839 }
5840 tcg_gen_movi_i64(t64, val);
5841 neon_store_reg64(t64, rd + pass);
5842 }
5843 tcg_temp_free_i64(t64);
5844 } else {
5845 tcg_gen_gvec_dup32i(reg_ofs, vec_size, vec_size, imm);
5846 }
5847 }
5848 }
5849 } else {
5850 if (size != 3) {
5851 op = (insn >> 8) & 0xf;
5852 if ((insn & (1 << 6)) == 0) {
5853
5854 int src1_wide;
5855 int src2_wide;
5856 int prewiden;
5857
5858
5859
5860
5861
5862
5863 int undefreq;
5864
5865 static const int neon_3reg_wide[16][4] = {
5866 {1, 0, 0, 0},
5867 {1, 1, 0, 0},
5868 {1, 0, 0, 0},
5869 {1, 1, 0, 0},
5870 {0, 1, 1, 0},
5871 {0, 0, 0, 0},
5872 {0, 1, 1, 0},
5873 {0, 0, 0, 0},
5874 {0, 0, 0, 0},
5875 {0, 0, 0, 9},
5876 {0, 0, 0, 0},
5877 {0, 0, 0, 9},
5878 {0, 0, 0, 0},
5879 {0, 0, 0, 1},
5880 {0, 0, 0, 0xa},
5881 {0, 0, 0, 7},
5882 };
5883
5884 prewiden = neon_3reg_wide[op][0];
5885 src1_wide = neon_3reg_wide[op][1];
5886 src2_wide = neon_3reg_wide[op][2];
5887 undefreq = neon_3reg_wide[op][3];
5888
5889 if ((undefreq & (1 << size)) ||
5890 ((undefreq & 8) && u)) {
5891 return 1;
5892 }
5893 if ((src1_wide && (rn & 1)) ||
5894 (src2_wide && (rm & 1)) ||
5895 (!src2_wide && (rd & 1))) {
5896 return 1;
5897 }
5898
5899
5900
5901
5902 if (op == 14 && size == 2) {
5903 TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
5904
5905 if (!dc_isar_feature(aa32_pmull, s)) {
5906 return 1;
5907 }
5908 tcg_rn = tcg_temp_new_i64();
5909 tcg_rm = tcg_temp_new_i64();
5910 tcg_rd = tcg_temp_new_i64();
5911 neon_load_reg64(tcg_rn, rn);
5912 neon_load_reg64(tcg_rm, rm);
5913 gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm);
5914 neon_store_reg64(tcg_rd, rd);
5915 gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm);
5916 neon_store_reg64(tcg_rd, rd + 1);
5917 tcg_temp_free_i64(tcg_rn);
5918 tcg_temp_free_i64(tcg_rm);
5919 tcg_temp_free_i64(tcg_rd);
5920 return 0;
5921 }
5922
5923
5924
5925
5926 if (rd == rm && !src2_wide) {
5927 tmp = neon_load_reg(rm, 1);
5928 neon_store_scratch(2, tmp);
5929 } else if (rd == rn && !src1_wide) {
5930 tmp = neon_load_reg(rn, 1);
5931 neon_store_scratch(2, tmp);
5932 }
5933 tmp3 = NULL;
5934 for (pass = 0; pass < 2; pass++) {
5935 if (src1_wide) {
5936 neon_load_reg64(cpu_V0, rn + pass);
5937 tmp = NULL;
5938 } else {
5939 if (pass == 1 && rd == rn) {
5940 tmp = neon_load_scratch(2);
5941 } else {
5942 tmp = neon_load_reg(rn, pass);
5943 }
5944 if (prewiden) {
5945 gen_neon_widen(cpu_V0, tmp, size, u);
5946 }
5947 }
5948 if (src2_wide) {
5949 neon_load_reg64(cpu_V1, rm + pass);
5950 tmp2 = NULL;
5951 } else {
5952 if (pass == 1 && rd == rm) {
5953 tmp2 = neon_load_scratch(2);
5954 } else {
5955 tmp2 = neon_load_reg(rm, pass);
5956 }
5957 if (prewiden) {
5958 gen_neon_widen(cpu_V1, tmp2, size, u);
5959 }
5960 }
5961 switch (op) {
5962 case 0: case 1: case 4:
5963 gen_neon_addl(size);
5964 break;
5965 case 2: case 3: case 6:
5966 gen_neon_subl(size);
5967 break;
5968 case 5: case 7:
5969 switch ((size << 1) | u) {
5970 case 0:
5971 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5972 break;
5973 case 1:
5974 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5975 break;
5976 case 2:
5977 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5978 break;
5979 case 3:
5980 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5981 break;
5982 case 4:
5983 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5984 break;
5985 case 5:
5986 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5987 break;
5988 default: abort();
5989 }
5990 tcg_temp_free_i32(tmp2);
5991 tcg_temp_free_i32(tmp);
5992 break;
5993 case 8: case 9: case 10: case 11: case 12: case 13:
5994
5995 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5996 break;
5997 case 14:
5998 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
5999 tcg_temp_free_i32(tmp2);
6000 tcg_temp_free_i32(tmp);
6001 break;
6002 default:
6003 abort();
6004 }
6005 if (op == 13) {
6006
6007 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6008 neon_store_reg64(cpu_V0, rd + pass);
6009 } else if (op == 5 || (op >= 8 && op <= 11)) {
6010
6011 neon_load_reg64(cpu_V1, rd + pass);
6012 switch (op) {
6013 case 10:
6014 gen_neon_negl(cpu_V0, size);
6015
6016 case 5: case 8:
6017 gen_neon_addl(size);
6018 break;
6019 case 9: case 11:
6020 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6021 if (op == 11) {
6022 gen_neon_negl(cpu_V0, size);
6023 }
6024 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6025 break;
6026 default:
6027 abort();
6028 }
6029 neon_store_reg64(cpu_V0, rd + pass);
6030 } else if (op == 4 || op == 6) {
6031
6032 tmp = tcg_temp_new_i32();
6033 if (!u) {
6034 switch (size) {
6035 case 0:
6036 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
6037 break;
6038 case 1:
6039 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
6040 break;
6041 case 2:
6042 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
6043 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
6044 break;
6045 default: abort();
6046 }
6047 } else {
6048 switch (size) {
6049 case 0:
6050 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
6051 break;
6052 case 1:
6053 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
6054 break;
6055 case 2:
6056 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
6057 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
6058 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
6059 break;
6060 default: abort();
6061 }
6062 }
6063 if (pass == 0) {
6064 tmp3 = tmp;
6065 } else {
6066 neon_store_reg(rd, 0, tmp3);
6067 neon_store_reg(rd, 1, tmp);
6068 }
6069 } else {
6070
6071 neon_store_reg64(cpu_V0, rd + pass);
6072 }
6073 }
6074 } else {
6075
6076
6077
6078
6079 if (size == 0) {
6080 return 1;
6081 }
6082 switch (op) {
6083 case 1:
6084 case 5:
6085 case 9:
6086 if (size == 1) {
6087 return 1;
6088 }
6089
6090 case 0:
6091 case 4:
6092 case 8:
6093 case 12:
6094 case 13:
6095 if (u && ((rd | rn) & 1)) {
6096 return 1;
6097 }
6098 tmp = neon_get_scalar(size, rm);
6099 neon_store_scratch(0, tmp);
6100 for (pass = 0; pass < (u ? 4 : 2); pass++) {
6101 tmp = neon_load_scratch(0);
6102 tmp2 = neon_load_reg(rn, pass);
6103 if (op == 12) {
6104 if (size == 1) {
6105 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
6106 } else {
6107 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
6108 }
6109 } else if (op == 13) {
6110 if (size == 1) {
6111 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
6112 } else {
6113 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
6114 }
6115 } else if (op & 1) {
6116 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6117 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
6118 tcg_temp_free_ptr(fpstatus);
6119 } else {
6120 switch (size) {
6121 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6122 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6123 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
6124 default: abort();
6125 }
6126 }
6127 tcg_temp_free_i32(tmp2);
6128 if (op < 8) {
6129
6130 tmp2 = neon_load_reg(rd, pass);
6131 switch (op) {
6132 case 0:
6133 gen_neon_add(size, tmp, tmp2);
6134 break;
6135 case 1:
6136 {
6137 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6138 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
6139 tcg_temp_free_ptr(fpstatus);
6140 break;
6141 }
6142 case 4:
6143 gen_neon_rsb(size, tmp, tmp2);
6144 break;
6145 case 5:
6146 {
6147 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6148 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
6149 tcg_temp_free_ptr(fpstatus);
6150 break;
6151 }
6152 default:
6153 abort();
6154 }
6155 tcg_temp_free_i32(tmp2);
6156 }
6157 neon_store_reg(rd, pass, tmp);
6158 }
6159 break;
6160 case 3:
6161 case 7:
6162 case 11:
6163 if (u == 1) {
6164 return 1;
6165 }
6166
6167 case 2:
6168 case 6:
6169 case 10:
6170 if (rd & 1) {
6171 return 1;
6172 }
6173 tmp2 = neon_get_scalar(size, rm);
6174
6175
6176 tmp4 = tcg_temp_new_i32();
6177 tcg_gen_mov_i32(tmp4, tmp2);
6178 tmp3 = neon_load_reg(rn, 1);
6179
6180 for (pass = 0; pass < 2; pass++) {
6181 if (pass == 0) {
6182 tmp = neon_load_reg(rn, 0);
6183 } else {
6184 tmp = tmp3;
6185 tmp2 = tmp4;
6186 }
6187 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
6188 if (op != 11) {
6189 neon_load_reg64(cpu_V1, rd + pass);
6190 }
6191 switch (op) {
6192 case 6:
6193 gen_neon_negl(cpu_V0, size);
6194
6195 case 2:
6196 gen_neon_addl(size);
6197 break;
6198 case 3: case 7:
6199 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6200 if (op == 7) {
6201 gen_neon_negl(cpu_V0, size);
6202 }
6203 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6204 break;
6205 case 10:
6206
6207 break;
6208 case 11:
6209 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6210 break;
6211 default:
6212 abort();
6213 }
6214 neon_store_reg64(cpu_V0, rd + pass);
6215 }
6216 break;
6217 case 14:
6218 case 15:
6219 {
6220 NeonGenThreeOpEnvFn *fn;
6221
6222 if (!dc_isar_feature(aa32_rdm, s)) {
6223 return 1;
6224 }
6225 if (u && ((rd | rn) & 1)) {
6226 return 1;
6227 }
6228 if (op == 14) {
6229 if (size == 1) {
6230 fn = gen_helper_neon_qrdmlah_s16;
6231 } else {
6232 fn = gen_helper_neon_qrdmlah_s32;
6233 }
6234 } else {
6235 if (size == 1) {
6236 fn = gen_helper_neon_qrdmlsh_s16;
6237 } else {
6238 fn = gen_helper_neon_qrdmlsh_s32;
6239 }
6240 }
6241
6242 tmp2 = neon_get_scalar(size, rm);
6243 for (pass = 0; pass < (u ? 4 : 2); pass++) {
6244 tmp = neon_load_reg(rn, pass);
6245 tmp3 = neon_load_reg(rd, pass);
6246 fn(tmp, cpu_env, tmp, tmp2, tmp3);
6247 tcg_temp_free_i32(tmp3);
6248 neon_store_reg(rd, pass, tmp);
6249 }
6250 tcg_temp_free_i32(tmp2);
6251 }
6252 break;
6253 default:
6254 g_assert_not_reached();
6255 }
6256 }
6257 } else {
6258 if (!u) {
6259
6260 imm = (insn >> 8) & 0xf;
6261
6262 if (imm > 7 && !q)
6263 return 1;
6264
6265 if (q && ((rd | rn | rm) & 1)) {
6266 return 1;
6267 }
6268
6269 if (imm == 0) {
6270 neon_load_reg64(cpu_V0, rn);
6271 if (q) {
6272 neon_load_reg64(cpu_V1, rn + 1);
6273 }
6274 } else if (imm == 8) {
6275 neon_load_reg64(cpu_V0, rn + 1);
6276 if (q) {
6277 neon_load_reg64(cpu_V1, rm);
6278 }
6279 } else if (q) {
6280 tmp64 = tcg_temp_new_i64();
6281 if (imm < 8) {
6282 neon_load_reg64(cpu_V0, rn);
6283 neon_load_reg64(tmp64, rn + 1);
6284 } else {
6285 neon_load_reg64(cpu_V0, rn + 1);
6286 neon_load_reg64(tmp64, rm);
6287 }
6288 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
6289 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
6290 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6291 if (imm < 8) {
6292 neon_load_reg64(cpu_V1, rm);
6293 } else {
6294 neon_load_reg64(cpu_V1, rm + 1);
6295 imm -= 8;
6296 }
6297 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
6298 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
6299 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
6300 tcg_temp_free_i64(tmp64);
6301 } else {
6302
6303 neon_load_reg64(cpu_V0, rn);
6304 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
6305 neon_load_reg64(cpu_V1, rm);
6306 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
6307 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6308 }
6309 neon_store_reg64(cpu_V0, rd);
6310 if (q) {
6311 neon_store_reg64(cpu_V1, rd + 1);
6312 }
6313 } else if ((insn & (1 << 11)) == 0) {
6314
6315 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
6316 size = (insn >> 18) & 3;
6317
6318 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
6319 return 1;
6320 }
6321 if (neon_2rm_is_v8_op(op) &&
6322 !arm_dc_feature(s, ARM_FEATURE_V8)) {
6323 return 1;
6324 }
6325 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
6326 q && ((rm | rd) & 1)) {
6327 return 1;
6328 }
6329 switch (op) {
6330 case NEON_2RM_VREV64:
6331 for (pass = 0; pass < (q ? 2 : 1); pass++) {
6332 tmp = neon_load_reg(rm, pass * 2);
6333 tmp2 = neon_load_reg(rm, pass * 2 + 1);
6334 switch (size) {
6335 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6336 case 1: gen_swap_half(tmp); break;
6337 case 2: break;
6338 default: abort();
6339 }
6340 neon_store_reg(rd, pass * 2 + 1, tmp);
6341 if (size == 2) {
6342 neon_store_reg(rd, pass * 2, tmp2);
6343 } else {
6344 switch (size) {
6345 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
6346 case 1: gen_swap_half(tmp2); break;
6347 default: abort();
6348 }
6349 neon_store_reg(rd, pass * 2, tmp2);
6350 }
6351 }
6352 break;
6353 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
6354 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
6355 for (pass = 0; pass < q + 1; pass++) {
6356 tmp = neon_load_reg(rm, pass * 2);
6357 gen_neon_widen(cpu_V0, tmp, size, op & 1);
6358 tmp = neon_load_reg(rm, pass * 2 + 1);
6359 gen_neon_widen(cpu_V1, tmp, size, op & 1);
6360 switch (size) {
6361 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
6362 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
6363 case 2: tcg_gen_add_i64(CPU_V001); break;
6364 default: abort();
6365 }
6366 if (op >= NEON_2RM_VPADAL) {
6367
6368 neon_load_reg64(cpu_V1, rd + pass);
6369 gen_neon_addl(size);
6370 }
6371 neon_store_reg64(cpu_V0, rd + pass);
6372 }
6373 break;
6374 case NEON_2RM_VTRN:
6375 if (size == 2) {
6376 int n;
6377 for (n = 0; n < (q ? 4 : 2); n += 2) {
6378 tmp = neon_load_reg(rm, n);
6379 tmp2 = neon_load_reg(rd, n + 1);
6380 neon_store_reg(rm, n, tmp2);
6381 neon_store_reg(rd, n + 1, tmp);
6382 }
6383 } else {
6384 goto elementwise;
6385 }
6386 break;
6387 case NEON_2RM_VUZP:
6388 if (gen_neon_unzip(rd, rm, size, q)) {
6389 return 1;
6390 }
6391 break;
6392 case NEON_2RM_VZIP:
6393 if (gen_neon_zip(rd, rm, size, q)) {
6394 return 1;
6395 }
6396 break;
6397 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
6398
6399 if (rm & 1) {
6400 return 1;
6401 }
6402 tmp2 = NULL;
6403 for (pass = 0; pass < 2; pass++) {
6404 neon_load_reg64(cpu_V0, rm + pass);
6405 tmp = tcg_temp_new_i32();
6406 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
6407 tmp, cpu_V0);
6408 if (pass == 0) {
6409 tmp2 = tmp;
6410 } else {
6411 neon_store_reg(rd, 0, tmp2);
6412 neon_store_reg(rd, 1, tmp);
6413 }
6414 }
6415 break;
6416 case NEON_2RM_VSHLL:
6417 if (q || (rd & 1)) {
6418 return 1;
6419 }
6420 tmp = neon_load_reg(rm, 0);
6421 tmp2 = neon_load_reg(rm, 1);
6422 for (pass = 0; pass < 2; pass++) {
6423 if (pass == 1)
6424 tmp = tmp2;
6425 gen_neon_widen(cpu_V0, tmp, size, 1);
6426 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
6427 neon_store_reg64(cpu_V0, rd + pass);
6428 }
6429 break;
6430 case NEON_2RM_VCVT_F16_F32:
6431 {
6432 TCGv_ptr fpst;
6433 TCGv_i32 ahp;
6434
6435 if (!dc_isar_feature(aa32_fp16_spconv, s) ||
6436 q || (rm & 1)) {
6437 return 1;
6438 }
6439 fpst = get_fpstatus_ptr(true);
6440 ahp = get_ahp_flag();
6441 tmp = neon_load_reg(rm, 0);
6442 gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp);
6443 tmp2 = neon_load_reg(rm, 1);
6444 gen_helper_vfp_fcvt_f32_to_f16(tmp2, tmp2, fpst, ahp);
6445 tcg_gen_shli_i32(tmp2, tmp2, 16);
6446 tcg_gen_or_i32(tmp2, tmp2, tmp);
6447 tcg_temp_free_i32(tmp);
6448 tmp = neon_load_reg(rm, 2);
6449 gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp);
6450 tmp3 = neon_load_reg(rm, 3);
6451 neon_store_reg(rd, 0, tmp2);
6452 gen_helper_vfp_fcvt_f32_to_f16(tmp3, tmp3, fpst, ahp);
6453 tcg_gen_shli_i32(tmp3, tmp3, 16);
6454 tcg_gen_or_i32(tmp3, tmp3, tmp);
6455 neon_store_reg(rd, 1, tmp3);
6456 tcg_temp_free_i32(tmp);
6457 tcg_temp_free_i32(ahp);
6458 tcg_temp_free_ptr(fpst);
6459 break;
6460 }
6461 case NEON_2RM_VCVT_F32_F16:
6462 {
6463 TCGv_ptr fpst;
6464 TCGv_i32 ahp;
6465 if (!dc_isar_feature(aa32_fp16_spconv, s) ||
6466 q || (rd & 1)) {
6467 return 1;
6468 }
6469 fpst = get_fpstatus_ptr(true);
6470 ahp = get_ahp_flag();
6471 tmp3 = tcg_temp_new_i32();
6472 tmp = neon_load_reg(rm, 0);
6473 tmp2 = neon_load_reg(rm, 1);
6474 tcg_gen_ext16u_i32(tmp3, tmp);
6475 gen_helper_vfp_fcvt_f16_to_f32(tmp3, tmp3, fpst, ahp);
6476 neon_store_reg(rd, 0, tmp3);
6477 tcg_gen_shri_i32(tmp, tmp, 16);
6478 gen_helper_vfp_fcvt_f16_to_f32(tmp, tmp, fpst, ahp);
6479 neon_store_reg(rd, 1, tmp);
6480 tmp3 = tcg_temp_new_i32();
6481 tcg_gen_ext16u_i32(tmp3, tmp2);
6482 gen_helper_vfp_fcvt_f16_to_f32(tmp3, tmp3, fpst, ahp);
6483 neon_store_reg(rd, 2, tmp3);
6484 tcg_gen_shri_i32(tmp2, tmp2, 16);
6485 gen_helper_vfp_fcvt_f16_to_f32(tmp2, tmp2, fpst, ahp);
6486 neon_store_reg(rd, 3, tmp2);
6487 tcg_temp_free_i32(ahp);
6488 tcg_temp_free_ptr(fpst);
6489 break;
6490 }
6491 case NEON_2RM_AESE: case NEON_2RM_AESMC:
6492 if (!dc_isar_feature(aa32_aes, s) || ((rm | rd) & 1)) {
6493 return 1;
6494 }
6495 ptr1 = vfp_reg_ptr(true, rd);
6496 ptr2 = vfp_reg_ptr(true, rm);
6497
6498
6499
6500
6501 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
6502
6503 if (op == NEON_2RM_AESE) {
6504 gen_helper_crypto_aese(ptr1, ptr2, tmp3);
6505 } else {
6506 gen_helper_crypto_aesmc(ptr1, ptr2, tmp3);
6507 }
6508 tcg_temp_free_ptr(ptr1);
6509 tcg_temp_free_ptr(ptr2);
6510 tcg_temp_free_i32(tmp3);
6511 break;
6512 case NEON_2RM_SHA1H:
6513 if (!dc_isar_feature(aa32_sha1, s) || ((rm | rd) & 1)) {
6514 return 1;
6515 }
6516 ptr1 = vfp_reg_ptr(true, rd);
6517 ptr2 = vfp_reg_ptr(true, rm);
6518
6519 gen_helper_crypto_sha1h(ptr1, ptr2);
6520
6521 tcg_temp_free_ptr(ptr1);
6522 tcg_temp_free_ptr(ptr2);
6523 break;
6524 case NEON_2RM_SHA1SU1:
6525 if ((rm | rd) & 1) {
6526 return 1;
6527 }
6528
6529 if (q) {
6530 if (!dc_isar_feature(aa32_sha2, s)) {
6531 return 1;
6532 }
6533 } else if (!dc_isar_feature(aa32_sha1, s)) {
6534 return 1;
6535 }
6536 ptr1 = vfp_reg_ptr(true, rd);
6537 ptr2 = vfp_reg_ptr(true, rm);
6538 if (q) {
6539 gen_helper_crypto_sha256su0(ptr1, ptr2);
6540 } else {
6541 gen_helper_crypto_sha1su1(ptr1, ptr2);
6542 }
6543 tcg_temp_free_ptr(ptr1);
6544 tcg_temp_free_ptr(ptr2);
6545 break;
6546
6547 case NEON_2RM_VMVN:
6548 tcg_gen_gvec_not(0, rd_ofs, rm_ofs, vec_size, vec_size);
6549 break;
6550 case NEON_2RM_VNEG:
6551 tcg_gen_gvec_neg(size, rd_ofs, rm_ofs, vec_size, vec_size);
6552 break;
6553 case NEON_2RM_VABS:
6554 tcg_gen_gvec_abs(size, rd_ofs, rm_ofs, vec_size, vec_size);
6555 break;
6556
6557 default:
6558 elementwise:
6559 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6560 tmp = neon_load_reg(rm, pass);
6561 switch (op) {
6562 case NEON_2RM_VREV32:
6563 switch (size) {
6564 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6565 case 1: gen_swap_half(tmp); break;
6566 default: abort();
6567 }
6568 break;
6569 case NEON_2RM_VREV16:
6570 gen_rev16(tmp);
6571 break;
6572 case NEON_2RM_VCLS:
6573 switch (size) {
6574 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
6575 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
6576 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
6577 default: abort();
6578 }
6579 break;
6580 case NEON_2RM_VCLZ:
6581 switch (size) {
6582 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
6583 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
6584 case 2: tcg_gen_clzi_i32(tmp, tmp, 32); break;
6585 default: abort();
6586 }
6587 break;
6588 case NEON_2RM_VCNT:
6589 gen_helper_neon_cnt_u8(tmp, tmp);
6590 break;
6591 case NEON_2RM_VQABS:
6592 switch (size) {
6593 case 0:
6594 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
6595 break;
6596 case 1:
6597 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
6598 break;
6599 case 2:
6600 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
6601 break;
6602 default: abort();
6603 }
6604 break;
6605 case NEON_2RM_VQNEG:
6606 switch (size) {
6607 case 0:
6608 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
6609 break;
6610 case 1:
6611 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
6612 break;
6613 case 2:
6614 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
6615 break;
6616 default: abort();
6617 }
6618 break;
6619 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
6620 tmp2 = tcg_const_i32(0);
6621 switch(size) {
6622 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
6623 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
6624 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
6625 default: abort();
6626 }
6627 tcg_temp_free_i32(tmp2);
6628 if (op == NEON_2RM_VCLE0) {
6629 tcg_gen_not_i32(tmp, tmp);
6630 }
6631 break;
6632 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
6633 tmp2 = tcg_const_i32(0);
6634 switch(size) {
6635 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
6636 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
6637 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
6638 default: abort();
6639 }
6640 tcg_temp_free_i32(tmp2);
6641 if (op == NEON_2RM_VCLT0) {
6642 tcg_gen_not_i32(tmp, tmp);
6643 }
6644 break;
6645 case NEON_2RM_VCEQ0:
6646 tmp2 = tcg_const_i32(0);
6647 switch(size) {
6648 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6649 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6650 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
6651 default: abort();
6652 }
6653 tcg_temp_free_i32(tmp2);
6654 break;
6655 case NEON_2RM_VCGT0_F:
6656 {
6657 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6658 tmp2 = tcg_const_i32(0);
6659 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
6660 tcg_temp_free_i32(tmp2);
6661 tcg_temp_free_ptr(fpstatus);
6662 break;
6663 }
6664 case NEON_2RM_VCGE0_F:
6665 {
6666 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6667 tmp2 = tcg_const_i32(0);
6668 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
6669 tcg_temp_free_i32(tmp2);
6670 tcg_temp_free_ptr(fpstatus);
6671 break;
6672 }
6673 case NEON_2RM_VCEQ0_F:
6674 {
6675 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6676 tmp2 = tcg_const_i32(0);
6677 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
6678 tcg_temp_free_i32(tmp2);
6679 tcg_temp_free_ptr(fpstatus);
6680 break;
6681 }
6682 case NEON_2RM_VCLE0_F:
6683 {
6684 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6685 tmp2 = tcg_const_i32(0);
6686 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
6687 tcg_temp_free_i32(tmp2);
6688 tcg_temp_free_ptr(fpstatus);
6689 break;
6690 }
6691 case NEON_2RM_VCLT0_F:
6692 {
6693 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6694 tmp2 = tcg_const_i32(0);
6695 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
6696 tcg_temp_free_i32(tmp2);
6697 tcg_temp_free_ptr(fpstatus);
6698 break;
6699 }
6700 case NEON_2RM_VABS_F:
6701 gen_helper_vfp_abss(tmp, tmp);
6702 break;
6703 case NEON_2RM_VNEG_F:
6704 gen_helper_vfp_negs(tmp, tmp);
6705 break;
6706 case NEON_2RM_VSWP:
6707 tmp2 = neon_load_reg(rd, pass);
6708 neon_store_reg(rm, pass, tmp2);
6709 break;
6710 case NEON_2RM_VTRN:
6711 tmp2 = neon_load_reg(rd, pass);
6712 switch (size) {
6713 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6714 case 1: gen_neon_trn_u16(tmp, tmp2); break;
6715 default: abort();
6716 }
6717 neon_store_reg(rm, pass, tmp2);
6718 break;
6719 case NEON_2RM_VRINTN:
6720 case NEON_2RM_VRINTA:
6721 case NEON_2RM_VRINTM:
6722 case NEON_2RM_VRINTP:
6723 case NEON_2RM_VRINTZ:
6724 {
6725 TCGv_i32 tcg_rmode;
6726 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6727 int rmode;
6728
6729 if (op == NEON_2RM_VRINTZ) {
6730 rmode = FPROUNDING_ZERO;
6731 } else {
6732 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
6733 }
6734
6735 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
6736 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6737 cpu_env);
6738 gen_helper_rints(tmp, tmp, fpstatus);
6739 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6740 cpu_env);
6741 tcg_temp_free_ptr(fpstatus);
6742 tcg_temp_free_i32(tcg_rmode);
6743 break;
6744 }
6745 case NEON_2RM_VRINTX:
6746 {
6747 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6748 gen_helper_rints_exact(tmp, tmp, fpstatus);
6749 tcg_temp_free_ptr(fpstatus);
6750 break;
6751 }
6752 case NEON_2RM_VCVTAU:
6753 case NEON_2RM_VCVTAS:
6754 case NEON_2RM_VCVTNU:
6755 case NEON_2RM_VCVTNS:
6756 case NEON_2RM_VCVTPU:
6757 case NEON_2RM_VCVTPS:
6758 case NEON_2RM_VCVTMU:
6759 case NEON_2RM_VCVTMS:
6760 {
6761 bool is_signed = !extract32(insn, 7, 1);
6762 TCGv_ptr fpst = get_fpstatus_ptr(1);
6763 TCGv_i32 tcg_rmode, tcg_shift;
6764 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
6765
6766 tcg_shift = tcg_const_i32(0);
6767 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
6768 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6769 cpu_env);
6770
6771 if (is_signed) {
6772 gen_helper_vfp_tosls(tmp, tmp,
6773 tcg_shift, fpst);
6774 } else {
6775 gen_helper_vfp_touls(tmp, tmp,
6776 tcg_shift, fpst);
6777 }
6778
6779 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6780 cpu_env);
6781 tcg_temp_free_i32(tcg_rmode);
6782 tcg_temp_free_i32(tcg_shift);
6783 tcg_temp_free_ptr(fpst);
6784 break;
6785 }
6786 case NEON_2RM_VRECPE:
6787 {
6788 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6789 gen_helper_recpe_u32(tmp, tmp, fpstatus);
6790 tcg_temp_free_ptr(fpstatus);
6791 break;
6792 }
6793 case NEON_2RM_VRSQRTE:
6794 {
6795 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6796 gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
6797 tcg_temp_free_ptr(fpstatus);
6798 break;
6799 }
6800 case NEON_2RM_VRECPE_F:
6801 {
6802 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6803 gen_helper_recpe_f32(tmp, tmp, fpstatus);
6804 tcg_temp_free_ptr(fpstatus);
6805 break;
6806 }
6807 case NEON_2RM_VRSQRTE_F:
6808 {
6809 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6810 gen_helper_rsqrte_f32(tmp, tmp, fpstatus);
6811 tcg_temp_free_ptr(fpstatus);
6812 break;
6813 }
6814 case NEON_2RM_VCVT_FS:
6815 {
6816 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6817 gen_helper_vfp_sitos(tmp, tmp, fpstatus);
6818 tcg_temp_free_ptr(fpstatus);
6819 break;
6820 }
6821 case NEON_2RM_VCVT_FU:
6822 {
6823 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6824 gen_helper_vfp_uitos(tmp, tmp, fpstatus);
6825 tcg_temp_free_ptr(fpstatus);
6826 break;
6827 }
6828 case NEON_2RM_VCVT_SF:
6829 {
6830 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6831 gen_helper_vfp_tosizs(tmp, tmp, fpstatus);
6832 tcg_temp_free_ptr(fpstatus);
6833 break;
6834 }
6835 case NEON_2RM_VCVT_UF:
6836 {
6837 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6838 gen_helper_vfp_touizs(tmp, tmp, fpstatus);
6839 tcg_temp_free_ptr(fpstatus);
6840 break;
6841 }
6842 default:
6843
6844
6845
6846 abort();
6847 }
6848 neon_store_reg(rd, pass, tmp);
6849 }
6850 break;
6851 }
6852 } else if ((insn & (1 << 10)) == 0) {
6853
6854 int n = ((insn >> 8) & 3) + 1;
6855 if ((rn + n) > 32) {
6856
6857
6858
6859 return 1;
6860 }
6861 n <<= 3;
6862 if (insn & (1 << 6)) {
6863 tmp = neon_load_reg(rd, 0);
6864 } else {
6865 tmp = tcg_temp_new_i32();
6866 tcg_gen_movi_i32(tmp, 0);
6867 }
6868 tmp2 = neon_load_reg(rm, 0);
6869 ptr1 = vfp_reg_ptr(true, rn);
6870 tmp5 = tcg_const_i32(n);
6871 gen_helper_neon_tbl(tmp2, tmp2, tmp, ptr1, tmp5);
6872 tcg_temp_free_i32(tmp);
6873 if (insn & (1 << 6)) {
6874 tmp = neon_load_reg(rd, 1);
6875 } else {
6876 tmp = tcg_temp_new_i32();
6877 tcg_gen_movi_i32(tmp, 0);
6878 }
6879 tmp3 = neon_load_reg(rm, 1);
6880 gen_helper_neon_tbl(tmp3, tmp3, tmp, ptr1, tmp5);
6881 tcg_temp_free_i32(tmp5);
6882 tcg_temp_free_ptr(ptr1);
6883 neon_store_reg(rd, 0, tmp2);
6884 neon_store_reg(rd, 1, tmp3);
6885 tcg_temp_free_i32(tmp);
6886 } else if ((insn & 0x380) == 0) {
6887
6888 int element;
6889 TCGMemOp size;
6890
6891 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
6892 return 1;
6893 }
6894 if (insn & (1 << 16)) {
6895 size = MO_8;
6896 element = (insn >> 17) & 7;
6897 } else if (insn & (1 << 17)) {
6898 size = MO_16;
6899 element = (insn >> 18) & 3;
6900 } else {
6901 size = MO_32;
6902 element = (insn >> 19) & 1;
6903 }
6904 tcg_gen_gvec_dup_mem(size, neon_reg_offset(rd, 0),
6905 neon_element_offset(rm, element, size),
6906 q ? 16 : 8, q ? 16 : 8);
6907 } else {
6908 return 1;
6909 }
6910 }
6911 }
6912 return 0;
6913}
6914
6915
6916
6917
6918
6919
6920
/*
 * Advanced SIMD three-registers-of-the-same-length extension group
 * (VCMLA, VCADD, V[US]DOT, VFMAL/VFMSL).  Returns 0 on success,
 * 1 to signal that the insn should UNDEF (caller raises illegal op).
 */
static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn)
{
    gen_helper_gvec_3 *fn_gvec = NULL;
    gen_helper_gvec_3_ptr *fn_gvec_ptr = NULL;
    int rd, rn, rm, opr_sz;
    int data = 0;
    int off_rn, off_rm;
    bool is_long = false, q = extract32(insn, 6, 1);
    bool ptr_is_env = false;

    if ((insn & 0xfe200f10) == 0xfc200800) {
        /* VCMLA: vector complex multiply-accumulate */
        int size = extract32(insn, 20, 1);
        data = extract32(insn, 23, 2);          /* rot field, passed to helper */
        if (!dc_isar_feature(aa32_vcma, s)
            || (!size && !dc_isar_feature(aa32_fp16_arith, s))) {
            return 1;
        }
        fn_gvec_ptr = size ? gen_helper_gvec_fcmlas : gen_helper_gvec_fcmlah;
    } else if ((insn & 0xfea00f10) == 0xfc800800) {
        /* VCADD: vector complex add */
        int size = extract32(insn, 20, 1);
        data = extract32(insn, 24, 1);          /* rot field, passed to helper */
        if (!dc_isar_feature(aa32_vcma, s)
            || (!size && !dc_isar_feature(aa32_fp16_arith, s))) {
            return 1;
        }
        fn_gvec_ptr = size ? gen_helper_gvec_fcadds : gen_helper_gvec_fcaddh;
    } else if ((insn & 0xfeb00f00) == 0xfc200d00) {
        /* V[US]DOT: dot product; bit 4 selects unsigned vs signed */
        bool u = extract32(insn, 4, 1);
        if (!dc_isar_feature(aa32_dp, s)) {
            return 1;
        }
        fn_gvec = u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b;
    } else if ((insn & 0xff300f10) == 0xfc200810) {
        /* VFM[AS]L: fp16 multiply-accumulate long (result is wider) */
        int is_s = extract32(insn, 23, 1);  /* presumably selects the subtract form; forwarded in data */
        if (!dc_isar_feature(aa32_fhm, s)) {
            return 1;
        }
        is_long = true;
        data = is_s;
        fn_gvec_ptr = gen_helper_gvec_fmlal_a32;
        ptr_is_env = true;
    } else {
        return 1;
    }

    VFP_DREG_D(rd, insn);
    if (rd & q) {
        /* Q insns require an even Dd (destination register pair) */
        return 1;
    }
    if (q || !is_long) {
        VFP_DREG_N(rn, insn);
        VFP_DREG_M(rm, insn);
        /* Bitwise on purpose: for Q (and not long) forms, odd Dn/Dm UNDEF */
        if ((rn | rm) & q & !is_long) {
            return 1;
        }
        off_rn = vfp_reg_offset(1, rn);
        off_rm = vfp_reg_offset(1, rm);
    } else {
        /* Non-Q long form: sources are single-precision registers */
        rn = VFP_SREG_N(insn);
        rm = VFP_SREG_M(insn);
        off_rn = vfp_reg_offset(0, rn);
        off_rm = vfp_reg_offset(0, rm);
    }

    if (s->fp_excp_el) {
        /* FP/SIMD access trap: raise now, at the EL that owns the trap */
        gen_exception_insn(s, 4, EXCP_UDEF,
                           syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
        return 0;
    }
    if (!s->vfp_enabled) {
        return 1;
    }

    opr_sz = (1 + q) * 8;   /* 8 bytes for D, 16 for Q */
    if (fn_gvec_ptr) {
        TCGv_ptr ptr;
        if (ptr_is_env) {
            ptr = cpu_env;
        } else {
            ptr = get_fpstatus_ptr(1);
        }
        tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd), off_rn, off_rm, ptr,
                           opr_sz, opr_sz, data, fn_gvec_ptr);
        if (!ptr_is_env) {
            tcg_temp_free_ptr(ptr);
        }
    } else {
        tcg_gen_gvec_3_ool(vfp_reg_offset(1, rd), off_rn, off_rm,
                           opr_sz, opr_sz, data, fn_gvec);
    }
    return 0;
}
7017
7018
7019
7020
7021
7022
7023
7024
7025
/*
 * Advanced SIMD two-registers-and-a-scalar extension group
 * (VCMLA by element, V[US]DOT by element, VFM[AS]L scalar).
 * Returns 0 on success, 1 to signal the insn should UNDEF.
 */
static int disas_neon_insn_2reg_scalar_ext(DisasContext *s, uint32_t insn)
{
    gen_helper_gvec_3 *fn_gvec = NULL;
    gen_helper_gvec_3_ptr *fn_gvec_ptr = NULL;
    int rd, rn, rm, opr_sz, data;
    int off_rn, off_rm;
    bool is_long = false, q = extract32(insn, 6, 1);
    bool ptr_is_env = false;

    if ((insn & 0xff000f10) == 0xfe000800) {
        /* VCMLA (by element) */
        int rot = extract32(insn, 20, 2);
        int size = extract32(insn, 23, 1);
        int index;

        if (!dc_isar_feature(aa32_vcma, s)) {
            return 1;
        }
        if (size == 0) {
            if (!dc_isar_feature(aa32_fp16_arith, s)) {
                return 1;
            }
            /* fp16: rm is the 4-bit Vm field and bit 5 is the element index */
            rm = extract32(insn, 0, 4);
            index = extract32(insn, 5, 1);
        } else {
            /* fp32: rm is the usual M:Vm and the index is always 0 */
            VFP_DREG_M(rm, insn);
            index = 0;
        }
        data = (index << 2) | rot;   /* helper unpacks index and rotation */
        fn_gvec_ptr = (size ? gen_helper_gvec_fcmlas_idx
                       : gen_helper_gvec_fcmlah_idx);
    } else if ((insn & 0xffb00f00) == 0xfe200d00) {
        /* V[US]DOT (by element); bit 4 selects unsigned vs signed */
        int u = extract32(insn, 4, 1);

        if (!dc_isar_feature(aa32_dp, s)) {
            return 1;
        }
        fn_gvec = u ? gen_helper_gvec_udot_idx_b : gen_helper_gvec_sdot_idx_b;
        /* rm is the 4-bit Vm field; bit 5 is the element index */
        data = extract32(insn, 5, 1);
        rm = extract32(insn, 0, 4);
    } else if ((insn & 0xffa00f10) == 0xfe000810) {
        /* VFM[AS]L (scalar): fp16 multiply-accumulate long */
        int is_s = extract32(insn, 20, 1);  /* presumably selects subtract form; forwarded in data */
        int vm20 = extract32(insn, 0, 3);
        int vm3 = extract32(insn, 3, 1);
        int m = extract32(insn, 5, 1);
        int index;

        if (!dc_isar_feature(aa32_fhm, s)) {
            return 1;
        }
        /* Q and non-Q forms split the Vm bits into register vs index differently */
        if (q) {
            rm = vm20;
            index = m * 2 + vm3;
        } else {
            rm = vm20 * 2 + m;
            index = vm3;
        }
        is_long = true;
        data = (index << 2) | is_s;
        fn_gvec_ptr = gen_helper_gvec_fmlal_idx_a32;
        ptr_is_env = true;
    } else {
        return 1;
    }

    VFP_DREG_D(rd, insn);
    if (rd & q) {
        /* Q insns require an even Dd */
        return 1;
    }
    if (q || !is_long) {
        VFP_DREG_N(rn, insn);
        /* Bitwise on purpose: for Q (and not long) forms, odd Dn UNDEFs */
        if (rn & q & !is_long) {
            return 1;
        }
        off_rn = vfp_reg_offset(1, rn);
        off_rm = vfp_reg_offset(1, rm);
    } else {
        /* Non-Q long form: Vn is a single-precision register */
        rn = VFP_SREG_N(insn);
        off_rn = vfp_reg_offset(0, rn);
        off_rm = vfp_reg_offset(0, rm);
    }
    if (s->fp_excp_el) {
        /* FP/SIMD access trap: raise now, at the EL that owns the trap */
        gen_exception_insn(s, 4, EXCP_UDEF,
                           syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
        return 0;
    }
    if (!s->vfp_enabled) {
        return 1;
    }

    opr_sz = (1 + q) * 8;   /* 8 bytes for D, 16 for Q */
    if (fn_gvec_ptr) {
        TCGv_ptr ptr;
        if (ptr_is_env) {
            ptr = cpu_env;
        } else {
            ptr = get_fpstatus_ptr(1);
        }
        tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd), off_rn, off_rm, ptr,
                           opr_sz, opr_sz, data, fn_gvec_ptr);
        if (!ptr_is_env) {
            tcg_temp_free_ptr(ptr);
        }
    } else {
        tcg_gen_gvec_3_ool(vfp_reg_offset(1, rd), off_rn, off_rm,
                           opr_sz, opr_sz, data, fn_gvec);
    }
    return 0;
}
7140
/*
 * Decode and emit code for a coprocessor (MRC/MCR/MRRC/MCRR) insn.
 * Looks the register up in the cp_regs hash, emits any runtime access
 * checks, then emits the read or write.  Returns 0 on success, 1 to
 * signal the insn should UNDEF.
 */
static int disas_coproc_insn(DisasContext *s, uint32_t insn)
{
    int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
    const ARMCPRegInfo *ri;

    cpnum = (insn >> 8) & 0xf;

    /* Coprocessors 0 and 1 on XScale are the iwMMXt/DSP extensions,
     * gated by the CPAR bits rather than the reginfo machinery.
     */
    if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
        if (extract32(s->c15_cpar, cpnum, 1) == 0) {
            return 1;
        }
        if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
            return disas_iwmmxt_insn(s, insn);
        } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
            return disas_dsp_insn(s, insn);
        }
        return 1;
    }

    /* Otherwise treat as a generic register access */
    is64 = (insn & (1 << 25)) == 0;
    if (!is64 && ((insn & (1 << 4)) == 0)) {
        /* cdp: not a register access at all */
        return 1;
    }

    /* Decode the crn/crm/opc fields; 64-bit (MRRC/MCRR) accesses carry
     * a second transfer register instead of crn/opc2.
     */
    crm = insn & 0xf;
    if (is64) {
        crn = 0;
        opc1 = (insn >> 4) & 0xf;
        opc2 = 0;
        rt2 = (insn >> 16) & 0xf;
    } else {
        crn = (insn >> 16) & 0xf;
        opc1 = (insn >> 21) & 7;
        opc2 = (insn >> 5) & 7;
        rt2 = 0;
    }
    isread = (insn >> 20) & 1;
    rt = (insn >> 12) & 0xf;

    ri = get_arm_cp_reginfo(s->cp_regs,
            ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
    if (ri) {
        /* Check access permissions known at translate time */
        if (!cp_access_ok(s->current_el, ri, isread)) {
            return 1;
        }

        if (ri->accessfn ||
            (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
            /* Emit code to perform further access-permission checks at
             * runtime; this may result in an exception, so the PC and
             * condexec state must be synced (done below) before the call.
             */
            TCGv_ptr tmpptr;
            TCGv_i32 tcg_syn, tcg_isread;
            uint32_t syndrome;

            /* The syndrome condition field is hardwired to 0xe ("always");
             * since we only get here if the insn passed its condition
             * check, this is architecturally permitted.
             */
            switch (cpnum) {
            case 14:
                if (is64) {
                    syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
                                                 isread, false);
                } else {
                    syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
                                                rt, isread, false);
                }
                break;
            case 15:
                if (is64) {
                    syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
                                                 isread, false);
                } else {
                    syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
                                                rt, isread, false);
                }
                break;
            default:
                /* ARMv8 defines only cp14 and cp15, so this path can only
                 * be reached on pre-v8 CPUs, where the syndrome value is
                 * never actually used.
                 */
                assert(!arm_dc_feature(s, ARM_FEATURE_V8));
                syndrome = syn_uncategorized();
                break;
            }

            gen_set_condexec(s);
            gen_set_pc_im(s, s->pc - 4);
            tmpptr = tcg_const_ptr(ri);
            tcg_syn = tcg_const_i32(syndrome);
            tcg_isread = tcg_const_i32(isread);
            gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn,
                                           tcg_isread);
            tcg_temp_free_ptr(tmpptr);
            tcg_temp_free_i32(tcg_syn);
            tcg_temp_free_i32(tcg_isread);
        }

        /* Handle special-behaviour registers first */
        switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
        case ARM_CP_NOP:
            return 0;
        case ARM_CP_WFI:
            if (isread) {
                return 1;
            }
            gen_set_pc_im(s, s->pc);
            s->base.is_jmp = DISAS_WFI;
            return 0;
        default:
            break;
        }

        /* I/O registers need icount bracketing when icount is in use */
        if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
            gen_io_start();
        }

        if (isread) {
            /* Read */
            if (is64) {
                TCGv_i64 tmp64;
                TCGv_i32 tmp;
                if (ri->type & ARM_CP_CONST) {
                    tmp64 = tcg_const_i64(ri->resetvalue);
                } else if (ri->readfn) {
                    TCGv_ptr tmpptr;
                    tmp64 = tcg_temp_new_i64();
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tmp64 = tcg_temp_new_i64();
                    tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
                }
                /* Split the 64-bit value: low word to rt, high word to rt2 */
                tmp = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(tmp, tmp64);
                store_reg(s, rt, tmp);
                tcg_gen_shri_i64(tmp64, tmp64, 32);
                tmp = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(tmp, tmp64);
                tcg_temp_free_i64(tmp64);
                store_reg(s, rt2, tmp);
            } else {
                TCGv_i32 tmp;
                if (ri->type & ARM_CP_CONST) {
                    tmp = tcg_const_i32(ri->resetvalue);
                } else if (ri->readfn) {
                    TCGv_ptr tmpptr;
                    tmp = tcg_temp_new_i32();
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tmp = load_cpu_offset(ri->fieldoffset);
                }
                if (rt == 15) {
                    /* Destination register of r15 for 32-bit loads sets
                     * the condition codes from the high 4 bits of the value.
                     */
                    gen_set_nzcv(tmp);
                    tcg_temp_free_i32(tmp);
                } else {
                    store_reg(s, rt, tmp);
                }
            }
        } else {
            /* Write */
            if (ri->type & ARM_CP_CONST) {
                /* If not forbidden by access permissions, treat as WI */
                return 0;
            }

            if (is64) {
                TCGv_i32 tmplo, tmphi;
                TCGv_i64 tmp64 = tcg_temp_new_i64();
                tmplo = load_reg(s, rt);
                tmphi = load_reg(s, rt2);
                tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
                tcg_temp_free_i32(tmplo);
                tcg_temp_free_i32(tmphi);
                if (ri->writefn) {
                    TCGv_ptr tmpptr = tcg_const_ptr(ri);
                    gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
                }
                tcg_temp_free_i64(tmp64);
            } else {
                if (ri->writefn) {
                    TCGv_i32 tmp;
                    TCGv_ptr tmpptr;
                    tmp = load_reg(s, rt);
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
                    tcg_temp_free_ptr(tmpptr);
                    tcg_temp_free_i32(tmp);
                } else {
                    TCGv_i32 tmp = load_reg(s, rt);
                    store_cpu_offset(tmp, ri->fieldoffset);
                }
            }
        }

        if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
            /* I/O operations must end the TB here (whether read or write) */
            gen_io_end();
            gen_lookup_tb(s);
        } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
            /* We default to ending the TB on a coprocessor register write,
             * but allow this to be suppressed by the register definition
             * (usually only needed to work around guest bugs).
             */
            gen_lookup_tb(s);
        }

        return 0;
    }

    /* Unknown register; this might be a guest error or a QEMU bug, so
     * log it (LOG_UNIMP) before UNDEFing.
     */
    if (is64) {
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
                      "64 bit system register cp:%d opc1: %d crm:%d "
                      "(%s)\n",
                      isread ? "read" : "write", cpnum, opc1, crm,
                      s->ns ? "non-secure" : "secure");
    } else {
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
                      "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
                      "(%s)\n",
                      isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
                      s->ns ? "non-secure" : "secure");
    }

    return 1;
}
7392
7393
7394
7395static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
7396{
7397 TCGv_i32 tmp;
7398 tmp = tcg_temp_new_i32();
7399 tcg_gen_extrl_i64_i32(tmp, val);
7400 store_reg(s, rlow, tmp);
7401 tmp = tcg_temp_new_i32();
7402 tcg_gen_shri_i64(val, val, 32);
7403 tcg_gen_extrl_i64_i32(tmp, val);
7404 store_reg(s, rhigh, tmp);
7405}
7406
7407
7408static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
7409{
7410 TCGv_i64 tmp;
7411 TCGv_i32 tmp2;
7412
7413
7414 tmp = tcg_temp_new_i64();
7415 tmp2 = load_reg(s, rlow);
7416 tcg_gen_extu_i32_i64(tmp, tmp2);
7417 tcg_temp_free_i32(tmp2);
7418 tcg_gen_add_i64(val, val, tmp);
7419 tcg_temp_free_i64(tmp);
7420}
7421
7422
7423static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
7424{
7425 TCGv_i64 tmp;
7426 TCGv_i32 tmpl;
7427 TCGv_i32 tmph;
7428
7429
7430 tmpl = load_reg(s, rlow);
7431 tmph = load_reg(s, rhigh);
7432 tmp = tcg_temp_new_i64();
7433 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7434 tcg_temp_free_i32(tmpl);
7435 tcg_temp_free_i32(tmph);
7436 tcg_gen_add_i64(val, val, tmp);
7437 tcg_temp_free_i64(tmp);
7438}
7439
7440
/*
 * Set the N and Z flags for a 64-bit logical-op result held in lo:hi.
 * QEMU stores flags lazily: cpu_NF's sign bit is N, so it receives the
 * high word; cpu_ZF is zero iff Z is set, so it receives lo|hi, which
 * is zero exactly when the full 64-bit result is zero.
 */
static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
{
    tcg_gen_mov_i32(cpu_NF, hi);
    tcg_gen_or_i32(cpu_ZF, lo, hi);
}
7446
7447
7448
7449
7450
7451
7452
/*
 * Emit code for a load-exclusive (LDREX and friends).  size is the
 * TCGMemOp size (3 means a 64-bit load into the rt/rt2 pair).  The
 * address and the loaded data are recorded in cpu_exclusive_addr and
 * cpu_exclusive_val for a later store-exclusive to check against.
 */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv_i32 addr, int size)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    TCGMemOp opc = size | MO_ALIGN | s->be_data;

    s->is_ldex = true;

    if (size == 3) {
        TCGv_i32 tmp2 = tcg_temp_new_i32();
        TCGv_i64 t64 = tcg_temp_new_i64();

        /*
         * Do a single 64-bit memory access and then split the halves.
         * For AArch32 the 32-bit word at the lowest address always goes
         * to Rt and the one at addr+4 to Rt2; with big-endian data the
         * word at the lower address lands in the high half of t64, so
         * the high half goes to rt (tmp) and the low half to rt2 (tmp2);
         * little-endian is the other way round.
         */
        TCGv taddr = gen_aa32_addr(s, addr, opc);

        tcg_gen_qemu_ld_i64(t64, taddr, get_mem_index(s), opc);
        tcg_temp_free(taddr);
        /* Record the full 64-bit data for the store-exclusive check */
        tcg_gen_mov_i64(cpu_exclusive_val, t64);
        if (s->be_data == MO_BE) {
            tcg_gen_extr_i64_i32(tmp2, tmp, t64);
        } else {
            tcg_gen_extr_i64_i32(tmp, tmp2, t64);
        }
        tcg_temp_free_i64(t64);

        store_reg(s, rt2, tmp2);
    } else {
        gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), opc);
        tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
    }

    store_reg(s, rt, tmp);
    tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
}
7497
/*
 * Clear the exclusive monitor: -1 can never match a real address
 * (addresses are 32-bit values zero-extended into cpu_exclusive_addr).
 */
static void gen_clrex(DisasContext *s)
{
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
7502
/*
 * Emit code for a store-exclusive (STREX and friends).  Conceptually:
 *   if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
 *       [addr] = {Rt};
 *       {Rd} = 0;
 *   } else {
 *       {Rd} = 1;
 *   }
 * The value check and the conditional store are performed with a single
 * atomic cmpxchg against the value recorded by gen_load_exclusive.
 */
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i32 addr, int size)
{
    TCGv_i32 t0, t1, t2;
    TCGv_i64 extaddr;
    TCGv taddr;
    TCGLabel *done_label;
    TCGLabel *fail_label;
    TCGMemOp opc = size | MO_ALIGN | s->be_data;

    fail_label = gen_new_label();
    done_label = gen_new_label();
    extaddr = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(extaddr, addr);
    /* Fail immediately if this is not the address marked exclusive */
    tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
    tcg_temp_free_i64(extaddr);

    taddr = gen_aa32_addr(s, addr, opc);
    t0 = tcg_temp_new_i32();
    t1 = load_reg(s, rt);
    if (size == 3) {
        TCGv_i64 o64 = tcg_temp_new_i64();
        TCGv_i64 n64 = tcg_temp_new_i64();

        t2 = load_reg(s, rt2);

        /*
         * Build the 64-bit new value so that the word at the lowest
         * address is Rt and the one at addr+4 is Rt2; with MO_BE data
         * that means Rt must occupy the high half (matching the layout
         * used by gen_load_exclusive when it recorded cpu_exclusive_val).
         */
        if (s->be_data == MO_BE) {
            tcg_gen_concat_i32_i64(n64, t2, t1);
        } else {
            tcg_gen_concat_i32_i64(n64, t1, t2);
        }
        tcg_temp_free_i32(t2);

        tcg_gen_atomic_cmpxchg_i64(o64, taddr, cpu_exclusive_val, n64,
                                   get_mem_index(s), opc);
        tcg_temp_free_i64(n64);

        /* t0 = 0 if the old memory value matched (store succeeded) */
        tcg_gen_setcond_i64(TCG_COND_NE, o64, o64, cpu_exclusive_val);
        tcg_gen_extrl_i64_i32(t0, o64);

        tcg_temp_free_i64(o64);
    } else {
        t2 = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(t2, cpu_exclusive_val);
        tcg_gen_atomic_cmpxchg_i32(t0, taddr, t2, t1, get_mem_index(s), opc);
        tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t2);
        tcg_temp_free_i32(t2);
    }
    tcg_temp_free_i32(t1);
    tcg_temp_free(taddr);
    tcg_gen_mov_i32(cpu_R[rd], t0);
    tcg_temp_free_i32(t0);
    tcg_gen_br(done_label);

    gen_set_label(fail_label);
    tcg_gen_movi_i32(cpu_R[rd], 1);
    gen_set_label(done_label);
    /* Success or failure, the exclusive monitor is cleared */
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
7576
7577
7578
7579
7580
7581
7582
7583
7584
7585
/*
 * Emit code for SRS (Store Return State): store LR and SPSR to the
 * stack of the banked mode 'mode' using addressing mode 'amode',
 * optionally writing back the updated SP of that mode.
 */
static void gen_srs(DisasContext *s,
                    uint32_t mode, uint32_t amode, bool writeback)
{
    int32_t offset;
    TCGv_i32 addr, tmp;
    bool undef = false;

    /*
     * SRS is:
     * - trapped to EL3 if we are at Secure EL1 (non-secure bit clear)
     *   and the specified mode is Monitor mode
     * - UNDEFINED in Hyp mode and at EL0
     * - UNDEFINED (our choice for the UNPREDICTABLE cases) if the
     *   specified mode is not implemented, not a valid mode number,
     *   at a higher exception level, or Monitor when we are Non-secure.
     */
    if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
        gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), 3);
        return;
    }

    if (s->current_el == 0 || s->current_el == 2) {
        undef = true;
    }

    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_FIQ:
    case ARM_CPU_MODE_IRQ:
    case ARM_CPU_MODE_SVC:
    case ARM_CPU_MODE_ABT:
    case ARM_CPU_MODE_UND:
    case ARM_CPU_MODE_SYS:
        break;
    case ARM_CPU_MODE_HYP:
        if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) {
            undef = true;
        }
        break;
    case ARM_CPU_MODE_MON:
        /* The Secure-EL1 and EL0/EL2 cases were handled above, so here
         * Monitor mode is only valid if we are actually at EL3.
         */
        if (s->current_el != 3) {
            undef = true;
        }
        break;
    default:
        undef = true;
    }

    if (undef) {
        gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
                           default_exception_el(s));
        return;
    }

    addr = tcg_temp_new_i32();
    tmp = tcg_const_i32(mode);
    /* Sync condexec and PC first: the banked-r13 helper may raise an
     * exception at runtime.
     */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - 4);
    gen_helper_get_r13_banked(addr, cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    /* Offset of the lower of the two stored words from SP, per amode */
    switch (amode) {
    case 0: /* DA */
        offset = -4;
        break;
    case 1: /* IA */
        offset = 0;
        break;
    case 2: /* DB */
        offset = -8;
        break;
    case 3: /* IB */
        offset = 4;
        break;
    default:
        abort();
    }
    tcg_gen_addi_i32(addr, addr, offset);
    tmp = load_reg(s, 14);
    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
    tcg_temp_free_i32(tmp);
    tmp = load_cpu_field(spsr);
    tcg_gen_addi_i32(addr, addr, 4);
    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
    tcg_temp_free_i32(tmp);
    if (writeback) {
        /* Adjust from "address of second word" back to the final SP */
        switch (amode) {
        case 0:
            offset = -8;
            break;
        case 1:
            offset = 4;
            break;
        case 2:
            offset = -4;
            break;
        case 3:
            offset = 0;
            break;
        default:
            abort();
        }
        tcg_gen_addi_i32(addr, addr, offset);
        tmp = tcg_const_i32(mode);
        gen_helper_set_r13_banked(cpu_env, tmp, addr);
        tcg_temp_free_i32(tmp);
    }
    tcg_temp_free_i32(addr);
    s->base.is_jmp = DISAS_UPDATE;
}
7703
7704
7705static void arm_gen_condlabel(DisasContext *s)
7706{
7707 if (!s->condjmp) {
7708 s->condlabel = gen_new_label();
7709 s->condjmp = 1;
7710 }
7711}
7712
7713
7714static void arm_skip_unless(DisasContext *s, uint32_t cond)
7715{
7716 arm_gen_condlabel(s);
7717 arm_gen_test_cc(cond ^ 1, s->condlabel);
7718}
7719
7720static void disas_arm_insn(DisasContext *s, unsigned int insn)
7721{
7722 unsigned int cond, val, op1, i, shift, rm, rs, rn, rd, sh;
7723 TCGv_i32 tmp;
7724 TCGv_i32 tmp2;
7725 TCGv_i32 tmp3;
7726 TCGv_i32 addr;
7727 TCGv_i64 tmp64;
7728
7729
7730
7731
7732 if (arm_dc_feature(s, ARM_FEATURE_M)) {
7733 gen_exception_insn(s, 4, EXCP_INVSTATE, syn_uncategorized(),
7734 default_exception_el(s));
7735 return;
7736 }
7737 cond = insn >> 28;
7738 if (cond == 0xf){
7739
7740
7741
7742
7743 ARCH(5);
7744
7745
7746 if (((insn >> 25) & 7) == 1) {
7747
7748 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
7749 goto illegal_op;
7750 }
7751
7752 if (disas_neon_data_insn(s, insn)) {
7753 goto illegal_op;
7754 }
7755 return;
7756 }
7757 if ((insn & 0x0f100000) == 0x04000000) {
7758
7759 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
7760 goto illegal_op;
7761 }
7762
7763 if (disas_neon_ls_insn(s, insn)) {
7764 goto illegal_op;
7765 }
7766 return;
7767 }
7768 if ((insn & 0x0f000e10) == 0x0e000a00) {
7769
7770 if (disas_vfp_insn(s, insn)) {
7771 goto illegal_op;
7772 }
7773 return;
7774 }
7775 if (((insn & 0x0f30f000) == 0x0510f000) ||
7776 ((insn & 0x0f30f010) == 0x0710f000)) {
7777 if ((insn & (1 << 22)) == 0) {
7778
7779 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
7780 goto illegal_op;
7781 }
7782 }
7783
7784 ARCH(5TE);
7785 return;
7786 }
7787 if (((insn & 0x0f70f000) == 0x0450f000) ||
7788 ((insn & 0x0f70f010) == 0x0650f000)) {
7789 ARCH(7);
7790 return;
7791 }
7792 if (((insn & 0x0f700000) == 0x04100000) ||
7793 ((insn & 0x0f700010) == 0x06100000)) {
7794 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
7795 goto illegal_op;
7796 }
7797 return;
7798 }
7799
7800 if ((insn & 0x0ffffdff) == 0x01010000) {
7801 ARCH(6);
7802
7803 if (((insn >> 9) & 1) != !!(s->be_data == MO_BE)) {
7804 gen_helper_setend(cpu_env);
7805 s->base.is_jmp = DISAS_UPDATE;
7806 }
7807 return;
7808 } else if ((insn & 0x0fffff00) == 0x057ff000) {
7809 switch ((insn >> 4) & 0xf) {
7810 case 1:
7811 ARCH(6K);
7812 gen_clrex(s);
7813 return;
7814 case 4:
7815 case 5:
7816 ARCH(7);
7817 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
7818 return;
7819 case 6:
7820
7821
7822
7823
7824 gen_goto_tb(s, 0, s->pc & ~1);
7825 return;
7826 case 7:
7827 if ((insn & 0xf) || !dc_isar_feature(aa32_sb, s)) {
7828 goto illegal_op;
7829 }
7830
7831
7832
7833
7834 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
7835 gen_goto_tb(s, 0, s->pc & ~1);
7836 return;
7837 default:
7838 goto illegal_op;
7839 }
7840 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
7841
7842 ARCH(6);
7843 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
7844 return;
7845 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
7846
7847 int32_t offset;
7848 if (IS_USER(s))
7849 goto illegal_op;
7850 ARCH(6);
7851 rn = (insn >> 16) & 0xf;
7852 addr = load_reg(s, rn);
7853 i = (insn >> 23) & 3;
7854 switch (i) {
7855 case 0: offset = -4; break;
7856 case 1: offset = 0; break;
7857 case 2: offset = -8; break;
7858 case 3: offset = 4; break;
7859 default: abort();
7860 }
7861 if (offset)
7862 tcg_gen_addi_i32(addr, addr, offset);
7863
7864 tmp = tcg_temp_new_i32();
7865 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
7866 tcg_gen_addi_i32(addr, addr, 4);
7867 tmp2 = tcg_temp_new_i32();
7868 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
7869 if (insn & (1 << 21)) {
7870
7871 switch (i) {
7872 case 0: offset = -8; break;
7873 case 1: offset = 4; break;
7874 case 2: offset = -4; break;
7875 case 3: offset = 0; break;
7876 default: abort();
7877 }
7878 if (offset)
7879 tcg_gen_addi_i32(addr, addr, offset);
7880 store_reg(s, rn, addr);
7881 } else {
7882 tcg_temp_free_i32(addr);
7883 }
7884 gen_rfe(s, tmp, tmp2);
7885 return;
7886 } else if ((insn & 0x0e000000) == 0x0a000000) {
7887
7888 int32_t offset;
7889
7890 val = (uint32_t)s->pc;
7891 tmp = tcg_temp_new_i32();
7892 tcg_gen_movi_i32(tmp, val);
7893 store_reg(s, 14, tmp);
7894
7895 offset = (((int32_t)insn) << 8) >> 8;
7896
7897 val += (offset << 2) | ((insn >> 23) & 2) | 1;
7898
7899 val += 4;
7900
7901 gen_bx_im(s, val);
7902 return;
7903 } else if ((insn & 0x0e000f00) == 0x0c000100) {
7904 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
7905
7906 if (extract32(s->c15_cpar, 1, 1)) {
7907 if (!disas_iwmmxt_insn(s, insn)) {
7908 return;
7909 }
7910 }
7911 }
7912 } else if ((insn & 0x0e000a00) == 0x0c000800
7913 && arm_dc_feature(s, ARM_FEATURE_V8)) {
7914 if (disas_neon_insn_3same_ext(s, insn)) {
7915 goto illegal_op;
7916 }
7917 return;
7918 } else if ((insn & 0x0f000a00) == 0x0e000800
7919 && arm_dc_feature(s, ARM_FEATURE_V8)) {
7920 if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
7921 goto illegal_op;
7922 }
7923 return;
7924 } else if ((insn & 0x0fe00000) == 0x0c400000) {
7925
7926 ARCH(5TE);
7927 } else if ((insn & 0x0f000010) == 0x0e000010) {
7928
7929 } else if ((insn & 0x0ff10020) == 0x01000000) {
7930 uint32_t mask;
7931 uint32_t val;
7932
7933 if (IS_USER(s))
7934 return;
7935 mask = val = 0;
7936 if (insn & (1 << 19)) {
7937 if (insn & (1 << 8))
7938 mask |= CPSR_A;
7939 if (insn & (1 << 7))
7940 mask |= CPSR_I;
7941 if (insn & (1 << 6))
7942 mask |= CPSR_F;
7943 if (insn & (1 << 18))
7944 val |= mask;
7945 }
7946 if (insn & (1 << 17)) {
7947 mask |= CPSR_M;
7948 val |= (insn & 0x1f);
7949 }
7950 if (mask) {
7951 gen_set_psr_im(s, mask, 0, val);
7952 }
7953 return;
7954 }
7955 goto illegal_op;
7956 }
7957 if (cond != 0xe) {
7958
7959
7960 arm_skip_unless(s, cond);
7961 }
7962 if ((insn & 0x0f900000) == 0x03000000) {
7963 if ((insn & (1 << 21)) == 0) {
7964 ARCH(6T2);
7965 rd = (insn >> 12) & 0xf;
7966 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
7967 if ((insn & (1 << 22)) == 0) {
7968
7969 tmp = tcg_temp_new_i32();
7970 tcg_gen_movi_i32(tmp, val);
7971 } else {
7972
7973 tmp = load_reg(s, rd);
7974 tcg_gen_ext16u_i32(tmp, tmp);
7975 tcg_gen_ori_i32(tmp, tmp, val << 16);
7976 }
7977 store_reg(s, rd, tmp);
7978 } else {
7979 if (((insn >> 12) & 0xf) != 0xf)
7980 goto illegal_op;
7981 if (((insn >> 16) & 0xf) == 0) {
7982 gen_nop_hint(s, insn & 0xff);
7983 } else {
7984
7985 val = insn & 0xff;
7986 shift = ((insn >> 8) & 0xf) * 2;
7987 if (shift)
7988 val = (val >> shift) | (val << (32 - shift));
7989 i = ((insn & (1 << 22)) != 0);
7990 if (gen_set_psr_im(s, msr_mask(s, (insn >> 16) & 0xf, i),
7991 i, val)) {
7992 goto illegal_op;
7993 }
7994 }
7995 }
7996 } else if ((insn & 0x0f900000) == 0x01000000
7997 && (insn & 0x00000090) != 0x00000090) {
7998
7999 op1 = (insn >> 21) & 3;
8000 sh = (insn >> 4) & 0xf;
8001 rm = insn & 0xf;
8002 switch (sh) {
8003 case 0x0:
8004 if (insn & (1 << 9)) {
8005
8006 int sysm = extract32(insn, 16, 4) |
8007 (extract32(insn, 8, 1) << 4);
8008 int r = extract32(insn, 22, 1);
8009
8010 if (op1 & 1) {
8011
8012 gen_msr_banked(s, r, sysm, rm);
8013 } else {
8014
8015 int rd = extract32(insn, 12, 4);
8016
8017 gen_mrs_banked(s, r, sysm, rd);
8018 }
8019 break;
8020 }
8021
8022
8023 if (op1 & 1) {
8024
8025 tmp = load_reg(s, rm);
8026 i = ((op1 & 2) != 0);
8027 if (gen_set_psr(s, msr_mask(s, (insn >> 16) & 0xf, i), i, tmp))
8028 goto illegal_op;
8029 } else {
8030
8031 rd = (insn >> 12) & 0xf;
8032 if (op1 & 2) {
8033 if (IS_USER(s))
8034 goto illegal_op;
8035 tmp = load_cpu_field(spsr);
8036 } else {
8037 tmp = tcg_temp_new_i32();
8038 gen_helper_cpsr_read(tmp, cpu_env);
8039 }
8040 store_reg(s, rd, tmp);
8041 }
8042 break;
8043 case 0x1:
8044 if (op1 == 1) {
8045
8046 ARCH(4T);
8047 tmp = load_reg(s, rm);
8048 gen_bx(s, tmp);
8049 } else if (op1 == 3) {
8050
8051 ARCH(5);
8052 rd = (insn >> 12) & 0xf;
8053 tmp = load_reg(s, rm);
8054 tcg_gen_clzi_i32(tmp, tmp, 32);
8055 store_reg(s, rd, tmp);
8056 } else {
8057 goto illegal_op;
8058 }
8059 break;
8060 case 0x2:
8061 if (op1 == 1) {
8062 ARCH(5J);
8063
8064 tmp = load_reg(s, rm);
8065 gen_bx(s, tmp);
8066 } else {
8067 goto illegal_op;
8068 }
8069 break;
8070 case 0x3:
8071 if (op1 != 1)
8072 goto illegal_op;
8073
8074 ARCH(5);
8075
8076 tmp = load_reg(s, rm);
8077 tmp2 = tcg_temp_new_i32();
8078 tcg_gen_movi_i32(tmp2, s->pc);
8079 store_reg(s, 14, tmp2);
8080 gen_bx(s, tmp);
8081 break;
8082 case 0x4:
8083 {
8084
8085 uint32_t c = extract32(insn, 8, 4);
8086
8087
8088
8089
8090
8091 if (!dc_isar_feature(aa32_crc32, s) || op1 == 0x3 || (c & 0xd) != 0) {
8092 goto illegal_op;
8093 }
8094
8095 rn = extract32(insn, 16, 4);
8096 rd = extract32(insn, 12, 4);
8097
8098 tmp = load_reg(s, rn);
8099 tmp2 = load_reg(s, rm);
8100 if (op1 == 0) {
8101 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
8102 } else if (op1 == 1) {
8103 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
8104 }
8105 tmp3 = tcg_const_i32(1 << op1);
8106 if (c & 0x2) {
8107 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
8108 } else {
8109 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
8110 }
8111 tcg_temp_free_i32(tmp2);
8112 tcg_temp_free_i32(tmp3);
8113 store_reg(s, rd, tmp);
8114 break;
8115 }
8116 case 0x5:
8117 ARCH(5TE);
8118 rd = (insn >> 12) & 0xf;
8119 rn = (insn >> 16) & 0xf;
8120 tmp = load_reg(s, rm);
8121 tmp2 = load_reg(s, rn);
8122 if (op1 & 2)
8123 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
8124 if (op1 & 1)
8125 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
8126 else
8127 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
8128 tcg_temp_free_i32(tmp2);
8129 store_reg(s, rd, tmp);
8130 break;
8131 case 0x6:
8132 if (op1 != 3) {
8133 goto illegal_op;
8134 }
8135 if (!arm_dc_feature(s, ARM_FEATURE_V7VE)) {
8136 goto illegal_op;
8137 }
8138 if ((insn & 0x000fff0f) != 0x0000000e) {
8139
8140 goto illegal_op;
8141 }
8142
8143 if (s->current_el == 2) {
8144 tmp = load_cpu_field(elr_el[2]);
8145 } else {
8146 tmp = load_reg(s, 14);
8147 }
8148 gen_exception_return(s, tmp);
8149 break;
8150 case 7:
8151 {
8152 int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
8153 switch (op1) {
8154 case 0:
8155
8156 gen_hlt(s, imm16);
8157 break;
8158 case 1:
8159
8160 ARCH(5);
8161 gen_exception_bkpt_insn(s, 4, syn_aa32_bkpt(imm16, false));
8162 break;
8163 case 2:
8164
8165 ARCH(7);
8166 if (IS_USER(s)) {
8167 goto illegal_op;
8168 }
8169 gen_hvc(s, imm16);
8170 break;
8171 case 3:
8172
8173 ARCH(6K);
8174 if (IS_USER(s)) {
8175 goto illegal_op;
8176 }
8177 gen_smc(s);
8178 break;
8179 default:
8180 g_assert_not_reached();
8181 }
8182 break;
8183 }
8184 case 0x8:
8185 case 0xa:
8186 case 0xc:
8187 case 0xe:
8188 ARCH(5TE);
8189 rs = (insn >> 8) & 0xf;
8190 rn = (insn >> 12) & 0xf;
8191 rd = (insn >> 16) & 0xf;
8192 if (op1 == 1) {
8193
8194 tmp = load_reg(s, rm);
8195 tmp2 = load_reg(s, rs);
8196 if (sh & 4)
8197 tcg_gen_sari_i32(tmp2, tmp2, 16);
8198 else
8199 gen_sxth(tmp2);
8200 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8201 tcg_gen_shri_i64(tmp64, tmp64, 16);
8202 tmp = tcg_temp_new_i32();
8203 tcg_gen_extrl_i64_i32(tmp, tmp64);
8204 tcg_temp_free_i64(tmp64);
8205 if ((sh & 2) == 0) {
8206 tmp2 = load_reg(s, rn);
8207 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8208 tcg_temp_free_i32(tmp2);
8209 }
8210 store_reg(s, rd, tmp);
8211 } else {
8212
8213 tmp = load_reg(s, rm);
8214 tmp2 = load_reg(s, rs);
8215 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
8216 tcg_temp_free_i32(tmp2);
8217 if (op1 == 2) {
8218 tmp64 = tcg_temp_new_i64();
8219 tcg_gen_ext_i32_i64(tmp64, tmp);
8220 tcg_temp_free_i32(tmp);
8221 gen_addq(s, tmp64, rn, rd);
8222 gen_storeq_reg(s, rn, rd, tmp64);
8223 tcg_temp_free_i64(tmp64);
8224 } else {
8225 if (op1 == 0) {
8226 tmp2 = load_reg(s, rn);
8227 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8228 tcg_temp_free_i32(tmp2);
8229 }
8230 store_reg(s, rd, tmp);
8231 }
8232 }
8233 break;
8234 default:
8235 goto illegal_op;
8236 }
8237 } else if (((insn & 0x0e000000) == 0 &&
8238 (insn & 0x00000090) != 0x90) ||
8239 ((insn & 0x0e000000) == (1 << 25))) {
8240 int set_cc, logic_cc, shiftop;
8241
8242 op1 = (insn >> 21) & 0xf;
8243 set_cc = (insn >> 20) & 1;
8244 logic_cc = table_logic_cc[op1] & set_cc;
8245
8246
8247 if (insn & (1 << 25)) {
8248
8249 val = insn & 0xff;
8250 shift = ((insn >> 8) & 0xf) * 2;
8251 if (shift) {
8252 val = (val >> shift) | (val << (32 - shift));
8253 }
8254 tmp2 = tcg_temp_new_i32();
8255 tcg_gen_movi_i32(tmp2, val);
8256 if (logic_cc && shift) {
8257 gen_set_CF_bit31(tmp2);
8258 }
8259 } else {
8260
8261 rm = (insn) & 0xf;
8262 tmp2 = load_reg(s, rm);
8263 shiftop = (insn >> 5) & 3;
8264 if (!(insn & (1 << 4))) {
8265 shift = (insn >> 7) & 0x1f;
8266 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
8267 } else {
8268 rs = (insn >> 8) & 0xf;
8269 tmp = load_reg(s, rs);
8270 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
8271 }
8272 }
8273 if (op1 != 0x0f && op1 != 0x0d) {
8274 rn = (insn >> 16) & 0xf;
8275 tmp = load_reg(s, rn);
8276 } else {
8277 tmp = NULL;
8278 }
8279 rd = (insn >> 12) & 0xf;
8280 switch(op1) {
8281 case 0x00:
8282 tcg_gen_and_i32(tmp, tmp, tmp2);
8283 if (logic_cc) {
8284 gen_logic_CC(tmp);
8285 }
8286 store_reg_bx(s, rd, tmp);
8287 break;
8288 case 0x01:
8289 tcg_gen_xor_i32(tmp, tmp, tmp2);
8290 if (logic_cc) {
8291 gen_logic_CC(tmp);
8292 }
8293 store_reg_bx(s, rd, tmp);
8294 break;
8295 case 0x02:
8296 if (set_cc && rd == 15) {
8297
8298 if (IS_USER(s)) {
8299 goto illegal_op;
8300 }
8301 gen_sub_CC(tmp, tmp, tmp2);
8302 gen_exception_return(s, tmp);
8303 } else {
8304 if (set_cc) {
8305 gen_sub_CC(tmp, tmp, tmp2);
8306 } else {
8307 tcg_gen_sub_i32(tmp, tmp, tmp2);
8308 }
8309 store_reg_bx(s, rd, tmp);
8310 }
8311 break;
8312 case 0x03:
8313 if (set_cc) {
8314 gen_sub_CC(tmp, tmp2, tmp);
8315 } else {
8316 tcg_gen_sub_i32(tmp, tmp2, tmp);
8317 }
8318 store_reg_bx(s, rd, tmp);
8319 break;
8320 case 0x04:
8321 if (set_cc) {
8322 gen_add_CC(tmp, tmp, tmp2);
8323 } else {
8324 tcg_gen_add_i32(tmp, tmp, tmp2);
8325 }
8326 store_reg_bx(s, rd, tmp);
8327 break;
8328 case 0x05:
8329 if (set_cc) {
8330 gen_adc_CC(tmp, tmp, tmp2);
8331 } else {
8332 gen_add_carry(tmp, tmp, tmp2);
8333 }
8334 store_reg_bx(s, rd, tmp);
8335 break;
8336 case 0x06:
8337 if (set_cc) {
8338 gen_sbc_CC(tmp, tmp, tmp2);
8339 } else {
8340 gen_sub_carry(tmp, tmp, tmp2);
8341 }
8342 store_reg_bx(s, rd, tmp);
8343 break;
8344 case 0x07:
8345 if (set_cc) {
8346 gen_sbc_CC(tmp, tmp2, tmp);
8347 } else {
8348 gen_sub_carry(tmp, tmp2, tmp);
8349 }
8350 store_reg_bx(s, rd, tmp);
8351 break;
8352 case 0x08:
8353 if (set_cc) {
8354 tcg_gen_and_i32(tmp, tmp, tmp2);
8355 gen_logic_CC(tmp);
8356 }
8357 tcg_temp_free_i32(tmp);
8358 break;
8359 case 0x09:
8360 if (set_cc) {
8361 tcg_gen_xor_i32(tmp, tmp, tmp2);
8362 gen_logic_CC(tmp);
8363 }
8364 tcg_temp_free_i32(tmp);
8365 break;
8366 case 0x0a:
8367 if (set_cc) {
8368 gen_sub_CC(tmp, tmp, tmp2);
8369 }
8370 tcg_temp_free_i32(tmp);
8371 break;
8372 case 0x0b:
8373 if (set_cc) {
8374 gen_add_CC(tmp, tmp, tmp2);
8375 }
8376 tcg_temp_free_i32(tmp);
8377 break;
8378 case 0x0c:
8379 tcg_gen_or_i32(tmp, tmp, tmp2);
8380 if (logic_cc) {
8381 gen_logic_CC(tmp);
8382 }
8383 store_reg_bx(s, rd, tmp);
8384 break;
8385 case 0x0d:
8386 if (logic_cc && rd == 15) {
8387
8388 if (IS_USER(s)) {
8389 goto illegal_op;
8390 }
8391 gen_exception_return(s, tmp2);
8392 } else {
8393 if (logic_cc) {
8394 gen_logic_CC(tmp2);
8395 }
8396 store_reg_bx(s, rd, tmp2);
8397 }
8398 break;
8399 case 0x0e:
8400 tcg_gen_andc_i32(tmp, tmp, tmp2);
8401 if (logic_cc) {
8402 gen_logic_CC(tmp);
8403 }
8404 store_reg_bx(s, rd, tmp);
8405 break;
8406 default:
8407 case 0x0f:
8408 tcg_gen_not_i32(tmp2, tmp2);
8409 if (logic_cc) {
8410 gen_logic_CC(tmp2);
8411 }
8412 store_reg_bx(s, rd, tmp2);
8413 break;
8414 }
8415 if (op1 != 0x0f && op1 != 0x0d) {
8416 tcg_temp_free_i32(tmp2);
8417 }
8418 } else {
8419
8420 op1 = (insn >> 24) & 0xf;
8421 switch(op1) {
8422 case 0x0:
8423 case 0x1:
8424
8425 sh = (insn >> 5) & 3;
8426 if (sh == 0) {
8427 if (op1 == 0x0) {
8428 rd = (insn >> 16) & 0xf;
8429 rn = (insn >> 12) & 0xf;
8430 rs = (insn >> 8) & 0xf;
8431 rm = (insn) & 0xf;
8432 op1 = (insn >> 20) & 0xf;
8433 switch (op1) {
8434 case 0: case 1: case 2: case 3: case 6:
8435
8436 tmp = load_reg(s, rs);
8437 tmp2 = load_reg(s, rm);
8438 tcg_gen_mul_i32(tmp, tmp, tmp2);
8439 tcg_temp_free_i32(tmp2);
8440 if (insn & (1 << 22)) {
8441
8442 ARCH(6T2);
8443 tmp2 = load_reg(s, rn);
8444 tcg_gen_sub_i32(tmp, tmp2, tmp);
8445 tcg_temp_free_i32(tmp2);
8446 } else if (insn & (1 << 21)) {
8447
8448 tmp2 = load_reg(s, rn);
8449 tcg_gen_add_i32(tmp, tmp, tmp2);
8450 tcg_temp_free_i32(tmp2);
8451 }
8452 if (insn & (1 << 20))
8453 gen_logic_CC(tmp);
8454 store_reg(s, rd, tmp);
8455 break;
8456 case 4:
8457
8458 ARCH(6);
8459 tmp = load_reg(s, rs);
8460 tmp2 = load_reg(s, rm);
8461 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8462 gen_addq_lo(s, tmp64, rn);
8463 gen_addq_lo(s, tmp64, rd);
8464 gen_storeq_reg(s, rn, rd, tmp64);
8465 tcg_temp_free_i64(tmp64);
8466 break;
8467 case 8: case 9: case 10: case 11:
8468 case 12: case 13: case 14: case 15:
8469
8470 tmp = load_reg(s, rs);
8471 tmp2 = load_reg(s, rm);
8472 if (insn & (1 << 22)) {
8473 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8474 } else {
8475 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8476 }
8477 if (insn & (1 << 21)) {
8478 TCGv_i32 al = load_reg(s, rn);
8479 TCGv_i32 ah = load_reg(s, rd);
8480 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
8481 tcg_temp_free_i32(al);
8482 tcg_temp_free_i32(ah);
8483 }
8484 if (insn & (1 << 20)) {
8485 gen_logicq_cc(tmp, tmp2);
8486 }
8487 store_reg(s, rn, tmp);
8488 store_reg(s, rd, tmp2);
8489 break;
8490 default:
8491 goto illegal_op;
8492 }
8493 } else {
8494 rn = (insn >> 16) & 0xf;
8495 rd = (insn >> 12) & 0xf;
8496 if (insn & (1 << 23)) {
8497
8498 bool is_ld = extract32(insn, 20, 1);
8499 bool is_lasr = !extract32(insn, 8, 1);
8500 int op2 = (insn >> 8) & 3;
8501 op1 = (insn >> 21) & 0x3;
8502
8503 switch (op2) {
8504 case 0:
8505 if (op1 == 1) {
8506 goto illegal_op;
8507 }
8508 ARCH(8);
8509 break;
8510 case 1:
8511 goto illegal_op;
8512 case 2:
8513 ARCH(8);
8514 break;
8515 case 3:
8516 if (op1) {
8517 ARCH(6K);
8518 } else {
8519 ARCH(6);
8520 }
8521 break;
8522 }
8523
8524 addr = tcg_temp_local_new_i32();
8525 load_reg_var(s, addr, rn);
8526
8527 if (is_lasr && !is_ld) {
8528 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
8529 }
8530
8531 if (op2 == 0) {
8532 if (is_ld) {
8533 tmp = tcg_temp_new_i32();
8534 switch (op1) {
8535 case 0:
8536 gen_aa32_ld32u_iss(s, tmp, addr,
8537 get_mem_index(s),
8538 rd | ISSIsAcqRel);
8539 break;
8540 case 2:
8541 gen_aa32_ld8u_iss(s, tmp, addr,
8542 get_mem_index(s),
8543 rd | ISSIsAcqRel);
8544 break;
8545 case 3:
8546 gen_aa32_ld16u_iss(s, tmp, addr,
8547 get_mem_index(s),
8548 rd | ISSIsAcqRel);
8549 break;
8550 default:
8551 abort();
8552 }
8553 store_reg(s, rd, tmp);
8554 } else {
8555 rm = insn & 0xf;
8556 tmp = load_reg(s, rm);
8557 switch (op1) {
8558 case 0:
8559 gen_aa32_st32_iss(s, tmp, addr,
8560 get_mem_index(s),
8561 rm | ISSIsAcqRel);
8562 break;
8563 case 2:
8564 gen_aa32_st8_iss(s, tmp, addr,
8565 get_mem_index(s),
8566 rm | ISSIsAcqRel);
8567 break;
8568 case 3:
8569 gen_aa32_st16_iss(s, tmp, addr,
8570 get_mem_index(s),
8571 rm | ISSIsAcqRel);
8572 break;
8573 default:
8574 abort();
8575 }
8576 tcg_temp_free_i32(tmp);
8577 }
8578 } else if (is_ld) {
8579 switch (op1) {
8580 case 0:
8581 gen_load_exclusive(s, rd, 15, addr, 2);
8582 break;
8583 case 1:
8584 gen_load_exclusive(s, rd, rd + 1, addr, 3);
8585 break;
8586 case 2:
8587 gen_load_exclusive(s, rd, 15, addr, 0);
8588 break;
8589 case 3:
8590 gen_load_exclusive(s, rd, 15, addr, 1);
8591 break;
8592 default:
8593 abort();
8594 }
8595 } else {
8596 rm = insn & 0xf;
8597 switch (op1) {
8598 case 0:
8599 gen_store_exclusive(s, rd, rm, 15, addr, 2);
8600 break;
8601 case 1:
8602 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
8603 break;
8604 case 2:
8605 gen_store_exclusive(s, rd, rm, 15, addr, 0);
8606 break;
8607 case 3:
8608 gen_store_exclusive(s, rd, rm, 15, addr, 1);
8609 break;
8610 default:
8611 abort();
8612 }
8613 }
8614 tcg_temp_free_i32(addr);
8615
8616 if (is_lasr && is_ld) {
8617 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
8618 }
8619 } else if ((insn & 0x00300f00) == 0) {
8620
8621
8622
8623
8624 TCGv taddr;
8625 TCGMemOp opc = s->be_data;
8626
8627 rm = (insn) & 0xf;
8628
8629 if (insn & (1 << 22)) {
8630 opc |= MO_UB;
8631 } else {
8632 opc |= MO_UL | MO_ALIGN;
8633 }
8634
8635 addr = load_reg(s, rn);
8636 taddr = gen_aa32_addr(s, addr, opc);
8637 tcg_temp_free_i32(addr);
8638
8639 tmp = load_reg(s, rm);
8640 tcg_gen_atomic_xchg_i32(tmp, taddr, tmp,
8641 get_mem_index(s), opc);
8642 tcg_temp_free(taddr);
8643 store_reg(s, rd, tmp);
8644 } else {
8645 goto illegal_op;
8646 }
8647 }
8648 } else {
8649 int address_offset;
8650 bool load = insn & (1 << 20);
8651 bool wbit = insn & (1 << 21);
8652 bool pbit = insn & (1 << 24);
8653 bool doubleword = false;
8654 ISSInfo issinfo;
8655
8656
8657 rn = (insn >> 16) & 0xf;
8658 rd = (insn >> 12) & 0xf;
8659
8660
8661 issinfo = (pbit & !wbit) ? rd : ISSInvalid;
8662
8663 if (!load && (sh & 2)) {
8664
8665 ARCH(5TE);
8666 if (rd & 1) {
8667
8668 goto illegal_op;
8669 }
8670 load = (sh & 1) == 0;
8671 doubleword = true;
8672 }
8673
8674 addr = load_reg(s, rn);
8675 if (pbit) {
8676 gen_add_datah_offset(s, insn, 0, addr);
8677 }
8678 address_offset = 0;
8679
8680 if (doubleword) {
8681 if (!load) {
8682
8683 tmp = load_reg(s, rd);
8684 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
8685 tcg_temp_free_i32(tmp);
8686 tcg_gen_addi_i32(addr, addr, 4);
8687 tmp = load_reg(s, rd + 1);
8688 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
8689 tcg_temp_free_i32(tmp);
8690 } else {
8691
8692 tmp = tcg_temp_new_i32();
8693 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
8694 store_reg(s, rd, tmp);
8695 tcg_gen_addi_i32(addr, addr, 4);
8696 tmp = tcg_temp_new_i32();
8697 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
8698 rd++;
8699 }
8700 address_offset = -4;
8701 } else if (load) {
8702
8703 tmp = tcg_temp_new_i32();
8704 switch (sh) {
8705 case 1:
8706 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
8707 issinfo);
8708 break;
8709 case 2:
8710 gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s),
8711 issinfo);
8712 break;
8713 default:
8714 case 3:
8715 gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s),
8716 issinfo);
8717 break;
8718 }
8719 } else {
8720
8721 tmp = load_reg(s, rd);
8722 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), issinfo);
8723 tcg_temp_free_i32(tmp);
8724 }
8725
8726
8727
8728
8729 if (!pbit) {
8730 gen_add_datah_offset(s, insn, address_offset, addr);
8731 store_reg(s, rn, addr);
8732 } else if (wbit) {
8733 if (address_offset)
8734 tcg_gen_addi_i32(addr, addr, address_offset);
8735 store_reg(s, rn, addr);
8736 } else {
8737 tcg_temp_free_i32(addr);
8738 }
8739 if (load) {
8740
8741 store_reg(s, rd, tmp);
8742 }
8743 }
8744 break;
8745 case 0x4:
8746 case 0x5:
8747 goto do_ldst;
8748 case 0x6:
8749 case 0x7:
8750 if (insn & (1 << 4)) {
8751 ARCH(6);
8752
8753 rm = insn & 0xf;
8754 rn = (insn >> 16) & 0xf;
8755 rd = (insn >> 12) & 0xf;
8756 rs = (insn >> 8) & 0xf;
8757 switch ((insn >> 23) & 3) {
8758 case 0:
8759 op1 = (insn >> 20) & 7;
8760 tmp = load_reg(s, rn);
8761 tmp2 = load_reg(s, rm);
8762 sh = (insn >> 5) & 7;
8763 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
8764 goto illegal_op;
8765 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
8766 tcg_temp_free_i32(tmp2);
8767 store_reg(s, rd, tmp);
8768 break;
8769 case 1:
8770 if ((insn & 0x00700020) == 0) {
8771
8772 tmp = load_reg(s, rn);
8773 tmp2 = load_reg(s, rm);
8774 shift = (insn >> 7) & 0x1f;
8775 if (insn & (1 << 6)) {
8776
8777 if (shift == 0)
8778 shift = 31;
8779 tcg_gen_sari_i32(tmp2, tmp2, shift);
8780 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
8781 tcg_gen_ext16u_i32(tmp2, tmp2);
8782 } else {
8783
8784 if (shift)
8785 tcg_gen_shli_i32(tmp2, tmp2, shift);
8786 tcg_gen_ext16u_i32(tmp, tmp);
8787 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8788 }
8789 tcg_gen_or_i32(tmp, tmp, tmp2);
8790 tcg_temp_free_i32(tmp2);
8791 store_reg(s, rd, tmp);
8792 } else if ((insn & 0x00200020) == 0x00200000) {
8793
8794 tmp = load_reg(s, rm);
8795 shift = (insn >> 7) & 0x1f;
8796 if (insn & (1 << 6)) {
8797 if (shift == 0)
8798 shift = 31;
8799 tcg_gen_sari_i32(tmp, tmp, shift);
8800 } else {
8801 tcg_gen_shli_i32(tmp, tmp, shift);
8802 }
8803 sh = (insn >> 16) & 0x1f;
8804 tmp2 = tcg_const_i32(sh);
8805 if (insn & (1 << 22))
8806 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
8807 else
8808 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
8809 tcg_temp_free_i32(tmp2);
8810 store_reg(s, rd, tmp);
8811 } else if ((insn & 0x00300fe0) == 0x00200f20) {
8812
8813 tmp = load_reg(s, rm);
8814 sh = (insn >> 16) & 0x1f;
8815 tmp2 = tcg_const_i32(sh);
8816 if (insn & (1 << 22))
8817 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
8818 else
8819 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
8820 tcg_temp_free_i32(tmp2);
8821 store_reg(s, rd, tmp);
8822 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
8823
8824 tmp = load_reg(s, rn);
8825 tmp2 = load_reg(s, rm);
8826 tmp3 = tcg_temp_new_i32();
8827 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
8828 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
8829 tcg_temp_free_i32(tmp3);
8830 tcg_temp_free_i32(tmp2);
8831 store_reg(s, rd, tmp);
8832 } else if ((insn & 0x000003e0) == 0x00000060) {
8833 tmp = load_reg(s, rm);
8834 shift = (insn >> 10) & 3;
8835
8836
8837 if (shift != 0)
8838 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
8839 op1 = (insn >> 20) & 7;
8840 switch (op1) {
8841 case 0: gen_sxtb16(tmp); break;
8842 case 2: gen_sxtb(tmp); break;
8843 case 3: gen_sxth(tmp); break;
8844 case 4: gen_uxtb16(tmp); break;
8845 case 6: gen_uxtb(tmp); break;
8846 case 7: gen_uxth(tmp); break;
8847 default: goto illegal_op;
8848 }
8849 if (rn != 15) {
8850 tmp2 = load_reg(s, rn);
8851 if ((op1 & 3) == 0) {
8852 gen_add16(tmp, tmp2);
8853 } else {
8854 tcg_gen_add_i32(tmp, tmp, tmp2);
8855 tcg_temp_free_i32(tmp2);
8856 }
8857 }
8858 store_reg(s, rd, tmp);
8859 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
8860
8861 tmp = load_reg(s, rm);
8862 if (insn & (1 << 22)) {
8863 if (insn & (1 << 7)) {
8864 gen_revsh(tmp);
8865 } else {
8866 ARCH(6T2);
8867 gen_helper_rbit(tmp, tmp);
8868 }
8869 } else {
8870 if (insn & (1 << 7))
8871 gen_rev16(tmp);
8872 else
8873 tcg_gen_bswap32_i32(tmp, tmp);
8874 }
8875 store_reg(s, rd, tmp);
8876 } else {
8877 goto illegal_op;
8878 }
8879 break;
8880 case 2:
8881 switch ((insn >> 20) & 0x7) {
8882 case 5:
8883 if (((insn >> 6) ^ (insn >> 7)) & 1) {
8884
8885 goto illegal_op;
8886 }
8887
8888
8889 tmp = load_reg(s, rm);
8890 tmp2 = load_reg(s, rs);
8891 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8892
8893 if (rd != 15) {
8894 tmp = load_reg(s, rd);
8895 if (insn & (1 << 6)) {
8896 tmp64 = gen_subq_msw(tmp64, tmp);
8897 } else {
8898 tmp64 = gen_addq_msw(tmp64, tmp);
8899 }
8900 }
8901 if (insn & (1 << 5)) {
8902 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
8903 }
8904 tcg_gen_shri_i64(tmp64, tmp64, 32);
8905 tmp = tcg_temp_new_i32();
8906 tcg_gen_extrl_i64_i32(tmp, tmp64);
8907 tcg_temp_free_i64(tmp64);
8908 store_reg(s, rn, tmp);
8909 break;
8910 case 0:
8911 case 4:
8912
8913 if (insn & (1 << 7)) {
8914 goto illegal_op;
8915 }
8916 tmp = load_reg(s, rm);
8917 tmp2 = load_reg(s, rs);
8918 if (insn & (1 << 5))
8919 gen_swap_half(tmp2);
8920 gen_smul_dual(tmp, tmp2);
8921 if (insn & (1 << 22)) {
8922
8923 TCGv_i64 tmp64_2;
8924
8925 tmp64 = tcg_temp_new_i64();
8926 tmp64_2 = tcg_temp_new_i64();
8927 tcg_gen_ext_i32_i64(tmp64, tmp);
8928 tcg_gen_ext_i32_i64(tmp64_2, tmp2);
8929 tcg_temp_free_i32(tmp);
8930 tcg_temp_free_i32(tmp2);
8931 if (insn & (1 << 6)) {
8932 tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
8933 } else {
8934 tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
8935 }
8936 tcg_temp_free_i64(tmp64_2);
8937 gen_addq(s, tmp64, rd, rn);
8938 gen_storeq_reg(s, rd, rn, tmp64);
8939 tcg_temp_free_i64(tmp64);
8940 } else {
8941
8942 if (insn & (1 << 6)) {
8943
8944 tcg_gen_sub_i32(tmp, tmp, tmp2);
8945 } else {
8946
8947
8948
8949
8950
8951 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8952 }
8953 tcg_temp_free_i32(tmp2);
8954 if (rd != 15)
8955 {
8956 tmp2 = load_reg(s, rd);
8957 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8958 tcg_temp_free_i32(tmp2);
8959 }
8960 store_reg(s, rn, tmp);
8961 }
8962 break;
8963 case 1:
8964 case 3:
8965
8966 if (!dc_isar_feature(arm_div, s)) {
8967 goto illegal_op;
8968 }
8969 if (((insn >> 5) & 7) || (rd != 15)) {
8970 goto illegal_op;
8971 }
8972 tmp = load_reg(s, rm);
8973 tmp2 = load_reg(s, rs);
8974 if (insn & (1 << 21)) {
8975 gen_helper_udiv(tmp, tmp, tmp2);
8976 } else {
8977 gen_helper_sdiv(tmp, tmp, tmp2);
8978 }
8979 tcg_temp_free_i32(tmp2);
8980 store_reg(s, rn, tmp);
8981 break;
8982 default:
8983 goto illegal_op;
8984 }
8985 break;
8986 case 3:
8987 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
8988 switch (op1) {
8989 case 0:
8990 ARCH(6);
8991 tmp = load_reg(s, rm);
8992 tmp2 = load_reg(s, rs);
8993 gen_helper_usad8(tmp, tmp, tmp2);
8994 tcg_temp_free_i32(tmp2);
8995 if (rd != 15) {
8996 tmp2 = load_reg(s, rd);
8997 tcg_gen_add_i32(tmp, tmp, tmp2);
8998 tcg_temp_free_i32(tmp2);
8999 }
9000 store_reg(s, rn, tmp);
9001 break;
9002 case 0x20: case 0x24: case 0x28: case 0x2c:
9003
9004 ARCH(6T2);
9005 shift = (insn >> 7) & 0x1f;
9006 i = (insn >> 16) & 0x1f;
9007 if (i < shift) {
9008
9009 goto illegal_op;
9010 }
9011 i = i + 1 - shift;
9012 if (rm == 15) {
9013 tmp = tcg_temp_new_i32();
9014 tcg_gen_movi_i32(tmp, 0);
9015 } else {
9016 tmp = load_reg(s, rm);
9017 }
9018 if (i != 32) {
9019 tmp2 = load_reg(s, rd);
9020 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
9021 tcg_temp_free_i32(tmp2);
9022 }
9023 store_reg(s, rd, tmp);
9024 break;
9025 case 0x12: case 0x16: case 0x1a: case 0x1e:
9026 case 0x32: case 0x36: case 0x3a: case 0x3e:
9027 ARCH(6T2);
9028 tmp = load_reg(s, rm);
9029 shift = (insn >> 7) & 0x1f;
9030 i = ((insn >> 16) & 0x1f) + 1;
9031 if (shift + i > 32)
9032 goto illegal_op;
9033 if (i < 32) {
9034 if (op1 & 0x20) {
9035 tcg_gen_extract_i32(tmp, tmp, shift, i);
9036 } else {
9037 tcg_gen_sextract_i32(tmp, tmp, shift, i);
9038 }
9039 }
9040 store_reg(s, rd, tmp);
9041 break;
9042 default:
9043 goto illegal_op;
9044 }
9045 break;
9046 }
9047 break;
9048 }
9049 do_ldst:
9050
9051
9052
9053
9054 sh = (0xf << 20) | (0xf << 4);
9055 if (op1 == 0x7 && ((insn & sh) == sh))
9056 {
9057 goto illegal_op;
9058 }
9059
9060 rn = (insn >> 16) & 0xf;
9061 rd = (insn >> 12) & 0xf;
9062 tmp2 = load_reg(s, rn);
9063 if ((insn & 0x01200000) == 0x00200000) {
9064
9065 i = get_a32_user_mem_index(s);
9066 } else {
9067 i = get_mem_index(s);
9068 }
9069 if (insn & (1 << 24))
9070 gen_add_data_offset(s, insn, tmp2);
9071 if (insn & (1 << 20)) {
9072
9073 tmp = tcg_temp_new_i32();
9074 if (insn & (1 << 22)) {
9075 gen_aa32_ld8u_iss(s, tmp, tmp2, i, rd);
9076 } else {
9077 gen_aa32_ld32u_iss(s, tmp, tmp2, i, rd);
9078 }
9079 } else {
9080
9081 tmp = load_reg(s, rd);
9082 if (insn & (1 << 22)) {
9083 gen_aa32_st8_iss(s, tmp, tmp2, i, rd);
9084 } else {
9085 gen_aa32_st32_iss(s, tmp, tmp2, i, rd);
9086 }
9087 tcg_temp_free_i32(tmp);
9088 }
9089 if (!(insn & (1 << 24))) {
9090 gen_add_data_offset(s, insn, tmp2);
9091 store_reg(s, rn, tmp2);
9092 } else if (insn & (1 << 21)) {
9093 store_reg(s, rn, tmp2);
9094 } else {
9095 tcg_temp_free_i32(tmp2);
9096 }
9097 if (insn & (1 << 20)) {
9098
9099 store_reg_from_load(s, rd, tmp);
9100 }
9101 break;
9102 case 0x08:
9103 case 0x09:
9104 {
9105 int j, n, loaded_base;
9106 bool exc_return = false;
9107 bool is_load = extract32(insn, 20, 1);
9108 bool user = false;
9109 TCGv_i32 loaded_var;
9110
9111
9112 if (insn & (1 << 22)) {
9113
9114 if (IS_USER(s))
9115 goto illegal_op;
9116
9117 if (is_load && extract32(insn, 15, 1)) {
9118 exc_return = true;
9119 } else {
9120 user = true;
9121 }
9122 }
9123 rn = (insn >> 16) & 0xf;
9124 addr = load_reg(s, rn);
9125
9126
9127 loaded_base = 0;
9128 loaded_var = NULL;
9129 n = 0;
9130 for (i = 0; i < 16; i++) {
9131 if (insn & (1 << i))
9132 n++;
9133 }
9134
9135 if (insn & (1 << 23)) {
9136 if (insn & (1 << 24)) {
9137
9138 tcg_gen_addi_i32(addr, addr, 4);
9139 } else {
9140
9141 }
9142 } else {
9143 if (insn & (1 << 24)) {
9144
9145 tcg_gen_addi_i32(addr, addr, -(n * 4));
9146 } else {
9147
9148 if (n != 1)
9149 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9150 }
9151 }
9152 j = 0;
9153 for (i = 0; i < 16; i++) {
9154 if (insn & (1 << i)) {
9155 if (is_load) {
9156
9157 tmp = tcg_temp_new_i32();
9158 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9159 if (user) {
9160 tmp2 = tcg_const_i32(i);
9161 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
9162 tcg_temp_free_i32(tmp2);
9163 tcg_temp_free_i32(tmp);
9164 } else if (i == rn) {
9165 loaded_var = tmp;
9166 loaded_base = 1;
9167 } else if (i == 15 && exc_return) {
9168 store_pc_exc_ret(s, tmp);
9169 } else {
9170 store_reg_from_load(s, i, tmp);
9171 }
9172 } else {
9173
9174 if (i == 15) {
9175
9176 val = (long)s->pc + 4;
9177 tmp = tcg_temp_new_i32();
9178 tcg_gen_movi_i32(tmp, val);
9179 } else if (user) {
9180 tmp = tcg_temp_new_i32();
9181 tmp2 = tcg_const_i32(i);
9182 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
9183 tcg_temp_free_i32(tmp2);
9184 } else {
9185 tmp = load_reg(s, i);
9186 }
9187 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9188 tcg_temp_free_i32(tmp);
9189 }
9190 j++;
9191
9192 if (j != n)
9193 tcg_gen_addi_i32(addr, addr, 4);
9194 }
9195 }
9196 if (insn & (1 << 21)) {
9197
9198 if (insn & (1 << 23)) {
9199 if (insn & (1 << 24)) {
9200
9201 } else {
9202
9203 tcg_gen_addi_i32(addr, addr, 4);
9204 }
9205 } else {
9206 if (insn & (1 << 24)) {
9207
9208 if (n != 1)
9209 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9210 } else {
9211
9212 tcg_gen_addi_i32(addr, addr, -(n * 4));
9213 }
9214 }
9215 store_reg(s, rn, addr);
9216 } else {
9217 tcg_temp_free_i32(addr);
9218 }
9219 if (loaded_base) {
9220 store_reg(s, rn, loaded_var);
9221 }
9222 if (exc_return) {
9223
9224 tmp = load_cpu_field(spsr);
9225 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
9226 gen_io_start();
9227 }
9228 gen_helper_cpsr_write_eret(cpu_env, tmp);
9229 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
9230 gen_io_end();
9231 }
9232 tcg_temp_free_i32(tmp);
9233
9234 s->base.is_jmp = DISAS_EXIT;
9235 }
9236 }
9237 break;
9238 case 0xa:
9239 case 0xb:
9240 {
9241 int32_t offset;
9242
9243
9244 val = (int32_t)s->pc;
9245 if (insn & (1 << 24)) {
9246 tmp = tcg_temp_new_i32();
9247 tcg_gen_movi_i32(tmp, val);
9248 store_reg(s, 14, tmp);
9249 }
9250 offset = sextract32(insn << 2, 0, 26);
9251 val += offset + 4;
9252 gen_jmp(s, val);
9253 }
9254 break;
9255 case 0xc:
9256 case 0xd:
9257 case 0xe:
9258 if (((insn >> 8) & 0xe) == 10) {
9259
9260 if (disas_vfp_insn(s, insn)) {
9261 goto illegal_op;
9262 }
9263 } else if (disas_coproc_insn(s, insn)) {
9264
9265 goto illegal_op;
9266 }
9267 break;
9268 case 0xf:
9269
9270 gen_set_pc_im(s, s->pc);
9271 s->svc_imm = extract32(insn, 0, 24);
9272 s->base.is_jmp = DISAS_SWI;
9273 break;
9274 default:
9275 illegal_op:
9276 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
9277 default_exception_el(s));
9278 break;
9279 }
9280 }
9281}
9282
9283static bool thumb_insn_is_16bit(DisasContext *s, uint32_t insn)
9284{
9285
9286
9287
9288
9289 if ((insn >> 11) < 0x1d) {
9290
9291 return true;
9292 }
9293
9294
9295
9296
9297
9298
9299 if (arm_dc_feature(s, ARM_FEATURE_THUMB2) ||
9300 arm_dc_feature(s, ARM_FEATURE_M)) {
9301
9302
9303
9304 return false;
9305 }
9306
9307 if ((insn >> 11) == 0x1e && s->pc - s->page_start < TARGET_PAGE_SIZE - 3) {
9308
9309
9310
9311
9312 return false;
9313 }
9314
9315
9316
9317
9318
9319 return true;
9320}
9321
9322
/*
 * Return nonzero if Thumb2 data-processing opcode "op" belongs to the
 * logical group (encodings 0..7), which updates flags via gen_logic_CC
 * rather than the arithmetic flag helpers.
 */
static int
thumb2_logic_op(int op)
{
    if (op < 8) {
        return 1;
    }
    return 0;
}
9328
9329
9330
9331
9332
9333
9334
9335static int
9336gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
9337 TCGv_i32 t0, TCGv_i32 t1)
9338{
9339 int logic_cc;
9340
9341 logic_cc = 0;
9342 switch (op) {
9343 case 0:
9344 tcg_gen_and_i32(t0, t0, t1);
9345 logic_cc = conds;
9346 break;
9347 case 1:
9348 tcg_gen_andc_i32(t0, t0, t1);
9349 logic_cc = conds;
9350 break;
9351 case 2:
9352 tcg_gen_or_i32(t0, t0, t1);
9353 logic_cc = conds;
9354 break;
9355 case 3:
9356 tcg_gen_orc_i32(t0, t0, t1);
9357 logic_cc = conds;
9358 break;
9359 case 4:
9360 tcg_gen_xor_i32(t0, t0, t1);
9361 logic_cc = conds;
9362 break;
9363 case 8:
9364 if (conds)
9365 gen_add_CC(t0, t0, t1);
9366 else
9367 tcg_gen_add_i32(t0, t0, t1);
9368 break;
9369 case 10:
9370 if (conds)
9371 gen_adc_CC(t0, t0, t1);
9372 else
9373 gen_adc(t0, t1);
9374 break;
9375 case 11:
9376 if (conds) {
9377 gen_sbc_CC(t0, t0, t1);
9378 } else {
9379 gen_sub_carry(t0, t0, t1);
9380 }
9381 break;
9382 case 13:
9383 if (conds)
9384 gen_sub_CC(t0, t0, t1);
9385 else
9386 tcg_gen_sub_i32(t0, t0, t1);
9387 break;
9388 case 14:
9389 if (conds)
9390 gen_sub_CC(t0, t1, t0);
9391 else
9392 tcg_gen_sub_i32(t0, t1, t0);
9393 break;
9394 default:
9395 return 1;
9396 }
9397 if (logic_cc) {
9398 gen_logic_CC(t0);
9399 if (shifter_out)
9400 gen_set_CF_bit31(t1);
9401 }
9402 return 0;
9403}
9404
9405
9406static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
9407{
9408 uint32_t imm, shift, offset;
9409 uint32_t rd, rn, rm, rs;
9410 TCGv_i32 tmp;
9411 TCGv_i32 tmp2;
9412 TCGv_i32 tmp3;
9413 TCGv_i32 addr;
9414 TCGv_i64 tmp64;
9415 int op;
9416 int shiftop;
9417 int conds;
9418 int logic_cc;
9419
9420
9421
9422
9423
9424
9425 if (arm_dc_feature(s, ARM_FEATURE_M) &&
9426 !arm_dc_feature(s, ARM_FEATURE_V7)) {
9427 int i;
9428 bool found = false;
9429 static const uint32_t armv6m_insn[] = {0xf3808000 ,
9430 0xf3b08040 ,
9431 0xf3b08050 ,
9432 0xf3b08060 ,
9433 0xf3e08000 ,
9434 0xf000d000 };
9435 static const uint32_t armv6m_mask[] = {0xffe0d000,
9436 0xfff0d0f0,
9437 0xfff0d0f0,
9438 0xfff0d0f0,
9439 0xffe0d000,
9440 0xf800d000};
9441
9442 for (i = 0; i < ARRAY_SIZE(armv6m_insn); i++) {
9443 if ((insn & armv6m_mask[i]) == armv6m_insn[i]) {
9444 found = true;
9445 break;
9446 }
9447 }
9448 if (!found) {
9449 goto illegal_op;
9450 }
9451 } else if ((insn & 0xf800e800) != 0xf000e800) {
9452 ARCH(6T2);
9453 }
9454
9455 rn = (insn >> 16) & 0xf;
9456 rs = (insn >> 12) & 0xf;
9457 rd = (insn >> 8) & 0xf;
9458 rm = insn & 0xf;
9459 switch ((insn >> 25) & 0xf) {
9460 case 0: case 1: case 2: case 3:
9461
9462 abort();
9463 case 4:
9464 if (insn & (1 << 22)) {
9465
9466
9467
9468
9469 if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_M) &&
9470 arm_dc_feature(s, ARM_FEATURE_V8)) {
9471
9472
9473
9474
9475
9476
9477
9478
9479
9480
9481
9482
9483
9484
9485 if (s->v8m_secure) {
9486
9487 s->condexec_cond = 0;
9488 s->condexec_mask = 0;
9489 }
9490 } else if (insn & 0x01200000) {
9491
9492
9493
9494
9495
9496
9497
9498 bool wback = extract32(insn, 21, 1);
9499
9500 if (rn == 15) {
9501 if (insn & (1 << 21)) {
9502
9503 goto illegal_op;
9504 }
9505 addr = tcg_temp_new_i32();
9506 tcg_gen_movi_i32(addr, s->pc & ~3);
9507 } else {
9508 addr = load_reg(s, rn);
9509 }
9510 offset = (insn & 0xff) * 4;
9511 if ((insn & (1 << 23)) == 0) {
9512 offset = -offset;
9513 }
9514
9515 if (s->v8m_stackcheck && rn == 13 && wback) {
9516
9517
9518
9519
9520
9521
9522
9523 if ((int32_t)offset < 0) {
9524 TCGv_i32 newsp = tcg_temp_new_i32();
9525
9526 tcg_gen_addi_i32(newsp, addr, offset);
9527 gen_helper_v8m_stackcheck(cpu_env, newsp);
9528 tcg_temp_free_i32(newsp);
9529 } else {
9530 gen_helper_v8m_stackcheck(cpu_env, addr);
9531 }
9532 }
9533
9534 if (insn & (1 << 24)) {
9535 tcg_gen_addi_i32(addr, addr, offset);
9536 offset = 0;
9537 }
9538 if (insn & (1 << 20)) {
9539
9540 tmp = tcg_temp_new_i32();
9541 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9542 store_reg(s, rs, tmp);
9543 tcg_gen_addi_i32(addr, addr, 4);
9544 tmp = tcg_temp_new_i32();
9545 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9546 store_reg(s, rd, tmp);
9547 } else {
9548
9549 tmp = load_reg(s, rs);
9550 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9551 tcg_temp_free_i32(tmp);
9552 tcg_gen_addi_i32(addr, addr, 4);
9553 tmp = load_reg(s, rd);
9554 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9555 tcg_temp_free_i32(tmp);
9556 }
9557 if (wback) {
9558
9559 tcg_gen_addi_i32(addr, addr, offset - 4);
9560 store_reg(s, rn, addr);
9561 } else {
9562 tcg_temp_free_i32(addr);
9563 }
9564 } else if ((insn & (1 << 23)) == 0) {
9565
9566
9567
9568
9569 if (rs == 15) {
9570 if (!(insn & (1 << 20)) &&
9571 arm_dc_feature(s, ARM_FEATURE_M) &&
9572 arm_dc_feature(s, ARM_FEATURE_V8)) {
9573
9574
9575
9576 bool alt = insn & (1 << 7);
9577 TCGv_i32 addr, op, ttresp;
9578
9579 if ((insn & 0x3f) || rd == 13 || rd == 15 || rn == 15) {
9580
9581 goto illegal_op;
9582 }
9583
9584 if (alt && !s->v8m_secure) {
9585 goto illegal_op;
9586 }
9587
9588 addr = load_reg(s, rn);
9589 op = tcg_const_i32(extract32(insn, 6, 2));
9590 ttresp = tcg_temp_new_i32();
9591 gen_helper_v7m_tt(ttresp, cpu_env, addr, op);
9592 tcg_temp_free_i32(addr);
9593 tcg_temp_free_i32(op);
9594 store_reg(s, rd, ttresp);
9595 break;
9596 }
9597 goto illegal_op;
9598 }
9599 addr = tcg_temp_local_new_i32();
9600 load_reg_var(s, addr, rn);
9601 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
9602 if (insn & (1 << 20)) {
9603 gen_load_exclusive(s, rs, 15, addr, 2);
9604 } else {
9605 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9606 }
9607 tcg_temp_free_i32(addr);
9608 } else if ((insn & (7 << 5)) == 0) {
9609
9610 if (rn == 15) {
9611 addr = tcg_temp_new_i32();
9612 tcg_gen_movi_i32(addr, s->pc);
9613 } else {
9614 addr = load_reg(s, rn);
9615 }
9616 tmp = load_reg(s, rm);
9617 tcg_gen_add_i32(addr, addr, tmp);
9618 if (insn & (1 << 4)) {
9619
9620 tcg_gen_add_i32(addr, addr, tmp);
9621 tcg_temp_free_i32(tmp);
9622 tmp = tcg_temp_new_i32();
9623 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
9624 } else {
9625 tcg_temp_free_i32(tmp);
9626 tmp = tcg_temp_new_i32();
9627 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
9628 }
9629 tcg_temp_free_i32(addr);
9630 tcg_gen_shli_i32(tmp, tmp, 1);
9631 tcg_gen_addi_i32(tmp, tmp, s->pc);
9632 store_reg(s, 15, tmp);
9633 } else {
9634 bool is_lasr = false;
9635 bool is_ld = extract32(insn, 20, 1);
9636 int op2 = (insn >> 6) & 0x3;
9637 op = (insn >> 4) & 0x3;
9638 switch (op2) {
9639 case 0:
9640 goto illegal_op;
9641 case 1:
9642
9643 if (op == 2) {
9644 goto illegal_op;
9645 }
9646 ARCH(7);
9647 break;
9648 case 2:
9649
9650 if (op == 3) {
9651 goto illegal_op;
9652 }
9653
9654 case 3:
9655
9656 ARCH(8);
9657 is_lasr = true;
9658 break;
9659 }
9660
9661 if (is_lasr && !is_ld) {
9662 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
9663 }
9664
9665 addr = tcg_temp_local_new_i32();
9666 load_reg_var(s, addr, rn);
9667 if (!(op2 & 1)) {
9668 if (is_ld) {
9669 tmp = tcg_temp_new_i32();
9670 switch (op) {
9671 case 0:
9672 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s),
9673 rs | ISSIsAcqRel);
9674 break;
9675 case 1:
9676 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
9677 rs | ISSIsAcqRel);
9678 break;
9679 case 2:
9680 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
9681 rs | ISSIsAcqRel);
9682 break;
9683 default:
9684 abort();
9685 }
9686 store_reg(s, rs, tmp);
9687 } else {
9688 tmp = load_reg(s, rs);
9689 switch (op) {
9690 case 0:
9691 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s),
9692 rs | ISSIsAcqRel);
9693 break;
9694 case 1:
9695 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s),
9696 rs | ISSIsAcqRel);
9697 break;
9698 case 2:
9699 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s),
9700 rs | ISSIsAcqRel);
9701 break;
9702 default:
9703 abort();
9704 }
9705 tcg_temp_free_i32(tmp);
9706 }
9707 } else if (is_ld) {
9708 gen_load_exclusive(s, rs, rd, addr, op);
9709 } else {
9710 gen_store_exclusive(s, rm, rs, rd, addr, op);
9711 }
9712 tcg_temp_free_i32(addr);
9713
9714 if (is_lasr && is_ld) {
9715 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
9716 }
9717 }
9718 } else {
9719
9720 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
9721
9722 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9723 goto illegal_op;
9724 }
9725 if (insn & (1 << 20)) {
9726
9727 addr = load_reg(s, rn);
9728 if ((insn & (1 << 24)) == 0)
9729 tcg_gen_addi_i32(addr, addr, -8);
9730
9731 tmp = tcg_temp_new_i32();
9732 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9733 tcg_gen_addi_i32(addr, addr, 4);
9734 tmp2 = tcg_temp_new_i32();
9735 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9736 if (insn & (1 << 21)) {
9737
9738 if (insn & (1 << 24)) {
9739 tcg_gen_addi_i32(addr, addr, 4);
9740 } else {
9741 tcg_gen_addi_i32(addr, addr, -4);
9742 }
9743 store_reg(s, rn, addr);
9744 } else {
9745 tcg_temp_free_i32(addr);
9746 }
9747 gen_rfe(s, tmp, tmp2);
9748 } else {
9749
9750 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
9751 insn & (1 << 21));
9752 }
9753 } else {
9754 int i, loaded_base = 0;
9755 TCGv_i32 loaded_var;
9756 bool wback = extract32(insn, 21, 1);
9757
9758 addr = load_reg(s, rn);
9759 offset = 0;
9760 for (i = 0; i < 16; i++) {
9761 if (insn & (1 << i))
9762 offset += 4;
9763 }
9764
9765 if (insn & (1 << 24)) {
9766 tcg_gen_addi_i32(addr, addr, -offset);
9767 }
9768
9769 if (s->v8m_stackcheck && rn == 13 && wback) {
9770
9771
9772
9773
9774
9775
9776
9777
9778
9779
9780
9781 gen_helper_v8m_stackcheck(cpu_env, addr);
9782 }
9783
9784 loaded_var = NULL;
9785 for (i = 0; i < 16; i++) {
9786 if ((insn & (1 << i)) == 0)
9787 continue;
9788 if (insn & (1 << 20)) {
9789
9790 tmp = tcg_temp_new_i32();
9791 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9792 if (i == 15) {
9793 gen_bx_excret(s, tmp);
9794 } else if (i == rn) {
9795 loaded_var = tmp;
9796 loaded_base = 1;
9797 } else {
9798 store_reg(s, i, tmp);
9799 }
9800 } else {
9801
9802 tmp = load_reg(s, i);
9803 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9804 tcg_temp_free_i32(tmp);
9805 }
9806 tcg_gen_addi_i32(addr, addr, 4);
9807 }
9808 if (loaded_base) {
9809 store_reg(s, rn, loaded_var);
9810 }
9811 if (wback) {
9812
9813 if (insn & (1 << 24)) {
9814 tcg_gen_addi_i32(addr, addr, -offset);
9815 }
9816
9817 if (insn & (1 << rn))
9818 goto illegal_op;
9819 store_reg(s, rn, addr);
9820 } else {
9821 tcg_temp_free_i32(addr);
9822 }
9823 }
9824 }
9825 break;
9826 case 5:
9827
9828 op = (insn >> 21) & 0xf;
9829 if (op == 6) {
9830 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9831 goto illegal_op;
9832 }
9833
9834 tmp = load_reg(s, rn);
9835 tmp2 = load_reg(s, rm);
9836 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
9837 if (insn & (1 << 5)) {
9838
9839 if (shift == 0)
9840 shift = 31;
9841 tcg_gen_sari_i32(tmp2, tmp2, shift);
9842 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
9843 tcg_gen_ext16u_i32(tmp2, tmp2);
9844 } else {
9845
9846 if (shift)
9847 tcg_gen_shli_i32(tmp2, tmp2, shift);
9848 tcg_gen_ext16u_i32(tmp, tmp);
9849 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
9850 }
9851 tcg_gen_or_i32(tmp, tmp, tmp2);
9852 tcg_temp_free_i32(tmp2);
9853 store_reg(s, rd, tmp);
9854 } else {
9855
9856 if (rn == 15) {
9857 tmp = tcg_temp_new_i32();
9858 tcg_gen_movi_i32(tmp, 0);
9859 } else {
9860 tmp = load_reg(s, rn);
9861 }
9862 tmp2 = load_reg(s, rm);
9863
9864 shiftop = (insn >> 4) & 3;
9865 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
9866 conds = (insn & (1 << 20)) != 0;
9867 logic_cc = (conds && thumb2_logic_op(op));
9868 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9869 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
9870 goto illegal_op;
9871 tcg_temp_free_i32(tmp2);
9872 if (rd == 13 &&
9873 ((op == 2 && rn == 15) ||
9874 (op == 8 && rn == 13) ||
9875 (op == 13 && rn == 13))) {
9876
9877 store_sp_checked(s, tmp);
9878 } else if (rd != 15) {
9879 store_reg(s, rd, tmp);
9880 } else {
9881 tcg_temp_free_i32(tmp);
9882 }
9883 }
9884 break;
9885 case 13:
9886 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
9887 if (op < 4 && (insn & 0xf000) != 0xf000)
9888 goto illegal_op;
9889 switch (op) {
9890 case 0:
9891 tmp = load_reg(s, rn);
9892 tmp2 = load_reg(s, rm);
9893 if ((insn & 0x70) != 0)
9894 goto illegal_op;
9895
9896
9897
9898
9899 op = (insn >> 21) & 3;
9900 logic_cc = (insn & (1 << 20)) != 0;
9901 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
9902 if (logic_cc)
9903 gen_logic_CC(tmp);
9904 store_reg(s, rd, tmp);
9905 break;
9906 case 1:
9907 op = (insn >> 20) & 7;
9908 switch (op) {
9909 case 0:
9910 case 1:
9911 case 4:
9912 case 5:
9913 break;
9914 case 2:
9915 case 3:
9916 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9917 goto illegal_op;
9918 }
9919 break;
9920 default:
9921 goto illegal_op;
9922 }
9923 if (rn != 15) {
9924 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9925 goto illegal_op;
9926 }
9927 }
9928 tmp = load_reg(s, rm);
9929 shift = (insn >> 4) & 3;
9930
9931
9932 if (shift != 0)
9933 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9934 op = (insn >> 20) & 7;
9935 switch (op) {
9936 case 0: gen_sxth(tmp); break;
9937 case 1: gen_uxth(tmp); break;
9938 case 2: gen_sxtb16(tmp); break;
9939 case 3: gen_uxtb16(tmp); break;
9940 case 4: gen_sxtb(tmp); break;
9941 case 5: gen_uxtb(tmp); break;
9942 default:
9943 g_assert_not_reached();
9944 }
9945 if (rn != 15) {
9946 tmp2 = load_reg(s, rn);
9947 if ((op >> 1) == 1) {
9948 gen_add16(tmp, tmp2);
9949 } else {
9950 tcg_gen_add_i32(tmp, tmp, tmp2);
9951 tcg_temp_free_i32(tmp2);
9952 }
9953 }
9954 store_reg(s, rd, tmp);
9955 break;
9956 case 2:
9957 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9958 goto illegal_op;
9959 }
9960 op = (insn >> 20) & 7;
9961 shift = (insn >> 4) & 7;
9962 if ((op & 3) == 3 || (shift & 3) == 3)
9963 goto illegal_op;
9964 tmp = load_reg(s, rn);
9965 tmp2 = load_reg(s, rm);
9966 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
9967 tcg_temp_free_i32(tmp2);
9968 store_reg(s, rd, tmp);
9969 break;
9970 case 3:
9971 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
9972 if (op < 4) {
9973
9974 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9975 goto illegal_op;
9976 }
9977 tmp = load_reg(s, rn);
9978 tmp2 = load_reg(s, rm);
9979 if (op & 1)
9980 gen_helper_double_saturate(tmp, cpu_env, tmp);
9981 if (op & 2)
9982 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
9983 else
9984 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
9985 tcg_temp_free_i32(tmp2);
9986 } else {
9987 switch (op) {
9988 case 0x0a:
9989 case 0x08:
9990 case 0x09:
9991 case 0x0b:
9992 case 0x18:
9993 break;
9994 case 0x10:
9995 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9996 goto illegal_op;
9997 }
9998 break;
9999 case 0x20:
10000 case 0x21:
10001 case 0x22:
10002 case 0x28:
10003 case 0x29:
10004 case 0x2a:
10005 if (!dc_isar_feature(aa32_crc32, s)) {
10006 goto illegal_op;
10007 }
10008 break;
10009 default:
10010 goto illegal_op;
10011 }
10012 tmp = load_reg(s, rn);
10013 switch (op) {
10014 case 0x0a:
10015 gen_helper_rbit(tmp, tmp);
10016 break;
10017 case 0x08:
10018 tcg_gen_bswap32_i32(tmp, tmp);
10019 break;
10020 case 0x09:
10021 gen_rev16(tmp);
10022 break;
10023 case 0x0b:
10024 gen_revsh(tmp);
10025 break;
10026 case 0x10:
10027 tmp2 = load_reg(s, rm);
10028 tmp3 = tcg_temp_new_i32();
10029 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
10030 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
10031 tcg_temp_free_i32(tmp3);
10032 tcg_temp_free_i32(tmp2);
10033 break;
10034 case 0x18:
10035 tcg_gen_clzi_i32(tmp, tmp, 32);
10036 break;
10037 case 0x20:
10038 case 0x21:
10039 case 0x22:
10040 case 0x28:
10041 case 0x29:
10042 case 0x2a:
10043 {
10044
10045 uint32_t sz = op & 0x3;
10046 uint32_t c = op & 0x8;
10047
10048 tmp2 = load_reg(s, rm);
10049 if (sz == 0) {
10050 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
10051 } else if (sz == 1) {
10052 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
10053 }
10054 tmp3 = tcg_const_i32(1 << sz);
10055 if (c) {
10056 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
10057 } else {
10058 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
10059 }
10060 tcg_temp_free_i32(tmp2);
10061 tcg_temp_free_i32(tmp3);
10062 break;
10063 }
10064 default:
10065 g_assert_not_reached();
10066 }
10067 }
10068 store_reg(s, rd, tmp);
10069 break;
10070 case 4: case 5:
10071 switch ((insn >> 20) & 7) {
10072 case 0:
10073 case 7:
10074 break;
10075 case 1:
10076 case 2:
10077 case 3:
10078 case 4:
10079 case 5: case 6:
10080 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10081 goto illegal_op;
10082 }
10083 break;
10084 }
10085 op = (insn >> 4) & 0xf;
10086 tmp = load_reg(s, rn);
10087 tmp2 = load_reg(s, rm);
10088 switch ((insn >> 20) & 7) {
10089 case 0:
10090 tcg_gen_mul_i32(tmp, tmp, tmp2);
10091 tcg_temp_free_i32(tmp2);
10092 if (rs != 15) {
10093 tmp2 = load_reg(s, rs);
10094 if (op)
10095 tcg_gen_sub_i32(tmp, tmp2, tmp);
10096 else
10097 tcg_gen_add_i32(tmp, tmp, tmp2);
10098 tcg_temp_free_i32(tmp2);
10099 }
10100 break;
10101 case 1:
10102 gen_mulxy(tmp, tmp2, op & 2, op & 1);
10103 tcg_temp_free_i32(tmp2);
10104 if (rs != 15) {
10105 tmp2 = load_reg(s, rs);
10106 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
10107 tcg_temp_free_i32(tmp2);
10108 }
10109 break;
10110 case 2:
10111 case 4:
10112 if (op)
10113 gen_swap_half(tmp2);
10114 gen_smul_dual(tmp, tmp2);
10115 if (insn & (1 << 22)) {
10116
10117 tcg_gen_sub_i32(tmp, tmp, tmp2);
10118 } else {
10119
10120
10121
10122
10123 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
10124 }
10125 tcg_temp_free_i32(tmp2);
10126 if (rs != 15)
10127 {
10128 tmp2 = load_reg(s, rs);
10129 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
10130 tcg_temp_free_i32(tmp2);
10131 }
10132 break;
10133 case 3:
10134 if (op)
10135 tcg_gen_sari_i32(tmp2, tmp2, 16);
10136 else
10137 gen_sxth(tmp2);
10138 tmp64 = gen_muls_i64_i32(tmp, tmp2);
10139 tcg_gen_shri_i64(tmp64, tmp64, 16);
10140 tmp = tcg_temp_new_i32();
10141 tcg_gen_extrl_i64_i32(tmp, tmp64);
10142 tcg_temp_free_i64(tmp64);
10143 if (rs != 15)
10144 {
10145 tmp2 = load_reg(s, rs);
10146 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
10147 tcg_temp_free_i32(tmp2);
10148 }
10149 break;
10150 case 5: case 6:
10151 tmp64 = gen_muls_i64_i32(tmp, tmp2);
10152 if (rs != 15) {
10153 tmp = load_reg(s, rs);
10154 if (insn & (1 << 20)) {
10155 tmp64 = gen_addq_msw(tmp64, tmp);
10156 } else {
10157 tmp64 = gen_subq_msw(tmp64, tmp);
10158 }
10159 }
10160 if (insn & (1 << 4)) {
10161 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
10162 }
10163 tcg_gen_shri_i64(tmp64, tmp64, 32);
10164 tmp = tcg_temp_new_i32();
10165 tcg_gen_extrl_i64_i32(tmp, tmp64);
10166 tcg_temp_free_i64(tmp64);
10167 break;
10168 case 7:
10169 gen_helper_usad8(tmp, tmp, tmp2);
10170 tcg_temp_free_i32(tmp2);
10171 if (rs != 15) {
10172 tmp2 = load_reg(s, rs);
10173 tcg_gen_add_i32(tmp, tmp, tmp2);
10174 tcg_temp_free_i32(tmp2);
10175 }
10176 break;
10177 }
10178 store_reg(s, rd, tmp);
10179 break;
10180 case 6: case 7:
10181 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
10182 tmp = load_reg(s, rn);
10183 tmp2 = load_reg(s, rm);
10184 if ((op & 0x50) == 0x10) {
10185
10186 if (!dc_isar_feature(thumb_div, s)) {
10187 goto illegal_op;
10188 }
10189 if (op & 0x20)
10190 gen_helper_udiv(tmp, tmp, tmp2);
10191 else
10192 gen_helper_sdiv(tmp, tmp, tmp2);
10193 tcg_temp_free_i32(tmp2);
10194 store_reg(s, rd, tmp);
10195 } else if ((op & 0xe) == 0xc) {
10196
10197 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10198 tcg_temp_free_i32(tmp);
10199 tcg_temp_free_i32(tmp2);
10200 goto illegal_op;
10201 }
10202 if (op & 1)
10203 gen_swap_half(tmp2);
10204 gen_smul_dual(tmp, tmp2);
10205 if (op & 0x10) {
10206 tcg_gen_sub_i32(tmp, tmp, tmp2);
10207 } else {
10208 tcg_gen_add_i32(tmp, tmp, tmp2);
10209 }
10210 tcg_temp_free_i32(tmp2);
10211
10212 tmp64 = tcg_temp_new_i64();
10213 tcg_gen_ext_i32_i64(tmp64, tmp);
10214 tcg_temp_free_i32(tmp);
10215 gen_addq(s, tmp64, rs, rd);
10216 gen_storeq_reg(s, rs, rd, tmp64);
10217 tcg_temp_free_i64(tmp64);
10218 } else {
10219 if (op & 0x20) {
10220
10221 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
10222 } else {
10223 if (op & 8) {
10224
10225 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10226 tcg_temp_free_i32(tmp2);
10227 tcg_temp_free_i32(tmp);
10228 goto illegal_op;
10229 }
10230 gen_mulxy(tmp, tmp2, op & 2, op & 1);
10231 tcg_temp_free_i32(tmp2);
10232 tmp64 = tcg_temp_new_i64();
10233 tcg_gen_ext_i32_i64(tmp64, tmp);
10234 tcg_temp_free_i32(tmp);
10235 } else {
10236
10237 tmp64 = gen_muls_i64_i32(tmp, tmp2);
10238 }
10239 }
10240 if (op & 4) {
10241
10242 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10243 tcg_temp_free_i64(tmp64);
10244 goto illegal_op;
10245 }
10246 gen_addq_lo(s, tmp64, rs);
10247 gen_addq_lo(s, tmp64, rd);
10248 } else if (op & 0x40) {
10249
10250 gen_addq(s, tmp64, rs, rd);
10251 }
10252 gen_storeq_reg(s, rs, rd, tmp64);
10253 tcg_temp_free_i64(tmp64);
10254 }
10255 break;
10256 }
10257 break;
10258 case 6: case 7: case 14: case 15:
10259
10260 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10261
10262 if (extract32(insn, 24, 2) == 3) {
10263 goto illegal_op;
10264 }
10265
10266
10267
10268
10269
10270
10271
10272
10273
10274
10275 if (arm_dc_feature(s, ARM_FEATURE_V8) &&
10276 (insn & 0xffa00f00) == 0xec200a00) {
10277
10278
10279
10280
10281 if (!s->v8m_secure || (insn & 0x0040f0ff)) {
10282 goto illegal_op;
10283 }
10284
10285 if (arm_dc_feature(s, ARM_FEATURE_VFP)) {
10286 TCGv_i32 fptr = load_reg(s, rn);
10287
10288 if (extract32(insn, 20, 1)) {
10289 gen_helper_v7m_vlldm(cpu_env, fptr);
10290 } else {
10291 gen_helper_v7m_vlstm(cpu_env, fptr);
10292 }
10293 tcg_temp_free_i32(fptr);
10294
10295
10296 s->base.is_jmp = DISAS_UPDATE;
10297 }
10298 break;
10299 }
10300 if (arm_dc_feature(s, ARM_FEATURE_VFP) &&
10301 ((insn >> 8) & 0xe) == 10) {
10302
10303 if (disas_vfp_insn(s, insn)) {
10304 goto illegal_op;
10305 }
10306 break;
10307 }
10308
10309
10310 gen_exception_insn(s, 4, EXCP_NOCP, syn_uncategorized(),
10311 default_exception_el(s));
10312 break;
10313 }
10314 if ((insn & 0xfe000a00) == 0xfc000800
10315 && arm_dc_feature(s, ARM_FEATURE_V8)) {
10316
10317 if (disas_neon_insn_3same_ext(s, insn)) {
10318 goto illegal_op;
10319 }
10320 } else if ((insn & 0xff000a00) == 0xfe000800
10321 && arm_dc_feature(s, ARM_FEATURE_V8)) {
10322
10323 if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
10324 goto illegal_op;
10325 }
10326 } else if (((insn >> 24) & 3) == 3) {
10327
10328 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
10329 if (disas_neon_data_insn(s, insn)) {
10330 goto illegal_op;
10331 }
10332 } else if (((insn >> 8) & 0xe) == 10) {
10333 if (disas_vfp_insn(s, insn)) {
10334 goto illegal_op;
10335 }
10336 } else {
10337 if (insn & (1 << 28))
10338 goto illegal_op;
10339 if (disas_coproc_insn(s, insn)) {
10340 goto illegal_op;
10341 }
10342 }
10343 break;
10344 case 8: case 9: case 10: case 11:
10345 if (insn & (1 << 15)) {
10346
10347 if (insn & 0x5000) {
10348
10349
10350 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
10351
10352 offset |= (insn & 0x7ff) << 1;
10353
10354
10355
10356 offset ^= ((~insn) & (1 << 13)) << 10;
10357 offset ^= ((~insn) & (1 << 11)) << 11;
10358
10359 if (insn & (1 << 14)) {
10360
10361 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
10362 }
10363
10364 offset += s->pc;
10365 if (insn & (1 << 12)) {
10366
10367 gen_jmp(s, offset);
10368 } else {
10369
10370 offset &= ~(uint32_t)2;
10371
10372 gen_bx_im(s, offset);
10373 }
10374 } else if (((insn >> 23) & 7) == 7) {
10375
10376 if (insn & (1 << 13))
10377 goto illegal_op;
10378
10379 if (insn & (1 << 26)) {
10380 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10381 goto illegal_op;
10382 }
10383 if (!(insn & (1 << 20))) {
10384
10385 int imm16 = extract32(insn, 16, 4) << 12
10386 | extract32(insn, 0, 12);
10387 ARCH(7);
10388 if (IS_USER(s)) {
10389 goto illegal_op;
10390 }
10391 gen_hvc(s, imm16);
10392 } else {
10393
10394 ARCH(6K);
10395 if (IS_USER(s)) {
10396 goto illegal_op;
10397 }
10398 gen_smc(s);
10399 }
10400 } else {
10401 op = (insn >> 20) & 7;
10402 switch (op) {
10403 case 0:
10404 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10405 tmp = load_reg(s, rn);
10406
10407 addr = tcg_const_i32(insn & 0xfff);
10408 gen_helper_v7m_msr(cpu_env, addr, tmp);
10409 tcg_temp_free_i32(addr);
10410 tcg_temp_free_i32(tmp);
10411 gen_lookup_tb(s);
10412 break;
10413 }
10414
10415 case 1:
10416 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10417 goto illegal_op;
10418 }
10419
10420 if (extract32(insn, 5, 1)) {
10421
10422 int sysm = extract32(insn, 8, 4) |
10423 (extract32(insn, 4, 1) << 4);
10424 int r = op & 1;
10425
10426 gen_msr_banked(s, r, sysm, rm);
10427 break;
10428 }
10429
10430
10431 tmp = load_reg(s, rn);
10432 if (gen_set_psr(s,
10433 msr_mask(s, (insn >> 8) & 0xf, op == 1),
10434 op == 1, tmp))
10435 goto illegal_op;
10436 break;
10437 case 2:
10438 if (((insn >> 8) & 7) == 0) {
10439 gen_nop_hint(s, insn & 0xff);
10440 }
10441
10442 if (IS_USER(s))
10443 break;
10444 offset = 0;
10445 imm = 0;
10446 if (insn & (1 << 10)) {
10447 if (insn & (1 << 7))
10448 offset |= CPSR_A;
10449 if (insn & (1 << 6))
10450 offset |= CPSR_I;
10451 if (insn & (1 << 5))
10452 offset |= CPSR_F;
10453 if (insn & (1 << 9))
10454 imm = CPSR_A | CPSR_I | CPSR_F;
10455 }
10456 if (insn & (1 << 8)) {
10457 offset |= 0x1f;
10458 imm |= (insn & 0x1f);
10459 }
10460 if (offset) {
10461 gen_set_psr_im(s, offset, 0, imm);
10462 }
10463 break;
10464 case 3:
10465 if (!arm_dc_feature(s, ARM_FEATURE_V7) &&
10466 !arm_dc_feature(s, ARM_FEATURE_M)) {
10467 goto illegal_op;
10468 }
10469 op = (insn >> 4) & 0xf;
10470 switch (op) {
10471 case 2:
10472 gen_clrex(s);
10473 break;
10474 case 4:
10475 case 5:
10476 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
10477 break;
10478 case 6:
10479
10480
10481
10482
10483
10484 gen_goto_tb(s, 0, s->pc & ~1);
10485 break;
10486 case 7:
10487 if ((insn & 0xf) || !dc_isar_feature(aa32_sb, s)) {
10488 goto illegal_op;
10489 }
10490
10491
10492
10493
10494 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
10495 gen_goto_tb(s, 0, s->pc & ~1);
10496 break;
10497 default:
10498 goto illegal_op;
10499 }
10500 break;
10501 case 4:
10502
10503
10504
10505 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10506 goto illegal_op;
10507 }
10508 tmp = load_reg(s, rn);
10509 gen_bx(s, tmp);
10510 break;
10511 case 5:
10512 if (IS_USER(s)) {
10513 goto illegal_op;
10514 }
10515 if (rn != 14 || rd != 15) {
10516 goto illegal_op;
10517 }
10518 if (s->current_el == 2) {
10519
10520 if (insn & 0xff) {
10521 goto illegal_op;
10522 }
10523 tmp = load_cpu_field(elr_el[2]);
10524 } else {
10525 tmp = load_reg(s, rn);
10526 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
10527 }
10528 gen_exception_return(s, tmp);
10529 break;
10530 case 6:
10531 if (extract32(insn, 5, 1) &&
10532 !arm_dc_feature(s, ARM_FEATURE_M)) {
10533
10534 int sysm = extract32(insn, 16, 4) |
10535 (extract32(insn, 4, 1) << 4);
10536
10537 gen_mrs_banked(s, 0, sysm, rd);
10538 break;
10539 }
10540
10541 if (extract32(insn, 16, 4) != 0xf) {
10542 goto illegal_op;
10543 }
10544 if (!arm_dc_feature(s, ARM_FEATURE_M) &&
10545 extract32(insn, 0, 8) != 0) {
10546 goto illegal_op;
10547 }
10548
10549
10550 tmp = tcg_temp_new_i32();
10551 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10552 addr = tcg_const_i32(insn & 0xff);
10553 gen_helper_v7m_mrs(tmp, cpu_env, addr);
10554 tcg_temp_free_i32(addr);
10555 } else {
10556 gen_helper_cpsr_read(tmp, cpu_env);
10557 }
10558 store_reg(s, rd, tmp);
10559 break;
10560 case 7:
10561 if (extract32(insn, 5, 1) &&
10562 !arm_dc_feature(s, ARM_FEATURE_M)) {
10563
10564 int sysm = extract32(insn, 16, 4) |
10565 (extract32(insn, 4, 1) << 4);
10566
10567 gen_mrs_banked(s, 1, sysm, rd);
10568 break;
10569 }
10570
10571
10572
10573 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
10574 goto illegal_op;
10575 }
10576
10577 if (extract32(insn, 16, 4) != 0xf ||
10578 extract32(insn, 0, 8) != 0) {
10579 goto illegal_op;
10580 }
10581
10582 tmp = load_cpu_field(spsr);
10583 store_reg(s, rd, tmp);
10584 break;
10585 }
10586 }
10587 } else {
10588
10589 op = (insn >> 22) & 0xf;
10590
10591 arm_skip_unless(s, op);
10592
10593
10594 offset = (insn & 0x7ff) << 1;
10595
10596 offset |= (insn & 0x003f0000) >> 4;
10597
10598 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
10599
10600 offset |= (insn & (1 << 13)) << 5;
10601
10602 offset |= (insn & (1 << 11)) << 8;
10603
10604
10605 gen_jmp(s, s->pc + offset);
10606 }
10607 } else {
10608
10609
10610
10611
10612 if (insn & (1 << 25)) {
10613
10614
10615
10616
10617 if (insn & (1 << 24)) {
10618 if (insn & (1 << 20))
10619 goto illegal_op;
10620
10621 op = (insn >> 21) & 7;
10622 imm = insn & 0x1f;
10623 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
10624 if (rn == 15) {
10625 tmp = tcg_temp_new_i32();
10626 tcg_gen_movi_i32(tmp, 0);
10627 } else {
10628 tmp = load_reg(s, rn);
10629 }
10630 switch (op) {
10631 case 2:
10632 imm++;
10633 if (shift + imm > 32)
10634 goto illegal_op;
10635 if (imm < 32) {
10636 tcg_gen_sextract_i32(tmp, tmp, shift, imm);
10637 }
10638 break;
10639 case 6:
10640 imm++;
10641 if (shift + imm > 32)
10642 goto illegal_op;
10643 if (imm < 32) {
10644 tcg_gen_extract_i32(tmp, tmp, shift, imm);
10645 }
10646 break;
10647 case 3:
10648 if (imm < shift)
10649 goto illegal_op;
10650 imm = imm + 1 - shift;
10651 if (imm != 32) {
10652 tmp2 = load_reg(s, rd);
10653 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
10654 tcg_temp_free_i32(tmp2);
10655 }
10656 break;
10657 case 7:
10658 goto illegal_op;
10659 default:
10660 if (shift) {
10661 if (op & 1)
10662 tcg_gen_sari_i32(tmp, tmp, shift);
10663 else
10664 tcg_gen_shli_i32(tmp, tmp, shift);
10665 }
10666 tmp2 = tcg_const_i32(imm);
10667 if (op & 4) {
10668
10669 if ((op & 1) && shift == 0) {
10670 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10671 tcg_temp_free_i32(tmp);
10672 tcg_temp_free_i32(tmp2);
10673 goto illegal_op;
10674 }
10675 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
10676 } else {
10677 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
10678 }
10679 } else {
10680
10681 if ((op & 1) && shift == 0) {
10682 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10683 tcg_temp_free_i32(tmp);
10684 tcg_temp_free_i32(tmp2);
10685 goto illegal_op;
10686 }
10687 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
10688 } else {
10689 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
10690 }
10691 }
10692 tcg_temp_free_i32(tmp2);
10693 break;
10694 }
10695 store_reg(s, rd, tmp);
10696 } else {
10697 imm = ((insn & 0x04000000) >> 15)
10698 | ((insn & 0x7000) >> 4) | (insn & 0xff);
10699 if (insn & (1 << 22)) {
10700
10701 imm |= (insn >> 4) & 0xf000;
10702 if (insn & (1 << 23)) {
10703
10704 tmp = load_reg(s, rd);
10705 tcg_gen_ext16u_i32(tmp, tmp);
10706 tcg_gen_ori_i32(tmp, tmp, imm << 16);
10707 } else {
10708
10709 tmp = tcg_temp_new_i32();
10710 tcg_gen_movi_i32(tmp, imm);
10711 }
10712 store_reg(s, rd, tmp);
10713 } else {
10714
10715 if (rn == 15) {
10716 offset = s->pc & ~(uint32_t)3;
10717 if (insn & (1 << 23))
10718 offset -= imm;
10719 else
10720 offset += imm;
10721 tmp = tcg_temp_new_i32();
10722 tcg_gen_movi_i32(tmp, offset);
10723 store_reg(s, rd, tmp);
10724 } else {
10725 tmp = load_reg(s, rn);
10726 if (insn & (1 << 23))
10727 tcg_gen_subi_i32(tmp, tmp, imm);
10728 else
10729 tcg_gen_addi_i32(tmp, tmp, imm);
10730 if (rn == 13 && rd == 13) {
10731
10732 store_sp_checked(s, tmp);
10733 } else {
10734 store_reg(s, rd, tmp);
10735 }
10736 }
10737 }
10738 }
10739 } else {
10740
10741
10742
10743
10744 int shifter_out = 0;
10745
10746 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
10747 imm = (insn & 0xff);
10748 switch (shift) {
10749 case 0:
10750
10751 break;
10752 case 1:
10753 imm |= imm << 16;
10754 break;
10755 case 2:
10756 imm |= imm << 16;
10757 imm <<= 8;
10758 break;
10759 case 3:
10760 imm |= imm << 16;
10761 imm |= imm << 8;
10762 break;
10763 default:
10764 shift = (shift << 1) | (imm >> 7);
10765 imm |= 0x80;
10766 imm = imm << (32 - shift);
10767 shifter_out = 1;
10768 break;
10769 }
10770 tmp2 = tcg_temp_new_i32();
10771 tcg_gen_movi_i32(tmp2, imm);
10772 rn = (insn >> 16) & 0xf;
10773 if (rn == 15) {
10774 tmp = tcg_temp_new_i32();
10775 tcg_gen_movi_i32(tmp, 0);
10776 } else {
10777 tmp = load_reg(s, rn);
10778 }
10779 op = (insn >> 21) & 0xf;
10780 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
10781 shifter_out, tmp, tmp2))
10782 goto illegal_op;
10783 tcg_temp_free_i32(tmp2);
10784 rd = (insn >> 8) & 0xf;
10785 if (rd == 13 && rn == 13
10786 && (op == 8 || op == 13)) {
10787
10788 store_sp_checked(s, tmp);
10789 } else if (rd != 15) {
10790 store_reg(s, rd, tmp);
10791 } else {
10792 tcg_temp_free_i32(tmp);
10793 }
10794 }
10795 }
10796 break;
10797 case 12:
10798 {
10799 int postinc = 0;
10800 int writeback = 0;
10801 int memidx;
10802 ISSInfo issinfo;
10803
10804 if ((insn & 0x01100000) == 0x01000000) {
10805 if (disas_neon_ls_insn(s, insn)) {
10806 goto illegal_op;
10807 }
10808 break;
10809 }
10810 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
10811 if (rs == 15) {
10812 if (!(insn & (1 << 20))) {
10813 goto illegal_op;
10814 }
10815 if (op != 2) {
10816
10817
10818
10819
10820
10821
10822
10823
10824
10825
10826
10827 int op1 = (insn >> 23) & 3;
10828 int op2 = (insn >> 6) & 0x3f;
10829 if (op & 2) {
10830 goto illegal_op;
10831 }
10832 if (rn == 15) {
10833
10834
10835
10836 return;
10837 }
10838 if (op1 & 1) {
10839 return;
10840 }
10841 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
10842 return;
10843 }
10844
10845 goto illegal_op;
10846 }
10847 }
10848 memidx = get_mem_index(s);
10849 if (rn == 15) {
10850 addr = tcg_temp_new_i32();
10851
10852
10853 imm = s->pc & 0xfffffffc;
10854 if (insn & (1 << 23))
10855 imm += insn & 0xfff;
10856 else
10857 imm -= insn & 0xfff;
10858 tcg_gen_movi_i32(addr, imm);
10859 } else {
10860 addr = load_reg(s, rn);
10861 if (insn & (1 << 23)) {
10862
10863 imm = insn & 0xfff;
10864 tcg_gen_addi_i32(addr, addr, imm);
10865 } else {
10866 imm = insn & 0xff;
10867 switch ((insn >> 8) & 0xf) {
10868 case 0x0:
10869 shift = (insn >> 4) & 0xf;
10870 if (shift > 3) {
10871 tcg_temp_free_i32(addr);
10872 goto illegal_op;
10873 }
10874 tmp = load_reg(s, rm);
10875 if (shift)
10876 tcg_gen_shli_i32(tmp, tmp, shift);
10877 tcg_gen_add_i32(addr, addr, tmp);
10878 tcg_temp_free_i32(tmp);
10879 break;
10880 case 0xc:
10881 tcg_gen_addi_i32(addr, addr, -imm);
10882 break;
10883 case 0xe:
10884 tcg_gen_addi_i32(addr, addr, imm);
10885 memidx = get_a32_user_mem_index(s);
10886 break;
10887 case 0x9:
10888 imm = -imm;
10889
10890 case 0xb:
10891 postinc = 1;
10892 writeback = 1;
10893 break;
10894 case 0xd:
10895 imm = -imm;
10896
10897 case 0xf:
10898 writeback = 1;
10899 break;
10900 default:
10901 tcg_temp_free_i32(addr);
10902 goto illegal_op;
10903 }
10904 }
10905 }
10906
10907 issinfo = writeback ? ISSInvalid : rs;
10908
10909 if (s->v8m_stackcheck && rn == 13 && writeback) {
10910
10911
10912
10913
10914
10915
10916 if ((int32_t)imm < 0) {
10917 TCGv_i32 newsp = tcg_temp_new_i32();
10918
10919 tcg_gen_addi_i32(newsp, addr, imm);
10920 gen_helper_v8m_stackcheck(cpu_env, newsp);
10921 tcg_temp_free_i32(newsp);
10922 } else {
10923 gen_helper_v8m_stackcheck(cpu_env, addr);
10924 }
10925 }
10926
10927 if (writeback && !postinc) {
10928 tcg_gen_addi_i32(addr, addr, imm);
10929 }
10930
10931 if (insn & (1 << 20)) {
10932
10933 tmp = tcg_temp_new_i32();
10934 switch (op) {
10935 case 0:
10936 gen_aa32_ld8u_iss(s, tmp, addr, memidx, issinfo);
10937 break;
10938 case 4:
10939 gen_aa32_ld8s_iss(s, tmp, addr, memidx, issinfo);
10940 break;
10941 case 1:
10942 gen_aa32_ld16u_iss(s, tmp, addr, memidx, issinfo);
10943 break;
10944 case 5:
10945 gen_aa32_ld16s_iss(s, tmp, addr, memidx, issinfo);
10946 break;
10947 case 2:
10948 gen_aa32_ld32u_iss(s, tmp, addr, memidx, issinfo);
10949 break;
10950 default:
10951 tcg_temp_free_i32(tmp);
10952 tcg_temp_free_i32(addr);
10953 goto illegal_op;
10954 }
10955 if (rs == 15) {
10956 gen_bx_excret(s, tmp);
10957 } else {
10958 store_reg(s, rs, tmp);
10959 }
10960 } else {
10961
10962 tmp = load_reg(s, rs);
10963 switch (op) {
10964 case 0:
10965 gen_aa32_st8_iss(s, tmp, addr, memidx, issinfo);
10966 break;
10967 case 1:
10968 gen_aa32_st16_iss(s, tmp, addr, memidx, issinfo);
10969 break;
10970 case 2:
10971 gen_aa32_st32_iss(s, tmp, addr, memidx, issinfo);
10972 break;
10973 default:
10974 tcg_temp_free_i32(tmp);
10975 tcg_temp_free_i32(addr);
10976 goto illegal_op;
10977 }
10978 tcg_temp_free_i32(tmp);
10979 }
10980 if (postinc)
10981 tcg_gen_addi_i32(addr, addr, imm);
10982 if (writeback) {
10983 store_reg(s, rn, addr);
10984 } else {
10985 tcg_temp_free_i32(addr);
10986 }
10987 }
10988 break;
10989 default:
10990 goto illegal_op;
10991 }
10992 return;
10993illegal_op:
10994 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
10995 default_exception_el(s));
10996}
10997
/*
 * Decode and translate one 16-bit Thumb (T16) instruction.
 * Any unrecognized encoding falls through to the UNDEF exception
 * generation at the "undef" label at the bottom.
 */
static void disas_thumb_insn(DisasContext *s, uint32_t insn)
{
    uint32_t val, op, rm, rn, rd, shift, cond;
    int32_t offset;
    int i;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;
    TCGv_i32 addr;

    switch (insn >> 12) {
    case 0: case 1:
        /* Shift (immediate), add, subtract */
        rd = insn & 7;
        op = (insn >> 11) & 3;
        if (op == 3) {
            /*
             * 0b0001_1xxx_xxxx_xxxx
             *  - Add, subtract (three low registers)
             *  - Add, subtract (two low registers and immediate)
             */
            rn = (insn >> 3) & 7;
            tmp = load_reg(s, rn);
            if (insn & (1 << 10)) {
                /* immediate operand */
                tmp2 = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
            } else {
                /* register operand */
                rm = (insn >> 6) & 7;
                tmp2 = load_reg(s, rm);
            }
            if (insn & (1 << 9)) {
                /* subtract; inside an IT block the flags are not set */
                if (s->condexec_mask)
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                else
                    gen_sub_CC(tmp, tmp, tmp2);
            } else {
                /* add */
                if (s->condexec_mask)
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                else
                    gen_add_CC(tmp, tmp, tmp2);
            }
            tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
        } else {
            /* shift immediate: LSL/LSR/ASR selected by op */
            rm = (insn >> 3) & 7;
            shift = (insn >> 6) & 0x1f;
            tmp = load_reg(s, rm);
            gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            store_reg(s, rd, tmp);
        }
        break;
    case 2: case 3:
        /*
         * 0b001x_xxxx_xxxx_xxxx
         *  - MOV, CMP, ADD, SUB with an 8-bit immediate
         */
        op = (insn >> 11) & 3;
        rd = (insn >> 8) & 0x7;
        if (op == 0) {
            /* MOV */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, insn & 0xff);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            store_reg(s, rd, tmp);
        } else {
            tmp = load_reg(s, rd);
            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, insn & 0xff);
            switch (op) {
            case 1: /* CMP: flags only, result discarded */
                gen_sub_CC(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp);
                tcg_temp_free_i32(tmp2);
                break;
            case 2: /* ADD */
                if (s->condexec_mask)
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                else
                    gen_add_CC(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
                break;
            case 3: /* SUB */
                if (s->condexec_mask)
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                else
                    gen_sub_CC(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
                break;
            }
        }
        break;
    case 4:
        if (insn & (1 << 11)) {
            rd = (insn >> 8) & 7;
            /* load pc-relative.  Bit 1 of PC is ignored.  */
            val = s->pc + 2 + ((insn & 0xff) * 4);
            val &= ~(uint32_t)2;
            addr = tcg_temp_new_i32();
            tcg_gen_movi_i32(addr, val);
            tmp = tcg_temp_new_i32();
            gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
                               rd | ISSIs16Bit);
            tcg_temp_free_i32(addr);
            store_reg(s, rd, tmp);
            break;
        }
        if (insn & (1 << 10)) {
            /*
             * 0b0100_01xx_xxxx_xxxx
             *  - data processing extended, branch and exchange
             */
            rd = (insn & 7) | ((insn >> 4) & 8);
            rm = (insn >> 3) & 0xf;
            op = (insn >> 8) & 3;
            switch (op) {
            case 0: /* add */
                tmp = load_reg(s, rd);
                tmp2 = load_reg(s, rm);
                tcg_gen_add_i32(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                if (rd == 13) {
                    /* ADD SP, SP, reg: may need v8M stack-limit check */
                    store_sp_checked(s, tmp);
                } else {
                    store_reg(s, rd, tmp);
                }
                break;
            case 1: /* cmp */
                tmp = load_reg(s, rd);
                tmp2 = load_reg(s, rm);
                gen_sub_CC(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                tcg_temp_free_i32(tmp);
                break;
            case 2: /* mov/cpy */
                tmp = load_reg(s, rm);
                if (rd == 13) {
                    /* MOV SP, reg: may need v8M stack-limit check */
                    store_sp_checked(s, tmp);
                } else {
                    store_reg(s, rd, tmp);
                }
                break;
            case 3:
            {
                /*
                 * 0b0100_0111_xxxx_xxxx
                 * - branch [and link] exchange thumb register
                 */
                bool link = insn & (1 << 7);

                if (insn & 3) {
                    goto undef;
                }
                if (link) {
                    ARCH(5);
                }
                if ((insn & 4)) {
                    /*
                     * BXNS/BLXNS: only exist for v8M with the security
                     * extensions, and always UNDEF if NonSecure.  We don't
                     * implement these in the user-only mode either (in
                     * theory you can use them from Secure User mode but
                     * they are too tied in to system emulation).
                     */
                    if (!s->v8m_secure || IS_USER_ONLY) {
                        goto undef;
                    }
                    if (link) {
                        gen_blxns(s, rm);
                    } else {
                        gen_bxns(s, rm);
                    }
                    break;
                }
                /* BLX/BX */
                tmp = load_reg(s, rm);
                if (link) {
                    val = (uint32_t)s->pc | 1;
                    tmp2 = tcg_temp_new_i32();
                    tcg_gen_movi_i32(tmp2, val);
                    store_reg(s, 14, tmp2);
                    gen_bx(s, tmp);
                } else {
                    /* Only BX works as exception-return, not BLX */
                    gen_bx_excret(s, tmp);
                }
                break;
            }
            }
            break;
        }

        /*
         * 0b0100_00xx_xxxx_xxxx
         *  - Data-processing (two low registers)
         */
        rd = insn & 7;
        rm = (insn >> 3) & 7;
        op = (insn >> 6) & 0xf;
        if (op == 2 || op == 3 || op == 4 || op == 7) {
            /* the shift/rotate ops want their operands backwards */
            val = rm;
            rm = rd;
            rd = val;
            val = 1;
        } else {
            val = 0;
        }

        if (op == 9) {
            /* neg: rd = -rm, so the first operand is constant zero */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, 0);
        } else if (op != 0xf) {
            tmp = load_reg(s, rd);
        } else {
            tmp = NULL; /* mvn doesn't read its first operand */
        }

        tmp2 = load_reg(s, rm);
        switch (op) {
        case 0x0: /* and */
            tcg_gen_and_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0x1: /* eor */
            tcg_gen_xor_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0x2: /* lsl */
            if (s->condexec_mask) {
                gen_shl(tmp2, tmp2, tmp);
            } else {
                gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x3: /* lsr */
            if (s->condexec_mask) {
                gen_shr(tmp2, tmp2, tmp);
            } else {
                gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x4: /* asr */
            if (s->condexec_mask) {
                gen_sar(tmp2, tmp2, tmp);
            } else {
                gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x5: /* adc */
            if (s->condexec_mask) {
                gen_adc(tmp, tmp2);
            } else {
                gen_adc_CC(tmp, tmp, tmp2);
            }
            break;
        case 0x6: /* sbc */
            if (s->condexec_mask) {
                gen_sub_carry(tmp, tmp, tmp2);
            } else {
                gen_sbc_CC(tmp, tmp, tmp2);
            }
            break;
        case 0x7: /* ror */
            if (s->condexec_mask) {
                tcg_gen_andi_i32(tmp, tmp, 0x1f);
                tcg_gen_rotr_i32(tmp2, tmp2, tmp);
            } else {
                gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x8: /* tst */
            tcg_gen_and_i32(tmp, tmp, tmp2);
            gen_logic_CC(tmp);
            rd = 16; /* rd == 16 suppresses the register writeback below */
            break;
        case 0x9: /* neg */
            if (s->condexec_mask)
                tcg_gen_neg_i32(tmp, tmp2);
            else
                gen_sub_CC(tmp, tmp, tmp2);
            break;
        case 0xa: /* cmp */
            gen_sub_CC(tmp, tmp, tmp2);
            rd = 16;
            break;
        case 0xb: /* cmn */
            gen_add_CC(tmp, tmp, tmp2);
            rd = 16;
            break;
        case 0xc: /* orr */
            tcg_gen_or_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xd: /* mul */
            tcg_gen_mul_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xe: /* bic */
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xf: /* mvn */
            tcg_gen_not_i32(tmp2, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp2);
            val = 1;
            rm = rd;
            break;
        }
        if (rd != 16) {
            if (val) {
                /* result is in tmp2 (shift/rotate/mvn cases) */
                store_reg(s, rm, tmp2);
                if (op != 0xf)
                    tcg_temp_free_i32(tmp);
            } else {
                /* result is in tmp */
                store_reg(s, rd, tmp);
                tcg_temp_free_i32(tmp2);
            }
        } else {
            /* comparison ops set flags only; discard both temps */
            tcg_temp_free_i32(tmp);
            tcg_temp_free_i32(tmp2);
        }
        break;

    case 5:
        /* load/store register offset.  */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        rm = (insn >> 6) & 7;
        op = (insn >> 9) & 7;
        addr = load_reg(s, rn);
        tmp = load_reg(s, rm);
        tcg_gen_add_i32(addr, addr, tmp);
        tcg_temp_free_i32(tmp);

        if (op < 3) { /* store */
            tmp = load_reg(s, rd);
        } else { /* load */
            tmp = tcg_temp_new_i32();
        }

        switch (op) {
        case 0: /* str */
            gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            break;
        case 1: /* strh */
            gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            break;
        case 2: /* strb */
            gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            break;
        case 3: /* ldrsb */
            gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            break;
        case 4: /* ldr */
            gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            break;
        case 5: /* ldrh */
            gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            break;
        case 6: /* ldrb */
            gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            break;
        case 7: /* ldrsh */
            gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            break;
        }
        if (op >= 3) { /* load */
            store_reg(s, rd, tmp);
        } else {
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;

    case 6:
        /* load/store word immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 4) & 0x7c;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = tcg_temp_new_i32();
            gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_aa32_st32(s, tmp, addr, get_mem_index(s));
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;

    case 7:
        /* load/store byte immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 6) & 0x1f;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = tcg_temp_new_i32();
            gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;

    case 8:
        /* load/store halfword immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 5) & 0x3e;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = tcg_temp_new_i32();
            gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;

    case 9:
        /* load/store from stack (SP-relative) */
        rd = (insn >> 8) & 7;
        addr = load_reg(s, 13);
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = tcg_temp_new_i32();
            gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;

    case 10:
        /*
         * 0b1010_xxxx_xxxx_xxxx
         *  - Add PC/SP (immediate)
         */
        rd = (insn >> 8) & 7;
        if (insn & (1 << 11)) {
            /* SP */
            tmp = load_reg(s, 13);
        } else {
            /* PC.  bit 1 is ignored.  */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
        }
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(tmp, tmp, val);
        store_reg(s, rd, tmp);
        break;

    case 11:
        /* misc */
        op = (insn >> 8) & 0xf;
        switch (op) {
        case 0:
            /*
             * 0b1011_0000_xxxx_xxxx
             *  - ADD (SP plus immediate)
             *  - SUB (SP minus immediate)
             */
            tmp = load_reg(s, 13);
            val = (insn & 0x7f) * 4;
            if (insn & (1 << 7))
                val = -(int32_t)val;
            tcg_gen_addi_i32(tmp, tmp, val);
            store_sp_checked(s, tmp);
            break;

        case 2: /* sign/zero extend.  */
            ARCH(6);
            rd = insn & 7;
            rm = (insn >> 3) & 7;
            tmp = load_reg(s, rm);
            switch ((insn >> 6) & 3) {
            case 0: gen_sxth(tmp); break;
            case 1: gen_sxtb(tmp); break;
            case 2: gen_uxth(tmp); break;
            case 3: gen_uxtb(tmp); break;
            }
            store_reg(s, rd, tmp);
            break;
        case 4: case 5: case 0xc: case 0xd:
            /*
             * 0b1011_x10x_xxxx_xxxx
             *  - push/pop
             */
            addr = load_reg(s, 13);
            if (insn & (1 << 8))
                offset = 4;
            else
                offset = 0;
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i))
                    offset += 4;
            }
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            }

            if (s->v8m_stackcheck) {
                /*
                 * Here 'addr' is the lower of "old SP" and "new SP";
                 * if addr is below the stack limit the write to any of
                 * the locations would fault, so check the lowest address
                 * once up front.
                 */
                gen_helper_v8m_stackcheck(cpu_env, addr);
            }

            for (i = 0; i < 8; i++) {
                if (insn & (1 << i)) {
                    if (insn & (1 << 11)) {
                        /* pop */
                        tmp = tcg_temp_new_i32();
                        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                        store_reg(s, i, tmp);
                    } else {
                        /* push */
                        tmp = load_reg(s, i);
                        gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                        tcg_temp_free_i32(tmp);
                    }
                    /* advance to the next address.  */
                    tcg_gen_addi_i32(addr, addr, 4);
                }
            }
            tmp = NULL;
            if (insn & (1 << 8)) {
                if (insn & (1 << 11)) {
                    /* pop pc */
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                    /* don't set the pc until the rest of the instruction
                       has completed */
                } else {
                    /* push lr */
                    tmp = load_reg(s, 14);
                    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                    tcg_temp_free_i32(tmp);
                }
                tcg_gen_addi_i32(addr, addr, 4);
            }
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            }
            /* write back the new stack pointer */
            store_reg(s, 13, addr);
            /* set the new PC value */
            if ((insn & 0x0900) == 0x0900) {
                store_reg_from_load(s, 15, tmp);
            }
            break;

        case 1: case 3: case 9: case 11: /* czb */
            rm = insn & 7;
            tmp = load_reg(s, rm);
            arm_gen_condlabel(s);
            if (insn & (1 << 11))
                tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
            else
                tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
            tcg_temp_free_i32(tmp);
            offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
            val = (uint32_t)s->pc + 2;
            val += offset;
            gen_jmp(s, val);
            break;

        case 15: /* IT, nop-hint.  */
            if ((insn & 0xf) == 0) {
                gen_nop_hint(s, (insn >> 4) & 0xf);
                break;
            }
            /*
             * IT (If-Then): set up the state machine for the following
             * conditionally-executed instructions.  No code is generated
             * for the IT insn itself; the state is consumed in
             * thumb_tr_translate_insn().
             */
            s->condexec_cond = (insn >> 4) & 0xe;
            s->condexec_mask = insn & 0x1f;
            /* No actual code generated for this insn, just state setup. */
            break;

        case 0xe: /* bkpt */
        {
            int imm8 = extract32(insn, 0, 8);
            ARCH(5);
            gen_exception_bkpt_insn(s, 2, syn_aa32_bkpt(imm8, true));
            break;
        }

        case 0xa: /* rev, and hlt */
        {
            int op1 = extract32(insn, 6, 2);

            if (op1 == 2) {
                /* HLT */
                int imm6 = extract32(insn, 0, 6);

                gen_hlt(s, imm6);
                break;
            }

            /* Otherwise this is rev */
            ARCH(6);
            rn = (insn >> 3) & 0x7;
            rd = insn & 0x7;
            tmp = load_reg(s, rn);
            switch (op1) {
            case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
            case 1: gen_rev16(tmp); break;
            case 3: gen_revsh(tmp); break;
            default:
                g_assert_not_reached(); /* op1 == 2 handled above */
            }
            store_reg(s, rd, tmp);
            break;
        }

        case 6:
            switch ((insn >> 5) & 7) {
            case 2:
                /* setend */
                ARCH(6);
                if (((insn >> 3) & 1) != !!(s->be_data == MO_BE)) {
                    gen_helper_setend(cpu_env);
                    s->base.is_jmp = DISAS_UPDATE;
                }
                break;
            case 3:
                /* cps */
                ARCH(6);
                if (IS_USER(s)) {
                    break;
                }
                if (arm_dc_feature(s, ARM_FEATURE_M)) {
                    tmp = tcg_const_i32((insn & (1 << 4)) != 0);
                    /* FAULTMASK */
                    if (insn & 1) {
                        addr = tcg_const_i32(19);
                        gen_helper_v7m_msr(cpu_env, addr, tmp);
                        tcg_temp_free_i32(addr);
                    }
                    /* PRIMASK */
                    if (insn & 2) {
                        addr = tcg_const_i32(16);
                        gen_helper_v7m_msr(cpu_env, addr, tmp);
                        tcg_temp_free_i32(addr);
                    }
                    tcg_temp_free_i32(tmp);
                    gen_lookup_tb(s);
                } else {
                    if (insn & (1 << 4)) {
                        shift = CPSR_A | CPSR_I | CPSR_F;
                    } else {
                        shift = 0;
                    }
                    gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
                }
                break;
            default:
                goto undef;
            }
            break;

        default:
            goto undef;
        }
        break;

    case 12:
    {
        /* load/store multiple */
        TCGv_i32 loaded_var = NULL;
        rn = (insn >> 8) & 0x7;
        addr = load_reg(s, rn);
        for (i = 0; i < 8; i++) {
            if (insn & (1 << i)) {
                if (insn & (1 << 11)) {
                    /* load */
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                    if (i == rn) {
                        /* base reg in list: defer write until the end */
                        loaded_var = tmp;
                    } else {
                        store_reg(s, i, tmp);
                    }
                } else {
                    /* store */
                    tmp = load_reg(s, i);
                    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                    tcg_temp_free_i32(tmp);
                }
                /* advance to the next address */
                tcg_gen_addi_i32(addr, addr, 4);
            }
        }
        if ((insn & (1 << rn)) == 0) {
            /* base reg not in list: base register writeback */
            store_reg(s, rn, addr);
        } else {
            /* base reg in list: if load, complete it now */
            if (insn & (1 << 11)) {
                store_reg(s, rn, loaded_var);
            }
            tcg_temp_free_i32(addr);
        }
        break;
    }
    case 13:
        /* conditional branch or swi */
        cond = (insn >> 8) & 0xf;
        if (cond == 0xe)
            goto undef;

        if (cond == 0xf) {
            /* swi */
            gen_set_pc_im(s, s->pc);
            s->svc_imm = extract32(insn, 0, 8);
            s->base.is_jmp = DISAS_SWI;
            break;
        }
        /* generate a conditional jump to next instruction */
        arm_skip_unless(s, cond);

        /* jump to the offset */
        val = (uint32_t)s->pc + 2;
        offset = ((int32_t)insn << 24) >> 24;
        val += offset << 1;
        gen_jmp(s, val);
        break;

    case 14:
        if (insn & (1 << 11)) {
            /*
             * thumb_insn_is_16bit() ensures we can't get here for
             * a Thumb2 CPU, so this must be a thumb1 split BL/BLX:
             * 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF)
             */
            assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
            ARCH(5);
            offset = ((insn & 0x7ff) << 1);
            tmp = load_reg(s, 14);
            tcg_gen_addi_i32(tmp, tmp, offset);
            tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
            /* set the return address */
            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, s->pc | 1);
            store_reg(s, 14, tmp2);
            gen_bx(s, tmp);
            break;
        }
        /* unconditional branch */
        val = (uint32_t)s->pc;
        offset = ((int32_t)insn << 21) >> 21;
        val += (offset << 1) + 2;
        gen_jmp(s, val);
        break;

    case 15:
        /*
         * thumb_insn_is_16bit() ensures we can't get here for
         * a Thumb2 CPU, so this must be a thumb1 split BL/BLX.
         */
        assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));

        if (insn & (1 << 11)) {
            /* 0b1111_1xxx_xxxx_xxxx : BL suffix */
            offset = ((insn & 0x7ff) << 1) | 1;
            tmp = load_reg(s, 14);
            tcg_gen_addi_i32(tmp, tmp, offset);
            /* set the return address */
            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, s->pc | 1);
            store_reg(s, 14, tmp2);
            gen_bx(s, tmp);
        } else {
            /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix; stash in LR */
            uint32_t uoffset = ((int32_t)insn << 21) >> 9;

            tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + uoffset);
        }
        break;
    }
    return;
illegal_op:
undef:
    gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
}
11834
11835static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
11836{
11837
11838
11839
11840
11841
11842
11843
11844 uint16_t insn = arm_lduw_code(env, s->pc, s->sctlr_b);
11845
11846 return !thumb_insn_is_16bit(s, insn);
11847}
11848
/*
 * TranslatorOps hook: initialize the DisasContext from the CPU state
 * and the TB flags before translation of a new TB begins.
 */
static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cs->env_ptr;
    ARMCPU *cpu = env_archcpu(env);
    uint32_t tb_flags = dc->base.tb->flags;
    uint32_t condexec, core_mmu_idx;

    dc->isar = &cpu->isar;
    dc->pc = dc->base.pc_first;
    dc->condjmp = 0;

    dc->aarch64 = 0;
    /*
     * If we are coming from secure EL0 in a system with a 32-bit EL3,
     * then there is no secure EL1, so we route exceptions to EL3.
     */
    dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
                               !arm_el_is_aa64(env, 3);
    dc->thumb = FIELD_EX32(tb_flags, TBFLAG_A32, THUMB);
    dc->sctlr_b = FIELD_EX32(tb_flags, TBFLAG_A32, SCTLR_B);
    dc->be_data = FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE;
    condexec = FIELD_EX32(tb_flags, TBFLAG_A32, CONDEXEC);
    dc->condexec_mask = (condexec & 0xf) << 1;
    dc->condexec_cond = condexec >> 4;
    core_mmu_idx = FIELD_EX32(tb_flags, TBFLAG_ANY, MMUIDX);
    dc->mmu_idx = core_to_arm_mmu_idx(env, core_mmu_idx);
    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
    dc->user = (dc->current_el == 0);
#endif
    dc->ns = FIELD_EX32(tb_flags, TBFLAG_A32, NS);
    dc->fp_excp_el = FIELD_EX32(tb_flags, TBFLAG_ANY, FPEXC_EL);
    dc->vfp_enabled = FIELD_EX32(tb_flags, TBFLAG_A32, VFPEN);
    dc->vec_len = FIELD_EX32(tb_flags, TBFLAG_A32, VECLEN);
    if (arm_feature(env, ARM_FEATURE_XSCALE)) {
        /* XScale reuses the VECSTRIDE field as the coprocessor enables */
        dc->c15_cpar = FIELD_EX32(tb_flags, TBFLAG_A32, XSCALE_CPAR);
        dc->vec_stride = 0;
    } else {
        dc->vec_stride = FIELD_EX32(tb_flags, TBFLAG_A32, VECSTRIDE);
        dc->c15_cpar = 0;
    }
    dc->v7m_handler_mode = FIELD_EX32(tb_flags, TBFLAG_A32, HANDLER);
    dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
        regime_is_secure(env, dc->mmu_idx);
    dc->v8m_stackcheck = FIELD_EX32(tb_flags, TBFLAG_A32, STACKCHECK);
    dc->v8m_fpccr_s_wrong = FIELD_EX32(tb_flags, TBFLAG_A32, FPCCR_S_WRONG);
    dc->v7m_new_fp_ctxt_needed =
        FIELD_EX32(tb_flags, TBFLAG_A32, NEW_FP_CTXT_NEEDED);
    dc->v7m_lspact = FIELD_EX32(tb_flags, TBFLAG_A32, LSPACT);
    dc->cp_regs = cpu->cp_regs;
    dc->features = env->features;

    /* Single step state. The code-generation logic here is:
     *  SS_ACTIVE == 0:
     *   generate code with no special handling for single-stepping (except
     *   that anything that can make us go to SS_ACTIVE == 1 must end the TB;
     *   this happens anyway because those changes are all system register or
     *   CPSR writes).
     *  SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
     *   emit code to generate a software step exception for completed step;
     *   end TB (as usual for having generated an exception).
     *  SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
     *   emit code to generate a software step exception; end the TB.
     *   (arm_pre_translate_insn() handles this case.)
     */
    dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE);
    dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE_SS);
    dc->is_ldex = false;
    dc->ss_same_el = false;

    dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK;

    /* If architectural single step active, limit to 1.  */
    if (is_singlestepping(dc)) {
        dc->base.max_insns = 1;
    }

    /* ARM is a fixed-length ISA.  Bound the number of insns to execute
       to those left on the page.  */
    if (!dc->thumb) {
        int bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
        dc->base.max_insns = MIN(dc->base.max_insns, bound);
    }

    cpu_V0 = tcg_temp_new_i64();
    cpu_V1 = tcg_temp_new_i64();
    /* FIXME: cpu_M0 can probably be the same as cpu_V0.  */
    cpu_M0 = tcg_temp_new_i64();
}
11940
11941static void arm_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
11942{
11943 DisasContext *dc = container_of(dcbase, DisasContext, base);
11944
11945
11946
11947
11948
11949
11950
11951
11952
11953
11954
11955
11956
11957
11958
11959
11960
11961
11962
11963
11964
11965
11966
11967
11968
11969
11970
11971
11972
11973
11974
11975
11976
11977 if (dc->condexec_mask || dc->condexec_cond) {
11978 TCGv_i32 tmp = tcg_temp_new_i32();
11979 tcg_gen_movi_i32(tmp, 0);
11980 store_cpu_field(tmp, condexec_bits);
11981 }
11982}
11983
11984static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
11985{
11986 DisasContext *dc = container_of(dcbase, DisasContext, base);
11987
11988 tcg_gen_insn_start(dc->pc,
11989 (dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
11990 0);
11991 dc->insn_start = tcg_last_op();
11992}
11993
11994static bool arm_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
11995 const CPUBreakpoint *bp)
11996{
11997 DisasContext *dc = container_of(dcbase, DisasContext, base);
11998
11999 if (bp->flags & BP_CPU) {
12000 gen_set_condexec(dc);
12001 gen_set_pc_im(dc, dc->pc);
12002 gen_helper_check_breakpoints(cpu_env);
12003
12004 dc->base.is_jmp = DISAS_TOO_MANY;
12005 } else {
12006 gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
12007
12008
12009
12010
12011
12012
12013
12014 dc->pc += 2;
12015 dc->base.is_jmp = DISAS_NORETURN;
12016 }
12017
12018 return true;
12019}
12020
/*
 * Common per-insn checks shared by the A32 and Thumb translate loops.
 * Returns true if the insn must NOT be translated (an exception was
 * generated instead).
 */
static bool arm_pre_translate_insn(DisasContext *dc)
{
#ifdef CONFIG_USER_ONLY
    /* Intercept jump to the magic kernel page.  */
    if (dc->pc >= 0xffff0000) {
        /* We always get here via a jump, so know we are not in a
           conditional execution block.  */
        gen_exception_internal(EXCP_KERNEL_TRAP);
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }
#endif

    if (dc->ss_active && !dc->pstate_ss) {
        /*
         * Singlestep state is Active-pending: generate the software
         * step exception instead of translating anything.  The step
         * exception must be the first insn of the TB (it is the same
         * behaviour as architectural singlestep of the real insn
         * would be), hence the assert.
         */
        assert(dc->base.num_insns == 1);
        gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
                      default_exception_el(dc));
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }

    return false;
}
12054
12055static void arm_post_translate_insn(DisasContext *dc)
12056{
12057 if (dc->condjmp && !dc->base.is_jmp) {
12058 gen_set_label(dc->condlabel);
12059 dc->condjmp = 0;
12060 }
12061 dc->base.pc_next = dc->pc;
12062 translator_loop_temp_check(&dc->base);
12063}
12064
12065static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
12066{
12067 DisasContext *dc = container_of(dcbase, DisasContext, base);
12068 CPUARMState *env = cpu->env_ptr;
12069 unsigned int insn;
12070
12071 if (arm_pre_translate_insn(dc)) {
12072 return;
12073 }
12074
12075 insn = arm_ldl_code(env, dc->pc, dc->sctlr_b);
12076 dc->insn = insn;
12077 dc->pc += 4;
12078 disas_arm_insn(dc, insn);
12079
12080 arm_post_translate_insn(dc);
12081
12082
12083
12084}
12085
static bool thumb_insn_is_unconditional(DisasContext *s, uint32_t insn)
{
    /*
     * Return true if this Thumb insn is always unconditional,
     * even inside an IT block. This is true of only a very few
     * instructions: BKPT, HLT, and SG.
     *
     * A larger class of instructions are UNPREDICTABLE if used
     * inside an IT block; we do not need to detect those here, because
     * what we do by default (perform the cc check and update the IT
     * bits state machine) is a permitted CONSTRAINED UNPREDICTABLE
     * choice for those situations.
     *
     * insn is either a 16-bit or a 32-bit instruction; the two are
     * distinguishable because for the 16-bit case the top 16 bits
     * are zeroes, and that isn't a valid 32-bit encoding.
     */
    if ((insn & 0xffffff00) == 0xbe00) {
        /* BKPT */
        return true;
    }

    if ((insn & 0xffffffc0) == 0xba80 && arm_dc_feature(s, ARM_FEATURE_V8) &&
        !arm_dc_feature(s, ARM_FEATURE_M)) {
        /*
         * HLT: v8A only.  This is unconditional even when it is going to
         * UNDEF.  For v7 cores this was a plain old undefined encoding
         * and so honours its condition check.  (We might be using the
         * encoding as a semihosting trap, but we don't change the cc
         * check behaviour on that account, because a debugger connected
         * to a real v7A core and emulating semihosting traps by catching
         * the UNDEF exception would also only see cases where the cc
         * check passed.  No guest code should be trying to do a HLT
         * semihosting trap in an IT block anyway.)
         */
        return true;
    }

    if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_V8) &&
        arm_dc_feature(s, ARM_FEATURE_M)) {
        /* SG: v8M only */
        return true;
    }

    return false;
}
12131
/*
 * TranslatorOps hook: translate one Thumb instruction (16 or 32 bit),
 * handling IT-block conditional skipping and page-crossing TB ending.
 */
static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;
    uint32_t insn;
    bool is_16bit;

    if (arm_pre_translate_insn(dc)) {
        return;
    }

    insn = arm_lduw_code(env, dc->pc, dc->sctlr_b);
    is_16bit = thumb_insn_is_16bit(dc, insn);
    dc->pc += 2;
    if (!is_16bit) {
        /* 32-bit encoding: fetch the second halfword */
        uint32_t insn2 = arm_lduw_code(env, dc->pc, dc->sctlr_b);

        insn = insn << 16 | insn2;
        dc->pc += 2;
    }
    dc->insn = insn;

    if (dc->condexec_mask && !thumb_insn_is_unconditional(dc, insn)) {
        uint32_t cond = dc->condexec_cond;

        /*
         * Conditionally skip the insn.  Note that both 0xe and 0xf
         * mean "always" here (0xf is not "never" for IT purposes),
         * so no skip is emitted for them.
         */
        if (cond < 0x0e) {
            arm_skip_unless(dc, cond);
        }
    }

    if (is_16bit) {
        disas_thumb_insn(dc, insn);
    } else {
        disas_thumb2_insn(dc, insn);
    }

    /* Advance the Thumb condexec (IT block) state machine.  */
    if (dc->condexec_mask) {
        dc->condexec_cond = ((dc->condexec_cond & 0xe) |
                             ((dc->condexec_mask >> 4) & 1));
        dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
        if (dc->condexec_mask == 0) {
            dc->condexec_cond = 0;
        }
    }

    arm_post_translate_insn(dc);

    /*
     * Thumb is a variable-length ISA.  Stop translation when the next
     * insn will touch a new page, so that prefetch aborts occur at the
     * right place.
     *
     * We want to stop the TB if the next insn starts in a new page,
     * or if it spans between this page and the next.  This means that
     * if we're looking at the last halfword in the page we need to
     * see if it's a 16-bit Thumb insn (which will fit in this TB)
     * or a 32-bit Thumb insn (which won't, so we stop the TB).
     */
    if (dc->base.is_jmp == DISAS_NEXT
        && (dc->pc - dc->page_start >= TARGET_PAGE_SIZE
            || (dc->pc - dc->page_start >= TARGET_PAGE_SIZE - 3
                && insn_crosses_page(env, dc)))) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }
}
12204
/*
 * TranslatorOps hook: emit the TB's epilogue according to why
 * translation stopped (dc->base.is_jmp), with separate handling for
 * exception-return branches, single-stepping, and the normal case.
 */
static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (tb_cflags(dc->base.tb) & CF_LAST_IO && dc->condjmp) {
        /* FIXME: This can theoretically happen with self-modifying code. */
        cpu_abort(cpu, "IO on conditional branch instruction");
    }

    /*
     * At this stage dc->condjmp will only be set when the skipped
     * instruction was a conditional branch or trap, and the PC has
     * already been written.
     */
    gen_set_condexec(dc);
    if (dc->base.is_jmp == DISAS_BX_EXCRET) {
        /*
         * Exception return branches need some special case code at the
         * end of the TB, which is complex enough that it has to handle
         * the single-step vs not and the condition-failed insn codepath
         * itself.
         */
        gen_bx_excret_final_code(dc);
    } else if (unlikely(is_singlestepping(dc))) {
        /* Unconditional and "condition passed" instruction codepath. */
        switch (dc->base.is_jmp) {
        case DISAS_SWI:
            gen_ss_advance(dc);
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_ss_advance(dc);
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_ss_advance(dc);
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
        case DISAS_UPDATE:
            gen_set_pc_im(dc, dc->pc);
            /* fall through */
        default:
            /* FIXME: Single stepping a WFI insn will not halt the CPU. */
            gen_singlestep_exception(dc);
            break;
        case DISAS_NORETURN:
            break;
        }
    } else {
        /*
         * While branches must always occur at the end of an IT block,
         * there are a few other things that can cause us to terminate
         * the TB in the middle of an IT block:
         *  - Exception generating instructions (bkpt, swi, undefined).
         *  - Page boundaries.
         *  - Hardware watchpoints.
         * Hardware breakpoints have already been handled and skip
         * this code.
         */
        switch(dc->base.is_jmp) {
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        case DISAS_JUMP:
            gen_goto_ptr();
            break;
        case DISAS_UPDATE:
            gen_set_pc_im(dc, dc->pc);
            /* fall through */
        default:
            /* indicate that the hash table must be used to find next TB */
            tcg_gen_exit_tb(NULL, 0);
            break;
        case DISAS_NORETURN:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
        {
            /* Helper argument: insn length (2 for 16-bit Thumb, else 4) */
            TCGv_i32 tmp = tcg_const_i32((dc->thumb &&
                                          !(dc->insn & (1U << 31))) ? 2 : 4);

            gen_helper_wfi(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            /*
             * The helper doesn't necessarily throw an exception, but we
             * must go back to the main loop to check for interrupts
             * anyway.
             */
            tcg_gen_exit_tb(NULL, 0);
            break;
        }
        case DISAS_WFE:
            gen_helper_wfe(cpu_env);
            break;
        case DISAS_YIELD:
            gen_helper_yield(cpu_env);
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        }
    }

    if (dc->condjmp) {
        /* "Condition failed" instruction codepath for the branch/trap. */
        gen_set_label(dc->condlabel);
        gen_set_condexec(dc);
        if (unlikely(is_singlestepping(dc))) {
            gen_set_pc_im(dc, dc->pc);
            gen_singlestep_exception(dc);
        } else {
            gen_goto_tb(dc, 1, dc->pc);
        }
    }

    /* Functions above can change dc->pc, so re-align db->pc_next.  */
    dc->base.pc_next = dc->pc;
}
12327
12328static void arm_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
12329{
12330 DisasContext *dc = container_of(dcbase, DisasContext, base);
12331
12332 qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
12333 log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size);
12334}
12335
/* Translator hooks for 32-bit Arm (A32) instruction decoding. */
static const TranslatorOps arm_translator_ops = {
    .init_disas_context = arm_tr_init_disas_context,
    .tb_start           = arm_tr_tb_start,
    .insn_start         = arm_tr_insn_start,
    .breakpoint_check   = arm_tr_breakpoint_check,
    .translate_insn     = arm_tr_translate_insn,
    .tb_stop            = arm_tr_tb_stop,
    .disas_log          = arm_tr_disas_log,
};
12345
/* Translator hooks for Thumb (T32) decoding; shares every hook with the
 * A32 table except the per-insn translate callback. */
static const TranslatorOps thumb_translator_ops = {
    .init_disas_context = arm_tr_init_disas_context,
    .tb_start           = arm_tr_tb_start,
    .insn_start         = arm_tr_insn_start,
    .breakpoint_check   = arm_tr_breakpoint_check,
    .translate_insn     = thumb_tr_translate_insn,
    .tb_stop            = arm_tr_tb_stop,
    .disas_log          = arm_tr_disas_log,
};
12355
12356
12357void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
12358{
12359 DisasContext dc;
12360 const TranslatorOps *ops = &arm_translator_ops;
12361
12362 if (FIELD_EX32(tb->flags, TBFLAG_A32, THUMB)) {
12363 ops = &thumb_translator_ops;
12364 }
12365#ifdef TARGET_AARCH64
12366 if (FIELD_EX32(tb->flags, TBFLAG_ANY, AARCH64_STATE)) {
12367 ops = &aarch64_translator_ops;
12368 }
12369#endif
12370
12371 translator_loop(ops, &dc.base, cpu, tb, max_insns);
12372}
12373
12374void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
12375 target_ulong *data)
12376{
12377 if (is_a64(env)) {
12378 env->pc = data[0];
12379 env->condexec_bits = 0;
12380 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
12381 } else {
12382 env->regs[15] = data[0];
12383 env->condexec_bits = data[1];
12384 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
12385 }
12386}
12387