1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20#define DEBUG_DISAS
21
22#include "qemu/osdep.h"
23#include "cpu.h"
24#include "disas/disas.h"
25#include "exec/exec-all.h"
26#include "tcg/tcg-op.h"
27#include "exec/cpu_ldst.h"
28#include "exec/helper-proto.h"
29#include "exec/helper-gen.h"
30#include "exec/translator.h"
31#include "trace-tcg.h"
32#include "exec/log.h"
33#include "qemu/qemu-print.h"
34
35
/* Per-translation-block decoder state for the SH4 front end. */
typedef struct DisasContext {
    DisasContextBase base;

    /* Snapshot of the TB flags; constant while translating this TB. */
    uint32_t tbflags;
    /* Working copy of the flag bits the decoder may update
       (delay-slot state, gUSA region bits). */
    uint32_t envflags;
    /* MMU index passed to tcg_gen_qemu_ld/st for guest accesses. */
    int memidx;
    /* XORed into general-register numbers to select the active bank. */
    int gbank;
    /* XORed into FP-register numbers to select the active FPU bank. */
    int fbank;
    /* Target of a pending delayed branch, or (uint32_t)-1 when the
       target is dynamic (held in cpu_delayed_pc instead). */
    uint32_t delayed_pc;
    /* CPU feature bits, tested e.g. against SH_FEATURE_SH4A. */
    uint32_t features;

    /* The 16-bit instruction currently being decoded. */
    uint16_t opcode;

    /* True while a MOVCA.L backup may still be live (see the
       movcal handling at the top of _decode_opc). */
    bool has_movcal;
} DisasContext;
51
#if defined(CONFIG_USER_ONLY)
/* User-only builds always translate as user mode. */
#define IS_USER(ctx) 1
#else
/* User mode when SR.MD is clear in the TB flags. */
#define IS_USER(ctx) (!(ctx->tbflags & (1u << SR_MD)))
#endif

/* is_jmp value: stop translation, PC is already up to date. */
#define DISAS_STOP DISAS_TARGET_0

/* TCG globals mirroring the CPUSH4State fields used by the translator.
   cpu_gregs has 32 slots: 24 real registers plus 8 aliases set up in
   sh4_translate_init so banked indexing works uniformly. */
static TCGv cpu_gregs[32];
static TCGv cpu_sr, cpu_sr_m, cpu_sr_q, cpu_sr_t;
static TCGv cpu_pc, cpu_ssr, cpu_spc, cpu_gbr;
static TCGv cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl;
static TCGv cpu_pr, cpu_fpscr, cpu_fpul;
static TCGv cpu_lock_addr, cpu_lock_value;
static TCGv cpu_fregs[32];

/* Internal (non-architectural) state used for delay-slot handling. */
static TCGv cpu_flags, cpu_delayed_pc, cpu_delayed_cond;

#include "exec/gen-icount.h"
76
77void sh4_translate_init(void)
78{
79 int i;
80 static const char * const gregnames[24] = {
81 "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
82 "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
83 "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
84 "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
85 "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
86 };
87 static const char * const fregnames[32] = {
88 "FPR0_BANK0", "FPR1_BANK0", "FPR2_BANK0", "FPR3_BANK0",
89 "FPR4_BANK0", "FPR5_BANK0", "FPR6_BANK0", "FPR7_BANK0",
90 "FPR8_BANK0", "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
91 "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
92 "FPR0_BANK1", "FPR1_BANK1", "FPR2_BANK1", "FPR3_BANK1",
93 "FPR4_BANK1", "FPR5_BANK1", "FPR6_BANK1", "FPR7_BANK1",
94 "FPR8_BANK1", "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
95 "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
96 };
97
98 for (i = 0; i < 24; i++) {
99 cpu_gregs[i] = tcg_global_mem_new_i32(cpu_env,
100 offsetof(CPUSH4State, gregs[i]),
101 gregnames[i]);
102 }
103 memcpy(cpu_gregs + 24, cpu_gregs + 8, 8 * sizeof(TCGv));
104
105 cpu_pc = tcg_global_mem_new_i32(cpu_env,
106 offsetof(CPUSH4State, pc), "PC");
107 cpu_sr = tcg_global_mem_new_i32(cpu_env,
108 offsetof(CPUSH4State, sr), "SR");
109 cpu_sr_m = tcg_global_mem_new_i32(cpu_env,
110 offsetof(CPUSH4State, sr_m), "SR_M");
111 cpu_sr_q = tcg_global_mem_new_i32(cpu_env,
112 offsetof(CPUSH4State, sr_q), "SR_Q");
113 cpu_sr_t = tcg_global_mem_new_i32(cpu_env,
114 offsetof(CPUSH4State, sr_t), "SR_T");
115 cpu_ssr = tcg_global_mem_new_i32(cpu_env,
116 offsetof(CPUSH4State, ssr), "SSR");
117 cpu_spc = tcg_global_mem_new_i32(cpu_env,
118 offsetof(CPUSH4State, spc), "SPC");
119 cpu_gbr = tcg_global_mem_new_i32(cpu_env,
120 offsetof(CPUSH4State, gbr), "GBR");
121 cpu_vbr = tcg_global_mem_new_i32(cpu_env,
122 offsetof(CPUSH4State, vbr), "VBR");
123 cpu_sgr = tcg_global_mem_new_i32(cpu_env,
124 offsetof(CPUSH4State, sgr), "SGR");
125 cpu_dbr = tcg_global_mem_new_i32(cpu_env,
126 offsetof(CPUSH4State, dbr), "DBR");
127 cpu_mach = tcg_global_mem_new_i32(cpu_env,
128 offsetof(CPUSH4State, mach), "MACH");
129 cpu_macl = tcg_global_mem_new_i32(cpu_env,
130 offsetof(CPUSH4State, macl), "MACL");
131 cpu_pr = tcg_global_mem_new_i32(cpu_env,
132 offsetof(CPUSH4State, pr), "PR");
133 cpu_fpscr = tcg_global_mem_new_i32(cpu_env,
134 offsetof(CPUSH4State, fpscr), "FPSCR");
135 cpu_fpul = tcg_global_mem_new_i32(cpu_env,
136 offsetof(CPUSH4State, fpul), "FPUL");
137
138 cpu_flags = tcg_global_mem_new_i32(cpu_env,
139 offsetof(CPUSH4State, flags), "_flags_");
140 cpu_delayed_pc = tcg_global_mem_new_i32(cpu_env,
141 offsetof(CPUSH4State, delayed_pc),
142 "_delayed_pc_");
143 cpu_delayed_cond = tcg_global_mem_new_i32(cpu_env,
144 offsetof(CPUSH4State,
145 delayed_cond),
146 "_delayed_cond_");
147 cpu_lock_addr = tcg_global_mem_new_i32(cpu_env,
148 offsetof(CPUSH4State, lock_addr),
149 "_lock_addr_");
150 cpu_lock_value = tcg_global_mem_new_i32(cpu_env,
151 offsetof(CPUSH4State, lock_value),
152 "_lock_value_");
153
154 for (i = 0; i < 32; i++)
155 cpu_fregs[i] = tcg_global_mem_new_i32(cpu_env,
156 offsetof(CPUSH4State, fregs[i]),
157 fregnames[i]);
158}
159
160void superh_cpu_dump_state(CPUState *cs, FILE *f, int flags)
161{
162 SuperHCPU *cpu = SUPERH_CPU(cs);
163 CPUSH4State *env = &cpu->env;
164 int i;
165
166 qemu_fprintf(f, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
167 env->pc, cpu_read_sr(env), env->pr, env->fpscr);
168 qemu_fprintf(f, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
169 env->spc, env->ssr, env->gbr, env->vbr);
170 qemu_fprintf(f, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
171 env->sgr, env->dbr, env->delayed_pc, env->fpul);
172 for (i = 0; i < 24; i += 4) {
173 qemu_printf("r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
174 i, env->gregs[i], i + 1, env->gregs[i + 1],
175 i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]);
176 }
177 if (env->flags & DELAY_SLOT) {
178 qemu_printf("in delay slot (delayed_pc=0x%08x)\n",
179 env->delayed_pc);
180 } else if (env->flags & DELAY_SLOT_CONDITIONAL) {
181 qemu_printf("in conditional delay slot (delayed_pc=0x%08x)\n",
182 env->delayed_pc);
183 } else if (env->flags & DELAY_SLOT_RTE) {
184 qemu_fprintf(f, "in rte delay slot (delayed_pc=0x%08x)\n",
185 env->delayed_pc);
186 }
187}
188
189static void gen_read_sr(TCGv dst)
190{
191 TCGv t0 = tcg_temp_new();
192 tcg_gen_shli_i32(t0, cpu_sr_q, SR_Q);
193 tcg_gen_or_i32(dst, dst, t0);
194 tcg_gen_shli_i32(t0, cpu_sr_m, SR_M);
195 tcg_gen_or_i32(dst, dst, t0);
196 tcg_gen_shli_i32(t0, cpu_sr_t, SR_T);
197 tcg_gen_or_i32(dst, cpu_sr, t0);
198 tcg_temp_free_i32(t0);
199}
200
201static void gen_write_sr(TCGv src)
202{
203 tcg_gen_andi_i32(cpu_sr, src,
204 ~((1u << SR_Q) | (1u << SR_M) | (1u << SR_T)));
205 tcg_gen_extract_i32(cpu_sr_q, src, SR_Q, 1);
206 tcg_gen_extract_i32(cpu_sr_m, src, SR_M, 1);
207 tcg_gen_extract_i32(cpu_sr_t, src, SR_T, 1);
208}
209
210static inline void gen_save_cpu_state(DisasContext *ctx, bool save_pc)
211{
212 if (save_pc) {
213 tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
214 }
215 if (ctx->delayed_pc != (uint32_t) -1) {
216 tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
217 }
218 if ((ctx->tbflags & TB_FLAG_ENVFLAGS_MASK) != ctx->envflags) {
219 tcg_gen_movi_i32(cpu_flags, ctx->envflags);
220 }
221}
222
223static inline bool use_exit_tb(DisasContext *ctx)
224{
225 return (ctx->tbflags & GUSA_EXCLUSIVE) != 0;
226}
227
228static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
229{
230
231 if (unlikely(ctx->base.singlestep_enabled || use_exit_tb(ctx))) {
232 return false;
233 }
234#ifndef CONFIG_USER_ONLY
235 return (ctx->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
236#else
237 return true;
238#endif
239}
240
241static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
242{
243 if (use_goto_tb(ctx, dest)) {
244 tcg_gen_goto_tb(n);
245 tcg_gen_movi_i32(cpu_pc, dest);
246 tcg_gen_exit_tb(ctx->base.tb, n);
247 } else {
248 tcg_gen_movi_i32(cpu_pc, dest);
249 if (ctx->base.singlestep_enabled) {
250 gen_helper_debug(cpu_env);
251 } else if (use_exit_tb(ctx)) {
252 tcg_gen_exit_tb(NULL, 0);
253 } else {
254 tcg_gen_lookup_and_goto_ptr();
255 }
256 }
257 ctx->base.is_jmp = DISAS_NORETURN;
258}
259
260static void gen_jump(DisasContext * ctx)
261{
262 if (ctx->delayed_pc == -1) {
263
264
265 tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc);
266 tcg_gen_discard_i32(cpu_delayed_pc);
267 if (ctx->base.singlestep_enabled) {
268 gen_helper_debug(cpu_env);
269 } else if (use_exit_tb(ctx)) {
270 tcg_gen_exit_tb(NULL, 0);
271 } else {
272 tcg_gen_lookup_and_goto_ptr();
273 }
274 ctx->base.is_jmp = DISAS_NORETURN;
275 } else {
276 gen_goto_tb(ctx, 0, ctx->delayed_pc);
277 }
278}
279
280
/* Emit a branch to DEST conditioned on SR.T being set (jump_if_true)
   or clear; otherwise continue at the next instruction. */
static void gen_conditional_jump(DisasContext *ctx, target_ulong dest,
                                 bool jump_if_true)
{
    TCGLabel *l1 = gen_new_label();
    /* Branch over the jump when the condition is NOT taken. */
    TCGCond cond_not_taken = jump_if_true ? TCG_COND_EQ : TCG_COND_NE;

    if (ctx->tbflags & GUSA_EXCLUSIVE) {
        /* Inside a gUSA exclusive region we must reach the region's end,
           so a taken branch leaves the region (clearing the GUSA flag
           bits) while the not-taken path simply falls through. */
        tcg_gen_brcondi_i32(cond_not_taken, cpu_sr_t, 0, l1);
        tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~GUSA_MASK);
        /* gen_goto_tb also covers the exit_tb/singlestep cases even
           though use_goto_tb refuses direct chaining here. */
        gen_goto_tb(ctx, 0, dest);
        gen_set_label(l1);
        ctx->base.is_jmp = DISAS_NEXT;
        return;
    }

    gen_save_cpu_state(ctx, false);
    tcg_gen_brcondi_i32(cond_not_taken, cpu_sr_t, 0, l1);
    gen_goto_tb(ctx, 0, dest);
    gen_set_label(l1);
    /* Not taken: continue with the following instruction. */
    gen_goto_tb(ctx, 1, ctx->base.pc_next + 2);
    ctx->base.is_jmp = DISAS_NORETURN;
}
308
309
/* Emit the jump for a delayed conditional branch (bt/s, bf/s) after its
   delay slot: consume the condition latched in cpu_delayed_cond and
   either jump (via gen_jump) or fall through to the next insn. */
static void gen_delayed_conditional_jump(DisasContext * ctx)
{
    TCGLabel *l1 = gen_new_label();
    TCGv ds = tcg_temp_new();

    /* Take a copy of the latched condition and invalidate the global. */
    tcg_gen_mov_i32(ds, cpu_delayed_cond);
    tcg_gen_discard_i32(cpu_delayed_cond);

    if (ctx->tbflags & GUSA_EXCLUSIVE) {
        /* Inside a gUSA exclusive region we must reach the region's end:
           exit the region on a taken branch, otherwise fall through. */
        tcg_gen_brcondi_i32(TCG_COND_EQ, ds, 0, l1);

        /* Taken: leave the exclusive region, then perform the jump. */
        tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~GUSA_MASK);
        gen_jump(ctx);

        gen_set_label(l1);
        ctx->base.is_jmp = DISAS_NEXT;
        return;
    }

    /* Not taken: continue at the instruction after the delay slot. */
    tcg_gen_brcondi_i32(TCG_COND_NE, ds, 0, l1);
    gen_goto_tb(ctx, 1, ctx->base.pc_next + 2);
    gen_set_label(l1);
    gen_jump(ctx);
}
338
339static inline void gen_load_fpr64(DisasContext *ctx, TCGv_i64 t, int reg)
340{
341
342 tcg_debug_assert((reg & 1) == 0);
343 reg ^= ctx->fbank;
344 tcg_gen_concat_i32_i64(t, cpu_fregs[reg + 1], cpu_fregs[reg]);
345}
346
347static inline void gen_store_fpr64(DisasContext *ctx, TCGv_i64 t, int reg)
348{
349
350 tcg_debug_assert((reg & 1) == 0);
351 reg ^= ctx->fbank;
352 tcg_gen_extr_i64_i32(cpu_fregs[reg + 1], cpu_fregs[reg], t);
353}
354
/* Opcode field extractors for the insn in ctx->opcode: Bx_y takes
   bits x..y; a trailing "s" means the field is sign-extended. */
#define B3_0 (ctx->opcode & 0xf)
#define B6_4 ((ctx->opcode >> 4) & 0x7)
#define B7_4 ((ctx->opcode >> 4) & 0xf)
#define B7_0 (ctx->opcode & 0xff)
#define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff))
#define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
  (ctx->opcode & 0xfff))
#define B11_8 ((ctx->opcode >> 8) & 0xf)
#define B15_12 ((ctx->opcode >> 12) & 0xf)

/* Register accessors with the active bank applied via XOR; ALTREG
   selects the opposite general-register bank, FREG the FPU bank. */
#define REG(x) cpu_gregs[(x) ^ ctx->gbank]
#define ALTREG(x) cpu_gregs[(x) ^ ctx->gbank ^ 0x10]
#define FREG(x) cpu_fregs[(x) ^ ctx->fbank]

/* Remap an FP register number by moving bit 0 up to bit 4 — used by the
   FPSCR.SZ paths below for pair addressing (see the 0xf00c handler). */
#define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe))

/* Decode-time guards: each jumps to the corresponding error label in
   the decoder when the insn is illegal in the current context. */
#define CHECK_NOT_DELAY_SLOT \
    if (ctx->envflags & DELAY_SLOT_MASK) {  \
        goto do_illegal_slot;               \
    }

#define CHECK_PRIVILEGED \
    if (IS_USER(ctx)) {                     \
        goto do_illegal;                    \
    }

#define CHECK_FPU_ENABLED \
    if (ctx->tbflags & (1u << SR_FD)) {     \
        goto do_fpu_disabled;               \
    }

#define CHECK_FPSCR_PR_0 \
    if (ctx->tbflags & FPSCR_PR) {          \
        goto do_illegal;                    \
    }

#define CHECK_FPSCR_PR_1 \
    if (!(ctx->tbflags & FPSCR_PR)) {       \
        goto do_illegal;                    \
    }

#define CHECK_SH4A \
    if (!(ctx->features & SH_FEATURE_SH4A)) { \
        goto do_illegal;                      \
    }
400
401static void _decode_opc(DisasContext * ctx)
402{
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423 if (ctx->has_movcal)
424 {
425 int opcode = ctx->opcode & 0xf0ff;
426 if (opcode != 0x0093
427 && opcode != 0x00c3 )
428 {
429 gen_helper_discard_movcal_backup(cpu_env);
430 ctx->has_movcal = 0;
431 }
432 }
433
434#if 0
435 fprintf(stderr, "Translating opcode 0x%04x\n", ctx->opcode);
436#endif
437
438 switch (ctx->opcode) {
439 case 0x0019:
440 tcg_gen_movi_i32(cpu_sr_m, 0);
441 tcg_gen_movi_i32(cpu_sr_q, 0);
442 tcg_gen_movi_i32(cpu_sr_t, 0);
443 return;
444 case 0x000b:
445 CHECK_NOT_DELAY_SLOT
446 tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr);
447 ctx->envflags |= DELAY_SLOT;
448 ctx->delayed_pc = (uint32_t) - 1;
449 return;
450 case 0x0028:
451 tcg_gen_movi_i32(cpu_mach, 0);
452 tcg_gen_movi_i32(cpu_macl, 0);
453 return;
454 case 0x0048:
455 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(1u << SR_S));
456 return;
457 case 0x0008:
458 tcg_gen_movi_i32(cpu_sr_t, 0);
459 return;
460 case 0x0038:
461 CHECK_PRIVILEGED
462 gen_helper_ldtlb(cpu_env);
463 return;
464 case 0x002b:
465 CHECK_PRIVILEGED
466 CHECK_NOT_DELAY_SLOT
467 gen_write_sr(cpu_ssr);
468 tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc);
469 ctx->envflags |= DELAY_SLOT_RTE;
470 ctx->delayed_pc = (uint32_t) - 1;
471 ctx->base.is_jmp = DISAS_STOP;
472 return;
473 case 0x0058:
474 tcg_gen_ori_i32(cpu_sr, cpu_sr, (1u << SR_S));
475 return;
476 case 0x0018:
477 tcg_gen_movi_i32(cpu_sr_t, 1);
478 return;
479 case 0xfbfd:
480 CHECK_FPSCR_PR_0
481 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_FR);
482 ctx->base.is_jmp = DISAS_STOP;
483 return;
484 case 0xf3fd:
485 CHECK_FPSCR_PR_0
486 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_SZ);
487 ctx->base.is_jmp = DISAS_STOP;
488 return;
489 case 0xf7fd:
490 CHECK_SH4A
491 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_PR);
492 ctx->base.is_jmp = DISAS_STOP;
493 return;
494 case 0x0009:
495 return;
496 case 0x001b:
497 CHECK_PRIVILEGED
498 tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next + 2);
499 gen_helper_sleep(cpu_env);
500 return;
501 }
502
503 switch (ctx->opcode & 0xf000) {
504 case 0x1000:
505 {
506 TCGv addr = tcg_temp_new();
507 tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4);
508 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
509 tcg_temp_free(addr);
510 }
511 return;
512 case 0x5000:
513 {
514 TCGv addr = tcg_temp_new();
515 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4);
516 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
517 tcg_temp_free(addr);
518 }
519 return;
520 case 0xe000:
521#ifdef CONFIG_USER_ONLY
522
523
524
525 if (B11_8 == 15 && B7_0s < 0 &&
526 (tb_cflags(ctx->base.tb) & CF_PARALLEL)) {
527 ctx->envflags = deposit32(ctx->envflags, GUSA_SHIFT, 8, B7_0s);
528 ctx->base.is_jmp = DISAS_STOP;
529 }
530#endif
531 tcg_gen_movi_i32(REG(B11_8), B7_0s);
532 return;
533 case 0x9000:
534 {
535 TCGv addr = tcg_const_i32(ctx->base.pc_next + 4 + B7_0 * 2);
536 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
537 tcg_temp_free(addr);
538 }
539 return;
540 case 0xd000:
541 {
542 TCGv addr = tcg_const_i32((ctx->base.pc_next + 4 + B7_0 * 4) & ~3);
543 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
544 tcg_temp_free(addr);
545 }
546 return;
547 case 0x7000:
548 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), B7_0s);
549 return;
550 case 0xa000:
551 CHECK_NOT_DELAY_SLOT
552 ctx->delayed_pc = ctx->base.pc_next + 4 + B11_0s * 2;
553 ctx->envflags |= DELAY_SLOT;
554 return;
555 case 0xb000:
556 CHECK_NOT_DELAY_SLOT
557 tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
558 ctx->delayed_pc = ctx->base.pc_next + 4 + B11_0s * 2;
559 ctx->envflags |= DELAY_SLOT;
560 return;
561 }
562
563 switch (ctx->opcode & 0xf00f) {
564 case 0x6003:
565 tcg_gen_mov_i32(REG(B11_8), REG(B7_4));
566 return;
567 case 0x2000:
568 tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_UB);
569 return;
570 case 0x2001:
571 tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUW);
572 return;
573 case 0x2002:
574 tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUL);
575 return;
576 case 0x6000:
577 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
578 return;
579 case 0x6001:
580 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW);
581 return;
582 case 0x6002:
583 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL);
584 return;
585 case 0x2004:
586 {
587 TCGv addr = tcg_temp_new();
588 tcg_gen_subi_i32(addr, REG(B11_8), 1);
589
590 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
591 tcg_gen_mov_i32(REG(B11_8), addr);
592 tcg_temp_free(addr);
593 }
594 return;
595 case 0x2005:
596 {
597 TCGv addr = tcg_temp_new();
598 tcg_gen_subi_i32(addr, REG(B11_8), 2);
599 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW);
600 tcg_gen_mov_i32(REG(B11_8), addr);
601 tcg_temp_free(addr);
602 }
603 return;
604 case 0x2006:
605 {
606 TCGv addr = tcg_temp_new();
607 tcg_gen_subi_i32(addr, REG(B11_8), 4);
608 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
609 tcg_gen_mov_i32(REG(B11_8), addr);
610 tcg_temp_free(addr);
611 }
612 return;
613 case 0x6004:
614 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
615 if ( B11_8 != B7_4 )
616 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 1);
617 return;
618 case 0x6005:
619 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW);
620 if ( B11_8 != B7_4 )
621 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
622 return;
623 case 0x6006:
624 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL);
625 if ( B11_8 != B7_4 )
626 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
627 return;
628 case 0x0004:
629 {
630 TCGv addr = tcg_temp_new();
631 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
632 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
633 tcg_temp_free(addr);
634 }
635 return;
636 case 0x0005:
637 {
638 TCGv addr = tcg_temp_new();
639 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
640 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW);
641 tcg_temp_free(addr);
642 }
643 return;
644 case 0x0006:
645 {
646 TCGv addr = tcg_temp_new();
647 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
648 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
649 tcg_temp_free(addr);
650 }
651 return;
652 case 0x000c:
653 {
654 TCGv addr = tcg_temp_new();
655 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
656 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_SB);
657 tcg_temp_free(addr);
658 }
659 return;
660 case 0x000d:
661 {
662 TCGv addr = tcg_temp_new();
663 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
664 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
665 tcg_temp_free(addr);
666 }
667 return;
668 case 0x000e:
669 {
670 TCGv addr = tcg_temp_new();
671 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
672 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
673 tcg_temp_free(addr);
674 }
675 return;
676 case 0x6008:
677 {
678 TCGv low = tcg_temp_new();
679 tcg_gen_ext16u_i32(low, REG(B7_4));
680 tcg_gen_bswap16_i32(low, low);
681 tcg_gen_deposit_i32(REG(B11_8), REG(B7_4), low, 0, 16);
682 tcg_temp_free(low);
683 }
684 return;
685 case 0x6009:
686 tcg_gen_rotli_i32(REG(B11_8), REG(B7_4), 16);
687 return;
688 case 0x200d:
689 {
690 TCGv high, low;
691 high = tcg_temp_new();
692 tcg_gen_shli_i32(high, REG(B7_4), 16);
693 low = tcg_temp_new();
694 tcg_gen_shri_i32(low, REG(B11_8), 16);
695 tcg_gen_or_i32(REG(B11_8), high, low);
696 tcg_temp_free(low);
697 tcg_temp_free(high);
698 }
699 return;
700 case 0x300c:
701 tcg_gen_add_i32(REG(B11_8), REG(B11_8), REG(B7_4));
702 return;
703 case 0x300e:
704 {
705 TCGv t0, t1;
706 t0 = tcg_const_tl(0);
707 t1 = tcg_temp_new();
708 tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
709 tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
710 REG(B11_8), t0, t1, cpu_sr_t);
711 tcg_temp_free(t0);
712 tcg_temp_free(t1);
713 }
714 return;
715 case 0x300f:
716 {
717 TCGv t0, t1, t2;
718 t0 = tcg_temp_new();
719 tcg_gen_add_i32(t0, REG(B7_4), REG(B11_8));
720 t1 = tcg_temp_new();
721 tcg_gen_xor_i32(t1, t0, REG(B11_8));
722 t2 = tcg_temp_new();
723 tcg_gen_xor_i32(t2, REG(B7_4), REG(B11_8));
724 tcg_gen_andc_i32(cpu_sr_t, t1, t2);
725 tcg_temp_free(t2);
726 tcg_gen_shri_i32(cpu_sr_t, cpu_sr_t, 31);
727 tcg_temp_free(t1);
728 tcg_gen_mov_i32(REG(B7_4), t0);
729 tcg_temp_free(t0);
730 }
731 return;
732 case 0x2009:
733 tcg_gen_and_i32(REG(B11_8), REG(B11_8), REG(B7_4));
734 return;
735 case 0x3000:
736 tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), REG(B7_4));
737 return;
738 case 0x3003:
739 tcg_gen_setcond_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), REG(B7_4));
740 return;
741 case 0x3007:
742 tcg_gen_setcond_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), REG(B7_4));
743 return;
744 case 0x3006:
745 tcg_gen_setcond_i32(TCG_COND_GTU, cpu_sr_t, REG(B11_8), REG(B7_4));
746 return;
747 case 0x3002:
748 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_sr_t, REG(B11_8), REG(B7_4));
749 return;
750 case 0x200c:
751 {
752 TCGv cmp1 = tcg_temp_new();
753 TCGv cmp2 = tcg_temp_new();
754 tcg_gen_xor_i32(cmp2, REG(B7_4), REG(B11_8));
755 tcg_gen_subi_i32(cmp1, cmp2, 0x01010101);
756 tcg_gen_andc_i32(cmp1, cmp1, cmp2);
757 tcg_gen_andi_i32(cmp1, cmp1, 0x80808080);
758 tcg_gen_setcondi_i32(TCG_COND_NE, cpu_sr_t, cmp1, 0);
759 tcg_temp_free(cmp2);
760 tcg_temp_free(cmp1);
761 }
762 return;
763 case 0x2007:
764 tcg_gen_shri_i32(cpu_sr_q, REG(B11_8), 31);
765 tcg_gen_shri_i32(cpu_sr_m, REG(B7_4), 31);
766 tcg_gen_xor_i32(cpu_sr_t, cpu_sr_q, cpu_sr_m);
767 return;
768 case 0x3004:
769 {
770 TCGv t0 = tcg_temp_new();
771 TCGv t1 = tcg_temp_new();
772 TCGv t2 = tcg_temp_new();
773 TCGv zero = tcg_const_i32(0);
774
775
776
777 tcg_gen_shri_i32(t0, REG(B11_8), 31);
778 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
779 tcg_gen_or_i32(REG(B11_8), REG(B11_8), cpu_sr_t);
780
781
782
783
784
785 tcg_gen_xor_i32(t1, cpu_sr_q, cpu_sr_m);
786 tcg_gen_subi_i32(t1, t1, 1);
787 tcg_gen_neg_i32(t2, REG(B7_4));
788 tcg_gen_movcond_i32(TCG_COND_EQ, t2, t1, zero, REG(B7_4), t2);
789 tcg_gen_add2_i32(REG(B11_8), t1, REG(B11_8), zero, t2, t1);
790
791
792 tcg_gen_andi_i32(t1, t1, 1);
793 tcg_gen_xor_i32(t1, t1, t0);
794 tcg_gen_xori_i32(cpu_sr_t, t1, 1);
795 tcg_gen_xor_i32(cpu_sr_q, cpu_sr_m, t1);
796
797 tcg_temp_free(zero);
798 tcg_temp_free(t2);
799 tcg_temp_free(t1);
800 tcg_temp_free(t0);
801 }
802 return;
803 case 0x300d:
804 tcg_gen_muls2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
805 return;
806 case 0x3005:
807 tcg_gen_mulu2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
808 return;
809 case 0x600e:
810 tcg_gen_ext8s_i32(REG(B11_8), REG(B7_4));
811 return;
812 case 0x600f:
813 tcg_gen_ext16s_i32(REG(B11_8), REG(B7_4));
814 return;
815 case 0x600c:
816 tcg_gen_ext8u_i32(REG(B11_8), REG(B7_4));
817 return;
818 case 0x600d:
819 tcg_gen_ext16u_i32(REG(B11_8), REG(B7_4));
820 return;
821 case 0x000f:
822 {
823 TCGv arg0, arg1;
824 arg0 = tcg_temp_new();
825 tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESL);
826 arg1 = tcg_temp_new();
827 tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL);
828 gen_helper_macl(cpu_env, arg0, arg1);
829 tcg_temp_free(arg1);
830 tcg_temp_free(arg0);
831 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
832 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
833 }
834 return;
835 case 0x400f:
836 {
837 TCGv arg0, arg1;
838 arg0 = tcg_temp_new();
839 tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESL);
840 arg1 = tcg_temp_new();
841 tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL);
842 gen_helper_macw(cpu_env, arg0, arg1);
843 tcg_temp_free(arg1);
844 tcg_temp_free(arg0);
845 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2);
846 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
847 }
848 return;
849 case 0x0007:
850 tcg_gen_mul_i32(cpu_macl, REG(B7_4), REG(B11_8));
851 return;
852 case 0x200f:
853 {
854 TCGv arg0, arg1;
855 arg0 = tcg_temp_new();
856 tcg_gen_ext16s_i32(arg0, REG(B7_4));
857 arg1 = tcg_temp_new();
858 tcg_gen_ext16s_i32(arg1, REG(B11_8));
859 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
860 tcg_temp_free(arg1);
861 tcg_temp_free(arg0);
862 }
863 return;
864 case 0x200e:
865 {
866 TCGv arg0, arg1;
867 arg0 = tcg_temp_new();
868 tcg_gen_ext16u_i32(arg0, REG(B7_4));
869 arg1 = tcg_temp_new();
870 tcg_gen_ext16u_i32(arg1, REG(B11_8));
871 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
872 tcg_temp_free(arg1);
873 tcg_temp_free(arg0);
874 }
875 return;
876 case 0x600b:
877 tcg_gen_neg_i32(REG(B11_8), REG(B7_4));
878 return;
879 case 0x600a:
880 {
881 TCGv t0 = tcg_const_i32(0);
882 tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
883 REG(B7_4), t0, cpu_sr_t, t0);
884 tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
885 t0, t0, REG(B11_8), cpu_sr_t);
886 tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
887 tcg_temp_free(t0);
888 }
889 return;
890 case 0x6007:
891 tcg_gen_not_i32(REG(B11_8), REG(B7_4));
892 return;
893 case 0x200b:
894 tcg_gen_or_i32(REG(B11_8), REG(B11_8), REG(B7_4));
895 return;
896 case 0x400c:
897 {
898 TCGv t0 = tcg_temp_new();
899 TCGv t1 = tcg_temp_new();
900 TCGv t2 = tcg_temp_new();
901
902 tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);
903
904
905 tcg_gen_shl_i32(t1, REG(B11_8), t0);
906
907
908
909 tcg_gen_xori_i32(t0, t0, 0x1f);
910 tcg_gen_sar_i32(t2, REG(B11_8), t0);
911 tcg_gen_sari_i32(t2, t2, 1);
912
913
914 tcg_gen_movi_i32(t0, 0);
915 tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);
916
917 tcg_temp_free(t0);
918 tcg_temp_free(t1);
919 tcg_temp_free(t2);
920 }
921 return;
922 case 0x400d:
923 {
924 TCGv t0 = tcg_temp_new();
925 TCGv t1 = tcg_temp_new();
926 TCGv t2 = tcg_temp_new();
927
928 tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);
929
930
931 tcg_gen_shl_i32(t1, REG(B11_8), t0);
932
933
934
935 tcg_gen_xori_i32(t0, t0, 0x1f);
936 tcg_gen_shr_i32(t2, REG(B11_8), t0);
937 tcg_gen_shri_i32(t2, t2, 1);
938
939
940 tcg_gen_movi_i32(t0, 0);
941 tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);
942
943 tcg_temp_free(t0);
944 tcg_temp_free(t1);
945 tcg_temp_free(t2);
946 }
947 return;
948 case 0x3008:
949 tcg_gen_sub_i32(REG(B11_8), REG(B11_8), REG(B7_4));
950 return;
951 case 0x300a:
952 {
953 TCGv t0, t1;
954 t0 = tcg_const_tl(0);
955 t1 = tcg_temp_new();
956 tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
957 tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
958 REG(B11_8), t0, t1, cpu_sr_t);
959 tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
960 tcg_temp_free(t0);
961 tcg_temp_free(t1);
962 }
963 return;
964 case 0x300b:
965 {
966 TCGv t0, t1, t2;
967 t0 = tcg_temp_new();
968 tcg_gen_sub_i32(t0, REG(B11_8), REG(B7_4));
969 t1 = tcg_temp_new();
970 tcg_gen_xor_i32(t1, t0, REG(B7_4));
971 t2 = tcg_temp_new();
972 tcg_gen_xor_i32(t2, REG(B11_8), REG(B7_4));
973 tcg_gen_and_i32(t1, t1, t2);
974 tcg_temp_free(t2);
975 tcg_gen_shri_i32(cpu_sr_t, t1, 31);
976 tcg_temp_free(t1);
977 tcg_gen_mov_i32(REG(B11_8), t0);
978 tcg_temp_free(t0);
979 }
980 return;
981 case 0x2008:
982 {
983 TCGv val = tcg_temp_new();
984 tcg_gen_and_i32(val, REG(B7_4), REG(B11_8));
985 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
986 tcg_temp_free(val);
987 }
988 return;
989 case 0x200a:
990 tcg_gen_xor_i32(REG(B11_8), REG(B11_8), REG(B7_4));
991 return;
992 case 0xf00c:
993 CHECK_FPU_ENABLED
994 if (ctx->tbflags & FPSCR_SZ) {
995 int xsrc = XHACK(B7_4);
996 int xdst = XHACK(B11_8);
997 tcg_gen_mov_i32(FREG(xdst), FREG(xsrc));
998 tcg_gen_mov_i32(FREG(xdst + 1), FREG(xsrc + 1));
999 } else {
1000 tcg_gen_mov_i32(FREG(B11_8), FREG(B7_4));
1001 }
1002 return;
1003 case 0xf00a:
1004 CHECK_FPU_ENABLED
1005 if (ctx->tbflags & FPSCR_SZ) {
1006 TCGv_i64 fp = tcg_temp_new_i64();
1007 gen_load_fpr64(ctx, fp, XHACK(B7_4));
1008 tcg_gen_qemu_st_i64(fp, REG(B11_8), ctx->memidx, MO_TEQ);
1009 tcg_temp_free_i64(fp);
1010 } else {
1011 tcg_gen_qemu_st_i32(FREG(B7_4), REG(B11_8), ctx->memidx, MO_TEUL);
1012 }
1013 return;
1014 case 0xf008:
1015 CHECK_FPU_ENABLED
1016 if (ctx->tbflags & FPSCR_SZ) {
1017 TCGv_i64 fp = tcg_temp_new_i64();
1018 tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx, MO_TEQ);
1019 gen_store_fpr64(ctx, fp, XHACK(B11_8));
1020 tcg_temp_free_i64(fp);
1021 } else {
1022 tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx, MO_TEUL);
1023 }
1024 return;
1025 case 0xf009:
1026 CHECK_FPU_ENABLED
1027 if (ctx->tbflags & FPSCR_SZ) {
1028 TCGv_i64 fp = tcg_temp_new_i64();
1029 tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx, MO_TEQ);
1030 gen_store_fpr64(ctx, fp, XHACK(B11_8));
1031 tcg_temp_free_i64(fp);
1032 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8);
1033 } else {
1034 tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx, MO_TEUL);
1035 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
1036 }
1037 return;
1038 case 0xf00b:
1039 CHECK_FPU_ENABLED
1040 {
1041 TCGv addr = tcg_temp_new_i32();
1042 if (ctx->tbflags & FPSCR_SZ) {
1043 TCGv_i64 fp = tcg_temp_new_i64();
1044 gen_load_fpr64(ctx, fp, XHACK(B7_4));
1045 tcg_gen_subi_i32(addr, REG(B11_8), 8);
1046 tcg_gen_qemu_st_i64(fp, addr, ctx->memidx, MO_TEQ);
1047 tcg_temp_free_i64(fp);
1048 } else {
1049 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1050 tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx, MO_TEUL);
1051 }
1052 tcg_gen_mov_i32(REG(B11_8), addr);
1053 tcg_temp_free(addr);
1054 }
1055 return;
1056 case 0xf006:
1057 CHECK_FPU_ENABLED
1058 {
1059 TCGv addr = tcg_temp_new_i32();
1060 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
1061 if (ctx->tbflags & FPSCR_SZ) {
1062 TCGv_i64 fp = tcg_temp_new_i64();
1063 tcg_gen_qemu_ld_i64(fp, addr, ctx->memidx, MO_TEQ);
1064 gen_store_fpr64(ctx, fp, XHACK(B11_8));
1065 tcg_temp_free_i64(fp);
1066 } else {
1067 tcg_gen_qemu_ld_i32(FREG(B11_8), addr, ctx->memidx, MO_TEUL);
1068 }
1069 tcg_temp_free(addr);
1070 }
1071 return;
1072 case 0xf007:
1073 CHECK_FPU_ENABLED
1074 {
1075 TCGv addr = tcg_temp_new();
1076 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
1077 if (ctx->tbflags & FPSCR_SZ) {
1078 TCGv_i64 fp = tcg_temp_new_i64();
1079 gen_load_fpr64(ctx, fp, XHACK(B7_4));
1080 tcg_gen_qemu_st_i64(fp, addr, ctx->memidx, MO_TEQ);
1081 tcg_temp_free_i64(fp);
1082 } else {
1083 tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx, MO_TEUL);
1084 }
1085 tcg_temp_free(addr);
1086 }
1087 return;
1088 case 0xf000:
1089 case 0xf001:
1090 case 0xf002:
1091 case 0xf003:
1092 case 0xf004:
1093 case 0xf005:
1094 {
1095 CHECK_FPU_ENABLED
1096 if (ctx->tbflags & FPSCR_PR) {
1097 TCGv_i64 fp0, fp1;
1098
1099 if (ctx->opcode & 0x0110) {
1100 goto do_illegal;
1101 }
1102 fp0 = tcg_temp_new_i64();
1103 fp1 = tcg_temp_new_i64();
1104 gen_load_fpr64(ctx, fp0, B11_8);
1105 gen_load_fpr64(ctx, fp1, B7_4);
1106 switch (ctx->opcode & 0xf00f) {
1107 case 0xf000:
1108 gen_helper_fadd_DT(fp0, cpu_env, fp0, fp1);
1109 break;
1110 case 0xf001:
1111 gen_helper_fsub_DT(fp0, cpu_env, fp0, fp1);
1112 break;
1113 case 0xf002:
1114 gen_helper_fmul_DT(fp0, cpu_env, fp0, fp1);
1115 break;
1116 case 0xf003:
1117 gen_helper_fdiv_DT(fp0, cpu_env, fp0, fp1);
1118 break;
1119 case 0xf004:
1120 gen_helper_fcmp_eq_DT(cpu_sr_t, cpu_env, fp0, fp1);
1121 return;
1122 case 0xf005:
1123 gen_helper_fcmp_gt_DT(cpu_sr_t, cpu_env, fp0, fp1);
1124 return;
1125 }
1126 gen_store_fpr64(ctx, fp0, B11_8);
1127 tcg_temp_free_i64(fp0);
1128 tcg_temp_free_i64(fp1);
1129 } else {
1130 switch (ctx->opcode & 0xf00f) {
1131 case 0xf000:
1132 gen_helper_fadd_FT(FREG(B11_8), cpu_env,
1133 FREG(B11_8), FREG(B7_4));
1134 break;
1135 case 0xf001:
1136 gen_helper_fsub_FT(FREG(B11_8), cpu_env,
1137 FREG(B11_8), FREG(B7_4));
1138 break;
1139 case 0xf002:
1140 gen_helper_fmul_FT(FREG(B11_8), cpu_env,
1141 FREG(B11_8), FREG(B7_4));
1142 break;
1143 case 0xf003:
1144 gen_helper_fdiv_FT(FREG(B11_8), cpu_env,
1145 FREG(B11_8), FREG(B7_4));
1146 break;
1147 case 0xf004:
1148 gen_helper_fcmp_eq_FT(cpu_sr_t, cpu_env,
1149 FREG(B11_8), FREG(B7_4));
1150 return;
1151 case 0xf005:
1152 gen_helper_fcmp_gt_FT(cpu_sr_t, cpu_env,
1153 FREG(B11_8), FREG(B7_4));
1154 return;
1155 }
1156 }
1157 }
1158 return;
1159 case 0xf00e:
1160 CHECK_FPU_ENABLED
1161 CHECK_FPSCR_PR_0
1162 gen_helper_fmac_FT(FREG(B11_8), cpu_env,
1163 FREG(0), FREG(B7_4), FREG(B11_8));
1164 return;
1165 }
1166
1167 switch (ctx->opcode & 0xff00) {
1168 case 0xc900:
1169 tcg_gen_andi_i32(REG(0), REG(0), B7_0);
1170 return;
1171 case 0xcd00:
1172 {
1173 TCGv addr, val;
1174 addr = tcg_temp_new();
1175 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1176 val = tcg_temp_new();
1177 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1178 tcg_gen_andi_i32(val, val, B7_0);
1179 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1180 tcg_temp_free(val);
1181 tcg_temp_free(addr);
1182 }
1183 return;
1184 case 0x8b00:
1185 CHECK_NOT_DELAY_SLOT
1186 gen_conditional_jump(ctx, ctx->base.pc_next + 4 + B7_0s * 2, false);
1187 return;
1188 case 0x8f00:
1189 CHECK_NOT_DELAY_SLOT
1190 tcg_gen_xori_i32(cpu_delayed_cond, cpu_sr_t, 1);
1191 ctx->delayed_pc = ctx->base.pc_next + 4 + B7_0s * 2;
1192 ctx->envflags |= DELAY_SLOT_CONDITIONAL;
1193 return;
1194 case 0x8900:
1195 CHECK_NOT_DELAY_SLOT
1196 gen_conditional_jump(ctx, ctx->base.pc_next + 4 + B7_0s * 2, true);
1197 return;
1198 case 0x8d00:
1199 CHECK_NOT_DELAY_SLOT
1200 tcg_gen_mov_i32(cpu_delayed_cond, cpu_sr_t);
1201 ctx->delayed_pc = ctx->base.pc_next + 4 + B7_0s * 2;
1202 ctx->envflags |= DELAY_SLOT_CONDITIONAL;
1203 return;
1204 case 0x8800:
1205 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(0), B7_0s);
1206 return;
1207 case 0xc400:
1208 {
1209 TCGv addr = tcg_temp_new();
1210 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1211 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
1212 tcg_temp_free(addr);
1213 }
1214 return;
1215 case 0xc500:
1216 {
1217 TCGv addr = tcg_temp_new();
1218 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1219 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
1220 tcg_temp_free(addr);
1221 }
1222 return;
1223 case 0xc600:
1224 {
1225 TCGv addr = tcg_temp_new();
1226 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1227 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESL);
1228 tcg_temp_free(addr);
1229 }
1230 return;
1231 case 0xc000:
1232 {
1233 TCGv addr = tcg_temp_new();
1234 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1235 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
1236 tcg_temp_free(addr);
1237 }
1238 return;
1239 case 0xc100:
1240 {
1241 TCGv addr = tcg_temp_new();
1242 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1243 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW);
1244 tcg_temp_free(addr);
1245 }
1246 return;
1247 case 0xc200:
1248 {
1249 TCGv addr = tcg_temp_new();
1250 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1251 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUL);
1252 tcg_temp_free(addr);
1253 }
1254 return;
1255 case 0x8000:
1256 {
1257 TCGv addr = tcg_temp_new();
1258 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1259 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
1260 tcg_temp_free(addr);
1261 }
1262 return;
1263 case 0x8100:
1264 {
1265 TCGv addr = tcg_temp_new();
1266 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1267 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW);
1268 tcg_temp_free(addr);
1269 }
1270 return;
1271 case 0x8400:
1272 {
1273 TCGv addr = tcg_temp_new();
1274 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1275 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
1276 tcg_temp_free(addr);
1277 }
1278 return;
1279 case 0x8500:
1280 {
1281 TCGv addr = tcg_temp_new();
1282 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1283 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
1284 tcg_temp_free(addr);
1285 }
1286 return;
1287 case 0xc700:
1288 tcg_gen_movi_i32(REG(0), ((ctx->base.pc_next & 0xfffffffc) +
1289 4 + B7_0 * 4) & ~3);
1290 return;
1291 case 0xcb00:
1292 tcg_gen_ori_i32(REG(0), REG(0), B7_0);
1293 return;
1294 case 0xcf00:
1295 {
1296 TCGv addr, val;
1297 addr = tcg_temp_new();
1298 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1299 val = tcg_temp_new();
1300 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1301 tcg_gen_ori_i32(val, val, B7_0);
1302 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1303 tcg_temp_free(val);
1304 tcg_temp_free(addr);
1305 }
1306 return;
1307 case 0xc300:
1308 {
1309 TCGv imm;
1310 CHECK_NOT_DELAY_SLOT
1311 gen_save_cpu_state(ctx, true);
1312 imm = tcg_const_i32(B7_0);
1313 gen_helper_trapa(cpu_env, imm);
1314 tcg_temp_free(imm);
1315 ctx->base.is_jmp = DISAS_NORETURN;
1316 }
1317 return;
1318 case 0xc800:
1319 {
1320 TCGv val = tcg_temp_new();
1321 tcg_gen_andi_i32(val, REG(0), B7_0);
1322 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
1323 tcg_temp_free(val);
1324 }
1325 return;
1326 case 0xcc00:
1327 {
1328 TCGv val = tcg_temp_new();
1329 tcg_gen_add_i32(val, REG(0), cpu_gbr);
1330 tcg_gen_qemu_ld_i32(val, val, ctx->memidx, MO_UB);
1331 tcg_gen_andi_i32(val, val, B7_0);
1332 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
1333 tcg_temp_free(val);
1334 }
1335 return;
1336 case 0xca00:
1337 tcg_gen_xori_i32(REG(0), REG(0), B7_0);
1338 return;
1339 case 0xce00:
1340 {
1341 TCGv addr, val;
1342 addr = tcg_temp_new();
1343 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1344 val = tcg_temp_new();
1345 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1346 tcg_gen_xori_i32(val, val, B7_0);
1347 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1348 tcg_temp_free(val);
1349 tcg_temp_free(addr);
1350 }
1351 return;
1352 }
1353
1354 switch (ctx->opcode & 0xf08f) {
1355 case 0x408e:
1356 CHECK_PRIVILEGED
1357 tcg_gen_mov_i32(ALTREG(B6_4), REG(B11_8));
1358 return;
1359 case 0x4087:
1360 CHECK_PRIVILEGED
1361 tcg_gen_qemu_ld_i32(ALTREG(B6_4), REG(B11_8), ctx->memidx, MO_TESL);
1362 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1363 return;
1364 case 0x0082:
1365 CHECK_PRIVILEGED
1366 tcg_gen_mov_i32(REG(B11_8), ALTREG(B6_4));
1367 return;
1368 case 0x4083:
1369 CHECK_PRIVILEGED
1370 {
1371 TCGv addr = tcg_temp_new();
1372 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1373 tcg_gen_qemu_st_i32(ALTREG(B6_4), addr, ctx->memidx, MO_TEUL);
1374 tcg_gen_mov_i32(REG(B11_8), addr);
1375 tcg_temp_free(addr);
1376 }
1377 return;
1378 }
1379
1380 switch (ctx->opcode & 0xf0ff) {
1381 case 0x0023:
1382 CHECK_NOT_DELAY_SLOT
1383 tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->base.pc_next + 4);
1384 ctx->envflags |= DELAY_SLOT;
1385 ctx->delayed_pc = (uint32_t) - 1;
1386 return;
1387 case 0x0003:
1388 CHECK_NOT_DELAY_SLOT
1389 tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
1390 tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr);
1391 ctx->envflags |= DELAY_SLOT;
1392 ctx->delayed_pc = (uint32_t) - 1;
1393 return;
1394 case 0x4015:
1395 tcg_gen_setcondi_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), 0);
1396 return;
1397 case 0x4011:
1398 tcg_gen_setcondi_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), 0);
1399 return;
1400 case 0x4010:
1401 tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 1);
1402 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), 0);
1403 return;
1404 case 0x402b:
1405 CHECK_NOT_DELAY_SLOT
1406 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1407 ctx->envflags |= DELAY_SLOT;
1408 ctx->delayed_pc = (uint32_t) - 1;
1409 return;
1410 case 0x400b:
1411 CHECK_NOT_DELAY_SLOT
1412 tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
1413 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1414 ctx->envflags |= DELAY_SLOT;
1415 ctx->delayed_pc = (uint32_t) - 1;
1416 return;
1417 case 0x400e:
1418 CHECK_PRIVILEGED
1419 {
1420 TCGv val = tcg_temp_new();
1421 tcg_gen_andi_i32(val, REG(B11_8), 0x700083f3);
1422 gen_write_sr(val);
1423 tcg_temp_free(val);
1424 ctx->base.is_jmp = DISAS_STOP;
1425 }
1426 return;
1427 case 0x4007:
1428 CHECK_PRIVILEGED
1429 {
1430 TCGv val = tcg_temp_new();
1431 tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TESL);
1432 tcg_gen_andi_i32(val, val, 0x700083f3);
1433 gen_write_sr(val);
1434 tcg_temp_free(val);
1435 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1436 ctx->base.is_jmp = DISAS_STOP;
1437 }
1438 return;
1439 case 0x0002:
1440 CHECK_PRIVILEGED
1441 gen_read_sr(REG(B11_8));
1442 return;
1443 case 0x4003:
1444 CHECK_PRIVILEGED
1445 {
1446 TCGv addr = tcg_temp_new();
1447 TCGv val = tcg_temp_new();
1448 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1449 gen_read_sr(val);
1450 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL);
1451 tcg_gen_mov_i32(REG(B11_8), addr);
1452 tcg_temp_free(val);
1453 tcg_temp_free(addr);
1454 }
1455 return;
1456#define LD(reg,ldnum,ldpnum,prechk) \
1457 case ldnum: \
1458 prechk \
1459 tcg_gen_mov_i32 (cpu_##reg, REG(B11_8)); \
1460 return; \
1461 case ldpnum: \
1462 prechk \
1463 tcg_gen_qemu_ld_i32(cpu_##reg, REG(B11_8), ctx->memidx, MO_TESL); \
1464 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); \
1465 return;
1466#define ST(reg,stnum,stpnum,prechk) \
1467 case stnum: \
1468 prechk \
1469 tcg_gen_mov_i32 (REG(B11_8), cpu_##reg); \
1470 return; \
1471 case stpnum: \
1472 prechk \
1473 { \
1474 TCGv addr = tcg_temp_new(); \
1475 tcg_gen_subi_i32(addr, REG(B11_8), 4); \
1476 tcg_gen_qemu_st_i32(cpu_##reg, addr, ctx->memidx, MO_TEUL); \
1477 tcg_gen_mov_i32(REG(B11_8), addr); \
1478 tcg_temp_free(addr); \
1479 } \
1480 return;
1481#define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \
1482 LD(reg,ldnum,ldpnum,prechk) \
1483 ST(reg,stnum,stpnum,prechk)
1484 LDST(gbr, 0x401e, 0x4017, 0x0012, 0x4013, {})
1485 LDST(vbr, 0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED)
1486 LDST(ssr, 0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED)
1487 LDST(spc, 0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED)
1488 ST(sgr, 0x003a, 0x4032, CHECK_PRIVILEGED)
1489 LD(sgr, 0x403a, 0x4036, CHECK_PRIVILEGED CHECK_SH4A)
1490 LDST(dbr, 0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED)
1491 LDST(mach, 0x400a, 0x4006, 0x000a, 0x4002, {})
1492 LDST(macl, 0x401a, 0x4016, 0x001a, 0x4012, {})
1493 LDST(pr, 0x402a, 0x4026, 0x002a, 0x4022, {})
1494 LDST(fpul, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED})
1495 case 0x406a:
1496 CHECK_FPU_ENABLED
1497 gen_helper_ld_fpscr(cpu_env, REG(B11_8));
1498 ctx->base.is_jmp = DISAS_STOP;
1499 return;
1500 case 0x4066:
1501 CHECK_FPU_ENABLED
1502 {
1503 TCGv addr = tcg_temp_new();
1504 tcg_gen_qemu_ld_i32(addr, REG(B11_8), ctx->memidx, MO_TESL);
1505 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1506 gen_helper_ld_fpscr(cpu_env, addr);
1507 tcg_temp_free(addr);
1508 ctx->base.is_jmp = DISAS_STOP;
1509 }
1510 return;
1511 case 0x006a:
1512 CHECK_FPU_ENABLED
1513 tcg_gen_andi_i32(REG(B11_8), cpu_fpscr, 0x003fffff);
1514 return;
1515 case 0x4062:
1516 CHECK_FPU_ENABLED
1517 {
1518 TCGv addr, val;
1519 val = tcg_temp_new();
1520 tcg_gen_andi_i32(val, cpu_fpscr, 0x003fffff);
1521 addr = tcg_temp_new();
1522 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1523 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL);
1524 tcg_gen_mov_i32(REG(B11_8), addr);
1525 tcg_temp_free(addr);
1526 tcg_temp_free(val);
1527 }
1528 return;
1529 case 0x00c3:
1530 {
1531 TCGv val = tcg_temp_new();
1532 tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TEUL);
1533 gen_helper_movcal(cpu_env, REG(B11_8), val);
1534 tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
1535 tcg_temp_free(val);
1536 }
1537 ctx->has_movcal = 1;
1538 return;
1539 case 0x40a9:
1540 CHECK_SH4A
1541
1542 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
1543 MO_TEUL | MO_UNALN);
1544 return;
1545 case 0x40e9:
1546 CHECK_SH4A
1547
1548 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
1549 MO_TEUL | MO_UNALN);
1550 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1551 return;
1552 case 0x0029:
1553 tcg_gen_mov_i32(REG(B11_8), cpu_sr_t);
1554 return;
1555 case 0x0073:
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565 CHECK_SH4A
1566 {
1567 TCGLabel *fail = gen_new_label();
1568 TCGLabel *done = gen_new_label();
1569
1570 if ((tb_cflags(ctx->base.tb) & CF_PARALLEL)) {
1571 TCGv tmp;
1572
1573 tcg_gen_brcond_i32(TCG_COND_NE, REG(B11_8),
1574 cpu_lock_addr, fail);
1575 tmp = tcg_temp_new();
1576 tcg_gen_atomic_cmpxchg_i32(tmp, REG(B11_8), cpu_lock_value,
1577 REG(0), ctx->memidx, MO_TEUL);
1578 tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, tmp, cpu_lock_value);
1579 tcg_temp_free(tmp);
1580 } else {
1581 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_lock_addr, -1, fail);
1582 tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
1583 tcg_gen_movi_i32(cpu_sr_t, 1);
1584 }
1585 tcg_gen_br(done);
1586
1587 gen_set_label(fail);
1588 tcg_gen_movi_i32(cpu_sr_t, 0);
1589
1590 gen_set_label(done);
1591 tcg_gen_movi_i32(cpu_lock_addr, -1);
1592 }
1593 return;
1594 case 0x0063:
1595
1596
1597
1598
1599
1600
1601
1602
1603 CHECK_SH4A
1604 if ((tb_cflags(ctx->base.tb) & CF_PARALLEL)) {
1605 TCGv tmp = tcg_temp_new();
1606 tcg_gen_mov_i32(tmp, REG(B11_8));
1607 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TESL);
1608 tcg_gen_mov_i32(cpu_lock_value, REG(0));
1609 tcg_gen_mov_i32(cpu_lock_addr, tmp);
1610 tcg_temp_free(tmp);
1611 } else {
1612 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TESL);
1613 tcg_gen_movi_i32(cpu_lock_addr, 0);
1614 }
1615 return;
1616 case 0x0093:
1617 {
1618 gen_helper_ocbi(cpu_env, REG(B11_8));
1619 }
1620 return;
1621 case 0x00a3:
1622 case 0x00b3:
1623
1624
1625
1626 return;
1627 case 0x0083:
1628 return;
1629 case 0x00d3:
1630 CHECK_SH4A
1631 return;
1632 case 0x00e3:
1633 CHECK_SH4A
1634 return;
1635 case 0x00ab:
1636 CHECK_SH4A
1637 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1638 return;
1639 case 0x4024:
1640 {
1641 TCGv tmp = tcg_temp_new();
1642 tcg_gen_mov_i32(tmp, cpu_sr_t);
1643 tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
1644 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1645 tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
1646 tcg_temp_free(tmp);
1647 }
1648 return;
1649 case 0x4025:
1650 {
1651 TCGv tmp = tcg_temp_new();
1652 tcg_gen_shli_i32(tmp, cpu_sr_t, 31);
1653 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1654 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1655 tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
1656 tcg_temp_free(tmp);
1657 }
1658 return;
1659 case 0x4004:
1660 tcg_gen_rotli_i32(REG(B11_8), REG(B11_8), 1);
1661 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 0);
1662 return;
1663 case 0x4005:
1664 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 0);
1665 tcg_gen_rotri_i32(REG(B11_8), REG(B11_8), 1);
1666 return;
1667 case 0x4000:
1668 case 0x4020:
1669 tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
1670 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1671 return;
1672 case 0x4021:
1673 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1674 tcg_gen_sari_i32(REG(B11_8), REG(B11_8), 1);
1675 return;
1676 case 0x4001:
1677 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1678 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1679 return;
1680 case 0x4008:
1681 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 2);
1682 return;
1683 case 0x4018:
1684 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 8);
1685 return;
1686 case 0x4028:
1687 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 16);
1688 return;
1689 case 0x4009:
1690 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 2);
1691 return;
1692 case 0x4019:
1693 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 8);
1694 return;
1695 case 0x4029:
1696 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 16);
1697 return;
1698 case 0x401b:
1699 {
1700 TCGv val = tcg_const_i32(0x80);
1701 tcg_gen_atomic_fetch_or_i32(val, REG(B11_8), val,
1702 ctx->memidx, MO_UB);
1703 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
1704 tcg_temp_free(val);
1705 }
1706 return;
1707 case 0xf00d:
1708 CHECK_FPU_ENABLED
1709 tcg_gen_mov_i32(FREG(B11_8), cpu_fpul);
1710 return;
1711 case 0xf01d:
1712 CHECK_FPU_ENABLED
1713 tcg_gen_mov_i32(cpu_fpul, FREG(B11_8));
1714 return;
1715 case 0xf02d:
1716 CHECK_FPU_ENABLED
1717 if (ctx->tbflags & FPSCR_PR) {
1718 TCGv_i64 fp;
1719 if (ctx->opcode & 0x0100) {
1720 goto do_illegal;
1721 }
1722 fp = tcg_temp_new_i64();
1723 gen_helper_float_DT(fp, cpu_env, cpu_fpul);
1724 gen_store_fpr64(ctx, fp, B11_8);
1725 tcg_temp_free_i64(fp);
1726 }
1727 else {
1728 gen_helper_float_FT(FREG(B11_8), cpu_env, cpu_fpul);
1729 }
1730 return;
1731 case 0xf03d:
1732 CHECK_FPU_ENABLED
1733 if (ctx->tbflags & FPSCR_PR) {
1734 TCGv_i64 fp;
1735 if (ctx->opcode & 0x0100) {
1736 goto do_illegal;
1737 }
1738 fp = tcg_temp_new_i64();
1739 gen_load_fpr64(ctx, fp, B11_8);
1740 gen_helper_ftrc_DT(cpu_fpul, cpu_env, fp);
1741 tcg_temp_free_i64(fp);
1742 }
1743 else {
1744 gen_helper_ftrc_FT(cpu_fpul, cpu_env, FREG(B11_8));
1745 }
1746 return;
1747 case 0xf04d:
1748 CHECK_FPU_ENABLED
1749 tcg_gen_xori_i32(FREG(B11_8), FREG(B11_8), 0x80000000);
1750 return;
1751 case 0xf05d:
1752 CHECK_FPU_ENABLED
1753 tcg_gen_andi_i32(FREG(B11_8), FREG(B11_8), 0x7fffffff);
1754 return;
1755 case 0xf06d:
1756 CHECK_FPU_ENABLED
1757 if (ctx->tbflags & FPSCR_PR) {
1758 if (ctx->opcode & 0x0100) {
1759 goto do_illegal;
1760 }
1761 TCGv_i64 fp = tcg_temp_new_i64();
1762 gen_load_fpr64(ctx, fp, B11_8);
1763 gen_helper_fsqrt_DT(fp, cpu_env, fp);
1764 gen_store_fpr64(ctx, fp, B11_8);
1765 tcg_temp_free_i64(fp);
1766 } else {
1767 gen_helper_fsqrt_FT(FREG(B11_8), cpu_env, FREG(B11_8));
1768 }
1769 return;
1770 case 0xf07d:
1771 CHECK_FPU_ENABLED
1772 CHECK_FPSCR_PR_0
1773 gen_helper_fsrra_FT(FREG(B11_8), cpu_env, FREG(B11_8));
1774 break;
1775 case 0xf08d:
1776 CHECK_FPU_ENABLED
1777 CHECK_FPSCR_PR_0
1778 tcg_gen_movi_i32(FREG(B11_8), 0);
1779 return;
1780 case 0xf09d:
1781 CHECK_FPU_ENABLED
1782 CHECK_FPSCR_PR_0
1783 tcg_gen_movi_i32(FREG(B11_8), 0x3f800000);
1784 return;
1785 case 0xf0ad:
1786 CHECK_FPU_ENABLED
1787 {
1788 TCGv_i64 fp = tcg_temp_new_i64();
1789 gen_helper_fcnvsd_FT_DT(fp, cpu_env, cpu_fpul);
1790 gen_store_fpr64(ctx, fp, B11_8);
1791 tcg_temp_free_i64(fp);
1792 }
1793 return;
1794 case 0xf0bd:
1795 CHECK_FPU_ENABLED
1796 {
1797 TCGv_i64 fp = tcg_temp_new_i64();
1798 gen_load_fpr64(ctx, fp, B11_8);
1799 gen_helper_fcnvds_DT_FT(cpu_fpul, cpu_env, fp);
1800 tcg_temp_free_i64(fp);
1801 }
1802 return;
1803 case 0xf0ed:
1804 CHECK_FPU_ENABLED
1805 CHECK_FPSCR_PR_1
1806 {
1807 TCGv m = tcg_const_i32((ctx->opcode >> 8) & 3);
1808 TCGv n = tcg_const_i32((ctx->opcode >> 10) & 3);
1809 gen_helper_fipr(cpu_env, m, n);
1810 tcg_temp_free(m);
1811 tcg_temp_free(n);
1812 return;
1813 }
1814 break;
1815 case 0xf0fd:
1816 CHECK_FPU_ENABLED
1817 CHECK_FPSCR_PR_1
1818 {
1819 if ((ctx->opcode & 0x0300) != 0x0100) {
1820 goto do_illegal;
1821 }
1822 TCGv n = tcg_const_i32((ctx->opcode >> 10) & 3);
1823 gen_helper_ftrv(cpu_env, n);
1824 tcg_temp_free(n);
1825 return;
1826 }
1827 break;
1828 }
1829#if 0
1830 fprintf(stderr, "unknown instruction 0x%04x at pc 0x%08x\n",
1831 ctx->opcode, ctx->base.pc_next);
1832 fflush(stderr);
1833#endif
1834 do_illegal:
1835 if (ctx->envflags & DELAY_SLOT_MASK) {
1836 do_illegal_slot:
1837 gen_save_cpu_state(ctx, true);
1838 gen_helper_raise_slot_illegal_instruction(cpu_env);
1839 } else {
1840 gen_save_cpu_state(ctx, true);
1841 gen_helper_raise_illegal_instruction(cpu_env);
1842 }
1843 ctx->base.is_jmp = DISAS_NORETURN;
1844 return;
1845
1846 do_fpu_disabled:
1847 gen_save_cpu_state(ctx, true);
1848 if (ctx->envflags & DELAY_SLOT_MASK) {
1849 gen_helper_raise_slot_fpu_disable(cpu_env);
1850 } else {
1851 gen_helper_raise_fpu_disable(cpu_env);
1852 }
1853 ctx->base.is_jmp = DISAS_NORETURN;
1854 return;
1855}
1856
/*
 * Decode and emit one guest instruction, then finish any branch whose
 * delay slot that instruction occupied.
 *
 * envflags carried DELAY_SLOT / DELAY_SLOT_CONDITIONAL into this insn;
 * after translating the slot insn we must emit the pending (possibly
 * conditional) jump recorded by the branch instruction.
 */
static void decode_opc(DisasContext * ctx)
{
    /* Snapshot the flags: _decode_opc may set new delay-slot bits for
       a branch we just decoded, which must not be confused with the
       slot we are currently completing.  */
    uint32_t old_flags = ctx->envflags;

    _decode_opc(ctx);

    if (old_flags & DELAY_SLOT_MASK) {
        /* The insn just emitted was a delay slot; leave delay-slot state.  */
        ctx->envflags &= ~DELAY_SLOT_MASK;

        /* When in an exclusive (gUSA) region, we must continue to the
           end of the region for a conditional branch, so keep the GUSA
           bits and emit only the delayed conditional jump.  */
        if (ctx->tbflags & GUSA_EXCLUSIVE
            && old_flags & DELAY_SLOT_CONDITIONAL) {
            gen_delayed_conditional_jump(ctx);
            return;
        }

        /* Otherwise this is probably an invalid gUSA region; drop the
           GUSA bits so the next TB doesn't see them.  */
        ctx->envflags &= ~GUSA_MASK;

        /* Publish the updated flags before leaving the TB.  */
        tcg_gen_movi_i32(cpu_flags, ctx->envflags);
        if (old_flags & DELAY_SLOT_CONDITIONAL) {
            gen_delayed_conditional_jump(ctx);
        } else {
            gen_jump(ctx);
        }
    }
}
1886
1887#ifdef CONFIG_USER_ONLY
1888
1889
1890
1891
1892
1893
1894
1895
1896
/*
 * Recognize a gUSA ("g User Space Atomicity") restartable sequence and
 * translate it into a single host atomic operation.  The region spans
 * [pc, pc_end) where pc_end comes from the TB's cs_base.  Sequences we
 * cannot match fall through to "fail", which restarts the region under
 * the exclusive interpreter via gen_helper_exclusive().
 */
static void decode_gusa(DisasContext *ctx, CPUSH4State *env)
{
    uint16_t insns[5];
    int ld_adr, ld_dst, ld_mop;
    int op_dst, op_src, op_opc;
    int mv_src, mt_dst, st_src, st_mop;
    TCGv op_arg;
    uint32_t pc = ctx->base.pc_next;
    uint32_t pc_end = ctx->base.tb->cs_base;
    int max_insns = (pc_end - pc) / 2;
    int i;

    /* The state machine below consumes only a handful of insns;
       longer regions cannot match any known pattern.  */
    if (max_insns > ARRAY_SIZE(insns)) {
        goto fail;
    }

    /* Pre-read all of the insns for the region.  */
    for (i = 0; i < max_insns; ++i) {
        insns[i] = translator_lduw(env, pc + i * 2);
    }

    /* -1 means "not seen yet" for each slot of the pattern.  */
    ld_adr = ld_dst = ld_mop = -1;
    mv_src = -1;
    op_dst = op_src = op_opc = -1;
    mt_dst = -1;
    st_src = st_mop = -1;
    op_arg = NULL;
    i = 0;

#define NEXT_INSN \
    do { if (i >= max_insns) goto fail; ctx->opcode = insns[i++]; } while (0)

    /*
     * Expect a load to begin the region.
     */
    NEXT_INSN;
    switch (ctx->opcode & 0xf00f) {
    case 0x6000: /* mov.b @Rm,Rn */
        ld_mop = MO_SB;
        break;
    case 0x6001: /* mov.w @Rm,Rn */
        ld_mop = MO_TESW;
        break;
    case 0x6002: /* mov.l @Rm,Rn */
        ld_mop = MO_TESL;
        break;
    default:
        goto fail;
    }
    ld_adr = B7_4;
    ld_dst = B11_8;
    if (ld_adr == ld_dst) {
        goto fail;
    }
    /* Unless we see a mov, any two-operand operation must use ld_dst.  */
    op_dst = ld_dst;

    /*
     * Expect an optional register move.
     */
    NEXT_INSN;
    switch (ctx->opcode & 0xf00f) {
    case 0x6003: /* mov Rm,Rn */
        /* Either the load value is being saved aside, or another
           input is being copied so ld_dst need not be clobbered.  */
        op_dst = B11_8;
        mv_src = B7_4;
        if (op_dst == ld_dst) {
            /* Overwriting the load output -- cannot match.  */
            goto fail;
        }
        if (mv_src != ld_dst) {
            /* Copying a new input; constrain op_src to the load output.  */
            op_src = ld_dst;
        }
        break;

    default:
        /* Put back and re-examine the insn as the operation.  */
        --i;
    }

    /*
     * Expect the operation.
     */
    NEXT_INSN;
    switch (ctx->opcode & 0xf00f) {
    case 0x300c: /* add Rm,Rn */
        op_opc = INDEX_op_add_i32;
        goto do_reg_op;
    case 0x2009: /* and Rm,Rn */
        op_opc = INDEX_op_and_i32;
        goto do_reg_op;
    case 0x200a: /* xor Rm,Rn */
        op_opc = INDEX_op_xor_i32;
        goto do_reg_op;
    case 0x200b: /* or Rm,Rn */
        op_opc = INDEX_op_or_i32;
    do_reg_op:
        /* The operation register must match, and the other input
           cannot depend on the load.  */
        if (op_dst != B11_8) {
            goto fail;
        }
        if (op_src < 0) {
            /* Unconstrained input.  */
            op_src = B7_4;
        } else if (op_src == B7_4) {
            /* Constrained input matched the load.  All of these
               operations are commutative, so "swap" the operands by
               taking the move source as the second argument.  */
            op_src = mv_src;
        } else {
            goto fail;
        }
        op_arg = REG(op_src);
        break;

    case 0x6007: /* not Rm,Rn */
        if (ld_dst != B7_4 || mv_src >= 0) {
            goto fail;
        }
        op_dst = B11_8;
        op_opc = INDEX_op_xor_i32;
        op_arg = tcg_const_i32(-1);
        break;

    case 0x7000 ... 0x700f: /* add #imm,Rn */
        if (op_dst != B11_8 || mv_src >= 0) {
            goto fail;
        }
        op_opc = INDEX_op_add_i32;
        op_arg = tcg_const_i32(B7_0s);
        break;

    case 0x3000: /* cmp/eq Rm,Rn */
        /* Middle of a compare-and-swap: exactly one of Rm/Rn must be
           the load output, and no prior mov may have occurred.  */
        if ((ld_dst == B11_8) + (ld_dst == B7_4) != 1 || mv_src >= 0) {
            goto fail;
        }
        op_opc = INDEX_op_setcond_i32; /* placeholder for CAS */
        op_src = (ld_dst == B11_8 ? B7_4 : B11_8);
        op_arg = REG(op_src);

        NEXT_INSN;
        switch (ctx->opcode & 0xff00) {
        case 0x8b00: /* bf label */
        case 0x8f00: /* bf/s label */
            /* The branch must target the end of the region.  */
            if (pc + (i + 1 + B7_0s) * 2 != pc_end) {
                goto fail;
            }
            if ((ctx->opcode & 0xff00) == 0x8b00) { /* bf label */
                break;
            }
            /* For bf/s, expect "movt Rn" in the delay slot, storing
               the comparison result unconditionally (older gcc).  */
            NEXT_INSN;
            if ((ctx->opcode & 0xf0ff) == 0x0029) { /* movt Rn */
                mt_dst = B11_8;
            } else {
                goto fail;
            }
            break;

        default:
            goto fail;
        }
        break;

    case 0x2008: /* tst Rm,Rn */
        /* Compare-and-swap against zero: both operands must be the
           load output.  */
        if (ld_dst != B11_8 || ld_dst != B7_4 || mv_src >= 0) {
            goto fail;
        }
        op_opc = INDEX_op_setcond_i32;
        op_arg = tcg_const_i32(0);

        NEXT_INSN;
        if ((ctx->opcode & 0xff00) != 0x8900 /* bt label */
            || pc + (i + 1 + B7_0s) * 2 != pc_end) {
            goto fail;
        }
        break;

    default:
        /* Put back and re-examine the insn as the store.  */
        --i;
    }

    /*
     * Expect the store; it must be the final insn of the region.
     */
    if (i != max_insns - 1) {
        goto fail;
    }
    NEXT_INSN;
    switch (ctx->opcode & 0xf00f) {
    case 0x2000: /* mov.b Rm,@Rn */
        st_mop = MO_UB;
        break;
    case 0x2001: /* mov.w Rm,@Rn */
        st_mop = MO_UW;
        break;
    case 0x2002: /* mov.l Rm,@Rn */
        st_mop = MO_UL;
        break;
    default:
        goto fail;
    }
    /* The store must write back to the load address, at the same size.  */
    if (ld_adr != B11_8 || st_mop != (ld_mop & MO_SIZE)) {
        goto fail;
    }
    st_src = B7_4;

#undef NEXT_INSN

    /*
     * Emit the matched sequence as a single atomic operation.
     */
    switch (op_opc) {
    case -1:
        /* No operation found: a bare load/store pair is an exchange.  */
        if (st_src == ld_dst || mv_src >= 0) {
            goto fail;
        }
        tcg_gen_atomic_xchg_i32(REG(ld_dst), REG(ld_adr), REG(st_src),
                                ctx->memidx, ld_mop);
        break;

    case INDEX_op_add_i32:
        if (op_dst != st_src) {
            goto fail;
        }
        if (op_dst == ld_dst && st_mop == MO_UL) {
            tcg_gen_atomic_add_fetch_i32(REG(ld_dst), REG(ld_adr),
                                         op_arg, ctx->memidx, ld_mop);
        } else {
            tcg_gen_atomic_fetch_add_i32(REG(ld_dst), REG(ld_adr),
                                         op_arg, ctx->memidx, ld_mop);
            if (op_dst != ld_dst) {
                /* Note that mop sizes < 4 cannot use add_fetch because
                   it won't carry into the higher bits.  */
                tcg_gen_add_i32(REG(op_dst), REG(ld_dst), op_arg);
            }
        }
        break;

    case INDEX_op_and_i32:
        if (op_dst != st_src) {
            goto fail;
        }
        if (op_dst == ld_dst) {
            tcg_gen_atomic_and_fetch_i32(REG(ld_dst), REG(ld_adr),
                                         op_arg, ctx->memidx, ld_mop);
        } else {
            tcg_gen_atomic_fetch_and_i32(REG(ld_dst), REG(ld_adr),
                                         op_arg, ctx->memidx, ld_mop);
            tcg_gen_and_i32(REG(op_dst), REG(ld_dst), op_arg);
        }
        break;

    case INDEX_op_or_i32:
        if (op_dst != st_src) {
            goto fail;
        }
        if (op_dst == ld_dst) {
            tcg_gen_atomic_or_fetch_i32(REG(ld_dst), REG(ld_adr),
                                        op_arg, ctx->memidx, ld_mop);
        } else {
            tcg_gen_atomic_fetch_or_i32(REG(ld_dst), REG(ld_adr),
                                        op_arg, ctx->memidx, ld_mop);
            tcg_gen_or_i32(REG(op_dst), REG(ld_dst), op_arg);
        }
        break;

    case INDEX_op_xor_i32:
        if (op_dst != st_src) {
            goto fail;
        }
        if (op_dst == ld_dst) {
            tcg_gen_atomic_xor_fetch_i32(REG(ld_dst), REG(ld_adr),
                                         op_arg, ctx->memidx, ld_mop);
        } else {
            tcg_gen_atomic_fetch_xor_i32(REG(ld_dst), REG(ld_adr),
                                         op_arg, ctx->memidx, ld_mop);
            tcg_gen_xor_i32(REG(op_dst), REG(ld_dst), op_arg);
        }
        break;

    case INDEX_op_setcond_i32:
        /* Compare-and-swap; T is set iff the exchange succeeded.  */
        if (st_src == ld_dst) {
            goto fail;
        }
        tcg_gen_atomic_cmpxchg_i32(REG(ld_dst), REG(ld_adr), op_arg,
                                   REG(st_src), ctx->memidx, ld_mop);
        tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, REG(ld_dst), op_arg);
        if (mt_dst >= 0) {
            tcg_gen_mov_i32(REG(mt_dst), cpu_sr_t);
        }
        break;

    default:
        g_assert_not_reached();
    }

    /* If op_src is not a valid register, then op_arg was a constant.  */
    if (op_src < 0 && op_arg) {
        tcg_temp_free_i32(op_arg);
    }

    /* The entire region has been translated.  */
    ctx->envflags &= ~GUSA_MASK;
    ctx->base.pc_next = pc_end;
    ctx->base.num_insns += max_insns - 1;
    return;

 fail:
    qemu_log_mask(LOG_UNIMP, "Unrecognized gUSA sequence %08x-%08x\n",
                  pc, pc_end);

    /* Restart with the EXCLUSIVE bit set, within a TB run via
       the exclusive-execution helper.  */
    ctx->envflags |= GUSA_EXCLUSIVE;
    gen_save_cpu_state(ctx, false);
    gen_helper_exclusive(cpu_env);
    ctx->base.is_jmp = DISAS_NORETURN;

    /* We are not executing an instruction, but we must account for the
       whole region for the purposes of TB bookkeeping; advance pc_next
       past the region.  */
    ctx->base.pc_next = pc_end;
    ctx->base.num_insns += max_insns - 1;
}
2242#endif
2243
/*
 * Translator hook: initialize the SH4 DisasContext from the TB flags
 * before instruction translation begins, and bound max_insns.
 */
static void sh4_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUSH4State *env = cs->env_ptr;
    uint32_t tbflags;
    int bound;

    ctx->tbflags = tbflags = ctx->base.tb->flags;
    ctx->envflags = tbflags & TB_FLAG_ENVFLAGS_MASK;
    /* User-mode (SR.MD clear) accesses use mmu index 1.  */
    ctx->memidx = (tbflags & (1u << SR_MD)) == 0 ? 1 : 0;
    /* We don't know if the delayed pc came from a dynamic or static branch,
       so assume it is a dynamic branch.  */
    ctx->delayed_pc = -1;
    ctx->features = env->features;
    ctx->has_movcal = (tbflags & TB_FLAG_PENDING_MOVCA);
    /* Bank 1 of R0-R7 is selected only in privileged mode with SR.RB set;
       the bank offset (0 or 0x10) indexes into cpu_gregs[].  */
    ctx->gbank = ((tbflags & (1 << SR_MD)) &&
                  (tbflags & (1 << SR_RB))) * 0x10;
    /* Likewise FPSCR.FR selects the FP register bank offset.  */
    ctx->fbank = tbflags & FPSCR_PR ? 0x10 : 0;

    if (tbflags & GUSA_MASK) {
        /* A gUSA region: cs_base holds the end pc, and the flags hold
           the (negative) backup distance from the end to the start.  */
        uint32_t pc = ctx->base.pc_next;
        uint32_t pc_end = ctx->base.tb->cs_base;
        int backup = sextract32(ctx->tbflags, GUSA_SHIFT, 8);
        int max_insns = (pc_end - pc) / 2;

        if (pc != pc_end + backup || max_insns < 2) {
            /* This is a malformed gUSA region.  Don't do anything special,
               since the interpreter is likely to get confused.  */
            ctx->envflags &= ~GUSA_MASK;
        } else if (tbflags & GUSA_EXCLUSIVE) {
            /* Regardless of single-stepping or the end of the page,
               we must complete execution of the gUSA region while
               holding the exclusive lock.  */
            ctx->base.max_insns = max_insns;
            return;
        }
    }

    /* Since the ISA is fixed-width (16-bit), bound max_insns by the
       number of instructions remaining on the current page.  */
    bound = -(ctx->base.pc_next | TARGET_PAGE_MASK) / 2;
    ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
}
2287
/* Translator hook: nothing to emit at the start of a TB for SH4. */
static void sh4_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
{
}
2291
2292static void sh4_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
2293{
2294 DisasContext *ctx = container_of(dcbase, DisasContext, base);
2295
2296 tcg_gen_insn_start(ctx->base.pc_next, ctx->envflags);
2297}
2298
/*
 * Translator hook: handle a guest breakpoint by raising the debug
 * exception.  Always claims the breakpoint (returns true).
 */
static bool sh4_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
                                    const CPUBreakpoint *bp)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    /* We have hit a breakpoint - make sure PC is up-to-date.  */
    gen_save_cpu_state(ctx, true);
    gen_helper_debug(cpu_env);
    ctx->base.is_jmp = DISAS_NORETURN;
    /* Advance pc_next by one insn so that the breakpoint address is
       covered by this TB's [pc, pc + size) range and the breakpoint
       can be properly invalidated with the TB.  */
    ctx->base.pc_next += 2;
    return true;
}
2315
2316static void sh4_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
2317{
2318 CPUSH4State *env = cs->env_ptr;
2319 DisasContext *ctx = container_of(dcbase, DisasContext, base);
2320
2321#ifdef CONFIG_USER_ONLY
2322 if (unlikely(ctx->envflags & GUSA_MASK)
2323 && !(ctx->envflags & GUSA_EXCLUSIVE)) {
2324
2325
2326
2327
2328
2329 decode_gusa(ctx, env);
2330 return;
2331 }
2332#endif
2333
2334 ctx->opcode = translator_lduw(env, ctx->base.pc_next);
2335 decode_opc(ctx);
2336 ctx->base.pc_next += 2;
2337}
2338
2339static void sh4_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
2340{
2341 DisasContext *ctx = container_of(dcbase, DisasContext, base);
2342
2343 if (ctx->tbflags & GUSA_EXCLUSIVE) {
2344
2345 ctx->envflags &= ~GUSA_MASK;
2346 }
2347
2348 switch (ctx->base.is_jmp) {
2349 case DISAS_STOP:
2350 gen_save_cpu_state(ctx, true);
2351 if (ctx->base.singlestep_enabled) {
2352 gen_helper_debug(cpu_env);
2353 } else {
2354 tcg_gen_exit_tb(NULL, 0);
2355 }
2356 break;
2357 case DISAS_NEXT:
2358 case DISAS_TOO_MANY:
2359 gen_save_cpu_state(ctx, false);
2360 gen_goto_tb(ctx, 0, ctx->base.pc_next);
2361 break;
2362 case DISAS_NORETURN:
2363 break;
2364 default:
2365 g_assert_not_reached();
2366 }
2367}
2368
/* Translator hook: dump the guest disassembly of this TB to the log. */
static void sh4_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
{
    qemu_log("IN:\n");
    log_target_disas(cs, dcbase->pc_first, dcbase->tb->size);
}
2374
/* Hook table consumed by the generic translator loop.  */
static const TranslatorOps sh4_tr_ops = {
    .init_disas_context = sh4_tr_init_disas_context,
    .tb_start           = sh4_tr_tb_start,
    .insn_start         = sh4_tr_insn_start,
    .breakpoint_check   = sh4_tr_breakpoint_check,
    .translate_insn     = sh4_tr_translate_insn,
    .tb_stop            = sh4_tr_tb_stop,
    .disas_log          = sh4_tr_disas_log,
};
2384
2385void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
2386{
2387 DisasContext ctx;
2388
2389 translator_loop(&sh4_tr_ops, &ctx.base, cs, tb, max_insns);
2390}
2391
/*
 * Restore CPU state from the per-insn data recorded by
 * sh4_tr_insn_start: data[0] is the pc, data[1] the env flags.
 */
void restore_state_to_opc(CPUSH4State *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->pc = data[0];
    env->flags = data[1];
    /* NOTE(review): delayed_pc is not restored here -- presumably the
       branch insn is re-executed after the exception, recomputing the
       delayed branch target; confirm against the callers.  */
}
2401