1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20#define DEBUG_DISAS
21
22#include "qemu/osdep.h"
23#include "cpu.h"
24#include "disas/disas.h"
25#include "exec/exec-all.h"
26#include "tcg-op.h"
27#include "exec/cpu_ldst.h"
28#include "exec/helper-proto.h"
29#include "exec/helper-gen.h"
30#include "exec/translator.h"
31#include "trace-tcg.h"
32#include "exec/log.h"
33
34
/* Per-translation-block state for the SH4 instruction decoder. */
typedef struct DisasContext {
    DisasContextBase base;

    uint32_t tbflags;    /* flags frozen for the whole TB (SR.MD, FPSCR bits, GUSA) */
    uint32_t envflags;   /* flags that may change while translating (delay slots) */
    int memidx;          /* MMU index used for all qemu_ld/st in this TB */
    int gbank;           /* XOR offset selecting the active general-register bank */
    int fbank;           /* XOR offset selecting the active FP-register bank */
    uint32_t delayed_pc; /* constant delayed-branch target; (uint32_t)-1 if dynamic */
    uint32_t features;   /* CPU feature bits, e.g. SH_FEATURE_SH4A */

    uint16_t opcode;     /* 16-bit instruction word currently being decoded */

    bool has_movcal;     /* a MOVCA.L backup may still be live (see movcal helpers) */
} DisasContext;
50
#if defined(CONFIG_USER_ONLY)
/* User-mode emulation always runs unprivileged. */
#define IS_USER(ctx) 1
#else
/* Privilege comes from the SR.MD bit captured in tbflags at TB start. */
#define IS_USER(ctx) (!(ctx->tbflags & (1u << SR_MD)))
#endif

/* Target-specific is_jmp value: stop translating and re-enter the main
   loop (used after instructions that change FPSCR/SR-derived TB flags). */
#define DISAS_STOP DISAS_TARGET_0
61
62
/* TCG globals mirroring the architectural fields of CPUSH4State. */
static TCGv cpu_gregs[32];
static TCGv cpu_sr, cpu_sr_m, cpu_sr_q, cpu_sr_t;
static TCGv cpu_pc, cpu_ssr, cpu_spc, cpu_gbr;
static TCGv cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl;
static TCGv cpu_pr, cpu_fpscr, cpu_fpul;
static TCGv cpu_lock_addr, cpu_lock_value;
static TCGv cpu_fregs[32];

/* Internal (non-architectural) state used for delayed-branch handling. */
static TCGv cpu_flags, cpu_delayed_pc, cpu_delayed_cond;

#include "exec/gen-icount.h"
75
76void sh4_translate_init(void)
77{
78 int i;
79 static const char * const gregnames[24] = {
80 "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
81 "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
82 "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
83 "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
84 "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
85 };
86 static const char * const fregnames[32] = {
87 "FPR0_BANK0", "FPR1_BANK0", "FPR2_BANK0", "FPR3_BANK0",
88 "FPR4_BANK0", "FPR5_BANK0", "FPR6_BANK0", "FPR7_BANK0",
89 "FPR8_BANK0", "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
90 "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
91 "FPR0_BANK1", "FPR1_BANK1", "FPR2_BANK1", "FPR3_BANK1",
92 "FPR4_BANK1", "FPR5_BANK1", "FPR6_BANK1", "FPR7_BANK1",
93 "FPR8_BANK1", "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
94 "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
95 };
96
97 for (i = 0; i < 24; i++) {
98 cpu_gregs[i] = tcg_global_mem_new_i32(cpu_env,
99 offsetof(CPUSH4State, gregs[i]),
100 gregnames[i]);
101 }
102 memcpy(cpu_gregs + 24, cpu_gregs + 8, 8 * sizeof(TCGv));
103
104 cpu_pc = tcg_global_mem_new_i32(cpu_env,
105 offsetof(CPUSH4State, pc), "PC");
106 cpu_sr = tcg_global_mem_new_i32(cpu_env,
107 offsetof(CPUSH4State, sr), "SR");
108 cpu_sr_m = tcg_global_mem_new_i32(cpu_env,
109 offsetof(CPUSH4State, sr_m), "SR_M");
110 cpu_sr_q = tcg_global_mem_new_i32(cpu_env,
111 offsetof(CPUSH4State, sr_q), "SR_Q");
112 cpu_sr_t = tcg_global_mem_new_i32(cpu_env,
113 offsetof(CPUSH4State, sr_t), "SR_T");
114 cpu_ssr = tcg_global_mem_new_i32(cpu_env,
115 offsetof(CPUSH4State, ssr), "SSR");
116 cpu_spc = tcg_global_mem_new_i32(cpu_env,
117 offsetof(CPUSH4State, spc), "SPC");
118 cpu_gbr = tcg_global_mem_new_i32(cpu_env,
119 offsetof(CPUSH4State, gbr), "GBR");
120 cpu_vbr = tcg_global_mem_new_i32(cpu_env,
121 offsetof(CPUSH4State, vbr), "VBR");
122 cpu_sgr = tcg_global_mem_new_i32(cpu_env,
123 offsetof(CPUSH4State, sgr), "SGR");
124 cpu_dbr = tcg_global_mem_new_i32(cpu_env,
125 offsetof(CPUSH4State, dbr), "DBR");
126 cpu_mach = tcg_global_mem_new_i32(cpu_env,
127 offsetof(CPUSH4State, mach), "MACH");
128 cpu_macl = tcg_global_mem_new_i32(cpu_env,
129 offsetof(CPUSH4State, macl), "MACL");
130 cpu_pr = tcg_global_mem_new_i32(cpu_env,
131 offsetof(CPUSH4State, pr), "PR");
132 cpu_fpscr = tcg_global_mem_new_i32(cpu_env,
133 offsetof(CPUSH4State, fpscr), "FPSCR");
134 cpu_fpul = tcg_global_mem_new_i32(cpu_env,
135 offsetof(CPUSH4State, fpul), "FPUL");
136
137 cpu_flags = tcg_global_mem_new_i32(cpu_env,
138 offsetof(CPUSH4State, flags), "_flags_");
139 cpu_delayed_pc = tcg_global_mem_new_i32(cpu_env,
140 offsetof(CPUSH4State, delayed_pc),
141 "_delayed_pc_");
142 cpu_delayed_cond = tcg_global_mem_new_i32(cpu_env,
143 offsetof(CPUSH4State,
144 delayed_cond),
145 "_delayed_cond_");
146 cpu_lock_addr = tcg_global_mem_new_i32(cpu_env,
147 offsetof(CPUSH4State, lock_addr),
148 "_lock_addr_");
149 cpu_lock_value = tcg_global_mem_new_i32(cpu_env,
150 offsetof(CPUSH4State, lock_value),
151 "_lock_value_");
152
153 for (i = 0; i < 32; i++)
154 cpu_fregs[i] = tcg_global_mem_new_i32(cpu_env,
155 offsetof(CPUSH4State, fregs[i]),
156 fregnames[i]);
157}
158
159void superh_cpu_dump_state(CPUState *cs, FILE *f,
160 fprintf_function cpu_fprintf, int flags)
161{
162 SuperHCPU *cpu = SUPERH_CPU(cs);
163 CPUSH4State *env = &cpu->env;
164 int i;
165 cpu_fprintf(f, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
166 env->pc, cpu_read_sr(env), env->pr, env->fpscr);
167 cpu_fprintf(f, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
168 env->spc, env->ssr, env->gbr, env->vbr);
169 cpu_fprintf(f, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
170 env->sgr, env->dbr, env->delayed_pc, env->fpul);
171 for (i = 0; i < 24; i += 4) {
172 cpu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
173 i, env->gregs[i], i + 1, env->gregs[i + 1],
174 i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]);
175 }
176 if (env->flags & DELAY_SLOT) {
177 cpu_fprintf(f, "in delay slot (delayed_pc=0x%08x)\n",
178 env->delayed_pc);
179 } else if (env->flags & DELAY_SLOT_CONDITIONAL) {
180 cpu_fprintf(f, "in conditional delay slot (delayed_pc=0x%08x)\n",
181 env->delayed_pc);
182 } else if (env->flags & DELAY_SLOT_RTE) {
183 cpu_fprintf(f, "in rte delay slot (delayed_pc=0x%08x)\n",
184 env->delayed_pc);
185 }
186}
187
188static void gen_read_sr(TCGv dst)
189{
190 TCGv t0 = tcg_temp_new();
191 tcg_gen_shli_i32(t0, cpu_sr_q, SR_Q);
192 tcg_gen_or_i32(dst, dst, t0);
193 tcg_gen_shli_i32(t0, cpu_sr_m, SR_M);
194 tcg_gen_or_i32(dst, dst, t0);
195 tcg_gen_shli_i32(t0, cpu_sr_t, SR_T);
196 tcg_gen_or_i32(dst, cpu_sr, t0);
197 tcg_temp_free_i32(t0);
198}
199
200static void gen_write_sr(TCGv src)
201{
202 tcg_gen_andi_i32(cpu_sr, src,
203 ~((1u << SR_Q) | (1u << SR_M) | (1u << SR_T)));
204 tcg_gen_extract_i32(cpu_sr_q, src, SR_Q, 1);
205 tcg_gen_extract_i32(cpu_sr_m, src, SR_M, 1);
206 tcg_gen_extract_i32(cpu_sr_t, src, SR_T, 1);
207}
208
/*
 * Sync lazily-tracked translation state back into the CPU globals so a
 * helper or exception path sees consistent values.  PC is written only
 * when SAVE_PC is set; delayed_pc and flags only when they differ from
 * what is already stored.
 */
static inline void gen_save_cpu_state(DisasContext *ctx, bool save_pc)
{
    if (save_pc) {
        tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
    }
    /* A constant branch target is known at translate time only when
       delayed_pc != -1; -1 means cpu_delayed_pc already holds it. */
    if (ctx->delayed_pc != (uint32_t) -1) {
        tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
    }
    if ((ctx->tbflags & TB_FLAG_ENVFLAGS_MASK) != ctx->envflags) {
        tcg_gen_movi_i32(cpu_flags, ctx->envflags);
    }
}
221
222static inline bool use_exit_tb(DisasContext *ctx)
223{
224 return (ctx->tbflags & GUSA_EXCLUSIVE) != 0;
225}
226
/*
 * Decide whether a direct goto_tb link to DEST may be emitted.
 * Disallowed while single-stepping or inside a gUSA exclusive region;
 * in system mode the destination must also lie in the same guest page
 * as the current TB.
 */
static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
{
    if (unlikely(ctx->base.singlestep_enabled || use_exit_tb(ctx))) {
        return false;
    }
#ifndef CONFIG_USER_ONLY
    /* Same-page check: compare page numbers of the TB start and DEST. */
    return (ctx->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
239
/*
 * Emit a transfer of control to the constant address DEST, using TB slot
 * N for chaining.  Falls back to storing PC and exiting via the debug
 * helper (single-step), a plain exit_tb (gUSA region), or the TB-lookup
 * helper when direct linking is not allowed.
 */
static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
{
    if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb(ctx->base.tb, n);
    } else {
        tcg_gen_movi_i32(cpu_pc, dest);
        if (ctx->base.singlestep_enabled) {
            gen_helper_debug(cpu_env);
        } else if (use_exit_tb(ctx)) {
            tcg_gen_exit_tb(NULL, 0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
    }
    /* Control never falls through past this point. */
    ctx->base.is_jmp = DISAS_NORETURN;
}
258
/*
 * Emit the jump at the end of a delay slot.  delayed_pc == -1 marks a
 * target only known at runtime (already held in cpu_delayed_pc); any
 * other value is a translation-time constant handled via gen_goto_tb().
 */
static void gen_jump(DisasContext * ctx)
{
    if (ctx->delayed_pc == -1) {
        /* Dynamic target: copy it to PC and discard the shadow copy. */
        tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc);
        tcg_gen_discard_i32(cpu_delayed_pc);
        if (ctx->base.singlestep_enabled) {
            gen_helper_debug(cpu_env);
        } else if (use_exit_tb(ctx)) {
            tcg_gen_exit_tb(NULL, 0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        ctx->base.is_jmp = DISAS_NORETURN;
    } else {
        gen_goto_tb(ctx, 0, ctx->delayed_pc);
    }
}
278
279
/*
 * Non-delayed conditional branch to DEST, taken when cpu_sr_t equals
 * JUMP_IF_TRUE.  cond_not_taken is the comparison of T against 0 that
 * selects the fall-through (not-taken) path.
 */
static void gen_conditional_jump(DisasContext *ctx, target_ulong dest,
                                 bool jump_if_true)
{
    TCGLabel *l1 = gen_new_label();
    TCGCond cond_not_taken = jump_if_true ? TCG_COND_EQ : TCG_COND_NE;

    if (ctx->tbflags & GUSA_EXCLUSIVE) {
        /* Inside an exclusive region, translation must continue to the
           end of the region: exit the region on a taken branch, but
           otherwise fall through to the next instruction. */
        tcg_gen_brcondi_i32(cond_not_taken, cpu_sr_t, 0, l1);
        /* Taken branch: clear the gUSA bits before leaving. */
        tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~GUSA_MASK);
        gen_goto_tb(ctx, 0, dest);
        gen_set_label(l1);
        return;
    }

    gen_save_cpu_state(ctx, false);
    tcg_gen_brcondi_i32(cond_not_taken, cpu_sr_t, 0, l1);
    gen_goto_tb(ctx, 0, dest);
    gen_set_label(l1);
    /* Not taken: continue at the next instruction. */
    gen_goto_tb(ctx, 1, ctx->base.pc_next + 2);
    ctx->base.is_jmp = DISAS_NORETURN;
}
306
307
/*
 * Emit the end of a delay slot belonging to a conditional delayed branch
 * (BT/S, BF/S).  The branch condition was latched into cpu_delayed_cond
 * when the branch instruction itself was translated.
 */
static void gen_delayed_conditional_jump(DisasContext * ctx)
{
    TCGLabel *l1 = gen_new_label();
    TCGv ds = tcg_temp_new();

    /* Consume the latched condition. */
    tcg_gen_mov_i32(ds, cpu_delayed_cond);
    tcg_gen_discard_i32(cpu_delayed_cond);

    if (ctx->tbflags & GUSA_EXCLUSIVE) {
        /* Inside an exclusive region, translation must continue to the
           end of the region: exit the region on a taken branch, but
           otherwise fall through to the next instruction. */
        tcg_gen_brcondi_i32(TCG_COND_EQ, ds, 0, l1);

        /* Taken branch: clear the gUSA bits, then perform the jump. */
        tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~GUSA_MASK);
        gen_jump(ctx);

        gen_set_label(l1);
        ctx->base.is_jmp = DISAS_NEXT;
        return;
    }

    /* ds != 0 -> taken branch at l1; ds == 0 -> next instruction. */
    tcg_gen_brcondi_i32(TCG_COND_NE, ds, 0, l1);
    gen_goto_tb(ctx, 1, ctx->base.pc_next + 2);
    gen_set_label(l1);
    gen_jump(ctx);
}
336
/*
 * Load the 64-bit value of FP register pair REG/REG+1 into T.
 * REG must be even; the bank XOR selects the active FR/XF bank.
 * cpu_fregs[reg] supplies the high word, cpu_fregs[reg + 1] the low word
 * (tcg_gen_concat_i32_i64 takes low then high).
 */
static inline void gen_load_fpr64(DisasContext *ctx, TCGv_i64 t, int reg)
{
    tcg_debug_assert((reg & 1) == 0);
    reg ^= ctx->fbank;
    tcg_gen_concat_i32_i64(t, cpu_fregs[reg + 1], cpu_fregs[reg]);
}
344
/*
 * Store the 64-bit value T into FP register pair REG/REG+1.
 * REG must be even; the bank XOR selects the active FR/XF bank.
 * The high half goes to cpu_fregs[reg], the low half to
 * cpu_fregs[reg + 1] (tcg_gen_extr_i64_i32 yields low then high).
 */
static inline void gen_store_fpr64(DisasContext *ctx, TCGv_i64 t, int reg)
{
    tcg_debug_assert((reg & 1) == 0);
    reg ^= ctx->fbank;
    tcg_gen_extr_i64_i32(cpu_fregs[reg + 1], cpu_fregs[reg], t);
}
352
/* Opcode field extractors: B<H>_<L> selects bits H..L of ctx->opcode;
   the "s" variants sign-extend the field. */
#define B3_0 (ctx->opcode & 0xf)
#define B6_4 ((ctx->opcode >> 4) & 0x7)
#define B7_4 ((ctx->opcode >> 4) & 0xf)
#define B7_0 (ctx->opcode & 0xff)
#define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff))
#define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
  (ctx->opcode & 0xfff))
#define B11_8 ((ctx->opcode >> 8) & 0xf)
#define B15_12 ((ctx->opcode >> 12) & 0xf)

/* Register accessors: the XOR with gbank/fbank selects the active bank.
   ALTREG addresses the opposite general-register bank. */
#define REG(x) cpu_gregs[(x) ^ ctx->gbank]
#define ALTREG(x) cpu_gregs[(x) ^ ctx->gbank ^ 0x10]
#define FREG(x) cpu_fregs[(x) ^ ctx->fbank]

/* Move the odd bit of a pair index into bit 4, keeping the even bits:
   maps a DR/XD pair number onto the flat FP register index. */
#define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe))

/* Bail out to the illegal-slot path if translating a delay slot. */
#define CHECK_NOT_DELAY_SLOT \
    if (ctx->envflags & DELAY_SLOT_MASK) { \
        goto do_illegal_slot; \
    }

/* Bail out to the illegal-instruction path in user (unprivileged) mode. */
#define CHECK_PRIVILEGED \
    if (IS_USER(ctx)) { \
        goto do_illegal; \
    }

/* Bail out to the FPU-disabled path when SR.FD is set. */
#define CHECK_FPU_ENABLED \
    if (ctx->tbflags & (1u << SR_FD)) { \
        goto do_fpu_disabled; \
    }

/* Instruction requires FPSCR.PR == 0 (single-precision mode). */
#define CHECK_FPSCR_PR_0 \
    if (ctx->tbflags & FPSCR_PR) { \
        goto do_illegal; \
    }

/* Instruction requires FPSCR.PR == 1 (double-precision mode). */
#define CHECK_FPSCR_PR_1 \
    if (!(ctx->tbflags & FPSCR_PR)) { \
        goto do_illegal; \
    }

/* Instruction is only valid on SH4A-class CPUs. */
#define CHECK_SH4A \
    if (!(ctx->features & SH_FEATURE_SH4A)) { \
        goto do_illegal; \
    }
398
399static void _decode_opc(DisasContext * ctx)
400{
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421 if (ctx->has_movcal)
422 {
423 int opcode = ctx->opcode & 0xf0ff;
424 if (opcode != 0x0093
425 && opcode != 0x00c3 )
426 {
427 gen_helper_discard_movcal_backup(cpu_env);
428 ctx->has_movcal = 0;
429 }
430 }
431
432#if 0
433 fprintf(stderr, "Translating opcode 0x%04x\n", ctx->opcode);
434#endif
435
436 switch (ctx->opcode) {
437 case 0x0019:
438 tcg_gen_movi_i32(cpu_sr_m, 0);
439 tcg_gen_movi_i32(cpu_sr_q, 0);
440 tcg_gen_movi_i32(cpu_sr_t, 0);
441 return;
442 case 0x000b:
443 CHECK_NOT_DELAY_SLOT
444 tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr);
445 ctx->envflags |= DELAY_SLOT;
446 ctx->delayed_pc = (uint32_t) - 1;
447 return;
448 case 0x0028:
449 tcg_gen_movi_i32(cpu_mach, 0);
450 tcg_gen_movi_i32(cpu_macl, 0);
451 return;
452 case 0x0048:
453 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(1u << SR_S));
454 return;
455 case 0x0008:
456 tcg_gen_movi_i32(cpu_sr_t, 0);
457 return;
458 case 0x0038:
459 CHECK_PRIVILEGED
460 gen_helper_ldtlb(cpu_env);
461 return;
462 case 0x002b:
463 CHECK_PRIVILEGED
464 CHECK_NOT_DELAY_SLOT
465 gen_write_sr(cpu_ssr);
466 tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc);
467 ctx->envflags |= DELAY_SLOT_RTE;
468 ctx->delayed_pc = (uint32_t) - 1;
469 ctx->base.is_jmp = DISAS_STOP;
470 return;
471 case 0x0058:
472 tcg_gen_ori_i32(cpu_sr, cpu_sr, (1u << SR_S));
473 return;
474 case 0x0018:
475 tcg_gen_movi_i32(cpu_sr_t, 1);
476 return;
477 case 0xfbfd:
478 CHECK_FPSCR_PR_0
479 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_FR);
480 ctx->base.is_jmp = DISAS_STOP;
481 return;
482 case 0xf3fd:
483 CHECK_FPSCR_PR_0
484 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_SZ);
485 ctx->base.is_jmp = DISAS_STOP;
486 return;
487 case 0xf7fd:
488 CHECK_SH4A
489 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_PR);
490 ctx->base.is_jmp = DISAS_STOP;
491 return;
492 case 0x0009:
493 return;
494 case 0x001b:
495 CHECK_PRIVILEGED
496 tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next + 2);
497 gen_helper_sleep(cpu_env);
498 return;
499 }
500
501 switch (ctx->opcode & 0xf000) {
502 case 0x1000:
503 {
504 TCGv addr = tcg_temp_new();
505 tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4);
506 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
507 tcg_temp_free(addr);
508 }
509 return;
510 case 0x5000:
511 {
512 TCGv addr = tcg_temp_new();
513 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4);
514 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
515 tcg_temp_free(addr);
516 }
517 return;
518 case 0xe000:
519#ifdef CONFIG_USER_ONLY
520
521
522
523 if (B11_8 == 15 && B7_0s < 0 &&
524 (tb_cflags(ctx->base.tb) & CF_PARALLEL)) {
525 ctx->envflags = deposit32(ctx->envflags, GUSA_SHIFT, 8, B7_0s);
526 ctx->base.is_jmp = DISAS_STOP;
527 }
528#endif
529 tcg_gen_movi_i32(REG(B11_8), B7_0s);
530 return;
531 case 0x9000:
532 {
533 TCGv addr = tcg_const_i32(ctx->base.pc_next + 4 + B7_0 * 2);
534 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
535 tcg_temp_free(addr);
536 }
537 return;
538 case 0xd000:
539 {
540 TCGv addr = tcg_const_i32((ctx->base.pc_next + 4 + B7_0 * 4) & ~3);
541 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
542 tcg_temp_free(addr);
543 }
544 return;
545 case 0x7000:
546 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), B7_0s);
547 return;
548 case 0xa000:
549 CHECK_NOT_DELAY_SLOT
550 ctx->delayed_pc = ctx->base.pc_next + 4 + B11_0s * 2;
551 ctx->envflags |= DELAY_SLOT;
552 return;
553 case 0xb000:
554 CHECK_NOT_DELAY_SLOT
555 tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
556 ctx->delayed_pc = ctx->base.pc_next + 4 + B11_0s * 2;
557 ctx->envflags |= DELAY_SLOT;
558 return;
559 }
560
561 switch (ctx->opcode & 0xf00f) {
562 case 0x6003:
563 tcg_gen_mov_i32(REG(B11_8), REG(B7_4));
564 return;
565 case 0x2000:
566 tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_UB);
567 return;
568 case 0x2001:
569 tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUW);
570 return;
571 case 0x2002:
572 tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUL);
573 return;
574 case 0x6000:
575 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
576 return;
577 case 0x6001:
578 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW);
579 return;
580 case 0x6002:
581 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL);
582 return;
583 case 0x2004:
584 {
585 TCGv addr = tcg_temp_new();
586 tcg_gen_subi_i32(addr, REG(B11_8), 1);
587
588 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
589 tcg_gen_mov_i32(REG(B11_8), addr);
590 tcg_temp_free(addr);
591 }
592 return;
593 case 0x2005:
594 {
595 TCGv addr = tcg_temp_new();
596 tcg_gen_subi_i32(addr, REG(B11_8), 2);
597 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW);
598 tcg_gen_mov_i32(REG(B11_8), addr);
599 tcg_temp_free(addr);
600 }
601 return;
602 case 0x2006:
603 {
604 TCGv addr = tcg_temp_new();
605 tcg_gen_subi_i32(addr, REG(B11_8), 4);
606 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
607 tcg_gen_mov_i32(REG(B11_8), addr);
608 tcg_temp_free(addr);
609 }
610 return;
611 case 0x6004:
612 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
613 if ( B11_8 != B7_4 )
614 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 1);
615 return;
616 case 0x6005:
617 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW);
618 if ( B11_8 != B7_4 )
619 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
620 return;
621 case 0x6006:
622 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL);
623 if ( B11_8 != B7_4 )
624 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
625 return;
626 case 0x0004:
627 {
628 TCGv addr = tcg_temp_new();
629 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
630 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
631 tcg_temp_free(addr);
632 }
633 return;
634 case 0x0005:
635 {
636 TCGv addr = tcg_temp_new();
637 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
638 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW);
639 tcg_temp_free(addr);
640 }
641 return;
642 case 0x0006:
643 {
644 TCGv addr = tcg_temp_new();
645 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
646 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
647 tcg_temp_free(addr);
648 }
649 return;
650 case 0x000c:
651 {
652 TCGv addr = tcg_temp_new();
653 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
654 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_SB);
655 tcg_temp_free(addr);
656 }
657 return;
658 case 0x000d:
659 {
660 TCGv addr = tcg_temp_new();
661 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
662 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
663 tcg_temp_free(addr);
664 }
665 return;
666 case 0x000e:
667 {
668 TCGv addr = tcg_temp_new();
669 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
670 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
671 tcg_temp_free(addr);
672 }
673 return;
674 case 0x6008:
675 {
676 TCGv low = tcg_temp_new();
677 tcg_gen_ext16u_i32(low, REG(B7_4));
678 tcg_gen_bswap16_i32(low, low);
679 tcg_gen_deposit_i32(REG(B11_8), REG(B7_4), low, 0, 16);
680 tcg_temp_free(low);
681 }
682 return;
683 case 0x6009:
684 tcg_gen_rotli_i32(REG(B11_8), REG(B7_4), 16);
685 return;
686 case 0x200d:
687 {
688 TCGv high, low;
689 high = tcg_temp_new();
690 tcg_gen_shli_i32(high, REG(B7_4), 16);
691 low = tcg_temp_new();
692 tcg_gen_shri_i32(low, REG(B11_8), 16);
693 tcg_gen_or_i32(REG(B11_8), high, low);
694 tcg_temp_free(low);
695 tcg_temp_free(high);
696 }
697 return;
698 case 0x300c:
699 tcg_gen_add_i32(REG(B11_8), REG(B11_8), REG(B7_4));
700 return;
701 case 0x300e:
702 {
703 TCGv t0, t1;
704 t0 = tcg_const_tl(0);
705 t1 = tcg_temp_new();
706 tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
707 tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
708 REG(B11_8), t0, t1, cpu_sr_t);
709 tcg_temp_free(t0);
710 tcg_temp_free(t1);
711 }
712 return;
713 case 0x300f:
714 {
715 TCGv t0, t1, t2;
716 t0 = tcg_temp_new();
717 tcg_gen_add_i32(t0, REG(B7_4), REG(B11_8));
718 t1 = tcg_temp_new();
719 tcg_gen_xor_i32(t1, t0, REG(B11_8));
720 t2 = tcg_temp_new();
721 tcg_gen_xor_i32(t2, REG(B7_4), REG(B11_8));
722 tcg_gen_andc_i32(cpu_sr_t, t1, t2);
723 tcg_temp_free(t2);
724 tcg_gen_shri_i32(cpu_sr_t, cpu_sr_t, 31);
725 tcg_temp_free(t1);
726 tcg_gen_mov_i32(REG(B7_4), t0);
727 tcg_temp_free(t0);
728 }
729 return;
730 case 0x2009:
731 tcg_gen_and_i32(REG(B11_8), REG(B11_8), REG(B7_4));
732 return;
733 case 0x3000:
734 tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), REG(B7_4));
735 return;
736 case 0x3003:
737 tcg_gen_setcond_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), REG(B7_4));
738 return;
739 case 0x3007:
740 tcg_gen_setcond_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), REG(B7_4));
741 return;
742 case 0x3006:
743 tcg_gen_setcond_i32(TCG_COND_GTU, cpu_sr_t, REG(B11_8), REG(B7_4));
744 return;
745 case 0x3002:
746 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_sr_t, REG(B11_8), REG(B7_4));
747 return;
748 case 0x200c:
749 {
750 TCGv cmp1 = tcg_temp_new();
751 TCGv cmp2 = tcg_temp_new();
752 tcg_gen_xor_i32(cmp2, REG(B7_4), REG(B11_8));
753 tcg_gen_subi_i32(cmp1, cmp2, 0x01010101);
754 tcg_gen_andc_i32(cmp1, cmp1, cmp2);
755 tcg_gen_andi_i32(cmp1, cmp1, 0x80808080);
756 tcg_gen_setcondi_i32(TCG_COND_NE, cpu_sr_t, cmp1, 0);
757 tcg_temp_free(cmp2);
758 tcg_temp_free(cmp1);
759 }
760 return;
761 case 0x2007:
762 tcg_gen_shri_i32(cpu_sr_q, REG(B11_8), 31);
763 tcg_gen_shri_i32(cpu_sr_m, REG(B7_4), 31);
764 tcg_gen_xor_i32(cpu_sr_t, cpu_sr_q, cpu_sr_m);
765 return;
766 case 0x3004:
767 {
768 TCGv t0 = tcg_temp_new();
769 TCGv t1 = tcg_temp_new();
770 TCGv t2 = tcg_temp_new();
771 TCGv zero = tcg_const_i32(0);
772
773
774
775 tcg_gen_shri_i32(t0, REG(B11_8), 31);
776 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
777 tcg_gen_or_i32(REG(B11_8), REG(B11_8), cpu_sr_t);
778
779
780
781
782
783 tcg_gen_xor_i32(t1, cpu_sr_q, cpu_sr_m);
784 tcg_gen_subi_i32(t1, t1, 1);
785 tcg_gen_neg_i32(t2, REG(B7_4));
786 tcg_gen_movcond_i32(TCG_COND_EQ, t2, t1, zero, REG(B7_4), t2);
787 tcg_gen_add2_i32(REG(B11_8), t1, REG(B11_8), zero, t2, t1);
788
789
790 tcg_gen_andi_i32(t1, t1, 1);
791 tcg_gen_xor_i32(t1, t1, t0);
792 tcg_gen_xori_i32(cpu_sr_t, t1, 1);
793 tcg_gen_xor_i32(cpu_sr_q, cpu_sr_m, t1);
794
795 tcg_temp_free(zero);
796 tcg_temp_free(t2);
797 tcg_temp_free(t1);
798 tcg_temp_free(t0);
799 }
800 return;
801 case 0x300d:
802 tcg_gen_muls2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
803 return;
804 case 0x3005:
805 tcg_gen_mulu2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
806 return;
807 case 0x600e:
808 tcg_gen_ext8s_i32(REG(B11_8), REG(B7_4));
809 return;
810 case 0x600f:
811 tcg_gen_ext16s_i32(REG(B11_8), REG(B7_4));
812 return;
813 case 0x600c:
814 tcg_gen_ext8u_i32(REG(B11_8), REG(B7_4));
815 return;
816 case 0x600d:
817 tcg_gen_ext16u_i32(REG(B11_8), REG(B7_4));
818 return;
819 case 0x000f:
820 {
821 TCGv arg0, arg1;
822 arg0 = tcg_temp_new();
823 tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESL);
824 arg1 = tcg_temp_new();
825 tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL);
826 gen_helper_macl(cpu_env, arg0, arg1);
827 tcg_temp_free(arg1);
828 tcg_temp_free(arg0);
829 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
830 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
831 }
832 return;
833 case 0x400f:
834 {
835 TCGv arg0, arg1;
836 arg0 = tcg_temp_new();
837 tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESL);
838 arg1 = tcg_temp_new();
839 tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL);
840 gen_helper_macw(cpu_env, arg0, arg1);
841 tcg_temp_free(arg1);
842 tcg_temp_free(arg0);
843 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2);
844 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
845 }
846 return;
847 case 0x0007:
848 tcg_gen_mul_i32(cpu_macl, REG(B7_4), REG(B11_8));
849 return;
850 case 0x200f:
851 {
852 TCGv arg0, arg1;
853 arg0 = tcg_temp_new();
854 tcg_gen_ext16s_i32(arg0, REG(B7_4));
855 arg1 = tcg_temp_new();
856 tcg_gen_ext16s_i32(arg1, REG(B11_8));
857 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
858 tcg_temp_free(arg1);
859 tcg_temp_free(arg0);
860 }
861 return;
862 case 0x200e:
863 {
864 TCGv arg0, arg1;
865 arg0 = tcg_temp_new();
866 tcg_gen_ext16u_i32(arg0, REG(B7_4));
867 arg1 = tcg_temp_new();
868 tcg_gen_ext16u_i32(arg1, REG(B11_8));
869 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
870 tcg_temp_free(arg1);
871 tcg_temp_free(arg0);
872 }
873 return;
874 case 0x600b:
875 tcg_gen_neg_i32(REG(B11_8), REG(B7_4));
876 return;
877 case 0x600a:
878 {
879 TCGv t0 = tcg_const_i32(0);
880 tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
881 REG(B7_4), t0, cpu_sr_t, t0);
882 tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
883 t0, t0, REG(B11_8), cpu_sr_t);
884 tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
885 tcg_temp_free(t0);
886 }
887 return;
888 case 0x6007:
889 tcg_gen_not_i32(REG(B11_8), REG(B7_4));
890 return;
891 case 0x200b:
892 tcg_gen_or_i32(REG(B11_8), REG(B11_8), REG(B7_4));
893 return;
894 case 0x400c:
895 {
896 TCGv t0 = tcg_temp_new();
897 TCGv t1 = tcg_temp_new();
898 TCGv t2 = tcg_temp_new();
899
900 tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);
901
902
903 tcg_gen_shl_i32(t1, REG(B11_8), t0);
904
905
906
907 tcg_gen_xori_i32(t0, t0, 0x1f);
908 tcg_gen_sar_i32(t2, REG(B11_8), t0);
909 tcg_gen_sari_i32(t2, t2, 1);
910
911
912 tcg_gen_movi_i32(t0, 0);
913 tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);
914
915 tcg_temp_free(t0);
916 tcg_temp_free(t1);
917 tcg_temp_free(t2);
918 }
919 return;
920 case 0x400d:
921 {
922 TCGv t0 = tcg_temp_new();
923 TCGv t1 = tcg_temp_new();
924 TCGv t2 = tcg_temp_new();
925
926 tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);
927
928
929 tcg_gen_shl_i32(t1, REG(B11_8), t0);
930
931
932
933 tcg_gen_xori_i32(t0, t0, 0x1f);
934 tcg_gen_shr_i32(t2, REG(B11_8), t0);
935 tcg_gen_shri_i32(t2, t2, 1);
936
937
938 tcg_gen_movi_i32(t0, 0);
939 tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);
940
941 tcg_temp_free(t0);
942 tcg_temp_free(t1);
943 tcg_temp_free(t2);
944 }
945 return;
946 case 0x3008:
947 tcg_gen_sub_i32(REG(B11_8), REG(B11_8), REG(B7_4));
948 return;
949 case 0x300a:
950 {
951 TCGv t0, t1;
952 t0 = tcg_const_tl(0);
953 t1 = tcg_temp_new();
954 tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
955 tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
956 REG(B11_8), t0, t1, cpu_sr_t);
957 tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
958 tcg_temp_free(t0);
959 tcg_temp_free(t1);
960 }
961 return;
962 case 0x300b:
963 {
964 TCGv t0, t1, t2;
965 t0 = tcg_temp_new();
966 tcg_gen_sub_i32(t0, REG(B11_8), REG(B7_4));
967 t1 = tcg_temp_new();
968 tcg_gen_xor_i32(t1, t0, REG(B7_4));
969 t2 = tcg_temp_new();
970 tcg_gen_xor_i32(t2, REG(B11_8), REG(B7_4));
971 tcg_gen_and_i32(t1, t1, t2);
972 tcg_temp_free(t2);
973 tcg_gen_shri_i32(cpu_sr_t, t1, 31);
974 tcg_temp_free(t1);
975 tcg_gen_mov_i32(REG(B11_8), t0);
976 tcg_temp_free(t0);
977 }
978 return;
979 case 0x2008:
980 {
981 TCGv val = tcg_temp_new();
982 tcg_gen_and_i32(val, REG(B7_4), REG(B11_8));
983 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
984 tcg_temp_free(val);
985 }
986 return;
987 case 0x200a:
988 tcg_gen_xor_i32(REG(B11_8), REG(B11_8), REG(B7_4));
989 return;
990 case 0xf00c:
991 CHECK_FPU_ENABLED
992 if (ctx->tbflags & FPSCR_SZ) {
993 int xsrc = XHACK(B7_4);
994 int xdst = XHACK(B11_8);
995 tcg_gen_mov_i32(FREG(xdst), FREG(xsrc));
996 tcg_gen_mov_i32(FREG(xdst + 1), FREG(xsrc + 1));
997 } else {
998 tcg_gen_mov_i32(FREG(B11_8), FREG(B7_4));
999 }
1000 return;
1001 case 0xf00a:
1002 CHECK_FPU_ENABLED
1003 if (ctx->tbflags & FPSCR_SZ) {
1004 TCGv_i64 fp = tcg_temp_new_i64();
1005 gen_load_fpr64(ctx, fp, XHACK(B7_4));
1006 tcg_gen_qemu_st_i64(fp, REG(B11_8), ctx->memidx, MO_TEQ);
1007 tcg_temp_free_i64(fp);
1008 } else {
1009 tcg_gen_qemu_st_i32(FREG(B7_4), REG(B11_8), ctx->memidx, MO_TEUL);
1010 }
1011 return;
1012 case 0xf008:
1013 CHECK_FPU_ENABLED
1014 if (ctx->tbflags & FPSCR_SZ) {
1015 TCGv_i64 fp = tcg_temp_new_i64();
1016 tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx, MO_TEQ);
1017 gen_store_fpr64(ctx, fp, XHACK(B11_8));
1018 tcg_temp_free_i64(fp);
1019 } else {
1020 tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx, MO_TEUL);
1021 }
1022 return;
1023 case 0xf009:
1024 CHECK_FPU_ENABLED
1025 if (ctx->tbflags & FPSCR_SZ) {
1026 TCGv_i64 fp = tcg_temp_new_i64();
1027 tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx, MO_TEQ);
1028 gen_store_fpr64(ctx, fp, XHACK(B11_8));
1029 tcg_temp_free_i64(fp);
1030 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8);
1031 } else {
1032 tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx, MO_TEUL);
1033 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
1034 }
1035 return;
1036 case 0xf00b:
1037 CHECK_FPU_ENABLED
1038 {
1039 TCGv addr = tcg_temp_new_i32();
1040 if (ctx->tbflags & FPSCR_SZ) {
1041 TCGv_i64 fp = tcg_temp_new_i64();
1042 gen_load_fpr64(ctx, fp, XHACK(B7_4));
1043 tcg_gen_subi_i32(addr, REG(B11_8), 8);
1044 tcg_gen_qemu_st_i64(fp, addr, ctx->memidx, MO_TEQ);
1045 tcg_temp_free_i64(fp);
1046 } else {
1047 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1048 tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx, MO_TEUL);
1049 }
1050 tcg_gen_mov_i32(REG(B11_8), addr);
1051 tcg_temp_free(addr);
1052 }
1053 return;
1054 case 0xf006:
1055 CHECK_FPU_ENABLED
1056 {
1057 TCGv addr = tcg_temp_new_i32();
1058 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
1059 if (ctx->tbflags & FPSCR_SZ) {
1060 TCGv_i64 fp = tcg_temp_new_i64();
1061 tcg_gen_qemu_ld_i64(fp, addr, ctx->memidx, MO_TEQ);
1062 gen_store_fpr64(ctx, fp, XHACK(B11_8));
1063 tcg_temp_free_i64(fp);
1064 } else {
1065 tcg_gen_qemu_ld_i32(FREG(B11_8), addr, ctx->memidx, MO_TEUL);
1066 }
1067 tcg_temp_free(addr);
1068 }
1069 return;
1070 case 0xf007:
1071 CHECK_FPU_ENABLED
1072 {
1073 TCGv addr = tcg_temp_new();
1074 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
1075 if (ctx->tbflags & FPSCR_SZ) {
1076 TCGv_i64 fp = tcg_temp_new_i64();
1077 gen_load_fpr64(ctx, fp, XHACK(B7_4));
1078 tcg_gen_qemu_st_i64(fp, addr, ctx->memidx, MO_TEQ);
1079 tcg_temp_free_i64(fp);
1080 } else {
1081 tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx, MO_TEUL);
1082 }
1083 tcg_temp_free(addr);
1084 }
1085 return;
1086 case 0xf000:
1087 case 0xf001:
1088 case 0xf002:
1089 case 0xf003:
1090 case 0xf004:
1091 case 0xf005:
1092 {
1093 CHECK_FPU_ENABLED
1094 if (ctx->tbflags & FPSCR_PR) {
1095 TCGv_i64 fp0, fp1;
1096
1097 if (ctx->opcode & 0x0110) {
1098 goto do_illegal;
1099 }
1100 fp0 = tcg_temp_new_i64();
1101 fp1 = tcg_temp_new_i64();
1102 gen_load_fpr64(ctx, fp0, B11_8);
1103 gen_load_fpr64(ctx, fp1, B7_4);
1104 switch (ctx->opcode & 0xf00f) {
1105 case 0xf000:
1106 gen_helper_fadd_DT(fp0, cpu_env, fp0, fp1);
1107 break;
1108 case 0xf001:
1109 gen_helper_fsub_DT(fp0, cpu_env, fp0, fp1);
1110 break;
1111 case 0xf002:
1112 gen_helper_fmul_DT(fp0, cpu_env, fp0, fp1);
1113 break;
1114 case 0xf003:
1115 gen_helper_fdiv_DT(fp0, cpu_env, fp0, fp1);
1116 break;
1117 case 0xf004:
1118 gen_helper_fcmp_eq_DT(cpu_sr_t, cpu_env, fp0, fp1);
1119 return;
1120 case 0xf005:
1121 gen_helper_fcmp_gt_DT(cpu_sr_t, cpu_env, fp0, fp1);
1122 return;
1123 }
1124 gen_store_fpr64(ctx, fp0, B11_8);
1125 tcg_temp_free_i64(fp0);
1126 tcg_temp_free_i64(fp1);
1127 } else {
1128 switch (ctx->opcode & 0xf00f) {
1129 case 0xf000:
1130 gen_helper_fadd_FT(FREG(B11_8), cpu_env,
1131 FREG(B11_8), FREG(B7_4));
1132 break;
1133 case 0xf001:
1134 gen_helper_fsub_FT(FREG(B11_8), cpu_env,
1135 FREG(B11_8), FREG(B7_4));
1136 break;
1137 case 0xf002:
1138 gen_helper_fmul_FT(FREG(B11_8), cpu_env,
1139 FREG(B11_8), FREG(B7_4));
1140 break;
1141 case 0xf003:
1142 gen_helper_fdiv_FT(FREG(B11_8), cpu_env,
1143 FREG(B11_8), FREG(B7_4));
1144 break;
1145 case 0xf004:
1146 gen_helper_fcmp_eq_FT(cpu_sr_t, cpu_env,
1147 FREG(B11_8), FREG(B7_4));
1148 return;
1149 case 0xf005:
1150 gen_helper_fcmp_gt_FT(cpu_sr_t, cpu_env,
1151 FREG(B11_8), FREG(B7_4));
1152 return;
1153 }
1154 }
1155 }
1156 return;
1157 case 0xf00e:
1158 CHECK_FPU_ENABLED
1159 CHECK_FPSCR_PR_0
1160 gen_helper_fmac_FT(FREG(B11_8), cpu_env,
1161 FREG(0), FREG(B7_4), FREG(B11_8));
1162 return;
1163 }
1164
1165 switch (ctx->opcode & 0xff00) {
1166 case 0xc900:
1167 tcg_gen_andi_i32(REG(0), REG(0), B7_0);
1168 return;
1169 case 0xcd00:
1170 {
1171 TCGv addr, val;
1172 addr = tcg_temp_new();
1173 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1174 val = tcg_temp_new();
1175 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1176 tcg_gen_andi_i32(val, val, B7_0);
1177 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1178 tcg_temp_free(val);
1179 tcg_temp_free(addr);
1180 }
1181 return;
1182 case 0x8b00:
1183 CHECK_NOT_DELAY_SLOT
1184 gen_conditional_jump(ctx, ctx->base.pc_next + 4 + B7_0s * 2, false);
1185 return;
1186 case 0x8f00:
1187 CHECK_NOT_DELAY_SLOT
1188 tcg_gen_xori_i32(cpu_delayed_cond, cpu_sr_t, 1);
1189 ctx->delayed_pc = ctx->base.pc_next + 4 + B7_0s * 2;
1190 ctx->envflags |= DELAY_SLOT_CONDITIONAL;
1191 return;
1192 case 0x8900:
1193 CHECK_NOT_DELAY_SLOT
1194 gen_conditional_jump(ctx, ctx->base.pc_next + 4 + B7_0s * 2, true);
1195 return;
1196 case 0x8d00:
1197 CHECK_NOT_DELAY_SLOT
1198 tcg_gen_mov_i32(cpu_delayed_cond, cpu_sr_t);
1199 ctx->delayed_pc = ctx->base.pc_next + 4 + B7_0s * 2;
1200 ctx->envflags |= DELAY_SLOT_CONDITIONAL;
1201 return;
1202 case 0x8800:
1203 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(0), B7_0s);
1204 return;
1205 case 0xc400:
1206 {
1207 TCGv addr = tcg_temp_new();
1208 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1209 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
1210 tcg_temp_free(addr);
1211 }
1212 return;
1213 case 0xc500:
1214 {
1215 TCGv addr = tcg_temp_new();
1216 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1217 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
1218 tcg_temp_free(addr);
1219 }
1220 return;
1221 case 0xc600:
1222 {
1223 TCGv addr = tcg_temp_new();
1224 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1225 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESL);
1226 tcg_temp_free(addr);
1227 }
1228 return;
1229 case 0xc000:
1230 {
1231 TCGv addr = tcg_temp_new();
1232 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1233 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
1234 tcg_temp_free(addr);
1235 }
1236 return;
1237 case 0xc100:
1238 {
1239 TCGv addr = tcg_temp_new();
1240 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1241 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW);
1242 tcg_temp_free(addr);
1243 }
1244 return;
1245 case 0xc200:
1246 {
1247 TCGv addr = tcg_temp_new();
1248 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1249 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUL);
1250 tcg_temp_free(addr);
1251 }
1252 return;
1253 case 0x8000:
1254 {
1255 TCGv addr = tcg_temp_new();
1256 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1257 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
1258 tcg_temp_free(addr);
1259 }
1260 return;
1261 case 0x8100:
1262 {
1263 TCGv addr = tcg_temp_new();
1264 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1265 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW);
1266 tcg_temp_free(addr);
1267 }
1268 return;
1269 case 0x8400:
1270 {
1271 TCGv addr = tcg_temp_new();
1272 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1273 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
1274 tcg_temp_free(addr);
1275 }
1276 return;
1277 case 0x8500:
1278 {
1279 TCGv addr = tcg_temp_new();
1280 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1281 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
1282 tcg_temp_free(addr);
1283 }
1284 return;
1285 case 0xc700:
1286 tcg_gen_movi_i32(REG(0), ((ctx->base.pc_next & 0xfffffffc) +
1287 4 + B7_0 * 4) & ~3);
1288 return;
1289 case 0xcb00:
1290 tcg_gen_ori_i32(REG(0), REG(0), B7_0);
1291 return;
1292 case 0xcf00:
1293 {
1294 TCGv addr, val;
1295 addr = tcg_temp_new();
1296 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1297 val = tcg_temp_new();
1298 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1299 tcg_gen_ori_i32(val, val, B7_0);
1300 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1301 tcg_temp_free(val);
1302 tcg_temp_free(addr);
1303 }
1304 return;
1305 case 0xc300:
1306 {
1307 TCGv imm;
1308 CHECK_NOT_DELAY_SLOT
1309 gen_save_cpu_state(ctx, true);
1310 imm = tcg_const_i32(B7_0);
1311 gen_helper_trapa(cpu_env, imm);
1312 tcg_temp_free(imm);
1313 ctx->base.is_jmp = DISAS_NORETURN;
1314 }
1315 return;
1316 case 0xc800:
1317 {
1318 TCGv val = tcg_temp_new();
1319 tcg_gen_andi_i32(val, REG(0), B7_0);
1320 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
1321 tcg_temp_free(val);
1322 }
1323 return;
1324 case 0xcc00:
1325 {
1326 TCGv val = tcg_temp_new();
1327 tcg_gen_add_i32(val, REG(0), cpu_gbr);
1328 tcg_gen_qemu_ld_i32(val, val, ctx->memidx, MO_UB);
1329 tcg_gen_andi_i32(val, val, B7_0);
1330 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
1331 tcg_temp_free(val);
1332 }
1333 return;
1334 case 0xca00:
1335 tcg_gen_xori_i32(REG(0), REG(0), B7_0);
1336 return;
1337 case 0xce00:
1338 {
1339 TCGv addr, val;
1340 addr = tcg_temp_new();
1341 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1342 val = tcg_temp_new();
1343 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1344 tcg_gen_xori_i32(val, val, B7_0);
1345 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1346 tcg_temp_free(val);
1347 tcg_temp_free(addr);
1348 }
1349 return;
1350 }
1351
1352 switch (ctx->opcode & 0xf08f) {
1353 case 0x408e:
1354 CHECK_PRIVILEGED
1355 tcg_gen_mov_i32(ALTREG(B6_4), REG(B11_8));
1356 return;
1357 case 0x4087:
1358 CHECK_PRIVILEGED
1359 tcg_gen_qemu_ld_i32(ALTREG(B6_4), REG(B11_8), ctx->memidx, MO_TESL);
1360 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1361 return;
1362 case 0x0082:
1363 CHECK_PRIVILEGED
1364 tcg_gen_mov_i32(REG(B11_8), ALTREG(B6_4));
1365 return;
1366 case 0x4083:
1367 CHECK_PRIVILEGED
1368 {
1369 TCGv addr = tcg_temp_new();
1370 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1371 tcg_gen_qemu_st_i32(ALTREG(B6_4), addr, ctx->memidx, MO_TEUL);
1372 tcg_gen_mov_i32(REG(B11_8), addr);
1373 tcg_temp_free(addr);
1374 }
1375 return;
1376 }
1377
1378 switch (ctx->opcode & 0xf0ff) {
1379 case 0x0023:
1380 CHECK_NOT_DELAY_SLOT
1381 tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->base.pc_next + 4);
1382 ctx->envflags |= DELAY_SLOT;
1383 ctx->delayed_pc = (uint32_t) - 1;
1384 return;
1385 case 0x0003:
1386 CHECK_NOT_DELAY_SLOT
1387 tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
1388 tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr);
1389 ctx->envflags |= DELAY_SLOT;
1390 ctx->delayed_pc = (uint32_t) - 1;
1391 return;
1392 case 0x4015:
1393 tcg_gen_setcondi_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), 0);
1394 return;
1395 case 0x4011:
1396 tcg_gen_setcondi_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), 0);
1397 return;
1398 case 0x4010:
1399 tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 1);
1400 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), 0);
1401 return;
1402 case 0x402b:
1403 CHECK_NOT_DELAY_SLOT
1404 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1405 ctx->envflags |= DELAY_SLOT;
1406 ctx->delayed_pc = (uint32_t) - 1;
1407 return;
1408 case 0x400b:
1409 CHECK_NOT_DELAY_SLOT
1410 tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
1411 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1412 ctx->envflags |= DELAY_SLOT;
1413 ctx->delayed_pc = (uint32_t) - 1;
1414 return;
1415 case 0x400e:
1416 CHECK_PRIVILEGED
1417 {
1418 TCGv val = tcg_temp_new();
1419 tcg_gen_andi_i32(val, REG(B11_8), 0x700083f3);
1420 gen_write_sr(val);
1421 tcg_temp_free(val);
1422 ctx->base.is_jmp = DISAS_STOP;
1423 }
1424 return;
1425 case 0x4007:
1426 CHECK_PRIVILEGED
1427 {
1428 TCGv val = tcg_temp_new();
1429 tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TESL);
1430 tcg_gen_andi_i32(val, val, 0x700083f3);
1431 gen_write_sr(val);
1432 tcg_temp_free(val);
1433 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1434 ctx->base.is_jmp = DISAS_STOP;
1435 }
1436 return;
1437 case 0x0002:
1438 CHECK_PRIVILEGED
1439 gen_read_sr(REG(B11_8));
1440 return;
1441 case 0x4003:
1442 CHECK_PRIVILEGED
1443 {
1444 TCGv addr = tcg_temp_new();
1445 TCGv val = tcg_temp_new();
1446 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1447 gen_read_sr(val);
1448 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL);
1449 tcg_gen_mov_i32(REG(B11_8), addr);
1450 tcg_temp_free(val);
1451 tcg_temp_free(addr);
1452 }
1453 return;
1454#define LD(reg,ldnum,ldpnum,prechk) \
1455 case ldnum: \
1456 prechk \
1457 tcg_gen_mov_i32 (cpu_##reg, REG(B11_8)); \
1458 return; \
1459 case ldpnum: \
1460 prechk \
1461 tcg_gen_qemu_ld_i32(cpu_##reg, REG(B11_8), ctx->memidx, MO_TESL); \
1462 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); \
1463 return;
1464#define ST(reg,stnum,stpnum,prechk) \
1465 case stnum: \
1466 prechk \
1467 tcg_gen_mov_i32 (REG(B11_8), cpu_##reg); \
1468 return; \
1469 case stpnum: \
1470 prechk \
1471 { \
1472 TCGv addr = tcg_temp_new(); \
1473 tcg_gen_subi_i32(addr, REG(B11_8), 4); \
1474 tcg_gen_qemu_st_i32(cpu_##reg, addr, ctx->memidx, MO_TEUL); \
1475 tcg_gen_mov_i32(REG(B11_8), addr); \
1476 tcg_temp_free(addr); \
1477 } \
1478 return;
1479#define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \
1480 LD(reg,ldnum,ldpnum,prechk) \
1481 ST(reg,stnum,stpnum,prechk)
1482 LDST(gbr, 0x401e, 0x4017, 0x0012, 0x4013, {})
1483 LDST(vbr, 0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED)
1484 LDST(ssr, 0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED)
1485 LDST(spc, 0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED)
1486 ST(sgr, 0x003a, 0x4032, CHECK_PRIVILEGED)
1487 LD(sgr, 0x403a, 0x4036, CHECK_PRIVILEGED CHECK_SH4A)
1488 LDST(dbr, 0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED)
1489 LDST(mach, 0x400a, 0x4006, 0x000a, 0x4002, {})
1490 LDST(macl, 0x401a, 0x4016, 0x001a, 0x4012, {})
1491 LDST(pr, 0x402a, 0x4026, 0x002a, 0x4022, {})
1492 LDST(fpul, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED})
1493 case 0x406a:
1494 CHECK_FPU_ENABLED
1495 gen_helper_ld_fpscr(cpu_env, REG(B11_8));
1496 ctx->base.is_jmp = DISAS_STOP;
1497 return;
1498 case 0x4066:
1499 CHECK_FPU_ENABLED
1500 {
1501 TCGv addr = tcg_temp_new();
1502 tcg_gen_qemu_ld_i32(addr, REG(B11_8), ctx->memidx, MO_TESL);
1503 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1504 gen_helper_ld_fpscr(cpu_env, addr);
1505 tcg_temp_free(addr);
1506 ctx->base.is_jmp = DISAS_STOP;
1507 }
1508 return;
1509 case 0x006a:
1510 CHECK_FPU_ENABLED
1511 tcg_gen_andi_i32(REG(B11_8), cpu_fpscr, 0x003fffff);
1512 return;
1513 case 0x4062:
1514 CHECK_FPU_ENABLED
1515 {
1516 TCGv addr, val;
1517 val = tcg_temp_new();
1518 tcg_gen_andi_i32(val, cpu_fpscr, 0x003fffff);
1519 addr = tcg_temp_new();
1520 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1521 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL);
1522 tcg_gen_mov_i32(REG(B11_8), addr);
1523 tcg_temp_free(addr);
1524 tcg_temp_free(val);
1525 }
1526 return;
1527 case 0x00c3:
1528 {
1529 TCGv val = tcg_temp_new();
1530 tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TEUL);
1531 gen_helper_movcal(cpu_env, REG(B11_8), val);
1532 tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
1533 tcg_temp_free(val);
1534 }
1535 ctx->has_movcal = 1;
1536 return;
1537 case 0x40a9:
1538 CHECK_SH4A
1539
1540 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
1541 MO_TEUL | MO_UNALN);
1542 return;
1543 break;
1544 case 0x40e9:
1545 CHECK_SH4A
1546
1547 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
1548 MO_TEUL | MO_UNALN);
1549 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1550 return;
1551 break;
1552 case 0x0029:
1553 tcg_gen_mov_i32(REG(B11_8), cpu_sr_t);
1554 return;
1555 case 0x0073:
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565 CHECK_SH4A
1566 {
1567 TCGLabel *fail = gen_new_label();
1568 TCGLabel *done = gen_new_label();
1569
1570 if ((tb_cflags(ctx->base.tb) & CF_PARALLEL)) {
1571 TCGv tmp;
1572
1573 tcg_gen_brcond_i32(TCG_COND_NE, REG(B11_8),
1574 cpu_lock_addr, fail);
1575 tmp = tcg_temp_new();
1576 tcg_gen_atomic_cmpxchg_i32(tmp, REG(B11_8), cpu_lock_value,
1577 REG(0), ctx->memidx, MO_TEUL);
1578 tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, tmp, cpu_lock_value);
1579 tcg_temp_free(tmp);
1580 } else {
1581 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_lock_addr, -1, fail);
1582 tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
1583 tcg_gen_movi_i32(cpu_sr_t, 1);
1584 }
1585 tcg_gen_br(done);
1586
1587 gen_set_label(fail);
1588 tcg_gen_movi_i32(cpu_sr_t, 0);
1589
1590 gen_set_label(done);
1591 tcg_gen_movi_i32(cpu_lock_addr, -1);
1592 }
1593 return;
1594 case 0x0063:
1595
1596
1597
1598
1599
1600
1601
1602
1603 CHECK_SH4A
1604 if ((tb_cflags(ctx->base.tb) & CF_PARALLEL)) {
1605 TCGv tmp = tcg_temp_new();
1606 tcg_gen_mov_i32(tmp, REG(B11_8));
1607 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TESL);
1608 tcg_gen_mov_i32(cpu_lock_value, REG(0));
1609 tcg_gen_mov_i32(cpu_lock_addr, tmp);
1610 tcg_temp_free(tmp);
1611 } else {
1612 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TESL);
1613 tcg_gen_movi_i32(cpu_lock_addr, 0);
1614 }
1615 return;
1616 case 0x0093:
1617 {
1618 gen_helper_ocbi(cpu_env, REG(B11_8));
1619 }
1620 return;
1621 case 0x00a3:
1622 case 0x00b3:
1623
1624
1625
1626 return;
1627 case 0x0083:
1628 return;
1629 case 0x00d3:
1630 CHECK_SH4A
1631 return;
1632 case 0x00e3:
1633 CHECK_SH4A
1634 return;
1635 case 0x00ab:
1636 CHECK_SH4A
1637 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1638 return;
1639 break;
1640 case 0x4024:
1641 {
1642 TCGv tmp = tcg_temp_new();
1643 tcg_gen_mov_i32(tmp, cpu_sr_t);
1644 tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
1645 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1646 tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
1647 tcg_temp_free(tmp);
1648 }
1649 return;
1650 case 0x4025:
1651 {
1652 TCGv tmp = tcg_temp_new();
1653 tcg_gen_shli_i32(tmp, cpu_sr_t, 31);
1654 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1655 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1656 tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
1657 tcg_temp_free(tmp);
1658 }
1659 return;
1660 case 0x4004:
1661 tcg_gen_rotli_i32(REG(B11_8), REG(B11_8), 1);
1662 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 0);
1663 return;
1664 case 0x4005:
1665 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 0);
1666 tcg_gen_rotri_i32(REG(B11_8), REG(B11_8), 1);
1667 return;
1668 case 0x4000:
1669 case 0x4020:
1670 tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
1671 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1672 return;
1673 case 0x4021:
1674 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1675 tcg_gen_sari_i32(REG(B11_8), REG(B11_8), 1);
1676 return;
1677 case 0x4001:
1678 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1679 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1680 return;
1681 case 0x4008:
1682 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 2);
1683 return;
1684 case 0x4018:
1685 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 8);
1686 return;
1687 case 0x4028:
1688 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 16);
1689 return;
1690 case 0x4009:
1691 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 2);
1692 return;
1693 case 0x4019:
1694 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 8);
1695 return;
1696 case 0x4029:
1697 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 16);
1698 return;
1699 case 0x401b:
1700 {
1701 TCGv val = tcg_const_i32(0x80);
1702 tcg_gen_atomic_fetch_or_i32(val, REG(B11_8), val,
1703 ctx->memidx, MO_UB);
1704 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
1705 tcg_temp_free(val);
1706 }
1707 return;
1708 case 0xf00d:
1709 CHECK_FPU_ENABLED
1710 tcg_gen_mov_i32(FREG(B11_8), cpu_fpul);
1711 return;
1712 case 0xf01d:
1713 CHECK_FPU_ENABLED
1714 tcg_gen_mov_i32(cpu_fpul, FREG(B11_8));
1715 return;
1716 case 0xf02d:
1717 CHECK_FPU_ENABLED
1718 if (ctx->tbflags & FPSCR_PR) {
1719 TCGv_i64 fp;
1720 if (ctx->opcode & 0x0100) {
1721 goto do_illegal;
1722 }
1723 fp = tcg_temp_new_i64();
1724 gen_helper_float_DT(fp, cpu_env, cpu_fpul);
1725 gen_store_fpr64(ctx, fp, B11_8);
1726 tcg_temp_free_i64(fp);
1727 }
1728 else {
1729 gen_helper_float_FT(FREG(B11_8), cpu_env, cpu_fpul);
1730 }
1731 return;
1732 case 0xf03d:
1733 CHECK_FPU_ENABLED
1734 if (ctx->tbflags & FPSCR_PR) {
1735 TCGv_i64 fp;
1736 if (ctx->opcode & 0x0100) {
1737 goto do_illegal;
1738 }
1739 fp = tcg_temp_new_i64();
1740 gen_load_fpr64(ctx, fp, B11_8);
1741 gen_helper_ftrc_DT(cpu_fpul, cpu_env, fp);
1742 tcg_temp_free_i64(fp);
1743 }
1744 else {
1745 gen_helper_ftrc_FT(cpu_fpul, cpu_env, FREG(B11_8));
1746 }
1747 return;
1748 case 0xf04d:
1749 CHECK_FPU_ENABLED
1750 tcg_gen_xori_i32(FREG(B11_8), FREG(B11_8), 0x80000000);
1751 return;
1752 case 0xf05d:
1753 CHECK_FPU_ENABLED
1754 tcg_gen_andi_i32(FREG(B11_8), FREG(B11_8), 0x7fffffff);
1755 return;
1756 case 0xf06d:
1757 CHECK_FPU_ENABLED
1758 if (ctx->tbflags & FPSCR_PR) {
1759 if (ctx->opcode & 0x0100) {
1760 goto do_illegal;
1761 }
1762 TCGv_i64 fp = tcg_temp_new_i64();
1763 gen_load_fpr64(ctx, fp, B11_8);
1764 gen_helper_fsqrt_DT(fp, cpu_env, fp);
1765 gen_store_fpr64(ctx, fp, B11_8);
1766 tcg_temp_free_i64(fp);
1767 } else {
1768 gen_helper_fsqrt_FT(FREG(B11_8), cpu_env, FREG(B11_8));
1769 }
1770 return;
1771 case 0xf07d:
1772 CHECK_FPU_ENABLED
1773 CHECK_FPSCR_PR_0
1774 gen_helper_fsrra_FT(FREG(B11_8), cpu_env, FREG(B11_8));
1775 break;
1776 case 0xf08d:
1777 CHECK_FPU_ENABLED
1778 CHECK_FPSCR_PR_0
1779 tcg_gen_movi_i32(FREG(B11_8), 0);
1780 return;
1781 case 0xf09d:
1782 CHECK_FPU_ENABLED
1783 CHECK_FPSCR_PR_0
1784 tcg_gen_movi_i32(FREG(B11_8), 0x3f800000);
1785 return;
1786 case 0xf0ad:
1787 CHECK_FPU_ENABLED
1788 {
1789 TCGv_i64 fp = tcg_temp_new_i64();
1790 gen_helper_fcnvsd_FT_DT(fp, cpu_env, cpu_fpul);
1791 gen_store_fpr64(ctx, fp, B11_8);
1792 tcg_temp_free_i64(fp);
1793 }
1794 return;
1795 case 0xf0bd:
1796 CHECK_FPU_ENABLED
1797 {
1798 TCGv_i64 fp = tcg_temp_new_i64();
1799 gen_load_fpr64(ctx, fp, B11_8);
1800 gen_helper_fcnvds_DT_FT(cpu_fpul, cpu_env, fp);
1801 tcg_temp_free_i64(fp);
1802 }
1803 return;
1804 case 0xf0ed:
1805 CHECK_FPU_ENABLED
1806 CHECK_FPSCR_PR_1
1807 {
1808 TCGv m = tcg_const_i32((ctx->opcode >> 8) & 3);
1809 TCGv n = tcg_const_i32((ctx->opcode >> 10) & 3);
1810 gen_helper_fipr(cpu_env, m, n);
1811 tcg_temp_free(m);
1812 tcg_temp_free(n);
1813 return;
1814 }
1815 break;
1816 case 0xf0fd:
1817 CHECK_FPU_ENABLED
1818 CHECK_FPSCR_PR_1
1819 {
1820 if ((ctx->opcode & 0x0300) != 0x0100) {
1821 goto do_illegal;
1822 }
1823 TCGv n = tcg_const_i32((ctx->opcode >> 10) & 3);
1824 gen_helper_ftrv(cpu_env, n);
1825 tcg_temp_free(n);
1826 return;
1827 }
1828 break;
1829 }
1830#if 0
1831 fprintf(stderr, "unknown instruction 0x%04x at pc 0x%08x\n",
1832 ctx->opcode, ctx->base.pc_next);
1833 fflush(stderr);
1834#endif
1835 do_illegal:
1836 if (ctx->envflags & DELAY_SLOT_MASK) {
1837 do_illegal_slot:
1838 gen_save_cpu_state(ctx, true);
1839 gen_helper_raise_slot_illegal_instruction(cpu_env);
1840 } else {
1841 gen_save_cpu_state(ctx, true);
1842 gen_helper_raise_illegal_instruction(cpu_env);
1843 }
1844 ctx->base.is_jmp = DISAS_NORETURN;
1845 return;
1846
1847 do_fpu_disabled:
1848 gen_save_cpu_state(ctx, true);
1849 if (ctx->envflags & DELAY_SLOT_MASK) {
1850 gen_helper_raise_slot_fpu_disable(cpu_env);
1851 } else {
1852 gen_helper_raise_fpu_disable(cpu_env);
1853 }
1854 ctx->base.is_jmp = DISAS_NORETURN;
1855 return;
1856}
1857
1858static void decode_opc(DisasContext * ctx)
1859{
1860 uint32_t old_flags = ctx->envflags;
1861
1862 _decode_opc(ctx);
1863
1864 if (old_flags & DELAY_SLOT_MASK) {
1865
1866 ctx->envflags &= ~DELAY_SLOT_MASK;
1867
1868
1869
1870 if (ctx->tbflags & GUSA_EXCLUSIVE
1871 && old_flags & DELAY_SLOT_CONDITIONAL) {
1872 gen_delayed_conditional_jump(ctx);
1873 return;
1874 }
1875
1876
1877 ctx->envflags &= ~GUSA_MASK;
1878
1879 tcg_gen_movi_i32(cpu_flags, ctx->envflags);
1880 if (old_flags & DELAY_SLOT_CONDITIONAL) {
1881 gen_delayed_conditional_jump(ctx);
1882 } else {
1883 gen_jump(ctx);
1884 }
1885 }
1886}
1887
1888#ifdef CONFIG_USER_ONLY
1889
1890
1891
1892
1893
1894
1895
1896
1897
/*
 * Try to recognize a gUSA (user-space restartable atomic) region as one of
 * the known load / optional-move / op / store shapes, and replace the whole
 * sequence with a single host atomic operation.  On any mismatch, fall back
 * to re-executing the region under the exclusive lock (see "fail" below).
 */
static void decode_gusa(DisasContext *ctx, CPUSH4State *env)
{
    uint16_t insns[5];
    int ld_adr, ld_dst, ld_mop;           /* the initial load */
    int op_dst, op_src, op_opc;           /* the ALU operation */
    int mv_src, mt_dst, st_src, st_mop;   /* mov / movt / final store */
    TCGv op_arg;
    uint32_t pc = ctx->base.pc_next;
    uint32_t pc_end = ctx->base.tb->cs_base;
    int max_insns = (pc_end - pc) / 2;
    int i;

    /* The state machine below will consume only a few insns.
       If there are more than that, then we cannot instrument the region.  */
    if (max_insns > ARRAY_SIZE(insns)) {
        goto fail;
    }

    /* Read all of the insns for the region.  */
    for (i = 0; i < max_insns; ++i) {
        insns[i] = cpu_lduw_code(env, pc + i * 2);
    }

    ld_adr = ld_dst = ld_mop = -1;
    mv_src = -1;
    op_dst = op_src = op_opc = -1;
    mt_dst = -1;
    st_src = st_mop = -1;
    op_arg = NULL;
    i = 0;

#define NEXT_INSN \
    do { if (i >= max_insns) goto fail; ctx->opcode = insns[i++]; } while (0)

    /*
     * Expect a load to begin the region.
     */
    NEXT_INSN;
    switch (ctx->opcode & 0xf00f) {
    case 0x6000: /* mov.b @Rm,Rn */
        ld_mop = MO_SB;
        break;
    case 0x6001: /* mov.w @Rm,Rn */
        ld_mop = MO_TESW;
        break;
    case 0x6002: /* mov.l @Rm,Rn */
        ld_mop = MO_TESL;
        break;
    default:
        goto fail;
    }
    ld_adr = B7_4;
    ld_dst = B11_8;
    if (ld_adr == ld_dst) {
        goto fail;
    }
    /* Unless we see a mov, any two-operand operation must use ld_dst.  */
    op_dst = ld_dst;

    /*
     * Expect an optional register move.
     */
    NEXT_INSN;
    switch (ctx->opcode & 0xf00f) {
    case 0x6003: /* mov Rm,Rn */
        /*
         * Either the load value is saved away for later use, or another
         * input register is copied so the load output need not be clobbered
         * during the operation.
         */
        op_dst = B11_8;
        mv_src = B7_4;
        if (op_dst == ld_dst) {
            /* Overwriting the load output: not a shape we handle.  */
            goto fail;
        }
        if (mv_src != ld_dst) {
            /* Copying a new input; the op must consume the load output.  */
            op_src = ld_dst;
        }
        break;

    default:
        /* Put back and re-examine as operation.  */
        --i;
    }

    /*
     * Expect the operation.
     */
    NEXT_INSN;
    switch (ctx->opcode & 0xf00f) {
    case 0x300c: /* add Rm,Rn */
        op_opc = INDEX_op_add_i32;
        goto do_reg_op;
    case 0x2009: /* and Rm,Rn */
        op_opc = INDEX_op_and_i32;
        goto do_reg_op;
    case 0x200a: /* xor Rm,Rn */
        op_opc = INDEX_op_xor_i32;
        goto do_reg_op;
    case 0x200b: /* or Rm,Rn */
        op_opc = INDEX_op_or_i32;
    do_reg_op:
        /* The operation register should be as expected, and the
           other input cannot depend on the load.  */
        if (op_dst != B11_8) {
            goto fail;
        }
        if (op_src < 0) {
            /* Unconstrained input.  */
            op_src = B7_4;
        } else if (op_src == B7_4) {
            /* Constrained input matched the load.  All of these operations
               are commutative; "swap" them by treating the load output as
               the implicit first argument and the move source as the
               explicit second argument.  */
            op_src = mv_src;
        } else {
            goto fail;
        }
        op_arg = REG(op_src);
        break;

    case 0x6007: /* not Rm,Rn */
        if (ld_dst != B7_4 || mv_src >= 0) {
            goto fail;
        }
        op_dst = B11_8;
        op_opc = INDEX_op_xor_i32;
        op_arg = tcg_const_i32(-1);   /* not == xor with -1 */
        break;

    case 0x7000 ... 0x700f: /* add #imm,Rn */
        if (op_dst != B11_8 || mv_src >= 0) {
            goto fail;
        }
        op_opc = INDEX_op_add_i32;
        op_arg = tcg_const_i32(B7_0s);
        break;

    case 0x3000: /* cmp/eq Rm,Rn */
        /* Looking for the middle of a compare-and-swap sequence, beginning
           with the compare.  Operands may be in either order, but exactly
           one must overlap the load.  */
        if ((ld_dst == B11_8) + (ld_dst == B7_4) != 1 || mv_src >= 0) {
            goto fail;
        }
        op_opc = INDEX_op_setcond_i32;   /* placeholder for CAS */
        op_src = (ld_dst == B11_8 ? B7_4 : B11_8);
        op_arg = REG(op_src);

        NEXT_INSN;
        switch (ctx->opcode & 0xff00) {
        case 0x8b00: /* bf label */
        case 0x8f00: /* bf/s label */
            /* The branch must target the end of the region.  */
            if (pc + (i + 1 + B7_0s) * 2 != pc_end) {
                goto fail;
            }
            if ((ctx->opcode & 0xff00) == 0x8b00) { /* bf label */
                break;
            }
            /* For bf/s, expect the comparison result to be copied into a
               register within the delay slot.  */
            NEXT_INSN;
            if ((ctx->opcode & 0xf0ff) == 0x0029) { /* movt Rn */
                mt_dst = B11_8;
            } else {
                goto fail;
            }
            break;

        default:
            goto fail;
        }
        break;

    case 0x2008: /* tst Rm,Rn */
        /* Looking for a compare-and-swap against zero; both operands must
           be the loaded value.  */
        if (ld_dst != B11_8 || ld_dst != B7_4 || mv_src >= 0) {
            goto fail;
        }
        op_opc = INDEX_op_setcond_i32;
        op_arg = tcg_const_i32(0);

        NEXT_INSN;
        if ((ctx->opcode & 0xff00) != 0x8900 /* bt label */
            || pc + (i + 1 + B7_0s) * 2 != pc_end) {
            goto fail;
        }
        break;

    default:
        /* Put back and re-examine as store.  */
        --i;
    }

    /*
     * Expect the store, which must be the last insn of the region.
     */
    if (i != max_insns - 1) {
        goto fail;
    }
    NEXT_INSN;
    switch (ctx->opcode & 0xf00f) {
    case 0x2000: /* mov.b Rm,@Rn */
        st_mop = MO_UB;
        break;
    case 0x2001: /* mov.w Rm,@Rn */
        st_mop = MO_UW;
        break;
    case 0x2002: /* mov.l Rm,@Rn */
        st_mop = MO_UL;
        break;
    default:
        goto fail;
    }
    /* The store must match the load: same address register, same width.  */
    if (ld_adr != B11_8 || st_mop != (ld_mop & MO_SIZE)) {
        goto fail;
    }
    st_src = B7_4;

#undef NEXT_INSN

    /*
     * Emit the matched sequence as a single atomic operation.
     */
    switch (op_opc) {
    case -1:
        /* No operation found.  Look for an exchange pattern.  */
        if (st_src == ld_dst || mv_src >= 0) {
            goto fail;
        }
        tcg_gen_atomic_xchg_i32(REG(ld_dst), REG(ld_adr), REG(st_src),
                                ctx->memidx, ld_mop);
        break;

    case INDEX_op_add_i32:
        if (op_dst != st_src) {
            goto fail;
        }
        if (op_dst == ld_dst && st_mop == MO_UL) {
            tcg_gen_atomic_add_fetch_i32(REG(ld_dst), REG(ld_adr),
                                         op_arg, ctx->memidx, ld_mop);
        } else {
            tcg_gen_atomic_fetch_add_i32(REG(ld_dst), REG(ld_adr),
                                         op_arg, ctx->memidx, ld_mop);
            if (op_dst != ld_dst) {
                /* Note that mop sizes < 4 cannot use add_fetch because
                   it won't carry into the higher bits.  */
                tcg_gen_add_i32(REG(op_dst), REG(ld_dst), op_arg);
            }
        }
        break;

    case INDEX_op_and_i32:
        if (op_dst != st_src) {
            goto fail;
        }
        if (op_dst == ld_dst) {
            tcg_gen_atomic_and_fetch_i32(REG(ld_dst), REG(ld_adr),
                                         op_arg, ctx->memidx, ld_mop);
        } else {
            tcg_gen_atomic_fetch_and_i32(REG(ld_dst), REG(ld_adr),
                                         op_arg, ctx->memidx, ld_mop);
            tcg_gen_and_i32(REG(op_dst), REG(ld_dst), op_arg);
        }
        break;

    case INDEX_op_or_i32:
        if (op_dst != st_src) {
            goto fail;
        }
        if (op_dst == ld_dst) {
            tcg_gen_atomic_or_fetch_i32(REG(ld_dst), REG(ld_adr),
                                        op_arg, ctx->memidx, ld_mop);
        } else {
            tcg_gen_atomic_fetch_or_i32(REG(ld_dst), REG(ld_adr),
                                        op_arg, ctx->memidx, ld_mop);
            tcg_gen_or_i32(REG(op_dst), REG(ld_dst), op_arg);
        }
        break;

    case INDEX_op_xor_i32:
        if (op_dst != st_src) {
            goto fail;
        }
        if (op_dst == ld_dst) {
            tcg_gen_atomic_xor_fetch_i32(REG(ld_dst), REG(ld_adr),
                                         op_arg, ctx->memidx, ld_mop);
        } else {
            tcg_gen_atomic_fetch_xor_i32(REG(ld_dst), REG(ld_adr),
                                         op_arg, ctx->memidx, ld_mop);
            tcg_gen_xor_i32(REG(op_dst), REG(ld_dst), op_arg);
        }
        break;

    case INDEX_op_setcond_i32:
        /* Compare-and-swap: store only if the loaded value matched op_arg;
           T gets the comparison result.  */
        if (st_src == ld_dst) {
            goto fail;
        }
        tcg_gen_atomic_cmpxchg_i32(REG(ld_dst), REG(ld_adr), op_arg,
                                   REG(st_src), ctx->memidx, ld_mop);
        tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, REG(ld_dst), op_arg);
        if (mt_dst >= 0) {
            /* movt in the delay slot: copy T into the requested register.  */
            tcg_gen_mov_i32(REG(mt_dst), cpu_sr_t);
        }
        break;

    default:
        g_assert_not_reached();
    }

    /* If op_src is not a valid register, then op_arg was a constant.  */
    if (op_src < 0 && op_arg) {
        tcg_temp_free_i32(op_arg);
    }

    /* The entire region has been translated.  */
    ctx->envflags &= ~GUSA_MASK;
    ctx->base.pc_next = pc_end;
    ctx->base.num_insns += max_insns - 1;
    return;

 fail:
    qemu_log_mask(LOG_UNIMP, "Unrecognized gUSA sequence %08x-%08x\n",
                  pc, pc_end);

    /* Restart with the EXCLUSIVE bit set, so the region is re-executed
       serialized while holding the exclusive lock.  */
    ctx->envflags |= GUSA_EXCLUSIVE;
    gen_save_cpu_state(ctx, false);
    gen_helper_exclusive(cpu_env);
    ctx->base.is_jmp = DISAS_NORETURN;

    /* We are not executing an instruction here, but we must still account
       for the whole region so that pc_next and num_insns stay consistent
       with the translator loop.  */
    ctx->base.pc_next = pc_end;
    ctx->base.num_insns += max_insns - 1;
}
2241#endif
2242
2243static void sh4_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
2244{
2245 DisasContext *ctx = container_of(dcbase, DisasContext, base);
2246 CPUSH4State *env = cs->env_ptr;
2247 uint32_t tbflags;
2248 int bound;
2249
2250 ctx->tbflags = tbflags = ctx->base.tb->flags;
2251 ctx->envflags = tbflags & TB_FLAG_ENVFLAGS_MASK;
2252 ctx->memidx = (tbflags & (1u << SR_MD)) == 0 ? 1 : 0;
2253
2254
2255 ctx->delayed_pc = -1;
2256 ctx->features = env->features;
2257 ctx->has_movcal = (tbflags & TB_FLAG_PENDING_MOVCA);
2258 ctx->gbank = ((tbflags & (1 << SR_MD)) &&
2259 (tbflags & (1 << SR_RB))) * 0x10;
2260 ctx->fbank = tbflags & FPSCR_FR ? 0x10 : 0;
2261
2262 if (tbflags & GUSA_MASK) {
2263 uint32_t pc = ctx->base.pc_next;
2264 uint32_t pc_end = ctx->base.tb->cs_base;
2265 int backup = sextract32(ctx->tbflags, GUSA_SHIFT, 8);
2266 int max_insns = (pc_end - pc) / 2;
2267
2268 if (pc != pc_end + backup || max_insns < 2) {
2269
2270
2271 ctx->envflags &= ~GUSA_MASK;
2272 } else if (tbflags & GUSA_EXCLUSIVE) {
2273
2274
2275
2276 ctx->base.max_insns = max_insns;
2277 return;
2278 }
2279 }
2280
2281
2282
2283 bound = -(ctx->base.pc_next | TARGET_PAGE_MASK) / 2;
2284 ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
2285}
2286
/* Translator hook: nothing to emit at the start of a translation block.  */
static void sh4_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
{
}
2290
/*
 * Translator hook: record pc and envflags for this insn so they can be
 * restored on exception (consumed by restore_state_to_opc).
 */
static void sh4_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(ctx->base.pc_next, ctx->envflags);
}
2297
/* Translator hook: emit a debug exception for a breakpoint hit.  */
static bool sh4_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
                                    const CPUBreakpoint *bp)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    /* We have hit a breakpoint - make sure PC is up-to-date.  */
    gen_save_cpu_state(ctx, true);
    gen_helper_debug(cpu_env);
    ctx->base.is_jmp = DISAS_NORETURN;
    /* The address covered by the breakpoint must be included in
       [tb->pc, tb->pc + tb->size) in order for it to be properly
       cleared -- thus we increment the PC here so that the logic
       setting tb->size below does the right thing.  */
    ctx->base.pc_next += 2;
    return true;
}
2314
/* Translator hook: fetch and translate one instruction.  */
static void sh4_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    CPUSH4State *env = cs->env_ptr;
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

#ifdef CONFIG_USER_ONLY
    if (unlikely(ctx->envflags & GUSA_MASK)
        && !(ctx->envflags & GUSA_EXCLUSIVE)) {
        /* We're in a gUSA region and have not yet fallen back on using
           an exclusive region.  Attempt to parse the region into a
           single supported atomic operation; on failure decode_gusa
           itself arranges a retry under the exclusive lock.  */
        decode_gusa(ctx, env);
        return;
    }
#endif

    ctx->opcode = cpu_lduw_code(env, ctx->base.pc_next);
    decode_opc(ctx);
    ctx->base.pc_next += 2;
}
2337
/* Translator hook: emit the epilogue for the translation block.  */
static void sh4_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    if (ctx->tbflags & GUSA_EXCLUSIVE) {
        /* Ending the region of exclusivity.  Clear the bits.  */
        ctx->envflags &= ~GUSA_MASK;
    }

    switch (ctx->base.is_jmp) {
    case DISAS_STOP:
        /* State was changed mid-TB (e.g. SR/FPSCR written): save state
           and exit to the main loop rather than chaining.  */
        gen_save_cpu_state(ctx, true);
        if (ctx->base.singlestep_enabled) {
            gen_helper_debug(cpu_env);
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
        break;
    case DISAS_NEXT:
    case DISAS_TOO_MANY:
        /* Fall through to the next TB.  */
        gen_save_cpu_state(ctx, false);
        gen_goto_tb(ctx, 0, ctx->base.pc_next);
        break;
    case DISAS_NORETURN:
        /* An exception or jump has already ended the TB.  */
        break;
    default:
        g_assert_not_reached();
    }
}
2367
/* Translator hook: dump the guest instructions of this TB to the log.  */
static void sh4_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
{
    qemu_log("IN:\n");
    log_target_disas(cs, dcbase->pc_first, dcbase->tb->size);
}
2373
/* Callbacks wiring the SH4 front end into the generic translator loop.  */
static const TranslatorOps sh4_tr_ops = {
    .init_disas_context = sh4_tr_init_disas_context,
    .tb_start           = sh4_tr_tb_start,
    .insn_start         = sh4_tr_insn_start,
    .breakpoint_check   = sh4_tr_breakpoint_check,
    .translate_insn     = sh4_tr_translate_insn,
    .tb_stop            = sh4_tr_tb_stop,
    .disas_log          = sh4_tr_disas_log,
};
2383
/* Entry point: translate one TB via the generic translator loop.  */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
{
    DisasContext ctx;

    translator_loop(&sh4_tr_ops, &ctx.base, cs, tb);
}
2390
/*
 * Restore CPU state from the values recorded at insn start
 * (see sh4_tr_insn_start: data[0] is pc, data[1] is envflags).
 */
void restore_state_to_opc(CPUSH4State *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->pc = data[0];
    env->flags = data[1];
    /* NOTE(review): delayed_pc is not restored here -- presumably the
       branch instruction is re-executed and regenerates it; confirm.  */
}
2400