qemu/target/sh4/translate.c
/*
 *  SH4 translation
 *
 *  Copyright (c) 2005 Samuel Tardieu
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#define DEBUG_DISAS

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"


typedef struct DisasContext {
    struct TranslationBlock *tb;
    target_ulong pc;
    uint16_t opcode;
    uint32_t tbflags;    /* should stay unmodified during the TB translation */
    uint32_t envflags;   /* should stay in sync with env->flags using TCG ops */
    int bstate;
    int memidx;
    int gbank;
    int fbank;
    uint32_t delayed_pc;
    int singlestep_enabled;
    uint32_t features;
    int has_movcal;
} DisasContext;

#if defined(CONFIG_USER_ONLY)
#define IS_USER(ctx) 1
#else
#define IS_USER(ctx) (!(ctx->tbflags & (1u << SR_MD)))
#endif

enum {
    BS_NONE     = 0, /* We go out of the TB without reaching a branch or an
                      * exception condition
                      */
    BS_STOP     = 1, /* We want to stop translation for any reason */
    BS_BRANCH   = 2, /* We reached a branch condition     */
    BS_EXCP     = 3, /* We reached an exception condition */
};

/* global register indexes */
static TCGv_env cpu_env;
static TCGv cpu_gregs[32];
static TCGv cpu_sr, cpu_sr_m, cpu_sr_q, cpu_sr_t;
static TCGv cpu_pc, cpu_ssr, cpu_spc, cpu_gbr;
static TCGv cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl;
static TCGv cpu_pr, cpu_fpscr, cpu_fpul, cpu_ldst;
static TCGv cpu_fregs[32];

/* internal register indexes */
static TCGv cpu_flags, cpu_delayed_pc, cpu_delayed_cond;

#include "exec/gen-icount.h"

void sh4_translate_init(void)
{
    int i;
    static int done_init = 0;
    static const char * const gregnames[24] = {
        "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
        "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
        "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
        "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
        "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
    };
    static const char * const fregnames[32] = {
         "FPR0_BANK0",  "FPR1_BANK0",  "FPR2_BANK0",  "FPR3_BANK0",
         "FPR4_BANK0",  "FPR5_BANK0",  "FPR6_BANK0",  "FPR7_BANK0",
         "FPR8_BANK0",  "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
        "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
         "FPR0_BANK1",  "FPR1_BANK1",  "FPR2_BANK1",  "FPR3_BANK1",
         "FPR4_BANK1",  "FPR5_BANK1",  "FPR6_BANK1",  "FPR7_BANK1",
         "FPR8_BANK1",  "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
        "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
    };

    if (done_init) {
        return;
    }

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    tcg_ctx.tcg_env = cpu_env;

    for (i = 0; i < 24; i++) {
        cpu_gregs[i] = tcg_global_mem_new_i32(cpu_env,
                                              offsetof(CPUSH4State, gregs[i]),
                                              gregnames[i]);
    }
    memcpy(cpu_gregs + 24, cpu_gregs + 8, 8 * sizeof(TCGv));
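    /* cpu_gregs[24..31] alias cpu_gregs[8..15]: R8-R15 are not banked,
       so REG()'s bank XOR (see below) must select the same globals for
       both register banks.  */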

    cpu_pc = tcg_global_mem_new_i32(cpu_env,
                                    offsetof(CPUSH4State, pc), "PC");
    cpu_sr = tcg_global_mem_new_i32(cpu_env,
                                    offsetof(CPUSH4State, sr), "SR");
    cpu_sr_m = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, sr_m), "SR_M");
    cpu_sr_q = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, sr_q), "SR_Q");
    cpu_sr_t = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, sr_t), "SR_T");
    cpu_ssr = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, ssr), "SSR");
    cpu_spc = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, spc), "SPC");
    cpu_gbr = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, gbr), "GBR");
    cpu_vbr = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, vbr), "VBR");
    cpu_sgr = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, sgr), "SGR");
    cpu_dbr = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, dbr), "DBR");
    cpu_mach = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, mach), "MACH");
    cpu_macl = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, macl), "MACL");
    cpu_pr = tcg_global_mem_new_i32(cpu_env,
                                    offsetof(CPUSH4State, pr), "PR");
    cpu_fpscr = tcg_global_mem_new_i32(cpu_env,
                                       offsetof(CPUSH4State, fpscr), "FPSCR");
    cpu_fpul = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, fpul), "FPUL");

    cpu_flags = tcg_global_mem_new_i32(cpu_env,
                                       offsetof(CPUSH4State, flags), "_flags_");
    cpu_delayed_pc = tcg_global_mem_new_i32(cpu_env,
                                            offsetof(CPUSH4State, delayed_pc),
                                            "_delayed_pc_");
    cpu_delayed_cond = tcg_global_mem_new_i32(cpu_env,
                                              offsetof(CPUSH4State,
                                                       delayed_cond),
                                              "_delayed_cond_");
    cpu_ldst = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, ldst), "_ldst_");

    for (i = 0; i < 32; i++) {
        cpu_fregs[i] = tcg_global_mem_new_i32(cpu_env,
                                              offsetof(CPUSH4State, fregs[i]),
                                              fregnames[i]);
    }

    done_init = 1;
}

void superh_cpu_dump_state(CPUState *cs, FILE *f,
                           fprintf_function cpu_fprintf, int flags)
{
    SuperHCPU *cpu = SUPERH_CPU(cs);
    CPUSH4State *env = &cpu->env;
    int i;
    cpu_fprintf(f, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
                env->pc, cpu_read_sr(env), env->pr, env->fpscr);
    cpu_fprintf(f, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
                env->spc, env->ssr, env->gbr, env->vbr);
    cpu_fprintf(f, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
                env->sgr, env->dbr, env->delayed_pc, env->fpul);
    for (i = 0; i < 24; i += 4) {
        cpu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
                    i, env->gregs[i], i + 1, env->gregs[i + 1],
                    i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]);
    }
    if (env->flags & DELAY_SLOT) {
        cpu_fprintf(f, "in delay slot (delayed_pc=0x%08x)\n",
                    env->delayed_pc);
    } else if (env->flags & DELAY_SLOT_CONDITIONAL) {
        cpu_fprintf(f, "in conditional delay slot (delayed_pc=0x%08x)\n",
                    env->delayed_pc);
    } else if (env->flags & DELAY_SLOT_RTE) {
        cpu_fprintf(f, "in rte delay slot (delayed_pc=0x%08x)\n",
                    env->delayed_pc);
    }
}

static void gen_read_sr(TCGv dst)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_shli_i32(t0, cpu_sr_q, SR_Q);
    tcg_gen_or_i32(dst, cpu_sr, t0);
    tcg_gen_shli_i32(t0, cpu_sr_m, SR_M);
    tcg_gen_or_i32(dst, dst, t0);
    tcg_gen_shli_i32(t0, cpu_sr_t, SR_T);
    tcg_gen_or_i32(dst, dst, t0);
    tcg_temp_free_i32(t0);
}

static void gen_write_sr(TCGv src)
{
    tcg_gen_andi_i32(cpu_sr, src,
                     ~((1u << SR_Q) | (1u << SR_M) | (1u << SR_T)));
    tcg_gen_extract_i32(cpu_sr_q, src, SR_Q, 1);
    tcg_gen_extract_i32(cpu_sr_m, src, SR_M, 1);
    tcg_gen_extract_i32(cpu_sr_t, src, SR_T, 1);
}
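
/* Note: cpu_sr holds SR with the Q, M and T bits permanently cleared;
   those three live in cpu_sr_q/cpu_sr_m/cpu_sr_t so that div0s/div1 and
   the T flag can be updated without read-modify-write cycles on the
   full SR.  gen_read_sr() reassembles the architectural value.  */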

static inline void gen_save_cpu_state(DisasContext *ctx, bool save_pc)
{
    if (save_pc) {
        tcg_gen_movi_i32(cpu_pc, ctx->pc);
    }
    if (ctx->delayed_pc != (uint32_t) -1) {
        tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
    }
    if ((ctx->tbflags & TB_FLAG_ENVFLAGS_MASK) != ctx->envflags) {
        tcg_gen_movi_i32(cpu_flags, ctx->envflags);
    }
}

static inline bool use_exit_tb(DisasContext *ctx)
{
    return (ctx->tbflags & GUSA_EXCLUSIVE) != 0;
}

static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
{
    /* Use a direct jump if in same page and singlestep not enabled */
    if (unlikely(ctx->singlestep_enabled || use_exit_tb(ctx))) {
        return false;
    }
#ifndef CONFIG_USER_ONLY
    return (ctx->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
{
    if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb + n);
    } else {
        tcg_gen_movi_i32(cpu_pc, dest);
        if (ctx->singlestep_enabled) {
            gen_helper_debug(cpu_env);
        } else if (use_exit_tb(ctx)) {
            tcg_gen_exit_tb(0);
        } else {
            tcg_gen_lookup_and_goto_ptr(cpu_pc);
        }
    }
}

static void gen_jump(DisasContext * ctx)
{
    if (ctx->delayed_pc == -1) {
        /* Target is not statically known; it necessarily comes from a
           delayed jump, since immediate jumps are conditional jumps.  */
        tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc);
        tcg_gen_discard_i32(cpu_delayed_pc);
        if (ctx->singlestep_enabled) {
            gen_helper_debug(cpu_env);
        } else if (use_exit_tb(ctx)) {
            tcg_gen_exit_tb(0);
        } else {
            tcg_gen_lookup_and_goto_ptr(cpu_pc);
        }
    } else {
        gen_goto_tb(ctx, 0, ctx->delayed_pc);
    }
}

/* Immediate conditional jump (bt or bf) */
static void gen_conditional_jump(DisasContext *ctx, target_ulong dest,
                                 bool jump_if_true)
{
    TCGLabel *l1 = gen_new_label();
    TCGCond cond_not_taken = jump_if_true ? TCG_COND_EQ : TCG_COND_NE;

    if (ctx->tbflags & GUSA_EXCLUSIVE) {
        /* When in an exclusive region, we must continue to the end.
           Therefore, exit the region on a taken branch, but otherwise
           fall through to the next instruction.  */
        tcg_gen_brcondi_i32(cond_not_taken, cpu_sr_t, 0, l1);
        tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~GUSA_MASK);
        /* Note that this won't actually use a goto_tb opcode because we
           disallow it in use_goto_tb, but it handles exit + singlestep.  */
        gen_goto_tb(ctx, 0, dest);
        gen_set_label(l1);
        return;
    }

    gen_save_cpu_state(ctx, false);
    tcg_gen_brcondi_i32(cond_not_taken, cpu_sr_t, 0, l1);
    gen_goto_tb(ctx, 0, dest);
    gen_set_label(l1);
    gen_goto_tb(ctx, 1, ctx->pc + 2);
    ctx->bstate = BS_BRANCH;
}

/* Delayed conditional jump (bt or bf) */
static void gen_delayed_conditional_jump(DisasContext * ctx)
{
    TCGLabel *l1 = gen_new_label();
    TCGv ds = tcg_temp_new();

    tcg_gen_mov_i32(ds, cpu_delayed_cond);
    tcg_gen_discard_i32(cpu_delayed_cond);

    if (ctx->tbflags & GUSA_EXCLUSIVE) {
        /* When in an exclusive region, we must continue to the end.
           Therefore, exit the region on a taken branch, but otherwise
           fall through to the next instruction.  */
        tcg_gen_brcondi_i32(TCG_COND_EQ, ds, 0, l1);

        /* Leave the gUSA region.  */
        tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~GUSA_MASK);
        gen_jump(ctx);

        gen_set_label(l1);
        return;
    }

    tcg_gen_brcondi_i32(TCG_COND_NE, ds, 0, l1);
    gen_goto_tb(ctx, 1, ctx->pc + 2);
    gen_set_label(l1);
    gen_jump(ctx);
}

static inline void gen_load_fpr64(DisasContext *ctx, TCGv_i64 t, int reg)
{
    /* We have already signaled illegal instruction for odd Dr.  */
    tcg_debug_assert((reg & 1) == 0);
    reg ^= ctx->fbank;
    tcg_gen_concat_i32_i64(t, cpu_fregs[reg + 1], cpu_fregs[reg]);
}

static inline void gen_store_fpr64(DisasContext *ctx, TCGv_i64 t, int reg)
{
    /* We have already signaled illegal instruction for odd Dr.  */
    tcg_debug_assert((reg & 1) == 0);
    reg ^= ctx->fbank;
    tcg_gen_extr_i64_i32(cpu_fregs[reg + 1], cpu_fregs[reg], t);
}

#define B3_0 (ctx->opcode & 0xf)
#define B6_4 ((ctx->opcode >> 4) & 0x7)
#define B7_4 ((ctx->opcode >> 4) & 0xf)
#define B7_0 (ctx->opcode & 0xff)
#define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff))
#define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
  (ctx->opcode & 0xfff))
#define B11_8 ((ctx->opcode >> 8) & 0xf)
#define B15_12 ((ctx->opcode >> 12) & 0xf)

#define REG(x)     cpu_gregs[(x) ^ ctx->gbank]
#define ALTREG(x)  cpu_gregs[(x) ^ ctx->gbank ^ 0x10]
#define FREG(x)    cpu_fregs[(x) ^ ctx->fbank]

#define XHACK(x) ((((x) & 1) << 4) | ((x) & 0xe))
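
/* REG/ALTREG/FREG select a TCG global by XOR-ing in the bank offset
   matching SR.RB (gbank) or FPSCR.FR (fbank).  XHACK remaps an XDn
   operand by moving bit 0 into bit 4, i.e. onto FPRn of the opposite
   bank, which is where the XD registers live when FPSCR.SZ is set.  */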

#define CHECK_NOT_DELAY_SLOT \
    if (ctx->envflags & DELAY_SLOT_MASK) {  \
        goto do_illegal_slot;               \
    }

#define CHECK_PRIVILEGED \
    if (IS_USER(ctx)) {                     \
        goto do_illegal;                    \
    }

#define CHECK_FPU_ENABLED \
    if (ctx->tbflags & (1u << SR_FD)) {     \
        goto do_fpu_disabled;               \
    }

#define CHECK_FPSCR_PR_0 \
    if (ctx->tbflags & FPSCR_PR) {          \
        goto do_illegal;                    \
    }

#define CHECK_FPSCR_PR_1 \
    if (!(ctx->tbflags & FPSCR_PR)) {       \
        goto do_illegal;                    \
    }

#define CHECK_SH4A \
    if (!(ctx->features & SH_FEATURE_SH4A)) { \
        goto do_illegal;                      \
    }

static void _decode_opc(DisasContext * ctx)
{
    /* This code tries to make movca.l emulation sufficiently
       accurate for Linux purposes.  This instruction writes
       memory, and prior to that, always allocates a cache line.
       It is used in two contexts:
       - in memcpy, where data is copied in blocks, the first write
       to a block uses movca.l for performance.
       - in arch/sh/mm/cache-sh4.c, the movca.l + ocbi combination is
       used to flush the cache. Here, the data written by movca.l is
       never written to memory, and the data written is just bogus.

       To simulate this, when we simulate movca.l, we store the value
       to memory, but we also remember the previous content. If we see
       ocbi, we check if movca.l for that address was done previously.
       If so, the write should not have hit the memory, so we restore
       the previous content.
       When we see an instruction that is neither movca.l
       nor ocbi, the previous content is discarded.

       To optimize, we only try to flush stores when we're at the start
       of the TB, or if we already saw movca.l in this TB and did not
       flush stores yet.  */
    if (ctx->has_movcal) {
        int opcode = ctx->opcode & 0xf0ff;
        if (opcode != 0x0093 /* ocbi */
            && opcode != 0x00c3 /* movca.l */) {
            gen_helper_discard_movcal_backup(cpu_env);
            ctx->has_movcal = 0;
        }
    }

#if 0
    fprintf(stderr, "Translating opcode 0x%04x\n", ctx->opcode);
#endif

    switch (ctx->opcode) {
    case 0x0019:                /* div0u */
        tcg_gen_movi_i32(cpu_sr_m, 0);
        tcg_gen_movi_i32(cpu_sr_q, 0);
        tcg_gen_movi_i32(cpu_sr_t, 0);
        return;
    case 0x000b:                /* rts */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr);
        ctx->envflags |= DELAY_SLOT;
        ctx->delayed_pc = (uint32_t) - 1;
        return;
    case 0x0028:                /* clrmac */
        tcg_gen_movi_i32(cpu_mach, 0);
        tcg_gen_movi_i32(cpu_macl, 0);
        return;
    case 0x0048:                /* clrs */
        tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(1u << SR_S));
        return;
    case 0x0008:                /* clrt */
        tcg_gen_movi_i32(cpu_sr_t, 0);
        return;
    case 0x0038:                /* ldtlb */
        CHECK_PRIVILEGED
        gen_helper_ldtlb(cpu_env);
        return;
    case 0x002b:                /* rte */
        CHECK_PRIVILEGED
        CHECK_NOT_DELAY_SLOT
        gen_write_sr(cpu_ssr);
        tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc);
        ctx->envflags |= DELAY_SLOT_RTE;
        ctx->delayed_pc = (uint32_t) - 1;
        ctx->bstate = BS_STOP;
        return;
    case 0x0058:                /* sets */
        tcg_gen_ori_i32(cpu_sr, cpu_sr, (1u << SR_S));
        return;
    case 0x0018:                /* sett */
        tcg_gen_movi_i32(cpu_sr_t, 1);
        return;
    case 0xfbfd:                /* frchg */
        CHECK_FPSCR_PR_0
        tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_FR);
        ctx->bstate = BS_STOP;
        return;
    case 0xf3fd:                /* fschg */
        CHECK_FPSCR_PR_0
        tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_SZ);
        ctx->bstate = BS_STOP;
        return;
    case 0xf7fd:                /* fpchg */
        CHECK_SH4A
        tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_PR);
        ctx->bstate = BS_STOP;
        return;
    case 0x0009:                /* nop */
        return;
    case 0x001b:                /* sleep */
        CHECK_PRIVILEGED
        tcg_gen_movi_i32(cpu_pc, ctx->pc + 2);
        gen_helper_sleep(cpu_env);
        return;
    }

    switch (ctx->opcode & 0xf000) {
    case 0x1000:                /* mov.l Rm,@(disp,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4);
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
            tcg_temp_free(addr);
        }
        return;
    case 0x5000:                /* mov.l @(disp,Rm),Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4);
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
            tcg_temp_free(addr);
        }
        return;
    case 0xe000:                /* mov #imm,Rn */
#ifdef CONFIG_USER_ONLY
        /* Detect the start of a gUSA region.  If so, update envflags
           and end the TB.  This will allow us to see the end of the
           region (stored in R0) in the next TB.  */
        if (B11_8 == 15 && B7_0s < 0 && parallel_cpus) {
            ctx->envflags = deposit32(ctx->envflags, GUSA_SHIFT, 8, B7_0s);
            ctx->bstate = BS_STOP;
        }
#endif
        tcg_gen_movi_i32(REG(B11_8), B7_0s);
        return;
    case 0x9000:                /* mov.w @(disp,PC),Rn */
        {
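            /* The SH-4 PC reads as the instruction address + 4, so
               PC-relative displacements apply to ctx->pc + 4; mov.l
               (the 0xd000 case below) additionally aligns the base
               down to a longword boundary.  */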
            TCGv addr = tcg_const_i32(ctx->pc + 4 + B7_0 * 2);
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
            tcg_temp_free(addr);
        }
        return;
    case 0xd000:                /* mov.l @(disp,PC),Rn */
        {
            TCGv addr = tcg_const_i32((ctx->pc + 4 + B7_0 * 4) & ~3);
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
            tcg_temp_free(addr);
        }
        return;
    case 0x7000:                /* add #imm,Rn */
        tcg_gen_addi_i32(REG(B11_8), REG(B11_8), B7_0s);
        return;
    case 0xa000:                /* bra disp */
        CHECK_NOT_DELAY_SLOT
        ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
        ctx->envflags |= DELAY_SLOT;
        return;
    case 0xb000:                /* bsr disp */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
        ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
        ctx->envflags |= DELAY_SLOT;
        return;
    }

    switch (ctx->opcode & 0xf00f) {
    case 0x6003:                /* mov Rm,Rn */
        tcg_gen_mov_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x2000:                /* mov.b Rm,@Rn */
        tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_UB);
        return;
    case 0x2001:                /* mov.w Rm,@Rn */
        tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUW);
        return;
    case 0x2002:                /* mov.l Rm,@Rn */
        tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUL);
        return;
    case 0x6000:                /* mov.b @Rm,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
        return;
    case 0x6001:                /* mov.w @Rm,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW);
        return;
    case 0x6002:                /* mov.l @Rm,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL);
        return;
    case 0x2004:                /* mov.b Rm,@-Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 1);
            /* might cause re-execution */
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
            /* modify register status */
            tcg_gen_mov_i32(REG(B11_8), addr);
            tcg_temp_free(addr);
        }
        return;
    case 0x2005:                /* mov.w Rm,@-Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 2);
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW);
            tcg_gen_mov_i32(REG(B11_8), addr);
            tcg_temp_free(addr);
        }
        return;
    case 0x2006:                /* mov.l Rm,@-Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 4);
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
            tcg_gen_mov_i32(REG(B11_8), addr);
            tcg_temp_free(addr);
        }
        return;
    case 0x6004:                /* mov.b @Rm+,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
        if (B11_8 != B7_4) {
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 1);
        }
        return;
    case 0x6005:                /* mov.w @Rm+,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW);
        if (B11_8 != B7_4) {
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
        }
        return;
    case 0x6006:                /* mov.l @Rm+,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL);
        if (B11_8 != B7_4) {
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
        }
        return;
    case 0x0004:                /* mov.b Rm,@(R0,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B11_8), REG(0));
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
            tcg_temp_free(addr);
        }
        return;
    case 0x0005:                /* mov.w Rm,@(R0,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B11_8), REG(0));
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW);
            tcg_temp_free(addr);
        }
        return;
    case 0x0006:                /* mov.l Rm,@(R0,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B11_8), REG(0));
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
            tcg_temp_free(addr);
        }
        return;
    case 0x000c:                /* mov.b @(R0,Rm),Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B7_4), REG(0));
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_SB);
            tcg_temp_free(addr);
        }
        return;
    case 0x000d:                /* mov.w @(R0,Rm),Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B7_4), REG(0));
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
            tcg_temp_free(addr);
        }
        return;
    case 0x000e:                /* mov.l @(R0,Rm),Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B7_4), REG(0));
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
            tcg_temp_free(addr);
        }
        return;
    case 0x6008:                /* swap.b Rm,Rn */
        {
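            /* swap.b exchanges only the two low bytes of Rm; the upper
               16 bits pass through unchanged, hence the zero-extend +
               16-bit byte swap deposited back into bits 0..15.  */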
            TCGv low = tcg_temp_new();
            tcg_gen_ext16u_i32(low, REG(B7_4));
            tcg_gen_bswap16_i32(low, low);
            tcg_gen_deposit_i32(REG(B11_8), REG(B7_4), low, 0, 16);
            tcg_temp_free(low);
        }
        return;
    case 0x6009:                /* swap.w Rm,Rn */
        tcg_gen_rotli_i32(REG(B11_8), REG(B7_4), 16);
        return;
    case 0x200d:                /* xtrct Rm,Rn */
        {
            TCGv high, low;
            high = tcg_temp_new();
            tcg_gen_shli_i32(high, REG(B7_4), 16);
            low = tcg_temp_new();
            tcg_gen_shri_i32(low, REG(B11_8), 16);
            tcg_gen_or_i32(REG(B11_8), high, low);
            tcg_temp_free(low);
            tcg_temp_free(high);
        }
        return;
    case 0x300c:                /* add Rm,Rn */
        tcg_gen_add_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
    case 0x300e:                /* addc Rm,Rn */
        {
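            /* Compute Rn + Rm + T with two carry-propagating add2
               steps: (t1, T) = Rm + T, then (Rn, T) = Rn + t1 plus the
               intermediate carry; t0 provides the zero high parts.  */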
            TCGv t0, t1;
            t0 = tcg_const_tl(0);
            t1 = tcg_temp_new();
            tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
            tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
                             REG(B11_8), t0, t1, cpu_sr_t);
            tcg_temp_free(t0);
            tcg_temp_free(t1);
        }
        return;
    case 0x300f:                /* addv Rm,Rn */
        {
            TCGv t0, t1, t2;
            t0 = tcg_temp_new();
            tcg_gen_add_i32(t0, REG(B7_4), REG(B11_8));
            t1 = tcg_temp_new();
            tcg_gen_xor_i32(t1, t0, REG(B11_8));
            t2 = tcg_temp_new();
            tcg_gen_xor_i32(t2, REG(B7_4), REG(B11_8));
            tcg_gen_andc_i32(cpu_sr_t, t1, t2);
            tcg_temp_free(t2);
            tcg_gen_shri_i32(cpu_sr_t, cpu_sr_t, 31);
            tcg_temp_free(t1);
            tcg_gen_mov_i32(REG(B11_8), t0);
            tcg_temp_free(t0);
        }
        return;
    case 0x2009:                /* and Rm,Rn */
        tcg_gen_and_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
    case 0x3000:                /* cmp/eq Rm,Rn */
        tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), REG(B7_4));
        return;
    case 0x3003:                /* cmp/ge Rm,Rn */
        tcg_gen_setcond_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), REG(B7_4));
        return;
    case 0x3007:                /* cmp/gt Rm,Rn */
        tcg_gen_setcond_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), REG(B7_4));
        return;
    case 0x3006:                /* cmp/hi Rm,Rn */
        tcg_gen_setcond_i32(TCG_COND_GTU, cpu_sr_t, REG(B11_8), REG(B7_4));
        return;
    case 0x3002:                /* cmp/hs Rm,Rn */
        tcg_gen_setcond_i32(TCG_COND_GEU, cpu_sr_t, REG(B11_8), REG(B7_4));
        return;
    case 0x200c:                /* cmp/str Rm,Rn */
        {
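            /* cmp/str sets T if any byte position of Rm and Rn holds
               equal values.  The XOR makes such bytes zero, and the
               classic (x - 0x01010101) & ~x & 0x80808080 test yields
               non-zero iff x contains a zero byte.  */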
            TCGv cmp1 = tcg_temp_new();
            TCGv cmp2 = tcg_temp_new();
            tcg_gen_xor_i32(cmp2, REG(B7_4), REG(B11_8));
            tcg_gen_subi_i32(cmp1, cmp2, 0x01010101);
            tcg_gen_andc_i32(cmp1, cmp1, cmp2);
            tcg_gen_andi_i32(cmp1, cmp1, 0x80808080);
            tcg_gen_setcondi_i32(TCG_COND_NE, cpu_sr_t, cmp1, 0);
            tcg_temp_free(cmp2);
            tcg_temp_free(cmp1);
        }
        return;
    case 0x2007:                /* div0s Rm,Rn */
        tcg_gen_shri_i32(cpu_sr_q, REG(B11_8), 31);         /* SR_Q */
        tcg_gen_shri_i32(cpu_sr_m, REG(B7_4), 31);          /* SR_M */
        tcg_gen_xor_i32(cpu_sr_t, cpu_sr_q, cpu_sr_m);      /* SR_T */
        return;
    case 0x3004:                /* div1 Rm,Rn */
        {
            TCGv t0 = tcg_temp_new();
            TCGv t1 = tcg_temp_new();
            TCGv t2 = tcg_temp_new();
            TCGv zero = tcg_const_i32(0);

            /* shift left arg1, saving the bit being pushed out and inserting
               T on the right */
            tcg_gen_shri_i32(t0, REG(B11_8), 31);
            tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
            tcg_gen_or_i32(REG(B11_8), REG(B11_8), cpu_sr_t);

            /* Add or subtract arg0 from arg1 depending on whether Q == M.
               To avoid using 64-bit temps, we compute arg0's high part
               from q ^ m, so that it is 0x00000000 when adding the value
               or 0xffffffff when subtracting it. */
            tcg_gen_xor_i32(t1, cpu_sr_q, cpu_sr_m);
            tcg_gen_subi_i32(t1, t1, 1);
            tcg_gen_neg_i32(t2, REG(B7_4));
            tcg_gen_movcond_i32(TCG_COND_EQ, t2, t1, zero, REG(B7_4), t2);
            tcg_gen_add2_i32(REG(B11_8), t1, REG(B11_8), zero, t2, t1);

            /* compute T and Q depending on carry */
            tcg_gen_andi_i32(t1, t1, 1);
            tcg_gen_xor_i32(t1, t1, t0);
            tcg_gen_xori_i32(cpu_sr_t, t1, 1);
            tcg_gen_xor_i32(cpu_sr_q, cpu_sr_m, t1);

            tcg_temp_free(zero);
            tcg_temp_free(t2);
            tcg_temp_free(t1);
            tcg_temp_free(t0);
        }
        return;
    case 0x300d:                /* dmuls.l Rm,Rn */
        tcg_gen_muls2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
        return;
    case 0x3005:                /* dmulu.l Rm,Rn */
        tcg_gen_mulu2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
        return;
    case 0x600e:                /* exts.b Rm,Rn */
        tcg_gen_ext8s_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x600f:                /* exts.w Rm,Rn */
        tcg_gen_ext16s_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x600c:                /* extu.b Rm,Rn */
        tcg_gen_ext8u_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x600d:                /* extu.w Rm,Rn */
        tcg_gen_ext16u_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x000f:                /* mac.l @Rm+,@Rn+ */
        {
            TCGv arg0, arg1;
            arg0 = tcg_temp_new();
            tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESL);
            arg1 = tcg_temp_new();
            tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL);
            gen_helper_macl(cpu_env, arg0, arg1);
            tcg_temp_free(arg1);
            tcg_temp_free(arg0);
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
            tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
        }
        return;
    case 0x400f:                /* mac.w @Rm+,@Rn+ */
        {
            TCGv arg0, arg1;
            arg0 = tcg_temp_new();
            tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESW);
            arg1 = tcg_temp_new();
            tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESW);
            gen_helper_macw(cpu_env, arg0, arg1);
            tcg_temp_free(arg1);
            tcg_temp_free(arg0);
            tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2);
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
        }
        return;
    case 0x0007:                /* mul.l Rm,Rn */
        tcg_gen_mul_i32(cpu_macl, REG(B7_4), REG(B11_8));
        return;
    case 0x200f:                /* muls.w Rm,Rn */
        {
            TCGv arg0, arg1;
            arg0 = tcg_temp_new();
            tcg_gen_ext16s_i32(arg0, REG(B7_4));
            arg1 = tcg_temp_new();
            tcg_gen_ext16s_i32(arg1, REG(B11_8));
            tcg_gen_mul_i32(cpu_macl, arg0, arg1);
            tcg_temp_free(arg1);
            tcg_temp_free(arg0);
        }
        return;
    case 0x200e:                /* mulu.w Rm,Rn */
        {
            TCGv arg0, arg1;
            arg0 = tcg_temp_new();
            tcg_gen_ext16u_i32(arg0, REG(B7_4));
            arg1 = tcg_temp_new();
            tcg_gen_ext16u_i32(arg1, REG(B11_8));
            tcg_gen_mul_i32(cpu_macl, arg0, arg1);
            tcg_temp_free(arg1);
            tcg_temp_free(arg0);
        }
        return;
    case 0x600b:                /* neg Rm,Rn */
        tcg_gen_neg_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x600a:                /* negc Rm,Rn */
        {
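            /* negc: Rn = 0 - Rm - T.  The add2 forms Rm + T together
               with its carry, the sub2 subtracts that 33-bit quantity
               from zero; the resulting borrow becomes T (masked to a
               single bit below).  */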
            TCGv t0 = tcg_const_i32(0);
            tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
                             REG(B7_4), t0, cpu_sr_t, t0);
            tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
                             t0, t0, REG(B11_8), cpu_sr_t);
            tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
            tcg_temp_free(t0);
        }
        return;
    case 0x6007:                /* not Rm,Rn */
        tcg_gen_not_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x200b:                /* or Rm,Rn */
        tcg_gen_or_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
    case 0x400c:                /* shad Rm,Rn */
        {
            TCGv t0 = tcg_temp_new();
            TCGv t1 = tcg_temp_new();
            TCGv t2 = tcg_temp_new();

            tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);

            /* positive case: shift to the left */
            tcg_gen_shl_i32(t1, REG(B11_8), t0);

            /* negative case: shift to the right in two steps to
               correctly handle the -32 case */
            tcg_gen_xori_i32(t0, t0, 0x1f);
            tcg_gen_sar_i32(t2, REG(B11_8), t0);
            tcg_gen_sari_i32(t2, t2, 1);

            /* select between the two cases */
            tcg_gen_movi_i32(t0, 0);
            tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);

            tcg_temp_free(t0);
            tcg_temp_free(t1);
            tcg_temp_free(t2);
        }
        return;
    case 0x400d:                /* shld Rm,Rn */
        {
            TCGv t0 = tcg_temp_new();
            TCGv t1 = tcg_temp_new();
            TCGv t2 = tcg_temp_new();

            tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);

            /* positive case: shift to the left */
            tcg_gen_shl_i32(t1, REG(B11_8), t0);

            /* negative case: shift to the right in two steps to
               correctly handle the -32 case */
            tcg_gen_xori_i32(t0, t0, 0x1f);
            tcg_gen_shr_i32(t2, REG(B11_8), t0);
            tcg_gen_shri_i32(t2, t2, 1);

            /* select between the two cases */
            tcg_gen_movi_i32(t0, 0);
            tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);

            tcg_temp_free(t0);
            tcg_temp_free(t1);
            tcg_temp_free(t2);
        }
        return;
    case 0x3008:                /* sub Rm,Rn */
        tcg_gen_sub_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
    case 0x300a:                /* subc Rm,Rn */
        {
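            /* subc: Rn = Rn - Rm - T.  As in addc/negc, Rm + T is
               formed first with add2, then subtracted from Rn with
               sub2 so that the borrow chain ends up in T.  */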
            TCGv t0, t1;
            t0 = tcg_const_tl(0);
            t1 = tcg_temp_new();
            tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
            tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
                             REG(B11_8), t0, t1, cpu_sr_t);
            tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
            tcg_temp_free(t0);
            tcg_temp_free(t1);
        }
        return;
    case 0x300b:                /* subv Rm,Rn */
        {
            TCGv t0, t1, t2;
            t0 = tcg_temp_new();
            tcg_gen_sub_i32(t0, REG(B11_8), REG(B7_4));
            t1 = tcg_temp_new();
            tcg_gen_xor_i32(t1, t0, REG(B11_8));
            t2 = tcg_temp_new();
            tcg_gen_xor_i32(t2, REG(B11_8), REG(B7_4));
            tcg_gen_and_i32(t1, t1, t2);
            tcg_temp_free(t2);
            tcg_gen_shri_i32(cpu_sr_t, t1, 31);
            tcg_temp_free(t1);
            tcg_gen_mov_i32(REG(B11_8), t0);
            tcg_temp_free(t0);
        }
        return;
    case 0x2008:                /* tst Rm,Rn */
        {
            TCGv val = tcg_temp_new();
            tcg_gen_and_i32(val, REG(B7_4), REG(B11_8));
            tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
            tcg_temp_free(val);
        }
        return;
    case 0x200a:                /* xor Rm,Rn */
        tcg_gen_xor_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
    case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        if (ctx->tbflags & FPSCR_SZ) {
            int xsrc = XHACK(B7_4);
            int xdst = XHACK(B11_8);
            tcg_gen_mov_i32(FREG(xdst), FREG(xsrc));
            tcg_gen_mov_i32(FREG(xdst + 1), FREG(xsrc + 1));
        } else {
            tcg_gen_mov_i32(FREG(B11_8), FREG(B7_4));
        }
        return;
    case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        if (ctx->tbflags & FPSCR_SZ) {
            TCGv_i64 fp = tcg_temp_new_i64();
            gen_load_fpr64(ctx, fp, XHACK(B7_4));
            tcg_gen_qemu_st_i64(fp, REG(B11_8), ctx->memidx, MO_TEQ);
            tcg_temp_free_i64(fp);
        } else {
            tcg_gen_qemu_st_i32(FREG(B7_4), REG(B11_8), ctx->memidx, MO_TEUL);
        }
        return;
    case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        if (ctx->tbflags & FPSCR_SZ) {
            TCGv_i64 fp = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx, MO_TEQ);
            gen_store_fpr64(ctx, fp, XHACK(B11_8));
            tcg_temp_free_i64(fp);
        } else {
            tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx, MO_TEUL);
        }
        return;
    case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        if (ctx->tbflags & FPSCR_SZ) {
            TCGv_i64 fp = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx, MO_TEQ);
            gen_store_fpr64(ctx, fp, XHACK(B11_8));
            tcg_temp_free_i64(fp);
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8);
        } else {
            tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx, MO_TEUL);
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
        }
        return;
    case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        {
            TCGv addr = tcg_temp_new_i32();
            if (ctx->tbflags & FPSCR_SZ) {
                TCGv_i64 fp = tcg_temp_new_i64();
                gen_load_fpr64(ctx, fp, XHACK(B7_4));
                tcg_gen_subi_i32(addr, REG(B11_8), 8);
                tcg_gen_qemu_st_i64(fp, addr, ctx->memidx, MO_TEQ);
                tcg_temp_free_i64(fp);
            } else {
                tcg_gen_subi_i32(addr, REG(B11_8), 4);
                tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx, MO_TEUL);
            }
            tcg_gen_mov_i32(REG(B11_8), addr);
            tcg_temp_free(addr);
        }
        return;
    case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        {
            TCGv addr = tcg_temp_new_i32();
            tcg_gen_add_i32(addr, REG(B7_4), REG(0));
            if (ctx->tbflags & FPSCR_SZ) {
                TCGv_i64 fp = tcg_temp_new_i64();
                tcg_gen_qemu_ld_i64(fp, addr, ctx->memidx, MO_TEQ);
                gen_store_fpr64(ctx, fp, XHACK(B11_8));
                tcg_temp_free_i64(fp);
            } else {
                tcg_gen_qemu_ld_i32(FREG(B11_8), addr, ctx->memidx, MO_TEUL);
            }
            tcg_temp_free(addr);
        }
        return;
    case 0xf007: /* fmov {F,D,X}Rm,@(R0,Rn) - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B11_8), REG(0));
            if (ctx->tbflags & FPSCR_SZ) {
                TCGv_i64 fp = tcg_temp_new_i64();
                gen_load_fpr64(ctx, fp, XHACK(B7_4));
                tcg_gen_qemu_st_i64(fp, addr, ctx->memidx, MO_TEQ);
                tcg_temp_free_i64(fp);
            } else {
                tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx, MO_TEUL);
            }
            tcg_temp_free(addr);
        }
        return;
    case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
    case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
    case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
    case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
    case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
    case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
        {
            CHECK_FPU_ENABLED
            if (ctx->tbflags & FPSCR_PR) {
                TCGv_i64 fp0, fp1;

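                /* In double-precision mode the operands are register
                   pairs; opcode bit 4 (Rm) or bit 8 (Rn) set would name
                   an odd DRn, which is an illegal encoding.  */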
                if (ctx->opcode & 0x0110) {
                    goto do_illegal;
                }
                fp0 = tcg_temp_new_i64();
                fp1 = tcg_temp_new_i64();
                gen_load_fpr64(ctx, fp0, B11_8);
                gen_load_fpr64(ctx, fp1, B7_4);
                switch (ctx->opcode & 0xf00f) {
                case 0xf000:            /* fadd Rm,Rn */
                    gen_helper_fadd_DT(fp0, cpu_env, fp0, fp1);
                    break;
                case 0xf001:            /* fsub Rm,Rn */
                    gen_helper_fsub_DT(fp0, cpu_env, fp0, fp1);
                    break;
                case 0xf002:            /* fmul Rm,Rn */
                    gen_helper_fmul_DT(fp0, cpu_env, fp0, fp1);
                    break;
                case 0xf003:            /* fdiv Rm,Rn */
                    gen_helper_fdiv_DT(fp0, cpu_env, fp0, fp1);
                    break;
                case 0xf004:            /* fcmp/eq Rm,Rn */
                    gen_helper_fcmp_eq_DT(cpu_sr_t, cpu_env, fp0, fp1);
                    return;
                case 0xf005:            /* fcmp/gt Rm,Rn */
                    gen_helper_fcmp_gt_DT(cpu_sr_t, cpu_env, fp0, fp1);
                    return;
                }
                gen_store_fpr64(ctx, fp0, B11_8);
                tcg_temp_free_i64(fp0);
                tcg_temp_free_i64(fp1);
            } else {
                switch (ctx->opcode & 0xf00f) {
                case 0xf000:            /* fadd Rm,Rn */
                    gen_helper_fadd_FT(FREG(B11_8), cpu_env,
                                       FREG(B11_8), FREG(B7_4));
                    break;
                case 0xf001:            /* fsub Rm,Rn */
                    gen_helper_fsub_FT(FREG(B11_8), cpu_env,
                                       FREG(B11_8), FREG(B7_4));
                    break;
                case 0xf002:            /* fmul Rm,Rn */
                    gen_helper_fmul_FT(FREG(B11_8), cpu_env,
                                       FREG(B11_8), FREG(B7_4));
                    break;
                case 0xf003:            /* fdiv Rm,Rn */
                    gen_helper_fdiv_FT(FREG(B11_8), cpu_env,
                                       FREG(B11_8), FREG(B7_4));
                    break;
                case 0xf004:            /* fcmp/eq Rm,Rn */
                    gen_helper_fcmp_eq_FT(cpu_sr_t, cpu_env,
                                          FREG(B11_8), FREG(B7_4));
                    return;
                case 0xf005:            /* fcmp/gt Rm,Rn */
                    gen_helper_fcmp_gt_FT(cpu_sr_t, cpu_env,
                                          FREG(B11_8), FREG(B7_4));
                    return;
                }
            }
        }
        return;
    case 0xf00e: /* fmac FR0,Rm,Rn */
        CHECK_FPU_ENABLED
        CHECK_FPSCR_PR_0
        gen_helper_fmac_FT(FREG(B11_8), cpu_env,
                           FREG(0), FREG(B7_4), FREG(B11_8));
        return;
    }

    switch (ctx->opcode & 0xff00) {
    case 0xc900:                /* and #imm,R0 */
        tcg_gen_andi_i32(REG(0), REG(0), B7_0);
        return;
    case 0xcd00:                /* and.b #imm,@(R0,GBR) */
        {
            TCGv addr, val;
            addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(0), cpu_gbr);
            val = tcg_temp_new();
            tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
            tcg_gen_andi_i32(val, val, B7_0);
            tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
            tcg_temp_free(val);
            tcg_temp_free(addr);
        }
        return;
    case 0x8b00:                /* bf label */
        CHECK_NOT_DELAY_SLOT
        gen_conditional_jump(ctx, ctx->pc + 4 + B7_0s * 2, false);
        return;
    case 0x8f00:                /* bf/s label */
        CHECK_NOT_DELAY_SLOT
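        /* For delayed conditional branches the taken/not-taken decision
           is latched in cpu_delayed_cond and tested after the delay
           slot; bf/s branches when T is clear, hence the XOR with 1.  */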
1194        tcg_gen_xori_i32(cpu_delayed_cond, cpu_sr_t, 1);
1195        ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2;
1196        ctx->envflags |= DELAY_SLOT_CONDITIONAL;
1197        return;
1198    case 0x8900:                /* bt label */
1199        CHECK_NOT_DELAY_SLOT
1200        gen_conditional_jump(ctx, ctx->pc + 4 + B7_0s * 2, true);
1201        return;
1202    case 0x8d00:                /* bt/s label */
1203        CHECK_NOT_DELAY_SLOT
1204        tcg_gen_mov_i32(cpu_delayed_cond, cpu_sr_t);
1205        ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2;
1206        ctx->envflags |= DELAY_SLOT_CONDITIONAL;
1207        return;
1208    case 0x8800:                /* cmp/eq #imm,R0 */
1209        tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(0), B7_0s);
1210        return;
1211    case 0xc400:                /* mov.b @(disp,GBR),R0 */
1212        {
1213            TCGv addr = tcg_temp_new();
1214            tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1215            tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
1216            tcg_temp_free(addr);
1217        }
1218        return;
1219    case 0xc500:                /* mov.w @(disp,GBR),R0 */
1220        {
1221            TCGv addr = tcg_temp_new();
1222            tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1223            tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
1224            tcg_temp_free(addr);
1225        }
1226        return;
1227    case 0xc600:                /* mov.l @(disp,GBR),R0 */
1228        {
1229            TCGv addr = tcg_temp_new();
1230            tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1231            tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESL);
1232            tcg_temp_free(addr);
1233        }
1234        return;
1235    case 0xc000:                /* mov.b R0,@(disp,GBR) */
1236        {
1237            TCGv addr = tcg_temp_new();
1238            tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1239            tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
1240            tcg_temp_free(addr);
1241        }
1242        return;
1243    case 0xc100:                /* mov.w R0,@(disp,GBR) */
1244        {
1245            TCGv addr = tcg_temp_new();
1246            tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1247            tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW);
1248            tcg_temp_free(addr);
1249        }
1250        return;
1251    case 0xc200:                /* mov.l R0,@(disp,GBR) */
1252        {
1253            TCGv addr = tcg_temp_new();
1254            tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1255            tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUL);
1256            tcg_temp_free(addr);
1257        }
1258        return;
1259    case 0x8000:                /* mov.b R0,@(disp,Rn) */
1260        {
1261            TCGv addr = tcg_temp_new();
1262            tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1263            tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
1264            tcg_temp_free(addr);
1265        }
1266        return;
1267    case 0x8100:                /* mov.w R0,@(disp,Rn) */
1268        {
1269            TCGv addr = tcg_temp_new();
1270            tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1271            tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW);
1272            tcg_temp_free(addr);
1273        }
1274        return;
1275    case 0x8400:                /* mov.b @(disp,Rn),R0 */
1276        {
1277            TCGv addr = tcg_temp_new();
1278            tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1279            tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
1280            tcg_temp_free(addr);
1281        }
1282        return;
1283    case 0x8500:                /* mov.w @(disp,Rn),R0 */
1284        {
1285            TCGv addr = tcg_temp_new();
1286            tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1287            tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
1288            tcg_temp_free(addr);
1289        }
1290        return;
1291    case 0xc700:                /* mova @(disp,PC),R0 */
1292        tcg_gen_movi_i32(REG(0), ((ctx->pc & 0xfffffffc) + 4 + B7_0 * 4) & ~3);
1293        return;
1294    case 0xcb00:                /* or #imm,R0 */
1295        tcg_gen_ori_i32(REG(0), REG(0), B7_0);
1296        return;
1297    case 0xcf00:                /* or.b #imm,@(R0,GBR) */
1298        {
1299            TCGv addr, val;
1300            addr = tcg_temp_new();
1301            tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1302            val = tcg_temp_new();
1303            tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1304            tcg_gen_ori_i32(val, val, B7_0);
1305            tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1306            tcg_temp_free(val);
1307            tcg_temp_free(addr);
1308        }
1309        return;
1310    case 0xc300:                /* trapa #imm */
1311        {
1312            TCGv imm;
1313            CHECK_NOT_DELAY_SLOT
1314            gen_save_cpu_state(ctx, true);
1315            imm = tcg_const_i32(B7_0);
1316            gen_helper_trapa(cpu_env, imm);
1317            tcg_temp_free(imm);
1318            ctx->bstate = BS_EXCP;
1319        }
1320        return;
1321    case 0xc800:                /* tst #imm,R0 */
1322        {
1323            TCGv val = tcg_temp_new();
1324            tcg_gen_andi_i32(val, REG(0), B7_0);
1325            tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
1326            tcg_temp_free(val);
1327        }
1328        return;
1329    case 0xcc00:                /* tst.b #imm,@(R0,GBR) */
1330        {
1331            TCGv val = tcg_temp_new();
1332            tcg_gen_add_i32(val, REG(0), cpu_gbr);
1333            tcg_gen_qemu_ld_i32(val, val, ctx->memidx, MO_UB);
1334            tcg_gen_andi_i32(val, val, B7_0);
1335            tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
1336            tcg_temp_free(val);
1337        }
1338        return;
1339    case 0xca00:                /* xor #imm,R0 */
1340        tcg_gen_xori_i32(REG(0), REG(0), B7_0);
1341        return;
1342    case 0xce00:                /* xor.b #imm,@(R0,GBR) */
1343        {
1344            TCGv addr, val;
1345            addr = tcg_temp_new();
1346            tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1347            val = tcg_temp_new();
1348            tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1349            tcg_gen_xori_i32(val, val, B7_0);
1350            tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1351            tcg_temp_free(val);
1352            tcg_temp_free(addr);
1353        }
1354        return;
1355    }
1356
1357    switch (ctx->opcode & 0xf08f) {
1358    case 0x408e:                /* ldc Rm,Rn_BANK */
1359        CHECK_PRIVILEGED
1360        tcg_gen_mov_i32(ALTREG(B6_4), REG(B11_8));
1361        return;
1362    case 0x4087:                /* ldc.l @Rm+,Rn_BANK */
1363        CHECK_PRIVILEGED
1364        tcg_gen_qemu_ld_i32(ALTREG(B6_4), REG(B11_8), ctx->memidx, MO_TESL);
1365        tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1366        return;
1367    case 0x0082:                /* stc Rm_BANK,Rn */
1368        CHECK_PRIVILEGED
1369        tcg_gen_mov_i32(REG(B11_8), ALTREG(B6_4));
1370        return;
1371    case 0x4083:                /* stc.l Rm_BANK,@-Rn */
1372        CHECK_PRIVILEGED
1373        {
1374            TCGv addr = tcg_temp_new();
1375            tcg_gen_subi_i32(addr, REG(B11_8), 4);
1376            tcg_gen_qemu_st_i32(ALTREG(B6_4), addr, ctx->memidx, MO_TEUL);
1377            tcg_gen_mov_i32(REG(B11_8), addr);
1378            tcg_temp_free(addr);
1379        }
1380        return;
1381    }
1382
1383    switch (ctx->opcode & 0xf0ff) {
1384    case 0x0023:                /* braf Rn */
1385        CHECK_NOT_DELAY_SLOT
1386        tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->pc + 4);
1387        ctx->envflags |= DELAY_SLOT;
1388        ctx->delayed_pc = (uint32_t)-1;
1389        return;
1390    case 0x0003:                /* bsrf Rn */
1391        CHECK_NOT_DELAY_SLOT
1392        tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
1393        tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr);
1394        ctx->envflags |= DELAY_SLOT;
1395        ctx->delayed_pc = (uint32_t)-1;
1396        return;
1397    case 0x4015:                /* cmp/pl Rn */
1398        tcg_gen_setcondi_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), 0);
1399        return;
1400    case 0x4011:                /* cmp/pz Rn */
1401        tcg_gen_setcondi_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), 0);
1402        return;
1403    case 0x4010:                /* dt Rn */
1404        tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 1);
1405        tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), 0);
1406        return;
1407    case 0x402b:                /* jmp @Rn */
1408        CHECK_NOT_DELAY_SLOT
1409        tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1410        ctx->envflags |= DELAY_SLOT;
1411        ctx->delayed_pc = (uint32_t)-1;
1412        return;
1413    case 0x400b:                /* jsr @Rn */
1414        CHECK_NOT_DELAY_SLOT
1415        tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
1416        tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1417        ctx->envflags |= DELAY_SLOT;
1418        ctx->delayed_pc = (uint32_t)-1;
1419        return;
1420    case 0x400e:                /* ldc Rm,SR */
1421        CHECK_PRIVILEGED
1422        {
1423            TCGv val = tcg_temp_new();
1424            tcg_gen_andi_i32(val, REG(B11_8), 0x700083f3);
1425            gen_write_sr(val);
1426            tcg_temp_free(val);
1427            ctx->bstate = BS_STOP;
1428        }
1429        return;
1430    case 0x4007:                /* ldc.l @Rm+,SR */
1431        CHECK_PRIVILEGED
1432        {
1433            TCGv val = tcg_temp_new();
1434            tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TESL);
1435            tcg_gen_andi_i32(val, val, 0x700083f3);
1436            gen_write_sr(val);
1437            tcg_temp_free(val);
1438            tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1439            ctx->bstate = BS_STOP;
1440        }
1441        return;
1442    case 0x0002:                /* stc SR,Rn */
1443        CHECK_PRIVILEGED
1444        gen_read_sr(REG(B11_8));
1445        return;
1446    case 0x4003:                /* stc SR,@-Rn */
1447        CHECK_PRIVILEGED
1448        {
1449            TCGv addr = tcg_temp_new();
1450            TCGv val = tcg_temp_new();
1451            tcg_gen_subi_i32(addr, REG(B11_8), 4);
1452            gen_read_sr(val);
1453            tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL);
1454            tcg_gen_mov_i32(REG(B11_8), addr);
1455            tcg_temp_free(val);
1456            tcg_temp_free(addr);
1457        }
1458        return;
1459#define LD(reg,ldnum,ldpnum,prechk)             \
1460  case ldnum:                                                   \
1461    prechk                                                      \
1462    tcg_gen_mov_i32(cpu_##reg, REG(B11_8));                     \
1463    return;                                                     \
1464  case ldpnum:                                                  \
1465    prechk                                                      \
1466    tcg_gen_qemu_ld_i32(cpu_##reg, REG(B11_8), ctx->memidx, MO_TESL); \
1467    tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);                \
1468    return;
1469#define ST(reg,stnum,stpnum,prechk)             \
1470  case stnum:                                                   \
1471    prechk                                                      \
1472    tcg_gen_mov_i32(REG(B11_8), cpu_##reg);                     \
1473    return;                                                     \
1474  case stpnum:                                                  \
1475    prechk                                                      \
1476    {                                                           \
1477        TCGv addr = tcg_temp_new();                             \
1478        tcg_gen_subi_i32(addr, REG(B11_8), 4);                  \
1479        tcg_gen_qemu_st_i32(cpu_##reg, addr, ctx->memidx, MO_TEUL); \
1480        tcg_gen_mov_i32(REG(B11_8), addr);                      \
1481        tcg_temp_free(addr);                                    \
1482    }                                                           \
1483    return;
1484#define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk)              \
1485        LD(reg,ldnum,ldpnum,prechk)                             \
1486        ST(reg,stnum,stpnum,prechk)
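/* For example, LDST(gbr, 0x401e, 0x4017, 0x0012, 0x4013, {}) expands to the
   four cases for "ldc Rm,GBR", "ldc.l @Rm+,GBR", "stc GBR,Rn" and
   "stc.l GBR,@-Rn", all operating on cpu_gbr; prechk inserts the privilege
   or FPU guard, or nothing for the unprivileged registers.  */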
1487        LDST(gbr,  0x401e, 0x4017, 0x0012, 0x4013, {})
1488        LDST(vbr,  0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED)
1489        LDST(ssr,  0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED)
1490        LDST(spc,  0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED)
1491        ST(sgr,  0x003a, 0x4032, CHECK_PRIVILEGED)
1492        LD(sgr,  0x403a, 0x4036, CHECK_PRIVILEGED CHECK_SH4A)
1493        LDST(dbr,  0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED)
1494        LDST(mach, 0x400a, 0x4006, 0x000a, 0x4002, {})
1495        LDST(macl, 0x401a, 0x4016, 0x001a, 0x4012, {})
1496        LDST(pr,   0x402a, 0x4026, 0x002a, 0x4022, {})
1497        LDST(fpul, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED})
1498    case 0x406a:                /* lds Rm,FPSCR */
1499        CHECK_FPU_ENABLED
1500        gen_helper_ld_fpscr(cpu_env, REG(B11_8));
1501        ctx->bstate = BS_STOP;
1502        return;
1503    case 0x4066:                /* lds.l @Rm+,FPSCR */
1504        CHECK_FPU_ENABLED
1505        {
1506            TCGv addr = tcg_temp_new();
1507            tcg_gen_qemu_ld_i32(addr, REG(B11_8), ctx->memidx, MO_TESL);
1508            tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1509            gen_helper_ld_fpscr(cpu_env, addr);
1510            tcg_temp_free(addr);
1511            ctx->bstate = BS_STOP;
1512        }
1513        return;
1514    case 0x006a:                /* sts FPSCR,Rn */
1515        CHECK_FPU_ENABLED
1516        tcg_gen_andi_i32(REG(B11_8), cpu_fpscr, 0x003fffff);
1517        return;
1518    case 0x4062:                /* sts FPSCR,@-Rn */
1519        CHECK_FPU_ENABLED
1520        {
1521            TCGv addr, val;
1522            val = tcg_temp_new();
1523            tcg_gen_andi_i32(val, cpu_fpscr, 0x003fffff);
1524            addr = tcg_temp_new();
1525            tcg_gen_subi_i32(addr, REG(B11_8), 4);
1526            tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL);
1527            tcg_gen_mov_i32(REG(B11_8), addr);
1528            tcg_temp_free(addr);
1529            tcg_temp_free(val);
1530        }
1531        return;
1532    case 0x00c3:                /* movca.l R0,@Rn */
1533        {
1534            TCGv val = tcg_temp_new();
1535            tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TEUL);
1536            gen_helper_movcal(cpu_env, REG(B11_8), val);
1537            tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
                tcg_temp_free(val);
1538        }
1539        ctx->has_movcal = 1;
1540        return;
1541    case 0x40a9:                /* movua.l @Rm,R0 */
1542        CHECK_SH4A
1543        /* Load non-boundary-aligned data */
1544        tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
1545                            MO_TEUL | MO_UNALN);
1546        return;
1548    case 0x40e9:                /* movua.l @Rm+,R0 */
1549        CHECK_SH4A
1550        /* Load non-boundary-aligned data */
1551        tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
1552                            MO_TEUL | MO_UNALN);
1553        tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1554        return;
1556    case 0x0029:                /* movt Rn */
1557        tcg_gen_mov_i32(REG(B11_8), cpu_sr_t);
1558        return;
1559    case 0x0073:
1560        /* MOVCO.L
1561               LDST -> T
1562               If (T == 1) R0 -> (Rn)
1563               0 -> LDST
1564        */
1565        CHECK_SH4A
1566        {
1567            TCGLabel *label = gen_new_label();
1568            tcg_gen_mov_i32(cpu_sr_t, cpu_ldst);
1569            tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ldst, 0, label);
1570            tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
1571            gen_set_label(label);
1572            tcg_gen_movi_i32(cpu_ldst, 0);
1573            return;
1574        }
1575    case 0x0063:
1576        /* MOVLI.L @Rm,R0
1577               1 -> LDST
1578               (Rm) -> R0
1579               When interrupt/exception
1580               occurred 0 -> LDST
1581        */
1582        CHECK_SH4A
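            /* Clear LDST before the load, so that a fault taken on the
               load leaves LDST = 0 as described above.  */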
1583        tcg_gen_movi_i32(cpu_ldst, 0);
1584        tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TESL);
1585        tcg_gen_movi_i32(cpu_ldst, 1);
1586        return;
1587    case 0x0093:                /* ocbi @Rn */
1588        gen_helper_ocbi(cpu_env, REG(B11_8));
1591        return;
1592    case 0x00a3:                /* ocbp @Rn */
1593    case 0x00b3:                /* ocbwb @Rn */
1594        /* These instructions are supposed to do nothing in case of
1595           a cache miss. Given that we only partially emulate caches
1596           it is safe to simply ignore them. */
1597        return;
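        /* Similarly, the prefetch and instruction-cache hints below are
           treated as no-ops here, modulo the ISA checks.  */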
1598    case 0x0083:                /* pref @Rn */
1599        return;
1600    case 0x00d3:                /* prefi @Rn */
1601        CHECK_SH4A
1602        return;
1603    case 0x00e3:                /* icbi @Rn */
1604        CHECK_SH4A
1605        return;
1606    case 0x00ab:                /* synco */
1607        CHECK_SH4A
1608        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1609        return;
1611    case 0x4024:                /* rotcl Rn */
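            /* Rotate left through T: the old T fills bit 0 while the old
               bit 31 becomes the new T (rotcr below is the mirror image).  */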
1612        {
1613            TCGv tmp = tcg_temp_new();
1614            tcg_gen_mov_i32(tmp, cpu_sr_t);
1615            tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
1616            tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1617            tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
1618            tcg_temp_free(tmp);
1619        }
1620        return;
1621    case 0x4025:                /* rotcr Rn */
1622        {
1623            TCGv tmp = tcg_temp_new();
1624            tcg_gen_shli_i32(tmp, cpu_sr_t, 31);
1625            tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1626            tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1627            tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
1628            tcg_temp_free(tmp);
1629        }
1630        return;
1631    case 0x4004:                /* rotl Rn */
1632        tcg_gen_rotli_i32(REG(B11_8), REG(B11_8), 1);
1633        tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1634        return;
1635    case 0x4005:                /* rotr Rn */
1636        tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1637        tcg_gen_rotri_i32(REG(B11_8), REG(B11_8), 1);
1638        return;
1639    case 0x4000:                /* shll Rn */
1640    case 0x4020:                /* shal Rn */
1641        tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
1642        tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1643        return;
1644    case 0x4021:                /* shar Rn */
1645        tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1646        tcg_gen_sari_i32(REG(B11_8), REG(B11_8), 1);
1647        return;
1648    case 0x4001:                /* shlr Rn */
1649        tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
1650        tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1651        return;
1652    case 0x4008:                /* shll2 Rn */
1653        tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 2);
1654        return;
1655    case 0x4018:                /* shll8 Rn */
1656        tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 8);
1657        return;
1658    case 0x4028:                /* shll16 Rn */
1659        tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 16);
1660        return;
1661    case 0x4009:                /* shlr2 Rn */
1662        tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 2);
1663        return;
1664    case 0x4019:                /* shlr8 Rn */
1665        tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 8);
1666        return;
1667    case 0x4029:                /* shlr16 Rn */
1668        tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 16);
1669        return;
1670    case 0x401b:                /* tas.b @Rn */
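            /* Atomically OR 0x80 into the byte at @Rn; the fetch_or
               returns the pre-OR value, so T is set iff the byte was
               originally zero.  */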
1671        {
1672            TCGv val = tcg_const_i32(0x80);
1673            tcg_gen_atomic_fetch_or_i32(val, REG(B11_8), val,
1674                                        ctx->memidx, MO_UB);
1675            tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
1676            tcg_temp_free(val);
1677        }
1678        return;
1679    case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
1680        CHECK_FPU_ENABLED
1681        tcg_gen_mov_i32(FREG(B11_8), cpu_fpul);
1682        return;
1683    case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
1684        CHECK_FPU_ENABLED
1685        tcg_gen_mov_i32(cpu_fpul, FREG(B11_8));
1686        return;
1687    case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
1688        CHECK_FPU_ENABLED
1689        if (ctx->tbflags & FPSCR_PR) {
1690            TCGv_i64 fp;
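                /* In double-precision mode the operand is DRn, which must
                   be an even-numbered register; bit 8 set would name an
                   odd one, so it is an invalid encoding.  */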
1691            if (ctx->opcode & 0x0100) {
1692                goto do_illegal;
1693            }
1694            fp = tcg_temp_new_i64();
1695            gen_helper_float_DT(fp, cpu_env, cpu_fpul);
1696            gen_store_fpr64(ctx, fp, B11_8);
1697            tcg_temp_free_i64(fp);
1698        } else {
1700            gen_helper_float_FT(FREG(B11_8), cpu_env, cpu_fpul);
1701        }
1702        return;
1703    case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1704        CHECK_FPU_ENABLED
1705        if (ctx->tbflags & FPSCR_PR) {
1706            TCGv_i64 fp;
1707            if (ctx->opcode & 0x0100) {
1708                goto do_illegal;
1709            }
1710            fp = tcg_temp_new_i64();
1711            gen_load_fpr64(ctx, fp, B11_8);
1712            gen_helper_ftrc_DT(cpu_fpul, cpu_env, fp);
1713            tcg_temp_free_i64(fp);
1714        } else {
1716            gen_helper_ftrc_FT(cpu_fpul, cpu_env, FREG(B11_8));
1717        }
1718        return;
1719    case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
1720        CHECK_FPU_ENABLED
1721        tcg_gen_xori_i32(FREG(B11_8), FREG(B11_8), 0x80000000);
1722        return;
1723    case 0xf05d: /* fabs FRn/DRn - FPSCR: Nothing */
1724        CHECK_FPU_ENABLED
1725        tcg_gen_andi_i32(FREG(B11_8), FREG(B11_8), 0x7fffffff);
1726        return;
1727    case 0xf06d: /* fsqrt FRn */
1728        CHECK_FPU_ENABLED
1729        if (ctx->tbflags & FPSCR_PR) {
1730            if (ctx->opcode & 0x0100) {
1731                goto do_illegal;
1732            }
1733            TCGv_i64 fp = tcg_temp_new_i64();
1734            gen_load_fpr64(ctx, fp, B11_8);
1735            gen_helper_fsqrt_DT(fp, cpu_env, fp);
1736            gen_store_fpr64(ctx, fp, B11_8);
1737            tcg_temp_free_i64(fp);
1738        } else {
1739            gen_helper_fsqrt_FT(FREG(B11_8), cpu_env, FREG(B11_8));
1740        }
1741        return;
1742    case 0xf07d: /* fsrra FRn */
1743        CHECK_FPU_ENABLED
1744        CHECK_FPSCR_PR_0
1745        gen_helper_fsrra_FT(FREG(B11_8), cpu_env, FREG(B11_8));
1746        return;
1747    case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
1748        CHECK_FPU_ENABLED
1749        CHECK_FPSCR_PR_0
1750        tcg_gen_movi_i32(FREG(B11_8), 0);
1751        return;
1752    case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
1753        CHECK_FPU_ENABLED
1754        CHECK_FPSCR_PR_0
1755        tcg_gen_movi_i32(FREG(B11_8), 0x3f800000);
1756        return;
1757    case 0xf0ad: /* fcnvsd FPUL,DRn */
1758        CHECK_FPU_ENABLED
1759        {
1760            TCGv_i64 fp = tcg_temp_new_i64();
1761            gen_helper_fcnvsd_FT_DT(fp, cpu_env, cpu_fpul);
1762            gen_store_fpr64(ctx, fp, B11_8);
1763            tcg_temp_free_i64(fp);
1764        }
1765        return;
1766    case 0xf0bd: /* fcnvds DRn,FPUL */
1767        CHECK_FPU_ENABLED
1768        {
1769            TCGv_i64 fp = tcg_temp_new_i64();
1770            gen_load_fpr64(ctx, fp, B11_8);
1771            gen_helper_fcnvds_DT_FT(cpu_fpul, cpu_env, fp);
1772            tcg_temp_free_i64(fp);
1773        }
1774        return;
1775    case 0xf0ed: /* fipr FVm,FVn */
1776        CHECK_FPU_ENABLED
1777        CHECK_FPSCR_PR_1
1778        {
1779            TCGv m = tcg_const_i32((ctx->opcode >> 8) & 3);
1780            TCGv n = tcg_const_i32((ctx->opcode >> 10) & 3);
1781            gen_helper_fipr(cpu_env, m, n);
1782            tcg_temp_free(m);
1783            tcg_temp_free(n);
1784            return;
1785        }
1787    case 0xf0fd: /* ftrv XMTRX,FVn */
1788        CHECK_FPU_ENABLED
1789        CHECK_FPSCR_PR_1
1790        {
1791            if ((ctx->opcode & 0x0300) != 0x0100) {
1792                goto do_illegal;
1793            }
1794            TCGv n = tcg_const_i32((ctx->opcode >> 10) & 3);
1795            gen_helper_ftrv(cpu_env, n);
1796            tcg_temp_free(n);
1797            return;
1798        }
1800    }
1801#if 0
1802    fprintf(stderr, "unknown instruction 0x%04x at pc 0x%08x\n",
1803            ctx->opcode, ctx->pc);
1804    fflush(stderr);
1805#endif
1806 do_illegal:
1807    if (ctx->envflags & DELAY_SLOT_MASK) {
1808 do_illegal_slot:
1809        gen_save_cpu_state(ctx, true);
1810        gen_helper_raise_slot_illegal_instruction(cpu_env);
1811    } else {
1812        gen_save_cpu_state(ctx, true);
1813        gen_helper_raise_illegal_instruction(cpu_env);
1814    }
1815    ctx->bstate = BS_EXCP;
1816    return;
1817
1818 do_fpu_disabled:
1819    gen_save_cpu_state(ctx, true);
1820    if (ctx->envflags & DELAY_SLOT_MASK) {
1821        gen_helper_raise_slot_fpu_disable(cpu_env);
1822    } else {
1823        gen_helper_raise_fpu_disable(cpu_env);
1824    }
1825    ctx->bstate = BS_EXCP;
1826    return;
1827}
1828
1829static void decode_opc(DisasContext * ctx)
1830{
1831    uint32_t old_flags = ctx->envflags;
1832
1833    _decode_opc(ctx);
1834
1835    if (old_flags & DELAY_SLOT_MASK) {
1836        /* go out of the delay slot */
1837        ctx->envflags &= ~DELAY_SLOT_MASK;
1838
1839        /* When in an exclusive region, we must continue to the end
1840           for conditional branches.  */
1841        if (ctx->tbflags & GUSA_EXCLUSIVE
1842            && old_flags & DELAY_SLOT_CONDITIONAL) {
1843            gen_delayed_conditional_jump(ctx);
1844            return;
1845        }
1846        /* Otherwise this is probably an invalid gUSA region.
1847           Drop the GUSA bits so the next TB doesn't see them.  */
1848        ctx->envflags &= ~GUSA_MASK;
1849
1850        tcg_gen_movi_i32(cpu_flags, ctx->envflags);
1851        ctx->bstate = BS_BRANCH;
1852        if (old_flags & DELAY_SLOT_CONDITIONAL) {
1853            gen_delayed_conditional_jump(ctx);
1854        } else {
1855            gen_jump(ctx);
1856        }
1857    }
1858}
1859
1860#ifdef CONFIG_USER_ONLY
1861/* For uniprocessors, SH4 uses optimistic restartable atomic sequences.
1862   Upon an interrupt, a real kernel would simply notice magic values in
1863   the registers and reset the PC to the start of the sequence.
1864
1865   For QEMU, we cannot do this in quite the same way.  Instead, we notice
1866   the normal start of such a sequence (mov #-x,r15).  While we can handle
1867   any sequence via cpu_exec_step_atomic, we can recognize the "normal"
1868   sequences and transform them into atomic operations as seen by the host.
1869*/
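/* As an illustration (registers chosen arbitrarily), an atomic add emitted
   by gcc looks something like

        mov     #(0f-1f), r15        ! enter gUSA region, r15 = -6
     0: mov.l   @r4, r0              ! load
        add     r5, r0               ! operate
        mov.l   r0, @r4              ! store
     1:                              ! region ends when r15 is restored

   decode_gusa() below pattern-matches the load/operate/store and replaces
   the whole region with a single host atomic operation.  */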
1870static int decode_gusa(DisasContext *ctx, CPUSH4State *env, int *pmax_insns)
1871{
1872    uint16_t insns[5];
1873    int ld_adr, ld_dst, ld_mop;
1874    int op_dst, op_src, op_opc;
1875    int mv_src, mt_dst, st_src, st_mop;
1876    TCGv op_arg;
1877
1878    uint32_t pc = ctx->pc;
1879    uint32_t pc_end = ctx->tb->cs_base;
1880    int backup = sextract32(ctx->tbflags, GUSA_SHIFT, 8);
1881    int max_insns = (pc_end - pc) / 2;
1882    int i;
1883
1884    if (pc != pc_end + backup || max_insns < 2) {
1885        /* This is a malformed gUSA region.  Don't do anything special,
1886           since the interpreter is likely to get confused.  */
1887        ctx->envflags &= ~GUSA_MASK;
1888        return 0;
1889    }
1890
1891    if (ctx->tbflags & GUSA_EXCLUSIVE) {
1892        /* Regardless of single-stepping or the end of the page,
1893           we must complete execution of the gUSA region while
1894           holding the exclusive lock.  */
1895        *pmax_insns = max_insns;
1896        return 0;
1897    }
1898
1899    /* The state machine below will consume only a few insns.
1900       If there are more than that in a region, fail now.  */
1901    if (max_insns > ARRAY_SIZE(insns)) {
1902        goto fail;
1903    }
1904
1905    /* Read all of the insns for the region.  */
1906    for (i = 0; i < max_insns; ++i) {
1907        insns[i] = cpu_lduw_code(env, pc + i * 2);
1908    }
1909
1910    ld_adr = ld_dst = ld_mop = -1;
1911    mv_src = -1;
1912    op_dst = op_src = op_opc = -1;
1913    mt_dst = -1;
1914    st_src = st_mop = -1;
1915    TCGV_UNUSED(op_arg);
1916    i = 0;
1917
1918#define NEXT_INSN \
1919    do { if (i >= max_insns) goto fail; ctx->opcode = insns[i++]; } while (0)
1920
1921    /*
1922     * Expect a load to begin the region.
1923     */
1924    NEXT_INSN;
1925    switch (ctx->opcode & 0xf00f) {
1926    case 0x6000: /* mov.b @Rm,Rn */
1927        ld_mop = MO_SB;
1928        break;
1929    case 0x6001: /* mov.w @Rm,Rn */
1930        ld_mop = MO_TESW;
1931        break;
1932    case 0x6002: /* mov.l @Rm,Rn */
1933        ld_mop = MO_TESL;
1934        break;
1935    default:
1936        goto fail;
1937    }
1938    ld_adr = B7_4;
1939    ld_dst = B11_8;
1940    if (ld_adr == ld_dst) {
1941        goto fail;
1942    }
1943    /* Unless we see a mov, any two-operand operation must use ld_dst.  */
1944    op_dst = ld_dst;
1945
1946    /*
1947     * Expect an optional register move.
1948     */
1949    NEXT_INSN;
1950    switch (ctx->opcode & 0xf00f) {
1951    case 0x6003: /* mov Rm,Rn */
1952        /* Here we want to recognize ld_dst being saved for later consumption,
1953           or for another input register being copied so that ld_dst need not
1954           be clobbered during the operation.  */
1955        op_dst = B11_8;
1956        mv_src = B7_4;
1957        if (op_dst == ld_dst) {
1958            /* Overwriting the load output.  */
1959            goto fail;
1960        }
1961        if (mv_src != ld_dst) {
1962            /* Copying a new input; constrain op_src to match the load.  */
1963            op_src = ld_dst;
1964        }
1965        break;
1966
1967    default:
1968        /* Put back and re-examine as operation.  */
1969        --i;
1970    }
1971
1972    /*
1973     * Expect the operation.
1974     */
1975    NEXT_INSN;
1976    switch (ctx->opcode & 0xf00f) {
1977    case 0x300c: /* add Rm,Rn */
1978        op_opc = INDEX_op_add_i32;
1979        goto do_reg_op;
1980    case 0x2009: /* and Rm,Rn */
1981        op_opc = INDEX_op_and_i32;
1982        goto do_reg_op;
1983    case 0x200a: /* xor Rm,Rn */
1984        op_opc = INDEX_op_xor_i32;
1985        goto do_reg_op;
1986    case 0x200b: /* or Rm,Rn */
1987        op_opc = INDEX_op_or_i32;
1988    do_reg_op:
1989        /* The operation register should be as expected, and the
1990           other input cannot depend on the load.  */
1991        if (op_dst != B11_8) {
1992            goto fail;
1993        }
1994        if (op_src < 0) {
1995            /* Unconstrained input.  */
1996            op_src = B7_4;
1997        } else if (op_src == B7_4) {
1998            /* Constrained input matched load.  All operations are
1999               commutative; "swap" them by "moving" the load output
2000               to the (implicit) first argument and the move source
2001               to the (explicit) second argument.  */
2002            op_src = mv_src;
2003        } else {
2004            goto fail;
2005        }
2006        op_arg = REG(op_src);
2007        break;
2008
2009    case 0x6007: /* not Rm,Rn */
2010        if (ld_dst != B7_4 || mv_src >= 0) {
2011            goto fail;
2012        }
2013        op_dst = B11_8;
2014        op_opc = INDEX_op_xor_i32;
2015        op_arg = tcg_const_i32(-1);
2016        break;
2017
2018    case 0x7000 ... 0x700f: /* add #imm,Rn */
2019        if (op_dst != B11_8 || mv_src >= 0) {
2020            goto fail;
2021        }
2022        op_opc = INDEX_op_add_i32;
2023        op_arg = tcg_const_i32(B7_0s);
2024        break;
2025
2026    case 0x3000: /* cmp/eq Rm,Rn */
2027        /* Looking for the middle of a compare-and-swap sequence,
2028           beginning with the compare.  Operands can be either order,
2029           but with only one overlapping the load.  */
2030        if ((ld_dst == B11_8) + (ld_dst == B7_4) != 1 || mv_src >= 0) {
2031            goto fail;
2032        }
2033        op_opc = INDEX_op_setcond_i32;  /* placeholder */
2034        op_src = (ld_dst == B11_8 ? B7_4 : B11_8);
2035        op_arg = REG(op_src);
2036
2037        NEXT_INSN;
2038        switch (ctx->opcode & 0xff00) {
2039        case 0x8b00: /* bf label */
2040        case 0x8f00: /* bf/s label */
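            /* The branch sits at region offset (i - 1) * 2, so its target
               pc + (i - 1) * 2 + 4 + disp * 2 simplifies to
               pc + (i + 1 + disp) * 2, which must be the region end.  */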
2041            if (pc + (i + 1 + B7_0s) * 2 != pc_end) {
2042                goto fail;
2043            }
2044            if ((ctx->opcode & 0xff00) == 0x8b00) { /* bf label */
2045                break;
2046            }
2047            /* We're looking to unconditionally modify Rn with the
2048               result of the comparison, within the delay slot of
2049               the branch.  This is used by older gcc.  */
2050            NEXT_INSN;
2051            if ((ctx->opcode & 0xf0ff) == 0x0029) { /* movt Rn */
2052                mt_dst = B11_8;
2053            } else {
2054                goto fail;
2055            }
2056            break;
2057
2058        default:
2059            goto fail;
2060        }
2061        break;
2062
2063    case 0x2008: /* tst Rm,Rn */
2064        /* Looking for a compare-and-swap against zero.  */
2065        if (ld_dst != B11_8 || ld_dst != B7_4 || mv_src >= 0) {
2066            goto fail;
2067        }
2068        op_opc = INDEX_op_setcond_i32;
2069        op_arg = tcg_const_i32(0);
2070
2071        NEXT_INSN;
2072        if ((ctx->opcode & 0xff00) != 0x8900 /* bt label */
2073            || pc + (i + 1 + B7_0s) * 2 != pc_end) {
2074            goto fail;
2075        }
2076        break;
2077
2078    default:
2079        /* Put back and re-examine as store.  */
2080        --i;
2081    }
2082
2083    /*
2084     * Expect the store.
2085     */
2086    /* The store must be the last insn.  */
2087    if (i != max_insns - 1) {
2088        goto fail;
2089    }
2090    NEXT_INSN;
2091    switch (ctx->opcode & 0xf00f) {
2092    case 0x2000: /* mov.b Rm,@Rn */
2093        st_mop = MO_UB;
2094        break;
2095    case 0x2001: /* mov.w Rm,@Rn */
2096        st_mop = MO_UW;
2097        break;
2098    case 0x2002: /* mov.l Rm,@Rn */
2099        st_mop = MO_UL;
2100        break;
2101    default:
2102        goto fail;
2103    }
2104    /* The store must match the load.  */
2105    if (ld_adr != B11_8 || st_mop != (ld_mop & MO_SIZE)) {
2106        goto fail;
2107    }
2108    st_src = B7_4;
2109
2110#undef NEXT_INSN
2111
2112    /*
2113     * Emit the operation.
2114     */
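    /* A single insn_start covers the whole region: if the atomic operation
       faults, the restored PC is the start of the sequence, matching the
       restartable semantics described above.  */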
2115    tcg_gen_insn_start(pc, ctx->envflags);
2116    switch (op_opc) {
2117    case -1:
2118        /* No operation found.  Look for exchange pattern.  */
2119        if (st_src == ld_dst || mv_src >= 0) {
2120            goto fail;
2121        }
2122        tcg_gen_atomic_xchg_i32(REG(ld_dst), REG(ld_adr), REG(st_src),
2123                                ctx->memidx, ld_mop);
2124        break;
2125
2126    case INDEX_op_add_i32:
2127        if (op_dst != st_src) {
2128            goto fail;
2129        }
2130        if (op_dst == ld_dst && st_mop == MO_UL) {
2131            tcg_gen_atomic_add_fetch_i32(REG(ld_dst), REG(ld_adr),
2132                                         op_arg, ctx->memidx, ld_mop);
2133        } else {
2134            tcg_gen_atomic_fetch_add_i32(REG(ld_dst), REG(ld_adr),
2135                                         op_arg, ctx->memidx, ld_mop);
2136            if (op_dst != ld_dst) {
2137                /* Note that mop sizes < 4 cannot use add_fetch
2138                   because it won't carry into the higher bits.  */
2139                tcg_gen_add_i32(REG(op_dst), REG(ld_dst), op_arg);
2140            }
2141        }
2142        break;
2143
2144    case INDEX_op_and_i32:
2145        if (op_dst != st_src) {
2146            goto fail;
2147        }
2148        if (op_dst == ld_dst) {
2149            tcg_gen_atomic_and_fetch_i32(REG(ld_dst), REG(ld_adr),
2150                                         op_arg, ctx->memidx, ld_mop);
2151        } else {
2152            tcg_gen_atomic_fetch_and_i32(REG(ld_dst), REG(ld_adr),
2153                                         op_arg, ctx->memidx, ld_mop);
2154            tcg_gen_and_i32(REG(op_dst), REG(ld_dst), op_arg);
2155        }
2156        break;
2157
2158    case INDEX_op_or_i32:
2159        if (op_dst != st_src) {
2160            goto fail;
2161        }
2162        if (op_dst == ld_dst) {
2163            tcg_gen_atomic_or_fetch_i32(REG(ld_dst), REG(ld_adr),
2164                                        op_arg, ctx->memidx, ld_mop);
2165        } else {
2166            tcg_gen_atomic_fetch_or_i32(REG(ld_dst), REG(ld_adr),
2167                                        op_arg, ctx->memidx, ld_mop);
2168            tcg_gen_or_i32(REG(op_dst), REG(ld_dst), op_arg);
2169        }
2170        break;
2171
2172    case INDEX_op_xor_i32:
2173        if (op_dst != st_src) {
2174            goto fail;
2175        }
2176        if (op_dst == ld_dst) {
2177            tcg_gen_atomic_xor_fetch_i32(REG(ld_dst), REG(ld_adr),
2178                                         op_arg, ctx->memidx, ld_mop);
2179        } else {
2180            tcg_gen_atomic_fetch_xor_i32(REG(ld_dst), REG(ld_adr),
2181                                         op_arg, ctx->memidx, ld_mop);
2182            tcg_gen_xor_i32(REG(op_dst), REG(ld_dst), op_arg);
2183        }
2184        break;
2185
2186    case INDEX_op_setcond_i32:
2187        if (st_src == ld_dst) {
2188            goto fail;
2189        }
2190        tcg_gen_atomic_cmpxchg_i32(REG(ld_dst), REG(ld_adr), op_arg,
2191                                   REG(st_src), ctx->memidx, ld_mop);
2192        tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, REG(ld_dst), op_arg);
2193        if (mt_dst >= 0) {
2194            tcg_gen_mov_i32(REG(mt_dst), cpu_sr_t);
2195        }
2196        break;
2197
2198    default:
2199        g_assert_not_reached();
2200    }
2201
2202    /* If op_src is not a valid register, then op_arg was a constant.  */
2203    if (op_src < 0) {
2204        tcg_temp_free_i32(op_arg);
2205    }
2206
2207    /* The entire region has been translated.  */
2208    ctx->envflags &= ~GUSA_MASK;
2209    ctx->pc = pc_end;
2210    return max_insns;
2211
2212 fail:
2213    qemu_log_mask(LOG_UNIMP, "Unrecognized gUSA sequence %08x-%08x\n",
2214                  pc, pc_end);
2215
2216    /* Restart with the EXCLUSIVE bit set, within a TB run via
2217       cpu_exec_step_atomic holding the exclusive lock.  */
2218    tcg_gen_insn_start(pc, ctx->envflags);
2219    ctx->envflags |= GUSA_EXCLUSIVE;
2220    gen_save_cpu_state(ctx, false);
2221    gen_helper_exclusive(cpu_env);
2222    ctx->bstate = BS_EXCP;
2223
2224    /* We're not executing an instruction, but we must report one for the
2225       purposes of accounting within the TB.  We might as well report the
2226       entire region consumed via ctx->pc so that it's immediately available
2227       in the disassembly dump.  */
2228    ctx->pc = pc_end;
2229    return 1;
2230}
2231#endif
2232
2233void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
2234{
2235    CPUSH4State *env = cs->env_ptr;
2236    DisasContext ctx;
2237    target_ulong pc_start;
2238    int num_insns;
2239    int max_insns;
2240
2241    pc_start = tb->pc;
2242    ctx.pc = pc_start;
2243    ctx.tbflags = (uint32_t)tb->flags;
2244    ctx.envflags = tb->flags & TB_FLAG_ENVFLAGS_MASK;
2245    ctx.bstate = BS_NONE;
2246    ctx.memidx = (ctx.tbflags & (1u << SR_MD)) == 0 ? 1 : 0;
2247    /* We don't know if the delayed pc came from a dynamic or static branch,
2248       so assume it is a dynamic branch.  */
2249    ctx.delayed_pc = -1; /* use delayed pc from env pointer */
2250    ctx.tb = tb;
2251    ctx.singlestep_enabled = cs->singlestep_enabled;
2252    ctx.features = env->features;
2253    ctx.has_movcal = (ctx.tbflags & TB_FLAG_PENDING_MOVCA);
2254    ctx.gbank = ((ctx.tbflags & (1 << SR_MD)) &&
2255                 (ctx.tbflags & (1 << SR_RB))) * 0x10;
2256    ctx.fbank = ctx.tbflags & FPSCR_FR ? 0x10 : 0;
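    /* When both SR.MD and SR.RB are set, R0..R7 resolve to the
       R0_BANK1..R7_BANK1 globals (cpu_gregs[16..23]); FPSCR.FR likewise
       selects the upper bank of cpu_fregs.  */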
2257
2258    max_insns = tb->cflags & CF_COUNT_MASK;
2259    if (max_insns == 0) {
2260        max_insns = CF_COUNT_MASK;
2261    }
2262    max_insns = MIN(max_insns, TCG_MAX_INSNS);
2263
2264    /* Since the ISA is fixed-width, we can bound by the number
2265       of instructions remaining on the page.  */
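    /* (-(pc | TARGET_PAGE_MASK) is the byte distance from pc to the end
       of its page, and every SH4 insn is 2 bytes wide.)  */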
2266    num_insns = -(ctx.pc | TARGET_PAGE_MASK) / 2;
2267    max_insns = MIN(max_insns, num_insns);
2268
2269    /* Single stepping means just that.  */
2270    if (ctx.singlestep_enabled || singlestep) {
2271        max_insns = 1;
2272    }
2273
2274    gen_tb_start(tb);
2275    num_insns = 0;
2276
2277#ifdef CONFIG_USER_ONLY
2278    if (ctx.tbflags & GUSA_MASK) {
2279        num_insns = decode_gusa(&ctx, env, &max_insns);
2280    }
2281#endif
2282
2283    while (ctx.bstate == BS_NONE
2284           && num_insns < max_insns
2285           && !tcg_op_buf_full()) {
2286        tcg_gen_insn_start(ctx.pc, ctx.envflags);
2287        num_insns++;
2288
2289        if (unlikely(cpu_breakpoint_test(cs, ctx.pc, BP_ANY))) {
2290            /* We have hit a breakpoint - make sure PC is up-to-date */
2291            gen_save_cpu_state(&ctx, true);
2292            gen_helper_debug(cpu_env);
2293            ctx.bstate = BS_EXCP;
2294            /* The address covered by the breakpoint must be included in
2295               [tb->pc, tb->pc + tb->size) in order for it to be
2296               properly cleared -- thus we increment the PC here so that
2297               the logic setting tb->size below does the right thing.  */
2298            ctx.pc += 2;
2299            break;
2300        }
2301
2302        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
2303            gen_io_start();
2304        }
2305
2306        ctx.opcode = cpu_lduw_code(env, ctx.pc);
2307        decode_opc(&ctx);
2308        ctx.pc += 2;
2309    }
2310    if (tb->cflags & CF_LAST_IO) {
2311        gen_io_end();
2312    }
2313
2314    if (ctx.tbflags & GUSA_EXCLUSIVE) {
2315        /* Ending the region of exclusivity.  Clear the bits.  */
2316        ctx.envflags &= ~GUSA_MASK;
2317    }
2318
2319    if (cs->singlestep_enabled) {
2320        gen_save_cpu_state(&ctx, true);
2321        gen_helper_debug(cpu_env);
2322    } else {
2323        switch (ctx.bstate) {
2324        case BS_STOP:
2325            gen_save_cpu_state(&ctx, true);
2326            tcg_gen_exit_tb(0);
2327            break;
2328        case BS_NONE:
2329            gen_save_cpu_state(&ctx, false);
2330            gen_goto_tb(&ctx, 0, ctx.pc);
2331            break;
2332        case BS_EXCP:
2333            /* fall through */
2334        case BS_BRANCH:
2335        default:
2336            break;
2337        }
2338    }
2339
2340    gen_tb_end(tb, num_insns);
2341
2342    tb->size = ctx.pc - pc_start;
2343    tb->icount = num_insns;
2344
2345#ifdef DEBUG_DISAS
2346    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
2347        && qemu_log_in_addr_range(pc_start)) {
2348        qemu_log_lock();
2349        qemu_log("IN:\n");      /* , lookup_symbol(pc_start)); */
2350        log_target_disas(cs, pc_start, ctx.pc - pc_start, 0);
2351        qemu_log("\n");
2352        qemu_log_unlock();
2353    }
2354#endif
2355}
2356
2357void restore_state_to_opc(CPUSH4State *env, TranslationBlock *tb,
2358                          target_ulong *data)
2359{
2360    env->pc = data[0];
2361    env->flags = data[1];
2362    /* Theoretically delayed_pc should also be restored. In practice the
2363       branch instruction is re-executed after exception, so the delayed
2364       branch target will be recomputed. */
2365}
2366