qemu/target/m68k/translate.c
   1/*
   2 *  m68k translation
   3 *
   4 *  Copyright (c) 2005-2007 CodeSourcery
   5 *  Written by Paul Brook
   6 *
   7 * This library is free software; you can redistribute it and/or
   8 * modify it under the terms of the GNU Lesser General Public
   9 * License as published by the Free Software Foundation; either
  10 * version 2 of the License, or (at your option) any later version.
  11 *
  12 * This library is distributed in the hope that it will be useful,
  13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  15 * General Public License for more details.
  16 *
  17 * You should have received a copy of the GNU Lesser General Public
  18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
  19 */
  20
  21#include "qemu/osdep.h"
  22#include "cpu.h"
  23#include "disas/disas.h"
  24#include "exec/exec-all.h"
  25#include "tcg-op.h"
  26#include "qemu/log.h"
  27#include "exec/cpu_ldst.h"
  28
  29#include "exec/helper-proto.h"
  30#include "exec/helper-gen.h"
  31
  32#include "trace-tcg.h"
  33#include "exec/log.h"
  34
  35
  36//#define DEBUG_DISPATCH 1
  37
  38/* Fake floating point.  */
  39#define tcg_gen_mov_f64 tcg_gen_mov_i64
  40#define tcg_gen_qemu_ldf64 tcg_gen_qemu_ld64
  41#define tcg_gen_qemu_stf64 tcg_gen_qemu_st64
  42
  43#define DEFO32(name, offset) static TCGv QREG_##name;
  44#define DEFO64(name, offset) static TCGv_i64 QREG_##name;
  45#define DEFF64(name, offset) static TCGv_i64 QREG_##name;
  46#include "qregs.def"
  47#undef DEFO32
  48#undef DEFO64
  49#undef DEFF64
  50
  51static TCGv_i32 cpu_halted;
  52static TCGv_i32 cpu_exception_index;
  53
  54static TCGv_env cpu_env;
  55
  56static char cpu_reg_names[3*8*3 + 5*4];
  57static TCGv cpu_dregs[8];
  58static TCGv cpu_aregs[8];
  59static TCGv_i64 cpu_fregs[8];
  60static TCGv_i64 cpu_macc[4];
  61
  62#define REG(insn, pos)  (((insn) >> (pos)) & 7)
  63#define DREG(insn, pos) cpu_dregs[REG(insn, pos)]
  64#define AREG(insn, pos) get_areg(s, REG(insn, pos))
  65#define FREG(insn, pos) cpu_fregs[REG(insn, pos)]
  66#define MACREG(acc)     cpu_macc[acc]
  67#define QREG_SP         get_areg(s, 7)
  68
  69static TCGv NULL_QREG;
  70#define IS_NULL_QREG(t) (TCGV_EQUAL(t, NULL_QREG))
  71/* Used to distinguish stores from bad addressing modes.  */
  72static TCGv store_dummy;
  73
  74#include "exec/gen-icount.h"
  75
  76void m68k_tcg_init(void)
  77{
  78    char *p;
  79    int i;
  80
  81    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
  82    tcg_ctx.tcg_env = cpu_env;
  83
  84#define DEFO32(name, offset) \
  85    QREG_##name = tcg_global_mem_new_i32(cpu_env, \
  86        offsetof(CPUM68KState, offset), #name);
  87#define DEFO64(name, offset) \
  88    QREG_##name = tcg_global_mem_new_i64(cpu_env, \
  89        offsetof(CPUM68KState, offset), #name);
  90#define DEFF64(name, offset) DEFO64(name, offset)
  91#include "qregs.def"
  92#undef DEFO32
  93#undef DEFO64
  94#undef DEFF64
  95
  96    cpu_halted = tcg_global_mem_new_i32(cpu_env,
  97                                        -offsetof(M68kCPU, env) +
  98                                        offsetof(CPUState, halted), "HALTED");
  99    cpu_exception_index = tcg_global_mem_new_i32(cpu_env,
 100                                                 -offsetof(M68kCPU, env) +
 101                                                 offsetof(CPUState, exception_index),
 102                                                 "EXCEPTION");
 103
 104    p = cpu_reg_names;
 105    for (i = 0; i < 8; i++) {
 106        sprintf(p, "D%d", i);
 107        cpu_dregs[i] = tcg_global_mem_new(cpu_env,
 108                                          offsetof(CPUM68KState, dregs[i]), p);
 109        p += 3;
 110        sprintf(p, "A%d", i);
 111        cpu_aregs[i] = tcg_global_mem_new(cpu_env,
 112                                          offsetof(CPUM68KState, aregs[i]), p);
 113        p += 3;
 114        sprintf(p, "F%d", i);
 115        cpu_fregs[i] = tcg_global_mem_new_i64(cpu_env,
 116                                          offsetof(CPUM68KState, fregs[i]), p);
 117        p += 3;
 118    }
 119    for (i = 0; i < 4; i++) {
 120        sprintf(p, "ACC%d", i);
 121        cpu_macc[i] = tcg_global_mem_new_i64(cpu_env,
 122                                         offsetof(CPUM68KState, macc[i]), p);
 123        p += 5;
 124    }
 125
 126    NULL_QREG = tcg_global_mem_new(cpu_env, -4, "NULL");
 127    store_dummy = tcg_global_mem_new(cpu_env, -8, "NULL");
 128}
 129
 130/* internal defines */
 131typedef struct DisasContext {
 132    CPUM68KState *env;
 133    target_ulong insn_pc; /* Start of the current instruction.  */
 134    target_ulong pc;
 135    int is_jmp;
 136    CCOp cc_op; /* Current CC operation */
 137    int cc_op_synced;
 138    int user;
 139    uint32_t fpcr;
 140    struct TranslationBlock *tb;
 141    int singlestep_enabled;
 142    TCGv_i64 mactmp;
 143    int done_mac;
 144    int writeback_mask;
 145    TCGv writeback[8];
 146} DisasContext;
 147
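    /* Address register updates coming from pre-decrement and post-increment
     * addressing modes are staged in writeback[]/writeback_mask and are only
     * committed to the architectural registers by do_writebacks() once the
     * instruction is complete, so an exception raised part-way through an
     * instruction does not leave an address register already modified.
     * get_areg() makes a pending update visible to later operands of the
     * same instruction.
     */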
 148static TCGv get_areg(DisasContext *s, unsigned regno)
 149{
 150    if (s->writeback_mask & (1 << regno)) {
 151        return s->writeback[regno];
 152    } else {
 153        return cpu_aregs[regno];
 154    }
 155}
 156
 157static void delay_set_areg(DisasContext *s, unsigned regno,
 158                           TCGv val, bool give_temp)
 159{
 160    if (s->writeback_mask & (1 << regno)) {
 161        if (give_temp) {
 162            tcg_temp_free(s->writeback[regno]);
 163            s->writeback[regno] = val;
 164        } else {
 165            tcg_gen_mov_i32(s->writeback[regno], val);
 166        }
 167    } else {
 168        s->writeback_mask |= 1 << regno;
 169        if (give_temp) {
 170            s->writeback[regno] = val;
 171        } else {
 172            TCGv tmp = tcg_temp_new();
 173            s->writeback[regno] = tmp;
 174            tcg_gen_mov_i32(tmp, val);
 175        }
 176    }
 177}
 178
 179static void do_writebacks(DisasContext *s)
 180{
 181    unsigned mask = s->writeback_mask;
 182    if (mask) {
 183        s->writeback_mask = 0;
 184        do {
 185            unsigned regno = ctz32(mask);
 186            tcg_gen_mov_i32(cpu_aregs[regno], s->writeback[regno]);
 187            tcg_temp_free(s->writeback[regno]);
 188            mask &= mask - 1;
 189        } while (mask);
 190    }
 191}
 192
 193#define DISAS_JUMP_NEXT 4
 194
 195#if defined(CONFIG_USER_ONLY)
 196#define IS_USER(s) 1
 197#else
 198#define IS_USER(s) s->user
 199#endif
 200
 201/* XXX: move that elsewhere */
 202/* ??? Fix exceptions.  */
 203static void *gen_throws_exception;
 204#define gen_last_qop NULL
 205
 206typedef void (*disas_proc)(CPUM68KState *env, DisasContext *s, uint16_t insn);
 207
 208#ifdef DEBUG_DISPATCH
 209#define DISAS_INSN(name)                                                \
 210    static void real_disas_##name(CPUM68KState *env, DisasContext *s,   \
 211                                  uint16_t insn);                       \
 212    static void disas_##name(CPUM68KState *env, DisasContext *s,        \
 213                             uint16_t insn)                             \
 214    {                                                                   \
 215        qemu_log("Dispatch " #name "\n");                               \
 216        real_disas_##name(env, s, insn);                                \
 217    }                                                                   \
 218    static void real_disas_##name(CPUM68KState *env, DisasContext *s,   \
 219                                  uint16_t insn)
 220#else
 221#define DISAS_INSN(name)                                                \
 222    static void disas_##name(CPUM68KState *env, DisasContext *s,        \
 223                             uint16_t insn)
 224#endif
 225
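    /* Condition codes are evaluated lazily: QREG_CC_N, QREG_CC_V, QREG_CC_C,
     * QREG_CC_Z and QREG_CC_X hold values whose meaning depends on the
     * current CC_OP, and gen_flush_flags() converts them to real CCR flags
     * on demand.  cc_op_live[] records which of those fields carry live data
     * for each CC_OP, so that set_cc_op() can discard the others (e.g. going
     * from CC_OP_ADDL to CC_OP_LOGIC discards V).
     */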
 226static const uint8_t cc_op_live[CC_OP_NB] = {
 227    [CC_OP_FLAGS] = CCF_C | CCF_V | CCF_Z | CCF_N | CCF_X,
 228    [CC_OP_ADDB ... CC_OP_ADDL] = CCF_X | CCF_N | CCF_V,
 229    [CC_OP_SUBB ... CC_OP_SUBL] = CCF_X | CCF_N | CCF_V,
 230    [CC_OP_CMPB ... CC_OP_CMPL] = CCF_X | CCF_N | CCF_V,
 231    [CC_OP_LOGIC] = CCF_X | CCF_N
 232};
 233
 234static void set_cc_op(DisasContext *s, CCOp op)
 235{
 236    CCOp old_op = s->cc_op;
 237    int dead;
 238
 239    if (old_op == op) {
 240        return;
 241    }
 242    s->cc_op = op;
 243    s->cc_op_synced = 0;
 244
 245    /* Discard CC computation that will no longer be used.
 246       Note that X and N are never dead.  */
 247    dead = cc_op_live[old_op] & ~cc_op_live[op];
 248    if (dead & CCF_C) {
 249        tcg_gen_discard_i32(QREG_CC_C);
 250    }
 251    if (dead & CCF_Z) {
 252        tcg_gen_discard_i32(QREG_CC_Z);
 253    }
 254    if (dead & CCF_V) {
 255        tcg_gen_discard_i32(QREG_CC_V);
 256    }
 257}
 258
 259/* Update the CPU env CC_OP state.  */
 260static void update_cc_op(DisasContext *s)
 261{
 262    if (!s->cc_op_synced) {
 263        s->cc_op_synced = 1;
 264        tcg_gen_movi_i32(QREG_CC_OP, s->cc_op);
 265    }
 266}
 267
 268/* Generate a load from the specified address.  Narrow values are
 269   sign- or zero-extended to full register width according to SIGN.  */
 270static inline TCGv gen_load(DisasContext * s, int opsize, TCGv addr, int sign)
 271{
 272    TCGv tmp;
 273    int index = IS_USER(s);
 274    tmp = tcg_temp_new_i32();
 275    switch(opsize) {
 276    case OS_BYTE:
 277        if (sign)
 278            tcg_gen_qemu_ld8s(tmp, addr, index);
 279        else
 280            tcg_gen_qemu_ld8u(tmp, addr, index);
 281        break;
 282    case OS_WORD:
 283        if (sign)
 284            tcg_gen_qemu_ld16s(tmp, addr, index);
 285        else
 286            tcg_gen_qemu_ld16u(tmp, addr, index);
 287        break;
 288    case OS_LONG:
 289    case OS_SINGLE:
 290        tcg_gen_qemu_ld32u(tmp, addr, index);
 291        break;
 292    default:
 293        g_assert_not_reached();
 294    }
 295    gen_throws_exception = gen_last_qop;
 296    return tmp;
 297}
 298
 299static inline TCGv_i64 gen_load64(DisasContext * s, TCGv addr)
 300{
 301    TCGv_i64 tmp;
 302    int index = IS_USER(s);
 303    tmp = tcg_temp_new_i64();
 304    tcg_gen_qemu_ldf64(tmp, addr, index);
 305    gen_throws_exception = gen_last_qop;
 306    return tmp;
 307}
 308
 309/* Generate a store.  */
 310static inline void gen_store(DisasContext *s, int opsize, TCGv addr, TCGv val)
 311{
 312    int index = IS_USER(s);
 313    switch(opsize) {
 314    case OS_BYTE:
 315        tcg_gen_qemu_st8(val, addr, index);
 316        break;
 317    case OS_WORD:
 318        tcg_gen_qemu_st16(val, addr, index);
 319        break;
 320    case OS_LONG:
 321    case OS_SINGLE:
 322        tcg_gen_qemu_st32(val, addr, index);
 323        break;
 324    default:
 325        g_assert_not_reached();
 326    }
 327    gen_throws_exception = gen_last_qop;
 328}
 329
 330static inline void gen_store64(DisasContext *s, TCGv addr, TCGv_i64 val)
 331{
 332    int index = IS_USER(s);
 333    tcg_gen_qemu_stf64(val, addr, index);
 334    gen_throws_exception = gen_last_qop;
 335}
 336
 337typedef enum {
 338    EA_STORE,
 339    EA_LOADU,
 340    EA_LOADS
 341} ea_what;
 342
 343/* Generate an unsigned load for EA_LOADU, a signed load for EA_LOADS,
 344   otherwise (EA_STORE) generate a store of VAL.  */
 345static TCGv gen_ldst(DisasContext *s, int opsize, TCGv addr, TCGv val,
 346                     ea_what what)
 347{
 348    if (what == EA_STORE) {
 349        gen_store(s, opsize, addr, val);
 350        return store_dummy;
 351    } else {
 352        return gen_load(s, opsize, addr, what == EA_LOADS);
 353    }
 354}
 355
 356/* Read a 16-bit immediate constant */
 357static inline uint16_t read_im16(CPUM68KState *env, DisasContext *s)
 358{
 359    uint16_t im;
 360    im = cpu_lduw_code(env, s->pc);
 361    s->pc += 2;
 362    return im;
 363}
 364
 365/* Read an 8-bit immediate constant */
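    /* A byte immediate occupies the low half of a 16-bit extension word, so a
       full word is fetched (advancing the PC by 2) and truncated to 8 bits.  */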
 366static inline uint8_t read_im8(CPUM68KState *env, DisasContext *s)
 367{
 368    return read_im16(env, s);
 369}
 370
 371/* Read a 32-bit immediate constant.  */
 372static inline uint32_t read_im32(CPUM68KState *env, DisasContext *s)
 373{
 374    uint32_t im;
 375    im = read_im16(env, s) << 16;
 376    im |= 0xffff & read_im16(env, s);
 377    return im;
 378}
 379
 380/* Calculate an address index.  */
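    /* EXT is a (brief or full format) extension word: bit 15 selects an
       address vs a data register as the index, bits 14-12 give the register
       number, bit 11 selects a sign-extended word vs a long index, and
       bits 10-9 the scale factor (68020+ only).  */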
 381static TCGv gen_addr_index(DisasContext *s, uint16_t ext, TCGv tmp)
 382{
 383    TCGv add;
 384    int scale;
 385
 386    add = (ext & 0x8000) ? AREG(ext, 12) : DREG(ext, 12);
 387    if ((ext & 0x800) == 0) {
 388        tcg_gen_ext16s_i32(tmp, add);
 389        add = tmp;
 390    }
 391    scale = (ext >> 9) & 3;
 392    if (scale != 0) {
 393        tcg_gen_shli_i32(tmp, add, scale);
 394        add = tmp;
 395    }
 396    return add;
 397}
 398
 399/* Handle a base + index + displacement effective address.
 400   A NULL_QREG base means pc-relative.  */
 401static TCGv gen_lea_indexed(CPUM68KState *env, DisasContext *s, TCGv base)
 402{
 403    uint32_t offset;
 404    uint16_t ext;
 405    TCGv add;
 406    TCGv tmp;
 407    uint32_t bd, od;
 408
 409    offset = s->pc;
 410    ext = read_im16(env, s);
 411
 412    if ((ext & 0x800) == 0 && !m68k_feature(s->env, M68K_FEATURE_WORD_INDEX))
 413        return NULL_QREG;
 414
 415    if (m68k_feature(s->env, M68K_FEATURE_M68000) &&
 416        !m68k_feature(s->env, M68K_FEATURE_SCALED_INDEX)) {
 417        ext &= ~(3 << 9);
 418    }
 419
 420    if (ext & 0x100) {
 421        /* full extension word format */
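            /* Bit layout of the full format word: bit 7 suppresses the base
               register, bit 6 suppresses the index, bits 5-4 give the base
               displacement size (null/word/long) and bits 2-0 select memory
               indirection and the outer displacement size.  */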
 422        if (!m68k_feature(s->env, M68K_FEATURE_EXT_FULL))
 423            return NULL_QREG;
 424
 425        if ((ext & 0x30) > 0x10) {
 426            /* base displacement */
 427            if ((ext & 0x30) == 0x20) {
 428                bd = (int16_t)read_im16(env, s);
 429            } else {
 430                bd = read_im32(env, s);
 431            }
 432        } else {
 433            bd = 0;
 434        }
 435        tmp = tcg_temp_new();
 436        if ((ext & 0x44) == 0) {
 437            /* pre-index */
 438            add = gen_addr_index(s, ext, tmp);
 439        } else {
 440            add = NULL_QREG;
 441        }
 442        if ((ext & 0x80) == 0) {
 443            /* base not suppressed */
 444            if (IS_NULL_QREG(base)) {
 445                base = tcg_const_i32(offset + bd);
 446                bd = 0;
 447            }
 448            if (!IS_NULL_QREG(add)) {
 449                tcg_gen_add_i32(tmp, add, base);
 450                add = tmp;
 451            } else {
 452                add = base;
 453            }
 454        }
 455        if (!IS_NULL_QREG(add)) {
 456            if (bd != 0) {
 457                tcg_gen_addi_i32(tmp, add, bd);
 458                add = tmp;
 459            }
 460        } else {
 461            add = tcg_const_i32(bd);
 462        }
 463        if ((ext & 3) != 0) {
 464            /* memory indirect */
 465            base = gen_load(s, OS_LONG, add, 0);
 466            if ((ext & 0x44) == 4) {
 467                add = gen_addr_index(s, ext, tmp);
 468                tcg_gen_add_i32(tmp, add, base);
 469                add = tmp;
 470            } else {
 471                add = base;
 472            }
 473            if ((ext & 3) > 1) {
 474                /* outer displacement */
 475                if ((ext & 3) == 2) {
 476                    od = (int16_t)read_im16(env, s);
 477                } else {
 478                    od = read_im32(env, s);
 479                }
 480            } else {
 481                od = 0;
 482            }
 483            if (od != 0) {
 484                tcg_gen_addi_i32(tmp, add, od);
 485                add = tmp;
 486            }
 487        }
 488    } else {
 489        /* brief extension word format */
 490        tmp = tcg_temp_new();
 491        add = gen_addr_index(s, ext, tmp);
 492        if (!IS_NULL_QREG(base)) {
 493            tcg_gen_add_i32(tmp, add, base);
 494            if ((int8_t)ext)
 495                tcg_gen_addi_i32(tmp, tmp, (int8_t)ext);
 496        } else {
 497            tcg_gen_addi_i32(tmp, add, offset + (int8_t)ext);
 498        }
 499        add = tmp;
 500    }
 501    return add;
 502}
 503
 504/* Sign or zero extend a value.  */
 505
 506static inline void gen_ext(TCGv res, TCGv val, int opsize, int sign)
 507{
 508    switch (opsize) {
 509    case OS_BYTE:
 510        if (sign) {
 511            tcg_gen_ext8s_i32(res, val);
 512        } else {
 513            tcg_gen_ext8u_i32(res, val);
 514        }
 515        break;
 516    case OS_WORD:
 517        if (sign) {
 518            tcg_gen_ext16s_i32(res, val);
 519        } else {
 520            tcg_gen_ext16u_i32(res, val);
 521        }
 522        break;
 523    case OS_LONG:
 524        tcg_gen_mov_i32(res, val);
 525        break;
 526    default:
 527        g_assert_not_reached();
 528    }
 529}
 530
 531/* Evaluate all the CC flags.  */
 532
 533static void gen_flush_flags(DisasContext *s)
 534{
 535    TCGv t0, t1;
 536
 537    switch (s->cc_op) {
 538    case CC_OP_FLAGS:
 539        return;
 540
 541    case CC_OP_ADDB:
 542    case CC_OP_ADDW:
 543    case CC_OP_ADDL:
 544        tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
 545        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
 546        /* Compute signed overflow for addition.  */
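            /* QREG_CC_N holds the result and QREG_CC_V the second operand, so
               the first operand is recovered as N - V.  Overflow iff the
               operands have the same sign and the result's sign differs:
               V := (res ^ op2) & ~(op1 ^ op2).  */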
 547        t0 = tcg_temp_new();
 548        t1 = tcg_temp_new();
 549        tcg_gen_sub_i32(t0, QREG_CC_N, QREG_CC_V);
 550        gen_ext(t0, t0, s->cc_op - CC_OP_ADDB, 1);
 551        tcg_gen_xor_i32(t1, QREG_CC_N, QREG_CC_V);
 552        tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0);
 553        tcg_temp_free(t0);
 554        tcg_gen_andc_i32(QREG_CC_V, t1, QREG_CC_V);
 555        tcg_temp_free(t1);
 556        break;
 557
 558    case CC_OP_SUBB:
 559    case CC_OP_SUBW:
 560    case CC_OP_SUBL:
 561        tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
 562        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
 563        /* Compute signed overflow for subtraction.  */
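            /* As above, N is the result and V the subtrahend; the first
               operand is recovered as N + V.  Overflow iff the operands have
               opposite signs and the result's sign differs from the first
               operand: V := (op1 ^ op2) & (res ^ op1).  */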
 564        t0 = tcg_temp_new();
 565        t1 = tcg_temp_new();
 566        tcg_gen_add_i32(t0, QREG_CC_N, QREG_CC_V);
 567        gen_ext(t0, t0, s->cc_op - CC_OP_SUBB, 1);
 568        tcg_gen_xor_i32(t1, QREG_CC_N, t0);
 569        tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0);
 570        tcg_temp_free(t0);
 571        tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t1);
 572        tcg_temp_free(t1);
 573        break;
 574
 575    case CC_OP_CMPB:
 576    case CC_OP_CMPW:
 577    case CC_OP_CMPL:
 578        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_C, QREG_CC_N, QREG_CC_V);
 579        tcg_gen_sub_i32(QREG_CC_Z, QREG_CC_N, QREG_CC_V);
 580        gen_ext(QREG_CC_Z, QREG_CC_Z, s->cc_op - CC_OP_CMPB, 1);
 581        /* Compute signed overflow for subtraction.  */
 582        t0 = tcg_temp_new();
 583        tcg_gen_xor_i32(t0, QREG_CC_Z, QREG_CC_N);
 584        tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, QREG_CC_N);
 585        tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t0);
 586        tcg_temp_free(t0);
 587        tcg_gen_mov_i32(QREG_CC_N, QREG_CC_Z);
 588        break;
 589
 590    case CC_OP_LOGIC:
 591        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
 592        tcg_gen_movi_i32(QREG_CC_C, 0);
 593        tcg_gen_movi_i32(QREG_CC_V, 0);
 594        break;
 595
 596    case CC_OP_DYNAMIC:
 597        gen_helper_flush_flags(cpu_env, QREG_CC_OP);
 598        s->cc_op_synced = 1;
 599        break;
 600
 601    default:
 602        t0 = tcg_const_i32(s->cc_op);
 603        gen_helper_flush_flags(cpu_env, t0);
 604        tcg_temp_free(t0);
 605        s->cc_op_synced = 1;
 606        break;
 607    }
 608
 609    /* Note that flush_flags also assigns to env->cc_op.  */
 610    s->cc_op = CC_OP_FLAGS;
 611}
 612
 613static inline TCGv gen_extend(TCGv val, int opsize, int sign)
 614{
 615    TCGv tmp;
 616
 617    if (opsize == OS_LONG) {
 618        tmp = val;
 619    } else {
 620        tmp = tcg_temp_new();
 621        gen_ext(tmp, val, opsize, sign);
 622    }
 623
 624    return tmp;
 625}
 626
 627static void gen_logic_cc(DisasContext *s, TCGv val, int opsize)
 628{
 629    gen_ext(QREG_CC_N, val, opsize, 1);
 630    set_cc_op(s, CC_OP_LOGIC);
 631}
 632
 633static void gen_update_cc_cmp(DisasContext *s, TCGv dest, TCGv src, int opsize)
 634{
 635    tcg_gen_mov_i32(QREG_CC_N, dest);
 636    tcg_gen_mov_i32(QREG_CC_V, src);
 637    set_cc_op(s, CC_OP_CMPB + opsize);
 638}
 639
 640static void gen_update_cc_add(TCGv dest, TCGv src, int opsize)
 641{
 642    gen_ext(QREG_CC_N, dest, opsize, 1);
 643    tcg_gen_mov_i32(QREG_CC_V, src);
 644}
 645
 646static inline int opsize_bytes(int opsize)
 647{
 648    switch (opsize) {
 649    case OS_BYTE: return 1;
 650    case OS_WORD: return 2;
 651    case OS_LONG: return 4;
 652    case OS_SINGLE: return 4;
 653    case OS_DOUBLE: return 8;
 654    case OS_EXTENDED: return 12;
 655    case OS_PACKED: return 12;
 656    default:
 657        g_assert_not_reached();
 658    }
 659}
 660
 661static inline int insn_opsize(int insn)
 662{
 663    switch ((insn >> 6) & 3) {
 664    case 0: return OS_BYTE;
 665    case 1: return OS_WORD;
 666    case 2: return OS_LONG;
 667    default:
 668        g_assert_not_reached();
 669    }
 670}
 671
 672/* Assign value to a register.  If the width is less than the register width
 673   only the low part of the register is set.  */
 674static void gen_partset_reg(int opsize, TCGv reg, TCGv val)
 675{
 676    TCGv tmp;
 677    switch (opsize) {
 678    case OS_BYTE:
 679        tcg_gen_andi_i32(reg, reg, 0xffffff00);
 680        tmp = tcg_temp_new();
 681        tcg_gen_ext8u_i32(tmp, val);
 682        tcg_gen_or_i32(reg, reg, tmp);
 683        tcg_temp_free(tmp);
 684        break;
 685    case OS_WORD:
 686        tcg_gen_andi_i32(reg, reg, 0xffff0000);
 687        tmp = tcg_temp_new();
 688        tcg_gen_ext16u_i32(tmp, val);
 689        tcg_gen_or_i32(reg, reg, tmp);
 690        tcg_temp_free(tmp);
 691        break;
 692    case OS_LONG:
 693    case OS_SINGLE:
 694        tcg_gen_mov_i32(reg, val);
 695        break;
 696    default:
 697        g_assert_not_reached();
 698    }
 699}
 700
 701/* Generate code for an "effective address".  Does not adjust the base
 702   register for autoincrement addressing modes.  */
 703static TCGv gen_lea_mode(CPUM68KState *env, DisasContext *s,
 704                         int mode, int reg0, int opsize)
 705{
 706    TCGv reg;
 707    TCGv tmp;
 708    uint16_t ext;
 709    uint32_t offset;
 710
 711    switch (mode) {
 712    case 0: /* Data register direct.  */
 713    case 1: /* Address register direct.  */
 714        return NULL_QREG;
 715    case 3: /* Indirect postincrement.  */
 716        if (opsize == OS_UNSIZED) {
 717            return NULL_QREG;
 718        }
 719        /* fallthru */
 720    case 2: /* Indirect register */
 721        return get_areg(s, reg0);
 722    case 4: /* Indirect predecrement.  */
 723        if (opsize == OS_UNSIZED) {
 724            return NULL_QREG;
 725        }
 726        reg = get_areg(s, reg0);
 727        tmp = tcg_temp_new();
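            /* A byte-sized -(A7) access still adjusts the stack pointer by 2
               so that it stays word aligned; the M68K_FEATURE_M68000 check
               limits this behaviour to the classic 680x0 family.  */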
 728        if (reg0 == 7 && opsize == OS_BYTE &&
 729            m68k_feature(s->env, M68K_FEATURE_M68000)) {
 730            tcg_gen_subi_i32(tmp, reg, 2);
 731        } else {
 732            tcg_gen_subi_i32(tmp, reg, opsize_bytes(opsize));
 733        }
 734        return tmp;
 735    case 5: /* Indirect displacement.  */
 736        reg = get_areg(s, reg0);
 737        tmp = tcg_temp_new();
 738        ext = read_im16(env, s);
 739        tcg_gen_addi_i32(tmp, reg, (int16_t)ext);
 740        return tmp;
 741    case 6: /* Indirect index + displacement.  */
 742        reg = get_areg(s, reg0);
 743        return gen_lea_indexed(env, s, reg);
 744    case 7: /* Other */
 745        switch (reg0) {
 746        case 0: /* Absolute short.  */
 747            offset = (int16_t)read_im16(env, s);
 748            return tcg_const_i32(offset);
 749        case 1: /* Absolute long.  */
 750            offset = read_im32(env, s);
 751            return tcg_const_i32(offset);
 752        case 2: /* pc displacement  */
 753            offset = s->pc;
 754            offset += (int16_t)read_im16(env, s);
 755            return tcg_const_i32(offset);
 756        case 3: /* pc index+displacement.  */
 757            return gen_lea_indexed(env, s, NULL_QREG);
 758        case 4: /* Immediate.  */
 759        default:
 760            return NULL_QREG;
 761        }
 762    }
 763    /* Should never happen.  */
 764    return NULL_QREG;
 765}
 766
 767static TCGv gen_lea(CPUM68KState *env, DisasContext *s, uint16_t insn,
 768                    int opsize)
 769{
 770    int mode = extract32(insn, 3, 3);
 771    int reg0 = REG(insn, 0);
 772    return gen_lea_mode(env, s, mode, reg0, opsize);
 773}
 774
 775/* Generate code to load/store a value from/into an EA.  WHAT selects a store
 776   (EA_STORE) or a sign-/zero-extending load (EA_LOADS/EA_LOADU).  ADDRP is
 777   non-null for read-modify-write operands.  */
 778static TCGv gen_ea_mode(CPUM68KState *env, DisasContext *s, int mode, int reg0,
 779                        int opsize, TCGv val, TCGv *addrp, ea_what what)
 780{
 781    TCGv reg, tmp, result;
 782    int32_t offset;
 783
 784    switch (mode) {
 785    case 0: /* Data register direct.  */
 786        reg = cpu_dregs[reg0];
 787        if (what == EA_STORE) {
 788            gen_partset_reg(opsize, reg, val);
 789            return store_dummy;
 790        } else {
 791            return gen_extend(reg, opsize, what == EA_LOADS);
 792        }
 793    case 1: /* Address register direct.  */
 794        reg = get_areg(s, reg0);
 795        if (what == EA_STORE) {
 796            tcg_gen_mov_i32(reg, val);
 797            return store_dummy;
 798        } else {
 799            return gen_extend(reg, opsize, what == EA_LOADS);
 800        }
 801    case 2: /* Indirect register */
 802        reg = get_areg(s, reg0);
 803        return gen_ldst(s, opsize, reg, val, what);
 804    case 3: /* Indirect postincrement.  */
 805        reg = get_areg(s, reg0);
 806        result = gen_ldst(s, opsize, reg, val, what);
 807        if (what == EA_STORE || !addrp) {
 808            TCGv tmp = tcg_temp_new();
 809            if (reg0 == 7 && opsize == OS_BYTE &&
 810                m68k_feature(s->env, M68K_FEATURE_M68000)) {
 811                tcg_gen_addi_i32(tmp, reg, 2);
 812            } else {
 813                tcg_gen_addi_i32(tmp, reg, opsize_bytes(opsize));
 814            }
 815            delay_set_areg(s, reg0, tmp, true);
 816        }
 817        return result;
 818    case 4: /* Indirect predecrement.  */
 819        if (addrp && what == EA_STORE) {
 820            tmp = *addrp;
 821        } else {
 822            tmp = gen_lea_mode(env, s, mode, reg0, opsize);
 823            if (IS_NULL_QREG(tmp)) {
 824                return tmp;
 825            }
 826            if (addrp) {
 827                *addrp = tmp;
 828            }
 829        }
 830        result = gen_ldst(s, opsize, tmp, val, what);
 831        if (what == EA_STORE || !addrp) {
 832            delay_set_areg(s, reg0, tmp, false);
 833        }
 834        return result;
 835    case 5: /* Indirect displacement.  */
 836    case 6: /* Indirect index + displacement.  */
 837    do_indirect:
 838        if (addrp && what == EA_STORE) {
 839            tmp = *addrp;
 840        } else {
 841            tmp = gen_lea_mode(env, s, mode, reg0, opsize);
 842            if (IS_NULL_QREG(tmp)) {
 843                return tmp;
 844            }
 845            if (addrp) {
 846                *addrp = tmp;
 847            }
 848        }
 849        return gen_ldst(s, opsize, tmp, val, what);
 850    case 7: /* Other */
 851        switch (reg0) {
 852        case 0: /* Absolute short.  */
 853        case 1: /* Absolute long.  */
 854        case 2: /* pc displacement  */
 855        case 3: /* pc index+displacement.  */
 856            goto do_indirect;
 857        case 4: /* Immediate.  */
 858            /* Sign extend values for consistency.  */
 859            switch (opsize) {
 860            case OS_BYTE:
 861                if (what == EA_LOADS) {
 862                    offset = (int8_t)read_im8(env, s);
 863                } else {
 864                    offset = read_im8(env, s);
 865                }
 866                break;
 867            case OS_WORD:
 868                if (what == EA_LOADS) {
 869                    offset = (int16_t)read_im16(env, s);
 870                } else {
 871                    offset = read_im16(env, s);
 872                }
 873                break;
 874            case OS_LONG:
 875                offset = read_im32(env, s);
 876                break;
 877            default:
 878                g_assert_not_reached();
 879            }
 880            return tcg_const_i32(offset);
 881        default:
 882            return NULL_QREG;
 883        }
 884    }
 885    /* Should never happen.  */
 886    return NULL_QREG;
 887}
 888
 889static TCGv gen_ea(CPUM68KState *env, DisasContext *s, uint16_t insn,
 890                   int opsize, TCGv val, TCGv *addrp, ea_what what)
 891{
 892    int mode = extract32(insn, 3, 3);
 893    int reg0 = REG(insn, 0);
 894    return gen_ea_mode(env, s, mode, reg0, opsize, val, addrp, what);
 895}
 896
 897typedef struct {
 898    TCGCond tcond;
 899    bool g1;
 900    bool g2;
 901    TCGv v1;
 902    TCGv v2;
 903} DisasCompare;
 904
 905static void gen_cc_cond(DisasCompare *c, DisasContext *s, int cond)
 906{
 907    TCGv tmp, tmp2;
 908    TCGCond tcond;
 909    CCOp op = s->cc_op;
 910
 911    /* The CC_OP_CMP form can handle most normal comparisons directly.  */
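        /* For the CMP ops, QREG_CC_N holds the destination operand and
           QREG_CC_V the source (see gen_update_cc_cmp), so "dest cond src"
           is simply a condition on the pair (N, V).  */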
 912    if (op == CC_OP_CMPB || op == CC_OP_CMPW || op == CC_OP_CMPL) {
 913        c->g1 = c->g2 = 1;
 914        c->v1 = QREG_CC_N;
 915        c->v2 = QREG_CC_V;
 916        switch (cond) {
 917        case 2: /* HI */
 918        case 3: /* LS */
 919            tcond = TCG_COND_LEU;
 920            goto done;
 921        case 4: /* CC */
 922        case 5: /* CS */
 923            tcond = TCG_COND_LTU;
 924            goto done;
 925        case 6: /* NE */
 926        case 7: /* EQ */
 927            tcond = TCG_COND_EQ;
 928            goto done;
 929        case 10: /* PL */
 930        case 11: /* MI */
 931            c->g1 = c->g2 = 0;
 932            c->v2 = tcg_const_i32(0);
 933            c->v1 = tmp = tcg_temp_new();
 934            tcg_gen_sub_i32(tmp, QREG_CC_N, QREG_CC_V);
 935            gen_ext(tmp, tmp, op - CC_OP_CMPB, 1);
 936            /* fallthru */
 937        case 12: /* GE */
 938        case 13: /* LT */
 939            tcond = TCG_COND_LT;
 940            goto done;
 941        case 14: /* GT */
 942        case 15: /* LE */
 943            tcond = TCG_COND_LE;
 944            goto done;
 945        }
 946    }
 947
 948    c->g1 = 1;
 949    c->g2 = 0;
 950    c->v2 = tcg_const_i32(0);
 951
 952    switch (cond) {
 953    case 0: /* T */
 954    case 1: /* F */
 955        c->v1 = c->v2;
 956        tcond = TCG_COND_NEVER;
 957        goto done;
 958    case 14: /* GT (!(Z || (N ^ V))) */
 959    case 15: /* LE (Z || (N ^ V)) */
 960        /* Logic operations clear V, which simplifies LE to (Z || N),
 961           and since Z and N are co-located, this becomes a normal
 962           comparison vs N.  */
 963        if (op == CC_OP_LOGIC) {
 964            c->v1 = QREG_CC_N;
 965            tcond = TCG_COND_LE;
 966            goto done;
 967        }
 968        break;
 969    case 12: /* GE (!(N ^ V)) */
 970    case 13: /* LT (N ^ V) */
 971        /* Logic operations clear V, which simplifies this to N.  */
 972        if (op != CC_OP_LOGIC) {
 973            break;
 974        }
 975        /* fallthru */
 976    case 10: /* PL (!N) */
 977    case 11: /* MI (N) */
 978        /* Several cases represent N normally.  */
 979        if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
 980            op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL ||
 981            op == CC_OP_LOGIC) {
 982            c->v1 = QREG_CC_N;
 983            tcond = TCG_COND_LT;
 984            goto done;
 985        }
 986        break;
 987    case 6: /* NE (!Z) */
 988    case 7: /* EQ (Z) */
 989        /* Some cases fold Z into N.  */
 990        if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
 991            op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL ||
 992            op == CC_OP_LOGIC) {
 993            tcond = TCG_COND_EQ;
 994            c->v1 = QREG_CC_N;
 995            goto done;
 996        }
 997        break;
 998    case 4: /* CC (!C) */
 999    case 5: /* CS (C) */
1000        /* Some cases fold C into X.  */
1001        if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
1002            op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL) {
1003            tcond = TCG_COND_NE;
1004            c->v1 = QREG_CC_X;
1005            goto done;
1006        }
1007        /* fallthru */
1008    case 8: /* VC (!V) */
1009    case 9: /* VS (V) */
1010        /* Logic operations clear V and C.  */
1011        if (op == CC_OP_LOGIC) {
1012            tcond = TCG_COND_NEVER;
1013            c->v1 = c->v2;
1014            goto done;
1015        }
1016        break;
1017    }
1018
1019    /* Otherwise, flush flag state to CC_OP_FLAGS.  */
1020    gen_flush_flags(s);
1021
1022    switch (cond) {
1023    case 0: /* T */
1024    case 1: /* F */
1025    default:
1026        /* Invalid, or handled above.  */
1027        abort();
1028    case 2: /* HI (!C && !Z) -> !(C || Z)*/
1029    case 3: /* LS (C || Z) */
1030        c->v1 = tmp = tcg_temp_new();
1031        c->g1 = 0;
1032        tcg_gen_setcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
1033        tcg_gen_or_i32(tmp, tmp, QREG_CC_C);
1034        tcond = TCG_COND_NE;
1035        break;
1036    case 4: /* CC (!C) */
1037    case 5: /* CS (C) */
1038        c->v1 = QREG_CC_C;
1039        tcond = TCG_COND_NE;
1040        break;
1041    case 6: /* NE (!Z) */
1042    case 7: /* EQ (Z) */
1043        c->v1 = QREG_CC_Z;
1044        tcond = TCG_COND_EQ;
1045        break;
1046    case 8: /* VC (!V) */
1047    case 9: /* VS (V) */
1048        c->v1 = QREG_CC_V;
1049        tcond = TCG_COND_LT;
1050        break;
1051    case 10: /* PL (!N) */
1052    case 11: /* MI (N) */
1053        c->v1 = QREG_CC_N;
1054        tcond = TCG_COND_LT;
1055        break;
1056    case 12: /* GE (!(N ^ V)) */
1057    case 13: /* LT (N ^ V) */
1058        c->v1 = tmp = tcg_temp_new();
1059        c->g1 = 0;
1060        tcg_gen_xor_i32(tmp, QREG_CC_N, QREG_CC_V);
1061        tcond = TCG_COND_LT;
1062        break;
1063    case 14: /* GT (!(Z || (N ^ V))) */
1064    case 15: /* LE (Z || (N ^ V)) */
1065        c->v1 = tmp = tcg_temp_new();
1066        c->g1 = 0;
1067        tcg_gen_setcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
1068        tcg_gen_neg_i32(tmp, tmp);
1069        tmp2 = tcg_temp_new();
1070        tcg_gen_xor_i32(tmp2, QREG_CC_N, QREG_CC_V);
1071        tcg_gen_or_i32(tmp, tmp, tmp2);
1072        tcg_temp_free(tmp2);
1073        tcond = TCG_COND_LT;
1074        break;
1075    }
1076
1077 done:
1078    if ((cond & 1) == 0) {
1079        tcond = tcg_invert_cond(tcond);
1080    }
1081    c->tcond = tcond;
1082}
1083
1084static void free_cond(DisasCompare *c)
1085{
1086    if (!c->g1) {
1087        tcg_temp_free(c->v1);
1088    }
1089    if (!c->g2) {
1090        tcg_temp_free(c->v2);
1091    }
1092}
1093
1094static void gen_jmpcc(DisasContext *s, int cond, TCGLabel *l1)
1095{
1096    DisasCompare c;
1097
1098    gen_cc_cond(&c, s, cond);
1099    update_cc_op(s);
1100    tcg_gen_brcond_i32(c.tcond, c.v1, c.v2, l1);
1101    free_cond(&c);
1102}
1103
1104/* Force a TB lookup after an instruction that changes the CPU state.  */
1105static void gen_lookup_tb(DisasContext *s)
1106{
1107    update_cc_op(s);
1108    tcg_gen_movi_i32(QREG_PC, s->pc);
1109    s->is_jmp = DISAS_UPDATE;
1110}
1111
1112/* Generate a jump to an immediate address.  */
1113static void gen_jmp_im(DisasContext *s, uint32_t dest)
1114{
1115    update_cc_op(s);
1116    tcg_gen_movi_i32(QREG_PC, dest);
1117    s->is_jmp = DISAS_JUMP;
1118}
1119
1120/* Generate a jump to the address in qreg DEST.  */
1121static void gen_jmp(DisasContext *s, TCGv dest)
1122{
1123    update_cc_op(s);
1124    tcg_gen_mov_i32(QREG_PC, dest);
1125    s->is_jmp = DISAS_JUMP;
1126}
1127
1128static void gen_raise_exception(int nr)
1129{
1130    TCGv_i32 tmp = tcg_const_i32(nr);
1131
1132    gen_helper_raise_exception(cpu_env, tmp);
1133    tcg_temp_free_i32(tmp);
1134}
1135
1136static void gen_exception(DisasContext *s, uint32_t where, int nr)
1137{
1138    update_cc_op(s);
1139    gen_jmp_im(s, where);
1140    gen_raise_exception(nr);
1141}
1142
1143static inline void gen_addr_fault(DisasContext *s)
1144{
1145    gen_exception(s, s->insn_pc, EXCP_ADDRESS);
1146}
1147
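    /* These macros expand inside a DISAS_INSN body: they evaluate the EA
       operand of INSN and, if the addressing mode is invalid, raise an
       address error and return from the instruction handler.  */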
1148#define SRC_EA(env, result, opsize, op_sign, addrp) do {                \
1149        result = gen_ea(env, s, insn, opsize, NULL_QREG, addrp,         \
1150                        op_sign ? EA_LOADS : EA_LOADU);                 \
1151        if (IS_NULL_QREG(result)) {                                     \
1152            gen_addr_fault(s);                                          \
1153            return;                                                     \
1154        }                                                               \
1155    } while (0)
1156
1157#define DEST_EA(env, insn, opsize, val, addrp) do {                     \
1158        TCGv ea_result = gen_ea(env, s, insn, opsize, val, addrp, EA_STORE); \
1159        if (IS_NULL_QREG(ea_result)) {                                  \
1160            gen_addr_fault(s);                                          \
1161            return;                                                     \
1162        }                                                               \
1163    } while (0)
1164
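    /* Direct block chaining with goto_tb is only used when the destination
       lies on the same guest page as this TB (or as the current instruction),
       so that the link remains valid across page-level invalidation;
       user-mode emulation has no such restriction.  */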
1165static inline bool use_goto_tb(DisasContext *s, uint32_t dest)
1166{
1167#ifndef CONFIG_USER_ONLY
1168    return (s->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
1169           (s->insn_pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
1170#else
1171    return true;
1172#endif
1173}
1174
1175/* Generate a jump to an immediate address.  */
1176static void gen_jmp_tb(DisasContext *s, int n, uint32_t dest)
1177{
1178    if (unlikely(s->singlestep_enabled)) {
1179        gen_exception(s, dest, EXCP_DEBUG);
1180    } else if (use_goto_tb(s, dest)) {
1181        tcg_gen_goto_tb(n);
1182        tcg_gen_movi_i32(QREG_PC, dest);
1183        tcg_gen_exit_tb((uintptr_t)s->tb + n);
1184    } else {
1185        gen_jmp_im(s, dest);
1186        tcg_gen_exit_tb(0);
1187    }
1188    s->is_jmp = DISAS_TB_JUMP;
1189}
1190
1191DISAS_INSN(scc)
1192{
1193    DisasCompare c;
1194    int cond;
1195    TCGv tmp;
1196
1197    cond = (insn >> 8) & 0xf;
1198    gen_cc_cond(&c, s, cond);
1199
1200    tmp = tcg_temp_new();
1201    tcg_gen_setcond_i32(c.tcond, tmp, c.v1, c.v2);
1202    free_cond(&c);
1203
1204    tcg_gen_neg_i32(tmp, tmp);
1205    DEST_EA(env, insn, OS_BYTE, tmp, NULL);
1206    tcg_temp_free(tmp);
1207}
1208
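    /* DBcc Dn,<disp16>: if the condition holds, fall through to the next
       instruction; otherwise decrement the low word of Dn and branch back by
       the displacement unless the counter has reached -1.  */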
1209DISAS_INSN(dbcc)
1210{
1211    TCGLabel *l1;
1212    TCGv reg;
1213    TCGv tmp;
1214    int16_t offset;
1215    uint32_t base;
1216
1217    reg = DREG(insn, 0);
1218    base = s->pc;
1219    offset = (int16_t)read_im16(env, s);
1220    l1 = gen_new_label();
1221    gen_jmpcc(s, (insn >> 8) & 0xf, l1);
1222
1223    tmp = tcg_temp_new();
1224    tcg_gen_ext16s_i32(tmp, reg);
1225    tcg_gen_addi_i32(tmp, tmp, -1);
1226    gen_partset_reg(OS_WORD, reg, tmp);
1227    tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, -1, l1);
1228    gen_jmp_tb(s, 1, base + offset);
1229    gen_set_label(l1);
1230    gen_jmp_tb(s, 0, s->pc);
1231}
1232
1233DISAS_INSN(undef_mac)
1234{
1235    gen_exception(s, s->pc - 2, EXCP_LINEA);
1236}
1237
1238DISAS_INSN(undef_fpu)
1239{
1240    gen_exception(s, s->pc - 2, EXCP_LINEF);
1241}
1242
1243DISAS_INSN(undef)
1244{
1245    /* ??? This covers both instructions that are as yet unimplemented
1246       for the 680x0 series and those that are implemented but actually
1247       illegal for CPU32 or pre-68020.  */
1248    qemu_log_mask(LOG_UNIMP, "Illegal instruction: %04x @ %08x\n",
1249                  insn, s->pc - 2);
1250    gen_exception(s, s->pc - 2, EXCP_UNSUPPORTED);
1251}
1252
1253DISAS_INSN(mulw)
1254{
1255    TCGv reg;
1256    TCGv tmp;
1257    TCGv src;
1258    int sign;
1259
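    /* mulX.w <EA>,Dn    16*16 -> 32; the 32-bit product replaces all of Dn. */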
1260    sign = (insn & 0x100) != 0;
1261    reg = DREG(insn, 9);
1262    tmp = tcg_temp_new();
1263    if (sign)
1264        tcg_gen_ext16s_i32(tmp, reg);
1265    else
1266        tcg_gen_ext16u_i32(tmp, reg);
1267    SRC_EA(env, src, OS_WORD, sign, NULL);
1268    tcg_gen_mul_i32(tmp, tmp, src);
1269    tcg_gen_mov_i32(reg, tmp);
1270    gen_logic_cc(s, tmp, OS_LONG);
1271    tcg_temp_free(tmp);
1272}
1273
1274DISAS_INSN(divw)
1275{
1276    int sign;
1277    TCGv src;
1278    TCGv destr;
1279
1280    /* divX.w <EA>,Dn    32/16 -> 16r:16q */
1281
1282    sign = (insn & 0x100) != 0;
1283
1284    /* dest.l / src.w */
1285
1286    SRC_EA(env, src, OS_WORD, sign, NULL);
1287    destr = tcg_const_i32(REG(insn, 9));
1288    if (sign) {
1289        gen_helper_divsw(cpu_env, destr, src);
1290    } else {
1291        gen_helper_divuw(cpu_env, destr, src);
1292    }
1293    tcg_temp_free(destr);
1294
1295    set_cc_op(s, CC_OP_FLAGS);
1296}
1297
1298DISAS_INSN(divl)
1299{
1300    TCGv num, reg, den;
1301    int sign;
1302    uint16_t ext;
1303
1304    ext = read_im16(env, s);
1305
1306    sign = (ext & 0x0800) != 0;
1307
1308    if (ext & 0x400) {
1309        if (!m68k_feature(s->env, M68K_FEATURE_QUAD_MULDIV)) {
1310            gen_exception(s, s->insn_pc, EXCP_ILLEGAL);
1311            return;
1312        }
1313
1314        /* divX.l <EA>, Dr:Dq    64/32 -> 32r:32q */
1315
1316        SRC_EA(env, den, OS_LONG, 0, NULL);
1317        num = tcg_const_i32(REG(ext, 12));
1318        reg = tcg_const_i32(REG(ext, 0));
1319        if (sign) {
1320            gen_helper_divsll(cpu_env, num, reg, den);
1321        } else {
1322            gen_helper_divull(cpu_env, num, reg, den);
1323        }
1324        tcg_temp_free(reg);
1325        tcg_temp_free(num);
1326        set_cc_op(s, CC_OP_FLAGS);
1327        return;
1328    }
1329
1330    /* divX.l <EA>, Dq        32/32 -> 32q     */
1331    /* divXl.l <EA>, Dr:Dq    32/32 -> 32r:32q */
1332
1333    SRC_EA(env, den, OS_LONG, 0, NULL);
1334    num = tcg_const_i32(REG(ext, 12));
1335    reg = tcg_const_i32(REG(ext, 0));
1336    if (sign) {
1337        gen_helper_divsl(cpu_env, num, reg, den);
1338    } else {
1339        gen_helper_divul(cpu_env, num, reg, den);
1340    }
1341    tcg_temp_free(reg);
1342    tcg_temp_free(num);
1343
1344    set_cc_op(s, CC_OP_FLAGS);
1345}
1346
1347static void bcd_add(TCGv dest, TCGv src)
1348{
1349    TCGv t0, t1;
1350
1351    /*  dest10 = dest10 + src10 + X
1352     *
1353     *        t1 = src
1354     *        t2 = t1 + 0x066
1355     *        t3 = t2 + dest + X
1356     *        t4 = t2 ^ dest
1357     *        t5 = t3 ^ t4
1358     *        t6 = ~t5 & 0x110
1359     *        t7 = (t6 >> 2) | (t6 >> 3)
1360     *        return t3 - t7
1361     */
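    /* Worked example (X = 0): dest = 0x19, src = 0x03 gives t2 = 0x069,
     * t3 = 0x082, t4 = 0x070, t5 = 0x0f2, t6 = 0x100, t7 = 0x060, and
     * t3 - t7 = 0x022, i.e. 19 + 03 = 22 in BCD.
     */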
1362
1363    /* t1 = (src + 0x066) + dest + X
1364     *    = result with some possible excess 0x6
1365     */
1366
1367    t0 = tcg_const_i32(0x066);
1368    tcg_gen_add_i32(t0, t0, src);
1369
1370    t1 = tcg_temp_new();
1371    tcg_gen_add_i32(t1, t0, dest);
1372    tcg_gen_add_i32(t1, t1, QREG_CC_X);
1373
1374    /* we will remove the excess 0x6 where there is no carry */
1375
1376    /* t0 = (src + 0x0066) ^ dest
1377     *    = t1 without carries
1378     */
1379
1380    tcg_gen_xor_i32(t0, t0, dest);
1381
1382    /* extract the carries
1383     * t0 = t0 ^ t1
1384     *    = only the carries
1385     */
1386
1387    tcg_gen_xor_i32(t0, t0, t1);
1388
1389    /* generate 0x1 where there is no carry
1390     * and for each 0x10, generate a 0x6
1391     */
1392
1393    tcg_gen_shri_i32(t0, t0, 3);
1394    tcg_gen_not_i32(t0, t0);
1395    tcg_gen_andi_i32(t0, t0, 0x22);
1396    tcg_gen_add_i32(dest, t0, t0);
1397    tcg_gen_add_i32(dest, dest, t0);
1398    tcg_temp_free(t0);
1399
1400    /* remove the excess 0x6
1401     * for digits that have not generated a carry
1402     */
1403
1404    tcg_gen_sub_i32(dest, t1, dest);
1405    tcg_temp_free(t1);
1406}
1407
1408static void bcd_sub(TCGv dest, TCGv src)
1409{
1410    TCGv t0, t1, t2;
1411
1412    /*  dest10 = dest10 - src10 - X
1413     *         = bcd_add(dest + 1 - X, 0x199 - src)
1414     */
1415
1416    /* t0 = 0x066 + (0x199 - src) */
1417
1418    t0 = tcg_temp_new();
1419    tcg_gen_subfi_i32(t0, 0x1ff, src);
1420
1421    /* t1 = t0 + dest + 1 - X*/
1422
1423    t1 = tcg_temp_new();
1424    tcg_gen_add_i32(t1, t0, dest);
1425    tcg_gen_addi_i32(t1, t1, 1);
1426    tcg_gen_sub_i32(t1, t1, QREG_CC_X);
1427
1428    /* t2 = t0 ^ dest */
1429
1430    t2 = tcg_temp_new();
1431    tcg_gen_xor_i32(t2, t0, dest);
1432
1433    /* t0 = t1 ^ t2 */
1434
1435    tcg_gen_xor_i32(t0, t1, t2);
1436
1437    /* t2 = ~t0 & 0x110
1438     * t0 = (t2 >> 2) | (t2 >> 3)
1439     *
1440     * to fit in 8-bit operands, changed into:
1441     *
1442     * t2 = ~(t0 >> 3) & 0x22
1443     * t0 = t2 + t2
1444     * t0 = t0 + t2
1445     */
1446
1447    tcg_gen_shri_i32(t2, t0, 3);
1448    tcg_gen_not_i32(t2, t2);
1449    tcg_gen_andi_i32(t2, t2, 0x22);
1450    tcg_gen_add_i32(t0, t2, t2);
1451    tcg_gen_add_i32(t0, t0, t2);
1452    tcg_temp_free(t2);
1453
1454    /* return t1 - t0 */
1455
1456    tcg_gen_sub_i32(dest, t1, t0);
1457    tcg_temp_free(t0);
1458    tcg_temp_free(t1);
1459}
1460
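    /* Set the flags for a BCD result: C and X come from bit 8 of VAL (the
       decimal carry), while Z is only ever cleared (the low byte of the
       result is OR-ed into CC_Z), never set, so a zero result leaves Z
       unchanged -- the "!Z is sticky" behaviour noted at the call sites.  */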
1461static void bcd_flags(TCGv val)
1462{
1463    tcg_gen_andi_i32(QREG_CC_C, val, 0x0ff);
1464    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_C);
1465
1466    tcg_gen_shri_i32(QREG_CC_C, val, 8);
1467    tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
1468
1469    tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);
1470}
1471
1472DISAS_INSN(abcd_reg)
1473{
1474    TCGv src;
1475    TCGv dest;
1476
1477    gen_flush_flags(s); /* !Z is sticky */
1478
1479    src = gen_extend(DREG(insn, 0), OS_BYTE, 0);
1480    dest = gen_extend(DREG(insn, 9), OS_BYTE, 0);
1481    bcd_add(dest, src);
1482    gen_partset_reg(OS_BYTE, DREG(insn, 9), dest);
1483
1484    bcd_flags(dest);
1485}
1486
1487DISAS_INSN(abcd_mem)
1488{
1489    TCGv src, dest, addr;
1490
1491    gen_flush_flags(s); /* !Z is sticky */
1492
1493    /* Indirect pre-decrement load (mode 4) */
1494
1495    src = gen_ea_mode(env, s, 4, REG(insn, 0), OS_BYTE,
1496                      NULL_QREG, NULL, EA_LOADU);
1497    dest = gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE,
1498                       NULL_QREG, &addr, EA_LOADU);
1499
1500    bcd_add(dest, src);
1501
1502    gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, dest, &addr, EA_STORE);
1503
1504    bcd_flags(dest);
1505}
1506
1507DISAS_INSN(sbcd_reg)
1508{
1509    TCGv src, dest;
1510
1511    gen_flush_flags(s); /* !Z is sticky */
1512
1513    src = gen_extend(DREG(insn, 0), OS_BYTE, 0);
1514    dest = gen_extend(DREG(insn, 9), OS_BYTE, 0);
1515
1516    bcd_sub(dest, src);
1517
1518    gen_partset_reg(OS_BYTE, DREG(insn, 9), dest);
1519
1520    bcd_flags(dest);
1521}
1522
1523DISAS_INSN(sbcd_mem)
1524{
1525    TCGv src, dest, addr;
1526
1527    gen_flush_flags(s); /* !Z is sticky */
1528
1529    /* Indirect pre-decrement load (mode 4) */
1530
1531    src = gen_ea_mode(env, s, 4, REG(insn, 0), OS_BYTE,
1532                      NULL_QREG, NULL, EA_LOADU);
1533    dest = gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE,
1534                       NULL_QREG, &addr, EA_LOADU);
1535
1536    bcd_sub(dest, src);
1537
1538    gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, dest, &addr, EA_STORE);
1539
1540    bcd_flags(dest);
1541}
1542
1543DISAS_INSN(nbcd)
1544{
1545    TCGv src, dest;
1546    TCGv addr;
1547
1548    gen_flush_flags(s); /* !Z is sticky */
1549
1550    SRC_EA(env, src, OS_BYTE, 0, &addr);
1551
1552    dest = tcg_const_i32(0);
1553    bcd_sub(dest, src);
1554
1555    DEST_EA(env, insn, OS_BYTE, dest, &addr);
1556
1557    bcd_flags(dest);
1558
1559    tcg_temp_free(dest);
1560}
1561
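    /* ADD/SUB: bit 14 of the opcode set selects add, clear selects sub; bit 8
       selects whether the destination is the <EA> (Dn op <EA> -> <EA>) or the
       data register (<EA> op Dn -> Dn).  */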
1562DISAS_INSN(addsub)
1563{
1564    TCGv reg;
1565    TCGv dest;
1566    TCGv src;
1567    TCGv tmp;
1568    TCGv addr;
1569    int add;
1570    int opsize;
1571
1572    add = (insn & 0x4000) != 0;
1573    opsize = insn_opsize(insn);
1574    reg = gen_extend(DREG(insn, 9), opsize, 1);
1575    dest = tcg_temp_new();
1576    if (insn & 0x100) {
1577        SRC_EA(env, tmp, opsize, 1, &addr);
1578        src = reg;
1579    } else {
1580        tmp = reg;
1581        SRC_EA(env, src, opsize, 1, NULL);
1582    }
1583    if (add) {
1584        tcg_gen_add_i32(dest, tmp, src);
1585        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, src);
1586        set_cc_op(s, CC_OP_ADDB + opsize);
1587    } else {
1588        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, tmp, src);
1589        tcg_gen_sub_i32(dest, tmp, src);
1590        set_cc_op(s, CC_OP_SUBB + opsize);
1591    }
1592    gen_update_cc_add(dest, src, opsize);
1593    if (insn & 0x100) {
1594        DEST_EA(env, insn, opsize, dest, &addr);
1595    } else {
1596        gen_partset_reg(opsize, DREG(insn, 9), dest);
1597    }
1598    tcg_temp_free(dest);
1599}
1600
1601/* Reverse the order of the bits in REG.  */
1602DISAS_INSN(bitrev)
1603{
1604    TCGv reg;
1605    reg = DREG(insn, 0);
1606    gen_helper_bitrev(reg, reg);
1607}
1608
1609DISAS_INSN(bitop_reg)
1610{
1611    int opsize;
1612    int op;
1613    TCGv src1;
1614    TCGv src2;
1615    TCGv tmp;
1616    TCGv addr;
1617    TCGv dest;
1618
1619    if ((insn & 0x38) != 0)
1620        opsize = OS_BYTE;
1621    else
1622        opsize = OS_LONG;
1623    op = (insn >> 6) & 3;
1624    SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);
1625
1626    gen_flush_flags(s);
1627    src2 = tcg_temp_new();
1628    if (opsize == OS_BYTE)
1629        tcg_gen_andi_i32(src2, DREG(insn, 9), 7);
1630    else
1631        tcg_gen_andi_i32(src2, DREG(insn, 9), 31);
1632
1633    tmp = tcg_const_i32(1);
1634    tcg_gen_shl_i32(tmp, tmp, src2);
1635    tcg_temp_free(src2);
1636
1637    tcg_gen_and_i32(QREG_CC_Z, src1, tmp);
1638
1639    dest = tcg_temp_new();
1640    switch (op) {
1641    case 1: /* bchg */
1642        tcg_gen_xor_i32(dest, src1, tmp);
1643        break;
1644    case 2: /* bclr */
1645        tcg_gen_andc_i32(dest, src1, tmp);
1646        break;
1647    case 3: /* bset */
1648        tcg_gen_or_i32(dest, src1, tmp);
1649        break;
1650    default: /* btst */
1651        break;
1652    }
1653    tcg_temp_free(tmp);
1654    if (op) {
1655        DEST_EA(env, insn, opsize, dest, &addr);
1656    }
1657    tcg_temp_free(dest);
1658}
1659
1660DISAS_INSN(sats)
1661{
1662    TCGv reg;
1663    reg = DREG(insn, 0);
1664    gen_flush_flags(s);
1665    gen_helper_sats(reg, reg, QREG_CC_V);
1666    gen_logic_cc(s, reg, OS_LONG);
1667}
1668
1669static void gen_push(DisasContext *s, TCGv val)
1670{
1671    TCGv tmp;
1672
1673    tmp = tcg_temp_new();
1674    tcg_gen_subi_i32(tmp, QREG_SP, 4);
1675    gen_store(s, OS_LONG, tmp, val);
1676    tcg_gen_mov_i32(QREG_SP, tmp);
1677    tcg_temp_free(tmp);
1678}
1679
1680static TCGv mreg(int reg)
1681{
1682    if (reg < 8) {
1683        /* Dx */
1684        return cpu_dregs[reg];
1685    }
1686    /* Ax */
1687    return cpu_aregs[reg & 7];
1688}
1689
1690DISAS_INSN(movem)
1691{
1692    TCGv addr, incr, tmp, r[16];
1693    int is_load = (insn & 0x0400) != 0;
1694    int opsize = (insn & 0x40) != 0 ? OS_LONG : OS_WORD;
1695    uint16_t mask = read_im16(env, s);
1696    int mode = extract32(insn, 3, 3);
1697    int reg0 = REG(insn, 0);
1698    int i;
1699
1700    tmp = cpu_aregs[reg0];
1701
1702    switch (mode) {
1703    case 0: /* data register direct */
1704    case 1: /* addr register direct */
1705    do_addr_fault:
1706        gen_addr_fault(s);
1707        return;
1708
1709    case 2: /* indirect */
1710        break;
1711
1712    case 3: /* indirect post-increment */
1713        if (!is_load) {
1714            /* post-increment is not allowed */
1715            goto do_addr_fault;
1716        }
1717        break;
1718
1719    case 4: /* indirect pre-decrement */
1720        if (is_load) {
1721            /* pre-decrement is not allowed */
1722            goto do_addr_fault;
1723        }
1724        /* We want a bare copy of the address reg, without the pre-decrement
1725           adjustment that gen_lea would apply.  */
1726        break;
1727
1728    default:
1729        tmp = gen_lea_mode(env, s, mode, reg0, opsize);
1730        if (IS_NULL_QREG(tmp)) {
1731            goto do_addr_fault;
1732        }
1733        break;
1734    }
1735
1736    addr = tcg_temp_new();
1737    tcg_gen_mov_i32(addr, tmp);
1738    incr = tcg_const_i32(opsize_bytes(opsize));
1739
1740    if (is_load) {
1741        /* memory to register */
1742        for (i = 0; i < 16; i++) {
1743            if (mask & (1 << i)) {
1744                r[i] = gen_load(s, opsize, addr, 1);
1745                tcg_gen_add_i32(addr, addr, incr);
1746            }
1747        }
1748        for (i = 0; i < 16; i++) {
1749            if (mask & (1 << i)) {
1750                tcg_gen_mov_i32(mreg(i), r[i]);
1751                tcg_temp_free(r[i]);
1752            }
1753        }
1754        if (mode == 3) {
1755            /* post-increment: movem (An)+,X */
1756            tcg_gen_mov_i32(cpu_aregs[reg0], addr);
1757        }
1758    } else {
1759        /* register to memory */
1760        if (mode == 4) {
1761            /* pre-decrement: movem X,-(An) */
1762            for (i = 15; i >= 0; i--) {
1763                if ((mask << i) & 0x8000) {
1764                    tcg_gen_sub_i32(addr, addr, incr);
1765                    if (reg0 + 8 == i &&
1766                        m68k_feature(s->env, M68K_FEATURE_EXT_FULL)) {
1767                        /* M68020+: if the addressing register is the
1768                         * register moved to memory, the value written
1769                         * is the initial value decremented by the size of
1770                         * the operation, regardless of how many actual
1771                         * stores have been performed until this point.
1772                         * M68000/M68010: the value is the initial value.
1773                         */
1774                        tmp = tcg_temp_new();
1775                        tcg_gen_sub_i32(tmp, cpu_aregs[reg0], incr);
1776                        gen_store(s, opsize, addr, tmp);
1777                        tcg_temp_free(tmp);
1778                    } else {
1779                        gen_store(s, opsize, addr, mreg(i));
1780                    }
1781                }
1782            }
1783            tcg_gen_mov_i32(cpu_aregs[reg0], addr);
1784        } else {
1785            for (i = 0; i < 16; i++) {
1786                if (mask & (1 << i)) {
1787                    gen_store(s, opsize, addr, mreg(i));
1788                    tcg_gen_add_i32(addr, addr, incr);
1789                }
1790            }
1791        }
1792    }
1793
1794    tcg_temp_free(incr);
1795    tcg_temp_free(addr);
1796}
1797
1798DISAS_INSN(bitop_im)
1799{
1800    int opsize;
1801    int op;
1802    TCGv src1;
1803    uint32_t mask;
1804    int bitnum;
1805    TCGv tmp;
1806    TCGv addr;
1807
1808    if ((insn & 0x38) != 0)
1809        opsize = OS_BYTE;
1810    else
1811        opsize = OS_LONG;
1812    op = (insn >> 6) & 3;
1813
1814    bitnum = read_im16(env, s);
1815    if (m68k_feature(s->env, M68K_FEATURE_M68000)) {
1816        if (bitnum & 0xfe00) {
1817            disas_undef(env, s, insn);
1818            return;
1819        }
1820    } else {
1821        if (bitnum & 0xff00) {
1822            disas_undef(env, s, insn);
1823            return;
1824        }
1825    }
1826
1827    SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);
1828
1829    gen_flush_flags(s);
1830    if (opsize == OS_BYTE)
1831        bitnum &= 7;
1832    else
1833        bitnum &= 31;
1834    mask = 1 << bitnum;
1835
1836    tcg_gen_andi_i32(QREG_CC_Z, src1, mask);
1837
1838    if (op) {
1839        tmp = tcg_temp_new();
1840        switch (op) {
1841        case 1: /* bchg */
1842            tcg_gen_xori_i32(tmp, src1, mask);
1843            break;
1844        case 2: /* bclr */
1845            tcg_gen_andi_i32(tmp, src1, ~mask);
1846            break;
1847        case 3: /* bset */
1848            tcg_gen_ori_i32(tmp, src1, mask);
1849            break;
1850        default: /* btst */
1851            break;
1852        }
1853        DEST_EA(env, insn, opsize, tmp, &addr);
1854        tcg_temp_free(tmp);
1855    }
1856}
1857
1858DISAS_INSN(arith_im)
1859{
1860    int op;
1861    TCGv im;
1862    TCGv src1;
1863    TCGv dest;
1864    TCGv addr;
1865    int opsize;
1866
1867    op = (insn >> 9) & 7;
1868    opsize = insn_opsize(insn);
1869    switch (opsize) {
1870    case OS_BYTE:
1871        im = tcg_const_i32((int8_t)read_im8(env, s));
1872        break;
1873    case OS_WORD:
1874        im = tcg_const_i32((int16_t)read_im16(env, s));
1875        break;
1876    case OS_LONG:
1877        im = tcg_const_i32(read_im32(env, s));
1878        break;
1879    default:
1880        abort();
1881    }
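    /* e.g. addi.w #5,D3 decodes as op == 3 (bits 11:9 of the opcode) with
     * opsize == OS_WORD; the 16-bit immediate 5 was just fetched above.
     */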
1882    SRC_EA(env, src1, opsize, 1, (op == 6) ? NULL : &addr);
1883    dest = tcg_temp_new();
1884    switch (op) {
1885    case 0: /* ori */
1886        tcg_gen_or_i32(dest, src1, im);
1887        gen_logic_cc(s, dest, opsize);
1888        break;
1889    case 1: /* andi */
1890        tcg_gen_and_i32(dest, src1, im);
1891        gen_logic_cc(s, dest, opsize);
1892        break;
1893    case 2: /* subi */
1894        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, src1, im);
1895        tcg_gen_sub_i32(dest, src1, im);
1896        gen_update_cc_add(dest, im, opsize);
1897        set_cc_op(s, CC_OP_SUBB + opsize);
1898        break;
1899    case 3: /* addi */
1900        tcg_gen_add_i32(dest, src1, im);
1901        gen_update_cc_add(dest, im, opsize);
1902        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, im);
1903        set_cc_op(s, CC_OP_ADDB + opsize);
1904        break;
1905    case 5: /* eori */
1906        tcg_gen_xor_i32(dest, src1, im);
1907        gen_logic_cc(s, dest, opsize);
1908        break;
1909    case 6: /* cmpi */
1910        gen_update_cc_cmp(s, src1, im, opsize);
1911        break;
1912    default:
1913        abort();
1914    }
1915    tcg_temp_free(im);
1916    if (op != 6) {
1917        DEST_EA(env, insn, opsize, dest, &addr);
1918    }
1919    tcg_temp_free(dest);
1920}
1921
1922DISAS_INSN(cas)
1923{
1924    int opsize;
1925    TCGv addr;
1926    uint16_t ext;
1927    TCGv load;
1928    TCGv cmp;
1929    TCGMemOp opc;
1930
1931    switch ((insn >> 9) & 3) {
1932    case 1:
1933        opsize = OS_BYTE;
1934        opc = MO_SB;
1935        break;
1936    case 2:
1937        opsize = OS_WORD;
1938        opc = MO_TESW;
1939        break;
1940    case 3:
1941        opsize = OS_LONG;
1942        opc = MO_TESL;
1943        break;
1944    default:
1945        g_assert_not_reached();
1946    }
1947
1948    ext = read_im16(env, s);
1949
1950    /* cas Dc,Du,<EA> */
1951
1952    addr = gen_lea(env, s, insn, opsize);
1953    if (IS_NULL_QREG(addr)) {
1954        gen_addr_fault(s);
1955        return;
1956    }
1957
1958    cmp = gen_extend(DREG(ext, 0), opsize, 1);
1959
1960    /* if  <EA> == Dc then
1961     *     <EA> = Du
1962     *     Dc = <EA> (because <EA> == Dc)
1963     * else
1964     *     Dc = <EA>
1965     */
1966
1967    load = tcg_temp_new();
1968    tcg_gen_atomic_cmpxchg_i32(load, addr, cmp, DREG(ext, 6),
1969                               IS_USER(s), opc);
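    /* The cmpxchg op performs both halves of the pseudo-code above in one
     * atomic step: it loads <EA>, stores Du only when the loaded value
     * equals cmp, and always returns the loaded value in "load".
     */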
1970    /* update flags before setting cmp to load */
1971    gen_update_cc_cmp(s, load, cmp, opsize);
1972    gen_partset_reg(opsize, DREG(ext, 0), load);
1973
1974    tcg_temp_free(load);
1975
1976    switch (extract32(insn, 3, 3)) {
1977    case 3: /* Indirect postincrement.  */
1978        tcg_gen_addi_i32(AREG(insn, 0), addr, opsize_bytes(opsize));
1979        break;
1980    case 4: /* Indirect predecrement.  */
1981        tcg_gen_mov_i32(AREG(insn, 0), addr);
1982        break;
1983    }
1984}
1985
1986DISAS_INSN(cas2w)
1987{
1988    uint16_t ext1, ext2;
1989    TCGv addr1, addr2;
1990    TCGv regs;
1991
1992    /* cas2 Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) */
1993
1994    ext1 = read_im16(env, s);
1995
1996    if (ext1 & 0x8000) {
1997        /* Address Register */
1998        addr1 = AREG(ext1, 12);
1999    } else {
2000        /* Data Register */
2001        addr1 = DREG(ext1, 12);
2002    }
2003
2004    ext2 = read_im16(env, s);
2005    if (ext2 & 0x8000) {
2006        /* Address Register */
2007        addr2 = AREG(ext2, 12);
2008    } else {
2009        /* Data Register */
2010        addr2 = DREG(ext2, 12);
2011    }
2012
2013    /* if (R1) == Dc1 && (R2) == Dc2 then
2014     *     (R1) = Du1
2015     *     (R2) = Du2
2016     * else
2017     *     Dc1 = (R1)
2018     *     Dc2 = (R2)
2019     */
2020
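    /* Pack the four register numbers into a single immediate for the
     * helper: Du2 in bits 2:0, Du1 in bits 5:3, Dc2 in bits 8:6 and
     * Dc1 in bits 11:9.
     */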
2021    regs = tcg_const_i32(REG(ext2, 6) |
2022                         (REG(ext1, 6) << 3) |
2023                         (REG(ext2, 0) << 6) |
2024                         (REG(ext1, 0) << 9));
2025    gen_helper_cas2w(cpu_env, regs, addr1, addr2);
2026    tcg_temp_free(regs);
2027
2028    /* Note that the cas2w helper has already set env->cc_op.  */
2029    s->cc_op = CC_OP_CMPW;
2030    s->cc_op_synced = 1;
2031}
2032
2033DISAS_INSN(cas2l)
2034{
2035    uint16_t ext1, ext2;
2036    TCGv addr1, addr2, regs;
2037
2038    /* cas2 Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) */
2039
2040    ext1 = read_im16(env, s);
2041
2042    if (ext1 & 0x8000) {
2043        /* Address Register */
2044        addr1 = AREG(ext1, 12);
2045    } else {
2046        /* Data Register */
2047        addr1 = DREG(ext1, 12);
2048    }
2049
2050    ext2 = read_im16(env, s);
2051    if (ext2 & 0x8000) {
2052        /* Address Register */
2053        addr2 = AREG(ext2, 12);
2054    } else {
2055        /* Data Register */
2056        addr2 = DREG(ext2, 12);
2057    }
2058
2059    /* if (R1) == Dc1 && (R2) == Dc2 then
2060     *     (R1) = Du1
2061     *     (R2) = Du2
2062     * else
2063     *     Dc1 = (R1)
2064     *     Dc2 = (R2)
2065     */
2066
2067    regs = tcg_const_i32(REG(ext2, 6) |
2068                         (REG(ext1, 6) << 3) |
2069                         (REG(ext2, 0) << 6) |
2070                         (REG(ext1, 0) << 9));
2071    gen_helper_cas2l(cpu_env, regs, addr1, addr2);
2072    tcg_temp_free(regs);
2073
2074    /* Note that the cas2l helper has already set env->cc_op.  */
2075    s->cc_op = CC_OP_CMPL;
2076    s->cc_op_synced = 1;
2077}
2078
2079DISAS_INSN(byterev)
2080{
2081    TCGv reg;
2082
2083    reg = DREG(insn, 0);
2084    tcg_gen_bswap32_i32(reg, reg);
2085}
2086
2087DISAS_INSN(move)
2088{
2089    TCGv src;
2090    TCGv dest;
2091    int op;
2092    int opsize;
2093
2094    switch (insn >> 12) {
2095    case 1: /* move.b */
2096        opsize = OS_BYTE;
2097        break;
2098    case 2: /* move.l */
2099        opsize = OS_LONG;
2100        break;
2101    case 3: /* move.w */
2102        opsize = OS_WORD;
2103        break;
2104    default:
2105        abort();
2106    }
2107    SRC_EA(env, src, opsize, 1, NULL);
2108    op = (insn >> 6) & 7;
2109    if (op == 1) {
2110        /* movea */
2111        /* The value will already have been sign extended.  */
2112        dest = AREG(insn, 9);
2113        tcg_gen_mov_i32(dest, src);
2114    } else {
2115        /* normal move */
2116        uint16_t dest_ea;
2117        dest_ea = ((insn >> 9) & 7) | (op << 3);
2118        DEST_EA(env, dest_ea, opsize, src, NULL);
2119        /* This will be correct because loads sign extend.  */
2120        gen_logic_cc(s, src, opsize);
2121    }
2122}
2123
2124DISAS_INSN(negx)
2125{
2126    TCGv z;
2127    TCGv src;
2128    TCGv addr;
2129    int opsize;
2130
2131    opsize = insn_opsize(insn);
2132    SRC_EA(env, src, opsize, 1, &addr);
2133
2134    gen_flush_flags(s); /* compute old Z */
2135
2136    /* Perform subtract with borrow.
2137     * (X, N) = -(src + X);
2138     */
2139
2140    z = tcg_const_i32(0);
2141    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, z, QREG_CC_X, z);
2142    tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, z, z, QREG_CC_N, QREG_CC_X);
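    /* The add2/sub2 pair computes 0 - (src + X) as a 33-bit value:
     * QREG_CC_N holds the low 32 bits and QREG_CC_X the borrow out,
     * which is masked down to bit 0 below.
     */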
2143    tcg_temp_free(z);
2144    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
2145
2146    tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1);
2147
2148    /* Compute signed-overflow for negation.  The normal formula for
2149     * subtraction is (res ^ dest) & (dest ^ src), but with dest == 0
2150     * this simplifies to res & src.
2151     */
2152
2153    tcg_gen_and_i32(QREG_CC_V, QREG_CC_N, src);
2154
2155    /* Copy the rest of the results into place.  */
2156    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
2157    tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
2158
2159    set_cc_op(s, CC_OP_FLAGS);
2160
2161    /* result is in QREG_CC_N */
2162
2163    DEST_EA(env, insn, opsize, QREG_CC_N, &addr);
2164}
2165
2166DISAS_INSN(lea)
2167{
2168    TCGv reg;
2169    TCGv tmp;
2170
2171    reg = AREG(insn, 9);
2172    tmp = gen_lea(env, s, insn, OS_LONG);
2173    if (IS_NULL_QREG(tmp)) {
2174        gen_addr_fault(s);
2175        return;
2176    }
2177    tcg_gen_mov_i32(reg, tmp);
2178}
2179
2180DISAS_INSN(clr)
2181{
2182    int opsize;
2183    TCGv zero;
2184
2185    zero = tcg_const_i32(0);
2186
2187    opsize = insn_opsize(insn);
2188    DEST_EA(env, insn, opsize, zero, NULL);
2189    gen_logic_cc(s, zero, opsize);
2190    tcg_temp_free(zero);
2191}
2192
2193static TCGv gen_get_ccr(DisasContext *s)
2194{
2195    TCGv dest;
2196
2197    gen_flush_flags(s);
2198    update_cc_op(s);
2199    dest = tcg_temp_new();
2200    gen_helper_get_ccr(dest, cpu_env);
2201    return dest;
2202}
2203
2204DISAS_INSN(move_from_ccr)
2205{
2206    TCGv ccr;
2207
2208    ccr = gen_get_ccr(s);
2209    DEST_EA(env, insn, OS_WORD, ccr, NULL);
2210}
2211
2212DISAS_INSN(neg)
2213{
2214    TCGv src1;
2215    TCGv dest;
2216    TCGv addr;
2217    int opsize;
2218
2219    opsize = insn_opsize(insn);
2220    SRC_EA(env, src1, opsize, 1, &addr);
2221    dest = tcg_temp_new();
2222    tcg_gen_neg_i32(dest, src1);
2223    set_cc_op(s, CC_OP_SUBB + opsize);
2224    gen_update_cc_add(dest, src1, opsize);
2225    tcg_gen_setcondi_i32(TCG_COND_NE, QREG_CC_X, dest, 0);
2226    DEST_EA(env, insn, opsize, dest, &addr);
2227    tcg_temp_free(dest);
2228}
2229
2230static void gen_set_sr_im(DisasContext *s, uint16_t val, int ccr_only)
2231{
2232    if (ccr_only) {
2233        tcg_gen_movi_i32(QREG_CC_C, val & CCF_C ? 1 : 0);
2234        tcg_gen_movi_i32(QREG_CC_V, val & CCF_V ? -1 : 0);
2235        tcg_gen_movi_i32(QREG_CC_Z, val & CCF_Z ? 0 : 1);
2236        tcg_gen_movi_i32(QREG_CC_N, val & CCF_N ? -1 : 0);
2237        tcg_gen_movi_i32(QREG_CC_X, val & CCF_X ? 1 : 0);
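        /* Flags are stored in the decomposed CC_OP_FLAGS form: C and X as
         * 0/1, N and V in their sign bit (0 or -1), and Z inverted (the
         * flag counts as set when QREG_CC_Z is zero).
         */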
2238    } else {
2239        gen_helper_set_sr(cpu_env, tcg_const_i32(val));
2240    }
2241    set_cc_op(s, CC_OP_FLAGS);
2242}
2243
2244static void gen_set_sr(CPUM68KState *env, DisasContext *s, uint16_t insn,
2245                       int ccr_only)
2246{
2247    if ((insn & 0x38) == 0) {
2248        if (ccr_only) {
2249            gen_helper_set_ccr(cpu_env, DREG(insn, 0));
2250        } else {
2251            gen_helper_set_sr(cpu_env, DREG(insn, 0));
2252        }
2253        set_cc_op(s, CC_OP_FLAGS);
2254    } else if ((insn & 0x3f) == 0x3c) {
2255        uint16_t val;
2256        val = read_im16(env, s);
2257        gen_set_sr_im(s, val, ccr_only);
2258    } else {
2259        disas_undef(env, s, insn);
2260    }
2261}
2262
2263
2264DISAS_INSN(move_to_ccr)
2265{
2266    gen_set_sr(env, s, insn, 1);
2267}
2268
2269DISAS_INSN(not)
2270{
2271    TCGv src1;
2272    TCGv dest;
2273    TCGv addr;
2274    int opsize;
2275
2276    opsize = insn_opsize(insn);
2277    SRC_EA(env, src1, opsize, 1, &addr);
2278    dest = tcg_temp_new();
2279    tcg_gen_not_i32(dest, src1);
2280    DEST_EA(env, insn, opsize, dest, &addr);
2281    gen_logic_cc(s, dest, opsize);
2282}
2283
2284DISAS_INSN(swap)
2285{
2286    TCGv src1;
2287    TCGv src2;
2288    TCGv reg;
2289
2290    src1 = tcg_temp_new();
2291    src2 = tcg_temp_new();
2292    reg = DREG(insn, 0);
2293    tcg_gen_shli_i32(src1, reg, 16);
2294    tcg_gen_shri_i32(src2, reg, 16);
2295    tcg_gen_or_i32(reg, src1, src2);
2296    tcg_temp_free(src2);
2297    tcg_temp_free(src1);
2298    gen_logic_cc(s, reg, OS_LONG);
2299}
2300
2301DISAS_INSN(bkpt)
2302{
2303    gen_exception(s, s->pc - 2, EXCP_DEBUG);
2304}
2305
2306DISAS_INSN(pea)
2307{
2308    TCGv tmp;
2309
2310    tmp = gen_lea(env, s, insn, OS_LONG);
2311    if (IS_NULL_QREG(tmp)) {
2312        gen_addr_fault(s);
2313        return;
2314    }
2315    gen_push(s, tmp);
2316}
2317
2318DISAS_INSN(ext)
2319{
2320    int op;
2321    TCGv reg;
2322    TCGv tmp;
2323
2324    reg = DREG(insn, 0);
2325    op = (insn >> 6) & 7;
2326    tmp = tcg_temp_new();
2327    if (op == 3)
2328        tcg_gen_ext16s_i32(tmp, reg);
2329    else
2330        tcg_gen_ext8s_i32(tmp, reg);
2331    if (op == 2)
2332        gen_partset_reg(OS_WORD, reg, tmp);
2333    else
2334        tcg_gen_mov_i32(reg, tmp);
2335    gen_logic_cc(s, tmp, OS_LONG);
2336    tcg_temp_free(tmp);
2337}
2338
2339DISAS_INSN(tst)
2340{
2341    int opsize;
2342    TCGv tmp;
2343
2344    opsize = insn_opsize(insn);
2345    SRC_EA(env, tmp, opsize, 1, NULL);
2346    gen_logic_cc(s, tmp, opsize);
2347}
2348
2349DISAS_INSN(pulse)
2350{
2351    /* Implemented as a NOP.  */
2352}
2353
2354DISAS_INSN(illegal)
2355{
2356    gen_exception(s, s->pc - 2, EXCP_ILLEGAL);
2357}
2358
2359/* ??? This should be atomic.  */
2360DISAS_INSN(tas)
2361{
2362    TCGv dest;
2363    TCGv src1;
2364    TCGv addr;
2365
2366    dest = tcg_temp_new();
2367    SRC_EA(env, src1, OS_BYTE, 1, &addr);
2368    gen_logic_cc(s, src1, OS_BYTE);
2369    tcg_gen_ori_i32(dest, src1, 0x80);
2370    DEST_EA(env, insn, OS_BYTE, dest, &addr);
2371    tcg_temp_free(dest);
2372}
2373
2374DISAS_INSN(mull)
2375{
2376    uint16_t ext;
2377    TCGv src1;
2378    int sign;
2379
2380    ext = read_im16(env, s);
2381
2382    sign = ext & 0x800;
2383
2384    if (ext & 0x400) {
2385        if (!m68k_feature(s->env, M68K_FEATURE_QUAD_MULDIV)) {
2386            gen_exception(s, s->pc - 4, EXCP_UNSUPPORTED);
2387            return;
2388        }
2389
2390        SRC_EA(env, src1, OS_LONG, 0, NULL);
2391
2392        if (sign) {
2393            tcg_gen_muls2_i32(QREG_CC_Z, QREG_CC_N, src1, DREG(ext, 12));
2394        } else {
2395            tcg_gen_mulu2_i32(QREG_CC_Z, QREG_CC_N, src1, DREG(ext, 12));
2396        }
2397        /* if Dl == Dh, 68040 returns low word */
2398        tcg_gen_mov_i32(DREG(ext, 0), QREG_CC_N);
2399        tcg_gen_mov_i32(DREG(ext, 12), QREG_CC_Z);
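        /* The high half is written to Dh before the low half goes to Dl,
         * so the low word wins when both fields name the same register,
         * matching the 68040 behaviour noted above.
         */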
2400        tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N);
2401
2402        tcg_gen_movi_i32(QREG_CC_V, 0);
2403        tcg_gen_movi_i32(QREG_CC_C, 0);
2404
2405        set_cc_op(s, CC_OP_FLAGS);
2406        return;
2407    }
2408    SRC_EA(env, src1, OS_LONG, 0, NULL);
2409    if (m68k_feature(s->env, M68K_FEATURE_M68000)) {
2410        tcg_gen_movi_i32(QREG_CC_C, 0);
2411        if (sign) {
2412            tcg_gen_muls2_i32(QREG_CC_N, QREG_CC_V, src1, DREG(ext, 12));
2413            /* QREG_CC_V is -(QREG_CC_V != (QREG_CC_N >> 31)) */
2414            tcg_gen_sari_i32(QREG_CC_Z, QREG_CC_N, 31);
2415            tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, QREG_CC_Z);
2416        } else {
2417            tcg_gen_mulu2_i32(QREG_CC_N, QREG_CC_V, src1, DREG(ext, 12));
2418            /* QREG_CC_V is -(QREG_CC_V != 0), use QREG_CC_C as 0 */
2419            tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, QREG_CC_C);
2420        }
2421        tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
2422        tcg_gen_mov_i32(DREG(ext, 12), QREG_CC_N);
2423
2424        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
2425
2426        set_cc_op(s, CC_OP_FLAGS);
2427    } else {
2428        /* The upper 32 bits of the product are discarded, so
2429           muls.l and mulu.l are functionally equivalent.  */
2430        tcg_gen_mul_i32(DREG(ext, 12), src1, DREG(ext, 12));
2431        gen_logic_cc(s, DREG(ext, 12), OS_LONG);
2432    }
2433}
2434
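/* link An,#disp pushes An, copies the updated SP into An, and then adds
 * disp to SP.  disp is normally negative, so e.g. "link A6,#-16"
 * establishes A6 as a frame pointer and reserves 16 bytes of locals.
 */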
2435static void gen_link(DisasContext *s, uint16_t insn, int32_t offset)
2436{
2437    TCGv reg;
2438    TCGv tmp;
2439
2440    reg = AREG(insn, 0);
2441    tmp = tcg_temp_new();
2442    tcg_gen_subi_i32(tmp, QREG_SP, 4);
2443    gen_store(s, OS_LONG, tmp, reg);
2444    if ((insn & 7) != 7) {
2445        tcg_gen_mov_i32(reg, tmp);
2446    }
2447    tcg_gen_addi_i32(QREG_SP, tmp, offset);
2448    tcg_temp_free(tmp);
2449}
2450
2451DISAS_INSN(link)
2452{
2453    int16_t offset;
2454
2455    offset = read_im16(env, s);
2456    gen_link(s, insn, offset);
2457}
2458
2459DISAS_INSN(linkl)
2460{
2461    int32_t offset;
2462
2463    offset = read_im32(env, s);
2464    gen_link(s, insn, offset);
2465}
2466
2467DISAS_INSN(unlk)
2468{
2469    TCGv src;
2470    TCGv reg;
2471    TCGv tmp;
2472
2473    src = tcg_temp_new();
2474    reg = AREG(insn, 0);
2475    tcg_gen_mov_i32(src, reg);
2476    tmp = gen_load(s, OS_LONG, src, 0);
2477    tcg_gen_mov_i32(reg, tmp);
2478    tcg_gen_addi_i32(QREG_SP, src, 4);
2479    tcg_temp_free(src);
2480}
2481
2482DISAS_INSN(nop)
2483{
2484}
2485
2486DISAS_INSN(rts)
2487{
2488    TCGv tmp;
2489
2490    tmp = gen_load(s, OS_LONG, QREG_SP, 0);
2491    tcg_gen_addi_i32(QREG_SP, QREG_SP, 4);
2492    gen_jmp(s, tmp);
2493}
2494
2495DISAS_INSN(jump)
2496{
2497    TCGv tmp;
2498
2499    /* Load the target address first to ensure correct exception
2500       behavior.  */
2501    tmp = gen_lea(env, s, insn, OS_LONG);
2502    if (IS_NULL_QREG(tmp)) {
2503        gen_addr_fault(s);
2504        return;
2505    }
2506    if ((insn & 0x40) == 0) {
2507        /* jsr */
2508        gen_push(s, tcg_const_i32(s->pc));
2509    }
2510    gen_jmp(s, tmp);
2511}
2512
2513DISAS_INSN(addsubq)
2514{
2515    TCGv src;
2516    TCGv dest;
2517    TCGv val;
2518    int imm;
2519    TCGv addr;
2520    int opsize;
2521
2522    if ((insn & 070) == 010) {
2523        /* Operation on address register is always long.  */
2524        opsize = OS_LONG;
2525    } else {
2526        opsize = insn_opsize(insn);
2527    }
2528    SRC_EA(env, src, opsize, 1, &addr);
2529    imm = (insn >> 9) & 7;
2530    if (imm == 0) {
2531        imm = 8;
2532    }
2533    val = tcg_const_i32(imm);
2534    dest = tcg_temp_new();
2535    tcg_gen_mov_i32(dest, src);
2536    if ((insn & 0x38) == 0x08) {
2537        /* Don't update condition codes if the destination is an
2538           address register.  */
2539        if (insn & 0x0100) {
2540            tcg_gen_sub_i32(dest, dest, val);
2541        } else {
2542            tcg_gen_add_i32(dest, dest, val);
2543        }
2544    } else {
2545        if (insn & 0x0100) {
2546            tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, val);
2547            tcg_gen_sub_i32(dest, dest, val);
2548            set_cc_op(s, CC_OP_SUBB + opsize);
2549        } else {
2550            tcg_gen_add_i32(dest, dest, val);
2551            tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, val);
2552            set_cc_op(s, CC_OP_ADDB + opsize);
2553        }
2554        gen_update_cc_add(dest, val, opsize);
2555    }
2556    tcg_temp_free(val);
2557    DEST_EA(env, insn, opsize, dest, &addr);
2558    tcg_temp_free(dest);
2559}
2560
2561DISAS_INSN(tpf)
2562{
2563    switch (insn & 7) {
2564    case 2: /* One extension word.  */
2565        s->pc += 2;
2566        break;
2567    case 3: /* Two extension words.  */
2568        s->pc += 4;
2569        break;
2570    case 4: /* No extension words.  */
2571        break;
2572    default:
2573        disas_undef(env, s, insn);
2574    }
2575}
2576
2577DISAS_INSN(branch)
2578{
2579    int32_t offset;
2580    uint32_t base;
2581    int op;
2582    TCGLabel *l1;
2583
2584    base = s->pc;
2585    op = (insn >> 8) & 0xf;
2586    offset = (int8_t)insn;
2587    if (offset == 0) {
2588        offset = (int16_t)read_im16(env, s);
2589    } else if (offset == -1) {
2590        offset = read_im32(env, s);
2591    }
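    /* An 8-bit displacement of 0x00 means a 16-bit displacement word
     * follows, while 0xff selects the 32-bit form that is not available
     * on the original 68000.
     */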
2592    if (op == 1) {
2593        /* bsr */
2594        gen_push(s, tcg_const_i32(s->pc));
2595    }
2596    if (op > 1) {
2597        /* Bcc */
2598        l1 = gen_new_label();
2599        gen_jmpcc(s, ((insn >> 8) & 0xf) ^ 1, l1);
2600        gen_jmp_tb(s, 1, base + offset);
2601        gen_set_label(l1);
2602        gen_jmp_tb(s, 0, s->pc);
2603    } else {
2604        /* Unconditional branch.  */
2605        gen_jmp_tb(s, 0, base + offset);
2606    }
2607}
2608
2609DISAS_INSN(moveq)
2610{
2611    tcg_gen_movi_i32(DREG(insn, 9), (int8_t)insn);
2612    gen_logic_cc(s, DREG(insn, 9), OS_LONG);
2613}
2614
2615DISAS_INSN(mvzs)
2616{
2617    int opsize;
2618    TCGv src;
2619    TCGv reg;
2620
2621    if (insn & 0x40)
2622        opsize = OS_WORD;
2623    else
2624        opsize = OS_BYTE;
2625    SRC_EA(env, src, opsize, (insn & 0x80) == 0, NULL);
2626    reg = DREG(insn, 9);
2627    tcg_gen_mov_i32(reg, src);
2628    gen_logic_cc(s, src, opsize);
2629}
2630
2631DISAS_INSN(or)
2632{
2633    TCGv reg;
2634    TCGv dest;
2635    TCGv src;
2636    TCGv addr;
2637    int opsize;
2638
2639    opsize = insn_opsize(insn);
2640    reg = gen_extend(DREG(insn, 9), opsize, 0);
2641    dest = tcg_temp_new();
2642    if (insn & 0x100) {
2643        SRC_EA(env, src, opsize, 0, &addr);
2644        tcg_gen_or_i32(dest, src, reg);
2645        DEST_EA(env, insn, opsize, dest, &addr);
2646    } else {
2647        SRC_EA(env, src, opsize, 0, NULL);
2648        tcg_gen_or_i32(dest, src, reg);
2649        gen_partset_reg(opsize, DREG(insn, 9), dest);
2650    }
2651    gen_logic_cc(s, dest, opsize);
2652    tcg_temp_free(dest);
2653}
2654
2655DISAS_INSN(suba)
2656{
2657    TCGv src;
2658    TCGv reg;
2659
2660    SRC_EA(env, src, (insn & 0x100) ? OS_LONG : OS_WORD, 1, NULL);
2661    reg = AREG(insn, 9);
2662    tcg_gen_sub_i32(reg, reg, src);
2663}
2664
2665static inline void gen_subx(DisasContext *s, TCGv src, TCGv dest, int opsize)
2666{
2667    TCGv tmp;
2668
2669    gen_flush_flags(s); /* compute old Z */
2670
2671    /* Perform subtract with borrow.
2672     * (X, N) = dest - (src + X);
2673     */
2674
2675    tmp = tcg_const_i32(0);
2676    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, tmp, QREG_CC_X, tmp);
2677    tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, dest, tmp, QREG_CC_N, QREG_CC_X);
2678    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
2679    tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1);
2680
2681    /* Compute signed-overflow for subtraction.  */
2682
2683    tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, dest);
2684    tcg_gen_xor_i32(tmp, dest, src);
2685    tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, tmp);
2686    tcg_temp_free(tmp);
2687
2688    /* Copy the rest of the results into place.  */
2689    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
2690    tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
2691
2692    set_cc_op(s, CC_OP_FLAGS);
2693
2694    /* result is in QREG_CC_N */
2695}
2696
2697DISAS_INSN(subx_reg)
2698{
2699    TCGv dest;
2700    TCGv src;
2701    int opsize;
2702
2703    opsize = insn_opsize(insn);
2704
2705    src = gen_extend(DREG(insn, 0), opsize, 1);
2706    dest = gen_extend(DREG(insn, 9), opsize, 1);
2707
2708    gen_subx(s, src, dest, opsize);
2709
2710    gen_partset_reg(opsize, DREG(insn, 9), QREG_CC_N);
2711}
2712
2713DISAS_INSN(subx_mem)
2714{
2715    TCGv src;
2716    TCGv addr_src;
2717    TCGv dest;
2718    TCGv addr_dest;
2719    int opsize;
2720
2721    opsize = insn_opsize(insn);
2722
2723    addr_src = AREG(insn, 0);
2724    tcg_gen_subi_i32(addr_src, addr_src, opsize_bytes(opsize));
2725    src = gen_load(s, opsize, addr_src, 1);
2726
2727    addr_dest = AREG(insn, 9);
2728    tcg_gen_subi_i32(addr_dest, addr_dest, opsize_bytes(opsize));
2729    dest = gen_load(s, opsize, addr_dest, 1);
2730
2731    gen_subx(s, src, dest, opsize);
2732
2733    gen_store(s, opsize, addr_dest, QREG_CC_N);
2734}
2735
2736DISAS_INSN(mov3q)
2737{
2738    TCGv src;
2739    int val;
2740
2741    val = (insn >> 9) & 7;
2742    if (val == 0)
2743        val = -1;
2744    src = tcg_const_i32(val);
2745    gen_logic_cc(s, src, OS_LONG);
2746    DEST_EA(env, insn, OS_LONG, src, NULL);
2747    tcg_temp_free(src);
2748}
2749
2750DISAS_INSN(cmp)
2751{
2752    TCGv src;
2753    TCGv reg;
2754    int opsize;
2755
2756    opsize = insn_opsize(insn);
2757    SRC_EA(env, src, opsize, 1, NULL);
2758    reg = gen_extend(DREG(insn, 9), opsize, 1);
2759    gen_update_cc_cmp(s, reg, src, opsize);
2760}
2761
2762DISAS_INSN(cmpa)
2763{
2764    int opsize;
2765    TCGv src;
2766    TCGv reg;
2767
2768    if (insn & 0x100) {
2769        opsize = OS_LONG;
2770    } else {
2771        opsize = OS_WORD;
2772    }
2773    SRC_EA(env, src, opsize, 1, NULL);
2774    reg = AREG(insn, 9);
2775    gen_update_cc_cmp(s, reg, src, OS_LONG);
2776}
2777
2778DISAS_INSN(cmpm)
2779{
2780    int opsize = insn_opsize(insn);
2781    TCGv src, dst;
2782
2783    /* Post-increment load (mode 3) from Ay.  */
2784    src = gen_ea_mode(env, s, 3, REG(insn, 0), opsize,
2785                      NULL_QREG, NULL, EA_LOADS);
2786    /* Post-increment load (mode 3) from Ax.  */
2787    dst = gen_ea_mode(env, s, 3, REG(insn, 9), opsize,
2788                      NULL_QREG, NULL, EA_LOADS);
2789
2790    gen_update_cc_cmp(s, dst, src, opsize);
2791}
2792
2793DISAS_INSN(eor)
2794{
2795    TCGv src;
2796    TCGv dest;
2797    TCGv addr;
2798    int opsize;
2799
2800    opsize = insn_opsize(insn);
2801
2802    SRC_EA(env, src, opsize, 0, &addr);
2803    dest = tcg_temp_new();
2804    tcg_gen_xor_i32(dest, src, DREG(insn, 9));
2805    gen_logic_cc(s, dest, opsize);
2806    DEST_EA(env, insn, opsize, dest, &addr);
2807    tcg_temp_free(dest);
2808}
2809
2810static void do_exg(TCGv reg1, TCGv reg2)
2811{
2812    TCGv temp = tcg_temp_new();
2813    tcg_gen_mov_i32(temp, reg1);
2814    tcg_gen_mov_i32(reg1, reg2);
2815    tcg_gen_mov_i32(reg2, temp);
2816    tcg_temp_free(temp);
2817}
2818
2819DISAS_INSN(exg_dd)
2820{
2821    /* exchange Dx and Dy */
2822    do_exg(DREG(insn, 9), DREG(insn, 0));
2823}
2824
2825DISAS_INSN(exg_aa)
2826{
2827    /* exchange Ax and Ay */
2828    do_exg(AREG(insn, 9), AREG(insn, 0));
2829}
2830
2831DISAS_INSN(exg_da)
2832{
2833    /* exchange Dx and Ay */
2834    do_exg(DREG(insn, 9), AREG(insn, 0));
2835}
2836
2837DISAS_INSN(and)
2838{
2839    TCGv src;
2840    TCGv reg;
2841    TCGv dest;
2842    TCGv addr;
2843    int opsize;
2844
2845    dest = tcg_temp_new();
2846
2847    opsize = insn_opsize(insn);
2848    reg = DREG(insn, 9);
2849    if (insn & 0x100) {
2850        SRC_EA(env, src, opsize, 0, &addr);
2851        tcg_gen_and_i32(dest, src, reg);
2852        DEST_EA(env, insn, opsize, dest, &addr);
2853    } else {
2854        SRC_EA(env, src, opsize, 0, NULL);
2855        tcg_gen_and_i32(dest, src, reg);
2856        gen_partset_reg(opsize, reg, dest);
2857    }
2858    gen_logic_cc(s, dest, opsize);
2859    tcg_temp_free(dest);
2860}
2861
2862DISAS_INSN(adda)
2863{
2864    TCGv src;
2865    TCGv reg;
2866
2867    SRC_EA(env, src, (insn & 0x100) ? OS_LONG : OS_WORD, 1, NULL);
2868    reg = AREG(insn, 9);
2869    tcg_gen_add_i32(reg, reg, src);
2870}
2871
2872static inline void gen_addx(DisasContext *s, TCGv src, TCGv dest, int opsize)
2873{
2874    TCGv tmp;
2875
2876    gen_flush_flags(s); /* compute old Z */
2877
2878    /* Perform addition with carry.
2879     * (X, N) = src + dest + X;
2880     */
2881
2882    tmp = tcg_const_i32(0);
2883    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_X, tmp, dest, tmp);
2884    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_N, QREG_CC_X, src, tmp);
2885    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
2886
2887    /* Compute signed-overflow for addition.  */
2888
2889    tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, src);
2890    tcg_gen_xor_i32(tmp, dest, src);
2891    tcg_gen_andc_i32(QREG_CC_V, QREG_CC_V, tmp);
2892    tcg_temp_free(tmp);
2893
2894    /* Copy the rest of the results into place.  */
2895    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
2896    tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
2897
2898    set_cc_op(s, CC_OP_FLAGS);
2899
2900    /* result is in QREG_CC_N */
2901}
2902
2903DISAS_INSN(addx_reg)
2904{
2905    TCGv dest;
2906    TCGv src;
2907    int opsize;
2908
2909    opsize = insn_opsize(insn);
2910
2911    dest = gen_extend(DREG(insn, 9), opsize, 1);
2912    src = gen_extend(DREG(insn, 0), opsize, 1);
2913
2914    gen_addx(s, src, dest, opsize);
2915
2916    gen_partset_reg(opsize, DREG(insn, 9), QREG_CC_N);
2917}
2918
2919DISAS_INSN(addx_mem)
2920{
2921    TCGv src;
2922    TCGv addr_src;
2923    TCGv dest;
2924    TCGv addr_dest;
2925    int opsize;
2926
2927    opsize = insn_opsize(insn);
2928
2929    addr_src = AREG(insn, 0);
2930    tcg_gen_subi_i32(addr_src, addr_src, opsize_bytes(opsize));
2931    src = gen_load(s, opsize, addr_src, 1);
2932
2933    addr_dest = AREG(insn, 9);
2934    tcg_gen_subi_i32(addr_dest, addr_dest, opsize_bytes(opsize));
2935    dest = gen_load(s, opsize, addr_dest, 1);
2936
2937    gen_addx(s, src, dest, opsize);
2938
2939    gen_store(s, opsize, addr_dest, QREG_CC_N);
2940}
2941
2942static inline void shift_im(DisasContext *s, uint16_t insn, int opsize)
2943{
2944    int count = (insn >> 9) & 7;
2945    int logical = insn & 8;
2946    int left = insn & 0x100;
2947    int bits = opsize_bytes(opsize) * 8;
2948    TCGv reg = gen_extend(DREG(insn, 0), opsize, !logical);
2949
2950    if (count == 0) {
2951        count = 8;
2952    }
2953
2954    tcg_gen_movi_i32(QREG_CC_V, 0);
2955    if (left) {
2956        tcg_gen_shri_i32(QREG_CC_C, reg, bits - count);
2957        tcg_gen_shli_i32(QREG_CC_N, reg, count);
2958
2959        /* Note that ColdFire always clears V (done above),
2960           while M68000 sets V if the most significant bit is changed at
2961           any time during the shift operation */
2962        if (!logical && m68k_feature(s->env, M68K_FEATURE_M68000)) {
2963            /* if shift count >= bits, V is (reg != 0) */
2964            if (count >= bits) {
2965                tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, reg, QREG_CC_V);
2966            } else {
2967                TCGv t0 = tcg_temp_new();
2968                tcg_gen_sari_i32(QREG_CC_V, reg, bits - 1);
2969                tcg_gen_sari_i32(t0, reg, bits - count - 1);
2970                tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, t0);
2971                tcg_temp_free(t0);
2972            }
2973            tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
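            /* setcond produced 0/1; negate so that V ends up in the
             * 0/-1 (sign-bit) form used by CC_OP_FLAGS.
             */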
2974        }
2975    } else {
2976        tcg_gen_shri_i32(QREG_CC_C, reg, count - 1);
2977        if (logical) {
2978            tcg_gen_shri_i32(QREG_CC_N, reg, count);
2979        } else {
2980            tcg_gen_sari_i32(QREG_CC_N, reg, count);
2981        }
2982    }
2983
2984    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
2985    tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
2986    tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
2987    tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);
2988
2989    gen_partset_reg(opsize, DREG(insn, 0), QREG_CC_N);
2990    set_cc_op(s, CC_OP_FLAGS);
2991}
2992
2993static inline void shift_reg(DisasContext *s, uint16_t insn, int opsize)
2994{
2995    int logical = insn & 8;
2996    int left = insn & 0x100;
2997    int bits = opsize_bytes(opsize) * 8;
2998    TCGv reg = gen_extend(DREG(insn, 0), opsize, !logical);
2999    TCGv s32;
3000    TCGv_i64 t64, s64;
3001
3002    t64 = tcg_temp_new_i64();
3003    s64 = tcg_temp_new_i64();
3004    s32 = tcg_temp_new();
3005
3006    /* Note that m68k truncates the shift count modulo 64, not 32.
3007       In addition, a 64-bit shift makes it easy to find "the last
3008       bit shifted out", for the carry flag.  */
3009    tcg_gen_andi_i32(s32, DREG(insn, 9), 63);
3010    tcg_gen_extu_i32_i64(s64, s32);
3011    tcg_gen_extu_i32_i64(t64, reg);
3012
3013    /* Optimistically set V=0.  Also used as a zero source below.  */
3014    tcg_gen_movi_i32(QREG_CC_V, 0);
3015    if (left) {
3016        tcg_gen_shl_i64(t64, t64, s64);
3017
3018        if (opsize == OS_LONG) {
3019            tcg_gen_extr_i64_i32(QREG_CC_N, QREG_CC_C, t64);
3020            /* Note that C=0 if shift count is 0, and we get that for free.  */
3021        } else {
3022            TCGv zero = tcg_const_i32(0);
3023            tcg_gen_extrl_i64_i32(QREG_CC_N, t64);
3024            tcg_gen_shri_i32(QREG_CC_C, QREG_CC_N, bits);
3025            tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
3026                                s32, zero, zero, QREG_CC_C);
3027            tcg_temp_free(zero);
3028        }
3029        tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
3030
3031        /* X = C, but only if the shift count was non-zero.  */
3032        tcg_gen_movcond_i32(TCG_COND_NE, QREG_CC_X, s32, QREG_CC_V,
3033                            QREG_CC_C, QREG_CC_X);
3034
3035        /* M68000 sets V if the most significant bit is changed at
3036         * any time during the shift operation.  Do this via creating
3037         * an extension of the sign bit, comparing, and discarding
3038         * the bits below the sign bit.  I.e.
3039         *     int64_t s = (intN_t)reg;
3040         *     int64_t t = (int64_t)(intN_t)reg << count;
3041         *     V = ((s ^ t) & (-1 << (bits - 1))) != 0
3042         */
3043        if (!logical && m68k_feature(s->env, M68K_FEATURE_M68000)) {
3044            TCGv_i64 tt = tcg_const_i64(32);
3045            /* if shift is greater than 32, use 32 */
3046            tcg_gen_movcond_i64(TCG_COND_GT, s64, s64, tt, tt, s64);
3047            tcg_temp_free_i64(tt);
3048            /* Sign extend the input to 64 bits; re-do the shift.  */
3049            tcg_gen_ext_i32_i64(t64, reg);
3050            tcg_gen_shl_i64(s64, t64, s64);
3051            /* Clear all bits that are unchanged.  */
3052            tcg_gen_xor_i64(t64, t64, s64);
3053            /* Ignore the bits below the sign bit.  */
3054            tcg_gen_andi_i64(t64, t64, -1ULL << (bits - 1));
3055            /* If any bits remain set, we have overflow.  */
3056            tcg_gen_setcondi_i64(TCG_COND_NE, t64, t64, 0);
3057            tcg_gen_extrl_i64_i32(QREG_CC_V, t64);
3058            tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
3059        }
3060    } else {
3061        tcg_gen_shli_i64(t64, t64, 32);
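        /* With the operand in the upper half of the 64-bit value, the last
         * bit shifted out of the result lands in bit 31 of the low half,
         * which is extracted into QREG_CC_C below.
         */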
3062        if (logical) {
3063            tcg_gen_shr_i64(t64, t64, s64);
3064        } else {
3065            tcg_gen_sar_i64(t64, t64, s64);
3066        }
3067        tcg_gen_extr_i64_i32(QREG_CC_C, QREG_CC_N, t64);
3068
3069        /* Note that C=0 if shift count is 0, and we get that for free.  */
3070        tcg_gen_shri_i32(QREG_CC_C, QREG_CC_C, 31);
3071
3072        /* X = C, but only if the shift count was non-zero.  */
3073        tcg_gen_movcond_i32(TCG_COND_NE, QREG_CC_X, s32, QREG_CC_V,
3074                            QREG_CC_C, QREG_CC_X);
3075    }
3076    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
3077    tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
3078
3079    tcg_temp_free(s32);
3080    tcg_temp_free_i64(s64);
3081    tcg_temp_free_i64(t64);
3082
3083    /* Write back the result.  */
3084    gen_partset_reg(opsize, DREG(insn, 0), QREG_CC_N);
3085    set_cc_op(s, CC_OP_FLAGS);
3086}
3087
3088DISAS_INSN(shift8_im)
3089{
3090    shift_im(s, insn, OS_BYTE);
3091}
3092
3093DISAS_INSN(shift16_im)
3094{
3095    shift_im(s, insn, OS_WORD);
3096}
3097
3098DISAS_INSN(shift_im)
3099{
3100    shift_im(s, insn, OS_LONG);
3101}
3102
3103DISAS_INSN(shift8_reg)
3104{
3105    shift_reg(s, insn, OS_BYTE);
3106}
3107
3108DISAS_INSN(shift16_reg)
3109{
3110    shift_reg(s, insn, OS_WORD);
3111}
3112
3113DISAS_INSN(shift_reg)
3114{
3115    shift_reg(s, insn, OS_LONG);
3116}
3117
3118DISAS_INSN(shift_mem)
3119{
3120    int logical = insn & 8;
3121    int left = insn & 0x100;
3122    TCGv src;
3123    TCGv addr;
3124
3125    SRC_EA(env, src, OS_WORD, !logical, &addr);
3126    tcg_gen_movi_i32(QREG_CC_V, 0);
3127    if (left) {
3128        tcg_gen_shri_i32(QREG_CC_C, src, 15);
3129        tcg_gen_shli_i32(QREG_CC_N, src, 1);
3130
3131        /* Note that ColdFire always clears V,
3132           while M68000 sets V if the most significant bit is changed at
3133           any time during the shift operation */
3134        if (!logical && m68k_feature(s->env, M68K_FEATURE_M68000)) {
3135            src = gen_extend(src, OS_WORD, 1);
3136            tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, src);
3137        }
3138    } else {
3139        tcg_gen_mov_i32(QREG_CC_C, src);
3140        if (logical) {
3141            tcg_gen_shri_i32(QREG_CC_N, src, 1);
3142        } else {
3143            tcg_gen_sari_i32(QREG_CC_N, src, 1);
3144        }
3145    }
3146
3147    gen_ext(QREG_CC_N, QREG_CC_N, OS_WORD, 1);
3148    tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
3149    tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
3150    tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);
3151
3152    DEST_EA(env, insn, OS_WORD, QREG_CC_N, &addr);
3153    set_cc_op(s, CC_OP_FLAGS);
3154}
3155
3156static void rotate(TCGv reg, TCGv shift, int left, int size)
3157{
3158    switch (size) {
3159    case 8:
3160        /* Replicate the 8-bit input so that a 32-bit rotate works.  */
3161        tcg_gen_ext8u_i32(reg, reg);
3162        tcg_gen_muli_i32(reg, reg, 0x01010101);
3163        goto do_long;
3164    case 16:
3165        /* Replicate the 16-bit input so that a 32-bit rotate works.  */
3166        tcg_gen_deposit_i32(reg, reg, reg, 16, 16);
3167        goto do_long;
3168    do_long:
3169    default:
3170        if (left) {
3171            tcg_gen_rotl_i32(reg, reg, shift);
3172        } else {
3173            tcg_gen_rotr_i32(reg, reg, shift);
3174        }
3175    }
3176
3177    /* compute flags */
3178
3179    switch (size) {
3180    case 8:
3181        tcg_gen_ext8s_i32(reg, reg);
3182        break;
3183    case 16:
3184        tcg_gen_ext16s_i32(reg, reg);
3185        break;
3186    default:
3187        break;
3188    }
3189
3190    /* QREG_CC_X is not affected */
3191
3192    tcg_gen_mov_i32(QREG_CC_N, reg);
3193    tcg_gen_mov_i32(QREG_CC_Z, reg);
3194
3195    if (left) {
3196        tcg_gen_andi_i32(QREG_CC_C, reg, 1);
3197    } else {
3198        tcg_gen_shri_i32(QREG_CC_C, reg, 31);
3199    }
3200
3201    tcg_gen_movi_i32(QREG_CC_V, 0); /* always cleared */
3202}
3203
3204static void rotate_x_flags(TCGv reg, TCGv X, int size)
3205{
3206    switch (size) {
3207    case 8:
3208        tcg_gen_ext8s_i32(reg, reg);
3209        break;
3210    case 16:
3211        tcg_gen_ext16s_i32(reg, reg);
3212        break;
3213    default:
3214        break;
3215    }
3216    tcg_gen_mov_i32(QREG_CC_N, reg);
3217    tcg_gen_mov_i32(QREG_CC_Z, reg);
3218    tcg_gen_mov_i32(QREG_CC_X, X);
3219    tcg_gen_mov_i32(QREG_CC_C, X);
3220    tcg_gen_movi_i32(QREG_CC_V, 0);
3221}
3222
3223/* Result of rotate_x() is valid if 0 <= shift <= size */
3224static TCGv rotate_x(TCGv reg, TCGv shift, int left, int size)
3225{
3226    TCGv X, shl, shr, shx, sz, zero;
3227
3228    sz = tcg_const_i32(size);
3229
3230    shr = tcg_temp_new();
3231    shl = tcg_temp_new();
3232    shx = tcg_temp_new();
3233    if (left) {
3234        tcg_gen_mov_i32(shl, shift);      /* shl = shift */
3235        tcg_gen_movi_i32(shr, size + 1);
3236        tcg_gen_sub_i32(shr, shr, shift); /* shr = size + 1 - shift */
3237        tcg_gen_subi_i32(shx, shift, 1);  /* shx = shift - 1 */
3238        /* shx = shx < 0 ? size : shx; */
3239        zero = tcg_const_i32(0);
3240        tcg_gen_movcond_i32(TCG_COND_LT, shx, shx, zero, sz, shx);
3241        tcg_temp_free(zero);
3242    } else {
3243        tcg_gen_mov_i32(shr, shift);      /* shr = shift */
3244        tcg_gen_movi_i32(shl, size + 1);
3245        tcg_gen_sub_i32(shl, shl, shift); /* shl = size + 1 - shift */
3246        tcg_gen_sub_i32(shx, sz, shift); /* shx = size - shift */
3247    }
3248
3249    /* reg = (reg << shl) | (reg >> shr) | (x << shx); */
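    /* e.g. an 8-bit left rotate-through-X by 3 gives shl = 3, shr = 6 and
     * shx = 2: bits 7:6 wrap to bits 1:0, the old X lands in bit 2, and
     * the new X is taken from bit 8 of the combined value below.
     */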
3250
3251    tcg_gen_shl_i32(shl, reg, shl);
3252    tcg_gen_shr_i32(shr, reg, shr);
3253    tcg_gen_or_i32(reg, shl, shr);
3254    tcg_temp_free(shl);
3255    tcg_temp_free(shr);
3256    tcg_gen_shl_i32(shx, QREG_CC_X, shx);
3257    tcg_gen_or_i32(reg, reg, shx);
3258    tcg_temp_free(shx);
3259
3260    /* X = (reg >> size) & 1 */
3261
3262    X = tcg_temp_new();
3263    tcg_gen_shr_i32(X, reg, sz);
3264    tcg_gen_andi_i32(X, X, 1);
3265    tcg_temp_free(sz);
3266
3267    return X;
3268}
3269
3270/* Result of rotate32_x() is valid if 0 <= shift < 33 */
3271static TCGv rotate32_x(TCGv reg, TCGv shift, int left)
3272{
3273    TCGv_i64 t0, shift64;
3274    TCGv X, lo, hi, zero;
3275
3276    shift64 = tcg_temp_new_i64();
3277    tcg_gen_extu_i32_i64(shift64, shift);
3278
3279    t0 = tcg_temp_new_i64();
3280
3281    X = tcg_temp_new();
3282    lo = tcg_temp_new();
3283    hi = tcg_temp_new();
3284
3285    if (left) {
3286        /* create [reg:X:..] */
3287
3288        tcg_gen_shli_i32(lo, QREG_CC_X, 31);
3289        tcg_gen_concat_i32_i64(t0, lo, reg);
3290
3291        /* rotate */
3292
3293        tcg_gen_rotl_i64(t0, t0, shift64);
3294        tcg_temp_free_i64(shift64);
3295
3296        /* result is [reg:..:reg:X] */
3297
3298        tcg_gen_extr_i64_i32(lo, hi, t0);
3299        tcg_gen_andi_i32(X, lo, 1);
3300
3301        tcg_gen_shri_i32(lo, lo, 1);
3302    } else {
3303        /* create [..:X:reg] */
3304
3305        tcg_gen_concat_i32_i64(t0, reg, QREG_CC_X);
3306
3307        tcg_gen_rotr_i64(t0, t0, shift64);
3308        tcg_temp_free_i64(shift64);
3309
3310        /* result is value: [X:reg:..:reg] */
3311
3312        tcg_gen_extr_i64_i32(lo, hi, t0);
3313
3314        /* extract X */
3315
3316        tcg_gen_shri_i32(X, hi, 31);
3317
3318        /* extract result */
3319
3320        tcg_gen_shli_i32(hi, hi, 1);
3321    }
3322    tcg_temp_free_i64(t0);
3323    tcg_gen_or_i32(lo, lo, hi);
3324    tcg_temp_free(hi);
3325
3326    /* if shift == 0, register and X are not affected */
3327
3328    zero = tcg_const_i32(0);
3329    tcg_gen_movcond_i32(TCG_COND_EQ, X, shift, zero, QREG_CC_X, X);
3330    tcg_gen_movcond_i32(TCG_COND_EQ, reg, shift, zero, reg, lo);
3331    tcg_temp_free(zero);
3332    tcg_temp_free(lo);
3333
3334    return X;
3335}
3336
3337DISAS_INSN(rotate_im)
3338{
3339    TCGv shift;
3340    int tmp;
3341    int left = (insn & 0x100);
3342
3343    tmp = (insn >> 9) & 7;
3344    if (tmp == 0) {
3345        tmp = 8;
3346    }
3347
3348    shift = tcg_const_i32(tmp);
3349    if (insn & 8) {
3350        rotate(DREG(insn, 0), shift, left, 32);
3351    } else {
3352        TCGv X = rotate32_x(DREG(insn, 0), shift, left);
3353        rotate_x_flags(DREG(insn, 0), X, 32);
3354        tcg_temp_free(X);
3355    }
3356    tcg_temp_free(shift);
3357
3358    set_cc_op(s, CC_OP_FLAGS);
3359}
3360
3361DISAS_INSN(rotate8_im)
3362{
3363    int left = (insn & 0x100);
3364    TCGv reg;
3365    TCGv shift;
3366    int tmp;
3367
3368    reg = gen_extend(DREG(insn, 0), OS_BYTE, 0);
3369
3370    tmp = (insn >> 9) & 7;
3371    if (tmp == 0) {
3372        tmp = 8;
3373    }
3374
3375    shift = tcg_const_i32(tmp);
3376    if (insn & 8) {
3377        rotate(reg, shift, left, 8);
3378    } else {
3379        TCGv X = rotate_x(reg, shift, left, 8);
3380        rotate_x_flags(reg, X, 8);
3381        tcg_temp_free(X);
3382    }
3383    tcg_temp_free(shift);
3384    gen_partset_reg(OS_BYTE, DREG(insn, 0), reg);
3385    set_cc_op(s, CC_OP_FLAGS);
3386}
3387
3388DISAS_INSN(rotate16_im)
3389{
3390    int left = (insn & 0x100);
3391    TCGv reg;
3392    TCGv shift;
3393    int tmp;
3394
3395    reg = gen_extend(DREG(insn, 0), OS_WORD, 0);
3396    tmp = (insn >> 9) & 7;
3397    if (tmp == 0) {
3398        tmp = 8;
3399    }
3400
3401    shift = tcg_const_i32(tmp);
3402    if (insn & 8) {
3403        rotate(reg, shift, left, 16);
3404    } else {
3405        TCGv X = rotate_x(reg, shift, left, 16);
3406        rotate_x_flags(reg, X, 16);
3407        tcg_temp_free(X);
3408    }
3409    tcg_temp_free(shift);
3410    gen_partset_reg(OS_WORD, DREG(insn, 0), reg);
3411    set_cc_op(s, CC_OP_FLAGS);
3412}
3413
3414DISAS_INSN(rotate_reg)
3415{
3416    TCGv reg;
3417    TCGv src;
3418    TCGv t0, t1;
3419    int left = (insn & 0x100);
3420
3421    reg = DREG(insn, 0);
3422    src = DREG(insn, 9);
3423    /* shift in [0..63] */
3424    t0 = tcg_temp_new();
3425    tcg_gen_andi_i32(t0, src, 63);
3426    t1 = tcg_temp_new_i32();
3427    if (insn & 8) {
3428        tcg_gen_andi_i32(t1, src, 31);
3429        rotate(reg, t1, left, 32);
3430        /* if shift == 0, clear C */
3431        tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
3432                            t0, QREG_CC_V /* 0 */,
3433                            QREG_CC_V /* 0 */, QREG_CC_C);
3434    } else {
3435        TCGv X;
3436        /* modulo 33 */
3437        tcg_gen_movi_i32(t1, 33);
3438        tcg_gen_remu_i32(t1, t0, t1);
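        /* A rotate-through-X of a 32-bit value cycles a 33-bit quantity,
         * so counts repeat with period 33 and rotate32_x() only has to
         * handle shifts in [0..32].
         */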
3439        X = rotate32_x(DREG(insn, 0), t1, left);
3440        rotate_x_flags(DREG(insn, 0), X, 32);
3441        tcg_temp_free(X);
3442    }
3443    tcg_temp_free(t1);
3444    tcg_temp_free(t0);
3445    set_cc_op(s, CC_OP_FLAGS);
3446}
3447
3448DISAS_INSN(rotate8_reg)
3449{
3450    TCGv reg;
3451    TCGv src;
3452    TCGv t0, t1;
3453    int left = (insn & 0x100);
3454
3455    reg = gen_extend(DREG(insn, 0), OS_BYTE, 0);
3456    src = DREG(insn, 9);
3457    /* shift in [0..63] */
3458    t0 = tcg_temp_new_i32();
3459    tcg_gen_andi_i32(t0, src, 63);
3460    t1 = tcg_temp_new_i32();
3461    if (insn & 8) {
3462        tcg_gen_andi_i32(t1, src, 7);
3463        rotate(reg, t1, left, 8);
3464        /* if shift == 0, clear C */
3465        tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
3466                            t0, QREG_CC_V /* 0 */,
3467                            QREG_CC_V /* 0 */, QREG_CC_C);
3468    } else {
3469        TCGv X;
3470        /* modulo 9 */
3471        tcg_gen_movi_i32(t1, 9);
3472        tcg_gen_remu_i32(t1, t0, t1);
3473        X = rotate_x(reg, t1, left, 8);
3474        rotate_x_flags(reg, X, 8);
3475        tcg_temp_free(X);
3476    }
3477    tcg_temp_free(t1);
3478    tcg_temp_free(t0);
3479    gen_partset_reg(OS_BYTE, DREG(insn, 0), reg);
3480    set_cc_op(s, CC_OP_FLAGS);
3481}
3482
3483DISAS_INSN(rotate16_reg)
3484{
3485    TCGv reg;
3486    TCGv src;
3487    TCGv t0, t1;
3488    int left = (insn & 0x100);
3489
3490    reg = gen_extend(DREG(insn, 0), OS_WORD, 0);
3491    src = DREG(insn, 9);
3492    /* shift in [0..63] */
3493    t0 = tcg_temp_new_i32();
3494    tcg_gen_andi_i32(t0, src, 63);
3495    t1 = tcg_temp_new_i32();
3496    if (insn & 8) {
3497        tcg_gen_andi_i32(t1, src, 15);
3498        rotate(reg, t1, left, 16);
3499        /* if shift == 0, clear C */
3500        tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
3501                            t0, QREG_CC_V /* 0 */,
3502                            QREG_CC_V /* 0 */, QREG_CC_C);
3503    } else {
3504        TCGv X;
3505        /* modulo 17 */
3506        tcg_gen_movi_i32(t1, 17);
3507        tcg_gen_remu_i32(t1, t0, t1);
3508        X = rotate_x(reg, t1, left, 16);
3509        rotate_x_flags(reg, X, 16);
3510        tcg_temp_free(X);
3511    }
3512    tcg_temp_free(t1);
3513    tcg_temp_free(t0);
3514    gen_partset_reg(OS_WORD, DREG(insn, 0), reg);
3515    set_cc_op(s, CC_OP_FLAGS);
3516}
3517
3518DISAS_INSN(rotate_mem)
3519{
3520    TCGv src;
3521    TCGv addr;
3522    TCGv shift;
3523    int left = (insn & 0x100);
3524
3525    SRC_EA(env, src, OS_WORD, 0, &addr);
3526
3527    shift = tcg_const_i32(1);
3528    if (insn & 0x0200) {
3529        rotate(src, shift, left, 16);
3530    } else {
3531        TCGv X = rotate_x(src, shift, left, 16);
3532        rotate_x_flags(src, X, 16);
3533        tcg_temp_free(X);
3534    }
3535    tcg_temp_free(shift);
3536    DEST_EA(env, insn, OS_WORD, src, &addr);
3537    set_cc_op(s, CC_OP_FLAGS);
3538}
3539
3540DISAS_INSN(bfext_reg)
3541{
3542    int ext = read_im16(env, s);
3543    int is_sign = insn & 0x200;
3544    TCGv src = DREG(insn, 0);
3545    TCGv dst = DREG(ext, 12);
3546    int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;
3547    int ofs = extract32(ext, 6, 5);  /* big bit-endian */
3548    int pos = 32 - ofs - len;        /* little bit-endian */
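    /* m68k bitfields are numbered from the MSB: a field at offset ofs of
     * width len occupies bits [31 - ofs .. 32 - ofs - len] in little-endian
     * terms, so pos goes negative when the field wraps around bit 0.
     */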
3549    TCGv tmp = tcg_temp_new();
3550    TCGv shift;
3551
3552    /* In general, we're going to rotate the field so that it's at the
3553       top of the word and then right-shift by the complement of the
3554       width to extend the field.  */
3555    if (ext & 0x20) {
3556        /* Variable width.  */
3557        if (ext & 0x800) {
3558            /* Variable offset.  */
3559            tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
3560            tcg_gen_rotl_i32(tmp, src, tmp);
3561        } else {
3562            tcg_gen_rotli_i32(tmp, src, ofs);
3563        }
3564
3565        shift = tcg_temp_new();
3566        tcg_gen_neg_i32(shift, DREG(ext, 0));
3567        tcg_gen_andi_i32(shift, shift, 31);
3568        tcg_gen_sar_i32(QREG_CC_N, tmp, shift);
3569        if (is_sign) {
3570            tcg_gen_mov_i32(dst, QREG_CC_N);
3571        } else {
3572            tcg_gen_shr_i32(dst, tmp, shift);
3573        }
3574        tcg_temp_free(shift);
3575    } else {
3576        /* Immediate width.  */
3577        if (ext & 0x800) {
3578            /* Variable offset */
3579            tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
3580            tcg_gen_rotl_i32(tmp, src, tmp);
3581            src = tmp;
3582            pos = 32 - len;
3583        } else {
3584            /* Immediate offset.  If the field doesn't wrap around the
3585               end of the word, rely on (s)extract completely.  */
3586            if (pos < 0) {
3587                tcg_gen_rotli_i32(tmp, src, ofs);
3588                src = tmp;
3589                pos = 32 - len;
3590            }
3591        }
3592
3593        tcg_gen_sextract_i32(QREG_CC_N, src, pos, len);
3594        if (is_sign) {
3595            tcg_gen_mov_i32(dst, QREG_CC_N);
3596        } else {
3597            tcg_gen_extract_i32(dst, src, pos, len);
3598        }
3599    }
3600
3601    tcg_temp_free(tmp);
3602    set_cc_op(s, CC_OP_LOGIC);
3603}
3604
3605DISAS_INSN(bfext_mem)
3606{
3607    int ext = read_im16(env, s);
3608    int is_sign = insn & 0x200;
3609    TCGv dest = DREG(ext, 12);
3610    TCGv addr, len, ofs;
3611
3612    addr = gen_lea(env, s, insn, OS_UNSIZED);
3613    if (IS_NULL_QREG(addr)) {
3614        gen_addr_fault(s);
3615        return;
3616    }
3617
3618    if (ext & 0x20) {
3619        len = DREG(ext, 0);
3620    } else {
3621        len = tcg_const_i32(extract32(ext, 0, 5));
3622    }
3623    if (ext & 0x800) {
3624        ofs = DREG(ext, 6);
3625    } else {
3626        ofs = tcg_const_i32(extract32(ext, 6, 5));
3627    }
3628
3629    if (is_sign) {
3630        gen_helper_bfexts_mem(dest, cpu_env, addr, ofs, len);
3631        tcg_gen_mov_i32(QREG_CC_N, dest);
3632    } else {
3633        TCGv_i64 tmp = tcg_temp_new_i64();
3634        gen_helper_bfextu_mem(tmp, cpu_env, addr, ofs, len);
3635        tcg_gen_extr_i64_i32(dest, QREG_CC_N, tmp);
3636        tcg_temp_free_i64(tmp);
3637    }
3638    set_cc_op(s, CC_OP_LOGIC);
3639
3640    if (!(ext & 0x20)) {
3641        tcg_temp_free(len);
3642    }
3643    if (!(ext & 0x800)) {
3644        tcg_temp_free(ofs);
3645    }
3646}
3647
3648DISAS_INSN(bfop_reg)
3649{
3650    int ext = read_im16(env, s);
3651    TCGv src = DREG(insn, 0);
3652    int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;
3653    int ofs = extract32(ext, 6, 5);  /* big bit-endian */
3654    TCGv mask, tofs, tlen;
3655
3656    TCGV_UNUSED(tofs);
3657    TCGV_UNUSED(tlen);
3658    if ((insn & 0x0f00) == 0x0d00) { /* bfffo */
3659        tofs = tcg_temp_new();
3660        tlen = tcg_temp_new();
3661    }
3662
3663    if ((ext & 0x820) == 0) {
3664        /* Immediate width and offset.  */
3665        uint32_t maski = 0x7fffffffu >> (len - 1);
3666        if (ofs + len <= 32) {
3667            tcg_gen_shli_i32(QREG_CC_N, src, ofs);
3668        } else {
3669            tcg_gen_rotli_i32(QREG_CC_N, src, ofs);
3670        }
3671        tcg_gen_andi_i32(QREG_CC_N, QREG_CC_N, ~maski);
3672        mask = tcg_const_i32(ror32(maski, ofs));
3673        if (!TCGV_IS_UNUSED(tofs)) {
3674            tcg_gen_movi_i32(tofs, ofs);
3675            tcg_gen_movi_i32(tlen, len);
3676        }
3677    } else {
3678        TCGv tmp = tcg_temp_new();
3679        if (ext & 0x20) {
3680            /* Variable width */
3681            tcg_gen_subi_i32(tmp, DREG(ext, 0), 1);
3682            tcg_gen_andi_i32(tmp, tmp, 31);
3683            mask = tcg_const_i32(0x7fffffffu);
3684            tcg_gen_shr_i32(mask, mask, tmp);
3685            if (!TCGV_IS_UNUSED(tlen)) {
3686                tcg_gen_addi_i32(tlen, tmp, 1);
3687            }
3688        } else {
3689            /* Immediate width */
3690            mask = tcg_const_i32(0x7fffffffu >> (len - 1));
3691            if (!TCGV_IS_UNUSED(tlen)) {
3692                tcg_gen_movi_i32(tlen, len);
3693            }
3694        }
3695        if (ext & 0x800) {
3696            /* Variable offset */
3697            tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
3698            tcg_gen_rotl_i32(QREG_CC_N, src, tmp);
3699            tcg_gen_andc_i32(QREG_CC_N, QREG_CC_N, mask);
3700            tcg_gen_rotr_i32(mask, mask, tmp);
3701            if (!TCGV_IS_UNUSED(tofs)) {
3702                tcg_gen_mov_i32(tofs, tmp);
3703            }
3704        } else {
3705            /* Immediate offset (and variable width) */
3706            tcg_gen_rotli_i32(QREG_CC_N, src, ofs);
3707            tcg_gen_andc_i32(QREG_CC_N, QREG_CC_N, mask);
3708            tcg_gen_rotri_i32(mask, mask, ofs);
3709            if (!TCGV_IS_UNUSED(tofs)) {
3710                tcg_gen_movi_i32(tofs, ofs);
3711            }
3712        }
3713        tcg_temp_free(tmp);
3714    }
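    /* At this point QREG_CC_N holds the field left-aligned (for the flags)
     * and mask has zeros over the selected field and ones elsewhere; the
     * bfclr/bfset/bfchg cases below apply it with and/orc/eqv respectively.
     */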
3715    set_cc_op(s, CC_OP_LOGIC);
3716
3717    switch (insn & 0x0f00) {
3718    case 0x0a00: /* bfchg */
3719        tcg_gen_eqv_i32(src, src, mask);
3720        break;
3721    case 0x0c00: /* bfclr */
3722        tcg_gen_and_i32(src, src, mask);
3723        break;
3724    case 0x0d00: /* bfffo */
3725        gen_helper_bfffo_reg(DREG(ext, 12), QREG_CC_N, tofs, tlen);
3726        tcg_temp_free(tlen);
3727        tcg_temp_free(tofs);
3728        break;
3729    case 0x0e00: /* bfset */
3730        tcg_gen_orc_i32(src, src, mask);
3731        break;
3732    case 0x0800: /* bftst */
3733        /* flags already set; no other work to do.  */
3734        break;
3735    default:
3736        g_assert_not_reached();
3737    }
3738    tcg_temp_free(mask);
3739}
3740
3741DISAS_INSN(bfop_mem)
3742{
3743    int ext = read_im16(env, s);
3744    TCGv addr, len, ofs;
3745    TCGv_i64 t64;
3746
3747    addr = gen_lea(env, s, insn, OS_UNSIZED);
3748    if (IS_NULL_QREG(addr)) {
3749        gen_addr_fault(s);
3750        return;
3751    }
3752
3753    if (ext & 0x20) {
3754        len = DREG(ext, 0);
3755    } else {
3756        len = tcg_const_i32(extract32(ext, 0, 5));
3757    }
3758    if (ext & 0x800) {
3759        ofs = DREG(ext, 6);
3760    } else {
3761        ofs = tcg_const_i32(extract32(ext, 6, 5));
3762    }
3763
3764    switch (insn & 0x0f00) {
3765    case 0x0a00: /* bfchg */
3766        gen_helper_bfchg_mem(QREG_CC_N, cpu_env, addr, ofs, len);
3767        break;
3768    case 0x0c00: /* bfclr */
3769        gen_helper_bfclr_mem(QREG_CC_N, cpu_env, addr, ofs, len);
3770        break;
3771    case 0x0d00: /* bfffo */
3772        t64 = tcg_temp_new_i64();
3773        gen_helper_bfffo_mem(t64, cpu_env, addr, ofs, len);
3774        tcg_gen_extr_i64_i32(DREG(ext, 12), QREG_CC_N, t64);
3775        tcg_temp_free_i64(t64);
3776        break;
3777    case 0x0e00: /* bfset */
3778        gen_helper_bfset_mem(QREG_CC_N, cpu_env, addr, ofs, len);
3779        break;
3780    case 0x0800: /* bftst */
3781        gen_helper_bfexts_mem(QREG_CC_N, cpu_env, addr, ofs, len);
3782        break;
3783    default:
3784        g_assert_not_reached();
3785    }
3786    set_cc_op(s, CC_OP_LOGIC);
3787
3788    if (!(ext & 0x20)) {
3789        tcg_temp_free(len);
3790    }
3791    if (!(ext & 0x800)) {
3792        tcg_temp_free(ofs);
3793    }
3794}
3795
3796DISAS_INSN(bfins_reg)
3797{
3798    int ext = read_im16(env, s);
3799    TCGv dst = DREG(insn, 0);
3800    TCGv src = DREG(ext, 12);
3801    int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;
3802    int ofs = extract32(ext, 6, 5);  /* big bit-endian */
3803    int pos = 32 - ofs - len;        /* little bit-endian */
3804    TCGv tmp;
3805
3806    tmp = tcg_temp_new();
3807
3808    if (ext & 0x20) {
3809        /* Variable width */
3810        tcg_gen_neg_i32(tmp, DREG(ext, 0));
3811        tcg_gen_andi_i32(tmp, tmp, 31);
3812        tcg_gen_shl_i32(QREG_CC_N, src, tmp);
3813    } else {
3814        /* Immediate width */
3815        tcg_gen_shli_i32(QREG_CC_N, src, 32 - len);
3816    }
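    /* QREG_CC_N now holds the inserted value left-aligned, so with
     * CC_OP_LOGIC the N and Z flags reflect only the low len bits of src.
     */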
3817    set_cc_op(s, CC_OP_LOGIC);
3818
3819    /* Immediate width and offset */
3820    if ((ext & 0x820) == 0) {
3821        /* Check for suitability for deposit.  */
3822        if (pos >= 0) {
3823            tcg_gen_deposit_i32(dst, dst, src, pos, len);
3824        } else {
3825            uint32_t maski = -2U << (len - 1);
3826            uint32_t roti = (ofs + len) & 31;
3827            tcg_gen_andi_i32(tmp, src, ~maski);
3828            tcg_gen_rotri_i32(tmp, tmp, roti);
3829            tcg_gen_andi_i32(dst, dst, ror32(maski, roti));
3830            tcg_gen_or_i32(dst, dst, tmp);
3831        }
3832    } else {
3833        TCGv mask = tcg_temp_new();
3834        TCGv rot = tcg_temp_new();
3835
3836        if (ext & 0x20) {
3837            /* Variable width */
3838            tcg_gen_subi_i32(rot, DREG(ext, 0), 1);
3839            tcg_gen_andi_i32(rot, rot, 31);
3840            tcg_gen_movi_i32(mask, -2);
3841            tcg_gen_shl_i32(mask, mask, rot);
3842            tcg_gen_mov_i32(rot, DREG(ext, 0));
3843            tcg_gen_andc_i32(tmp, src, mask);
3844        } else {
3845            /* Immediate width (variable offset) */
3846            uint32_t maski = -2U << (len - 1);
3847            tcg_gen_andi_i32(tmp, src, ~maski);
3848            tcg_gen_movi_i32(mask, maski);
3849            tcg_gen_movi_i32(rot, len & 31);
3850        }
3851        if (ext & 0x800) {
3852            /* Variable offset */
3853            tcg_gen_add_i32(rot, rot, DREG(ext, 6));
3854        } else {
3855            /* Immediate offset (variable width) */
3856            tcg_gen_addi_i32(rot, rot, ofs);
3857        }
3858        tcg_gen_andi_i32(rot, rot, 31);
3859        tcg_gen_rotr_i32(mask, mask, rot);
3860        tcg_gen_rotr_i32(tmp, tmp, rot);
3861        tcg_gen_and_i32(dst, dst, mask);
3862        tcg_gen_or_i32(dst, dst, tmp);
3863
3864        tcg_temp_free(rot);
3865        tcg_temp_free(mask);
3866    }
3867    tcg_temp_free(tmp);
3868}
3869
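/* bfins with a memory operand; the read-modify-write of the underlying
   bytes is done entirely in the bfins_mem helper.  */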
3870DISAS_INSN(bfins_mem)
3871{
3872    int ext = read_im16(env, s);
3873    TCGv src = DREG(ext, 12);
3874    TCGv addr, len, ofs;
3875
3876    addr = gen_lea(env, s, insn, OS_UNSIZED);
3877    if (IS_NULL_QREG(addr)) {
3878        gen_addr_fault(s);
3879        return;
3880    }
3881
3882    if (ext & 0x20) {
3883        len = DREG(ext, 0);
3884    } else {
3885        len = tcg_const_i32(extract32(ext, 0, 5));
3886    }
3887    if (ext & 0x800) {
3888        ofs = DREG(ext, 6);
3889    } else {
3890        ofs = tcg_const_i32(extract32(ext, 6, 5));
3891    }
3892
3893    gen_helper_bfins_mem(QREG_CC_N, cpu_env, addr, src, ofs, len);
3894    set_cc_op(s, CC_OP_LOGIC);
3895
3896    if (!(ext & 0x20)) {
3897        tcg_temp_free(len);
3898    }
3899    if (!(ext & 0x800)) {
3900        tcg_temp_free(ofs);
3901    }
3902}
3903
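/* ColdFire ff1: set the condition codes from the register value, then
   replace it with the bit offset of the most significant set bit,
   counting from bit 31 (32 if the register is zero).  */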
3904DISAS_INSN(ff1)
3905{
3906    TCGv reg;
3907    reg = DREG(insn, 0);
3908    gen_logic_cc(s, reg, OS_LONG);
3909    gen_helper_ff1(reg, reg);
3910}
3911
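/* Assemble the full 16-bit SR from the system byte kept in QREG_SR and
   the lazily evaluated condition codes.  */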
3912static TCGv gen_get_sr(DisasContext *s)
3913{
3914    TCGv ccr;
3915    TCGv sr;
3916
3917    ccr = gen_get_ccr(s);
3918    sr = tcg_temp_new();
3919    tcg_gen_andi_i32(sr, QREG_SR, 0xffe0);
3920    tcg_gen_or_i32(sr, sr, ccr);
3921    return sr;
3922}
3923
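/* ColdFire strldsr: must be followed by an immediate move to SR
   (0x46fc); push the current SR, then load the new value, which must
   have the supervisor bit set.  */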
3924DISAS_INSN(strldsr)
3925{
3926    uint16_t ext;
3927    uint32_t addr;
3928
3929    addr = s->pc - 2;
3930    ext = read_im16(env, s);
3931    if (ext != 0x46FC) {
3932        gen_exception(s, addr, EXCP_UNSUPPORTED);
3933        return;
3934    }
3935    ext = read_im16(env, s);
3936    if (IS_USER(s) || (ext & SR_S) == 0) {
3937        gen_exception(s, addr, EXCP_PRIVILEGE);
3938        return;
3939    }
3940    gen_push(s, gen_get_sr(s));
3941    gen_set_sr_im(s, ext, 0);
3942}
3943
3944DISAS_INSN(move_from_sr)
3945{
3946    TCGv sr;
3947
3948    if (IS_USER(s) && !m68k_feature(env, M68K_FEATURE_M68000)) {
3949        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
3950        return;
3951    }
3952    sr = gen_get_sr(s);
3953    DEST_EA(env, insn, OS_WORD, sr, NULL);
3954}
3955
3956DISAS_INSN(move_to_sr)
3957{
3958    if (IS_USER(s)) {
3959        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
3960        return;
3961    }
3962    gen_set_sr(env, s, insn, 0);
3963    gen_lookup_tb(s);
3964}
3965
3966DISAS_INSN(move_from_usp)
3967{
3968    if (IS_USER(s)) {
3969        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
3970        return;
3971    }
3972    tcg_gen_ld_i32(AREG(insn, 0), cpu_env,
3973                   offsetof(CPUM68KState, sp[M68K_USP]));
3974}
3975
3976DISAS_INSN(move_to_usp)
3977{
3978    if (IS_USER(s)) {
3979        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
3980        return;
3981    }
3982    tcg_gen_st_i32(AREG(insn, 0), cpu_env,
3983                   offsetof(CPUM68KState, sp[M68K_USP]));
3984}
3985
3986DISAS_INSN(halt)
3987{
3988    gen_exception(s, s->pc, EXCP_HALT_INSN);
3989}
3990
3991DISAS_INSN(stop)
3992{
3993    uint16_t ext;
3994
3995    if (IS_USER(s)) {
3996        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
3997        return;
3998    }
3999
4000    ext = read_im16(env, s);
4001
4002    gen_set_sr_im(s, ext, 0);
4003    tcg_gen_movi_i32(cpu_halted, 1);
4004    gen_exception(s, s->pc, EXCP_HLT);
4005}
4006
4007DISAS_INSN(rte)
4008{
4009    if (IS_USER(s)) {
4010        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
4011        return;
4012    }
4013    gen_exception(s, s->pc - 2, EXCP_RTE);
4014}
4015
4016DISAS_INSN(movec)
4017{
4018    uint16_t ext;
4019    TCGv reg;
4020
4021    if (IS_USER(s)) {
4022        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
4023        return;
4024    }
4025
4026    ext = read_im16(env, s);
4027
4028    if (ext & 0x8000) {
4029        reg = AREG(ext, 12);
4030    } else {
4031        reg = DREG(ext, 12);
4032    }
4033    gen_helper_movec(cpu_env, tcg_const_i32(ext & 0xfff), reg);
4034    gen_lookup_tb(s);
4035}
4036
4037DISAS_INSN(intouch)
4038{
4039    if (IS_USER(s)) {
4040        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
4041        return;
4042    }
4043    /* ICache fetch.  Implement as no-op.  */
4044}
4045
4046DISAS_INSN(cpushl)
4047{
4048    if (IS_USER(s)) {
4049        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
4050        return;
4051    }
4052    /* Cache push/invalidate.  Implement as no-op.  */
4053}
4054
4055DISAS_INSN(wddata)
4056{
4057    gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
4058}
4059
4060DISAS_INSN(wdebug)
4061{
4062    M68kCPU *cpu = m68k_env_get_cpu(env);
4063
4064    if (IS_USER(s)) {
4065        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
4066        return;
4067    }
4068    /* TODO: Implement wdebug.  */
4069    cpu_abort(CPU(cpu), "WDEBUG not implemented");
4070}
4071
4072DISAS_INSN(trap)
4073{
4074    gen_exception(s, s->pc - 2, EXCP_TRAP0 + (insn & 0xf));
4075}
4076
4077/* ??? FP exceptions are not implemented.  Most exceptions are deferred until
4078   immediately before the next FP instruction is executed.  */
4079DISAS_INSN(fpu)
4080{
4081    uint16_t ext;
4082    int32_t offset;
4083    int opmode;
4084    TCGv_i64 src;
4085    TCGv_i64 dest;
4086    TCGv_i64 res;
4087    TCGv tmp32;
4088    int round;
4089    int set_dest;
4090    int opsize;
4091
4092    ext = read_im16(env, s);
4093    opmode = ext & 0x7f;
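    /* Bits 15-13 of the extension word select the operation class:
       register/memory arithmetic, fmove to memory, moves to/from the
       FP control registers, or fmovem.  */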
4094    switch ((ext >> 13) & 7) {
4095    case 0: case 2:
4096        break;
4097    case 1:
4098        goto undef;
4099    case 3: /* fmove out */
4100        src = FREG(ext, 7);
4101        tmp32 = tcg_temp_new_i32();
4102        /* fmove */
4103        /* ??? TODO: Proper behavior on overflow.  */
4104        switch ((ext >> 10) & 7) {
4105        case 0:
4106            opsize = OS_LONG;
4107            gen_helper_f64_to_i32(tmp32, cpu_env, src);
4108            break;
4109        case 1:
4110            opsize = OS_SINGLE;
4111            gen_helper_f64_to_f32(tmp32, cpu_env, src);
4112            break;
4113        case 4:
4114            opsize = OS_WORD;
4115            gen_helper_f64_to_i32(tmp32, cpu_env, src);
4116            break;
4117        case 5: /* OS_DOUBLE */
4118            tcg_gen_mov_i32(tmp32, AREG(insn, 0));
4119            switch ((insn >> 3) & 7) {
4120            case 2:
4121            case 3:
4122                break;
4123            case 4:
4124                tcg_gen_addi_i32(tmp32, tmp32, -8);
4125                break;
4126            case 5:
4127                offset = cpu_ldsw_code(env, s->pc);
4128                s->pc += 2;
4129                tcg_gen_addi_i32(tmp32, tmp32, offset);
4130                break;
4131            default:
4132                goto undef;
4133            }
4134            gen_store64(s, tmp32, src);
4135            switch ((insn >> 3) & 7) {
4136            case 3:
4137                tcg_gen_addi_i32(tmp32, tmp32, 8);
4138                tcg_gen_mov_i32(AREG(insn, 0), tmp32);
4139                break;
4140            case 4:
4141                tcg_gen_mov_i32(AREG(insn, 0), tmp32);
4142                break;
4143            }
4144            tcg_temp_free_i32(tmp32);
4145            return;
4146        case 6:
4147            opsize = OS_BYTE;
4148            gen_helper_f64_to_i32(tmp32, cpu_env, src);
4149            break;
4150        default:
4151            goto undef;
4152        }
4153        DEST_EA(env, insn, opsize, tmp32, NULL);
4154        tcg_temp_free_i32(tmp32);
4155        return;
4156    case 4: /* fmove to control register.  */
4157        switch ((ext >> 10) & 7) {
4158        case 4: /* FPCR */
4159            /* Not implemented.  Ignore writes.  */
4160            break;
4161        case 1: /* FPIAR */
4162        case 2: /* FPSR */
4163        default:
4164            cpu_abort(NULL, "Unimplemented: fmove to control %d",
4165                      (ext >> 10) & 7);
4166        }
4167        break;
4168    case 5: /* fmove from control register.  */
4169        switch ((ext >> 10) & 7) {
4170        case 4: /* FPCR */
4171            /* Not implemented.  Always return zero.  */
4172            tmp32 = tcg_const_i32(0);
4173            break;
4174        case 1: /* FPIAR */
4175        case 2: /* FPSR */
4176        default:
4177            cpu_abort(NULL, "Unimplemented: fmove from control %d",
4178                      (ext >> 10) & 7);
4179            goto undef;
4180        }
4181        DEST_EA(env, insn, OS_LONG, tmp32, NULL);
4182        break;
4183    case 6: /* fmovem */
4184    case 7:
4185        {
4186            TCGv addr;
4187            uint16_t mask;
4188            int i;
4189            if ((ext & 0x1f00) != 0x1000 || (ext & 0xff) == 0)
4190                goto undef;
4191            tmp32 = gen_lea(env, s, insn, OS_LONG);
4192            if (IS_NULL_QREG(tmp32)) {
4193                gen_addr_fault(s);
4194                return;
4195            }
4196            addr = tcg_temp_new_i32();
4197            tcg_gen_mov_i32(addr, tmp32);
4198            mask = 0x80;
4199            for (i = 0; i < 8; i++) {
4200                if (ext & mask) {
4201                    dest = FREG(i, 0);
4202                    if (ext & (1 << 13)) {
4203                        /* store */
4204                        tcg_gen_qemu_stf64(dest, addr, IS_USER(s));
4205                    } else {
4206                        /* load */
4207                        tcg_gen_qemu_ldf64(dest, addr, IS_USER(s));
4208                    }
4209                    if (ext & (mask - 1))
4210                        tcg_gen_addi_i32(addr, addr, 8);
4211                }
4212                mask >>= 1;
4213            }
4214            tcg_temp_free_i32(addr);
4215        }
4216        return;
4217    }
4218    if (ext & (1 << 14)) {
4219        /* Source effective address.  */
4220        switch ((ext >> 10) & 7) {
4221        case 0: opsize = OS_LONG; break;
4222        case 1: opsize = OS_SINGLE; break;
4223        case 4: opsize = OS_WORD; break;
4224        case 5: opsize = OS_DOUBLE; break;
4225        case 6: opsize = OS_BYTE; break;
4226        default:
4227            goto undef;
4228        }
4229        if (opsize == OS_DOUBLE) {
4230            tmp32 = tcg_temp_new_i32();
4231            tcg_gen_mov_i32(tmp32, AREG(insn, 0));
4232            switch ((insn >> 3) & 7) {
4233            case 2:
4234            case 3:
4235                break;
4236            case 4:
4237                tcg_gen_addi_i32(tmp32, tmp32, -8);
4238                break;
4239            case 5:
4240                offset = cpu_ldsw_code(env, s->pc);
4241                s->pc += 2;
4242                tcg_gen_addi_i32(tmp32, tmp32, offset);
4243                break;
4244            case 7:
4245                offset = cpu_ldsw_code(env, s->pc);
4246                offset += s->pc - 2;
4247                s->pc += 2;
4248                tcg_gen_addi_i32(tmp32, tmp32, offset);
4249                break;
4250            default:
4251                goto undef;
4252            }
4253            src = gen_load64(s, tmp32);
4254            switch ((insn >> 3) & 7) {
4255            case 3:
4256                tcg_gen_addi_i32(tmp32, tmp32, 8);
4257                tcg_gen_mov_i32(AREG(insn, 0), tmp32);
4258                break;
4259            case 4:
4260                tcg_gen_mov_i32(AREG(insn, 0), tmp32);
4261                break;
4262            }
4263            tcg_temp_free_i32(tmp32);
4264        } else {
4265            SRC_EA(env, tmp32, opsize, 1, NULL);
4266            src = tcg_temp_new_i64();
4267            switch (opsize) {
4268            case OS_LONG:
4269            case OS_WORD:
4270            case OS_BYTE:
4271                gen_helper_i32_to_f64(src, cpu_env, tmp32);
4272                break;
4273            case OS_SINGLE:
4274                gen_helper_f32_to_f64(src, cpu_env, tmp32);
4275                break;
4276            }
4277        }
4278    } else {
4279        /* Source register.  */
4280        src = FREG(ext, 10);
4281    }
4282    dest = FREG(ext, 7);
4283    res = tcg_temp_new_i64();
4284    if (opmode != 0x3a)
4285        tcg_gen_mov_f64(res, dest);
4286    round = 1;
4287    set_dest = 1;
4288    switch (opmode) {
4289    case 0: case 0x40: case 0x44: /* fmove */
4290        tcg_gen_mov_f64(res, src);
4291        break;
4292    case 1: /* fint */
4293        gen_helper_iround_f64(res, cpu_env, src);
4294        round = 0;
4295        break;
4296    case 3: /* fintrz */
4297        gen_helper_itrunc_f64(res, cpu_env, src);
4298        round = 0;
4299        break;
4300    case 4: case 0x41: case 0x45: /* fsqrt */
4301        gen_helper_sqrt_f64(res, cpu_env, src);
4302        break;
4303    case 0x18: case 0x58: case 0x5c: /* fabs */
4304        gen_helper_abs_f64(res, src);
4305        break;
4306    case 0x1a: case 0x5a: case 0x5e: /* fneg */
4307        gen_helper_chs_f64(res, src);
4308        break;
4309    case 0x20: case 0x60: case 0x64: /* fdiv */
4310        gen_helper_div_f64(res, cpu_env, res, src);
4311        break;
4312    case 0x22: case 0x62: case 0x66: /* fadd */
4313        gen_helper_add_f64(res, cpu_env, res, src);
4314        break;
4315    case 0x23: case 0x63: case 0x67: /* fmul */
4316        gen_helper_mul_f64(res, cpu_env, res, src);
4317        break;
4318    case 0x28: case 0x68: case 0x6c: /* fsub */
4319        gen_helper_sub_f64(res, cpu_env, res, src);
4320        break;
4321    case 0x38: /* fcmp */
4322        gen_helper_sub_cmp_f64(res, cpu_env, res, src);
4323        set_dest = 0;
4324        round = 0;
4325        break;
4326    case 0x3a: /* ftst */
4327        tcg_gen_mov_f64(res, src);
4328        set_dest = 0;
4329        round = 0;
4330        break;
4331    default:
4332        goto undef;
4333    }
4334    if (ext & (1 << 14)) {
4335        tcg_temp_free_i64(src);
4336    }
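    /* Round the result to single precision for the fs<op> variants and
       when the FPCR selects single-precision rounding; otherwise keep
       the full double result.  */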
4337    if (round) {
4338        if (opmode & 0x40) {
4339            if ((opmode & 0x4) != 0)
4340                round = 0;
4341        } else if ((s->fpcr & M68K_FPCR_PREC) == 0) {
4342            round = 0;
4343        }
4344    }
4345    if (round) {
4346        TCGv tmp = tcg_temp_new_i32();
4347        gen_helper_f64_to_f32(tmp, cpu_env, res);
4348        gen_helper_f32_to_f64(res, cpu_env, tmp);
4349        tcg_temp_free_i32(tmp);
4350    }
4351    tcg_gen_mov_f64(QREG_FP_RESULT, res);
4352    if (set_dest) {
4353        tcg_gen_mov_f64(dest, res);
4354    }
4355    tcg_temp_free_i64(res);
4356    return;
4357undef:
4358    /* FIXME: Is this right for offset addressing modes?  */
4359    s->pc -= 2;
4360    disas_undef_fpu(env, s, insn);
4361}
4362
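/* FBcc: PC-relative branch (16- or 32-bit displacement) on a
   floating-point condition derived from the classification of
   QREG_FP_RESULT by the compare_f64 helper.  */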
4363DISAS_INSN(fbcc)
4364{
4365    uint32_t offset;
4366    uint32_t addr;
4367    TCGv flag;
4368    TCGLabel *l1;
4369
4370    addr = s->pc;
4371    offset = cpu_ldsw_code(env, s->pc);
4372    s->pc += 2;
4373    if (insn & (1 << 6)) {
4374        offset = (offset << 16) | read_im16(env, s);
4375    }
4376
4377    l1 = gen_new_label();
4378    /* TODO: Raise BSUN exception.  */
4379    flag = tcg_temp_new();
4380    gen_helper_compare_f64(flag, cpu_env, QREG_FP_RESULT);
4381    /* Jump to l1 if condition is true.  */
4382    switch (insn & 0xf) {
4383    case 0: /* f */
4384        break;
4385    case 1: /* eq (=0) */
4386        tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(0), l1);
4387        break;
4388    case 2: /* ogt (=1) */
4389        tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(1), l1);
4390        break;
4391    case 3: /* oge (=0 or =1) */
4392        tcg_gen_brcond_i32(TCG_COND_LEU, flag, tcg_const_i32(1), l1);
4393        break;
4394    case 4: /* olt (=-1) */
4395        tcg_gen_brcond_i32(TCG_COND_LT, flag, tcg_const_i32(0), l1);
4396        break;
4397    case 5: /* ole (=-1 or =0) */
4398        tcg_gen_brcond_i32(TCG_COND_LE, flag, tcg_const_i32(0), l1);
4399        break;
4400    case 6: /* ogl (=-1 or =1) */
4401        tcg_gen_andi_i32(flag, flag, 1);
4402        tcg_gen_brcond_i32(TCG_COND_NE, flag, tcg_const_i32(0), l1);
4403        break;
4404    case 7: /* or (=2) */
4405        tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(2), l1);
4406        break;
4407    case 8: /* un (<2) */
4408        tcg_gen_brcond_i32(TCG_COND_LT, flag, tcg_const_i32(2), l1);
4409        break;
4410    case 9: /* ueq (=0 or =2) */
4411        tcg_gen_andi_i32(flag, flag, 1);
4412        tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(0), l1);
4413        break;
4414    case 10: /* ugt (>0) */
4415        tcg_gen_brcond_i32(TCG_COND_GT, flag, tcg_const_i32(0), l1);
4416        break;
4417    case 11: /* uge (>=0) */
4418        tcg_gen_brcond_i32(TCG_COND_GE, flag, tcg_const_i32(0), l1);
4419        break;
4420    case 12: /* ult (=-1 or =2) */
4421        tcg_gen_brcond_i32(TCG_COND_GEU, flag, tcg_const_i32(2), l1);
4422        break;
4423    case 13: /* ule (!=1) */
4424        tcg_gen_brcond_i32(TCG_COND_NE, flag, tcg_const_i32(1), l1);
4425        break;
4426    case 14: /* ne (!=0) */
4427        tcg_gen_brcond_i32(TCG_COND_NE, flag, tcg_const_i32(0), l1);
4428        break;
4429    case 15: /* t */
4430        tcg_gen_br(l1);
4431        break;
4432    }
4433    gen_jmp_tb(s, 0, s->pc);
4434    gen_set_label(l1);
4435    gen_jmp_tb(s, 1, addr + offset);
4436}
4437
4438DISAS_INSN(frestore)
4439{
4440    M68kCPU *cpu = m68k_env_get_cpu(env);
4441
4442    /* TODO: Implement frestore.  */
4443    cpu_abort(CPU(cpu), "FRESTORE not implemented");
4444}
4445
4446DISAS_INSN(fsave)
4447{
4448    M68kCPU *cpu = m68k_env_get_cpu(env);
4449
4450    /* TODO: Implement fsave.  */
4451    cpu_abort(CPU(cpu), "FSAVE not implemented");
4452}
4453
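/* Extract one 16-bit MAC operand from a 32-bit register.  Which half
   is taken and how it is aligned or extended depends on the MACSR
   fractional and signed/unsigned modes.  */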
4454static inline TCGv gen_mac_extract_word(DisasContext *s, TCGv val, int upper)
4455{
4456    TCGv tmp = tcg_temp_new();
4457    if (s->env->macsr & MACSR_FI) {
4458        if (upper)
4459            tcg_gen_andi_i32(tmp, val, 0xffff0000);
4460        else
4461            tcg_gen_shli_i32(tmp, val, 16);
4462    } else if (s->env->macsr & MACSR_SU) {
4463        if (upper)
4464            tcg_gen_sari_i32(tmp, val, 16);
4465        else
4466            tcg_gen_ext16s_i32(tmp, val);
4467    } else {
4468        if (upper)
4469            tcg_gen_shri_i32(tmp, val, 16);
4470        else
4471            tcg_gen_ext16u_i32(tmp, val);
4472    }
4473    return tmp;
4474}
4475
4476static void gen_mac_clear_flags(void)
4477{
4478    tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR,
4479                     ~(MACSR_V | MACSR_Z | MACSR_N | MACSR_EV));
4480}
4481
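/* ColdFire EMAC multiply-accumulate, optionally combined with a
   parallel load and, on EMAC_B, a dual accumulate.  The product is
   built in s->mactmp and then added to or subtracted from the selected
   accumulator, with saturation handled by helpers.  */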
4482DISAS_INSN(mac)
4483{
4484    TCGv rx;
4485    TCGv ry;
4486    uint16_t ext;
4487    int acc;
4488    TCGv tmp;
4489    TCGv addr;
4490    TCGv loadval;
4491    int dual;
4492    TCGv saved_flags;
4493
4494    if (!s->done_mac) {
4495        s->mactmp = tcg_temp_new_i64();
4496        s->done_mac = 1;
4497    }
4498
4499    ext = read_im16(env, s);
4500
4501    acc = ((insn >> 7) & 1) | ((ext >> 3) & 2);
4502    dual = ((insn & 0x30) != 0 && (ext & 3) != 0);
4503    if (dual && !m68k_feature(s->env, M68K_FEATURE_CF_EMAC_B)) {
4504        disas_undef(env, s, insn);
4505        return;
4506    }
4507    if (insn & 0x30) {
4508        /* MAC with load.  */
4509        tmp = gen_lea(env, s, insn, OS_LONG);
4510        addr = tcg_temp_new();
4511        tcg_gen_and_i32(addr, tmp, QREG_MAC_MASK);
4512        /* Load the value now to ensure correct exception behavior.
4513           Perform writeback after reading the MAC inputs.  */
4514        loadval = gen_load(s, OS_LONG, addr, 0);
4515
4516        acc ^= 1;
4517        rx = (ext & 0x8000) ? AREG(ext, 12) : DREG(insn, 12);
4518        ry = (ext & 8) ? AREG(ext, 0) : DREG(ext, 0);
4519    } else {
4520        loadval = addr = NULL_QREG;
4521        rx = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
4522        ry = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
4523    }
4524
4525    gen_mac_clear_flags();
4526#if 0
4527    l1 = -1;
4528    /* Disabled because conditional branches clobber temporary vars.  */
4529    if ((s->env->macsr & MACSR_OMC) != 0 && !dual) {
4530        /* Skip the multiply if we know we will ignore it.  */
4531        l1 = gen_new_label();
4532        tmp = tcg_temp_new();
4533        tcg_gen_andi_i32(tmp, QREG_MACSR, 1 << (acc + 8));
4534        gen_op_jmp_nz32(tmp, l1);
4535    }
4536#endif
4537
4538    if ((ext & 0x0800) == 0) {
4539        /* Word.  */
4540        rx = gen_mac_extract_word(s, rx, (ext & 0x80) != 0);
4541        ry = gen_mac_extract_word(s, ry, (ext & 0x40) != 0);
4542    }
4543    if (s->env->macsr & MACSR_FI) {
4544        gen_helper_macmulf(s->mactmp, cpu_env, rx, ry);
4545    } else {
4546        if (s->env->macsr & MACSR_SU)
4547            gen_helper_macmuls(s->mactmp, cpu_env, rx, ry);
4548        else
4549            gen_helper_macmulu(s->mactmp, cpu_env, rx, ry);
4550        switch ((ext >> 9) & 3) {
4551        case 1:
4552            tcg_gen_shli_i64(s->mactmp, s->mactmp, 1);
4553            break;
4554        case 3:
4555            tcg_gen_shri_i64(s->mactmp, s->mactmp, 1);
4556            break;
4557        }
4558    }
4559
4560    if (dual) {
4561        /* Save the overflow flag from the multiply.  */
4562        saved_flags = tcg_temp_new();
4563        tcg_gen_mov_i32(saved_flags, QREG_MACSR);
4564    } else {
4565        saved_flags = NULL_QREG;
4566    }
4567
4568#if 0
4569    /* Disabled because conditional branches clobber temporary vars.  */
4570    if ((s->env->macsr & MACSR_OMC) != 0 && dual) {
4571        /* Skip the accumulate if the value is already saturated.  */
4572        l1 = gen_new_label();
4573        tmp = tcg_temp_new();
4574        gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc));
4575        gen_op_jmp_nz32(tmp, l1);
4576    }
4577#endif
4578
4579    if (insn & 0x100)
4580        tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
4581    else
4582        tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
4583
4584    if (s->env->macsr & MACSR_FI)
4585        gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
4586    else if (s->env->macsr & MACSR_SU)
4587        gen_helper_macsats(cpu_env, tcg_const_i32(acc));
4588    else
4589        gen_helper_macsatu(cpu_env, tcg_const_i32(acc));
4590
4591#if 0
4592    /* Disabled because conditional branches clobber temporary vars.  */
4593    if (l1 != -1)
4594        gen_set_label(l1);
4595#endif
4596
4597    if (dual) {
4598        /* Dual accumulate variant.  */
4599        acc = (ext >> 2) & 3;
4600        /* Restore the overflow flag from the multiplier.  */
4601        tcg_gen_mov_i32(QREG_MACSR, saved_flags);
4602#if 0
4603        /* Disabled because conditional branches clobber temporary vars.  */
4604        if ((s->env->macsr & MACSR_OMC) != 0) {
4605            /* Skip the accumulate if the value is already saturated.  */
4606            l1 = gen_new_label();
4607            tmp = tcg_temp_new();
4608            gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc));
4609            gen_op_jmp_nz32(tmp, l1);
4610        }
4611#endif
4612        if (ext & 2)
4613            tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
4614        else
4615            tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
4616        if (s->env->macsr & MACSR_FI)
4617            gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
4618        else if (s->env->macsr & MACSR_SU)
4619            gen_helper_macsats(cpu_env, tcg_const_i32(acc));
4620        else
4621            gen_helper_macsatu(cpu_env, tcg_const_i32(acc));
4622#if 0
4623        /* Disabled because conditional branches clobber temporary vars.  */
4624        if (l1 != -1)
4625            gen_set_label(l1);
4626#endif
4627    }
4628    gen_helper_mac_set_flags(cpu_env, tcg_const_i32(acc));
4629
4630    if (insn & 0x30) {
4631        TCGv rw;
4632        rw = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
4633        tcg_gen_mov_i32(rw, loadval);
4634        /* FIXME: Should address writeback happen with the masked or
4635           unmasked value?  */
4636        switch ((insn >> 3) & 7) {
4637        case 3: /* Post-increment.  */
4638            tcg_gen_addi_i32(AREG(insn, 0), addr, 4);
4639            break;
4640        case 4: /* Pre-decrement.  */
4641            tcg_gen_mov_i32(AREG(insn, 0), addr);
4642        }
4643    }
4644}
4645
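/* Move an accumulator to a register, converting it according to the
   MACSR mode; if bit 6 of the opcode is set the accumulator and its
   overflow (PAV) flag are cleared afterwards.  */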
4646DISAS_INSN(from_mac)
4647{
4648    TCGv rx;
4649    TCGv_i64 acc;
4650    int accnum;
4651
4652    rx = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
4653    accnum = (insn >> 9) & 3;
4654    acc = MACREG(accnum);
4655    if (s->env->macsr & MACSR_FI) {
4656        gen_helper_get_macf(rx, cpu_env, acc);
4657    } else if ((s->env->macsr & MACSR_OMC) == 0) {
4658        tcg_gen_extrl_i64_i32(rx, acc);
4659    } else if (s->env->macsr & MACSR_SU) {
4660        gen_helper_get_macs(rx, acc);
4661    } else {
4662        gen_helper_get_macu(rx, acc);
4663    }
4664    if (insn & 0x40) {
4665        tcg_gen_movi_i64(acc, 0);
4666        tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
4667    }
4668}
4669
4670DISAS_INSN(move_mac)
4671{
4672    /* FIXME: This can be done without a helper.  */
4673    int src;
4674    TCGv dest;
4675    src = insn & 3;
4676    dest = tcg_const_i32((insn >> 9) & 3);
4677    gen_helper_mac_move(cpu_env, dest, tcg_const_i32(src));
4678    gen_mac_clear_flags();
4679    gen_helper_mac_set_flags(cpu_env, dest);
4680}
4681
4682DISAS_INSN(from_macsr)
4683{
4684    TCGv reg;
4685
4686    reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
4687    tcg_gen_mov_i32(reg, QREG_MACSR);
4688}
4689
4690DISAS_INSN(from_mask)
4691{
4692    TCGv reg;
4693    reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
4694    tcg_gen_mov_i32(reg, QREG_MAC_MASK);
4695}
4696
4697DISAS_INSN(from_mext)
4698{
4699    TCGv reg;
4700    TCGv acc;
4701    reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
4702    acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
4703    if (s->env->macsr & MACSR_FI)
4704        gen_helper_get_mac_extf(reg, cpu_env, acc);
4705    else
4706        gen_helper_get_mac_exti(reg, cpu_env, acc);
4707}
4708
4709DISAS_INSN(macsr_to_ccr)
4710{
4711    TCGv tmp = tcg_temp_new();
4712    tcg_gen_andi_i32(tmp, QREG_MACSR, 0xf);
4713    gen_helper_set_sr(cpu_env, tmp);
4714    tcg_temp_free(tmp);
4715    set_cc_op(s, CC_OP_FLAGS);
4716}
4717
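/* Load an accumulator from an EA operand, extending or shifting the
   value according to the MACSR mode, then recompute the accumulator
   flags.  */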
4718DISAS_INSN(to_mac)
4719{
4720    TCGv_i64 acc;
4721    TCGv val;
4722    int accnum;
4723    accnum = (insn >> 9) & 3;
4724    acc = MACREG(accnum);
4725    SRC_EA(env, val, OS_LONG, 0, NULL);
4726    if (s->env->macsr & MACSR_FI) {
4727        tcg_gen_ext_i32_i64(acc, val);
4728        tcg_gen_shli_i64(acc, acc, 8);
4729    } else if (s->env->macsr & MACSR_SU) {
4730        tcg_gen_ext_i32_i64(acc, val);
4731    } else {
4732        tcg_gen_extu_i32_i64(acc, val);
4733    }
4734    tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
4735    gen_mac_clear_flags();
4736    gen_helper_mac_set_flags(cpu_env, tcg_const_i32(accnum));
4737}
4738
4739DISAS_INSN(to_macsr)
4740{
4741    TCGv val;
4742    SRC_EA(env, val, OS_LONG, 0, NULL);
4743    gen_helper_set_macsr(cpu_env, val);
4744    gen_lookup_tb(s);
4745}
4746
4747DISAS_INSN(to_mask)
4748{
4749    TCGv val;
4750    SRC_EA(env, val, OS_LONG, 0, NULL);
4751    tcg_gen_ori_i32(QREG_MAC_MASK, val, 0xffff0000);
4752}
4753
4754DISAS_INSN(to_mext)
4755{
4756    TCGv val;
4757    TCGv acc;
4758    SRC_EA(env, val, OS_LONG, 0, NULL);
4759    acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
4760    if (s->env->macsr & MACSR_FI)
4761        gen_helper_set_mac_extf(cpu_env, val, acc);
4762    else if (s->env->macsr & MACSR_SU)
4763        gen_helper_set_mac_exts(cpu_env, val, acc);
4764    else
4765        gen_helper_set_mac_extu(cpu_env, val, acc);
4766}
4767
4768static disas_proc opcode_table[65536];
4769
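/* Expand an (opcode, mask) pair into individual entries of
   opcode_table[].  */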
4770static void
4771register_opcode (disas_proc proc, uint16_t opcode, uint16_t mask)
4772{
4773  int i;
4774  int from;
4775  int to;
4776
4777  /* Sanity check.  All set bits must be included in the mask.  */
4778  if (opcode & ~mask) {
4779      fprintf(stderr,
4780              "qemu internal error: bogus opcode definition %04x/%04x\n",
4781              opcode, mask);
4782      abort();
4783  }
4784  /* This could probably be cleverer.  For now just optimize the case where
4785     the top bits are known.  */
4786  /* Find the first zero bit in the mask.  */
4787  i = 0x8000;
4788  while ((i & mask) != 0)
4789      i >>= 1;
4790  /* Iterate over all combinations of this and lower bits.  */
4791  if (i == 0)
4792      i = 1;
4793  else
4794      i <<= 1;
4795  from = opcode & ~(i - 1);
4796  to = from + i;
4797  for (i = from; i < to; i++) {
4798      if ((i & mask) == opcode)
4799          opcode_table[i] = proc;
4800  }
4801}
4802
4803/* Register m68k opcode handlers.  Order is important.
4804   Later insns override earlier ones.  */
4805void register_m68k_insns (CPUM68KState *env)
4806{
4807    /* Build the opcode table only once to avoid
4808       multithreading issues. */
4809    if (opcode_table[0] != NULL) {
4810        return;
4811    }
4812
4813    /* Use BASE() for instructions available
4814     * on both CF_ISA_A and M68000.
4815     */
4816#define BASE(name, opcode, mask) \
4817    register_opcode(disas_##name, 0x##opcode, 0x##mask)
4818#define INSN(name, opcode, mask, feature) do { \
4819    if (m68k_feature(env, M68K_FEATURE_##feature)) \
4820        BASE(name, opcode, mask); \
4821    } while(0)
4822    BASE(undef,     0000, 0000);
4823    INSN(arith_im,  0080, fff8, CF_ISA_A);
4824    INSN(arith_im,  0000, ff00, M68000);
4825    INSN(undef,     00c0, ffc0, M68000);
4826    INSN(bitrev,    00c0, fff8, CF_ISA_APLUSC);
4827    BASE(bitop_reg, 0100, f1c0);
4828    BASE(bitop_reg, 0140, f1c0);
4829    BASE(bitop_reg, 0180, f1c0);
4830    BASE(bitop_reg, 01c0, f1c0);
4831    INSN(arith_im,  0280, fff8, CF_ISA_A);
4832    INSN(arith_im,  0200, ff00, M68000);
4833    INSN(undef,     02c0, ffc0, M68000);
4834    INSN(byterev,   02c0, fff8, CF_ISA_APLUSC);
4835    INSN(arith_im,  0480, fff8, CF_ISA_A);
4836    INSN(arith_im,  0400, ff00, M68000);
4837    INSN(undef,     04c0, ffc0, M68000);
4838    INSN(arith_im,  0600, ff00, M68000);
4839    INSN(undef,     06c0, ffc0, M68000);
4840    INSN(ff1,       04c0, fff8, CF_ISA_APLUSC);
4841    INSN(arith_im,  0680, fff8, CF_ISA_A);
4842    INSN(arith_im,  0c00, ff38, CF_ISA_A);
4843    INSN(arith_im,  0c00, ff00, M68000);
4844    BASE(bitop_im,  0800, ffc0);
4845    BASE(bitop_im,  0840, ffc0);
4846    BASE(bitop_im,  0880, ffc0);
4847    BASE(bitop_im,  08c0, ffc0);
4848    INSN(arith_im,  0a80, fff8, CF_ISA_A);
4849    INSN(arith_im,  0a00, ff00, M68000);
4850    INSN(cas,       0ac0, ffc0, CAS);
4851    INSN(cas,       0cc0, ffc0, CAS);
4852    INSN(cas,       0ec0, ffc0, CAS);
4853    INSN(cas2w,     0cfc, ffff, CAS);
4854    INSN(cas2l,     0efc, ffff, CAS);
4855    BASE(move,      1000, f000);
4856    BASE(move,      2000, f000);
4857    BASE(move,      3000, f000);
4858    INSN(strldsr,   40e7, ffff, CF_ISA_APLUSC);
4859    INSN(negx,      4080, fff8, CF_ISA_A);
4860    INSN(negx,      4000, ff00, M68000);
4861    INSN(undef,     40c0, ffc0, M68000);
4862    INSN(move_from_sr, 40c0, fff8, CF_ISA_A);
4863    INSN(move_from_sr, 40c0, ffc0, M68000);
4864    BASE(lea,       41c0, f1c0);
4865    BASE(clr,       4200, ff00);
4866    BASE(undef,     42c0, ffc0);
4867    INSN(move_from_ccr, 42c0, fff8, CF_ISA_A);
4868    INSN(move_from_ccr, 42c0, ffc0, M68000);
4869    INSN(neg,       4480, fff8, CF_ISA_A);
4870    INSN(neg,       4400, ff00, M68000);
4871    INSN(undef,     44c0, ffc0, M68000);
4872    BASE(move_to_ccr, 44c0, ffc0);
4873    INSN(not,       4680, fff8, CF_ISA_A);
4874    INSN(not,       4600, ff00, M68000);
4875    INSN(undef,     46c0, ffc0, M68000);
4876    INSN(move_to_sr, 46c0, ffc0, CF_ISA_A);
4877    INSN(nbcd,      4800, ffc0, M68000);
4878    INSN(linkl,     4808, fff8, M68000);
4879    BASE(pea,       4840, ffc0);
4880    BASE(swap,      4840, fff8);
4881    INSN(bkpt,      4848, fff8, BKPT);
4882    INSN(movem,     48d0, fbf8, CF_ISA_A);
4883    INSN(movem,     48e8, fbf8, CF_ISA_A);
4884    INSN(movem,     4880, fb80, M68000);
4885    BASE(ext,       4880, fff8);
4886    BASE(ext,       48c0, fff8);
4887    BASE(ext,       49c0, fff8);
4888    BASE(tst,       4a00, ff00);
4889    INSN(tas,       4ac0, ffc0, CF_ISA_B);
4890    INSN(tas,       4ac0, ffc0, M68000);
4891    INSN(halt,      4ac8, ffff, CF_ISA_A);
4892    INSN(pulse,     4acc, ffff, CF_ISA_A);
4893    BASE(illegal,   4afc, ffff);
4894    INSN(mull,      4c00, ffc0, CF_ISA_A);
4895    INSN(mull,      4c00, ffc0, LONG_MULDIV);
4896    INSN(divl,      4c40, ffc0, CF_ISA_A);
4897    INSN(divl,      4c40, ffc0, LONG_MULDIV);
4898    INSN(sats,      4c80, fff8, CF_ISA_B);
4899    BASE(trap,      4e40, fff0);
4900    BASE(link,      4e50, fff8);
4901    BASE(unlk,      4e58, fff8);
4902    INSN(move_to_usp, 4e60, fff8, USP);
4903    INSN(move_from_usp, 4e68, fff8, USP);
4904    BASE(nop,       4e71, ffff);
4905    BASE(stop,      4e72, ffff);
4906    BASE(rte,       4e73, ffff);
4907    BASE(rts,       4e75, ffff);
4908    INSN(movec,     4e7b, ffff, CF_ISA_A);
4909    BASE(jump,      4e80, ffc0);
4910    BASE(jump,      4ec0, ffc0);
4911    INSN(addsubq,   5000, f080, M68000);
4912    BASE(addsubq,   5080, f0c0);
4913    INSN(scc,       50c0, f0f8, CF_ISA_A); /* Scc.B Dx   */
4914    INSN(scc,       50c0, f0c0, M68000);   /* Scc.B <EA> */
4915    INSN(dbcc,      50c8, f0f8, M68000);
4916    INSN(tpf,       51f8, fff8, CF_ISA_A);
4917
4918    /* Branch instructions.  */
4919    BASE(branch,    6000, f000);
4920    /* Disable long branch instructions, then add back the ones we want.  */
4921    BASE(undef,     60ff, f0ff); /* All long branches.  */
4922    INSN(branch,    60ff, f0ff, CF_ISA_B);
4923    INSN(undef,     60ff, ffff, CF_ISA_B); /* bra.l */
4924    INSN(branch,    60ff, ffff, BRAL);
4925    INSN(branch,    60ff, f0ff, BCCL);
4926
4927    BASE(moveq,     7000, f100);
4928    INSN(mvzs,      7100, f100, CF_ISA_B);
4929    BASE(or,        8000, f000);
4930    BASE(divw,      80c0, f0c0);
4931    INSN(sbcd_reg,  8100, f1f8, M68000);
4932    INSN(sbcd_mem,  8108, f1f8, M68000);
4933    BASE(addsub,    9000, f000);
4934    INSN(undef,     90c0, f0c0, CF_ISA_A);
4935    INSN(subx_reg,  9180, f1f8, CF_ISA_A);
4936    INSN(subx_reg,  9100, f138, M68000);
4937    INSN(subx_mem,  9108, f138, M68000);
4938    INSN(suba,      91c0, f1c0, CF_ISA_A);
4939    INSN(suba,      90c0, f0c0, M68000);
4940
4941    BASE(undef_mac, a000, f000);
4942    INSN(mac,       a000, f100, CF_EMAC);
4943    INSN(from_mac,  a180, f9b0, CF_EMAC);
4944    INSN(move_mac,  a110, f9fc, CF_EMAC);
4945    INSN(from_macsr,a980, f9f0, CF_EMAC);
4946    INSN(from_mask, ad80, fff0, CF_EMAC);
4947    INSN(from_mext, ab80, fbf0, CF_EMAC);
4948    INSN(macsr_to_ccr, a9c0, ffff, CF_EMAC);
4949    INSN(to_mac,    a100, f9c0, CF_EMAC);
4950    INSN(to_macsr,  a900, ffc0, CF_EMAC);
4951    INSN(to_mext,   ab00, fbc0, CF_EMAC);
4952    INSN(to_mask,   ad00, ffc0, CF_EMAC);
4953
4954    INSN(mov3q,     a140, f1c0, CF_ISA_B);
4955    INSN(cmp,       b000, f1c0, CF_ISA_B); /* cmp.b */
4956    INSN(cmp,       b040, f1c0, CF_ISA_B); /* cmp.w */
4957    INSN(cmpa,      b0c0, f1c0, CF_ISA_B); /* cmpa.w */
4958    INSN(cmp,       b080, f1c0, CF_ISA_A);
4959    INSN(cmpa,      b1c0, f1c0, CF_ISA_A);
4960    INSN(cmp,       b000, f100, M68000);
4961    INSN(eor,       b100, f100, M68000);
4962    INSN(cmpm,      b108, f138, M68000);
4963    INSN(cmpa,      b0c0, f0c0, M68000);
4964    INSN(eor,       b180, f1c0, CF_ISA_A);
4965    BASE(and,       c000, f000);
4966    INSN(exg_dd,    c140, f1f8, M68000);
4967    INSN(exg_aa,    c148, f1f8, M68000);
4968    INSN(exg_da,    c188, f1f8, M68000);
4969    BASE(mulw,      c0c0, f0c0);
4970    INSN(abcd_reg,  c100, f1f8, M68000);
4971    INSN(abcd_mem,  c108, f1f8, M68000);
4972    BASE(addsub,    d000, f000);
4973    INSN(undef,     d0c0, f0c0, CF_ISA_A);
4974    INSN(addx_reg,      d180, f1f8, CF_ISA_A);
4975    INSN(addx_reg,  d100, f138, M68000);
4976    INSN(addx_mem,  d108, f138, M68000);
4977    INSN(adda,      d1c0, f1c0, CF_ISA_A);
4978    INSN(adda,      d0c0, f0c0, M68000);
4979    INSN(shift_im,  e080, f0f0, CF_ISA_A);
4980    INSN(shift_reg, e0a0, f0f0, CF_ISA_A);
4981    INSN(shift8_im, e000, f0f0, M68000);
4982    INSN(shift16_im, e040, f0f0, M68000);
4983    INSN(shift_im,  e080, f0f0, M68000);
4984    INSN(shift8_reg, e020, f0f0, M68000);
4985    INSN(shift16_reg, e060, f0f0, M68000);
4986    INSN(shift_reg, e0a0, f0f0, M68000);
4987    INSN(shift_mem, e0c0, fcc0, M68000);
4988    INSN(rotate_im, e090, f0f0, M68000);
4989    INSN(rotate8_im, e010, f0f0, M68000);
4990    INSN(rotate16_im, e050, f0f0, M68000);
4991    INSN(rotate_reg, e0b0, f0f0, M68000);
4992    INSN(rotate8_reg, e030, f0f0, M68000);
4993    INSN(rotate16_reg, e070, f0f0, M68000);
4994    INSN(rotate_mem, e4c0, fcc0, M68000);
4995    INSN(bfext_mem, e9c0, fdc0, BITFIELD);  /* bfextu & bfexts */
4996    INSN(bfext_reg, e9c0, fdf8, BITFIELD);
4997    INSN(bfins_mem, efc0, ffc0, BITFIELD);
4998    INSN(bfins_reg, efc0, fff8, BITFIELD);
4999    INSN(bfop_mem, eac0, ffc0, BITFIELD);   /* bfchg */
5000    INSN(bfop_reg, eac0, fff8, BITFIELD);   /* bfchg */
5001    INSN(bfop_mem, ecc0, ffc0, BITFIELD);   /* bfclr */
5002    INSN(bfop_reg, ecc0, fff8, BITFIELD);   /* bfclr */
5003    INSN(bfop_mem, edc0, ffc0, BITFIELD);   /* bfffo */
5004    INSN(bfop_reg, edc0, fff8, BITFIELD);   /* bfffo */
5005    INSN(bfop_mem, eec0, ffc0, BITFIELD);   /* bfset */
5006    INSN(bfop_reg, eec0, fff8, BITFIELD);   /* bfset */
5007    INSN(bfop_mem, e8c0, ffc0, BITFIELD);   /* bftst */
5008    INSN(bfop_reg, e8c0, fff8, BITFIELD);   /* bftst */
5009    INSN(undef_fpu, f000, f000, CF_ISA_A);
5010    INSN(fpu,       f200, ffc0, CF_FPU);
5011    INSN(fbcc,      f280, ffc0, CF_FPU);
5012    INSN(frestore,  f340, ffc0, CF_FPU);
5013    INSN(fsave,     f340, ffc0, CF_FPU);
5014    INSN(intouch,   f340, ffc0, CF_ISA_A);
5015    INSN(cpushl,    f428, ff38, CF_ISA_A);
5016    INSN(wddata,    fb00, ff00, CF_ISA_A);
5017    INSN(wdebug,    fbc0, ffc0, CF_ISA_A);
5018#undef INSN
5019}
5020
5021/* ??? Some of this implementation is not exception safe.  We should always
5022   write back the result to memory before setting the condition codes.  */
5023static void disas_m68k_insn(CPUM68KState * env, DisasContext *s)
5024{
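    /* Fetch one instruction word, dispatch through the 64K-entry
       opcode table, then flush any pending address register
       writebacks.  */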
5025    uint16_t insn = read_im16(env, s);
5026    opcode_table[insn](env, s, insn);
5027    do_writebacks(s);
5028}
5029
5030/* generate intermediate code for basic block 'tb'.  */
5031void gen_intermediate_code(CPUM68KState *env, TranslationBlock *tb)
5032{
5033    M68kCPU *cpu = m68k_env_get_cpu(env);
5034    CPUState *cs = CPU(cpu);
5035    DisasContext dc1, *dc = &dc1;
5036    target_ulong pc_start;
5037    int pc_offset;
5038    int num_insns;
5039    int max_insns;
5040
5041    /* generate intermediate code */
5042    pc_start = tb->pc;
5043
5044    dc->tb = tb;
5045
5046    dc->env = env;
5047    dc->is_jmp = DISAS_NEXT;
5048    dc->pc = pc_start;
5049    dc->cc_op = CC_OP_DYNAMIC;
5050    dc->cc_op_synced = 1;
5051    dc->singlestep_enabled = cs->singlestep_enabled;
5052    dc->fpcr = env->fpcr;
5053    dc->user = (env->sr & SR_S) == 0;
5054    dc->done_mac = 0;
5055    dc->writeback_mask = 0;
5056    num_insns = 0;
5057    max_insns = tb->cflags & CF_COUNT_MASK;
5058    if (max_insns == 0) {
5059        max_insns = CF_COUNT_MASK;
5060    }
5061    if (max_insns > TCG_MAX_INSNS) {
5062        max_insns = TCG_MAX_INSNS;
5063    }
5064
5065    gen_tb_start(tb);
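    /* Translate one instruction per iteration until we hit a jump,
       fill the op buffer, approach a page boundary, or reach the
       instruction count limit.  */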
5066    do {
5067        pc_offset = dc->pc - pc_start;
5068        gen_throws_exception = NULL;
5069        tcg_gen_insn_start(dc->pc, dc->cc_op);
5070        num_insns++;
5071
5072        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
5073            gen_exception(dc, dc->pc, EXCP_DEBUG);
5074            dc->is_jmp = DISAS_JUMP;
5075            /* The address covered by the breakpoint must be included in
5076               [tb->pc, tb->pc + tb->size) in order for it to be
5077               properly cleared -- thus we increment the PC here so that
5078               the logic setting tb->size below does the right thing.  */
5079            dc->pc += 2;
5080            break;
5081        }
5082
5083        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
5084            gen_io_start();
5085        }
5086
5087        dc->insn_pc = dc->pc;
5088        disas_m68k_insn(env, dc);
5089    } while (!dc->is_jmp && !tcg_op_buf_full() &&
5090             !cs->singlestep_enabled &&
5091             !singlestep &&
5092             (pc_offset) < (TARGET_PAGE_SIZE - 32) &&
5093             num_insns < max_insns);
5094
5095    if (tb->cflags & CF_LAST_IO)
5096        gen_io_end();
5097    if (unlikely(cs->singlestep_enabled)) {
5098        /* Make sure the pc is updated, and raise a debug exception.  */
5099        if (!dc->is_jmp) {
5100            update_cc_op(dc);
5101            tcg_gen_movi_i32(QREG_PC, dc->pc);
5102        }
5103        gen_helper_raise_exception(cpu_env, tcg_const_i32(EXCP_DEBUG));
5104    } else {
5105        switch(dc->is_jmp) {
5106        case DISAS_NEXT:
5107            update_cc_op(dc);
5108            gen_jmp_tb(dc, 0, dc->pc);
5109            break;
5110        default:
5111        case DISAS_JUMP:
5112        case DISAS_UPDATE:
5113            update_cc_op(dc);
5114            /* indicate that the hash table must be used to find the next TB */
5115            tcg_gen_exit_tb(0);
5116            break;
5117        case DISAS_TB_JUMP:
5118            /* nothing more to generate */
5119            break;
5120        }
5121    }
5122    gen_tb_end(tb, num_insns);
5123
5124#ifdef DEBUG_DISAS
5125    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
5126        && qemu_log_in_addr_range(pc_start)) {
5127        qemu_log_lock();
5128        qemu_log("----------------\n");
5129        qemu_log("IN: %s\n", lookup_symbol(pc_start));
5130        log_target_disas(cs, pc_start, dc->pc - pc_start, 0);
5131        qemu_log("\n");
5132        qemu_log_unlock();
5133    }
5134#endif
5135    tb->size = dc->pc - pc_start;
5136    tb->icount = num_insns;
5137}
5138
5139void m68k_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
5140                         int flags)
5141{
5142    M68kCPU *cpu = M68K_CPU(cs);
5143    CPUM68KState *env = &cpu->env;
5144    int i;
5145    uint16_t sr;
5146    CPU_DoubleU u;
5147    for (i = 0; i < 8; i++)
5148      {
5149        u.d = env->fregs[i];
5150        cpu_fprintf(f, "D%d = %08x   A%d = %08x   F%d = %08x%08x (%12g)\n",
5151                    i, env->dregs[i], i, env->aregs[i],
5152                    i, u.l.upper, u.l.lower, *(double *)&u.d);
5153      }
5154    cpu_fprintf (f, "PC = %08x   ", env->pc);
5155    sr = env->sr | cpu_m68k_get_ccr(env);
5156    cpu_fprintf(f, "SR = %04x %c%c%c%c%c ", sr, (sr & CCF_X) ? 'X' : '-',
5157                (sr & CCF_N) ? 'N' : '-', (sr & CCF_Z) ? 'Z' : '-',
5158                (sr & CCF_V) ? 'V' : '-', (sr & CCF_C) ? 'C' : '-');
5159    cpu_fprintf (f, "FPRESULT = %12g\n", *(double *)&env->fp_result);
5160}
5161
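/* Restore pc and, when it was statically known, cc_op from the values
   recorded at translation time by tcg_gen_insn_start().  */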
5162void restore_state_to_opc(CPUM68KState *env, TranslationBlock *tb,
5163                          target_ulong *data)
5164{
5165    int cc_op = data[1];
5166    env->pc = data[0];
5167    if (cc_op != CC_OP_DYNAMIC) {
5168        env->cc_op = cc_op;
5169    }
5170}
5171