qemu/target-m68k/translate.c
   1/*
   2 *  m68k translation
   3 *
   4 *  Copyright (c) 2005-2007 CodeSourcery
   5 *  Written by Paul Brook
   6 *
   7 * This library is free software; you can redistribute it and/or
   8 * modify it under the terms of the GNU Lesser General Public
   9 * License as published by the Free Software Foundation; either
  10 * version 2 of the License, or (at your option) any later version.
  11 *
  12 * This library is distributed in the hope that it will be useful,
  13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  15 * General Public License for more details.
  16 *
  17 * You should have received a copy of the GNU Lesser General Public
  18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
  19 */
  20
  21#include "qemu/osdep.h"
  22#include "cpu.h"
  23#include "disas/disas.h"
  24#include "exec/exec-all.h"
  25#include "tcg-op.h"
  26#include "qemu/log.h"
  27#include "exec/cpu_ldst.h"
  28
  29#include "exec/helper-proto.h"
  30#include "exec/helper-gen.h"
  31
  32#include "trace-tcg.h"
  33#include "exec/log.h"
  34
  35
  36//#define DEBUG_DISPATCH 1
  37
  38/* Fake floating point.  */
  39#define tcg_gen_mov_f64 tcg_gen_mov_i64
  40#define tcg_gen_qemu_ldf64 tcg_gen_qemu_ld64
  41#define tcg_gen_qemu_stf64 tcg_gen_qemu_st64
  42
  43#define DEFO32(name, offset) static TCGv QREG_##name;
  44#define DEFO64(name, offset) static TCGv_i64 QREG_##name;
  45#define DEFF64(name, offset) static TCGv_i64 QREG_##name;
  46#include "qregs.def"
  47#undef DEFO32
  48#undef DEFO64
  49#undef DEFF64
  50
  51static TCGv_i32 cpu_halted;
  52static TCGv_i32 cpu_exception_index;
  53
  54static TCGv_env cpu_env;
  55
  56static char cpu_reg_names[3*8*3 + 5*4];
  57static TCGv cpu_dregs[8];
  58static TCGv cpu_aregs[8];
  59static TCGv_i64 cpu_fregs[8];
  60static TCGv_i64 cpu_macc[4];
  61
  62#define REG(insn, pos) (((insn) >> (pos)) & 7)
  63#define DREG(insn, pos) cpu_dregs[REG(insn, pos)]
  64#define AREG(insn, pos) cpu_aregs[REG(insn, pos)]
  65#define FREG(insn, pos) cpu_fregs[REG(insn, pos)]
  66#define MACREG(acc) cpu_macc[acc]
  67#define QREG_SP cpu_aregs[7]
  68
  69static TCGv NULL_QREG;
  70#define IS_NULL_QREG(t) (TCGV_EQUAL(t, NULL_QREG))
  71/* Used to distinguish stores from bad addressing modes.  */
  72static TCGv store_dummy;
  73
  74#include "exec/gen-icount.h"
  75
  76void m68k_tcg_init(void)
  77{
  78    char *p;
  79    int i;
  80
  81    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
  82    tcg_ctx.tcg_env = cpu_env;
  83
  84#define DEFO32(name, offset) \
  85    QREG_##name = tcg_global_mem_new_i32(cpu_env, \
  86        offsetof(CPUM68KState, offset), #name);
  87#define DEFO64(name, offset) \
  88    QREG_##name = tcg_global_mem_new_i64(cpu_env, \
  89        offsetof(CPUM68KState, offset), #name);
  90#define DEFF64(name, offset) DEFO64(name, offset)
  91#include "qregs.def"
  92#undef DEFO32
  93#undef DEFO64
  94#undef DEFF64
  95
  96    cpu_halted = tcg_global_mem_new_i32(cpu_env,
  97                                        -offsetof(M68kCPU, env) +
  98                                        offsetof(CPUState, halted), "HALTED");
  99    cpu_exception_index = tcg_global_mem_new_i32(cpu_env,
 100                                                 -offsetof(M68kCPU, env) +
 101                                                 offsetof(CPUState, exception_index),
 102                                                 "EXCEPTION");
 103
 104    p = cpu_reg_names;
 105    for (i = 0; i < 8; i++) {
 106        sprintf(p, "D%d", i);
 107        cpu_dregs[i] = tcg_global_mem_new(cpu_env,
 108                                          offsetof(CPUM68KState, dregs[i]), p);
 109        p += 3;
 110        sprintf(p, "A%d", i);
 111        cpu_aregs[i] = tcg_global_mem_new(cpu_env,
 112                                          offsetof(CPUM68KState, aregs[i]), p);
 113        p += 3;
 114        sprintf(p, "F%d", i);
 115        cpu_fregs[i] = tcg_global_mem_new_i64(cpu_env,
 116                                          offsetof(CPUM68KState, fregs[i]), p);
 117        p += 3;
 118    }
 119    for (i = 0; i < 4; i++) {
 120        sprintf(p, "ACC%d", i);
 121        cpu_macc[i] = tcg_global_mem_new_i64(cpu_env,
 122                                         offsetof(CPUM68KState, macc[i]), p);
 123        p += 5;
 124    }
 125
 126    NULL_QREG = tcg_global_mem_new(cpu_env, -4, "NULL");
 127    store_dummy = tcg_global_mem_new(cpu_env, -8, "NULL");
 128}
 129
 130/* internal defines */
 131typedef struct DisasContext {
 132    CPUM68KState *env;
 133    target_ulong insn_pc; /* Start of the current instruction.  */
 134    target_ulong pc;
 135    int is_jmp;
 136    CCOp cc_op; /* Current CC operation */
 137    int cc_op_synced;
 138    int user;
 139    uint32_t fpcr;
 140    struct TranslationBlock *tb;
 141    int singlestep_enabled;
 142    TCGv_i64 mactmp;
 143    int done_mac;
 144} DisasContext;
 145
 146#define DISAS_JUMP_NEXT 4
 147
 148#if defined(CONFIG_USER_ONLY)
 149#define IS_USER(s) 1
 150#else
 151#define IS_USER(s) s->user
 152#endif
 153
 154/* XXX: move that elsewhere */
 155/* ??? Fix exceptions.  */
 156static void *gen_throws_exception;
 157#define gen_last_qop NULL
 158
 159typedef void (*disas_proc)(CPUM68KState *env, DisasContext *s, uint16_t insn);
 160
 161#ifdef DEBUG_DISPATCH
 162#define DISAS_INSN(name)                                                \
 163    static void real_disas_##name(CPUM68KState *env, DisasContext *s,   \
 164                                  uint16_t insn);                       \
 165    static void disas_##name(CPUM68KState *env, DisasContext *s,        \
 166                             uint16_t insn)                             \
 167    {                                                                   \
 168        qemu_log("Dispatch " #name "\n");                               \
 169        real_disas_##name(env, s, insn);                                \
 170    }                                                                   \
 171    static void real_disas_##name(CPUM68KState *env, DisasContext *s,   \
 172                                  uint16_t insn)
 173#else
 174#define DISAS_INSN(name)                                                \
 175    static void disas_##name(CPUM68KState *env, DisasContext *s,        \
 176                             uint16_t insn)
 177#endif
 178
 179static const uint8_t cc_op_live[CC_OP_NB] = {
 180    [CC_OP_FLAGS] = CCF_C | CCF_V | CCF_Z | CCF_N | CCF_X,
 181    [CC_OP_ADDB ... CC_OP_ADDL] = CCF_X | CCF_N | CCF_V,
 182    [CC_OP_SUBB ... CC_OP_SUBL] = CCF_X | CCF_N | CCF_V,
 183    [CC_OP_CMPB ... CC_OP_CMPL] = CCF_X | CCF_N | CCF_V,
 184    [CC_OP_LOGIC] = CCF_X | CCF_N
 185};
 186
 187static void set_cc_op(DisasContext *s, CCOp op)
 188{
 189    CCOp old_op = s->cc_op;
 190    int dead;
 191
 192    if (old_op == op) {
 193        return;
 194    }
 195    s->cc_op = op;
 196    s->cc_op_synced = 0;
 197
 198    /* Discard CC computation that will no longer be used.
 199       Note that X and N are never dead.  */
 200    dead = cc_op_live[old_op] & ~cc_op_live[op];
 201    if (dead & CCF_C) {
 202        tcg_gen_discard_i32(QREG_CC_C);
 203    }
 204    if (dead & CCF_Z) {
 205        tcg_gen_discard_i32(QREG_CC_Z);
 206    }
 207    if (dead & CCF_V) {
 208        tcg_gen_discard_i32(QREG_CC_V);
 209    }
 210}
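/* Illustrative sketch of the lazy-flags pattern the arithmetic translators
 * below rely on (hypothetical opcode body, not a new helper): record the
 * result and second operand, then defer evaluation via set_cc_op() so the
 * discard logic above can drop flags the next instruction clobbers anyway.
 *
 *     tcg_gen_add_i32(QREG_CC_N, src1, src2);   // result doubles as N/Z input
 *     tcg_gen_mov_i32(QREG_CC_V, src2);         // keep operand for overflow
 *     set_cc_op(s, CC_OP_ADDL);                 // flags computed only if read
 */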
 211
 212/* Update the CPU env CC_OP state.  */
 213static void update_cc_op(DisasContext *s)
 214{
 215    if (!s->cc_op_synced) {
 216        s->cc_op_synced = 1;
 217        tcg_gen_movi_i32(QREG_CC_OP, s->cc_op);
 218    }
 219}
 220
  221/* Generate a load from the specified address.  Narrow values are
  222   sign or zero extended to full register width, as requested.  */
 223static inline TCGv gen_load(DisasContext * s, int opsize, TCGv addr, int sign)
 224{
 225    TCGv tmp;
 226    int index = IS_USER(s);
 227    tmp = tcg_temp_new_i32();
 228    switch(opsize) {
 229    case OS_BYTE:
 230        if (sign)
 231            tcg_gen_qemu_ld8s(tmp, addr, index);
 232        else
 233            tcg_gen_qemu_ld8u(tmp, addr, index);
 234        break;
 235    case OS_WORD:
 236        if (sign)
 237            tcg_gen_qemu_ld16s(tmp, addr, index);
 238        else
 239            tcg_gen_qemu_ld16u(tmp, addr, index);
 240        break;
 241    case OS_LONG:
 242    case OS_SINGLE:
 243        tcg_gen_qemu_ld32u(tmp, addr, index);
 244        break;
 245    default:
 246        g_assert_not_reached();
 247    }
 248    gen_throws_exception = gen_last_qop;
 249    return tmp;
 250}
 251
 252static inline TCGv_i64 gen_load64(DisasContext * s, TCGv addr)
 253{
 254    TCGv_i64 tmp;
 255    int index = IS_USER(s);
 256    tmp = tcg_temp_new_i64();
 257    tcg_gen_qemu_ldf64(tmp, addr, index);
 258    gen_throws_exception = gen_last_qop;
 259    return tmp;
 260}
 261
 262/* Generate a store.  */
 263static inline void gen_store(DisasContext *s, int opsize, TCGv addr, TCGv val)
 264{
 265    int index = IS_USER(s);
 266    switch(opsize) {
 267    case OS_BYTE:
 268        tcg_gen_qemu_st8(val, addr, index);
 269        break;
 270    case OS_WORD:
 271        tcg_gen_qemu_st16(val, addr, index);
 272        break;
 273    case OS_LONG:
 274    case OS_SINGLE:
 275        tcg_gen_qemu_st32(val, addr, index);
 276        break;
 277    default:
 278        g_assert_not_reached();
 279    }
 280    gen_throws_exception = gen_last_qop;
 281}
 282
 283static inline void gen_store64(DisasContext *s, TCGv addr, TCGv_i64 val)
 284{
 285    int index = IS_USER(s);
 286    tcg_gen_qemu_stf64(val, addr, index);
 287    gen_throws_exception = gen_last_qop;
 288}
 289
 290typedef enum {
 291    EA_STORE,
 292    EA_LOADU,
 293    EA_LOADS
 294} ea_what;
 295
  296/* Generate an unsigned load for EA_LOADU, a signed load for EA_LOADS,
  297   otherwise (EA_STORE) generate a store of VAL.  */
 298static TCGv gen_ldst(DisasContext *s, int opsize, TCGv addr, TCGv val,
 299                     ea_what what)
 300{
 301    if (what == EA_STORE) {
 302        gen_store(s, opsize, addr, val);
 303        return store_dummy;
 304    } else {
 305        return gen_load(s, opsize, addr, what == EA_LOADS);
 306    }
 307}
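/* For illustration, a hypothetical caller drives both directions through
 * gen_ldst; loads return the new temporary, stores return store_dummy:
 *
 *     TCGv v = gen_ldst(s, OS_WORD, addr, NULL_QREG, EA_LOADS);  // signed load
 *     gen_ldst(s, OS_BYTE, addr, val, EA_STORE);                 // byte store
 */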
 308
 309/* Read a 16-bit immediate constant */
 310static inline uint16_t read_im16(CPUM68KState *env, DisasContext *s)
 311{
 312    uint16_t im;
 313    im = cpu_lduw_code(env, s->pc);
 314    s->pc += 2;
 315    return im;
 316}
 317
 318/* Read an 8-bit immediate constant */
 319static inline uint8_t read_im8(CPUM68KState *env, DisasContext *s)
 320{
 321    return read_im16(env, s);
 322}
 323
 324/* Read a 32-bit immediate constant.  */
 325static inline uint32_t read_im32(CPUM68KState *env, DisasContext *s)
 326{
 327    uint32_t im;
 328    im = read_im16(env, s) << 16;
 329    im |= 0xffff & read_im16(env, s);
 330    return im;
 331}
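/* Worked example: instruction words are fetched big-endian, so for the byte
 * sequence 12 34 56 78 the first read_im16() returns 0x1234, the second
 * returns 0x5678, and read_im32() yields 0x12345678.  */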
 332
  333/* Calculate an address index.  */
 334static TCGv gen_addr_index(uint16_t ext, TCGv tmp)
 335{
 336    TCGv add;
 337    int scale;
 338
 339    add = (ext & 0x8000) ? AREG(ext, 12) : DREG(ext, 12);
 340    if ((ext & 0x800) == 0) {
 341        tcg_gen_ext16s_i32(tmp, add);
 342        add = tmp;
 343    }
 344    scale = (ext >> 9) & 3;
 345    if (scale != 0) {
 346        tcg_gen_shli_i32(tmp, add, scale);
 347        add = tmp;
 348    }
 349    return add;
 350}
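/* Example decode of the index part of an extension word: ext = 0x3600 has
 * bit 15 clear (data register), register field 3, bit 11 clear (word-sized,
 * sign-extended index) and scale field 3, so the computed index is D3.W * 8,
 * i.e. the equivalent of:
 *
 *     tcg_gen_ext16s_i32(tmp, DREG(0x3600, 12));
 *     tcg_gen_shli_i32(tmp, tmp, 3);
 */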
 351
  352/* Handle a base + index + displacement effective address.
 353   A NULL_QREG base means pc-relative.  */
 354static TCGv gen_lea_indexed(CPUM68KState *env, DisasContext *s, TCGv base)
 355{
 356    uint32_t offset;
 357    uint16_t ext;
 358    TCGv add;
 359    TCGv tmp;
 360    uint32_t bd, od;
 361
 362    offset = s->pc;
 363    ext = read_im16(env, s);
 364
 365    if ((ext & 0x800) == 0 && !m68k_feature(s->env, M68K_FEATURE_WORD_INDEX))
 366        return NULL_QREG;
 367
 368    if (m68k_feature(s->env, M68K_FEATURE_M68000) &&
 369        !m68k_feature(s->env, M68K_FEATURE_SCALED_INDEX)) {
 370        ext &= ~(3 << 9);
 371    }
 372
 373    if (ext & 0x100) {
 374        /* full extension word format */
 375        if (!m68k_feature(s->env, M68K_FEATURE_EXT_FULL))
 376            return NULL_QREG;
 377
 378        if ((ext & 0x30) > 0x10) {
 379            /* base displacement */
 380            if ((ext & 0x30) == 0x20) {
 381                bd = (int16_t)read_im16(env, s);
 382            } else {
 383                bd = read_im32(env, s);
 384            }
 385        } else {
 386            bd = 0;
 387        }
 388        tmp = tcg_temp_new();
 389        if ((ext & 0x44) == 0) {
 390            /* pre-index */
 391            add = gen_addr_index(ext, tmp);
 392        } else {
 393            add = NULL_QREG;
 394        }
 395        if ((ext & 0x80) == 0) {
 396            /* base not suppressed */
 397            if (IS_NULL_QREG(base)) {
 398                base = tcg_const_i32(offset + bd);
 399                bd = 0;
 400            }
 401            if (!IS_NULL_QREG(add)) {
 402                tcg_gen_add_i32(tmp, add, base);
 403                add = tmp;
 404            } else {
 405                add = base;
 406            }
 407        }
 408        if (!IS_NULL_QREG(add)) {
 409            if (bd != 0) {
 410                tcg_gen_addi_i32(tmp, add, bd);
 411                add = tmp;
 412            }
 413        } else {
 414            add = tcg_const_i32(bd);
 415        }
 416        if ((ext & 3) != 0) {
 417            /* memory indirect */
 418            base = gen_load(s, OS_LONG, add, 0);
 419            if ((ext & 0x44) == 4) {
 420                add = gen_addr_index(ext, tmp);
 421                tcg_gen_add_i32(tmp, add, base);
 422                add = tmp;
 423            } else {
 424                add = base;
 425            }
 426            if ((ext & 3) > 1) {
 427                /* outer displacement */
 428                if ((ext & 3) == 2) {
 429                    od = (int16_t)read_im16(env, s);
 430                } else {
 431                    od = read_im32(env, s);
 432                }
 433            } else {
 434                od = 0;
 435            }
 436            if (od != 0) {
 437                tcg_gen_addi_i32(tmp, add, od);
 438                add = tmp;
 439            }
 440        }
 441    } else {
 442        /* brief extension word format */
 443        tmp = tcg_temp_new();
 444        add = gen_addr_index(ext, tmp);
 445        if (!IS_NULL_QREG(base)) {
 446            tcg_gen_add_i32(tmp, add, base);
 447            if ((int8_t)ext)
 448                tcg_gen_addi_i32(tmp, tmp, (int8_t)ext);
 449        } else {
 450            tcg_gen_addi_i32(tmp, add, offset + (int8_t)ext);
 451        }
 452        add = tmp;
 453    }
 454    return add;
 455}
 456
 457/* Sign or zero extend a value.  */
 458
 459static inline void gen_ext(TCGv res, TCGv val, int opsize, int sign)
 460{
 461    switch (opsize) {
 462    case OS_BYTE:
 463        if (sign) {
 464            tcg_gen_ext8s_i32(res, val);
 465        } else {
 466            tcg_gen_ext8u_i32(res, val);
 467        }
 468        break;
 469    case OS_WORD:
 470        if (sign) {
 471            tcg_gen_ext16s_i32(res, val);
 472        } else {
 473            tcg_gen_ext16u_i32(res, val);
 474        }
 475        break;
 476    case OS_LONG:
 477        tcg_gen_mov_i32(res, val);
 478        break;
 479    default:
 480        g_assert_not_reached();
 481    }
 482}
 483
 484/* Evaluate all the CC flags.  */
 485
 486static void gen_flush_flags(DisasContext *s)
 487{
 488    TCGv t0, t1;
 489
 490    switch (s->cc_op) {
 491    case CC_OP_FLAGS:
 492        return;
 493
 494    case CC_OP_ADDB:
 495    case CC_OP_ADDW:
 496    case CC_OP_ADDL:
 497        tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
 498        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
 499        /* Compute signed overflow for addition.  */
 500        t0 = tcg_temp_new();
 501        t1 = tcg_temp_new();
 502        tcg_gen_sub_i32(t0, QREG_CC_N, QREG_CC_V);
 503        gen_ext(t0, t0, s->cc_op - CC_OP_ADDB, 1);
 504        tcg_gen_xor_i32(t1, QREG_CC_N, QREG_CC_V);
 505        tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0);
 506        tcg_temp_free(t0);
 507        tcg_gen_andc_i32(QREG_CC_V, t1, QREG_CC_V);
 508        tcg_temp_free(t1);
 509        break;
 510
 511    case CC_OP_SUBB:
 512    case CC_OP_SUBW:
 513    case CC_OP_SUBL:
 514        tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
 515        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
 516        /* Compute signed overflow for subtraction.  */
 517        t0 = tcg_temp_new();
 518        t1 = tcg_temp_new();
 519        tcg_gen_add_i32(t0, QREG_CC_N, QREG_CC_V);
 520        gen_ext(t0, t0, s->cc_op - CC_OP_SUBB, 1);
 521        tcg_gen_xor_i32(t1, QREG_CC_N, QREG_CC_V);
 522        tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0);
 523        tcg_temp_free(t0);
 524        tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t1);
 525        tcg_temp_free(t1);
 526        break;
 527
 528    case CC_OP_CMPB:
 529    case CC_OP_CMPW:
 530    case CC_OP_CMPL:
 531        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_C, QREG_CC_N, QREG_CC_V);
 532        tcg_gen_sub_i32(QREG_CC_Z, QREG_CC_N, QREG_CC_V);
 533        gen_ext(QREG_CC_Z, QREG_CC_Z, s->cc_op - CC_OP_CMPB, 1);
 534        /* Compute signed overflow for subtraction.  */
 535        t0 = tcg_temp_new();
 536        tcg_gen_xor_i32(t0, QREG_CC_Z, QREG_CC_N);
 537        tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, QREG_CC_N);
 538        tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t0);
 539        tcg_temp_free(t0);
 540        tcg_gen_mov_i32(QREG_CC_N, QREG_CC_Z);
 541        break;
 542
 543    case CC_OP_LOGIC:
 544        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
 545        tcg_gen_movi_i32(QREG_CC_C, 0);
 546        tcg_gen_movi_i32(QREG_CC_V, 0);
 547        break;
 548
 549    case CC_OP_DYNAMIC:
 550        gen_helper_flush_flags(cpu_env, QREG_CC_OP);
 551        break;
 552
 553    default:
 554        t0 = tcg_const_i32(s->cc_op);
 555        gen_helper_flush_flags(cpu_env, t0);
 556        tcg_temp_free(t0);
 557        break;
 558    }
 559
  560    /* Note that flush_flags also assigns to env->cc_op.  */
 561    s->cc_op = CC_OP_FLAGS;
 562    s->cc_op_synced = 1;
 563}
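/* Convention used above: while a CC_OP_ADDx/CC_OP_SUBx op is pending,
 * QREG_CC_N holds the (sign-extended) result, QREG_CC_V holds the second
 * operand and QREG_CC_X holds the carry/borrow; once flushed, the V flag
 * lives in the sign bit.  Worked example for CC_OP_ADDL after 0x7fffffff + 1:
 *
 *     N  = 0x80000000, V = 1
 *     t0 = N - V          = 0x7fffffff   // reconstructed first operand
 *     t1 = N ^ V          = 0x80000001
 *     V  = t1 & ~(V ^ t0) = 0x80000001   // sign bit set => signed overflow
 */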
 564
 565static inline TCGv gen_extend(TCGv val, int opsize, int sign)
 566{
 567    TCGv tmp;
 568
 569    if (opsize == OS_LONG) {
 570        tmp = val;
 571    } else {
 572        tmp = tcg_temp_new();
 573        gen_ext(tmp, val, opsize, sign);
 574    }
 575
 576    return tmp;
 577}
 578
 579static void gen_logic_cc(DisasContext *s, TCGv val, int opsize)
 580{
 581    gen_ext(QREG_CC_N, val, opsize, 1);
 582    set_cc_op(s, CC_OP_LOGIC);
 583}
 584
 585static void gen_update_cc_cmp(DisasContext *s, TCGv dest, TCGv src, int opsize)
 586{
 587    tcg_gen_mov_i32(QREG_CC_N, dest);
 588    tcg_gen_mov_i32(QREG_CC_V, src);
 589    set_cc_op(s, CC_OP_CMPB + opsize);
 590}
 591
 592static void gen_update_cc_add(TCGv dest, TCGv src, int opsize)
 593{
 594    gen_ext(QREG_CC_N, dest, opsize, 1);
 595    tcg_gen_mov_i32(QREG_CC_V, src);
 596}
 597
 598static inline int opsize_bytes(int opsize)
 599{
 600    switch (opsize) {
 601    case OS_BYTE: return 1;
 602    case OS_WORD: return 2;
 603    case OS_LONG: return 4;
 604    case OS_SINGLE: return 4;
 605    case OS_DOUBLE: return 8;
 606    case OS_EXTENDED: return 12;
 607    case OS_PACKED: return 12;
 608    default:
 609        g_assert_not_reached();
 610    }
 611}
 612
 613static inline int insn_opsize(int insn)
 614{
 615    switch ((insn >> 6) & 3) {
 616    case 0: return OS_BYTE;
 617    case 1: return OS_WORD;
 618    case 2: return OS_LONG;
 619    default:
 620        g_assert_not_reached();
 621    }
 622}
 623
 624/* Assign value to a register.  If the width is less than the register width
 625   only the low part of the register is set.  */
 626static void gen_partset_reg(int opsize, TCGv reg, TCGv val)
 627{
 628    TCGv tmp;
 629    switch (opsize) {
 630    case OS_BYTE:
 631        tcg_gen_andi_i32(reg, reg, 0xffffff00);
 632        tmp = tcg_temp_new();
 633        tcg_gen_ext8u_i32(tmp, val);
 634        tcg_gen_or_i32(reg, reg, tmp);
 635        break;
 636    case OS_WORD:
 637        tcg_gen_andi_i32(reg, reg, 0xffff0000);
 638        tmp = tcg_temp_new();
 639        tcg_gen_ext16u_i32(tmp, val);
 640        tcg_gen_or_i32(reg, reg, tmp);
 641        break;
 642    case OS_LONG:
 643    case OS_SINGLE:
 644        tcg_gen_mov_i32(reg, val);
 645        break;
 646    default:
 647        g_assert_not_reached();
 648    }
 649}
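/* Example: with reg == 0x12345678, an OS_BYTE write of 0xab leaves the
 * register holding 0x123456ab and an OS_WORD write of 0xbeef leaves it
 * holding 0x1234beef; OS_LONG/OS_SINGLE replace the value entirely.  */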
 650
 651/* Generate code for an "effective address".  Does not adjust the base
 652   register for autoincrement addressing modes.  */
 653static TCGv gen_lea(CPUM68KState *env, DisasContext *s, uint16_t insn,
 654                    int opsize)
 655{
 656    TCGv reg;
 657    TCGv tmp;
 658    uint16_t ext;
 659    uint32_t offset;
 660
 661    switch ((insn >> 3) & 7) {
 662    case 0: /* Data register direct.  */
 663    case 1: /* Address register direct.  */
 664        return NULL_QREG;
 665    case 2: /* Indirect register */
 666    case 3: /* Indirect postincrement.  */
 667        return AREG(insn, 0);
  668    case 4: /* Indirect predecrement.  */
 669        reg = AREG(insn, 0);
 670        tmp = tcg_temp_new();
 671        tcg_gen_subi_i32(tmp, reg, opsize_bytes(opsize));
 672        return tmp;
 673    case 5: /* Indirect displacement.  */
 674        reg = AREG(insn, 0);
 675        tmp = tcg_temp_new();
 676        ext = read_im16(env, s);
 677        tcg_gen_addi_i32(tmp, reg, (int16_t)ext);
 678        return tmp;
 679    case 6: /* Indirect index + displacement.  */
 680        reg = AREG(insn, 0);
 681        return gen_lea_indexed(env, s, reg);
 682    case 7: /* Other */
 683        switch (insn & 7) {
 684        case 0: /* Absolute short.  */
 685            offset = (int16_t)read_im16(env, s);
 686            return tcg_const_i32(offset);
 687        case 1: /* Absolute long.  */
 688            offset = read_im32(env, s);
 689            return tcg_const_i32(offset);
 690        case 2: /* pc displacement  */
 691            offset = s->pc;
 692            offset += (int16_t)read_im16(env, s);
 693            return tcg_const_i32(offset);
 694        case 3: /* pc index+displacement.  */
 695            return gen_lea_indexed(env, s, NULL_QREG);
 696        case 4: /* Immediate.  */
 697        default:
 698            return NULL_QREG;
 699        }
 700    }
 701    /* Should never happen.  */
 702    return NULL_QREG;
 703}
 704
  705/* Helper function for gen_ea.  Reuse the computed address between the
  706   read and write halves of a read-modify-write operand.  */
 707static inline TCGv gen_ea_once(CPUM68KState *env, DisasContext *s,
 708                               uint16_t insn, int opsize, TCGv val,
 709                               TCGv *addrp, ea_what what)
 710{
 711    TCGv tmp;
 712
 713    if (addrp && what == EA_STORE) {
 714        tmp = *addrp;
 715    } else {
 716        tmp = gen_lea(env, s, insn, opsize);
 717        if (IS_NULL_QREG(tmp))
 718            return tmp;
 719        if (addrp)
 720            *addrp = tmp;
 721    }
 722    return gen_ldst(s, opsize, tmp, val, what);
 723}
 724
  725/* Generate code to load/store a value from/into an EA.  WHAT selects an
  726   unsigned load (EA_LOADU), a signed load (EA_LOADS) or a store (EA_STORE)
  727   of VAL.  ADDRP is non-null for read-modify-write operands.  */
 728static TCGv gen_ea(CPUM68KState *env, DisasContext *s, uint16_t insn,
 729                   int opsize, TCGv val, TCGv *addrp, ea_what what)
 730{
 731    TCGv reg;
 732    TCGv result;
 733    uint32_t offset;
 734
 735    switch ((insn >> 3) & 7) {
 736    case 0: /* Data register direct.  */
 737        reg = DREG(insn, 0);
 738        if (what == EA_STORE) {
 739            gen_partset_reg(opsize, reg, val);
 740            return store_dummy;
 741        } else {
 742            return gen_extend(reg, opsize, what == EA_LOADS);
 743        }
 744    case 1: /* Address register direct.  */
 745        reg = AREG(insn, 0);
 746        if (what == EA_STORE) {
 747            tcg_gen_mov_i32(reg, val);
 748            return store_dummy;
 749        } else {
 750            return gen_extend(reg, opsize, what == EA_LOADS);
 751        }
 752    case 2: /* Indirect register */
 753        reg = AREG(insn, 0);
 754        return gen_ldst(s, opsize, reg, val, what);
 755    case 3: /* Indirect postincrement.  */
 756        reg = AREG(insn, 0);
 757        result = gen_ldst(s, opsize, reg, val, what);
 758        /* ??? This is not exception safe.  The instruction may still
 759           fault after this point.  */
 760        if (what == EA_STORE || !addrp)
 761            tcg_gen_addi_i32(reg, reg, opsize_bytes(opsize));
 762        return result;
  763    case 4: /* Indirect predecrement.  */
 764        {
 765            TCGv tmp;
 766            if (addrp && what == EA_STORE) {
 767                tmp = *addrp;
 768            } else {
 769                tmp = gen_lea(env, s, insn, opsize);
 770                if (IS_NULL_QREG(tmp))
 771                    return tmp;
 772                if (addrp)
 773                    *addrp = tmp;
 774            }
 775            result = gen_ldst(s, opsize, tmp, val, what);
 776            /* ??? This is not exception safe.  The instruction may still
 777               fault after this point.  */
 778            if (what == EA_STORE || !addrp) {
 779                reg = AREG(insn, 0);
 780                tcg_gen_mov_i32(reg, tmp);
 781            }
 782        }
 783        return result;
 784    case 5: /* Indirect displacement.  */
 785    case 6: /* Indirect index + displacement.  */
 786        return gen_ea_once(env, s, insn, opsize, val, addrp, what);
 787    case 7: /* Other */
 788        switch (insn & 7) {
 789        case 0: /* Absolute short.  */
 790        case 1: /* Absolute long.  */
 791        case 2: /* pc displacement  */
 792        case 3: /* pc index+displacement.  */
 793            return gen_ea_once(env, s, insn, opsize, val, addrp, what);
 794        case 4: /* Immediate.  */
 795            /* Sign extend values for consistency.  */
 796            switch (opsize) {
 797            case OS_BYTE:
 798                if (what == EA_LOADS) {
 799                    offset = (int8_t)read_im8(env, s);
 800                } else {
 801                    offset = read_im8(env, s);
 802                }
 803                break;
 804            case OS_WORD:
 805                if (what == EA_LOADS) {
 806                    offset = (int16_t)read_im16(env, s);
 807                } else {
 808                    offset = read_im16(env, s);
 809                }
 810                break;
 811            case OS_LONG:
 812                offset = read_im32(env, s);
 813                break;
 814            default:
 815                g_assert_not_reached();
 816            }
 817            return tcg_const_i32(offset);
 818        default:
 819            return NULL_QREG;
 820        }
 821    }
 822    /* Should never happen.  */
 823    return NULL_QREG;
 824}
 825
 826typedef struct {
 827    TCGCond tcond;
 828    bool g1;
 829    bool g2;
 830    TCGv v1;
 831    TCGv v2;
 832} DisasCompare;
 833
 834static void gen_cc_cond(DisasCompare *c, DisasContext *s, int cond)
 835{
 836    TCGv tmp, tmp2;
 837    TCGCond tcond;
 838    CCOp op = s->cc_op;
 839
 840    /* The CC_OP_CMP form can handle most normal comparisons directly.  */
 841    if (op == CC_OP_CMPB || op == CC_OP_CMPW || op == CC_OP_CMPL) {
 842        c->g1 = c->g2 = 1;
 843        c->v1 = QREG_CC_N;
 844        c->v2 = QREG_CC_V;
 845        switch (cond) {
 846        case 2: /* HI */
 847        case 3: /* LS */
 848            tcond = TCG_COND_LEU;
 849            goto done;
 850        case 4: /* CC */
 851        case 5: /* CS */
 852            tcond = TCG_COND_LTU;
 853            goto done;
 854        case 6: /* NE */
 855        case 7: /* EQ */
 856            tcond = TCG_COND_EQ;
 857            goto done;
 858        case 10: /* PL */
 859        case 11: /* MI */
 860            c->g1 = c->g2 = 0;
 861            c->v2 = tcg_const_i32(0);
 862            c->v1 = tmp = tcg_temp_new();
 863            tcg_gen_sub_i32(tmp, QREG_CC_N, QREG_CC_V);
 864            gen_ext(tmp, tmp, op - CC_OP_CMPB, 1);
 865            /* fallthru */
 866        case 12: /* GE */
 867        case 13: /* LT */
 868            tcond = TCG_COND_LT;
 869            goto done;
 870        case 14: /* GT */
 871        case 15: /* LE */
 872            tcond = TCG_COND_LE;
 873            goto done;
 874        }
 875    }
 876
 877    c->g1 = 1;
 878    c->g2 = 0;
 879    c->v2 = tcg_const_i32(0);
 880
 881    switch (cond) {
 882    case 0: /* T */
 883    case 1: /* F */
 884        c->v1 = c->v2;
 885        tcond = TCG_COND_NEVER;
 886        goto done;
 887    case 14: /* GT (!(Z || (N ^ V))) */
 888    case 15: /* LE (Z || (N ^ V)) */
 889        /* Logic operations clear V, which simplifies LE to (Z || N),
 890           and since Z and N are co-located, this becomes a normal
 891           comparison vs N.  */
 892        if (op == CC_OP_LOGIC) {
 893            c->v1 = QREG_CC_N;
 894            tcond = TCG_COND_LE;
 895            goto done;
 896        }
 897        break;
 898    case 12: /* GE (!(N ^ V)) */
 899    case 13: /* LT (N ^ V) */
 900        /* Logic operations clear V, which simplifies this to N.  */
 901        if (op != CC_OP_LOGIC) {
 902            break;
 903        }
 904        /* fallthru */
 905    case 10: /* PL (!N) */
 906    case 11: /* MI (N) */
 907        /* Several cases represent N normally.  */
 908        if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
 909            op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL ||
 910            op == CC_OP_LOGIC) {
 911            c->v1 = QREG_CC_N;
 912            tcond = TCG_COND_LT;
 913            goto done;
 914        }
 915        break;
 916    case 6: /* NE (!Z) */
 917    case 7: /* EQ (Z) */
 918        /* Some cases fold Z into N.  */
 919        if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
 920            op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL ||
 921            op == CC_OP_LOGIC) {
 922            tcond = TCG_COND_EQ;
 923            c->v1 = QREG_CC_N;
 924            goto done;
 925        }
 926        break;
 927    case 4: /* CC (!C) */
 928    case 5: /* CS (C) */
 929        /* Some cases fold C into X.  */
 930        if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
  931            op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL) {
 932            tcond = TCG_COND_NE;
 933            c->v1 = QREG_CC_X;
 934            goto done;
 935        }
 936        /* fallthru */
 937    case 8: /* VC (!V) */
 938    case 9: /* VS (V) */
 939        /* Logic operations clear V and C.  */
 940        if (op == CC_OP_LOGIC) {
 941            tcond = TCG_COND_NEVER;
 942            c->v1 = c->v2;
 943            goto done;
 944        }
 945        break;
 946    }
 947
 948    /* Otherwise, flush flag state to CC_OP_FLAGS.  */
 949    gen_flush_flags(s);
 950
 951    switch (cond) {
 952    case 0: /* T */
 953    case 1: /* F */
 954    default:
 955        /* Invalid, or handled above.  */
 956        abort();
 957    case 2: /* HI (!C && !Z) -> !(C || Z)*/
 958    case 3: /* LS (C || Z) */
 959        c->v1 = tmp = tcg_temp_new();
 960        c->g1 = 0;
 961        tcg_gen_setcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
 962        tcg_gen_or_i32(tmp, tmp, QREG_CC_C);
 963        tcond = TCG_COND_NE;
 964        break;
 965    case 4: /* CC (!C) */
 966    case 5: /* CS (C) */
 967        c->v1 = QREG_CC_C;
 968        tcond = TCG_COND_NE;
 969        break;
 970    case 6: /* NE (!Z) */
 971    case 7: /* EQ (Z) */
 972        c->v1 = QREG_CC_Z;
 973        tcond = TCG_COND_EQ;
 974        break;
 975    case 8: /* VC (!V) */
 976    case 9: /* VS (V) */
 977        c->v1 = QREG_CC_V;
 978        tcond = TCG_COND_LT;
 979        break;
 980    case 10: /* PL (!N) */
 981    case 11: /* MI (N) */
 982        c->v1 = QREG_CC_N;
 983        tcond = TCG_COND_LT;
 984        break;
 985    case 12: /* GE (!(N ^ V)) */
 986    case 13: /* LT (N ^ V) */
 987        c->v1 = tmp = tcg_temp_new();
 988        c->g1 = 0;
 989        tcg_gen_xor_i32(tmp, QREG_CC_N, QREG_CC_V);
 990        tcond = TCG_COND_LT;
 991        break;
 992    case 14: /* GT (!(Z || (N ^ V))) */
 993    case 15: /* LE (Z || (N ^ V)) */
 994        c->v1 = tmp = tcg_temp_new();
 995        c->g1 = 0;
 996        tcg_gen_setcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
 997        tcg_gen_neg_i32(tmp, tmp);
 998        tmp2 = tcg_temp_new();
 999        tcg_gen_xor_i32(tmp2, QREG_CC_N, QREG_CC_V);
1000        tcg_gen_or_i32(tmp, tmp, tmp2);
1001        tcg_temp_free(tmp2);
1002        tcond = TCG_COND_LT;
1003        break;
1004    }
1005
1006 done:
1007    if ((cond & 1) == 0) {
1008        tcond = tcg_invert_cond(tcond);
1009    }
1010    c->tcond = tcond;
1011}
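/* The condition field pairs each predicate with its negation (0 T / 1 F,
 * 2 HI / 3 LS, ..., 14 GT / 15 LE).  The code above therefore only builds
 * the odd ("asserted") member of each pair and inverts the TCG condition
 * when bit 0 of COND is clear, e.g.:
 *
 *     gen_cc_cond(&c, s, 6);   // NE: EQ is computed, then inverted
 */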
1012
1013static void free_cond(DisasCompare *c)
1014{
1015    if (!c->g1) {
1016        tcg_temp_free(c->v1);
1017    }
1018    if (!c->g2) {
1019        tcg_temp_free(c->v2);
1020    }
1021}
1022
1023static void gen_jmpcc(DisasContext *s, int cond, TCGLabel *l1)
1024{
 1025    DisasCompare c;
 1026
 1027    gen_cc_cond(&c, s, cond);
 1028    update_cc_op(s);
 1029    tcg_gen_brcond_i32(c.tcond, c.v1, c.v2, l1);
 1030    free_cond(&c);
1031}
1032
1033/* Force a TB lookup after an instruction that changes the CPU state.  */
1034static void gen_lookup_tb(DisasContext *s)
1035{
1036    update_cc_op(s);
1037    tcg_gen_movi_i32(QREG_PC, s->pc);
1038    s->is_jmp = DISAS_UPDATE;
1039}
1040
1041/* Generate a jump to an immediate address.  */
1042static void gen_jmp_im(DisasContext *s, uint32_t dest)
1043{
1044    update_cc_op(s);
1045    tcg_gen_movi_i32(QREG_PC, dest);
1046    s->is_jmp = DISAS_JUMP;
1047}
1048
1049/* Generate a jump to the address in qreg DEST.  */
1050static void gen_jmp(DisasContext *s, TCGv dest)
1051{
1052    update_cc_op(s);
1053    tcg_gen_mov_i32(QREG_PC, dest);
1054    s->is_jmp = DISAS_JUMP;
1055}
1056
1057static void gen_exception(DisasContext *s, uint32_t where, int nr)
1058{
1059    update_cc_op(s);
1060    gen_jmp_im(s, where);
1061    gen_helper_raise_exception(cpu_env, tcg_const_i32(nr));
1062}
1063
1064static inline void gen_addr_fault(DisasContext *s)
1065{
1066    gen_exception(s, s->insn_pc, EXCP_ADDRESS);
1067}
1068
1069#define SRC_EA(env, result, opsize, op_sign, addrp) do {                \
1070        result = gen_ea(env, s, insn, opsize, NULL_QREG, addrp,         \
1071                        op_sign ? EA_LOADS : EA_LOADU);                 \
1072        if (IS_NULL_QREG(result)) {                                     \
1073            gen_addr_fault(s);                                          \
1074            return;                                                     \
1075        }                                                               \
1076    } while (0)
1077
1078#define DEST_EA(env, insn, opsize, val, addrp) do {                     \
1079        TCGv ea_result = gen_ea(env, s, insn, opsize, val, addrp, EA_STORE); \
1080        if (IS_NULL_QREG(ea_result)) {                                  \
1081            gen_addr_fault(s);                                          \
1082            return;                                                     \
1083        }                                                               \
1084    } while (0)
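/* Illustrative read-modify-write pattern built from the two macros above
 * (hypothetical opcode body; ADDR carries the effective address from the
 * load to the store so it is only computed once):
 *
 *     TCGv addr, src, dest;
 *     SRC_EA(env, src, opsize, 1, &addr);        // load and remember the EA
 *     dest = tcg_temp_new();
 *     tcg_gen_not_i32(dest, src);
 *     DEST_EA(env, insn, opsize, dest, &addr);   // store back to the same EA
 */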
1085
1086static inline bool use_goto_tb(DisasContext *s, uint32_t dest)
1087{
1088#ifndef CONFIG_USER_ONLY
1089    return (s->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
1090           (s->insn_pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
1091#else
1092    return true;
1093#endif
1094}
1095
1096/* Generate a jump to an immediate address.  */
1097static void gen_jmp_tb(DisasContext *s, int n, uint32_t dest)
1098{
1099    if (unlikely(s->singlestep_enabled)) {
1100        gen_exception(s, dest, EXCP_DEBUG);
1101    } else if (use_goto_tb(s, dest)) {
1102        tcg_gen_goto_tb(n);
1103        tcg_gen_movi_i32(QREG_PC, dest);
1104        tcg_gen_exit_tb((uintptr_t)s->tb + n);
1105    } else {
1106        gen_jmp_im(s, dest);
1107        tcg_gen_exit_tb(0);
1108    }
1109    s->is_jmp = DISAS_TB_JUMP;
1110}
1111
1112DISAS_INSN(scc)
1113{
1114    DisasCompare c;
1115    int cond;
1116    TCGv tmp;
1117
1118    cond = (insn >> 8) & 0xf;
1119    gen_cc_cond(&c, s, cond);
1120
1121    tmp = tcg_temp_new();
1122    tcg_gen_setcond_i32(c.tcond, tmp, c.v1, c.v2);
1123    free_cond(&c);
1124
1125    tcg_gen_neg_i32(tmp, tmp);
1126    DEST_EA(env, insn, OS_BYTE, tmp, NULL);
1127    tcg_temp_free(tmp);
1128}
1129
1130DISAS_INSN(dbcc)
1131{
1132    TCGLabel *l1;
1133    TCGv reg;
1134    TCGv tmp;
1135    int16_t offset;
1136    uint32_t base;
1137
1138    reg = DREG(insn, 0);
1139    base = s->pc;
1140    offset = (int16_t)read_im16(env, s);
1141    l1 = gen_new_label();
1142    gen_jmpcc(s, (insn >> 8) & 0xf, l1);
1143
1144    tmp = tcg_temp_new();
1145    tcg_gen_ext16s_i32(tmp, reg);
1146    tcg_gen_addi_i32(tmp, tmp, -1);
1147    gen_partset_reg(OS_WORD, reg, tmp);
1148    tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, -1, l1);
1149    gen_jmp_tb(s, 1, base + offset);
1150    gen_set_label(l1);
1151    gen_jmp_tb(s, 0, s->pc);
1152}
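/* Semantics implemented above, e.g. for "dbf %d0,loop": the F condition never
 * terminates the loop early, D0.W is decremented each time, and the branch to
 * BASE+OFFSET is taken until the counter wraps to -1, so a loop entered with
 * D0.W == n executes n + 1 times.  */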
1153
1154DISAS_INSN(undef_mac)
1155{
1156    gen_exception(s, s->pc - 2, EXCP_LINEA);
1157}
1158
1159DISAS_INSN(undef_fpu)
1160{
1161    gen_exception(s, s->pc - 2, EXCP_LINEF);
1162}
1163
1164DISAS_INSN(undef)
1165{
1166    M68kCPU *cpu = m68k_env_get_cpu(env);
1167
1168    gen_exception(s, s->pc - 2, EXCP_UNSUPPORTED);
1169    cpu_abort(CPU(cpu), "Illegal instruction: %04x @ %08x", insn, s->pc - 2);
1170}
1171
1172DISAS_INSN(mulw)
1173{
1174    TCGv reg;
1175    TCGv tmp;
1176    TCGv src;
1177    int sign;
1178
1179    sign = (insn & 0x100) != 0;
1180    reg = DREG(insn, 9);
1181    tmp = tcg_temp_new();
1182    if (sign)
1183        tcg_gen_ext16s_i32(tmp, reg);
1184    else
1185        tcg_gen_ext16u_i32(tmp, reg);
1186    SRC_EA(env, src, OS_WORD, sign, NULL);
1187    tcg_gen_mul_i32(tmp, tmp, src);
1188    tcg_gen_mov_i32(reg, tmp);
1189    gen_logic_cc(s, tmp, OS_LONG);
1190}
1191
1192DISAS_INSN(divw)
1193{
1194    TCGv reg;
1195    TCGv tmp;
1196    TCGv src;
1197    int sign;
1198
1199    sign = (insn & 0x100) != 0;
1200    reg = DREG(insn, 9);
1201    if (sign) {
1202        tcg_gen_ext16s_i32(QREG_DIV1, reg);
1203    } else {
1204        tcg_gen_ext16u_i32(QREG_DIV1, reg);
1205    }
1206    SRC_EA(env, src, OS_WORD, sign, NULL);
1207    tcg_gen_mov_i32(QREG_DIV2, src);
1208    if (sign) {
1209        gen_helper_divs(cpu_env, tcg_const_i32(1));
1210    } else {
1211        gen_helper_divu(cpu_env, tcg_const_i32(1));
1212    }
1213
1214    tmp = tcg_temp_new();
1215    src = tcg_temp_new();
1216    tcg_gen_ext16u_i32(tmp, QREG_DIV1);
1217    tcg_gen_shli_i32(src, QREG_DIV2, 16);
1218    tcg_gen_or_i32(reg, tmp, src);
1219
1220    set_cc_op(s, CC_OP_FLAGS);
1221}
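/* As the code above reads, the helper leaves the quotient in QREG_DIV1 and
 * the remainder in QREG_DIV2, and the result register is packed as
 * Dn = (remainder << 16) | (quotient & 0xffff).  E.g. 100007 / 10 gives
 * Dn = (7 << 16) | 10000 = 0x00072710.  */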
1222
1223DISAS_INSN(divl)
1224{
1225    TCGv num;
1226    TCGv den;
1227    TCGv reg;
1228    uint16_t ext;
1229
1230    ext = read_im16(env, s);
1231    if (ext & 0x87f8) {
1232        gen_exception(s, s->pc - 4, EXCP_UNSUPPORTED);
1233        return;
1234    }
1235    num = DREG(ext, 12);
1236    reg = DREG(ext, 0);
1237    tcg_gen_mov_i32(QREG_DIV1, num);
1238    SRC_EA(env, den, OS_LONG, 0, NULL);
1239    tcg_gen_mov_i32(QREG_DIV2, den);
1240    if (ext & 0x0800) {
1241        gen_helper_divs(cpu_env, tcg_const_i32(0));
1242    } else {
1243        gen_helper_divu(cpu_env, tcg_const_i32(0));
1244    }
1245    if ((ext & 7) == ((ext >> 12) & 7)) {
1246        /* div */
1247        tcg_gen_mov_i32 (reg, QREG_DIV1);
1248    } else {
1249        /* rem */
1250        tcg_gen_mov_i32 (reg, QREG_DIV2);
1251    }
1252    set_cc_op(s, CC_OP_FLAGS);
1253}
1254
1255DISAS_INSN(addsub)
1256{
1257    TCGv reg;
1258    TCGv dest;
1259    TCGv src;
1260    TCGv tmp;
1261    TCGv addr;
1262    int add;
1263    int opsize;
1264
1265    add = (insn & 0x4000) != 0;
1266    opsize = insn_opsize(insn);
1267    reg = gen_extend(DREG(insn, 9), opsize, 1);
1268    dest = tcg_temp_new();
1269    if (insn & 0x100) {
1270        SRC_EA(env, tmp, opsize, 1, &addr);
1271        src = reg;
1272    } else {
1273        tmp = reg;
1274        SRC_EA(env, src, opsize, 1, NULL);
1275    }
1276    if (add) {
1277        tcg_gen_add_i32(dest, tmp, src);
1278        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, src);
1279        set_cc_op(s, CC_OP_ADDB + opsize);
1280    } else {
1281        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, tmp, src);
1282        tcg_gen_sub_i32(dest, tmp, src);
1283        set_cc_op(s, CC_OP_SUBB + opsize);
1284    }
1285    gen_update_cc_add(dest, src, opsize);
1286    if (insn & 0x100) {
1287        DEST_EA(env, insn, opsize, dest, &addr);
1288    } else {
1289        gen_partset_reg(opsize, DREG(insn, 9), dest);
1290    }
1291    tcg_temp_free(dest);
1292}
1293
1294/* Reverse the order of the bits in REG.  */
1295DISAS_INSN(bitrev)
1296{
1297    TCGv reg;
1298    reg = DREG(insn, 0);
1299    gen_helper_bitrev(reg, reg);
1300}
1301
1302DISAS_INSN(bitop_reg)
1303{
1304    int opsize;
1305    int op;
1306    TCGv src1;
1307    TCGv src2;
1308    TCGv tmp;
1309    TCGv addr;
1310    TCGv dest;
1311
1312    if ((insn & 0x38) != 0)
1313        opsize = OS_BYTE;
1314    else
1315        opsize = OS_LONG;
1316    op = (insn >> 6) & 3;
1317    SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);
1318
1319    gen_flush_flags(s);
1320    src2 = tcg_temp_new();
1321    if (opsize == OS_BYTE)
1322        tcg_gen_andi_i32(src2, DREG(insn, 9), 7);
1323    else
1324        tcg_gen_andi_i32(src2, DREG(insn, 9), 31);
1325
1326    tmp = tcg_const_i32(1);
1327    tcg_gen_shl_i32(tmp, tmp, src2);
1328    tcg_temp_free(src2);
1329
1330    tcg_gen_and_i32(QREG_CC_Z, src1, tmp);
1331
1332    dest = tcg_temp_new();
1333    switch (op) {
1334    case 1: /* bchg */
1335        tcg_gen_xor_i32(dest, src1, tmp);
1336        break;
1337    case 2: /* bclr */
1338        tcg_gen_andc_i32(dest, src1, tmp);
1339        break;
1340    case 3: /* bset */
1341        tcg_gen_or_i32(dest, src1, tmp);
1342        break;
1343    default: /* btst */
1344        break;
1345    }
1346    tcg_temp_free(tmp);
1347    if (op) {
1348        DEST_EA(env, insn, opsize, dest, &addr);
1349    }
1350    tcg_temp_free(dest);
1351}
1352
1353DISAS_INSN(sats)
1354{
1355    TCGv reg;
1356    reg = DREG(insn, 0);
1357    gen_flush_flags(s);
1358    gen_helper_sats(reg, reg, QREG_CC_V);
1359    gen_logic_cc(s, reg, OS_LONG);
1360}
1361
1362static void gen_push(DisasContext *s, TCGv val)
1363{
1364    TCGv tmp;
1365
1366    tmp = tcg_temp_new();
1367    tcg_gen_subi_i32(tmp, QREG_SP, 4);
1368    gen_store(s, OS_LONG, tmp, val);
1369    tcg_gen_mov_i32(QREG_SP, tmp);
1370}
1371
1372DISAS_INSN(movem)
1373{
1374    TCGv addr;
1375    int i;
1376    uint16_t mask;
1377    TCGv reg;
1378    TCGv tmp;
1379    int is_load;
1380
1381    mask = read_im16(env, s);
1382    tmp = gen_lea(env, s, insn, OS_LONG);
1383    if (IS_NULL_QREG(tmp)) {
1384        gen_addr_fault(s);
1385        return;
1386    }
1387    addr = tcg_temp_new();
1388    tcg_gen_mov_i32(addr, tmp);
1389    is_load = ((insn & 0x0400) != 0);
1390    for (i = 0; i < 16; i++, mask >>= 1) {
1391        if (mask & 1) {
1392            if (i < 8)
1393                reg = DREG(i, 0);
1394            else
1395                reg = AREG(i, 0);
1396            if (is_load) {
1397                tmp = gen_load(s, OS_LONG, addr, 0);
1398                tcg_gen_mov_i32(reg, tmp);
1399            } else {
1400                gen_store(s, OS_LONG, addr, reg);
1401            }
1402            if (mask != 1)
1403                tcg_gen_addi_i32(addr, addr, 4);
1404        }
1405    }
1406}
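/* Mask layout consumed above: bit i selects D0-D7 for i < 8 and A0-A7 for
 * i >= 8, processed from bit 0 upward.  E.g. mask 0x0103 transfers D0, D1
 * and A0, in that order.  */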
1407
1408DISAS_INSN(bitop_im)
1409{
1410    int opsize;
1411    int op;
1412    TCGv src1;
1413    uint32_t mask;
1414    int bitnum;
1415    TCGv tmp;
1416    TCGv addr;
1417
1418    if ((insn & 0x38) != 0)
1419        opsize = OS_BYTE;
1420    else
1421        opsize = OS_LONG;
1422    op = (insn >> 6) & 3;
1423
1424    bitnum = read_im16(env, s);
1425    if (bitnum & 0xff00) {
1426        disas_undef(env, s, insn);
1427        return;
1428    }
1429
1430    SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);
1431
1432    gen_flush_flags(s);
1433    if (opsize == OS_BYTE)
1434        bitnum &= 7;
1435    else
1436        bitnum &= 31;
1437    mask = 1 << bitnum;
1438
 1439    tcg_gen_andi_i32(QREG_CC_Z, src1, mask);
1440
1441    if (op) {
1442        tmp = tcg_temp_new();
1443        switch (op) {
1444        case 1: /* bchg */
1445            tcg_gen_xori_i32(tmp, src1, mask);
1446            break;
1447        case 2: /* bclr */
1448            tcg_gen_andi_i32(tmp, src1, ~mask);
1449            break;
1450        case 3: /* bset */
1451            tcg_gen_ori_i32(tmp, src1, mask);
1452            break;
1453        default: /* btst */
1454            break;
1455        }
1456        DEST_EA(env, insn, opsize, tmp, &addr);
1457        tcg_temp_free(tmp);
1458    }
1459}
1460
1461DISAS_INSN(arith_im)
1462{
1463    int op;
1464    TCGv im;
1465    TCGv src1;
1466    TCGv dest;
1467    TCGv addr;
1468    int opsize;
1469
1470    op = (insn >> 9) & 7;
1471    opsize = insn_opsize(insn);
1472    switch (opsize) {
1473    case OS_BYTE:
1474        im = tcg_const_i32((int8_t)read_im8(env, s));
1475        break;
1476    case OS_WORD:
1477        im = tcg_const_i32((int16_t)read_im16(env, s));
1478        break;
1479    case OS_LONG:
1480        im = tcg_const_i32(read_im32(env, s));
1481        break;
1482    default:
1483       abort();
1484    }
1485    SRC_EA(env, src1, opsize, 1, (op == 6) ? NULL : &addr);
1486    dest = tcg_temp_new();
1487    switch (op) {
1488    case 0: /* ori */
1489        tcg_gen_or_i32(dest, src1, im);
1490        gen_logic_cc(s, dest, opsize);
1491        break;
1492    case 1: /* andi */
1493        tcg_gen_and_i32(dest, src1, im);
1494        gen_logic_cc(s, dest, opsize);
1495        break;
1496    case 2: /* subi */
1497        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, src1, im);
1498        tcg_gen_sub_i32(dest, src1, im);
1499        gen_update_cc_add(dest, im, opsize);
1500        set_cc_op(s, CC_OP_SUBB + opsize);
1501        break;
1502    case 3: /* addi */
1503        tcg_gen_add_i32(dest, src1, im);
1504        gen_update_cc_add(dest, im, opsize);
1505        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, im);
1506        set_cc_op(s, CC_OP_ADDB + opsize);
1507        break;
1508    case 5: /* eori */
1509        tcg_gen_xor_i32(dest, src1, im);
1510        gen_logic_cc(s, dest, opsize);
1511        break;
1512    case 6: /* cmpi */
1513        gen_update_cc_cmp(s, src1, im, opsize);
1514        break;
1515    default:
1516        abort();
1517    }
1518    tcg_temp_free(im);
1519    if (op != 6) {
1520        DEST_EA(env, insn, opsize, dest, &addr);
1521    }
1522    tcg_temp_free(dest);
1523}
1524
1525DISAS_INSN(byterev)
1526{
1527    TCGv reg;
1528
1529    reg = DREG(insn, 0);
1530    tcg_gen_bswap32_i32(reg, reg);
1531}
1532
1533DISAS_INSN(move)
1534{
1535    TCGv src;
1536    TCGv dest;
1537    int op;
1538    int opsize;
1539
1540    switch (insn >> 12) {
1541    case 1: /* move.b */
1542        opsize = OS_BYTE;
1543        break;
1544    case 2: /* move.l */
1545        opsize = OS_LONG;
1546        break;
1547    case 3: /* move.w */
1548        opsize = OS_WORD;
1549        break;
1550    default:
1551        abort();
1552    }
1553    SRC_EA(env, src, opsize, 1, NULL);
1554    op = (insn >> 6) & 7;
1555    if (op == 1) {
1556        /* movea */
1557        /* The value will already have been sign extended.  */
1558        dest = AREG(insn, 9);
1559        tcg_gen_mov_i32(dest, src);
1560    } else {
1561        /* normal move */
1562        uint16_t dest_ea;
1563        dest_ea = ((insn >> 9) & 7) | (op << 3);
1564        DEST_EA(env, dest_ea, opsize, src, NULL);
1565        /* This will be correct because loads sign extend.  */
1566        gen_logic_cc(s, src, opsize);
1567    }
1568}
1569
1570DISAS_INSN(negx)
1571{
1572    TCGv z;
1573    TCGv src;
1574    TCGv addr;
1575    int opsize;
1576
1577    opsize = insn_opsize(insn);
1578    SRC_EA(env, src, opsize, 1, &addr);
1579
1580    gen_flush_flags(s); /* compute old Z */
1581
 1582    /* Perform subtraction with borrow.
1583     * (X, N) =  -(src + X);
1584     */
1585
1586    z = tcg_const_i32(0);
1587    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, z, QREG_CC_X, z);
1588    tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, z, z, QREG_CC_N, QREG_CC_X);
1589    tcg_temp_free(z);
1590    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
1591
1592    tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1);
1593
1594    /* Compute signed-overflow for negation.  The normal formula for
1595     * subtraction is (res ^ src) & (src ^ dest), but with dest==0
 1596     * this simplifies to res & src.
1597     */
1598
1599    tcg_gen_and_i32(QREG_CC_V, QREG_CC_N, src);
1600
1601    /* Copy the rest of the results into place.  */
1602    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
1603    tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
1604
1605    set_cc_op(s, CC_OP_FLAGS);
1606
1607    /* result is in QREG_CC_N */
1608
1609    DEST_EA(env, insn, opsize, QREG_CC_N, &addr);
1610}
1611
1612DISAS_INSN(lea)
1613{
1614    TCGv reg;
1615    TCGv tmp;
1616
1617    reg = AREG(insn, 9);
1618    tmp = gen_lea(env, s, insn, OS_LONG);
1619    if (IS_NULL_QREG(tmp)) {
1620        gen_addr_fault(s);
1621        return;
1622    }
1623    tcg_gen_mov_i32(reg, tmp);
1624}
1625
1626DISAS_INSN(clr)
1627{
1628    int opsize;
1629
1630    opsize = insn_opsize(insn);
1631    DEST_EA(env, insn, opsize, tcg_const_i32(0), NULL);
1632    gen_logic_cc(s, tcg_const_i32(0), opsize);
1633}
1634
1635static TCGv gen_get_ccr(DisasContext *s)
1636{
1637    TCGv dest;
1638
1639    gen_flush_flags(s);
1640    update_cc_op(s);
1641    dest = tcg_temp_new();
1642    gen_helper_get_ccr(dest, cpu_env);
1643    return dest;
1644}
1645
1646DISAS_INSN(move_from_ccr)
1647{
1648    TCGv ccr;
1649
1650    ccr = gen_get_ccr(s);
1651    DEST_EA(env, insn, OS_WORD, ccr, NULL);
1652}
1653
1654DISAS_INSN(neg)
1655{
1656    TCGv src1;
1657    TCGv dest;
1658    TCGv addr;
1659    int opsize;
1660
1661    opsize = insn_opsize(insn);
1662    SRC_EA(env, src1, opsize, 1, &addr);
1663    dest = tcg_temp_new();
1664    tcg_gen_neg_i32(dest, src1);
1665    set_cc_op(s, CC_OP_SUBB + opsize);
1666    gen_update_cc_add(dest, src1, opsize);
1667    tcg_gen_setcondi_i32(TCG_COND_NE, QREG_CC_X, dest, 0);
1668    DEST_EA(env, insn, opsize, dest, &addr);
1669    tcg_temp_free(dest);
1670}
1671
1672static void gen_set_sr_im(DisasContext *s, uint16_t val, int ccr_only)
1673{
1674    if (ccr_only) {
1675        tcg_gen_movi_i32(QREG_CC_C, val & CCF_C ? 1 : 0);
1676        tcg_gen_movi_i32(QREG_CC_V, val & CCF_V ? -1 : 0);
1677        tcg_gen_movi_i32(QREG_CC_Z, val & CCF_Z ? 0 : 1);
1678        tcg_gen_movi_i32(QREG_CC_N, val & CCF_N ? -1 : 0);
1679        tcg_gen_movi_i32(QREG_CC_X, val & CCF_X ? 1 : 0);
1680    } else {
1681        gen_helper_set_sr(cpu_env, tcg_const_i32(val));
1682    }
1683    set_cc_op(s, CC_OP_FLAGS);
1684}
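/* Internal CC_OP_FLAGS representation made explicit by the stores above:
 * C and X are kept as 0/1 in bit 0, N and V are kept so that their sign bit
 * is the flag, and Z is kept inverted (QREG_CC_Z == 0 means the Z flag is
 * set).  E.g. a CCR value of 0x04 (only Z set) becomes C=0, V=0, Z=0, N=0,
 * X=0 in the TCG globals.  */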
1685
1686static void gen_set_sr(CPUM68KState *env, DisasContext *s, uint16_t insn,
1687                       int ccr_only)
1688{
1689    if ((insn & 0x38) == 0) {
1690        if (ccr_only) {
1691            gen_helper_set_ccr(cpu_env, DREG(insn, 0));
1692        } else {
1693            gen_helper_set_sr(cpu_env, DREG(insn, 0));
1694        }
1695        set_cc_op(s, CC_OP_FLAGS);
1696    } else if ((insn & 0x3f) == 0x3c) {
1697        uint16_t val;
1698        val = read_im16(env, s);
1699        gen_set_sr_im(s, val, ccr_only);
1700    } else {
1701        disas_undef(env, s, insn);
1702    }
1703}
1704
1705
1706DISAS_INSN(move_to_ccr)
1707{
1708    gen_set_sr(env, s, insn, 1);
1709}
1710
1711DISAS_INSN(not)
1712{
1713    TCGv src1;
1714    TCGv dest;
1715    TCGv addr;
1716    int opsize;
1717
1718    opsize = insn_opsize(insn);
1719    SRC_EA(env, src1, opsize, 1, &addr);
1720    dest = tcg_temp_new();
1721    tcg_gen_not_i32(dest, src1);
1722    DEST_EA(env, insn, opsize, dest, &addr);
1723    gen_logic_cc(s, dest, opsize);
1724}
1725
1726DISAS_INSN(swap)
1727{
1728    TCGv src1;
1729    TCGv src2;
1730    TCGv reg;
1731
1732    src1 = tcg_temp_new();
1733    src2 = tcg_temp_new();
1734    reg = DREG(insn, 0);
1735    tcg_gen_shli_i32(src1, reg, 16);
1736    tcg_gen_shri_i32(src2, reg, 16);
1737    tcg_gen_or_i32(reg, src1, src2);
1738    gen_logic_cc(s, reg, OS_LONG);
1739}
1740
1741DISAS_INSN(bkpt)
1742{
1743    gen_exception(s, s->pc - 2, EXCP_DEBUG);
1744}
1745
1746DISAS_INSN(pea)
1747{
1748    TCGv tmp;
1749
1750    tmp = gen_lea(env, s, insn, OS_LONG);
1751    if (IS_NULL_QREG(tmp)) {
1752        gen_addr_fault(s);
1753        return;
1754    }
1755    gen_push(s, tmp);
1756}
1757
1758DISAS_INSN(ext)
1759{
1760    int op;
1761    TCGv reg;
1762    TCGv tmp;
1763
1764    reg = DREG(insn, 0);
1765    op = (insn >> 6) & 7;
1766    tmp = tcg_temp_new();
1767    if (op == 3)
1768        tcg_gen_ext16s_i32(tmp, reg);
1769    else
1770        tcg_gen_ext8s_i32(tmp, reg);
1771    if (op == 2)
1772        gen_partset_reg(OS_WORD, reg, tmp);
1773    else
1774        tcg_gen_mov_i32(reg, tmp);
1775    gen_logic_cc(s, tmp, OS_LONG);
1776}
1777
1778DISAS_INSN(tst)
1779{
1780    int opsize;
1781    TCGv tmp;
1782
1783    opsize = insn_opsize(insn);
1784    SRC_EA(env, tmp, opsize, 1, NULL);
1785    gen_logic_cc(s, tmp, opsize);
1786}
1787
1788DISAS_INSN(pulse)
1789{
 1790    /* Implemented as a NOP.  */
1791}
1792
1793DISAS_INSN(illegal)
1794{
1795    gen_exception(s, s->pc - 2, EXCP_ILLEGAL);
1796}
1797
1798/* ??? This should be atomic.  */
1799DISAS_INSN(tas)
1800{
1801    TCGv dest;
1802    TCGv src1;
1803    TCGv addr;
1804
1805    dest = tcg_temp_new();
1806    SRC_EA(env, src1, OS_BYTE, 1, &addr);
1807    gen_logic_cc(s, src1, OS_BYTE);
1808    tcg_gen_ori_i32(dest, src1, 0x80);
1809    DEST_EA(env, insn, OS_BYTE, dest, &addr);
1810}
1811
1812DISAS_INSN(mull)
1813{
1814    uint16_t ext;
1815    TCGv reg;
1816    TCGv src1;
1817    TCGv dest;
1818
1819    /* The upper 32 bits of the product are discarded, so
1820       muls.l and mulu.l are functionally equivalent.  */
1821    ext = read_im16(env, s);
1822    if (ext & 0x87ff) {
1823        gen_exception(s, s->pc - 4, EXCP_UNSUPPORTED);
1824        return;
1825    }
1826    reg = DREG(ext, 12);
1827    SRC_EA(env, src1, OS_LONG, 0, NULL);
1828    dest = tcg_temp_new();
1829    tcg_gen_mul_i32(dest, src1, reg);
1830    tcg_gen_mov_i32(reg, dest);
1831    /* Unlike m68k, coldfire always clears the overflow bit.  */
1832    gen_logic_cc(s, dest, OS_LONG);
1833}
1834
1835static void gen_link(DisasContext *s, uint16_t insn, int32_t offset)
1836{
1837    TCGv reg;
1838    TCGv tmp;
1839
1840    reg = AREG(insn, 0);
1841    tmp = tcg_temp_new();
1842    tcg_gen_subi_i32(tmp, QREG_SP, 4);
1843    gen_store(s, OS_LONG, tmp, reg);
1844    if ((insn & 7) != 7) {
1845        tcg_gen_mov_i32(reg, tmp);
1846    }
1847    tcg_gen_addi_i32(QREG_SP, tmp, offset);
1848    tcg_temp_free(tmp);
1849}
1850
1851DISAS_INSN(link)
1852{
1853    int16_t offset;
1854
1855    offset = read_im16(env, s);
1856    gen_link(s, insn, offset);
1857}
1858
1859DISAS_INSN(linkl)
1860{
1861    int32_t offset;
1862
1863    offset = read_im32(env, s);
1864    gen_link(s, insn, offset);
1865}
1866
1867DISAS_INSN(unlk)
1868{
1869    TCGv src;
1870    TCGv reg;
1871    TCGv tmp;
1872
1873    src = tcg_temp_new();
1874    reg = AREG(insn, 0);
1875    tcg_gen_mov_i32(src, reg);
1876    tmp = gen_load(s, OS_LONG, src, 0);
1877    tcg_gen_mov_i32(reg, tmp);
1878    tcg_gen_addi_i32(QREG_SP, src, 4);
1879}
1880
1881DISAS_INSN(nop)
1882{
1883}
1884
1885DISAS_INSN(rts)
1886{
1887    TCGv tmp;
1888
1889    tmp = gen_load(s, OS_LONG, QREG_SP, 0);
1890    tcg_gen_addi_i32(QREG_SP, QREG_SP, 4);
1891    gen_jmp(s, tmp);
1892}
1893
1894DISAS_INSN(jump)
1895{
1896    TCGv tmp;
1897
1898    /* Load the target address first to ensure correct exception
1899       behavior.  */
1900    tmp = gen_lea(env, s, insn, OS_LONG);
1901    if (IS_NULL_QREG(tmp)) {
1902        gen_addr_fault(s);
1903        return;
1904    }
1905    if ((insn & 0x40) == 0) {
1906        /* jsr */
1907        gen_push(s, tcg_const_i32(s->pc));
1908    }
1909    gen_jmp(s, tmp);
1910}
1911
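/* ADDQ/SUBQ: add or subtract a quick immediate in the range 1-8
   (an immediate field of 0 encodes 8).  */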
1912DISAS_INSN(addsubq)
1913{
1914    TCGv src;
1915    TCGv dest;
1916    TCGv val;
1917    int imm;
1918    TCGv addr;
1919    int opsize;
1920
1921    if ((insn & 070) == 010) {
1922        /* Operation on address register is always long.  */
1923        opsize = OS_LONG;
1924    } else {
1925        opsize = insn_opsize(insn);
1926    }
1927    SRC_EA(env, src, opsize, 1, &addr);
1928    imm = (insn >> 9) & 7;
1929    if (imm == 0) {
1930        imm = 8;
1931    }
1932    val = tcg_const_i32(imm);
1933    dest = tcg_temp_new();
1934    tcg_gen_mov_i32(dest, src);
1935    if ((insn & 0x38) == 0x08) {
1936        /* Don't update condition codes if the destination is an
1937           address register.  */
1938        if (insn & 0x0100) {
1939            tcg_gen_sub_i32(dest, dest, val);
1940        } else {
1941            tcg_gen_add_i32(dest, dest, val);
1942        }
1943    } else {
1944        if (insn & 0x0100) {
1945            tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, val);
1946            tcg_gen_sub_i32(dest, dest, val);
1947            set_cc_op(s, CC_OP_SUBB + opsize);
1948        } else {
1949            tcg_gen_add_i32(dest, dest, val);
1950            tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, val);
1951            set_cc_op(s, CC_OP_ADDB + opsize);
1952        }
1953        gen_update_cc_add(dest, val, opsize);
1954    }
1955    DEST_EA(env, insn, opsize, dest, &addr);
1956}
1957
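/* TPF never traps; it only has to consume its extension words.  */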
1958DISAS_INSN(tpf)
1959{
1960    switch (insn & 7) {
1961    case 2: /* One extension word.  */
1962        s->pc += 2;
1963        break;
1964    case 3: /* Two extension words.  */
1965        s->pc += 4;
1966        break;
1967    case 4: /* No extension words.  */
1968        break;
1969    default:
1970        disas_undef(env, s, insn);
1971    }
1972}
1973
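/* BRA/BSR/Bcc: an inline 8-bit displacement of 0x00 means a 16-bit
   displacement word follows; 0xff means a 32-bit displacement follows
   (long branches, where supported).  */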
1974DISAS_INSN(branch)
1975{
1976    int32_t offset;
1977    uint32_t base;
1978    int op;
1979    TCGLabel *l1;
1980
1981    base = s->pc;
1982    op = (insn >> 8) & 0xf;
1983    offset = (int8_t)insn;
1984    if (offset == 0) {
1985        offset = (int16_t)read_im16(env, s);
1986    } else if (offset == -1) {
1987        offset = read_im32(env, s);
1988    }
1989    if (op == 1) {
1990        /* bsr */
1991        gen_push(s, tcg_const_i32(s->pc));
1992    }
1993    if (op > 1) {
1994        /* Bcc */
1995        l1 = gen_new_label();
1996        gen_jmpcc(s, ((insn >> 8) & 0xf) ^ 1, l1);
1997        gen_jmp_tb(s, 1, base + offset);
1998        gen_set_label(l1);
1999        gen_jmp_tb(s, 0, s->pc);
2000    } else {
2001        /* Unconditional branch.  */
2002        gen_jmp_tb(s, 0, base + offset);
2003    }
2004}
2005
2006DISAS_INSN(moveq)
2007{
2008    uint32_t val;
2009
2010    val = (int8_t)insn;
2011    tcg_gen_movi_i32(DREG(insn, 9), val);
2012    gen_logic_cc(s, tcg_const_i32(val), OS_LONG);
2013}
2014
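/* MVS/MVZ (ColdFire ISA_B): load a byte or word into a data register
   with sign or zero extension.  */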
2015DISAS_INSN(mvzs)
2016{
2017    int opsize;
2018    TCGv src;
2019    TCGv reg;
2020
2021    if (insn & 0x40)
2022        opsize = OS_WORD;
2023    else
2024        opsize = OS_BYTE;
2025    SRC_EA(env, src, opsize, (insn & 0x80) == 0, NULL);
2026    reg = DREG(insn, 9);
2027    tcg_gen_mov_i32(reg, src);
2028    gen_logic_cc(s, src, opsize);
2029}
2030
2031DISAS_INSN(or)
2032{
2033    TCGv reg;
2034    TCGv dest;
2035    TCGv src;
2036    TCGv addr;
2037    int opsize;
2038
2039    opsize = insn_opsize(insn);
2040    reg = gen_extend(DREG(insn, 9), opsize, 0);
2041    dest = tcg_temp_new();
2042    if (insn & 0x100) {
2043        SRC_EA(env, src, opsize, 0, &addr);
2044        tcg_gen_or_i32(dest, src, reg);
2045        DEST_EA(env, insn, opsize, dest, &addr);
2046    } else {
2047        SRC_EA(env, src, opsize, 0, NULL);
2048        tcg_gen_or_i32(dest, src, reg);
2049        gen_partset_reg(opsize, DREG(insn, 9), dest);
2050    }
2051    gen_logic_cc(s, dest, opsize);
2052}
2053
2054DISAS_INSN(suba)
2055{
2056    TCGv src;
2057    TCGv reg;
2058
2059    SRC_EA(env, src, (insn & 0x100) ? OS_LONG : OS_WORD, 1, NULL);
2060    reg = AREG(insn, 9);
2061    tcg_gen_sub_i32(reg, reg, src);
2062}
2063
2064static inline void gen_subx(DisasContext *s, TCGv src, TCGv dest, int opsize)
2065{
2066    TCGv tmp;
2067
2068    gen_flush_flags(s); /* compute old Z */
2069
2070    /* Perform subtract with borrow.
2071     * (X, N) = dest - (src + X);
2072     */
2073
2074    tmp = tcg_const_i32(0);
2075    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, tmp, QREG_CC_X, tmp);
2076    tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, dest, tmp, QREG_CC_N, QREG_CC_X);
2077    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
2078    tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1);
2079
2080    /* Compute signed-overflow for subtract.  */
2081
2082    tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, dest);
2083    tcg_gen_xor_i32(tmp, dest, src);
2084    tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, tmp);
2085    tcg_temp_free(tmp);
2086
2087    /* Copy the rest of the results into place.  */
2088    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
2089    tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
2090
2091    set_cc_op(s, CC_OP_FLAGS);
2092
2093    /* result is in QREG_CC_N */
2094}
2095
2096DISAS_INSN(subx_reg)
2097{
2098    TCGv dest;
2099    TCGv src;
2100    int opsize;
2101
2102    opsize = insn_opsize(insn);
2103
2104    src = gen_extend(DREG(insn, 0), opsize, 1);
2105    dest = gen_extend(DREG(insn, 9), opsize, 1);
2106
2107    gen_subx(s, src, dest, opsize);
2108
2109    gen_partset_reg(opsize, DREG(insn, 9), QREG_CC_N);
2110}
2111
2112DISAS_INSN(subx_mem)
2113{
2114    TCGv src;
2115    TCGv addr_src;
2116    TCGv dest;
2117    TCGv addr_dest;
2118    int opsize;
2119
2120    opsize = insn_opsize(insn);
2121
2122    addr_src = AREG(insn, 0);
2123    tcg_gen_subi_i32(addr_src, addr_src, opsize_bytes(opsize));
2124    src = gen_load(s, opsize, addr_src, 1);
2125
2126    addr_dest = AREG(insn, 9);
2127    tcg_gen_subi_i32(addr_dest, addr_dest, opsize_bytes(opsize));
2128    dest = gen_load(s, opsize, addr_dest, 1);
2129
2130    gen_subx(s, src, dest, opsize);
2131
2132    gen_store(s, opsize, addr_dest, QREG_CC_N);
2133}
2134
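/* MOV3Q (ColdFire ISA_B): move a 3-bit immediate (1-7, with 0 encoding -1)
   to the destination and set the flags on it.  */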
2135DISAS_INSN(mov3q)
2136{
2137    TCGv src;
2138    int val;
2139
2140    val = (insn >> 9) & 7;
2141    if (val == 0)
2142        val = -1;
2143    src = tcg_const_i32(val);
2144    gen_logic_cc(s, src, OS_LONG);
2145    DEST_EA(env, insn, OS_LONG, src, NULL);
2146}
2147
2148DISAS_INSN(cmp)
2149{
2150    TCGv src;
2151    TCGv reg;
2152    int opsize;
2153
2154    opsize = insn_opsize(insn);
2155    SRC_EA(env, src, opsize, 1, NULL);
2156    reg = gen_extend(DREG(insn, 9), opsize, 1);
2157    gen_update_cc_cmp(s, reg, src, opsize);
2158}
2159
2160DISAS_INSN(cmpa)
2161{
2162    int opsize;
2163    TCGv src;
2164    TCGv reg;
2165
2166    if (insn & 0x100) {
2167        opsize = OS_LONG;
2168    } else {
2169        opsize = OS_WORD;
2170    }
2171    SRC_EA(env, src, opsize, 1, NULL);
2172    reg = AREG(insn, 9);
2173    gen_update_cc_cmp(s, reg, src, OS_LONG);
2174}
2175
2176DISAS_INSN(eor)
2177{
2178    TCGv src;
2179    TCGv dest;
2180    TCGv addr;
2181    int opsize;
2182
2183    opsize = insn_opsize(insn);
2184
2185    SRC_EA(env, src, opsize, 0, &addr);
2186    dest = tcg_temp_new();
2187    tcg_gen_xor_i32(dest, src, DREG(insn, 9));
2188    gen_logic_cc(s, dest, opsize);
2189    DEST_EA(env, insn, opsize, dest, &addr);
2190}
2191
2192static void do_exg(TCGv reg1, TCGv reg2)
2193{
2194    TCGv temp = tcg_temp_new();
2195    tcg_gen_mov_i32(temp, reg1);
2196    tcg_gen_mov_i32(reg1, reg2);
2197    tcg_gen_mov_i32(reg2, temp);
2198    tcg_temp_free(temp);
2199}
2200
2201DISAS_INSN(exg_dd)
2202{
2203    /* exchange Dx and Dy */
2204    do_exg(DREG(insn, 9), DREG(insn, 0));
2205}
2206
2207DISAS_INSN(exg_aa)
2208{
2209    /* exchange Ax and Ay */
2210    do_exg(AREG(insn, 9), AREG(insn, 0));
2211}
2212
2213DISAS_INSN(exg_da)
2214{
2215    /* exchange Dx and Ay */
2216    do_exg(DREG(insn, 9), AREG(insn, 0));
2217}
2218
2219DISAS_INSN(and)
2220{
2221    TCGv src;
2222    TCGv reg;
2223    TCGv dest;
2224    TCGv addr;
2225    int opsize;
2226
2227    dest = tcg_temp_new();
2228
2229    opsize = insn_opsize(insn);
2230    reg = DREG(insn, 9);
2231    if (insn & 0x100) {
2232        SRC_EA(env, src, opsize, 0, &addr);
2233        tcg_gen_and_i32(dest, src, reg);
2234        DEST_EA(env, insn, opsize, dest, &addr);
2235    } else {
2236        SRC_EA(env, src, opsize, 0, NULL);
2237        tcg_gen_and_i32(dest, src, reg);
2238        gen_partset_reg(opsize, reg, dest);
2239    }
2240    gen_logic_cc(s, dest, opsize);
2241    tcg_temp_free(dest);
2242}
2243
2244DISAS_INSN(adda)
2245{
2246    TCGv src;
2247    TCGv reg;
2248
2249    SRC_EA(env, src, (insn & 0x100) ? OS_LONG : OS_WORD, 1, NULL);
2250    reg = AREG(insn, 9);
2251    tcg_gen_add_i32(reg, reg, src);
2252}
2253
2254static inline void gen_addx(DisasContext *s, TCGv src, TCGv dest, int opsize)
2255{
2256    TCGv tmp;
2257
2258    gen_flush_flags(s); /* compute old Z */
2259
2260    /* Perform addition with carry.
2261     * (X, N) = src + dest + X;
2262     */
2263
2264    tmp = tcg_const_i32(0);
2265    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_X, tmp, dest, tmp);
2266    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_N, QREG_CC_X, src, tmp);
2267    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
2268
2269    /* Compute signed-overflow for addition.  */
2270
2271    tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, src);
2272    tcg_gen_xor_i32(tmp, dest, src);
2273    tcg_gen_andc_i32(QREG_CC_V, QREG_CC_V, tmp);
2274    tcg_temp_free(tmp);
2275
2276    /* Copy the rest of the results into place.  */
2277    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
2278    tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
2279
2280    set_cc_op(s, CC_OP_FLAGS);
2281
2282    /* result is in QREG_CC_N */
2283}
2284
2285DISAS_INSN(addx_reg)
2286{
2287    TCGv dest;
2288    TCGv src;
2289    int opsize;
2290
2291    opsize = insn_opsize(insn);
2292
2293    dest = gen_extend(DREG(insn, 9), opsize, 1);
2294    src = gen_extend(DREG(insn, 0), opsize, 1);
2295
2296    gen_addx(s, src, dest, opsize);
2297
2298    gen_partset_reg(opsize, DREG(insn, 9), QREG_CC_N);
2299}
2300
2301DISAS_INSN(addx_mem)
2302{
2303    TCGv src;
2304    TCGv addr_src;
2305    TCGv dest;
2306    TCGv addr_dest;
2307    int opsize;
2308
2309    opsize = insn_opsize(insn);
2310
2311    addr_src = AREG(insn, 0);
2312    tcg_gen_subi_i32(addr_src, addr_src, opsize_bytes(opsize));
2313    src = gen_load(s, opsize, addr_src, 1);
2314
2315    addr_dest = AREG(insn, 9);
2316    tcg_gen_subi_i32(addr_dest, addr_dest, opsize_bytes(opsize));
2317    dest = gen_load(s, opsize, addr_dest, 1);
2318
2319    gen_addx(s, src, dest, opsize);
2320
2321    gen_store(s, opsize, addr_dest, QREG_CC_N);
2322}
2323
2324/* TODO: This could be implemented without helper functions.  */
2325DISAS_INSN(shift_im)
2326{
2327    TCGv reg;
2328    int tmp;
2329    TCGv shift;
2330
2331    set_cc_op(s, CC_OP_FLAGS);
2332
2333    reg = DREG(insn, 0);
2334    tmp = (insn >> 9) & 7;
2335    if (tmp == 0)
2336        tmp = 8;
2337    shift = tcg_const_i32(tmp);
2338    /* No need to flush flags because we know we will set the C flag.  */
2339    if (insn & 0x100) {
2340        gen_helper_shl_cc(reg, cpu_env, reg, shift);
2341    } else {
2342        if (insn & 8) {
2343            gen_helper_shr_cc(reg, cpu_env, reg, shift);
2344        } else {
2345            gen_helper_sar_cc(reg, cpu_env, reg, shift);
2346        }
2347    }
2348}
2349
2350DISAS_INSN(shift_reg)
2351{
2352    TCGv reg;
2353    TCGv shift;
2354
2355    reg = DREG(insn, 0);
2356    shift = DREG(insn, 9);
2357    if (insn & 0x100) {
2358        gen_helper_shl_cc(reg, cpu_env, reg, shift);
2359    } else {
2360        if (insn & 8) {
2361            gen_helper_shr_cc(reg, cpu_env, reg, shift);
2362        } else {
2363            gen_helper_sar_cc(reg, cpu_env, reg, shift);
2364        }
2365    }
2366    set_cc_op(s, CC_OP_FLAGS);
2367}
2368
2369DISAS_INSN(ff1)
2370{
2371    TCGv reg;
2372    reg = DREG(insn, 0);
2373    gen_logic_cc(s, reg, OS_LONG);
2374    gen_helper_ff1(reg, reg);
2375}
2376
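/* Assemble the full status register from the supervisor half of QREG_SR
   and the lazily evaluated condition code flags.  */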
2377static TCGv gen_get_sr(DisasContext *s)
2378{
2379    TCGv ccr;
2380    TCGv sr;
2381
2382    ccr = gen_get_ccr(s);
2383    sr = tcg_temp_new();
2384    tcg_gen_andi_i32(sr, QREG_SR, 0xffe0);
2385    tcg_gen_or_i32(sr, sr, ccr);
2386    return sr;
2387}
2388
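/* ColdFire STRLDSR: push the current SR, then load SR from an immediate.
   The second opcode word must be 0x46fc (move immediate to SR).  */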
2389DISAS_INSN(strldsr)
2390{
2391    uint16_t ext;
2392    uint32_t addr;
2393
2394    addr = s->pc - 2;
2395    ext = read_im16(env, s);
2396    if (ext != 0x46FC) {
2397        gen_exception(s, addr, EXCP_UNSUPPORTED);
2398        return;
2399    }
2400    ext = read_im16(env, s);
2401    if (IS_USER(s) || (ext & SR_S) == 0) {
2402        gen_exception(s, addr, EXCP_PRIVILEGE);
2403        return;
2404    }
2405    gen_push(s, gen_get_sr(s));
2406    gen_set_sr_im(s, ext, 0);
2407}
2408
2409DISAS_INSN(move_from_sr)
2410{
2411    TCGv sr;
2412
2413    if (IS_USER(s) && !m68k_feature(env, M68K_FEATURE_M68000)) {
2414        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2415        return;
2416    }
2417    sr = gen_get_sr(s);
2418    DEST_EA(env, insn, OS_WORD, sr, NULL);
2419}
2420
2421DISAS_INSN(move_to_sr)
2422{
2423    if (IS_USER(s)) {
2424        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2425        return;
2426    }
2427    gen_set_sr(env, s, insn, 0);
2428    gen_lookup_tb(s);
2429}
2430
2431DISAS_INSN(move_from_usp)
2432{
2433    if (IS_USER(s)) {
2434        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2435        return;
2436    }
2437    tcg_gen_ld_i32(AREG(insn, 0), cpu_env,
2438                   offsetof(CPUM68KState, sp[M68K_USP]));
2439}
2440
2441DISAS_INSN(move_to_usp)
2442{
2443    if (IS_USER(s)) {
2444        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2445        return;
2446    }
2447    tcg_gen_st_i32(AREG(insn, 0), cpu_env,
2448                   offsetof(CPUM68KState, sp[M68K_USP]));
2449}
2450
2451DISAS_INSN(halt)
2452{
2453    gen_exception(s, s->pc, EXCP_HALT_INSN);
2454}
2455
2456DISAS_INSN(stop)
2457{
2458    uint16_t ext;
2459
2460    if (IS_USER(s)) {
2461        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2462        return;
2463    }
2464
2465    ext = read_im16(env, s);
2466
2467    gen_set_sr_im(s, ext, 0);
2468    tcg_gen_movi_i32(cpu_halted, 1);
2469    gen_exception(s, s->pc, EXCP_HLT);
2470}
2471
2472DISAS_INSN(rte)
2473{
2474    if (IS_USER(s)) {
2475        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2476        return;
2477    }
2478    gen_exception(s, s->pc - 2, EXCP_RTE);
2479}
2480
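/* MOVEC: only the general-register-to-control-register direction (0x4e7b)
   is decoded; the helper performs the actual control register write.  */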
2481DISAS_INSN(movec)
2482{
2483    uint16_t ext;
2484    TCGv reg;
2485
2486    if (IS_USER(s)) {
2487        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2488        return;
2489    }
2490
2491    ext = read_im16(env, s);
2492
2493    if (ext & 0x8000) {
2494        reg = AREG(ext, 12);
2495    } else {
2496        reg = DREG(ext, 12);
2497    }
2498    gen_helper_movec(cpu_env, tcg_const_i32(ext & 0xfff), reg);
2499    gen_lookup_tb(s);
2500}
2501
2502DISAS_INSN(intouch)
2503{
2504    if (IS_USER(s)) {
2505        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2506        return;
2507    }
2508    /* ICache fetch.  Implement as no-op.  */
2509}
2510
2511DISAS_INSN(cpushl)
2512{
2513    if (IS_USER(s)) {
2514        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2515        return;
2516    }
2517    /* Cache push/invalidate.  Implement as no-op.  */
2518}
2519
2520DISAS_INSN(wddata)
2521{
2522    gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2523}
2524
2525DISAS_INSN(wdebug)
2526{
2527    M68kCPU *cpu = m68k_env_get_cpu(env);
2528
2529    if (IS_USER(s)) {
2530        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2531        return;
2532    }
2533    /* TODO: Implement wdebug.  */
2534    cpu_abort(CPU(cpu), "WDEBUG not implemented");
2535}
2536
2537DISAS_INSN(trap)
2538{
2539    gen_exception(s, s->pc - 2, EXCP_TRAP0 + (insn & 0xf));
2540}
2541
2542/* ??? FP exceptions are not implemented.  Most exceptions are deferred until
2543   immediately before the next FP instruction is executed.  */
2544DISAS_INSN(fpu)
2545{
2546    uint16_t ext;
2547    int32_t offset;
2548    int opmode;
2549    TCGv_i64 src;
2550    TCGv_i64 dest;
2551    TCGv_i64 res;
2552    TCGv tmp32;
2553    int round;
2554    int set_dest;
2555    int opsize;
2556
2557    ext = read_im16(env, s);
2558    opmode = ext & 0x7f;
2559    switch ((ext >> 13) & 7) {
2560    case 0: case 2:
2561        break;
2562    case 1:
2563        goto undef;
2564    case 3: /* fmove out */
2565        src = FREG(ext, 7);
2566        tmp32 = tcg_temp_new_i32();
2567        /* fmove */
2568        /* ??? TODO: Proper behavior on overflow.  */
2569        switch ((ext >> 10) & 7) {
2570        case 0:
2571            opsize = OS_LONG;
2572            gen_helper_f64_to_i32(tmp32, cpu_env, src);
2573            break;
2574        case 1:
2575            opsize = OS_SINGLE;
2576            gen_helper_f64_to_f32(tmp32, cpu_env, src);
2577            break;
2578        case 4:
2579            opsize = OS_WORD;
2580            gen_helper_f64_to_i32(tmp32, cpu_env, src);
2581            break;
2582        case 5: /* OS_DOUBLE */
2583            tcg_gen_mov_i32(tmp32, AREG(insn, 0));
2584            switch ((insn >> 3) & 7) {
2585            case 2:
2586            case 3:
2587                break;
2588            case 4:
2589                tcg_gen_addi_i32(tmp32, tmp32, -8);
2590                break;
2591            case 5:
2592                offset = cpu_ldsw_code(env, s->pc);
2593                s->pc += 2;
2594                tcg_gen_addi_i32(tmp32, tmp32, offset);
2595                break;
2596            default:
2597                goto undef;
2598            }
2599            gen_store64(s, tmp32, src);
2600            switch ((insn >> 3) & 7) {
2601            case 3:
2602                tcg_gen_addi_i32(tmp32, tmp32, 8);
2603                tcg_gen_mov_i32(AREG(insn, 0), tmp32);
2604                break;
2605            case 4:
2606                tcg_gen_mov_i32(AREG(insn, 0), tmp32);
2607                break;
2608            }
2609            tcg_temp_free_i32(tmp32);
2610            return;
2611        case 6:
2612            opsize = OS_BYTE;
2613            gen_helper_f64_to_i32(tmp32, cpu_env, src);
2614            break;
2615        default:
2616            goto undef;
2617        }
2618        DEST_EA(env, insn, opsize, tmp32, NULL);
2619        tcg_temp_free_i32(tmp32);
2620        return;
2621    case 4: /* fmove to control register.  */
2622        switch ((ext >> 10) & 7) {
2623        case 4: /* FPCR */
2624            /* Not implemented.  Ignore writes.  */
2625            break;
2626        case 1: /* FPIAR */
2627        case 2: /* FPSR */
2628        default:
2629            cpu_abort(NULL, "Unimplemented: fmove to control %d",
2630                      (ext >> 10) & 7);
2631        }
2632        break;
2633    case 5: /* fmove from control register.  */
2634        switch ((ext >> 10) & 7) {
2635        case 4: /* FPCR */
2636            /* Not implemented.  Always return zero.  */
2637            tmp32 = tcg_const_i32(0);
2638            break;
2639        case 1: /* FPIAR */
2640        case 2: /* FPSR */
2641        default:
2642            cpu_abort(NULL, "Unimplemented: fmove from control %d",
2643                      (ext >> 10) & 7);
2644            goto undef;
2645        }
2646        DEST_EA(env, insn, OS_LONG, tmp32, NULL);
2647        break;
2648    case 6: /* fmovem */
2649    case 7:
2650        {
2651            TCGv addr;
2652            uint16_t mask;
2653            int i;
2654            if ((ext & 0x1f00) != 0x1000 || (ext & 0xff) == 0)
2655                goto undef;
2656            tmp32 = gen_lea(env, s, insn, OS_LONG);
2657            if (IS_NULL_QREG(tmp32)) {
2658                gen_addr_fault(s);
2659                return;
2660            }
2661            addr = tcg_temp_new_i32();
2662            tcg_gen_mov_i32(addr, tmp32);
2663            mask = 0x80;
2664            for (i = 0; i < 8; i++) {
2665                if (ext & mask) {
2666                    dest = FREG(i, 0);
2667                    if (ext & (1 << 13)) {
2668                        /* store */
2669                        tcg_gen_qemu_stf64(dest, addr, IS_USER(s));
2670                    } else {
2671                        /* load */
2672                        tcg_gen_qemu_ldf64(dest, addr, IS_USER(s));
2673                    }
2674                    if (ext & (mask - 1))
2675                        tcg_gen_addi_i32(addr, addr, 8);
2676                }
2677                mask >>= 1;
2678            }
2679            tcg_temp_free_i32(addr);
2680        }
2681        return;
2682    }
2683    if (ext & (1 << 14)) {
2684        /* Source effective address.  */
2685        switch ((ext >> 10) & 7) {
2686        case 0: opsize = OS_LONG; break;
2687        case 1: opsize = OS_SINGLE; break;
2688        case 4: opsize = OS_WORD; break;
2689        case 5: opsize = OS_DOUBLE; break;
2690        case 6: opsize = OS_BYTE; break;
2691        default:
2692            goto undef;
2693        }
2694        if (opsize == OS_DOUBLE) {
2695            tmp32 = tcg_temp_new_i32();
2696            tcg_gen_mov_i32(tmp32, AREG(insn, 0));
2697            switch ((insn >> 3) & 7) {
2698            case 2:
2699            case 3:
2700                break;
2701            case 4:
2702                tcg_gen_addi_i32(tmp32, tmp32, -8);
2703                break;
2704            case 5:
2705                offset = cpu_ldsw_code(env, s->pc);
2706                s->pc += 2;
2707                tcg_gen_addi_i32(tmp32, tmp32, offset);
2708                break;
2709            case 7:
2710                offset = cpu_ldsw_code(env, s->pc);
2711                offset += s->pc - 2;
2712                s->pc += 2;
2713                tcg_gen_addi_i32(tmp32, tmp32, offset);
2714                break;
2715            default:
2716                goto undef;
2717            }
2718            src = gen_load64(s, tmp32);
2719            switch ((insn >> 3) & 7) {
2720            case 3:
2721                tcg_gen_addi_i32(tmp32, tmp32, 8);
2722                tcg_gen_mov_i32(AREG(insn, 0), tmp32);
2723                break;
2724            case 4:
2725                tcg_gen_mov_i32(AREG(insn, 0), tmp32);
2726                break;
2727            }
2728            tcg_temp_free_i32(tmp32);
2729        } else {
2730            SRC_EA(env, tmp32, opsize, 1, NULL);
2731            src = tcg_temp_new_i64();
2732            switch (opsize) {
2733            case OS_LONG:
2734            case OS_WORD:
2735            case OS_BYTE:
2736                gen_helper_i32_to_f64(src, cpu_env, tmp32);
2737                break;
2738            case OS_SINGLE:
2739                gen_helper_f32_to_f64(src, cpu_env, tmp32);
2740                break;
2741            }
2742        }
2743    } else {
2744        /* Source register.  */
2745        src = FREG(ext, 10);
2746    }
2747    dest = FREG(ext, 7);
2748    res = tcg_temp_new_i64();
2749    if (opmode != 0x3a)
2750        tcg_gen_mov_f64(res, dest);
2751    round = 1;
2752    set_dest = 1;
2753    switch (opmode) {
2754    case 0: case 0x40: case 0x44: /* fmove */
2755        tcg_gen_mov_f64(res, src);
2756        break;
2757    case 1: /* fint */
2758        gen_helper_iround_f64(res, cpu_env, src);
2759        round = 0;
2760        break;
2761    case 3: /* fintrz */
2762        gen_helper_itrunc_f64(res, cpu_env, src);
2763        round = 0;
2764        break;
2765    case 4: case 0x41: case 0x45: /* fsqrt */
2766        gen_helper_sqrt_f64(res, cpu_env, src);
2767        break;
2768    case 0x18: case 0x58: case 0x5c: /* fabs */
2769        gen_helper_abs_f64(res, src);
2770        break;
2771    case 0x1a: case 0x5a: case 0x5e: /* fneg */
2772        gen_helper_chs_f64(res, src);
2773        break;
2774    case 0x20: case 0x60: case 0x64: /* fdiv */
2775        gen_helper_div_f64(res, cpu_env, res, src);
2776        break;
2777    case 0x22: case 0x62: case 0x66: /* fadd */
2778        gen_helper_add_f64(res, cpu_env, res, src);
2779        break;
2780    case 0x23: case 0x63: case 0x67: /* fmul */
2781        gen_helper_mul_f64(res, cpu_env, res, src);
2782        break;
2783    case 0x28: case 0x68: case 0x6c: /* fsub */
2784        gen_helper_sub_f64(res, cpu_env, res, src);
2785        break;
2786    case 0x38: /* fcmp */
2787        gen_helper_sub_cmp_f64(res, cpu_env, res, src);
2788        set_dest = 0;
2789        round = 0;
2790        break;
2791    case 0x3a: /* ftst */
2792        tcg_gen_mov_f64(res, src);
2793        set_dest = 0;
2794        round = 0;
2795        break;
2796    default:
2797        goto undef;
2798    }
2799    if (ext & (1 << 14)) {
2800        tcg_temp_free_i64(src);
2801    }
2802    if (round) {
2803        if (opmode & 0x40) {
2804            if ((opmode & 0x4) != 0)
2805                round = 0;
2806        } else if ((s->fpcr & M68K_FPCR_PREC) == 0) {
2807            round = 0;
2808        }
2809    }
2810    if (round) {
2811        TCGv tmp = tcg_temp_new_i32();
2812        gen_helper_f64_to_f32(tmp, cpu_env, res);
2813        gen_helper_f32_to_f64(res, cpu_env, tmp);
2814        tcg_temp_free_i32(tmp);
2815    }
2816    tcg_gen_mov_f64(QREG_FP_RESULT, res);
2817    if (set_dest) {
2818        tcg_gen_mov_f64(dest, res);
2819    }
2820    tcg_temp_free_i64(res);
2821    return;
2822undef:
2823    /* FIXME: Is this right for offset addressing modes?  */
2824    s->pc -= 2;
2825    disas_undef_fpu(env, s, insn);
2826}
2827
2828DISAS_INSN(fbcc)
2829{
2830    uint32_t offset;
2831    uint32_t addr;
2832    TCGv flag;
2833    TCGLabel *l1;
2834
2835    addr = s->pc;
2836    offset = cpu_ldsw_code(env, s->pc);
2837    s->pc += 2;
2838    if (insn & (1 << 6)) {
2839        offset = (offset << 16) | read_im16(env, s);
2840    }
2841
2842    l1 = gen_new_label();
2843    /* TODO: Raise BSUN exception.  */
2844    flag = tcg_temp_new();
2845    gen_helper_compare_f64(flag, cpu_env, QREG_FP_RESULT);
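    /* The helper returns -1, 0, 1 or 2 for less-than, equal, greater-than
       and unordered results respectively.  */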
2846    /* Jump to l1 if condition is true.  */
2847    switch (insn & 0xf) {
2848    case 0: /* f */
2849        break;
2850    case 1: /* eq (=0) */
2851        tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(0), l1);
2852        break;
2853    case 2: /* ogt (=1) */
2854        tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(1), l1);
2855        break;
2856    case 3: /* oge (=0 or =1) */
2857        tcg_gen_brcond_i32(TCG_COND_LEU, flag, tcg_const_i32(1), l1);
2858        break;
2859    case 4: /* olt (=-1) */
2860        tcg_gen_brcond_i32(TCG_COND_LT, flag, tcg_const_i32(0), l1);
2861        break;
2862    case 5: /* ole (=-1 or =0) */
2863        tcg_gen_brcond_i32(TCG_COND_LE, flag, tcg_const_i32(0), l1);
2864        break;
2865    case 6: /* ogl (=-1 or =1) */
2866        tcg_gen_andi_i32(flag, flag, 1);
2867        tcg_gen_brcond_i32(TCG_COND_NE, flag, tcg_const_i32(0), l1);
2868        break;
2869    case 7: /* or (=2) */
2870        tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(2), l1);
2871        break;
2872    case 8: /* un (<2) */
2873        tcg_gen_brcond_i32(TCG_COND_LT, flag, tcg_const_i32(2), l1);
2874        break;
2875    case 9: /* ueq (=0 or =2) */
2876        tcg_gen_andi_i32(flag, flag, 1);
2877        tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(0), l1);
2878        break;
2879    case 10: /* ugt (>0) */
2880        tcg_gen_brcond_i32(TCG_COND_GT, flag, tcg_const_i32(0), l1);
2881        break;
2882    case 11: /* uge (>=0) */
2883        tcg_gen_brcond_i32(TCG_COND_GE, flag, tcg_const_i32(0), l1);
2884        break;
2885    case 12: /* ult (=-1 or =2) */
2886        tcg_gen_brcond_i32(TCG_COND_GEU, flag, tcg_const_i32(2), l1);
2887        break;
2888    case 13: /* ule (!=1) */
2889        tcg_gen_brcond_i32(TCG_COND_NE, flag, tcg_const_i32(1), l1);
2890        break;
2891    case 14: /* ne (!=0) */
2892        tcg_gen_brcond_i32(TCG_COND_NE, flag, tcg_const_i32(0), l1);
2893        break;
2894    case 15: /* t */
2895        tcg_gen_br(l1);
2896        break;
2897    }
2898    gen_jmp_tb(s, 0, s->pc);
2899    gen_set_label(l1);
2900    gen_jmp_tb(s, 1, addr + offset);
2901}
2902
2903DISAS_INSN(frestore)
2904{
2905    M68kCPU *cpu = m68k_env_get_cpu(env);
2906
2907    /* TODO: Implement frestore.  */
2908    cpu_abort(CPU(cpu), "FRESTORE not implemented");
2909}
2910
2911DISAS_INSN(fsave)
2912{
2913    M68kCPU *cpu = m68k_env_get_cpu(env);
2914
2915    /* TODO: Implement fsave.  */
2916    cpu_abort(CPU(cpu), "FSAVE not implemented");
2917}
2918
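/* Extract the upper or lower 16-bit MAC operand from a 32-bit register,
   honouring the fractional/signed/unsigned mode selected by MACSR.  */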
2919static inline TCGv gen_mac_extract_word(DisasContext *s, TCGv val, int upper)
2920{
2921    TCGv tmp = tcg_temp_new();
2922    if (s->env->macsr & MACSR_FI) {
2923        if (upper)
2924            tcg_gen_andi_i32(tmp, val, 0xffff0000);
2925        else
2926            tcg_gen_shli_i32(tmp, val, 16);
2927    } else if (s->env->macsr & MACSR_SU) {
2928        if (upper)
2929            tcg_gen_sari_i32(tmp, val, 16);
2930        else
2931            tcg_gen_ext16s_i32(tmp, val);
2932    } else {
2933        if (upper)
2934            tcg_gen_shri_i32(tmp, val, 16);
2935        else
2936            tcg_gen_ext16u_i32(tmp, val);
2937    }
2938    return tmp;
2939}
2940
2941static void gen_mac_clear_flags(void)
2942{
2943    tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR,
2944                     ~(MACSR_V | MACSR_Z | MACSR_N | MACSR_EV));
2945}
2946
2947DISAS_INSN(mac)
2948{
2949    TCGv rx;
2950    TCGv ry;
2951    uint16_t ext;
2952    int acc;
2953    TCGv tmp;
2954    TCGv addr;
2955    TCGv loadval;
2956    int dual;
2957    TCGv saved_flags;
2958
2959    if (!s->done_mac) {
2960        s->mactmp = tcg_temp_new_i64();
2961        s->done_mac = 1;
2962    }
2963
2964    ext = read_im16(env, s);
2965
2966    acc = ((insn >> 7) & 1) | ((ext >> 3) & 2);
2967    dual = ((insn & 0x30) != 0 && (ext & 3) != 0);
2968    if (dual && !m68k_feature(s->env, M68K_FEATURE_CF_EMAC_B)) {
2969        disas_undef(env, s, insn);
2970        return;
2971    }
2972    if (insn & 0x30) {
2973        /* MAC with load.  */
2974        tmp = gen_lea(env, s, insn, OS_LONG);
2975        addr = tcg_temp_new();
2976        tcg_gen_and_i32(addr, tmp, QREG_MAC_MASK);
2977        /* Load the value now to ensure correct exception behavior.
2978           Perform writeback after reading the MAC inputs.  */
2979        loadval = gen_load(s, OS_LONG, addr, 0);
2980
2981        acc ^= 1;
2982        rx = (ext & 0x8000) ? AREG(ext, 12) : DREG(insn, 12);
2983        ry = (ext & 8) ? AREG(ext, 0) : DREG(ext, 0);
2984    } else {
2985        loadval = addr = NULL_QREG;
2986        rx = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
2987        ry = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
2988    }
2989
2990    gen_mac_clear_flags();
2991#if 0
2992    l1 = -1;
2993    /* Disabled because conditional branches clobber temporary vars.  */
2994    if ((s->env->macsr & MACSR_OMC) != 0 && !dual) {
2995        /* Skip the multiply if we know we will ignore it.  */
2996        l1 = gen_new_label();
2997        tmp = tcg_temp_new();
2998        tcg_gen_andi_i32(tmp, QREG_MACSR, 1 << (acc + 8));
2999        gen_op_jmp_nz32(tmp, l1);
3000    }
3001#endif
3002
3003    if ((ext & 0x0800) == 0) {
3004        /* Word.  */
3005        rx = gen_mac_extract_word(s, rx, (ext & 0x80) != 0);
3006        ry = gen_mac_extract_word(s, ry, (ext & 0x40) != 0);
3007    }
3008    if (s->env->macsr & MACSR_FI) {
3009        gen_helper_macmulf(s->mactmp, cpu_env, rx, ry);
3010    } else {
3011        if (s->env->macsr & MACSR_SU)
3012            gen_helper_macmuls(s->mactmp, cpu_env, rx, ry);
3013        else
3014            gen_helper_macmulu(s->mactmp, cpu_env, rx, ry);
3015        switch ((ext >> 9) & 3) {
3016        case 1:
3017            tcg_gen_shli_i64(s->mactmp, s->mactmp, 1);
3018            break;
3019        case 3:
3020            tcg_gen_shri_i64(s->mactmp, s->mactmp, 1);
3021            break;
3022        }
3023    }
3024
3025    if (dual) {
3026        /* Save the overflow flag from the multiply.  */
3027        saved_flags = tcg_temp_new();
3028        tcg_gen_mov_i32(saved_flags, QREG_MACSR);
3029    } else {
3030        saved_flags = NULL_QREG;
3031    }
3032
3033#if 0
3034    /* Disabled because conditional branches clobber temporary vars.  */
3035    if ((s->env->macsr & MACSR_OMC) != 0 && dual) {
3036        /* Skip the accumulate if the value is already saturated.  */
3037        l1 = gen_new_label();
3038        tmp = tcg_temp_new();
3039        gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc));
3040        gen_op_jmp_nz32(tmp, l1);
3041    }
3042#endif
3043
3044    if (insn & 0x100)
3045        tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
3046    else
3047        tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
3048
3049    if (s->env->macsr & MACSR_FI)
3050        gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
3051    else if (s->env->macsr & MACSR_SU)
3052        gen_helper_macsats(cpu_env, tcg_const_i32(acc));
3053    else
3054        gen_helper_macsatu(cpu_env, tcg_const_i32(acc));
3055
3056#if 0
3057    /* Disabled because conditional branches clobber temporary vars.  */
3058    if (l1 != -1)
3059        gen_set_label(l1);
3060#endif
3061
3062    if (dual) {
3063        /* Dual accumulate variant.  */
3064        acc = (ext >> 2) & 3;
3065        /* Restore the overflow flag from the multiplier.  */
3066        tcg_gen_mov_i32(QREG_MACSR, saved_flags);
3067#if 0
3068        /* Disabled because conditional branches clobber temporary vars.  */
3069        if ((s->env->macsr & MACSR_OMC) != 0) {
3070            /* Skip the accumulate if the value is already saturated.  */
3071            l1 = gen_new_label();
3072            tmp = tcg_temp_new();
3073            gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc));
3074            gen_op_jmp_nz32(tmp, l1);
3075        }
3076#endif
3077        if (ext & 2)
3078            tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
3079        else
3080            tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
3081        if (s->env->macsr & MACSR_FI)
3082            gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
3083        else if (s->env->macsr & MACSR_SU)
3084            gen_helper_macsats(cpu_env, tcg_const_i32(acc));
3085        else
3086            gen_helper_macsatu(cpu_env, tcg_const_i32(acc));
3087#if 0
3088        /* Disabled because conditional branches clobber temporary vars.  */
3089        if (l1 != -1)
3090            gen_set_label(l1);
3091#endif
3092    }
3093    gen_helper_mac_set_flags(cpu_env, tcg_const_i32(acc));
3094
3095    if (insn & 0x30) {
3096        TCGv rw;
3097        rw = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
3098        tcg_gen_mov_i32(rw, loadval);
3099        /* FIXME: Should address writeback happen with the masked or
3100           unmasked value?  */
3101        switch ((insn >> 3) & 7) {
3102        case 3: /* Post-increment.  */
3103            tcg_gen_addi_i32(AREG(insn, 0), addr, 4);
3104            break;
3105        case 4: /* Pre-decrement.  */
3106            tcg_gen_mov_i32(AREG(insn, 0), addr);
3107        }
3108    }
3109}
3110
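/* Move an accumulator to a general register, converting it according to
   the MACSR mode; bit 6 of the opcode additionally clears the accumulator.  */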
3111DISAS_INSN(from_mac)
3112{
3113    TCGv rx;
3114    TCGv_i64 acc;
3115    int accnum;
3116
3117    rx = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
3118    accnum = (insn >> 9) & 3;
3119    acc = MACREG(accnum);
3120    if (s->env->macsr & MACSR_FI) {
3121        gen_helper_get_macf(rx, cpu_env, acc);
3122    } else if ((s->env->macsr & MACSR_OMC) == 0) {
3123        tcg_gen_extrl_i64_i32(rx, acc);
3124    } else if (s->env->macsr & MACSR_SU) {
3125        gen_helper_get_macs(rx, acc);
3126    } else {
3127        gen_helper_get_macu(rx, acc);
3128    }
3129    if (insn & 0x40) {
3130        tcg_gen_movi_i64(acc, 0);
3131        tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
3132    }
3133}
3134
3135DISAS_INSN(move_mac)
3136{
3137    /* FIXME: This can be done without a helper.  */
3138    int src;
3139    TCGv dest;
3140    src = insn & 3;
3141    dest = tcg_const_i32((insn >> 9) & 3);
3142    gen_helper_mac_move(cpu_env, dest, tcg_const_i32(src));
3143    gen_mac_clear_flags();
3144    gen_helper_mac_set_flags(cpu_env, dest);
3145}
3146
3147DISAS_INSN(from_macsr)
3148{
3149    TCGv reg;
3150
3151    reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
3152    tcg_gen_mov_i32(reg, QREG_MACSR);
3153}
3154
3155DISAS_INSN(from_mask)
3156{
3157    TCGv reg;
3158    reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
3159    tcg_gen_mov_i32(reg, QREG_MAC_MASK);
3160}
3161
3162DISAS_INSN(from_mext)
3163{
3164    TCGv reg;
3165    TCGv acc;
3166    reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
3167    acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
3168    if (s->env->macsr & MACSR_FI)
3169        gen_helper_get_mac_extf(reg, cpu_env, acc);
3170    else
3171        gen_helper_get_mac_exti(reg, cpu_env, acc);
3172}
3173
3174DISAS_INSN(macsr_to_ccr)
3175{
3176    TCGv tmp = tcg_temp_new();
3177    tcg_gen_andi_i32(tmp, QREG_MACSR, 0xf);
3178    gen_helper_set_sr(cpu_env, tmp);
3179    tcg_temp_free(tmp);
3180    set_cc_op(s, CC_OP_FLAGS);
3181}
3182
3183DISAS_INSN(to_mac)
3184{
3185    TCGv_i64 acc;
3186    TCGv val;
3187    int accnum;
3188    accnum = (insn >> 9) & 3;
3189    acc = MACREG(accnum);
3190    SRC_EA(env, val, OS_LONG, 0, NULL);
3191    if (s->env->macsr & MACSR_FI) {
3192        tcg_gen_ext_i32_i64(acc, val);
3193        tcg_gen_shli_i64(acc, acc, 8);
3194    } else if (s->env->macsr & MACSR_SU) {
3195        tcg_gen_ext_i32_i64(acc, val);
3196    } else {
3197        tcg_gen_extu_i32_i64(acc, val);
3198    }
3199    tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
3200    gen_mac_clear_flags();
3201    gen_helper_mac_set_flags(cpu_env, tcg_const_i32(accnum));
3202}
3203
3204DISAS_INSN(to_macsr)
3205{
3206    TCGv val;
3207    SRC_EA(env, val, OS_LONG, 0, NULL);
3208    gen_helper_set_macsr(cpu_env, val);
3209    gen_lookup_tb(s);
3210}
3211
3212DISAS_INSN(to_mask)
3213{
3214    TCGv val;
3215    SRC_EA(env, val, OS_LONG, 0, NULL);
3216    tcg_gen_ori_i32(QREG_MAC_MASK, val, 0xffff0000);
3217}
3218
3219DISAS_INSN(to_mext)
3220{
3221    TCGv val;
3222    TCGv acc;
3223    SRC_EA(env, val, OS_LONG, 0, NULL);
3224    acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
3225    if (s->env->macsr & MACSR_FI)
3226        gen_helper_set_mac_extf(cpu_env, val, acc);
3227    else if (s->env->macsr & MACSR_SU)
3228        gen_helper_set_mac_exts(cpu_env, val, acc);
3229    else
3230        gen_helper_set_mac_extu(cpu_env, val, acc);
3231}
3232
3233static disas_proc opcode_table[65536];
3234
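/* Fill in opcode_table for every opcode value that matches opcode/mask.  */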
3235static void
3236register_opcode (disas_proc proc, uint16_t opcode, uint16_t mask)
3237{
3238  int i;
3239  int from;
3240  int to;
3241
3242  /* Sanity check.  All set bits must be included in the mask.  */
3243  if (opcode & ~mask) {
3244      fprintf(stderr,
3245              "qemu internal error: bogus opcode definition %04x/%04x\n",
3246              opcode, mask);
3247      abort();
3248  }
3249  /* This could probably be cleverer.  For now just optimize the case where
3250     the top bits are known.  */
3251  /* Find the first zero bit in the mask.  */
3252  i = 0x8000;
3253  while ((i & mask) != 0)
3254      i >>= 1;
3255  /* Iterate over all combinations of this and lower bits.  */
3256  if (i == 0)
3257      i = 1;
3258  else
3259      i <<= 1;
3260  from = opcode & ~(i - 1);
3261  to = from + i;
3262  for (i = from; i < to; i++) {
3263      if ((i & mask) == opcode)
3264          opcode_table[i] = proc;
3265  }
3266}
3267
3268/* Register m68k opcode handlers.  Order is important:
3269   later insns override earlier ones.  */
3270void register_m68k_insns (CPUM68KState *env)
3271{
3272    /* Build the opcode table only once to avoid
3273       multithreading issues. */
3274    if (opcode_table[0] != NULL) {
3275        return;
3276    }
3277
3278    /* Use BASE() for instructions available
3279     * on both CF_ISA_A and M68000.
3280     */
3281#define BASE(name, opcode, mask) \
3282    register_opcode(disas_##name, 0x##opcode, 0x##mask)
3283#define INSN(name, opcode, mask, feature) do { \
3284    if (m68k_feature(env, M68K_FEATURE_##feature)) \
3285        BASE(name, opcode, mask); \
3286    } while(0)
3287    BASE(undef,     0000, 0000);
3288    INSN(arith_im,  0080, fff8, CF_ISA_A);
3289    INSN(arith_im,  0000, ff00, M68000);
3290    INSN(undef,     00c0, ffc0, M68000);
3291    INSN(bitrev,    00c0, fff8, CF_ISA_APLUSC);
3292    BASE(bitop_reg, 0100, f1c0);
3293    BASE(bitop_reg, 0140, f1c0);
3294    BASE(bitop_reg, 0180, f1c0);
3295    BASE(bitop_reg, 01c0, f1c0);
3296    INSN(arith_im,  0280, fff8, CF_ISA_A);
3297    INSN(arith_im,  0200, ff00, M68000);
3298    INSN(undef,     02c0, ffc0, M68000);
3299    INSN(byterev,   02c0, fff8, CF_ISA_APLUSC);
3300    INSN(arith_im,  0480, fff8, CF_ISA_A);
3301    INSN(arith_im,  0400, ff00, M68000);
3302    INSN(undef,     04c0, ffc0, M68000);
3303    INSN(arith_im,  0600, ff00, M68000);
3304    INSN(undef,     06c0, ffc0, M68000);
3305    INSN(ff1,       04c0, fff8, CF_ISA_APLUSC);
3306    INSN(arith_im,  0680, fff8, CF_ISA_A);
3307    INSN(arith_im,  0c00, ff38, CF_ISA_A);
3308    INSN(arith_im,  0c00, ff00, M68000);
3309    BASE(bitop_im,  0800, ffc0);
3310    BASE(bitop_im,  0840, ffc0);
3311    BASE(bitop_im,  0880, ffc0);
3312    BASE(bitop_im,  08c0, ffc0);
3313    INSN(arith_im,  0a80, fff8, CF_ISA_A);
3314    INSN(arith_im,  0a00, ff00, M68000);
3315    BASE(move,      1000, f000);
3316    BASE(move,      2000, f000);
3317    BASE(move,      3000, f000);
3318    INSN(strldsr,   40e7, ffff, CF_ISA_APLUSC);
3319    INSN(negx,      4080, fff8, CF_ISA_A);
3320    INSN(negx,      4000, ff00, M68000);
3321    INSN(undef,     40c0, ffc0, M68000);
3322    INSN(move_from_sr, 40c0, fff8, CF_ISA_A);
3323    INSN(move_from_sr, 40c0, ffc0, M68000);
3324    BASE(lea,       41c0, f1c0);
3325    BASE(clr,       4200, ff00);
3326    BASE(undef,     42c0, ffc0);
3327    INSN(move_from_ccr, 42c0, fff8, CF_ISA_A);
3328    INSN(move_from_ccr, 42c0, ffc0, M68000);
3329    INSN(neg,       4480, fff8, CF_ISA_A);
3330    INSN(neg,       4400, ff00, M68000);
3331    INSN(undef,     44c0, ffc0, M68000);
3332    BASE(move_to_ccr, 44c0, ffc0);
3333    INSN(not,       4680, fff8, CF_ISA_A);
3334    INSN(not,       4600, ff00, M68000);
3335    INSN(undef,     46c0, ffc0, M68000);
3336    INSN(move_to_sr, 46c0, ffc0, CF_ISA_A);
3337    INSN(linkl,     4808, fff8, M68000);
3338    BASE(pea,       4840, ffc0);
3339    BASE(swap,      4840, fff8);
3340    INSN(bkpt,      4848, fff8, BKPT);
3341    BASE(movem,     48c0, fbc0);
3342    BASE(ext,       4880, fff8);
3343    BASE(ext,       48c0, fff8);
3344    BASE(ext,       49c0, fff8);
3345    BASE(tst,       4a00, ff00);
3346    INSN(tas,       4ac0, ffc0, CF_ISA_B);
3347    INSN(tas,       4ac0, ffc0, M68000);
3348    INSN(halt,      4ac8, ffff, CF_ISA_A);
3349    INSN(pulse,     4acc, ffff, CF_ISA_A);
3350    BASE(illegal,   4afc, ffff);
3351    INSN(mull,      4c00, ffc0, CF_ISA_A);
3352    INSN(mull,      4c00, ffc0, LONG_MULDIV);
3353    INSN(divl,      4c40, ffc0, CF_ISA_A);
3354    INSN(divl,      4c40, ffc0, LONG_MULDIV);
3355    INSN(sats,      4c80, fff8, CF_ISA_B);
3356    BASE(trap,      4e40, fff0);
3357    BASE(link,      4e50, fff8);
3358    BASE(unlk,      4e58, fff8);
3359    INSN(move_to_usp, 4e60, fff8, USP);
3360    INSN(move_from_usp, 4e68, fff8, USP);
3361    BASE(nop,       4e71, ffff);
3362    BASE(stop,      4e72, ffff);
3363    BASE(rte,       4e73, ffff);
3364    BASE(rts,       4e75, ffff);
3365    INSN(movec,     4e7b, ffff, CF_ISA_A);
3366    BASE(jump,      4e80, ffc0);
3367    BASE(jump,      4ec0, ffc0);
3368    INSN(addsubq,   5000, f080, M68000);
3369    BASE(addsubq,   5080, f0c0);
3370    INSN(scc,       50c0, f0f8, CF_ISA_A); /* Scc.B Dx   */
3371    INSN(scc,       50c0, f0c0, M68000);   /* Scc.B <EA> */
3372    INSN(dbcc,      50c8, f0f8, M68000);
3373    INSN(tpf,       51f8, fff8, CF_ISA_A);
3374
3375    /* Branch instructions.  */
3376    BASE(branch,    6000, f000);
3377    /* Disable long branch instructions, then add back the ones we want.  */
3378    BASE(undef,     60ff, f0ff); /* All long branches.  */
3379    INSN(branch,    60ff, f0ff, CF_ISA_B);
3380    INSN(undef,     60ff, ffff, CF_ISA_B); /* bra.l */
3381    INSN(branch,    60ff, ffff, BRAL);
3382    INSN(branch,    60ff, f0ff, BCCL);
3383
3384    BASE(moveq,     7000, f100);
3385    INSN(mvzs,      7100, f100, CF_ISA_B);
3386    BASE(or,        8000, f000);
3387    BASE(divw,      80c0, f0c0);
3388    BASE(addsub,    9000, f000);
3389    INSN(undef,     90c0, f0c0, CF_ISA_A);
3390    INSN(subx_reg,  9180, f1f8, CF_ISA_A);
3391    INSN(subx_reg,  9100, f138, M68000);
3392    INSN(subx_mem,  9108, f138, M68000);
3393    INSN(suba,      91c0, f1c0, CF_ISA_A);
3394    INSN(suba,      90c0, f0c0, M68000);
3395
3396    BASE(undef_mac, a000, f000);
3397    INSN(mac,       a000, f100, CF_EMAC);
3398    INSN(from_mac,  a180, f9b0, CF_EMAC);
3399    INSN(move_mac,  a110, f9fc, CF_EMAC);
3400    INSN(from_macsr,a980, f9f0, CF_EMAC);
3401    INSN(from_mask, ad80, fff0, CF_EMAC);
3402    INSN(from_mext, ab80, fbf0, CF_EMAC);
3403    INSN(macsr_to_ccr, a9c0, ffff, CF_EMAC);
3404    INSN(to_mac,    a100, f9c0, CF_EMAC);
3405    INSN(to_macsr,  a900, ffc0, CF_EMAC);
3406    INSN(to_mext,   ab00, fbc0, CF_EMAC);
3407    INSN(to_mask,   ad00, ffc0, CF_EMAC);
3408
3409    INSN(mov3q,     a140, f1c0, CF_ISA_B);
3410    INSN(cmp,       b000, f1c0, CF_ISA_B); /* cmp.b */
3411    INSN(cmp,       b040, f1c0, CF_ISA_B); /* cmp.w */
3412    INSN(cmpa,      b0c0, f1c0, CF_ISA_B); /* cmpa.w */
3413    INSN(cmp,       b080, f1c0, CF_ISA_A);
3414    INSN(cmpa,      b1c0, f1c0, CF_ISA_A);
3415    INSN(cmp,       b000, f100, M68000);
3416    INSN(eor,       b100, f100, M68000);
3417    INSN(cmpa,      b0c0, f0c0, M68000);
3418    INSN(eor,       b180, f1c0, CF_ISA_A);
3419    BASE(and,       c000, f000);
3420    INSN(exg_dd,    c140, f1f8, M68000);
3421    INSN(exg_aa,    c148, f1f8, M68000);
3422    INSN(exg_da,    c188, f1f8, M68000);
3423    BASE(mulw,      c0c0, f0c0);
3424    BASE(addsub,    d000, f000);
3425    INSN(undef,     d0c0, f0c0, CF_ISA_A);
3426    INSN(addx_reg,      d180, f1f8, CF_ISA_A);
3427    INSN(addx_reg,  d100, f138, M68000);
3428    INSN(addx_mem,  d108, f138, M68000);
3429    INSN(adda,      d1c0, f1c0, CF_ISA_A);
3430    INSN(adda,      d0c0, f0c0, M68000);
3431    INSN(shift_im,  e080, f0f0, CF_ISA_A);
3432    INSN(shift_reg, e0a0, f0f0, CF_ISA_A);
3433    INSN(undef_fpu, f000, f000, CF_ISA_A);
3434    INSN(fpu,       f200, ffc0, CF_FPU);
3435    INSN(fbcc,      f280, ffc0, CF_FPU);
3436    INSN(frestore,  f340, ffc0, CF_FPU);
3437    INSN(fsave,     f340, ffc0, CF_FPU);
3438    INSN(intouch,   f340, ffc0, CF_ISA_A);
3439    INSN(cpushl,    f428, ff38, CF_ISA_A);
3440    INSN(wddata,    fb00, ff00, CF_ISA_A);
3441    INSN(wdebug,    fbc0, ffc0, CF_ISA_A);
3442#undef INSN
3443}
3444
3445/* ??? Some of this implementation is not exception safe.  We should always
3446   write back the result to memory before setting the condition codes.  */
3447static void disas_m68k_insn(CPUM68KState * env, DisasContext *s)
3448{
3449    uint16_t insn;
3450
3451    insn = read_im16(env, s);
3452
3453    opcode_table[insn](env, s, insn);
3454}
3455
3456/* generate intermediate code for basic block 'tb'.  */
3457void gen_intermediate_code(CPUM68KState *env, TranslationBlock *tb)
3458{
3459    M68kCPU *cpu = m68k_env_get_cpu(env);
3460    CPUState *cs = CPU(cpu);
3461    DisasContext dc1, *dc = &dc1;
3462    target_ulong pc_start;
3463    int pc_offset;
3464    int num_insns;
3465    int max_insns;
3466
3467    /* generate intermediate code */
3468    pc_start = tb->pc;
3469
3470    dc->tb = tb;
3471
3472    dc->env = env;
3473    dc->is_jmp = DISAS_NEXT;
3474    dc->pc = pc_start;
3475    dc->cc_op = CC_OP_DYNAMIC;
3476    dc->cc_op_synced = 1;
3477    dc->singlestep_enabled = cs->singlestep_enabled;
3478    dc->fpcr = env->fpcr;
3479    dc->user = (env->sr & SR_S) == 0;
3480    dc->done_mac = 0;
3481    num_insns = 0;
3482    max_insns = tb->cflags & CF_COUNT_MASK;
3483    if (max_insns == 0) {
3484        max_insns = CF_COUNT_MASK;
3485    }
3486    if (max_insns > TCG_MAX_INSNS) {
3487        max_insns = TCG_MAX_INSNS;
3488    }
3489
3490    gen_tb_start(tb);
3491    do {
3492        pc_offset = dc->pc - pc_start;
3493        gen_throws_exception = NULL;
3494        tcg_gen_insn_start(dc->pc, dc->cc_op);
3495        num_insns++;
3496
3497        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
3498            gen_exception(dc, dc->pc, EXCP_DEBUG);
3499            dc->is_jmp = DISAS_JUMP;
3500            /* The address covered by the breakpoint must be included in
3501               [tb->pc, tb->pc + tb->size) in order for it to be
3502               properly cleared -- thus we increment the PC here so that
3503               the logic setting tb->size below does the right thing.  */
3504            dc->pc += 2;
3505            break;
3506        }
3507
3508        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
3509            gen_io_start();
3510        }
3511
3512        dc->insn_pc = dc->pc;
3513        disas_m68k_insn(env, dc);
3514    } while (!dc->is_jmp && !tcg_op_buf_full() &&
3515             !cs->singlestep_enabled &&
3516             !singlestep &&
3517             (pc_offset) < (TARGET_PAGE_SIZE - 32) &&
3518             num_insns < max_insns);
3519
3520    if (tb->cflags & CF_LAST_IO)
3521        gen_io_end();
3522    if (unlikely(cs->singlestep_enabled)) {
3523        /* Make sure the pc is updated, and raise a debug exception.  */
3524        if (!dc->is_jmp) {
3525            update_cc_op(dc);
3526            tcg_gen_movi_i32(QREG_PC, dc->pc);
3527        }
3528        gen_helper_raise_exception(cpu_env, tcg_const_i32(EXCP_DEBUG));
3529    } else {
3530        switch(dc->is_jmp) {
3531        case DISAS_NEXT:
3532            update_cc_op(dc);
3533            gen_jmp_tb(dc, 0, dc->pc);
3534            break;
3535        default:
3536        case DISAS_JUMP:
3537        case DISAS_UPDATE:
3538            update_cc_op(dc);
3539            /* indicate that the hash table must be used to find the next TB */
3540            tcg_gen_exit_tb(0);
3541            break;
3542        case DISAS_TB_JUMP:
3543            /* nothing more to generate */
3544            break;
3545        }
3546    }
3547    gen_tb_end(tb, num_insns);
3548
3549#ifdef DEBUG_DISAS
3550    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
3551        && qemu_log_in_addr_range(pc_start)) {
3552        qemu_log_lock();
3553        qemu_log("----------------\n");
3554        qemu_log("IN: %s\n", lookup_symbol(pc_start));
3555        log_target_disas(cs, pc_start, dc->pc - pc_start, 0);
3556        qemu_log("\n");
3557        qemu_log_unlock();
3558    }
3559#endif
3560    tb->size = dc->pc - pc_start;
3561    tb->icount = num_insns;
3562}
3563
3564void m68k_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
3565                         int flags)
3566{
3567    M68kCPU *cpu = M68K_CPU(cs);
3568    CPUM68KState *env = &cpu->env;
3569    int i;
3570    uint16_t sr;
3571    CPU_DoubleU u;
3572    for (i = 0; i < 8; i++)
3573      {
3574        u.d = env->fregs[i];
3575        cpu_fprintf(f, "D%d = %08x   A%d = %08x   F%d = %08x%08x (%12g)\n",
3576                    i, env->dregs[i], i, env->aregs[i],
3577                    i, u.l.upper, u.l.lower, *(double *)&u.d);
3578      }
3579    cpu_fprintf (f, "PC = %08x   ", env->pc);
3580    sr = env->sr | cpu_m68k_get_ccr(env);
3581    cpu_fprintf(f, "SR = %04x %c%c%c%c%c ", sr, (sr & CCF_X) ? 'X' : '-',
3582                (sr & CCF_N) ? 'N' : '-', (sr & CCF_Z) ? 'Z' : '-',
3583                (sr & CCF_V) ? 'V' : '-', (sr & CCF_C) ? 'C' : '-');
3584    cpu_fprintf (f, "FPRESULT = %12g\n", *(double *)&env->fp_result);
3585}
3586
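/* Restore state from the values recorded by tcg_gen_insn_start():
   data[0] holds the PC and data[1] the cc_op of the instruction.  */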
3587void restore_state_to_opc(CPUM68KState *env, TranslationBlock *tb,
3588                          target_ulong *data)
3589{
3590    int cc_op = data[1];
3591    env->pc = data[0];
3592    if (cc_op != CC_OP_DYNAMIC) {
3593        env->cc_op = cc_op;
3594    }
3595}
3596