qemu/target-m68k/translate.c
   1/*
   2 *  m68k translation
   3 *
   4 *  Copyright (c) 2005-2007 CodeSourcery
   5 *  Written by Paul Brook
   6 *
   7 * This library is free software; you can redistribute it and/or
   8 * modify it under the terms of the GNU Lesser General Public
   9 * License as published by the Free Software Foundation; either
  10 * version 2 of the License, or (at your option) any later version.
  11 *
  12 * This library is distributed in the hope that it will be useful,
  13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   15 * Lesser General Public License for more details.
  16 *
  17 * You should have received a copy of the GNU Lesser General Public
  18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
  19 */
  20
  21#include "qemu/osdep.h"
  22#include "cpu.h"
  23#include "disas/disas.h"
  24#include "tcg-op.h"
  25#include "qemu/log.h"
  26#include "exec/cpu_ldst.h"
  27
  28#include "exec/helper-proto.h"
  29#include "exec/helper-gen.h"
  30
  31#include "trace-tcg.h"
  32#include "exec/log.h"
  33
  34
  35//#define DEBUG_DISPATCH 1
  36
  37/* Fake floating point.  */
  38#define tcg_gen_mov_f64 tcg_gen_mov_i64
  39#define tcg_gen_qemu_ldf64 tcg_gen_qemu_ld64
  40#define tcg_gen_qemu_stf64 tcg_gen_qemu_st64
  41
  42#define DEFO32(name, offset) static TCGv QREG_##name;
  43#define DEFO64(name, offset) static TCGv_i64 QREG_##name;
  44#define DEFF64(name, offset) static TCGv_i64 QREG_##name;
  45#include "qregs.def"
  46#undef DEFO32
  47#undef DEFO64
  48#undef DEFF64
  49
  50static TCGv_i32 cpu_halted;
  51static TCGv_i32 cpu_exception_index;
  52
  53static TCGv_env cpu_env;
  54
  55static char cpu_reg_names[3*8*3 + 5*4];
  56static TCGv cpu_dregs[8];
  57static TCGv cpu_aregs[8];
  58static TCGv_i64 cpu_fregs[8];
  59static TCGv_i64 cpu_macc[4];
  60
  61#define DREG(insn, pos) cpu_dregs[((insn) >> (pos)) & 7]
  62#define AREG(insn, pos) cpu_aregs[((insn) >> (pos)) & 7]
  63#define FREG(insn, pos) cpu_fregs[((insn) >> (pos)) & 7]
  64#define MACREG(acc) cpu_macc[acc]
  65#define QREG_SP cpu_aregs[7]
  66
  67static TCGv NULL_QREG;
  68#define IS_NULL_QREG(t) (TCGV_EQUAL(t, NULL_QREG))
  69/* Used to distinguish stores from bad addressing modes.  */
  70static TCGv store_dummy;
  71
  72#include "exec/gen-icount.h"
  73
  74void m68k_tcg_init(void)
  75{
  76    char *p;
  77    int i;
  78
  79    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
  80
  81#define DEFO32(name, offset) \
  82    QREG_##name = tcg_global_mem_new_i32(cpu_env, \
  83        offsetof(CPUM68KState, offset), #name);
  84#define DEFO64(name, offset) \
  85    QREG_##name = tcg_global_mem_new_i64(cpu_env, \
  86        offsetof(CPUM68KState, offset), #name);
  87#define DEFF64(name, offset) DEFO64(name, offset)
  88#include "qregs.def"
  89#undef DEFO32
  90#undef DEFO64
  91#undef DEFF64
  92
  93    cpu_halted = tcg_global_mem_new_i32(cpu_env,
  94                                        -offsetof(M68kCPU, env) +
  95                                        offsetof(CPUState, halted), "HALTED");
  96    cpu_exception_index = tcg_global_mem_new_i32(cpu_env,
  97                                                 -offsetof(M68kCPU, env) +
  98                                                 offsetof(CPUState, exception_index),
  99                                                 "EXCEPTION");
 100
 101    p = cpu_reg_names;
 102    for (i = 0; i < 8; i++) {
 103        sprintf(p, "D%d", i);
 104        cpu_dregs[i] = tcg_global_mem_new(cpu_env,
 105                                          offsetof(CPUM68KState, dregs[i]), p);
 106        p += 3;
 107        sprintf(p, "A%d", i);
 108        cpu_aregs[i] = tcg_global_mem_new(cpu_env,
 109                                          offsetof(CPUM68KState, aregs[i]), p);
 110        p += 3;
 111        sprintf(p, "F%d", i);
 112        cpu_fregs[i] = tcg_global_mem_new_i64(cpu_env,
 113                                          offsetof(CPUM68KState, fregs[i]), p);
 114        p += 3;
 115    }
 116    for (i = 0; i < 4; i++) {
 117        sprintf(p, "ACC%d", i);
 118        cpu_macc[i] = tcg_global_mem_new_i64(cpu_env,
 119                                         offsetof(CPUM68KState, macc[i]), p);
 120        p += 5;
 121    }
 122
 123    NULL_QREG = tcg_global_mem_new(cpu_env, -4, "NULL");
 124    store_dummy = tcg_global_mem_new(cpu_env, -8, "NULL");
 125}
 126
 127/* internal defines */
 128typedef struct DisasContext {
 129    CPUM68KState *env;
 130    target_ulong insn_pc; /* Start of the current instruction.  */
 131    target_ulong pc;
 132    int is_jmp;
 133    int cc_op;
 134    int user;
 135    uint32_t fpcr;
 136    struct TranslationBlock *tb;
 137    int singlestep_enabled;
 138    TCGv_i64 mactmp;
 139    int done_mac;
 140} DisasContext;
 141
 142#define DISAS_JUMP_NEXT 4
 143
 144#if defined(CONFIG_USER_ONLY)
 145#define IS_USER(s) 1
 146#else
 147#define IS_USER(s) s->user
 148#endif
 149
 150/* XXX: move that elsewhere */
 151/* ??? Fix exceptions.  */
 152static void *gen_throws_exception;
 153#define gen_last_qop NULL
 154
 155#define OS_BYTE 0
 156#define OS_WORD 1
 157#define OS_LONG 2
 158#define OS_SINGLE 4
 159#define OS_DOUBLE 5
 160
 161typedef void (*disas_proc)(CPUM68KState *env, DisasContext *s, uint16_t insn);
 162
 163#ifdef DEBUG_DISPATCH
 164#define DISAS_INSN(name)                                                \
 165    static void real_disas_##name(CPUM68KState *env, DisasContext *s,   \
 166                                  uint16_t insn);                       \
 167    static void disas_##name(CPUM68KState *env, DisasContext *s,        \
 168                             uint16_t insn)                             \
 169    {                                                                   \
 170        qemu_log("Dispatch " #name "\n");                               \
  171        real_disas_##name(env, s, insn);                                \
 172    }                                                                   \
 173    static void real_disas_##name(CPUM68KState *env, DisasContext *s,   \
 174                                  uint16_t insn)
 175#else
 176#define DISAS_INSN(name)                                                \
 177    static void disas_##name(CPUM68KState *env, DisasContext *s,        \
 178                             uint16_t insn)
 179#endif
 180
  181/* Generate a load from the specified address.  Narrow values are
  182   sign or zero extended to full register width, as selected by SIGN.  */
 183static inline TCGv gen_load(DisasContext * s, int opsize, TCGv addr, int sign)
 184{
 185    TCGv tmp;
 186    int index = IS_USER(s);
 187    tmp = tcg_temp_new_i32();
 188    switch(opsize) {
 189    case OS_BYTE:
 190        if (sign)
 191            tcg_gen_qemu_ld8s(tmp, addr, index);
 192        else
 193            tcg_gen_qemu_ld8u(tmp, addr, index);
 194        break;
 195    case OS_WORD:
 196        if (sign)
 197            tcg_gen_qemu_ld16s(tmp, addr, index);
 198        else
 199            tcg_gen_qemu_ld16u(tmp, addr, index);
 200        break;
 201    case OS_LONG:
 202    case OS_SINGLE:
 203        tcg_gen_qemu_ld32u(tmp, addr, index);
 204        break;
 205    default:
 206        g_assert_not_reached();
 207    }
 208    gen_throws_exception = gen_last_qop;
 209    return tmp;
 210}
 211
 212static inline TCGv_i64 gen_load64(DisasContext * s, TCGv addr)
 213{
 214    TCGv_i64 tmp;
 215    int index = IS_USER(s);
 216    tmp = tcg_temp_new_i64();
 217    tcg_gen_qemu_ldf64(tmp, addr, index);
 218    gen_throws_exception = gen_last_qop;
 219    return tmp;
 220}
 221
 222/* Generate a store.  */
 223static inline void gen_store(DisasContext *s, int opsize, TCGv addr, TCGv val)
 224{
 225    int index = IS_USER(s);
 226    switch(opsize) {
 227    case OS_BYTE:
 228        tcg_gen_qemu_st8(val, addr, index);
 229        break;
 230    case OS_WORD:
 231        tcg_gen_qemu_st16(val, addr, index);
 232        break;
 233    case OS_LONG:
 234    case OS_SINGLE:
 235        tcg_gen_qemu_st32(val, addr, index);
 236        break;
 237    default:
 238        g_assert_not_reached();
 239    }
 240    gen_throws_exception = gen_last_qop;
 241}
 242
 243static inline void gen_store64(DisasContext *s, TCGv addr, TCGv_i64 val)
 244{
 245    int index = IS_USER(s);
 246    tcg_gen_qemu_stf64(val, addr, index);
 247    gen_throws_exception = gen_last_qop;
 248}
 249
 250typedef enum {
 251    EA_STORE,
 252    EA_LOADU,
 253    EA_LOADS
 254} ea_what;
 255
  256/* Generate an unsigned load if WHAT is EA_LOADU, a signed load if WHAT is
  257   EA_LOADS, otherwise generate a store.  */
 258static TCGv gen_ldst(DisasContext *s, int opsize, TCGv addr, TCGv val,
 259                     ea_what what)
 260{
 261    if (what == EA_STORE) {
 262        gen_store(s, opsize, addr, val);
 263        return store_dummy;
 264    } else {
 265        return gen_load(s, opsize, addr, what == EA_LOADS);
 266    }
 267}
 268
 269/* Read a 32-bit immediate constant.  */
 270static inline uint32_t read_im32(CPUM68KState *env, DisasContext *s)
 271{
 272    uint32_t im;
 273    im = ((uint32_t)cpu_lduw_code(env, s->pc)) << 16;
 274    s->pc += 2;
 275    im |= cpu_lduw_code(env, s->pc);
 276    s->pc += 2;
 277    return im;
 278}
 279
  280/* Calculate an address index.  */
 281static TCGv gen_addr_index(uint16_t ext, TCGv tmp)
 282{
 283    TCGv add;
 284    int scale;
 285
 286    add = (ext & 0x8000) ? AREG(ext, 12) : DREG(ext, 12);
 287    if ((ext & 0x800) == 0) {
 288        tcg_gen_ext16s_i32(tmp, add);
 289        add = tmp;
 290    }
 291    scale = (ext >> 9) & 3;
 292    if (scale != 0) {
 293        tcg_gen_shli_i32(tmp, add, scale);
 294        add = tmp;
 295    }
 296    return add;
 297}
 298
  299/* Handle a base + index + displacement effective address.
  300   A NULL_QREG base means pc-relative.  */
 301static TCGv gen_lea_indexed(CPUM68KState *env, DisasContext *s, TCGv base)
 302{
 303    uint32_t offset;
 304    uint16_t ext;
 305    TCGv add;
 306    TCGv tmp;
 307    uint32_t bd, od;
 308
 309    offset = s->pc;
 310    ext = cpu_lduw_code(env, s->pc);
 311    s->pc += 2;
 312
 313    if ((ext & 0x800) == 0 && !m68k_feature(s->env, M68K_FEATURE_WORD_INDEX))
 314        return NULL_QREG;
 315
 316    if (ext & 0x100) {
 317        /* full extension word format */
 318        if (!m68k_feature(s->env, M68K_FEATURE_EXT_FULL))
 319            return NULL_QREG;
 320
 321        if ((ext & 0x30) > 0x10) {
 322            /* base displacement */
 323            if ((ext & 0x30) == 0x20) {
 324                bd = (int16_t)cpu_lduw_code(env, s->pc);
 325                s->pc += 2;
 326            } else {
 327                bd = read_im32(env, s);
 328            }
 329        } else {
 330            bd = 0;
 331        }
 332        tmp = tcg_temp_new();
 333        if ((ext & 0x44) == 0) {
 334            /* pre-index */
 335            add = gen_addr_index(ext, tmp);
 336        } else {
 337            add = NULL_QREG;
 338        }
 339        if ((ext & 0x80) == 0) {
 340            /* base not suppressed */
 341            if (IS_NULL_QREG(base)) {
 342                base = tcg_const_i32(offset + bd);
 343                bd = 0;
 344            }
 345            if (!IS_NULL_QREG(add)) {
 346                tcg_gen_add_i32(tmp, add, base);
 347                add = tmp;
 348            } else {
 349                add = base;
 350            }
 351        }
 352        if (!IS_NULL_QREG(add)) {
 353            if (bd != 0) {
 354                tcg_gen_addi_i32(tmp, add, bd);
 355                add = tmp;
 356            }
 357        } else {
 358            add = tcg_const_i32(bd);
 359        }
 360        if ((ext & 3) != 0) {
 361            /* memory indirect */
 362            base = gen_load(s, OS_LONG, add, 0);
 363            if ((ext & 0x44) == 4) {
 364                add = gen_addr_index(ext, tmp);
 365                tcg_gen_add_i32(tmp, add, base);
 366                add = tmp;
 367            } else {
 368                add = base;
 369            }
 370            if ((ext & 3) > 1) {
 371                /* outer displacement */
 372                if ((ext & 3) == 2) {
 373                    od = (int16_t)cpu_lduw_code(env, s->pc);
 374                    s->pc += 2;
 375                } else {
 376                    od = read_im32(env, s);
 377                }
 378            } else {
 379                od = 0;
 380            }
 381            if (od != 0) {
 382                tcg_gen_addi_i32(tmp, add, od);
 383                add = tmp;
 384            }
 385        }
 386    } else {
 387        /* brief extension word format */
 388        tmp = tcg_temp_new();
 389        add = gen_addr_index(ext, tmp);
 390        if (!IS_NULL_QREG(base)) {
 391            tcg_gen_add_i32(tmp, add, base);
 392            if ((int8_t)ext)
 393                tcg_gen_addi_i32(tmp, tmp, (int8_t)ext);
 394        } else {
 395            tcg_gen_addi_i32(tmp, add, offset + (int8_t)ext);
 396        }
 397        add = tmp;
 398    }
 399    return add;
 400}
 401
 402/* Update the CPU env CC_OP state.  */
 403static inline void gen_flush_cc_op(DisasContext *s)
 404{
 405    if (s->cc_op != CC_OP_DYNAMIC)
 406        tcg_gen_movi_i32(QREG_CC_OP, s->cc_op);
 407}
 408
 409/* Evaluate all the CC flags.  */
 410static inline void gen_flush_flags(DisasContext *s)
 411{
 412    if (s->cc_op == CC_OP_FLAGS)
 413        return;
 414    gen_flush_cc_op(s);
 415    gen_helper_flush_flags(cpu_env, QREG_CC_OP);
 416    s->cc_op = CC_OP_FLAGS;
 417}
 418
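/* Set the condition codes for a logical result: record VAL so that N and Z
   can be evaluated lazily.  */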
 419static void gen_logic_cc(DisasContext *s, TCGv val)
 420{
 421    tcg_gen_mov_i32(QREG_CC_DEST, val);
 422    s->cc_op = CC_OP_LOGIC;
 423}
 424
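/* Record the result and source operand of an add/sub for lazy flag
   evaluation.  */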
 425static void gen_update_cc_add(TCGv dest, TCGv src)
 426{
 427    tcg_gen_mov_i32(QREG_CC_DEST, dest);
 428    tcg_gen_mov_i32(QREG_CC_SRC, src);
 429}
 430
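/* Return the size in bytes of operand size OPSIZE.  */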
 431static inline int opsize_bytes(int opsize)
 432{
 433    switch (opsize) {
 434    case OS_BYTE: return 1;
 435    case OS_WORD: return 2;
 436    case OS_LONG: return 4;
 437    case OS_SINGLE: return 4;
 438    case OS_DOUBLE: return 8;
 439    default:
 440        g_assert_not_reached();
 441    }
 442}
 443
  444/* Assign value to a register.  If the width is less than the register width,
  445   only the low part of the register is set.  */
 446static void gen_partset_reg(int opsize, TCGv reg, TCGv val)
 447{
 448    TCGv tmp;
 449    switch (opsize) {
 450    case OS_BYTE:
 451        tcg_gen_andi_i32(reg, reg, 0xffffff00);
 452        tmp = tcg_temp_new();
 453        tcg_gen_ext8u_i32(tmp, val);
 454        tcg_gen_or_i32(reg, reg, tmp);
 455        break;
 456    case OS_WORD:
 457        tcg_gen_andi_i32(reg, reg, 0xffff0000);
 458        tmp = tcg_temp_new();
 459        tcg_gen_ext16u_i32(tmp, val);
 460        tcg_gen_or_i32(reg, reg, tmp);
 461        break;
 462    case OS_LONG:
 463    case OS_SINGLE:
 464        tcg_gen_mov_i32(reg, val);
 465        break;
 466    default:
 467        g_assert_not_reached();
 468    }
 469}
 470
 471/* Sign or zero extend a value.  */
 472static inline TCGv gen_extend(TCGv val, int opsize, int sign)
 473{
 474    TCGv tmp;
 475
 476    switch (opsize) {
 477    case OS_BYTE:
 478        tmp = tcg_temp_new();
 479        if (sign)
 480            tcg_gen_ext8s_i32(tmp, val);
 481        else
 482            tcg_gen_ext8u_i32(tmp, val);
 483        break;
 484    case OS_WORD:
 485        tmp = tcg_temp_new();
 486        if (sign)
 487            tcg_gen_ext16s_i32(tmp, val);
 488        else
 489            tcg_gen_ext16u_i32(tmp, val);
 490        break;
 491    case OS_LONG:
 492    case OS_SINGLE:
 493        tmp = val;
 494        break;
 495    default:
 496        g_assert_not_reached();
 497    }
 498    return tmp;
 499}
 500
 501/* Generate code for an "effective address".  Does not adjust the base
 502   register for autoincrement addressing modes.  */
 503static TCGv gen_lea(CPUM68KState *env, DisasContext *s, uint16_t insn,
 504                    int opsize)
 505{
 506    TCGv reg;
 507    TCGv tmp;
 508    uint16_t ext;
 509    uint32_t offset;
 510
 511    switch ((insn >> 3) & 7) {
 512    case 0: /* Data register direct.  */
 513    case 1: /* Address register direct.  */
 514        return NULL_QREG;
 515    case 2: /* Indirect register */
 516    case 3: /* Indirect postincrement.  */
 517        return AREG(insn, 0);
  518    case 4: /* Indirect predecrement.  */
 519        reg = AREG(insn, 0);
 520        tmp = tcg_temp_new();
 521        tcg_gen_subi_i32(tmp, reg, opsize_bytes(opsize));
 522        return tmp;
 523    case 5: /* Indirect displacement.  */
 524        reg = AREG(insn, 0);
 525        tmp = tcg_temp_new();
 526        ext = cpu_lduw_code(env, s->pc);
 527        s->pc += 2;
 528        tcg_gen_addi_i32(tmp, reg, (int16_t)ext);
 529        return tmp;
 530    case 6: /* Indirect index + displacement.  */
 531        reg = AREG(insn, 0);
 532        return gen_lea_indexed(env, s, reg);
 533    case 7: /* Other */
 534        switch (insn & 7) {
 535        case 0: /* Absolute short.  */
 536            offset = cpu_ldsw_code(env, s->pc);
 537            s->pc += 2;
 538            return tcg_const_i32(offset);
 539        case 1: /* Absolute long.  */
 540            offset = read_im32(env, s);
 541            return tcg_const_i32(offset);
 542        case 2: /* pc displacement  */
 543            offset = s->pc;
 544            offset += cpu_ldsw_code(env, s->pc);
 545            s->pc += 2;
 546            return tcg_const_i32(offset);
 547        case 3: /* pc index+displacement.  */
 548            return gen_lea_indexed(env, s, NULL_QREG);
 549        case 4: /* Immediate.  */
 550        default:
 551            return NULL_QREG;
 552        }
 553    }
 554    /* Should never happen.  */
 555    return NULL_QREG;
 556}
 557
  558/* Helper function for gen_ea.  Reuse the computed address between the read
  559   and write passes of read/write operands.  */
 560static inline TCGv gen_ea_once(CPUM68KState *env, DisasContext *s,
 561                               uint16_t insn, int opsize, TCGv val,
 562                               TCGv *addrp, ea_what what)
 563{
 564    TCGv tmp;
 565
 566    if (addrp && what == EA_STORE) {
 567        tmp = *addrp;
 568    } else {
 569        tmp = gen_lea(env, s, insn, opsize);
 570        if (IS_NULL_QREG(tmp))
 571            return tmp;
 572        if (addrp)
 573            *addrp = tmp;
 574    }
 575    return gen_ldst(s, opsize, tmp, val, what);
 576}
 577
  578/* Generate code to load/store a value from/into an EA.  WHAT selects a
  579   store (EA_STORE) or a sign/zero extending load (EA_LOADS/EA_LOADU).
  580   ADDRP is non-null for read/write operands.  */
 581static TCGv gen_ea(CPUM68KState *env, DisasContext *s, uint16_t insn,
 582                   int opsize, TCGv val, TCGv *addrp, ea_what what)
 583{
 584    TCGv reg;
 585    TCGv result;
 586    uint32_t offset;
 587
 588    switch ((insn >> 3) & 7) {
 589    case 0: /* Data register direct.  */
 590        reg = DREG(insn, 0);
 591        if (what == EA_STORE) {
 592            gen_partset_reg(opsize, reg, val);
 593            return store_dummy;
 594        } else {
 595            return gen_extend(reg, opsize, what == EA_LOADS);
 596        }
 597    case 1: /* Address register direct.  */
 598        reg = AREG(insn, 0);
 599        if (what == EA_STORE) {
 600            tcg_gen_mov_i32(reg, val);
 601            return store_dummy;
 602        } else {
 603            return gen_extend(reg, opsize, what == EA_LOADS);
 604        }
 605    case 2: /* Indirect register */
 606        reg = AREG(insn, 0);
 607        return gen_ldst(s, opsize, reg, val, what);
 608    case 3: /* Indirect postincrement.  */
 609        reg = AREG(insn, 0);
 610        result = gen_ldst(s, opsize, reg, val, what);
 611        /* ??? This is not exception safe.  The instruction may still
 612           fault after this point.  */
 613        if (what == EA_STORE || !addrp)
 614            tcg_gen_addi_i32(reg, reg, opsize_bytes(opsize));
 615        return result;
  616    case 4: /* Indirect predecrement.  */
 617        {
 618            TCGv tmp;
 619            if (addrp && what == EA_STORE) {
 620                tmp = *addrp;
 621            } else {
 622                tmp = gen_lea(env, s, insn, opsize);
 623                if (IS_NULL_QREG(tmp))
 624                    return tmp;
 625                if (addrp)
 626                    *addrp = tmp;
 627            }
 628            result = gen_ldst(s, opsize, tmp, val, what);
 629            /* ??? This is not exception safe.  The instruction may still
 630               fault after this point.  */
 631            if (what == EA_STORE || !addrp) {
 632                reg = AREG(insn, 0);
 633                tcg_gen_mov_i32(reg, tmp);
 634            }
 635        }
 636        return result;
 637    case 5: /* Indirect displacement.  */
 638    case 6: /* Indirect index + displacement.  */
 639        return gen_ea_once(env, s, insn, opsize, val, addrp, what);
 640    case 7: /* Other */
 641        switch (insn & 7) {
 642        case 0: /* Absolute short.  */
 643        case 1: /* Absolute long.  */
 644        case 2: /* pc displacement  */
 645        case 3: /* pc index+displacement.  */
 646            return gen_ea_once(env, s, insn, opsize, val, addrp, what);
 647        case 4: /* Immediate.  */
 648            /* Sign extend values for consistency.  */
 649            switch (opsize) {
 650            case OS_BYTE:
 651                if (what == EA_LOADS) {
 652                    offset = cpu_ldsb_code(env, s->pc + 1);
 653                } else {
 654                    offset = cpu_ldub_code(env, s->pc + 1);
 655                }
 656                s->pc += 2;
 657                break;
 658            case OS_WORD:
 659                if (what == EA_LOADS) {
 660                    offset = cpu_ldsw_code(env, s->pc);
 661                } else {
 662                    offset = cpu_lduw_code(env, s->pc);
 663                }
 664                s->pc += 2;
 665                break;
 666            case OS_LONG:
 667                offset = read_im32(env, s);
 668                break;
 669            default:
 670                g_assert_not_reached();
 671            }
 672            return tcg_const_i32(offset);
 673        default:
 674            return NULL_QREG;
 675        }
 676    }
 677    /* Should never happen.  */
 678    return NULL_QREG;
 679}
 680
 681/* This generates a conditional branch, clobbering all temporaries.  */
 682static void gen_jmpcc(DisasContext *s, int cond, TCGLabel *l1)
 683{
 684    TCGv tmp;
 685
 686    /* TODO: Optimize compare/branch pairs rather than always flushing
 687       flag state to CC_OP_FLAGS.  */
 688    gen_flush_flags(s);
 689    switch (cond) {
 690    case 0: /* T */
 691        tcg_gen_br(l1);
 692        break;
 693    case 1: /* F */
 694        break;
 695    case 2: /* HI (!C && !Z) */
 696        tmp = tcg_temp_new();
 697        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_C | CCF_Z);
 698        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
 699        break;
 700    case 3: /* LS (C || Z) */
 701        tmp = tcg_temp_new();
 702        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_C | CCF_Z);
 703        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
 704        break;
 705    case 4: /* CC (!C) */
 706        tmp = tcg_temp_new();
 707        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_C);
 708        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
 709        break;
 710    case 5: /* CS (C) */
 711        tmp = tcg_temp_new();
 712        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_C);
 713        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
 714        break;
 715    case 6: /* NE (!Z) */
 716        tmp = tcg_temp_new();
 717        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_Z);
 718        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
 719        break;
 720    case 7: /* EQ (Z) */
 721        tmp = tcg_temp_new();
 722        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_Z);
 723        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
 724        break;
 725    case 8: /* VC (!V) */
 726        tmp = tcg_temp_new();
 727        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_V);
 728        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
 729        break;
 730    case 9: /* VS (V) */
 731        tmp = tcg_temp_new();
 732        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_V);
 733        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
 734        break;
 735    case 10: /* PL (!N) */
 736        tmp = tcg_temp_new();
 737        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_N);
 738        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
 739        break;
 740    case 11: /* MI (N) */
 741        tmp = tcg_temp_new();
 742        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_N);
 743        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
 744        break;
 745    case 12: /* GE (!(N ^ V)) */
 746        tmp = tcg_temp_new();
 747        assert(CCF_V == (CCF_N >> 2));
 748        tcg_gen_shri_i32(tmp, QREG_CC_DEST, 2);
 749        tcg_gen_xor_i32(tmp, tmp, QREG_CC_DEST);
 750        tcg_gen_andi_i32(tmp, tmp, CCF_V);
 751        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
 752        break;
 753    case 13: /* LT (N ^ V) */
 754        tmp = tcg_temp_new();
 755        assert(CCF_V == (CCF_N >> 2));
 756        tcg_gen_shri_i32(tmp, QREG_CC_DEST, 2);
 757        tcg_gen_xor_i32(tmp, tmp, QREG_CC_DEST);
 758        tcg_gen_andi_i32(tmp, tmp, CCF_V);
 759        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
 760        break;
 761    case 14: /* GT (!(Z || (N ^ V))) */
 762        tmp = tcg_temp_new();
 763        assert(CCF_V == (CCF_N >> 2));
 764        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_N);
 765        tcg_gen_shri_i32(tmp, tmp, 2);
 766        tcg_gen_xor_i32(tmp, tmp, QREG_CC_DEST);
 767        tcg_gen_andi_i32(tmp, tmp, CCF_V | CCF_Z);
 768        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
 769        break;
 770    case 15: /* LE (Z || (N ^ V)) */
 771        tmp = tcg_temp_new();
 772        assert(CCF_V == (CCF_N >> 2));
 773        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_N);
 774        tcg_gen_shri_i32(tmp, tmp, 2);
 775        tcg_gen_xor_i32(tmp, tmp, QREG_CC_DEST);
 776        tcg_gen_andi_i32(tmp, tmp, CCF_V | CCF_Z);
 777        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
 778        break;
 779    default:
  780        /* Should never happen.  */
 781        abort();
 782    }
 783}
 784
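/* Scc: set the low byte of a data register to 0xff if the condition holds,
   otherwise clear it to zero.  */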
 785DISAS_INSN(scc)
 786{
 787    TCGLabel *l1;
 788    int cond;
 789    TCGv reg;
 790
 791    l1 = gen_new_label();
 792    cond = (insn >> 8) & 0xf;
 793    reg = DREG(insn, 0);
 794    tcg_gen_andi_i32(reg, reg, 0xffffff00);
 795    /* This is safe because we modify the reg directly, with no other values
 796       live.  */
 797    gen_jmpcc(s, cond ^ 1, l1);
 798    tcg_gen_ori_i32(reg, reg, 0xff);
 799    gen_set_label(l1);
 800}
 801
 802/* Force a TB lookup after an instruction that changes the CPU state.  */
 803static void gen_lookup_tb(DisasContext *s)
 804{
 805    gen_flush_cc_op(s);
 806    tcg_gen_movi_i32(QREG_PC, s->pc);
 807    s->is_jmp = DISAS_UPDATE;
 808}
 809
 810/* Generate a jump to an immediate address.  */
 811static void gen_jmp_im(DisasContext *s, uint32_t dest)
 812{
 813    gen_flush_cc_op(s);
 814    tcg_gen_movi_i32(QREG_PC, dest);
 815    s->is_jmp = DISAS_JUMP;
 816}
 817
 818/* Generate a jump to the address in qreg DEST.  */
 819static void gen_jmp(DisasContext *s, TCGv dest)
 820{
 821    gen_flush_cc_op(s);
 822    tcg_gen_mov_i32(QREG_PC, dest);
 823    s->is_jmp = DISAS_JUMP;
 824}
 825
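/* Raise exception NR with the PC set to address WHERE.  */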
 826static void gen_exception(DisasContext *s, uint32_t where, int nr)
 827{
 828    gen_flush_cc_op(s);
 829    gen_jmp_im(s, where);
 830    gen_helper_raise_exception(cpu_env, tcg_const_i32(nr));
 831}
 832
 833static inline void gen_addr_fault(DisasContext *s)
 834{
 835    gen_exception(s, s->insn_pc, EXCP_ADDRESS);
 836}
 837
 838#define SRC_EA(env, result, opsize, op_sign, addrp) do {                \
 839        result = gen_ea(env, s, insn, opsize, NULL_QREG, addrp,         \
 840                        op_sign ? EA_LOADS : EA_LOADU);                 \
 841        if (IS_NULL_QREG(result)) {                                     \
 842            gen_addr_fault(s);                                          \
 843            return;                                                     \
 844        }                                                               \
 845    } while (0)
 846
 847#define DEST_EA(env, insn, opsize, val, addrp) do {                     \
 848        TCGv ea_result = gen_ea(env, s, insn, opsize, val, addrp, EA_STORE); \
 849        if (IS_NULL_QREG(ea_result)) {                                  \
 850            gen_addr_fault(s);                                          \
 851            return;                                                     \
 852        }                                                               \
 853    } while (0)
 854
  855/* Generate a jump to an immediate address, chaining TBs when possible.  */
 856static void gen_jmp_tb(DisasContext *s, int n, uint32_t dest)
 857{
 858    TranslationBlock *tb;
 859
 860    tb = s->tb;
 861    if (unlikely(s->singlestep_enabled)) {
 862        gen_exception(s, dest, EXCP_DEBUG);
 863    } else if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
 864               (s->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
 865        tcg_gen_goto_tb(n);
 866        tcg_gen_movi_i32(QREG_PC, dest);
 867        tcg_gen_exit_tb((uintptr_t)tb + n);
 868    } else {
 869        gen_jmp_im(s, dest);
 870        tcg_gen_exit_tb(0);
 871    }
 872    s->is_jmp = DISAS_TB_JUMP;
 873}
 874
 875DISAS_INSN(undef_mac)
 876{
 877    gen_exception(s, s->pc - 2, EXCP_LINEA);
 878}
 879
 880DISAS_INSN(undef_fpu)
 881{
 882    gen_exception(s, s->pc - 2, EXCP_LINEF);
 883}
 884
 885DISAS_INSN(undef)
 886{
 887    M68kCPU *cpu = m68k_env_get_cpu(env);
 888
 889    gen_exception(s, s->pc - 2, EXCP_UNSUPPORTED);
 890    cpu_abort(CPU(cpu), "Illegal instruction: %04x @ %08x", insn, s->pc - 2);
 891}
 892
 893DISAS_INSN(mulw)
 894{
 895    TCGv reg;
 896    TCGv tmp;
 897    TCGv src;
 898    int sign;
 899
 900    sign = (insn & 0x100) != 0;
 901    reg = DREG(insn, 9);
 902    tmp = tcg_temp_new();
 903    if (sign)
 904        tcg_gen_ext16s_i32(tmp, reg);
 905    else
 906        tcg_gen_ext16u_i32(tmp, reg);
 907    SRC_EA(env, src, OS_WORD, sign, NULL);
 908    tcg_gen_mul_i32(tmp, tmp, src);
 909    tcg_gen_mov_i32(reg, tmp);
 910    /* Unlike m68k, coldfire always clears the overflow bit.  */
 911    gen_logic_cc(s, tmp);
 912}
 913
 914DISAS_INSN(divw)
 915{
 916    TCGv reg;
 917    TCGv tmp;
 918    TCGv src;
 919    int sign;
 920
 921    sign = (insn & 0x100) != 0;
 922    reg = DREG(insn, 9);
 923    if (sign) {
 924        tcg_gen_ext16s_i32(QREG_DIV1, reg);
 925    } else {
 926        tcg_gen_ext16u_i32(QREG_DIV1, reg);
 927    }
 928    SRC_EA(env, src, OS_WORD, sign, NULL);
 929    tcg_gen_mov_i32(QREG_DIV2, src);
 930    if (sign) {
 931        gen_helper_divs(cpu_env, tcg_const_i32(1));
 932    } else {
 933        gen_helper_divu(cpu_env, tcg_const_i32(1));
 934    }
 935
 936    tmp = tcg_temp_new();
 937    src = tcg_temp_new();
 938    tcg_gen_ext16u_i32(tmp, QREG_DIV1);
 939    tcg_gen_shli_i32(src, QREG_DIV2, 16);
 940    tcg_gen_or_i32(reg, tmp, src);
 941    s->cc_op = CC_OP_FLAGS;
 942}
 943
 944DISAS_INSN(divl)
 945{
 946    TCGv num;
 947    TCGv den;
 948    TCGv reg;
 949    uint16_t ext;
 950
 951    ext = cpu_lduw_code(env, s->pc);
 952    s->pc += 2;
 953    if (ext & 0x87f8) {
 954        gen_exception(s, s->pc - 4, EXCP_UNSUPPORTED);
 955        return;
 956    }
 957    num = DREG(ext, 12);
 958    reg = DREG(ext, 0);
 959    tcg_gen_mov_i32(QREG_DIV1, num);
 960    SRC_EA(env, den, OS_LONG, 0, NULL);
 961    tcg_gen_mov_i32(QREG_DIV2, den);
 962    if (ext & 0x0800) {
 963        gen_helper_divs(cpu_env, tcg_const_i32(0));
 964    } else {
 965        gen_helper_divu(cpu_env, tcg_const_i32(0));
 966    }
 967    if ((ext & 7) == ((ext >> 12) & 7)) {
 968        /* div */
 969        tcg_gen_mov_i32 (reg, QREG_DIV1);
 970    } else {
 971        /* rem */
 972        tcg_gen_mov_i32 (reg, QREG_DIV2);
 973    }
 974    s->cc_op = CC_OP_FLAGS;
 975}
 976
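/* add/sub: long add or subtract between a data register and an EA, in either
   direction, updating X and the condition codes.  */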
 977DISAS_INSN(addsub)
 978{
 979    TCGv reg;
 980    TCGv dest;
 981    TCGv src;
 982    TCGv tmp;
 983    TCGv addr;
 984    int add;
 985
 986    add = (insn & 0x4000) != 0;
 987    reg = DREG(insn, 9);
 988    dest = tcg_temp_new();
 989    if (insn & 0x100) {
 990        SRC_EA(env, tmp, OS_LONG, 0, &addr);
 991        src = reg;
 992    } else {
 993        tmp = reg;
 994        SRC_EA(env, src, OS_LONG, 0, NULL);
 995    }
 996    if (add) {
 997        tcg_gen_add_i32(dest, tmp, src);
 998        gen_helper_xflag_lt(QREG_CC_X, dest, src);
 999        s->cc_op = CC_OP_ADD;
1000    } else {
1001        gen_helper_xflag_lt(QREG_CC_X, tmp, src);
1002        tcg_gen_sub_i32(dest, tmp, src);
1003        s->cc_op = CC_OP_SUB;
1004    }
1005    gen_update_cc_add(dest, src);
1006    if (insn & 0x100) {
1007        DEST_EA(env, insn, OS_LONG, dest, &addr);
1008    } else {
1009        tcg_gen_mov_i32(reg, dest);
1010    }
1011}
1012
1013
1014/* Reverse the order of the bits in REG.  */
1015DISAS_INSN(bitrev)
1016{
1017    TCGv reg;
1018    reg = DREG(insn, 0);
1019    gen_helper_bitrev(reg, reg);
1020}
1021
1022DISAS_INSN(bitop_reg)
1023{
1024    int opsize;
1025    int op;
1026    TCGv src1;
1027    TCGv src2;
1028    TCGv tmp;
1029    TCGv addr;
1030    TCGv dest;
1031
1032    if ((insn & 0x38) != 0)
1033        opsize = OS_BYTE;
1034    else
1035        opsize = OS_LONG;
1036    op = (insn >> 6) & 3;
 1037    SRC_EA(env, src1, opsize, 0, op ? &addr : NULL);
1038    src2 = DREG(insn, 9);
1039    dest = tcg_temp_new();
1040
1041    gen_flush_flags(s);
1042    tmp = tcg_temp_new();
1043    if (opsize == OS_BYTE)
1044        tcg_gen_andi_i32(tmp, src2, 7);
1045    else
1046        tcg_gen_andi_i32(tmp, src2, 31);
1047    src2 = tmp;
1048    tmp = tcg_temp_new();
1049    tcg_gen_shr_i32(tmp, src1, src2);
1050    tcg_gen_andi_i32(tmp, tmp, 1);
1051    tcg_gen_shli_i32(tmp, tmp, 2);
1052    /* Clear CCF_Z if bit set.  */
1053    tcg_gen_ori_i32(QREG_CC_DEST, QREG_CC_DEST, CCF_Z);
1054    tcg_gen_xor_i32(QREG_CC_DEST, QREG_CC_DEST, tmp);
1055
1056    tcg_gen_shl_i32(tmp, tcg_const_i32(1), src2);
1057    switch (op) {
1058    case 1: /* bchg */
1059        tcg_gen_xor_i32(dest, src1, tmp);
1060        break;
1061    case 2: /* bclr */
1062        tcg_gen_not_i32(tmp, tmp);
1063        tcg_gen_and_i32(dest, src1, tmp);
1064        break;
1065    case 3: /* bset */
1066        tcg_gen_or_i32(dest, src1, tmp);
1067        break;
1068    default: /* btst */
1069        break;
1070    }
1071    if (op)
1072        DEST_EA(env, insn, opsize, dest, &addr);
1073}
1074
1075DISAS_INSN(sats)
1076{
1077    TCGv reg;
1078    reg = DREG(insn, 0);
1079    gen_flush_flags(s);
1080    gen_helper_sats(reg, reg, QREG_CC_DEST);
1081    gen_logic_cc(s, reg);
1082}
1083
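/* Push VAL onto the stack: predecrement SP by 4 and store a longword.  */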
1084static void gen_push(DisasContext *s, TCGv val)
1085{
1086    TCGv tmp;
1087
1088    tmp = tcg_temp_new();
1089    tcg_gen_subi_i32(tmp, QREG_SP, 4);
1090    gen_store(s, OS_LONG, tmp, val);
1091    tcg_gen_mov_i32(QREG_SP, tmp);
1092}
1093
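/* movem: load or store the set of registers selected by the 16-bit mask
   word.  */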
1094DISAS_INSN(movem)
1095{
1096    TCGv addr;
1097    int i;
1098    uint16_t mask;
1099    TCGv reg;
1100    TCGv tmp;
1101    int is_load;
1102
1103    mask = cpu_lduw_code(env, s->pc);
1104    s->pc += 2;
1105    tmp = gen_lea(env, s, insn, OS_LONG);
1106    if (IS_NULL_QREG(tmp)) {
1107        gen_addr_fault(s);
1108        return;
1109    }
1110    addr = tcg_temp_new();
1111    tcg_gen_mov_i32(addr, tmp);
1112    is_load = ((insn & 0x0400) != 0);
1113    for (i = 0; i < 16; i++, mask >>= 1) {
1114        if (mask & 1) {
1115            if (i < 8)
1116                reg = DREG(i, 0);
1117            else
1118                reg = AREG(i, 0);
1119            if (is_load) {
1120                tmp = gen_load(s, OS_LONG, addr, 0);
1121                tcg_gen_mov_i32(reg, tmp);
1122            } else {
1123                gen_store(s, OS_LONG, addr, reg);
1124            }
1125            if (mask != 1)
1126                tcg_gen_addi_i32(addr, addr, 4);
1127        }
1128    }
1129}
1130
1131DISAS_INSN(bitop_im)
1132{
1133    int opsize;
1134    int op;
1135    TCGv src1;
1136    uint32_t mask;
1137    int bitnum;
1138    TCGv tmp;
1139    TCGv addr;
1140
1141    if ((insn & 0x38) != 0)
1142        opsize = OS_BYTE;
1143    else
1144        opsize = OS_LONG;
1145    op = (insn >> 6) & 3;
1146
1147    bitnum = cpu_lduw_code(env, s->pc);
1148    s->pc += 2;
1149    if (bitnum & 0xff00) {
1150        disas_undef(env, s, insn);
1151        return;
1152    }
1153
 1154    SRC_EA(env, src1, opsize, 0, op ? &addr : NULL);
1155
1156    gen_flush_flags(s);
1157    if (opsize == OS_BYTE)
1158        bitnum &= 7;
1159    else
1160        bitnum &= 31;
1161    mask = 1 << bitnum;
1162
1163    tmp = tcg_temp_new();
1164    assert (CCF_Z == (1 << 2));
1165    if (bitnum > 2)
1166        tcg_gen_shri_i32(tmp, src1, bitnum - 2);
1167    else if (bitnum < 2)
1168        tcg_gen_shli_i32(tmp, src1, 2 - bitnum);
1169    else
1170        tcg_gen_mov_i32(tmp, src1);
1171    tcg_gen_andi_i32(tmp, tmp, CCF_Z);
1172    /* Clear CCF_Z if bit set.  */
1173    tcg_gen_ori_i32(QREG_CC_DEST, QREG_CC_DEST, CCF_Z);
1174    tcg_gen_xor_i32(QREG_CC_DEST, QREG_CC_DEST, tmp);
1175    if (op) {
1176        switch (op) {
1177        case 1: /* bchg */
1178            tcg_gen_xori_i32(tmp, src1, mask);
1179            break;
1180        case 2: /* bclr */
1181            tcg_gen_andi_i32(tmp, src1, ~mask);
1182            break;
1183        case 3: /* bset */
1184            tcg_gen_ori_i32(tmp, src1, mask);
1185            break;
1186        default: /* btst */
1187            break;
1188        }
1189        DEST_EA(env, insn, opsize, tmp, &addr);
1190    }
1191}
1192
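/* Immediate arithmetic and logic: ori, andi, subi, addi, eori and cmpi.  */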
1193DISAS_INSN(arith_im)
1194{
1195    int op;
1196    uint32_t im;
1197    TCGv src1;
1198    TCGv dest;
1199    TCGv addr;
1200
1201    op = (insn >> 9) & 7;
1202    SRC_EA(env, src1, OS_LONG, 0, (op == 6) ? NULL : &addr);
1203    im = read_im32(env, s);
1204    dest = tcg_temp_new();
1205    switch (op) {
1206    case 0: /* ori */
1207        tcg_gen_ori_i32(dest, src1, im);
1208        gen_logic_cc(s, dest);
1209        break;
1210    case 1: /* andi */
1211        tcg_gen_andi_i32(dest, src1, im);
1212        gen_logic_cc(s, dest);
1213        break;
1214    case 2: /* subi */
1215        tcg_gen_mov_i32(dest, src1);
1216        gen_helper_xflag_lt(QREG_CC_X, dest, tcg_const_i32(im));
1217        tcg_gen_subi_i32(dest, dest, im);
1218        gen_update_cc_add(dest, tcg_const_i32(im));
1219        s->cc_op = CC_OP_SUB;
1220        break;
1221    case 3: /* addi */
1222        tcg_gen_mov_i32(dest, src1);
1223        tcg_gen_addi_i32(dest, dest, im);
1224        gen_update_cc_add(dest, tcg_const_i32(im));
1225        gen_helper_xflag_lt(QREG_CC_X, dest, tcg_const_i32(im));
1226        s->cc_op = CC_OP_ADD;
1227        break;
1228    case 5: /* eori */
1229        tcg_gen_xori_i32(dest, src1, im);
1230        gen_logic_cc(s, dest);
1231        break;
1232    case 6: /* cmpi */
1233        tcg_gen_mov_i32(dest, src1);
1234        tcg_gen_subi_i32(dest, dest, im);
1235        gen_update_cc_add(dest, tcg_const_i32(im));
1236        s->cc_op = CC_OP_SUB;
1237        break;
1238    default:
1239        abort();
1240    }
1241    if (op != 6) {
1242        DEST_EA(env, insn, OS_LONG, dest, &addr);
1243    }
1244}
1245
1246DISAS_INSN(byterev)
1247{
1248    TCGv reg;
1249
1250    reg = DREG(insn, 0);
1251    tcg_gen_bswap32_i32(reg, reg);
1252}
1253
1254DISAS_INSN(move)
1255{
1256    TCGv src;
1257    TCGv dest;
1258    int op;
1259    int opsize;
1260
1261    switch (insn >> 12) {
1262    case 1: /* move.b */
1263        opsize = OS_BYTE;
1264        break;
1265    case 2: /* move.l */
1266        opsize = OS_LONG;
1267        break;
1268    case 3: /* move.w */
1269        opsize = OS_WORD;
1270        break;
1271    default:
1272        abort();
1273    }
1274    SRC_EA(env, src, opsize, 1, NULL);
1275    op = (insn >> 6) & 7;
1276    if (op == 1) {
1277        /* movea */
1278        /* The value will already have been sign extended.  */
1279        dest = AREG(insn, 9);
1280        tcg_gen_mov_i32(dest, src);
1281    } else {
1282        /* normal move */
1283        uint16_t dest_ea;
1284        dest_ea = ((insn >> 9) & 7) | (op << 3);
1285        DEST_EA(env, dest_ea, opsize, src, NULL);
1286        /* This will be correct because loads sign extend.  */
1287        gen_logic_cc(s, src);
1288    }
1289}
1290
1291DISAS_INSN(negx)
1292{
1293    TCGv reg;
1294
1295    gen_flush_flags(s);
1296    reg = DREG(insn, 0);
1297    gen_helper_subx_cc(reg, cpu_env, tcg_const_i32(0), reg);
1298}
1299
1300DISAS_INSN(lea)
1301{
1302    TCGv reg;
1303    TCGv tmp;
1304
1305    reg = AREG(insn, 9);
1306    tmp = gen_lea(env, s, insn, OS_LONG);
1307    if (IS_NULL_QREG(tmp)) {
1308        gen_addr_fault(s);
1309        return;
1310    }
1311    tcg_gen_mov_i32(reg, tmp);
1312}
1313
1314DISAS_INSN(clr)
1315{
1316    int opsize;
1317
1318    switch ((insn >> 6) & 3) {
1319    case 0: /* clr.b */
1320        opsize = OS_BYTE;
1321        break;
1322    case 1: /* clr.w */
1323        opsize = OS_WORD;
1324        break;
1325    case 2: /* clr.l */
1326        opsize = OS_LONG;
1327        break;
1328    default:
1329        abort();
1330    }
1331    DEST_EA(env, insn, opsize, tcg_const_i32(0), NULL);
1332    gen_logic_cc(s, tcg_const_i32(0));
1333}
1334
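/* Read the CCR into a new temporary: the X flag in bit 4, the evaluated
   NZVC flags in the low bits.  */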
1335static TCGv gen_get_ccr(DisasContext *s)
1336{
1337    TCGv dest;
1338
1339    gen_flush_flags(s);
1340    dest = tcg_temp_new();
1341    tcg_gen_shli_i32(dest, QREG_CC_X, 4);
1342    tcg_gen_or_i32(dest, dest, QREG_CC_DEST);
1343    return dest;
1344}
1345
1346DISAS_INSN(move_from_ccr)
1347{
1348    TCGv reg;
1349    TCGv ccr;
1350
1351    ccr = gen_get_ccr(s);
1352    reg = DREG(insn, 0);
1353    gen_partset_reg(OS_WORD, reg, ccr);
1354}
1355
1356DISAS_INSN(neg)
1357{
1358    TCGv reg;
1359    TCGv src1;
1360
1361    reg = DREG(insn, 0);
1362    src1 = tcg_temp_new();
1363    tcg_gen_mov_i32(src1, reg);
1364    tcg_gen_neg_i32(reg, src1);
1365    s->cc_op = CC_OP_SUB;
1366    gen_update_cc_add(reg, src1);
1367    gen_helper_xflag_lt(QREG_CC_X, tcg_const_i32(0), src1);
1368    s->cc_op = CC_OP_SUB;
1369}
1370
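/* Set the CCR from the low bits of VAL and, unless CCR_ONLY, the rest of SR
   as well.  */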
1371static void gen_set_sr_im(DisasContext *s, uint16_t val, int ccr_only)
1372{
1373    tcg_gen_movi_i32(QREG_CC_DEST, val & 0xf);
1374    tcg_gen_movi_i32(QREG_CC_X, (val & 0x10) >> 4);
1375    if (!ccr_only) {
1376        gen_helper_set_sr(cpu_env, tcg_const_i32(val & 0xff00));
1377    }
1378}
1379
1380static void gen_set_sr(CPUM68KState *env, DisasContext *s, uint16_t insn,
1381                       int ccr_only)
1382{
1383    TCGv tmp;
1384    TCGv reg;
1385
1386    s->cc_op = CC_OP_FLAGS;
1387    if ((insn & 0x38) == 0)
1388      {
1389        tmp = tcg_temp_new();
1390        reg = DREG(insn, 0);
1391        tcg_gen_andi_i32(QREG_CC_DEST, reg, 0xf);
1392        tcg_gen_shri_i32(tmp, reg, 4);
1393        tcg_gen_andi_i32(QREG_CC_X, tmp, 1);
1394        if (!ccr_only) {
1395            gen_helper_set_sr(cpu_env, reg);
1396        }
1397      }
1398    else if ((insn & 0x3f) == 0x3c)
1399      {
1400        uint16_t val;
1401        val = cpu_lduw_code(env, s->pc);
1402        s->pc += 2;
1403        gen_set_sr_im(s, val, ccr_only);
1404      }
1405    else
1406        disas_undef(env, s, insn);
1407}
1408
1409DISAS_INSN(move_to_ccr)
1410{
1411    gen_set_sr(env, s, insn, 1);
1412}
1413
1414DISAS_INSN(not)
1415{
1416    TCGv reg;
1417
1418    reg = DREG(insn, 0);
1419    tcg_gen_not_i32(reg, reg);
1420    gen_logic_cc(s, reg);
1421}
1422
1423DISAS_INSN(swap)
1424{
1425    TCGv src1;
1426    TCGv src2;
1427    TCGv reg;
1428
1429    src1 = tcg_temp_new();
1430    src2 = tcg_temp_new();
1431    reg = DREG(insn, 0);
1432    tcg_gen_shli_i32(src1, reg, 16);
1433    tcg_gen_shri_i32(src2, reg, 16);
1434    tcg_gen_or_i32(reg, src1, src2);
1435    gen_logic_cc(s, reg);
1436}
1437
1438DISAS_INSN(pea)
1439{
1440    TCGv tmp;
1441
1442    tmp = gen_lea(env, s, insn, OS_LONG);
1443    if (IS_NULL_QREG(tmp)) {
1444        gen_addr_fault(s);
1445        return;
1446    }
1447    gen_push(s, tmp);
1448}
1449
1450DISAS_INSN(ext)
1451{
1452    int op;
1453    TCGv reg;
1454    TCGv tmp;
1455
1456    reg = DREG(insn, 0);
1457    op = (insn >> 6) & 7;
1458    tmp = tcg_temp_new();
1459    if (op == 3)
1460        tcg_gen_ext16s_i32(tmp, reg);
1461    else
1462        tcg_gen_ext8s_i32(tmp, reg);
1463    if (op == 2)
1464        gen_partset_reg(OS_WORD, reg, tmp);
1465    else
1466        tcg_gen_mov_i32(reg, tmp);
1467    gen_logic_cc(s, tmp);
1468}
1469
1470DISAS_INSN(tst)
1471{
1472    int opsize;
1473    TCGv tmp;
1474
1475    switch ((insn >> 6) & 3) {
1476    case 0: /* tst.b */
1477        opsize = OS_BYTE;
1478        break;
1479    case 1: /* tst.w */
1480        opsize = OS_WORD;
1481        break;
1482    case 2: /* tst.l */
1483        opsize = OS_LONG;
1484        break;
1485    default:
1486        abort();
1487    }
1488    SRC_EA(env, tmp, opsize, 1, NULL);
1489    gen_logic_cc(s, tmp);
1490}
1491
1492DISAS_INSN(pulse)
1493{
1494  /* Implemented as a NOP.  */
1495}
1496
1497DISAS_INSN(illegal)
1498{
1499    gen_exception(s, s->pc - 2, EXCP_ILLEGAL);
1500}
1501
1502/* ??? This should be atomic.  */
1503DISAS_INSN(tas)
1504{
1505    TCGv dest;
1506    TCGv src1;
1507    TCGv addr;
1508
1509    dest = tcg_temp_new();
1510    SRC_EA(env, src1, OS_BYTE, 1, &addr);
1511    gen_logic_cc(s, src1);
1512    tcg_gen_ori_i32(dest, src1, 0x80);
1513    DEST_EA(env, insn, OS_BYTE, dest, &addr);
1514}
1515
1516DISAS_INSN(mull)
1517{
1518    uint16_t ext;
1519    TCGv reg;
1520    TCGv src1;
1521    TCGv dest;
1522
1523    /* The upper 32 bits of the product are discarded, so
1524       muls.l and mulu.l are functionally equivalent.  */
1525    ext = cpu_lduw_code(env, s->pc);
1526    s->pc += 2;
1527    if (ext & 0x87ff) {
1528        gen_exception(s, s->pc - 4, EXCP_UNSUPPORTED);
1529        return;
1530    }
1531    reg = DREG(ext, 12);
1532    SRC_EA(env, src1, OS_LONG, 0, NULL);
1533    dest = tcg_temp_new();
1534    tcg_gen_mul_i32(dest, src1, reg);
1535    tcg_gen_mov_i32(reg, dest);
1536    /* Unlike m68k, coldfire always clears the overflow bit.  */
1537    gen_logic_cc(s, dest);
1538}
1539
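/* link: push An, copy the decremented stack pointer into An, then add the
   16-bit displacement to SP.  */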
1540DISAS_INSN(link)
1541{
1542    int16_t offset;
1543    TCGv reg;
1544    TCGv tmp;
1545
1546    offset = cpu_ldsw_code(env, s->pc);
1547    s->pc += 2;
1548    reg = AREG(insn, 0);
1549    tmp = tcg_temp_new();
1550    tcg_gen_subi_i32(tmp, QREG_SP, 4);
1551    gen_store(s, OS_LONG, tmp, reg);
1552    if ((insn & 7) != 7)
1553        tcg_gen_mov_i32(reg, tmp);
1554    tcg_gen_addi_i32(QREG_SP, tmp, offset);
1555}
1556
1557DISAS_INSN(unlk)
1558{
1559    TCGv src;
1560    TCGv reg;
1561    TCGv tmp;
1562
1563    src = tcg_temp_new();
1564    reg = AREG(insn, 0);
1565    tcg_gen_mov_i32(src, reg);
1566    tmp = gen_load(s, OS_LONG, src, 0);
1567    tcg_gen_mov_i32(reg, tmp);
1568    tcg_gen_addi_i32(QREG_SP, src, 4);
1569}
1570
1571DISAS_INSN(nop)
1572{
1573}
1574
1575DISAS_INSN(rts)
1576{
1577    TCGv tmp;
1578
1579    tmp = gen_load(s, OS_LONG, QREG_SP, 0);
1580    tcg_gen_addi_i32(QREG_SP, QREG_SP, 4);
1581    gen_jmp(s, tmp);
1582}
1583
1584DISAS_INSN(jump)
1585{
1586    TCGv tmp;
1587
1588    /* Load the target address first to ensure correct exception
1589       behavior.  */
1590    tmp = gen_lea(env, s, insn, OS_LONG);
1591    if (IS_NULL_QREG(tmp)) {
1592        gen_addr_fault(s);
1593        return;
1594    }
1595    if ((insn & 0x40) == 0) {
1596        /* jsr */
1597        gen_push(s, tcg_const_i32(s->pc));
1598    }
1599    gen_jmp(s, tmp);
1600}
1601
1602DISAS_INSN(addsubq)
1603{
1604    TCGv src1;
1605    TCGv src2;
1606    TCGv dest;
1607    int val;
1608    TCGv addr;
1609
1610    SRC_EA(env, src1, OS_LONG, 0, &addr);
1611    val = (insn >> 9) & 7;
1612    if (val == 0)
1613        val = 8;
1614    dest = tcg_temp_new();
1615    tcg_gen_mov_i32(dest, src1);
1616    if ((insn & 0x38) == 0x08) {
1617        /* Don't update condition codes if the destination is an
1618           address register.  */
1619        if (insn & 0x0100) {
1620            tcg_gen_subi_i32(dest, dest, val);
1621        } else {
1622            tcg_gen_addi_i32(dest, dest, val);
1623        }
1624    } else {
1625        src2 = tcg_const_i32(val);
1626        if (insn & 0x0100) {
1627            gen_helper_xflag_lt(QREG_CC_X, dest, src2);
1628            tcg_gen_subi_i32(dest, dest, val);
1629            s->cc_op = CC_OP_SUB;
1630        } else {
1631            tcg_gen_addi_i32(dest, dest, val);
1632            gen_helper_xflag_lt(QREG_CC_X, dest, src2);
1633            s->cc_op = CC_OP_ADD;
1634        }
1635        gen_update_cc_add(dest, src2);
1636    }
1637    DEST_EA(env, insn, OS_LONG, dest, &addr);
1638}
1639
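/* tpf (trapf): never traps; treated as a no-op that skips its extension
   words.  */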
1640DISAS_INSN(tpf)
1641{
1642    switch (insn & 7) {
1643    case 2: /* One extension word.  */
1644        s->pc += 2;
1645        break;
1646    case 3: /* Two extension words.  */
1647        s->pc += 4;
1648        break;
1649    case 4: /* No extension words.  */
1650        break;
1651    default:
1652        disas_undef(env, s, insn);
1653    }
1654}
1655
1656DISAS_INSN(branch)
1657{
1658    int32_t offset;
1659    uint32_t base;
1660    int op;
1661    TCGLabel *l1;
1662
1663    base = s->pc;
1664    op = (insn >> 8) & 0xf;
1665    offset = (int8_t)insn;
1666    if (offset == 0) {
1667        offset = cpu_ldsw_code(env, s->pc);
1668        s->pc += 2;
1669    } else if (offset == -1) {
1670        offset = read_im32(env, s);
1671    }
1672    if (op == 1) {
1673        /* bsr */
1674        gen_push(s, tcg_const_i32(s->pc));
1675    }
1676    gen_flush_cc_op(s);
1677    if (op > 1) {
1678        /* Bcc */
1679        l1 = gen_new_label();
1680        gen_jmpcc(s, ((insn >> 8) & 0xf) ^ 1, l1);
1681        gen_jmp_tb(s, 1, base + offset);
1682        gen_set_label(l1);
1683        gen_jmp_tb(s, 0, s->pc);
1684    } else {
1685        /* Unconditional branch.  */
1686        gen_jmp_tb(s, 0, base + offset);
1687    }
1688}
1689
1690DISAS_INSN(moveq)
1691{
1692    uint32_t val;
1693
1694    val = (int8_t)insn;
1695    tcg_gen_movi_i32(DREG(insn, 9), val);
1696    gen_logic_cc(s, tcg_const_i32(val));
1697}
1698
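/* mvs/mvz: move a byte or word into a data register with sign (mvs) or
   zero (mvz) extension.  */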
1699DISAS_INSN(mvzs)
1700{
1701    int opsize;
1702    TCGv src;
1703    TCGv reg;
1704
1705    if (insn & 0x40)
1706        opsize = OS_WORD;
1707    else
1708        opsize = OS_BYTE;
1709    SRC_EA(env, src, opsize, (insn & 0x80) == 0, NULL);
1710    reg = DREG(insn, 9);
1711    tcg_gen_mov_i32(reg, src);
1712    gen_logic_cc(s, src);
1713}
1714
1715DISAS_INSN(or)
1716{
1717    TCGv reg;
1718    TCGv dest;
1719    TCGv src;
1720    TCGv addr;
1721
1722    reg = DREG(insn, 9);
1723    dest = tcg_temp_new();
1724    if (insn & 0x100) {
1725        SRC_EA(env, src, OS_LONG, 0, &addr);
1726        tcg_gen_or_i32(dest, src, reg);
1727        DEST_EA(env, insn, OS_LONG, dest, &addr);
1728    } else {
1729        SRC_EA(env, src, OS_LONG, 0, NULL);
1730        tcg_gen_or_i32(dest, src, reg);
1731        tcg_gen_mov_i32(reg, dest);
1732    }
1733    gen_logic_cc(s, dest);
1734}
1735
1736DISAS_INSN(suba)
1737{
1738    TCGv src;
1739    TCGv reg;
1740
1741    SRC_EA(env, src, OS_LONG, 0, NULL);
1742    reg = AREG(insn, 9);
1743    tcg_gen_sub_i32(reg, reg, src);
1744}
1745
1746DISAS_INSN(subx)
1747{
1748    TCGv reg;
1749    TCGv src;
1750
1751    gen_flush_flags(s);
1752    reg = DREG(insn, 9);
1753    src = DREG(insn, 0);
1754    gen_helper_subx_cc(reg, cpu_env, reg, src);
1755}
1756
1757DISAS_INSN(mov3q)
1758{
1759    TCGv src;
1760    int val;
1761
1762    val = (insn >> 9) & 7;
1763    if (val == 0)
1764        val = -1;
1765    src = tcg_const_i32(val);
1766    gen_logic_cc(s, src);
1767    DEST_EA(env, insn, OS_LONG, src, NULL);
1768}
1769
1770DISAS_INSN(cmp)
1771{
1772    int op;
1773    TCGv src;
1774    TCGv reg;
1775    TCGv dest;
1776    int opsize;
1777
1778    op = (insn >> 6) & 3;
1779    switch (op) {
1780    case 0: /* cmp.b */
1781        opsize = OS_BYTE;
1782        s->cc_op = CC_OP_CMPB;
1783        break;
1784    case 1: /* cmp.w */
1785        opsize = OS_WORD;
1786        s->cc_op = CC_OP_CMPW;
1787        break;
1788    case 2: /* cmp.l */
1789        opsize = OS_LONG;
1790        s->cc_op = CC_OP_SUB;
1791        break;
1792    default:
1793        abort();
1794    }
1795    SRC_EA(env, src, opsize, 1, NULL);
1796    reg = DREG(insn, 9);
1797    dest = tcg_temp_new();
1798    tcg_gen_sub_i32(dest, reg, src);
1799    gen_update_cc_add(dest, src);
1800}
1801
1802DISAS_INSN(cmpa)
1803{
1804    int opsize;
1805    TCGv src;
1806    TCGv reg;
1807    TCGv dest;
1808
1809    if (insn & 0x100) {
1810        opsize = OS_LONG;
1811    } else {
1812        opsize = OS_WORD;
1813    }
1814    SRC_EA(env, src, opsize, 1, NULL);
1815    reg = AREG(insn, 9);
1816    dest = tcg_temp_new();
1817    tcg_gen_sub_i32(dest, reg, src);
1818    gen_update_cc_add(dest, src);
1819    s->cc_op = CC_OP_SUB;
1820}
1821
1822DISAS_INSN(eor)
1823{
1824    TCGv src;
1825    TCGv reg;
1826    TCGv dest;
1827    TCGv addr;
1828
1829    SRC_EA(env, src, OS_LONG, 0, &addr);
1830    reg = DREG(insn, 9);
1831    dest = tcg_temp_new();
1832    tcg_gen_xor_i32(dest, src, reg);
1833    gen_logic_cc(s, dest);
1834    DEST_EA(env, insn, OS_LONG, dest, &addr);
1835}
1836
1837DISAS_INSN(and)
1838{
1839    TCGv src;
1840    TCGv reg;
1841    TCGv dest;
1842    TCGv addr;
1843
1844    reg = DREG(insn, 9);
1845    dest = tcg_temp_new();
1846    if (insn & 0x100) {
1847        SRC_EA(env, src, OS_LONG, 0, &addr);
1848        tcg_gen_and_i32(dest, src, reg);
1849        DEST_EA(env, insn, OS_LONG, dest, &addr);
1850    } else {
1851        SRC_EA(env, src, OS_LONG, 0, NULL);
1852        tcg_gen_and_i32(dest, src, reg);
1853        tcg_gen_mov_i32(reg, dest);
1854    }
1855    gen_logic_cc(s, dest);
1856}
1857
1858DISAS_INSN(adda)
1859{
1860    TCGv src;
1861    TCGv reg;
1862
1863    SRC_EA(env, src, OS_LONG, 0, NULL);
1864    reg = AREG(insn, 9);
1865    tcg_gen_add_i32(reg, reg, src);
1866}
1867
1868DISAS_INSN(addx)
1869{
1870    TCGv reg;
1871    TCGv src;
1872
1873    gen_flush_flags(s);
1874    reg = DREG(insn, 9);
1875    src = DREG(insn, 0);
1876    gen_helper_addx_cc(reg, cpu_env, reg, src);
1877    s->cc_op = CC_OP_FLAGS;
1878}
1879
1880/* TODO: This could be implemented without helper functions.  */
1881DISAS_INSN(shift_im)
1882{
1883    TCGv reg;
1884    int tmp;
1885    TCGv shift;
1886
1887    reg = DREG(insn, 0);
1888    tmp = (insn >> 9) & 7;
1889    if (tmp == 0)
1890        tmp = 8;
1891    shift = tcg_const_i32(tmp);
1892    /* No need to flush flags becuse we know we will set C flag.  */
1893    if (insn & 0x100) {
1894        gen_helper_shl_cc(reg, cpu_env, reg, shift);
1895    } else {
1896        if (insn & 8) {
1897            gen_helper_shr_cc(reg, cpu_env, reg, shift);
1898        } else {
1899            gen_helper_sar_cc(reg, cpu_env, reg, shift);
1900        }
1901    }
1902    s->cc_op = CC_OP_SHIFT;
1903}
1904
1905DISAS_INSN(shift_reg)
1906{
1907    TCGv reg;
1908    TCGv shift;
1909
1910    reg = DREG(insn, 0);
1911    shift = DREG(insn, 9);
1912    /* Shift by zero leaves C flag unmodified.   */
1913    gen_flush_flags(s);
1914    if (insn & 0x100) {
1915        gen_helper_shl_cc(reg, cpu_env, reg, shift);
1916    } else {
1917        if (insn & 8) {
1918            gen_helper_shr_cc(reg, cpu_env, reg, shift);
1919        } else {
1920            gen_helper_sar_cc(reg, cpu_env, reg, shift);
1921        }
1922    }
1923    s->cc_op = CC_OP_SHIFT;
1924}
1925
1926DISAS_INSN(ff1)
1927{
1928    TCGv reg;
1929    reg = DREG(insn, 0);
1930    gen_logic_cc(s, reg);
1931    gen_helper_ff1(reg, reg);
1932}
1933
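/* Build the full SR value by combining the system bits of SR with the
   current CCR.  */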
1934static TCGv gen_get_sr(DisasContext *s)
1935{
1936    TCGv ccr;
1937    TCGv sr;
1938
1939    ccr = gen_get_ccr(s);
1940    sr = tcg_temp_new();
1941    tcg_gen_andi_i32(sr, QREG_SR, 0xffe0);
1942    tcg_gen_or_i32(sr, sr, ccr);
1943    return sr;
1944}
1945
1946DISAS_INSN(strldsr)
1947{
1948    uint16_t ext;
1949    uint32_t addr;
1950
1951    addr = s->pc - 2;
1952    ext = cpu_lduw_code(env, s->pc);
1953    s->pc += 2;
1954    if (ext != 0x46FC) {
1955        gen_exception(s, addr, EXCP_UNSUPPORTED);
1956        return;
1957    }
1958    ext = cpu_lduw_code(env, s->pc);
1959    s->pc += 2;
1960    if (IS_USER(s) || (ext & SR_S) == 0) {
1961        gen_exception(s, addr, EXCP_PRIVILEGE);
1962        return;
1963    }
1964    gen_push(s, gen_get_sr(s));
1965    gen_set_sr_im(s, ext, 0);
1966}
1967
1968DISAS_INSN(move_from_sr)
1969{
1970    TCGv reg;
1971    TCGv sr;
1972
1973    if (IS_USER(s)) {
1974        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
1975        return;
1976    }
1977    sr = gen_get_sr(s);
1978    reg = DREG(insn, 0);
1979    gen_partset_reg(OS_WORD, reg, sr);
1980}
1981
1982DISAS_INSN(move_to_sr)
1983{
1984    if (IS_USER(s)) {
1985        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
1986        return;
1987    }
1988    gen_set_sr(env, s, insn, 0);
1989    gen_lookup_tb(s);
1990}
1991
1992DISAS_INSN(move_from_usp)
1993{
1994    if (IS_USER(s)) {
1995        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
1996        return;
1997    }
1998    tcg_gen_ld_i32(AREG(insn, 0), cpu_env,
1999                   offsetof(CPUM68KState, sp[M68K_USP]));
2000}
2001
2002DISAS_INSN(move_to_usp)
2003{
2004    if (IS_USER(s)) {
2005        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2006        return;
2007    }
2008    tcg_gen_st_i32(AREG(insn, 0), cpu_env,
2009                   offsetof(CPUM68KState, sp[M68K_USP]));
2010}
2011
2012DISAS_INSN(halt)
2013{
2014    gen_exception(s, s->pc, EXCP_HALT_INSN);
2015}
2016
2017DISAS_INSN(stop)
2018{
2019    uint16_t ext;
2020
2021    if (IS_USER(s)) {
2022        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2023        return;
2024    }
2025
2026    ext = cpu_lduw_code(env, s->pc);
2027    s->pc += 2;
2028
2029    gen_set_sr_im(s, ext, 0);
2030    tcg_gen_movi_i32(cpu_halted, 1);
2031    gen_exception(s, s->pc, EXCP_HLT);
2032}
2033
2034DISAS_INSN(rte)
2035{
2036    if (IS_USER(s)) {
2037        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2038        return;
2039    }
2040    gen_exception(s, s->pc - 2, EXCP_RTE);
2041}
2042
2043DISAS_INSN(movec)
2044{
2045    uint16_t ext;
2046    TCGv reg;
2047
2048    if (IS_USER(s)) {
2049        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2050        return;
2051    }
2052
2053    ext = cpu_lduw_code(env, s->pc);
2054    s->pc += 2;
2055
2056    if (ext & 0x8000) {
2057        reg = AREG(ext, 12);
2058    } else {
2059        reg = DREG(ext, 12);
2060    }
2061    gen_helper_movec(cpu_env, tcg_const_i32(ext & 0xfff), reg);
2062    gen_lookup_tb(s);
2063}
2064
2065DISAS_INSN(intouch)
2066{
2067    if (IS_USER(s)) {
2068        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2069        return;
2070    }
2071    /* ICache fetch.  Implement as no-op.  */
2072}
2073
2074DISAS_INSN(cpushl)
2075{
2076    if (IS_USER(s)) {
2077        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2078        return;
2079    }
2080    /* Cache push/invalidate.  Implement as no-op.  */
2081}
2082
2083DISAS_INSN(wddata)
2084{
2085    gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2086}
2087
2088DISAS_INSN(wdebug)
2089{
2090    M68kCPU *cpu = m68k_env_get_cpu(env);
2091
2092    if (IS_USER(s)) {
2093        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2094        return;
2095    }
2096    /* TODO: Implement wdebug.  */
2097    cpu_abort(CPU(cpu), "WDEBUG not implemented");
2098}
2099
2100DISAS_INSN(trap)
2101{
2102    gen_exception(s, s->pc - 2, EXCP_TRAP0 + (insn & 0xf));
2103}
2104
2105/* ??? FP exceptions are not implemented.  Most exceptions are deferred until
2106   immediately before the next FP instruction is executed.  */
2107DISAS_INSN(fpu)
2108{
2109    uint16_t ext;
2110    int32_t offset;
2111    int opmode;
2112    TCGv_i64 src;
2113    TCGv_i64 dest;
2114    TCGv_i64 res;
2115    TCGv tmp32;
2116    int round;
2117    int set_dest;
2118    int opsize;
2119
2120    ext = cpu_lduw_code(env, s->pc);
2121    s->pc += 2;
2122    opmode = ext & 0x7f;
2123    switch ((ext >> 13) & 7) {
2124    case 0: case 2:
2125        break;
2126    case 1:
2127        goto undef;
2128    case 3: /* fmove out */
2129        src = FREG(ext, 7);
2130        tmp32 = tcg_temp_new_i32();
2131        /* fmove */
2132        /* ??? TODO: Proper behavior on overflow.  */
2133        switch ((ext >> 10) & 7) {
2134        case 0:
2135            opsize = OS_LONG;
2136            gen_helper_f64_to_i32(tmp32, cpu_env, src);
2137            break;
2138        case 1:
2139            opsize = OS_SINGLE;
2140            gen_helper_f64_to_f32(tmp32, cpu_env, src);
2141            break;
2142        case 4:
2143            opsize = OS_WORD;
2144            gen_helper_f64_to_i32(tmp32, cpu_env, src);
2145            break;
2146        case 5: /* OS_DOUBLE */
2147            tcg_gen_mov_i32(tmp32, AREG(insn, 0));
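                /* Only register-indirect style modes are handled here:
                   (An) and (An)+ use the address as-is, -(An) pre-decrements
                   by 8 and (d16,An) adds the 16-bit displacement; the
                   post-increment/pre-decrement writeback happens after the
                   store below.  */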
2148            switch ((insn >> 3) & 7) {
2149            case 2:
2150            case 3:
2151                break;
2152            case 4:
2153                tcg_gen_addi_i32(tmp32, tmp32, -8);
2154                break;
2155            case 5:
2156                offset = cpu_ldsw_code(env, s->pc);
2157                s->pc += 2;
2158                tcg_gen_addi_i32(tmp32, tmp32, offset);
2159                break;
2160            default:
2161                goto undef;
2162            }
2163            gen_store64(s, tmp32, src);
2164            switch ((insn >> 3) & 7) {
2165            case 3:
2166                tcg_gen_addi_i32(tmp32, tmp32, 8);
2167                tcg_gen_mov_i32(AREG(insn, 0), tmp32);
2168                break;
2169            case 4:
2170                tcg_gen_mov_i32(AREG(insn, 0), tmp32);
2171                break;
2172            }
2173            tcg_temp_free_i32(tmp32);
2174            return;
2175        case 6:
2176            opsize = OS_BYTE;
2177            gen_helper_f64_to_i32(tmp32, cpu_env, src);
2178            break;
2179        default:
2180            goto undef;
2181        }
2182        DEST_EA(env, insn, opsize, tmp32, NULL);
2183        tcg_temp_free_i32(tmp32);
2184        return;
2185    case 4: /* fmove to control register.  */
2186        switch ((ext >> 10) & 7) {
2187        case 4: /* FPCR */
2188            /* Not implemented.  Ignore writes.  */
2189            break;
2190        case 1: /* FPIAR */
2191        case 2: /* FPSR */
2192        default:
2193            cpu_abort(NULL, "Unimplemented: fmove to control %d",
2194                      (ext >> 10) & 7);
2195        }
2196        break;
2197    case 5: /* fmove from control register.  */
2198        switch ((ext >> 10) & 7) {
2199        case 4: /* FPCR */
2200            /* Not implemented.  Always return zero.  */
2201            tmp32 = tcg_const_i32(0);
2202            break;
2203        case 1: /* FPIAR */
2204        case 2: /* FPSR */
2205        default:
2206            cpu_abort(NULL, "Unimplemented: fmove from control %d",
2207                      (ext >> 10) & 7);
2208            goto undef;
2209        }
2210        DEST_EA(env, insn, OS_LONG, tmp32, NULL);
2211        break;
2212    case 6: /* fmovem */
2213    case 7:
2214        {
2215            TCGv addr;
2216            uint16_t mask;
2217            int i;
2218            if ((ext & 0x1f00) != 0x1000 || (ext & 0xff) == 0)
2219                goto undef;
2220            tmp32 = gen_lea(env, s, insn, OS_LONG);
2221            if (IS_NULL_QREG(tmp32)) {
2222                gen_addr_fault(s);
2223                return;
2224            }
2225            addr = tcg_temp_new_i32();
2226            tcg_gen_mov_i32(addr, tmp32);
2227            mask = 0x80;
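                /* The low byte of the extension word is the register list:
                   bit 7 selects FP0, bit 0 FP7.  Bit 13 gives the direction
                   (set means registers to memory) and the address advances
                   by eight bytes while more registers remain in the list.  */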
2228            for (i = 0; i < 8; i++) {
2229                if (ext & mask) {
2230                    dest = FREG(i, 0);
2231                    if (ext & (1 << 13)) {
2232                        /* store */
2233                        tcg_gen_qemu_stf64(dest, addr, IS_USER(s));
2234                    } else {
2235                        /* load */
2236                        tcg_gen_qemu_ldf64(dest, addr, IS_USER(s));
2237                    }
2238                    if (ext & (mask - 1))
2239                        tcg_gen_addi_i32(addr, addr, 8);
2240                }
2241                mask >>= 1;
2242            }
2243            tcg_temp_free_i32(addr);
2244        }
2245        return;
2246    }
2247    if (ext & (1 << 14)) {
2248        /* Source effective address.  */
2249        switch ((ext >> 10) & 7) {
2250        case 0: opsize = OS_LONG; break;
2251        case 1: opsize = OS_SINGLE; break;
2252        case 4: opsize = OS_WORD; break;
2253        case 5: opsize = OS_DOUBLE; break;
2254        case 6: opsize = OS_BYTE; break;
2255        default:
2256            goto undef;
2257        }
2258        if (opsize == OS_DOUBLE) {
2259            tmp32 = tcg_temp_new_i32();
2260            tcg_gen_mov_i32(tmp32, AREG(insn, 0));
2261            switch ((insn >> 3) & 7) {
2262            case 2:
2263            case 3:
2264                break;
2265            case 4:
2266                tcg_gen_addi_i32(tmp32, tmp32, -8);
2267                break;
2268            case 5:
2269                offset = cpu_ldsw_code(env, s->pc);
2270                s->pc += 2;
2271                tcg_gen_addi_i32(tmp32, tmp32, offset);
2272                break;
2273            case 7:
2274                offset = cpu_ldsw_code(env, s->pc);
2275                offset += s->pc - 2;
2276                s->pc += 2;
2277                tcg_gen_addi_i32(tmp32, tmp32, offset);
2278                break;
2279            default:
2280                goto undef;
2281            }
2282            src = gen_load64(s, tmp32);
2283            switch ((insn >> 3) & 7) {
2284            case 3:
2285                tcg_gen_addi_i32(tmp32, tmp32, 8);
2286                tcg_gen_mov_i32(AREG(insn, 0), tmp32);
2287                break;
2288            case 4:
2289                tcg_gen_mov_i32(AREG(insn, 0), tmp32);
2290                break;
2291            }
2292            tcg_temp_free_i32(tmp32);
2293        } else {
2294            SRC_EA(env, tmp32, opsize, 1, NULL);
2295            src = tcg_temp_new_i64();
2296            switch (opsize) {
2297            case OS_LONG:
2298            case OS_WORD:
2299            case OS_BYTE:
2300                gen_helper_i32_to_f64(src, cpu_env, tmp32);
2301                break;
2302            case OS_SINGLE:
2303                gen_helper_f32_to_f64(src, cpu_env, tmp32);
2304                break;
2305            }
2306        }
2307    } else {
2308        /* Source register.  */
2309        src = FREG(ext, 10);
2310    }
2311    dest = FREG(ext, 7);
2312    res = tcg_temp_new_i64();
2313    if (opmode != 0x3a)
2314        tcg_gen_mov_f64(res, dest);
2315    round = 1;
2316    set_dest = 1;
2317    switch (opmode) {
2318    case 0: case 0x40: case 0x44: /* fmove */
2319        tcg_gen_mov_f64(res, src);
2320        break;
2321    case 1: /* fint */
2322        gen_helper_iround_f64(res, cpu_env, src);
2323        round = 0;
2324        break;
2325    case 3: /* fintrz */
2326        gen_helper_itrunc_f64(res, cpu_env, src);
2327        round = 0;
2328        break;
2329    case 4: case 0x41: case 0x45: /* fsqrt */
2330        gen_helper_sqrt_f64(res, cpu_env, src);
2331        break;
2332    case 0x18: case 0x58: case 0x5c: /* fabs */
2333        gen_helper_abs_f64(res, src);
2334        break;
2335    case 0x1a: case 0x5a: case 0x5e: /* fneg */
2336        gen_helper_chs_f64(res, src);
2337        break;
2338    case 0x20: case 0x60: case 0x64: /* fdiv */
2339        gen_helper_div_f64(res, cpu_env, res, src);
2340        break;
2341    case 0x22: case 0x62: case 0x66: /* fadd */
2342        gen_helper_add_f64(res, cpu_env, res, src);
2343        break;
2344    case 0x23: case 0x63: case 0x67: /* fmul */
2345        gen_helper_mul_f64(res, cpu_env, res, src);
2346        break;
2347    case 0x28: case 0x68: case 0x6c: /* fsub */
2348        gen_helper_sub_f64(res, cpu_env, res, src);
2349        break;
2350    case 0x38: /* fcmp */
2351        gen_helper_sub_cmp_f64(res, cpu_env, res, src);
2352        set_dest = 0;
2353        round = 0;
2354        break;
2355    case 0x3a: /* ftst */
2356        tcg_gen_mov_f64(res, src);
2357        set_dest = 0;
2358        round = 0;
2359        break;
2360    default:
2361        goto undef;
2362    }
2363    if (ext & (1 << 14)) {
2364        tcg_temp_free_i64(src);
2365    }
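        /* Decide whether the result is rounded to single precision: the
           fsXXX opcode forms round, the fdXXX forms do not, and the plain
           forms follow the FPCR precision control.  The rounding itself is
           approximated below by a float64->float32->float64 round trip.  */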
2366    if (round) {
2367        if (opmode & 0x40) {
2368            if ((opmode & 0x4) != 0)
2369                round = 0;
2370        } else if ((s->fpcr & M68K_FPCR_PREC) == 0) {
2371            round = 0;
2372        }
2373    }
2374    if (round) {
2375        TCGv tmp = tcg_temp_new_i32();
2376        gen_helper_f64_to_f32(tmp, cpu_env, res);
2377        gen_helper_f32_to_f64(res, cpu_env, tmp);
2378        tcg_temp_free_i32(tmp);
2379    }
2380    tcg_gen_mov_f64(QREG_FP_RESULT, res);
2381    if (set_dest) {
2382        tcg_gen_mov_f64(dest, res);
2383    }
2384    tcg_temp_free_i64(res);
2385    return;
2386undef:
2387    /* FIXME: Is this right for offset addressing modes?  */
2388    s->pc -= 2;
2389    disas_undef_fpu(env, s, insn);
2390}
2391
2392DISAS_INSN(fbcc)
2393{
2394    uint32_t offset;
2395    uint32_t addr;
2396    TCGv flag;
2397    TCGLabel *l1;
2398
2399    addr = s->pc;
2400    offset = cpu_ldsw_code(env, s->pc);
2401    s->pc += 2;
2402    if (insn & (1 << 6)) {
2403        offset = (offset << 16) | cpu_lduw_code(env, s->pc);
2404        s->pc += 2;
2405    }
2406
2407    l1 = gen_new_label();
2408    /* TODO: Raise BSUN exception.  */
2409    flag = tcg_temp_new();
2410    gen_helper_compare_f64(flag, cpu_env, QREG_FP_RESULT);
2411    /* Jump to l1 if condition is true.  */
2412    switch (insn & 0xf) {
2413    case 0: /* f */
2414        break;
2415    case 1: /* eq (=0) */
2416        tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(0), l1);
2417        break;
2418    case 2: /* ogt (=1) */
2419        tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(1), l1);
2420        break;
2421    case 3: /* oge (=0 or =1) */
2422        tcg_gen_brcond_i32(TCG_COND_LEU, flag, tcg_const_i32(1), l1);
2423        break;
2424    case 4: /* olt (=-1) */
2425        tcg_gen_brcond_i32(TCG_COND_LT, flag, tcg_const_i32(0), l1);
2426        break;
2427    case 5: /* ole (=-1 or =0) */
2428        tcg_gen_brcond_i32(TCG_COND_LE, flag, tcg_const_i32(0), l1);
2429        break;
2430    case 6: /* ogl (=-1 or =1) */
2431        tcg_gen_andi_i32(flag, flag, 1);
2432        tcg_gen_brcond_i32(TCG_COND_NE, flag, tcg_const_i32(0), l1);
2433        break;
2434    case 7: /* or (=2) */
2435        tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(2), l1);
2436        break;
2437    case 8: /* un (<2) */
2438        tcg_gen_brcond_i32(TCG_COND_LT, flag, tcg_const_i32(2), l1);
2439        break;
2440    case 9: /* ueq (=0 or =2) */
2441        tcg_gen_andi_i32(flag, flag, 1);
2442        tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(0), l1);
2443        break;
2444    case 10: /* ugt (>0) */
2445        tcg_gen_brcond_i32(TCG_COND_GT, flag, tcg_const_i32(0), l1);
2446        break;
2447    case 11: /* uge (>=0) */
2448        tcg_gen_brcond_i32(TCG_COND_GE, flag, tcg_const_i32(0), l1);
2449        break;
2450    case 12: /* ult (=-1 or =2) */
2451        tcg_gen_brcond_i32(TCG_COND_GEU, flag, tcg_const_i32(2), l1);
2452        break;
2453    case 13: /* ule (!=1) */
2454        tcg_gen_brcond_i32(TCG_COND_NE, flag, tcg_const_i32(1), l1);
2455        break;
2456    case 14: /* ne (!=0) */
2457        tcg_gen_brcond_i32(TCG_COND_NE, flag, tcg_const_i32(0), l1);
2458        break;
2459    case 15: /* t */
2460        tcg_gen_br(l1);
2461        break;
2462    }
2463    gen_jmp_tb(s, 0, s->pc);
2464    gen_set_label(l1);
2465    gen_jmp_tb(s, 1, addr + offset);
2466}
2467
2468DISAS_INSN(frestore)
2469{
2470    M68kCPU *cpu = m68k_env_get_cpu(env);
2471
2472    /* TODO: Implement frestore.  */
2473    cpu_abort(CPU(cpu), "FRESTORE not implemented");
2474}
2475
2476DISAS_INSN(fsave)
2477{
2478    M68kCPU *cpu = m68k_env_get_cpu(env);
2479
2480    /* TODO: Implement fsave.  */
2481    cpu_abort(CPU(cpu), "FSAVE not implemented");
2482}
2483
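    /* Extract one 16-bit half of a MAC operand.  In fractional mode
       (MACSR_FI) the halfword is left in the upper 16 bits of the result;
       in signed integer mode (MACSR_SU) it is sign-extended, otherwise it
       is zero-extended.  */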
2484static inline TCGv gen_mac_extract_word(DisasContext *s, TCGv val, int upper)
2485{
2486    TCGv tmp = tcg_temp_new();
2487    if (s->env->macsr & MACSR_FI) {
2488        if (upper)
2489            tcg_gen_andi_i32(tmp, val, 0xffff0000);
2490        else
2491            tcg_gen_shli_i32(tmp, val, 16);
2492    } else if (s->env->macsr & MACSR_SU) {
2493        if (upper)
2494            tcg_gen_sari_i32(tmp, val, 16);
2495        else
2496            tcg_gen_ext16s_i32(tmp, val);
2497    } else {
2498        if (upper)
2499            tcg_gen_shri_i32(tmp, val, 16);
2500        else
2501            tcg_gen_ext16u_i32(tmp, val);
2502    }
2503    return tmp;
2504}
2505
2506static void gen_mac_clear_flags(void)
2507{
2508    tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR,
2509                     ~(MACSR_V | MACSR_Z | MACSR_N | MACSR_EV));
2510}
2511
2512DISAS_INSN(mac)
2513{
2514    TCGv rx;
2515    TCGv ry;
2516    uint16_t ext;
2517    int acc;
2518    TCGv tmp;
2519    TCGv addr;
2520    TCGv loadval;
2521    int dual;
2522    TCGv saved_flags;
2523
2524    if (!s->done_mac) {
2525        s->mactmp = tcg_temp_new_i64();
2526        s->done_mac = 1;
2527    }
2528
2529    ext = cpu_lduw_code(env, s->pc);
2530    s->pc += 2;
2531
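        /* The accumulator index is built from opcode bit 7 (low bit) and
           extension word bit 4 (high bit); the dual-accumulate form, only
           available with EMAC_B, names its second accumulator in ext[3:2].  */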
2532    acc = ((insn >> 7) & 1) | ((ext >> 3) & 2);
2533    dual = ((insn & 0x30) != 0 && (ext & 3) != 0);
2534    if (dual && !m68k_feature(s->env, M68K_FEATURE_CF_EMAC_B)) {
2535        disas_undef(env, s, insn);
2536        return;
2537    }
2538    if (insn & 0x30) {
2539        /* MAC with load.  */
2540        tmp = gen_lea(env, s, insn, OS_LONG);
2541        addr = tcg_temp_new();
2542        tcg_gen_and_i32(addr, tmp, QREG_MAC_MASK);
2543        /* Load the value now to ensure correct exception behavior.
2544           Perform writeback after reading the MAC inputs.  */
2545        loadval = gen_load(s, OS_LONG, addr, 0);
2546
2547        acc ^= 1;
2548        rx = (ext & 0x8000) ? AREG(ext, 12) : DREG(insn, 12);
2549        ry = (ext & 8) ? AREG(ext, 0) : DREG(ext, 0);
2550    } else {
2551        loadval = addr = NULL_QREG;
2552        rx = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
2553        ry = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
2554    }
2555
2556    gen_mac_clear_flags();
2557#if 0
2558    l1 = -1;
2559    /* Disabled because conditional branches clobber temporary vars.  */
2560    if ((s->env->macsr & MACSR_OMC) != 0 && !dual) {
2561        /* Skip the multiply if we know we will ignore it.  */
2562        l1 = gen_new_label();
2563        tmp = tcg_temp_new();
2564        tcg_gen_andi_i32(tmp, QREG_MACSR, 1 << (acc + 8));
2565        gen_op_jmp_nz32(tmp, l1);
2566    }
2567#endif
2568
2569    if ((ext & 0x0800) == 0) {
2570        /* Word.  */
2571        rx = gen_mac_extract_word(s, rx, (ext & 0x80) != 0);
2572        ry = gen_mac_extract_word(s, ry, (ext & 0x40) != 0);
2573    }
2574    if (s->env->macsr & MACSR_FI) {
2575        gen_helper_macmulf(s->mactmp, cpu_env, rx, ry);
2576    } else {
2577        if (s->env->macsr & MACSR_SU)
2578            gen_helper_macmuls(s->mactmp, cpu_env, rx, ry);
2579        else
2580            gen_helper_macmulu(s->mactmp, cpu_env, rx, ry);
2581        switch ((ext >> 9) & 3) {
2582        case 1:
2583            tcg_gen_shli_i64(s->mactmp, s->mactmp, 1);
2584            break;
2585        case 3:
2586            tcg_gen_shri_i64(s->mactmp, s->mactmp, 1);
2587            break;
2588        }
2589    }
2590
2591    if (dual) {
2592        /* Save the overflow flag from the multiply.  */
2593        saved_flags = tcg_temp_new();
2594        tcg_gen_mov_i32(saved_flags, QREG_MACSR);
2595    } else {
2596        saved_flags = NULL_QREG;
2597    }
2598
2599#if 0
2600    /* Disabled because conditional branches clobber temporary vars.  */
2601    if ((s->env->macsr & MACSR_OMC) != 0 && dual) {
2602        /* Skip the accumulate if the value is already saturated.  */
2603        l1 = gen_new_label();
2604        tmp = tcg_temp_new();
2605        gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc));
2606        gen_op_jmp_nz32(tmp, l1);
2607    }
2608#endif
2609
2610    if (insn & 0x100)
2611        tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
2612    else
2613        tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
2614
2615    if (s->env->macsr & MACSR_FI)
2616        gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
2617    else if (s->env->macsr & MACSR_SU)
2618        gen_helper_macsats(cpu_env, tcg_const_i32(acc));
2619    else
2620        gen_helper_macsatu(cpu_env, tcg_const_i32(acc));
2621
2622#if 0
2623    /* Disabled because conditional branches clobber temporary vars.  */
2624    if (l1 != -1)
2625        gen_set_label(l1);
2626#endif
2627
2628    if (dual) {
2629        /* Dual accumulate variant.  */
2630        acc = (ext >> 2) & 3;
2631        /* Restore the overflow flag from the multiplier.  */
2632        tcg_gen_mov_i32(QREG_MACSR, saved_flags);
2633#if 0
2634        /* Disabled because conditional branches clobber temporary vars.  */
2635        if ((s->env->macsr & MACSR_OMC) != 0) {
2636            /* Skip the accumulate if the value is already saturated.  */
2637            l1 = gen_new_label();
2638            tmp = tcg_temp_new();
2639            gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc));
2640            gen_op_jmp_nz32(tmp, l1);
2641        }
2642#endif
2643        if (ext & 2)
2644            tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
2645        else
2646            tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
2647        if (s->env->macsr & MACSR_FI)
2648            gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
2649        else if (s->env->macsr & MACSR_SU)
2650            gen_helper_macsats(cpu_env, tcg_const_i32(acc));
2651        else
2652            gen_helper_macsatu(cpu_env, tcg_const_i32(acc));
2653#if 0
2654        /* Disabled because conditional branches clobber temporary vars.  */
2655        if (l1 != -1)
2656            gen_set_label(l1);
2657#endif
2658    }
2659    gen_helper_mac_set_flags(cpu_env, tcg_const_i32(acc));
2660
2661    if (insn & 0x30) {
2662        TCGv rw;
2663        rw = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
2664        tcg_gen_mov_i32(rw, loadval);
2665        /* FIXME: Should address writeback happen with the masked or
2666           unmasked value?  */
2667        switch ((insn >> 3) & 7) {
2668        case 3: /* Post-increment.  */
2669            tcg_gen_addi_i32(AREG(insn, 0), addr, 4);
2670            break;
2671        case 4: /* Pre-decrement.  */
2672            tcg_gen_mov_i32(AREG(insn, 0), addr);
2673        }
2674    }
2675}
2676
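    /* Move an accumulator into a data or address register.  Fractional
       mode and the saturating integer modes go through helpers; otherwise
       the low 32 bits are taken directly.  Bit 6 of the opcode additionally
       clears the accumulator and its overflow flag.  */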
2677DISAS_INSN(from_mac)
2678{
2679    TCGv rx;
2680    TCGv_i64 acc;
2681    int accnum;
2682
2683    rx = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
2684    accnum = (insn >> 9) & 3;
2685    acc = MACREG(accnum);
2686    if (s->env->macsr & MACSR_FI) {
2687        gen_helper_get_macf(rx, cpu_env, acc);
2688    } else if ((s->env->macsr & MACSR_OMC) == 0) {
2689        tcg_gen_extrl_i64_i32(rx, acc);
2690    } else if (s->env->macsr & MACSR_SU) {
2691        gen_helper_get_macs(rx, acc);
2692    } else {
2693        gen_helper_get_macu(rx, acc);
2694    }
2695    if (insn & 0x40) {
2696        tcg_gen_movi_i64(acc, 0);
2697        tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
2698    }
2699}
2700
2701DISAS_INSN(move_mac)
2702{
2703    /* FIXME: This can be done without a helper.  */
2704    int src;
2705    TCGv dest;
2706    src = insn & 3;
2707    dest = tcg_const_i32((insn >> 9) & 3);
2708    gen_helper_mac_move(cpu_env, dest, tcg_const_i32(src));
2709    gen_mac_clear_flags();
2710    gen_helper_mac_set_flags(cpu_env, dest);
2711}
2712
2713DISAS_INSN(from_macsr)
2714{
2715    TCGv reg;
2716
2717    reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
2718    tcg_gen_mov_i32(reg, QREG_MACSR);
2719}
2720
2721DISAS_INSN(from_mask)
2722{
2723    TCGv reg;
2724    reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
2725    tcg_gen_mov_i32(reg, QREG_MAC_MASK);
2726}
2727
2728DISAS_INSN(from_mext)
2729{
2730    TCGv reg;
2731    TCGv acc;
2732    reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
2733    acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
2734    if (s->env->macsr & MACSR_FI)
2735        gen_helper_get_mac_extf(reg, cpu_env, acc);
2736    else
2737        gen_helper_get_mac_exti(reg, cpu_env, acc);
2738}
2739
2740DISAS_INSN(macsr_to_ccr)
2741{
2742    tcg_gen_movi_i32(QREG_CC_X, 0);
2743    tcg_gen_andi_i32(QREG_CC_DEST, QREG_MACSR, 0xf);
2744    s->cc_op = CC_OP_FLAGS;
2745}
2746
2747DISAS_INSN(to_mac)
2748{
2749    TCGv_i64 acc;
2750    TCGv val;
2751    int accnum;
2752    accnum = (insn >> 9) & 3;
2753    acc = MACREG(accnum);
2754    SRC_EA(env, val, OS_LONG, 0, NULL);
2755    if (s->env->macsr & MACSR_FI) {
2756        tcg_gen_ext_i32_i64(acc, val);
2757        tcg_gen_shli_i64(acc, acc, 8);
2758    } else if (s->env->macsr & MACSR_SU) {
2759        tcg_gen_ext_i32_i64(acc, val);
2760    } else {
2761        tcg_gen_extu_i32_i64(acc, val);
2762    }
2763    tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
2764    gen_mac_clear_flags();
2765    gen_helper_mac_set_flags(cpu_env, tcg_const_i32(accnum));
2766}
2767
2768DISAS_INSN(to_macsr)
2769{
2770    TCGv val;
2771    SRC_EA(env, val, OS_LONG, 0, NULL);
2772    gen_helper_set_macsr(cpu_env, val);
2773    gen_lookup_tb(s);
2774}
2775
2776DISAS_INSN(to_mask)
2777{
2778    TCGv val;
2779    SRC_EA(env, val, OS_LONG, 0, NULL);
2780    tcg_gen_ori_i32(QREG_MAC_MASK, val, 0xffff0000);
2781}
2782
2783DISAS_INSN(to_mext)
2784{
2785    TCGv val;
2786    TCGv acc;
2787    SRC_EA(env, val, OS_LONG, 0, NULL);
2788    acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
2789    if (s->env->macsr & MACSR_FI)
2790        gen_helper_set_mac_extf(cpu_env, val, acc);
2791    else if (s->env->macsr & MACSR_SU)
2792        gen_helper_set_mac_exts(cpu_env, val, acc);
2793    else
2794        gen_helper_set_mac_extu(cpu_env, val, acc);
2795}
2796
2797static disas_proc opcode_table[65536];
2798
2799static void
2800register_opcode (disas_proc proc, uint16_t opcode, uint16_t mask)
2801{
2802  int i;
2803  int from;
2804  int to;
2805
2806  /* Sanity check.  All set bits must be included in the mask.  */
2807  if (opcode & ~mask) {
2808      fprintf(stderr,
2809              "qemu internal error: bogus opcode definition %04x/%04x\n",
2810              opcode, mask);
2811      abort();
2812  }
2813  /* This could probably be cleverer.  For now just optimize the case where
2814     the top bits are known.  */
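      /* For example, registering scc (opcode 0x50c0, mask 0xf0f8) finds the
         first clear mask bit at position 11, so the loop below walks the
         0x1000 entries 0x5000..0x5fff and fills in every one whose masked
         value equals 0x50c0.  */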
2815  /* Find the first zero bit in the mask.  */
2816  i = 0x8000;
2817  while ((i & mask) != 0)
2818      i >>= 1;
2819  /* Iterate over all combinations of this and lower bits.  */
2820  if (i == 0)
2821      i = 1;
2822  else
2823      i <<= 1;
2824  from = opcode & ~(i - 1);
2825  to = from + i;
2826  for (i = from; i < to; i++) {
2827      if ((i & mask) == opcode)
2828          opcode_table[i] = proc;
2829  }
2830}
2831
2832/* Register m68k opcode handlers.  Order is important.
2833   Later insns override earlier ones.  */
2834void register_m68k_insns (CPUM68KState *env)
2835{
2836#define INSN(name, opcode, mask, feature) do { \
2837    if (m68k_feature(env, M68K_FEATURE_##feature)) \
2838        register_opcode(disas_##name, 0x##opcode, 0x##mask); \
2839    } while(0)
2840    INSN(undef,     0000, 0000, CF_ISA_A);
2841    INSN(arith_im,  0080, fff8, CF_ISA_A);
2842    INSN(bitrev,    00c0, fff8, CF_ISA_APLUSC);
2843    INSN(bitop_reg, 0100, f1c0, CF_ISA_A);
2844    INSN(bitop_reg, 0140, f1c0, CF_ISA_A);
2845    INSN(bitop_reg, 0180, f1c0, CF_ISA_A);
2846    INSN(bitop_reg, 01c0, f1c0, CF_ISA_A);
2847    INSN(arith_im,  0280, fff8, CF_ISA_A);
2848    INSN(byterev,   02c0, fff8, CF_ISA_APLUSC);
2849    INSN(arith_im,  0480, fff8, CF_ISA_A);
2850    INSN(ff1,       04c0, fff8, CF_ISA_APLUSC);
2851    INSN(arith_im,  0680, fff8, CF_ISA_A);
2852    INSN(bitop_im,  0800, ffc0, CF_ISA_A);
2853    INSN(bitop_im,  0840, ffc0, CF_ISA_A);
2854    INSN(bitop_im,  0880, ffc0, CF_ISA_A);
2855    INSN(bitop_im,  08c0, ffc0, CF_ISA_A);
2856    INSN(arith_im,  0a80, fff8, CF_ISA_A);
2857    INSN(arith_im,  0c00, ff38, CF_ISA_A);
2858    INSN(move,      1000, f000, CF_ISA_A);
2859    INSN(move,      2000, f000, CF_ISA_A);
2860    INSN(move,      3000, f000, CF_ISA_A);
2861    INSN(strldsr,   40e7, ffff, CF_ISA_APLUSC);
2862    INSN(negx,      4080, fff8, CF_ISA_A);
2863    INSN(move_from_sr, 40c0, fff8, CF_ISA_A);
2864    INSN(lea,       41c0, f1c0, CF_ISA_A);
2865    INSN(clr,       4200, ff00, CF_ISA_A);
2866    INSN(undef,     42c0, ffc0, CF_ISA_A);
2867    INSN(move_from_ccr, 42c0, fff8, CF_ISA_A);
2868    INSN(neg,       4480, fff8, CF_ISA_A);
2869    INSN(move_to_ccr, 44c0, ffc0, CF_ISA_A);
2870    INSN(not,       4680, fff8, CF_ISA_A);
2871    INSN(move_to_sr, 46c0, ffc0, CF_ISA_A);
2872    INSN(pea,       4840, ffc0, CF_ISA_A);
2873    INSN(swap,      4840, fff8, CF_ISA_A);
2874    INSN(movem,     48c0, fbc0, CF_ISA_A);
2875    INSN(ext,       4880, fff8, CF_ISA_A);
2876    INSN(ext,       48c0, fff8, CF_ISA_A);
2877    INSN(ext,       49c0, fff8, CF_ISA_A);
2878    INSN(tst,       4a00, ff00, CF_ISA_A);
2879    INSN(tas,       4ac0, ffc0, CF_ISA_B);
2880    INSN(halt,      4ac8, ffff, CF_ISA_A);
2881    INSN(pulse,     4acc, ffff, CF_ISA_A);
2882    INSN(illegal,   4afc, ffff, CF_ISA_A);
2883    INSN(mull,      4c00, ffc0, CF_ISA_A);
2884    INSN(divl,      4c40, ffc0, CF_ISA_A);
2885    INSN(sats,      4c80, fff8, CF_ISA_B);
2886    INSN(trap,      4e40, fff0, CF_ISA_A);
2887    INSN(link,      4e50, fff8, CF_ISA_A);
2888    INSN(unlk,      4e58, fff8, CF_ISA_A);
2889    INSN(move_to_usp, 4e60, fff8, USP);
2890    INSN(move_from_usp, 4e68, fff8, USP);
2891    INSN(nop,       4e71, ffff, CF_ISA_A);
2892    INSN(stop,      4e72, ffff, CF_ISA_A);
2893    INSN(rte,       4e73, ffff, CF_ISA_A);
2894    INSN(rts,       4e75, ffff, CF_ISA_A);
2895    INSN(movec,     4e7b, ffff, CF_ISA_A);
2896    INSN(jump,      4e80, ffc0, CF_ISA_A);
2897    INSN(jump,      4ec0, ffc0, CF_ISA_A);
2898    INSN(addsubq,   5180, f1c0, CF_ISA_A);
2899    INSN(scc,       50c0, f0f8, CF_ISA_A);
2900    INSN(addsubq,   5080, f1c0, CF_ISA_A);
2901    INSN(tpf,       51f8, fff8, CF_ISA_A);
2902
2903    /* Branch instructions.  */
2904    INSN(branch,    6000, f000, CF_ISA_A);
2905    /* Disable long branch instructions, then add back the ones we want.  */
2906    INSN(undef,     60ff, f0ff, CF_ISA_A); /* All long branches.  */
2907    INSN(branch,    60ff, f0ff, CF_ISA_B);
2908    INSN(undef,     60ff, ffff, CF_ISA_B); /* bra.l */
2909    INSN(branch,    60ff, ffff, BRAL);
2910
2911    INSN(moveq,     7000, f100, CF_ISA_A);
2912    INSN(mvzs,      7100, f100, CF_ISA_B);
2913    INSN(or,        8000, f000, CF_ISA_A);
2914    INSN(divw,      80c0, f0c0, CF_ISA_A);
2915    INSN(addsub,    9000, f000, CF_ISA_A);
2916    INSN(subx,      9180, f1f8, CF_ISA_A);
2917    INSN(suba,      91c0, f1c0, CF_ISA_A);
2918
2919    INSN(undef_mac, a000, f000, CF_ISA_A);
2920    INSN(mac,       a000, f100, CF_EMAC);
2921    INSN(from_mac,  a180, f9b0, CF_EMAC);
2922    INSN(move_mac,  a110, f9fc, CF_EMAC);
2923    INSN(from_macsr,a980, f9f0, CF_EMAC);
2924    INSN(from_mask, ad80, fff0, CF_EMAC);
2925    INSN(from_mext, ab80, fbf0, CF_EMAC);
2926    INSN(macsr_to_ccr, a9c0, ffff, CF_EMAC);
2927    INSN(to_mac,    a100, f9c0, CF_EMAC);
2928    INSN(to_macsr,  a900, ffc0, CF_EMAC);
2929    INSN(to_mext,   ab00, fbc0, CF_EMAC);
2930    INSN(to_mask,   ad00, ffc0, CF_EMAC);
2931
2932    INSN(mov3q,     a140, f1c0, CF_ISA_B);
2933    INSN(cmp,       b000, f1c0, CF_ISA_B); /* cmp.b */
2934    INSN(cmp,       b040, f1c0, CF_ISA_B); /* cmp.w */
2935    INSN(cmpa,      b0c0, f1c0, CF_ISA_B); /* cmpa.w */
2936    INSN(cmp,       b080, f1c0, CF_ISA_A);
2937    INSN(cmpa,      b1c0, f1c0, CF_ISA_A);
2938    INSN(eor,       b180, f1c0, CF_ISA_A);
2939    INSN(and,       c000, f000, CF_ISA_A);
2940    INSN(mulw,      c0c0, f0c0, CF_ISA_A);
2941    INSN(addsub,    d000, f000, CF_ISA_A);
2942    INSN(addx,      d180, f1f8, CF_ISA_A);
2943    INSN(adda,      d1c0, f1c0, CF_ISA_A);
2944    INSN(shift_im,  e080, f0f0, CF_ISA_A);
2945    INSN(shift_reg, e0a0, f0f0, CF_ISA_A);
2946    INSN(undef_fpu, f000, f000, CF_ISA_A);
2947    INSN(fpu,       f200, ffc0, CF_FPU);
2948    INSN(fbcc,      f280, ffc0, CF_FPU);
2949    INSN(frestore,  f340, ffc0, CF_FPU);
2950    INSN(fsave,     f340, ffc0, CF_FPU);
2951    INSN(intouch,   f340, ffc0, CF_ISA_A);
2952    INSN(cpushl,    f428, ff38, CF_ISA_A);
2953    INSN(wddata,    fb00, ff00, CF_ISA_A);
2954    INSN(wdebug,    fbc0, ffc0, CF_ISA_A);
2955#undef INSN
2956}
2957
2958/* ??? Some of this implementation is not exception safe.  We should always
2959   write back the result to memory before setting the condition codes.  */
2960static void disas_m68k_insn(CPUM68KState * env, DisasContext *s)
2961{
2962    uint16_t insn;
2963
2964    insn = cpu_lduw_code(env, s->pc);
2965    s->pc += 2;
2966
2967    opcode_table[insn](env, s, insn);
2968}
2969
2970/* generate intermediate code for basic block 'tb'.  */
2971void gen_intermediate_code(CPUM68KState *env, TranslationBlock *tb)
2972{
2973    M68kCPU *cpu = m68k_env_get_cpu(env);
2974    CPUState *cs = CPU(cpu);
2975    DisasContext dc1, *dc = &dc1;
2976    target_ulong pc_start;
2977    int pc_offset;
2978    int num_insns;
2979    int max_insns;
2980
2981    /* generate intermediate code */
2982    pc_start = tb->pc;
2983
2984    dc->tb = tb;
2985
2986    dc->env = env;
2987    dc->is_jmp = DISAS_NEXT;
2988    dc->pc = pc_start;
2989    dc->cc_op = CC_OP_DYNAMIC;
2990    dc->singlestep_enabled = cs->singlestep_enabled;
2991    dc->fpcr = env->fpcr;
2992    dc->user = (env->sr & SR_S) == 0;
2993    dc->done_mac = 0;
2994    num_insns = 0;
2995    max_insns = tb->cflags & CF_COUNT_MASK;
2996    if (max_insns == 0) {
2997        max_insns = CF_COUNT_MASK;
2998    }
2999    if (max_insns > TCG_MAX_INSNS) {
3000        max_insns = TCG_MAX_INSNS;
3001    }
3002
3003    gen_tb_start(tb);
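        /* Translate one instruction at a time until something ends the
           block: a jump or exception, a full TCG op buffer, single-stepping,
           the instruction count limit, or getting too close to the end of
           the page.  */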
3004    do {
3005        pc_offset = dc->pc - pc_start;
3006        gen_throws_exception = NULL;
3007        tcg_gen_insn_start(dc->pc);
3008        num_insns++;
3009
3010        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
3011            gen_exception(dc, dc->pc, EXCP_DEBUG);
3012            dc->is_jmp = DISAS_JUMP;
3013            /* The address covered by the breakpoint must be included in
3014               [tb->pc, tb->pc + tb->size) in order for it to be
3015               properly cleared -- thus we increment the PC here so that
3016               the logic setting tb->size below does the right thing.  */
3017            dc->pc += 2;
3018            break;
3019        }
3020
3021        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
3022            gen_io_start();
3023        }
3024
3025        dc->insn_pc = dc->pc;
3026        disas_m68k_insn(env, dc);
3027    } while (!dc->is_jmp && !tcg_op_buf_full() &&
3028             !cs->singlestep_enabled &&
3029             !singlestep &&
3030             (pc_offset) < (TARGET_PAGE_SIZE - 32) &&
3031             num_insns < max_insns);
3032
3033    if (tb->cflags & CF_LAST_IO)
3034        gen_io_end();
3035    if (unlikely(cs->singlestep_enabled)) {
3036        /* Make sure the pc is updated, and raise a debug exception.  */
3037        if (!dc->is_jmp) {
3038            gen_flush_cc_op(dc);
3039            tcg_gen_movi_i32(QREG_PC, dc->pc);
3040        }
3041        gen_helper_raise_exception(cpu_env, tcg_const_i32(EXCP_DEBUG));
3042    } else {
3043        switch(dc->is_jmp) {
3044        case DISAS_NEXT:
3045            gen_flush_cc_op(dc);
3046            gen_jmp_tb(dc, 0, dc->pc);
3047            break;
3048        default:
3049        case DISAS_JUMP:
3050        case DISAS_UPDATE:
3051            gen_flush_cc_op(dc);
3052            /* indicate that the hash table must be used to find the next TB */
3053            tcg_gen_exit_tb(0);
3054            break;
3055        case DISAS_TB_JUMP:
3056            /* nothing more to generate */
3057            break;
3058        }
3059    }
3060    gen_tb_end(tb, num_insns);
3061
3062#ifdef DEBUG_DISAS
3063    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
3064        qemu_log("----------------\n");
3065        qemu_log("IN: %s\n", lookup_symbol(pc_start));
3066        log_target_disas(cs, pc_start, dc->pc - pc_start, 0);
3067        qemu_log("\n");
3068    }
3069#endif
3070    tb->size = dc->pc - pc_start;
3071    tb->icount = num_insns;
3072}
3073
3074void m68k_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
3075                         int flags)
3076{
3077    M68kCPU *cpu = M68K_CPU(cs);
3078    CPUM68KState *env = &cpu->env;
3079    int i;
3080    uint16_t sr;
3081    CPU_DoubleU u;
3082    for (i = 0; i < 8; i++)
3083      {
3084        u.d = env->fregs[i];
3085        cpu_fprintf (f, "D%d = %08x   A%d = %08x   F%d = %08x%08x (%12g)\n",
3086                     i, env->dregs[i], i, env->aregs[i],
3087                     i, u.l.upper, u.l.lower, *(double *)&u.d);
3088      }
3089    cpu_fprintf (f, "PC = %08x   ", env->pc);
3090    sr = env->sr;
3091    cpu_fprintf (f, "SR = %04x %c%c%c%c%c ", sr, (sr & 0x10) ? 'X' : '-',
3092                 (sr & CCF_N) ? 'N' : '-', (sr & CCF_Z) ? 'Z' : '-',
3093                 (sr & CCF_V) ? 'V' : '-', (sr & CCF_C) ? 'C' : '-');
3094    cpu_fprintf (f, "FPRESULT = %12g\n", *(double *)&env->fp_result);
3095}
3096
3097void restore_state_to_opc(CPUM68KState *env, TranslationBlock *tb,
3098                          target_ulong *data)
3099{
3100    env->pc = data[0];
3101}
3102