qemu/target-m68k/translate.c
   1/*
   2 *  m68k translation
   3 *
   4 *  Copyright (c) 2005-2007 CodeSourcery
   5 *  Written by Paul Brook
   6 *
   7 * This library is free software; you can redistribute it and/or
   8 * modify it under the terms of the GNU Lesser General Public
   9 * License as published by the Free Software Foundation; either
  10 * version 2 of the License, or (at your option) any later version.
  11 *
  12 * This library is distributed in the hope that it will be useful,
  13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  15 * Lesser General Public License for more details.
  16 *
  17 * You should have received a copy of the GNU Lesser General Public
  18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
  19 */
  20
  21#include "qemu/osdep.h"
  22#include "cpu.h"
  23#include "disas/disas.h"
  24#include "exec/exec-all.h"
  25#include "tcg-op.h"
  26#include "qemu/log.h"
  27#include "exec/cpu_ldst.h"
  28
  29#include "exec/helper-proto.h"
  30#include "exec/helper-gen.h"
  31
  32#include "trace-tcg.h"
  33#include "exec/log.h"
  34
  35
  36//#define DEBUG_DISPATCH 1
  37
  38/* Fake floating point.  */
  39#define tcg_gen_mov_f64 tcg_gen_mov_i64
  40#define tcg_gen_qemu_ldf64 tcg_gen_qemu_ld64
  41#define tcg_gen_qemu_stf64 tcg_gen_qemu_st64
  42
  43#define DEFO32(name, offset) static TCGv QREG_##name;
  44#define DEFO64(name, offset) static TCGv_i64 QREG_##name;
  45#define DEFF64(name, offset) static TCGv_i64 QREG_##name;
  46#include "qregs.def"
  47#undef DEFO32
  48#undef DEFO64
  49#undef DEFF64
  50
  51static TCGv_i32 cpu_halted;
  52static TCGv_i32 cpu_exception_index;
  53
  54static TCGv_env cpu_env;
  55
  56static char cpu_reg_names[3*8*3 + 5*4];
  57static TCGv cpu_dregs[8];
  58static TCGv cpu_aregs[8];
  59static TCGv_i64 cpu_fregs[8];
  60static TCGv_i64 cpu_macc[4];
  61
  62#define DREG(insn, pos) cpu_dregs[((insn) >> (pos)) & 7]
  63#define AREG(insn, pos) cpu_aregs[((insn) >> (pos)) & 7]
  64#define FREG(insn, pos) cpu_fregs[((insn) >> (pos)) & 7]
  65#define MACREG(acc) cpu_macc[acc]
  66#define QREG_SP cpu_aregs[7]
  67
  68static TCGv NULL_QREG;
  69#define IS_NULL_QREG(t) (TCGV_EQUAL(t, NULL_QREG))
  70/* Used to distinguish stores from bad addressing modes.  */
  71static TCGv store_dummy;
  72
  73#include "exec/gen-icount.h"
  74
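     /* Create the TCG globals that mirror CPU state: the qregs.def entries,
        D0-D7, A0-A7, F0-F7, the MAC accumulators and the halted and
        exception_index fields of CPUState.  */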
  75void m68k_tcg_init(void)
  76{
  77    char *p;
  78    int i;
  79
  80    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
  81    tcg_ctx.tcg_env = cpu_env;
  82
  83#define DEFO32(name, offset) \
  84    QREG_##name = tcg_global_mem_new_i32(cpu_env, \
  85        offsetof(CPUM68KState, offset), #name);
  86#define DEFO64(name, offset) \
  87    QREG_##name = tcg_global_mem_new_i64(cpu_env, \
  88        offsetof(CPUM68KState, offset), #name);
  89#define DEFF64(name, offset) DEFO64(name, offset)
  90#include "qregs.def"
  91#undef DEFO32
  92#undef DEFO64
  93#undef DEFF64
  94
  95    cpu_halted = tcg_global_mem_new_i32(cpu_env,
  96                                        -offsetof(M68kCPU, env) +
  97                                        offsetof(CPUState, halted), "HALTED");
  98    cpu_exception_index = tcg_global_mem_new_i32(cpu_env,
  99                                                 -offsetof(M68kCPU, env) +
 100                                                 offsetof(CPUState, exception_index),
 101                                                 "EXCEPTION");
 102
 103    p = cpu_reg_names;
 104    for (i = 0; i < 8; i++) {
 105        sprintf(p, "D%d", i);
 106        cpu_dregs[i] = tcg_global_mem_new(cpu_env,
 107                                          offsetof(CPUM68KState, dregs[i]), p);
 108        p += 3;
 109        sprintf(p, "A%d", i);
 110        cpu_aregs[i] = tcg_global_mem_new(cpu_env,
 111                                          offsetof(CPUM68KState, aregs[i]), p);
 112        p += 3;
 113        sprintf(p, "F%d", i);
 114        cpu_fregs[i] = tcg_global_mem_new_i64(cpu_env,
 115                                          offsetof(CPUM68KState, fregs[i]), p);
 116        p += 3;
 117    }
 118    for (i = 0; i < 4; i++) {
 119        sprintf(p, "ACC%d", i);
 120        cpu_macc[i] = tcg_global_mem_new_i64(cpu_env,
 121                                         offsetof(CPUM68KState, macc[i]), p);
 122        p += 5;
 123    }
 124
 125    NULL_QREG = tcg_global_mem_new(cpu_env, -4, "NULL");
 126    store_dummy = tcg_global_mem_new(cpu_env, -8, "NULL");
 127}
 128
 129/* internal defines */
 130typedef struct DisasContext {
 131    CPUM68KState *env;
 132    target_ulong insn_pc; /* Start of the current instruction.  */
 133    target_ulong pc;
 134    int is_jmp;
 135    int cc_op;
 136    int user;
 137    uint32_t fpcr;
 138    struct TranslationBlock *tb;
 139    int singlestep_enabled;
 140    TCGv_i64 mactmp;
 141    int done_mac;
 142} DisasContext;
 143
 144#define DISAS_JUMP_NEXT 4
 145
 146#if defined(CONFIG_USER_ONLY)
 147#define IS_USER(s) 1
 148#else
 149#define IS_USER(s) s->user
 150#endif
 151
 152/* XXX: move that elsewhere */
 153/* ??? Fix exceptions.  */
 154static void *gen_throws_exception;
 155#define gen_last_qop NULL
 156
 157#define OS_BYTE 0
 158#define OS_WORD 1
 159#define OS_LONG 2
 160#define OS_SINGLE 4
 161#define OS_DOUBLE 5
 162
 163typedef void (*disas_proc)(CPUM68KState *env, DisasContext *s, uint16_t insn);
 164
 165#ifdef DEBUG_DISPATCH
 166#define DISAS_INSN(name)                                                \
 167    static void real_disas_##name(CPUM68KState *env, DisasContext *s,   \
 168                                  uint16_t insn);                       \
 169    static void disas_##name(CPUM68KState *env, DisasContext *s,        \
 170                             uint16_t insn)                             \
 171    {                                                                   \
 172        qemu_log("Dispatch " #name "\n");                               \
  173        real_disas_##name(env, s, insn);                                \
 174    }                                                                   \
 175    static void real_disas_##name(CPUM68KState *env, DisasContext *s,   \
 176                                  uint16_t insn)
 177#else
 178#define DISAS_INSN(name)                                                \
 179    static void disas_##name(CPUM68KState *env, DisasContext *s,        \
 180                             uint16_t insn)
 181#endif
 182
 183/* Generate a load from the specified address.  Narrow values are
  184   sign or zero extended to full register width, depending on SIGN.  */
 185static inline TCGv gen_load(DisasContext * s, int opsize, TCGv addr, int sign)
 186{
 187    TCGv tmp;
 188    int index = IS_USER(s);
 189    tmp = tcg_temp_new_i32();
 190    switch(opsize) {
 191    case OS_BYTE:
 192        if (sign)
 193            tcg_gen_qemu_ld8s(tmp, addr, index);
 194        else
 195            tcg_gen_qemu_ld8u(tmp, addr, index);
 196        break;
 197    case OS_WORD:
 198        if (sign)
 199            tcg_gen_qemu_ld16s(tmp, addr, index);
 200        else
 201            tcg_gen_qemu_ld16u(tmp, addr, index);
 202        break;
 203    case OS_LONG:
 204    case OS_SINGLE:
 205        tcg_gen_qemu_ld32u(tmp, addr, index);
 206        break;
 207    default:
 208        g_assert_not_reached();
 209    }
 210    gen_throws_exception = gen_last_qop;
 211    return tmp;
 212}
 213
 214static inline TCGv_i64 gen_load64(DisasContext * s, TCGv addr)
 215{
 216    TCGv_i64 tmp;
 217    int index = IS_USER(s);
 218    tmp = tcg_temp_new_i64();
 219    tcg_gen_qemu_ldf64(tmp, addr, index);
 220    gen_throws_exception = gen_last_qop;
 221    return tmp;
 222}
 223
 224/* Generate a store.  */
 225static inline void gen_store(DisasContext *s, int opsize, TCGv addr, TCGv val)
 226{
 227    int index = IS_USER(s);
 228    switch(opsize) {
 229    case OS_BYTE:
 230        tcg_gen_qemu_st8(val, addr, index);
 231        break;
 232    case OS_WORD:
 233        tcg_gen_qemu_st16(val, addr, index);
 234        break;
 235    case OS_LONG:
 236    case OS_SINGLE:
 237        tcg_gen_qemu_st32(val, addr, index);
 238        break;
 239    default:
 240        g_assert_not_reached();
 241    }
 242    gen_throws_exception = gen_last_qop;
 243}
 244
 245static inline void gen_store64(DisasContext *s, TCGv addr, TCGv_i64 val)
 246{
 247    int index = IS_USER(s);
 248    tcg_gen_qemu_stf64(val, addr, index);
 249    gen_throws_exception = gen_last_qop;
 250}
 251
 252typedef enum {
 253    EA_STORE,
 254    EA_LOADU,
 255    EA_LOADS
 256} ea_what;
 257
  258/* Generate an unsigned load if WHAT is EA_LOADU, a signed load if it is
  259   EA_LOADS, otherwise generate a store.  */
 260static TCGv gen_ldst(DisasContext *s, int opsize, TCGv addr, TCGv val,
 261                     ea_what what)
 262{
 263    if (what == EA_STORE) {
 264        gen_store(s, opsize, addr, val);
 265        return store_dummy;
 266    } else {
 267        return gen_load(s, opsize, addr, what == EA_LOADS);
 268    }
 269}
 270
 271/* Read a 32-bit immediate constant.  */
 272static inline uint32_t read_im32(CPUM68KState *env, DisasContext *s)
 273{
 274    uint32_t im;
 275    im = ((uint32_t)cpu_lduw_code(env, s->pc)) << 16;
 276    s->pc += 2;
 277    im |= cpu_lduw_code(env, s->pc);
 278    s->pc += 2;
 279    return im;
 280}
 281
  282/* Calculate an address index.  */
 283static TCGv gen_addr_index(uint16_t ext, TCGv tmp)
 284{
 285    TCGv add;
 286    int scale;
 287
 288    add = (ext & 0x8000) ? AREG(ext, 12) : DREG(ext, 12);
 289    if ((ext & 0x800) == 0) {
 290        tcg_gen_ext16s_i32(tmp, add);
 291        add = tmp;
 292    }
 293    scale = (ext >> 9) & 3;
 294    if (scale != 0) {
 295        tcg_gen_shli_i32(tmp, add, scale);
 296        add = tmp;
 297    }
 298    return add;
 299}
 300
  301/* Handle a base + index + displacement effective address.
 302   A NULL_QREG base means pc-relative.  */
 303static TCGv gen_lea_indexed(CPUM68KState *env, DisasContext *s, TCGv base)
 304{
 305    uint32_t offset;
 306    uint16_t ext;
 307    TCGv add;
 308    TCGv tmp;
 309    uint32_t bd, od;
 310
 311    offset = s->pc;
 312    ext = cpu_lduw_code(env, s->pc);
 313    s->pc += 2;
 314
 315    if ((ext & 0x800) == 0 && !m68k_feature(s->env, M68K_FEATURE_WORD_INDEX))
 316        return NULL_QREG;
 317
 318    if (ext & 0x100) {
 319        /* full extension word format */
 320        if (!m68k_feature(s->env, M68K_FEATURE_EXT_FULL))
 321            return NULL_QREG;
 322
 323        if ((ext & 0x30) > 0x10) {
 324            /* base displacement */
 325            if ((ext & 0x30) == 0x20) {
 326                bd = (int16_t)cpu_lduw_code(env, s->pc);
 327                s->pc += 2;
 328            } else {
 329                bd = read_im32(env, s);
 330            }
 331        } else {
 332            bd = 0;
 333        }
 334        tmp = tcg_temp_new();
 335        if ((ext & 0x44) == 0) {
 336            /* pre-index */
 337            add = gen_addr_index(ext, tmp);
 338        } else {
 339            add = NULL_QREG;
 340        }
 341        if ((ext & 0x80) == 0) {
 342            /* base not suppressed */
 343            if (IS_NULL_QREG(base)) {
 344                base = tcg_const_i32(offset + bd);
 345                bd = 0;
 346            }
 347            if (!IS_NULL_QREG(add)) {
 348                tcg_gen_add_i32(tmp, add, base);
 349                add = tmp;
 350            } else {
 351                add = base;
 352            }
 353        }
 354        if (!IS_NULL_QREG(add)) {
 355            if (bd != 0) {
 356                tcg_gen_addi_i32(tmp, add, bd);
 357                add = tmp;
 358            }
 359        } else {
 360            add = tcg_const_i32(bd);
 361        }
 362        if ((ext & 3) != 0) {
 363            /* memory indirect */
 364            base = gen_load(s, OS_LONG, add, 0);
 365            if ((ext & 0x44) == 4) {
 366                add = gen_addr_index(ext, tmp);
 367                tcg_gen_add_i32(tmp, add, base);
 368                add = tmp;
 369            } else {
 370                add = base;
 371            }
 372            if ((ext & 3) > 1) {
 373                /* outer displacement */
 374                if ((ext & 3) == 2) {
 375                    od = (int16_t)cpu_lduw_code(env, s->pc);
 376                    s->pc += 2;
 377                } else {
 378                    od = read_im32(env, s);
 379                }
 380            } else {
 381                od = 0;
 382            }
 383            if (od != 0) {
 384                tcg_gen_addi_i32(tmp, add, od);
 385                add = tmp;
 386            }
 387        }
 388    } else {
 389        /* brief extension word format */
 390        tmp = tcg_temp_new();
 391        add = gen_addr_index(ext, tmp);
 392        if (!IS_NULL_QREG(base)) {
 393            tcg_gen_add_i32(tmp, add, base);
 394            if ((int8_t)ext)
 395                tcg_gen_addi_i32(tmp, tmp, (int8_t)ext);
 396        } else {
 397            tcg_gen_addi_i32(tmp, add, offset + (int8_t)ext);
 398        }
 399        add = tmp;
 400    }
 401    return add;
 402}
 403
 404/* Update the CPU env CC_OP state.  */
 405static inline void gen_flush_cc_op(DisasContext *s)
 406{
 407    if (s->cc_op != CC_OP_DYNAMIC)
 408        tcg_gen_movi_i32(QREG_CC_OP, s->cc_op);
 409}
 410
 411/* Evaluate all the CC flags.  */
 412static inline void gen_flush_flags(DisasContext *s)
 413{
 414    if (s->cc_op == CC_OP_FLAGS)
 415        return;
 416    gen_flush_cc_op(s);
 417    gen_helper_flush_flags(cpu_env, QREG_CC_OP);
 418    s->cc_op = CC_OP_FLAGS;
 419}
 420
 421static void gen_logic_cc(DisasContext *s, TCGv val)
 422{
 423    tcg_gen_mov_i32(QREG_CC_DEST, val);
 424    s->cc_op = CC_OP_LOGIC;
 425}
 426
 427static void gen_update_cc_add(TCGv dest, TCGv src)
 428{
 429    tcg_gen_mov_i32(QREG_CC_DEST, dest);
 430    tcg_gen_mov_i32(QREG_CC_SRC, src);
 431}
 432
 433static inline int opsize_bytes(int opsize)
 434{
 435    switch (opsize) {
 436    case OS_BYTE: return 1;
 437    case OS_WORD: return 2;
 438    case OS_LONG: return 4;
 439    case OS_SINGLE: return 4;
 440    case OS_DOUBLE: return 8;
 441    default:
 442        g_assert_not_reached();
 443    }
 444}
 445
 446/* Assign value to a register.  If the width is less than the register width
 447   only the low part of the register is set.  */
 448static void gen_partset_reg(int opsize, TCGv reg, TCGv val)
 449{
 450    TCGv tmp;
 451    switch (opsize) {
 452    case OS_BYTE:
 453        tcg_gen_andi_i32(reg, reg, 0xffffff00);
 454        tmp = tcg_temp_new();
 455        tcg_gen_ext8u_i32(tmp, val);
 456        tcg_gen_or_i32(reg, reg, tmp);
 457        break;
 458    case OS_WORD:
 459        tcg_gen_andi_i32(reg, reg, 0xffff0000);
 460        tmp = tcg_temp_new();
 461        tcg_gen_ext16u_i32(tmp, val);
 462        tcg_gen_or_i32(reg, reg, tmp);
 463        break;
 464    case OS_LONG:
 465    case OS_SINGLE:
 466        tcg_gen_mov_i32(reg, val);
 467        break;
 468    default:
 469        g_assert_not_reached();
 470    }
 471}
 472
 473/* Sign or zero extend a value.  */
 474static inline TCGv gen_extend(TCGv val, int opsize, int sign)
 475{
 476    TCGv tmp;
 477
 478    switch (opsize) {
 479    case OS_BYTE:
 480        tmp = tcg_temp_new();
 481        if (sign)
 482            tcg_gen_ext8s_i32(tmp, val);
 483        else
 484            tcg_gen_ext8u_i32(tmp, val);
 485        break;
 486    case OS_WORD:
 487        tmp = tcg_temp_new();
 488        if (sign)
 489            tcg_gen_ext16s_i32(tmp, val);
 490        else
 491            tcg_gen_ext16u_i32(tmp, val);
 492        break;
 493    case OS_LONG:
 494    case OS_SINGLE:
 495        tmp = val;
 496        break;
 497    default:
 498        g_assert_not_reached();
 499    }
 500    return tmp;
 501}
 502
 503/* Generate code for an "effective address".  Does not adjust the base
 504   register for autoincrement addressing modes.  */
 505static TCGv gen_lea(CPUM68KState *env, DisasContext *s, uint16_t insn,
 506                    int opsize)
 507{
 508    TCGv reg;
 509    TCGv tmp;
 510    uint16_t ext;
 511    uint32_t offset;
 512
 513    switch ((insn >> 3) & 7) {
 514    case 0: /* Data register direct.  */
 515    case 1: /* Address register direct.  */
 516        return NULL_QREG;
 517    case 2: /* Indirect register */
 518    case 3: /* Indirect postincrement.  */
 519        return AREG(insn, 0);
  520    case 4: /* Indirect predecrement.  */
 521        reg = AREG(insn, 0);
 522        tmp = tcg_temp_new();
 523        tcg_gen_subi_i32(tmp, reg, opsize_bytes(opsize));
 524        return tmp;
 525    case 5: /* Indirect displacement.  */
 526        reg = AREG(insn, 0);
 527        tmp = tcg_temp_new();
 528        ext = cpu_lduw_code(env, s->pc);
 529        s->pc += 2;
 530        tcg_gen_addi_i32(tmp, reg, (int16_t)ext);
 531        return tmp;
 532    case 6: /* Indirect index + displacement.  */
 533        reg = AREG(insn, 0);
 534        return gen_lea_indexed(env, s, reg);
 535    case 7: /* Other */
 536        switch (insn & 7) {
 537        case 0: /* Absolute short.  */
 538            offset = cpu_ldsw_code(env, s->pc);
 539            s->pc += 2;
 540            return tcg_const_i32(offset);
 541        case 1: /* Absolute long.  */
 542            offset = read_im32(env, s);
 543            return tcg_const_i32(offset);
 544        case 2: /* pc displacement  */
 545            offset = s->pc;
 546            offset += cpu_ldsw_code(env, s->pc);
 547            s->pc += 2;
 548            return tcg_const_i32(offset);
 549        case 3: /* pc index+displacement.  */
 550            return gen_lea_indexed(env, s, NULL_QREG);
 551        case 4: /* Immediate.  */
 552        default:
 553            return NULL_QREG;
 554        }
 555    }
 556    /* Should never happen.  */
 557    return NULL_QREG;
 558}
 559
  560/* Helper function for gen_ea.  Reuse the computed address between the
  561   read and write of read/write operands.  */
 562static inline TCGv gen_ea_once(CPUM68KState *env, DisasContext *s,
 563                               uint16_t insn, int opsize, TCGv val,
 564                               TCGv *addrp, ea_what what)
 565{
 566    TCGv tmp;
 567
 568    if (addrp && what == EA_STORE) {
 569        tmp = *addrp;
 570    } else {
 571        tmp = gen_lea(env, s, insn, opsize);
 572        if (IS_NULL_QREG(tmp))
 573            return tmp;
 574        if (addrp)
 575            *addrp = tmp;
 576    }
 577    return gen_ldst(s, opsize, tmp, val, what);
 578}
 579
  580/* Generate code to load/store a value from/into an EA.  If WHAT is EA_STORE
  581   this is a write, otherwise a read (EA_LOADS sign extends, EA_LOADU zero
  582   extends).  ADDRP is non-null for read/write operands.  */
 583static TCGv gen_ea(CPUM68KState *env, DisasContext *s, uint16_t insn,
 584                   int opsize, TCGv val, TCGv *addrp, ea_what what)
 585{
 586    TCGv reg;
 587    TCGv result;
 588    uint32_t offset;
 589
 590    switch ((insn >> 3) & 7) {
 591    case 0: /* Data register direct.  */
 592        reg = DREG(insn, 0);
 593        if (what == EA_STORE) {
 594            gen_partset_reg(opsize, reg, val);
 595            return store_dummy;
 596        } else {
 597            return gen_extend(reg, opsize, what == EA_LOADS);
 598        }
 599    case 1: /* Address register direct.  */
 600        reg = AREG(insn, 0);
 601        if (what == EA_STORE) {
 602            tcg_gen_mov_i32(reg, val);
 603            return store_dummy;
 604        } else {
 605            return gen_extend(reg, opsize, what == EA_LOADS);
 606        }
 607    case 2: /* Indirect register */
 608        reg = AREG(insn, 0);
 609        return gen_ldst(s, opsize, reg, val, what);
 610    case 3: /* Indirect postincrement.  */
 611        reg = AREG(insn, 0);
 612        result = gen_ldst(s, opsize, reg, val, what);
 613        /* ??? This is not exception safe.  The instruction may still
 614           fault after this point.  */
 615        if (what == EA_STORE || !addrp)
 616            tcg_gen_addi_i32(reg, reg, opsize_bytes(opsize));
 617        return result;
  618    case 4: /* Indirect predecrement.  */
 619        {
 620            TCGv tmp;
 621            if (addrp && what == EA_STORE) {
 622                tmp = *addrp;
 623            } else {
 624                tmp = gen_lea(env, s, insn, opsize);
 625                if (IS_NULL_QREG(tmp))
 626                    return tmp;
 627                if (addrp)
 628                    *addrp = tmp;
 629            }
 630            result = gen_ldst(s, opsize, tmp, val, what);
 631            /* ??? This is not exception safe.  The instruction may still
 632               fault after this point.  */
 633            if (what == EA_STORE || !addrp) {
 634                reg = AREG(insn, 0);
 635                tcg_gen_mov_i32(reg, tmp);
 636            }
 637        }
 638        return result;
 639    case 5: /* Indirect displacement.  */
 640    case 6: /* Indirect index + displacement.  */
 641        return gen_ea_once(env, s, insn, opsize, val, addrp, what);
 642    case 7: /* Other */
 643        switch (insn & 7) {
 644        case 0: /* Absolute short.  */
 645        case 1: /* Absolute long.  */
 646        case 2: /* pc displacement  */
 647        case 3: /* pc index+displacement.  */
 648            return gen_ea_once(env, s, insn, opsize, val, addrp, what);
 649        case 4: /* Immediate.  */
 650            /* Sign extend values for consistency.  */
 651            switch (opsize) {
 652            case OS_BYTE:
 653                if (what == EA_LOADS) {
 654                    offset = cpu_ldsb_code(env, s->pc + 1);
 655                } else {
 656                    offset = cpu_ldub_code(env, s->pc + 1);
 657                }
 658                s->pc += 2;
 659                break;
 660            case OS_WORD:
 661                if (what == EA_LOADS) {
 662                    offset = cpu_ldsw_code(env, s->pc);
 663                } else {
 664                    offset = cpu_lduw_code(env, s->pc);
 665                }
 666                s->pc += 2;
 667                break;
 668            case OS_LONG:
 669                offset = read_im32(env, s);
 670                break;
 671            default:
 672                g_assert_not_reached();
 673            }
 674            return tcg_const_i32(offset);
 675        default:
 676            return NULL_QREG;
 677        }
 678    }
 679    /* Should never happen.  */
 680    return NULL_QREG;
 681}
 682
 683/* This generates a conditional branch, clobbering all temporaries.  */
 684static void gen_jmpcc(DisasContext *s, int cond, TCGLabel *l1)
 685{
 686    TCGv tmp;
 687
 688    /* TODO: Optimize compare/branch pairs rather than always flushing
 689       flag state to CC_OP_FLAGS.  */
 690    gen_flush_flags(s);
 691    switch (cond) {
 692    case 0: /* T */
 693        tcg_gen_br(l1);
 694        break;
 695    case 1: /* F */
 696        break;
 697    case 2: /* HI (!C && !Z) */
 698        tmp = tcg_temp_new();
 699        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_C | CCF_Z);
 700        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
 701        break;
 702    case 3: /* LS (C || Z) */
 703        tmp = tcg_temp_new();
 704        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_C | CCF_Z);
 705        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
 706        break;
 707    case 4: /* CC (!C) */
 708        tmp = tcg_temp_new();
 709        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_C);
 710        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
 711        break;
 712    case 5: /* CS (C) */
 713        tmp = tcg_temp_new();
 714        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_C);
 715        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
 716        break;
 717    case 6: /* NE (!Z) */
 718        tmp = tcg_temp_new();
 719        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_Z);
 720        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
 721        break;
 722    case 7: /* EQ (Z) */
 723        tmp = tcg_temp_new();
 724        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_Z);
 725        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
 726        break;
 727    case 8: /* VC (!V) */
 728        tmp = tcg_temp_new();
 729        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_V);
 730        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
 731        break;
 732    case 9: /* VS (V) */
 733        tmp = tcg_temp_new();
 734        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_V);
 735        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
 736        break;
 737    case 10: /* PL (!N) */
 738        tmp = tcg_temp_new();
 739        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_N);
 740        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
 741        break;
 742    case 11: /* MI (N) */
 743        tmp = tcg_temp_new();
 744        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_N);
 745        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
 746        break;
 747    case 12: /* GE (!(N ^ V)) */
 748        tmp = tcg_temp_new();
 749        assert(CCF_V == (CCF_N >> 2));
 750        tcg_gen_shri_i32(tmp, QREG_CC_DEST, 2);
 751        tcg_gen_xor_i32(tmp, tmp, QREG_CC_DEST);
 752        tcg_gen_andi_i32(tmp, tmp, CCF_V);
 753        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
 754        break;
 755    case 13: /* LT (N ^ V) */
 756        tmp = tcg_temp_new();
 757        assert(CCF_V == (CCF_N >> 2));
 758        tcg_gen_shri_i32(tmp, QREG_CC_DEST, 2);
 759        tcg_gen_xor_i32(tmp, tmp, QREG_CC_DEST);
 760        tcg_gen_andi_i32(tmp, tmp, CCF_V);
 761        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
 762        break;
 763    case 14: /* GT (!(Z || (N ^ V))) */
 764        tmp = tcg_temp_new();
 765        assert(CCF_V == (CCF_N >> 2));
 766        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_N);
 767        tcg_gen_shri_i32(tmp, tmp, 2);
 768        tcg_gen_xor_i32(tmp, tmp, QREG_CC_DEST);
 769        tcg_gen_andi_i32(tmp, tmp, CCF_V | CCF_Z);
 770        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
 771        break;
 772    case 15: /* LE (Z || (N ^ V)) */
 773        tmp = tcg_temp_new();
 774        assert(CCF_V == (CCF_N >> 2));
 775        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_N);
 776        tcg_gen_shri_i32(tmp, tmp, 2);
 777        tcg_gen_xor_i32(tmp, tmp, QREG_CC_DEST);
 778        tcg_gen_andi_i32(tmp, tmp, CCF_V | CCF_Z);
 779        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
 780        break;
 781    default:
  782        /* Should never happen.  */
 783        abort();
 784    }
 785}
 786
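     /* Scc: set the low byte of Dn to 0xff if the condition holds,
        otherwise to zero.  */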
 787DISAS_INSN(scc)
 788{
 789    TCGLabel *l1;
 790    int cond;
 791    TCGv reg;
 792
 793    l1 = gen_new_label();
 794    cond = (insn >> 8) & 0xf;
 795    reg = DREG(insn, 0);
 796    tcg_gen_andi_i32(reg, reg, 0xffffff00);
 797    /* This is safe because we modify the reg directly, with no other values
 798       live.  */
 799    gen_jmpcc(s, cond ^ 1, l1);
 800    tcg_gen_ori_i32(reg, reg, 0xff);
 801    gen_set_label(l1);
 802}
 803
 804/* Force a TB lookup after an instruction that changes the CPU state.  */
 805static void gen_lookup_tb(DisasContext *s)
 806{
 807    gen_flush_cc_op(s);
 808    tcg_gen_movi_i32(QREG_PC, s->pc);
 809    s->is_jmp = DISAS_UPDATE;
 810}
 811
 812/* Generate a jump to an immediate address.  */
 813static void gen_jmp_im(DisasContext *s, uint32_t dest)
 814{
 815    gen_flush_cc_op(s);
 816    tcg_gen_movi_i32(QREG_PC, dest);
 817    s->is_jmp = DISAS_JUMP;
 818}
 819
 820/* Generate a jump to the address in qreg DEST.  */
 821static void gen_jmp(DisasContext *s, TCGv dest)
 822{
 823    gen_flush_cc_op(s);
 824    tcg_gen_mov_i32(QREG_PC, dest);
 825    s->is_jmp = DISAS_JUMP;
 826}
 827
 828static void gen_exception(DisasContext *s, uint32_t where, int nr)
 829{
 830    gen_flush_cc_op(s);
 831    gen_jmp_im(s, where);
 832    gen_helper_raise_exception(cpu_env, tcg_const_i32(nr));
 833}
 834
 835static inline void gen_addr_fault(DisasContext *s)
 836{
 837    gen_exception(s, s->insn_pc, EXCP_ADDRESS);
 838}
 839
 840#define SRC_EA(env, result, opsize, op_sign, addrp) do {                \
 841        result = gen_ea(env, s, insn, opsize, NULL_QREG, addrp,         \
 842                        op_sign ? EA_LOADS : EA_LOADU);                 \
 843        if (IS_NULL_QREG(result)) {                                     \
 844            gen_addr_fault(s);                                          \
 845            return;                                                     \
 846        }                                                               \
 847    } while (0)
 848
 849#define DEST_EA(env, insn, opsize, val, addrp) do {                     \
 850        TCGv ea_result = gen_ea(env, s, insn, opsize, val, addrp, EA_STORE); \
 851        if (IS_NULL_QREG(ea_result)) {                                  \
 852            gen_addr_fault(s);                                          \
 853            return;                                                     \
 854        }                                                               \
 855    } while (0)
 856
 857static inline bool use_goto_tb(DisasContext *s, uint32_t dest)
 858{
 859#ifndef CONFIG_USER_ONLY
 860    return (s->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
 861           (s->insn_pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
 862#else
 863    return true;
 864#endif
 865}
 866
  867/* Generate a jump to an immediate address, chaining TBs when possible.  */
 868static void gen_jmp_tb(DisasContext *s, int n, uint32_t dest)
 869{
 870    if (unlikely(s->singlestep_enabled)) {
 871        gen_exception(s, dest, EXCP_DEBUG);
 872    } else if (use_goto_tb(s, dest)) {
 873        tcg_gen_goto_tb(n);
 874        tcg_gen_movi_i32(QREG_PC, dest);
 875        tcg_gen_exit_tb((uintptr_t)s->tb + n);
 876    } else {
 877        gen_jmp_im(s, dest);
 878        tcg_gen_exit_tb(0);
 879    }
 880    s->is_jmp = DISAS_TB_JUMP;
 881}
 882
 883DISAS_INSN(undef_mac)
 884{
 885    gen_exception(s, s->pc - 2, EXCP_LINEA);
 886}
 887
 888DISAS_INSN(undef_fpu)
 889{
 890    gen_exception(s, s->pc - 2, EXCP_LINEF);
 891}
 892
 893DISAS_INSN(undef)
 894{
 895    M68kCPU *cpu = m68k_env_get_cpu(env);
 896
 897    gen_exception(s, s->pc - 2, EXCP_UNSUPPORTED);
 898    cpu_abort(CPU(cpu), "Illegal instruction: %04x @ %08x", insn, s->pc - 2);
 899}
 900
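     /* MULS.W/MULU.W: 16x16 -> 32 bit multiply into a data register.  */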
 901DISAS_INSN(mulw)
 902{
 903    TCGv reg;
 904    TCGv tmp;
 905    TCGv src;
 906    int sign;
 907
 908    sign = (insn & 0x100) != 0;
 909    reg = DREG(insn, 9);
 910    tmp = tcg_temp_new();
 911    if (sign)
 912        tcg_gen_ext16s_i32(tmp, reg);
 913    else
 914        tcg_gen_ext16u_i32(tmp, reg);
 915    SRC_EA(env, src, OS_WORD, sign, NULL);
 916    tcg_gen_mul_i32(tmp, tmp, src);
 917    tcg_gen_mov_i32(reg, tmp);
 918    /* Unlike m68k, coldfire always clears the overflow bit.  */
 919    gen_logic_cc(s, tmp);
 920}
 921
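     /* DIVS.W/DIVU.W: divide via the divs/divu helpers; the quotient ends up
        in the low word and the remainder in the high word of Dn.  */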
 922DISAS_INSN(divw)
 923{
 924    TCGv reg;
 925    TCGv tmp;
 926    TCGv src;
 927    int sign;
 928
 929    sign = (insn & 0x100) != 0;
 930    reg = DREG(insn, 9);
 931    if (sign) {
 932        tcg_gen_ext16s_i32(QREG_DIV1, reg);
 933    } else {
 934        tcg_gen_ext16u_i32(QREG_DIV1, reg);
 935    }
 936    SRC_EA(env, src, OS_WORD, sign, NULL);
 937    tcg_gen_mov_i32(QREG_DIV2, src);
 938    if (sign) {
 939        gen_helper_divs(cpu_env, tcg_const_i32(1));
 940    } else {
 941        gen_helper_divu(cpu_env, tcg_const_i32(1));
 942    }
 943
 944    tmp = tcg_temp_new();
 945    src = tcg_temp_new();
 946    tcg_gen_ext16u_i32(tmp, QREG_DIV1);
 947    tcg_gen_shli_i32(src, QREG_DIV2, 16);
 948    tcg_gen_or_i32(reg, tmp, src);
 949    s->cc_op = CC_OP_FLAGS;
 950}
 951
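     /* DIVS.L/DIVU.L: the destination register receives the quotient when
        both register fields of the extension word match, otherwise the
        remainder.  */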
 952DISAS_INSN(divl)
 953{
 954    TCGv num;
 955    TCGv den;
 956    TCGv reg;
 957    uint16_t ext;
 958
 959    ext = cpu_lduw_code(env, s->pc);
 960    s->pc += 2;
 961    if (ext & 0x87f8) {
 962        gen_exception(s, s->pc - 4, EXCP_UNSUPPORTED);
 963        return;
 964    }
 965    num = DREG(ext, 12);
 966    reg = DREG(ext, 0);
 967    tcg_gen_mov_i32(QREG_DIV1, num);
 968    SRC_EA(env, den, OS_LONG, 0, NULL);
 969    tcg_gen_mov_i32(QREG_DIV2, den);
 970    if (ext & 0x0800) {
 971        gen_helper_divs(cpu_env, tcg_const_i32(0));
 972    } else {
 973        gen_helper_divu(cpu_env, tcg_const_i32(0));
 974    }
 975    if ((ext & 7) == ((ext >> 12) & 7)) {
 976        /* div */
 977        tcg_gen_mov_i32 (reg, QREG_DIV1);
 978    } else {
 979        /* rem */
 980        tcg_gen_mov_i32 (reg, QREG_DIV2);
 981    }
 982    s->cc_op = CC_OP_FLAGS;
 983}
 984
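     /* ADD/SUB between a data register and an EA; bit 8 of the opcode
        selects the EA as the destination.  */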
 985DISAS_INSN(addsub)
 986{
 987    TCGv reg;
 988    TCGv dest;
 989    TCGv src;
 990    TCGv tmp;
 991    TCGv addr;
 992    int add;
 993
 994    add = (insn & 0x4000) != 0;
 995    reg = DREG(insn, 9);
 996    dest = tcg_temp_new();
 997    if (insn & 0x100) {
 998        SRC_EA(env, tmp, OS_LONG, 0, &addr);
 999        src = reg;
1000    } else {
1001        tmp = reg;
1002        SRC_EA(env, src, OS_LONG, 0, NULL);
1003    }
1004    if (add) {
1005        tcg_gen_add_i32(dest, tmp, src);
1006        gen_helper_xflag_lt(QREG_CC_X, dest, src);
1007        s->cc_op = CC_OP_ADD;
1008    } else {
1009        gen_helper_xflag_lt(QREG_CC_X, tmp, src);
1010        tcg_gen_sub_i32(dest, tmp, src);
1011        s->cc_op = CC_OP_SUB;
1012    }
1013    gen_update_cc_add(dest, src);
1014    if (insn & 0x100) {
1015        DEST_EA(env, insn, OS_LONG, dest, &addr);
1016    } else {
1017        tcg_gen_mov_i32(reg, dest);
1018    }
1019}
1020
1021
1022/* Reverse the order of the bits in REG.  */
1023DISAS_INSN(bitrev)
1024{
1025    TCGv reg;
1026    reg = DREG(insn, 0);
1027    gen_helper_bitrev(reg, reg);
1028}
1029
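     /* BTST/BCHG/BCLR/BSET with the bit number taken from a data register.  */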
1030DISAS_INSN(bitop_reg)
1031{
1032    int opsize;
1033    int op;
1034    TCGv src1;
1035    TCGv src2;
1036    TCGv tmp;
1037    TCGv addr;
1038    TCGv dest;
1039
1040    if ((insn & 0x38) != 0)
1041        opsize = OS_BYTE;
1042    else
1043        opsize = OS_LONG;
1044    op = (insn >> 6) & 3;
1045    SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);
1046    src2 = DREG(insn, 9);
1047    dest = tcg_temp_new();
1048
1049    gen_flush_flags(s);
1050    tmp = tcg_temp_new();
1051    if (opsize == OS_BYTE)
1052        tcg_gen_andi_i32(tmp, src2, 7);
1053    else
1054        tcg_gen_andi_i32(tmp, src2, 31);
1055    src2 = tmp;
1056    tmp = tcg_temp_new();
1057    tcg_gen_shr_i32(tmp, src1, src2);
1058    tcg_gen_andi_i32(tmp, tmp, 1);
1059    tcg_gen_shli_i32(tmp, tmp, 2);
1060    /* Clear CCF_Z if bit set.  */
1061    tcg_gen_ori_i32(QREG_CC_DEST, QREG_CC_DEST, CCF_Z);
1062    tcg_gen_xor_i32(QREG_CC_DEST, QREG_CC_DEST, tmp);
1063
1064    tcg_gen_shl_i32(tmp, tcg_const_i32(1), src2);
1065    switch (op) {
1066    case 1: /* bchg */
1067        tcg_gen_xor_i32(dest, src1, tmp);
1068        break;
1069    case 2: /* bclr */
1070        tcg_gen_not_i32(tmp, tmp);
1071        tcg_gen_and_i32(dest, src1, tmp);
1072        break;
1073    case 3: /* bset */
1074        tcg_gen_or_i32(dest, src1, tmp);
1075        break;
1076    default: /* btst */
1077        break;
1078    }
1079    if (op)
1080        DEST_EA(env, insn, opsize, dest, &addr);
1081}
1082
1083DISAS_INSN(sats)
1084{
1085    TCGv reg;
1086    reg = DREG(insn, 0);
1087    gen_flush_flags(s);
1088    gen_helper_sats(reg, reg, QREG_CC_DEST);
1089    gen_logic_cc(s, reg);
1090}
1091
1092static void gen_push(DisasContext *s, TCGv val)
1093{
1094    TCGv tmp;
1095
1096    tmp = tcg_temp_new();
1097    tcg_gen_subi_i32(tmp, QREG_SP, 4);
1098    gen_store(s, OS_LONG, tmp, val);
1099    tcg_gen_mov_i32(QREG_SP, tmp);
1100}
1101
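     /* MOVEM: load or store the set of registers selected by the mask word.  */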
1102DISAS_INSN(movem)
1103{
1104    TCGv addr;
1105    int i;
1106    uint16_t mask;
1107    TCGv reg;
1108    TCGv tmp;
1109    int is_load;
1110
1111    mask = cpu_lduw_code(env, s->pc);
1112    s->pc += 2;
1113    tmp = gen_lea(env, s, insn, OS_LONG);
1114    if (IS_NULL_QREG(tmp)) {
1115        gen_addr_fault(s);
1116        return;
1117    }
1118    addr = tcg_temp_new();
1119    tcg_gen_mov_i32(addr, tmp);
1120    is_load = ((insn & 0x0400) != 0);
1121    for (i = 0; i < 16; i++, mask >>= 1) {
1122        if (mask & 1) {
1123            if (i < 8)
1124                reg = DREG(i, 0);
1125            else
1126                reg = AREG(i, 0);
1127            if (is_load) {
1128                tmp = gen_load(s, OS_LONG, addr, 0);
1129                tcg_gen_mov_i32(reg, tmp);
1130            } else {
1131                gen_store(s, OS_LONG, addr, reg);
1132            }
1133            if (mask != 1)
1134                tcg_gen_addi_i32(addr, addr, 4);
1135        }
1136    }
1137}
1138
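     /* BTST/BCHG/BCLR/BSET with an immediate bit number.  */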
1139DISAS_INSN(bitop_im)
1140{
1141    int opsize;
1142    int op;
1143    TCGv src1;
1144    uint32_t mask;
1145    int bitnum;
1146    TCGv tmp;
1147    TCGv addr;
1148
1149    if ((insn & 0x38) != 0)
1150        opsize = OS_BYTE;
1151    else
1152        opsize = OS_LONG;
1153    op = (insn >> 6) & 3;
1154
1155    bitnum = cpu_lduw_code(env, s->pc);
1156    s->pc += 2;
1157    if (bitnum & 0xff00) {
1158        disas_undef(env, s, insn);
1159        return;
1160    }
1161
1162    SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);
1163
1164    gen_flush_flags(s);
1165    if (opsize == OS_BYTE)
1166        bitnum &= 7;
1167    else
1168        bitnum &= 31;
1169    mask = 1 << bitnum;
1170
1171    tmp = tcg_temp_new();
1172    assert (CCF_Z == (1 << 2));
1173    if (bitnum > 2)
1174        tcg_gen_shri_i32(tmp, src1, bitnum - 2);
1175    else if (bitnum < 2)
1176        tcg_gen_shli_i32(tmp, src1, 2 - bitnum);
1177    else
1178        tcg_gen_mov_i32(tmp, src1);
1179    tcg_gen_andi_i32(tmp, tmp, CCF_Z);
1180    /* Clear CCF_Z if bit set.  */
1181    tcg_gen_ori_i32(QREG_CC_DEST, QREG_CC_DEST, CCF_Z);
1182    tcg_gen_xor_i32(QREG_CC_DEST, QREG_CC_DEST, tmp);
1183    if (op) {
1184        switch (op) {
1185        case 1: /* bchg */
1186            tcg_gen_xori_i32(tmp, src1, mask);
1187            break;
1188        case 2: /* bclr */
1189            tcg_gen_andi_i32(tmp, src1, ~mask);
1190            break;
1191        case 3: /* bset */
1192            tcg_gen_ori_i32(tmp, src1, mask);
1193            break;
1194        default: /* btst */
1195            break;
1196        }
1197        DEST_EA(env, insn, opsize, tmp, &addr);
1198    }
1199}
1200
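     /* ORI/ANDI/SUBI/ADDI/EORI/CMPI with a long immediate operand.  */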
1201DISAS_INSN(arith_im)
1202{
1203    int op;
1204    uint32_t im;
1205    TCGv src1;
1206    TCGv dest;
1207    TCGv addr;
1208
1209    op = (insn >> 9) & 7;
1210    SRC_EA(env, src1, OS_LONG, 0, (op == 6) ? NULL : &addr);
1211    im = read_im32(env, s);
1212    dest = tcg_temp_new();
1213    switch (op) {
1214    case 0: /* ori */
1215        tcg_gen_ori_i32(dest, src1, im);
1216        gen_logic_cc(s, dest);
1217        break;
1218    case 1: /* andi */
1219        tcg_gen_andi_i32(dest, src1, im);
1220        gen_logic_cc(s, dest);
1221        break;
1222    case 2: /* subi */
1223        tcg_gen_mov_i32(dest, src1);
1224        gen_helper_xflag_lt(QREG_CC_X, dest, tcg_const_i32(im));
1225        tcg_gen_subi_i32(dest, dest, im);
1226        gen_update_cc_add(dest, tcg_const_i32(im));
1227        s->cc_op = CC_OP_SUB;
1228        break;
1229    case 3: /* addi */
1230        tcg_gen_mov_i32(dest, src1);
1231        tcg_gen_addi_i32(dest, dest, im);
1232        gen_update_cc_add(dest, tcg_const_i32(im));
1233        gen_helper_xflag_lt(QREG_CC_X, dest, tcg_const_i32(im));
1234        s->cc_op = CC_OP_ADD;
1235        break;
1236    case 5: /* eori */
1237        tcg_gen_xori_i32(dest, src1, im);
1238        gen_logic_cc(s, dest);
1239        break;
1240    case 6: /* cmpi */
1241        tcg_gen_mov_i32(dest, src1);
1242        tcg_gen_subi_i32(dest, dest, im);
1243        gen_update_cc_add(dest, tcg_const_i32(im));
1244        s->cc_op = CC_OP_SUB;
1245        break;
1246    default:
1247        abort();
1248    }
1249    if (op != 6) {
1250        DEST_EA(env, insn, OS_LONG, dest, &addr);
1251    }
1252}
1253
1254DISAS_INSN(byterev)
1255{
1256    TCGv reg;
1257
1258    reg = DREG(insn, 0);
1259    tcg_gen_bswap32_i32(reg, reg);
1260}
1261
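     /* MOVE/MOVEA: copy the source EA to the destination; plain MOVE also
        sets the condition codes.  */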
1262DISAS_INSN(move)
1263{
1264    TCGv src;
1265    TCGv dest;
1266    int op;
1267    int opsize;
1268
1269    switch (insn >> 12) {
1270    case 1: /* move.b */
1271        opsize = OS_BYTE;
1272        break;
1273    case 2: /* move.l */
1274        opsize = OS_LONG;
1275        break;
1276    case 3: /* move.w */
1277        opsize = OS_WORD;
1278        break;
1279    default:
1280        abort();
1281    }
1282    SRC_EA(env, src, opsize, 1, NULL);
1283    op = (insn >> 6) & 7;
1284    if (op == 1) {
1285        /* movea */
1286        /* The value will already have been sign extended.  */
1287        dest = AREG(insn, 9);
1288        tcg_gen_mov_i32(dest, src);
1289    } else {
1290        /* normal move */
1291        uint16_t dest_ea;
1292        dest_ea = ((insn >> 9) & 7) | (op << 3);
1293        DEST_EA(env, dest_ea, opsize, src, NULL);
1294        /* This will be correct because loads sign extend.  */
1295        gen_logic_cc(s, src);
1296    }
1297}
1298
1299DISAS_INSN(negx)
1300{
1301    TCGv reg;
1302
1303    gen_flush_flags(s);
1304    reg = DREG(insn, 0);
1305    gen_helper_subx_cc(reg, cpu_env, tcg_const_i32(0), reg);
1306}
1307
1308DISAS_INSN(lea)
1309{
1310    TCGv reg;
1311    TCGv tmp;
1312
1313    reg = AREG(insn, 9);
1314    tmp = gen_lea(env, s, insn, OS_LONG);
1315    if (IS_NULL_QREG(tmp)) {
1316        gen_addr_fault(s);
1317        return;
1318    }
1319    tcg_gen_mov_i32(reg, tmp);
1320}
1321
1322DISAS_INSN(clr)
1323{
1324    int opsize;
1325
1326    switch ((insn >> 6) & 3) {
1327    case 0: /* clr.b */
1328        opsize = OS_BYTE;
1329        break;
1330    case 1: /* clr.w */
1331        opsize = OS_WORD;
1332        break;
1333    case 2: /* clr.l */
1334        opsize = OS_LONG;
1335        break;
1336    default:
1337        abort();
1338    }
1339    DEST_EA(env, insn, opsize, tcg_const_i32(0), NULL);
1340    gen_logic_cc(s, tcg_const_i32(0));
1341}
1342
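     /* Build the CCR value from the X flag and the lazily evaluated flags.  */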
1343static TCGv gen_get_ccr(DisasContext *s)
1344{
1345    TCGv dest;
1346
1347    gen_flush_flags(s);
1348    dest = tcg_temp_new();
1349    tcg_gen_shli_i32(dest, QREG_CC_X, 4);
1350    tcg_gen_or_i32(dest, dest, QREG_CC_DEST);
1351    return dest;
1352}
1353
1354DISAS_INSN(move_from_ccr)
1355{
1356    TCGv reg;
1357    TCGv ccr;
1358
1359    ccr = gen_get_ccr(s);
1360    reg = DREG(insn, 0);
1361    gen_partset_reg(OS_WORD, reg, ccr);
1362}
1363
1364DISAS_INSN(neg)
1365{
1366    TCGv reg;
1367    TCGv src1;
1368
1369    reg = DREG(insn, 0);
1370    src1 = tcg_temp_new();
1371    tcg_gen_mov_i32(src1, reg);
1372    tcg_gen_neg_i32(reg, src1);
1373    s->cc_op = CC_OP_SUB;
1374    gen_update_cc_add(reg, src1);
1375    gen_helper_xflag_lt(QREG_CC_X, tcg_const_i32(0), src1);
1376    s->cc_op = CC_OP_SUB;
1377}
1378
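     /* Load CCR (and, unless ccr_only, the remaining SR bits) from an
        immediate value.  */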
1379static void gen_set_sr_im(DisasContext *s, uint16_t val, int ccr_only)
1380{
1381    tcg_gen_movi_i32(QREG_CC_DEST, val & 0xf);
1382    tcg_gen_movi_i32(QREG_CC_X, (val & 0x10) >> 4);
1383    if (!ccr_only) {
1384        gen_helper_set_sr(cpu_env, tcg_const_i32(val & 0xff00));
1385    }
1386}
1387
1388static void gen_set_sr(CPUM68KState *env, DisasContext *s, uint16_t insn,
1389                       int ccr_only)
1390{
1391    TCGv tmp;
1392    TCGv reg;
1393
1394    s->cc_op = CC_OP_FLAGS;
1395    if ((insn & 0x38) == 0)
1396      {
1397        tmp = tcg_temp_new();
1398        reg = DREG(insn, 0);
1399        tcg_gen_andi_i32(QREG_CC_DEST, reg, 0xf);
1400        tcg_gen_shri_i32(tmp, reg, 4);
1401        tcg_gen_andi_i32(QREG_CC_X, tmp, 1);
1402        if (!ccr_only) {
1403            gen_helper_set_sr(cpu_env, reg);
1404        }
1405      }
1406    else if ((insn & 0x3f) == 0x3c)
1407      {
1408        uint16_t val;
1409        val = cpu_lduw_code(env, s->pc);
1410        s->pc += 2;
1411        gen_set_sr_im(s, val, ccr_only);
1412      }
1413    else
1414        disas_undef(env, s, insn);
1415}
1416
1417DISAS_INSN(move_to_ccr)
1418{
1419    gen_set_sr(env, s, insn, 1);
1420}
1421
1422DISAS_INSN(not)
1423{
1424    TCGv reg;
1425
1426    reg = DREG(insn, 0);
1427    tcg_gen_not_i32(reg, reg);
1428    gen_logic_cc(s, reg);
1429}
1430
1431DISAS_INSN(swap)
1432{
1433    TCGv src1;
1434    TCGv src2;
1435    TCGv reg;
1436
1437    src1 = tcg_temp_new();
1438    src2 = tcg_temp_new();
1439    reg = DREG(insn, 0);
1440    tcg_gen_shli_i32(src1, reg, 16);
1441    tcg_gen_shri_i32(src2, reg, 16);
1442    tcg_gen_or_i32(reg, src1, src2);
1443    gen_logic_cc(s, reg);
1444}
1445
1446DISAS_INSN(pea)
1447{
1448    TCGv tmp;
1449
1450    tmp = gen_lea(env, s, insn, OS_LONG);
1451    if (IS_NULL_QREG(tmp)) {
1452        gen_addr_fault(s);
1453        return;
1454    }
1455    gen_push(s, tmp);
1456}
1457
1458DISAS_INSN(ext)
1459{
1460    int op;
1461    TCGv reg;
1462    TCGv tmp;
1463
1464    reg = DREG(insn, 0);
1465    op = (insn >> 6) & 7;
1466    tmp = tcg_temp_new();
1467    if (op == 3)
1468        tcg_gen_ext16s_i32(tmp, reg);
1469    else
1470        tcg_gen_ext8s_i32(tmp, reg);
1471    if (op == 2)
1472        gen_partset_reg(OS_WORD, reg, tmp);
1473    else
1474        tcg_gen_mov_i32(reg, tmp);
1475    gen_logic_cc(s, tmp);
1476}
1477
1478DISAS_INSN(tst)
1479{
1480    int opsize;
1481    TCGv tmp;
1482
1483    switch ((insn >> 6) & 3) {
1484    case 0: /* tst.b */
1485        opsize = OS_BYTE;
1486        break;
1487    case 1: /* tst.w */
1488        opsize = OS_WORD;
1489        break;
1490    case 2: /* tst.l */
1491        opsize = OS_LONG;
1492        break;
1493    default:
1494        abort();
1495    }
1496    SRC_EA(env, tmp, opsize, 1, NULL);
1497    gen_logic_cc(s, tmp);
1498}
1499
1500DISAS_INSN(pulse)
1501{
1502  /* Implemented as a NOP.  */
1503}
1504
1505DISAS_INSN(illegal)
1506{
1507    gen_exception(s, s->pc - 2, EXCP_ILLEGAL);
1508}
1509
1510/* ??? This should be atomic.  */
1511DISAS_INSN(tas)
1512{
1513    TCGv dest;
1514    TCGv src1;
1515    TCGv addr;
1516
1517    dest = tcg_temp_new();
1518    SRC_EA(env, src1, OS_BYTE, 1, &addr);
1519    gen_logic_cc(s, src1);
1520    tcg_gen_ori_i32(dest, src1, 0x80);
1521    DEST_EA(env, insn, OS_BYTE, dest, &addr);
1522}
1523
1524DISAS_INSN(mull)
1525{
1526    uint16_t ext;
1527    TCGv reg;
1528    TCGv src1;
1529    TCGv dest;
1530
1531    /* The upper 32 bits of the product are discarded, so
1532       muls.l and mulu.l are functionally equivalent.  */
1533    ext = cpu_lduw_code(env, s->pc);
1534    s->pc += 2;
1535    if (ext & 0x87ff) {
1536        gen_exception(s, s->pc - 4, EXCP_UNSUPPORTED);
1537        return;
1538    }
1539    reg = DREG(ext, 12);
1540    SRC_EA(env, src1, OS_LONG, 0, NULL);
1541    dest = tcg_temp_new();
1542    tcg_gen_mul_i32(dest, src1, reg);
1543    tcg_gen_mov_i32(reg, dest);
1544    /* Unlike m68k, coldfire always clears the overflow bit.  */
1545    gen_logic_cc(s, dest);
1546}
1547
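     /* LINK: push An, set An to the new frame pointer, then add the
        displacement to SP.  */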
1548DISAS_INSN(link)
1549{
1550    int16_t offset;
1551    TCGv reg;
1552    TCGv tmp;
1553
1554    offset = cpu_ldsw_code(env, s->pc);
1555    s->pc += 2;
1556    reg = AREG(insn, 0);
1557    tmp = tcg_temp_new();
1558    tcg_gen_subi_i32(tmp, QREG_SP, 4);
1559    gen_store(s, OS_LONG, tmp, reg);
1560    if ((insn & 7) != 7)
1561        tcg_gen_mov_i32(reg, tmp);
1562    tcg_gen_addi_i32(QREG_SP, tmp, offset);
1563}
1564
1565DISAS_INSN(unlk)
1566{
1567    TCGv src;
1568    TCGv reg;
1569    TCGv tmp;
1570
1571    src = tcg_temp_new();
1572    reg = AREG(insn, 0);
1573    tcg_gen_mov_i32(src, reg);
1574    tmp = gen_load(s, OS_LONG, src, 0);
1575    tcg_gen_mov_i32(reg, tmp);
1576    tcg_gen_addi_i32(QREG_SP, src, 4);
1577}
1578
1579DISAS_INSN(nop)
1580{
1581}
1582
1583DISAS_INSN(rts)
1584{
1585    TCGv tmp;
1586
1587    tmp = gen_load(s, OS_LONG, QREG_SP, 0);
1588    tcg_gen_addi_i32(QREG_SP, QREG_SP, 4);
1589    gen_jmp(s, tmp);
1590}
1591
1592DISAS_INSN(jump)
1593{
1594    TCGv tmp;
1595
1596    /* Load the target address first to ensure correct exception
1597       behavior.  */
1598    tmp = gen_lea(env, s, insn, OS_LONG);
1599    if (IS_NULL_QREG(tmp)) {
1600        gen_addr_fault(s);
1601        return;
1602    }
1603    if ((insn & 0x40) == 0) {
1604        /* jsr */
1605        gen_push(s, tcg_const_i32(s->pc));
1606    }
1607    gen_jmp(s, tmp);
1608}
1609
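     /* ADDQ/SUBQ: add or subtract a quick immediate (1-8); condition codes
        are not updated for an address register destination.  */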
1610DISAS_INSN(addsubq)
1611{
1612    TCGv src1;
1613    TCGv src2;
1614    TCGv dest;
1615    int val;
1616    TCGv addr;
1617
1618    SRC_EA(env, src1, OS_LONG, 0, &addr);
1619    val = (insn >> 9) & 7;
1620    if (val == 0)
1621        val = 8;
1622    dest = tcg_temp_new();
1623    tcg_gen_mov_i32(dest, src1);
1624    if ((insn & 0x38) == 0x08) {
1625        /* Don't update condition codes if the destination is an
1626           address register.  */
1627        if (insn & 0x0100) {
1628            tcg_gen_subi_i32(dest, dest, val);
1629        } else {
1630            tcg_gen_addi_i32(dest, dest, val);
1631        }
1632    } else {
1633        src2 = tcg_const_i32(val);
1634        if (insn & 0x0100) {
1635            gen_helper_xflag_lt(QREG_CC_X, dest, src2);
1636            tcg_gen_subi_i32(dest, dest, val);
1637            s->cc_op = CC_OP_SUB;
1638        } else {
1639            tcg_gen_addi_i32(dest, dest, val);
1640            gen_helper_xflag_lt(QREG_CC_X, dest, src2);
1641            s->cc_op = CC_OP_ADD;
1642        }
1643        gen_update_cc_add(dest, src2);
1644    }
1645    DEST_EA(env, insn, OS_LONG, dest, &addr);
1646}
1647
1648DISAS_INSN(tpf)
1649{
1650    switch (insn & 7) {
1651    case 2: /* One extension word.  */
1652        s->pc += 2;
1653        break;
1654    case 3: /* Two extension words.  */
1655        s->pc += 4;
1656        break;
1657    case 4: /* No extension words.  */
1658        break;
1659    default:
1660        disas_undef(env, s, insn);
1661    }
1662}
1663
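     /* BRA/BSR/Bcc with an 8-, 16- or 32-bit displacement.  */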
1664DISAS_INSN(branch)
1665{
1666    int32_t offset;
1667    uint32_t base;
1668    int op;
1669    TCGLabel *l1;
1670
1671    base = s->pc;
1672    op = (insn >> 8) & 0xf;
1673    offset = (int8_t)insn;
1674    if (offset == 0) {
1675        offset = cpu_ldsw_code(env, s->pc);
1676        s->pc += 2;
1677    } else if (offset == -1) {
1678        offset = read_im32(env, s);
1679    }
1680    if (op == 1) {
1681        /* bsr */
1682        gen_push(s, tcg_const_i32(s->pc));
1683    }
1684    gen_flush_cc_op(s);
1685    if (op > 1) {
1686        /* Bcc */
1687        l1 = gen_new_label();
1688        gen_jmpcc(s, ((insn >> 8) & 0xf) ^ 1, l1);
1689        gen_jmp_tb(s, 1, base + offset);
1690        gen_set_label(l1);
1691        gen_jmp_tb(s, 0, s->pc);
1692    } else {
1693        /* Unconditional branch.  */
1694        gen_jmp_tb(s, 0, base + offset);
1695    }
1696}
1697
1698DISAS_INSN(moveq)
1699{
1700    uint32_t val;
1701
1702    val = (int8_t)insn;
1703    tcg_gen_movi_i32(DREG(insn, 9), val);
1704    gen_logic_cc(s, tcg_const_i32(val));
1705}
1706
1707DISAS_INSN(mvzs)
1708{
1709    int opsize;
1710    TCGv src;
1711    TCGv reg;
1712
1713    if (insn & 0x40)
1714        opsize = OS_WORD;
1715    else
1716        opsize = OS_BYTE;
1717    SRC_EA(env, src, opsize, (insn & 0x80) == 0, NULL);
1718    reg = DREG(insn, 9);
1719    tcg_gen_mov_i32(reg, src);
1720    gen_logic_cc(s, src);
1721}
1722
1723DISAS_INSN(or)
1724{
1725    TCGv reg;
1726    TCGv dest;
1727    TCGv src;
1728    TCGv addr;
1729
1730    reg = DREG(insn, 9);
1731    dest = tcg_temp_new();
1732    if (insn & 0x100) {
1733        SRC_EA(env, src, OS_LONG, 0, &addr);
1734        tcg_gen_or_i32(dest, src, reg);
1735        DEST_EA(env, insn, OS_LONG, dest, &addr);
1736    } else {
1737        SRC_EA(env, src, OS_LONG, 0, NULL);
1738        tcg_gen_or_i32(dest, src, reg);
1739        tcg_gen_mov_i32(reg, dest);
1740    }
1741    gen_logic_cc(s, dest);
1742}
1743
1744DISAS_INSN(suba)
1745{
1746    TCGv src;
1747    TCGv reg;
1748
1749    SRC_EA(env, src, OS_LONG, 0, NULL);
1750    reg = AREG(insn, 9);
1751    tcg_gen_sub_i32(reg, reg, src);
1752}
1753
1754DISAS_INSN(subx)
1755{
1756    TCGv reg;
1757    TCGv src;
1758
1759    gen_flush_flags(s);
1760    reg = DREG(insn, 9);
1761    src = DREG(insn, 0);
1762    gen_helper_subx_cc(reg, cpu_env, reg, src);
1763}
1764
1765DISAS_INSN(mov3q)
1766{
1767    TCGv src;
1768    int val;
1769
1770    val = (insn >> 9) & 7;
1771    if (val == 0)
1772        val = -1;
1773    src = tcg_const_i32(val);
1774    gen_logic_cc(s, src);
1775    DEST_EA(env, insn, OS_LONG, src, NULL);
1776}
1777
1778DISAS_INSN(cmp)
1779{
1780    int op;
1781    TCGv src;
1782    TCGv reg;
1783    TCGv dest;
1784    int opsize;
1785
1786    op = (insn >> 6) & 3;
1787    switch (op) {
1788    case 0: /* cmp.b */
1789        opsize = OS_BYTE;
1790        s->cc_op = CC_OP_CMPB;
1791        break;
1792    case 1: /* cmp.w */
1793        opsize = OS_WORD;
1794        s->cc_op = CC_OP_CMPW;
1795        break;
1796    case 2: /* cmp.l */
1797        opsize = OS_LONG;
1798        s->cc_op = CC_OP_SUB;
1799        break;
1800    default:
1801        abort();
1802    }
1803    SRC_EA(env, src, opsize, 1, NULL);
1804    reg = DREG(insn, 9);
1805    dest = tcg_temp_new();
1806    tcg_gen_sub_i32(dest, reg, src);
1807    gen_update_cc_add(dest, src);
1808}
1809
1810DISAS_INSN(cmpa)
1811{
1812    int opsize;
1813    TCGv src;
1814    TCGv reg;
1815    TCGv dest;
1816
1817    if (insn & 0x100) {
1818        opsize = OS_LONG;
1819    } else {
1820        opsize = OS_WORD;
1821    }
1822    SRC_EA(env, src, opsize, 1, NULL);
1823    reg = AREG(insn, 9);
1824    dest = tcg_temp_new();
1825    tcg_gen_sub_i32(dest, reg, src);
1826    gen_update_cc_add(dest, src);
1827    s->cc_op = CC_OP_SUB;
1828}
1829
1830DISAS_INSN(eor)
1831{
1832    TCGv src;
1833    TCGv reg;
1834    TCGv dest;
1835    TCGv addr;
1836
1837    SRC_EA(env, src, OS_LONG, 0, &addr);
1838    reg = DREG(insn, 9);
1839    dest = tcg_temp_new();
1840    tcg_gen_xor_i32(dest, src, reg);
1841    gen_logic_cc(s, dest);
1842    DEST_EA(env, insn, OS_LONG, dest, &addr);
1843}
1844
1845DISAS_INSN(and)
1846{
1847    TCGv src;
1848    TCGv reg;
1849    TCGv dest;
1850    TCGv addr;
1851
1852    reg = DREG(insn, 9);
1853    dest = tcg_temp_new();
1854    if (insn & 0x100) {
1855        SRC_EA(env, src, OS_LONG, 0, &addr);
1856        tcg_gen_and_i32(dest, src, reg);
1857        DEST_EA(env, insn, OS_LONG, dest, &addr);
1858    } else {
1859        SRC_EA(env, src, OS_LONG, 0, NULL);
1860        tcg_gen_and_i32(dest, src, reg);
1861        tcg_gen_mov_i32(reg, dest);
1862    }
1863    gen_logic_cc(s, dest);
1864}
1865
1866DISAS_INSN(adda)
1867{
1868    TCGv src;
1869    TCGv reg;
1870
1871    SRC_EA(env, src, OS_LONG, 0, NULL);
1872    reg = AREG(insn, 9);
1873    tcg_gen_add_i32(reg, reg, src);
1874}
1875
1876DISAS_INSN(addx)
1877{
1878    TCGv reg;
1879    TCGv src;
1880
1881    gen_flush_flags(s);
1882    reg = DREG(insn, 9);
1883    src = DREG(insn, 0);
1884    gen_helper_addx_cc(reg, cpu_env, reg, src);
1885    s->cc_op = CC_OP_FLAGS;
1886}
1887
1888/* TODO: This could be implemented without helper functions.  */
1889DISAS_INSN(shift_im)
1890{
1891    TCGv reg;
1892    int tmp;
1893    TCGv shift;
1894
1895    reg = DREG(insn, 0);
1896    tmp = (insn >> 9) & 7;
1897    if (tmp == 0)
1898        tmp = 8;
1899    shift = tcg_const_i32(tmp);
 1900    /* No need to flush flags because we know we will set the C flag.  */
1901    if (insn & 0x100) {
1902        gen_helper_shl_cc(reg, cpu_env, reg, shift);
1903    } else {
1904        if (insn & 8) {
1905            gen_helper_shr_cc(reg, cpu_env, reg, shift);
1906        } else {
1907            gen_helper_sar_cc(reg, cpu_env, reg, shift);
1908        }
1909    }
1910    s->cc_op = CC_OP_SHIFT;
1911}
1912
1913DISAS_INSN(shift_reg)
1914{
1915    TCGv reg;
1916    TCGv shift;
1917
1918    reg = DREG(insn, 0);
1919    shift = DREG(insn, 9);
1920    /* Shift by zero leaves C flag unmodified.   */
1921    gen_flush_flags(s);
1922    if (insn & 0x100) {
1923        gen_helper_shl_cc(reg, cpu_env, reg, shift);
1924    } else {
1925        if (insn & 8) {
1926            gen_helper_shr_cc(reg, cpu_env, reg, shift);
1927        } else {
1928            gen_helper_sar_cc(reg, cpu_env, reg, shift);
1929        }
1930    }
1931    s->cc_op = CC_OP_SHIFT;
1932}
1933
1934DISAS_INSN(ff1)
1935{
1936    TCGv reg;
1937    reg = DREG(insn, 0);
1938    gen_logic_cc(s, reg);
1939    gen_helper_ff1(reg, reg);
1940}
1941
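     /* Combine the supervisor part of SR with the current CCR.  */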
1942static TCGv gen_get_sr(DisasContext *s)
1943{
1944    TCGv ccr;
1945    TCGv sr;
1946
1947    ccr = gen_get_ccr(s);
1948    sr = tcg_temp_new();
1949    tcg_gen_andi_i32(sr, QREG_SR, 0xffe0);
1950    tcg_gen_or_i32(sr, sr, ccr);
1951    return sr;
1952}
1953
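    /* strldsr: push the current SR, then load a new SR from an immediate.
       The insn is the two-word sequence 0x40e7 0x46fc followed by the value.  */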
1954DISAS_INSN(strldsr)
1955{
1956    uint16_t ext;
1957    uint32_t addr;
1958
1959    addr = s->pc - 2;
1960    ext = cpu_lduw_code(env, s->pc);
1961    s->pc += 2;
1962    if (ext != 0x46FC) {
1963        gen_exception(s, addr, EXCP_UNSUPPORTED);
1964        return;
1965    }
1966    ext = cpu_lduw_code(env, s->pc);
1967    s->pc += 2;
1968    if (IS_USER(s) || (ext & SR_S) == 0) {
1969        gen_exception(s, addr, EXCP_PRIVILEGE);
1970        return;
1971    }
1972    gen_push(s, gen_get_sr(s));
1973    gen_set_sr_im(s, ext, 0);
1974}
1975
1976DISAS_INSN(move_from_sr)
1977{
1978    TCGv reg;
1979    TCGv sr;
1980
1981    if (IS_USER(s)) {
1982        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
1983        return;
1984    }
1985    sr = gen_get_sr(s);
1986    reg = DREG(insn, 0);
1987    gen_partset_reg(OS_WORD, reg, sr);
1988}
1989
1990DISAS_INSN(move_to_sr)
1991{
1992    if (IS_USER(s)) {
1993        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
1994        return;
1995    }
1996    gen_set_sr(env, s, insn, 0);
1997    gen_lookup_tb(s);
1998}
1999
2000DISAS_INSN(move_from_usp)
2001{
2002    if (IS_USER(s)) {
2003        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2004        return;
2005    }
2006    tcg_gen_ld_i32(AREG(insn, 0), cpu_env,
2007                   offsetof(CPUM68KState, sp[M68K_USP]));
2008}
2009
2010DISAS_INSN(move_to_usp)
2011{
2012    if (IS_USER(s)) {
2013        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2014        return;
2015    }
2016    tcg_gen_st_i32(AREG(insn, 0), cpu_env,
2017                   offsetof(CPUM68KState, sp[M68K_USP]));
2018}
2019
2020DISAS_INSN(halt)
2021{
2022    gen_exception(s, s->pc, EXCP_HALT_INSN);
2023}
2024
2025DISAS_INSN(stop)
2026{
2027    uint16_t ext;
2028
2029    if (IS_USER(s)) {
2030        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2031        return;
2032    }
2033
2034    ext = cpu_lduw_code(env, s->pc);
2035    s->pc += 2;
2036
2037    gen_set_sr_im(s, ext, 0);
2038    tcg_gen_movi_i32(cpu_halted, 1);
2039    gen_exception(s, s->pc, EXCP_HLT);
2040}
2041
2042DISAS_INSN(rte)
2043{
2044    if (IS_USER(s)) {
2045        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2046        return;
2047    }
2048    gen_exception(s, s->pc - 2, EXCP_RTE);
2049}
2050
2051DISAS_INSN(movec)
2052{
2053    uint16_t ext;
2054    TCGv reg;
2055
2056    if (IS_USER(s)) {
2057        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2058        return;
2059    }
2060
2061    ext = cpu_lduw_code(env, s->pc);
2062    s->pc += 2;
2063
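        /* Extension word: bit 15 selects an address register, bits 14-12
           give the register number, and bits 11-0 name the control register.  */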
2064    if (ext & 0x8000) {
2065        reg = AREG(ext, 12);
2066    } else {
2067        reg = DREG(ext, 12);
2068    }
2069    gen_helper_movec(cpu_env, tcg_const_i32(ext & 0xfff), reg);
2070    gen_lookup_tb(s);
2071}
2072
2073DISAS_INSN(intouch)
2074{
2075    if (IS_USER(s)) {
2076        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2077        return;
2078    }
2079    /* ICache fetch.  Implement as no-op.  */
2080}
2081
2082DISAS_INSN(cpushl)
2083{
2084    if (IS_USER(s)) {
2085        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2086        return;
2087    }
2088    /* Cache push/invalidate.  Implement as no-op.  */
2089}
2090
2091DISAS_INSN(wddata)
2092{
2093    gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2094}
2095
2096DISAS_INSN(wdebug)
2097{
2098    M68kCPU *cpu = m68k_env_get_cpu(env);
2099
2100    if (IS_USER(s)) {
2101        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2102        return;
2103    }
2104    /* TODO: Implement wdebug.  */
2105    cpu_abort(CPU(cpu), "WDEBUG not implemented");
2106}
2107
2108DISAS_INSN(trap)
2109{
2110    gen_exception(s, s->pc - 2, EXCP_TRAP0 + (insn & 0xf));
2111}
2112
2113/* ??? FP exceptions are not implemented.  Most exceptions are deferred until
2114   immediately before the next FP instruction is executed.  */
2115DISAS_INSN(fpu)
2116{
2117    uint16_t ext;
2118    int32_t offset;
2119    int opmode;
2120    TCGv_i64 src;
2121    TCGv_i64 dest;
2122    TCGv_i64 res;
2123    TCGv tmp32;
2124    int round;
2125    int set_dest;
2126    int opsize;
2127
2128    ext = cpu_lduw_code(env, s->pc);
2129    s->pc += 2;
2130    opmode = ext & 0x7f;
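        /* Extension word: bits 15-13 select the operation class (see the
           switch below), bits 12-10 the source register, data format or
           control register, bits 9-7 the destination register and
           bits 6-0 the arithmetic opmode.  */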
2131    switch ((ext >> 13) & 7) {
2132    case 0: case 2:
2133        break;
2134    case 1:
2135        goto undef;
2136    case 3: /* fmove out */
2137        src = FREG(ext, 7);
2138        tmp32 = tcg_temp_new_i32();
2139        /* fmove */
2140        /* ??? TODO: Proper behavior on overflow.  */
2141        switch ((ext >> 10) & 7) {
2142        case 0:
2143            opsize = OS_LONG;
2144            gen_helper_f64_to_i32(tmp32, cpu_env, src);
2145            break;
2146        case 1:
2147            opsize = OS_SINGLE;
2148            gen_helper_f64_to_f32(tmp32, cpu_env, src);
2149            break;
2150        case 4:
2151            opsize = OS_WORD;
2152            gen_helper_f64_to_i32(tmp32, cpu_env, src);
2153            break;
2154        case 5: /* OS_DOUBLE */
2155            tcg_gen_mov_i32(tmp32, AREG(insn, 0));
2156            switch ((insn >> 3) & 7) {
2157            case 2:
2158            case 3:
2159                break;
2160            case 4:
2161                tcg_gen_addi_i32(tmp32, tmp32, -8);
2162                break;
2163            case 5:
2164                offset = cpu_ldsw_code(env, s->pc);
2165                s->pc += 2;
2166                tcg_gen_addi_i32(tmp32, tmp32, offset);
2167                break;
2168            default:
2169                goto undef;
2170            }
2171            gen_store64(s, tmp32, src);
2172            switch ((insn >> 3) & 7) {
2173            case 3:
2174                tcg_gen_addi_i32(tmp32, tmp32, 8);
2175                tcg_gen_mov_i32(AREG(insn, 0), tmp32);
2176                break;
2177            case 4:
2178                tcg_gen_mov_i32(AREG(insn, 0), tmp32);
2179                break;
2180            }
2181            tcg_temp_free_i32(tmp32);
2182            return;
2183        case 6:
2184            opsize = OS_BYTE;
2185            gen_helper_f64_to_i32(tmp32, cpu_env, src);
2186            break;
2187        default:
2188            goto undef;
2189        }
2190        DEST_EA(env, insn, opsize, tmp32, NULL);
2191        tcg_temp_free_i32(tmp32);
2192        return;
2193    case 4: /* fmove to control register.  */
2194        switch ((ext >> 10) & 7) {
2195        case 4: /* FPCR */
2196            /* Not implemented.  Ignore writes.  */
2197            break;
2198        case 1: /* FPIAR */
2199        case 2: /* FPSR */
2200        default:
2201            cpu_abort(CPU(m68k_env_get_cpu(env)),
2202                      "Unimplemented: fmove to control %d", (ext >> 10) & 7);
2203        }
2204        break;
2205    case 5: /* fmove from control register.  */
2206        switch ((ext >> 10) & 7) {
2207        case 4: /* FPCR */
2208            /* Not implemented.  Always return zero.  */
2209            tmp32 = tcg_const_i32(0);
2210            break;
2211        case 1: /* FPIAR */
2212        case 2: /* FPSR */
2213        default:
2214            cpu_abort(CPU(m68k_env_get_cpu(env)),
2215                      "Unimplemented: fmove from control %d", (ext >> 10) & 7);
2216            goto undef;
2217        }
2218        DEST_EA(env, insn, OS_LONG, tmp32, NULL);
2219        break;
2220    case 6: /* fmovem */
2221    case 7:
2222        {
2223            TCGv addr;
2224            uint16_t mask;
2225            int i;
2226            if ((ext & 0x1f00) != 0x1000 || (ext & 0xff) == 0)
2227                goto undef;
2228            tmp32 = gen_lea(env, s, insn, OS_LONG);
2229            if (IS_NULL_QREG(tmp32)) {
2230                gen_addr_fault(s);
2231                return;
2232            }
2233            addr = tcg_temp_new_i32();
2234            tcg_gen_mov_i32(addr, tmp32);
2235            mask = 0x80;
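                /* Bit 7 of the register list selects FP0 and bit 0 selects
                   FP7; each selected register transfers eight bytes.  */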
2236            for (i = 0; i < 8; i++) {
2237                if (ext & mask) {
2238                    dest = FREG(i, 0);
2239                    if (ext & (1 << 13)) {
2240                        /* store */
2241                        tcg_gen_qemu_stf64(dest, addr, IS_USER(s));
2242                    } else {
2243                        /* load */
2244                        tcg_gen_qemu_ldf64(dest, addr, IS_USER(s));
2245                    }
2246                    if (ext & (mask - 1))
2247                        tcg_gen_addi_i32(addr, addr, 8);
2248                }
2249                mask >>= 1;
2250            }
2251            tcg_temp_free_i32(addr);
2252        }
2253        return;
2254    }
2255    if (ext & (1 << 14)) {
2256        /* Source effective address.  */
2257        switch ((ext >> 10) & 7) {
2258        case 0: opsize = OS_LONG; break;
2259        case 1: opsize = OS_SINGLE; break;
2260        case 4: opsize = OS_WORD; break;
2261        case 5: opsize = OS_DOUBLE; break;
2262        case 6: opsize = OS_BYTE; break;
2263        default:
2264            goto undef;
2265        }
2266        if (opsize == OS_DOUBLE) {
2267            tmp32 = tcg_temp_new_i32();
2268            tcg_gen_mov_i32(tmp32, AREG(insn, 0));
2269            switch ((insn >> 3) & 7) {
2270            case 2:
2271            case 3:
2272                break;
2273            case 4:
2274                tcg_gen_addi_i32(tmp32, tmp32, -8);
2275                break;
2276            case 5:
2277                offset = cpu_ldsw_code(env, s->pc);
2278                s->pc += 2;
2279                tcg_gen_addi_i32(tmp32, tmp32, offset);
2280                break;
2281            case 7:
2282                offset = cpu_ldsw_code(env, s->pc);
2283                offset += s->pc - 2;
2284                s->pc += 2;
2285                tcg_gen_addi_i32(tmp32, tmp32, offset);
2286                break;
2287            default:
2288                goto undef;
2289            }
2290            src = gen_load64(s, tmp32);
2291            switch ((insn >> 3) & 7) {
2292            case 3:
2293                tcg_gen_addi_i32(tmp32, tmp32, 8);
2294                tcg_gen_mov_i32(AREG(insn, 0), tmp32);
2295                break;
2296            case 4:
2297                tcg_gen_mov_i32(AREG(insn, 0), tmp32);
2298                break;
2299            }
2300            tcg_temp_free_i32(tmp32);
2301        } else {
2302            SRC_EA(env, tmp32, opsize, 1, NULL);
2303            src = tcg_temp_new_i64();
2304            switch (opsize) {
2305            case OS_LONG:
2306            case OS_WORD:
2307            case OS_BYTE:
2308                gen_helper_i32_to_f64(src, cpu_env, tmp32);
2309                break;
2310            case OS_SINGLE:
2311                gen_helper_f32_to_f64(src, cpu_env, tmp32);
2312                break;
2313            }
2314        }
2315    } else {
2316        /* Source register.  */
2317        src = FREG(ext, 10);
2318    }
2319    dest = FREG(ext, 7);
2320    res = tcg_temp_new_i64();
2321    if (opmode != 0x3a)
2322        tcg_gen_mov_f64(res, dest);
2323    round = 1;
2324    set_dest = 1;
2325    switch (opmode) {
2326    case 0: case 0x40: case 0x44: /* fmove */
2327        tcg_gen_mov_f64(res, src);
2328        break;
2329    case 1: /* fint */
2330        gen_helper_iround_f64(res, cpu_env, src);
2331        round = 0;
2332        break;
2333    case 3: /* fintrz */
2334        gen_helper_itrunc_f64(res, cpu_env, src);
2335        round = 0;
2336        break;
2337    case 4: case 0x41: case 0x45: /* fsqrt */
2338        gen_helper_sqrt_f64(res, cpu_env, src);
2339        break;
2340    case 0x18: case 0x58: case 0x5c: /* fabs */
2341        gen_helper_abs_f64(res, src);
2342        break;
2343    case 0x1a: case 0x5a: case 0x5e: /* fneg */
2344        gen_helper_chs_f64(res, src);
2345        break;
2346    case 0x20: case 0x60: case 0x64: /* fdiv */
2347        gen_helper_div_f64(res, cpu_env, res, src);
2348        break;
2349    case 0x22: case 0x62: case 0x66: /* fadd */
2350        gen_helper_add_f64(res, cpu_env, res, src);
2351        break;
2352    case 0x23: case 0x63: case 0x67: /* fmul */
2353        gen_helper_mul_f64(res, cpu_env, res, src);
2354        break;
2355    case 0x28: case 0x68: case 0x6c: /* fsub */
2356        gen_helper_sub_f64(res, cpu_env, res, src);
2357        break;
2358    case 0x38: /* fcmp */
2359        gen_helper_sub_cmp_f64(res, cpu_env, res, src);
2360        set_dest = 0;
2361        round = 0;
2362        break;
2363    case 0x3a: /* ftst */
2364        tcg_gen_mov_f64(res, src);
2365        set_dest = 0;
2366        round = 0;
2367        break;
2368    default:
2369        goto undef;
2370    }
2371    if (ext & (1 << 14)) {
2372        tcg_temp_free_i64(src);
2373    }
2374    if (round) {
2375        if (opmode & 0x40) {
2376            if ((opmode & 0x4) != 0)
2377                round = 0;
2378        } else if ((s->fpcr & M68K_FPCR_PREC) == 0) {
2379            round = 0;
2380        }
2381    }
2382    if (round) {
2383        TCGv tmp = tcg_temp_new_i32();
2384        gen_helper_f64_to_f32(tmp, cpu_env, res);
2385        gen_helper_f32_to_f64(res, cpu_env, tmp);
2386        tcg_temp_free_i32(tmp);
2387    }
2388    tcg_gen_mov_f64(QREG_FP_RESULT, res);
2389    if (set_dest) {
2390        tcg_gen_mov_f64(dest, res);
2391    }
2392    tcg_temp_free_i64(res);
2393    return;
2394undef:
2395    /* FIXME: Is this right for offset addressing modes?  */
2396    s->pc -= 2;
2397    disas_undef_fpu(env, s, insn);
2398}
2399
2400DISAS_INSN(fbcc)
2401{
2402    uint32_t offset;
2403    uint32_t addr;
2404    TCGv flag;
2405    TCGLabel *l1;
2406
2407    addr = s->pc;
2408    offset = cpu_ldsw_code(env, s->pc);
2409    s->pc += 2;
2410    if (insn & (1 << 6)) {
2411        offset = (offset << 16) | cpu_lduw_code(env, s->pc);
2412        s->pc += 2;
2413    }
2414
2415    l1 = gen_new_label();
2416    /* TODO: Raise BSUN exception.  */
2417    flag = tcg_temp_new();
2418    gen_helper_compare_f64(flag, cpu_env, QREG_FP_RESULT);
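        /* compare_f64 yields -1 (less than), 0 (equal), 1 (greater than)
           or 2 (unordered); the tests below rely on this encoding.  */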
2419    /* Jump to l1 if condition is true.  */
2420    switch (insn & 0xf) {
2421    case 0: /* f */
2422        break;
2423    case 1: /* eq (=0) */
2424        tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(0), l1);
2425        break;
2426    case 2: /* ogt (=1) */
2427        tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(1), l1);
2428        break;
2429    case 3: /* oge (=0 or =1) */
2430        tcg_gen_brcond_i32(TCG_COND_LEU, flag, tcg_const_i32(1), l1);
2431        break;
2432    case 4: /* olt (=-1) */
2433        tcg_gen_brcond_i32(TCG_COND_LT, flag, tcg_const_i32(0), l1);
2434        break;
2435    case 5: /* ole (=-1 or =0) */
2436        tcg_gen_brcond_i32(TCG_COND_LE, flag, tcg_const_i32(0), l1);
2437        break;
2438    case 6: /* ogl (=-1 or =1) */
2439        tcg_gen_andi_i32(flag, flag, 1);
2440        tcg_gen_brcond_i32(TCG_COND_NE, flag, tcg_const_i32(0), l1);
2441        break;
2442    case 7: /* or (=2) */
2443        tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(2), l1);
2444        break;
2445    case 8: /* un (<2) */
2446        tcg_gen_brcond_i32(TCG_COND_LT, flag, tcg_const_i32(2), l1);
2447        break;
2448    case 9: /* ueq (=0 or =2) */
2449        tcg_gen_andi_i32(flag, flag, 1);
2450        tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(0), l1);
2451        break;
2452    case 10: /* ugt (>0) */
2453        tcg_gen_brcond_i32(TCG_COND_GT, flag, tcg_const_i32(0), l1);
2454        break;
2455    case 11: /* uge (>=0) */
2456        tcg_gen_brcond_i32(TCG_COND_GE, flag, tcg_const_i32(0), l1);
2457        break;
2458    case 12: /* ult (=-1 or =2) */
2459        tcg_gen_brcond_i32(TCG_COND_GEU, flag, tcg_const_i32(2), l1);
2460        break;
2461    case 13: /* ule (!=1) */
2462        tcg_gen_brcond_i32(TCG_COND_NE, flag, tcg_const_i32(1), l1);
2463        break;
2464    case 14: /* ne (!=0) */
2465        tcg_gen_brcond_i32(TCG_COND_NE, flag, tcg_const_i32(0), l1);
2466        break;
2467    case 15: /* t */
2468        tcg_gen_br(l1);
2469        break;
2470    }
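        /* Not taken: fall through to the next insn.  Taken: branch to the
           target computed from the displacement.  */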
2471    gen_jmp_tb(s, 0, s->pc);
2472    gen_set_label(l1);
2473    gen_jmp_tb(s, 1, addr + offset);
2474}
2475
2476DISAS_INSN(frestore)
2477{
2478    M68kCPU *cpu = m68k_env_get_cpu(env);
2479
2480    /* TODO: Implement frestore.  */
2481    cpu_abort(CPU(cpu), "FRESTORE not implemented");
2482}
2483
2484DISAS_INSN(fsave)
2485{
2486    M68kCPU *cpu = m68k_env_get_cpu(env);
2487
2488    /* TODO: Implement fsave.  */
2489    cpu_abort(CPU(cpu), "FSAVE not implemented");
2490}
2491
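    /* Extract one 16-bit half of a MAC operand.  In fractional mode (MACSR_FI)
       the half is left aligned in the upper 16 bits, in signed integer mode
       (MACSR_SU) it is sign-extended, otherwise it is zero-extended.  */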
2492static inline TCGv gen_mac_extract_word(DisasContext *s, TCGv val, int upper)
2493{
2494    TCGv tmp = tcg_temp_new();
2495    if (s->env->macsr & MACSR_FI) {
2496        if (upper)
2497            tcg_gen_andi_i32(tmp, val, 0xffff0000);
2498        else
2499            tcg_gen_shli_i32(tmp, val, 16);
2500    } else if (s->env->macsr & MACSR_SU) {
2501        if (upper)
2502            tcg_gen_sari_i32(tmp, val, 16);
2503        else
2504            tcg_gen_ext16s_i32(tmp, val);
2505    } else {
2506        if (upper)
2507            tcg_gen_shri_i32(tmp, val, 16);
2508        else
2509            tcg_gen_ext16u_i32(tmp, val);
2510    }
2511    return tmp;
2512}
2513
2514static void gen_mac_clear_flags(void)
2515{
2516    tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR,
2517                     ~(MACSR_V | MACSR_Z | MACSR_N | MACSR_EV));
2518}
2519
2520DISAS_INSN(mac)
2521{
2522    TCGv rx;
2523    TCGv ry;
2524    uint16_t ext;
2525    int acc;
2526    TCGv tmp;
2527    TCGv addr;
2528    TCGv loadval;
2529    int dual;
2530    TCGv saved_flags;
2531
2532    if (!s->done_mac) {
2533        s->mactmp = tcg_temp_new_i64();
2534        s->done_mac = 1;
2535    }
2536
2537    ext = cpu_lduw_code(env, s->pc);
2538    s->pc += 2;
2539
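        /* Opcode bit 7 and extension word bit 4 select the accumulator (0-3).
           The dual-accumulate form requires the EMAC_B feature.  */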
2540    acc = ((insn >> 7) & 1) | ((ext >> 3) & 2);
2541    dual = ((insn & 0x30) != 0 && (ext & 3) != 0);
2542    if (dual && !m68k_feature(s->env, M68K_FEATURE_CF_EMAC_B)) {
2543        disas_undef(env, s, insn);
2544        return;
2545    }
2546    if (insn & 0x30) {
2547        /* MAC with load.  */
2548        tmp = gen_lea(env, s, insn, OS_LONG);
2549        addr = tcg_temp_new();
2550        tcg_gen_and_i32(addr, tmp, QREG_MAC_MASK);
2551        /* Load the value now to ensure correct exception behavior.
2552           Perform writeback after reading the MAC inputs.  */
2553        loadval = gen_load(s, OS_LONG, addr, 0);
2554
2555        acc ^= 1;
2556        rx = (ext & 0x8000) ? AREG(ext, 12) : DREG(insn, 12);
2557        ry = (ext & 8) ? AREG(ext, 0) : DREG(ext, 0);
2558    } else {
2559        loadval = addr = NULL_QREG;
2560        rx = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
2561        ry = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
2562    }
2563
2564    gen_mac_clear_flags();
2565#if 0
2566    l1 = -1;
2567    /* Disabled because conditional branches clobber temporary vars.  */
2568    if ((s->env->macsr & MACSR_OMC) != 0 && !dual) {
2569        /* Skip the multiply if we know we will ignore it.  */
2570        l1 = gen_new_label();
2571        tmp = tcg_temp_new();
2572        tcg_gen_andi_i32(tmp, QREG_MACSR, 1 << (acc + 8));
2573        gen_op_jmp_nz32(tmp, l1);
2574    }
2575#endif
2576
2577    if ((ext & 0x0800) == 0) {
2578        /* Word.  */
2579        rx = gen_mac_extract_word(s, rx, (ext & 0x80) != 0);
2580        ry = gen_mac_extract_word(s, ry, (ext & 0x40) != 0);
2581    }
2582    if (s->env->macsr & MACSR_FI) {
2583        gen_helper_macmulf(s->mactmp, cpu_env, rx, ry);
2584    } else {
2585        if (s->env->macsr & MACSR_SU)
2586            gen_helper_macmuls(s->mactmp, cpu_env, rx, ry);
2587        else
2588            gen_helper_macmulu(s->mactmp, cpu_env, rx, ry);
2589        switch ((ext >> 9) & 3) {
2590        case 1:
2591            tcg_gen_shli_i64(s->mactmp, s->mactmp, 1);
2592            break;
2593        case 3:
2594            tcg_gen_shri_i64(s->mactmp, s->mactmp, 1);
2595            break;
2596        }
2597    }
2598
2599    if (dual) {
2600        /* Save the overflow flag from the multiply.  */
2601        saved_flags = tcg_temp_new();
2602        tcg_gen_mov_i32(saved_flags, QREG_MACSR);
2603    } else {
2604        saved_flags = NULL_QREG;
2605    }
2606
2607#if 0
2608    /* Disabled because conditional branches clobber temporary vars.  */
2609    if ((s->env->macsr & MACSR_OMC) != 0 && dual) {
2610        /* Skip the accumulate if the value is already saturated.  */
2611        l1 = gen_new_label();
2612        tmp = tcg_temp_new();
2613        gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc));
2614        gen_op_jmp_nz32(tmp, l1);
2615    }
2616#endif
2617
2618    if (insn & 0x100)
2619        tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
2620    else
2621        tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
2622
2623    if (s->env->macsr & MACSR_FI)
2624        gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
2625    else if (s->env->macsr & MACSR_SU)
2626        gen_helper_macsats(cpu_env, tcg_const_i32(acc));
2627    else
2628        gen_helper_macsatu(cpu_env, tcg_const_i32(acc));
2629
2630#if 0
2631    /* Disabled because conditional branches clobber temporary vars.  */
2632    if (l1 != -1)
2633        gen_set_label(l1);
2634#endif
2635
2636    if (dual) {
2637        /* Dual accumulate variant.  */
2638        acc = (ext >> 2) & 3;
2639        /* Restore the overflow flag from the multiplier.  */
2640        tcg_gen_mov_i32(QREG_MACSR, saved_flags);
2641#if 0
2642        /* Disabled because conditional branches clobber temporary vars.  */
2643        if ((s->env->macsr & MACSR_OMC) != 0) {
2644            /* Skip the accumulate if the value is already saturated.  */
2645            l1 = gen_new_label();
2646            tmp = tcg_temp_new();
2647            gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc));
2648            gen_op_jmp_nz32(tmp, l1);
2649        }
2650#endif
2651        if (ext & 2)
2652            tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
2653        else
2654            tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
2655        if (s->env->macsr & MACSR_FI)
2656            gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
2657        else if (s->env->macsr & MACSR_SU)
2658            gen_helper_macsats(cpu_env, tcg_const_i32(acc));
2659        else
2660            gen_helper_macsatu(cpu_env, tcg_const_i32(acc));
2661#if 0
2662        /* Disabled because conditional branches clobber temporary vars.  */
2663        if (l1 != -1)
2664            gen_set_label(l1);
2665#endif
2666    }
2667    gen_helper_mac_set_flags(cpu_env, tcg_const_i32(acc));
2668
2669    if (insn & 0x30) {
2670        TCGv rw;
2671        rw = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
2672        tcg_gen_mov_i32(rw, loadval);
2673        /* FIXME: Should address writeback happen with the masked or
2674           unmasked value?  */
2675        switch ((insn >> 3) & 7) {
2676        case 3: /* Post-increment.  */
2677            tcg_gen_addi_i32(AREG(insn, 0), addr, 4);
2678            break;
2679        case 4: /* Pre-decrement.  */
2680            tcg_gen_mov_i32(AREG(insn, 0), addr);
2681        }
2682    }
2683}
2684
2685DISAS_INSN(from_mac)
2686{
2687    TCGv rx;
2688    TCGv_i64 acc;
2689    int accnum;
2690
2691    rx = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
2692    accnum = (insn >> 9) & 3;
2693    acc = MACREG(accnum);
2694    if (s->env->macsr & MACSR_FI) {
2695        gen_helper_get_macf(rx, cpu_env, acc);
2696    } else if ((s->env->macsr & MACSR_OMC) == 0) {
2697        tcg_gen_extrl_i64_i32(rx, acc);
2698    } else if (s->env->macsr & MACSR_SU) {
2699        gen_helper_get_macs(rx, acc);
2700    } else {
2701        gen_helper_get_macu(rx, acc);
2702    }
2703    if (insn & 0x40) {
2704        tcg_gen_movi_i64(acc, 0);
2705        tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
2706    }
2707}
2708
2709DISAS_INSN(move_mac)
2710{
2711    /* FIXME: This can be done without a helper.  */
2712    int src;
2713    TCGv dest;
2714    src = insn & 3;
2715    dest = tcg_const_i32((insn >> 9) & 3);
2716    gen_helper_mac_move(cpu_env, dest, tcg_const_i32(src));
2717    gen_mac_clear_flags();
2718    gen_helper_mac_set_flags(cpu_env, dest);
2719}
2720
2721DISAS_INSN(from_macsr)
2722{
2723    TCGv reg;
2724
2725    reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
2726    tcg_gen_mov_i32(reg, QREG_MACSR);
2727}
2728
2729DISAS_INSN(from_mask)
2730{
2731    TCGv reg;
2732    reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
2733    tcg_gen_mov_i32(reg, QREG_MAC_MASK);
2734}
2735
2736DISAS_INSN(from_mext)
2737{
2738    TCGv reg;
2739    TCGv acc;
2740    reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
2741    acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
2742    if (s->env->macsr & MACSR_FI)
2743        gen_helper_get_mac_extf(reg, cpu_env, acc);
2744    else
2745        gen_helper_get_mac_exti(reg, cpu_env, acc);
2746}
2747
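    /* Copy the low four MACSR flag bits into the condition codes and
       clear X.  */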
2748DISAS_INSN(macsr_to_ccr)
2749{
2750    tcg_gen_movi_i32(QREG_CC_X, 0);
2751    tcg_gen_andi_i32(QREG_CC_DEST, QREG_MACSR, 0xf);
2752    s->cc_op = CC_OP_FLAGS;
2753}
2754
2755DISAS_INSN(to_mac)
2756{
2757    TCGv_i64 acc;
2758    TCGv val;
2759    int accnum;
2760    accnum = (insn >> 9) & 3;
2761    acc = MACREG(accnum);
2762    SRC_EA(env, val, OS_LONG, 0, NULL);
2763    if (s->env->macsr & MACSR_FI) {
2764        tcg_gen_ext_i32_i64(acc, val);
2765        tcg_gen_shli_i64(acc, acc, 8);
2766    } else if (s->env->macsr & MACSR_SU) {
2767        tcg_gen_ext_i32_i64(acc, val);
2768    } else {
2769        tcg_gen_extu_i32_i64(acc, val);
2770    }
2771    tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
2772    gen_mac_clear_flags();
2773    gen_helper_mac_set_flags(cpu_env, tcg_const_i32(accnum));
2774}
2775
2776DISAS_INSN(to_macsr)
2777{
2778    TCGv val;
2779    SRC_EA(env, val, OS_LONG, 0, NULL);
2780    gen_helper_set_macsr(cpu_env, val);
2781    gen_lookup_tb(s);
2782}
2783
2784DISAS_INSN(to_mask)
2785{
2786    TCGv val;
2787    SRC_EA(env, val, OS_LONG, 0, NULL);
2788    tcg_gen_ori_i32(QREG_MAC_MASK, val, 0xffff0000);
2789}
2790
2791DISAS_INSN(to_mext)
2792{
2793    TCGv val;
2794    TCGv acc;
2795    SRC_EA(env, val, OS_LONG, 0, NULL);
2796    acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
2797    if (s->env->macsr & MACSR_FI)
2798        gen_helper_set_mac_extf(cpu_env, val, acc);
2799    else if (s->env->macsr & MACSR_SU)
2800        gen_helper_set_mac_exts(cpu_env, val, acc);
2801    else
2802        gen_helper_set_mac_extu(cpu_env, val, acc);
2803}
2804
2805static disas_proc opcode_table[65536];
2806
2807static void
2808register_opcode (disas_proc proc, uint16_t opcode, uint16_t mask)
2809{
2810  int i;
2811  int from;
2812  int to;
2813
2814  /* Sanity check.  All set bits must be included in the mask.  */
2815  if (opcode & ~mask) {
2816      fprintf(stderr,
2817              "qemu internal error: bogus opcode definition %04x/%04x\n",
2818              opcode, mask);
2819      abort();
2820  }
2821  /* This could probably be cleverer.  For now just optimize the case where
2822     the top bits are known.  */
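      /* For example, an entry registered with opcode 0x50c0 and mask 0xf0f8
         has its first zero mask bit at 0x0800, so the loop below fills every
         table slot from 0x5000 to 0x5fff whose masked bits equal 0x50c0.  */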
2823  /* Find the first zero bit in the mask.  */
2824  i = 0x8000;
2825  while ((i & mask) != 0)
2826      i >>= 1;
2827  /* Iterate over all combinations of this and lower bits.  */
2828  if (i == 0)
2829      i = 1;
2830  else
2831      i <<= 1;
2832  from = opcode & ~(i - 1);
2833  to = from + i;
2834  for (i = from; i < to; i++) {
2835      if ((i & mask) == opcode)
2836          opcode_table[i] = proc;
2837  }
2838}
2839
2840/* Register m68k opcode handlers.  Order is important.
2841   Later insns override earlier ones.  */
2842void register_m68k_insns (CPUM68KState *env)
2843{
2844#define INSN(name, opcode, mask, feature) do { \
2845    if (m68k_feature(env, M68K_FEATURE_##feature)) \
2846        register_opcode(disas_##name, 0x##opcode, 0x##mask); \
2847    } while(0)
2848    INSN(undef,     0000, 0000, CF_ISA_A);
2849    INSN(arith_im,  0080, fff8, CF_ISA_A);
2850    INSN(bitrev,    00c0, fff8, CF_ISA_APLUSC);
2851    INSN(bitop_reg, 0100, f1c0, CF_ISA_A);
2852    INSN(bitop_reg, 0140, f1c0, CF_ISA_A);
2853    INSN(bitop_reg, 0180, f1c0, CF_ISA_A);
2854    INSN(bitop_reg, 01c0, f1c0, CF_ISA_A);
2855    INSN(arith_im,  0280, fff8, CF_ISA_A);
2856    INSN(byterev,   02c0, fff8, CF_ISA_APLUSC);
2857    INSN(arith_im,  0480, fff8, CF_ISA_A);
2858    INSN(ff1,       04c0, fff8, CF_ISA_APLUSC);
2859    INSN(arith_im,  0680, fff8, CF_ISA_A);
2860    INSN(bitop_im,  0800, ffc0, CF_ISA_A);
2861    INSN(bitop_im,  0840, ffc0, CF_ISA_A);
2862    INSN(bitop_im,  0880, ffc0, CF_ISA_A);
2863    INSN(bitop_im,  08c0, ffc0, CF_ISA_A);
2864    INSN(arith_im,  0a80, fff8, CF_ISA_A);
2865    INSN(arith_im,  0c00, ff38, CF_ISA_A);
2866    INSN(move,      1000, f000, CF_ISA_A);
2867    INSN(move,      2000, f000, CF_ISA_A);
2868    INSN(move,      3000, f000, CF_ISA_A);
2869    INSN(strldsr,   40e7, ffff, CF_ISA_APLUSC);
2870    INSN(negx,      4080, fff8, CF_ISA_A);
2871    INSN(move_from_sr, 40c0, fff8, CF_ISA_A);
2872    INSN(lea,       41c0, f1c0, CF_ISA_A);
2873    INSN(clr,       4200, ff00, CF_ISA_A);
2874    INSN(undef,     42c0, ffc0, CF_ISA_A);
2875    INSN(move_from_ccr, 42c0, fff8, CF_ISA_A);
2876    INSN(neg,       4480, fff8, CF_ISA_A);
2877    INSN(move_to_ccr, 44c0, ffc0, CF_ISA_A);
2878    INSN(not,       4680, fff8, CF_ISA_A);
2879    INSN(move_to_sr, 46c0, ffc0, CF_ISA_A);
2880    INSN(pea,       4840, ffc0, CF_ISA_A);
2881    INSN(swap,      4840, fff8, CF_ISA_A);
2882    INSN(movem,     48c0, fbc0, CF_ISA_A);
2883    INSN(ext,       4880, fff8, CF_ISA_A);
2884    INSN(ext,       48c0, fff8, CF_ISA_A);
2885    INSN(ext,       49c0, fff8, CF_ISA_A);
2886    INSN(tst,       4a00, ff00, CF_ISA_A);
2887    INSN(tas,       4ac0, ffc0, CF_ISA_B);
2888    INSN(halt,      4ac8, ffff, CF_ISA_A);
2889    INSN(pulse,     4acc, ffff, CF_ISA_A);
2890    INSN(illegal,   4afc, ffff, CF_ISA_A);
2891    INSN(mull,      4c00, ffc0, CF_ISA_A);
2892    INSN(divl,      4c40, ffc0, CF_ISA_A);
2893    INSN(sats,      4c80, fff8, CF_ISA_B);
2894    INSN(trap,      4e40, fff0, CF_ISA_A);
2895    INSN(link,      4e50, fff8, CF_ISA_A);
2896    INSN(unlk,      4e58, fff8, CF_ISA_A);
2897    INSN(move_to_usp, 4e60, fff8, USP);
2898    INSN(move_from_usp, 4e68, fff8, USP);
2899    INSN(nop,       4e71, ffff, CF_ISA_A);
2900    INSN(stop,      4e72, ffff, CF_ISA_A);
2901    INSN(rte,       4e73, ffff, CF_ISA_A);
2902    INSN(rts,       4e75, ffff, CF_ISA_A);
2903    INSN(movec,     4e7b, ffff, CF_ISA_A);
2904    INSN(jump,      4e80, ffc0, CF_ISA_A);
2905    INSN(jump,      4ec0, ffc0, CF_ISA_A);
2906    INSN(addsubq,   5180, f1c0, CF_ISA_A);
2907    INSN(scc,       50c0, f0f8, CF_ISA_A);
2908    INSN(addsubq,   5080, f1c0, CF_ISA_A);
2909    INSN(tpf,       51f8, fff8, CF_ISA_A);
2910
2911    /* Branch instructions.  */
2912    INSN(branch,    6000, f000, CF_ISA_A);
2913    /* Disable long branch instructions, then add back the ones we want.  */
2914    INSN(undef,     60ff, f0ff, CF_ISA_A); /* All long branches.  */
2915    INSN(branch,    60ff, f0ff, CF_ISA_B);
2916    INSN(undef,     60ff, ffff, CF_ISA_B); /* bra.l */
2917    INSN(branch,    60ff, ffff, BRAL);
2918
2919    INSN(moveq,     7000, f100, CF_ISA_A);
2920    INSN(mvzs,      7100, f100, CF_ISA_B);
2921    INSN(or,        8000, f000, CF_ISA_A);
2922    INSN(divw,      80c0, f0c0, CF_ISA_A);
2923    INSN(addsub,    9000, f000, CF_ISA_A);
2924    INSN(subx,      9180, f1f8, CF_ISA_A);
2925    INSN(suba,      91c0, f1c0, CF_ISA_A);
2926
2927    INSN(undef_mac, a000, f000, CF_ISA_A);
2928    INSN(mac,       a000, f100, CF_EMAC);
2929    INSN(from_mac,  a180, f9b0, CF_EMAC);
2930    INSN(move_mac,  a110, f9fc, CF_EMAC);
2931    INSN(from_macsr,a980, f9f0, CF_EMAC);
2932    INSN(from_mask, ad80, fff0, CF_EMAC);
2933    INSN(from_mext, ab80, fbf0, CF_EMAC);
2934    INSN(macsr_to_ccr, a9c0, ffff, CF_EMAC);
2935    INSN(to_mac,    a100, f9c0, CF_EMAC);
2936    INSN(to_macsr,  a900, ffc0, CF_EMAC);
2937    INSN(to_mext,   ab00, fbc0, CF_EMAC);
2938    INSN(to_mask,   ad00, ffc0, CF_EMAC);
2939
2940    INSN(mov3q,     a140, f1c0, CF_ISA_B);
2941    INSN(cmp,       b000, f1c0, CF_ISA_B); /* cmp.b */
2942    INSN(cmp,       b040, f1c0, CF_ISA_B); /* cmp.w */
2943    INSN(cmpa,      b0c0, f1c0, CF_ISA_B); /* cmpa.w */
2944    INSN(cmp,       b080, f1c0, CF_ISA_A);
2945    INSN(cmpa,      b1c0, f1c0, CF_ISA_A);
2946    INSN(eor,       b180, f1c0, CF_ISA_A);
2947    INSN(and,       c000, f000, CF_ISA_A);
2948    INSN(mulw,      c0c0, f0c0, CF_ISA_A);
2949    INSN(addsub,    d000, f000, CF_ISA_A);
2950    INSN(addx,      d180, f1f8, CF_ISA_A);
2951    INSN(adda,      d1c0, f1c0, CF_ISA_A);
2952    INSN(shift_im,  e080, f0f0, CF_ISA_A);
2953    INSN(shift_reg, e0a0, f0f0, CF_ISA_A);
2954    INSN(undef_fpu, f000, f000, CF_ISA_A);
2955    INSN(fpu,       f200, ffc0, CF_FPU);
2956    INSN(fbcc,      f280, ffc0, CF_FPU);
2957    INSN(frestore,  f340, ffc0, CF_FPU);
2958    INSN(fsave,     f340, ffc0, CF_FPU);
2959    INSN(intouch,   f340, ffc0, CF_ISA_A);
2960    INSN(cpushl,    f428, ff38, CF_ISA_A);
2961    INSN(wddata,    fb00, ff00, CF_ISA_A);
2962    INSN(wdebug,    fbc0, ffc0, CF_ISA_A);
2963#undef INSN
2964}
2965
2966/* ??? Some of this implementation is not exception safe.  We should always
2967   write back the result to memory before setting the condition codes.  */
2968static void disas_m68k_insn(CPUM68KState * env, DisasContext *s)
2969{
2970    uint16_t insn;
2971
2972    insn = cpu_lduw_code(env, s->pc);
2973    s->pc += 2;
2974
2975    opcode_table[insn](env, s, insn);
2976}
2977
2978/* generate intermediate code for basic block 'tb'.  */
2979void gen_intermediate_code(CPUM68KState *env, TranslationBlock *tb)
2980{
2981    M68kCPU *cpu = m68k_env_get_cpu(env);
2982    CPUState *cs = CPU(cpu);
2983    DisasContext dc1, *dc = &dc1;
2984    target_ulong pc_start;
2985    int pc_offset;
2986    int num_insns;
2987    int max_insns;
2988
2989    /* generate intermediate code */
2990    pc_start = tb->pc;
2991
2992    dc->tb = tb;
2993
2994    dc->env = env;
2995    dc->is_jmp = DISAS_NEXT;
2996    dc->pc = pc_start;
2997    dc->cc_op = CC_OP_DYNAMIC;
2998    dc->singlestep_enabled = cs->singlestep_enabled;
2999    dc->fpcr = env->fpcr;
3000    dc->user = (env->sr & SR_S) == 0;
3001    dc->done_mac = 0;
3002    num_insns = 0;
3003    max_insns = tb->cflags & CF_COUNT_MASK;
3004    if (max_insns == 0) {
3005        max_insns = CF_COUNT_MASK;
3006    }
3007    if (max_insns > TCG_MAX_INSNS) {
3008        max_insns = TCG_MAX_INSNS;
3009    }
3010
3011    gen_tb_start(tb);
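        /* Translate one instruction per iteration until we hit a branch, the
           op buffer fills, single-stepping is enabled, the block nears the
           end of the page, or the instruction budget runs out.  */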
3012    do {
3013        pc_offset = dc->pc - pc_start;
3014        gen_throws_exception = NULL;
3015        tcg_gen_insn_start(dc->pc);
3016        num_insns++;
3017
3018        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
3019            gen_exception(dc, dc->pc, EXCP_DEBUG);
3020            dc->is_jmp = DISAS_JUMP;
3021            /* The address covered by the breakpoint must be included in
3022               [tb->pc, tb->pc + tb->size) in order for it to be
3023               properly cleared -- thus we increment the PC here so that
3024               the logic setting tb->size below does the right thing.  */
3025            dc->pc += 2;
3026            break;
3027        }
3028
3029        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
3030            gen_io_start();
3031        }
3032
3033        dc->insn_pc = dc->pc;
3034        disas_m68k_insn(env, dc);
3035    } while (!dc->is_jmp && !tcg_op_buf_full() &&
3036             !cs->singlestep_enabled &&
3037             !singlestep &&
3038             (pc_offset) < (TARGET_PAGE_SIZE - 32) &&
3039             num_insns < max_insns);
3040
3041    if (tb->cflags & CF_LAST_IO)
3042        gen_io_end();
3043    if (unlikely(cs->singlestep_enabled)) {
3044        /* Make sure the pc is updated, and raise a debug exception.  */
3045        if (!dc->is_jmp) {
3046            gen_flush_cc_op(dc);
3047            tcg_gen_movi_i32(QREG_PC, dc->pc);
3048        }
3049        gen_helper_raise_exception(cpu_env, tcg_const_i32(EXCP_DEBUG));
3050    } else {
3051        switch (dc->is_jmp) {
3052        case DISAS_NEXT:
3053            gen_flush_cc_op(dc);
3054            gen_jmp_tb(dc, 0, dc->pc);
3055            break;
3056        default:
3057        case DISAS_JUMP:
3058        case DISAS_UPDATE:
3059            gen_flush_cc_op(dc);
3060            /* indicate that the hash table must be used to find the next TB */
3061            tcg_gen_exit_tb(0);
3062            break;
3063        case DISAS_TB_JUMP:
3064            /* nothing more to generate */
3065            break;
3066        }
3067    }
3068    gen_tb_end(tb, num_insns);
3069
3070#ifdef DEBUG_DISAS
3071    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
3072        && qemu_log_in_addr_range(pc_start)) {
3073        qemu_log("----------------\n");
3074        qemu_log("IN: %s\n", lookup_symbol(pc_start));
3075        log_target_disas(cs, pc_start, dc->pc - pc_start, 0);
3076        qemu_log("\n");
3077    }
3078#endif
3079    tb->size = dc->pc - pc_start;
3080    tb->icount = num_insns;
3081}
3082
3083void m68k_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
3084                         int flags)
3085{
3086    M68kCPU *cpu = M68K_CPU(cs);
3087    CPUM68KState *env = &cpu->env;
3088    int i;
3089    uint16_t sr;
3090    CPU_DoubleU u;
3091    for (i = 0; i < 8; i++)
3092      {
3093        u.d = env->fregs[i];
3094        cpu_fprintf (f, "D%d = %08x   A%d = %08x   F%d = %08x%08x (%12g)\n",
3095                     i, env->dregs[i], i, env->aregs[i],
3096                     i, u.l.upper, u.l.lower, *(double *)&u.d);
3097      }
3098    cpu_fprintf (f, "PC = %08x   ", env->pc);
3099    sr = env->sr;
3100    cpu_fprintf (f, "SR = %04x %c%c%c%c%c ", sr, (sr & 0x10) ? 'X' : '-',
3101                 (sr & CCF_N) ? 'N' : '-', (sr & CCF_Z) ? 'Z' : '-',
3102                 (sr & CCF_V) ? 'V' : '-', (sr & CCF_C) ? 'C' : '-');
3103    cpu_fprintf (f, "FPRESULT = %12g\n", *(double *)&env->fp_result);
3104}
3105
3106void restore_state_to_opc(CPUM68KState *env, TranslationBlock *tb,
3107                          target_ulong *data)
3108{
3109    env->pc = data[0];
3110}
3111