qemu/target-m68k/translate.c
   1/*
   2 *  m68k translation
   3 *
   4 *  Copyright (c) 2005-2007 CodeSourcery
   5 *  Written by Paul Brook
   6 *
   7 * This library is free software; you can redistribute it and/or
   8 * modify it under the terms of the GNU Lesser General Public
   9 * License as published by the Free Software Foundation; either
  10 * version 2 of the License, or (at your option) any later version.
  11 *
  12 * This library is distributed in the hope that it will be useful,
  13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  15 * General Public License for more details.
  16 *
  17 * You should have received a copy of the GNU Lesser General Public
  18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
  19 */
  20
  21#include "cpu.h"
  22#include "disas/disas.h"
  23#include "tcg-op.h"
  24#include "qemu/log.h"
  25
  26#include "helper.h"
  27#define GEN_HELPER 1
  28#include "helper.h"
  29
  30//#define DEBUG_DISPATCH 1
  31
  32/* Fake floating point.  */
  33#define tcg_gen_mov_f64 tcg_gen_mov_i64
  34#define tcg_gen_qemu_ldf64 tcg_gen_qemu_ld64
  35#define tcg_gen_qemu_stf64 tcg_gen_qemu_st64
  36
  37#define DEFO32(name, offset) static TCGv QREG_##name;
  38#define DEFO64(name, offset) static TCGv_i64 QREG_##name;
  39#define DEFF64(name, offset) static TCGv_i64 QREG_##name;
  40#include "qregs.def"
  41#undef DEFO32
  42#undef DEFO64
  43#undef DEFF64
  44
  45static TCGv_i32 cpu_halted;
  46
  47static TCGv_ptr cpu_env;
  48
  49static char cpu_reg_names[3*8*3 + 5*4];
  50static TCGv cpu_dregs[8];
  51static TCGv cpu_aregs[8];
  52static TCGv_i64 cpu_fregs[8];
  53static TCGv_i64 cpu_macc[4];
  54
  55#define DREG(insn, pos) cpu_dregs[((insn) >> (pos)) & 7]
  56#define AREG(insn, pos) cpu_aregs[((insn) >> (pos)) & 7]
  57#define FREG(insn, pos) cpu_fregs[((insn) >> (pos)) & 7]
  58#define MACREG(acc) cpu_macc[acc]
  59#define QREG_SP cpu_aregs[7]
  60
  61static TCGv NULL_QREG;
  62#define IS_NULL_QREG(t) (TCGV_EQUAL(t, NULL_QREG))
  63/* Used to distinguish stores from bad addressing modes.  */
  64static TCGv store_dummy;
  65
  66#include "exec/gen-icount.h"
  67
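     /* Allocate the fixed TCG globals for the register file and the other
        CPU state fields used by the translator.  */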
  68void m68k_tcg_init(void)
  69{
  70    char *p;
  71    int i;
  72
  73#define DEFO32(name,  offset) QREG_##name = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUM68KState, offset), #name);
  74#define DEFO64(name,  offset) QREG_##name = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUM68KState, offset), #name);
  75#define DEFF64(name,  offset) DEFO64(name, offset)
  76#include "qregs.def"
  77#undef DEFO32
  78#undef DEFO64
  79#undef DEFF64
  80
  81    cpu_halted = tcg_global_mem_new_i32(TCG_AREG0,
  82                                        -offsetof(M68kCPU, env) +
  83                                        offsetof(CPUState, halted), "HALTED");
  84
  85    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
  86
  87    p = cpu_reg_names;
  88    for (i = 0; i < 8; i++) {
  89        sprintf(p, "D%d", i);
  90        cpu_dregs[i] = tcg_global_mem_new(TCG_AREG0,
  91                                          offsetof(CPUM68KState, dregs[i]), p);
  92        p += 3;
  93        sprintf(p, "A%d", i);
  94        cpu_aregs[i] = tcg_global_mem_new(TCG_AREG0,
  95                                          offsetof(CPUM68KState, aregs[i]), p);
  96        p += 3;
  97        sprintf(p, "F%d", i);
  98        cpu_fregs[i] = tcg_global_mem_new_i64(TCG_AREG0,
  99                                          offsetof(CPUM68KState, fregs[i]), p);
 100        p += 3;
 101    }
 102    for (i = 0; i < 4; i++) {
 103        sprintf(p, "ACC%d", i);
 104        cpu_macc[i] = tcg_global_mem_new_i64(TCG_AREG0,
 105                                         offsetof(CPUM68KState, macc[i]), p);
 106        p += 5;
 107    }
 108
 109    NULL_QREG = tcg_global_mem_new(TCG_AREG0, -4, "NULL");
 110    store_dummy = tcg_global_mem_new(TCG_AREG0, -8, "NULL");
 111}
 112
 113static inline void qemu_assert(int cond, const char *msg)
 114{
 115    if (!cond) {
 116        fprintf (stderr, "badness: %s\n", msg);
 117        abort();
 118    }
 119}
 120
 121/* internal defines */
 122typedef struct DisasContext {
 123    CPUM68KState *env;
 124    target_ulong insn_pc; /* Start of the current instruction.  */
 125    target_ulong pc;
 126    int is_jmp;
 127    int cc_op;
 128    int user;
 129    uint32_t fpcr;
 130    struct TranslationBlock *tb;
 131    int singlestep_enabled;
 132    int is_mem;
 133    TCGv_i64 mactmp;
 134    int done_mac;
 135} DisasContext;
 136
 137#define DISAS_JUMP_NEXT 4
 138
 139#if defined(CONFIG_USER_ONLY)
 140#define IS_USER(s) 1
 141#else
 142#define IS_USER(s) s->user
 143#endif
 144
 145/* XXX: move that elsewhere */
 146/* ??? Fix exceptions.  */
 147static void *gen_throws_exception;
 148#define gen_last_qop NULL
 149
 150#define OS_BYTE 0
 151#define OS_WORD 1
 152#define OS_LONG 2
 153#define OS_SINGLE 4
 154#define OS_DOUBLE 5
 155
 156typedef void (*disas_proc)(CPUM68KState *env, DisasContext *s, uint16_t insn);
 157
 158#ifdef DEBUG_DISPATCH
 159#define DISAS_INSN(name)                                                \
 160    static void real_disas_##name(CPUM68KState *env, DisasContext *s,   \
 161                                  uint16_t insn);                       \
 162    static void disas_##name(CPUM68KState *env, DisasContext *s,        \
 163                             uint16_t insn)                             \
 164    {                                                                   \
 165        qemu_log("Dispatch " #name "\n");                               \
  166        real_disas_##name(env, s, insn);                                \
 167    }                                                                   \
 168    static void real_disas_##name(CPUM68KState *env, DisasContext *s,   \
 169                                  uint16_t insn)
 170#else
 171#define DISAS_INSN(name)                                                \
 172    static void disas_##name(CPUM68KState *env, DisasContext *s,        \
 173                             uint16_t insn)
 174#endif
 175
 176/* Generate a load from the specified address.  Narrow values are
 177   sign extended to full register width.  */
 178static inline TCGv gen_load(DisasContext * s, int opsize, TCGv addr, int sign)
 179{
 180    TCGv tmp;
 181    int index = IS_USER(s);
 182    s->is_mem = 1;
 183    tmp = tcg_temp_new_i32();
 184    switch(opsize) {
 185    case OS_BYTE:
 186        if (sign)
 187            tcg_gen_qemu_ld8s(tmp, addr, index);
 188        else
 189            tcg_gen_qemu_ld8u(tmp, addr, index);
 190        break;
 191    case OS_WORD:
 192        if (sign)
 193            tcg_gen_qemu_ld16s(tmp, addr, index);
 194        else
 195            tcg_gen_qemu_ld16u(tmp, addr, index);
 196        break;
 197    case OS_LONG:
 198    case OS_SINGLE:
 199        tcg_gen_qemu_ld32u(tmp, addr, index);
 200        break;
 201    default:
 202        qemu_assert(0, "bad load size");
 203    }
 204    gen_throws_exception = gen_last_qop;
 205    return tmp;
 206}
 207
 208static inline TCGv_i64 gen_load64(DisasContext * s, TCGv addr)
 209{
 210    TCGv_i64 tmp;
 211    int index = IS_USER(s);
 212    s->is_mem = 1;
 213    tmp = tcg_temp_new_i64();
 214    tcg_gen_qemu_ldf64(tmp, addr, index);
 215    gen_throws_exception = gen_last_qop;
 216    return tmp;
 217}
 218
 219/* Generate a store.  */
 220static inline void gen_store(DisasContext *s, int opsize, TCGv addr, TCGv val)
 221{
 222    int index = IS_USER(s);
 223    s->is_mem = 1;
 224    switch(opsize) {
 225    case OS_BYTE:
 226        tcg_gen_qemu_st8(val, addr, index);
 227        break;
 228    case OS_WORD:
 229        tcg_gen_qemu_st16(val, addr, index);
 230        break;
 231    case OS_LONG:
 232    case OS_SINGLE:
 233        tcg_gen_qemu_st32(val, addr, index);
 234        break;
 235    default:
 236        qemu_assert(0, "bad store size");
 237    }
 238    gen_throws_exception = gen_last_qop;
 239}
 240
 241static inline void gen_store64(DisasContext *s, TCGv addr, TCGv_i64 val)
 242{
 243    int index = IS_USER(s);
 244    s->is_mem = 1;
 245    tcg_gen_qemu_stf64(val, addr, index);
 246    gen_throws_exception = gen_last_qop;
 247}
 248
 249typedef enum {
 250    EA_STORE,
 251    EA_LOADU,
 252    EA_LOADS
 253} ea_what;
 254
  255/* Generate an unsigned load if WHAT is EA_LOADU, a signed load if WHAT
  256   is EA_LOADS, otherwise generate a store of VAL.  */
 257static TCGv gen_ldst(DisasContext *s, int opsize, TCGv addr, TCGv val,
 258                     ea_what what)
 259{
 260    if (what == EA_STORE) {
 261        gen_store(s, opsize, addr, val);
 262        return store_dummy;
 263    } else {
 264        return gen_load(s, opsize, addr, what == EA_LOADS);
 265    }
 266}
 267
 268/* Read a 32-bit immediate constant.  */
 269static inline uint32_t read_im32(CPUM68KState *env, DisasContext *s)
 270{
 271    uint32_t im;
 272    im = ((uint32_t)cpu_lduw_code(env, s->pc)) << 16;
 273    s->pc += 2;
 274    im |= cpu_lduw_code(env, s->pc);
 275    s->pc += 2;
 276    return im;
 277}
 278
  279/* Calculate an address index.  */
 280static TCGv gen_addr_index(uint16_t ext, TCGv tmp)
 281{
 282    TCGv add;
 283    int scale;
 284
 285    add = (ext & 0x8000) ? AREG(ext, 12) : DREG(ext, 12);
 286    if ((ext & 0x800) == 0) {
 287        tcg_gen_ext16s_i32(tmp, add);
 288        add = tmp;
 289    }
 290    scale = (ext >> 9) & 3;
 291    if (scale != 0) {
 292        tcg_gen_shli_i32(tmp, add, scale);
 293        add = tmp;
 294    }
 295    return add;
 296}
 297
  298/* Handle a base + index + displacement effective address.
 299   A NULL_QREG base means pc-relative.  */
 300static TCGv gen_lea_indexed(CPUM68KState *env, DisasContext *s, int opsize,
 301                            TCGv base)
 302{
 303    uint32_t offset;
 304    uint16_t ext;
 305    TCGv add;
 306    TCGv tmp;
 307    uint32_t bd, od;
 308
 309    offset = s->pc;
 310    ext = cpu_lduw_code(env, s->pc);
 311    s->pc += 2;
 312
 313    if ((ext & 0x800) == 0 && !m68k_feature(s->env, M68K_FEATURE_WORD_INDEX))
 314        return NULL_QREG;
 315
 316    if (ext & 0x100) {
 317        /* full extension word format */
 318        if (!m68k_feature(s->env, M68K_FEATURE_EXT_FULL))
 319            return NULL_QREG;
 320
 321        if ((ext & 0x30) > 0x10) {
 322            /* base displacement */
 323            if ((ext & 0x30) == 0x20) {
 324                bd = (int16_t)cpu_lduw_code(env, s->pc);
 325                s->pc += 2;
 326            } else {
 327                bd = read_im32(env, s);
 328            }
 329        } else {
 330            bd = 0;
 331        }
 332        tmp = tcg_temp_new();
 333        if ((ext & 0x44) == 0) {
 334            /* pre-index */
 335            add = gen_addr_index(ext, tmp);
 336        } else {
 337            add = NULL_QREG;
 338        }
 339        if ((ext & 0x80) == 0) {
 340            /* base not suppressed */
 341            if (IS_NULL_QREG(base)) {
 342                base = tcg_const_i32(offset + bd);
 343                bd = 0;
 344            }
 345            if (!IS_NULL_QREG(add)) {
 346                tcg_gen_add_i32(tmp, add, base);
 347                add = tmp;
 348            } else {
 349                add = base;
 350            }
 351        }
 352        if (!IS_NULL_QREG(add)) {
 353            if (bd != 0) {
 354                tcg_gen_addi_i32(tmp, add, bd);
 355                add = tmp;
 356            }
 357        } else {
 358            add = tcg_const_i32(bd);
 359        }
 360        if ((ext & 3) != 0) {
 361            /* memory indirect */
 362            base = gen_load(s, OS_LONG, add, 0);
 363            if ((ext & 0x44) == 4) {
 364                add = gen_addr_index(ext, tmp);
 365                tcg_gen_add_i32(tmp, add, base);
 366                add = tmp;
 367            } else {
 368                add = base;
 369            }
 370            if ((ext & 3) > 1) {
 371                /* outer displacement */
 372                if ((ext & 3) == 2) {
 373                    od = (int16_t)cpu_lduw_code(env, s->pc);
 374                    s->pc += 2;
 375                } else {
 376                    od = read_im32(env, s);
 377                }
 378            } else {
 379                od = 0;
 380            }
 381            if (od != 0) {
 382                tcg_gen_addi_i32(tmp, add, od);
 383                add = tmp;
 384            }
 385        }
 386    } else {
 387        /* brief extension word format */
 388        tmp = tcg_temp_new();
 389        add = gen_addr_index(ext, tmp);
 390        if (!IS_NULL_QREG(base)) {
 391            tcg_gen_add_i32(tmp, add, base);
 392            if ((int8_t)ext)
 393                tcg_gen_addi_i32(tmp, tmp, (int8_t)ext);
 394        } else {
 395            tcg_gen_addi_i32(tmp, add, offset + (int8_t)ext);
 396        }
 397        add = tmp;
 398    }
 399    return add;
 400}
 401
 402/* Update the CPU env CC_OP state.  */
 403static inline void gen_flush_cc_op(DisasContext *s)
 404{
 405    if (s->cc_op != CC_OP_DYNAMIC)
 406        tcg_gen_movi_i32(QREG_CC_OP, s->cc_op);
 407}
 408
 409/* Evaluate all the CC flags.  */
 410static inline void gen_flush_flags(DisasContext *s)
 411{
 412    if (s->cc_op == CC_OP_FLAGS)
 413        return;
 414    gen_flush_cc_op(s);
 415    gen_helper_flush_flags(cpu_env, QREG_CC_OP);
 416    s->cc_op = CC_OP_FLAGS;
 417}
 418
 419static void gen_logic_cc(DisasContext *s, TCGv val)
 420{
 421    tcg_gen_mov_i32(QREG_CC_DEST, val);
 422    s->cc_op = CC_OP_LOGIC;
 423}
 424
 425static void gen_update_cc_add(TCGv dest, TCGv src)
 426{
 427    tcg_gen_mov_i32(QREG_CC_DEST, dest);
 428    tcg_gen_mov_i32(QREG_CC_SRC, src);
 429}
 430
 431static inline int opsize_bytes(int opsize)
 432{
 433    switch (opsize) {
 434    case OS_BYTE: return 1;
 435    case OS_WORD: return 2;
 436    case OS_LONG: return 4;
 437    case OS_SINGLE: return 4;
 438    case OS_DOUBLE: return 8;
 439    default:
 440        qemu_assert(0, "bad operand size");
 441        return 0;
 442    }
 443}
 444
 445/* Assign value to a register.  If the width is less than the register width
 446   only the low part of the register is set.  */
 447static void gen_partset_reg(int opsize, TCGv reg, TCGv val)
 448{
 449    TCGv tmp;
 450    switch (opsize) {
 451    case OS_BYTE:
 452        tcg_gen_andi_i32(reg, reg, 0xffffff00);
 453        tmp = tcg_temp_new();
 454        tcg_gen_ext8u_i32(tmp, val);
 455        tcg_gen_or_i32(reg, reg, tmp);
 456        break;
 457    case OS_WORD:
 458        tcg_gen_andi_i32(reg, reg, 0xffff0000);
 459        tmp = tcg_temp_new();
 460        tcg_gen_ext16u_i32(tmp, val);
 461        tcg_gen_or_i32(reg, reg, tmp);
 462        break;
 463    case OS_LONG:
 464    case OS_SINGLE:
 465        tcg_gen_mov_i32(reg, val);
 466        break;
 467    default:
 468        qemu_assert(0, "Bad operand size");
 469        break;
 470    }
 471}
 472
 473/* Sign or zero extend a value.  */
 474static inline TCGv gen_extend(TCGv val, int opsize, int sign)
 475{
 476    TCGv tmp;
 477
 478    switch (opsize) {
 479    case OS_BYTE:
 480        tmp = tcg_temp_new();
 481        if (sign)
 482            tcg_gen_ext8s_i32(tmp, val);
 483        else
 484            tcg_gen_ext8u_i32(tmp, val);
 485        break;
 486    case OS_WORD:
 487        tmp = tcg_temp_new();
 488        if (sign)
 489            tcg_gen_ext16s_i32(tmp, val);
 490        else
 491            tcg_gen_ext16u_i32(tmp, val);
 492        break;
 493    case OS_LONG:
 494    case OS_SINGLE:
 495        tmp = val;
 496        break;
 497    default:
 498        qemu_assert(0, "Bad operand size");
 499    }
 500    return tmp;
 501}
 502
 503/* Generate code for an "effective address".  Does not adjust the base
 504   register for autoincrement addressing modes.  */
 505static TCGv gen_lea(CPUM68KState *env, DisasContext *s, uint16_t insn,
 506                    int opsize)
 507{
 508    TCGv reg;
 509    TCGv tmp;
 510    uint16_t ext;
 511    uint32_t offset;
 512
 513    switch ((insn >> 3) & 7) {
 514    case 0: /* Data register direct.  */
 515    case 1: /* Address register direct.  */
 516        return NULL_QREG;
 517    case 2: /* Indirect register */
 518    case 3: /* Indirect postincrement.  */
 519        return AREG(insn, 0);
  520    case 4: /* Indirect predecrement.  */
 521        reg = AREG(insn, 0);
 522        tmp = tcg_temp_new();
 523        tcg_gen_subi_i32(tmp, reg, opsize_bytes(opsize));
 524        return tmp;
 525    case 5: /* Indirect displacement.  */
 526        reg = AREG(insn, 0);
 527        tmp = tcg_temp_new();
 528        ext = cpu_lduw_code(env, s->pc);
 529        s->pc += 2;
 530        tcg_gen_addi_i32(tmp, reg, (int16_t)ext);
 531        return tmp;
 532    case 6: /* Indirect index + displacement.  */
 533        reg = AREG(insn, 0);
 534        return gen_lea_indexed(env, s, opsize, reg);
 535    case 7: /* Other */
 536        switch (insn & 7) {
 537        case 0: /* Absolute short.  */
 538            offset = cpu_ldsw_code(env, s->pc);
 539            s->pc += 2;
 540            return tcg_const_i32(offset);
 541        case 1: /* Absolute long.  */
 542            offset = read_im32(env, s);
 543            return tcg_const_i32(offset);
 544        case 2: /* pc displacement  */
 545            offset = s->pc;
 546            offset += cpu_ldsw_code(env, s->pc);
 547            s->pc += 2;
 548            return tcg_const_i32(offset);
 549        case 3: /* pc index+displacement.  */
 550            return gen_lea_indexed(env, s, opsize, NULL_QREG);
 551        case 4: /* Immediate.  */
 552        default:
 553            return NULL_QREG;
 554        }
 555    }
 556    /* Should never happen.  */
 557    return NULL_QREG;
 558}
 559
 560/* Helper function for gen_ea. Reuse the computed address between the
  561   read/write operands.  */
 562static inline TCGv gen_ea_once(CPUM68KState *env, DisasContext *s,
 563                               uint16_t insn, int opsize, TCGv val,
 564                               TCGv *addrp, ea_what what)
 565{
 566    TCGv tmp;
 567
 568    if (addrp && what == EA_STORE) {
 569        tmp = *addrp;
 570    } else {
 571        tmp = gen_lea(env, s, insn, opsize);
 572        if (IS_NULL_QREG(tmp))
 573            return tmp;
 574        if (addrp)
 575            *addrp = tmp;
 576    }
 577    return gen_ldst(s, opsize, tmp, val, what);
 578}
 579
  580/* Generate code to load/store a value from/into an EA.  If WHAT is EA_STORE
  581   this is a write, otherwise it is a read (EA_LOADS sign extends, EA_LOADU
  582   zero extends).  ADDRP is non-null for read/write operands.  */
 583static TCGv gen_ea(CPUM68KState *env, DisasContext *s, uint16_t insn,
 584                   int opsize, TCGv val, TCGv *addrp, ea_what what)
 585{
 586    TCGv reg;
 587    TCGv result;
 588    uint32_t offset;
 589
 590    switch ((insn >> 3) & 7) {
 591    case 0: /* Data register direct.  */
 592        reg = DREG(insn, 0);
 593        if (what == EA_STORE) {
 594            gen_partset_reg(opsize, reg, val);
 595            return store_dummy;
 596        } else {
 597            return gen_extend(reg, opsize, what == EA_LOADS);
 598        }
 599    case 1: /* Address register direct.  */
 600        reg = AREG(insn, 0);
 601        if (what == EA_STORE) {
 602            tcg_gen_mov_i32(reg, val);
 603            return store_dummy;
 604        } else {
 605            return gen_extend(reg, opsize, what == EA_LOADS);
 606        }
 607    case 2: /* Indirect register */
 608        reg = AREG(insn, 0);
 609        return gen_ldst(s, opsize, reg, val, what);
 610    case 3: /* Indirect postincrement.  */
 611        reg = AREG(insn, 0);
 612        result = gen_ldst(s, opsize, reg, val, what);
 613        /* ??? This is not exception safe.  The instruction may still
 614           fault after this point.  */
 615        if (what == EA_STORE || !addrp)
 616            tcg_gen_addi_i32(reg, reg, opsize_bytes(opsize));
 617        return result;
  618    case 4: /* Indirect predecrement.  */
 619        {
 620            TCGv tmp;
 621            if (addrp && what == EA_STORE) {
 622                tmp = *addrp;
 623            } else {
 624                tmp = gen_lea(env, s, insn, opsize);
 625                if (IS_NULL_QREG(tmp))
 626                    return tmp;
 627                if (addrp)
 628                    *addrp = tmp;
 629            }
 630            result = gen_ldst(s, opsize, tmp, val, what);
 631            /* ??? This is not exception safe.  The instruction may still
 632               fault after this point.  */
 633            if (what == EA_STORE || !addrp) {
 634                reg = AREG(insn, 0);
 635                tcg_gen_mov_i32(reg, tmp);
 636            }
 637        }
 638        return result;
 639    case 5: /* Indirect displacement.  */
 640    case 6: /* Indirect index + displacement.  */
 641        return gen_ea_once(env, s, insn, opsize, val, addrp, what);
 642    case 7: /* Other */
 643        switch (insn & 7) {
 644        case 0: /* Absolute short.  */
 645        case 1: /* Absolute long.  */
 646        case 2: /* pc displacement  */
 647        case 3: /* pc index+displacement.  */
 648            return gen_ea_once(env, s, insn, opsize, val, addrp, what);
 649        case 4: /* Immediate.  */
 650            /* Sign extend values for consistency.  */
 651            switch (opsize) {
 652            case OS_BYTE:
 653                if (what == EA_LOADS) {
 654                    offset = cpu_ldsb_code(env, s->pc + 1);
 655                } else {
 656                    offset = cpu_ldub_code(env, s->pc + 1);
 657                }
 658                s->pc += 2;
 659                break;
 660            case OS_WORD:
 661                if (what == EA_LOADS) {
 662                    offset = cpu_ldsw_code(env, s->pc);
 663                } else {
 664                    offset = cpu_lduw_code(env, s->pc);
 665                }
 666                s->pc += 2;
 667                break;
 668            case OS_LONG:
 669                offset = read_im32(env, s);
 670                break;
 671            default:
 672                qemu_assert(0, "Bad immediate operand");
 673            }
 674            return tcg_const_i32(offset);
 675        default:
 676            return NULL_QREG;
 677        }
 678    }
 679    /* Should never happen.  */
 680    return NULL_QREG;
 681}
 682
 683/* This generates a conditional branch, clobbering all temporaries.  */
 684static void gen_jmpcc(DisasContext *s, int cond, int l1)
 685{
 686    TCGv tmp;
 687
 688    /* TODO: Optimize compare/branch pairs rather than always flushing
 689       flag state to CC_OP_FLAGS.  */
 690    gen_flush_flags(s);
 691    switch (cond) {
 692    case 0: /* T */
 693        tcg_gen_br(l1);
 694        break;
 695    case 1: /* F */
 696        break;
 697    case 2: /* HI (!C && !Z) */
 698        tmp = tcg_temp_new();
 699        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_C | CCF_Z);
 700        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
 701        break;
 702    case 3: /* LS (C || Z) */
 703        tmp = tcg_temp_new();
 704        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_C | CCF_Z);
 705        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
 706        break;
 707    case 4: /* CC (!C) */
 708        tmp = tcg_temp_new();
 709        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_C);
 710        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
 711        break;
 712    case 5: /* CS (C) */
 713        tmp = tcg_temp_new();
 714        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_C);
 715        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
 716        break;
 717    case 6: /* NE (!Z) */
 718        tmp = tcg_temp_new();
 719        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_Z);
 720        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
 721        break;
 722    case 7: /* EQ (Z) */
 723        tmp = tcg_temp_new();
 724        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_Z);
 725        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
 726        break;
 727    case 8: /* VC (!V) */
 728        tmp = tcg_temp_new();
 729        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_V);
 730        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
 731        break;
 732    case 9: /* VS (V) */
 733        tmp = tcg_temp_new();
 734        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_V);
 735        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
 736        break;
 737    case 10: /* PL (!N) */
 738        tmp = tcg_temp_new();
 739        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_N);
 740        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
 741        break;
 742    case 11: /* MI (N) */
 743        tmp = tcg_temp_new();
 744        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_N);
 745        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
 746        break;
 747    case 12: /* GE (!(N ^ V)) */
 748        tmp = tcg_temp_new();
 749        assert(CCF_V == (CCF_N >> 2));
 750        tcg_gen_shri_i32(tmp, QREG_CC_DEST, 2);
 751        tcg_gen_xor_i32(tmp, tmp, QREG_CC_DEST);
 752        tcg_gen_andi_i32(tmp, tmp, CCF_V);
 753        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
 754        break;
 755    case 13: /* LT (N ^ V) */
 756        tmp = tcg_temp_new();
 757        assert(CCF_V == (CCF_N >> 2));
 758        tcg_gen_shri_i32(tmp, QREG_CC_DEST, 2);
 759        tcg_gen_xor_i32(tmp, tmp, QREG_CC_DEST);
 760        tcg_gen_andi_i32(tmp, tmp, CCF_V);
 761        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
 762        break;
 763    case 14: /* GT (!(Z || (N ^ V))) */
 764        tmp = tcg_temp_new();
 765        assert(CCF_V == (CCF_N >> 2));
 766        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_N);
 767        tcg_gen_shri_i32(tmp, tmp, 2);
 768        tcg_gen_xor_i32(tmp, tmp, QREG_CC_DEST);
 769        tcg_gen_andi_i32(tmp, tmp, CCF_V | CCF_Z);
 770        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1);
 771        break;
 772    case 15: /* LE (Z || (N ^ V)) */
 773        tmp = tcg_temp_new();
 774        assert(CCF_V == (CCF_N >> 2));
 775        tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_N);
 776        tcg_gen_shri_i32(tmp, tmp, 2);
 777        tcg_gen_xor_i32(tmp, tmp, QREG_CC_DEST);
 778        tcg_gen_andi_i32(tmp, tmp, CCF_V | CCF_Z);
 779        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1);
 780        break;
 781    default:
  782        /* Should never happen.  */
 783        abort();
 784    }
 785}
 786
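     /* Scc: set the low byte of Dn to all ones if the condition holds, else to zero.  */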
 787DISAS_INSN(scc)
 788{
 789    int l1;
 790    int cond;
 791    TCGv reg;
 792
 793    l1 = gen_new_label();
 794    cond = (insn >> 8) & 0xf;
 795    reg = DREG(insn, 0);
 796    tcg_gen_andi_i32(reg, reg, 0xffffff00);
 797    /* This is safe because we modify the reg directly, with no other values
 798       live.  */
 799    gen_jmpcc(s, cond ^ 1, l1);
 800    tcg_gen_ori_i32(reg, reg, 0xff);
 801    gen_set_label(l1);
 802}
 803
 804/* Force a TB lookup after an instruction that changes the CPU state.  */
 805static void gen_lookup_tb(DisasContext *s)
 806{
 807    gen_flush_cc_op(s);
 808    tcg_gen_movi_i32(QREG_PC, s->pc);
 809    s->is_jmp = DISAS_UPDATE;
 810}
 811
 812/* Generate a jump to an immediate address.  */
 813static void gen_jmp_im(DisasContext *s, uint32_t dest)
 814{
 815    gen_flush_cc_op(s);
 816    tcg_gen_movi_i32(QREG_PC, dest);
 817    s->is_jmp = DISAS_JUMP;
 818}
 819
 820/* Generate a jump to the address in qreg DEST.  */
 821static void gen_jmp(DisasContext *s, TCGv dest)
 822{
 823    gen_flush_cc_op(s);
 824    tcg_gen_mov_i32(QREG_PC, dest);
 825    s->is_jmp = DISAS_JUMP;
 826}
 827
 828static void gen_exception(DisasContext *s, uint32_t where, int nr)
 829{
 830    gen_flush_cc_op(s);
 831    gen_jmp_im(s, where);
 832    gen_helper_raise_exception(cpu_env, tcg_const_i32(nr));
 833}
 834
 835static inline void gen_addr_fault(DisasContext *s)
 836{
 837    gen_exception(s, s->insn_pc, EXCP_ADDRESS);
 838}
 839
 840#define SRC_EA(env, result, opsize, op_sign, addrp) do {                \
 841        result = gen_ea(env, s, insn, opsize, NULL_QREG, addrp,         \
 842                        op_sign ? EA_LOADS : EA_LOADU);                 \
 843        if (IS_NULL_QREG(result)) {                                     \
 844            gen_addr_fault(s);                                          \
 845            return;                                                     \
 846        }                                                               \
 847    } while (0)
 848
 849#define DEST_EA(env, insn, opsize, val, addrp) do {                     \
 850        TCGv ea_result = gen_ea(env, s, insn, opsize, val, addrp, EA_STORE); \
 851        if (IS_NULL_QREG(ea_result)) {                                  \
 852            gen_addr_fault(s);                                          \
 853            return;                                                     \
 854        }                                                               \
 855    } while (0)
 856
  857/* Generate a jump to an immediate address, chaining TBs when possible.  */
 858static void gen_jmp_tb(DisasContext *s, int n, uint32_t dest)
 859{
 860    TranslationBlock *tb;
 861
 862    tb = s->tb;
 863    if (unlikely(s->singlestep_enabled)) {
 864        gen_exception(s, dest, EXCP_DEBUG);
 865    } else if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
 866               (s->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
 867        tcg_gen_goto_tb(n);
 868        tcg_gen_movi_i32(QREG_PC, dest);
 869        tcg_gen_exit_tb((uintptr_t)tb + n);
 870    } else {
 871        gen_jmp_im(s, dest);
 872        tcg_gen_exit_tb(0);
 873    }
 874    s->is_jmp = DISAS_TB_JUMP;
 875}
 876
 877DISAS_INSN(undef_mac)
 878{
 879    gen_exception(s, s->pc - 2, EXCP_LINEA);
 880}
 881
 882DISAS_INSN(undef_fpu)
 883{
 884    gen_exception(s, s->pc - 2, EXCP_LINEF);
 885}
 886
 887DISAS_INSN(undef)
 888{
 889    gen_exception(s, s->pc - 2, EXCP_UNSUPPORTED);
 890    cpu_abort(env, "Illegal instruction: %04x @ %08x", insn, s->pc - 2);
 891}
 892
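     /* mulu.w/muls.w: multiply the 16-bit source by the low word of Dn, 32-bit result in Dn.  */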
 893DISAS_INSN(mulw)
 894{
 895    TCGv reg;
 896    TCGv tmp;
 897    TCGv src;
 898    int sign;
 899
 900    sign = (insn & 0x100) != 0;
 901    reg = DREG(insn, 9);
 902    tmp = tcg_temp_new();
 903    if (sign)
 904        tcg_gen_ext16s_i32(tmp, reg);
 905    else
 906        tcg_gen_ext16u_i32(tmp, reg);
 907    SRC_EA(env, src, OS_WORD, sign, NULL);
 908    tcg_gen_mul_i32(tmp, tmp, src);
 909    tcg_gen_mov_i32(reg, tmp);
 910    /* Unlike m68k, coldfire always clears the overflow bit.  */
 911    gen_logic_cc(s, tmp);
 912}
 913
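     /* divu.w/divs.w: word divide via the DIV1/DIV2 helper registers; the two
        result halves are packed back into Dn (DIV1 in the low word, DIV2 in
        the high word).  */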
 914DISAS_INSN(divw)
 915{
 916    TCGv reg;
 917    TCGv tmp;
 918    TCGv src;
 919    int sign;
 920
 921    sign = (insn & 0x100) != 0;
 922    reg = DREG(insn, 9);
 923    if (sign) {
 924        tcg_gen_ext16s_i32(QREG_DIV1, reg);
 925    } else {
 926        tcg_gen_ext16u_i32(QREG_DIV1, reg);
 927    }
 928    SRC_EA(env, src, OS_WORD, sign, NULL);
 929    tcg_gen_mov_i32(QREG_DIV2, src);
 930    if (sign) {
 931        gen_helper_divs(cpu_env, tcg_const_i32(1));
 932    } else {
 933        gen_helper_divu(cpu_env, tcg_const_i32(1));
 934    }
 935
 936    tmp = tcg_temp_new();
 937    src = tcg_temp_new();
 938    tcg_gen_ext16u_i32(tmp, QREG_DIV1);
 939    tcg_gen_shli_i32(src, QREG_DIV2, 16);
 940    tcg_gen_or_i32(reg, tmp, src);
 941    s->cc_op = CC_OP_FLAGS;
 942}
 943
 944DISAS_INSN(divl)
 945{
 946    TCGv num;
 947    TCGv den;
 948    TCGv reg;
 949    uint16_t ext;
 950
 951    ext = cpu_lduw_code(env, s->pc);
 952    s->pc += 2;
 953    if (ext & 0x87f8) {
 954        gen_exception(s, s->pc - 4, EXCP_UNSUPPORTED);
 955        return;
 956    }
 957    num = DREG(ext, 12);
 958    reg = DREG(ext, 0);
 959    tcg_gen_mov_i32(QREG_DIV1, num);
 960    SRC_EA(env, den, OS_LONG, 0, NULL);
 961    tcg_gen_mov_i32(QREG_DIV2, den);
 962    if (ext & 0x0800) {
 963        gen_helper_divs(cpu_env, tcg_const_i32(0));
 964    } else {
 965        gen_helper_divu(cpu_env, tcg_const_i32(0));
 966    }
 967    if ((ext & 7) == ((ext >> 12) & 7)) {
 968        /* div */
 969        tcg_gen_mov_i32 (reg, QREG_DIV1);
 970    } else {
 971        /* rem */
 972        tcg_gen_mov_i32 (reg, QREG_DIV2);
 973    }
 974    s->cc_op = CC_OP_FLAGS;
 975}
 976
 977DISAS_INSN(addsub)
 978{
 979    TCGv reg;
 980    TCGv dest;
 981    TCGv src;
 982    TCGv tmp;
 983    TCGv addr;
 984    int add;
 985
 986    add = (insn & 0x4000) != 0;
 987    reg = DREG(insn, 9);
 988    dest = tcg_temp_new();
 989    if (insn & 0x100) {
 990        SRC_EA(env, tmp, OS_LONG, 0, &addr);
 991        src = reg;
 992    } else {
 993        tmp = reg;
 994        SRC_EA(env, src, OS_LONG, 0, NULL);
 995    }
 996    if (add) {
 997        tcg_gen_add_i32(dest, tmp, src);
 998        gen_helper_xflag_lt(QREG_CC_X, dest, src);
 999        s->cc_op = CC_OP_ADD;
1000    } else {
1001        gen_helper_xflag_lt(QREG_CC_X, tmp, src);
1002        tcg_gen_sub_i32(dest, tmp, src);
1003        s->cc_op = CC_OP_SUB;
1004    }
1005    gen_update_cc_add(dest, src);
1006    if (insn & 0x100) {
1007        DEST_EA(env, insn, OS_LONG, dest, &addr);
1008    } else {
1009        tcg_gen_mov_i32(reg, dest);
1010    }
1011}
1012
1013
1014/* Reverse the order of the bits in REG.  */
1015DISAS_INSN(bitrev)
1016{
1017    TCGv reg;
1018    reg = DREG(insn, 0);
1019    gen_helper_bitrev(reg, reg);
1020}
1021
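     /* btst/bchg/bclr/bset with the bit number taken from a data register.  */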
1022DISAS_INSN(bitop_reg)
1023{
1024    int opsize;
1025    int op;
1026    TCGv src1;
1027    TCGv src2;
1028    TCGv tmp;
1029    TCGv addr;
1030    TCGv dest;
1031
1032    if ((insn & 0x38) != 0)
1033        opsize = OS_BYTE;
1034    else
1035        opsize = OS_LONG;
1036    op = (insn >> 6) & 3;
1037    SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);
1038    src2 = DREG(insn, 9);
1039    dest = tcg_temp_new();
1040
1041    gen_flush_flags(s);
1042    tmp = tcg_temp_new();
1043    if (opsize == OS_BYTE)
1044        tcg_gen_andi_i32(tmp, src2, 7);
1045    else
1046        tcg_gen_andi_i32(tmp, src2, 31);
1047    src2 = tmp;
1048    tmp = tcg_temp_new();
1049    tcg_gen_shr_i32(tmp, src1, src2);
1050    tcg_gen_andi_i32(tmp, tmp, 1);
1051    tcg_gen_shli_i32(tmp, tmp, 2);
1052    /* Clear CCF_Z if bit set.  */
1053    tcg_gen_ori_i32(QREG_CC_DEST, QREG_CC_DEST, CCF_Z);
1054    tcg_gen_xor_i32(QREG_CC_DEST, QREG_CC_DEST, tmp);
1055
1056    tcg_gen_shl_i32(tmp, tcg_const_i32(1), src2);
1057    switch (op) {
1058    case 1: /* bchg */
1059        tcg_gen_xor_i32(dest, src1, tmp);
1060        break;
1061    case 2: /* bclr */
1062        tcg_gen_not_i32(tmp, tmp);
1063        tcg_gen_and_i32(dest, src1, tmp);
1064        break;
1065    case 3: /* bset */
1066        tcg_gen_or_i32(dest, src1, tmp);
1067        break;
1068    default: /* btst */
1069        break;
1070    }
1071    if (op)
1072        DEST_EA(env, insn, opsize, dest, &addr);
1073}
1074
1075DISAS_INSN(sats)
1076{
1077    TCGv reg;
1078    reg = DREG(insn, 0);
1079    gen_flush_flags(s);
1080    gen_helper_sats(reg, reg, QREG_CC_DEST);
1081    gen_logic_cc(s, reg);
1082}
1083
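     /* Push a longword onto the stack, predecrementing SP.  */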
1084static void gen_push(DisasContext *s, TCGv val)
1085{
1086    TCGv tmp;
1087
1088    tmp = tcg_temp_new();
1089    tcg_gen_subi_i32(tmp, QREG_SP, 4);
1090    gen_store(s, OS_LONG, tmp, val);
1091    tcg_gen_mov_i32(QREG_SP, tmp);
1092}
1093
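     /* movem: load or store the set of registers selected by the mask word.  */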
1094DISAS_INSN(movem)
1095{
1096    TCGv addr;
1097    int i;
1098    uint16_t mask;
1099    TCGv reg;
1100    TCGv tmp;
1101    int is_load;
1102
1103    mask = cpu_lduw_code(env, s->pc);
1104    s->pc += 2;
1105    tmp = gen_lea(env, s, insn, OS_LONG);
1106    if (IS_NULL_QREG(tmp)) {
1107        gen_addr_fault(s);
1108        return;
1109    }
1110    addr = tcg_temp_new();
1111    tcg_gen_mov_i32(addr, tmp);
1112    is_load = ((insn & 0x0400) != 0);
1113    for (i = 0; i < 16; i++, mask >>= 1) {
1114        if (mask & 1) {
1115            if (i < 8)
1116                reg = DREG(i, 0);
1117            else
1118                reg = AREG(i, 0);
1119            if (is_load) {
1120                tmp = gen_load(s, OS_LONG, addr, 0);
1121                tcg_gen_mov_i32(reg, tmp);
1122            } else {
1123                gen_store(s, OS_LONG, addr, reg);
1124            }
1125            if (mask != 1)
1126                tcg_gen_addi_i32(addr, addr, 4);
1127        }
1128    }
1129}
1130
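     /* btst/bchg/bclr/bset with an immediate bit number.  */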
1131DISAS_INSN(bitop_im)
1132{
1133    int opsize;
1134    int op;
1135    TCGv src1;
1136    uint32_t mask;
1137    int bitnum;
1138    TCGv tmp;
1139    TCGv addr;
1140
1141    if ((insn & 0x38) != 0)
1142        opsize = OS_BYTE;
1143    else
1144        opsize = OS_LONG;
1145    op = (insn >> 6) & 3;
1146
1147    bitnum = cpu_lduw_code(env, s->pc);
1148    s->pc += 2;
1149    if (bitnum & 0xff00) {
1150        disas_undef(env, s, insn);
1151        return;
1152    }
1153
1154    SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);
1155
1156    gen_flush_flags(s);
1157    if (opsize == OS_BYTE)
1158        bitnum &= 7;
1159    else
1160        bitnum &= 31;
1161    mask = 1 << bitnum;
1162
1163    tmp = tcg_temp_new();
1164    assert (CCF_Z == (1 << 2));
1165    if (bitnum > 2)
1166        tcg_gen_shri_i32(tmp, src1, bitnum - 2);
1167    else if (bitnum < 2)
1168        tcg_gen_shli_i32(tmp, src1, 2 - bitnum);
1169    else
1170        tcg_gen_mov_i32(tmp, src1);
1171    tcg_gen_andi_i32(tmp, tmp, CCF_Z);
1172    /* Clear CCF_Z if bit set.  */
1173    tcg_gen_ori_i32(QREG_CC_DEST, QREG_CC_DEST, CCF_Z);
1174    tcg_gen_xor_i32(QREG_CC_DEST, QREG_CC_DEST, tmp);
1175    if (op) {
1176        switch (op) {
1177        case 1: /* bchg */
1178            tcg_gen_xori_i32(tmp, src1, mask);
1179            break;
1180        case 2: /* bclr */
1181            tcg_gen_andi_i32(tmp, src1, ~mask);
1182            break;
1183        case 3: /* bset */
1184            tcg_gen_ori_i32(tmp, src1, mask);
1185            break;
1186        default: /* btst */
1187            break;
1188        }
1189        DEST_EA(env, insn, opsize, tmp, &addr);
1190    }
1191}
1192
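     /* ori/andi/subi/addi/eori/cmpi with a 32-bit immediate operand.  */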
1193DISAS_INSN(arith_im)
1194{
1195    int op;
1196    uint32_t im;
1197    TCGv src1;
1198    TCGv dest;
1199    TCGv addr;
1200
1201    op = (insn >> 9) & 7;
1202    SRC_EA(env, src1, OS_LONG, 0, (op == 6) ? NULL : &addr);
1203    im = read_im32(env, s);
1204    dest = tcg_temp_new();
1205    switch (op) {
1206    case 0: /* ori */
1207        tcg_gen_ori_i32(dest, src1, im);
1208        gen_logic_cc(s, dest);
1209        break;
1210    case 1: /* andi */
1211        tcg_gen_andi_i32(dest, src1, im);
1212        gen_logic_cc(s, dest);
1213        break;
1214    case 2: /* subi */
1215        tcg_gen_mov_i32(dest, src1);
1216        gen_helper_xflag_lt(QREG_CC_X, dest, tcg_const_i32(im));
1217        tcg_gen_subi_i32(dest, dest, im);
1218        gen_update_cc_add(dest, tcg_const_i32(im));
1219        s->cc_op = CC_OP_SUB;
1220        break;
1221    case 3: /* addi */
1222        tcg_gen_mov_i32(dest, src1);
1223        tcg_gen_addi_i32(dest, dest, im);
1224        gen_update_cc_add(dest, tcg_const_i32(im));
1225        gen_helper_xflag_lt(QREG_CC_X, dest, tcg_const_i32(im));
1226        s->cc_op = CC_OP_ADD;
1227        break;
1228    case 5: /* eori */
1229        tcg_gen_xori_i32(dest, src1, im);
1230        gen_logic_cc(s, dest);
1231        break;
1232    case 6: /* cmpi */
1233        tcg_gen_mov_i32(dest, src1);
1234        tcg_gen_subi_i32(dest, dest, im);
1235        gen_update_cc_add(dest, tcg_const_i32(im));
1236        s->cc_op = CC_OP_SUB;
1237        break;
1238    default:
1239        abort();
1240    }
1241    if (op != 6) {
1242        DEST_EA(env, insn, OS_LONG, dest, &addr);
1243    }
1244}
1245
1246DISAS_INSN(byterev)
1247{
1248    TCGv reg;
1249
1250    reg = DREG(insn, 0);
1251    tcg_gen_bswap32_i32(reg, reg);
1252}
1253
1254DISAS_INSN(move)
1255{
1256    TCGv src;
1257    TCGv dest;
1258    int op;
1259    int opsize;
1260
1261    switch (insn >> 12) {
1262    case 1: /* move.b */
1263        opsize = OS_BYTE;
1264        break;
1265    case 2: /* move.l */
1266        opsize = OS_LONG;
1267        break;
1268    case 3: /* move.w */
1269        opsize = OS_WORD;
1270        break;
1271    default:
1272        abort();
1273    }
1274    SRC_EA(env, src, opsize, 1, NULL);
1275    op = (insn >> 6) & 7;
1276    if (op == 1) {
1277        /* movea */
1278        /* The value will already have been sign extended.  */
1279        dest = AREG(insn, 9);
1280        tcg_gen_mov_i32(dest, src);
1281    } else {
1282        /* normal move */
1283        uint16_t dest_ea;
1284        dest_ea = ((insn >> 9) & 7) | (op << 3);
1285        DEST_EA(env, dest_ea, opsize, src, NULL);
1286        /* This will be correct because loads sign extend.  */
1287        gen_logic_cc(s, src);
1288    }
1289}
1290
1291DISAS_INSN(negx)
1292{
1293    TCGv reg;
1294
1295    gen_flush_flags(s);
1296    reg = DREG(insn, 0);
1297    gen_helper_subx_cc(reg, cpu_env, tcg_const_i32(0), reg);
1298}
1299
1300DISAS_INSN(lea)
1301{
1302    TCGv reg;
1303    TCGv tmp;
1304
1305    reg = AREG(insn, 9);
1306    tmp = gen_lea(env, s, insn, OS_LONG);
1307    if (IS_NULL_QREG(tmp)) {
1308        gen_addr_fault(s);
1309        return;
1310    }
1311    tcg_gen_mov_i32(reg, tmp);
1312}
1313
1314DISAS_INSN(clr)
1315{
1316    int opsize;
1317
1318    switch ((insn >> 6) & 3) {
1319    case 0: /* clr.b */
1320        opsize = OS_BYTE;
1321        break;
1322    case 1: /* clr.w */
1323        opsize = OS_WORD;
1324        break;
1325    case 2: /* clr.l */
1326        opsize = OS_LONG;
1327        break;
1328    default:
1329        abort();
1330    }
1331    DEST_EA(env, insn, opsize, tcg_const_i32(0), NULL);
1332    gen_logic_cc(s, tcg_const_i32(0));
1333}
1334
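     /* Assemble the CCR value from QREG_CC_X and the evaluated NZVC flags.  */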
1335static TCGv gen_get_ccr(DisasContext *s)
1336{
1337    TCGv dest;
1338
1339    gen_flush_flags(s);
1340    dest = tcg_temp_new();
1341    tcg_gen_shli_i32(dest, QREG_CC_X, 4);
1342    tcg_gen_or_i32(dest, dest, QREG_CC_DEST);
1343    return dest;
1344}
1345
1346DISAS_INSN(move_from_ccr)
1347{
1348    TCGv reg;
1349    TCGv ccr;
1350
1351    ccr = gen_get_ccr(s);
1352    reg = DREG(insn, 0);
1353    gen_partset_reg(OS_WORD, reg, ccr);
1354}
1355
1356DISAS_INSN(neg)
1357{
1358    TCGv reg;
1359    TCGv src1;
1360
1361    reg = DREG(insn, 0);
1362    src1 = tcg_temp_new();
1363    tcg_gen_mov_i32(src1, reg);
1364    tcg_gen_neg_i32(reg, src1);
1365    s->cc_op = CC_OP_SUB;
1366    gen_update_cc_add(reg, src1);
1367    gen_helper_xflag_lt(QREG_CC_X, tcg_const_i32(0), src1);
1368    s->cc_op = CC_OP_SUB;
1369}
1370
1371static void gen_set_sr_im(DisasContext *s, uint16_t val, int ccr_only)
1372{
1373    tcg_gen_movi_i32(QREG_CC_DEST, val & 0xf);
1374    tcg_gen_movi_i32(QREG_CC_X, (val & 0x10) >> 4);
1375    if (!ccr_only) {
1376        gen_helper_set_sr(cpu_env, tcg_const_i32(val & 0xff00));
1377    }
1378}
1379
1380static void gen_set_sr(CPUM68KState *env, DisasContext *s, uint16_t insn,
1381                       int ccr_only)
1382{
1383    TCGv tmp;
1384    TCGv reg;
1385
1386    s->cc_op = CC_OP_FLAGS;
1387    if ((insn & 0x38) == 0)
1388      {
1389        tmp = tcg_temp_new();
1390        reg = DREG(insn, 0);
1391        tcg_gen_andi_i32(QREG_CC_DEST, reg, 0xf);
1392        tcg_gen_shri_i32(tmp, reg, 4);
1393        tcg_gen_andi_i32(QREG_CC_X, tmp, 1);
1394        if (!ccr_only) {
1395            gen_helper_set_sr(cpu_env, reg);
1396        }
1397      }
1398    else if ((insn & 0x3f) == 0x3c)
1399      {
1400        uint16_t val;
1401        val = cpu_lduw_code(env, s->pc);
1402        s->pc += 2;
1403        gen_set_sr_im(s, val, ccr_only);
1404      }
1405    else
1406        disas_undef(env, s, insn);
1407}
1408
1409DISAS_INSN(move_to_ccr)
1410{
1411    gen_set_sr(env, s, insn, 1);
1412}
1413
1414DISAS_INSN(not)
1415{
1416    TCGv reg;
1417
1418    reg = DREG(insn, 0);
1419    tcg_gen_not_i32(reg, reg);
1420    gen_logic_cc(s, reg);
1421}
1422
1423DISAS_INSN(swap)
1424{
1425    TCGv src1;
1426    TCGv src2;
1427    TCGv reg;
1428
1429    src1 = tcg_temp_new();
1430    src2 = tcg_temp_new();
1431    reg = DREG(insn, 0);
1432    tcg_gen_shli_i32(src1, reg, 16);
1433    tcg_gen_shri_i32(src2, reg, 16);
1434    tcg_gen_or_i32(reg, src1, src2);
1435    gen_logic_cc(s, reg);
1436}
1437
1438DISAS_INSN(pea)
1439{
1440    TCGv tmp;
1441
1442    tmp = gen_lea(env, s, insn, OS_LONG);
1443    if (IS_NULL_QREG(tmp)) {
1444        gen_addr_fault(s);
1445        return;
1446    }
1447    gen_push(s, tmp);
1448}
1449
1450DISAS_INSN(ext)
1451{
1452    int op;
1453    TCGv reg;
1454    TCGv tmp;
1455
1456    reg = DREG(insn, 0);
1457    op = (insn >> 6) & 7;
1458    tmp = tcg_temp_new();
1459    if (op == 3)
1460        tcg_gen_ext16s_i32(tmp, reg);
1461    else
1462        tcg_gen_ext8s_i32(tmp, reg);
1463    if (op == 2)
1464        gen_partset_reg(OS_WORD, reg, tmp);
1465    else
1466        tcg_gen_mov_i32(reg, tmp);
1467    gen_logic_cc(s, tmp);
1468}
1469
1470DISAS_INSN(tst)
1471{
1472    int opsize;
1473    TCGv tmp;
1474
1475    switch ((insn >> 6) & 3) {
1476    case 0: /* tst.b */
1477        opsize = OS_BYTE;
1478        break;
1479    case 1: /* tst.w */
1480        opsize = OS_WORD;
1481        break;
1482    case 2: /* tst.l */
1483        opsize = OS_LONG;
1484        break;
1485    default:
1486        abort();
1487    }
1488    SRC_EA(env, tmp, opsize, 1, NULL);
1489    gen_logic_cc(s, tmp);
1490}
1491
1492DISAS_INSN(pulse)
1493{
1494  /* Implemented as a NOP.  */
1495}
1496
1497DISAS_INSN(illegal)
1498{
1499    gen_exception(s, s->pc - 2, EXCP_ILLEGAL);
1500}
1501
1502/* ??? This should be atomic.  */
1503DISAS_INSN(tas)
1504{
1505    TCGv dest;
1506    TCGv src1;
1507    TCGv addr;
1508
1509    dest = tcg_temp_new();
1510    SRC_EA(env, src1, OS_BYTE, 1, &addr);
1511    gen_logic_cc(s, src1);
1512    tcg_gen_ori_i32(dest, src1, 0x80);
1513    DEST_EA(env, insn, OS_BYTE, dest, &addr);
1514}
1515
1516DISAS_INSN(mull)
1517{
1518    uint16_t ext;
1519    TCGv reg;
1520    TCGv src1;
1521    TCGv dest;
1522
1523    /* The upper 32 bits of the product are discarded, so
1524       muls.l and mulu.l are functionally equivalent.  */
1525    ext = cpu_lduw_code(env, s->pc);
1526    s->pc += 2;
1527    if (ext & 0x87ff) {
1528        gen_exception(s, s->pc - 4, EXCP_UNSUPPORTED);
1529        return;
1530    }
1531    reg = DREG(ext, 12);
1532    SRC_EA(env, src1, OS_LONG, 0, NULL);
1533    dest = tcg_temp_new();
1534    tcg_gen_mul_i32(dest, src1, reg);
1535    tcg_gen_mov_i32(reg, dest);
1536    /* Unlike m68k, coldfire always clears the overflow bit.  */
1537    gen_logic_cc(s, dest);
1538}
1539
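     /* link: push An, set An to the updated SP, then add the 16-bit displacement to SP.  */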
1540DISAS_INSN(link)
1541{
1542    int16_t offset;
1543    TCGv reg;
1544    TCGv tmp;
1545
1546    offset = cpu_ldsw_code(env, s->pc);
1547    s->pc += 2;
1548    reg = AREG(insn, 0);
1549    tmp = tcg_temp_new();
1550    tcg_gen_subi_i32(tmp, QREG_SP, 4);
1551    gen_store(s, OS_LONG, tmp, reg);
1552    if ((insn & 7) != 7)
1553        tcg_gen_mov_i32(reg, tmp);
1554    tcg_gen_addi_i32(QREG_SP, tmp, offset);
1555}
1556
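     /* unlk: reload An from the address it points to and restore SP past the saved value.  */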
1557DISAS_INSN(unlk)
1558{
1559    TCGv src;
1560    TCGv reg;
1561    TCGv tmp;
1562
1563    src = tcg_temp_new();
1564    reg = AREG(insn, 0);
1565    tcg_gen_mov_i32(src, reg);
1566    tmp = gen_load(s, OS_LONG, src, 0);
1567    tcg_gen_mov_i32(reg, tmp);
1568    tcg_gen_addi_i32(QREG_SP, src, 4);
1569}
1570
1571DISAS_INSN(nop)
1572{
1573}
1574
1575DISAS_INSN(rts)
1576{
1577    TCGv tmp;
1578
1579    tmp = gen_load(s, OS_LONG, QREG_SP, 0);
1580    tcg_gen_addi_i32(QREG_SP, QREG_SP, 4);
1581    gen_jmp(s, tmp);
1582}
1583
1584DISAS_INSN(jump)
1585{
1586    TCGv tmp;
1587
1588    /* Load the target address first to ensure correct exception
1589       behavior.  */
1590    tmp = gen_lea(env, s, insn, OS_LONG);
1591    if (IS_NULL_QREG(tmp)) {
1592        gen_addr_fault(s);
1593        return;
1594    }
1595    if ((insn & 0x40) == 0) {
1596        /* jsr */
1597        gen_push(s, tcg_const_i32(s->pc));
1598    }
1599    gen_jmp(s, tmp);
1600}
1601
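     /* addq/subq: add or subtract a quick immediate in the range 1-8 (0 encodes 8).  */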
1602DISAS_INSN(addsubq)
1603{
1604    TCGv src1;
1605    TCGv src2;
1606    TCGv dest;
1607    int val;
1608    TCGv addr;
1609
1610    SRC_EA(env, src1, OS_LONG, 0, &addr);
1611    val = (insn >> 9) & 7;
1612    if (val == 0)
1613        val = 8;
1614    dest = tcg_temp_new();
1615    tcg_gen_mov_i32(dest, src1);
1616    if ((insn & 0x38) == 0x08) {
1617        /* Don't update condition codes if the destination is an
1618           address register.  */
1619        if (insn & 0x0100) {
1620            tcg_gen_subi_i32(dest, dest, val);
1621        } else {
1622            tcg_gen_addi_i32(dest, dest, val);
1623        }
1624    } else {
1625        src2 = tcg_const_i32(val);
1626        if (insn & 0x0100) {
1627            gen_helper_xflag_lt(QREG_CC_X, dest, src2);
1628            tcg_gen_subi_i32(dest, dest, val);
1629            s->cc_op = CC_OP_SUB;
1630        } else {
1631            tcg_gen_addi_i32(dest, dest, val);
1632            gen_helper_xflag_lt(QREG_CC_X, dest, src2);
1633            s->cc_op = CC_OP_ADD;
1634        }
1635        gen_update_cc_add(dest, src2);
1636    }
1637    DEST_EA(env, insn, OS_LONG, dest, &addr);
1638}
1639
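     /* tpf (trapf): never traps; just skip any extension words.  */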
1640DISAS_INSN(tpf)
1641{
1642    switch (insn & 7) {
1643    case 2: /* One extension word.  */
1644        s->pc += 2;
1645        break;
1646    case 3: /* Two extension words.  */
1647        s->pc += 4;
1648        break;
1649    case 4: /* No extension words.  */
1650        break;
1651    default:
1652        disas_undef(env, s, insn);
1653    }
1654}
1655
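     /* bra/bsr/Bcc with an 8-bit, 16-bit (0x00) or 32-bit (0xff) inline displacement.  */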
1656DISAS_INSN(branch)
1657{
1658    int32_t offset;
1659    uint32_t base;
1660    int op;
1661    int l1;
1662
1663    base = s->pc;
1664    op = (insn >> 8) & 0xf;
1665    offset = (int8_t)insn;
1666    if (offset == 0) {
1667        offset = cpu_ldsw_code(env, s->pc);
1668        s->pc += 2;
1669    } else if (offset == -1) {
1670        offset = read_im32(env, s);
1671    }
1672    if (op == 1) {
1673        /* bsr */
1674        gen_push(s, tcg_const_i32(s->pc));
1675    }
1676    gen_flush_cc_op(s);
1677    if (op > 1) {
1678        /* Bcc */
1679        l1 = gen_new_label();
1680        gen_jmpcc(s, ((insn >> 8) & 0xf) ^ 1, l1);
1681        gen_jmp_tb(s, 1, base + offset);
1682        gen_set_label(l1);
1683        gen_jmp_tb(s, 0, s->pc);
1684    } else {
1685        /* Unconditional branch.  */
1686        gen_jmp_tb(s, 0, base + offset);
1687    }
1688}
1689
1690DISAS_INSN(moveq)
1691{
1692    uint32_t val;
1693
1694    val = (int8_t)insn;
1695    tcg_gen_movi_i32(DREG(insn, 9), val);
1696    gen_logic_cc(s, tcg_const_i32(val));
1697}
1698
1699DISAS_INSN(mvzs)
1700{
1701    int opsize;
1702    TCGv src;
1703    TCGv reg;
1704
1705    if (insn & 0x40)
1706        opsize = OS_WORD;
1707    else
1708        opsize = OS_BYTE;
1709    SRC_EA(env, src, opsize, (insn & 0x80) == 0, NULL);
1710    reg = DREG(insn, 9);
1711    tcg_gen_mov_i32(reg, src);
1712    gen_logic_cc(s, src);
1713}
1714
1715DISAS_INSN(or)
1716{
1717    TCGv reg;
1718    TCGv dest;
1719    TCGv src;
1720    TCGv addr;
1721
1722    reg = DREG(insn, 9);
1723    dest = tcg_temp_new();
1724    if (insn & 0x100) {
1725        SRC_EA(env, src, OS_LONG, 0, &addr);
1726        tcg_gen_or_i32(dest, src, reg);
1727        DEST_EA(env, insn, OS_LONG, dest, &addr);
1728    } else {
1729        SRC_EA(env, src, OS_LONG, 0, NULL);
1730        tcg_gen_or_i32(dest, src, reg);
1731        tcg_gen_mov_i32(reg, dest);
1732    }
1733    gen_logic_cc(s, dest);
1734}
1735
1736DISAS_INSN(suba)
1737{
1738    TCGv src;
1739    TCGv reg;
1740
1741    SRC_EA(env, src, OS_LONG, 0, NULL);
1742    reg = AREG(insn, 9);
1743    tcg_gen_sub_i32(reg, reg, src);
1744}
1745
1746DISAS_INSN(subx)
1747{
1748    TCGv reg;
1749    TCGv src;
1750
1751    gen_flush_flags(s);
1752    reg = DREG(insn, 9);
1753    src = DREG(insn, 0);
1754    gen_helper_subx_cc(reg, cpu_env, reg, src);
1755}
1756
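     /* mov3q: move a 3-bit immediate, with 0 encoding -1, to the destination EA.  */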
1757DISAS_INSN(mov3q)
1758{
1759    TCGv src;
1760    int val;
1761
1762    val = (insn >> 9) & 7;
1763    if (val == 0)
1764        val = -1;
1765    src = tcg_const_i32(val);
1766    gen_logic_cc(s, src);
1767    DEST_EA(env, insn, OS_LONG, src, NULL);
1768}
1769
1770DISAS_INSN(cmp)
1771{
1772    int op;
1773    TCGv src;
1774    TCGv reg;
1775    TCGv dest;
1776    int opsize;
1777
1778    op = (insn >> 6) & 3;
1779    switch (op) {
1780    case 0: /* cmp.b */
1781        opsize = OS_BYTE;
1782        s->cc_op = CC_OP_CMPB;
1783        break;
1784    case 1: /* cmp.w */
1785        opsize = OS_WORD;
1786        s->cc_op = CC_OP_CMPW;
1787        break;
1788    case 2: /* cmp.l */
1789        opsize = OS_LONG;
1790        s->cc_op = CC_OP_SUB;
1791        break;
1792    default:
1793        abort();
1794    }
1795    SRC_EA(env, src, opsize, 1, NULL);
1796    reg = DREG(insn, 9);
1797    dest = tcg_temp_new();
1798    tcg_gen_sub_i32(dest, reg, src);
1799    gen_update_cc_add(dest, src);
1800}
1801
1802DISAS_INSN(cmpa)
1803{
1804    int opsize;
1805    TCGv src;
1806    TCGv reg;
1807    TCGv dest;
1808
1809    if (insn & 0x100) {
1810        opsize = OS_LONG;
1811    } else {
1812        opsize = OS_WORD;
1813    }
1814    SRC_EA(env, src, opsize, 1, NULL);
1815    reg = AREG(insn, 9);
1816    dest = tcg_temp_new();
1817    tcg_gen_sub_i32(dest, reg, src);
1818    gen_update_cc_add(dest, src);
1819    s->cc_op = CC_OP_SUB;
1820}
1821
1822DISAS_INSN(eor)
1823{
1824    TCGv src;
1825    TCGv reg;
1826    TCGv dest;
1827    TCGv addr;
1828
1829    SRC_EA(env, src, OS_LONG, 0, &addr);
1830    reg = DREG(insn, 9);
1831    dest = tcg_temp_new();
1832    tcg_gen_xor_i32(dest, src, reg);
1833    gen_logic_cc(s, dest);
1834    DEST_EA(env, insn, OS_LONG, dest, &addr);
1835}
1836
1837DISAS_INSN(and)
1838{
1839    TCGv src;
1840    TCGv reg;
1841    TCGv dest;
1842    TCGv addr;
1843
1844    reg = DREG(insn, 9);
1845    dest = tcg_temp_new();
1846    if (insn & 0x100) {
1847        SRC_EA(env, src, OS_LONG, 0, &addr);
1848        tcg_gen_and_i32(dest, src, reg);
1849        DEST_EA(env, insn, OS_LONG, dest, &addr);
1850    } else {
1851        SRC_EA(env, src, OS_LONG, 0, NULL);
1852        tcg_gen_and_i32(dest, src, reg);
1853        tcg_gen_mov_i32(reg, dest);
1854    }
1855    gen_logic_cc(s, dest);
1856}
1857
1858DISAS_INSN(adda)
1859{
1860    TCGv src;
1861    TCGv reg;
1862
1863    SRC_EA(env, src, OS_LONG, 0, NULL);
1864    reg = AREG(insn, 9);
1865    tcg_gen_add_i32(reg, reg, src);
1866}
1867
1868DISAS_INSN(addx)
1869{
1870    TCGv reg;
1871    TCGv src;
1872
1873    gen_flush_flags(s);
1874    reg = DREG(insn, 9);
1875    src = DREG(insn, 0);
1876    gen_helper_addx_cc(reg, cpu_env, reg, src);
1877    s->cc_op = CC_OP_FLAGS;
1878}
1879
1880/* TODO: This could be implemented without helper functions.  */
1881DISAS_INSN(shift_im)
1882{
1883    TCGv reg;
1884    int tmp;
1885    TCGv shift;
1886
1887    reg = DREG(insn, 0);
1888    tmp = (insn >> 9) & 7;
1889    if (tmp == 0)
1890        tmp = 8;
1891    shift = tcg_const_i32(tmp);
 1892    /* No need to flush flags because we know we will set the C flag.  */
1893    if (insn & 0x100) {
1894        gen_helper_shl_cc(reg, cpu_env, reg, shift);
1895    } else {
1896        if (insn & 8) {
1897            gen_helper_shr_cc(reg, cpu_env, reg, shift);
1898        } else {
1899            gen_helper_sar_cc(reg, cpu_env, reg, shift);
1900        }
1901    }
1902    s->cc_op = CC_OP_SHIFT;
1903}
1904
1905DISAS_INSN(shift_reg)
1906{
1907    TCGv reg;
1908    TCGv shift;
1909
1910    reg = DREG(insn, 0);
1911    shift = DREG(insn, 9);
1912    /* Shift by zero leaves C flag unmodified.   */
1913    gen_flush_flags(s);
1914    if (insn & 0x100) {
1915        gen_helper_shl_cc(reg, cpu_env, reg, shift);
1916    } else {
1917        if (insn & 8) {
1918            gen_helper_shr_cc(reg, cpu_env, reg, shift);
1919        } else {
1920            gen_helper_sar_cc(reg, cpu_env, reg, shift);
1921        }
1922    }
1923    s->cc_op = CC_OP_SHIFT;
1924}
1925
1926DISAS_INSN(ff1)
1927{
1928    TCGv reg;
1929    reg = DREG(insn, 0);
1930    gen_logic_cc(s, reg);
1931    gen_helper_ff1(reg, reg);
1932}
1933
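     /* Build the full SR value by combining the non-CCR bits of QREG_SR with the live CCR.  */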
1934static TCGv gen_get_sr(DisasContext *s)
1935{
1936    TCGv ccr;
1937    TCGv sr;
1938
1939    ccr = gen_get_ccr(s);
1940    sr = tcg_temp_new();
1941    tcg_gen_andi_i32(sr, QREG_SR, 0xffe0);
1942    tcg_gen_or_i32(sr, sr, ccr);
1943    return sr;
1944}
1945
1946DISAS_INSN(strldsr)
1947{
1948    uint16_t ext;
1949    uint32_t addr;
1950
1951    addr = s->pc - 2;
1952    ext = cpu_lduw_code(env, s->pc);
1953    s->pc += 2;
1954    if (ext != 0x46FC) {
1955        gen_exception(s, addr, EXCP_UNSUPPORTED);
1956        return;
1957    }
1958    ext = cpu_lduw_code(env, s->pc);
1959    s->pc += 2;
1960    if (IS_USER(s) || (ext & SR_S) == 0) {
1961        gen_exception(s, addr, EXCP_PRIVILEGE);
1962        return;
1963    }
1964    gen_push(s, gen_get_sr(s));
1965    gen_set_sr_im(s, ext, 0);
1966}
1967
1968DISAS_INSN(move_from_sr)
1969{
1970    TCGv reg;
1971    TCGv sr;
1972
1973    if (IS_USER(s)) {
1974        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
1975        return;
1976    }
1977    sr = gen_get_sr(s);
1978    reg = DREG(insn, 0);
1979    gen_partset_reg(OS_WORD, reg, sr);
1980}
1981
1982DISAS_INSN(move_to_sr)
1983{
1984    if (IS_USER(s)) {
1985        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
1986        return;
1987    }
1988    gen_set_sr(env, s, insn, 0);
1989    gen_lookup_tb(s);
1990}
1991
1992DISAS_INSN(move_from_usp)
1993{
1994    if (IS_USER(s)) {
1995        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
1996        return;
1997    }
1998    /* TODO: Implement USP.  */
1999    gen_exception(s, s->pc - 2, EXCP_ILLEGAL);
2000}
2001
2002DISAS_INSN(move_to_usp)
2003{
2004    if (IS_USER(s)) {
2005        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2006        return;
2007    }
2008    /* TODO: Implement USP.  */
2009    gen_exception(s, s->pc - 2, EXCP_ILLEGAL);
2010}
2011
2012DISAS_INSN(halt)
2013{
2014    gen_exception(s, s->pc, EXCP_HALT_INSN);
2015}
2016
2017DISAS_INSN(stop)
2018{
2019    uint16_t ext;
2020
2021    if (IS_USER(s)) {
2022        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2023        return;
2024    }
2025
2026    ext = cpu_lduw_code(env, s->pc);
2027    s->pc += 2;
2028
2029    gen_set_sr_im(s, ext, 0);
2030    tcg_gen_movi_i32(cpu_halted, 1);
2031    gen_exception(s, s->pc, EXCP_HLT);
2032}
2033
2034DISAS_INSN(rte)
2035{
2036    if (IS_USER(s)) {
2037        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2038        return;
2039    }
2040    gen_exception(s, s->pc - 2, EXCP_RTE);
2041}
2042
2043DISAS_INSN(movec)
2044{
2045    uint16_t ext;
2046    TCGv reg;
2047
2048    if (IS_USER(s)) {
2049        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2050        return;
2051    }
2052
2053    ext = cpu_lduw_code(env, s->pc);
2054    s->pc += 2;
2055
2056    if (ext & 0x8000) {
2057        reg = AREG(ext, 12);
2058    } else {
2059        reg = DREG(ext, 12);
2060    }
2061    gen_helper_movec(cpu_env, tcg_const_i32(ext & 0xfff), reg);
2062    gen_lookup_tb(s);
2063}
2064
2065DISAS_INSN(intouch)
2066{
2067    if (IS_USER(s)) {
2068        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2069        return;
2070    }
2071    /* ICache fetch.  Implement as no-op.  */
2072}
2073
2074DISAS_INSN(cpushl)
2075{
2076    if (IS_USER(s)) {
2077        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2078        return;
2079    }
2080    /* Cache push/invalidate.  Implement as no-op.  */
2081}
2082
2083DISAS_INSN(wddata)
2084{
2085    gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2086}
2087
2088DISAS_INSN(wdebug)
2089{
2090    if (IS_USER(s)) {
2091        gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
2092        return;
2093    }
2094    /* TODO: Implement wdebug.  */
2095    qemu_assert(0, "WDEBUG not implemented");
2096}
2097
2098DISAS_INSN(trap)
2099{
2100    gen_exception(s, s->pc - 2, EXCP_TRAP0 + (insn & 0xf));
2101}
2102
2103/* ??? FP exceptions are not implemented.  Most exceptions are deferred until
2104   immediately before the next FP instruction is executed.  */
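    /* The FPU extension word selects the operation class in bits 15-13:
       0 and 2 are arithmetic, 3 is fmove to memory, 4 and 5 move to and from
       the control registers, and 6 and 7 are fmovem.  For arithmetic, bit 14
       selects a memory source, bits 12-10 the source format or register,
       bits 9-7 the destination register and bits 6-0 the opmode.  */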
2105DISAS_INSN(fpu)
2106{
2107    uint16_t ext;
2108    int32_t offset;
2109    int opmode;
2110    TCGv_i64 src;
2111    TCGv_i64 dest;
2112    TCGv_i64 res;
2113    TCGv tmp32;
2114    int round;
2115    int set_dest;
2116    int opsize;
2117
2118    ext = cpu_lduw_code(env, s->pc);
2119    s->pc += 2;
2120    opmode = ext & 0x7f;
2121    switch ((ext >> 13) & 7) {
2122    case 0: case 2:
2123        break;
2124    case 1:
2125        goto undef;
2126    case 3: /* fmove out */
2127        src = FREG(ext, 7);
2128        tmp32 = tcg_temp_new_i32();
2129        /* fmove */
2130        /* ??? TODO: Proper behavior on overflow.  */
2131        switch ((ext >> 10) & 7) {
2132        case 0:
2133            opsize = OS_LONG;
2134            gen_helper_f64_to_i32(tmp32, cpu_env, src);
2135            break;
2136        case 1:
2137            opsize = OS_SINGLE;
2138            gen_helper_f64_to_f32(tmp32, cpu_env, src);
2139            break;
2140        case 4:
2141            opsize = OS_WORD;
2142            gen_helper_f64_to_i32(tmp32, cpu_env, src);
2143            break;
2144        case 5: /* OS_DOUBLE */
2145            tcg_gen_mov_i32(tmp32, AREG(insn, 0));
2146            switch ((insn >> 3) & 7) {
2147            case 2:
2148            case 3:
2149                break;
2150            case 4:
2151                tcg_gen_addi_i32(tmp32, tmp32, -8);
2152                break;
2153            case 5:
2154                offset = cpu_ldsw_code(env, s->pc);
2155                s->pc += 2;
2156                tcg_gen_addi_i32(tmp32, tmp32, offset);
2157                break;
2158            default:
2159                goto undef;
2160            }
2161            gen_store64(s, tmp32, src);
2162            switch ((insn >> 3) & 7) {
2163            case 3:
2164                tcg_gen_addi_i32(tmp32, tmp32, 8);
2165                tcg_gen_mov_i32(AREG(insn, 0), tmp32);
2166                break;
2167            case 4:
2168                tcg_gen_mov_i32(AREG(insn, 0), tmp32);
2169                break;
2170            }
2171            tcg_temp_free_i32(tmp32);
2172            return;
2173        case 6:
2174            opsize = OS_BYTE;
2175            gen_helper_f64_to_i32(tmp32, cpu_env, src);
2176            break;
2177        default:
2178            goto undef;
2179        }
2180        DEST_EA(env, insn, opsize, tmp32, NULL);
2181        tcg_temp_free_i32(tmp32);
2182        return;
2183    case 4: /* fmove to control register.  */
2184        switch ((ext >> 10) & 7) {
2185        case 4: /* FPCR */
2186            /* Not implemented.  Ignore writes.  */
2187            break;
2188        case 1: /* FPIAR */
2189        case 2: /* FPSR */
2190        default:
2191            cpu_abort(env, "Unimplemented: fmove to control %d",
2192                      (ext >> 10) & 7);
2193        }
2194        break;
2195    case 5: /* fmove from control register.  */
2196        switch ((ext >> 10) & 7) {
2197        case 4: /* FPCR */
2198            /* Not implemented.  Always return zero.  */
2199            tmp32 = tcg_const_i32(0);
2200            break;
2201        case 1: /* FPIAR */
2202        case 2: /* FPSR */
2203        default:
2204            cpu_abort(env, "Unimplemented: fmove from control %d",
2205                      (ext >> 10) & 7);
2206            goto undef;
2207        }
2208        DEST_EA(env, insn, OS_LONG, tmp32, NULL);
2209        break;
2210    case 6: /* fmovem */
2211    case 7:
2212        {
2213            TCGv addr;
2214            uint16_t mask;
2215            int i;
2216            if ((ext & 0x1f00) != 0x1000 || (ext & 0xff) == 0)
2217                goto undef;
2218            tmp32 = gen_lea(env, s, insn, OS_LONG);
2219            if (IS_NULL_QREG(tmp32)) {
2220                gen_addr_fault(s);
2221                return;
2222            }
2223            addr = tcg_temp_new_i32();
2224            tcg_gen_mov_i32(addr, tmp32);
2225            mask = 0x80;
2226            for (i = 0; i < 8; i++) {
2227                if (ext & mask) {
2228                    s->is_mem = 1;
2229                    dest = FREG(i, 0);
2230                    if (ext & (1 << 13)) {
2231                        /* store */
2232                        tcg_gen_qemu_stf64(dest, addr, IS_USER(s));
2233                    } else {
2234                        /* load */
2235                        tcg_gen_qemu_ldf64(dest, addr, IS_USER(s));
2236                    }
2237                    if (ext & (mask - 1))
2238                        tcg_gen_addi_i32(addr, addr, 8);
2239                }
2240                mask >>= 1;
2241            }
2242            tcg_temp_free_i32(addr);
2243        }
2244        return;
2245    }
2246    if (ext & (1 << 14)) {
2247        /* Source effective address.  */
2248        switch ((ext >> 10) & 7) {
2249        case 0: opsize = OS_LONG; break;
2250        case 1: opsize = OS_SINGLE; break;
2251        case 4: opsize = OS_WORD; break;
2252        case 5: opsize = OS_DOUBLE; break;
2253        case 6: opsize = OS_BYTE; break;
2254        default:
2255            goto undef;
2256        }
2257        if (opsize == OS_DOUBLE) {
2258            tmp32 = tcg_temp_new_i32();
2259            tcg_gen_mov_i32(tmp32, AREG(insn, 0));
2260            switch ((insn >> 3) & 7) {
2261            case 2:
2262            case 3:
2263                break;
2264            case 4:
2265                tcg_gen_addi_i32(tmp32, tmp32, -8);
2266                break;
2267            case 5:
2268                offset = cpu_ldsw_code(env, s->pc);
2269                s->pc += 2;
2270                tcg_gen_addi_i32(tmp32, tmp32, offset);
2271                break;
2272            case 7:
2273                offset = cpu_ldsw_code(env, s->pc);
2274                offset += s->pc - 2;
2275                s->pc += 2;
2276                tcg_gen_addi_i32(tmp32, tmp32, offset);
2277                break;
2278            default:
2279                goto undef;
2280            }
2281            src = gen_load64(s, tmp32);
2282            switch ((insn >> 3) & 7) {
2283            case 3:
2284                tcg_gen_addi_i32(tmp32, tmp32, 8);
2285                tcg_gen_mov_i32(AREG(insn, 0), tmp32);
2286                break;
2287            case 4:
2288                tcg_gen_mov_i32(AREG(insn, 0), tmp32);
2289                break;
2290            }
2291            tcg_temp_free_i32(tmp32);
2292        } else {
2293            SRC_EA(env, tmp32, opsize, 1, NULL);
2294            src = tcg_temp_new_i64();
2295            switch (opsize) {
2296            case OS_LONG:
2297            case OS_WORD:
2298            case OS_BYTE:
2299                gen_helper_i32_to_f64(src, cpu_env, tmp32);
2300                break;
2301            case OS_SINGLE:
2302                gen_helper_f32_to_f64(src, cpu_env, tmp32);
2303                break;
2304            }
2305        }
2306    } else {
2307        /* Source register.  */
2308        src = FREG(ext, 10);
2309    }
2310    dest = FREG(ext, 7);
2311    res = tcg_temp_new_i64();
2312    if (opmode != 0x3a)
2313        tcg_gen_mov_f64(res, dest);
2314    round = 1;
2315    set_dest = 1;
2316    switch (opmode) {
2317    case 0: case 0x40: case 0x44: /* fmove */
2318        tcg_gen_mov_f64(res, src);
2319        break;
2320    case 1: /* fint */
2321        gen_helper_iround_f64(res, cpu_env, src);
2322        round = 0;
2323        break;
2324    case 3: /* fintrz */
2325        gen_helper_itrunc_f64(res, cpu_env, src);
2326        round = 0;
2327        break;
2328    case 4: case 0x41: case 0x45: /* fsqrt */
2329        gen_helper_sqrt_f64(res, cpu_env, src);
2330        break;
2331    case 0x18: case 0x58: case 0x5c: /* fabs */
2332        gen_helper_abs_f64(res, src);
2333        break;
2334    case 0x1a: case 0x5a: case 0x5e: /* fneg */
2335        gen_helper_chs_f64(res, src);
2336        break;
2337    case 0x20: case 0x60: case 0x64: /* fdiv */
2338        gen_helper_div_f64(res, cpu_env, res, src);
2339        break;
2340    case 0x22: case 0x62: case 0x66: /* fadd */
2341        gen_helper_add_f64(res, cpu_env, res, src);
2342        break;
2343    case 0x23: case 0x63: case 0x67: /* fmul */
2344        gen_helper_mul_f64(res, cpu_env, res, src);
2345        break;
2346    case 0x28: case 0x68: case 0x6c: /* fsub */
2347        gen_helper_sub_f64(res, cpu_env, res, src);
2348        break;
2349    case 0x38: /* fcmp */
2350        gen_helper_sub_cmp_f64(res, cpu_env, res, src);
2351        set_dest = 0;
2352        round = 0;
2353        break;
2354    case 0x3a: /* ftst */
2355        tcg_gen_mov_f64(res, src);
2356        set_dest = 0;
2357        round = 0;
2358        break;
2359    default:
2360        goto undef;
2361    }
2362    if (ext & (1 << 14)) {
2363        tcg_temp_free_i64(src);
2364    }
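        /* Arithmetic is carried out in double precision.  The float32 round
           trip below emulates rounding to single precision when the opmode
           asked for a single result or when FPCR selects single precision.  */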
2365    if (round) {
2366        if (opmode & 0x40) {
2367            if ((opmode & 0x4) != 0)
2368                round = 0;
2369        } else if ((s->fpcr & M68K_FPCR_PREC) == 0) {
2370            round = 0;
2371        }
2372    }
2373    if (round) {
2374        TCGv tmp = tcg_temp_new_i32();
2375        gen_helper_f64_to_f32(tmp, cpu_env, res);
2376        gen_helper_f32_to_f64(res, cpu_env, tmp);
2377        tcg_temp_free_i32(tmp);
2378    }
2379    tcg_gen_mov_f64(QREG_FP_RESULT, res);
2380    if (set_dest) {
2381        tcg_gen_mov_f64(dest, res);
2382    }
2383    tcg_temp_free_i64(res);
2384    return;
2385undef:
2386    /* FIXME: Is this right for offset addressing modes?  */
2387    s->pc -= 2;
2388    disas_undef_fpu(env, s, insn);
2389}
2390
2391DISAS_INSN(fbcc)
2392{
2393    uint32_t offset;
2394    uint32_t addr;
2395    TCGv flag;
2396    int l1;
2397
2398    addr = s->pc;
2399    offset = cpu_ldsw_code(env, s->pc);
2400    s->pc += 2;
2401    if (insn & (1 << 6)) {
2402        offset = (offset << 16) | cpu_lduw_code(env, s->pc);
2403        s->pc += 2;
2404    }
2405
2406    l1 = gen_new_label();
2407    /* TODO: Raise BSUN exception.  */
2408    flag = tcg_temp_new();
2409    gen_helper_compare_f64(flag, cpu_env, QREG_FP_RESULT);
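        /* compare_f64 classifies FP_RESULT: -1 negative, 0 zero, 1 positive,
           2 NaN.  The branches below express the IEEE predicates in those
           terms.  */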
2410    /* Jump to l1 if condition is true.  */
2411    switch (insn & 0xf) {
2412    case 0: /* f */
2413        break;
2414    case 1: /* eq (=0) */
2415        tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(0), l1);
2416        break;
2417    case 2: /* ogt (=1) */
2418        tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(1), l1);
2419        break;
2420    case 3: /* oge (=0 or =1) */
2421        tcg_gen_brcond_i32(TCG_COND_LEU, flag, tcg_const_i32(1), l1);
2422        break;
2423    case 4: /* olt (=-1) */
2424        tcg_gen_brcond_i32(TCG_COND_LT, flag, tcg_const_i32(0), l1);
2425        break;
2426    case 5: /* ole (=-1 or =0) */
2427        tcg_gen_brcond_i32(TCG_COND_LE, flag, tcg_const_i32(0), l1);
2428        break;
2429    case 6: /* ogl (=-1 or =1) */
2430        tcg_gen_andi_i32(flag, flag, 1);
2431        tcg_gen_brcond_i32(TCG_COND_NE, flag, tcg_const_i32(0), l1);
2432        break;
2433    case 7: /* or (=2) */
2434        tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(2), l1);
2435        break;
2436    case 8: /* un (<2) */
2437        tcg_gen_brcond_i32(TCG_COND_LT, flag, tcg_const_i32(2), l1);
2438        break;
2439    case 9: /* ueq (=0 or =2) */
2440        tcg_gen_andi_i32(flag, flag, 1);
2441        tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(0), l1);
2442        break;
2443    case 10: /* ugt (>0) */
2444        tcg_gen_brcond_i32(TCG_COND_GT, flag, tcg_const_i32(0), l1);
2445        break;
2446    case 11: /* uge (>=0) */
2447        tcg_gen_brcond_i32(TCG_COND_GE, flag, tcg_const_i32(0), l1);
2448        break;
2449    case 12: /* ult (=-1 or =2) */
2450        tcg_gen_brcond_i32(TCG_COND_GEU, flag, tcg_const_i32(2), l1);
2451        break;
2452    case 13: /* ule (!=1) */
2453        tcg_gen_brcond_i32(TCG_COND_NE, flag, tcg_const_i32(1), l1);
2454        break;
2455    case 14: /* ne (!=0) */
2456        tcg_gen_brcond_i32(TCG_COND_NE, flag, tcg_const_i32(0), l1);
2457        break;
2458    case 15: /* t */
2459        tcg_gen_br(l1);
2460        break;
2461    }
2462    gen_jmp_tb(s, 0, s->pc);
2463    gen_set_label(l1);
2464    gen_jmp_tb(s, 1, addr + offset);
2465}
2466
2467DISAS_INSN(frestore)
2468{
2469    /* TODO: Implement frestore.  */
2470    qemu_assert(0, "FRESTORE not implemented");
2471}
2472
2473DISAS_INSN(fsave)
2474{
2475    /* TODO: Implement fsave.  */
2476    qemu_assert(0, "FSAVE not implemented");
2477}
2478
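    /* Extract the upper or lower 16-bit half of a MAC operand.  In fractional
       mode the halfword is left in the top 16 bits of the result; otherwise
       it is sign- or zero-extended depending on MACSR_SU.  */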
2479static inline TCGv gen_mac_extract_word(DisasContext *s, TCGv val, int upper)
2480{
2481    TCGv tmp = tcg_temp_new();
2482    if (s->env->macsr & MACSR_FI) {
2483        if (upper)
2484            tcg_gen_andi_i32(tmp, val, 0xffff0000);
2485        else
2486            tcg_gen_shli_i32(tmp, val, 16);
2487    } else if (s->env->macsr & MACSR_SU) {
2488        if (upper)
2489            tcg_gen_sari_i32(tmp, val, 16);
2490        else
2491            tcg_gen_ext16s_i32(tmp, val);
2492    } else {
2493        if (upper)
2494            tcg_gen_shri_i32(tmp, val, 16);
2495        else
2496            tcg_gen_ext16u_i32(tmp, val);
2497    }
2498    return tmp;
2499}
2500
2501static void gen_mac_clear_flags(void)
2502{
2503    tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR,
2504                     ~(MACSR_V | MACSR_Z | MACSR_N | MACSR_EV));
2505}
2506
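    /* MAC/MSAC and the EMAC dual-accumulate forms: optionally load a memory
       operand, extract and multiply the two inputs, add the product to (or
       subtract it from) one or two accumulators with mode-dependent
       saturation, and finally update the MACSR flags.  */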
2507DISAS_INSN(mac)
2508{
2509    TCGv rx;
2510    TCGv ry;
2511    uint16_t ext;
2512    int acc;
2513    TCGv tmp;
2514    TCGv addr;
2515    TCGv loadval;
2516    int dual;
2517    TCGv saved_flags;
2518
2519    if (!s->done_mac) {
2520        s->mactmp = tcg_temp_new_i64();
2521        s->done_mac = 1;
2522    }
2523
2524    ext = cpu_lduw_code(env, s->pc);
2525    s->pc += 2;
2526
2527    acc = ((insn >> 7) & 1) | ((ext >> 3) & 2);
2528    dual = ((insn & 0x30) != 0 && (ext & 3) != 0);
2529    if (dual && !m68k_feature(s->env, M68K_FEATURE_CF_EMAC_B)) {
2530        disas_undef(env, s, insn);
2531        return;
2532    }
2533    if (insn & 0x30) {
2534        /* MAC with load.  */
2535        tmp = gen_lea(env, s, insn, OS_LONG);
2536        addr = tcg_temp_new();
2537        tcg_gen_and_i32(addr, tmp, QREG_MAC_MASK);
2538        /* Load the value now to ensure correct exception behavior.
2539           Perform writeback after reading the MAC inputs.  */
2540        loadval = gen_load(s, OS_LONG, addr, 0);
2541
2542        acc ^= 1;
2543        rx = (ext & 0x8000) ? AREG(ext, 12) : DREG(insn, 12);
2544        ry = (ext & 8) ? AREG(ext, 0) : DREG(ext, 0);
2545    } else {
2546        loadval = addr = NULL_QREG;
2547        rx = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
2548        ry = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
2549    }
2550
2551    gen_mac_clear_flags();
2552#if 0
2553    l1 = -1;
2554    /* Disabled because conditional branches clobber temporary vars.  */
2555    if ((s->env->macsr & MACSR_OMC) != 0 && !dual) {
2556        /* Skip the multiply if we know we will ignore it.  */
2557        l1 = gen_new_label();
2558        tmp = tcg_temp_new();
2559        tcg_gen_andi_i32(tmp, QREG_MACSR, 1 << (acc + 8));
2560        gen_op_jmp_nz32(tmp, l1);
2561    }
2562#endif
2563
2564    if ((ext & 0x0800) == 0) {
2565        /* Word.  */
2566        rx = gen_mac_extract_word(s, rx, (ext & 0x80) != 0);
2567        ry = gen_mac_extract_word(s, ry, (ext & 0x40) != 0);
2568    }
2569    if (s->env->macsr & MACSR_FI) {
2570        gen_helper_macmulf(s->mactmp, cpu_env, rx, ry);
2571    } else {
2572        if (s->env->macsr & MACSR_SU)
2573            gen_helper_macmuls(s->mactmp, cpu_env, rx, ry);
2574        else
2575            gen_helper_macmulu(s->mactmp, cpu_env, rx, ry);
2576        switch ((ext >> 9) & 3) {
2577        case 1:
2578            tcg_gen_shli_i64(s->mactmp, s->mactmp, 1);
2579            break;
2580        case 3:
2581            tcg_gen_shri_i64(s->mactmp, s->mactmp, 1);
2582            break;
2583        }
2584    }
2585
2586    if (dual) {
2587        /* Save the overflow flag from the multiply.  */
2588        saved_flags = tcg_temp_new();
2589        tcg_gen_mov_i32(saved_flags, QREG_MACSR);
2590    } else {
2591        saved_flags = NULL_QREG;
2592    }
2593
2594#if 0
2595    /* Disabled because conditional branches clobber temporary vars.  */
2596    if ((s->env->macsr & MACSR_OMC) != 0 && dual) {
2597        /* Skip the accumulate if the value is already saturated.  */
2598        l1 = gen_new_label();
2599        tmp = tcg_temp_new();
2600        gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc));
2601        gen_op_jmp_nz32(tmp, l1);
2602    }
2603#endif
2604
2605    if (insn & 0x100)
2606        tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
2607    else
2608        tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
2609
2610    if (s->env->macsr & MACSR_FI)
2611        gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
2612    else if (s->env->macsr & MACSR_SU)
2613        gen_helper_macsats(cpu_env, tcg_const_i32(acc));
2614    else
2615        gen_helper_macsatu(cpu_env, tcg_const_i32(acc));
2616
2617#if 0
2618    /* Disabled because conditional branches clobber temporary vars.  */
2619    if (l1 != -1)
2620        gen_set_label(l1);
2621#endif
2622
2623    if (dual) {
2624        /* Dual accumulate variant.  */
2625        acc = (ext >> 2) & 3;
2626        /* Restore the overflow flag from the multiplier.  */
2627        tcg_gen_mov_i32(QREG_MACSR, saved_flags);
2628#if 0
2629        /* Disabled because conditional branches clobber temporary vars.  */
2630        if ((s->env->macsr & MACSR_OMC) != 0) {
2631            /* Skip the accumulate if the value is already saturated.  */
2632            l1 = gen_new_label();
2633            tmp = tcg_temp_new();
2634            gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc));
2635            gen_op_jmp_nz32(tmp, l1);
2636        }
2637#endif
2638        if (ext & 2)
2639            tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
2640        else
2641            tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
2642        if (s->env->macsr & MACSR_FI)
2643            gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
2644        else if (s->env->macsr & MACSR_SU)
2645            gen_helper_macsats(cpu_env, tcg_const_i32(acc));
2646        else
2647            gen_helper_macsatu(cpu_env, tcg_const_i32(acc));
2648#if 0
2649        /* Disabled because conditional branches clobber temporary vars.  */
2650        if (l1 != -1)
2651            gen_set_label(l1);
2652#endif
2653    }
2654    gen_helper_mac_set_flags(cpu_env, tcg_const_i32(acc));
2655
2656    if (insn & 0x30) {
2657        TCGv rw;
2658        rw = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
2659        tcg_gen_mov_i32(rw, loadval);
2660        /* FIXME: Should address writeback happen with the masked or
2661           unmasked value?  */
2662        switch ((insn >> 3) & 7) {
2663        case 3: /* Post-increment.  */
2664            tcg_gen_addi_i32(AREG(insn, 0), addr, 4);
2665            break;
2666        case 4: /* Pre-decrement.  */
2667            tcg_gen_mov_i32(AREG(insn, 0), addr);
2668        }
2669    }
2670}
2671
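    /* Read an accumulator into a register, saturating according to the MACSR
       mode.  Bit 6 of the opcode selects the variant that also clears the
       accumulator and its overflow flag.  */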
2672DISAS_INSN(from_mac)
2673{
2674    TCGv rx;
2675    TCGv_i64 acc;
2676    int accnum;
2677
2678    rx = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
2679    accnum = (insn >> 9) & 3;
2680    acc = MACREG(accnum);
2681    if (s->env->macsr & MACSR_FI) {
2682        gen_helper_get_macf(rx, cpu_env, acc);
2683    } else if ((s->env->macsr & MACSR_OMC) == 0) {
2684        tcg_gen_trunc_i64_i32(rx, acc);
2685    } else if (s->env->macsr & MACSR_SU) {
2686        gen_helper_get_macs(rx, acc);
2687    } else {
2688        gen_helper_get_macu(rx, acc);
2689    }
2690    if (insn & 0x40) {
2691        tcg_gen_movi_i64(acc, 0);
2692        tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
2693    }
2694}
2695
2696DISAS_INSN(move_mac)
2697{
2698    /* FIXME: This can be done without a helper.  */
2699    int src;
2700    TCGv dest;
2701    src = insn & 3;
2702    dest = tcg_const_i32((insn >> 9) & 3);
2703    gen_helper_mac_move(cpu_env, dest, tcg_const_i32(src));
2704    gen_mac_clear_flags();
2705    gen_helper_mac_set_flags(cpu_env, dest);
2706}
2707
2708DISAS_INSN(from_macsr)
2709{
2710    TCGv reg;
2711
2712    reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
2713    tcg_gen_mov_i32(reg, QREG_MACSR);
2714}
2715
2716DISAS_INSN(from_mask)
2717{
2718    TCGv reg;
2719    reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
2720    tcg_gen_mov_i32(reg, QREG_MAC_MASK);
2721}
2722
2723DISAS_INSN(from_mext)
2724{
2725    TCGv reg;
2726    TCGv acc;
2727    reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
2728    acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
2729    if (s->env->macsr & MACSR_FI)
2730        gen_helper_get_mac_extf(reg, cpu_env, acc);
2731    else
2732        gen_helper_get_mac_exti(reg, cpu_env, acc);
2733}
2734
2735DISAS_INSN(macsr_to_ccr)
2736{
2737    tcg_gen_movi_i32(QREG_CC_X, 0);
2738    tcg_gen_andi_i32(QREG_CC_DEST, QREG_MACSR, 0xf);
2739    s->cc_op = CC_OP_FLAGS;
2740}
2741
2742DISAS_INSN(to_mac)
2743{
2744    TCGv_i64 acc;
2745    TCGv val;
2746    int accnum;
2747    accnum = (insn >> 9) & 3;
2748    acc = MACREG(accnum);
2749    SRC_EA(env, val, OS_LONG, 0, NULL);
2750    if (s->env->macsr & MACSR_FI) {
2751        tcg_gen_ext_i32_i64(acc, val);
2752        tcg_gen_shli_i64(acc, acc, 8);
2753    } else if (s->env->macsr & MACSR_SU) {
2754        tcg_gen_ext_i32_i64(acc, val);
2755    } else {
2756        tcg_gen_extu_i32_i64(acc, val);
2757    }
2758    tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
2759    gen_mac_clear_flags();
2760    gen_helper_mac_set_flags(cpu_env, tcg_const_i32(accnum));
2761}
2762
2763DISAS_INSN(to_macsr)
2764{
2765    TCGv val;
2766    SRC_EA(env, val, OS_LONG, 0, NULL);
2767    gen_helper_set_macsr(cpu_env, val);
2768    gen_lookup_tb(s);
2769}
2770
2771DISAS_INSN(to_mask)
2772{
2773    TCGv val;
2774    SRC_EA(env, val, OS_LONG, 0, NULL);
2775    tcg_gen_ori_i32(QREG_MAC_MASK, val, 0xffff0000);
2776}
2777
2778DISAS_INSN(to_mext)
2779{
2780    TCGv val;
2781    TCGv acc;
2782    SRC_EA(env, val, OS_LONG, 0, NULL);
2783    acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
2784    if (s->env->macsr & MACSR_FI)
2785        gen_helper_set_mac_extf(cpu_env, val, acc);
2786    else if (s->env->macsr & MACSR_SU)
2787        gen_helper_set_mac_exts(cpu_env, val, acc);
2788    else
2789        gen_helper_set_mac_extu(cpu_env, val, acc);
2790}
2791
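    /* One handler per possible 16-bit opcode; register_opcode() below fills
       in every index matched by an opcode/mask pair.  */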
2792static disas_proc opcode_table[65536];
2793
2794static void
2795register_opcode (disas_proc proc, uint16_t opcode, uint16_t mask)
2796{
2797  int i;
2798  int from;
2799  int to;
2800
2801  /* Sanity check.  All set bits must be included in the mask.  */
2802  if (opcode & ~mask) {
2803      fprintf(stderr,
2804              "qemu internal error: bogus opcode definition %04x/%04x\n",
2805              opcode, mask);
2806      abort();
2807  }
2808  /* This could probably be cleverer.  For now just optimize the case where
2809     the top bits are known.  */
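      /* For example, scc is registered as 50c0/f0f8: the first zero bit in
         the mask, scanning from the top, is bit 11, so indices 5000-5fff are
         visited and the handler installed wherever the masked bits equal
         50c0.  */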
2810  /* Find the first zero bit in the mask.  */
2811  i = 0x8000;
2812  while ((i & mask) != 0)
2813      i >>= 1;
2814  /* Iterate over all combinations of this and lower bits.  */
2815  if (i == 0)
2816      i = 1;
2817  else
2818      i <<= 1;
2819  from = opcode & ~(i - 1);
2820  to = from + i;
2821  for (i = from; i < to; i++) {
2822      if ((i & mask) == opcode)
2823          opcode_table[i] = proc;
2824  }
2825}
2826
2827/* Register m68k opcode handlers.  Order is important.
2828   Later insns override earlier ones.  */
2829void register_m68k_insns (CPUM68KState *env)
2830{
2831#define INSN(name, opcode, mask, feature) do { \
2832    if (m68k_feature(env, M68K_FEATURE_##feature)) \
2833        register_opcode(disas_##name, 0x##opcode, 0x##mask); \
2834    } while(0)
2835    INSN(undef,     0000, 0000, CF_ISA_A);
2836    INSN(arith_im,  0080, fff8, CF_ISA_A);
2837    INSN(bitrev,    00c0, fff8, CF_ISA_APLUSC);
2838    INSN(bitop_reg, 0100, f1c0, CF_ISA_A);
2839    INSN(bitop_reg, 0140, f1c0, CF_ISA_A);
2840    INSN(bitop_reg, 0180, f1c0, CF_ISA_A);
2841    INSN(bitop_reg, 01c0, f1c0, CF_ISA_A);
2842    INSN(arith_im,  0280, fff8, CF_ISA_A);
2843    INSN(byterev,   02c0, fff8, CF_ISA_APLUSC);
2844    INSN(arith_im,  0480, fff8, CF_ISA_A);
2845    INSN(ff1,       04c0, fff8, CF_ISA_APLUSC);
2846    INSN(arith_im,  0680, fff8, CF_ISA_A);
2847    INSN(bitop_im,  0800, ffc0, CF_ISA_A);
2848    INSN(bitop_im,  0840, ffc0, CF_ISA_A);
2849    INSN(bitop_im,  0880, ffc0, CF_ISA_A);
2850    INSN(bitop_im,  08c0, ffc0, CF_ISA_A);
2851    INSN(arith_im,  0a80, fff8, CF_ISA_A);
2852    INSN(arith_im,  0c00, ff38, CF_ISA_A);
2853    INSN(move,      1000, f000, CF_ISA_A);
2854    INSN(move,      2000, f000, CF_ISA_A);
2855    INSN(move,      3000, f000, CF_ISA_A);
2856    INSN(strldsr,   40e7, ffff, CF_ISA_APLUSC);
2857    INSN(negx,      4080, fff8, CF_ISA_A);
2858    INSN(move_from_sr, 40c0, fff8, CF_ISA_A);
2859    INSN(lea,       41c0, f1c0, CF_ISA_A);
2860    INSN(clr,       4200, ff00, CF_ISA_A);
2861    INSN(undef,     42c0, ffc0, CF_ISA_A);
2862    INSN(move_from_ccr, 42c0, fff8, CF_ISA_A);
2863    INSN(neg,       4480, fff8, CF_ISA_A);
2864    INSN(move_to_ccr, 44c0, ffc0, CF_ISA_A);
2865    INSN(not,       4680, fff8, CF_ISA_A);
2866    INSN(move_to_sr, 46c0, ffc0, CF_ISA_A);
2867    INSN(pea,       4840, ffc0, CF_ISA_A);
2868    INSN(swap,      4840, fff8, CF_ISA_A);
2869    INSN(movem,     48c0, fbc0, CF_ISA_A);
2870    INSN(ext,       4880, fff8, CF_ISA_A);
2871    INSN(ext,       48c0, fff8, CF_ISA_A);
2872    INSN(ext,       49c0, fff8, CF_ISA_A);
2873    INSN(tst,       4a00, ff00, CF_ISA_A);
2874    INSN(tas,       4ac0, ffc0, CF_ISA_B);
2875    INSN(halt,      4ac8, ffff, CF_ISA_A);
2876    INSN(pulse,     4acc, ffff, CF_ISA_A);
2877    INSN(illegal,   4afc, ffff, CF_ISA_A);
2878    INSN(mull,      4c00, ffc0, CF_ISA_A);
2879    INSN(divl,      4c40, ffc0, CF_ISA_A);
2880    INSN(sats,      4c80, fff8, CF_ISA_B);
2881    INSN(trap,      4e40, fff0, CF_ISA_A);
2882    INSN(link,      4e50, fff8, CF_ISA_A);
2883    INSN(unlk,      4e58, fff8, CF_ISA_A);
2884    INSN(move_to_usp, 4e60, fff8, USP);
2885    INSN(move_from_usp, 4e68, fff8, USP);
2886    INSN(nop,       4e71, ffff, CF_ISA_A);
2887    INSN(stop,      4e72, ffff, CF_ISA_A);
2888    INSN(rte,       4e73, ffff, CF_ISA_A);
2889    INSN(rts,       4e75, ffff, CF_ISA_A);
2890    INSN(movec,     4e7b, ffff, CF_ISA_A);
2891    INSN(jump,      4e80, ffc0, CF_ISA_A);
2892    INSN(jump,      4ec0, ffc0, CF_ISA_A);
2893    INSN(addsubq,   5180, f1c0, CF_ISA_A);
2894    INSN(scc,       50c0, f0f8, CF_ISA_A);
2895    INSN(addsubq,   5080, f1c0, CF_ISA_A);
2896    INSN(tpf,       51f8, fff8, CF_ISA_A);
2897
2898    /* Branch instructions.  */
2899    INSN(branch,    6000, f000, CF_ISA_A);
2900    /* Disable long branch instructions, then add back the ones we want.  */
2901    INSN(undef,     60ff, f0ff, CF_ISA_A); /* All long branches.  */
2902    INSN(branch,    60ff, f0ff, CF_ISA_B);
2903    INSN(undef,     60ff, ffff, CF_ISA_B); /* bra.l */
2904    INSN(branch,    60ff, ffff, BRAL);
2905
2906    INSN(moveq,     7000, f100, CF_ISA_A);
2907    INSN(mvzs,      7100, f100, CF_ISA_B);
2908    INSN(or,        8000, f000, CF_ISA_A);
2909    INSN(divw,      80c0, f0c0, CF_ISA_A);
2910    INSN(addsub,    9000, f000, CF_ISA_A);
2911    INSN(subx,      9180, f1f8, CF_ISA_A);
2912    INSN(suba,      91c0, f1c0, CF_ISA_A);
2913
2914    INSN(undef_mac, a000, f000, CF_ISA_A);
2915    INSN(mac,       a000, f100, CF_EMAC);
2916    INSN(from_mac,  a180, f9b0, CF_EMAC);
2917    INSN(move_mac,  a110, f9fc, CF_EMAC);
2918    INSN(from_macsr,a980, f9f0, CF_EMAC);
2919    INSN(from_mask, ad80, fff0, CF_EMAC);
2920    INSN(from_mext, ab80, fbf0, CF_EMAC);
2921    INSN(macsr_to_ccr, a9c0, ffff, CF_EMAC);
2922    INSN(to_mac,    a100, f9c0, CF_EMAC);
2923    INSN(to_macsr,  a900, ffc0, CF_EMAC);
2924    INSN(to_mext,   ab00, fbc0, CF_EMAC);
2925    INSN(to_mask,   ad00, ffc0, CF_EMAC);
2926
2927    INSN(mov3q,     a140, f1c0, CF_ISA_B);
2928    INSN(cmp,       b000, f1c0, CF_ISA_B); /* cmp.b */
2929    INSN(cmp,       b040, f1c0, CF_ISA_B); /* cmp.w */
2930    INSN(cmpa,      b0c0, f1c0, CF_ISA_B); /* cmpa.w */
2931    INSN(cmp,       b080, f1c0, CF_ISA_A);
2932    INSN(cmpa,      b1c0, f1c0, CF_ISA_A);
2933    INSN(eor,       b180, f1c0, CF_ISA_A);
2934    INSN(and,       c000, f000, CF_ISA_A);
2935    INSN(mulw,      c0c0, f0c0, CF_ISA_A);
2936    INSN(addsub,    d000, f000, CF_ISA_A);
2937    INSN(addx,      d180, f1f8, CF_ISA_A);
2938    INSN(adda,      d1c0, f1c0, CF_ISA_A);
2939    INSN(shift_im,  e080, f0f0, CF_ISA_A);
2940    INSN(shift_reg, e0a0, f0f0, CF_ISA_A);
2941    INSN(undef_fpu, f000, f000, CF_ISA_A);
2942    INSN(fpu,       f200, ffc0, CF_FPU);
2943    INSN(fbcc,      f280, ffc0, CF_FPU);
2944    INSN(frestore,  f340, ffc0, CF_FPU);
2945    INSN(fsave,     f340, ffc0, CF_FPU);
2946    INSN(intouch,   f340, ffc0, CF_ISA_A);
2947    INSN(cpushl,    f428, ff38, CF_ISA_A);
2948    INSN(wddata,    fb00, ff00, CF_ISA_A);
2949    INSN(wdebug,    fbc0, ffc0, CF_ISA_A);
2950#undef INSN
2951}
2952
2953/* ??? Some of this implementation is not exception safe.  We should always
2954   write back the result to memory before setting the condition codes.  */
2955static void disas_m68k_insn(CPUM68KState * env, DisasContext *s)
2956{
2957    uint16_t insn;
2958
2959    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
2960        tcg_gen_debug_insn_start(s->pc);
2961    }
2962
2963    insn = cpu_lduw_code(env, s->pc);
2964    s->pc += 2;
2965
2966    opcode_table[insn](env, s, insn);
2967}
2968
2969/* generate intermediate code for basic block 'tb'.  */
2970static inline void
2971gen_intermediate_code_internal(M68kCPU *cpu, TranslationBlock *tb,
2972                               bool search_pc)
2973{
2974    CPUState *cs = CPU(cpu);
2975    CPUM68KState *env = &cpu->env;
2976    DisasContext dc1, *dc = &dc1;
2977    uint16_t *gen_opc_end;
2978    CPUBreakpoint *bp;
2979    int j, lj;
2980    target_ulong pc_start;
2981    int pc_offset;
2982    int num_insns;
2983    int max_insns;
2984
2985    /* generate intermediate code */
2986    pc_start = tb->pc;
2987
2988    dc->tb = tb;
2989
2990    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
2991
2992    dc->env = env;
2993    dc->is_jmp = DISAS_NEXT;
2994    dc->pc = pc_start;
2995    dc->cc_op = CC_OP_DYNAMIC;
2996    dc->singlestep_enabled = cs->singlestep_enabled;
2997    dc->fpcr = env->fpcr;
2998    dc->user = (env->sr & SR_S) == 0;
2999    dc->is_mem = 0;
3000    dc->done_mac = 0;
3001    lj = -1;
3002    num_insns = 0;
3003    max_insns = tb->cflags & CF_COUNT_MASK;
3004    if (max_insns == 0)
3005        max_insns = CF_COUNT_MASK;
3006
3007    gen_tb_start();
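        /* Translate one instruction at a time until the block ends: on a
           jump, when the op buffer is nearly full, under single-stepping,
           when the code approaches the end of the guest page, or after
           max_insns instructions.  */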
3008    do {
3009        pc_offset = dc->pc - pc_start;
3010        gen_throws_exception = NULL;
3011        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
3012            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
3013                if (bp->pc == dc->pc) {
3014                    gen_exception(dc, dc->pc, EXCP_DEBUG);
3015                    dc->is_jmp = DISAS_JUMP;
3016                    break;
3017                }
3018            }
3019            if (dc->is_jmp)
3020                break;
3021        }
3022        if (search_pc) {
3023            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
3024            if (lj < j) {
3025                lj++;
3026                while (lj < j)
3027                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
3028            }
3029            tcg_ctx.gen_opc_pc[lj] = dc->pc;
3030            tcg_ctx.gen_opc_instr_start[lj] = 1;
3031            tcg_ctx.gen_opc_icount[lj] = num_insns;
3032        }
3033        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
3034            gen_io_start();
3035        dc->insn_pc = dc->pc;
3036        disas_m68k_insn(env, dc);
3037        num_insns++;
3038    } while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end &&
3039             !cs->singlestep_enabled &&
3040             !singlestep &&
3041             (pc_offset) < (TARGET_PAGE_SIZE - 32) &&
3042             num_insns < max_insns);
3043
3044    if (tb->cflags & CF_LAST_IO)
3045        gen_io_end();
3046    if (unlikely(cs->singlestep_enabled)) {
3047        /* Make sure the pc is updated, and raise a debug exception.  */
3048        if (!dc->is_jmp) {
3049            gen_flush_cc_op(dc);
3050            tcg_gen_movi_i32(QREG_PC, dc->pc);
3051        }
3052        gen_helper_raise_exception(cpu_env, tcg_const_i32(EXCP_DEBUG));
3053    } else {
3054        switch (dc->is_jmp) {
3055        case DISAS_NEXT:
3056            gen_flush_cc_op(dc);
3057            gen_jmp_tb(dc, 0, dc->pc);
3058            break;
3059        default:
3060        case DISAS_JUMP:
3061        case DISAS_UPDATE:
3062            gen_flush_cc_op(dc);
3063            /* indicate that the hash table must be used to find the next TB */
3064            tcg_gen_exit_tb(0);
3065            break;
3066        case DISAS_TB_JUMP:
3067            /* nothing more to generate */
3068            break;
3069        }
3070    }
3071    gen_tb_end(tb, num_insns);
3072    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
3073
3074#ifdef DEBUG_DISAS
3075    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
3076        qemu_log("----------------\n");
3077        qemu_log("IN: %s\n", lookup_symbol(pc_start));
3078        log_target_disas(env, pc_start, dc->pc - pc_start, 0);
3079        qemu_log("\n");
3080    }
3081#endif
3082    if (search_pc) {
3083        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
3084        lj++;
3085        while (lj <= j)
3086            tcg_ctx.gen_opc_instr_start[lj++] = 0;
3087    } else {
3088        tb->size = dc->pc - pc_start;
3089        tb->icount = num_insns;
3090    }
3091
3092    //optimize_flags();
3093    //expand_target_qops();
3094}
3095
3096void gen_intermediate_code(CPUM68KState *env, TranslationBlock *tb)
3097{
3098    gen_intermediate_code_internal(m68k_env_get_cpu(env), tb, false);
3099}
3100
3101void gen_intermediate_code_pc(CPUM68KState *env, TranslationBlock *tb)
3102{
3103    gen_intermediate_code_internal(m68k_env_get_cpu(env), tb, true);
3104}
3105
3106void m68k_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
3107                         int flags)
3108{
3109    M68kCPU *cpu = M68K_CPU(cs);
3110    CPUM68KState *env = &cpu->env;
3111    int i;
3112    uint16_t sr;
3113    CPU_DoubleU u;
3114    for (i = 0; i < 8; i++)
3115      {
3116        u.d = env->fregs[i];
3117        cpu_fprintf (f, "D%d = %08x   A%d = %08x   F%d = %08x%08x (%12g)\n",
3118                     i, env->dregs[i], i, env->aregs[i],
3119                     i, u.l.upper, u.l.lower, *(double *)&u.d);
3120      }
3121    cpu_fprintf (f, "PC = %08x   ", env->pc);
3122    sr = env->sr;
3123    cpu_fprintf (f, "SR = %04x %c%c%c%c%c ", sr, (sr & 0x10) ? 'X' : '-',
3124                 (sr & CCF_N) ? 'N' : '-', (sr & CCF_Z) ? 'Z' : '-',
3125                 (sr & CCF_V) ? 'V' : '-', (sr & CCF_C) ? 'C' : '-');
3126    cpu_fprintf (f, "FPRESULT = %12g\n", *(double *)&env->fp_result);
3127}
3128
3129void restore_state_to_opc(CPUM68KState *env, TranslationBlock *tb, int pc_pos)
3130{
3131    env->pc = tcg_ctx.gen_opc_pc[pc_pos];
3132}
3133