qemu/target/m68k/translate.c
   1/*
   2 *  m68k translation
   3 *
   4 *  Copyright (c) 2005-2007 CodeSourcery
   5 *  Written by Paul Brook
   6 *
   7 * This library is free software; you can redistribute it and/or
   8 * modify it under the terms of the GNU Lesser General Public
   9 * License as published by the Free Software Foundation; either
  10 * version 2.1 of the License, or (at your option) any later version.
  11 *
  12 * This library is distributed in the hope that it will be useful,
  13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  15 * Lesser General Public License for more details.
  16 *
  17 * You should have received a copy of the GNU Lesser General Public
  18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
  19 */
  20
  21#include "qemu/osdep.h"
  22#include "cpu.h"
  23#include "disas/disas.h"
  24#include "exec/exec-all.h"
  25#include "tcg/tcg-op.h"
  26#include "qemu/log.h"
  27#include "qemu/qemu-print.h"
  28#include "exec/cpu_ldst.h"
  29#include "exec/translator.h"
  30
  31#include "exec/helper-proto.h"
  32#include "exec/helper-gen.h"
  33
  34#include "exec/log.h"
  35#include "fpu/softfloat.h"
  36
  37
  38//#define DEBUG_DISPATCH 1
  39
  40#define DEFO32(name, offset) static TCGv QREG_##name;
  41#define DEFO64(name, offset) static TCGv_i64 QREG_##name;
  42#include "qregs.def"
  43#undef DEFO32
  44#undef DEFO64
  45
  46static TCGv_i32 cpu_halted;
  47static TCGv_i32 cpu_exception_index;
  48
  49static char cpu_reg_names[2 * 8 * 3 + 5 * 4];
  50static TCGv cpu_dregs[8];
  51static TCGv cpu_aregs[8];
  52static TCGv_i64 cpu_macc[4];
  53
  54#define REG(insn, pos)  (((insn) >> (pos)) & 7)
  55#define DREG(insn, pos) cpu_dregs[REG(insn, pos)]
  56#define AREG(insn, pos) get_areg(s, REG(insn, pos))
  57#define MACREG(acc)     cpu_macc[acc]
  58#define QREG_SP         get_areg(s, 7)
  59
  60static TCGv NULL_QREG;
  61#define IS_NULL_QREG(t) (t == NULL_QREG)
  62/* Used to distinguish stores from bad addressing modes.  */
  63static TCGv store_dummy;
  64
  65#include "exec/gen-icount.h"
  66
  67void m68k_tcg_init(void)
  68{
  69    char *p;
  70    int i;
  71
  72#define DEFO32(name, offset) \
  73    QREG_##name = tcg_global_mem_new_i32(cpu_env, \
  74        offsetof(CPUM68KState, offset), #name);
  75#define DEFO64(name, offset) \
  76    QREG_##name = tcg_global_mem_new_i64(cpu_env, \
  77        offsetof(CPUM68KState, offset), #name);
  78#include "qregs.def"
  79#undef DEFO32
  80#undef DEFO64
  81
  82    cpu_halted = tcg_global_mem_new_i32(cpu_env,
  83                                        -offsetof(M68kCPU, env) +
  84                                        offsetof(CPUState, halted), "HALTED");
  85    cpu_exception_index = tcg_global_mem_new_i32(cpu_env,
  86                                                 -offsetof(M68kCPU, env) +
  87                                                 offsetof(CPUState, exception_index),
  88                                                 "EXCEPTION");
  89
  90    p = cpu_reg_names;
  91    for (i = 0; i < 8; i++) {
  92        sprintf(p, "D%d", i);
  93        cpu_dregs[i] = tcg_global_mem_new(cpu_env,
  94                                          offsetof(CPUM68KState, dregs[i]), p);
  95        p += 3;
  96        sprintf(p, "A%d", i);
  97        cpu_aregs[i] = tcg_global_mem_new(cpu_env,
  98                                          offsetof(CPUM68KState, aregs[i]), p);
  99        p += 3;
 100    }
 101    for (i = 0; i < 4; i++) {
 102        sprintf(p, "ACC%d", i);
 103        cpu_macc[i] = tcg_global_mem_new_i64(cpu_env,
 104                                         offsetof(CPUM68KState, macc[i]), p);
 105        p += 5;
 106    }
 107
 108    NULL_QREG = tcg_global_mem_new(cpu_env, -4, "NULL");
 109    store_dummy = tcg_global_mem_new(cpu_env, -8, "NULL");
 110}
 111
 112/* internal defines */
 113typedef struct DisasContext {
 114    DisasContextBase base;
 115    CPUM68KState *env;
 116    target_ulong pc;
 117    CCOp cc_op; /* Current CC operation */
 118    int cc_op_synced;
 119    TCGv_i64 mactmp;
 120    int done_mac;
 121    int writeback_mask;
 122    TCGv writeback[8];
 123#define MAX_TO_RELEASE 8
 124    int release_count;
 125    TCGv release[MAX_TO_RELEASE];
 126    bool ss_active;
 127} DisasContext;
 128
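/*
 * Temporaries allocated while translating a single instruction (mostly by
 * the effective-address helpers below) can be registered with
 * mark_to_release() and are then freed in bulk by do_release().
 */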
 129static void init_release_array(DisasContext *s)
 130{
 131#ifdef CONFIG_DEBUG_TCG
 132    memset(s->release, 0, sizeof(s->release));
 133#endif
 134    s->release_count = 0;
 135}
 136
 137static void do_release(DisasContext *s)
 138{
 139    int i;
 140    for (i = 0; i < s->release_count; i++) {
 141        tcg_temp_free(s->release[i]);
 142    }
 143    init_release_array(s);
 144}
 145
 146static TCGv mark_to_release(DisasContext *s, TCGv tmp)
 147{
 148    g_assert(s->release_count < MAX_TO_RELEASE);
 149    return s->release[s->release_count++] = tmp;
 150}
 151
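/*
 * Return address register REGNO, using any value still pending in the
 * writeback buffer for this instruction.
 */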
 152static TCGv get_areg(DisasContext *s, unsigned regno)
 153{
 154    if (s->writeback_mask & (1 << regno)) {
 155        return s->writeback[regno];
 156    } else {
 157        return cpu_aregs[regno];
 158    }
 159}
 160
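/*
 * Record a new value for address register REGNO, to be committed to
 * cpu_aregs[] by do_writebacks().  If GIVE_TEMP, ownership of the
 * temporary VAL passes to the writeback buffer; otherwise VAL is copied.
 */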
 161static void delay_set_areg(DisasContext *s, unsigned regno,
 162                           TCGv val, bool give_temp)
 163{
 164    if (s->writeback_mask & (1 << regno)) {
 165        if (give_temp) {
 166            tcg_temp_free(s->writeback[regno]);
 167            s->writeback[regno] = val;
 168        } else {
 169            tcg_gen_mov_i32(s->writeback[regno], val);
 170        }
 171    } else {
 172        s->writeback_mask |= 1 << regno;
 173        if (give_temp) {
 174            s->writeback[regno] = val;
 175        } else {
 176            TCGv tmp = tcg_temp_new();
 177            s->writeback[regno] = tmp;
 178            tcg_gen_mov_i32(tmp, val);
 179        }
 180    }
 181}
 182
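/* Commit all pending address register updates and free their temporaries. */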
 183static void do_writebacks(DisasContext *s)
 184{
 185    unsigned mask = s->writeback_mask;
 186    if (mask) {
 187        s->writeback_mask = 0;
 188        do {
 189            unsigned regno = ctz32(mask);
 190            tcg_gen_mov_i32(cpu_aregs[regno], s->writeback[regno]);
 191            tcg_temp_free(s->writeback[regno]);
 192            mask &= mask - 1;
 193        } while (mask);
 194    }
 195}
 196
 197static bool is_singlestepping(DisasContext *s)
 198{
 199    /*
 200     * Return true if we are singlestepping either because of
 201     * architectural singlestep or QEMU gdbstub singlestep. This does
 202     * not include the command line '-singlestep' mode which is rather
 203     * misnamed as it only means "one instruction per TB" and doesn't
 204     * affect the code we generate.
 205     */
 206    return s->base.singlestep_enabled || s->ss_active;
 207}
 208
 209/* is_jmp field values */
 210#define DISAS_JUMP      DISAS_TARGET_0 /* only pc was modified dynamically */
 211#define DISAS_EXIT      DISAS_TARGET_1 /* cpu state was modified dynamically */
 212
 213#if defined(CONFIG_USER_ONLY)
 214#define IS_USER(s) 1
 215#else
 216#define IS_USER(s)   (!(s->base.tb->flags & TB_FLAGS_MSR_S))
 217#define SFC_INDEX(s) ((s->base.tb->flags & TB_FLAGS_SFC_S) ? \
 218                      MMU_KERNEL_IDX : MMU_USER_IDX)
 219#define DFC_INDEX(s) ((s->base.tb->flags & TB_FLAGS_DFC_S) ? \
 220                      MMU_KERNEL_IDX : MMU_USER_IDX)
 221#endif
 222
 223typedef void (*disas_proc)(CPUM68KState *env, DisasContext *s, uint16_t insn);
 224
 225#ifdef DEBUG_DISPATCH
 226#define DISAS_INSN(name)                                                \
 227    static void real_disas_##name(CPUM68KState *env, DisasContext *s,   \
 228                                  uint16_t insn);                       \
 229    static void disas_##name(CPUM68KState *env, DisasContext *s,        \
 230                             uint16_t insn)                             \
 231    {                                                                   \
 232        qemu_log("Dispatch " #name "\n");                               \
 233        real_disas_##name(env, s, insn);                                \
 234    }                                                                   \
 235    static void real_disas_##name(CPUM68KState *env, DisasContext *s,   \
 236                                  uint16_t insn)
 237#else
 238#define DISAS_INSN(name)                                                \
 239    static void disas_##name(CPUM68KState *env, DisasContext *s,        \
 240                             uint16_t insn)
 241#endif
 242
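/*
 * The condition-code flags that hold live data in the QREG_CC_* globals
 * for each CC_OP; set_cc_op() discards whatever becomes dead on a transition.
 */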
 243static const uint8_t cc_op_live[CC_OP_NB] = {
 244    [CC_OP_DYNAMIC] = CCF_C | CCF_V | CCF_Z | CCF_N | CCF_X,
 245    [CC_OP_FLAGS] = CCF_C | CCF_V | CCF_Z | CCF_N | CCF_X,
 246    [CC_OP_ADDB ... CC_OP_ADDL] = CCF_X | CCF_N | CCF_V,
 247    [CC_OP_SUBB ... CC_OP_SUBL] = CCF_X | CCF_N | CCF_V,
 248    [CC_OP_CMPB ... CC_OP_CMPL] = CCF_X | CCF_N | CCF_V,
 249    [CC_OP_LOGIC] = CCF_X | CCF_N
 250};
 251
 252static void set_cc_op(DisasContext *s, CCOp op)
 253{
 254    CCOp old_op = s->cc_op;
 255    int dead;
 256
 257    if (old_op == op) {
 258        return;
 259    }
 260    s->cc_op = op;
 261    s->cc_op_synced = 0;
 262
 263    /*
 264     * Discard CC computation that will no longer be used.
 265     * Note that X and N are never dead.
 266     */
 267    dead = cc_op_live[old_op] & ~cc_op_live[op];
 268    if (dead & CCF_C) {
 269        tcg_gen_discard_i32(QREG_CC_C);
 270    }
 271    if (dead & CCF_Z) {
 272        tcg_gen_discard_i32(QREG_CC_Z);
 273    }
 274    if (dead & CCF_V) {
 275        tcg_gen_discard_i32(QREG_CC_V);
 276    }
 277}
 278
 279/* Update the CPU env CC_OP state.  */
 280static void update_cc_op(DisasContext *s)
 281{
 282    if (!s->cc_op_synced) {
 283        s->cc_op_synced = 1;
 284        tcg_gen_movi_i32(QREG_CC_OP, s->cc_op);
 285    }
 286}
 287
 288/* Generate a jump to an immediate address.  */
 289static void gen_jmp_im(DisasContext *s, uint32_t dest)
 290{
 291    update_cc_op(s);
 292    tcg_gen_movi_i32(QREG_PC, dest);
 293    s->base.is_jmp = DISAS_JUMP;
 294}
 295
 296/* Generate a jump to the address in qreg DEST.  */
 297static void gen_jmp(DisasContext *s, TCGv dest)
 298{
 299    update_cc_op(s);
 300    tcg_gen_mov_i32(QREG_PC, dest);
 301    s->base.is_jmp = DISAS_JUMP;
 302}
 303
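/* Raise exception NR; QREG_PC is expected to already hold the fault address. */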
 304static void gen_raise_exception(int nr)
 305{
 306    TCGv_i32 tmp;
 307
 308    tmp = tcg_const_i32(nr);
 309    gen_helper_raise_exception(cpu_env, tmp);
 310    tcg_temp_free_i32(tmp);
 311}
 312
 313static void gen_exception(DisasContext *s, uint32_t dest, int nr)
 314{
 315    update_cc_op(s);
 316    tcg_gen_movi_i32(QREG_PC, dest);
 317
 318    gen_raise_exception(nr);
 319
 320    s->base.is_jmp = DISAS_NORETURN;
 321}
 322
 323static void gen_singlestep_exception(DisasContext *s)
 324{
 325    /*
 326     * Generate the right kind of exception for singlestep, which is
 327     * either the architectural singlestep or EXCP_DEBUG for QEMU's
 328     * gdb singlestepping.
 329     */
 330    if (s->ss_active) {
 331        gen_raise_exception(EXCP_TRACE);
 332    } else {
 333        gen_raise_exception(EXCP_DEBUG);
 334    }
 335}
 336
 337static inline void gen_addr_fault(DisasContext *s)
 338{
 339    gen_exception(s, s->base.pc_next, EXCP_ADDRESS);
 340}
 341
 342/*
  343 * Generate a load from the specified address.  Narrow values are
  344 * sign- or zero-extended to full register width according to SIGN.
 345 */
 346static inline TCGv gen_load(DisasContext *s, int opsize, TCGv addr,
 347                            int sign, int index)
 348{
 349    TCGv tmp;
 350    tmp = tcg_temp_new_i32();
 351    switch(opsize) {
 352    case OS_BYTE:
 353        if (sign)
 354            tcg_gen_qemu_ld8s(tmp, addr, index);
 355        else
 356            tcg_gen_qemu_ld8u(tmp, addr, index);
 357        break;
 358    case OS_WORD:
 359        if (sign)
 360            tcg_gen_qemu_ld16s(tmp, addr, index);
 361        else
 362            tcg_gen_qemu_ld16u(tmp, addr, index);
 363        break;
 364    case OS_LONG:
 365        tcg_gen_qemu_ld32u(tmp, addr, index);
 366        break;
 367    default:
 368        g_assert_not_reached();
 369    }
 370    return tmp;
 371}
 372
 373/* Generate a store.  */
 374static inline void gen_store(DisasContext *s, int opsize, TCGv addr, TCGv val,
 375                             int index)
 376{
 377    switch(opsize) {
 378    case OS_BYTE:
 379        tcg_gen_qemu_st8(val, addr, index);
 380        break;
 381    case OS_WORD:
 382        tcg_gen_qemu_st16(val, addr, index);
 383        break;
 384    case OS_LONG:
 385        tcg_gen_qemu_st32(val, addr, index);
 386        break;
 387    default:
 388        g_assert_not_reached();
 389    }
 390}
 391
 392typedef enum {
 393    EA_STORE,
 394    EA_LOADU,
 395    EA_LOADS
 396} ea_what;
 397
 398/*
  399 * Generate an unsigned load for EA_LOADU, a signed load for EA_LOADS,
  400 * or a store of VAL for EA_STORE.
 401 */
 402static TCGv gen_ldst(DisasContext *s, int opsize, TCGv addr, TCGv val,
 403                     ea_what what, int index)
 404{
 405    if (what == EA_STORE) {
 406        gen_store(s, opsize, addr, val, index);
 407        return store_dummy;
 408    } else {
 409        return mark_to_release(s, gen_load(s, opsize, addr,
 410                                           what == EA_LOADS, index));
 411    }
 412}
 413
 414/* Read a 16-bit immediate constant */
 415static inline uint16_t read_im16(CPUM68KState *env, DisasContext *s)
 416{
 417    uint16_t im;
 418    im = translator_lduw(env, s->pc);
 419    s->pc += 2;
 420    return im;
 421}
 422
  423/* Read an 8-bit immediate constant (the low byte of a 16-bit extension word) */
 424static inline uint8_t read_im8(CPUM68KState *env, DisasContext *s)
 425{
 426    return read_im16(env, s);
 427}
 428
 429/* Read a 32-bit immediate constant.  */
 430static inline uint32_t read_im32(CPUM68KState *env, DisasContext *s)
 431{
 432    uint32_t im;
 433    im = read_im16(env, s) << 16;
 434    im |= 0xffff & read_im16(env, s);
 435    return im;
 436}
 437
 438/* Read a 64-bit immediate constant.  */
 439static inline uint64_t read_im64(CPUM68KState *env, DisasContext *s)
 440{
 441    uint64_t im;
 442    im = (uint64_t)read_im32(env, s) << 32;
 443    im |= (uint64_t)read_im32(env, s);
 444    return im;
 445}
 446
  447/* Calculate an address index.  */
 448static TCGv gen_addr_index(DisasContext *s, uint16_t ext, TCGv tmp)
 449{
 450    TCGv add;
 451    int scale;
 452
 453    add = (ext & 0x8000) ? AREG(ext, 12) : DREG(ext, 12);
 454    if ((ext & 0x800) == 0) {
 455        tcg_gen_ext16s_i32(tmp, add);
 456        add = tmp;
 457    }
 458    scale = (ext >> 9) & 3;
 459    if (scale != 0) {
 460        tcg_gen_shli_i32(tmp, add, scale);
 461        add = tmp;
 462    }
 463    return add;
 464}
 465
 466/*
 467 * Handle a base + index + displacement effective address.
 468 * A NULL_QREG base means pc-relative.
 469 */
 470static TCGv gen_lea_indexed(CPUM68KState *env, DisasContext *s, TCGv base)
 471{
 472    uint32_t offset;
 473    uint16_t ext;
 474    TCGv add;
 475    TCGv tmp;
 476    uint32_t bd, od;
 477
 478    offset = s->pc;
 479    ext = read_im16(env, s);
 480
 481    if ((ext & 0x800) == 0 && !m68k_feature(s->env, M68K_FEATURE_WORD_INDEX))
 482        return NULL_QREG;
 483
 484    if (m68k_feature(s->env, M68K_FEATURE_M68000) &&
 485        !m68k_feature(s->env, M68K_FEATURE_SCALED_INDEX)) {
 486        ext &= ~(3 << 9);
 487    }
 488
 489    if (ext & 0x100) {
 490        /* full extension word format */
 491        if (!m68k_feature(s->env, M68K_FEATURE_EXT_FULL))
 492            return NULL_QREG;
 493
 494        if ((ext & 0x30) > 0x10) {
 495            /* base displacement */
 496            if ((ext & 0x30) == 0x20) {
 497                bd = (int16_t)read_im16(env, s);
 498            } else {
 499                bd = read_im32(env, s);
 500            }
 501        } else {
 502            bd = 0;
 503        }
 504        tmp = mark_to_release(s, tcg_temp_new());
 505        if ((ext & 0x44) == 0) {
 506            /* pre-index */
 507            add = gen_addr_index(s, ext, tmp);
 508        } else {
 509            add = NULL_QREG;
 510        }
 511        if ((ext & 0x80) == 0) {
 512            /* base not suppressed */
 513            if (IS_NULL_QREG(base)) {
 514                base = mark_to_release(s, tcg_const_i32(offset + bd));
 515                bd = 0;
 516            }
 517            if (!IS_NULL_QREG(add)) {
 518                tcg_gen_add_i32(tmp, add, base);
 519                add = tmp;
 520            } else {
 521                add = base;
 522            }
 523        }
 524        if (!IS_NULL_QREG(add)) {
 525            if (bd != 0) {
 526                tcg_gen_addi_i32(tmp, add, bd);
 527                add = tmp;
 528            }
 529        } else {
 530            add = mark_to_release(s, tcg_const_i32(bd));
 531        }
 532        if ((ext & 3) != 0) {
 533            /* memory indirect */
 534            base = mark_to_release(s, gen_load(s, OS_LONG, add, 0, IS_USER(s)));
 535            if ((ext & 0x44) == 4) {
 536                add = gen_addr_index(s, ext, tmp);
 537                tcg_gen_add_i32(tmp, add, base);
 538                add = tmp;
 539            } else {
 540                add = base;
 541            }
 542            if ((ext & 3) > 1) {
 543                /* outer displacement */
 544                if ((ext & 3) == 2) {
 545                    od = (int16_t)read_im16(env, s);
 546                } else {
 547                    od = read_im32(env, s);
 548                }
 549            } else {
 550                od = 0;
 551            }
 552            if (od != 0) {
 553                tcg_gen_addi_i32(tmp, add, od);
 554                add = tmp;
 555            }
 556        }
 557    } else {
 558        /* brief extension word format */
 559        tmp = mark_to_release(s, tcg_temp_new());
 560        add = gen_addr_index(s, ext, tmp);
 561        if (!IS_NULL_QREG(base)) {
 562            tcg_gen_add_i32(tmp, add, base);
 563            if ((int8_t)ext)
 564                tcg_gen_addi_i32(tmp, tmp, (int8_t)ext);
 565        } else {
 566            tcg_gen_addi_i32(tmp, add, offset + (int8_t)ext);
 567        }
 568        add = tmp;
 569    }
 570    return add;
 571}
 572
 573/* Sign or zero extend a value.  */
 574
 575static inline void gen_ext(TCGv res, TCGv val, int opsize, int sign)
 576{
 577    switch (opsize) {
 578    case OS_BYTE:
 579        if (sign) {
 580            tcg_gen_ext8s_i32(res, val);
 581        } else {
 582            tcg_gen_ext8u_i32(res, val);
 583        }
 584        break;
 585    case OS_WORD:
 586        if (sign) {
 587            tcg_gen_ext16s_i32(res, val);
 588        } else {
 589            tcg_gen_ext16u_i32(res, val);
 590        }
 591        break;
 592    case OS_LONG:
 593        tcg_gen_mov_i32(res, val);
 594        break;
 595    default:
 596        g_assert_not_reached();
 597    }
 598}
 599
 600/* Evaluate all the CC flags.  */
 601
 602static void gen_flush_flags(DisasContext *s)
 603{
 604    TCGv t0, t1;
 605
 606    switch (s->cc_op) {
 607    case CC_OP_FLAGS:
 608        return;
 609
 610    case CC_OP_ADDB:
 611    case CC_OP_ADDW:
 612    case CC_OP_ADDL:
 613        tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
 614        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
 615        /* Compute signed overflow for addition.  */
 616        t0 = tcg_temp_new();
 617        t1 = tcg_temp_new();
 618        tcg_gen_sub_i32(t0, QREG_CC_N, QREG_CC_V);
 619        gen_ext(t0, t0, s->cc_op - CC_OP_ADDB, 1);
 620        tcg_gen_xor_i32(t1, QREG_CC_N, QREG_CC_V);
 621        tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0);
 622        tcg_temp_free(t0);
 623        tcg_gen_andc_i32(QREG_CC_V, t1, QREG_CC_V);
 624        tcg_temp_free(t1);
 625        break;
 626
 627    case CC_OP_SUBB:
 628    case CC_OP_SUBW:
 629    case CC_OP_SUBL:
 630        tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
 631        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
 632        /* Compute signed overflow for subtraction.  */
 633        t0 = tcg_temp_new();
 634        t1 = tcg_temp_new();
 635        tcg_gen_add_i32(t0, QREG_CC_N, QREG_CC_V);
 636        gen_ext(t0, t0, s->cc_op - CC_OP_SUBB, 1);
 637        tcg_gen_xor_i32(t1, QREG_CC_N, t0);
 638        tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0);
 639        tcg_temp_free(t0);
 640        tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t1);
 641        tcg_temp_free(t1);
 642        break;
 643
 644    case CC_OP_CMPB:
 645    case CC_OP_CMPW:
 646    case CC_OP_CMPL:
 647        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_C, QREG_CC_N, QREG_CC_V);
 648        tcg_gen_sub_i32(QREG_CC_Z, QREG_CC_N, QREG_CC_V);
 649        gen_ext(QREG_CC_Z, QREG_CC_Z, s->cc_op - CC_OP_CMPB, 1);
 650        /* Compute signed overflow for subtraction.  */
 651        t0 = tcg_temp_new();
 652        tcg_gen_xor_i32(t0, QREG_CC_Z, QREG_CC_N);
 653        tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, QREG_CC_N);
 654        tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t0);
 655        tcg_temp_free(t0);
 656        tcg_gen_mov_i32(QREG_CC_N, QREG_CC_Z);
 657        break;
 658
 659    case CC_OP_LOGIC:
 660        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
 661        tcg_gen_movi_i32(QREG_CC_C, 0);
 662        tcg_gen_movi_i32(QREG_CC_V, 0);
 663        break;
 664
 665    case CC_OP_DYNAMIC:
 666        gen_helper_flush_flags(cpu_env, QREG_CC_OP);
 667        s->cc_op_synced = 1;
 668        break;
 669
 670    default:
 671        t0 = tcg_const_i32(s->cc_op);
 672        gen_helper_flush_flags(cpu_env, t0);
 673        tcg_temp_free(t0);
 674        s->cc_op_synced = 1;
 675        break;
 676    }
 677
  678    /* Note that flush_flags has also assigned CC_OP_FLAGS to env->cc_op.  */
 679    s->cc_op = CC_OP_FLAGS;
 680}
 681
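/*
 * As gen_ext(), but returns the result: VAL itself for OS_LONG, otherwise
 * a new temporary (marked for release) holding the extended value.
 */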
 682static inline TCGv gen_extend(DisasContext *s, TCGv val, int opsize, int sign)
 683{
 684    TCGv tmp;
 685
 686    if (opsize == OS_LONG) {
 687        tmp = val;
 688    } else {
 689        tmp = mark_to_release(s, tcg_temp_new());
 690        gen_ext(tmp, val, opsize, sign);
 691    }
 692
 693    return tmp;
 694}
 695
 696static void gen_logic_cc(DisasContext *s, TCGv val, int opsize)
 697{
 698    gen_ext(QREG_CC_N, val, opsize, 1);
 699    set_cc_op(s, CC_OP_LOGIC);
 700}
 701
 702static void gen_update_cc_cmp(DisasContext *s, TCGv dest, TCGv src, int opsize)
 703{
 704    tcg_gen_mov_i32(QREG_CC_N, dest);
 705    tcg_gen_mov_i32(QREG_CC_V, src);
 706    set_cc_op(s, CC_OP_CMPB + opsize);
 707}
 708
 709static void gen_update_cc_add(TCGv dest, TCGv src, int opsize)
 710{
 711    gen_ext(QREG_CC_N, dest, opsize, 1);
 712    tcg_gen_mov_i32(QREG_CC_V, src);
 713}
 714
 715static inline int opsize_bytes(int opsize)
 716{
 717    switch (opsize) {
 718    case OS_BYTE: return 1;
 719    case OS_WORD: return 2;
 720    case OS_LONG: return 4;
 721    case OS_SINGLE: return 4;
 722    case OS_DOUBLE: return 8;
 723    case OS_EXTENDED: return 12;
 724    case OS_PACKED: return 12;
 725    default:
 726        g_assert_not_reached();
 727    }
 728}
 729
 730static inline int insn_opsize(int insn)
 731{
 732    switch ((insn >> 6) & 3) {
 733    case 0: return OS_BYTE;
 734    case 1: return OS_WORD;
 735    case 2: return OS_LONG;
 736    default:
 737        g_assert_not_reached();
 738    }
 739}
 740
 741static inline int ext_opsize(int ext, int pos)
 742{
 743    switch ((ext >> pos) & 7) {
 744    case 0: return OS_LONG;
 745    case 1: return OS_SINGLE;
 746    case 2: return OS_EXTENDED;
 747    case 3: return OS_PACKED;
 748    case 4: return OS_WORD;
 749    case 5: return OS_DOUBLE;
 750    case 6: return OS_BYTE;
 751    default:
 752        g_assert_not_reached();
 753    }
 754}
 755
 756/*
 757 * Assign value to a register.  If the width is less than the register width
 758 * only the low part of the register is set.
 759 */
 760static void gen_partset_reg(int opsize, TCGv reg, TCGv val)
 761{
 762    TCGv tmp;
 763    switch (opsize) {
 764    case OS_BYTE:
 765        tcg_gen_andi_i32(reg, reg, 0xffffff00);
 766        tmp = tcg_temp_new();
 767        tcg_gen_ext8u_i32(tmp, val);
 768        tcg_gen_or_i32(reg, reg, tmp);
 769        tcg_temp_free(tmp);
 770        break;
 771    case OS_WORD:
 772        tcg_gen_andi_i32(reg, reg, 0xffff0000);
 773        tmp = tcg_temp_new();
 774        tcg_gen_ext16u_i32(tmp, val);
 775        tcg_gen_or_i32(reg, reg, tmp);
 776        tcg_temp_free(tmp);
 777        break;
 778    case OS_LONG:
 779    case OS_SINGLE:
 780        tcg_gen_mov_i32(reg, val);
 781        break;
 782    default:
 783        g_assert_not_reached();
 784    }
 785}
 786
 787/*
 788 * Generate code for an "effective address".  Does not adjust the base
 789 * register for autoincrement addressing modes.
 790 */
 791static TCGv gen_lea_mode(CPUM68KState *env, DisasContext *s,
 792                         int mode, int reg0, int opsize)
 793{
 794    TCGv reg;
 795    TCGv tmp;
 796    uint16_t ext;
 797    uint32_t offset;
 798
 799    switch (mode) {
 800    case 0: /* Data register direct.  */
 801    case 1: /* Address register direct.  */
 802        return NULL_QREG;
 803    case 3: /* Indirect postincrement.  */
 804        if (opsize == OS_UNSIZED) {
 805            return NULL_QREG;
 806        }
 807        /* fallthru */
 808    case 2: /* Indirect register */
 809        return get_areg(s, reg0);
  810    case 4: /* Indirect predecrement.  */
 811        if (opsize == OS_UNSIZED) {
 812            return NULL_QREG;
 813        }
 814        reg = get_areg(s, reg0);
 815        tmp = mark_to_release(s, tcg_temp_new());
 816        if (reg0 == 7 && opsize == OS_BYTE &&
 817            m68k_feature(s->env, M68K_FEATURE_M68000)) {
 818            tcg_gen_subi_i32(tmp, reg, 2);
 819        } else {
 820            tcg_gen_subi_i32(tmp, reg, opsize_bytes(opsize));
 821        }
 822        return tmp;
 823    case 5: /* Indirect displacement.  */
 824        reg = get_areg(s, reg0);
 825        tmp = mark_to_release(s, tcg_temp_new());
 826        ext = read_im16(env, s);
 827        tcg_gen_addi_i32(tmp, reg, (int16_t)ext);
 828        return tmp;
 829    case 6: /* Indirect index + displacement.  */
 830        reg = get_areg(s, reg0);
 831        return gen_lea_indexed(env, s, reg);
 832    case 7: /* Other */
 833        switch (reg0) {
 834        case 0: /* Absolute short.  */
 835            offset = (int16_t)read_im16(env, s);
 836            return mark_to_release(s, tcg_const_i32(offset));
 837        case 1: /* Absolute long.  */
 838            offset = read_im32(env, s);
 839            return mark_to_release(s, tcg_const_i32(offset));
 840        case 2: /* pc displacement  */
 841            offset = s->pc;
 842            offset += (int16_t)read_im16(env, s);
 843            return mark_to_release(s, tcg_const_i32(offset));
 844        case 3: /* pc index+displacement.  */
 845            return gen_lea_indexed(env, s, NULL_QREG);
 846        case 4: /* Immediate.  */
 847        default:
 848            return NULL_QREG;
 849        }
 850    }
 851    /* Should never happen.  */
 852    return NULL_QREG;
 853}
 854
 855static TCGv gen_lea(CPUM68KState *env, DisasContext *s, uint16_t insn,
 856                    int opsize)
 857{
 858    int mode = extract32(insn, 3, 3);
 859    int reg0 = REG(insn, 0);
 860    return gen_lea_mode(env, s, mode, reg0, opsize);
 861}
 862
  863/*
  864 * Generate code to load/store a value from/into an EA.  WHAT selects a
  865 * store (EA_STORE), an unsigned load (EA_LOADU) or a sign-extending
  866 * load (EA_LOADS).  ADDRP is non-null for read-modify-write operands.
  867 */
 868static TCGv gen_ea_mode(CPUM68KState *env, DisasContext *s, int mode, int reg0,
 869                        int opsize, TCGv val, TCGv *addrp, ea_what what,
 870                        int index)
 871{
 872    TCGv reg, tmp, result;
 873    int32_t offset;
 874
 875    switch (mode) {
 876    case 0: /* Data register direct.  */
 877        reg = cpu_dregs[reg0];
 878        if (what == EA_STORE) {
 879            gen_partset_reg(opsize, reg, val);
 880            return store_dummy;
 881        } else {
 882            return gen_extend(s, reg, opsize, what == EA_LOADS);
 883        }
 884    case 1: /* Address register direct.  */
 885        reg = get_areg(s, reg0);
 886        if (what == EA_STORE) {
 887            tcg_gen_mov_i32(reg, val);
 888            return store_dummy;
 889        } else {
 890            return gen_extend(s, reg, opsize, what == EA_LOADS);
 891        }
 892    case 2: /* Indirect register */
 893        reg = get_areg(s, reg0);
 894        return gen_ldst(s, opsize, reg, val, what, index);
 895    case 3: /* Indirect postincrement.  */
 896        reg = get_areg(s, reg0);
 897        result = gen_ldst(s, opsize, reg, val, what, index);
 898        if (what == EA_STORE || !addrp) {
 899            TCGv tmp = tcg_temp_new();
 900            if (reg0 == 7 && opsize == OS_BYTE &&
 901                m68k_feature(s->env, M68K_FEATURE_M68000)) {
 902                tcg_gen_addi_i32(tmp, reg, 2);
 903            } else {
 904                tcg_gen_addi_i32(tmp, reg, opsize_bytes(opsize));
 905            }
 906            delay_set_areg(s, reg0, tmp, true);
 907        }
 908        return result;
  909    case 4: /* Indirect predecrement.  */
 910        if (addrp && what == EA_STORE) {
 911            tmp = *addrp;
 912        } else {
 913            tmp = gen_lea_mode(env, s, mode, reg0, opsize);
 914            if (IS_NULL_QREG(tmp)) {
 915                return tmp;
 916            }
 917            if (addrp) {
 918                *addrp = tmp;
 919            }
 920        }
 921        result = gen_ldst(s, opsize, tmp, val, what, index);
 922        if (what == EA_STORE || !addrp) {
 923            delay_set_areg(s, reg0, tmp, false);
 924        }
 925        return result;
 926    case 5: /* Indirect displacement.  */
 927    case 6: /* Indirect index + displacement.  */
 928    do_indirect:
 929        if (addrp && what == EA_STORE) {
 930            tmp = *addrp;
 931        } else {
 932            tmp = gen_lea_mode(env, s, mode, reg0, opsize);
 933            if (IS_NULL_QREG(tmp)) {
 934                return tmp;
 935            }
 936            if (addrp) {
 937                *addrp = tmp;
 938            }
 939        }
 940        return gen_ldst(s, opsize, tmp, val, what, index);
 941    case 7: /* Other */
 942        switch (reg0) {
 943        case 0: /* Absolute short.  */
 944        case 1: /* Absolute long.  */
 945        case 2: /* pc displacement  */
 946        case 3: /* pc index+displacement.  */
 947            goto do_indirect;
 948        case 4: /* Immediate.  */
 949            /* Sign extend values for consistency.  */
 950            switch (opsize) {
 951            case OS_BYTE:
 952                if (what == EA_LOADS) {
 953                    offset = (int8_t)read_im8(env, s);
 954                } else {
 955                    offset = read_im8(env, s);
 956                }
 957                break;
 958            case OS_WORD:
 959                if (what == EA_LOADS) {
 960                    offset = (int16_t)read_im16(env, s);
 961                } else {
 962                    offset = read_im16(env, s);
 963                }
 964                break;
 965            case OS_LONG:
 966                offset = read_im32(env, s);
 967                break;
 968            default:
 969                g_assert_not_reached();
 970            }
 971            return mark_to_release(s, tcg_const_i32(offset));
 972        default:
 973            return NULL_QREG;
 974        }
 975    }
 976    /* Should never happen.  */
 977    return NULL_QREG;
 978}
 979
 980static TCGv gen_ea(CPUM68KState *env, DisasContext *s, uint16_t insn,
 981                   int opsize, TCGv val, TCGv *addrp, ea_what what, int index)
 982{
 983    int mode = extract32(insn, 3, 3);
 984    int reg0 = REG(insn, 0);
 985    return gen_ea_mode(env, s, mode, reg0, opsize, val, addrp, what, index);
 986}
 987
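/* Return a pointer temporary addressing floating-point register FREG in env. */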
 988static TCGv_ptr gen_fp_ptr(int freg)
 989{
 990    TCGv_ptr fp = tcg_temp_new_ptr();
 991    tcg_gen_addi_ptr(fp, cpu_env, offsetof(CPUM68KState, fregs[freg]));
 992    return fp;
 993}
 994
 995static TCGv_ptr gen_fp_result_ptr(void)
 996{
 997    TCGv_ptr fp = tcg_temp_new_ptr();
 998    tcg_gen_addi_ptr(fp, cpu_env, offsetof(CPUM68KState, fp_result));
 999    return fp;
1000}
1001
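/* Copy an 80-bit FP register value: the 16-bit sign/exponent word and the 64-bit mantissa. */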
1002static void gen_fp_move(TCGv_ptr dest, TCGv_ptr src)
1003{
1004    TCGv t32;
1005    TCGv_i64 t64;
1006
1007    t32 = tcg_temp_new();
1008    tcg_gen_ld16u_i32(t32, src, offsetof(FPReg, l.upper));
1009    tcg_gen_st16_i32(t32, dest, offsetof(FPReg, l.upper));
1010    tcg_temp_free(t32);
1011
1012    t64 = tcg_temp_new_i64();
1013    tcg_gen_ld_i64(t64, src, offsetof(FPReg, l.lower));
1014    tcg_gen_st_i64(t64, dest, offsetof(FPReg, l.lower));
1015    tcg_temp_free_i64(t64);
1016}
1017
1018static void gen_load_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp,
1019                        int index)
1020{
1021    TCGv tmp;
1022    TCGv_i64 t64;
1023
1024    t64 = tcg_temp_new_i64();
1025    tmp = tcg_temp_new();
1026    switch (opsize) {
1027    case OS_BYTE:
1028        tcg_gen_qemu_ld8s(tmp, addr, index);
1029        gen_helper_exts32(cpu_env, fp, tmp);
1030        break;
1031    case OS_WORD:
1032        tcg_gen_qemu_ld16s(tmp, addr, index);
1033        gen_helper_exts32(cpu_env, fp, tmp);
1034        break;
1035    case OS_LONG:
1036        tcg_gen_qemu_ld32u(tmp, addr, index);
1037        gen_helper_exts32(cpu_env, fp, tmp);
1038        break;
1039    case OS_SINGLE:
1040        tcg_gen_qemu_ld32u(tmp, addr, index);
1041        gen_helper_extf32(cpu_env, fp, tmp);
1042        break;
1043    case OS_DOUBLE:
1044        tcg_gen_qemu_ld64(t64, addr, index);
1045        gen_helper_extf64(cpu_env, fp, t64);
1046        break;
1047    case OS_EXTENDED:
1048        if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
1049            gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
1050            break;
1051        }
1052        tcg_gen_qemu_ld32u(tmp, addr, index);
1053        tcg_gen_shri_i32(tmp, tmp, 16);
1054        tcg_gen_st16_i32(tmp, fp, offsetof(FPReg, l.upper));
1055        tcg_gen_addi_i32(tmp, addr, 4);
1056        tcg_gen_qemu_ld64(t64, tmp, index);
1057        tcg_gen_st_i64(t64, fp, offsetof(FPReg, l.lower));
1058        break;
1059    case OS_PACKED:
1060        /*
1061         * unimplemented data type on 68040/ColdFire
1062         * FIXME if needed for another FPU
1063         */
1064        gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
1065        break;
1066    default:
1067        g_assert_not_reached();
1068    }
1069    tcg_temp_free(tmp);
1070    tcg_temp_free_i64(t64);
1071}
1072
1073static void gen_store_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp,
1074                         int index)
1075{
1076    TCGv tmp;
1077    TCGv_i64 t64;
1078
1079    t64 = tcg_temp_new_i64();
1080    tmp = tcg_temp_new();
1081    switch (opsize) {
1082    case OS_BYTE:
1083        gen_helper_reds32(tmp, cpu_env, fp);
1084        tcg_gen_qemu_st8(tmp, addr, index);
1085        break;
1086    case OS_WORD:
1087        gen_helper_reds32(tmp, cpu_env, fp);
1088        tcg_gen_qemu_st16(tmp, addr, index);
1089        break;
1090    case OS_LONG:
1091        gen_helper_reds32(tmp, cpu_env, fp);
1092        tcg_gen_qemu_st32(tmp, addr, index);
1093        break;
1094    case OS_SINGLE:
1095        gen_helper_redf32(tmp, cpu_env, fp);
1096        tcg_gen_qemu_st32(tmp, addr, index);
1097        break;
1098    case OS_DOUBLE:
1099        gen_helper_redf64(t64, cpu_env, fp);
1100        tcg_gen_qemu_st64(t64, addr, index);
1101        break;
1102    case OS_EXTENDED:
1103        if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
1104            gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
1105            break;
1106        }
1107        tcg_gen_ld16u_i32(tmp, fp, offsetof(FPReg, l.upper));
1108        tcg_gen_shli_i32(tmp, tmp, 16);
1109        tcg_gen_qemu_st32(tmp, addr, index);
1110        tcg_gen_addi_i32(tmp, addr, 4);
1111        tcg_gen_ld_i64(t64, fp, offsetof(FPReg, l.lower));
1112        tcg_gen_qemu_st64(t64, tmp, index);
1113        break;
1114    case OS_PACKED:
1115        /*
1116         * unimplemented data type on 68040/ColdFire
1117         * FIXME if needed for another FPU
1118         */
1119        gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
1120        break;
1121    default:
1122        g_assert_not_reached();
1123    }
1124    tcg_temp_free(tmp);
1125    tcg_temp_free_i64(t64);
1126}
1127
1128static void gen_ldst_fp(DisasContext *s, int opsize, TCGv addr,
1129                        TCGv_ptr fp, ea_what what, int index)
1130{
1131    if (what == EA_STORE) {
1132        gen_store_fp(s, opsize, addr, fp, index);
1133    } else {
1134        gen_load_fp(s, opsize, addr, fp, index);
1135    }
1136}
1137
1138static int gen_ea_mode_fp(CPUM68KState *env, DisasContext *s, int mode,
1139                          int reg0, int opsize, TCGv_ptr fp, ea_what what,
1140                          int index)
1141{
1142    TCGv reg, addr, tmp;
1143    TCGv_i64 t64;
1144
1145    switch (mode) {
1146    case 0: /* Data register direct.  */
1147        reg = cpu_dregs[reg0];
1148        if (what == EA_STORE) {
1149            switch (opsize) {
1150            case OS_BYTE:
1151            case OS_WORD:
1152            case OS_LONG:
1153                gen_helper_reds32(reg, cpu_env, fp);
1154                break;
1155            case OS_SINGLE:
1156                gen_helper_redf32(reg, cpu_env, fp);
1157                break;
1158            default:
1159                g_assert_not_reached();
1160            }
1161        } else {
1162            tmp = tcg_temp_new();
1163            switch (opsize) {
1164            case OS_BYTE:
1165                tcg_gen_ext8s_i32(tmp, reg);
1166                gen_helper_exts32(cpu_env, fp, tmp);
1167                break;
1168            case OS_WORD:
1169                tcg_gen_ext16s_i32(tmp, reg);
1170                gen_helper_exts32(cpu_env, fp, tmp);
1171                break;
1172            case OS_LONG:
1173                gen_helper_exts32(cpu_env, fp, reg);
1174                break;
1175            case OS_SINGLE:
1176                gen_helper_extf32(cpu_env, fp, reg);
1177                break;
1178            default:
1179                g_assert_not_reached();
1180            }
1181            tcg_temp_free(tmp);
1182        }
1183        return 0;
1184    case 1: /* Address register direct.  */
1185        return -1;
1186    case 2: /* Indirect register */
1187        addr = get_areg(s, reg0);
1188        gen_ldst_fp(s, opsize, addr, fp, what, index);
1189        return 0;
1190    case 3: /* Indirect postincrement.  */
1191        addr = cpu_aregs[reg0];
1192        gen_ldst_fp(s, opsize, addr, fp, what, index);
1193        tcg_gen_addi_i32(addr, addr, opsize_bytes(opsize));
1194        return 0;
 1195    case 4: /* Indirect predecrement.  */
1196        addr = gen_lea_mode(env, s, mode, reg0, opsize);
1197        if (IS_NULL_QREG(addr)) {
1198            return -1;
1199        }
1200        gen_ldst_fp(s, opsize, addr, fp, what, index);
1201        tcg_gen_mov_i32(cpu_aregs[reg0], addr);
1202        return 0;
1203    case 5: /* Indirect displacement.  */
1204    case 6: /* Indirect index + displacement.  */
1205    do_indirect:
1206        addr = gen_lea_mode(env, s, mode, reg0, opsize);
1207        if (IS_NULL_QREG(addr)) {
1208            return -1;
1209        }
1210        gen_ldst_fp(s, opsize, addr, fp, what, index);
1211        return 0;
1212    case 7: /* Other */
1213        switch (reg0) {
1214        case 0: /* Absolute short.  */
1215        case 1: /* Absolute long.  */
1216        case 2: /* pc displacement  */
1217        case 3: /* pc index+displacement.  */
1218            goto do_indirect;
1219        case 4: /* Immediate.  */
1220            if (what == EA_STORE) {
1221                return -1;
1222            }
1223            switch (opsize) {
1224            case OS_BYTE:
1225                tmp = tcg_const_i32((int8_t)read_im8(env, s));
1226                gen_helper_exts32(cpu_env, fp, tmp);
1227                tcg_temp_free(tmp);
1228                break;
1229            case OS_WORD:
1230                tmp = tcg_const_i32((int16_t)read_im16(env, s));
1231                gen_helper_exts32(cpu_env, fp, tmp);
1232                tcg_temp_free(tmp);
1233                break;
1234            case OS_LONG:
1235                tmp = tcg_const_i32(read_im32(env, s));
1236                gen_helper_exts32(cpu_env, fp, tmp);
1237                tcg_temp_free(tmp);
1238                break;
1239            case OS_SINGLE:
1240                tmp = tcg_const_i32(read_im32(env, s));
1241                gen_helper_extf32(cpu_env, fp, tmp);
1242                tcg_temp_free(tmp);
1243                break;
1244            case OS_DOUBLE:
1245                t64 = tcg_const_i64(read_im64(env, s));
1246                gen_helper_extf64(cpu_env, fp, t64);
1247                tcg_temp_free_i64(t64);
1248                break;
1249            case OS_EXTENDED:
1250                if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
1251                    gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
1252                    break;
1253                }
1254                tmp = tcg_const_i32(read_im32(env, s) >> 16);
1255                tcg_gen_st16_i32(tmp, fp, offsetof(FPReg, l.upper));
1256                tcg_temp_free(tmp);
1257                t64 = tcg_const_i64(read_im64(env, s));
1258                tcg_gen_st_i64(t64, fp, offsetof(FPReg, l.lower));
1259                tcg_temp_free_i64(t64);
1260                break;
1261            case OS_PACKED:
1262                /*
1263                 * unimplemented data type on 68040/ColdFire
1264                 * FIXME if needed for another FPU
1265                 */
1266                gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
1267                break;
1268            default:
1269                g_assert_not_reached();
1270            }
1271            return 0;
1272        default:
1273            return -1;
1274        }
1275    }
1276    return -1;
1277}
1278
1279static int gen_ea_fp(CPUM68KState *env, DisasContext *s, uint16_t insn,
1280                       int opsize, TCGv_ptr fp, ea_what what, int index)
1281{
1282    int mode = extract32(insn, 3, 3);
1283    int reg0 = REG(insn, 0);
1284    return gen_ea_mode_fp(env, s, mode, reg0, opsize, fp, what, index);
1285}
1286
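/*
 * A decoded condition: the result is "V1 TCOND V2".  G1/G2 mark values
 * that are TCG globals and therefore must not be freed by free_cond().
 */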
1287typedef struct {
1288    TCGCond tcond;
1289    bool g1;
1290    bool g2;
1291    TCGv v1;
1292    TCGv v2;
1293} DisasCompare;
1294
1295static void gen_cc_cond(DisasCompare *c, DisasContext *s, int cond)
1296{
1297    TCGv tmp, tmp2;
1298    TCGCond tcond;
1299    CCOp op = s->cc_op;
1300
1301    /* The CC_OP_CMP form can handle most normal comparisons directly.  */
1302    if (op == CC_OP_CMPB || op == CC_OP_CMPW || op == CC_OP_CMPL) {
1303        c->g1 = c->g2 = 1;
1304        c->v1 = QREG_CC_N;
1305        c->v2 = QREG_CC_V;
1306        switch (cond) {
1307        case 2: /* HI */
1308        case 3: /* LS */
1309            tcond = TCG_COND_LEU;
1310            goto done;
1311        case 4: /* CC */
1312        case 5: /* CS */
1313            tcond = TCG_COND_LTU;
1314            goto done;
1315        case 6: /* NE */
1316        case 7: /* EQ */
1317            tcond = TCG_COND_EQ;
1318            goto done;
1319        case 10: /* PL */
1320        case 11: /* MI */
1321            c->g1 = c->g2 = 0;
1322            c->v2 = tcg_const_i32(0);
1323            c->v1 = tmp = tcg_temp_new();
1324            tcg_gen_sub_i32(tmp, QREG_CC_N, QREG_CC_V);
1325            gen_ext(tmp, tmp, op - CC_OP_CMPB, 1);
1326            /* fallthru */
1327        case 12: /* GE */
1328        case 13: /* LT */
1329            tcond = TCG_COND_LT;
1330            goto done;
1331        case 14: /* GT */
1332        case 15: /* LE */
1333            tcond = TCG_COND_LE;
1334            goto done;
1335        }
1336    }
1337
1338    c->g1 = 1;
1339    c->g2 = 0;
1340    c->v2 = tcg_const_i32(0);
1341
1342    switch (cond) {
1343    case 0: /* T */
1344    case 1: /* F */
1345        c->v1 = c->v2;
1346        tcond = TCG_COND_NEVER;
1347        goto done;
1348    case 14: /* GT (!(Z || (N ^ V))) */
1349    case 15: /* LE (Z || (N ^ V)) */
1350        /*
1351         * Logic operations clear V, which simplifies LE to (Z || N),
1352         * and since Z and N are co-located, this becomes a normal
1353         * comparison vs N.
1354         */
1355        if (op == CC_OP_LOGIC) {
1356            c->v1 = QREG_CC_N;
1357            tcond = TCG_COND_LE;
1358            goto done;
1359        }
1360        break;
1361    case 12: /* GE (!(N ^ V)) */
1362    case 13: /* LT (N ^ V) */
1363        /* Logic operations clear V, which simplifies this to N.  */
1364        if (op != CC_OP_LOGIC) {
1365            break;
1366        }
1367        /* fallthru */
1368    case 10: /* PL (!N) */
1369    case 11: /* MI (N) */
1370        /* Several cases represent N normally.  */
1371        if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
1372            op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL ||
1373            op == CC_OP_LOGIC) {
1374            c->v1 = QREG_CC_N;
1375            tcond = TCG_COND_LT;
1376            goto done;
1377        }
1378        break;
1379    case 6: /* NE (!Z) */
1380    case 7: /* EQ (Z) */
1381        /* Some cases fold Z into N.  */
1382        if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
1383            op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL ||
1384            op == CC_OP_LOGIC) {
1385            tcond = TCG_COND_EQ;
1386            c->v1 = QREG_CC_N;
1387            goto done;
1388        }
1389        break;
1390    case 4: /* CC (!C) */
1391    case 5: /* CS (C) */
1392        /* Some cases fold C into X.  */
1393        if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
1394            op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL) {
1395            tcond = TCG_COND_NE;
1396            c->v1 = QREG_CC_X;
1397            goto done;
1398        }
1399        /* fallthru */
1400    case 8: /* VC (!V) */
1401    case 9: /* VS (V) */
1402        /* Logic operations clear V and C.  */
1403        if (op == CC_OP_LOGIC) {
1404            tcond = TCG_COND_NEVER;
1405            c->v1 = c->v2;
1406            goto done;
1407        }
1408        break;
1409    }
1410
1411    /* Otherwise, flush flag state to CC_OP_FLAGS.  */
1412    gen_flush_flags(s);
1413
1414    switch (cond) {
1415    case 0: /* T */
1416    case 1: /* F */
1417    default:
1418        /* Invalid, or handled above.  */
1419        abort();
1420    case 2: /* HI (!C && !Z) -> !(C || Z)*/
1421    case 3: /* LS (C || Z) */
1422        c->v1 = tmp = tcg_temp_new();
1423        c->g1 = 0;
1424        tcg_gen_setcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
1425        tcg_gen_or_i32(tmp, tmp, QREG_CC_C);
1426        tcond = TCG_COND_NE;
1427        break;
1428    case 4: /* CC (!C) */
1429    case 5: /* CS (C) */
1430        c->v1 = QREG_CC_C;
1431        tcond = TCG_COND_NE;
1432        break;
1433    case 6: /* NE (!Z) */
1434    case 7: /* EQ (Z) */
1435        c->v1 = QREG_CC_Z;
1436        tcond = TCG_COND_EQ;
1437        break;
1438    case 8: /* VC (!V) */
1439    case 9: /* VS (V) */
1440        c->v1 = QREG_CC_V;
1441        tcond = TCG_COND_LT;
1442        break;
1443    case 10: /* PL (!N) */
1444    case 11: /* MI (N) */
1445        c->v1 = QREG_CC_N;
1446        tcond = TCG_COND_LT;
1447        break;
1448    case 12: /* GE (!(N ^ V)) */
1449    case 13: /* LT (N ^ V) */
1450        c->v1 = tmp = tcg_temp_new();
1451        c->g1 = 0;
1452        tcg_gen_xor_i32(tmp, QREG_CC_N, QREG_CC_V);
1453        tcond = TCG_COND_LT;
1454        break;
1455    case 14: /* GT (!(Z || (N ^ V))) */
1456    case 15: /* LE (Z || (N ^ V)) */
1457        c->v1 = tmp = tcg_temp_new();
1458        c->g1 = 0;
1459        tcg_gen_setcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
1460        tcg_gen_neg_i32(tmp, tmp);
1461        tmp2 = tcg_temp_new();
1462        tcg_gen_xor_i32(tmp2, QREG_CC_N, QREG_CC_V);
1463        tcg_gen_or_i32(tmp, tmp, tmp2);
1464        tcg_temp_free(tmp2);
1465        tcond = TCG_COND_LT;
1466        break;
1467    }
1468
1469 done:
1470    if ((cond & 1) == 0) {
1471        tcond = tcg_invert_cond(tcond);
1472    }
1473    c->tcond = tcond;
1474}
1475
1476static void free_cond(DisasCompare *c)
1477{
1478    if (!c->g1) {
1479        tcg_temp_free(c->v1);
1480    }
1481    if (!c->g2) {
1482        tcg_temp_free(c->v2);
1483    }
1484}
1485
1486static void gen_jmpcc(DisasContext *s, int cond, TCGLabel *l1)
1487{
1488    DisasCompare c;
1489
1490    gen_cc_cond(&c, s, cond);
1491    update_cc_op(s);
1492    tcg_gen_brcond_i32(c.tcond, c.v1, c.v2, l1);
1493    free_cond(&c);
1494}
1495
1496/* Force a TB lookup after an instruction that changes the CPU state.  */
1497static void gen_exit_tb(DisasContext *s)
1498{
1499    update_cc_op(s);
1500    tcg_gen_movi_i32(QREG_PC, s->pc);
1501    s->base.is_jmp = DISAS_EXIT;
1502}
1503
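/*
 * Load (SRC_EA) or store (DEST_EA) an operand via gen_ea(); on an invalid
 * effective address, raise an address error and return from the current
 * DISAS_INSN.
 */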
1504#define SRC_EA(env, result, opsize, op_sign, addrp) do {                \
1505        result = gen_ea(env, s, insn, opsize, NULL_QREG, addrp,         \
1506                        op_sign ? EA_LOADS : EA_LOADU, IS_USER(s));     \
1507        if (IS_NULL_QREG(result)) {                                     \
1508            gen_addr_fault(s);                                          \
1509            return;                                                     \
1510        }                                                               \
1511    } while (0)
1512
1513#define DEST_EA(env, insn, opsize, val, addrp) do {                     \
1514        TCGv ea_result = gen_ea(env, s, insn, opsize, val, addrp,       \
1515                                EA_STORE, IS_USER(s));                  \
1516        if (IS_NULL_QREG(ea_result)) {                                  \
1517            gen_addr_fault(s);                                          \
1518            return;                                                     \
1519        }                                                               \
1520    } while (0)
1521
1522/* Generate a jump to an immediate address.  */
1523static void gen_jmp_tb(DisasContext *s, int n, uint32_t dest)
1524{
1525    if (unlikely(is_singlestepping(s))) {
1526        update_cc_op(s);
1527        tcg_gen_movi_i32(QREG_PC, dest);
1528        gen_singlestep_exception(s);
1529    } else if (translator_use_goto_tb(&s->base, dest)) {
1530        tcg_gen_goto_tb(n);
1531        tcg_gen_movi_i32(QREG_PC, dest);
1532        tcg_gen_exit_tb(s->base.tb, n);
1533    } else {
1534        gen_jmp_im(s, dest);
1535        tcg_gen_exit_tb(NULL, 0);
1536    }
1537    s->base.is_jmp = DISAS_NORETURN;
1538}
1539
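/*
 * Scc: set the destination byte to 0xff if the condition holds, otherwise
 * to 0x00 (setcond produces 0/1, which is then negated).
 */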
1540DISAS_INSN(scc)
1541{
1542    DisasCompare c;
1543    int cond;
1544    TCGv tmp;
1545
1546    cond = (insn >> 8) & 0xf;
1547    gen_cc_cond(&c, s, cond);
1548
1549    tmp = tcg_temp_new();
1550    tcg_gen_setcond_i32(c.tcond, tmp, c.v1, c.v2);
1551    free_cond(&c);
1552
1553    tcg_gen_neg_i32(tmp, tmp);
1554    DEST_EA(env, insn, OS_BYTE, tmp, NULL);
1555    tcg_temp_free(tmp);
1556}
1557
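/*
 * DBcc: if the condition is false, decrement the low word of Dn and,
 * unless the counter is now -1, branch back to the displacement target.
 */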
1558DISAS_INSN(dbcc)
1559{
1560    TCGLabel *l1;
1561    TCGv reg;
1562    TCGv tmp;
1563    int16_t offset;
1564    uint32_t base;
1565
1566    reg = DREG(insn, 0);
1567    base = s->pc;
1568    offset = (int16_t)read_im16(env, s);
1569    l1 = gen_new_label();
1570    gen_jmpcc(s, (insn >> 8) & 0xf, l1);
1571
1572    tmp = tcg_temp_new();
1573    tcg_gen_ext16s_i32(tmp, reg);
1574    tcg_gen_addi_i32(tmp, tmp, -1);
1575    gen_partset_reg(OS_WORD, reg, tmp);
1576    tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, -1, l1);
1577    gen_jmp_tb(s, 1, base + offset);
1578    gen_set_label(l1);
1579    gen_jmp_tb(s, 0, s->pc);
1580}
1581
1582DISAS_INSN(undef_mac)
1583{
1584    gen_exception(s, s->base.pc_next, EXCP_LINEA);
1585}
1586
1587DISAS_INSN(undef_fpu)
1588{
1589    gen_exception(s, s->base.pc_next, EXCP_LINEF);
1590}
1591
1592DISAS_INSN(undef)
1593{
1594    /*
1595     * ??? This covers both instructions that are as yet unimplemented
1596     * for the 680x0 series, and those that are implemented but
1597     * actually illegal for CPU32 or pre-68020.
1598     */
1599    qemu_log_mask(LOG_UNIMP, "Illegal instruction: %04x @ %08x\n",
1600                  insn, s->base.pc_next);
1601    gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
1602}
1603
1604DISAS_INSN(mulw)
1605{
1606    TCGv reg;
1607    TCGv tmp;
1608    TCGv src;
1609    int sign;
1610
1611    sign = (insn & 0x100) != 0;
1612    reg = DREG(insn, 9);
1613    tmp = tcg_temp_new();
1614    if (sign)
1615        tcg_gen_ext16s_i32(tmp, reg);
1616    else
1617        tcg_gen_ext16u_i32(tmp, reg);
1618    SRC_EA(env, src, OS_WORD, sign, NULL);
1619    tcg_gen_mul_i32(tmp, tmp, src);
1620    tcg_gen_mov_i32(reg, tmp);
1621    gen_logic_cc(s, tmp, OS_LONG);
1622    tcg_temp_free(tmp);
1623}
1624
1625DISAS_INSN(divw)
1626{
1627    int sign;
1628    TCGv src;
1629    TCGv destr;
1630
1631    /* divX.w <EA>,Dn    32/16 -> 16r:16q */
1632
1633    sign = (insn & 0x100) != 0;
1634
1635    /* dest.l / src.w */
1636
1637    SRC_EA(env, src, OS_WORD, sign, NULL);
1638    destr = tcg_const_i32(REG(insn, 9));
1639    if (sign) {
1640        gen_helper_divsw(cpu_env, destr, src);
1641    } else {
1642        gen_helper_divuw(cpu_env, destr, src);
1643    }
1644    tcg_temp_free(destr);
1645
1646    set_cc_op(s, CC_OP_FLAGS);
1647}
1648
1649DISAS_INSN(divl)
1650{
1651    TCGv num, reg, den;
1652    int sign;
1653    uint16_t ext;
1654
1655    ext = read_im16(env, s);
1656
1657    sign = (ext & 0x0800) != 0;
1658
1659    if (ext & 0x400) {
1660        if (!m68k_feature(s->env, M68K_FEATURE_QUAD_MULDIV)) {
1661            gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
1662            return;
1663        }
1664
1665        /* divX.l <EA>, Dr:Dq    64/32 -> 32r:32q */
1666
1667        SRC_EA(env, den, OS_LONG, 0, NULL);
1668        num = tcg_const_i32(REG(ext, 12));
1669        reg = tcg_const_i32(REG(ext, 0));
1670        if (sign) {
1671            gen_helper_divsll(cpu_env, num, reg, den);
1672        } else {
1673            gen_helper_divull(cpu_env, num, reg, den);
1674        }
1675        tcg_temp_free(reg);
1676        tcg_temp_free(num);
1677        set_cc_op(s, CC_OP_FLAGS);
1678        return;
1679    }
1680
1681    /* divX.l <EA>, Dq        32/32 -> 32q     */
1682    /* divXl.l <EA>, Dr:Dq    32/32 -> 32r:32q */
1683
1684    SRC_EA(env, den, OS_LONG, 0, NULL);
1685    num = tcg_const_i32(REG(ext, 12));
1686    reg = tcg_const_i32(REG(ext, 0));
1687    if (sign) {
1688        gen_helper_divsl(cpu_env, num, reg, den);
1689    } else {
1690        gen_helper_divul(cpu_env, num, reg, den);
1691    }
1692    tcg_temp_free(reg);
1693    tcg_temp_free(num);
1694
1695    set_cc_op(s, CC_OP_FLAGS);
1696}
1697
1698static void bcd_add(TCGv dest, TCGv src)
1699{
1700    TCGv t0, t1;
1701
1702    /*
1703     * dest10 = dest10 + src10 + X
1704     *
1705     *        t1 = src
1706     *        t2 = t1 + 0x066
1707     *        t3 = t2 + dest + X
1708     *        t4 = t2 ^ dest
1709     *        t5 = t3 ^ t4
1710     *        t6 = ~t5 & 0x110
1711     *        t7 = (t6 >> 2) | (t6 >> 3)
1712     *        return t3 - t7
1713     */
1714
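    /*
     * Worked example (with X = 0): dest = 0x27, src = 0x19 (BCD 27 + 19):
     *        t2 = 0x19 + 0x066          = 0x07f
     *        t3 = 0x07f + 0x27 + 0      = 0x0a6
     *        t4 = 0x07f ^ 0x27          = 0x058
     *        t5 = 0x0a6 ^ 0x058         = 0x0fe
     *        t6 = ~0x0fe & 0x110        = 0x100  (only the low digit carried)
     *        t7 = (t6 >> 2) | (t6 >> 3) = 0x060
     *        t3 - t7 = 0x0a6 - 0x060    = 0x046, i.e. BCD 46
     */
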
1715    /*
1716     * t1 = (src + 0x066) + dest + X
1717     *    = result with a possible excess 0x6 in each digit
1718     */
1719
1720    t0 = tcg_const_i32(0x066);
1721    tcg_gen_add_i32(t0, t0, src);
1722
1723    t1 = tcg_temp_new();
1724    tcg_gen_add_i32(t1, t0, dest);
1725    tcg_gen_add_i32(t1, t1, QREG_CC_X);
1726
1727    /* we will remove the excess 0x6 where there was no carry */
1728
1729    /*
1730     * t0 = (src + 0x0066) ^ dest
1731     *    = t1 without carries
1732     */
1733
1734    tcg_gen_xor_i32(t0, t0, dest);
1735
1736    /*
1737     * extract the carries
1738     * t0 = t0 ^ t1
1739     *    = only the carries
1740     */
1741
1742    tcg_gen_xor_i32(t0, t0, t1);
1743
1744    /*
1745     * generate 0x1 where there is no carry
1746     * and for each 0x10, generate a 0x6
1747     */
1748
1749    tcg_gen_shri_i32(t0, t0, 3);
1750    tcg_gen_not_i32(t0, t0);
1751    tcg_gen_andi_i32(t0, t0, 0x22);
1752    tcg_gen_add_i32(dest, t0, t0);
1753    tcg_gen_add_i32(dest, dest, t0);
1754    tcg_temp_free(t0);
1755
1756    /*
1757     * remove the exceeding 0x6
1758     * for digits that have not generated a carry
1759     */
1760
1761    tcg_gen_sub_i32(dest, t1, dest);
1762    tcg_temp_free(t1);
1763}
1764
1765static void bcd_sub(TCGv dest, TCGv src)
1766{
1767    TCGv t0, t1, t2;
1768
1769    /*
1770     *  dest10 = dest10 - src10 - X
1771     *         = bcd_add(dest + 1 - X, 0x199 - src)
1772     */
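        /*
         * 0x99 - src is the nines' complement of src, so adding it plus one
         * is equivalent to subtracting src; the extra 0x100 places the
         * borrow in bit 8, where bcd_flags() looks for it.
         * e.g. dest = 0x42, src = 0x17, X = 0 yields 0x225: BCD 25 with
         * bit 8 clear, so no borrow.
         */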
1773
1774    /* t0 = 0x066 + (0x199 - src) */
1775
1776    t0 = tcg_temp_new();
1777    tcg_gen_subfi_i32(t0, 0x1ff, src);
1778
1779    /* t1 = t0 + dest + 1 - X */
1780
1781    t1 = tcg_temp_new();
1782    tcg_gen_add_i32(t1, t0, dest);
1783    tcg_gen_addi_i32(t1, t1, 1);
1784    tcg_gen_sub_i32(t1, t1, QREG_CC_X);
1785
1786    /* t2 = t0 ^ dest */
1787
1788    t2 = tcg_temp_new();
1789    tcg_gen_xor_i32(t2, t0, dest);
1790
1791    /* t0 = t1 ^ t2 */
1792
1793    tcg_gen_xor_i32(t0, t1, t2);
1794
1795    /*
1796     * t2 = ~t0 & 0x110
1797     * t0 = (t2 >> 2) | (t2 >> 3)
1798     *
1799     * to fit in 8-bit operands, this is rewritten as:
1800     *
1801     * t2 = ~(t0 >> 3) & 0x22
1802     * t0 = t2 + t2
1803     * t0 = t0 + t2
1804     */
1805
1806    tcg_gen_shri_i32(t2, t0, 3);
1807    tcg_gen_not_i32(t2, t2);
1808    tcg_gen_andi_i32(t2, t2, 0x22);
1809    tcg_gen_add_i32(t0, t2, t2);
1810    tcg_gen_add_i32(t0, t0, t2);
1811    tcg_temp_free(t2);
1812
1813    /* return t1 - t0 */
1814
1815    tcg_gen_sub_i32(dest, t1, t0);
1816    tcg_temp_free(t0);
1817    tcg_temp_free(t1);
1818}
1819
1820static void bcd_flags(TCGv val)
1821{
1822    tcg_gen_andi_i32(QREG_CC_C, val, 0x0ff);
1823    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_C);
1824
1825    tcg_gen_extract_i32(QREG_CC_C, val, 8, 1);
1826
1827    tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);
1828}
1829
1830DISAS_INSN(abcd_reg)
1831{
1832    TCGv src;
1833    TCGv dest;
1834
1835    gen_flush_flags(s); /* !Z is sticky */
1836
1837    src = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);
1838    dest = gen_extend(s, DREG(insn, 9), OS_BYTE, 0);
1839    bcd_add(dest, src);
1840    gen_partset_reg(OS_BYTE, DREG(insn, 9), dest);
1841
1842    bcd_flags(dest);
1843}
1844
1845DISAS_INSN(abcd_mem)
1846{
1847    TCGv src, dest, addr;
1848
1849    gen_flush_flags(s); /* !Z is sticky */
1850
1851    /* Indirect pre-decrement load (mode 4) */
1852
1853    src = gen_ea_mode(env, s, 4, REG(insn, 0), OS_BYTE,
1854                      NULL_QREG, NULL, EA_LOADU, IS_USER(s));
1855    dest = gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE,
1856                       NULL_QREG, &addr, EA_LOADU, IS_USER(s));
1857
1858    bcd_add(dest, src);
1859
1860    gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, dest, &addr,
1861                EA_STORE, IS_USER(s));
1862
1863    bcd_flags(dest);
1864}
1865
1866DISAS_INSN(sbcd_reg)
1867{
1868    TCGv src, dest;
1869
1870    gen_flush_flags(s); /* !Z is sticky */
1871
1872    src = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);
1873    dest = gen_extend(s, DREG(insn, 9), OS_BYTE, 0);
1874
1875    bcd_sub(dest, src);
1876
1877    gen_partset_reg(OS_BYTE, DREG(insn, 9), dest);
1878
1879    bcd_flags(dest);
1880}
1881
1882DISAS_INSN(sbcd_mem)
1883{
1884    TCGv src, dest, addr;
1885
1886    gen_flush_flags(s); /* !Z is sticky */
1887
1888    /* Indirect pre-decrement load (mode 4) */
1889
1890    src = gen_ea_mode(env, s, 4, REG(insn, 0), OS_BYTE,
1891                      NULL_QREG, NULL, EA_LOADU, IS_USER(s));
1892    dest = gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE,
1893                       NULL_QREG, &addr, EA_LOADU, IS_USER(s));
1894
1895    bcd_sub(dest, src);
1896
1897    gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, dest, &addr,
1898                EA_STORE, IS_USER(s));
1899
1900    bcd_flags(dest);
1901}
1902
1903DISAS_INSN(nbcd)
1904{
1905    TCGv src, dest;
1906    TCGv addr;
1907
1908    gen_flush_flags(s); /* !Z is sticky */
1909
1910    SRC_EA(env, src, OS_BYTE, 0, &addr);
1911
1912    dest = tcg_const_i32(0);
1913    bcd_sub(dest, src);
1914
1915    DEST_EA(env, insn, OS_BYTE, dest, &addr);
1916
1917    bcd_flags(dest);
1918
1919    tcg_temp_free(dest);
1920}
1921
1922DISAS_INSN(addsub)
1923{
1924    TCGv reg;
1925    TCGv dest;
1926    TCGv src;
1927    TCGv tmp;
1928    TCGv addr;
1929    int add;
1930    int opsize;
1931
1932    add = (insn & 0x4000) != 0;
1933    opsize = insn_opsize(insn);
1934    reg = gen_extend(s, DREG(insn, 9), opsize, 1);
1935    dest = tcg_temp_new();
1936    if (insn & 0x100) {
1937        SRC_EA(env, tmp, opsize, 1, &addr);
1938        src = reg;
1939    } else {
1940        tmp = reg;
1941        SRC_EA(env, src, opsize, 1, NULL);
1942    }
1943    if (add) {
1944        tcg_gen_add_i32(dest, tmp, src);
1945        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, src);
1946        set_cc_op(s, CC_OP_ADDB + opsize);
1947    } else {
1948        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, tmp, src);
1949        tcg_gen_sub_i32(dest, tmp, src);
1950        set_cc_op(s, CC_OP_SUBB + opsize);
1951    }
1952    gen_update_cc_add(dest, src, opsize);
1953    if (insn & 0x100) {
1954        DEST_EA(env, insn, opsize, dest, &addr);
1955    } else {
1956        gen_partset_reg(opsize, DREG(insn, 9), dest);
1957    }
1958    tcg_temp_free(dest);
1959}
1960
1961/* Reverse the order of the bits in REG.  */
1962DISAS_INSN(bitrev)
1963{
1964    TCGv reg;
1965    reg = DREG(insn, 0);
1966    gen_helper_bitrev(reg, reg);
1967}
1968
1969DISAS_INSN(bitop_reg)
1970{
1971    int opsize;
1972    int op;
1973    TCGv src1;
1974    TCGv src2;
1975    TCGv tmp;
1976    TCGv addr;
1977    TCGv dest;
1978
1979    if ((insn & 0x38) != 0)
1980        opsize = OS_BYTE;
1981    else
1982        opsize = OS_LONG;
1983    op = (insn >> 6) & 3;
1984    SRC_EA(env, src1, opsize, 0, op ? &addr : NULL);
1985
1986    gen_flush_flags(s);
1987    src2 = tcg_temp_new();
1988    if (opsize == OS_BYTE)
1989        tcg_gen_andi_i32(src2, DREG(insn, 9), 7);
1990    else
1991        tcg_gen_andi_i32(src2, DREG(insn, 9), 31);
1992
1993    tmp = tcg_const_i32(1);
1994    tcg_gen_shl_i32(tmp, tmp, src2);
1995    tcg_temp_free(src2);
1996
1997    tcg_gen_and_i32(QREG_CC_Z, src1, tmp);
1998
1999    dest = tcg_temp_new();
2000    switch (op) {
2001    case 1: /* bchg */
2002        tcg_gen_xor_i32(dest, src1, tmp);
2003        break;
2004    case 2: /* bclr */
2005        tcg_gen_andc_i32(dest, src1, tmp);
2006        break;
2007    case 3: /* bset */
2008        tcg_gen_or_i32(dest, src1, tmp);
2009        break;
2010    default: /* btst */
2011        break;
2012    }
2013    tcg_temp_free(tmp);
2014    if (op) {
2015        DEST_EA(env, insn, opsize, dest, &addr);
2016    }
2017    tcg_temp_free(dest);
2018}
2019
2020DISAS_INSN(sats)
2021{
2022    TCGv reg;
2023    reg = DREG(insn, 0);
2024    gen_flush_flags(s);
2025    gen_helper_sats(reg, reg, QREG_CC_V);
2026    gen_logic_cc(s, reg, OS_LONG);
2027}
2028
2029static void gen_push(DisasContext *s, TCGv val)
2030{
2031    TCGv tmp;
2032
2033    tmp = tcg_temp_new();
2034    tcg_gen_subi_i32(tmp, QREG_SP, 4);
2035    gen_store(s, OS_LONG, tmp, val, IS_USER(s));
2036    tcg_gen_mov_i32(QREG_SP, tmp);
2037    tcg_temp_free(tmp);
2038}
2039
2040static TCGv mreg(int reg)
2041{
2042    if (reg < 8) {
2043        /* Dx */
2044        return cpu_dregs[reg];
2045    }
2046    /* Ax */
2047    return cpu_aregs[reg & 7];
2048}
2049
2050DISAS_INSN(movem)
2051{
2052    TCGv addr, incr, tmp, r[16];
2053    int is_load = (insn & 0x0400) != 0;
2054    int opsize = (insn & 0x40) != 0 ? OS_LONG : OS_WORD;
2055    uint16_t mask = read_im16(env, s);
2056    int mode = extract32(insn, 3, 3);
2057    int reg0 = REG(insn, 0);
2058    int i;
2059
2060    tmp = cpu_aregs[reg0];
2061
2062    switch (mode) {
2063    case 0: /* data register direct */
2064    case 1: /* addr register direct */
2065    do_addr_fault:
2066        gen_addr_fault(s);
2067        return;
2068
2069    case 2: /* indirect */
2070        break;
2071
2072    case 3: /* indirect post-increment */
2073        if (!is_load) {
2074            /* post-increment is not allowed */
2075            goto do_addr_fault;
2076        }
2077        break;
2078
2079    case 4: /* indirect pre-decrement */
2080        if (is_load) {
2081            /* pre-decrement is not allowed */
2082            goto do_addr_fault;
2083        }
2084        /*
2085         * We want a bare copy of the address reg, without the pre-decrement
2086         * adjustment that gen_lea would apply.
2087         */
2088        break;
2089
2090    default:
2091        tmp = gen_lea_mode(env, s, mode, reg0, opsize);
2092        if (IS_NULL_QREG(tmp)) {
2093            goto do_addr_fault;
2094        }
2095        break;
2096    }
2097
2098    addr = tcg_temp_new();
2099    tcg_gen_mov_i32(addr, tmp);
2100    incr = tcg_const_i32(opsize_bytes(opsize));
2101
2102    if (is_load) {
2103        /* memory to register */
2104        for (i = 0; i < 16; i++) {
2105            if (mask & (1 << i)) {
2106                r[i] = gen_load(s, opsize, addr, 1, IS_USER(s));
2107                tcg_gen_add_i32(addr, addr, incr);
2108            }
2109        }
2110        for (i = 0; i < 16; i++) {
2111            if (mask & (1 << i)) {
2112                tcg_gen_mov_i32(mreg(i), r[i]);
2113                tcg_temp_free(r[i]);
2114            }
2115        }
2116        if (mode == 3) {
2117            /* post-increment: movem (An)+,X */
2118            tcg_gen_mov_i32(cpu_aregs[reg0], addr);
2119        }
2120    } else {
2121        /* register to memory */
2122        if (mode == 4) {
2123            /* pre-decrement: movem X,-(An) */
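                /*
                 * In pre-decrement mode the register list mask is reversed:
                 * bit 0 selects A7 and bit 15 selects D0, hence the
                 * (mask << i) & 0x8000 test below.
                 */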
2124            for (i = 15; i >= 0; i--) {
2125                if ((mask << i) & 0x8000) {
2126                    tcg_gen_sub_i32(addr, addr, incr);
2127                    if (reg0 + 8 == i &&
2128                        m68k_feature(s->env, M68K_FEATURE_EXT_FULL)) {
2129                        /*
2130                         * M68020+: if the addressing register is the
2131                         * register moved to memory, the value written
2132                         * is the initial value decremented by the size of
2133                         * the operation, regardless of how many actual
2134                         * stores have been performed up to that point.
2135                         * M68000/M68010: the value is the initial value.
2136                         */
2137                        tmp = tcg_temp_new();
2138                        tcg_gen_sub_i32(tmp, cpu_aregs[reg0], incr);
2139                        gen_store(s, opsize, addr, tmp, IS_USER(s));
2140                        tcg_temp_free(tmp);
2141                    } else {
2142                        gen_store(s, opsize, addr, mreg(i), IS_USER(s));
2143                    }
2144                }
2145            }
2146            tcg_gen_mov_i32(cpu_aregs[reg0], addr);
2147        } else {
2148            for (i = 0; i < 16; i++) {
2149                if (mask & (1 << i)) {
2150                    gen_store(s, opsize, addr, mreg(i), IS_USER(s));
2151                    tcg_gen_add_i32(addr, addr, incr);
2152                }
2153            }
2154        }
2155    }
2156
2157    tcg_temp_free(incr);
2158    tcg_temp_free(addr);
2159}
2160
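    /*
     * MOVEP transfers a word or long between a data register and alternate
     * memory bytes (every other byte, high-order byte first), as used to
     * talk to 8-bit peripherals on a 16-bit bus.
     */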
2161DISAS_INSN(movep)
2162{
2163    uint8_t i;
2164    int16_t displ;
2165    TCGv reg;
2166    TCGv addr;
2167    TCGv abuf;
2168    TCGv dbuf;
2169
2170    displ = read_im16(env, s);
2171
2172    addr = AREG(insn, 0);
2173    reg = DREG(insn, 9);
2174
2175    abuf = tcg_temp_new();
2176    tcg_gen_addi_i32(abuf, addr, displ);
2177    dbuf = tcg_temp_new();
2178
2179    if (insn & 0x40) {
2180        i = 4;
2181    } else {
2182        i = 2;
2183    }
2184
2185    if (insn & 0x80) {
2186        for ( ; i > 0 ; i--) {
2187            tcg_gen_shri_i32(dbuf, reg, (i - 1) * 8);
2188            tcg_gen_qemu_st8(dbuf, abuf, IS_USER(s));
2189            if (i > 1) {
2190                tcg_gen_addi_i32(abuf, abuf, 2);
2191            }
2192        }
2193    } else {
2194        for ( ; i > 0 ; i--) {
2195            tcg_gen_qemu_ld8u(dbuf, abuf, IS_USER(s));
2196            tcg_gen_deposit_i32(reg, reg, dbuf, (i - 1) * 8, 8);
2197            if (i > 1) {
2198                tcg_gen_addi_i32(abuf, abuf, 2);
2199            }
2200        }
2201    }
2202    tcg_temp_free(abuf);
2203    tcg_temp_free(dbuf);
2204}
2205
2206DISAS_INSN(bitop_im)
2207{
2208    int opsize;
2209    int op;
2210    TCGv src1;
2211    uint32_t mask;
2212    int bitnum;
2213    TCGv tmp;
2214    TCGv addr;
2215
2216    if ((insn & 0x38) != 0)
2217        opsize = OS_BYTE;
2218    else
2219        opsize = OS_LONG;
2220    op = (insn >> 6) & 3;
2221
2222    bitnum = read_im16(env, s);
2223    if (m68k_feature(s->env, M68K_FEATURE_M68000)) {
2224        if (bitnum & 0xfe00) {
2225            disas_undef(env, s, insn);
2226            return;
2227        }
2228    } else {
2229        if (bitnum & 0xff00) {
2230            disas_undef(env, s, insn);
2231            return;
2232        }
2233    }
2234
2235    SRC_EA(env, src1, opsize, 0, op ? &addr : NULL);
2236
2237    gen_flush_flags(s);
2238    if (opsize == OS_BYTE)
2239        bitnum &= 7;
2240    else
2241        bitnum &= 31;
2242    mask = 1 << bitnum;
2243
2244    tcg_gen_andi_i32(QREG_CC_Z, src1, mask);
2245
2246    if (op) {
2247        tmp = tcg_temp_new();
2248        switch (op) {
2249        case 1: /* bchg */
2250            tcg_gen_xori_i32(tmp, src1, mask);
2251            break;
2252        case 2: /* bclr */
2253            tcg_gen_andi_i32(tmp, src1, ~mask);
2254            break;
2255        case 3: /* bset */
2256            tcg_gen_ori_i32(tmp, src1, mask);
2257            break;
2258        default: /* btst */
2259            break;
2260        }
2261        DEST_EA(env, insn, opsize, tmp, &addr);
2262        tcg_temp_free(tmp);
2263    }
2264}
2265
2266static TCGv gen_get_ccr(DisasContext *s)
2267{
2268    TCGv dest;
2269
2270    update_cc_op(s);
2271    dest = tcg_temp_new();
2272    gen_helper_get_ccr(dest, cpu_env);
2273    return dest;
2274}
2275
2276static TCGv gen_get_sr(DisasContext *s)
2277{
2278    TCGv ccr;
2279    TCGv sr;
2280
2281    ccr = gen_get_ccr(s);
2282    sr = tcg_temp_new();
2283    tcg_gen_andi_i32(sr, QREG_SR, 0xffe0);
2284    tcg_gen_or_i32(sr, sr, ccr);
2285    tcg_temp_free(ccr);
2286    return sr;
2287}
2288
2289static void gen_set_sr_im(DisasContext *s, uint16_t val, int ccr_only)
2290{
2291    if (ccr_only) {
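            /*
             * These immediates follow the CC_OP_FLAGS convention used by
             * this translator: C and X are 0/1, N and V live in the sign
             * bit of their register, and Z is a value that is zero iff
             * the Z flag is set.
             */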
2292        tcg_gen_movi_i32(QREG_CC_C, val & CCF_C ? 1 : 0);
2293        tcg_gen_movi_i32(QREG_CC_V, val & CCF_V ? -1 : 0);
2294        tcg_gen_movi_i32(QREG_CC_Z, val & CCF_Z ? 0 : 1);
2295        tcg_gen_movi_i32(QREG_CC_N, val & CCF_N ? -1 : 0);
2296        tcg_gen_movi_i32(QREG_CC_X, val & CCF_X ? 1 : 0);
2297    } else {
2298        TCGv sr = tcg_const_i32(val);
2299        gen_helper_set_sr(cpu_env, sr);
2300        tcg_temp_free(sr);
2301    }
2302    set_cc_op(s, CC_OP_FLAGS);
2303}
2304
2305static void gen_set_sr(DisasContext *s, TCGv val, int ccr_only)
2306{
2307    if (ccr_only) {
2308        gen_helper_set_ccr(cpu_env, val);
2309    } else {
2310        gen_helper_set_sr(cpu_env, val);
2311    }
2312    set_cc_op(s, CC_OP_FLAGS);
2313}
2314
2315static void gen_move_to_sr(CPUM68KState *env, DisasContext *s, uint16_t insn,
2316                           bool ccr_only)
2317{
2318    if ((insn & 0x3f) == 0x3c) {
2319        uint16_t val;
2320        val = read_im16(env, s);
2321        gen_set_sr_im(s, val, ccr_only);
2322    } else {
2323        TCGv src;
2324        SRC_EA(env, src, OS_WORD, 0, NULL);
2325        gen_set_sr(s, src, ccr_only);
2326    }
2327}
2328
2329DISAS_INSN(arith_im)
2330{
2331    int op;
2332    TCGv im;
2333    TCGv src1;
2334    TCGv dest;
2335    TCGv addr;
2336    int opsize;
2337    bool with_SR = ((insn & 0x3f) == 0x3c);
2338
2339    op = (insn >> 9) & 7;
2340    opsize = insn_opsize(insn);
2341    switch (opsize) {
2342    case OS_BYTE:
2343        im = tcg_const_i32((int8_t)read_im8(env, s));
2344        break;
2345    case OS_WORD:
2346        im = tcg_const_i32((int16_t)read_im16(env, s));
2347        break;
2348    case OS_LONG:
2349        im = tcg_const_i32(read_im32(env, s));
2350        break;
2351    default:
2352        g_assert_not_reached();
2353    }
2354
2355    if (with_SR) {
2356        /* SR/CCR can only be used with andi/eori/ori */
2357        if (op == 2 || op == 3 || op == 6) {
2358            disas_undef(env, s, insn);
2359            return;
2360        }
2361        switch (opsize) {
2362        case OS_BYTE:
2363            src1 = gen_get_ccr(s);
2364            break;
2365        case OS_WORD:
2366            if (IS_USER(s)) {
2367                gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
2368                return;
2369            }
2370            src1 = gen_get_sr(s);
2371            break;
2372        default:
2373            /* OS_LONG; others already g_assert_not_reached.  */
2374            disas_undef(env, s, insn);
2375            return;
2376        }
2377    } else {
2378        SRC_EA(env, src1, opsize, 1, (op == 6) ? NULL : &addr);
2379    }
2380    dest = tcg_temp_new();
2381    switch (op) {
2382    case 0: /* ori */
2383        tcg_gen_or_i32(dest, src1, im);
2384        if (with_SR) {
2385            gen_set_sr(s, dest, opsize == OS_BYTE);
2386        } else {
2387            DEST_EA(env, insn, opsize, dest, &addr);
2388            gen_logic_cc(s, dest, opsize);
2389        }
2390        break;
2391    case 1: /* andi */
2392        tcg_gen_and_i32(dest, src1, im);
2393        if (with_SR) {
2394            gen_set_sr(s, dest, opsize == OS_BYTE);
2395        } else {
2396            DEST_EA(env, insn, opsize, dest, &addr);
2397            gen_logic_cc(s, dest, opsize);
2398        }
2399        break;
2400    case 2: /* subi */
2401        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, src1, im);
2402        tcg_gen_sub_i32(dest, src1, im);
2403        gen_update_cc_add(dest, im, opsize);
2404        set_cc_op(s, CC_OP_SUBB + opsize);
2405        DEST_EA(env, insn, opsize, dest, &addr);
2406        break;
2407    case 3: /* addi */
2408        tcg_gen_add_i32(dest, src1, im);
2409        gen_update_cc_add(dest, im, opsize);
2410        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, im);
2411        set_cc_op(s, CC_OP_ADDB + opsize);
2412        DEST_EA(env, insn, opsize, dest, &addr);
2413        break;
2414    case 5: /* eori */
2415        tcg_gen_xor_i32(dest, src1, im);
2416        if (with_SR) {
2417            gen_set_sr(s, dest, opsize == OS_BYTE);
2418        } else {
2419            DEST_EA(env, insn, opsize, dest, &addr);
2420            gen_logic_cc(s, dest, opsize);
2421        }
2422        break;
2423    case 6: /* cmpi */
2424        gen_update_cc_cmp(s, src1, im, opsize);
2425        break;
2426    default:
2427        abort();
2428    }
2429    tcg_temp_free(im);
2430    tcg_temp_free(dest);
2431}
2432
2433DISAS_INSN(cas)
2434{
2435    int opsize;
2436    TCGv addr;
2437    uint16_t ext;
2438    TCGv load;
2439    TCGv cmp;
2440    MemOp opc;
2441
2442    switch ((insn >> 9) & 3) {
2443    case 1:
2444        opsize = OS_BYTE;
2445        opc = MO_SB;
2446        break;
2447    case 2:
2448        opsize = OS_WORD;
2449        opc = MO_TESW;
2450        break;
2451    case 3:
2452        opsize = OS_LONG;
2453        opc = MO_TESL;
2454        break;
2455    default:
2456        g_assert_not_reached();
2457    }
2458
2459    ext = read_im16(env, s);
2460
2461    /* cas Dc,Du,<EA> */
2462
2463    addr = gen_lea(env, s, insn, opsize);
2464    if (IS_NULL_QREG(addr)) {
2465        gen_addr_fault(s);
2466        return;
2467    }
2468
2469    cmp = gen_extend(s, DREG(ext, 0), opsize, 1);
2470
2471    /*
2472     * if  <EA> == Dc then
2473     *     <EA> = Du
2474     *     Dc = <EA> (because <EA> == Dc)
2475     * else
2476     *     Dc = <EA>
2477     */
2478
2479    load = tcg_temp_new();
2480    tcg_gen_atomic_cmpxchg_i32(load, addr, cmp, DREG(ext, 6),
2481                               IS_USER(s), opc);
2482    /* update flags before setting cmp to load */
2483    gen_update_cc_cmp(s, load, cmp, opsize);
2484    gen_partset_reg(opsize, DREG(ext, 0), load);
2485
2486    tcg_temp_free(load);
2487
2488    switch (extract32(insn, 3, 3)) {
2489    case 3: /* Indirect postincrement.  */
2490        tcg_gen_addi_i32(AREG(insn, 0), addr, opsize_bytes(opsize));
2491        break;
2492    case 4: /* Indirect predecrement.  */
2493        tcg_gen_mov_i32(AREG(insn, 0), addr);
2494        break;
2495    }
2496}
2497
2498DISAS_INSN(cas2w)
2499{
2500    uint16_t ext1, ext2;
2501    TCGv addr1, addr2;
2502    TCGv regs;
2503
2504    /* cas2 Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) */
2505
2506    ext1 = read_im16(env, s);
2507
2508    if (ext1 & 0x8000) {
2509        /* Address Register */
2510        addr1 = AREG(ext1, 12);
2511    } else {
2512        /* Data Register */
2513        addr1 = DREG(ext1, 12);
2514    }
2515
2516    ext2 = read_im16(env, s);
2517    if (ext2 & 0x8000) {
2518        /* Address Register */
2519        addr2 = AREG(ext2, 12);
2520    } else {
2521        /* Data Register */
2522        addr2 = DREG(ext2, 12);
2523    }
2524
2525    /*
2526     * if (R1) == Dc1 && (R2) == Dc2 then
2527     *     (R1) = Du1
2528     *     (R2) = Du2
2529     * else
2530     *     Dc1 = (R1)
2531     *     Dc2 = (R2)
2532     */
2533
2534    regs = tcg_const_i32(REG(ext2, 6) |
2535                         (REG(ext1, 6) << 3) |
2536                         (REG(ext2, 0) << 6) |
2537                         (REG(ext1, 0) << 9));
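        /*
         * There is no parallel-safe cas2w helper (compare cas2l below);
         * exit_atomic makes the cpu loop re-execute this instruction in
         * an exclusive context instead.
         */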
2538    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
2539        gen_helper_exit_atomic(cpu_env);
2540    } else {
2541        gen_helper_cas2w(cpu_env, regs, addr1, addr2);
2542    }
2543    tcg_temp_free(regs);
2544
2545    /* Note that the cas2w helper has also assigned to env->cc_op.  */
2546    s->cc_op = CC_OP_CMPW;
2547    s->cc_op_synced = 1;
2548}
2549
2550DISAS_INSN(cas2l)
2551{
2552    uint16_t ext1, ext2;
2553    TCGv addr1, addr2, regs;
2554
2555    /* cas2 Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) */
2556
2557    ext1 = read_im16(env, s);
2558
2559    if (ext1 & 0x8000) {
2560        /* Address Register */
2561        addr1 = AREG(ext1, 12);
2562    } else {
2563        /* Data Register */
2564        addr1 = DREG(ext1, 12);
2565    }
2566
2567    ext2 = read_im16(env, s);
2568    if (ext2 & 0x8000) {
2569        /* Address Register */
2570        addr2 = AREG(ext2, 12);
2571    } else {
2572        /* Data Register */
2573        addr2 = DREG(ext2, 12);
2574    }
2575
2576    /*
2577     * if (R1) == Dc1 && (R2) == Dc2 then
2578     *     (R1) = Du1
2579     *     (R2) = Du2
2580     * else
2581     *     Dc1 = (R1)
2582     *     Dc2 = (R2)
2583     */
2584
2585    regs = tcg_const_i32(REG(ext2, 6) |
2586                         (REG(ext1, 6) << 3) |
2587                         (REG(ext2, 0) << 6) |
2588                         (REG(ext1, 0) << 9));
2589    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
2590        gen_helper_cas2l_parallel(cpu_env, regs, addr1, addr2);
2591    } else {
2592        gen_helper_cas2l(cpu_env, regs, addr1, addr2);
2593    }
2594    tcg_temp_free(regs);
2595
2596    /* Note that the cas2l helper has also assigned to env->cc_op.  */
2597    s->cc_op = CC_OP_CMPL;
2598    s->cc_op_synced = 1;
2599}
2600
2601DISAS_INSN(byterev)
2602{
2603    TCGv reg;
2604
2605    reg = DREG(insn, 0);
2606    tcg_gen_bswap32_i32(reg, reg);
2607}
2608
2609DISAS_INSN(move)
2610{
2611    TCGv src;
2612    TCGv dest;
2613    int op;
2614    int opsize;
2615
2616    switch (insn >> 12) {
2617    case 1: /* move.b */
2618        opsize = OS_BYTE;
2619        break;
2620    case 2: /* move.l */
2621        opsize = OS_LONG;
2622        break;
2623    case 3: /* move.w */
2624        opsize = OS_WORD;
2625        break;
2626    default:
2627        abort();
2628    }
2629    SRC_EA(env, src, opsize, 1, NULL);
2630    op = (insn >> 6) & 7;
2631    if (op == 1) {
2632        /* movea */
2633        /* The value will already have been sign extended.  */
2634        dest = AREG(insn, 9);
2635        tcg_gen_mov_i32(dest, src);
2636    } else {
2637        /* normal move */
2638        uint16_t dest_ea;
2639        dest_ea = ((insn >> 9) & 7) | (op << 3);
2640        DEST_EA(env, dest_ea, opsize, src, NULL);
2641        /* This will be correct because loads sign extend.  */
2642        gen_logic_cc(s, src, opsize);
2643    }
2644}
2645
2646DISAS_INSN(negx)
2647{
2648    TCGv z;
2649    TCGv src;
2650    TCGv addr;
2651    int opsize;
2652
2653    opsize = insn_opsize(insn);
2654    SRC_EA(env, src, opsize, 1, &addr);
2655
2656    gen_flush_flags(s); /* compute old Z */
2657
2658    /*
2659     * Perform subtract with borrow.
2660     * (X, N) =  -(src + X);
2661     */
2662
2663    z = tcg_const_i32(0);
2664    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, z, QREG_CC_X, z);
2665    tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, z, z, QREG_CC_N, QREG_CC_X);
2666    tcg_temp_free(z);
2667    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
2668
2669    tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1);
2670
2671    /*
2672     * Compute signed-overflow for negation.  The normal formula for
2673     * subtraction is (res ^ dest) & (dest ^ src), but with dest==0
2674     * this simplifies to res & src.
2675     */
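        /*
         * e.g. for OS_BYTE only src = 0x80 with X clear overflows: the
         * result is again 0x80, so res & src has the sign bit set.
         */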
2676
2677    tcg_gen_and_i32(QREG_CC_V, QREG_CC_N, src);
2678
2679    /* Copy the rest of the results into place.  */
2680    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
2681    tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
2682
2683    set_cc_op(s, CC_OP_FLAGS);
2684
2685    /* result is in QREG_CC_N */
2686
2687    DEST_EA(env, insn, opsize, QREG_CC_N, &addr);
2688}
2689
2690DISAS_INSN(lea)
2691{
2692    TCGv reg;
2693    TCGv tmp;
2694
2695    reg = AREG(insn, 9);
2696    tmp = gen_lea(env, s, insn, OS_LONG);
2697    if (IS_NULL_QREG(tmp)) {
2698        gen_addr_fault(s);
2699        return;
2700    }
2701    tcg_gen_mov_i32(reg, tmp);
2702}
2703
2704DISAS_INSN(clr)
2705{
2706    int opsize;
2707    TCGv zero;
2708
2709    zero = tcg_const_i32(0);
2710
2711    opsize = insn_opsize(insn);
2712    DEST_EA(env, insn, opsize, zero, NULL);
2713    gen_logic_cc(s, zero, opsize);
2714    tcg_temp_free(zero);
2715}
2716
2717DISAS_INSN(move_from_ccr)
2718{
2719    TCGv ccr;
2720
2721    ccr = gen_get_ccr(s);
2722    DEST_EA(env, insn, OS_WORD, ccr, NULL);
2723}
2724
2725DISAS_INSN(neg)
2726{
2727    TCGv src1;
2728    TCGv dest;
2729    TCGv addr;
2730    int opsize;
2731
2732    opsize = insn_opsize(insn);
2733    SRC_EA(env, src1, opsize, 1, &addr);
2734    dest = tcg_temp_new();
2735    tcg_gen_neg_i32(dest, src1);
2736    set_cc_op(s, CC_OP_SUBB + opsize);
2737    gen_update_cc_add(dest, src1, opsize);
2738    tcg_gen_setcondi_i32(TCG_COND_NE, QREG_CC_X, dest, 0);
2739    DEST_EA(env, insn, opsize, dest, &addr);
2740    tcg_temp_free(dest);
2741}
2742
2743DISAS_INSN(move_to_ccr)
2744{
2745    gen_move_to_sr(env, s, insn, true);
2746}
2747
2748DISAS_INSN(not)
2749{
2750    TCGv src1;
2751    TCGv dest;
2752    TCGv addr;
2753    int opsize;
2754
2755    opsize = insn_opsize(insn);
2756    SRC_EA(env, src1, opsize, 1, &addr);
2757    dest = tcg_temp_new();
2758    tcg_gen_not_i32(dest, src1);
2759    DEST_EA(env, insn, opsize, dest, &addr);
2760    gen_logic_cc(s, dest, opsize);
2761}
2762
2763DISAS_INSN(swap)
2764{
2765    TCGv src1;
2766    TCGv src2;
2767    TCGv reg;
2768
2769    src1 = tcg_temp_new();
2770    src2 = tcg_temp_new();
2771    reg = DREG(insn, 0);
2772    tcg_gen_shli_i32(src1, reg, 16);
2773    tcg_gen_shri_i32(src2, reg, 16);
2774    tcg_gen_or_i32(reg, src1, src2);
2775    tcg_temp_free(src2);
2776    tcg_temp_free(src1);
2777    gen_logic_cc(s, reg, OS_LONG);
2778}
2779
2780DISAS_INSN(bkpt)
2781{
2782    gen_exception(s, s->base.pc_next, EXCP_DEBUG);
2783}
2784
2785DISAS_INSN(pea)
2786{
2787    TCGv tmp;
2788
2789    tmp = gen_lea(env, s, insn, OS_LONG);
2790    if (IS_NULL_QREG(tmp)) {
2791        gen_addr_fault(s);
2792        return;
2793    }
2794    gen_push(s, tmp);
2795}
2796
2797DISAS_INSN(ext)
2798{
2799    int op;
2800    TCGv reg;
2801    TCGv tmp;
2802
2803    reg = DREG(insn, 0);
2804    op = (insn >> 6) & 7;
2805    tmp = tcg_temp_new();
2806    if (op == 3)
2807        tcg_gen_ext16s_i32(tmp, reg);
2808    else
2809        tcg_gen_ext8s_i32(tmp, reg);
2810    if (op == 2)
2811        gen_partset_reg(OS_WORD, reg, tmp);
2812    else
2813        tcg_gen_mov_i32(reg, tmp);
2814    gen_logic_cc(s, tmp, OS_LONG);
2815    tcg_temp_free(tmp);
2816}
2817
2818DISAS_INSN(tst)
2819{
2820    int opsize;
2821    TCGv tmp;
2822
2823    opsize = insn_opsize(insn);
2824    SRC_EA(env, tmp, opsize, 1, NULL);
2825    gen_logic_cc(s, tmp, opsize);
2826}
2827
2828DISAS_INSN(pulse)
2829{
2830    /* Implemented as a NOP.  */
2831}
2832
2833DISAS_INSN(illegal)
2834{
2835    gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
2836}
2837
2838/* ??? This should be atomic.  */
2839DISAS_INSN(tas)
2840{
2841    TCGv dest;
2842    TCGv src1;
2843    TCGv addr;
2844
2845    dest = tcg_temp_new();
2846    SRC_EA(env, src1, OS_BYTE, 1, &addr);
2847    gen_logic_cc(s, src1, OS_BYTE);
2848    tcg_gen_ori_i32(dest, src1, 0x80);
2849    DEST_EA(env, insn, OS_BYTE, dest, &addr);
2850    tcg_temp_free(dest);
2851}
2852
2853DISAS_INSN(mull)
2854{
2855    uint16_t ext;
2856    TCGv src1;
2857    int sign;
2858
2859    ext = read_im16(env, s);
2860
2861    sign = ext & 0x800;
2862
2863    if (ext & 0x400) {
2864        if (!m68k_feature(s->env, M68K_FEATURE_QUAD_MULDIV)) {
2865            gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
2866            return;
2867        }
2868
2869        SRC_EA(env, src1, OS_LONG, 0, NULL);
2870
2871        if (sign) {
2872            tcg_gen_muls2_i32(QREG_CC_Z, QREG_CC_N, src1, DREG(ext, 12));
2873        } else {
2874            tcg_gen_mulu2_i32(QREG_CC_Z, QREG_CC_N, src1, DREG(ext, 12));
2875        }
2876        /* if Dl == Dh, 68040 returns low word */
2877        tcg_gen_mov_i32(DREG(ext, 0), QREG_CC_N);
2878        tcg_gen_mov_i32(DREG(ext, 12), QREG_CC_Z);
2879        tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N);
2880
2881        tcg_gen_movi_i32(QREG_CC_V, 0);
2882        tcg_gen_movi_i32(QREG_CC_C, 0);
2883
2884        set_cc_op(s, CC_OP_FLAGS);
2885        return;
2886    }
2887    SRC_EA(env, src1, OS_LONG, 0, NULL);
2888    if (m68k_feature(s->env, M68K_FEATURE_M68000)) {
2889        tcg_gen_movi_i32(QREG_CC_C, 0);
2890        if (sign) {
2891            tcg_gen_muls2_i32(QREG_CC_N, QREG_CC_V, src1, DREG(ext, 12));
2892            /* QREG_CC_V is -(QREG_CC_V != (QREG_CC_N >> 31)) */
2893            tcg_gen_sari_i32(QREG_CC_Z, QREG_CC_N, 31);
2894            tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, QREG_CC_Z);
2895        } else {
2896            tcg_gen_mulu2_i32(QREG_CC_N, QREG_CC_V, src1, DREG(ext, 12));
2897            /* QREG_CC_V is -(QREG_CC_V != 0), use QREG_CC_C as 0 */
2898            tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, QREG_CC_C);
2899        }
2900        tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
2901        tcg_gen_mov_i32(DREG(ext, 12), QREG_CC_N);
2902
2903        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
2904
2905        set_cc_op(s, CC_OP_FLAGS);
2906    } else {
2907        /*
2908         * The upper 32 bits of the product are discarded, so
2909         * muls.l and mulu.l are functionally equivalent.
2910         */
2911        tcg_gen_mul_i32(DREG(ext, 12), src1, DREG(ext, 12));
2912        gen_logic_cc(s, DREG(ext, 12), OS_LONG);
2913    }
2914}
2915
2916static void gen_link(DisasContext *s, uint16_t insn, int32_t offset)
2917{
2918    TCGv reg;
2919    TCGv tmp;
2920
2921    reg = AREG(insn, 0);
2922    tmp = tcg_temp_new();
2923    tcg_gen_subi_i32(tmp, QREG_SP, 4);
2924    gen_store(s, OS_LONG, tmp, reg, IS_USER(s));
2925    if ((insn & 7) != 7) {
2926        tcg_gen_mov_i32(reg, tmp);
2927    }
2928    tcg_gen_addi_i32(QREG_SP, tmp, offset);
2929    tcg_temp_free(tmp);
2930}
2931
2932DISAS_INSN(link)
2933{
2934    int16_t offset;
2935
2936    offset = read_im16(env, s);
2937    gen_link(s, insn, offset);
2938}
2939
2940DISAS_INSN(linkl)
2941{
2942    int32_t offset;
2943
2944    offset = read_im32(env, s);
2945    gen_link(s, insn, offset);
2946}
2947
2948DISAS_INSN(unlk)
2949{
2950    TCGv src;
2951    TCGv reg;
2952    TCGv tmp;
2953
2954    src = tcg_temp_new();
2955    reg = AREG(insn, 0);
2956    tcg_gen_mov_i32(src, reg);
2957    tmp = gen_load(s, OS_LONG, src, 0, IS_USER(s));
2958    tcg_gen_mov_i32(reg, tmp);
2959    tcg_gen_addi_i32(QREG_SP, src, 4);
2960    tcg_temp_free(src);
2961    tcg_temp_free(tmp);
2962}
2963
2964#if defined(CONFIG_SOFTMMU)
2965DISAS_INSN(reset)
2966{
2967    if (IS_USER(s)) {
2968        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
2969        return;
2970    }
2971
2972    gen_helper_reset(cpu_env);
2973}
2974#endif
2975
2976DISAS_INSN(nop)
2977{
2978}
2979
2980DISAS_INSN(rtd)
2981{
2982    TCGv tmp;
2983    int16_t offset = read_im16(env, s);
2984
2985    tmp = gen_load(s, OS_LONG, QREG_SP, 0, IS_USER(s));
2986    tcg_gen_addi_i32(QREG_SP, QREG_SP, offset + 4);
2987    gen_jmp(s, tmp);
2988}
2989
2990DISAS_INSN(rtr)
2991{
2992    TCGv tmp;
2993    TCGv ccr;
2994    TCGv sp;
2995
2996    sp = tcg_temp_new();
2997    ccr = gen_load(s, OS_WORD, QREG_SP, 0, IS_USER(s));
2998    tcg_gen_addi_i32(sp, QREG_SP, 2);
2999    tmp = gen_load(s, OS_LONG, sp, 0, IS_USER(s));
3000    tcg_gen_addi_i32(QREG_SP, sp, 4);
3001    tcg_temp_free(sp);
3002
3003    gen_set_sr(s, ccr, true);
3004    tcg_temp_free(ccr);
3005
3006    gen_jmp(s, tmp);
3007}
3008
3009DISAS_INSN(rts)
3010{
3011    TCGv tmp;
3012
3013    tmp = gen_load(s, OS_LONG, QREG_SP, 0, IS_USER(s));
3014    tcg_gen_addi_i32(QREG_SP, QREG_SP, 4);
3015    gen_jmp(s, tmp);
3016}
3017
3018DISAS_INSN(jump)
3019{
3020    TCGv tmp;
3021
3022    /*
3023     * Load the target address first to ensure correct exception
3024     * behavior.
3025     */
3026    tmp = gen_lea(env, s, insn, OS_LONG);
3027    if (IS_NULL_QREG(tmp)) {
3028        gen_addr_fault(s);
3029        return;
3030    }
3031    if ((insn & 0x40) == 0) {
3032        /* jsr */
3033        gen_push(s, tcg_const_i32(s->pc));
3034    }
3035    gen_jmp(s, tmp);
3036}
3037
3038DISAS_INSN(addsubq)
3039{
3040    TCGv src;
3041    TCGv dest;
3042    TCGv val;
3043    int imm;
3044    TCGv addr;
3045    int opsize;
3046
3047    if ((insn & 070) == 010) {
3048        /* Operation on address register is always long.  */
3049        opsize = OS_LONG;
3050    } else {
3051        opsize = insn_opsize(insn);
3052    }
3053    SRC_EA(env, src, opsize, 1, &addr);
3054    imm = (insn >> 9) & 7;
3055    if (imm == 0) {
3056        imm = 8;
3057    }
3058    val = tcg_const_i32(imm);
3059    dest = tcg_temp_new();
3060    tcg_gen_mov_i32(dest, src);
3061    if ((insn & 0x38) == 0x08) {
3062        /*
3063         * Don't update condition codes if the destination is an
3064         * address register.
3065         */
3066        if (insn & 0x0100) {
3067            tcg_gen_sub_i32(dest, dest, val);
3068        } else {
3069            tcg_gen_add_i32(dest, dest, val);
3070        }
3071    } else {
3072        if (insn & 0x0100) {
3073            tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, val);
3074            tcg_gen_sub_i32(dest, dest, val);
3075            set_cc_op(s, CC_OP_SUBB + opsize);
3076        } else {
3077            tcg_gen_add_i32(dest, dest, val);
3078            tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, val);
3079            set_cc_op(s, CC_OP_ADDB + opsize);
3080        }
3081        gen_update_cc_add(dest, val, opsize);
3082    }
3083    tcg_temp_free(val);
3084    DEST_EA(env, insn, opsize, dest, &addr);
3085    tcg_temp_free(dest);
3086}
3087
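    /*
     * TPF (trap false) never traps; it only has to skip the immediate
     * operand selected by the low opcode bits.
     */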
3088DISAS_INSN(tpf)
3089{
3090    switch (insn & 7) {
3091    case 2: /* One extension word.  */
3092        s->pc += 2;
3093        break;
3094    case 3: /* Two extension words.  */
3095        s->pc += 4;
3096        break;
3097    case 4: /* No extension words.  */
3098        break;
3099    default:
3100        disas_undef(env, s, insn);
3101    }
3102}
3103
3104DISAS_INSN(branch)
3105{
3106    int32_t offset;
3107    uint32_t base;
3108    int op;
3109
3110    base = s->pc;
3111    op = (insn >> 8) & 0xf;
3112    offset = (int8_t)insn;
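        /*
         * An 8-bit displacement of 0x00 means a 16-bit displacement word
         * follows; 0xff means a 32-bit displacement follows.
         */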
3113    if (offset == 0) {
3114        offset = (int16_t)read_im16(env, s);
3115    } else if (offset == -1) {
3116        offset = read_im32(env, s);
3117    }
3118    if (op == 1) {
3119        /* bsr */
3120        gen_push(s, tcg_const_i32(s->pc));
3121    }
3122    if (op > 1) {
3123        /* Bcc */
3124        TCGLabel *l1 = gen_new_label();
3125        gen_jmpcc(s, ((insn >> 8) & 0xf) ^ 1, l1);
3126        gen_jmp_tb(s, 1, base + offset);
3127        gen_set_label(l1);
3128        gen_jmp_tb(s, 0, s->pc);
3129    } else {
3130        /* Unconditional branch.  */
3131        update_cc_op(s);
3132        gen_jmp_tb(s, 0, base + offset);
3133    }
3134}
3135
3136DISAS_INSN(moveq)
3137{
3138    tcg_gen_movi_i32(DREG(insn, 9), (int8_t)insn);
3139    gen_logic_cc(s, DREG(insn, 9), OS_LONG);
3140}
3141
3142DISAS_INSN(mvzs)
3143{
3144    int opsize;
3145    TCGv src;
3146    TCGv reg;
3147
3148    if (insn & 0x40)
3149        opsize = OS_WORD;
3150    else
3151        opsize = OS_BYTE;
3152    SRC_EA(env, src, opsize, (insn & 0x80) == 0, NULL);
3153    reg = DREG(insn, 9);
3154    tcg_gen_mov_i32(reg, src);
3155    gen_logic_cc(s, src, opsize);
3156}
3157
3158DISAS_INSN(or)
3159{
3160    TCGv reg;
3161    TCGv dest;
3162    TCGv src;
3163    TCGv addr;
3164    int opsize;
3165
3166    opsize = insn_opsize(insn);
3167    reg = gen_extend(s, DREG(insn, 9), opsize, 0);
3168    dest = tcg_temp_new();
3169    if (insn & 0x100) {
3170        SRC_EA(env, src, opsize, 0, &addr);
3171        tcg_gen_or_i32(dest, src, reg);
3172        DEST_EA(env, insn, opsize, dest, &addr);
3173    } else {
3174        SRC_EA(env, src, opsize, 0, NULL);
3175        tcg_gen_or_i32(dest, src, reg);
3176        gen_partset_reg(opsize, DREG(insn, 9), dest);
3177    }
3178    gen_logic_cc(s, dest, opsize);
3179    tcg_temp_free(dest);
3180}
3181
3182DISAS_INSN(suba)
3183{
3184    TCGv src;
3185    TCGv reg;
3186
3187    SRC_EA(env, src, (insn & 0x100) ? OS_LONG : OS_WORD, 1, NULL);
3188    reg = AREG(insn, 9);
3189    tcg_gen_sub_i32(reg, reg, src);
3190}
3191
3192static inline void gen_subx(DisasContext *s, TCGv src, TCGv dest, int opsize)
3193{
3194    TCGv tmp;
3195
3196    gen_flush_flags(s); /* compute old Z */
3197
3198    /*
3199     * Perform subtract with borrow.
3200     * (X, N) = dest - (src + X);
3201     */
3202
3203    tmp = tcg_const_i32(0);
3204    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, tmp, QREG_CC_X, tmp);
3205    tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, dest, tmp, QREG_CC_N, QREG_CC_X);
3206    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
3207    tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1);
3208
3209    /* Compute signed-overflow for subtract.  */
3210
3211    tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, dest);
3212    tcg_gen_xor_i32(tmp, dest, src);
3213    tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, tmp);
3214    tcg_temp_free(tmp);
3215
3216    /* Copy the rest of the results into place.  */
3217    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
3218    tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
3219
3220    set_cc_op(s, CC_OP_FLAGS);
3221
3222    /* result is in QREG_CC_N */
3223}
3224
3225DISAS_INSN(subx_reg)
3226{
3227    TCGv dest;
3228    TCGv src;
3229    int opsize;
3230
3231    opsize = insn_opsize(insn);
3232
3233    src = gen_extend(s, DREG(insn, 0), opsize, 1);
3234    dest = gen_extend(s, DREG(insn, 9), opsize, 1);
3235
3236    gen_subx(s, src, dest, opsize);
3237
3238    gen_partset_reg(opsize, DREG(insn, 9), QREG_CC_N);
3239}
3240
3241DISAS_INSN(subx_mem)
3242{
3243    TCGv src;
3244    TCGv addr_src;
3245    TCGv dest;
3246    TCGv addr_dest;
3247    int opsize;
3248
3249    opsize = insn_opsize(insn);
3250
3251    addr_src = AREG(insn, 0);
3252    tcg_gen_subi_i32(addr_src, addr_src, opsize_bytes(opsize));
3253    src = gen_load(s, opsize, addr_src, 1, IS_USER(s));
3254
3255    addr_dest = AREG(insn, 9);
3256    tcg_gen_subi_i32(addr_dest, addr_dest, opsize_bytes(opsize));
3257    dest = gen_load(s, opsize, addr_dest, 1, IS_USER(s));
3258
3259    gen_subx(s, src, dest, opsize);
3260
3261    gen_store(s, opsize, addr_dest, QREG_CC_N, IS_USER(s));
3262
3263    tcg_temp_free(dest);
3264    tcg_temp_free(src);
3265}
3266
3267DISAS_INSN(mov3q)
3268{
3269    TCGv src;
3270    int val;
3271
3272    val = (insn >> 9) & 7;
3273    if (val == 0)
3274        val = -1;
3275    src = tcg_const_i32(val);
3276    gen_logic_cc(s, src, OS_LONG);
3277    DEST_EA(env, insn, OS_LONG, src, NULL);
3278    tcg_temp_free(src);
3279}
3280
3281DISAS_INSN(cmp)
3282{
3283    TCGv src;
3284    TCGv reg;
3285    int opsize;
3286
3287    opsize = insn_opsize(insn);
3288    SRC_EA(env, src, opsize, 1, NULL);
3289    reg = gen_extend(s, DREG(insn, 9), opsize, 1);
3290    gen_update_cc_cmp(s, reg, src, opsize);
3291}
3292
3293DISAS_INSN(cmpa)
3294{
3295    int opsize;
3296    TCGv src;
3297    TCGv reg;
3298
3299    if (insn & 0x100) {
3300        opsize = OS_LONG;
3301    } else {
3302        opsize = OS_WORD;
3303    }
3304    SRC_EA(env, src, opsize, 1, NULL);
3305    reg = AREG(insn, 9);
3306    gen_update_cc_cmp(s, reg, src, OS_LONG);
3307}
3308
3309DISAS_INSN(cmpm)
3310{
3311    int opsize = insn_opsize(insn);
3312    TCGv src, dst;
3313
3314    /* Post-increment load (mode 3) from Ay.  */
3315    src = gen_ea_mode(env, s, 3, REG(insn, 0), opsize,
3316                      NULL_QREG, NULL, EA_LOADS, IS_USER(s));
3317    /* Post-increment load (mode 3) from Ax.  */
3318    dst = gen_ea_mode(env, s, 3, REG(insn, 9), opsize,
3319                      NULL_QREG, NULL, EA_LOADS, IS_USER(s));
3320
3321    gen_update_cc_cmp(s, dst, src, opsize);
3322}
3323
3324DISAS_INSN(eor)
3325{
3326    TCGv src;
3327    TCGv dest;
3328    TCGv addr;
3329    int opsize;
3330
3331    opsize = insn_opsize(insn);
3332
3333    SRC_EA(env, src, opsize, 0, &addr);
3334    dest = tcg_temp_new();
3335    tcg_gen_xor_i32(dest, src, DREG(insn, 9));
3336    gen_logic_cc(s, dest, opsize);
3337    DEST_EA(env, insn, opsize, dest, &addr);
3338    tcg_temp_free(dest);
3339}
3340
3341static void do_exg(TCGv reg1, TCGv reg2)
3342{
3343    TCGv temp = tcg_temp_new();
3344    tcg_gen_mov_i32(temp, reg1);
3345    tcg_gen_mov_i32(reg1, reg2);
3346    tcg_gen_mov_i32(reg2, temp);
3347    tcg_temp_free(temp);
3348}
3349
3350DISAS_INSN(exg_dd)
3351{
3352    /* exchange Dx and Dy */
3353    do_exg(DREG(insn, 9), DREG(insn, 0));
3354}
3355
3356DISAS_INSN(exg_aa)
3357{
3358    /* exchange Ax and Ay */
3359    do_exg(AREG(insn, 9), AREG(insn, 0));
3360}
3361
3362DISAS_INSN(exg_da)
3363{
3364    /* exchange Dx and Ay */
3365    do_exg(DREG(insn, 9), AREG(insn, 0));
3366}
3367
3368DISAS_INSN(and)
3369{
3370    TCGv src;
3371    TCGv reg;
3372    TCGv dest;
3373    TCGv addr;
3374    int opsize;
3375
3376    dest = tcg_temp_new();
3377
3378    opsize = insn_opsize(insn);
3379    reg = DREG(insn, 9);
3380    if (insn & 0x100) {
3381        SRC_EA(env, src, opsize, 0, &addr);
3382        tcg_gen_and_i32(dest, src, reg);
3383        DEST_EA(env, insn, opsize, dest, &addr);
3384    } else {
3385        SRC_EA(env, src, opsize, 0, NULL);
3386        tcg_gen_and_i32(dest, src, reg);
3387        gen_partset_reg(opsize, reg, dest);
3388    }
3389    gen_logic_cc(s, dest, opsize);
3390    tcg_temp_free(dest);
3391}
3392
3393DISAS_INSN(adda)
3394{
3395    TCGv src;
3396    TCGv reg;
3397
3398    SRC_EA(env, src, (insn & 0x100) ? OS_LONG : OS_WORD, 1, NULL);
3399    reg = AREG(insn, 9);
3400    tcg_gen_add_i32(reg, reg, src);
3401}
3402
3403static inline void gen_addx(DisasContext *s, TCGv src, TCGv dest, int opsize)
3404{
3405    TCGv tmp;
3406
3407    gen_flush_flags(s); /* compute old Z */
3408
3409    /*
3410     * Perform addition with carry.
3411     * (X, N) = src + dest + X;
3412     */
3413
3414    tmp = tcg_const_i32(0);
3415    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_X, tmp, dest, tmp);
3416    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_N, QREG_CC_X, src, tmp);
3417    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
3418
3419    /* Compute signed-overflow for addition.  */
3420
3421    tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, src);
3422    tcg_gen_xor_i32(tmp, dest, src);
3423    tcg_gen_andc_i32(QREG_CC_V, QREG_CC_V, tmp);
3424    tcg_temp_free(tmp);
3425
3426    /* Copy the rest of the results into place.  */
3427    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
3428    tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
3429
3430    set_cc_op(s, CC_OP_FLAGS);
3431
3432    /* result is in QREG_CC_N */
3433}
3434
3435DISAS_INSN(addx_reg)
3436{
3437    TCGv dest;
3438    TCGv src;
3439    int opsize;
3440
3441    opsize = insn_opsize(insn);
3442
3443    dest = gen_extend(s, DREG(insn, 9), opsize, 1);
3444    src = gen_extend(s, DREG(insn, 0), opsize, 1);
3445
3446    gen_addx(s, src, dest, opsize);
3447
3448    gen_partset_reg(opsize, DREG(insn, 9), QREG_CC_N);
3449}
3450
3451DISAS_INSN(addx_mem)
3452{
3453    TCGv src;
3454    TCGv addr_src;
3455    TCGv dest;
3456    TCGv addr_dest;
3457    int opsize;
3458
3459    opsize = insn_opsize(insn);
3460
3461    addr_src = AREG(insn, 0);
3462    tcg_gen_subi_i32(addr_src, addr_src, opsize_bytes(opsize));
3463    src = gen_load(s, opsize, addr_src, 1, IS_USER(s));
3464
3465    addr_dest = AREG(insn, 9);
3466    tcg_gen_subi_i32(addr_dest, addr_dest, opsize_bytes(opsize));
3467    dest = gen_load(s, opsize, addr_dest, 1, IS_USER(s));
3468
3469    gen_addx(s, src, dest, opsize);
3470
3471    gen_store(s, opsize, addr_dest, QREG_CC_N, IS_USER(s));
3472
3473    tcg_temp_free(dest);
3474    tcg_temp_free(src);
3475}
3476
3477static inline void shift_im(DisasContext *s, uint16_t insn, int opsize)
3478{
3479    int count = (insn >> 9) & 7;
3480    int logical = insn & 8;
3481    int left = insn & 0x100;
3482    int bits = opsize_bytes(opsize) * 8;
3483    TCGv reg = gen_extend(s, DREG(insn, 0), opsize, !logical);
3484
3485    if (count == 0) {
3486        count = 8;
3487    }
3488
3489    tcg_gen_movi_i32(QREG_CC_V, 0);
3490    if (left) {
3491        tcg_gen_shri_i32(QREG_CC_C, reg, bits - count);
3492        tcg_gen_shli_i32(QREG_CC_N, reg, count);
3493
3494        /*
3495         * Note that ColdFire always clears V (done above),
3496         * while M68000 sets V if the most significant bit changes at
3497         * any time during the shift operation.
3498         */
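            /*
             * e.g. asl.b #1 on 0x40 gives 0x80: bits 7 and 6 of the source
             * differ, so the sign changed during the shift and V is set.
             */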
3499        if (!logical && m68k_feature(s->env, M68K_FEATURE_M68000)) {
3500            /* if shift count >= bits, V is (reg != 0) */
3501            if (count >= bits) {
3502                tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, reg, QREG_CC_V);
3503            } else {
3504                TCGv t0 = tcg_temp_new();
3505                tcg_gen_sari_i32(QREG_CC_V, reg, bits - 1);
3506                tcg_gen_sari_i32(t0, reg, bits - count - 1);
3507                tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, t0);
3508                tcg_temp_free(t0);
3509            }
3510            tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
3511        }
3512    } else {
3513        tcg_gen_shri_i32(QREG_CC_C, reg, count - 1);
3514        if (logical) {
3515            tcg_gen_shri_i32(QREG_CC_N, reg, count);
3516        } else {
3517            tcg_gen_sari_i32(QREG_CC_N, reg, count);
3518        }
3519    }
3520
3521    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
3522    tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
3523    tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
3524    tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);
3525
3526    gen_partset_reg(opsize, DREG(insn, 0), QREG_CC_N);
3527    set_cc_op(s, CC_OP_FLAGS);
3528}
3529
3530static inline void shift_reg(DisasContext *s, uint16_t insn, int opsize)
3531{
3532    int logical = insn & 8;
3533    int left = insn & 0x100;
3534    int bits = opsize_bytes(opsize) * 8;
3535    TCGv reg = gen_extend(s, DREG(insn, 0), opsize, !logical);
3536    TCGv s32;
3537    TCGv_i64 t64, s64;
3538
3539    t64 = tcg_temp_new_i64();
3540    s64 = tcg_temp_new_i64();
3541    s32 = tcg_temp_new();
3542
3543    /*
3544     * Note that m68k truncates the shift count modulo 64, not 32.
3545     * In addition, a 64-bit shift makes it easy to find "the last
3546     * bit shifted out", for the carry flag.
3547     */
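        /*
         * e.g. lsl.l with a register count of 40 shifts everything out
         * (result 0, C clear), which a mod-32 count would get wrong.
         */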
3548    tcg_gen_andi_i32(s32, DREG(insn, 9), 63);
3549    tcg_gen_extu_i32_i64(s64, s32);
3550    tcg_gen_extu_i32_i64(t64, reg);
3551
3552    /* Optimistically set V=0.  Also used as a zero source below.  */
3553    tcg_gen_movi_i32(QREG_CC_V, 0);
3554    if (left) {
3555        tcg_gen_shl_i64(t64, t64, s64);
3556
3557        if (opsize == OS_LONG) {
3558            tcg_gen_extr_i64_i32(QREG_CC_N, QREG_CC_C, t64);
3559            /* Note that C=0 if shift count is 0, and we get that for free.  */
3560        } else {
3561            TCGv zero = tcg_const_i32(0);
3562            tcg_gen_extrl_i64_i32(QREG_CC_N, t64);
3563            tcg_gen_shri_i32(QREG_CC_C, QREG_CC_N, bits);
3564            tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
3565                                s32, zero, zero, QREG_CC_C);
3566            tcg_temp_free(zero);
3567        }
3568        tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
3569
3570        /* X = C, but only if the shift count was non-zero.  */
3571        tcg_gen_movcond_i32(TCG_COND_NE, QREG_CC_X, s32, QREG_CC_V,
3572                            QREG_CC_C, QREG_CC_X);
3573
3574        /*
3575         * M68000 sets V if the most significant bit is changed at
3576         * any time during the shift operation.  Do this via creating
3577         * an extension of the sign bit, comparing, and discarding
3578         * the bits below the sign bit.  I.e.
3579         *     int64_t s = (intN_t)reg;
3580         *     int64_t t = (int64_t)(intN_t)reg << count;
3581         *     V = ((s ^ t) & (-1 << (bits - 1))) != 0
3582         */
3583        if (!logical && m68k_feature(s->env, M68K_FEATURE_M68000)) {
3584            TCGv_i64 tt = tcg_const_i64(32);
3585            /* if shift is greater than 32, use 32 */
3586            tcg_gen_movcond_i64(TCG_COND_GT, s64, s64, tt, tt, s64);
3587            tcg_temp_free_i64(tt);
3588            /* Sign extend the input to 64 bits; re-do the shift.  */
3589            tcg_gen_ext_i32_i64(t64, reg);
3590            tcg_gen_shl_i64(s64, t64, s64);
3591            /* Clear all bits that are unchanged.  */
3592            tcg_gen_xor_i64(t64, t64, s64);
3593            /* Ignore the bits below the sign bit.  */
3594            tcg_gen_andi_i64(t64, t64, -1ULL << (bits - 1));
3595            /* If any bits remain set, we have overflow.  */
3596            tcg_gen_setcondi_i64(TCG_COND_NE, t64, t64, 0);
3597            tcg_gen_extrl_i64_i32(QREG_CC_V, t64);
3598            tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
3599        }
3600    } else {
3601        tcg_gen_shli_i64(t64, t64, 32);
3602        if (logical) {
3603            tcg_gen_shr_i64(t64, t64, s64);
3604        } else {
3605            tcg_gen_sar_i64(t64, t64, s64);
3606        }
3607        tcg_gen_extr_i64_i32(QREG_CC_C, QREG_CC_N, t64);
3608
3609        /* Note that C=0 if shift count is 0, and we get that for free.  */
3610        tcg_gen_shri_i32(QREG_CC_C, QREG_CC_C, 31);
3611
3612        /* X = C, but only if the shift count was non-zero.  */
3613        tcg_gen_movcond_i32(TCG_COND_NE, QREG_CC_X, s32, QREG_CC_V,
3614                            QREG_CC_C, QREG_CC_X);
3615    }
3616    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
3617    tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
3618
3619    tcg_temp_free(s32);
3620    tcg_temp_free_i64(s64);
3621    tcg_temp_free_i64(t64);
3622
3623    /* Write back the result.  */
3624    gen_partset_reg(opsize, DREG(insn, 0), QREG_CC_N);
3625    set_cc_op(s, CC_OP_FLAGS);
3626}
3627
3628DISAS_INSN(shift8_im)
3629{
3630    shift_im(s, insn, OS_BYTE);
3631}
3632
3633DISAS_INSN(shift16_im)
3634{
3635    shift_im(s, insn, OS_WORD);
3636}
3637
3638DISAS_INSN(shift_im)
3639{
3640    shift_im(s, insn, OS_LONG);
3641}
3642
3643DISAS_INSN(shift8_reg)
3644{
3645    shift_reg(s, insn, OS_BYTE);
3646}
3647
3648DISAS_INSN(shift16_reg)
3649{
3650    shift_reg(s, insn, OS_WORD);
3651}
3652
3653DISAS_INSN(shift_reg)
3654{
3655    shift_reg(s, insn, OS_LONG);
3656}
3657
3658DISAS_INSN(shift_mem)
3659{
3660    int logical = insn & 8;
3661    int left = insn & 0x100;
3662    TCGv src;
3663    TCGv addr;
3664
3665    SRC_EA(env, src, OS_WORD, !logical, &addr);
3666    tcg_gen_movi_i32(QREG_CC_V, 0);
3667    if (left) {
3668        tcg_gen_shri_i32(QREG_CC_C, src, 15);
3669        tcg_gen_shli_i32(QREG_CC_N, src, 1);
3670
3671        /*
3672         * Note that ColdFire always clears V,
3673         * while M68000 sets V if the most significant bit changes at
3674         * any time during the shift operation.
3675         */
3676        if (!logical && m68k_feature(s->env, M68K_FEATURE_M68000)) {
3677            src = gen_extend(s, src, OS_WORD, 1);
3678            tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, src);
3679        }
3680    } else {
3681        tcg_gen_mov_i32(QREG_CC_C, src);
3682        if (logical) {
3683            tcg_gen_shri_i32(QREG_CC_N, src, 1);
3684        } else {
3685            tcg_gen_sari_i32(QREG_CC_N, src, 1);
3686        }
3687    }
3688
3689    gen_ext(QREG_CC_N, QREG_CC_N, OS_WORD, 1);
3690    tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
3691    tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
3692    tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);
3693
3694    DEST_EA(env, insn, OS_WORD, QREG_CC_N, &addr);
3695    set_cc_op(s, CC_OP_FLAGS);
3696}
3697
3698static void rotate(TCGv reg, TCGv shift, int left, int size)
3699{
3700    switch (size) {
3701    case 8:
3702        /* Replicate the 8-bit input so that a 32-bit rotate works.  */
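            /*
             * The replicated value has period 8, so the low byte of the
             * 32-bit rotation equals the 8-bit rotation; the ext8s below
             * then restores a sign-extended byte for the flag computation.
             */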
3703        tcg_gen_ext8u_i32(reg, reg);
3704        tcg_gen_muli_i32(reg, reg, 0x01010101);
3705        goto do_long;
3706    case 16:
3707        /* Replicate the 16-bit input so that a 32-bit rotate works.  */
3708        tcg_gen_deposit_i32(reg, reg, reg, 16, 16);
3709        goto do_long;
3710    do_long:
3711    default:
3712        if (left) {
3713            tcg_gen_rotl_i32(reg, reg, shift);
3714        } else {
3715            tcg_gen_rotr_i32(reg, reg, shift);
3716        }
3717    }
3718
3719    /* compute flags */
3720
3721    switch (size) {
3722    case 8:
3723        tcg_gen_ext8s_i32(reg, reg);
3724        break;
3725    case 16:
3726        tcg_gen_ext16s_i32(reg, reg);
3727        break;
3728    default:
3729        break;
3730    }
3731
3732    /* QREG_CC_X is not affected */
3733
3734    tcg_gen_mov_i32(QREG_CC_N, reg);
3735    tcg_gen_mov_i32(QREG_CC_Z, reg);
3736
3737    if (left) {
3738        tcg_gen_andi_i32(QREG_CC_C, reg, 1);
3739    } else {
3740        tcg_gen_shri_i32(QREG_CC_C, reg, 31);
3741    }
3742
3743    tcg_gen_movi_i32(QREG_CC_V, 0); /* always cleared */
3744}
3745
3746static void rotate_x_flags(TCGv reg, TCGv X, int size)
3747{
3748    switch (size) {
3749    case 8:
3750        tcg_gen_ext8s_i32(reg, reg);
3751        break;
3752    case 16:
3753        tcg_gen_ext16s_i32(reg, reg);
3754        break;
3755    default:
3756        break;
3757    }
3758    tcg_gen_mov_i32(QREG_CC_N, reg);
3759    tcg_gen_mov_i32(QREG_CC_Z, reg);
3760    tcg_gen_mov_i32(QREG_CC_X, X);
3761    tcg_gen_mov_i32(QREG_CC_C, X);
3762    tcg_gen_movi_i32(QREG_CC_V, 0);
3763}
3764
3765/* Result of rotate_x() is valid if 0 <= shift <= size */
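    /*
     * rotate_x() rotates the (size + 1)-bit quantity formed by reg and the
     * X flag.  e.g. roxl.b #1 on 0x80 with X clear produces a byte result
     * of 0x00 with X = 1 (the old bit 7).
     */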
3766static TCGv rotate_x(TCGv reg, TCGv shift, int left, int size)
3767{
3768    TCGv X, shl, shr, shx, sz, zero;
3769
3770    sz = tcg_const_i32(size);
3771
3772    shr = tcg_temp_new();
3773    shl = tcg_temp_new();
3774    shx = tcg_temp_new();
3775    if (left) {
3776        tcg_gen_mov_i32(shl, shift);      /* shl = shift */
3777        tcg_gen_movi_i32(shr, size + 1);
3778        tcg_gen_sub_i32(shr, shr, shift); /* shr = size + 1 - shift */
3779        tcg_gen_subi_i32(shx, shift, 1);  /* shx = shift - 1 */
3780        /* shx = shx < 0 ? size : shx; */
3781        zero = tcg_const_i32(0);
3782        tcg_gen_movcond_i32(TCG_COND_LT, shx, shx, zero, sz, shx);
3783        tcg_temp_free(zero);
3784    } else {
3785        tcg_gen_mov_i32(shr, shift);      /* shr = shift */
3786        tcg_gen_movi_i32(shl, size + 1);
3787        tcg_gen_sub_i32(shl, shl, shift); /* shl = size + 1 - shift */
3788        tcg_gen_sub_i32(shx, sz, shift); /* shx = size - shift */
3789    }
3790    tcg_temp_free_i32(sz);
3791
3792    /* reg = (reg << shl) | (reg >> shr) | (x << shx); */
3793
3794    tcg_gen_shl_i32(shl, reg, shl);
3795    tcg_gen_shr_i32(shr, reg, shr);
3796    tcg_gen_or_i32(reg, shl, shr);
3797    tcg_temp_free(shl);
3798    tcg_temp_free(shr);
3799    tcg_gen_shl_i32(shx, QREG_CC_X, shx);
3800    tcg_gen_or_i32(reg, reg, shx);
3801    tcg_temp_free(shx);
3802
3803    /* X = (reg >> size) & 1 */
3804
3805    X = tcg_temp_new();
3806    tcg_gen_extract_i32(X, reg, size, 1);
3807
3808    return X;
3809}
3810
3811/* Result of rotate32_x() is valid if 0 <= shift < 33 */
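    /*
     * A 33-bit rotate through X is implemented as a 64-bit rotate of the
     * concatenation of the register and X; the new X bit is then peeled
     * off and the two 32-bit halves are recombined into the result.
     */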
3812static TCGv rotate32_x(TCGv reg, TCGv shift, int left)
3813{
3814    TCGv_i64 t0, shift64;
3815    TCGv X, lo, hi, zero;
3816
3817    shift64 = tcg_temp_new_i64();
3818    tcg_gen_extu_i32_i64(shift64, shift);
3819
3820    t0 = tcg_temp_new_i64();
3821
3822    X = tcg_temp_new();
3823    lo = tcg_temp_new();
3824    hi = tcg_temp_new();
3825
3826    if (left) {
3827        /* create [reg:X:..] */
3828
3829        tcg_gen_shli_i32(lo, QREG_CC_X, 31);
3830        tcg_gen_concat_i32_i64(t0, lo, reg);
3831
3832        /* rotate */
3833
3834        tcg_gen_rotl_i64(t0, t0, shift64);
3835        tcg_temp_free_i64(shift64);
3836
3837        /* result is [reg:..:reg:X] */
3838
3839        tcg_gen_extr_i64_i32(lo, hi, t0);
3840        tcg_gen_andi_i32(X, lo, 1);
3841
3842        tcg_gen_shri_i32(lo, lo, 1);
3843    } else {
3844        /* create [..:X:reg] */
3845
3846        tcg_gen_concat_i32_i64(t0, reg, QREG_CC_X);
3847
3848        tcg_gen_rotr_i64(t0, t0, shift64);
3849        tcg_temp_free_i64(shift64);
3850
3851        /* result is value: [X:reg:..:reg] */
3852
3853        tcg_gen_extr_i64_i32(lo, hi, t0);
3854
3855        /* extract X */
3856
3857        tcg_gen_shri_i32(X, hi, 31);
3858
3859        /* extract result */
3860
3861        tcg_gen_shli_i32(hi, hi, 1);
3862    }
3863    tcg_temp_free_i64(t0);
3864    tcg_gen_or_i32(lo, lo, hi);
3865    tcg_temp_free(hi);
3866
3867    /* if shift == 0, register and X are not affected */
3868
3869    zero = tcg_const_i32(0);
3870    tcg_gen_movcond_i32(TCG_COND_EQ, X, shift, zero, QREG_CC_X, X);
3871    tcg_gen_movcond_i32(TCG_COND_EQ, reg, shift, zero, reg, lo);
3872    tcg_temp_free(zero);
3873    tcg_temp_free(lo);
3874
3875    return X;
3876}
3877
3878DISAS_INSN(rotate_im)
3879{
3880    TCGv shift;
3881    int tmp;
3882    int left = (insn & 0x100);
3883
3884    tmp = (insn >> 9) & 7;
3885    if (tmp == 0) {
3886        tmp = 8;
3887    }
3888
3889    shift = tcg_const_i32(tmp);
3890    if (insn & 8) {
3891        rotate(DREG(insn, 0), shift, left, 32);
3892    } else {
3893        TCGv X = rotate32_x(DREG(insn, 0), shift, left);
3894        rotate_x_flags(DREG(insn, 0), X, 32);
3895        tcg_temp_free(X);
3896    }
3897    tcg_temp_free(shift);
3898
3899    set_cc_op(s, CC_OP_FLAGS);
3900}
3901
3902DISAS_INSN(rotate8_im)
3903{
3904    int left = (insn & 0x100);
3905    TCGv reg;
3906    TCGv shift;
3907    int tmp;
3908
3909    reg = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);
3910
3911    tmp = (insn >> 9) & 7;
3912    if (tmp == 0) {
3913        tmp = 8;
3914    }
3915
3916    shift = tcg_const_i32(tmp);
3917    if (insn & 8) {
3918        rotate(reg, shift, left, 8);
3919    } else {
3920        TCGv X = rotate_x(reg, shift, left, 8);
3921        rotate_x_flags(reg, X, 8);
3922        tcg_temp_free(X);
3923    }
3924    tcg_temp_free(shift);
3925    gen_partset_reg(OS_BYTE, DREG(insn, 0), reg);
3926    set_cc_op(s, CC_OP_FLAGS);
3927}
3928
3929DISAS_INSN(rotate16_im)
3930{
3931    int left = (insn & 0x100);
3932    TCGv reg;
3933    TCGv shift;
3934    int tmp;
3935
3936    reg = gen_extend(s, DREG(insn, 0), OS_WORD, 0);
3937    tmp = (insn >> 9) & 7;
3938    if (tmp == 0) {
3939        tmp = 8;
3940    }
3941
3942    shift = tcg_const_i32(tmp);
3943    if (insn & 8) {
3944        rotate(reg, shift, left, 16);
3945    } else {
3946        TCGv X = rotate_x(reg, shift, left, 16);
3947        rotate_x_flags(reg, X, 16);
3948        tcg_temp_free(X);
3949    }
3950    tcg_temp_free(shift);
3951    gen_partset_reg(OS_WORD, DREG(insn, 0), reg);
3952    set_cc_op(s, CC_OP_FLAGS);
3953}
3954
3955DISAS_INSN(rotate_reg)
3956{
3957    TCGv reg;
3958    TCGv src;
3959    TCGv t0, t1;
3960    int left = (insn & 0x100);
3961
3962    reg = DREG(insn, 0);
3963    src = DREG(insn, 9);
3964    /* shift in [0..63] */
3965    t0 = tcg_temp_new();
3966    tcg_gen_andi_i32(t0, src, 63);
3967    t1 = tcg_temp_new_i32();
3968    if (insn & 8) {
3969        tcg_gen_andi_i32(t1, src, 31);
3970        rotate(reg, t1, left, 32);
3971        /* if shift == 0, clear C */
3972        tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
3973                            t0, QREG_CC_V /* 0 */,
3974                            QREG_CC_V /* 0 */, QREG_CC_C);
3975    } else {
3976        TCGv X;
3977        /* modulo 33 */
3978        tcg_gen_movi_i32(t1, 33);
3979        tcg_gen_remu_i32(t1, t0, t1);
3980        X = rotate32_x(DREG(insn, 0), t1, left);
3981        rotate_x_flags(DREG(insn, 0), X, 32);
3982        tcg_temp_free(X);
3983    }
3984    tcg_temp_free(t1);
3985    tcg_temp_free(t0);
3986    set_cc_op(s, CC_OP_FLAGS);
3987}
3988
3989DISAS_INSN(rotate8_reg)
3990{
3991    TCGv reg;
3992    TCGv src;
3993    TCGv t0, t1;
3994    int left = (insn & 0x100);
3995
3996    reg = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);
3997    src = DREG(insn, 9);
3998    /* shift in [0..63] */
3999    t0 = tcg_temp_new_i32();
4000    tcg_gen_andi_i32(t0, src, 63);
4001    t1 = tcg_temp_new_i32();
4002    if (insn & 8) {
4003        tcg_gen_andi_i32(t1, src, 7);
4004        rotate(reg, t1, left, 8);
4005        /* if shift == 0, clear C */
4006        tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
4007                            t0, QREG_CC_V /* 0 */,
4008                            QREG_CC_V /* 0 */, QREG_CC_C);
4009    } else {
4010        TCGv X;
4011        /* modulo 9 */
4012        tcg_gen_movi_i32(t1, 9);
4013        tcg_gen_remu_i32(t1, t0, t1);
4014        X = rotate_x(reg, t1, left, 8);
4015        rotate_x_flags(reg, X, 8);
4016        tcg_temp_free(X);
4017    }
4018    tcg_temp_free(t1);
4019    tcg_temp_free(t0);
4020    gen_partset_reg(OS_BYTE, DREG(insn, 0), reg);
4021    set_cc_op(s, CC_OP_FLAGS);
4022}
4023
4024DISAS_INSN(rotate16_reg)
4025{
4026    TCGv reg;
4027    TCGv src;
4028    TCGv t0, t1;
4029    int left = (insn & 0x100);
4030
4031    reg = gen_extend(s, DREG(insn, 0), OS_WORD, 0);
4032    src = DREG(insn, 9);
4033    /* shift in [0..63] */
4034    t0 = tcg_temp_new_i32();
4035    tcg_gen_andi_i32(t0, src, 63);
4036    t1 = tcg_temp_new_i32();
4037    if (insn & 8) {
4038        tcg_gen_andi_i32(t1, src, 15);
4039        rotate(reg, t1, left, 16);
4040        /* if shift == 0, clear C */
4041        tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
4042                            t0, QREG_CC_V /* 0 */,
4043                            QREG_CC_V /* 0 */, QREG_CC_C);
4044    } else {
4045        TCGv X;
4046        /* modulo 17 */
4047        tcg_gen_movi_i32(t1, 17);
4048        tcg_gen_remu_i32(t1, t0, t1);
4049        X = rotate_x(reg, t1, left, 16);
4050        rotate_x_flags(reg, X, 16);
4051        tcg_temp_free(X);
4052    }
4053    tcg_temp_free(t1);
4054    tcg_temp_free(t0);
4055    gen_partset_reg(OS_WORD, DREG(insn, 0), reg);
4056    set_cc_op(s, CC_OP_FLAGS);
4057}
4058
4059DISAS_INSN(rotate_mem)
4060{
4061    TCGv src;
4062    TCGv addr;
4063    TCGv shift;
4064    int left = (insn & 0x100);
4065
4066    SRC_EA(env, src, OS_WORD, 0, &addr);
4067
4068    shift = tcg_const_i32(1);
4069    if (insn & 0x0200) {
4070        rotate(src, shift, left, 16);
4071    } else {
4072        TCGv X = rotate_x(src, shift, left, 16);
4073        rotate_x_flags(src, X, 16);
4074        tcg_temp_free(X);
4075    }
4076    tcg_temp_free(shift);
4077    DEST_EA(env, insn, OS_WORD, src, &addr);
4078    set_cc_op(s, CC_OP_FLAGS);
4079}
4080
4081DISAS_INSN(bfext_reg)
4082{
4083    int ext = read_im16(env, s);
4084    int is_sign = insn & 0x200;
4085    TCGv src = DREG(insn, 0);
4086    TCGv dst = DREG(ext, 12);
4087    int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;
4088    int ofs = extract32(ext, 6, 5);  /* big bit-endian */
4089    int pos = 32 - ofs - len;        /* little bit-endian */
4090    TCGv tmp = tcg_temp_new();
4091    TCGv shift;
4092
4093    /*
4094     * In general, we're going to rotate the field so that it's at the
4095     * top of the word and then right-shift by the complement of the
4096     * width to extend the field.
4097     */
4098    if (ext & 0x20) {
4099        /* Variable width.  */
4100        if (ext & 0x800) {
4101            /* Variable offset.  */
4102            tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
4103            tcg_gen_rotl_i32(tmp, src, tmp);
4104        } else {
4105            tcg_gen_rotli_i32(tmp, src, ofs);
4106        }
4107
4108        shift = tcg_temp_new();
4109        tcg_gen_neg_i32(shift, DREG(ext, 0));
4110        tcg_gen_andi_i32(shift, shift, 31);
4111        tcg_gen_sar_i32(QREG_CC_N, tmp, shift);
4112        if (is_sign) {
4113            tcg_gen_mov_i32(dst, QREG_CC_N);
4114        } else {
4115            tcg_gen_shr_i32(dst, tmp, shift);
4116        }
4117        tcg_temp_free(shift);
4118    } else {
4119        /* Immediate width.  */
4120        if (ext & 0x800) {
4121            /* Variable offset */
4122            tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
4123            tcg_gen_rotl_i32(tmp, src, tmp);
4124            src = tmp;
4125            pos = 32 - len;
4126        } else {
4127            /*
4128             * Immediate offset.  If the field doesn't wrap around the
4129             * end of the word, rely on (s)extract completely.
4130             */
4131            if (pos < 0) {
4132                tcg_gen_rotli_i32(tmp, src, ofs);
4133                src = tmp;
4134                pos = 32 - len;
4135            }
4136        }
4137
4138        tcg_gen_sextract_i32(QREG_CC_N, src, pos, len);
4139        if (is_sign) {
4140            tcg_gen_mov_i32(dst, QREG_CC_N);
4141        } else {
4142            tcg_gen_extract_i32(dst, src, pos, len);
4143        }
4144    }
4145
4146    tcg_temp_free(tmp);
4147    set_cc_op(s, CC_OP_LOGIC);
4148}
4149
4150DISAS_INSN(bfext_mem)
4151{
4152    int ext = read_im16(env, s);
4153    int is_sign = insn & 0x200;
4154    TCGv dest = DREG(ext, 12);
4155    TCGv addr, len, ofs;
4156
4157    addr = gen_lea(env, s, insn, OS_UNSIZED);
4158    if (IS_NULL_QREG(addr)) {
4159        gen_addr_fault(s);
4160        return;
4161    }
4162
4163    if (ext & 0x20) {
4164        len = DREG(ext, 0);
4165    } else {
4166        len = tcg_const_i32(extract32(ext, 0, 5));
4167    }
4168    if (ext & 0x800) {
4169        ofs = DREG(ext, 6);
4170    } else {
4171        ofs = tcg_const_i32(extract32(ext, 6, 5));
4172    }
4173
4174    if (is_sign) {
4175        gen_helper_bfexts_mem(dest, cpu_env, addr, ofs, len);
4176        tcg_gen_mov_i32(QREG_CC_N, dest);
4177    } else {
4178        TCGv_i64 tmp = tcg_temp_new_i64();
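            /*
             * The helper packs the zero-extended field into the low half
             * and the field left-aligned (for the N and Z flags) into the
             * high half of its 64-bit return value.
             */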
4179        gen_helper_bfextu_mem(tmp, cpu_env, addr, ofs, len);
4180        tcg_gen_extr_i64_i32(dest, QREG_CC_N, tmp);
4181        tcg_temp_free_i64(tmp);
4182    }
4183    set_cc_op(s, CC_OP_LOGIC);
4184
4185    if (!(ext & 0x20)) {
4186        tcg_temp_free(len);
4187    }
4188    if (!(ext & 0x800)) {
4189        tcg_temp_free(ofs);
4190    }
4191}
4192
4193DISAS_INSN(bfop_reg)
4194{
4195    int ext = read_im16(env, s);
4196    TCGv src = DREG(insn, 0);
4197    int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;
4198    int ofs = extract32(ext, 6, 5);  /* big bit-endian */
4199    TCGv mask, tofs, tlen;
4200
4201    tofs = NULL;
4202    tlen = NULL;
4203    if ((insn & 0x0f00) == 0x0d00) { /* bfffo */
4204        tofs = tcg_temp_new();
4205        tlen = tcg_temp_new();
4206    }
4207
4208    if ((ext & 0x820) == 0) {
4209        /* Immediate width and offset.  */
4210        uint32_t maski = 0x7fffffffu >> (len - 1);
4211        if (ofs + len <= 32) {
4212            tcg_gen_shli_i32(QREG_CC_N, src, ofs);
4213        } else {
4214            tcg_gen_rotli_i32(QREG_CC_N, src, ofs);
4215        }
4216        tcg_gen_andi_i32(QREG_CC_N, QREG_CC_N, ~maski);
4217        mask = tcg_const_i32(ror32(maski, ofs));
4218        if (tofs) {
4219            tcg_gen_movi_i32(tofs, ofs);
4220            tcg_gen_movi_i32(tlen, len);
4221        }
4222    } else {
4223        TCGv tmp = tcg_temp_new();
4224        if (ext & 0x20) {
4225            /* Variable width */
4226            tcg_gen_subi_i32(tmp, DREG(ext, 0), 1);
4227            tcg_gen_andi_i32(tmp, tmp, 31);
4228            mask = tcg_const_i32(0x7fffffffu);
4229            tcg_gen_shr_i32(mask, mask, tmp);
4230            if (tlen) {
4231                tcg_gen_addi_i32(tlen, tmp, 1);
4232            }
4233        } else {
4234            /* Immediate width */
4235            mask = tcg_const_i32(0x7fffffffu >> (len - 1));
4236            if (tlen) {
4237                tcg_gen_movi_i32(tlen, len);
4238            }
4239        }
4240        if (ext & 0x800) {
4241            /* Variable offset */
4242            tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
4243            tcg_gen_rotl_i32(QREG_CC_N, src, tmp);
4244            tcg_gen_andc_i32(QREG_CC_N, QREG_CC_N, mask);
4245            tcg_gen_rotr_i32(mask, mask, tmp);
4246            if (tofs) {
4247                tcg_gen_mov_i32(tofs, tmp);
4248            }
4249        } else {
4250            /* Immediate offset (and variable width) */
4251            tcg_gen_rotli_i32(QREG_CC_N, src, ofs);
4252            tcg_gen_andc_i32(QREG_CC_N, QREG_CC_N, mask);
4253            tcg_gen_rotri_i32(mask, mask, ofs);
4254            if (tofs) {
4255                tcg_gen_movi_i32(tofs, ofs);
4256            }
4257        }
4258        tcg_temp_free(tmp);
4259    }
4260    set_cc_op(s, CC_OP_LOGIC);
4261
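        /*
         * QREG_CC_N now holds the field left-aligned, and mask has the
         * field bits clear and every other bit set: bfclr is AND with
         * mask, bfset is OR with its complement, and bfchg is XNOR,
         * which complements exactly the field bits.
         */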
4262    switch (insn & 0x0f00) {
4263    case 0x0a00: /* bfchg */
4264        tcg_gen_eqv_i32(src, src, mask);
4265        break;
4266    case 0x0c00: /* bfclr */
4267        tcg_gen_and_i32(src, src, mask);
4268        break;
4269    case 0x0d00: /* bfffo */
4270        gen_helper_bfffo_reg(DREG(ext, 12), QREG_CC_N, tofs, tlen);
4271        tcg_temp_free(tlen);
4272        tcg_temp_free(tofs);
4273        break;
4274    case 0x0e00: /* bfset */
4275        tcg_gen_orc_i32(src, src, mask);
4276        break;
4277    case 0x0800: /* bftst */
4278        /* flags already set; no other work to do.  */
4279        break;
4280    default:
4281        g_assert_not_reached();
4282    }
4283    tcg_temp_free(mask);
4284}
4285
4286DISAS_INSN(bfop_mem)
4287{
4288    int ext = read_im16(env, s);
4289    TCGv addr, len, ofs;
4290    TCGv_i64 t64;
4291
4292    addr = gen_lea(env, s, insn, OS_UNSIZED);
4293    if (IS_NULL_QREG(addr)) {
4294        gen_addr_fault(s);
4295        return;
4296    }
4297
4298    if (ext & 0x20) {
4299        len = DREG(ext, 0);
4300    } else {
4301        len = tcg_const_i32(extract32(ext, 0, 5));
4302    }
4303    if (ext & 0x800) {
4304        ofs = DREG(ext, 6);
4305    } else {
4306        ofs = tcg_const_i32(extract32(ext, 6, 5));
4307    }
4308
4309    switch (insn & 0x0f00) {
4310    case 0x0a00: /* bfchg */
4311        gen_helper_bfchg_mem(QREG_CC_N, cpu_env, addr, ofs, len);
4312        break;
4313    case 0x0c00: /* bfclr */
4314        gen_helper_bfclr_mem(QREG_CC_N, cpu_env, addr, ofs, len);
4315        break;
4316    case 0x0d00: /* bfffo */
4317        t64 = tcg_temp_new_i64();
4318        gen_helper_bfffo_mem(t64, cpu_env, addr, ofs, len);
4319        tcg_gen_extr_i64_i32(DREG(ext, 12), QREG_CC_N, t64);
4320        tcg_temp_free_i64(t64);
4321        break;
4322    case 0x0e00: /* bfset */
4323        gen_helper_bfset_mem(QREG_CC_N, cpu_env, addr, ofs, len);
4324        break;
4325    case 0x0800: /* bftst */
4326        gen_helper_bfexts_mem(QREG_CC_N, cpu_env, addr, ofs, len);
4327        break;
4328    default:
4329        g_assert_not_reached();
4330    }
4331    set_cc_op(s, CC_OP_LOGIC);
4332
4333    if (!(ext & 0x20)) {
4334        tcg_temp_free(len);
4335    }
4336    if (!(ext & 0x800)) {
4337        tcg_temp_free(ofs);
4338    }
4339}
4340
4341DISAS_INSN(bfins_reg)
4342{
4343    int ext = read_im16(env, s);
4344    TCGv dst = DREG(insn, 0);
4345    TCGv src = DREG(ext, 12);
4346    int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;
4347    int ofs = extract32(ext, 6, 5);  /* big bit-endian */
4348    int pos = 32 - ofs - len;        /* little bit-endian */
4349    TCGv tmp;
4350
4351    tmp = tcg_temp_new();
4352
4353    if (ext & 0x20) {
4354        /* Variable width */
4355        tcg_gen_neg_i32(tmp, DREG(ext, 0));
4356        tcg_gen_andi_i32(tmp, tmp, 31);
4357        tcg_gen_shl_i32(QREG_CC_N, src, tmp);
4358    } else {
4359        /* Immediate width */
4360        tcg_gen_shli_i32(QREG_CC_N, src, 32 - len);
4361    }
4362    set_cc_op(s, CC_OP_LOGIC);
4363
4364    /* Immediate width and offset */
4365    if ((ext & 0x820) == 0) {
4366        /* Check for suitability for deposit.  */
4367        if (pos >= 0) {
4368            tcg_gen_deposit_i32(dst, dst, src, pos, len);
4369        } else {
4370            uint32_t maski = -2U << (len - 1);
4371            uint32_t roti = (ofs + len) & 31;
4372            tcg_gen_andi_i32(tmp, src, ~maski);
4373            tcg_gen_rotri_i32(tmp, tmp, roti);
4374            tcg_gen_andi_i32(dst, dst, ror32(maski, roti));
4375            tcg_gen_or_i32(dst, dst, tmp);
4376        }
4377    } else {
4378        TCGv mask = tcg_temp_new();
4379        TCGv rot = tcg_temp_new();
4380
4381        if (ext & 0x20) {
4382            /* Variable width */
4383            tcg_gen_subi_i32(rot, DREG(ext, 0), 1);
4384            tcg_gen_andi_i32(rot, rot, 31);
4385            tcg_gen_movi_i32(mask, -2);
4386            tcg_gen_shl_i32(mask, mask, rot);
4387            tcg_gen_mov_i32(rot, DREG(ext, 0));
4388            tcg_gen_andc_i32(tmp, src, mask);
4389        } else {
4390            /* Immediate width (variable offset) */
4391            uint32_t maski = -2U << (len - 1);
4392            tcg_gen_andi_i32(tmp, src, ~maski);
4393            tcg_gen_movi_i32(mask, maski);
4394            tcg_gen_movi_i32(rot, len & 31);
4395        }
4396        if (ext & 0x800) {
4397            /* Variable offset */
4398            tcg_gen_add_i32(rot, rot, DREG(ext, 6));
4399        } else {
4400            /* Immediate offset (variable width) */
4401            tcg_gen_addi_i32(rot, rot, ofs);
4402        }
4403        tcg_gen_andi_i32(rot, rot, 31);
4404        tcg_gen_rotr_i32(mask, mask, rot);
4405        tcg_gen_rotr_i32(tmp, tmp, rot);
4406        tcg_gen_and_i32(dst, dst, mask);
4407        tcg_gen_or_i32(dst, dst, tmp);
4408
4409        tcg_temp_free(rot);
4410        tcg_temp_free(mask);
4411    }
4412    tcg_temp_free(tmp);
4413}
4414
4415DISAS_INSN(bfins_mem)
4416{
4417    int ext = read_im16(env, s);
4418    TCGv src = DREG(ext, 12);
4419    TCGv addr, len, ofs;
4420
4421    addr = gen_lea(env, s, insn, OS_UNSIZED);
4422    if (IS_NULL_QREG(addr)) {
4423        gen_addr_fault(s);
4424        return;
4425    }
4426
4427    if (ext & 0x20) {
4428        len = DREG(ext, 0);
4429    } else {
4430        len = tcg_const_i32(extract32(ext, 0, 5));
4431    }
4432    if (ext & 0x800) {
4433        ofs = DREG(ext, 6);
4434    } else {
4435        ofs = tcg_const_i32(extract32(ext, 6, 5));
4436    }
4437
4438    gen_helper_bfins_mem(QREG_CC_N, cpu_env, addr, src, ofs, len);
4439    set_cc_op(s, CC_OP_LOGIC);
4440
4441    if (!(ext & 0x20)) {
4442        tcg_temp_free(len);
4443    }
4444    if (!(ext & 0x800)) {
4445        tcg_temp_free(ofs);
4446    }
4447}
4448
4449DISAS_INSN(ff1)
4450{
4451    TCGv reg;
4452    reg = DREG(insn, 0);
4453    gen_logic_cc(s, reg, OS_LONG);
4454    gen_helper_ff1(reg, reg);
4455}
4456
4457DISAS_INSN(chk)
4458{
4459    TCGv src, reg;
4460    int opsize;
4461
4462    switch ((insn >> 7) & 3) {
4463    case 3:
4464        opsize = OS_WORD;
4465        break;
4466    case 2:
4467        if (m68k_feature(env, M68K_FEATURE_CHK2)) {
4468            opsize = OS_LONG;
4469            break;
4470        }
4471        /* fallthru */
4472    default:
4473        gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
4474        return;
4475    }
4476    SRC_EA(env, src, opsize, 1, NULL);
4477    reg = gen_extend(s, DREG(insn, 9), opsize, 1);
4478
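        /* The helper raises the CHK exception if reg < 0 or reg > src. */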
4479    gen_flush_flags(s);
4480    gen_helper_chk(cpu_env, reg, src);
4481}
4482
4483DISAS_INSN(chk2)
4484{
4485    uint16_t ext;
4486    TCGv addr1, addr2, bound1, bound2, reg;
4487    int opsize;
4488
4489    switch ((insn >> 9) & 3) {
4490    case 0:
4491        opsize = OS_BYTE;
4492        break;
4493    case 1:
4494        opsize = OS_WORD;
4495        break;
4496    case 2:
4497        opsize = OS_LONG;
4498        break;
4499    default:
4500        gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
4501        return;
4502    }
4503
4504    ext = read_im16(env, s);
4505    if ((ext & 0x0800) == 0) {
4506        gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
4507        return;
4508    }
4509
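        /*
         * The bound pair is read from the effective address: the lower
         * bound first, then the upper bound opsize_bytes(opsize) later.
         */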
4510    addr1 = gen_lea(env, s, insn, OS_UNSIZED);
4511    addr2 = tcg_temp_new();
4512    tcg_gen_addi_i32(addr2, addr1, opsize_bytes(opsize));
4513
4514    bound1 = gen_load(s, opsize, addr1, 1, IS_USER(s));
4515    tcg_temp_free(addr1);
4516    bound2 = gen_load(s, opsize, addr2, 1, IS_USER(s));
4517    tcg_temp_free(addr2);
4518
4519    reg = tcg_temp_new();
4520    if (ext & 0x8000) {
4521        tcg_gen_mov_i32(reg, AREG(ext, 12));
4522    } else {
4523        gen_ext(reg, DREG(ext, 12), opsize, 1);
4524    }
4525
4526    gen_flush_flags(s);
4527    gen_helper_chk2(cpu_env, reg, bound1, bound2);
4528    tcg_temp_free(reg);
4529    tcg_temp_free(bound1);
4530    tcg_temp_free(bound2);
4531}
4532
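    /*
     * Copy one 16-byte line for MOVE16.  Both addresses are forced to
     * 16-byte alignment and the transfer is done as two 64-bit accesses.
     */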
4533static void m68k_copy_line(TCGv dst, TCGv src, int index)
4534{
4535    TCGv addr;
4536    TCGv_i64 t0, t1;
4537
4538    addr = tcg_temp_new();
4539
4540    t0 = tcg_temp_new_i64();
4541    t1 = tcg_temp_new_i64();
4542
4543    tcg_gen_andi_i32(addr, src, ~15);
4544    tcg_gen_qemu_ld64(t0, addr, index);
4545    tcg_gen_addi_i32(addr, addr, 8);
4546    tcg_gen_qemu_ld64(t1, addr, index);
4547
4548    tcg_gen_andi_i32(addr, dst, ~15);
4549    tcg_gen_qemu_st64(t0, addr, index);
4550    tcg_gen_addi_i32(addr, addr, 8);
4551    tcg_gen_qemu_st64(t1, addr, index);
4552
4553    tcg_temp_free_i64(t0);
4554    tcg_temp_free_i64(t1);
4555    tcg_temp_free(addr);
4556}
4557
4558DISAS_INSN(move16_reg)
4559{
4560    int index = IS_USER(s);
4561    TCGv tmp;
4562    uint16_t ext;
4563
4564    ext = read_im16(env, s);
4565    if ((ext & (1 << 15)) == 0) {
4566        gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
4567    }
4568
4569    m68k_copy_line(AREG(ext, 12), AREG(insn, 0), index);
4570
4571    /* Ax can be Ay, so save Ay before incrementing Ax */
4572    tmp = tcg_temp_new();
4573    tcg_gen_mov_i32(tmp, AREG(ext, 12));
4574    tcg_gen_addi_i32(AREG(insn, 0), AREG(insn, 0), 16);
4575    tcg_gen_addi_i32(AREG(ext, 12), tmp, 16);
4576    tcg_temp_free(tmp);
4577}
4578
4579DISAS_INSN(move16_mem)
4580{
4581    int index = IS_USER(s);
4582    TCGv reg, addr;
4583
4584    reg = AREG(insn, 0);
4585    addr = tcg_const_i32(read_im32(env, s));
4586
4587    if ((insn >> 3) & 1) {
4588        /* MOVE16 (xxx).L, (Ay) */
4589        m68k_copy_line(reg, addr, index);
4590    } else {
4591        /* MOVE16 (Ay), (xxx).L */
4592        m68k_copy_line(addr, reg, index);
4593    }
4594
4595    tcg_temp_free(addr);
4596
4597    if (((insn >> 3) & 2) == 0) {
4598        /* (Ay)+ */
4599        tcg_gen_addi_i32(reg, reg, 16);
4600    }
4601}
4602
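    /*
     * ColdFire STLDSR: push the current SR, then load SR from an
     * immediate.  The opcode must be followed by the move-to-SR word
     * 0x46fc and the new SR value.
     */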
4603DISAS_INSN(strldsr)
4604{
4605    uint16_t ext;
4606    uint32_t addr;
4607
4608    addr = s->pc - 2;
4609    ext = read_im16(env, s);
4610    if (ext != 0x46FC) {
4611        gen_exception(s, addr, EXCP_ILLEGAL);
4612        return;
4613    }
4614    ext = read_im16(env, s);
4615    if (IS_USER(s) || (ext & SR_S) == 0) {
4616        gen_exception(s, addr, EXCP_PRIVILEGE);
4617        return;
4618    }
4619    gen_push(s, gen_get_sr(s));
4620    gen_set_sr_im(s, ext, 0);
4621}
4622
4623DISAS_INSN(move_from_sr)
4624{
4625    TCGv sr;
4626
4627    if (IS_USER(s) && !m68k_feature(env, M68K_FEATURE_M68000)) {
4628        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4629        return;
4630    }
4631    sr = gen_get_sr(s);
4632    DEST_EA(env, insn, OS_WORD, sr, NULL);
4633}
4634
4635#if defined(CONFIG_SOFTMMU)
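    /*
     * MOVES: move to or from an alternate address space, selected by the
     * SFC (source) or DFC (destination) control register.
     */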
4636DISAS_INSN(moves)
4637{
4638    int opsize;
4639    uint16_t ext;
4640    TCGv reg;
4641    TCGv addr;
4642    int extend;
4643
4644    if (IS_USER(s)) {
4645        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4646        return;
4647    }
4648
4649    ext = read_im16(env, s);
4650
4651    opsize = insn_opsize(insn);
4652
4653    if (ext & 0x8000) {
4654        /* address register */
4655        reg = AREG(ext, 12);
4656        extend = 1;
4657    } else {
4658        /* data register */
4659        reg = DREG(ext, 12);
4660        extend = 0;
4661    }
4662
4663    addr = gen_lea(env, s, insn, opsize);
4664    if (IS_NULL_QREG(addr)) {
4665        gen_addr_fault(s);
4666        return;
4667    }
4668
4669    if (ext & 0x0800) {
4670        /* from reg to ea */
4671        gen_store(s, opsize, addr, reg, DFC_INDEX(s));
4672    } else {
4673        /* from ea to reg */
4674        TCGv tmp = gen_load(s, opsize, addr, 0, SFC_INDEX(s));
4675        if (extend) {
4676            gen_ext(reg, tmp, opsize, 1);
4677        } else {
4678            gen_partset_reg(opsize, reg, tmp);
4679        }
4680        tcg_temp_free(tmp);
4681    }
4682    switch (extract32(insn, 3, 3)) {
4683    case 3: /* Indirect postincrement.  */
4684        tcg_gen_addi_i32(AREG(insn, 0), addr,
4685                         REG(insn, 0) == 7 && opsize == OS_BYTE
4686                         ? 2
4687                         : opsize_bytes(opsize));
4688        break;
4689    case 4: /* Indirect predecrement.  */
4690        tcg_gen_mov_i32(AREG(insn, 0), addr);
4691        break;
4692    }
4693}
4694
4695DISAS_INSN(move_to_sr)
4696{
4697    if (IS_USER(s)) {
4698        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4699        return;
4700    }
4701    gen_move_to_sr(env, s, insn, false);
4702    gen_exit_tb(s);
4703}
4704
4705DISAS_INSN(move_from_usp)
4706{
4707    if (IS_USER(s)) {
4708        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4709        return;
4710    }
4711    tcg_gen_ld_i32(AREG(insn, 0), cpu_env,
4712                   offsetof(CPUM68KState, sp[M68K_USP]));
4713}
4714
4715DISAS_INSN(move_to_usp)
4716{
4717    if (IS_USER(s)) {
4718        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4719        return;
4720    }
4721    tcg_gen_st_i32(AREG(insn, 0), cpu_env,
4722                   offsetof(CPUM68KState, sp[M68K_USP]));
4723}
4724
4725DISAS_INSN(halt)
4726{
4727    if (IS_USER(s)) {
4728        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4729        return;
4730    }
4731
4732    gen_exception(s, s->pc, EXCP_HALT_INSN);
4733}
4734
4735DISAS_INSN(stop)
4736{
4737    uint16_t ext;
4738
4739    if (IS_USER(s)) {
4740        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4741        return;
4742    }
4743
4744    ext = read_im16(env, s);
4745
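        /* Load SR from the immediate and halt until an exception occurs. */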
4746    gen_set_sr_im(s, ext, 0);
4747    tcg_gen_movi_i32(cpu_halted, 1);
4748    gen_exception(s, s->pc, EXCP_HLT);
4749}
4750
4751DISAS_INSN(rte)
4752{
4753    if (IS_USER(s)) {
4754        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4755        return;
4756    }
4757    gen_exception(s, s->base.pc_next, EXCP_RTE);
4758}
4759
4760DISAS_INSN(cf_movec)
4761{
4762    uint16_t ext;
4763    TCGv reg;
4764
4765    if (IS_USER(s)) {
4766        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4767        return;
4768    }
4769
4770    ext = read_im16(env, s);
4771
4772    if (ext & 0x8000) {
4773        reg = AREG(ext, 12);
4774    } else {
4775        reg = DREG(ext, 12);
4776    }
4777    gen_helper_cf_movec_to(cpu_env, tcg_const_i32(ext & 0xfff), reg);
4778    gen_exit_tb(s);
4779}
4780
4781DISAS_INSN(m68k_movec)
4782{
4783    uint16_t ext;
4784    TCGv reg;
4785
4786    if (IS_USER(s)) {
4787        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4788        return;
4789    }
4790
4791    ext = read_im16(env, s);
4792
4793    if (ext & 0x8000) {
4794        reg = AREG(ext, 12);
4795    } else {
4796        reg = DREG(ext, 12);
4797    }
4798    if (insn & 1) {
4799        gen_helper_m68k_movec_to(cpu_env, tcg_const_i32(ext & 0xfff), reg);
4800    } else {
4801        gen_helper_m68k_movec_from(reg, cpu_env, tcg_const_i32(ext & 0xfff));
4802    }
4803    gen_exit_tb(s);
4804}
4805
4806DISAS_INSN(intouch)
4807{
4808    if (IS_USER(s)) {
4809        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4810        return;
4811    }
4812    /* ICache fetch.  Implement as no-op.  */
4813}
4814
4815DISAS_INSN(cpushl)
4816{
4817    if (IS_USER(s)) {
4818        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4819        return;
4820    }
4821    /* Cache push/invalidate.  Implement as no-op.  */
4822}
4823
4824DISAS_INSN(cpush)
4825{
4826    if (IS_USER(s)) {
4827        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4828        return;
4829    }
4830    /* Cache push/invalidate.  Implement as no-op.  */
4831}
4832
4833DISAS_INSN(cinv)
4834{
4835    if (IS_USER(s)) {
4836        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4837        return;
4838    }
4839    /* Invalidate cache line.  Implement as no-op.  */
4840}
4841
4842#if defined(CONFIG_SOFTMMU)
4843DISAS_INSN(pflush)
4844{
4845    TCGv opmode;
4846
4847    if (IS_USER(s)) {
4848        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4849        return;
4850    }
4851
4852    opmode = tcg_const_i32((insn >> 3) & 3);
4853    gen_helper_pflush(cpu_env, AREG(insn, 0), opmode);
4854    tcg_temp_free(opmode);
4855}
4856
4857DISAS_INSN(ptest)
4858{
4859    TCGv is_read;
4860
4861    if (IS_USER(s)) {
4862        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4863        return;
4864    }
4865    is_read = tcg_const_i32((insn >> 5) & 1);
4866    gen_helper_ptest(cpu_env, AREG(insn, 0), is_read);
4867    tcg_temp_free(is_read);
4868}
4869#endif
4870
4871DISAS_INSN(wddata)
4872{
4873    gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4874}
4875
4876DISAS_INSN(wdebug)
4877{
4878    if (IS_USER(s)) {
4879        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4880        return;
4881    }
4882    /* TODO: Implement wdebug.  */
4883    cpu_abort(env_cpu(env), "WDEBUG not implemented");
4884}
4885#endif
4886
4887DISAS_INSN(trap)
4888{
4889    gen_exception(s, s->base.pc_next, EXCP_TRAP0 + (insn & 0xf));
4890}
4891
4892static void gen_load_fcr(DisasContext *s, TCGv res, int reg)
4893{
4894    switch (reg) {
4895    case M68K_FPIAR:
4896        tcg_gen_movi_i32(res, 0);
4897        break;
4898    case M68K_FPSR:
4899        tcg_gen_ld_i32(res, cpu_env, offsetof(CPUM68KState, fpsr));
4900        break;
4901    case M68K_FPCR:
4902        tcg_gen_ld_i32(res, cpu_env, offsetof(CPUM68KState, fpcr));
4903        break;
4904    }
4905}
4906
4907static void gen_store_fcr(DisasContext *s, TCGv val, int reg)
4908{
4909    switch (reg) {
4910    case M68K_FPIAR:
4911        break;
4912    case M68K_FPSR:
4913        tcg_gen_st_i32(val, cpu_env, offsetof(CPUM68KState, fpsr));
4914        break;
4915    case M68K_FPCR:
4916        gen_helper_set_fpcr(cpu_env, val);
4917        break;
4918    }
4919}
4920
4921static void gen_qemu_store_fcr(DisasContext *s, TCGv addr, int reg)
4922{
4923    int index = IS_USER(s);
4924    TCGv tmp;
4925
4926    tmp = tcg_temp_new();
4927    gen_load_fcr(s, tmp, reg);
4928    tcg_gen_qemu_st32(tmp, addr, index);
4929    tcg_temp_free(tmp);
4930}
4931
4932static void gen_qemu_load_fcr(DisasContext *s, TCGv addr, int reg)
4933{
4934    int index = IS_USER(s);
4935    TCGv tmp;
4936
4937    tmp = tcg_temp_new();
4938    tcg_gen_qemu_ld32u(tmp, addr, index);
4939    gen_store_fcr(s, tmp, reg);
4940    tcg_temp_free(tmp);
4941}
4942
4943
4944static void gen_op_fmove_fcr(CPUM68KState *env, DisasContext *s,
4945                             uint32_t insn, uint32_t ext)
4946{
4947    int mask = (ext >> 10) & 7;
4948    int is_write = (ext >> 13) & 1;
4949    int mode = extract32(insn, 3, 3);
4950    int i;
4951    TCGv addr, tmp;
4952
4953    switch (mode) {
4954    case 0: /* Dn */
4955        if (mask != M68K_FPIAR && mask != M68K_FPSR && mask != M68K_FPCR) {
4956            gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
4957            return;
4958        }
4959        if (is_write) {
4960            gen_load_fcr(s, DREG(insn, 0), mask);
4961        } else {
4962            gen_store_fcr(s, DREG(insn, 0), mask);
4963        }
4964        return;
4965    case 1: /* An, only with FPIAR */
4966        if (mask != M68K_FPIAR) {
4967            gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
4968            return;
4969        }
4970        if (is_write) {
4971            gen_load_fcr(s, AREG(insn, 0), mask);
4972        } else {
4973            gen_store_fcr(s, AREG(insn, 0), mask);
4974        }
4975        return;
4976    case 7: /* Immediate */
4977        if (REG(insn, 0) == 4) {
4978            if (is_write ||
4979                (mask != M68K_FPIAR && mask != M68K_FPSR &&
4980                 mask != M68K_FPCR)) {
4981                gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
4982                return;
4983            }
4984            tmp = tcg_const_i32(read_im32(env, s));
4985            gen_store_fcr(s, tmp, mask);
4986            tcg_temp_free(tmp);
4987            return;
4988        }
4989        break;
4990    default:
4991        break;
4992    }
4993
4994    tmp = gen_lea(env, s, insn, OS_LONG);
4995    if (IS_NULL_QREG(tmp)) {
4996        gen_addr_fault(s);
4997        return;
4998    }
4999
5000    addr = tcg_temp_new();
5001    tcg_gen_mov_i32(addr, tmp);
5002
5003    /*
5004     * mask:
5005     *
5006     * 0b100 Floating-Point Control Register
5007     * 0b010 Floating-Point Status Register
5008     * 0b001 Floating-Point Instruction Address Register
5009     *
5010     */
5011
5012    if (is_write && mode == 4) {
5013        for (i = 2; i >= 0; i--, mask >>= 1) {
5014            if (mask & 1) {
5015                gen_qemu_store_fcr(s, addr, 1 << i);
5016                if (mask != 1) {
5017                    tcg_gen_subi_i32(addr, addr, opsize_bytes(OS_LONG));
5018                }
5019            }
5020        }
5021        tcg_gen_mov_i32(AREG(insn, 0), addr);
5022    } else {
5023        for (i = 0; i < 3; i++, mask >>= 1) {
5024            if (mask & 1) {
5025                if (is_write) {
5026                    gen_qemu_store_fcr(s, addr, 1 << i);
5027                } else {
5028                    gen_qemu_load_fcr(s, addr, 1 << i);
5029                }
5030                if (mask != 1 || mode == 3) {
5031                    tcg_gen_addi_i32(addr, addr, opsize_bytes(OS_LONG));
5032                }
5033            }
5034        }
5035        if (mode == 3) {
5036            tcg_gen_mov_i32(AREG(insn, 0), addr);
5037        }
5038    }
5039    tcg_temp_free_i32(addr);
5040}
5041
5042static void gen_op_fmovem(CPUM68KState *env, DisasContext *s,
5043                          uint32_t insn, uint32_t ext)
5044{
5045    int opsize;
5046    TCGv addr, tmp;
5047    int mode = (ext >> 11) & 0x3;
5048    int is_load = ((ext & 0x2000) == 0);
5049
5050    if (m68k_feature(s->env, M68K_FEATURE_FPU)) {
5051        opsize = OS_EXTENDED;
5052    } else {
5053        opsize = OS_DOUBLE;  /* FIXME */
5054    }
5055
5056    addr = gen_lea(env, s, insn, opsize);
5057    if (IS_NULL_QREG(addr)) {
5058        gen_addr_fault(s);
5059        return;
5060    }
5061
5062    tmp = tcg_temp_new();
5063    if (mode & 0x1) {
5064        /* Dynamic register list */
5065        tcg_gen_ext8u_i32(tmp, DREG(ext, 4));
5066    } else {
5067        /* Static register list */
5068        tcg_gen_movi_i32(tmp, ext & 0xff);
5069    }
5070
5071    if (!is_load && (mode & 2) == 0) {
5072        /*
5073         * Predecrement addressing mode: only available when storing
5074         * registers to memory.
5075         */
5076        if (opsize == OS_EXTENDED) {
5077            gen_helper_fmovemx_st_predec(tmp, cpu_env, addr, tmp);
5078        } else {
5079            gen_helper_fmovemd_st_predec(tmp, cpu_env, addr, tmp);
5080        }
5081    } else {
5082        /* postincrement addressing mode */
5083        if (opsize == OS_EXTENDED) {
5084            if (is_load) {
5085                gen_helper_fmovemx_ld_postinc(tmp, cpu_env, addr, tmp);
5086            } else {
5087                gen_helper_fmovemx_st_postinc(tmp, cpu_env, addr, tmp);
5088            }
5089        } else {
5090            if (is_load) {
5091                gen_helper_fmovemd_ld_postinc(tmp, cpu_env, addr, tmp);
5092            } else {
5093                gen_helper_fmovemd_st_postinc(tmp, cpu_env, addr, tmp);
5094            }
5095        }
5096    }
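        /*
         * For the (An)+ and -(An) modes (octal 030/040) the helpers
         * return the updated address, which is written back to An.
         */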
5097    if ((insn & 070) == 030 || (insn & 070) == 040) {
5098        tcg_gen_mov_i32(AREG(insn, 0), tmp);
5099    }
5100    tcg_temp_free(tmp);
5101}
5102
5103/*
5104 * ??? FP exceptions are not implemented.  Most exceptions are deferred until
5105 * immediately before the next FP instruction is executed.
5106 */
5107DISAS_INSN(fpu)
5108{
5109    uint16_t ext;
5110    int opmode;
5111    int opsize;
5112    TCGv_ptr cpu_src, cpu_dest;
5113
5114    ext = read_im16(env, s);
5115    opmode = ext & 0x7f;
5116    switch ((ext >> 13) & 7) {
5117    case 0:
5118        break;
5119    case 1:
5120        goto undef;
5121    case 2:
5122        if (insn == 0xf200 && (ext & 0xfc00) == 0x5c00) {
5123            /* fmovecr */
5124            TCGv rom_offset = tcg_const_i32(opmode);
5125            cpu_dest = gen_fp_ptr(REG(ext, 7));
5126            gen_helper_fconst(cpu_env, cpu_dest, rom_offset);
5127            tcg_temp_free_ptr(cpu_dest);
5128            tcg_temp_free(rom_offset);
5129            return;
5130        }
5131        break;
5132    case 3: /* fmove out */
5133        cpu_src = gen_fp_ptr(REG(ext, 7));
5134        opsize = ext_opsize(ext, 10);
5135        if (gen_ea_fp(env, s, insn, opsize, cpu_src,
5136                      EA_STORE, IS_USER(s)) == -1) {
5137            gen_addr_fault(s);
5138        }
5139        gen_helper_ftst(cpu_env, cpu_src);
5140        tcg_temp_free_ptr(cpu_src);
5141        return;
5142    case 4: /* fmove to control register.  */
5143    case 5: /* fmove from control register.  */
5144        gen_op_fmove_fcr(env, s, insn, ext);
5145        return;
5146    case 6: /* fmovem */
5147    case 7:
5148        if ((ext & 0x1000) == 0 && !m68k_feature(s->env, M68K_FEATURE_FPU)) {
5149            goto undef;
5150        }
5151        gen_op_fmovem(env, s, insn, ext);
5152        return;
5153    }
5154    if (ext & (1 << 14)) {
5155        /* Source effective address.  */
5156        opsize = ext_opsize(ext, 10);
5157        cpu_src = gen_fp_result_ptr();
5158        if (gen_ea_fp(env, s, insn, opsize, cpu_src,
5159                      EA_LOADS, IS_USER(s)) == -1) {
5160            gen_addr_fault(s);
5161            return;
5162        }
5163    } else {
5164        /* Source register.  */
5165        opsize = OS_EXTENDED;
5166        cpu_src = gen_fp_ptr(REG(ext, 10));
5167    }
5168    cpu_dest = gen_fp_ptr(REG(ext, 7));
5169    switch (opmode) {
5170    case 0: /* fmove */
5171        gen_fp_move(cpu_dest, cpu_src);
5172        break;
5173    case 0x40: /* fsmove */
5174        gen_helper_fsround(cpu_env, cpu_dest, cpu_src);
5175        break;
5176    case 0x44: /* fdmove */
5177        gen_helper_fdround(cpu_env, cpu_dest, cpu_src);
5178        break;
5179    case 1: /* fint */
5180        gen_helper_firound(cpu_env, cpu_dest, cpu_src);
5181        break;
5182    case 2: /* fsinh */
5183        gen_helper_fsinh(cpu_env, cpu_dest, cpu_src);
5184        break;
5185    case 3: /* fintrz */
5186        gen_helper_fitrunc(cpu_env, cpu_dest, cpu_src);
5187        break;
5188    case 4: /* fsqrt */
5189        gen_helper_fsqrt(cpu_env, cpu_dest, cpu_src);
5190        break;
5191    case 0x41: /* fssqrt */
5192        gen_helper_fssqrt(cpu_env, cpu_dest, cpu_src);
5193        break;
5194    case 0x45: /* fdsqrt */
5195        gen_helper_fdsqrt(cpu_env, cpu_dest, cpu_src);
5196        break;
5197    case 0x06: /* flognp1 */
5198        gen_helper_flognp1(cpu_env, cpu_dest, cpu_src);
5199        break;
5200    case 0x08: /* fetoxm1 */
5201        gen_helper_fetoxm1(cpu_env, cpu_dest, cpu_src);
5202        break;
5203    case 0x09: /* ftanh */
5204        gen_helper_ftanh(cpu_env, cpu_dest, cpu_src);
5205        break;
5206    case 0x0a: /* fatan */
5207        gen_helper_fatan(cpu_env, cpu_dest, cpu_src);
5208        break;
5209    case 0x0c: /* fasin */
5210        gen_helper_fasin(cpu_env, cpu_dest, cpu_src);
5211        break;
5212    case 0x0d: /* fatanh */
5213        gen_helper_fatanh(cpu_env, cpu_dest, cpu_src);
5214        break;
5215    case 0x0e: /* fsin */
5216        gen_helper_fsin(cpu_env, cpu_dest, cpu_src);
5217        break;
5218    case 0x0f: /* ftan */
5219        gen_helper_ftan(cpu_env, cpu_dest, cpu_src);
5220        break;
5221    case 0x10: /* fetox */
5222        gen_helper_fetox(cpu_env, cpu_dest, cpu_src);
5223        break;
5224    case 0x11: /* ftwotox */
5225        gen_helper_ftwotox(cpu_env, cpu_dest, cpu_src);
5226        break;
5227    case 0x12: /* ftentox */
5228        gen_helper_ftentox(cpu_env, cpu_dest, cpu_src);
5229        break;
5230    case 0x14: /* flogn */
5231        gen_helper_flogn(cpu_env, cpu_dest, cpu_src);
5232        break;
5233    case 0x15: /* flog10 */
5234        gen_helper_flog10(cpu_env, cpu_dest, cpu_src);
5235        break;
5236    case 0x16: /* flog2 */
5237        gen_helper_flog2(cpu_env, cpu_dest, cpu_src);
5238        break;
5239    case 0x18: /* fabs */
5240        gen_helper_fabs(cpu_env, cpu_dest, cpu_src);
5241        break;
5242    case 0x58: /* fsabs */
5243        gen_helper_fsabs(cpu_env, cpu_dest, cpu_src);
5244        break;
5245    case 0x5c: /* fdabs */
5246        gen_helper_fdabs(cpu_env, cpu_dest, cpu_src);
5247        break;
5248    case 0x19: /* fcosh */
5249        gen_helper_fcosh(cpu_env, cpu_dest, cpu_src);
5250        break;
5251    case 0x1a: /* fneg */
5252        gen_helper_fneg(cpu_env, cpu_dest, cpu_src);
5253        break;
5254    case 0x5a: /* fsneg */
5255        gen_helper_fsneg(cpu_env, cpu_dest, cpu_src);
5256        break;
5257    case 0x5e: /* fdneg */
5258        gen_helper_fdneg(cpu_env, cpu_dest, cpu_src);
5259        break;
5260    case 0x1c: /* facos */
5261        gen_helper_facos(cpu_env, cpu_dest, cpu_src);
5262        break;
5263    case 0x1d: /* fcos */
5264        gen_helper_fcos(cpu_env, cpu_dest, cpu_src);
5265        break;
5266    case 0x1e: /* fgetexp */
5267        gen_helper_fgetexp(cpu_env, cpu_dest, cpu_src);
5268        break;
5269    case 0x1f: /* fgetman */
5270        gen_helper_fgetman(cpu_env, cpu_dest, cpu_src);
5271        break;
5272    case 0x20: /* fdiv */
5273        gen_helper_fdiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
5274        break;
5275    case 0x60: /* fsdiv */
5276        gen_helper_fsdiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
5277        break;
5278    case 0x64: /* fddiv */
5279        gen_helper_fddiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
5280        break;
5281    case 0x21: /* fmod */
5282        gen_helper_fmod(cpu_env, cpu_dest, cpu_src, cpu_dest);
5283        break;
5284    case 0x22: /* fadd */
5285        gen_helper_fadd(cpu_env, cpu_dest, cpu_src, cpu_dest);
5286        break;
5287    case 0x62: /* fsadd */
5288        gen_helper_fsadd(cpu_env, cpu_dest, cpu_src, cpu_dest);
5289        break;
5290    case 0x66: /* fdadd */
5291        gen_helper_fdadd(cpu_env, cpu_dest, cpu_src, cpu_dest);
5292        break;
5293    case 0x23: /* fmul */
5294        gen_helper_fmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
5295        break;
5296    case 0x63: /* fsmul */
5297        gen_helper_fsmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
5298        break;
5299    case 0x67: /* fdmul */
5300        gen_helper_fdmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
5301        break;
5302    case 0x24: /* fsgldiv */
5303        gen_helper_fsgldiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
5304        break;
5305    case 0x25: /* frem */
5306        gen_helper_frem(cpu_env, cpu_dest, cpu_src, cpu_dest);
5307        break;
5308    case 0x26: /* fscale */
5309        gen_helper_fscale(cpu_env, cpu_dest, cpu_src, cpu_dest);
5310        break;
5311    case 0x27: /* fsglmul */
5312        gen_helper_fsglmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
5313        break;
5314    case 0x28: /* fsub */
5315        gen_helper_fsub(cpu_env, cpu_dest, cpu_src, cpu_dest);
5316        break;
5317    case 0x68: /* fssub */
5318        gen_helper_fssub(cpu_env, cpu_dest, cpu_src, cpu_dest);
5319        break;
5320    case 0x6c: /* fdsub */
5321        gen_helper_fdsub(cpu_env, cpu_dest, cpu_src, cpu_dest);
5322        break;
5323    case 0x30: case 0x31: case 0x32:
5324    case 0x33: case 0x34: case 0x35:
5325    case 0x36: case 0x37: {
5326            TCGv_ptr cpu_dest2 = gen_fp_ptr(REG(ext, 0));
5327            gen_helper_fsincos(cpu_env, cpu_dest, cpu_dest2, cpu_src);
5328            tcg_temp_free_ptr(cpu_dest2);
5329        }
5330        break;
5331    case 0x38: /* fcmp */
5332        gen_helper_fcmp(cpu_env, cpu_src, cpu_dest);
5333        return;
5334    case 0x3a: /* ftst */
5335        gen_helper_ftst(cpu_env, cpu_src);
5336        return;
5337    default:
5338        goto undef;
5339    }
5340    tcg_temp_free_ptr(cpu_src);
5341    gen_helper_ftst(cpu_env, cpu_dest);
5342    tcg_temp_free_ptr(cpu_dest);
5343    return;
5344undef:
5345    /* FIXME: Is this right for offset addressing modes?  */
5346    s->pc -= 2;
5347    disas_undef_fpu(env, s, insn);
5348}
5349
5350static void gen_fcc_cond(DisasCompare *c, DisasContext *s, int cond)
5351{
5352    TCGv fpsr;
5353
5354    c->g1 = 1;
5355    c->v2 = tcg_const_i32(0);
5356    c->g2 = 0;
5357    /* TODO: Raise BSUN exception.  */
5358    fpsr = tcg_temp_new();
5359    gen_load_fcr(s, fpsr, M68K_FPSR);
5360    switch (cond) {
5361    case 0:  /* False */
5362    case 16: /* Signaling False */
5363        c->v1 = c->v2;
5364        c->tcond = TCG_COND_NEVER;
5365        break;
5366    case 1:  /* EQual Z */
5367    case 17: /* Signaling EQual Z */
5368        c->v1 = tcg_temp_new();
5369        c->g1 = 0;
5370        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
5371        c->tcond = TCG_COND_NE;
5372        break;
5373    case 2:  /* Ordered Greater Than !(A || Z || N) */
5374    case 18: /* Greater Than !(A || Z || N) */
5375        c->v1 = tcg_temp_new();
5376        c->g1 = 0;
5377        tcg_gen_andi_i32(c->v1, fpsr,
5378                         FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
5379        c->tcond = TCG_COND_EQ;
5380        break;
5381    case 3:  /* Ordered Greater than or Equal Z || !(A || N) */
5382    case 19: /* Greater than or Equal Z || !(A || N) */
5383        c->v1 = tcg_temp_new();
5384        c->g1 = 0;
5385        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
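            /* Shift A into the N bit position so it can be ORed with N. */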
5386        tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_A));
5387        tcg_gen_andi_i32(fpsr, fpsr, FPSR_CC_Z | FPSR_CC_N);
5388        tcg_gen_or_i32(c->v1, c->v1, fpsr);
5389        tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
5390        c->tcond = TCG_COND_NE;
5391        break;
5392    case 4:  /* Ordered Less Than !(!N || A || Z) */
5393    case 20: /* Less Than !(!N || A || Z) */
5394        c->v1 = tcg_temp_new();
5395        c->g1 = 0;
5396        tcg_gen_xori_i32(c->v1, fpsr, FPSR_CC_N);
5397        tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_N | FPSR_CC_A | FPSR_CC_Z);
5398        c->tcond = TCG_COND_EQ;
5399        break;
5400    case 5:  /* Ordered Less than or Equal Z || (N && !A) */
5401    case 21: /* Less than or Equal Z || (N && !A) */
5402        c->v1 = tcg_temp_new();
5403        c->g1 = 0;
5404        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
5405        tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_A));
5406        tcg_gen_andc_i32(c->v1, fpsr, c->v1);
5407        tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_Z | FPSR_CC_N);
5408        c->tcond = TCG_COND_NE;
5409        break;
5410    case 6:  /* Ordered Greater or Less than !(A || Z) */
5411    case 22: /* Greater or Less than !(A || Z) */
5412        c->v1 = tcg_temp_new();
5413        c->g1 = 0;
5414        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z);
5415        c->tcond = TCG_COND_EQ;
5416        break;
5417    case 7:  /* Ordered !A */
5418    case 23: /* Greater, Less or Equal !A */
5419        c->v1 = tcg_temp_new();
5420        c->g1 = 0;
5421        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
5422        c->tcond = TCG_COND_EQ;
5423        break;
5424    case 8:  /* Unordered A */
5425    case 24: /* Not Greater, Less or Equal A */
5426        c->v1 = tcg_temp_new();
5427        c->g1 = 0;
5428        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
5429        c->tcond = TCG_COND_NE;
5430        break;
5431    case 9:  /* Unordered or Equal A || Z */
5432    case 25: /* Not Greater or Less than A || Z */
5433        c->v1 = tcg_temp_new();
5434        c->g1 = 0;
5435        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z);
5436        c->tcond = TCG_COND_NE;
5437        break;
5438    case 10: /* Unordered or Greater Than A || !(N || Z) */
5439    case 26: /* Not Less or Equal A || !(N || Z) */
5440        c->v1 = tcg_temp_new();
5441        c->g1 = 0;
5442        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
5443        tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_Z));
5444        tcg_gen_andi_i32(fpsr, fpsr, FPSR_CC_A | FPSR_CC_N);
5445        tcg_gen_or_i32(c->v1, c->v1, fpsr);
5446        tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
5447        c->tcond = TCG_COND_NE;
5448        break;
5449    case 11: /* Unordered or Greater or Equal A || Z || !N */
5450    case 27: /* Not Less Than A || Z || !N */
5451        c->v1 = tcg_temp_new();
5452        c->g1 = 0;
5453        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
5454        tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
5455        c->tcond = TCG_COND_NE;
5456        break;
5457    case 12: /* Unordered or Less Than A || (N && !Z) */
5458    case 28: /* Not Greater than or Equal A || (N && !Z) */
5459        c->v1 = tcg_temp_new();
5460        c->g1 = 0;
5461        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
5462        tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_Z));
5463        tcg_gen_andc_i32(c->v1, fpsr, c->v1);
5464        tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_A | FPSR_CC_N);
5465        c->tcond = TCG_COND_NE;
5466        break;
5467    case 13: /* Unordered or Less or Equal A || Z || N */
5468    case 29: /* Not Greater Than A || Z || N */
5469        c->v1 = tcg_temp_new();
5470        c->g1 = 0;
5471        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
5472        c->tcond = TCG_COND_NE;
5473        break;
5474    case 14: /* Not Equal !Z */
5475    case 30: /* Signaling Not Equal !Z */
5476        c->v1 = tcg_temp_new();
5477        c->g1 = 0;
5478        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
5479        c->tcond = TCG_COND_EQ;
5480        break;
5481    case 15: /* True */
5482    case 31: /* Signaling True */
5483        c->v1 = c->v2;
5484        c->tcond = TCG_COND_ALWAYS;
5485        break;
5486    }
5487    tcg_temp_free(fpsr);
5488}
5489
5490static void gen_fjmpcc(DisasContext *s, int cond, TCGLabel *l1)
5491{
5492    DisasCompare c;
5493
5494    gen_fcc_cond(&c, s, cond);
5495    update_cc_op(s);
5496    tcg_gen_brcond_i32(c.tcond, c.v1, c.v2, l1);
5497    free_cond(&c);
5498}
5499
5500DISAS_INSN(fbcc)
5501{
5502    uint32_t offset;
5503    uint32_t base;
5504    TCGLabel *l1;
5505
5506    base = s->pc;
5507    offset = (int16_t)read_im16(env, s);
5508    if (insn & (1 << 6)) {
5509        offset = (offset << 16) | read_im16(env, s);
5510    }
5511
5512    l1 = gen_new_label();
5513    update_cc_op(s);
5514    gen_fjmpcc(s, insn & 0x3f, l1);
5515    gen_jmp_tb(s, 0, s->pc);
5516    gen_set_label(l1);
5517    gen_jmp_tb(s, 1, base + offset);
5518}
5519
5520DISAS_INSN(fscc)
5521{
5522    DisasCompare c;
5523    int cond;
5524    TCGv tmp;
5525    uint16_t ext;
5526
5527    ext = read_im16(env, s);
5528    cond = ext & 0x3f;
5529    gen_fcc_cond(&c, s, cond);
5530
5531    tmp = tcg_temp_new();
5532    tcg_gen_setcond_i32(c.tcond, tmp, c.v1, c.v2);
5533    free_cond(&c);
5534
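        /* Scc stores 0xff for true and 0x00 for false: turn 0/1 into 0/-1. */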
5535    tcg_gen_neg_i32(tmp, tmp);
5536    DEST_EA(env, insn, OS_BYTE, tmp, NULL);
5537    tcg_temp_free(tmp);
5538}
5539
5540#if defined(CONFIG_SOFTMMU)
5541DISAS_INSN(frestore)
5542{
5543    TCGv addr;
5544
5545    if (IS_USER(s)) {
5546        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
5547        return;
5548    }
5549    if (m68k_feature(s->env, M68K_FEATURE_M68040)) {
5550        SRC_EA(env, addr, OS_LONG, 0, NULL);
5551        /* FIXME: check the state frame */
5552    } else {
5553        disas_undef(env, s, insn);
5554    }
5555}
5556
5557DISAS_INSN(fsave)
5558{
5559    if (IS_USER(s)) {
5560        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
5561        return;
5562    }
5563
5564    if (m68k_feature(s->env, M68K_FEATURE_M68040)) {
5565        /* always write IDLE */
5566        TCGv idle = tcg_const_i32(0x41000000);
5567        DEST_EA(env, insn, OS_LONG, idle, NULL);
5568        tcg_temp_free(idle);
5569    } else {
5570        disas_undef(env, s, insn);
5571    }
5572}
5573#endif
5574
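    /*
     * Extract the upper or lower 16-bit MAC operand according to the
     * MACSR mode: fractional operands are kept in the high half, signed
     * operands are sign-extended and unsigned operands zero-extended.
     */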
5575static inline TCGv gen_mac_extract_word(DisasContext *s, TCGv val, int upper)
5576{
5577    TCGv tmp = tcg_temp_new();
5578    if (s->env->macsr & MACSR_FI) {
5579        if (upper)
5580            tcg_gen_andi_i32(tmp, val, 0xffff0000);
5581        else
5582            tcg_gen_shli_i32(tmp, val, 16);
5583    } else if (s->env->macsr & MACSR_SU) {
5584        if (upper)
5585            tcg_gen_sari_i32(tmp, val, 16);
5586        else
5587            tcg_gen_ext16s_i32(tmp, val);
5588    } else {
5589        if (upper)
5590            tcg_gen_shri_i32(tmp, val, 16);
5591        else
5592            tcg_gen_ext16u_i32(tmp, val);
5593    }
5594    return tmp;
5595}
5596
5597static void gen_mac_clear_flags(void)
5598{
5599    tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR,
5600                     ~(MACSR_V | MACSR_Z | MACSR_N | MACSR_EV));
5601}
5602
5603DISAS_INSN(mac)
5604{
5605    TCGv rx;
5606    TCGv ry;
5607    uint16_t ext;
5608    int acc;
5609    TCGv tmp;
5610    TCGv addr;
5611    TCGv loadval;
5612    int dual;
5613    TCGv saved_flags;
5614
5615    if (!s->done_mac) {
5616        s->mactmp = tcg_temp_new_i64();
5617        s->done_mac = 1;
5618    }
5619
5620    ext = read_im16(env, s);
5621
5622    acc = ((insn >> 7) & 1) | ((ext >> 3) & 2);
5623    dual = ((insn & 0x30) != 0 && (ext & 3) != 0);
5624    if (dual && !m68k_feature(s->env, M68K_FEATURE_CF_EMAC_B)) {
5625        disas_undef(env, s, insn);
5626        return;
5627    }
5628    if (insn & 0x30) {
5629        /* MAC with load.  */
5630        tmp = gen_lea(env, s, insn, OS_LONG);
5631        addr = tcg_temp_new();
5632        tcg_gen_and_i32(addr, tmp, QREG_MAC_MASK);
5633        /*
5634         * Load the value now to ensure correct exception behavior.
5635         * Perform writeback after reading the MAC inputs.
5636         */
5637        loadval = gen_load(s, OS_LONG, addr, 0, IS_USER(s));
5638
5639        acc ^= 1;
5640        rx = (ext & 0x8000) ? AREG(ext, 12) : DREG(insn, 12);
5641        ry = (ext & 8) ? AREG(ext, 0) : DREG(ext, 0);
5642    } else {
5643        loadval = addr = NULL_QREG;
5644        rx = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
5645        ry = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5646    }
5647
5648    gen_mac_clear_flags();
5649#if 0
5650    l1 = -1;
5651    /* Disabled because conditional branches clobber temporary vars.  */
5652    if ((s->env->macsr & MACSR_OMC) != 0 && !dual) {
5653        /* Skip the multiply if we know we will ignore it.  */
5654        l1 = gen_new_label();
5655        tmp = tcg_temp_new();
5656        tcg_gen_andi_i32(tmp, QREG_MACSR, 1 << (acc + 8));
5657        gen_op_jmp_nz32(tmp, l1);
5658    }
5659#endif
5660
5661    if ((ext & 0x0800) == 0) {
5662        /* Word.  */
5663        rx = gen_mac_extract_word(s, rx, (ext & 0x80) != 0);
5664        ry = gen_mac_extract_word(s, ry, (ext & 0x40) != 0);
5665    }
5666    if (s->env->macsr & MACSR_FI) {
5667        gen_helper_macmulf(s->mactmp, cpu_env, rx, ry);
5668    } else {
5669        if (s->env->macsr & MACSR_SU)
5670            gen_helper_macmuls(s->mactmp, cpu_env, rx, ry);
5671        else
5672            gen_helper_macmulu(s->mactmp, cpu_env, rx, ry);
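        /*
         * Integer modes also apply the shift selected by ext bits 10:9
         * (the EMAC scale factor): 01 shifts the product left by one,
         * 11 shifts it right by one.
         */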
5673        switch ((ext >> 9) & 3) {
5674        case 1:
5675            tcg_gen_shli_i64(s->mactmp, s->mactmp, 1);
5676            break;
5677        case 3:
5678            tcg_gen_shri_i64(s->mactmp, s->mactmp, 1);
5679            break;
5680        }
5681    }
5682
5683    if (dual) {
5684        /* Save the overflow flag from the multiply.  */
5685        saved_flags = tcg_temp_new();
5686        tcg_gen_mov_i32(saved_flags, QREG_MACSR);
5687    } else {
5688        saved_flags = NULL_QREG;
5689    }
5690
5691#if 0
5692    /* Disabled because conditional branches clobber temporary vars.  */
5693    if ((s->env->macsr & MACSR_OMC) != 0 && dual) {
5694        /* Skip the accumulate if the value is already saturated.  */
5695        l1 = gen_new_label();
5696        tmp = tcg_temp_new();
5697        gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc));
5698        gen_op_jmp_nz32(tmp, l1);
5699    }
5700#endif
5701
5702    if (insn & 0x100)
5703        tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
5704    else
5705        tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
5706
5707    if (s->env->macsr & MACSR_FI)
5708        gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
5709    else if (s->env->macsr & MACSR_SU)
5710        gen_helper_macsats(cpu_env, tcg_const_i32(acc));
5711    else
5712        gen_helper_macsatu(cpu_env, tcg_const_i32(acc));
5713
5714#if 0
5715    /* Disabled because conditional branches clobber temporary vars.  */
5716    if (l1 != -1)
5717        gen_set_label(l1);
5718#endif
5719
5720    if (dual) {
5721        /* Dual accumulate variant.  */
5722        acc = (ext >> 2) & 3;
5723        /* Restore the overflow flag from the multiply.  */
5724        tcg_gen_mov_i32(QREG_MACSR, saved_flags);
5725#if 0
5726        /* Disabled because conditional branches clobber temporary vars.  */
5727        if ((s->env->macsr & MACSR_OMC) != 0) {
5728            /* Skip the accumulate if the value is already saturated.  */
5729            l1 = gen_new_label();
5730            tmp = tcg_temp_new();
5731            gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc));
5732            gen_op_jmp_nz32(tmp, l1);
5733        }
5734#endif
5735        if (ext & 2)
5736            tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
5737        else
5738            tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
5739        if (s->env->macsr & MACSR_FI)
5740            gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
5741        else if (s->env->macsr & MACSR_SU)
5742            gen_helper_macsats(cpu_env, tcg_const_i32(acc));
5743        else
5744            gen_helper_macsatu(cpu_env, tcg_const_i32(acc));
5745#if 0
5746        /* Disabled because conditional branches clobber temporary vars.  */
5747        if (l1 != -1)
5748            gen_set_label(l1);
5749#endif
5750    }
5751    gen_helper_mac_set_flags(cpu_env, tcg_const_i32(acc));
5752
5753    if (insn & 0x30) {
5754        TCGv rw;
5755        rw = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
5756        tcg_gen_mov_i32(rw, loadval);
5757        /*
5758         * FIXME: Should address writeback happen with the masked or
5759         * unmasked value?
5760         */
5761        switch ((insn >> 3) & 7) {
5762        case 3: /* Post-increment.  */
5763            tcg_gen_addi_i32(AREG(insn, 0), addr, 4);
5764            break;
5765        case 4: /* Pre-decrement.  */
5766            tcg_gen_mov_i32(AREG(insn, 0), addr);
5767        }
5768        tcg_temp_free(loadval);
5769    }
5770}
5771
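/*
 * Move an accumulator to a data/address register.  Fractional mode uses
 * the get_macf helper; with MACSR_OMC clear the low 32 bits of the
 * accumulator are copied directly; otherwise the signed/unsigned
 * saturating helpers are used.  Insn bit 6 additionally clears the
 * accumulator and its PAV (overflow) flag.
 */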
5772DISAS_INSN(from_mac)
5773{
5774    TCGv rx;
5775    TCGv_i64 acc;
5776    int accnum;
5777
5778    rx = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5779    accnum = (insn >> 9) & 3;
5780    acc = MACREG(accnum);
5781    if (s->env->macsr & MACSR_FI) {
5782        gen_helper_get_macf(rx, cpu_env, acc);
5783    } else if ((s->env->macsr & MACSR_OMC) == 0) {
5784        tcg_gen_extrl_i64_i32(rx, acc);
5785    } else if (s->env->macsr & MACSR_SU) {
5786        gen_helper_get_macs(rx, acc);
5787    } else {
5788        gen_helper_get_macu(rx, acc);
5789    }
5790    if (insn & 0x40) {
5791        tcg_gen_movi_i64(acc, 0);
5792        tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
5793    }
5794}
5795
5796DISAS_INSN(move_mac)
5797{
5798    /* FIXME: This can be done without a helper.  */
5799    int src;
5800    TCGv dest;
5801    src = insn & 3;
5802    dest = tcg_const_i32((insn >> 9) & 3);
5803    gen_helper_mac_move(cpu_env, dest, tcg_const_i32(src));
5804    gen_mac_clear_flags();
5805    gen_helper_mac_set_flags(cpu_env, dest);
5806}
5807
5808DISAS_INSN(from_macsr)
5809{
5810    TCGv reg;
5811
5812    reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5813    tcg_gen_mov_i32(reg, QREG_MACSR);
5814}
5815
5816DISAS_INSN(from_mask)
5817{
5818    TCGv reg;
5819    reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5820    tcg_gen_mov_i32(reg, QREG_MAC_MASK);
5821}
5822
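/*
 * Read a pair of accumulator extension words; insn bit 10 selects index
 * 0 (ACC0/ACC1) or 2 (ACC2/ACC3).  Fractional and integer modes use
 * different packing, hence the separate helpers.
 */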
5823DISAS_INSN(from_mext)
5824{
5825    TCGv reg;
5826    TCGv acc;
5827    reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5828    acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
5829    if (s->env->macsr & MACSR_FI)
5830        gen_helper_get_mac_extf(reg, cpu_env, acc);
5831    else
5832        gen_helper_get_mac_exti(reg, cpu_env, acc);
5833}
5834
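/* Copy the low four MACSR flag bits into the CCR via set_sr.  */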
5835DISAS_INSN(macsr_to_ccr)
5836{
5837    TCGv tmp = tcg_temp_new();
5838    tcg_gen_andi_i32(tmp, QREG_MACSR, 0xf);
5839    gen_helper_set_sr(cpu_env, tmp);
5840    tcg_temp_free(tmp);
5841    set_cc_op(s, CC_OP_FLAGS);
5842}
5843
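/*
 * Load an accumulator from <EA>.  Fractional mode stores the value
 * shifted left by 8 in the 64-bit accumulator temp; signed mode
 * sign-extends and unsigned mode zero-extends.  The accumulator's
 * PAV (overflow) flag in MACSR is cleared.
 */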
5844DISAS_INSN(to_mac)
5845{
5846    TCGv_i64 acc;
5847    TCGv val;
5848    int accnum;
5849    accnum = (insn >> 9) & 3;
5850    acc = MACREG(accnum);
5851    SRC_EA(env, val, OS_LONG, 0, NULL);
5852    if (s->env->macsr & MACSR_FI) {
5853        tcg_gen_ext_i32_i64(acc, val);
5854        tcg_gen_shli_i64(acc, acc, 8);
5855    } else if (s->env->macsr & MACSR_SU) {
5856        tcg_gen_ext_i32_i64(acc, val);
5857    } else {
5858        tcg_gen_extu_i32_i64(acc, val);
5859    }
5860    tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
5861    gen_mac_clear_flags();
5862    gen_helper_mac_set_flags(cpu_env, tcg_const_i32(accnum));
5863}
5864
5865DISAS_INSN(to_macsr)
5866{
5867    TCGv val;
5868    SRC_EA(env, val, OS_LONG, 0, NULL);
5869    gen_helper_set_macsr(cpu_env, val);
5870    gen_exit_tb(s);
5871}
5872
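/*
 * Only the low 16 bits of MASK are writable; the upper half is forced
 * to ones so that address masking (see the MAC-with-load path) only
 * affects the low 16 address bits.
 */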
5873DISAS_INSN(to_mask)
5874{
5875    TCGv val;
5876    SRC_EA(env, val, OS_LONG, 0, NULL);
5877    tcg_gen_ori_i32(QREG_MAC_MASK, val, 0xffff0000);
5878}
5879
5880DISAS_INSN(to_mext)
5881{
5882    TCGv val;
5883    TCGv acc;
5884    SRC_EA(env, val, OS_LONG, 0, NULL);
5885    acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
5886    if (s->env->macsr & MACSR_FI)
5887        gen_helper_set_mac_extf(cpu_env, val, acc);
5888    else if (s->env->macsr & MACSR_SU)
5889        gen_helper_set_mac_exts(cpu_env, val, acc);
5890    else
5891        gen_helper_set_mac_extu(cpu_env, val, acc);
5892}
5893
5894static disas_proc opcode_table[65536];
5895
5896static void
5897register_opcode (disas_proc proc, uint16_t opcode, uint16_t mask)
5898{
5899  int i;
5900  int from;
5901  int to;
5902
5903  /* Sanity check.  All set bits must be included in the mask.  */
5904  if (opcode & ~mask) {
5905      fprintf(stderr,
5906              "qemu internal error: bogus opcode definition %04x/%04x\n",
5907              opcode, mask);
5908      abort();
5909  }
5910  /*
5911   * This could probably be cleverer.  For now just optimize the case where
5912   * the top bits are known.
5913   */
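  /*
   * Worked example (illustrative): register_opcode(proc, 0x4e40, 0xfff0)
   * finds the first zero mask bit at 0x0008, so i becomes 0x0010,
   * from = 0x4e40, to = 0x4e50, and the sixteen slots 0x4e40..0x4e4f
   * all point at proc.
   */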
5914  /* Find the first zero bit in the mask.  */
5915  i = 0x8000;
5916  while ((i & mask) != 0)
5917      i >>= 1;
5918  /* Iterate over all combinations of this and lower bits.  */
5919  if (i == 0)
5920      i = 1;
5921  else
5922      i <<= 1;
5923  from = opcode & ~(i - 1);
5924  to = from + i;
5925  for (i = from; i < to; i++) {
5926      if ((i & mask) == opcode)
5927          opcode_table[i] = proc;
5928  }
5929}
5930
5931/*
5932 * Register m68k opcode handlers.  Order is important.
5933 * Later insns override earlier ones.
5934 */
5935void register_m68k_insns (CPUM68KState *env)
5936{
5937    /*
5938     * Build the opcode table only once to avoid
5939     * multithreading issues.
5940     */
5941    if (opcode_table[0] != NULL) {
5942        return;
5943    }
5944
5945    /*
5946     * Use BASE() for instructions available
5947     * on both CF_ISA_A and M68000.
5948     */
5949#define BASE(name, opcode, mask) \
5950    register_opcode(disas_##name, 0x##opcode, 0x##mask)
5951#define INSN(name, opcode, mask, feature) do { \
5952    if (m68k_feature(env, M68K_FEATURE_##feature)) \
5953        BASE(name, opcode, mask); \
5954    } while(0)
5955    BASE(undef,     0000, 0000);
5956    INSN(arith_im,  0080, fff8, CF_ISA_A);
5957    INSN(arith_im,  0000, ff00, M68000);
5958    INSN(chk2,      00c0, f9c0, CHK2);
5959    INSN(bitrev,    00c0, fff8, CF_ISA_APLUSC);
5960    BASE(bitop_reg, 0100, f1c0);
5961    BASE(bitop_reg, 0140, f1c0);
5962    BASE(bitop_reg, 0180, f1c0);
5963    BASE(bitop_reg, 01c0, f1c0);
5964    INSN(movep,     0108, f138, MOVEP);
5965    INSN(arith_im,  0280, fff8, CF_ISA_A);
5966    INSN(arith_im,  0200, ff00, M68000);
5967    INSN(undef,     02c0, ffc0, M68000);
5968    INSN(byterev,   02c0, fff8, CF_ISA_APLUSC);
5969    INSN(arith_im,  0480, fff8, CF_ISA_A);
5970    INSN(arith_im,  0400, ff00, M68000);
5971    INSN(undef,     04c0, ffc0, M68000);
5972    INSN(arith_im,  0600, ff00, M68000);
5973    INSN(undef,     06c0, ffc0, M68000);
5974    INSN(ff1,       04c0, fff8, CF_ISA_APLUSC);
5975    INSN(arith_im,  0680, fff8, CF_ISA_A);
5976    INSN(arith_im,  0c00, ff38, CF_ISA_A);
5977    INSN(arith_im,  0c00, ff00, M68000);
5978    BASE(bitop_im,  0800, ffc0);
5979    BASE(bitop_im,  0840, ffc0);
5980    BASE(bitop_im,  0880, ffc0);
5981    BASE(bitop_im,  08c0, ffc0);
5982    INSN(arith_im,  0a80, fff8, CF_ISA_A);
5983    INSN(arith_im,  0a00, ff00, M68000);
5984#if defined(CONFIG_SOFTMMU)
5985    INSN(moves,     0e00, ff00, M68000);
5986#endif
5987    INSN(cas,       0ac0, ffc0, CAS);
5988    INSN(cas,       0cc0, ffc0, CAS);
5989    INSN(cas,       0ec0, ffc0, CAS);
5990    INSN(cas2w,     0cfc, ffff, CAS);
5991    INSN(cas2l,     0efc, ffff, CAS);
5992    BASE(move,      1000, f000);
5993    BASE(move,      2000, f000);
5994    BASE(move,      3000, f000);
5995    INSN(chk,       4000, f040, M68000);
5996    INSN(strldsr,   40e7, ffff, CF_ISA_APLUSC);
5997    INSN(negx,      4080, fff8, CF_ISA_A);
5998    INSN(negx,      4000, ff00, M68000);
5999    INSN(undef,     40c0, ffc0, M68000);
6000    INSN(move_from_sr, 40c0, fff8, CF_ISA_A);
6001    INSN(move_from_sr, 40c0, ffc0, M68000);
6002    BASE(lea,       41c0, f1c0);
6003    BASE(clr,       4200, ff00);
6004    BASE(undef,     42c0, ffc0);
6005    INSN(move_from_ccr, 42c0, fff8, CF_ISA_A);
6006    INSN(move_from_ccr, 42c0, ffc0, M68000);
6007    INSN(neg,       4480, fff8, CF_ISA_A);
6008    INSN(neg,       4400, ff00, M68000);
6009    INSN(undef,     44c0, ffc0, M68000);
6010    BASE(move_to_ccr, 44c0, ffc0);
6011    INSN(not,       4680, fff8, CF_ISA_A);
6012    INSN(not,       4600, ff00, M68000);
6013#if defined(CONFIG_SOFTMMU)
6014    BASE(move_to_sr, 46c0, ffc0);
6015#endif
6016    INSN(nbcd,      4800, ffc0, M68000);
6017    INSN(linkl,     4808, fff8, M68000);
6018    BASE(pea,       4840, ffc0);
6019    BASE(swap,      4840, fff8);
6020    INSN(bkpt,      4848, fff8, BKPT);
6021    INSN(movem,     48d0, fbf8, CF_ISA_A);
6022    INSN(movem,     48e8, fbf8, CF_ISA_A);
6023    INSN(movem,     4880, fb80, M68000);
6024    BASE(ext,       4880, fff8);
6025    BASE(ext,       48c0, fff8);
6026    BASE(ext,       49c0, fff8);
6027    BASE(tst,       4a00, ff00);
6028    INSN(tas,       4ac0, ffc0, CF_ISA_B);
6029    INSN(tas,       4ac0, ffc0, M68000);
6030#if defined(CONFIG_SOFTMMU)
6031    INSN(halt,      4ac8, ffff, CF_ISA_A);
6032#endif
6033    INSN(pulse,     4acc, ffff, CF_ISA_A);
6034    BASE(illegal,   4afc, ffff);
6035    INSN(mull,      4c00, ffc0, CF_ISA_A);
6036    INSN(mull,      4c00, ffc0, LONG_MULDIV);
6037    INSN(divl,      4c40, ffc0, CF_ISA_A);
6038    INSN(divl,      4c40, ffc0, LONG_MULDIV);
6039    INSN(sats,      4c80, fff8, CF_ISA_B);
6040    BASE(trap,      4e40, fff0);
6041    BASE(link,      4e50, fff8);
6042    BASE(unlk,      4e58, fff8);
6043#if defined(CONFIG_SOFTMMU)
6044    INSN(move_to_usp, 4e60, fff8, USP);
6045    INSN(move_from_usp, 4e68, fff8, USP);
6046    INSN(reset,     4e70, ffff, M68000);
6047    BASE(stop,      4e72, ffff);
6048    BASE(rte,       4e73, ffff);
6049    INSN(cf_movec,  4e7b, ffff, CF_ISA_A);
6050    INSN(m68k_movec, 4e7a, fffe, MOVEC);
6051#endif
6052    BASE(nop,       4e71, ffff);
6053    INSN(rtd,       4e74, ffff, RTD);
6054    BASE(rts,       4e75, ffff);
6055    INSN(rtr,       4e77, ffff, M68000);
6056    BASE(jump,      4e80, ffc0);
6057    BASE(jump,      4ec0, ffc0);
6058    INSN(addsubq,   5000, f080, M68000);
6059    BASE(addsubq,   5080, f0c0);
6060    INSN(scc,       50c0, f0f8, CF_ISA_A); /* Scc.B Dx   */
6061    INSN(scc,       50c0, f0c0, M68000);   /* Scc.B <EA> */
6062    INSN(dbcc,      50c8, f0f8, M68000);
6063    INSN(tpf,       51f8, fff8, CF_ISA_A);
6064
6065    /* Branch instructions.  */
6066    BASE(branch,    6000, f000);
6067    /* Disable long branch instructions, then add back the ones we want.  */
6068    BASE(undef,     60ff, f0ff); /* All long branches.  */
6069    INSN(branch,    60ff, f0ff, CF_ISA_B);
6070    INSN(undef,     60ff, ffff, CF_ISA_B); /* bra.l */
6071    INSN(branch,    60ff, ffff, BRAL);
6072    INSN(branch,    60ff, f0ff, BCCL);
6073
6074    BASE(moveq,     7000, f100);
6075    INSN(mvzs,      7100, f100, CF_ISA_B);
6076    BASE(or,        8000, f000);
6077    BASE(divw,      80c0, f0c0);
6078    INSN(sbcd_reg,  8100, f1f8, M68000);
6079    INSN(sbcd_mem,  8108, f1f8, M68000);
6080    BASE(addsub,    9000, f000);
6081    INSN(undef,     90c0, f0c0, CF_ISA_A);
6082    INSN(subx_reg,  9180, f1f8, CF_ISA_A);
6083    INSN(subx_reg,  9100, f138, M68000);
6084    INSN(subx_mem,  9108, f138, M68000);
6085    INSN(suba,      91c0, f1c0, CF_ISA_A);
6086    INSN(suba,      90c0, f0c0, M68000);
6087
6088    BASE(undef_mac, a000, f000);
6089    INSN(mac,       a000, f100, CF_EMAC);
6090    INSN(from_mac,  a180, f9b0, CF_EMAC);
6091    INSN(move_mac,  a110, f9fc, CF_EMAC);
6092    INSN(from_macsr,a980, f9f0, CF_EMAC);
6093    INSN(from_mask, ad80, fff0, CF_EMAC);
6094    INSN(from_mext, ab80, fbf0, CF_EMAC);
6095    INSN(macsr_to_ccr, a9c0, ffff, CF_EMAC);
6096    INSN(to_mac,    a100, f9c0, CF_EMAC);
6097    INSN(to_macsr,  a900, ffc0, CF_EMAC);
6098    INSN(to_mext,   ab00, fbc0, CF_EMAC);
6099    INSN(to_mask,   ad00, ffc0, CF_EMAC);
6100
6101    INSN(mov3q,     a140, f1c0, CF_ISA_B);
6102    INSN(cmp,       b000, f1c0, CF_ISA_B); /* cmp.b */
6103    INSN(cmp,       b040, f1c0, CF_ISA_B); /* cmp.w */
6104    INSN(cmpa,      b0c0, f1c0, CF_ISA_B); /* cmpa.w */
6105    INSN(cmp,       b080, f1c0, CF_ISA_A);
6106    INSN(cmpa,      b1c0, f1c0, CF_ISA_A);
6107    INSN(cmp,       b000, f100, M68000);
6108    INSN(eor,       b100, f100, M68000);
6109    INSN(cmpm,      b108, f138, M68000);
6110    INSN(cmpa,      b0c0, f0c0, M68000);
6111    INSN(eor,       b180, f1c0, CF_ISA_A);
6112    BASE(and,       c000, f000);
6113    INSN(exg_dd,    c140, f1f8, M68000);
6114    INSN(exg_aa,    c148, f1f8, M68000);
6115    INSN(exg_da,    c188, f1f8, M68000);
6116    BASE(mulw,      c0c0, f0c0);
6117    INSN(abcd_reg,  c100, f1f8, M68000);
6118    INSN(abcd_mem,  c108, f1f8, M68000);
6119    BASE(addsub,    d000, f000);
6120    INSN(undef,     d0c0, f0c0, CF_ISA_A);
6121    INSN(addx_reg,      d180, f1f8, CF_ISA_A);
6122    INSN(addx_reg,  d100, f138, M68000);
6123    INSN(addx_mem,  d108, f138, M68000);
6124    INSN(adda,      d1c0, f1c0, CF_ISA_A);
6125    INSN(adda,      d0c0, f0c0, M68000);
6126    INSN(shift_im,  e080, f0f0, CF_ISA_A);
6127    INSN(shift_reg, e0a0, f0f0, CF_ISA_A);
6128    INSN(shift8_im, e000, f0f0, M68000);
6129    INSN(shift16_im, e040, f0f0, M68000);
6130    INSN(shift_im,  e080, f0f0, M68000);
6131    INSN(shift8_reg, e020, f0f0, M68000);
6132    INSN(shift16_reg, e060, f0f0, M68000);
6133    INSN(shift_reg, e0a0, f0f0, M68000);
6134    INSN(shift_mem, e0c0, fcc0, M68000);
6135    INSN(rotate_im, e090, f0f0, M68000);
6136    INSN(rotate8_im, e010, f0f0, M68000);
6137    INSN(rotate16_im, e050, f0f0, M68000);
6138    INSN(rotate_reg, e0b0, f0f0, M68000);
6139    INSN(rotate8_reg, e030, f0f0, M68000);
6140    INSN(rotate16_reg, e070, f0f0, M68000);
6141    INSN(rotate_mem, e4c0, fcc0, M68000);
6142    INSN(bfext_mem, e9c0, fdc0, BITFIELD);  /* bfextu & bfexts */
6143    INSN(bfext_reg, e9c0, fdf8, BITFIELD);
6144    INSN(bfins_mem, efc0, ffc0, BITFIELD);
6145    INSN(bfins_reg, efc0, fff8, BITFIELD);
6146    INSN(bfop_mem, eac0, ffc0, BITFIELD);   /* bfchg */
6147    INSN(bfop_reg, eac0, fff8, BITFIELD);   /* bfchg */
6148    INSN(bfop_mem, ecc0, ffc0, BITFIELD);   /* bfclr */
6149    INSN(bfop_reg, ecc0, fff8, BITFIELD);   /* bfclr */
6150    INSN(bfop_mem, edc0, ffc0, BITFIELD);   /* bfffo */
6151    INSN(bfop_reg, edc0, fff8, BITFIELD);   /* bfffo */
6152    INSN(bfop_mem, eec0, ffc0, BITFIELD);   /* bfset */
6153    INSN(bfop_reg, eec0, fff8, BITFIELD);   /* bfset */
6154    INSN(bfop_mem, e8c0, ffc0, BITFIELD);   /* bftst */
6155    INSN(bfop_reg, e8c0, fff8, BITFIELD);   /* bftst */
6156    BASE(undef_fpu, f000, f000);
6157    INSN(fpu,       f200, ffc0, CF_FPU);
6158    INSN(fbcc,      f280, ffc0, CF_FPU);
6159    INSN(fpu,       f200, ffc0, FPU);
6160    INSN(fscc,      f240, ffc0, FPU);
6161    INSN(fbcc,      f280, ff80, FPU);
6162#if defined(CONFIG_SOFTMMU)
6163    INSN(frestore,  f340, ffc0, CF_FPU);
6164    INSN(fsave,     f300, ffc0, CF_FPU);
6165    INSN(frestore,  f340, ffc0, FPU);
6166    INSN(fsave,     f300, ffc0, FPU);
6167    INSN(intouch,   f340, ffc0, CF_ISA_A);
6168    INSN(cpushl,    f428, ff38, CF_ISA_A);
6169    INSN(cpush,     f420, ff20, M68040);
6170    INSN(cinv,      f400, ff20, M68040);
6171    INSN(pflush,    f500, ffe0, M68040);
6172    INSN(ptest,     f548, ffd8, M68040);
6173    INSN(wddata,    fb00, ff00, CF_ISA_A);
6174    INSN(wdebug,    fbc0, ffc0, CF_ISA_A);
6175#endif
6176    INSN(move16_mem, f600, ffe0, M68040);
6177    INSN(move16_reg, f620, fff8, M68040);
6178#undef INSN
6179}
6180
6181static void m68k_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
6182{
6183    DisasContext *dc = container_of(dcbase, DisasContext, base);
6184    CPUM68KState *env = cpu->env_ptr;
6185
6186    dc->env = env;
6187    dc->pc = dc->base.pc_first;
6188    dc->cc_op = CC_OP_DYNAMIC;
6189    dc->cc_op_synced = 1;
6190    dc->done_mac = 0;
6191    dc->writeback_mask = 0;
6192    init_release_array(dc);
6193
6194    dc->ss_active = (M68K_SR_TRACE(env->sr) == M68K_SR_TRACE_ANY_INS);
6195    /* If architectural single step is active, limit the TB to one insn.  */
6196    if (is_singlestepping(dc)) {
6197        dc->base.max_insns = 1;
6198    }
6199}
6200
6201static void m68k_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
6202{
6203}
6204
6205static void m68k_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
6206{
6207    DisasContext *dc = container_of(dcbase, DisasContext, base);
6208    tcg_gen_insn_start(dc->base.pc_next, dc->cc_op);
6209}
6210
6211static void m68k_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
6212{
6213    DisasContext *dc = container_of(dcbase, DisasContext, base);
6214    CPUM68KState *env = cpu->env_ptr;
6215    uint16_t insn = read_im16(env, dc);
6216
6217    opcode_table[insn](env, dc, insn);
6218    do_writebacks(dc);
6219    do_release(dc);
6220
6221    dc->base.pc_next = dc->pc;
6222
6223    if (dc->base.is_jmp == DISAS_NEXT) {
6224        /*
6225         * Stop translation when the next insn might touch a new page.
6226         * This ensures that prefetch aborts at the right place.
6227         *
6228         * We cannot determine the size of the next insn without
6229         * completely decoding it.  However, the maximum insn size
6230         * is 32 bytes, so end if we do not have that much remaining.
6231         * This may produce several small TBs at the end of each page,
6232         * but they will all be linked with goto_tb.
6233         *
6234         * ??? ColdFire maximum is 4 bytes; MC68000's maximum is also
6235         * smaller than MC68020's.
6236         */
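        /*
         * For instance, with 4 KiB target pages the TB is ended as soon
         * as the next insn would start at page offset 4064 (4096 - 32)
         * or beyond.
         */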
6237        target_ulong start_page_offset
6238            = dc->pc - (dc->base.pc_first & TARGET_PAGE_MASK);
6239
6240        if (start_page_offset >= TARGET_PAGE_SIZE - 32) {
6241            dc->base.is_jmp = DISAS_TOO_MANY;
6242        }
6243    }
6244}
6245
6246static void m68k_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
6247{
6248    DisasContext *dc = container_of(dcbase, DisasContext, base);
6249
6250    switch (dc->base.is_jmp) {
6251    case DISAS_NORETURN:
6252        break;
6253    case DISAS_TOO_MANY:
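        /*
         * Translation stopped at an insn boundary (e.g. the page-crossing
         * check in m68k_tr_translate_insn); continue at dc->pc, or raise
         * the trace exception when single-stepping.
         */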
6254        update_cc_op(dc);
6255        if (is_singlestepping(dc)) {
6256            tcg_gen_movi_i32(QREG_PC, dc->pc);
6257            gen_singlestep_exception(dc);
6258        } else {
6259            gen_jmp_tb(dc, 0, dc->pc);
6260        }
6261        break;
6262    case DISAS_JUMP:
6263        /* We updated CC_OP and PC in gen_jmp/gen_jmp_im.  */
6264        if (is_singlestepping(dc)) {
6265            gen_singlestep_exception(dc);
6266        } else {
6267            tcg_gen_lookup_and_goto_ptr();
6268        }
6269        break;
6270    case DISAS_EXIT:
6271        /*
6272         * We updated CC_OP and PC in gen_exit_tb, but also modified
6273         * other state that may require returning to the main loop.
6274         */
6275        if (is_singlestepping(dc)) {
6276            gen_singlestep_exception(dc);
6277        } else {
6278            tcg_gen_exit_tb(NULL, 0);
6279        }
6280        break;
6281    default:
6282        g_assert_not_reached();
6283    }
6284}
6285
6286static void m68k_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
6287{
6288    qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
6289    log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size);
6290}
6291
6292static const TranslatorOps m68k_tr_ops = {
6293    .init_disas_context = m68k_tr_init_disas_context,
6294    .tb_start           = m68k_tr_tb_start,
6295    .insn_start         = m68k_tr_insn_start,
6296    .translate_insn     = m68k_tr_translate_insn,
6297    .tb_stop            = m68k_tr_tb_stop,
6298    .disas_log          = m68k_tr_disas_log,
6299};
6300
6301void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
6302{
6303    DisasContext dc;
6304    translator_loop(&m68k_tr_ops, &dc.base, cpu, tb, max_insns);
6305}
6306
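/* Convert an 80-bit FP register image to a host double, for display only.  */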
6307static double floatx80_to_double(CPUM68KState *env, uint16_t high, uint64_t low)
6308{
6309    floatx80 a = { .high = high, .low = low };
6310    union {
6311        float64 f64;
6312        double d;
6313    } u;
6314
6315    u.f64 = floatx80_to_float64(a, &env->fp_status);
6316    return u.d;
6317}
6318
6319void m68k_cpu_dump_state(CPUState *cs, FILE *f, int flags)
6320{
6321    M68kCPU *cpu = M68K_CPU(cs);
6322    CPUM68KState *env = &cpu->env;
6323    int i;
6324    uint16_t sr;
6325    for (i = 0; i < 8; i++) {
6326        qemu_fprintf(f, "D%d = %08x   A%d = %08x   "
6327                     "F%d = %04x %016"PRIx64"  (%12g)\n",
6328                     i, env->dregs[i], i, env->aregs[i],
6329                     i, env->fregs[i].l.upper, env->fregs[i].l.lower,
6330                     floatx80_to_double(env, env->fregs[i].l.upper,
6331                                        env->fregs[i].l.lower));
6332    }
6333    qemu_fprintf(f, "PC = %08x   ", env->pc);
6334    sr = env->sr | cpu_m68k_get_ccr(env);
6335    qemu_fprintf(f, "SR = %04x T:%x I:%x %c%c %c%c%c%c%c\n",
6336                 sr, (sr & SR_T) >> SR_T_SHIFT, (sr & SR_I) >> SR_I_SHIFT,
6337                 (sr & SR_S) ? 'S' : 'U', (sr & SR_M) ? '%' : 'I',
6338                 (sr & CCF_X) ? 'X' : '-', (sr & CCF_N) ? 'N' : '-',
6339                 (sr & CCF_Z) ? 'Z' : '-', (sr & CCF_V) ? 'V' : '-',
6340                 (sr & CCF_C) ? 'C' : '-');
6341    qemu_fprintf(f, "FPSR = %08x %c%c%c%c ", env->fpsr,
6342                 (env->fpsr & FPSR_CC_A) ? 'A' : '-',
6343                 (env->fpsr & FPSR_CC_I) ? 'I' : '-',
6344                 (env->fpsr & FPSR_CC_Z) ? 'Z' : '-',
6345                 (env->fpsr & FPSR_CC_N) ? 'N' : '-');
6346    qemu_fprintf(f, "\n                                "
6347                 "FPCR =     %04x ", env->fpcr);
6348    switch (env->fpcr & FPCR_PREC_MASK) {
6349    case FPCR_PREC_X:
6350        qemu_fprintf(f, "X ");
6351        break;
6352    case FPCR_PREC_S:
6353        qemu_fprintf(f, "S ");
6354        break;
6355    case FPCR_PREC_D:
6356        qemu_fprintf(f, "D ");
6357        break;
6358    }
6359    switch (env->fpcr & FPCR_RND_MASK) {
6360    case FPCR_RND_N:
6361        qemu_fprintf(f, "RN ");
6362        break;
6363    case FPCR_RND_Z:
6364        qemu_fprintf(f, "RZ ");
6365        break;
6366    case FPCR_RND_M:
6367        qemu_fprintf(f, "RM ");
6368        break;
6369    case FPCR_RND_P:
6370        qemu_fprintf(f, "RP ");
6371        break;
6372    }
6373    qemu_fprintf(f, "\n");
6374#ifdef CONFIG_SOFTMMU
6375    qemu_fprintf(f, "%sA7(MSP) = %08x %sA7(USP) = %08x %sA7(ISP) = %08x\n",
6376                 env->current_sp == M68K_SSP ? "->" : "  ", env->sp[M68K_SSP],
6377                 env->current_sp == M68K_USP ? "->" : "  ", env->sp[M68K_USP],
6378                 env->current_sp == M68K_ISP ? "->" : "  ", env->sp[M68K_ISP]);
6379    qemu_fprintf(f, "VBR = 0x%08x\n", env->vbr);
6380    qemu_fprintf(f, "SFC = %x DFC %x\n", env->sfc, env->dfc);
6381    qemu_fprintf(f, "SSW %08x TCR %08x URP %08x SRP %08x\n",
6382                 env->mmu.ssw, env->mmu.tcr, env->mmu.urp, env->mmu.srp);
6383    qemu_fprintf(f, "DTTR0/1: %08x/%08x ITTR0/1: %08x/%08x\n",
6384                 env->mmu.ttr[M68K_DTTR0], env->mmu.ttr[M68K_DTTR1],
6385                 env->mmu.ttr[M68K_ITTR0], env->mmu.ttr[M68K_ITTR1]);
6386    qemu_fprintf(f, "MMUSR %08x, fault at %08x\n",
6387                 env->mmu.mmusr, env->mmu.ar);
6388#endif
6389}
6390
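/*
 * The data[] values correspond, in order, to the arguments of the
 * tcg_gen_insn_start() call in m68k_tr_insn_start(): the insn PC and
 * the current cc_op.
 */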
6391void restore_state_to_opc(CPUM68KState *env, TranslationBlock *tb,
6392                          target_ulong *data)
6393{
6394    int cc_op = data[1];
6395    env->pc = data[0];
6396    if (cc_op != CC_OP_DYNAMIC) {
6397        env->cc_op = cc_op;
6398    }
6399}
6400