qemu/target/microblaze/translate.c
/*
 *  Xilinx MicroBlaze emulation for qemu: main translation routines.
 *
 *  Copyright (c) 2009 Edgar E. Iglesias.
 *  Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/helper-proto.h"
#include "microblaze-decode.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"

#include "trace-tcg.h"
#include "exec/log.h"


#define SIM_COMPAT 0
#define DISAS_GNU 1
#define DISAS_MB 1
#if DISAS_MB && !SIM_COMPAT
#  define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DIS(...) do { } while (0)
#endif

#define D(x)

#define EXTRACT_FIELD(src, start, end) \
            (((src) >> start) & ((1 << (end - start + 1)) - 1))

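/*
 * Example: decode() below uses EXTRACT_FIELD(ir, 26, 31) to pull out
 * the 6-bit major opcode and EXTRACT_FIELD(ir, 21, 25) for rd; start
 * and end are inclusive bit positions counted from the LSB.
 */
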
/* is_jmp field values */
#define DISAS_JUMP    DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_UPDATE  DISAS_TARGET_1 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */

static TCGv_i32 env_debug;
static TCGv_i32 cpu_R[32];
static TCGv_i64 cpu_SR[14];
static TCGv_i32 env_imm;
static TCGv_i32 env_btaken;
static TCGv_i32 env_btarget;
static TCGv_i32 env_iflags;
static TCGv env_res_addr;
static TCGv_i32 env_res_val;

#include "exec/gen-icount.h"

/* This is the state at translation time.  */
typedef struct DisasContext {
    MicroBlazeCPU *cpu;
    uint32_t pc;

    /* Decoder.  */
    int type_b;
    uint32_t ir;
    uint8_t opcode;
    uint8_t rd, ra, rb;
    uint16_t imm;

    unsigned int cpustate_changed;
    unsigned int delayed_branch;
    unsigned int tb_flags, synced_flags; /* tb dependent flags.  */
    unsigned int clear_imm;
    int is_jmp;

#define JMP_NOJMP     0
#define JMP_DIRECT    1
#define JMP_DIRECT_CC 2
#define JMP_INDIRECT  3
    unsigned int jmp;
    uint32_t jmp_pc;

    int abort_at_next_insn;
    int nr_nops;
    struct TranslationBlock *tb;
    int singlestep_enabled;
} DisasContext;

static const char *regnames[] =
{
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};

static const char *special_regnames[] =
{
    "rpc", "rmsr", "sr2", "rear", "sr4", "resr", "sr6", "rfsr",
    "sr8", "sr9", "sr10", "rbtr", "sr12", "redr"
};

static inline void t_sync_flags(DisasContext *dc)
{
    /* Synch the tb dependent flags between translator and runtime.  */
    if (dc->tb_flags != dc->synced_flags) {
        tcg_gen_movi_i32(env_iflags, dc->tb_flags);
        dc->synced_flags = dc->tb_flags;
    }
}

static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    t_sync_flags(dc);
    tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc);
    gen_helper_raise_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    dc->is_jmp = DISAS_UPDATE;
}

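/*
 * Direct TB chaining is only safe when the destination is on the same
 * guest page as the current TB; cross-page jumps must take the slow
 * exit so the mapping is re-checked.  User-only mode has no MMU, so
 * chaining is always safe there.
 */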
static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
{
#ifndef CONFIG_USER_ONLY
    return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (use_goto_tb(dc, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i64(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb((uintptr_t)dc->tb + n);
    } else {
        tcg_gen_movi_i64(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb(0);
    }
}

static void read_carry(DisasContext *dc, TCGv_i32 d)
{
    tcg_gen_extrl_i64_i32(d, cpu_SR[SR_MSR]);
    tcg_gen_shri_i32(d, d, 31);
}

/*
 * write_carry sets the carry bits in MSR based on bit 0 of v.
 * v[31:1] are ignored.
 */
static void write_carry(DisasContext *dc, TCGv_i32 v)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(t0, v);
    /* Deposit bit 0 into MSR_C and the alias MSR_CC.  */
    tcg_gen_deposit_i64(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0, 2, 1);
    tcg_gen_deposit_i64(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0, 31, 1);
    tcg_temp_free_i64(t0);
}

static void write_carryi(DisasContext *dc, bool carry)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    tcg_gen_movi_i32(t0, carry);
    write_carry(dc, t0);
    tcg_temp_free_i32(t0);
}

/*
 * Returns true if the insn is an illegal operation.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_illegal(DisasContext *dc, bool cond)
{
    if (cond && (dc->tb_flags & MSR_EE_FLAG)
        && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_i64(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }
    return cond;
}

/*
 * Returns true if the insn is illegal in userspace.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_userspace(DisasContext *dc, bool cond)
{
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
    bool cond_user = cond && mem_index == MMU_USER_IDX;

    if (cond_user && (dc->tb_flags & MSR_EE_FLAG)) {
        tcg_gen_movi_i64(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }
    return cond_user;
}

/* True if ALU operand b is a small immediate that may deserve
   faster treatment.  */
static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
{
    /* Immediate insn without the imm prefix ?  */
    return dc->type_b && !(dc->tb_flags & IMM_FLAG);
}

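/*
 * Materialize ALU operand b: for type-B insns this is the 16-bit
 * immediate, either OR:ed into the high half installed by a preceding
 * "imm" prefix or sign-extended on its own; otherwise it is rb.
 */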
static inline TCGv_i32 *dec_alu_op_b(DisasContext *dc)
{
    if (dc->type_b) {
        if (dc->tb_flags & IMM_FLAG)
            tcg_gen_ori_i32(env_imm, env_imm, dc->imm);
        else
            tcg_gen_movi_i32(env_imm, (int32_t)((int16_t)dc->imm));
        return &env_imm;
    } else
        return &cpu_R[dc->rb];
}

static void dec_add(DisasContext *dc)
{
    unsigned int k, c;
    TCGv_i32 cf;

    k = dc->opcode & 4;
    c = dc->opcode & 2;

    LOG_DIS("add%s%s%s r%d r%d r%d\n",
            dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
            dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new_i32();

                read_carry(dc, cf);
                tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free_i32(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry.  */
    cf = tcg_temp_new_i32();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_i32(cf, 0);
    }

    if (dc->rd) {
        TCGv_i32 ncf = tcg_temp_new_i32();
        gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free_i32(ncf);
    } else {
        gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free_i32(cf);
}

static void dec_sub(DisasContext *dc)
{
    unsigned int u, cmp, k, c;
    TCGv_i32 cf, na;

    u = dc->imm & 2;
    k = dc->opcode & 4;
    c = dc->opcode & 2;
    cmp = (dc->imm & 1) && (!dc->type_b) && k;

    if (cmp) {
        LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
        if (dc->rd) {
            if (u)
                gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            else
                gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        }
        return;
    }

    LOG_DIS("sub%s%s r%d, r%d r%d\n",
             k ? "k" : "",  c ? "c" : "", dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_sub_i32(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new_i32();

                read_carry(dc, cf);
                tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free_i32(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry. And complement a into na.  */
    cf = tcg_temp_new_i32();
    na = tcg_temp_new_i32();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_i32(cf, 1);
    }

    /* d = b + ~a + c. carry defaults to 1.  */
    tcg_gen_not_i32(na, cpu_R[dc->ra]);

    if (dc->rd) {
        TCGv_i32 ncf = tcg_temp_new_i32();
        gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_i32(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
        tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free_i32(ncf);
    } else {
        gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free_i32(cf);
    tcg_temp_free_i32(na);
}

static void dec_pattern(DisasContext *dc)
{
    unsigned int mode;

    if (trap_illegal(dc, !dc->cpu->cfg.use_pcmp_instr)) {
        return;
    }

    mode = dc->opcode & 3;
    switch (mode) {
        case 0:
            /* pcmpbf.  */
            LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd)
                gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                tcg_gen_setcond_i32(TCG_COND_EQ, cpu_R[dc->rd],
                                   cpu_R[dc->ra], cpu_R[dc->rb]);
            }
            break;
        case 3:
            LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                tcg_gen_setcond_i32(TCG_COND_NE, cpu_R[dc->rd],
                                   cpu_R[dc->ra], cpu_R[dc->rb]);
            }
            break;
        default:
            cpu_abort(CPU(dc->cpu),
                      "unsupported pattern insn opcode=%x\n", dc->opcode);
            break;
    }
}

static void dec_and(DisasContext *dc)
{
    unsigned int not;

    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    not = dc->opcode & (1 << 1);
    LOG_DIS("and%s\n", not ? "n" : "");

    if (!dc->rd)
        return;

    if (not) {
        tcg_gen_andc_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    } else
        tcg_gen_and_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_or(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
    if (dc->rd)
        tcg_gen_or_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_xor(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("xor r%d\n", dc->rd);
    if (dc->rd)
        tcg_gen_xor_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static inline void msr_read(DisasContext *dc, TCGv_i32 d)
{
    tcg_gen_extrl_i64_i32(d, cpu_SR[SR_MSR]);
}

static inline void msr_write(DisasContext *dc, TCGv_i32 v)
{
    TCGv_i64 t;

    t = tcg_temp_new_i64();
    dc->cpustate_changed = 1;
    /* PVR bit is not writable.  */
    tcg_gen_extu_i32_i64(t, v);
    tcg_gen_andi_i64(t, t, ~MSR_PVR);
    tcg_gen_andi_i64(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
    tcg_gen_or_i64(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t);
    tcg_temp_free_i64(t);
}

static void dec_msr(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv_i32 t0, t1;
    unsigned int sr, rn;
    bool to, clrset, extended = false;

    sr = extract32(dc->imm, 0, 14);
    to = extract32(dc->imm, 14, 1);
    clrset = extract32(dc->imm, 15, 1) == 0;
    dc->type_b = 1;
    if (to) {
        dc->cpustate_changed = 1;
    }

    /* Extended MSRs are only available if addr_size > 32.  */
    if (dc->cpu->cfg.addr_size > 32) {
        /* The E-bit is encoded differently for To/From MSR.  */
        static const unsigned int e_bit[] = { 19, 24 };

        extended = extract32(dc->imm, e_bit[to], 1);
    }

    /* msrclr and msrset.  */
    if (clrset) {
        bool clr = extract32(dc->ir, 16, 1);

        LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
                dc->rd, dc->imm);

        if (!dc->cpu->cfg.use_msr_instr) {
            /* nop??? */
            return;
        }

        if (trap_userspace(dc, dc->imm != 4 && dc->imm != 0)) {
            return;
        }

        if (dc->rd)
            msr_read(dc, cpu_R[dc->rd]);

        t0 = tcg_temp_new_i32();
        t1 = tcg_temp_new_i32();
        msr_read(dc, t0);
        tcg_gen_mov_i32(t1, *(dec_alu_op_b(dc)));

        if (clr) {
            tcg_gen_not_i32(t1, t1);
            tcg_gen_and_i32(t0, t0, t1);
        } else
            tcg_gen_or_i32(t0, t0, t1);
        msr_write(dc, t0);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
        tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc + 4);
        dc->is_jmp = DISAS_UPDATE;
        return;
    }

    if (trap_userspace(dc, to)) {
        return;
    }

#if !defined(CONFIG_USER_ONLY)
    /* Catch read/writes to the mmu block.  */
    if ((sr & ~0xff) == 0x1000) {
        sr &= 7;
        LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        if (to) {
            gen_helper_mmu_write(cpu_env, tcg_const_i32(extended),
                                 tcg_const_i32(sr), cpu_R[dc->ra]);
        } else {
            gen_helper_mmu_read(cpu_R[dc->rd], cpu_env,
                                tcg_const_i32(extended), tcg_const_i32(sr));
        }
        return;
    }
#endif

    if (to) {
        LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        switch (sr) {
            case 0:
                break;
            case 1:
                msr_write(dc, cpu_R[dc->ra]);
                break;
            case SR_EAR:
            case SR_ESR:
            case SR_FSR:
                tcg_gen_extu_i32_i64(cpu_SR[sr], cpu_R[dc->ra]);
                break;
            case 0x800:
                tcg_gen_st_i32(cpu_R[dc->ra],
                               cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_st_i32(cpu_R[dc->ra],
                               cpu_env, offsetof(CPUMBState, shr));
                break;
            default:
                cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
                break;
        }
    } else {
        LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);

        switch (sr) {
            case 0:
                tcg_gen_movi_i32(cpu_R[dc->rd], dc->pc);
                break;
            case 1:
                msr_read(dc, cpu_R[dc->rd]);
                break;
            case SR_EAR:
                if (extended) {
                    tcg_gen_extrh_i64_i32(cpu_R[dc->rd], cpu_SR[sr]);
                    break;
                }
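                /* fall through */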
            case SR_ESR:
            case SR_FSR:
            case SR_BTR:
                tcg_gen_extrl_i64_i32(cpu_R[dc->rd], cpu_SR[sr]);
                break;
            case 0x800:
                tcg_gen_ld_i32(cpu_R[dc->rd],
                               cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_ld_i32(cpu_R[dc->rd],
                               cpu_env, offsetof(CPUMBState, shr));
                break;
            case 0x2000 ... 0x200c:
                rn = sr & 0xf;
                tcg_gen_ld_i32(cpu_R[dc->rd],
                              cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
                break;
            default:
                cpu_abort(cs, "unknown mfs reg %x\n", sr);
                break;
        }
    }

    if (dc->rd == 0) {
        tcg_gen_movi_i32(cpu_R[0], 0);
    }
}

/* Multiplier unit.  */
static void dec_mul(DisasContext *dc)
{
    TCGv_i32 tmp;
    unsigned int subcode;

    if (trap_illegal(dc, !dc->cpu->cfg.use_hw_mul)) {
        return;
    }

    subcode = dc->imm & 3;

    if (dc->type_b) {
        LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
        tcg_gen_mul_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        return;
    }

    /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2.  */
    if (subcode >= 1 && subcode <= 3 && dc->cpu->cfg.use_hw_mul < 2) {
        /* nop??? */
    }

    tmp = tcg_temp_new_i32();
    switch (subcode) {
        case 0:
            LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_mul_i32(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 1:
            LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_muls2_i32(tmp, cpu_R[dc->rd],
                              cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_mulsu2_i32(tmp, cpu_R[dc->rd],
                               cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 3:
            LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_mulu2_i32(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "unknown MUL insn %x\n", subcode);
            break;
    }
    tcg_temp_free_i32(tmp);
}

/* Div unit.  */
static void dec_div(DisasContext *dc)
{
    unsigned int u;

    u = dc->imm & 2;
    LOG_DIS("div\n");

    if (trap_illegal(dc, !dc->cpu->cfg.use_div)) {
        return;
    }

    if (u)
        gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    else
        gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    if (!dc->rd)
        tcg_gen_movi_i32(cpu_R[dc->rd], 0);
}

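/*
 * Barrel shifter: the s bit selects shift left vs. right and t
 * arithmetic vs. logical; in immediate mode the e and i bits select
 * the bsefi (bitfield extract) and bsifi (bitfield insert) forms.
 */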
static void dec_barrel(DisasContext *dc)
{
    TCGv_i32 t0;
    unsigned int imm_w, imm_s;
    bool s, t, e = false, i = false;

    if (trap_illegal(dc, !dc->cpu->cfg.use_barrel)) {
        return;
    }

    if (dc->type_b) {
        /* Insert and extract are only available in immediate mode.  */
        i = extract32(dc->imm, 15, 1);
        e = extract32(dc->imm, 14, 1);
    }
    s = extract32(dc->imm, 10, 1);
    t = extract32(dc->imm, 9, 1);
    imm_w = extract32(dc->imm, 6, 5);
    imm_s = extract32(dc->imm, 0, 5);

    LOG_DIS("bs%s%s%s r%d r%d r%d\n",
            e ? "e" : "",
            s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);

    if (e) {
        if (imm_w + imm_s > 32 || imm_w == 0) {
            /* These inputs have an undefined behavior.  */
            qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
                          imm_w, imm_s);
        } else {
            tcg_gen_extract_i32(cpu_R[dc->rd], cpu_R[dc->ra], imm_s, imm_w);
        }
    } else if (i) {
        int width = imm_w - imm_s + 1;

        if (imm_w < imm_s) {
            /* These inputs have an undefined behavior.  */
            qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
                          imm_w, imm_s);
        } else {
            tcg_gen_deposit_i32(cpu_R[dc->rd], cpu_R[dc->rd], cpu_R[dc->ra],
                                imm_s, width);
        }
    } else {
        t0 = tcg_temp_new_i32();

        tcg_gen_mov_i32(t0, *(dec_alu_op_b(dc)));
        tcg_gen_andi_i32(t0, t0, 31);

        if (s) {
            tcg_gen_shl_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
        } else {
            if (t) {
                tcg_gen_sar_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
            } else {
                tcg_gen_shr_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
            }
        }
        tcg_temp_free_i32(t0);
    }
}

static void dec_bit(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv_i32 t0;
    unsigned int op;

    op = dc->ir & ((1 << 9) - 1);
    switch (op) {
        case 0x21:
            /* src.  */
            t0 = tcg_temp_new_i32();

            LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_extrl_i64_i32(t0, cpu_SR[SR_MSR]);
            tcg_gen_andi_i32(t0, t0, MSR_CC);
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                tcg_gen_shri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                tcg_gen_or_i32(cpu_R[dc->rd], cpu_R[dc->rd], t0);
            }
            tcg_temp_free_i32(t0);
            break;

        case 0x1:
        case 0x41:
            /* srl.  */
            LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);

            /* Update carry. Note that write carry only looks at the LSB.  */
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                if (op == 0x41)
                    tcg_gen_shri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                else
                    tcg_gen_sari_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
            }
            break;
        case 0x60:
            LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x61:
            LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x64:
        case 0x66:
        case 0x74:
        case 0x76:
            /* wdc.  */
            LOG_DIS("wdc r%d\n", dc->ra);
            trap_userspace(dc, true);
            break;
        case 0x68:
            /* wic.  */
            LOG_DIS("wic r%d\n", dc->ra);
            trap_userspace(dc, true);
            break;
        case 0xe0:
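            /* clz.  */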
            if (trap_illegal(dc, !dc->cpu->cfg.use_pcmp_instr)) {
                return;
            }
            if (dc->cpu->cfg.use_pcmp_instr) {
                tcg_gen_clzi_i32(cpu_R[dc->rd], cpu_R[dc->ra], 32);
            }
            break;
        case 0x1e0:
            /* swapb */
            LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x1e2:
            /* swaph */
            LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
            break;
        default:
            cpu_abort(cs, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
                      dc->pc, op, dc->rd, dc->ra, dc->rb);
            break;
    }
}

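/*
 * Downgrade a pending direct branch to the indirect form so that
 * env_btaken and env_btarget describe it at runtime; needed before
 * insns that may fault (e.g. loads/stores) in a delay slot.
 */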
static inline void sync_jmpstate(DisasContext *dc)
{
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->jmp == JMP_DIRECT) {
            tcg_gen_movi_i32(env_btaken, 1);
        }
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_i32(env_btarget, dc->jmp_pc);
    }
}

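/*
 * The imm prefix loads the high 16 bits of a 32-bit immediate for the
 * following type-B insn.  Clearing clear_imm keeps IMM_FLAG alive for
 * exactly one more insn in the main translation loop.
 */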
static void dec_imm(DisasContext *dc)
{
    LOG_DIS("imm %x\n", dc->imm << 16);
    tcg_gen_movi_i32(env_imm, (dc->imm << 16));
    dc->tb_flags |= IMM_FLAG;
    dc->clear_imm = 0;
}

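/*
 * Compute the effective address of a load/store into t, covering the
 * reg+reg, reg+imm and 64-bit extended (ea) forms, and invoke the
 * stack protection helper when r1 is involved.
 */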
static inline void compute_ldst_addr(DisasContext *dc, bool ea, TCGv t)
{
    bool extimm = dc->tb_flags & IMM_FLAG;
    /* Should be set to true if r1 is used by loadstores.  */
    bool stackprot = false;
    TCGv_i32 t32;

    /* All load/stores use ra.  */
    if (dc->ra == 1 && dc->cpu->cfg.stackprot) {
        stackprot = true;
    }

    /* Treat the common cases first.  */
    if (!dc->type_b) {
        if (ea) {
            int addr_size = dc->cpu->cfg.addr_size;

            if (addr_size == 32) {
                tcg_gen_extu_i32_tl(t, cpu_R[dc->rb]);
                return;
            }

            tcg_gen_concat_i32_i64(t, cpu_R[dc->rb], cpu_R[dc->ra]);
            if (addr_size < 64) {
                /* Mask off out of range bits.  */
                tcg_gen_andi_i64(t, t, MAKE_64BIT_MASK(0, addr_size));
            }
            return;
        }

        /* If any of the regs is r0, return the value of the other reg.  */
        if (dc->ra == 0) {
            tcg_gen_extu_i32_tl(t, cpu_R[dc->rb]);
            return;
        } else if (dc->rb == 0) {
            tcg_gen_extu_i32_tl(t, cpu_R[dc->ra]);
            return;
        }

        if (dc->rb == 1 && dc->cpu->cfg.stackprot) {
            stackprot = true;
        }

        t32 = tcg_temp_new_i32();
        tcg_gen_add_i32(t32, cpu_R[dc->ra], cpu_R[dc->rb]);
        tcg_gen_extu_i32_tl(t, t32);
        tcg_temp_free_i32(t32);

        if (stackprot) {
            gen_helper_stackprot(cpu_env, t);
        }
        return;
    }
    /* Immediate.  */
    t32 = tcg_temp_new_i32();
    if (!extimm) {
        if (dc->imm == 0) {
            tcg_gen_mov_i32(t32, cpu_R[dc->ra]);
        } else {
            tcg_gen_movi_i32(t32, (int32_t)((int16_t)dc->imm));
            tcg_gen_add_i32(t32, cpu_R[dc->ra], t32);
        }
    } else {
        tcg_gen_add_i32(t32, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    }
    tcg_gen_extu_i32_tl(t, t32);
    tcg_temp_free_i32(t32);

    if (stackprot) {
        gen_helper_stackprot(cpu_env, t);
    }
    return;
}

static void dec_load(DisasContext *dc)
{
    TCGv_i32 v;
    TCGv addr;
    unsigned int size;
    bool rev = false, ex = false, ea = false;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        ea = extract32(dc->ir, 7, 1);
        rev = extract32(dc->ir, 9, 1);
        ex = extract32(dc->ir, 10, 1);
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (trap_illegal(dc, size > 4)) {
        return;
    }

    trap_userspace(dc, ea);

    LOG_DIS("l%d%s%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "",
                                                        ea ? "ea" : "");

    t_sync_flags(dc);
    addr = tcg_temp_new();
    compute_ldst_addr(dc, ea, addr);
    /* Extended addressing bypasses the MMU.  */
    mem_index = ea ? MMU_NOMMU_IDX : mem_index;

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 01
                   11 -> 00 */
                TCGv low = tcg_temp_new();

                tcg_gen_andi_tl(low, addr, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(addr, addr, ~3);
                tcg_gen_or_tl(addr, addr, low);
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                tcg_gen_xori_tl(addr, addr, 2);
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }

    /* lwx does not throw unaligned access errors, so force alignment */
    if (ex) {
        tcg_gen_andi_tl(addr, addr, ~3);
    }

    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);

    /* Verify alignment if needed.  */
    /*
     * Microblaze gives MMU faults priority over faults due to
     * unaligned addresses. That's why we speculatively do the load
     * into v. If the load succeeds, we verify alignment of the
     * address and if that succeeds we write into the destination reg.
     */
    v = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(v, addr, mem_index, mop);

    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc);
        gen_helper_memalign(cpu_env, addr, tcg_const_i32(dc->rd),
                            tcg_const_i32(0), tcg_const_i32(size - 1));
    }

    if (ex) {
        tcg_gen_mov_tl(env_res_addr, addr);
        tcg_gen_mov_i32(env_res_val, v);
    }
    if (dc->rd) {
        tcg_gen_mov_i32(cpu_R[dc->rd], v);
    }
    tcg_temp_free_i32(v);

    if (ex) { /* lwx */
        /* no support for AXI exclusive so always clear C */
        write_carryi(dc, 0);
    }

    tcg_temp_free(addr);
}

static void dec_store(DisasContext *dc)
{
    TCGv addr;
    TCGLabel *swx_skip = NULL;
    unsigned int size;
    bool rev = false, ex = false, ea = false;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        ea = extract32(dc->ir, 7, 1);
        rev = extract32(dc->ir, 9, 1);
        ex = extract32(dc->ir, 10, 1);
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (trap_illegal(dc, size > 4)) {
        return;
    }

    trap_userspace(dc, ea);

    LOG_DIS("s%d%s%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "",
                                                        ea ? "ea" : "");
    t_sync_flags(dc);
    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);
    /* SWX needs a temp_local.  */
    addr = ex ? tcg_temp_local_new() : tcg_temp_new();
    compute_ldst_addr(dc, ea, addr);
    /* Extended addressing bypasses the MMU.  */
    mem_index = ea ? MMU_NOMMU_IDX : mem_index;

    if (ex) { /* swx */
        TCGv_i32 tval;

        /* swx does not throw unaligned access errors, so force alignment */
        tcg_gen_andi_tl(addr, addr, ~3);

        write_carryi(dc, 1);
        swx_skip = gen_new_label();
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, addr, swx_skip);

        /* Compare the value loaded at lwx with current contents of
           the reserved location.
           FIXME: This only works for system emulation where we can expect
           this compare and the following write to be atomic. For user
           emulation we need to add atomicity between threads.  */
        tval = tcg_temp_new_i32();
        tcg_gen_qemu_ld_i32(tval, addr, cpu_mmu_index(&dc->cpu->env, false),
                            MO_TEUL);
        tcg_gen_brcond_i32(TCG_COND_NE, env_res_val, tval, swx_skip);
        write_carryi(dc, 0);
        tcg_temp_free_i32(tval);
    }

    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 01
                   11 -> 00 */
                TCGv low = tcg_temp_new();

                tcg_gen_andi_tl(low, addr, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(addr, addr, ~3);
                tcg_gen_or_tl(addr, addr, low);
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                /* Force addr into the temp.  */
                tcg_gen_xori_tl(addr, addr, 2);
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }
    tcg_gen_qemu_st_i32(cpu_R[dc->rd], addr, mem_index, mop);

    /* Verify alignment if needed.  */
    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc);
        /* FIXME: if the alignment is wrong, we should restore the value
         *        in memory. One possible way to achieve this is to probe
         *        the MMU prior to the memaccess, that way we could put
         *        the alignment checks in between the probe and the mem
         *        access.
         */
        gen_helper_memalign(cpu_env, addr, tcg_const_i32(dc->rd),
                            tcg_const_i32(1), tcg_const_i32(size - 1));
    }

    if (ex) {
        gen_set_label(swx_skip);
    }

    tcg_temp_free(addr);
}

static inline void eval_cc(DisasContext *dc, unsigned int cc,
                           TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    switch (cc) {
        case CC_EQ:
            tcg_gen_setcond_i32(TCG_COND_EQ, d, a, b);
            break;
        case CC_NE:
            tcg_gen_setcond_i32(TCG_COND_NE, d, a, b);
            break;
        case CC_LT:
            tcg_gen_setcond_i32(TCG_COND_LT, d, a, b);
            break;
        case CC_LE:
            tcg_gen_setcond_i32(TCG_COND_LE, d, a, b);
            break;
        case CC_GE:
            tcg_gen_setcond_i32(TCG_COND_GE, d, a, b);
            break;
        case CC_GT:
            tcg_gen_setcond_i32(TCG_COND_GT, d, a, b);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
            break;
    }
}

static void eval_cond_jmp(DisasContext *dc, TCGv_i32 pc_true, TCGv_i64 pc_false)
{
    TCGLabel *l1 = gen_new_label();
    /* Conditional jmp.  */
    tcg_gen_mov_i64(cpu_SR[SR_PC], pc_false);
    tcg_gen_brcondi_i32(TCG_COND_EQ, env_btaken, 0, l1);
    tcg_gen_extu_i32_i64(cpu_SR[SR_PC], pc_true);
    gen_set_label(l1);
}

static void dec_bcc(DisasContext *dc)
{
    unsigned int cc;
    unsigned int dslot;

    cc = EXTRACT_FIELD(dc->ir, 21, 23);
    dslot = dc->ir & (1 << 25);
    LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_i32(tcg_const_i32(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }

    if (dec_alu_op_b_is_small_imm(dc)) {
        int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend.  */

        tcg_gen_movi_i32(env_btarget, dc->pc + offset);
        dc->jmp = JMP_DIRECT_CC;
        dc->jmp_pc = dc->pc + offset;
    } else {
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_i32(env_btarget, dc->pc);
        tcg_gen_add_i32(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
    }
    eval_cc(dc, cc, env_btaken, cpu_R[dc->ra], tcg_const_i32(0));
}

static void dec_br(DisasContext *dc)
{
    unsigned int dslot, link, abs, mbar;

    dslot = dc->ir & (1 << 20);
    abs = dc->ir & (1 << 19);
    link = dc->ir & (1 << 18);

    /* Memory barrier.  */
    mbar = (dc->ir >> 16) & 31;
    if (mbar == 2 && dc->imm == 4) {
        /* mbar IMM & 16 decodes to sleep.  */
        if (dc->rd & 16) {
            TCGv_i32 tmp_1 = tcg_const_i32(1);

            LOG_DIS("sleep\n");

            t_sync_flags(dc);
            tcg_gen_st_i32(tmp_1, cpu_env,
                           -offsetof(MicroBlazeCPU, env)
                           +offsetof(CPUState, halted));
            tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc + 4);
            tcg_temp_free_i32(tmp_1);
            dc->is_jmp = DISAS_UPDATE;
            gen_helper_sleep(cpu_env);
            return;
        }
        LOG_DIS("mbar %d\n", dc->rd);
        /* Break the TB.  */
        dc->cpustate_changed = 1;
        return;
    }

    LOG_DIS("br%s%s%s%s imm=%x\n",
             abs ? "a" : "", link ? "l" : "",
             dc->type_b ? "i" : "", dslot ? "d" : "",
             dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_i32(tcg_const_i32(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }
    if (link && dc->rd)
        tcg_gen_movi_i32(cpu_R[dc->rd], dc->pc);

    dc->jmp = JMP_INDIRECT;
    if (abs) {
        tcg_gen_movi_i32(env_btaken, 1);
        tcg_gen_mov_i32(env_btarget, *(dec_alu_op_b(dc)));
        if (link && !dslot) {
            if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
                t_gen_raise_exception(dc, EXCP_BREAK);
            if (dc->imm == 0) {
                if (trap_userspace(dc, true)) {
                    return;
                }

                t_gen_raise_exception(dc, EXCP_DEBUG);
            }
        }
    } else {
        if (dec_alu_op_b_is_small_imm(dc)) {
            dc->jmp = JMP_DIRECT;
            dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
        } else {
            tcg_gen_movi_i32(env_btaken, 1);
            tcg_gen_movi_i32(env_btarget, dc->pc);
            tcg_gen_add_i32(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
        }
    }
}

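/*
 * rtid/rtbd/rted epilogues: MSR holds saved copies of the UM/VM mode
 * bits one position to the left (UMS/VMS), so shifting MSR right by
 * one and masking with MSR_VM | MSR_UM restores the saved mode before
 * the relevant enable bit is adjusted.
 */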
static inline void do_rti(DisasContext *dc)
{
    TCGv_i32 t0, t1;
    t0 = tcg_temp_new_i32();
    t1 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, cpu_SR[SR_MSR]);
    tcg_gen_shri_i32(t0, t1, 1);
    tcg_gen_ori_i32(t1, t1, MSR_IE);
    tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t0);
    dc->tb_flags &= ~DRTI_FLAG;
}

static inline void do_rtb(DisasContext *dc)
{
    TCGv_i32 t0, t1;
    t0 = tcg_temp_new_i32();
    t1 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, cpu_SR[SR_MSR]);
    tcg_gen_andi_i32(t1, t1, ~MSR_BIP);
    tcg_gen_shri_i32(t0, t1, 1);
    tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t0);
    dc->tb_flags &= ~DRTB_FLAG;
}

static inline void do_rte(DisasContext *dc)
{
    TCGv_i32 t0, t1;
    t0 = tcg_temp_new_i32();
    t1 = tcg_temp_new_i32();

    tcg_gen_extrl_i64_i32(t1, cpu_SR[SR_MSR]);
    tcg_gen_ori_i32(t1, t1, MSR_EE);
    tcg_gen_andi_i32(t1, t1, ~MSR_EIP);
    tcg_gen_shri_i32(t0, t1, 1);
    tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t0);
    dc->tb_flags &= ~DRTE_FLAG;
}

static void dec_rts(DisasContext *dc)
{
    unsigned int b_bit, i_bit, e_bit;

    i_bit = dc->ir & (1 << 21);
    b_bit = dc->ir & (1 << 22);
    e_bit = dc->ir & (1 << 23);

    if (trap_userspace(dc, i_bit || b_bit || e_bit)) {
        return;
    }

    dc->delayed_branch = 2;
    dc->tb_flags |= D_FLAG;
    tcg_gen_st_i32(tcg_const_i32(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                  cpu_env, offsetof(CPUMBState, bimm));

    if (i_bit) {
        LOG_DIS("rtid ir=%x\n", dc->ir);
        dc->tb_flags |= DRTI_FLAG;
    } else if (b_bit) {
        LOG_DIS("rtbd ir=%x\n", dc->ir);
        dc->tb_flags |= DRTB_FLAG;
    } else if (e_bit) {
        LOG_DIS("rted ir=%x\n", dc->ir);
        dc->tb_flags |= DRTE_FLAG;
    } else
        LOG_DIS("rts ir=%x\n", dc->ir);

    dc->jmp = JMP_INDIRECT;
    tcg_gen_movi_i32(env_btaken, 1);
    tcg_gen_add_i32(env_btarget, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static int dec_check_fpuv2(DisasContext *dc)
{
    if ((dc->cpu->cfg.use_fpu != 2) && (dc->tb_flags & MSR_EE_FLAG)) {
        tcg_gen_movi_i64(cpu_SR[SR_ESR], ESR_EC_FPU);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }
    /* Nonzero only when the FPU v2 insns are actually available.  */
    return (dc->cpu->cfg.use_fpu == 2) ? PVR2_USE_FPU2_MASK : 0;
}

static void dec_fpu(DisasContext *dc)
{
    unsigned int fpu_insn;

    if (trap_illegal(dc, !dc->cpu->cfg.use_fpu)) {
        return;
    }

    fpu_insn = (dc->ir >> 7) & 7;

    switch (fpu_insn) {
        case 0:
            gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 1:
            gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                             cpu_R[dc->rb]);
            break;

        case 2:
            gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 3:
            gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 4:
            switch ((dc->ir >> 4) & 7) {
                case 0:
                    gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 1:
                    gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 2:
                    gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 3:
                    gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 4:
                    gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 5:
                    gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 6:
                    gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                default:
                    qemu_log_mask(LOG_UNIMP,
                                  "unimplemented fcmp fpu_insn=%x pc=%x"
                                  " opc=%x\n",
                                  fpu_insn, dc->pc, dc->opcode);
                    dc->abort_at_next_insn = 1;
                    break;
            }
            break;

        case 5:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 6:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 7:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        default:
            qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
                          " opc=%x\n",
                          fpu_insn, dc->pc, dc->opcode);
            dc->abort_at_next_insn = 1;
            break;
    }
}

static void dec_null(DisasContext *dc)
{
    if (trap_illegal(dc, true)) {
        return;
    }
    qemu_log_mask(LOG_GUEST_ERROR, "unknown insn pc=%x opc=%x\n",
                  dc->pc, dc->opcode);
    dc->abort_at_next_insn = 1;
}

/* Insns connected to FSL or AXI stream attached devices.  */
static void dec_stream(DisasContext *dc)
{
    TCGv_i32 t_id, t_ctrl;
    int ctrl;

    LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
            dc->type_b ? "" : "d", dc->imm);

    if (trap_userspace(dc, true)) {
        return;
    }

    t_id = tcg_temp_new_i32();
    if (dc->type_b) {
        tcg_gen_movi_i32(t_id, dc->imm & 0xf);
        ctrl = dc->imm >> 10;
    } else {
        tcg_gen_andi_i32(t_id, cpu_R[dc->rb], 0xf);
        ctrl = dc->imm >> 5;
    }

    t_ctrl = tcg_const_i32(ctrl);

    if (dc->rd == 0) {
        gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
    } else {
        gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
    }
    tcg_temp_free_i32(t_id);
    tcg_temp_free_i32(t_ctrl);
}

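/*
 * Decoder dispatch table: decode() scans this in order and calls the
 * first entry whose masked opcode matches; the {0, 0} sentinel matches
 * any opcode, so unknown insns fall through to dec_null.
 */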
static struct decoder_info {
    struct {
        uint32_t bits;
        uint32_t mask;
    };
    void (*dec)(DisasContext *dc);
} decinfo[] = {
    {DEC_ADD, dec_add},
    {DEC_SUB, dec_sub},
    {DEC_AND, dec_and},
    {DEC_XOR, dec_xor},
    {DEC_OR, dec_or},
    {DEC_BIT, dec_bit},
    {DEC_BARREL, dec_barrel},
    {DEC_LD, dec_load},
    {DEC_ST, dec_store},
    {DEC_IMM, dec_imm},
    {DEC_BR, dec_br},
    {DEC_BCC, dec_bcc},
    {DEC_RTS, dec_rts},
    {DEC_FPU, dec_fpu},
    {DEC_MUL, dec_mul},
    {DEC_DIV, dec_div},
    {DEC_MSR, dec_msr},
    {DEC_STREAM, dec_stream},
    {{0, 0}, dec_null}
};

static inline void decode(DisasContext *dc, uint32_t ir)
{
    int i;

    dc->ir = ir;
    LOG_DIS("%8.8x\t", dc->ir);

    if (dc->ir)
        dc->nr_nops = 0;
    else {
        trap_illegal(dc, dc->cpu->env.pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK);

        LOG_DIS("nr_nops=%d\t", dc->nr_nops);
        dc->nr_nops++;
        if (dc->nr_nops > 4) {
            cpu_abort(CPU(dc->cpu), "fetching nop sequence\n");
        }
    }
    /* bit 2 seems to indicate insn type.  */
    dc->type_b = ir & (1 << 29);

    dc->opcode = EXTRACT_FIELD(ir, 26, 31);
    dc->rd = EXTRACT_FIELD(ir, 21, 25);
    dc->ra = EXTRACT_FIELD(ir, 16, 20);
    dc->rb = EXTRACT_FIELD(ir, 11, 15);
    dc->imm = EXTRACT_FIELD(ir, 0, 15);

    /* Large switch for all insns.  */
    for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
        if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
            decinfo[i].dec(dc);
            break;
        }
    }
}

/* generate intermediate code for basic block 'tb'.  */
void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
{
    CPUMBState *env = cs->env_ptr;
    MicroBlazeCPU *cpu = mb_env_get_cpu(env);
    uint32_t pc_start;
    struct DisasContext ctx;
    struct DisasContext *dc = &ctx;
    uint32_t next_page_start, org_flags;
    uint32_t npc;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    dc->cpu = cpu;
    dc->tb = tb;
    org_flags = dc->synced_flags = dc->tb_flags = tb->flags;

    dc->is_jmp = DISAS_NEXT;
    dc->jmp = 0;
    dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
    if (dc->delayed_branch) {
        dc->jmp = JMP_INDIRECT;
    }
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->cpustate_changed = 0;
    dc->abort_at_next_insn = 0;
    dc->nr_nops = 0;

    if (pc_start & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n", pc_start);
    }

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    num_insns = 0;
    max_insns = tb_cflags(tb) & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);
    do
    {
        tcg_gen_insn_start(dc->pc);
        num_insns++;

#if SIM_COMPAT
        if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
            tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc);
            gen_helper_debug();
        }
#endif

        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
            t_gen_raise_exception(dc, EXCP_DEBUG);
            dc->is_jmp = DISAS_UPDATE;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing.  */
            dc->pc += 4;
            break;
        }

        /* Pretty disas.  */
        LOG_DIS("%8.8x:\t", dc->pc);

        if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
            gen_io_start();
        }

        dc->clear_imm = 1;
        decode(dc, cpu_ldl_code(env, dc->pc));
        if (dc->clear_imm)
            dc->tb_flags &= ~IMM_FLAG;
        dc->pc += 4;

        if (dc->delayed_branch) {
            dc->delayed_branch--;
            if (!dc->delayed_branch) {
                if (dc->tb_flags & DRTI_FLAG)
                    do_rti(dc);
                if (dc->tb_flags & DRTB_FLAG)
                    do_rtb(dc);
                if (dc->tb_flags & DRTE_FLAG)
                    do_rte(dc);
                /* Clear the delay slot flag.  */
                dc->tb_flags &= ~D_FLAG;
                /* If it is a direct jump, try direct chaining.  */
                if (dc->jmp == JMP_INDIRECT) {
                    eval_cond_jmp(dc, env_btarget, tcg_const_i64(dc->pc));
                    dc->is_jmp = DISAS_JUMP;
                } else if (dc->jmp == JMP_DIRECT) {
                    t_sync_flags(dc);
                    gen_goto_tb(dc, 0, dc->jmp_pc);
                    dc->is_jmp = DISAS_TB_JUMP;
                } else if (dc->jmp == JMP_DIRECT_CC) {
                    TCGLabel *l1 = gen_new_label();
                    t_sync_flags(dc);
                    /* Conditional jmp.  */
                    tcg_gen_brcondi_i32(TCG_COND_NE, env_btaken, 0, l1);
                    gen_goto_tb(dc, 1, dc->pc);
                    gen_set_label(l1);
                    gen_goto_tb(dc, 0, dc->jmp_pc);

                    dc->is_jmp = DISAS_TB_JUMP;
                }
                break;
            }
        }
        if (cs->singlestep_enabled) {
            break;
        }
    } while (!dc->is_jmp && !dc->cpustate_changed
             && !tcg_op_buf_full()
             && !singlestep
             && (dc->pc < next_page_start)
             && num_insns < max_insns);

    npc = dc->pc;
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->tb_flags & D_FLAG) {
            dc->is_jmp = DISAS_UPDATE;
            tcg_gen_movi_i64(cpu_SR[SR_PC], npc);
            sync_jmpstate(dc);
        } else
            npc = dc->jmp_pc;
    }

    if (tb_cflags(tb) & CF_LAST_IO)
        gen_io_end();
    /* Force an update if the per-tb cpu state has changed.  */
    if (dc->is_jmp == DISAS_NEXT
        && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
        dc->is_jmp = DISAS_UPDATE;
        tcg_gen_movi_i64(cpu_SR[SR_PC], npc);
    }
    t_sync_flags(dc);

    if (unlikely(cs->singlestep_enabled)) {
        TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);

        if (dc->is_jmp != DISAS_JUMP) {
            tcg_gen_movi_i64(cpu_SR[SR_PC], npc);
        }
        gen_helper_raise_exception(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
    } else {
        switch (dc->is_jmp) {
            case DISAS_NEXT:
                gen_goto_tb(dc, 1, npc);
                break;
            default:
            case DISAS_JUMP:
            case DISAS_UPDATE:
                /* indicate that the hash table must be used
                   to find the next TB */
                tcg_gen_exit_tb(0);
                break;
            case DISAS_TB_JUMP:
                /* nothing more to generate */
                break;
        }
    }
    gen_tb_end(tb, num_insns);

    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;

#ifdef DEBUG_DISAS
#if !SIM_COMPAT
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        qemu_log_lock();
        qemu_log("--------------\n");
#if DISAS_GNU
        log_target_disas(cs, pc_start, dc->pc - pc_start);
#endif
        qemu_log("\nisize=%d osize=%d\n",
                 dc->pc - pc_start, tcg_op_buf_count());
        qemu_log_unlock();
    }
#endif
#endif
    assert(!dc->abort_at_next_insn);
}

void mb_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                       int flags)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    int i;

    if (!env || !f)
        return;

    cpu_fprintf(f, "IN: PC=%" PRIx64 " %s\n",
                env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC]));
    cpu_fprintf(f, "rmsr=%" PRIx64 " resr=%" PRIx64 " rear=%" PRIx64 " "
                   "debug=%x imm=%x iflags=%x fsr=%" PRIx64 "\n",
             env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
             env->debug, env->imm, env->iflags, env->sregs[SR_FSR]);
    cpu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
             env->btaken, env->btarget,
             (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
             (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
             (bool)(env->sregs[SR_MSR] & MSR_EIP),
             (bool)(env->sregs[SR_MSR] & MSR_IE));

    for (i = 0; i < 32; i++) {
        cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
        if ((i + 1) % 4 == 0)
            cpu_fprintf(f, "\n");
    }
    cpu_fprintf(f, "\n\n");
}

void mb_tcg_init(void)
{
    int i;

    env_debug = tcg_global_mem_new_i32(cpu_env,
                    offsetof(CPUMBState, debug),
                    "debug0");
    env_iflags = tcg_global_mem_new_i32(cpu_env,
                    offsetof(CPUMBState, iflags),
                    "iflags");
    env_imm = tcg_global_mem_new_i32(cpu_env,
                    offsetof(CPUMBState, imm),
                    "imm");
    env_btarget = tcg_global_mem_new_i32(cpu_env,
                     offsetof(CPUMBState, btarget),
                     "btarget");
    env_btaken = tcg_global_mem_new_i32(cpu_env,
                     offsetof(CPUMBState, btaken),
                     "btaken");
    env_res_addr = tcg_global_mem_new(cpu_env,
                     offsetof(CPUMBState, res_addr),
                     "res_addr");
    env_res_val = tcg_global_mem_new_i32(cpu_env,
                     offsetof(CPUMBState, res_val),
                     "res_val");
    for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
        cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
                          offsetof(CPUMBState, regs[i]),
                          regnames[i]);
    }
    for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
        cpu_SR[i] = tcg_global_mem_new_i64(cpu_env,
                          offsetof(CPUMBState, sregs[i]),
                          special_regnames[i]);
    }
}

void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->sregs[SR_PC] = data[0];
}