qemu/target/microblaze/translate.c
/*
 *  Xilinx MicroBlaze emulation for qemu: main translation routines.
 *
 *  Copyright (c) 2009 Edgar E. Iglesias.
 *  Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/helper-proto.h"
#include "microblaze-decode.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "qemu/qemu-print.h"

#include "trace-tcg.h"
#include "exec/log.h"


#define SIM_COMPAT 0
#define DISAS_GNU 1
#define DISAS_MB 1
#if DISAS_MB && !SIM_COMPAT
#  define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DIS(...) do { } while (0)
#endif

#define D(x)

#define EXTRACT_FIELD(src, start, end) \
            (((src) >> start) & ((1 << (end - start + 1)) - 1))

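/*
 * Example: with the 32-bit insn word in ir, EXTRACT_FIELD(ir, 26, 31)
 * yields the 6-bit major opcode and EXTRACT_FIELD(ir, 0, 15) the 16-bit
 * immediate, matching the field extraction done in decode() below.
 */
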
/* is_jmp field values */
#define DISAS_JUMP    DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_UPDATE  DISAS_TARGET_1 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */

static TCGv_i32 env_debug;
static TCGv_i32 cpu_R[32];
static TCGv_i64 cpu_SR[14];
static TCGv_i32 env_imm;
static TCGv_i32 env_btaken;
static TCGv_i64 env_btarget;
static TCGv_i32 env_iflags;
static TCGv env_res_addr;
static TCGv_i32 env_res_val;

#include "exec/gen-icount.h"

/* This is the state at translation time.  */
typedef struct DisasContext {
    MicroBlazeCPU *cpu;
    uint32_t pc;

    /* Decoder.  */
    int type_b;
    uint32_t ir;
    uint8_t opcode;
    uint8_t rd, ra, rb;
    uint16_t imm;

    unsigned int cpustate_changed;
    unsigned int delayed_branch;
    unsigned int tb_flags, synced_flags; /* tb dependent flags.  */
    unsigned int clear_imm;
    int is_jmp;

#define JMP_NOJMP     0
#define JMP_DIRECT    1
#define JMP_DIRECT_CC 2
#define JMP_INDIRECT  3
    unsigned int jmp;
    uint32_t jmp_pc;

    int abort_at_next_insn;
    struct TranslationBlock *tb;
    int singlestep_enabled;
} DisasContext;

static const char *regnames[] =
{
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};

static const char *special_regnames[] =
{
    "rpc", "rmsr", "sr2", "rear", "sr4", "resr", "sr6", "rfsr",
    "sr8", "sr9", "sr10", "rbtr", "sr12", "redr"
};

static inline void t_sync_flags(DisasContext *dc)
{
    /* Synch the tb dependent flags between translator and runtime.  */
    if (dc->tb_flags != dc->synced_flags) {
        tcg_gen_movi_i32(env_iflags, dc->tb_flags);
        dc->synced_flags = dc->tb_flags;
    }
}

static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    t_sync_flags(dc);
    tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc);
    gen_helper_raise_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    dc->is_jmp = DISAS_UPDATE;
}

static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
{
#ifndef CONFIG_USER_ONLY
    return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (use_goto_tb(dc, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i64(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb(dc->tb, n);
    } else {
        tcg_gen_movi_i64(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb(NULL, 0);
    }
}
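
/*
 * Direct TB chaining via goto_tb is only safe when the destination lies
 * on the same guest page as this TB (checked by use_goto_tb above);
 * otherwise we exit to the main loop with exit_tb(NULL, 0) so the next
 * TB is looked up at run time.
 */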

static void read_carry(DisasContext *dc, TCGv_i32 d)
{
    tcg_gen_extrl_i64_i32(d, cpu_SR[SR_MSR]);
    tcg_gen_shri_i32(d, d, 31);
}

/*
 * write_carry sets the carry bits in MSR based on bit 0 of v.
 * v[31:1] are ignored.
 */
static void write_carry(DisasContext *dc, TCGv_i32 v)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(t0, v);
    /* Deposit bit 0 into MSR_C and the alias MSR_CC.  */
    tcg_gen_deposit_i64(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0, 2, 1);
    tcg_gen_deposit_i64(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0, 31, 1);
    tcg_temp_free_i64(t0);
}
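
/*
 * The carry flag lives in two places: MSR bit 2 (MSR_C) and its copy
 * at bit 31 (MSR_CC).  read_carry uses the bit-31 alias so a single
 * shift recovers the flag; write_carry updates both locations to keep
 * the alias consistent.
 */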

static void write_carryi(DisasContext *dc, bool carry)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    tcg_gen_movi_i32(t0, carry);
    write_carry(dc, t0);
    tcg_temp_free_i32(t0);
}

/*
 * Returns true if the insn is an illegal operation.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_illegal(DisasContext *dc, bool cond)
{
    if (cond && (dc->tb_flags & MSR_EE_FLAG)
        && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_i64(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }
    return cond;
}

/*
 * Returns true if the insn is illegal in userspace.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_userspace(DisasContext *dc, bool cond)
{
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
    bool cond_user = cond && mem_index == MMU_USER_IDX;

    if (cond_user && (dc->tb_flags & MSR_EE_FLAG)) {
        tcg_gen_movi_i64(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }
    return cond_user;
}

/* True if ALU operand b is a small immediate that may deserve
   faster treatment.  */
static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
{
    /* Immediate insn without the imm prefix?  */
    return dc->type_b && !(dc->tb_flags & IMM_FLAG);
}

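/*
 * Select ALU operand b: rb for register-form (type A) insns; for
 * immediate-form (type B) insns, materialize the 32-bit immediate in
 * env_imm.  Without a preceding imm prefix the 16-bit field is
 * sign-extended; with IMM_FLAG set, dec_imm has already loaded the
 * upper halfword and the low 16 bits are OR'd in.
 */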
static inline TCGv_i32 *dec_alu_op_b(DisasContext *dc)
{
    if (dc->type_b) {
        if (dc->tb_flags & IMM_FLAG)
            tcg_gen_ori_i32(env_imm, env_imm, dc->imm);
        else
            tcg_gen_movi_i32(env_imm, (int32_t)((int16_t)dc->imm));
        return &env_imm;
    } else
        return &cpu_R[dc->rb];
}

static void dec_add(DisasContext *dc)
{
    unsigned int k, c;
    TCGv_i32 cf;

    k = dc->opcode & 4;
    c = dc->opcode & 2;

    LOG_DIS("add%s%s%s r%d r%d r%d\n",
            dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
            dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new_i32();

                read_carry(dc, cf);
                tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free_i32(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry.  */
    cf = tcg_temp_new_i32();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_i32(cf, 0);
    }

    if (dc->rd) {
        TCGv_i32 ncf = tcg_temp_new_i32();
        gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free_i32(ncf);
    } else {
        gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free_i32(cf);
}

static void dec_sub(DisasContext *dc)
{
    unsigned int u, cmp, k, c;
    TCGv_i32 cf, na;

    u = dc->imm & 2;
    k = dc->opcode & 4;
    c = dc->opcode & 2;
    cmp = (dc->imm & 1) && (!dc->type_b) && k;

    if (cmp) {
        LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
        if (dc->rd) {
            if (u)
                gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            else
                gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        }
        return;
    }

    LOG_DIS("sub%s%s r%d, r%d r%d\n",
             k ? "k" : "",  c ? "c" : "", dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_sub_i32(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new_i32();

                read_carry(dc, cf);
                tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free_i32(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry. And complement a into na.  */
    cf = tcg_temp_new_i32();
    na = tcg_temp_new_i32();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_i32(cf, 1);
    }

    /* d = b + ~a + c. carry defaults to 1.  */
    tcg_gen_not_i32(na, cpu_R[dc->ra]);

    if (dc->rd) {
        TCGv_i32 ncf = tcg_temp_new_i32();
        gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_i32(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
        tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free_i32(ncf);
    } else {
        gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free_i32(cf);
    tcg_temp_free_i32(na);
}
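
/*
 * Note that MicroBlaze subtraction is "reverse": rsub computes
 * rd = rb - ra, implemented above as rd = rb + ~ra + carry with the
 * carry-in defaulting to 1 (two's complement).
 */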

static void dec_pattern(DisasContext *dc)
{
    unsigned int mode;

    if (trap_illegal(dc, !dc->cpu->cfg.use_pcmp_instr)) {
        return;
    }

    mode = dc->opcode & 3;
    switch (mode) {
        case 0:
            /* pcmpbf.  */
            LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd)
                gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                tcg_gen_setcond_i32(TCG_COND_EQ, cpu_R[dc->rd],
                                   cpu_R[dc->ra], cpu_R[dc->rb]);
            }
            break;
        case 3:
            LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                tcg_gen_setcond_i32(TCG_COND_NE, cpu_R[dc->rd],
                                   cpu_R[dc->ra], cpu_R[dc->rb]);
            }
            break;
        default:
            cpu_abort(CPU(dc->cpu),
                      "unsupported pattern insn opcode=%x\n", dc->opcode);
            break;
    }
}

static void dec_and(DisasContext *dc)
{
    unsigned int not;

    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    not = dc->opcode & (1 << 1);
    LOG_DIS("and%s\n", not ? "n" : "");

    if (!dc->rd)
        return;

    if (not) {
        tcg_gen_andc_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    } else
        tcg_gen_and_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_or(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
    if (dc->rd)
        tcg_gen_or_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_xor(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("xor r%d\n", dc->rd);
    if (dc->rd)
        tcg_gen_xor_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static inline void msr_read(DisasContext *dc, TCGv_i32 d)
{
    tcg_gen_extrl_i64_i32(d, cpu_SR[SR_MSR]);
}

static inline void msr_write(DisasContext *dc, TCGv_i32 v)
{
    TCGv_i64 t;

    t = tcg_temp_new_i64();
    dc->cpustate_changed = 1;
    /* PVR bit is not writable.  */
    tcg_gen_extu_i32_i64(t, v);
    tcg_gen_andi_i64(t, t, ~MSR_PVR);
    tcg_gen_andi_i64(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
    tcg_gen_or_i64(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t);
    tcg_temp_free_i64(t);
}

static void dec_msr(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv_i32 t0, t1;
    unsigned int sr, rn;
    bool to, clrset, extended = false;

    sr = extract32(dc->imm, 0, 14);
    to = extract32(dc->imm, 14, 1);
    clrset = extract32(dc->imm, 15, 1) == 0;
    dc->type_b = 1;
    if (to) {
        dc->cpustate_changed = 1;
    }

    /* Extended MSRs are only available if addr_size > 32.  */
    if (dc->cpu->cfg.addr_size > 32) {
        /* The E-bit is encoded differently for To/From MSR.  */
        static const unsigned int e_bit[] = { 19, 24 };

        extended = extract32(dc->imm, e_bit[to], 1);
    }

    /* msrclr and msrset.  */
    if (clrset) {
        bool clr = extract32(dc->ir, 16, 1);

        LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
                dc->rd, dc->imm);

        if (!dc->cpu->cfg.use_msr_instr) {
            /* nop??? */
            return;
        }

        if (trap_userspace(dc, dc->imm != 4 && dc->imm != 0)) {
            return;
        }

        if (dc->rd)
            msr_read(dc, cpu_R[dc->rd]);

        t0 = tcg_temp_new_i32();
        t1 = tcg_temp_new_i32();
        msr_read(dc, t0);
        tcg_gen_mov_i32(t1, *(dec_alu_op_b(dc)));

        if (clr) {
            tcg_gen_not_i32(t1, t1);
            tcg_gen_and_i32(t0, t0, t1);
        } else
            tcg_gen_or_i32(t0, t0, t1);
        msr_write(dc, t0);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
        tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc + 4);
        dc->is_jmp = DISAS_UPDATE;
        return;
    }

    if (trap_userspace(dc, to)) {
        return;
    }

#if !defined(CONFIG_USER_ONLY)
    /* Catch read/writes to the mmu block.  */
    if ((sr & ~0xff) == 0x1000) {
        TCGv_i32 tmp_ext = tcg_const_i32(extended);
        TCGv_i32 tmp_sr;

        sr &= 7;
        tmp_sr = tcg_const_i32(sr);
        LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        if (to) {
            gen_helper_mmu_write(cpu_env, tmp_ext, tmp_sr, cpu_R[dc->ra]);
        } else {
            gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tmp_ext, tmp_sr);
        }
        tcg_temp_free_i32(tmp_sr);
        tcg_temp_free_i32(tmp_ext);
        return;
    }
#endif

    if (to) {
        LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        switch (sr) {
            case 0:
                break;
            case 1:
                msr_write(dc, cpu_R[dc->ra]);
                break;
            case SR_EAR:
            case SR_ESR:
            case SR_FSR:
                tcg_gen_extu_i32_i64(cpu_SR[sr], cpu_R[dc->ra]);
                break;
            case 0x800:
                tcg_gen_st_i32(cpu_R[dc->ra],
                               cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_st_i32(cpu_R[dc->ra],
                               cpu_env, offsetof(CPUMBState, shr));
                break;
            default:
                cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
                break;
        }
    } else {
        LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);

        switch (sr) {
            case 0:
                tcg_gen_movi_i32(cpu_R[dc->rd], dc->pc);
                break;
            case 1:
                msr_read(dc, cpu_R[dc->rd]);
                break;
            case SR_EAR:
                if (extended) {
                    tcg_gen_extrh_i64_i32(cpu_R[dc->rd], cpu_SR[sr]);
                    break;
                }
                /* fall through */
            case SR_ESR:
            case SR_FSR:
            case SR_BTR:
                tcg_gen_extrl_i64_i32(cpu_R[dc->rd], cpu_SR[sr]);
                break;
            case 0x800:
                tcg_gen_ld_i32(cpu_R[dc->rd],
                               cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_ld_i32(cpu_R[dc->rd],
                               cpu_env, offsetof(CPUMBState, shr));
                break;
            case 0x2000 ... 0x200c:
                rn = sr & 0xf;
                tcg_gen_ld_i32(cpu_R[dc->rd],
                              cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
                break;
            default:
                cpu_abort(cs, "unknown mfs reg %x\n", sr);
                break;
        }
    }

    if (dc->rd == 0) {
        tcg_gen_movi_i32(cpu_R[0], 0);
    }
}

/* Multiplier unit.  */
static void dec_mul(DisasContext *dc)
{
    TCGv_i32 tmp;
    unsigned int subcode;

    if (trap_illegal(dc, !dc->cpu->cfg.use_hw_mul)) {
        return;
    }

    subcode = dc->imm & 3;

    if (dc->type_b) {
        LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
        tcg_gen_mul_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        return;
    }

    /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2.  */
    if (subcode >= 1 && subcode <= 3 && dc->cpu->cfg.use_hw_mul < 2) {
        /* nop??? */
    }

    tmp = tcg_temp_new_i32();
    switch (subcode) {
        case 0:
            LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_mul_i32(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 1:
            LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_muls2_i32(tmp, cpu_R[dc->rd],
                              cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_mulsu2_i32(tmp, cpu_R[dc->rd],
                               cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 3:
            LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_mulu2_i32(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "unknown MUL insn %x\n", subcode);
            break;
    }
    tcg_temp_free_i32(tmp);
}

/* Div unit.  */
static void dec_div(DisasContext *dc)
{
    unsigned int u;

    u = dc->imm & 2;
    LOG_DIS("div\n");

    if (trap_illegal(dc, !dc->cpu->cfg.use_div)) {
        return;
    }

    if (u)
        gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    else
        gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    if (!dc->rd)
        tcg_gen_movi_i32(cpu_R[dc->rd], 0);
}

static void dec_barrel(DisasContext *dc)
{
    TCGv_i32 t0;
    unsigned int imm_w, imm_s;
    bool s, t, e = false, i = false;

    if (trap_illegal(dc, !dc->cpu->cfg.use_barrel)) {
        return;
    }

    if (dc->type_b) {
        /* Insert and extract are only available in immediate mode.  */
        i = extract32(dc->imm, 15, 1);
        e = extract32(dc->imm, 14, 1);
    }
    s = extract32(dc->imm, 10, 1);
    t = extract32(dc->imm, 9, 1);
    imm_w = extract32(dc->imm, 6, 5);
    imm_s = extract32(dc->imm, 0, 5);

    LOG_DIS("bs%s%s%s r%d r%d r%d\n",
            e ? "e" : "",
            s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);

    if (e) {
        if (imm_w + imm_s > 32 || imm_w == 0) {
            /* These inputs have undefined behavior.  */
            qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
                          imm_w, imm_s);
        } else {
            tcg_gen_extract_i32(cpu_R[dc->rd], cpu_R[dc->ra], imm_s, imm_w);
        }
    } else if (i) {
        int width = imm_w - imm_s + 1;

        if (imm_w < imm_s) {
            /* These inputs have undefined behavior.  */
            qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
                          imm_w, imm_s);
        } else {
            tcg_gen_deposit_i32(cpu_R[dc->rd], cpu_R[dc->rd], cpu_R[dc->ra],
                                imm_s, width);
        }
    } else {
        t0 = tcg_temp_new_i32();

        tcg_gen_mov_i32(t0, *(dec_alu_op_b(dc)));
        tcg_gen_andi_i32(t0, t0, 31);

        if (s) {
            tcg_gen_shl_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
        } else {
            if (t) {
                tcg_gen_sar_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
            } else {
                tcg_gen_shr_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
            }
        }
        tcg_temp_free_i32(t0);
    }
}

static void dec_bit(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv_i32 t0;
    unsigned int op;

    op = dc->ir & ((1 << 9) - 1);
    switch (op) {
        case 0x21:
            /* src.  */
            t0 = tcg_temp_new_i32();

            LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_extrl_i64_i32(t0, cpu_SR[SR_MSR]);
            tcg_gen_andi_i32(t0, t0, MSR_CC);
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                tcg_gen_shri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                tcg_gen_or_i32(cpu_R[dc->rd], cpu_R[dc->rd], t0);
            }
            tcg_temp_free_i32(t0);
            break;

        case 0x1:
        case 0x41:
            /* srl.  */
            LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);

            /* Update carry. Note that write carry only looks at the LSB.  */
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                if (op == 0x41)
                    tcg_gen_shri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                else
                    tcg_gen_sari_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
            }
            break;
        case 0x60:
            LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x61:
            LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x64:
        case 0x66:
        case 0x74:
        case 0x76:
            /* wdc.  */
            LOG_DIS("wdc r%d\n", dc->ra);
            trap_userspace(dc, true);
            break;
        case 0x68:
            /* wic.  */
            LOG_DIS("wic r%d\n", dc->ra);
            trap_userspace(dc, true);
            break;
        case 0xe0:
            /* clz.  */
            if (trap_illegal(dc, !dc->cpu->cfg.use_pcmp_instr)) {
                return;
            }
            if (dc->cpu->cfg.use_pcmp_instr) {
                tcg_gen_clzi_i32(cpu_R[dc->rd], cpu_R[dc->ra], 32);
            }
            break;
        case 0x1e0:
            /* swapb */
            LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x1e2:
            /* swaph */
            LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
            break;
        default:
            cpu_abort(cs, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
                      dc->pc, op, dc->rd, dc->ra, dc->rb);
            break;
    }
}

static inline void sync_jmpstate(DisasContext *dc)
{
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->jmp == JMP_DIRECT) {
            tcg_gen_movi_i32(env_btaken, 1);
        }
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_i64(env_btarget, dc->jmp_pc);
    }
}
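
/*
 * Loads and stores can fault; if that happens inside a branch delay
 * slot, the runtime needs env_btaken/env_btarget to describe the
 * pending branch.  sync_jmpstate therefore demotes statically-known
 * (direct) branches to the indirect representation before any insn
 * that may trap.
 */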

static void dec_imm(DisasContext *dc)
{
    LOG_DIS("imm %x\n", dc->imm << 16);
    tcg_gen_movi_i32(env_imm, (dc->imm << 16));
    dc->tb_flags |= IMM_FLAG;
    dc->clear_imm = 0;
}
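
/*
 * Worked example: "imm 0x1234" loads 0x12340000 into env_imm and keeps
 * IMM_FLAG set past the end of this insn (clear_imm = 0), so a
 * following "addi r3, r4, 0x5678" makes dec_alu_op_b OR in the low
 * halfword and the ALU sees the full immediate 0x12345678.
 */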

static inline void compute_ldst_addr(DisasContext *dc, bool ea, TCGv t)
{
    bool extimm = dc->tb_flags & IMM_FLAG;
    /* Should be set to true if r1 is used by load/stores.  */
    bool stackprot = false;
    TCGv_i32 t32;

    /* All load/stores use ra.  */
    if (dc->ra == 1 && dc->cpu->cfg.stackprot) {
        stackprot = true;
    }

    /* Treat the common cases first.  */
    if (!dc->type_b) {
        if (ea) {
            int addr_size = dc->cpu->cfg.addr_size;

            if (addr_size == 32) {
                tcg_gen_extu_i32_tl(t, cpu_R[dc->rb]);
                return;
            }

            tcg_gen_concat_i32_i64(t, cpu_R[dc->rb], cpu_R[dc->ra]);
            if (addr_size < 64) {
                /* Mask off out of range bits.  */
                tcg_gen_andi_i64(t, t, MAKE_64BIT_MASK(0, addr_size));
            }
            return;
        }

        /* If any of the regs is r0, set t to the value of the other reg.  */
        if (dc->ra == 0) {
            tcg_gen_extu_i32_tl(t, cpu_R[dc->rb]);
            return;
        } else if (dc->rb == 0) {
            tcg_gen_extu_i32_tl(t, cpu_R[dc->ra]);
            return;
        }

        if (dc->rb == 1 && dc->cpu->cfg.stackprot) {
            stackprot = true;
        }

        t32 = tcg_temp_new_i32();
        tcg_gen_add_i32(t32, cpu_R[dc->ra], cpu_R[dc->rb]);
        tcg_gen_extu_i32_tl(t, t32);
        tcg_temp_free_i32(t32);

        if (stackprot) {
            gen_helper_stackprot(cpu_env, t);
        }
        return;
    }
    /* Immediate.  */
    t32 = tcg_temp_new_i32();
    if (!extimm) {
        tcg_gen_addi_i32(t32, cpu_R[dc->ra], (int16_t)dc->imm);
    } else {
        tcg_gen_add_i32(t32, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    }
    tcg_gen_extu_i32_tl(t, t32);
    tcg_temp_free_i32(t32);

    if (stackprot) {
        gen_helper_stackprot(cpu_env, t);
    }
    return;
}
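
/*
 * For extended-addressing ("ea") accesses the 64-bit address is formed
 * by concatenating two 32-bit registers, rb supplying the low word and
 * ra the high word, then masking down to the configured addr_size.
 * Plain accesses compute a 32-bit ra + rb (or ra + imm) sum and
 * zero-extend it into the target-long address.
 */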

static void dec_load(DisasContext *dc)
{
    TCGv_i32 v;
    TCGv addr;
    unsigned int size;
    bool rev = false, ex = false, ea = false;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
    MemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        ea = extract32(dc->ir, 7, 1);
        rev = extract32(dc->ir, 9, 1);
        ex = extract32(dc->ir, 10, 1);
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (trap_illegal(dc, size > 4)) {
        return;
    }

    if (trap_userspace(dc, ea)) {
        return;
    }

    LOG_DIS("l%d%s%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "",
                                                        ea ? "ea" : "");

    t_sync_flags(dc);
    addr = tcg_temp_new();
    compute_ldst_addr(dc, ea, addr);
    /* Extended addressing bypasses the MMU.  */
    mem_index = ea ? MMU_NOMMU_IDX : mem_index;

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                tcg_gen_xori_tl(addr, addr, 3);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                tcg_gen_xori_tl(addr, addr, 2);
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }

    /* lwx does not throw unaligned access errors, so force alignment */
    if (ex) {
        tcg_gen_andi_tl(addr, addr, ~3);
    }

    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);

    /* Verify alignment if needed.  */
    /*
     * Microblaze gives MMU faults priority over faults due to
     * unaligned addresses. That's why we speculatively do the load
     * into v. If the load succeeds, we verify alignment of the
     * address and if that succeeds we write into the destination reg.
     */
    v = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(v, addr, mem_index, mop);

    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        TCGv_i32 t0 = tcg_const_i32(0);
        TCGv_i32 treg = tcg_const_i32(dc->rd);
        TCGv_i32 tsize = tcg_const_i32(size - 1);

        tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc);
        gen_helper_memalign(cpu_env, addr, treg, t0, tsize);

        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(treg);
        tcg_temp_free_i32(tsize);
    }

    if (ex) {
        tcg_gen_mov_tl(env_res_addr, addr);
        tcg_gen_mov_i32(env_res_val, v);
    }
    if (dc->rd) {
        tcg_gen_mov_i32(cpu_R[dc->rd], v);
    }
    tcg_temp_free_i32(v);

    if (ex) { /* lwx */
        /* no support for AXI exclusive so always clear C */
        write_carryi(dc, 0);
    }

    tcg_temp_free(addr);
}

static void dec_store(DisasContext *dc)
{
    TCGv addr;
    TCGLabel *swx_skip = NULL;
    unsigned int size;
    bool rev = false, ex = false, ea = false;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
    MemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        ea = extract32(dc->ir, 7, 1);
        rev = extract32(dc->ir, 9, 1);
        ex = extract32(dc->ir, 10, 1);
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (trap_illegal(dc, size > 4)) {
        return;
    }

    trap_userspace(dc, ea);

    LOG_DIS("s%d%s%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "",
                                                        ea ? "ea" : "");
    t_sync_flags(dc);
    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);
    /* SWX needs a temp_local.  */
    addr = ex ? tcg_temp_local_new() : tcg_temp_new();
    compute_ldst_addr(dc, ea, addr);
    /* Extended addressing bypasses the MMU.  */
    mem_index = ea ? MMU_NOMMU_IDX : mem_index;

    if (ex) { /* swx */
        TCGv_i32 tval;

        /* swx does not throw unaligned access errors, so force alignment */
        tcg_gen_andi_tl(addr, addr, ~3);

        write_carryi(dc, 1);
        swx_skip = gen_new_label();
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, addr, swx_skip);

        /* Compare the value loaded at lwx with current contents of
           the reserved location.
           FIXME: This only works for system emulation where we can expect
           this compare and the following write to be atomic. For user
           emulation we need to add atomicity between threads.  */
        tval = tcg_temp_new_i32();
        tcg_gen_qemu_ld_i32(tval, addr, cpu_mmu_index(&dc->cpu->env, false),
                            MO_TEUL);
        tcg_gen_brcond_i32(TCG_COND_NE, env_res_val, tval, swx_skip);
        write_carryi(dc, 0);
        tcg_temp_free_i32(tval);
    }

    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                tcg_gen_xori_tl(addr, addr, 3);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                /* Force addr into the temp.  */
                tcg_gen_xori_tl(addr, addr, 2);
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }
    tcg_gen_qemu_st_i32(cpu_R[dc->rd], addr, mem_index, mop);

    /* Verify alignment if needed.  */
    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        TCGv_i32 t1 = tcg_const_i32(1);
        TCGv_i32 treg = tcg_const_i32(dc->rd);
        TCGv_i32 tsize = tcg_const_i32(size - 1);

        tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc);
        /* FIXME: if the alignment is wrong, we should restore the value
         *        in memory. One possible way to achieve this is to probe
         *        the MMU prior to the memaccess, that way we could put
         *        the alignment checks in between the probe and the mem
         *        access.
         */
        gen_helper_memalign(cpu_env, addr, treg, t1, tsize);

        tcg_temp_free_i32(t1);
        tcg_temp_free_i32(treg);
        tcg_temp_free_i32(tsize);
    }

    if (ex) {
        gen_set_label(swx_skip);
    }

    tcg_temp_free(addr);
}

static inline void eval_cc(DisasContext *dc, unsigned int cc,
                           TCGv_i32 d, TCGv_i32 a)
{
    static const int mb_to_tcg_cc[] = {
        [CC_EQ] = TCG_COND_EQ,
        [CC_NE] = TCG_COND_NE,
        [CC_LT] = TCG_COND_LT,
        [CC_LE] = TCG_COND_LE,
        [CC_GE] = TCG_COND_GE,
        [CC_GT] = TCG_COND_GT,
    };

    switch (cc) {
    case CC_EQ:
    case CC_NE:
    case CC_LT:
    case CC_LE:
    case CC_GE:
    case CC_GT:
        tcg_gen_setcondi_i32(mb_to_tcg_cc[cc], d, a, 0);
        break;
    default:
        cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
        break;
    }
}

static void eval_cond_jmp(DisasContext *dc, TCGv_i64 pc_true, TCGv_i64 pc_false)
{
    TCGv_i64 tmp_btaken = tcg_temp_new_i64();
    TCGv_i64 tmp_zero = tcg_const_i64(0);

    tcg_gen_extu_i32_i64(tmp_btaken, env_btaken);
    tcg_gen_movcond_i64(TCG_COND_NE, cpu_SR[SR_PC],
                        tmp_btaken, tmp_zero,
                        pc_true, pc_false);

    tcg_temp_free_i64(tmp_btaken);
    tcg_temp_free_i64(tmp_zero);
}

static void dec_setup_dslot(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_const_i32(dc->type_b && (dc->tb_flags & IMM_FLAG));

    dc->delayed_branch = 2;
    dc->tb_flags |= D_FLAG;

    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUMBState, bimm));
    tcg_temp_free_i32(tmp);
}
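
/*
 * A delay-slot branch sets delayed_branch = 2: the counter ticks down
 * once per translated insn in gen_intermediate_code, so the branch
 * resolves after the following (delay-slot) insn.  D_FLAG marks that
 * state in tb_flags, and bimm records whether the branch insn itself
 * was an immediate-prefixed type B insn.
 */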

static void dec_bcc(DisasContext *dc)
{
    unsigned int cc;
    unsigned int dslot;

    cc = EXTRACT_FIELD(dc->ir, 21, 23);
    dslot = dc->ir & (1 << 25);
    LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dec_setup_dslot(dc);
    }

    if (dec_alu_op_b_is_small_imm(dc)) {
        int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend.  */

        tcg_gen_movi_i64(env_btarget, dc->pc + offset);
        dc->jmp = JMP_DIRECT_CC;
        dc->jmp_pc = dc->pc + offset;
    } else {
        dc->jmp = JMP_INDIRECT;
        tcg_gen_extu_i32_i64(env_btarget, *(dec_alu_op_b(dc)));
        tcg_gen_addi_i64(env_btarget, env_btarget, dc->pc);
        tcg_gen_andi_i64(env_btarget, env_btarget, UINT32_MAX);
    }
    eval_cc(dc, cc, env_btaken, cpu_R[dc->ra]);
}

static void dec_br(DisasContext *dc)
{
    unsigned int dslot, link, abs, mbar;

    dslot = dc->ir & (1 << 20);
    abs = dc->ir & (1 << 19);
    link = dc->ir & (1 << 18);

    /* Memory barrier.  */
    mbar = (dc->ir >> 16) & 31;
    if (mbar == 2 && dc->imm == 4) {
        /* mbar IMM & 16 decodes to sleep.  */
        if (dc->rd & 16) {
            TCGv_i32 tmp_hlt = tcg_const_i32(EXCP_HLT);
            TCGv_i32 tmp_1 = tcg_const_i32(1);

            LOG_DIS("sleep\n");

            t_sync_flags(dc);
            tcg_gen_st_i32(tmp_1, cpu_env,
                           -offsetof(MicroBlazeCPU, env)
                           +offsetof(CPUState, halted));
            tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc + 4);
            gen_helper_raise_exception(cpu_env, tmp_hlt);
            tcg_temp_free_i32(tmp_hlt);
            tcg_temp_free_i32(tmp_1);
            return;
        }
        LOG_DIS("mbar %d\n", dc->rd);
        /* Break the TB.  */
        dc->cpustate_changed = 1;
        return;
    }

    LOG_DIS("br%s%s%s%s imm=%x\n",
             abs ? "a" : "", link ? "l" : "",
             dc->type_b ? "i" : "", dslot ? "d" : "",
             dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dec_setup_dslot(dc);
    }
    if (link && dc->rd)
        tcg_gen_movi_i32(cpu_R[dc->rd], dc->pc);

    dc->jmp = JMP_INDIRECT;
    if (abs) {
        tcg_gen_movi_i32(env_btaken, 1);
        tcg_gen_extu_i32_i64(env_btarget, *(dec_alu_op_b(dc)));
        if (link && !dslot) {
            if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
                t_gen_raise_exception(dc, EXCP_BREAK);
            if (dc->imm == 0) {
                if (trap_userspace(dc, true)) {
                    return;
                }

                t_gen_raise_exception(dc, EXCP_DEBUG);
            }
        }
    } else {
        if (dec_alu_op_b_is_small_imm(dc)) {
            dc->jmp = JMP_DIRECT;
            dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
        } else {
            tcg_gen_movi_i32(env_btaken, 1);
            tcg_gen_extu_i32_i64(env_btarget, *(dec_alu_op_b(dc)));
            tcg_gen_addi_i64(env_btarget, env_btarget, dc->pc);
            tcg_gen_andi_i64(env_btarget, env_btarget, UINT32_MAX);
        }
    }
}

static inline void do_rti(DisasContext *dc)
{
    TCGv_i32 t0, t1;
    t0 = tcg_temp_new_i32();
    t1 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, cpu_SR[SR_MSR]);
    tcg_gen_shri_i32(t0, t1, 1);
    tcg_gen_ori_i32(t1, t1, MSR_IE);
    tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t0);
    dc->tb_flags &= ~DRTI_FLAG;
}

static inline void do_rtb(DisasContext *dc)
{
    TCGv_i32 t0, t1;
    t0 = tcg_temp_new_i32();
    t1 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, cpu_SR[SR_MSR]);
    tcg_gen_andi_i32(t1, t1, ~MSR_BIP);
    tcg_gen_shri_i32(t0, t1, 1);
    tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t0);
    dc->tb_flags &= ~DRTB_FLAG;
}

static inline void do_rte(DisasContext *dc)
{
    TCGv_i32 t0, t1;
    t0 = tcg_temp_new_i32();
    t1 = tcg_temp_new_i32();

    tcg_gen_extrl_i64_i32(t1, cpu_SR[SR_MSR]);
    tcg_gen_ori_i32(t1, t1, MSR_EE);
    tcg_gen_andi_i32(t1, t1, ~MSR_EIP);
    tcg_gen_shri_i32(t0, t1, 1);
    tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t0);
    dc->tb_flags &= ~DRTE_FLAG;
}
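
/*
 * Common pattern of the three returns above: the saved mode bits
 * (UMS/VMS, one bit position above UM/VM in the MSR) are shifted down
 * to become the new UM/VM.  In addition, rti re-enables interrupts
 * (MSR_IE), rtb clears break-in-progress (MSR_BIP), and rte re-enables
 * exceptions (MSR_EE) while clearing exception-in-progress (MSR_EIP).
 */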

static void dec_rts(DisasContext *dc)
{
    unsigned int b_bit, i_bit, e_bit;
    TCGv_i64 tmp64;

    i_bit = dc->ir & (1 << 21);
    b_bit = dc->ir & (1 << 22);
    e_bit = dc->ir & (1 << 23);

    if (trap_userspace(dc, i_bit || b_bit || e_bit)) {
        return;
    }

    dec_setup_dslot(dc);

    if (i_bit) {
        LOG_DIS("rtid ir=%x\n", dc->ir);
        dc->tb_flags |= DRTI_FLAG;
    } else if (b_bit) {
        LOG_DIS("rtbd ir=%x\n", dc->ir);
        dc->tb_flags |= DRTB_FLAG;
    } else if (e_bit) {
        LOG_DIS("rted ir=%x\n", dc->ir);
        dc->tb_flags |= DRTE_FLAG;
    } else
        LOG_DIS("rts ir=%x\n", dc->ir);

    dc->jmp = JMP_INDIRECT;
    tcg_gen_movi_i32(env_btaken, 1);

    tmp64 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(env_btarget, *(dec_alu_op_b(dc)));
    tcg_gen_extu_i32_i64(tmp64, cpu_R[dc->ra]);
    tcg_gen_add_i64(env_btarget, env_btarget, tmp64);
    tcg_gen_andi_i64(env_btarget, env_btarget, UINT32_MAX);
    tcg_temp_free_i64(tmp64);
}

static int dec_check_fpuv2(DisasContext *dc)
{
    if ((dc->cpu->cfg.use_fpu != 2) && (dc->tb_flags & MSR_EE_FLAG)) {
        tcg_gen_movi_i64(cpu_SR[SR_ESR], ESR_EC_FPU);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }
    /* Nonzero (PVR2_USE_FPU2_MASK) when the FPUv2 insns are available;
       callers bail out when this returns 0.  */
    return (dc->cpu->cfg.use_fpu == 2) ? PVR2_USE_FPU2_MASK : 0;
}

static void dec_fpu(DisasContext *dc)
{
    unsigned int fpu_insn;

    if (trap_illegal(dc, !dc->cpu->cfg.use_fpu)) {
        return;
    }

    fpu_insn = (dc->ir >> 7) & 7;

    switch (fpu_insn) {
        case 0:
            gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 1:
            gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                             cpu_R[dc->rb]);
            break;

        case 2:
            gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 3:
            gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 4:
            switch ((dc->ir >> 4) & 7) {
                case 0:
                    gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 1:
                    gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 2:
                    gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 3:
                    gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 4:
                    gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 5:
                    gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 6:
                    gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                default:
                    qemu_log_mask(LOG_UNIMP,
                                  "unimplemented fcmp fpu_insn=%x pc=%x"
                                  " opc=%x\n",
                                  fpu_insn, dc->pc, dc->opcode);
                    dc->abort_at_next_insn = 1;
                    break;
            }
            break;

        case 5:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 6:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 7:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        default:
            qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
                          " opc=%x\n",
                          fpu_insn, dc->pc, dc->opcode);
            dc->abort_at_next_insn = 1;
            break;
    }
}

static void dec_null(DisasContext *dc)
{
    if (trap_illegal(dc, true)) {
        return;
    }
    qemu_log_mask(LOG_GUEST_ERROR, "unknown insn pc=%x opc=%x\n",
                  dc->pc, dc->opcode);
    dc->abort_at_next_insn = 1;
}

/* Insns connected to FSL or AXI stream attached devices.  */
static void dec_stream(DisasContext *dc)
{
    TCGv_i32 t_id, t_ctrl;
    int ctrl;

    LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
            dc->type_b ? "" : "d", dc->imm);

    if (trap_userspace(dc, true)) {
        return;
    }

    t_id = tcg_temp_new_i32();
    if (dc->type_b) {
        tcg_gen_movi_i32(t_id, dc->imm & 0xf);
        ctrl = dc->imm >> 10;
    } else {
        tcg_gen_andi_i32(t_id, cpu_R[dc->rb], 0xf);
        ctrl = dc->imm >> 5;
    }

    t_ctrl = tcg_const_i32(ctrl);

    if (dc->rd == 0) {
        gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
    } else {
        gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
    }
    tcg_temp_free_i32(t_id);
    tcg_temp_free_i32(t_ctrl);
}

static struct decoder_info {
    struct {
        uint32_t bits;
        uint32_t mask;
    };
    void (*dec)(DisasContext *dc);
} decinfo[] = {
    {DEC_ADD, dec_add},
    {DEC_SUB, dec_sub},
    {DEC_AND, dec_and},
    {DEC_XOR, dec_xor},
    {DEC_OR, dec_or},
    {DEC_BIT, dec_bit},
    {DEC_BARREL, dec_barrel},
    {DEC_LD, dec_load},
    {DEC_ST, dec_store},
    {DEC_IMM, dec_imm},
    {DEC_BR, dec_br},
    {DEC_BCC, dec_bcc},
    {DEC_RTS, dec_rts},
    {DEC_FPU, dec_fpu},
    {DEC_MUL, dec_mul},
    {DEC_DIV, dec_div},
    {DEC_MSR, dec_msr},
    {DEC_STREAM, dec_stream},
    {{0, 0}, dec_null}
};
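
/*
 * decode() walks this table and dispatches on
 * (dc->opcode & mask) == bits; the terminating {{0, 0}, dec_null}
 * entry matches any opcode, so unrecognized insns fall through to
 * dec_null.
 */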

static inline void decode(DisasContext *dc, uint32_t ir)
{
    int i;

    dc->ir = ir;
    LOG_DIS("%8.8x\t", dc->ir);

    if (ir == 0) {
        trap_illegal(dc, dc->cpu->env.pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK);
        /* Don't decode nop/zero instructions any further.  */
        return;
    }

    /* Bit 2 (in MicroBlaze's MSB-first numbering; bit 29 LSB-first) seems
       to indicate the insn type: type B (immediate) when set.  */
    dc->type_b = ir & (1 << 29);

    dc->opcode = EXTRACT_FIELD(ir, 26, 31);
    dc->rd = EXTRACT_FIELD(ir, 21, 25);
    dc->ra = EXTRACT_FIELD(ir, 16, 20);
    dc->rb = EXTRACT_FIELD(ir, 11, 15);
    dc->imm = EXTRACT_FIELD(ir, 0, 15);

    /* Large switch for all insns.  */
    for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
        if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
            decinfo[i].dec(dc);
            break;
        }
    }
}

/* generate intermediate code for basic block 'tb'.  */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
{
    CPUMBState *env = cs->env_ptr;
    MicroBlazeCPU *cpu = env_archcpu(env);
    uint32_t pc_start;
    struct DisasContext ctx;
    struct DisasContext *dc = &ctx;
    uint32_t page_start, org_flags;
    uint32_t npc;
    int num_insns;

    pc_start = tb->pc;
    dc->cpu = cpu;
    dc->tb = tb;
    org_flags = dc->synced_flags = dc->tb_flags = tb->flags;

    dc->is_jmp = DISAS_NEXT;
    dc->jmp = 0;
    dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
    if (dc->delayed_branch) {
        dc->jmp = JMP_INDIRECT;
    }
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->cpustate_changed = 0;
    dc->abort_at_next_insn = 0;

    if (pc_start & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n", pc_start);
    }

    page_start = pc_start & TARGET_PAGE_MASK;
    num_insns = 0;

    gen_tb_start(tb);
    do
    {
        tcg_gen_insn_start(dc->pc);
        num_insns++;

#if SIM_COMPAT
        if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
            tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc);
            gen_helper_debug();
        }
#endif

        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
            t_gen_raise_exception(dc, EXCP_DEBUG);
            dc->is_jmp = DISAS_UPDATE;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing.  */
            dc->pc += 4;
            break;
        }

        /* Pretty disas.  */
        LOG_DIS("%8.8x:\t", dc->pc);

        if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
            gen_io_start();
        }

        dc->clear_imm = 1;
        decode(dc, cpu_ldl_code(env, dc->pc));
        if (dc->clear_imm)
            dc->tb_flags &= ~IMM_FLAG;
        dc->pc += 4;

        if (dc->delayed_branch) {
            dc->delayed_branch--;
            if (!dc->delayed_branch) {
                if (dc->tb_flags & DRTI_FLAG)
                    do_rti(dc);
                if (dc->tb_flags & DRTB_FLAG)
                    do_rtb(dc);
                if (dc->tb_flags & DRTE_FLAG)
                    do_rte(dc);
                /* Clear the delay slot flag.  */
                dc->tb_flags &= ~D_FLAG;
                /* If it is a direct jump, try direct chaining.  */
                if (dc->jmp == JMP_INDIRECT) {
                    TCGv_i64 tmp_pc = tcg_const_i64(dc->pc);
                    eval_cond_jmp(dc, env_btarget, tmp_pc);
                    tcg_temp_free_i64(tmp_pc);

                    dc->is_jmp = DISAS_JUMP;
                } else if (dc->jmp == JMP_DIRECT) {
                    t_sync_flags(dc);
                    gen_goto_tb(dc, 0, dc->jmp_pc);
                    dc->is_jmp = DISAS_TB_JUMP;
                } else if (dc->jmp == JMP_DIRECT_CC) {
                    TCGLabel *l1 = gen_new_label();
                    t_sync_flags(dc);
                    /* Conditional jmp.  */
                    tcg_gen_brcondi_i32(TCG_COND_NE, env_btaken, 0, l1);
                    gen_goto_tb(dc, 1, dc->pc);
                    gen_set_label(l1);
                    gen_goto_tb(dc, 0, dc->jmp_pc);

                    dc->is_jmp = DISAS_TB_JUMP;
                }
                break;
            }
        }
        if (cs->singlestep_enabled) {
            break;
        }
    } while (!dc->is_jmp && !dc->cpustate_changed
             && !tcg_op_buf_full()
             && !singlestep
             && (dc->pc - page_start < TARGET_PAGE_SIZE)
             && num_insns < max_insns);

    npc = dc->pc;
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->tb_flags & D_FLAG) {
            dc->is_jmp = DISAS_UPDATE;
            tcg_gen_movi_i64(cpu_SR[SR_PC], npc);
            sync_jmpstate(dc);
        } else
            npc = dc->jmp_pc;
    }

    /* Force an update if the per-tb cpu state has changed.  */
    if (dc->is_jmp == DISAS_NEXT
        && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
        dc->is_jmp = DISAS_UPDATE;
        tcg_gen_movi_i64(cpu_SR[SR_PC], npc);
    }
    t_sync_flags(dc);

    if (unlikely(cs->singlestep_enabled)) {
        TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);

        if (dc->is_jmp != DISAS_JUMP) {
            tcg_gen_movi_i64(cpu_SR[SR_PC], npc);
        }
        gen_helper_raise_exception(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
    } else {
        switch (dc->is_jmp) {
            case DISAS_NEXT:
                gen_goto_tb(dc, 1, npc);
                break;
            default:
            case DISAS_JUMP:
            case DISAS_UPDATE:
                /* indicate that the hash table must be used
                   to find the next TB */
                tcg_gen_exit_tb(NULL, 0);
                break;
            case DISAS_TB_JUMP:
                /* nothing more to generate */
                break;
        }
    }
    gen_tb_end(tb, num_insns);

    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;

#ifdef DEBUG_DISAS
#if !SIM_COMPAT
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        qemu_log_lock();
        qemu_log("--------------\n");
        log_target_disas(cs, pc_start, dc->pc - pc_start);
        qemu_log_unlock();
    }
#endif
#endif
    assert(!dc->abort_at_next_insn);
}

void mb_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    int i;

    if (!env) {
        return;
    }

    qemu_fprintf(f, "IN: PC=%" PRIx64 " %s\n",
                 env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC]));
    qemu_fprintf(f, "rmsr=%" PRIx64 " resr=%" PRIx64 " rear=%" PRIx64 " "
                 "debug=%x imm=%x iflags=%x fsr=%" PRIx64 "\n",
                 env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
                 env->debug, env->imm, env->iflags, env->sregs[SR_FSR]);
    qemu_fprintf(f, "btaken=%d btarget=%" PRIx64 " mode=%s(saved=%s) "
                 "eip=%d ie=%d\n",
                 env->btaken, env->btarget,
                 (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
                 (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
                 (bool)(env->sregs[SR_MSR] & MSR_EIP),
                 (bool)(env->sregs[SR_MSR] & MSR_IE));

    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
        if ((i + 1) % 4 == 0)
            qemu_fprintf(f, "\n");
    }
    qemu_fprintf(f, "\n\n");
}

void mb_tcg_init(void)
{
    int i;

    env_debug = tcg_global_mem_new_i32(cpu_env,
                    offsetof(CPUMBState, debug),
                    "debug0");
    env_iflags = tcg_global_mem_new_i32(cpu_env,
                    offsetof(CPUMBState, iflags),
                    "iflags");
    env_imm = tcg_global_mem_new_i32(cpu_env,
                    offsetof(CPUMBState, imm),
                    "imm");
    env_btarget = tcg_global_mem_new_i64(cpu_env,
                     offsetof(CPUMBState, btarget),
                     "btarget");
    env_btaken = tcg_global_mem_new_i32(cpu_env,
                     offsetof(CPUMBState, btaken),
                     "btaken");
    env_res_addr = tcg_global_mem_new(cpu_env,
                     offsetof(CPUMBState, res_addr),
                     "res_addr");
    env_res_val = tcg_global_mem_new_i32(cpu_env,
                     offsetof(CPUMBState, res_val),
                     "res_val");
    for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
        cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
                          offsetof(CPUMBState, regs[i]),
                          regnames[i]);
    }
    for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
        cpu_SR[i] = tcg_global_mem_new_i64(cpu_env,
                          offsetof(CPUMBState, sregs[i]),
                          special_regnames[i]);
    }
}

void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->sregs[SR_PC] = data[0];
}
1852