/*
 *  Xilinx MicroBlaze emulation for qemu: main translation routines.
 *
 *  Copyright (c) 2009 Edgar E. Iglesias.
 *  Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/helper-proto.h"
#include "microblaze-decode.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"


#define SIM_COMPAT 0
#define DISAS_GNU 1
#define DISAS_MB 1
#if DISAS_MB && !SIM_COMPAT
#  define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DIS(...) do { } while (0)
#endif

#define D(x)

#define EXTRACT_FIELD(src, start, end) \
            (((src) >> start) & ((1 << (end - start + 1)) - 1))
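
/*
 * Example: EXTRACT_FIELD(ir, 21, 25) selects bits [25:21] of ir, so for
 * ir = 0x00c00000 it yields (0x00c00000 >> 21) & 0x1f == 0x6.
 */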

static TCGv env_debug;
static TCGv_env cpu_env;
static TCGv cpu_R[32];
static TCGv cpu_SR[18];
static TCGv env_imm;
static TCGv env_btaken;
static TCGv env_btarget;
static TCGv env_iflags;
static TCGv env_res_addr;
static TCGv env_res_val;

#include "exec/gen-icount.h"

/* This is the state at translation time.  */
typedef struct DisasContext {
    MicroBlazeCPU *cpu;
    target_ulong pc;

    /* Decoder.  */
    int type_b;
    uint32_t ir;
    uint8_t opcode;
    uint8_t rd, ra, rb;
    uint16_t imm;

    unsigned int cpustate_changed;
    unsigned int delayed_branch;
    unsigned int tb_flags, synced_flags; /* tb dependent flags.  */
    unsigned int clear_imm;
    int is_jmp;

#define JMP_NOJMP     0
#define JMP_DIRECT    1
#define JMP_DIRECT_CC 2
#define JMP_INDIRECT  3
    unsigned int jmp;
    uint32_t jmp_pc;

    int abort_at_next_insn;
    int nr_nops;
    struct TranslationBlock *tb;
    int singlestep_enabled;
} DisasContext;

static const char *regnames[] =
{
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};

static const char *special_regnames[] =
{
    "rpc", "rmsr", "sr2", "sr3", "sr4", "sr5", "sr6", "sr7",
    "sr8", "sr9", "sr10", "sr11", "sr12", "sr13", "sr14", "sr15",
    "sr16", "sr17", "sr18"
};

static inline void t_sync_flags(DisasContext *dc)
{
    /* Synch the tb dependent flags between translator and runtime.  */
    if (dc->tb_flags != dc->synced_flags) {
        tcg_gen_movi_tl(env_iflags, dc->tb_flags);
        dc->synced_flags = dc->tb_flags;
    }
}

static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    t_sync_flags(dc);
    tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
    gen_helper_raise_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    dc->is_jmp = DISAS_UPDATE;
}

static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
{
#ifndef CONFIG_USER_ONLY
    return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (use_goto_tb(dc, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb((uintptr_t)dc->tb + n);
    } else {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb(0);
    }
}

static void read_carry(DisasContext *dc, TCGv d)
{
    tcg_gen_shri_tl(d, cpu_SR[SR_MSR], 31);
}

/*
 * write_carry sets the carry bits in MSR based on bit 0 of v.
 * v[31:1] are ignored.
 */
static void write_carry(DisasContext *dc, TCGv v)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_shli_tl(t0, v, 31);
    tcg_gen_sari_tl(t0, t0, 31);
    tcg_gen_andi_tl(t0, t0, (MSR_C | MSR_CC));
    tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR],
                    ~(MSR_C | MSR_CC));
    tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0);
    tcg_temp_free(t0);
}
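
/*
 * Carry plumbing used by the ALU decoders below: read_carry() pulls the
 * carry copy out of MSR bit 31, gen_helper_carry computes the carry-out
 * of an add into a temp, and write_carry() commits bit 0 of its argument
 * to both MSR[C] and MSR[CC].
 */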

static void write_carryi(DisasContext *dc, bool carry)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_movi_tl(t0, carry);
    write_carry(dc, t0);
    tcg_temp_free(t0);
}

/* True if ALU operand b is a small immediate that may deserve
   faster treatment.  */
static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
{
    /* Immediate insn without the imm prefix ?  */
    return dc->type_b && !(dc->tb_flags & IMM_FLAG);
}

static inline TCGv *dec_alu_op_b(DisasContext *dc)
{
    if (dc->type_b) {
        if (dc->tb_flags & IMM_FLAG)
            tcg_gen_ori_tl(env_imm, env_imm, dc->imm);
        else
            tcg_gen_movi_tl(env_imm, (int32_t)((int16_t)dc->imm));
        return &env_imm;
    } else
        return &cpu_R[dc->rb];
}
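
/*
 * Example: a type-b insn with no pending prefix sign-extends its 16-bit
 * immediate, so "addi rD, rA, -4" yields operand B = 0xfffffffc.  With a
 * preceding "imm 0x1234", env_imm already holds 0x12340000 and the low
 * half is OR'ed in: an addi immediate of 0x5678 yields B = 0x12345678.
 */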

static void dec_add(DisasContext *dc)
{
    unsigned int k, c;
    TCGv cf;

    k = dc->opcode & 4;
    c = dc->opcode & 2;

    LOG_DIS("add%s%s%s r%d r%d r%d\n",
            dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
            dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new();

                read_carry(dc, cf);
                tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry.  */
    cf = tcg_temp_new();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_tl(cf, 0);
    }

    if (dc->rd) {
        TCGv ncf = tcg_temp_new();
        gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free(ncf);
    } else {
        gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free(cf);
}

static void dec_sub(DisasContext *dc)
{
    unsigned int u, cmp, k, c;
    TCGv cf, na;

    u = dc->imm & 2;
    k = dc->opcode & 4;
    c = dc->opcode & 2;
    cmp = (dc->imm & 1) && (!dc->type_b) && k;

    if (cmp) {
        LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
        if (dc->rd) {
            if (u)
                gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            else
                gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        }
        return;
    }

    LOG_DIS("sub%s%s r%d, r%d r%d\n",
             k ? "k" : "",  c ? "c" : "", dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_sub_tl(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new();

                read_carry(dc, cf);
                tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry. And complement a into na.  */
    cf = tcg_temp_new();
    na = tcg_temp_new();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_tl(cf, 1);
    }

    /* d = b + ~a + c. carry defaults to 1.  */
    tcg_gen_not_tl(na, cpu_R[dc->ra]);

    if (dc->rd) {
        TCGv ncf = tcg_temp_new();
        gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_tl(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free(ncf);
    } else {
        gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free(cf);
    tcg_temp_free(na);
}
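
/*
 * Worked example of the identity above: with ra = 3 and rb = 5, rsub
 * computes 5 + ~3 + 1 = 5 + 0xfffffffc + 1 = 0x1_0000_0002, i.e. the
 * result 2 with a carry-out of 1 (carry set means "no borrow").
 */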

static void dec_pattern(DisasContext *dc)
{
    unsigned int mode;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }

    mode = dc->opcode & 3;
    switch (mode) {
        case 0:
            /* pcmpbf.  */
            LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd)
                gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                tcg_gen_setcond_tl(TCG_COND_EQ, cpu_R[dc->rd],
                                   cpu_R[dc->ra], cpu_R[dc->rb]);
            }
            break;
        case 3:
            LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                tcg_gen_setcond_tl(TCG_COND_NE, cpu_R[dc->rd],
                                   cpu_R[dc->ra], cpu_R[dc->rb]);
            }
            break;
        default:
            cpu_abort(CPU(dc->cpu),
                      "unsupported pattern insn opcode=%x\n", dc->opcode);
            break;
    }
}

static void dec_and(DisasContext *dc)
{
    unsigned int not;

    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    not = dc->opcode & (1 << 1);
    LOG_DIS("and%s\n", not ? "n" : "");

    if (!dc->rd)
        return;

    if (not) {
        tcg_gen_andc_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    } else
        tcg_gen_and_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_or(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
    if (dc->rd)
        tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_xor(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("xor r%d\n", dc->rd);
    if (dc->rd)
        tcg_gen_xor_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static inline void msr_read(DisasContext *dc, TCGv d)
{
    tcg_gen_mov_tl(d, cpu_SR[SR_MSR]);
}

static inline void msr_write(DisasContext *dc, TCGv v)
{
    TCGv t;

    t = tcg_temp_new();
    dc->cpustate_changed = 1;
    /* PVR bit is not writable.  */
    tcg_gen_andi_tl(t, v, ~MSR_PVR);
    tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
    /* OR in the masked copy (t, not the raw v) so MSR_PVR stays intact.  */
    tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t);
    tcg_temp_free(t);
}

static void dec_msr(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv t0, t1;
    unsigned int sr, to, rn;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    sr = dc->imm & ((1 << 14) - 1);
    to = dc->imm & (1 << 14);
    dc->type_b = 1;
    if (to)
        dc->cpustate_changed = 1;

    /* msrclr and msrset.  */
    if (!(dc->imm & (1 << 15))) {
        unsigned int clr = dc->ir & (1 << 16);

        LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
                dc->rd, dc->imm);

        if (!(dc->cpu->env.pvr.regs[2] & PVR2_USE_MSR_INSTR)) {
            /* nop??? */
            return;
        }

        if ((dc->tb_flags & MSR_EE_FLAG)
            && mem_index == MMU_USER_IDX && (dc->imm != 4 && dc->imm != 0)) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        if (dc->rd)
            msr_read(dc, cpu_R[dc->rd]);

        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        msr_read(dc, t0);
        tcg_gen_mov_tl(t1, *(dec_alu_op_b(dc)));

        if (clr) {
            tcg_gen_not_tl(t1, t1);
            tcg_gen_and_tl(t0, t0, t1);
        } else
            tcg_gen_or_tl(t0, t0, t1);
        msr_write(dc, t0);
        tcg_temp_free(t0);
        tcg_temp_free(t1);
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
        dc->is_jmp = DISAS_UPDATE;
        return;
    }

    if (to) {
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }
    }

#if !defined(CONFIG_USER_ONLY)
    /* Catch read/writes to the mmu block.  */
    if ((sr & ~0xff) == 0x1000) {
        sr &= 7;
        LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        if (to)
            gen_helper_mmu_write(cpu_env, tcg_const_tl(sr), cpu_R[dc->ra]);
        else
            gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tcg_const_tl(sr));
        return;
    }
#endif

    if (to) {
        LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        switch (sr) {
            case 0:
                break;
            case 1:
                msr_write(dc, cpu_R[dc->ra]);
                break;
            case 0x3:
                tcg_gen_mov_tl(cpu_SR[SR_EAR], cpu_R[dc->ra]);
                break;
            case 0x5:
                tcg_gen_mov_tl(cpu_SR[SR_ESR], cpu_R[dc->ra]);
                break;
            case 0x7:
                tcg_gen_andi_tl(cpu_SR[SR_FSR], cpu_R[dc->ra], 31);
                break;
            case 0x800:
                tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, shr));
                break;
            default:
                cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
                break;
        }
    } else {
        LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);

        switch (sr) {
            case 0:
                tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);
                break;
            case 1:
                msr_read(dc, cpu_R[dc->rd]);
                break;
            case 0x3:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_EAR]);
                break;
            case 0x5:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_ESR]);
                break;
            case 0x7:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_FSR]);
                break;
            case 0xb:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_BTR]);
                break;
            case 0x800:
                tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, shr));
                break;
            case 0x2000 ... 0x200c:
                rn = sr & 0xf;
                tcg_gen_ld_tl(cpu_R[dc->rd],
                              cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
                break;
            default:
                cpu_abort(cs, "unknown mfs reg %x\n", sr);
                break;
        }
    }

    if (dc->rd == 0) {
        tcg_gen_movi_tl(cpu_R[0], 0);
    }
}

/* 64-bit signed mul, lower result in d and upper in d2.  */
static void t_gen_muls(TCGv d, TCGv d2, TCGv a, TCGv b)
{
    TCGv_i64 t0, t1;

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(t0, a);
    tcg_gen_ext_i32_i64(t1, b);
    tcg_gen_mul_i64(t0, t0, t1);

    tcg_gen_extrl_i64_i32(d, t0);
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_extrl_i64_i32(d2, t0);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}
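
/*
 * Note: the same result pair could be produced in a single TCG op,
 * e.g. tcg_gen_muls2_i32(d, d2, a, b) (and tcg_gen_mulu2_i32 for the
 * unsigned variant below); this file keeps the explicit widening form.
 */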

/* 64-bit unsigned mul, lower result in d and upper in d2.  */
static void t_gen_mulu(TCGv d, TCGv d2, TCGv a, TCGv b)
{
    TCGv_i64 t0, t1;

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(t0, a);
    tcg_gen_extu_i32_i64(t1, b);
    tcg_gen_mul_i64(t0, t0, t1);

    tcg_gen_extrl_i64_i32(d, t0);
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_extrl_i64_i32(d2, t0);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}

/* Multiplier unit.  */
static void dec_mul(DisasContext *dc)
{
    TCGv d[2];
    unsigned int subcode;

    if ((dc->tb_flags & MSR_EE_FLAG)
         && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
         && !(dc->cpu->env.pvr.regs[0] & PVR0_USE_HW_MUL_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    subcode = dc->imm & 3;
    d[0] = tcg_temp_new();
    d[1] = tcg_temp_new();

    if (dc->type_b) {
        LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
        t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        goto done;
    }

    /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2.  */
    if (subcode >= 1 && subcode <= 3
        && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_MUL64_MASK))) {
        /* nop??? */
    }

    switch (subcode) {
        case 0:
            LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 1:
            LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            /* Note: modelled with the plain signed multiply; the true
               signed x unsigned high product is not implemented here.  */
            t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 3:
            LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_mulu(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "unknown MUL insn %x\n", subcode);
            break;
    }
done:
    tcg_temp_free(d[0]);
    tcg_temp_free(d[1]);
}

/* Div unit.  */
static void dec_div(DisasContext *dc)
{
    unsigned int u;

    u = dc->imm & 2;
    LOG_DIS("div\n");

    if ((dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !((dc->cpu->env.pvr.regs[0] & PVR0_USE_DIV_MASK))) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }

    if (u)
        gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    else
        gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    if (!dc->rd)
        tcg_gen_movi_tl(cpu_R[dc->rd], 0);
}

static void dec_barrel(DisasContext *dc)
{
    TCGv t0;
    unsigned int s, t;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !(dc->cpu->env.pvr.regs[0] & PVR0_USE_BARREL_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    s = dc->imm & (1 << 10);
    t = dc->imm & (1 << 9);

    LOG_DIS("bs%s%s r%d r%d r%d\n",
            s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);

    t0 = tcg_temp_new();

    tcg_gen_mov_tl(t0, *(dec_alu_op_b(dc)));
    tcg_gen_andi_tl(t0, t0, 31);

    if (s)
        tcg_gen_shl_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
    else {
        if (t)
            tcg_gen_sar_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
        else
            tcg_gen_shr_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
    }
}

static void dec_bit(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv t0;
    unsigned int op;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    op = dc->ir & ((1 << 9) - 1);
    switch (op) {
        case 0x21:
            /* src.  */
            t0 = tcg_temp_new();

            LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_andi_tl(t0, cpu_SR[SR_MSR], MSR_CC);
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->rd], t0);
            }
            tcg_temp_free(t0);
            break;

        case 0x1:
        case 0x41:
            /* sra (op 0x1), srl (op 0x41).  */
            LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);

            /* Update carry. Note that write carry only looks at the LSB.  */
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                if (op == 0x41)
                    tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                else
                    tcg_gen_sari_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
            }
            break;
        case 0x60:
            LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x61:
            LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x64:
        case 0x66:
        case 0x74:
        case 0x76:
            /* wdc.  */
            LOG_DIS("wdc r%d\n", dc->ra);
            if ((dc->tb_flags & MSR_EE_FLAG)
                 && mem_index == MMU_USER_IDX) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
                return;
            }
            break;
        case 0x68:
            /* wic.  */
            LOG_DIS("wic r%d\n", dc->ra);
            if ((dc->tb_flags & MSR_EE_FLAG)
                 && mem_index == MMU_USER_IDX) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
                return;
            }
            break;
        case 0xe0:
            /* clz.  */
            if ((dc->tb_flags & MSR_EE_FLAG)
                && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
                && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
            }
            if (dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR) {
                gen_helper_clz(cpu_R[dc->rd], cpu_R[dc->ra]);
            }
            break;
        case 0x1e0:
            /* swapb.  */
            LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x1e2:
            /* swaph.  */
            LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
            break;
        default:
            cpu_abort(cs, "unknown bit insn pc=%x op=%x rd=%d ra=%d rb=%d\n",
                      dc->pc, op, dc->rd, dc->ra, dc->rb);
            break;
    }
}

static inline void sync_jmpstate(DisasContext *dc)
{
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->jmp == JMP_DIRECT) {
            tcg_gen_movi_tl(env_btaken, 1);
        }
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
    }
}

static void dec_imm(DisasContext *dc)
{
    LOG_DIS("imm %x\n", dc->imm << 16);
    tcg_gen_movi_tl(env_imm, (dc->imm << 16));
    dc->tb_flags |= IMM_FLAG;
    dc->clear_imm = 0;
}
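
/*
 * Example sequence:
 *     imm  0x1234          -> env_imm = 0x12340000, IMM_FLAG set
 *     addi r3, r4, 0x5678  -> operand B = 0x12340000 | 0x5678 = 0x12345678
 * dc->clear_imm is left at 0 here so the main loop keeps IMM_FLAG alive
 * for exactly the one following insn.
 */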

static inline TCGv *compute_ldst_addr(DisasContext *dc, TCGv *t)
{
    unsigned int extimm = dc->tb_flags & IMM_FLAG;
    /* Should be set to one if r1 is used by loadstores.  */
    int stackprot = 0;

    /* All load/stores use ra.  */
    if (dc->ra == 1 && dc->cpu->cfg.stackprot) {
        stackprot = 1;
    }

    /* Treat the common cases first.  */
    if (!dc->type_b) {
        /* If any of the regs is r0, return a ptr to the other.  */
        if (dc->ra == 0) {
            return &cpu_R[dc->rb];
        } else if (dc->rb == 0) {
            return &cpu_R[dc->ra];
        }

        if (dc->rb == 1 && dc->cpu->cfg.stackprot) {
            stackprot = 1;
        }

        *t = tcg_temp_new();
        tcg_gen_add_tl(*t, cpu_R[dc->ra], cpu_R[dc->rb]);

        if (stackprot) {
            gen_helper_stackprot(cpu_env, *t);
        }
        return t;
    }
    /* Immediate.  */
    if (!extimm) {
        if (dc->imm == 0) {
            return &cpu_R[dc->ra];
        }
        *t = tcg_temp_new();
        tcg_gen_movi_tl(*t, (int32_t)((int16_t)dc->imm));
        tcg_gen_add_tl(*t, cpu_R[dc->ra], *t);
    } else {
        *t = tcg_temp_new();
        tcg_gen_add_tl(*t, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    }

    if (stackprot) {
        gen_helper_stackprot(cpu_env, *t);
    }
    return t;
}
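
/*
 * Ownership note: compute_ldst_addr() returns either a pointer to a live
 * architectural register (when one operand is r0, or imm == 0) or &t
 * after filling the caller-provided temp; callers free t only when the
 * returned pointer is &t (see the tails of dec_load()/dec_store()).
 */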

static void dec_load(DisasContext *dc)
{
    TCGv t, v, *addr;
    unsigned int size, rev = 0, ex = 0;
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        rev = (dc->ir >> 9) & 1;
        ex = (dc->ir >> 10) & 1;
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("l%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "");

    t_sync_flags(dc);
    addr = compute_ldst_addr(dc, &t);

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 01
                   11 -> 00 */
                TCGv low = tcg_temp_new();

                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_mov_tl(t, *addr);
                    addr = &t;
                }

                tcg_gen_andi_tl(low, t, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(t, t, ~3);
                tcg_gen_or_tl(t, t, low);
                tcg_gen_mov_tl(env_imm, t);
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_xori_tl(t, *addr, 2);
                    addr = &t;
                } else {
                    tcg_gen_xori_tl(t, t, 2);
                }
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }
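
    /*
     * For both sizes the remapping is an XOR in disguise:
     * 3 - (addr & 3) == (addr & 3) ^ 3 for bytes, and the halfword case
     * uses xori with 2 directly.
     */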

    /* lwx does not throw unaligned access errors, so force alignment */
    if (ex) {
        /* Force addr into the temp.  */
        if (addr != &t) {
            t = tcg_temp_new();
            tcg_gen_mov_tl(t, *addr);
            addr = &t;
        }
        tcg_gen_andi_tl(t, t, ~3);
    }

    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);

    /* Verify alignment if needed.  */
    /*
     * Microblaze gives MMU faults priority over faults due to
     * unaligned addresses. That's why we speculatively do the load
     * into v. If the load succeeds, we verify alignment of the
     * address and if that succeeds we write into the destination reg.
     */
    v = tcg_temp_new();
    tcg_gen_qemu_ld_tl(v, *addr, cpu_mmu_index(&dc->cpu->env, false), mop);

    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
        gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
                            tcg_const_tl(0), tcg_const_tl(size - 1));
    }

    if (ex) {
        tcg_gen_mov_tl(env_res_addr, *addr);
        tcg_gen_mov_tl(env_res_val, v);
    }
    if (dc->rd) {
        tcg_gen_mov_tl(cpu_R[dc->rd], v);
    }
    tcg_temp_free(v);

    if (ex) { /* lwx */
        /* no support for AXI exclusive so always clear C */
        write_carryi(dc, 0);
    }

    if (addr == &t)
        tcg_temp_free(t);
}
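
/*
 * lwx records a reservation in env_res_addr/env_res_val; the matching swx
 * in dec_store() below stores only if the reserved address still matches
 * and the word in memory still equals the loaded value, signalling
 * success by clearing MSR[C] (it is pre-set to 1 on the failure path).
 */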

static void dec_store(DisasContext *dc)
{
    TCGv t, *addr, swx_addr;
    TCGLabel *swx_skip = NULL;
    unsigned int size, rev = 0, ex = 0;
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        rev = (dc->ir >> 9) & 1;
        ex = (dc->ir >> 10) & 1;
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("s%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "");
    t_sync_flags(dc);
    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);
    addr = compute_ldst_addr(dc, &t);

    swx_addr = tcg_temp_local_new();
    if (ex) { /* swx */
        TCGv tval;

        /* Force addr into the swx_addr. */
        tcg_gen_mov_tl(swx_addr, *addr);
        addr = &swx_addr;
        /* swx does not throw unaligned access errors, so force alignment */
        tcg_gen_andi_tl(swx_addr, swx_addr, ~3);

        write_carryi(dc, 1);
        swx_skip = gen_new_label();
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, swx_addr, swx_skip);

        /* Compare the value loaded at lwx with current contents of
           the reserved location.
           FIXME: This only works for system emulation where we can expect
           this compare and the following write to be atomic. For user
           emulation we need to add atomicity between threads.  */
        tval = tcg_temp_new();
        tcg_gen_qemu_ld_tl(tval, swx_addr, cpu_mmu_index(&dc->cpu->env, false),
                           MO_TEUL);
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_val, tval, swx_skip);
        write_carryi(dc, 0);
        tcg_temp_free(tval);
    }

    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 01
                   11 -> 00 */
                TCGv low = tcg_temp_new();

                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_mov_tl(t, *addr);
                    addr = &t;
                }

                tcg_gen_andi_tl(low, t, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(t, t, ~3);
                tcg_gen_or_tl(t, t, low);
                tcg_gen_mov_tl(env_imm, t);
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_xori_tl(t, *addr, 2);
                    addr = &t;
                } else {
                    tcg_gen_xori_tl(t, t, 2);
                }
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }
    tcg_gen_qemu_st_tl(cpu_R[dc->rd], *addr,
                       cpu_mmu_index(&dc->cpu->env, false), mop);

    /* Verify alignment if needed.  */
    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
        /* FIXME: if the alignment is wrong, we should restore the value
         *        in memory. One possible way to achieve this is to probe
         *        the MMU prior to the memory access, that way we could put
         *        the alignment checks in between the probe and the mem
         *        access.
         */
        gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
                            tcg_const_tl(1), tcg_const_tl(size - 1));
    }

    if (ex) {
        gen_set_label(swx_skip);
    }
    tcg_temp_free(swx_addr);

    if (addr == &t)
        tcg_temp_free(t);
}
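
/*
 * Guests typically pair lwx/swx in a retry loop; a minimal sketch of an
 * atomic increment in MicroBlaze assembly (the standard lwx/swx idiom):
 *
 *     retry:
 *         lwx   r5, r6, r0      ; load word at r6, set reservation
 *         addik r5, r5, 1       ; increment (k: keep carry)
 *         swx   r5, r6, r0      ; conditional store, C = 0 on success
 *         addc  r7, r0, r0      ; r7 = carry flag
 *         bnei  r7, retry       ; retry if the store was not performed
 */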

static inline void eval_cc(DisasContext *dc, unsigned int cc,
                           TCGv d, TCGv a, TCGv b)
{
    switch (cc) {
        case CC_EQ:
            tcg_gen_setcond_tl(TCG_COND_EQ, d, a, b);
            break;
        case CC_NE:
            tcg_gen_setcond_tl(TCG_COND_NE, d, a, b);
            break;
        case CC_LT:
            tcg_gen_setcond_tl(TCG_COND_LT, d, a, b);
            break;
        case CC_LE:
            tcg_gen_setcond_tl(TCG_COND_LE, d, a, b);
            break;
        case CC_GE:
            tcg_gen_setcond_tl(TCG_COND_GE, d, a, b);
            break;
        case CC_GT:
            tcg_gen_setcond_tl(TCG_COND_GT, d, a, b);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
            break;
    }
}

static void eval_cond_jmp(DisasContext *dc, TCGv pc_true, TCGv pc_false)
{
    TCGLabel *l1 = gen_new_label();
    /* Conditional jmp.  */
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_false);
    tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, l1);
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_true);
    gen_set_label(l1);
}
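
/*
 * Equivalent single-op sketch (not what this file does): the same select,
 * PC = btaken ? pc_true : pc_false, could be written as
 *     tcg_gen_movcond_tl(TCG_COND_NE, cpu_SR[SR_PC], env_btaken,
 *                        tcg_const_tl(0), pc_true, pc_false);
 * the brcond-over-label form above has identical semantics.
 */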

static void dec_bcc(DisasContext *dc)
{
    unsigned int cc;
    unsigned int dslot;

    cc = EXTRACT_FIELD(dc->ir, 21, 23);
    dslot = dc->ir & (1 << 25);
    LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }

    if (dec_alu_op_b_is_small_imm(dc)) {
        int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend.  */

        tcg_gen_movi_tl(env_btarget, dc->pc + offset);
        dc->jmp = JMP_DIRECT_CC;
        dc->jmp_pc = dc->pc + offset;
    } else {
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btarget, dc->pc);
        tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
    }
    eval_cc(dc, cc, env_btaken, cpu_R[dc->ra], tcg_const_tl(0));
}

static void dec_br(DisasContext *dc)
{
    unsigned int dslot, link, abs, mbar;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    dslot = dc->ir & (1 << 20);
    abs = dc->ir & (1 << 19);
    link = dc->ir & (1 << 18);

    /* Memory barrier.  */
    mbar = (dc->ir >> 16) & 31;
    if (mbar == 2 && dc->imm == 4) {
        /* mbar IMM & 16 decodes to sleep; the mbar immediate is encoded
           in the rd field.  */
        if (dc->rd & 16) {
            TCGv_i32 tmp_hlt = tcg_const_i32(EXCP_HLT);
            TCGv_i32 tmp_1 = tcg_const_i32(1);

            LOG_DIS("sleep\n");

            t_sync_flags(dc);
            tcg_gen_st_i32(tmp_1, cpu_env,
                           -offsetof(MicroBlazeCPU, env)
                           +offsetof(CPUState, halted));
            tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
            gen_helper_raise_exception(cpu_env, tmp_hlt);
            tcg_temp_free_i32(tmp_hlt);
            tcg_temp_free_i32(tmp_1);
            return;
        }
        LOG_DIS("mbar %d\n", dc->rd);
        /* Break the TB.  */
        dc->cpustate_changed = 1;
        return;
    }

    LOG_DIS("br%s%s%s%s imm=%x\n",
             abs ? "a" : "", link ? "l" : "",
             dc->type_b ? "i" : "", dslot ? "d" : "",
             dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }
    if (link && dc->rd)
        tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);

    dc->jmp = JMP_INDIRECT;
    if (abs) {
        tcg_gen_movi_tl(env_btaken, 1);
        tcg_gen_mov_tl(env_btarget, *(dec_alu_op_b(dc)));
        if (link && !dslot) {
            if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
                t_gen_raise_exception(dc, EXCP_BREAK);
            if (dc->imm == 0) {
                if ((dc->tb_flags & MSR_EE_FLAG) && mem_index == MMU_USER_IDX) {
                    tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                    t_gen_raise_exception(dc, EXCP_HW_EXCP);
                    return;
                }

                t_gen_raise_exception(dc, EXCP_DEBUG);
            }
        }
    } else {
        if (dec_alu_op_b_is_small_imm(dc)) {
            dc->jmp = JMP_DIRECT;
            dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
        } else {
            tcg_gen_movi_tl(env_btaken, 1);
            tcg_gen_movi_tl(env_btarget, dc->pc);
            tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
        }
    }
}

static inline void do_rti(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_shri_tl(t0, cpu_SR[SR_MSR], 1);
    tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_IE);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTI_FLAG;
}

static inline void do_rtb(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_SR[SR_MSR], ~MSR_BIP);
    tcg_gen_shri_tl(t0, t1, 1);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTB_FLAG;
}

static inline void do_rte(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();

    tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_EE);
    tcg_gen_andi_tl(t1, t1, ~MSR_EIP);
    tcg_gen_shri_tl(t0, t1, 1);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTE_FLAG;
}
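
/*
 * Summary of the three return paths above: rtid/rtbd/rted all restore
 * MSR[UM] and MSR[VM] from their saved copies MSR[UMS]/MSR[VMS] (one bit
 * position up, hence the shri by 1); rtid additionally sets IE, rtbd
 * clears BIP, and rted sets EE and clears EIP.
 */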

static void dec_rts(DisasContext *dc)
{
    unsigned int b_bit, i_bit, e_bit;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    i_bit = dc->ir & (1 << 21);
    b_bit = dc->ir & (1 << 22);
    e_bit = dc->ir & (1 << 23);

    dc->delayed_branch = 2;
    dc->tb_flags |= D_FLAG;
    tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                  cpu_env, offsetof(CPUMBState, bimm));

    if (i_bit) {
        LOG_DIS("rtid ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTI_FLAG;
    } else if (b_bit) {
        LOG_DIS("rtbd ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTB_FLAG;
    } else if (e_bit) {
        LOG_DIS("rted ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTE_FLAG;
    } else
        LOG_DIS("rts ir=%x\n", dc->ir);

    dc->jmp = JMP_INDIRECT;
    tcg_gen_movi_tl(env_btaken, 1);
    tcg_gen_add_tl(env_btarget, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static int dec_check_fpuv2(DisasContext *dc)
{
    if ((dc->cpu->cfg.use_fpu != 2) && (dc->tb_flags & MSR_EE_FLAG)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_FPU);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }
    /* Nonzero iff FPUv2 is configured; callers bail out on zero.  */
    return (dc->cpu->cfg.use_fpu == 2) ? PVR2_USE_FPU2_MASK : 0;
}

static void dec_fpu(DisasContext *dc)
{
    unsigned int fpu_insn;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && (dc->cpu->cfg.use_fpu != 1)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    fpu_insn = (dc->ir >> 7) & 7;

    switch (fpu_insn) {
        case 0:
            gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 1:
            gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                             cpu_R[dc->rb]);
            break;

        case 2:
            gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 3:
            gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 4:
            switch ((dc->ir >> 4) & 7) {
                case 0:
                    gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 1:
                    gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 2:
                    gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 3:
                    gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 4:
                    gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 5:
                    gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 6:
                    gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                default:
                    qemu_log_mask(LOG_UNIMP,
                                  "unimplemented fcmp fpu_insn=%x pc=%x"
                                  " opc=%x\n",
                                  fpu_insn, dc->pc, dc->opcode);
                    dc->abort_at_next_insn = 1;
                    break;
            }
            break;

        case 5:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 6:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 7:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        default:
            qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
                          " opc=%x\n",
                          fpu_insn, dc->pc, dc->opcode);
            dc->abort_at_next_insn = 1;
            break;
    }
}

static void dec_null(DisasContext *dc)
{
    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }
    qemu_log_mask(LOG_GUEST_ERROR, "unknown insn pc=%x opc=%x\n",
                  dc->pc, dc->opcode);
    dc->abort_at_next_insn = 1;
}

/* Insns connected to FSL or AXI stream attached devices.  */
static void dec_stream(DisasContext *dc)
{
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
    TCGv_i32 t_id, t_ctrl;
    int ctrl;

    LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
            dc->type_b ? "" : "d", dc->imm);

    if ((dc->tb_flags & MSR_EE_FLAG) && (mem_index == MMU_USER_IDX)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    t_id = tcg_temp_new();
    if (dc->type_b) {
        tcg_gen_movi_tl(t_id, dc->imm & 0xf);
        ctrl = dc->imm >> 10;
    } else {
        tcg_gen_andi_tl(t_id, cpu_R[dc->rb], 0xf);
        ctrl = dc->imm >> 5;
    }

    t_ctrl = tcg_const_tl(ctrl);

    if (dc->rd == 0) {
        gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
    } else {
        gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
    }
    tcg_temp_free(t_id);
    tcg_temp_free(t_ctrl);
}

static struct decoder_info {
    struct {
        uint32_t bits;
        uint32_t mask;
    };
    void (*dec)(DisasContext *dc);
} decinfo[] = {
    {DEC_ADD, dec_add},
    {DEC_SUB, dec_sub},
    {DEC_AND, dec_and},
    {DEC_XOR, dec_xor},
    {DEC_OR, dec_or},
    {DEC_BIT, dec_bit},
    {DEC_BARREL, dec_barrel},
    {DEC_LD, dec_load},
    {DEC_ST, dec_store},
    {DEC_IMM, dec_imm},
    {DEC_BR, dec_br},
    {DEC_BCC, dec_bcc},
    {DEC_RTS, dec_rts},
    {DEC_FPU, dec_fpu},
    {DEC_MUL, dec_mul},
    {DEC_DIV, dec_div},
    {DEC_MSR, dec_msr},
    {DEC_STREAM, dec_stream},
    {{0, 0}, dec_null}
};
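
/*
 * Each DEC_* macro (from microblaze-decode.h) expands to a {bits, mask}
 * pair for the anonymous struct above.  decode() walks the table in order
 * and dispatches on the first entry whose mask/bits match the major
 * opcode; the final {{0, 0}, dec_null} entry matches anything and acts as
 * the catch-all for unknown opcodes.
 */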

static inline void decode(DisasContext *dc, uint32_t ir)
{
    int i;

    dc->ir = ir;
    LOG_DIS("%8.8x\t", dc->ir);

    if (dc->ir)
        dc->nr_nops = 0;
    else {
        if ((dc->tb_flags & MSR_EE_FLAG)
              && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
              && (dc->cpu->env.pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK)) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        LOG_DIS("nr_nops=%d\t", dc->nr_nops);
        dc->nr_nops++;
        if (dc->nr_nops > 4) {
            cpu_abort(CPU(dc->cpu), "fetching nop sequence\n");
        }
    }
    /* Bit 2 (in MicroBlaze's MSB-first numbering, i.e. host bit 29)
       indicates the insn type.  */
    dc->type_b = ir & (1 << 29);

    dc->opcode = EXTRACT_FIELD(ir, 26, 31);
    dc->rd = EXTRACT_FIELD(ir, 21, 25);
    dc->ra = EXTRACT_FIELD(ir, 16, 20);
    dc->rb = EXTRACT_FIELD(ir, 11, 15);
    dc->imm = EXTRACT_FIELD(ir, 0, 15);

    /* Dispatch through the decoder table; the first matching entry wins.  */
    for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
        if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
            decinfo[i].dec(dc);
            break;
        }
    }
}

/* generate intermediate code for basic block 'tb'.  */
void gen_intermediate_code(CPUMBState *env, struct TranslationBlock *tb)
{
    MicroBlazeCPU *cpu = mb_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint32_t pc_start;
    struct DisasContext ctx;
    struct DisasContext *dc = &ctx;
    uint32_t next_page_start, org_flags;
    target_ulong npc;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    dc->cpu = cpu;
    dc->tb = tb;
    org_flags = dc->synced_flags = dc->tb_flags = tb->flags;

    dc->is_jmp = DISAS_NEXT;
    dc->jmp = 0;
    dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
    if (dc->delayed_branch) {
        dc->jmp = JMP_INDIRECT;
    }
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->cpustate_changed = 0;
    dc->abort_at_next_insn = 0;
    dc->nr_nops = 0;

    if (pc_start & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n", pc_start);
    }

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
#if !SIM_COMPAT
        qemu_log("--------------\n");
        log_cpu_state(CPU(cpu), 0);
#endif
    }

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);
    do
    {
        tcg_gen_insn_start(dc->pc);
        num_insns++;

#if SIM_COMPAT
        if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
            tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
            gen_helper_debug();
        }
#endif

        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
            t_gen_raise_exception(dc, EXCP_DEBUG);
            dc->is_jmp = DISAS_UPDATE;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order for it to be properly
               cleared -- thus we increment the PC here so that the logic
               setting tb->size below does the right thing.  */
1710            dc->pc += 4;
1711            break;
1712        }
1713
1714        /* Pretty disas.  */
1715        LOG_DIS("%8.8x:\t", dc->pc);
1716
1717        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
1718            gen_io_start();
1719        }
1720
1721        dc->clear_imm = 1;
1722        decode(dc, cpu_ldl_code(env, dc->pc));
1723        if (dc->clear_imm)
1724            dc->tb_flags &= ~IMM_FLAG;
1725        dc->pc += 4;
1726
1727        if (dc->delayed_branch) {
1728            dc->delayed_branch--;
1729            if (!dc->delayed_branch) {
1730                if (dc->tb_flags & DRTI_FLAG)
1731                    do_rti(dc);
1732                 if (dc->tb_flags & DRTB_FLAG)
1733                    do_rtb(dc);
1734                if (dc->tb_flags & DRTE_FLAG)
1735                    do_rte(dc);
1736                /* Clear the delay slot flag.  */
1737                dc->tb_flags &= ~D_FLAG;
1738                /* If it is a direct jump, try direct chaining.  */
1739                if (dc->jmp == JMP_INDIRECT) {
1740                    eval_cond_jmp(dc, env_btarget, tcg_const_tl(dc->pc));
1741                    dc->is_jmp = DISAS_JUMP;
1742                } else if (dc->jmp == JMP_DIRECT) {
1743                    t_sync_flags(dc);
1744                    gen_goto_tb(dc, 0, dc->jmp_pc);
1745                    dc->is_jmp = DISAS_TB_JUMP;
1746                } else if (dc->jmp == JMP_DIRECT_CC) {
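                    /* Two-way exit: the fallthrough (not-taken) path
                       chains to dc->pc via goto_tb slot 1, the taken
                       path to dc->jmp_pc via slot 0.  */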
                    TCGLabel *l1 = gen_new_label();
                    t_sync_flags(dc);
                    /* Conditional jmp.  */
                    tcg_gen_brcondi_tl(TCG_COND_NE, env_btaken, 0, l1);
                    gen_goto_tb(dc, 1, dc->pc);
                    gen_set_label(l1);
                    gen_goto_tb(dc, 0, dc->jmp_pc);

                    dc->is_jmp = DISAS_TB_JUMP;
                }
                break;
            }
        }
        if (cs->singlestep_enabled) {
            break;
        }
    } while (!dc->is_jmp && !dc->cpustate_changed
             && !tcg_op_buf_full()
             && !singlestep
             && (dc->pc < next_page_start)
             && num_insns < max_insns);

    npc = dc->pc;
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->tb_flags & D_FLAG) {
            dc->is_jmp = DISAS_UPDATE;
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
            sync_jmpstate(dc);
        } else {
            npc = dc->jmp_pc;
        }
    }

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }
    /* Force an update if the per-tb cpu state has changed.  */
    if (dc->is_jmp == DISAS_NEXT
        && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
        dc->is_jmp = DISAS_UPDATE;
        tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
    }
    t_sync_flags(dc);

    if (unlikely(cs->singlestep_enabled)) {
        TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);

        if (dc->is_jmp != DISAS_JUMP) {
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
        }
        gen_helper_raise_exception(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
    } else {
        switch (dc->is_jmp) {
            case DISAS_NEXT:
                gen_goto_tb(dc, 1, npc);
                break;
            default:
            case DISAS_JUMP:
            case DISAS_UPDATE:
                /* Indicate that the hash table must be used
                   to find the next TB.  */
                tcg_gen_exit_tb(0);
                break;
            case DISAS_TB_JUMP:
                /* Nothing more to generate.  */
                break;
        }
    }
    gen_tb_end(tb, num_insns);

    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;

#ifdef DEBUG_DISAS
#if !SIM_COMPAT
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        qemu_log("\n");
#if DISAS_GNU
        log_target_disas(cs, pc_start, dc->pc - pc_start, 0);
#endif
        qemu_log("\nisize=%d osize=%d\n",
                 dc->pc - pc_start, tcg_op_buf_count());
    }
#endif
#endif
    assert(!dc->abort_at_next_insn);
}

void mb_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                       int flags)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    int i;

    if (!env || !f) {
        return;
    }

    cpu_fprintf(f, "IN: PC=%x %s\n",
                env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC]));
    cpu_fprintf(f, "rmsr=%x resr=%x rear=%x debug=%x imm=%x iflags=%x fsr=%x\n",
             env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
             env->debug, env->imm, env->iflags, env->sregs[SR_FSR]);
    cpu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
             env->btaken, env->btarget,
             (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
             (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
             (env->sregs[SR_MSR] & MSR_EIP),
             (env->sregs[SR_MSR] & MSR_IE));

    for (i = 0; i < 32; i++) {
        cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
        if ((i + 1) % 4 == 0) {
            cpu_fprintf(f, "\n");
        }
    }
    cpu_fprintf(f, "\n\n");
}

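/* Note: the cpu_model argument is currently ignored; every caller gets
   the same default TYPE_MICROBLAZE_CPU object.  */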
MicroBlazeCPU *cpu_mb_init(const char *cpu_model)
{
    MicroBlazeCPU *cpu;

    cpu = MICROBLAZE_CPU(object_new(TYPE_MICROBLAZE_CPU));

    object_property_set_bool(OBJECT(cpu), true, "realized", NULL);

    return cpu;
}

void mb_tcg_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    tcg_ctx.tcg_env = cpu_env;

    env_debug = tcg_global_mem_new(cpu_env,
                    offsetof(CPUMBState, debug),
                    "debug0");
    env_iflags = tcg_global_mem_new(cpu_env,
                    offsetof(CPUMBState, iflags),
                    "iflags");
    env_imm = tcg_global_mem_new(cpu_env,
                    offsetof(CPUMBState, imm),
                    "imm");
    env_btarget = tcg_global_mem_new(cpu_env,
                     offsetof(CPUMBState, btarget),
                     "btarget");
    env_btaken = tcg_global_mem_new(cpu_env,
                     offsetof(CPUMBState, btaken),
                     "btaken");
    env_res_addr = tcg_global_mem_new(cpu_env,
                     offsetof(CPUMBState, res_addr),
                     "res_addr");
    env_res_val = tcg_global_mem_new(cpu_env,
                     offsetof(CPUMBState, res_val),
                     "res_val");
    for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
        cpu_R[i] = tcg_global_mem_new(cpu_env,
                          offsetof(CPUMBState, regs[i]),
                          regnames[i]);
    }
    for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
        cpu_SR[i] = tcg_global_mem_new(cpu_env,
                          offsetof(CPUMBState, sregs[i]),
                          special_regnames[i]);
    }
}

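/* tcg_gen_insn_start() above records a single target_ulong (the insn's
   PC) per insn, so restoring state after an exception only needs to
   copy data[0] back into SR_PC.  */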
void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->sregs[SR_PC] = data[0];
}
