qemu/target-microblaze/translate.c
/*
 *  Xilinx MicroBlaze emulation for qemu: main translation routines.
 *
 *  Copyright (c) 2009 Edgar E. Iglesias.
 *  Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg-op.h"
#include "exec/helper-proto.h"
#include "microblaze-decode.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"


#define SIM_COMPAT 0
#define DISAS_GNU 1
#define DISAS_MB 1
#if DISAS_MB && !SIM_COMPAT
#  define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DIS(...) do { } while (0)
#endif

#define D(x)

#define EXTRACT_FIELD(src, start, end) \
            (((src) >> start) & ((1 << (end - start + 1)) - 1))
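
/* Example: EXTRACT_FIELD(ir, 26, 31) yields the 6-bit major opcode and
   EXTRACT_FIELD(ir, 21, 25) the rd field, matching the field layout
   used by decode() below.  */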

static TCGv env_debug;
static TCGv_env cpu_env;
static TCGv cpu_R[32];
static TCGv cpu_SR[18];
static TCGv env_imm;
static TCGv env_btaken;
static TCGv env_btarget;
static TCGv env_iflags;
static TCGv env_res_addr;
static TCGv env_res_val;

#include "exec/gen-icount.h"

/* This is the state at translation time.  */
typedef struct DisasContext {
    MicroBlazeCPU *cpu;
    target_ulong pc;

    /* Decoder.  */
    int type_b;
    uint32_t ir;
    uint8_t opcode;
    uint8_t rd, ra, rb;
    uint16_t imm;

    unsigned int cpustate_changed;
    unsigned int delayed_branch;
    unsigned int tb_flags, synced_flags; /* tb dependent flags.  */
    unsigned int clear_imm;
    int is_jmp;

#define JMP_NOJMP     0
#define JMP_DIRECT    1
#define JMP_DIRECT_CC 2
#define JMP_INDIRECT  3
    unsigned int jmp;
    uint32_t jmp_pc;

    int abort_at_next_insn;
    int nr_nops;
    struct TranslationBlock *tb;
    int singlestep_enabled;
} DisasContext;

static const char *regnames[] =
{
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};

static const char *special_regnames[] =
{
    "rpc", "rmsr", "sr2", "sr3", "sr4", "sr5", "sr6", "sr7",
    "sr8", "sr9", "sr10", "sr11", "sr12", "sr13", "sr14", "sr15",
    "sr16", "sr17", "sr18"
};

static inline void t_sync_flags(DisasContext *dc)
{
    /* Synch the tb dependent flags between translator and runtime.  */
    if (dc->tb_flags != dc->synced_flags) {
        tcg_gen_movi_tl(env_iflags, dc->tb_flags);
        dc->synced_flags = dc->tb_flags;
    }
}

static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    t_sync_flags(dc);
    tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
    gen_helper_raise_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    dc->is_jmp = DISAS_UPDATE;
}

static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    TranslationBlock *tb;
    tb = dc->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb((uintptr_t)tb + n);
    } else {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb(0);
    }
}
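
/* Direct TB chaining via goto_tb is only safe while the destination
   stays on the same guest page as the current TB; cross-page branches
   exit with tcg_gen_exit_tb(0) so the next TB is looked up afresh
   under the current MMU mappings.  */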

static void read_carry(DisasContext *dc, TCGv d)
{
    tcg_gen_shri_tl(d, cpu_SR[SR_MSR], 31);
}

/*
 * write_carry sets the carry bits in MSR based on bit 0 of v.
 * v[31:1] are ignored.
 */
static void write_carry(DisasContext *dc, TCGv v)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_shli_tl(t0, v, 31);
    tcg_gen_sari_tl(t0, t0, 31);
    tcg_gen_andi_tl(t0, t0, (MSR_C | MSR_CC));
    tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR],
                    ~(MSR_C | MSR_CC));
    tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0);
    tcg_temp_free(t0);
}

static void write_carryi(DisasContext *dc, bool carry)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_movi_tl(t0, carry);
    write_carry(dc, t0);
    tcg_temp_free(t0);
}
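
/* The shl/sar pair above smears bit 0 of v across the whole word, so a
   single AND can update both carry copies (MSR_C and MSR_CC) at once.
   write_carryi(dc, 1) is used e.g. by swx below to mark the
   store-conditional as failed before the reservation checks run.  */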

/* True if ALU operand b is a small immediate that may deserve
   faster treatment.  */
static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
{
    /* Immediate insn without the imm prefix ?  */
    return dc->type_b && !(dc->tb_flags & IMM_FLAG);
}

static inline TCGv *dec_alu_op_b(DisasContext *dc)
{
    if (dc->type_b) {
        if (dc->tb_flags & IMM_FLAG)
            tcg_gen_ori_tl(env_imm, env_imm, dc->imm);
        else
            tcg_gen_movi_tl(env_imm, (int32_t)((int16_t)dc->imm));
        return &env_imm;
    } else
        return &cpu_R[dc->rb];
}
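
/* Operand b of an ALU op is either rb (type A) or the 16-bit immediate
   (type B).  A preceding imm insn leaves IMM_FLAG set and the upper 16
   bits already in env_imm, so only the low half is ORed in; without the
   prefix the immediate is sign-extended to 32 bits.  */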

static void dec_add(DisasContext *dc)
{
    unsigned int k, c;
    TCGv cf;

    k = dc->opcode & 4;
    c = dc->opcode & 2;

    LOG_DIS("add%s%s%s r%d r%d r%d\n",
            dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
            dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new();

                read_carry(dc, cf);
                tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry.  */
    cf = tcg_temp_new();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_tl(cf, 0);
    }

    if (dc->rd) {
        TCGv ncf = tcg_temp_new();
        gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free(ncf);
    } else {
        gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free(cf);
}

static void dec_sub(DisasContext *dc)
{
    unsigned int u, cmp, k, c;
    TCGv cf, na;

    u = dc->imm & 2;
    k = dc->opcode & 4;
    c = dc->opcode & 2;
    cmp = (dc->imm & 1) && (!dc->type_b) && k;

    if (cmp) {
        LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
        if (dc->rd) {
            if (u)
                gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            else
                gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        }
        return;
    }

    LOG_DIS("sub%s%s r%d, r%d r%d\n",
             k ? "k" : "",  c ? "c" : "", dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_sub_tl(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new();

                read_carry(dc, cf);
                tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry. And complement a into na.  */
    cf = tcg_temp_new();
    na = tcg_temp_new();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_tl(cf, 1);
    }

    /* d = b + ~a + c. carry defaults to 1.  */
    tcg_gen_not_tl(na, cpu_R[dc->ra]);

    if (dc->rd) {
        TCGv ncf = tcg_temp_new();
        gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_tl(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free(ncf);
    } else {
        gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free(cf);
    tcg_temp_free(na);
}
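
/* Worked example for the rsub path above: with r4 = 3 and r5 = 5,
   "rsub r3, r4, r5" computes r3 = 5 + ~3 + 1 = 2, and the helper's
   carry-out (1 here, meaning no borrow) lands in MSR[C] via
   write_carry().  */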

static void dec_pattern(DisasContext *dc)
{
    unsigned int mode;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }

    mode = dc->opcode & 3;
    switch (mode) {
        case 0:
            /* pcmpbf.  */
            LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd)
                gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                tcg_gen_setcond_tl(TCG_COND_EQ, cpu_R[dc->rd],
                                   cpu_R[dc->ra], cpu_R[dc->rb]);
            }
            break;
        case 3:
            LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                tcg_gen_setcond_tl(TCG_COND_NE, cpu_R[dc->rd],
                                   cpu_R[dc->ra], cpu_R[dc->rb]);
            }
            break;
        default:
            cpu_abort(CPU(dc->cpu),
                      "unsupported pattern insn opcode=%x\n", dc->opcode);
            break;
    }
}

static void dec_and(DisasContext *dc)
{
    unsigned int not;

    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    not = dc->opcode & (1 << 1);
    LOG_DIS("and%s\n", not ? "n" : "");

    if (!dc->rd)
        return;

    if (not) {
        tcg_gen_andc_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    } else
        tcg_gen_and_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_or(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
    if (dc->rd)
        tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_xor(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("xor r%d\n", dc->rd);
    if (dc->rd)
        tcg_gen_xor_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static inline void msr_read(DisasContext *dc, TCGv d)
{
    tcg_gen_mov_tl(d, cpu_SR[SR_MSR]);
}

static inline void msr_write(DisasContext *dc, TCGv v)
{
    TCGv t;

    t = tcg_temp_new();
    dc->cpustate_changed = 1;
    /* PVR bit is not writable.  */
    tcg_gen_andi_tl(t, v, ~MSR_PVR);
    tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
    /* OR in the masked copy, not v itself, so a guest write can never
       set the read-only PVR bit.  */
    tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t);
    tcg_temp_free(t);
}

static void dec_msr(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv t0, t1;
    unsigned int sr, to, rn;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    sr = dc->imm & ((1 << 14) - 1);
    to = dc->imm & (1 << 14);
    dc->type_b = 1;
    if (to)
        dc->cpustate_changed = 1;

    /* msrclr and msrset.  */
    if (!(dc->imm & (1 << 15))) {
        unsigned int clr = dc->ir & (1 << 16);

        LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
                dc->rd, dc->imm);

        if (!(dc->cpu->env.pvr.regs[2] & PVR2_USE_MSR_INSTR)) {
            /* nop??? */
            return;
        }

        if ((dc->tb_flags & MSR_EE_FLAG)
            && mem_index == MMU_USER_IDX && (dc->imm != 4 && dc->imm != 0)) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        if (dc->rd)
            msr_read(dc, cpu_R[dc->rd]);

        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        msr_read(dc, t0);
        tcg_gen_mov_tl(t1, *(dec_alu_op_b(dc)));

        if (clr) {
            tcg_gen_not_tl(t1, t1);
            tcg_gen_and_tl(t0, t0, t1);
        } else
            tcg_gen_or_tl(t0, t0, t1);
        msr_write(dc, t0);
        tcg_temp_free(t0);
        tcg_temp_free(t1);
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
        dc->is_jmp = DISAS_UPDATE;
        return;
    }

    if (to) {
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }
    }

#if !defined(CONFIG_USER_ONLY)
    /* Catch read/writes to the mmu block.  */
    if ((sr & ~0xff) == 0x1000) {
        sr &= 7;
        LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        if (to)
            gen_helper_mmu_write(cpu_env, tcg_const_tl(sr), cpu_R[dc->ra]);
        else
            gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tcg_const_tl(sr));
        return;
    }
#endif

    if (to) {
        LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        switch (sr) {
            case 0:
                break;
            case 1:
                msr_write(dc, cpu_R[dc->ra]);
                break;
            case 0x3:
                tcg_gen_mov_tl(cpu_SR[SR_EAR], cpu_R[dc->ra]);
                break;
            case 0x5:
                tcg_gen_mov_tl(cpu_SR[SR_ESR], cpu_R[dc->ra]);
                break;
            case 0x7:
                tcg_gen_andi_tl(cpu_SR[SR_FSR], cpu_R[dc->ra], 31);
                break;
            case 0x800:
                tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, shr));
                break;
            default:
                cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
                break;
        }
    } else {
        LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);

        switch (sr) {
            case 0:
                tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);
                break;
            case 1:
                msr_read(dc, cpu_R[dc->rd]);
                break;
            case 0x3:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_EAR]);
                break;
            case 0x5:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_ESR]);
                break;
            case 0x7:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_FSR]);
                break;
            case 0xb:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_BTR]);
                break;
            case 0x800:
                tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, shr));
                break;
            case 0x2000:
            case 0x2001:
            case 0x2002:
            case 0x2003:
            case 0x2004:
            case 0x2005:
            case 0x2006:
            case 0x2007:
            case 0x2008:
            case 0x2009:
            case 0x200a:
            case 0x200b:
            case 0x200c:
                rn = sr & 0xf;
                tcg_gen_ld_tl(cpu_R[dc->rd],
                              cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
                break;
            default:
                cpu_abort(cs, "unknown mfs reg %x\n", sr);
                break;
        }
    }

    if (dc->rd == 0) {
        tcg_gen_movi_tl(cpu_R[0], 0);
    }
}

/* 64-bit signed mul, lower result in d and upper in d2.  */
static void t_gen_muls(TCGv d, TCGv d2, TCGv a, TCGv b)
{
    TCGv_i64 t0, t1;

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(t0, a);
    tcg_gen_ext_i32_i64(t1, b);
    tcg_gen_mul_i64(t0, t0, t1);

    tcg_gen_extrl_i64_i32(d, t0);
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_extrl_i64_i32(d2, t0);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}

/* 64-bit unsigned mul, lower result in d and upper in d2.  */
static void t_gen_mulu(TCGv d, TCGv d2, TCGv a, TCGv b)
{
    TCGv_i64 t0, t1;

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(t0, a);
    tcg_gen_extu_i32_i64(t1, b);
    tcg_gen_mul_i64(t0, t0, t1);

    tcg_gen_extrl_i64_i32(d, t0);
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_extrl_i64_i32(d2, t0);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}
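
/* Note: newer TCG also provides tcg_gen_mulu2_i32/tcg_gen_muls2_i32,
   which can produce the low/high pair directly; the open-coded 64-bit
   widening form above predates reliance on those helpers.  */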

/* Multiplier unit.  */
static void dec_mul(DisasContext *dc)
{
    TCGv d[2];
    unsigned int subcode;

    if ((dc->tb_flags & MSR_EE_FLAG)
         && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
         && !(dc->cpu->env.pvr.regs[0] & PVR0_USE_HW_MUL_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    subcode = dc->imm & 3;
    d[0] = tcg_temp_new();
    d[1] = tcg_temp_new();

    if (dc->type_b) {
        LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
        t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        goto done;
    }

    /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2.  */
    if (subcode >= 1 && subcode <= 3
        && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_MUL64_MASK))) {
        /* nop??? */
    }

    switch (subcode) {
        case 0:
            LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 1:
            LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 3:
            LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_mulu(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "unknown MUL insn %x\n", subcode);
            break;
    }
done:
    tcg_temp_free(d[0]);
    tcg_temp_free(d[1]);
}

/* Div unit.  */
static void dec_div(DisasContext *dc)
{
    unsigned int u;

    u = dc->imm & 2;
    LOG_DIS("div\n");

    if ((dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !((dc->cpu->env.pvr.regs[0] & PVR0_USE_DIV_MASK))) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }

    if (u)
        gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    else
        gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    if (!dc->rd)
        tcg_gen_movi_tl(cpu_R[dc->rd], 0);
}

static void dec_barrel(DisasContext *dc)
{
    TCGv t0;
    unsigned int s, t;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !(dc->cpu->env.pvr.regs[0] & PVR0_USE_BARREL_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    s = dc->imm & (1 << 10);
    t = dc->imm & (1 << 9);

    LOG_DIS("bs%s%s r%d r%d r%d\n",
            s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);

    t0 = tcg_temp_new();

    tcg_gen_mov_tl(t0, *(dec_alu_op_b(dc)));
    tcg_gen_andi_tl(t0, t0, 31);

    if (s)
        tcg_gen_shl_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
    else {
        if (t)
            tcg_gen_sar_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
        else
            tcg_gen_shr_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
    }
}

static void dec_bit(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv t0;
    unsigned int op;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    op = dc->ir & ((1 << 9) - 1);
    switch (op) {
        case 0x21:
            /* src.  */
            t0 = tcg_temp_new();

            LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_andi_tl(t0, cpu_SR[SR_MSR], MSR_CC);
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->rd], t0);
            }
            tcg_temp_free(t0);
            break;

        case 0x1:
        case 0x41:
            /* srl.  */
            LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);

            /* Update carry. Note that write carry only looks at the LSB.  */
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                if (op == 0x41)
                    tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                else
                    tcg_gen_sari_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
            }
            break;
        case 0x60:
            LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x61:
            LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x64:
        case 0x66:
        case 0x74:
        case 0x76:
            /* wdc.  */
            LOG_DIS("wdc r%d\n", dc->ra);
            if ((dc->tb_flags & MSR_EE_FLAG)
                 && mem_index == MMU_USER_IDX) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
                return;
            }
            break;
        case 0x68:
            /* wic.  */
            LOG_DIS("wic r%d\n", dc->ra);
            if ((dc->tb_flags & MSR_EE_FLAG)
                 && mem_index == MMU_USER_IDX) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
                return;
            }
            break;
        case 0xe0:
            if ((dc->tb_flags & MSR_EE_FLAG)
                && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
                && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
            }
            if (dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR) {
                gen_helper_clz(cpu_R[dc->rd], cpu_R[dc->ra]);
            }
            break;
        case 0x1e0:
            /* swapb */
            LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x1e2:
            /* swaph */
            LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
            break;
        default:
            cpu_abort(cs, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
                      dc->pc, op, dc->rd, dc->ra, dc->rb);
            break;
    }
}

static inline void sync_jmpstate(DisasContext *dc)
{
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->jmp == JMP_DIRECT) {
            tcg_gen_movi_tl(env_btaken, 1);
        }
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
    }
}

static void dec_imm(DisasContext *dc)
{
    LOG_DIS("imm %x\n", dc->imm << 16);
    tcg_gen_movi_tl(env_imm, (dc->imm << 16));
    dc->tb_flags |= IMM_FLAG;
    dc->clear_imm = 0;
}

static inline TCGv *compute_ldst_addr(DisasContext *dc, TCGv *t)
{
    unsigned int extimm = dc->tb_flags & IMM_FLAG;
    /* Should be set to one if r1 is used by loadstores.  */
    int stackprot = 0;

    /* All load/stores use ra.  */
    if (dc->ra == 1 && dc->cpu->cfg.stackprot) {
        stackprot = 1;
    }

    /* Treat the common cases first.  */
    if (!dc->type_b) {
        /* If any of the regs is r0, return a ptr to the other.  */
        if (dc->ra == 0) {
            return &cpu_R[dc->rb];
        } else if (dc->rb == 0) {
            return &cpu_R[dc->ra];
        }

        if (dc->rb == 1 && dc->cpu->cfg.stackprot) {
            stackprot = 1;
        }

        *t = tcg_temp_new();
        tcg_gen_add_tl(*t, cpu_R[dc->ra], cpu_R[dc->rb]);

        if (stackprot) {
            gen_helper_stackprot(cpu_env, *t);
        }
        return t;
    }
    /* Immediate.  */
    if (!extimm) {
        if (dc->imm == 0) {
            return &cpu_R[dc->ra];
        }
        *t = tcg_temp_new();
        tcg_gen_movi_tl(*t, (int32_t)((int16_t)dc->imm));
        tcg_gen_add_tl(*t, cpu_R[dc->ra], *t);
    } else {
        *t = tcg_temp_new();
        tcg_gen_add_tl(*t, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    }

    if (stackprot) {
        gen_helper_stackprot(cpu_env, *t);
    }
    return t;
}
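
/* Effective-address forms computed above:
 *   type A:  ra + rb      (r0 in either slot collapses to the other reg)
 *   type B:  ra + simm16  (or ra + full 32-bit imm after an imm prefix)
 * Accesses based on r1 additionally invoke the stack-protection helper
 * when the core is configured with stackprot.  */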

static void dec_load(DisasContext *dc)
{
    TCGv t, v, *addr;
    unsigned int size, rev = 0, ex = 0;
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        rev = (dc->ir >> 9) & 1;
        ex = (dc->ir >> 10) & 1;
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("l%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "");

    t_sync_flags(dc);
    addr = compute_ldst_addr(dc, &t);

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 01
                   11 -> 00 */
                TCGv low = tcg_temp_new();

                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_mov_tl(t, *addr);
                    addr = &t;
                }

                tcg_gen_andi_tl(low, t, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(t, t, ~3);
                tcg_gen_or_tl(t, t, low);
                tcg_gen_mov_tl(env_imm, t);
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_xori_tl(t, *addr, 2);
                    addr = &t;
                } else {
                    tcg_gen_xori_tl(t, t, 2);
                }
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }

    /* lwx does not throw unaligned access errors, so force alignment */
    if (ex) {
        /* Force addr into the temp.  */
        if (addr != &t) {
            t = tcg_temp_new();
            tcg_gen_mov_tl(t, *addr);
            addr = &t;
        }
        tcg_gen_andi_tl(t, t, ~3);
    }

    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);

    /* Verify alignment if needed.  */
    /*
     * Microblaze gives MMU faults priority over faults due to
     * unaligned addresses. That's why we speculatively do the load
     * into v. If the load succeeds, we verify alignment of the
     * address and if that succeeds we write into the destination reg.
     */
    v = tcg_temp_new();
    tcg_gen_qemu_ld_tl(v, *addr, cpu_mmu_index(&dc->cpu->env, false), mop);

    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
        gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
                            tcg_const_tl(0), tcg_const_tl(size - 1));
    }

    if (ex) {
        tcg_gen_mov_tl(env_res_addr, *addr);
        tcg_gen_mov_tl(env_res_val, v);
    }
    if (dc->rd) {
        tcg_gen_mov_tl(cpu_R[dc->rd], v);
    }
    tcg_temp_free(v);

    if (ex) { /* lwx */
        /* no support for AXI exclusive so always clear C */
        write_carryi(dc, 0);
    }

    if (addr == &t)
        tcg_temp_free(t);
}

static void dec_store(DisasContext *dc)
{
    TCGv t, *addr, swx_addr;
    TCGLabel *swx_skip = NULL;
    unsigned int size, rev = 0, ex = 0;
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        rev = (dc->ir >> 9) & 1;
        ex = (dc->ir >> 10) & 1;
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("s%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "");
    t_sync_flags(dc);
    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);
    addr = compute_ldst_addr(dc, &t);

    swx_addr = tcg_temp_local_new();
    if (ex) { /* swx */
        TCGv tval;

        /* Force addr into the swx_addr. */
        tcg_gen_mov_tl(swx_addr, *addr);
        addr = &swx_addr;
        /* swx does not throw unaligned access errors, so force alignment */
        tcg_gen_andi_tl(swx_addr, swx_addr, ~3);

        write_carryi(dc, 1);
        swx_skip = gen_new_label();
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, swx_addr, swx_skip);

        /* Compare the value loaded at lwx with current contents of
           the reserved location.
           FIXME: This only works for system emulation where we can expect
           this compare and the following write to be atomic. For user
           emulation we need to add atomicity between threads.  */
        tval = tcg_temp_new();
        tcg_gen_qemu_ld_tl(tval, swx_addr, cpu_mmu_index(&dc->cpu->env, false),
                           MO_TEUL);
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_val, tval, swx_skip);
        write_carryi(dc, 0);
        tcg_temp_free(tval);
    }

    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 01
                   11 -> 00 */
                TCGv low = tcg_temp_new();

                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_mov_tl(t, *addr);
                    addr = &t;
                }

                tcg_gen_andi_tl(low, t, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(t, t, ~3);
                tcg_gen_or_tl(t, t, low);
                tcg_gen_mov_tl(env_imm, t);
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_xori_tl(t, *addr, 2);
                    addr = &t;
                } else {
                    tcg_gen_xori_tl(t, t, 2);
                }
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }
    tcg_gen_qemu_st_tl(cpu_R[dc->rd], *addr,
                       cpu_mmu_index(&dc->cpu->env, false), mop);

    /* Verify alignment if needed.  */
    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
        /* FIXME: if the alignment is wrong, we should restore the value
         *        in memory. One possible way to achieve this is to probe
         *        the MMU prior to the memaccess, that way we could put
         *        the alignment checks in between the probe and the mem
         *        access.
         */
        gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
                            tcg_const_tl(1), tcg_const_tl(size - 1));
    }

    if (ex) {
        gen_set_label(swx_skip);
    }
    tcg_temp_free(swx_addr);

    if (addr == &t)
        tcg_temp_free(t);
}
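
/* lwx/swx together implement load-linked/store-conditional: lwx records
   the word-aligned address and loaded value in env_res_addr/env_res_val,
   and swx only commits its store when both still match, signalling
   success by clearing MSR[C].  As the FIXME above notes, this is only
   atomic enough for system emulation.  */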

static inline void eval_cc(DisasContext *dc, unsigned int cc,
                           TCGv d, TCGv a, TCGv b)
{
    switch (cc) {
        case CC_EQ:
            tcg_gen_setcond_tl(TCG_COND_EQ, d, a, b);
            break;
        case CC_NE:
            tcg_gen_setcond_tl(TCG_COND_NE, d, a, b);
            break;
        case CC_LT:
            tcg_gen_setcond_tl(TCG_COND_LT, d, a, b);
            break;
        case CC_LE:
            tcg_gen_setcond_tl(TCG_COND_LE, d, a, b);
            break;
        case CC_GE:
            tcg_gen_setcond_tl(TCG_COND_GE, d, a, b);
            break;
        case CC_GT:
            tcg_gen_setcond_tl(TCG_COND_GT, d, a, b);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
            break;
    }
}

static void eval_cond_jmp(DisasContext *dc, TCGv pc_true, TCGv pc_false)
{
    TCGLabel *l1 = gen_new_label();
    /* Conditional jmp.  */
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_false);
    tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, l1);
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_true);
    gen_set_label(l1);
}

static void dec_bcc(DisasContext *dc)
{
    unsigned int cc;
    unsigned int dslot;

    cc = EXTRACT_FIELD(dc->ir, 21, 23);
    dslot = dc->ir & (1 << 25);
    LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }

    if (dec_alu_op_b_is_small_imm(dc)) {
        int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend.  */

        tcg_gen_movi_tl(env_btarget, dc->pc + offset);
        dc->jmp = JMP_DIRECT_CC;
        dc->jmp_pc = dc->pc + offset;
    } else {
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btarget, dc->pc);
        tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
    }
    eval_cc(dc, cc, env_btaken, cpu_R[dc->ra], tcg_const_tl(0));
}
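
/* A branch with a delay slot sets delayed_branch = 2; the main loop in
   gen_intermediate_code() decrements it once per translated insn, so
   the branch resolves immediately after the slot insn, mirroring the
   hardware's single delay slot.  */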

static void dec_br(DisasContext *dc)
{
    unsigned int dslot, link, abs, mbar;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    dslot = dc->ir & (1 << 20);
    abs = dc->ir & (1 << 19);
    link = dc->ir & (1 << 18);

    /* Memory barrier.  */
    mbar = (dc->ir >> 16) & 31;
    if (mbar == 2 && dc->imm == 4) {
        /* mbar IMM & 16 decodes to sleep.  */
        if (dc->rd & 16) {
            TCGv_i32 tmp_hlt = tcg_const_i32(EXCP_HLT);
            TCGv_i32 tmp_1 = tcg_const_i32(1);

            LOG_DIS("sleep\n");

            t_sync_flags(dc);
            tcg_gen_st_i32(tmp_1, cpu_env,
                           -offsetof(MicroBlazeCPU, env)
                           +offsetof(CPUState, halted));
            tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
            gen_helper_raise_exception(cpu_env, tmp_hlt);
            tcg_temp_free_i32(tmp_hlt);
            tcg_temp_free_i32(tmp_1);
            return;
        }
        LOG_DIS("mbar %d\n", dc->rd);
        /* Break the TB.  */
        dc->cpustate_changed = 1;
        return;
    }

    LOG_DIS("br%s%s%s%s imm=%x\n",
             abs ? "a" : "", link ? "l" : "",
             dc->type_b ? "i" : "", dslot ? "d" : "",
             dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }
    if (link && dc->rd)
        tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);

    dc->jmp = JMP_INDIRECT;
    if (abs) {
        tcg_gen_movi_tl(env_btaken, 1);
        tcg_gen_mov_tl(env_btarget, *(dec_alu_op_b(dc)));
        if (link && !dslot) {
            if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
                t_gen_raise_exception(dc, EXCP_BREAK);
            if (dc->imm == 0) {
                if ((dc->tb_flags & MSR_EE_FLAG) && mem_index == MMU_USER_IDX) {
                    tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                    t_gen_raise_exception(dc, EXCP_HW_EXCP);
                    return;
                }

                t_gen_raise_exception(dc, EXCP_DEBUG);
            }
        }
    } else {
        if (dec_alu_op_b_is_small_imm(dc)) {
            dc->jmp = JMP_DIRECT;
            dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
        } else {
            tcg_gen_movi_tl(env_btaken, 1);
            tcg_gen_movi_tl(env_btarget, dc->pc);
            tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
        }
    }
}

static inline void do_rti(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_shri_tl(t0, cpu_SR[SR_MSR], 1);
    tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_IE);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTI_FLAG;
}

static inline void do_rtb(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_SR[SR_MSR], ~MSR_BIP);
    tcg_gen_shri_tl(t0, t1, 1);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTB_FLAG;
}

static inline void do_rte(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();

    tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_EE);
    tcg_gen_andi_tl(t1, t1, ~MSR_EIP);
    tcg_gen_shri_tl(t0, t1, 1);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTE_FLAG;
}
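
/* do_rti/do_rtb/do_rte share one pattern: the saved UM/VM copies sit
   one bit above the active bits in MSR, so a shift-right-by-one plus
   mask moves them back into place while IE/BIP/EE/EIP are adjusted for
   the mode being returned to; dropping the DRT*_FLAG stops the main
   loop from re-running the restore.  */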

static void dec_rts(DisasContext *dc)
{
    unsigned int b_bit, i_bit, e_bit;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    i_bit = dc->ir & (1 << 21);
    b_bit = dc->ir & (1 << 22);
    e_bit = dc->ir & (1 << 23);

    dc->delayed_branch = 2;
    dc->tb_flags |= D_FLAG;
    tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                  cpu_env, offsetof(CPUMBState, bimm));

    if (i_bit) {
        LOG_DIS("rtid ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTI_FLAG;
    } else if (b_bit) {
        LOG_DIS("rtbd ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTB_FLAG;
    } else if (e_bit) {
        LOG_DIS("rted ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTE_FLAG;
    } else
        LOG_DIS("rts ir=%x\n", dc->ir);

    dc->jmp = JMP_INDIRECT;
    tcg_gen_movi_tl(env_btaken, 1);
    tcg_gen_add_tl(env_btarget, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static int dec_check_fpuv2(DisasContext *dc)
{
    if ((dc->cpu->cfg.use_fpu != 2) && (dc->tb_flags & MSR_EE_FLAG)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_FPU);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }
    /* Return nonzero (the FPU2 capability mask) when FPU v2 insns may
       execute; callers skip the insn when this is zero.  */
    return (dc->cpu->cfg.use_fpu == 2) ? PVR2_USE_FPU2_MASK : 0;
}

static void dec_fpu(DisasContext *dc)
{
    unsigned int fpu_insn;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && (dc->cpu->cfg.use_fpu != 1)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    fpu_insn = (dc->ir >> 7) & 7;

    switch (fpu_insn) {
        case 0:
            gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 1:
            gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                             cpu_R[dc->rb]);
            break;

        case 2:
            gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 3:
            gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 4:
            switch ((dc->ir >> 4) & 7) {
                case 0:
                    gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 1:
                    gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 2:
                    gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 3:
                    gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 4:
                    gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 5:
                    gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 6:
                    gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                default:
                    qemu_log_mask(LOG_UNIMP,
                                  "unimplemented fcmp fpu_insn=%x pc=%x"
                                  " opc=%x\n",
                                  fpu_insn, dc->pc, dc->opcode);
                    dc->abort_at_next_insn = 1;
                    break;
            }
            break;

        case 5:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 6:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 7:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        default:
            qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
                          " opc=%x\n",
                          fpu_insn, dc->pc, dc->opcode);
            dc->abort_at_next_insn = 1;
            break;
    }
}

static void dec_null(DisasContext *dc)
{
    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }
    qemu_log_mask(LOG_GUEST_ERROR, "unknown insn pc=%x opc=%x\n",
                  dc->pc, dc->opcode);
    dc->abort_at_next_insn = 1;
}

/* Insns connected to FSL or AXI stream attached devices.  */
static void dec_stream(DisasContext *dc)
{
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
    TCGv_i32 t_id, t_ctrl;
    int ctrl;

    LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
            dc->type_b ? "" : "d", dc->imm);

    if ((dc->tb_flags & MSR_EE_FLAG) && (mem_index == MMU_USER_IDX)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    t_id = tcg_temp_new();
    if (dc->type_b) {
        tcg_gen_movi_tl(t_id, dc->imm & 0xf);
        ctrl = dc->imm >> 10;
    } else {
        tcg_gen_andi_tl(t_id, cpu_R[dc->rb], 0xf);
        ctrl = dc->imm >> 5;
    }

    t_ctrl = tcg_const_tl(ctrl);

    if (dc->rd == 0) {
        gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
    } else {
        gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
    }
    tcg_temp_free(t_id);
    tcg_temp_free(t_ctrl);
}

static struct decoder_info {
    struct {
        uint32_t bits;
        uint32_t mask;
    };
    void (*dec)(DisasContext *dc);
} decinfo[] = {
    {DEC_ADD, dec_add},
    {DEC_SUB, dec_sub},
    {DEC_AND, dec_and},
    {DEC_XOR, dec_xor},
    {DEC_OR, dec_or},
    {DEC_BIT, dec_bit},
    {DEC_BARREL, dec_barrel},
    {DEC_LD, dec_load},
    {DEC_ST, dec_store},
    {DEC_IMM, dec_imm},
    {DEC_BR, dec_br},
    {DEC_BCC, dec_bcc},
    {DEC_RTS, dec_rts},
    {DEC_FPU, dec_fpu},
    {DEC_MUL, dec_mul},
    {DEC_DIV, dec_div},
    {DEC_MSR, dec_msr},
    {DEC_STREAM, dec_stream},
    {{0, 0}, dec_null}
};
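
/* The decoder table is scanned linearly: the first entry whose
   mask/bits pair matches the major opcode wins, and the terminating
   {{0, 0}, dec_null} entry matches anything left over, routing
   unrecognised opcodes to the illegal-op path.  */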

static inline void decode(DisasContext *dc, uint32_t ir)
{
    int i;

    dc->ir = ir;
    LOG_DIS("%8.8x\t", dc->ir);

    if (dc->ir)
        dc->nr_nops = 0;
    else {
        if ((dc->tb_flags & MSR_EE_FLAG)
              && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
              && (dc->cpu->env.pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK)) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        LOG_DIS("nr_nops=%d\t", dc->nr_nops);
        dc->nr_nops++;
        if (dc->nr_nops > 4) {
            cpu_abort(CPU(dc->cpu), "fetching nop sequence\n");
        }
    }
    /* Bit 29 (bit 2 in MicroBlaze's MSB-first numbering) selects
       between type A (register-register) and type B (immediate) insns.  */
    dc->type_b = ir & (1 << 29);

    dc->opcode = EXTRACT_FIELD(ir, 26, 31);
    dc->rd = EXTRACT_FIELD(ir, 21, 25);
    dc->ra = EXTRACT_FIELD(ir, 16, 20);
    dc->rb = EXTRACT_FIELD(ir, 11, 15);
    dc->imm = EXTRACT_FIELD(ir, 0, 15);

    /* Large switch for all insns.  */
    for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
        if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
            decinfo[i].dec(dc);
            break;
        }
    }
}

/* generate intermediate code for basic block 'tb'.  */
void gen_intermediate_code(CPUMBState *env, struct TranslationBlock *tb)
{
    MicroBlazeCPU *cpu = mb_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint32_t pc_start;
    struct DisasContext ctx;
    struct DisasContext *dc = &ctx;
    uint32_t next_page_start, org_flags;
    target_ulong npc;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    dc->cpu = cpu;
    dc->tb = tb;
    org_flags = dc->synced_flags = dc->tb_flags = tb->flags;

    dc->is_jmp = DISAS_NEXT;
    dc->jmp = 0;
    dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
    if (dc->delayed_branch) {
        dc->jmp = JMP_INDIRECT;
    }
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->cpustate_changed = 0;
    dc->abort_at_next_insn = 0;
    dc->nr_nops = 0;

    if (pc_start & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n", pc_start);
    }

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
#if !SIM_COMPAT
        qemu_log("--------------\n");
        log_cpu_state(CPU(cpu), 0);
#endif
    }

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);
    do
    {
        tcg_gen_insn_start(dc->pc);
        num_insns++;

#if SIM_COMPAT
        if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
            tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
            gen_helper_debug();
        }
#endif

        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
            t_gen_raise_exception(dc, EXCP_DEBUG);
            dc->is_jmp = DISAS_UPDATE;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing.  */
1702            dc->pc += 4;
1703            break;
1704        }

        /* Pretty disas.  */
        LOG_DIS("%8.8x:\t", dc->pc);

        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

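        /* The imm insn sets IMM_FLAG and clears clear_imm, so an
           immediate prefix survives for exactly one following insn.  */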
        dc->clear_imm = 1;
        decode(dc, cpu_ldl_code(env, dc->pc));
        if (dc->clear_imm) {
            dc->tb_flags &= ~IMM_FLAG;
        }
        dc->pc += 4;

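        /* Branch insns set delayed_branch to 2, so the count reaches
           zero right after the delay-slot insn has been translated and
           the branch can finally be resolved.  */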
        if (dc->delayed_branch) {
            dc->delayed_branch--;
            if (!dc->delayed_branch) {
                if (dc->tb_flags & DRTI_FLAG) {
                    do_rti(dc);
                }
                if (dc->tb_flags & DRTB_FLAG) {
                    do_rtb(dc);
                }
                if (dc->tb_flags & DRTE_FLAG) {
                    do_rte(dc);
                }
                /* Clear the delay slot flag.  */
                dc->tb_flags &= ~D_FLAG;
                /* Resolve the branch: indirect targets must go through
                   eval_cond_jmp, direct ones can be chained with
                   goto_tb.  */
                if (dc->jmp == JMP_INDIRECT) {
                    eval_cond_jmp(dc, env_btarget, tcg_const_tl(dc->pc));
                    dc->is_jmp = DISAS_JUMP;
                } else if (dc->jmp == JMP_DIRECT) {
                    t_sync_flags(dc);
                    gen_goto_tb(dc, 0, dc->jmp_pc);
                    dc->is_jmp = DISAS_TB_JUMP;
                } else if (dc->jmp == JMP_DIRECT_CC) {
                    TCGLabel *l1 = gen_new_label();
                    t_sync_flags(dc);
                    /* Conditional jmp: fall through to the not-taken
                       successor (goto_tb slot 1), branch to the taken
                       target (slot 0); both exits can be chained.  */
                    tcg_gen_brcondi_tl(TCG_COND_NE, env_btaken, 0, l1);
                    gen_goto_tb(dc, 1, dc->pc);
                    gen_set_label(l1);
                    gen_goto_tb(dc, 0, dc->jmp_pc);

                    dc->is_jmp = DISAS_TB_JUMP;
                }
                break;
            }
        }
        if (cs->singlestep_enabled) {
            break;
        }
    } while (!dc->is_jmp && !dc->cpustate_changed
             && !tcg_op_buf_full()
             && !singlestep
             && (dc->pc < next_page_start)
             && num_insns < max_insns);
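    /* Note the next_page_start test above: keeping a TB inside one
       guest page keeps page-granular TB invalidation simple, which is
       arguably the main reason for the check.  */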

    npc = dc->pc;
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->tb_flags & D_FLAG) {
            dc->is_jmp = DISAS_UPDATE;
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
            sync_jmpstate(dc);
        } else {
            npc = dc->jmp_pc;
        }
    }

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }
    /* Force an update if the per-tb cpu state has changed.  */
    if (dc->is_jmp == DISAS_NEXT
        && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
        dc->is_jmp = DISAS_UPDATE;
        tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
    }
    t_sync_flags(dc);

    if (unlikely(cs->singlestep_enabled)) {
        TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);

        if (dc->is_jmp != DISAS_JUMP) {
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
        }
        gen_helper_raise_exception(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
    } else {
        switch (dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, npc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* Indicate that the hash table must be used
               to find the next TB.  */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* Nothing more to generate.  */
            break;
        }
    }
    gen_tb_end(tb, num_insns);

    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;

#ifdef DEBUG_DISAS
#if !SIM_COMPAT
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("\n");
#if DISAS_GNU
        log_target_disas(cs, pc_start, dc->pc - pc_start, 0);
#endif
        qemu_log("\nisize=%d osize=%d\n",
                 dc->pc - pc_start, tcg_op_buf_count());
    }
#endif
#endif
    assert(!dc->abort_at_next_insn);
}

void mb_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                       int flags)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    int i;

    if (!env || !f) {
        return;
    }

    cpu_fprintf(f, "IN: PC=%x %s\n",
                env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC]));
    cpu_fprintf(f, "rmsr=%x resr=%x rear=%x debug=%x imm=%x iflags=%x fsr=%x\n",
                env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
                env->debug, env->imm, env->iflags, env->sregs[SR_FSR]);
    cpu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
                env->btaken, env->btarget,
                (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
                (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
                !!(env->sregs[SR_MSR] & MSR_EIP),
                !!(env->sregs[SR_MSR] & MSR_IE));

    for (i = 0; i < 32; i++) {
        cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
        if ((i + 1) % 4 == 0) {
            cpu_fprintf(f, "\n");
        }
    }
    cpu_fprintf(f, "\n\n");
}

MicroBlazeCPU *cpu_mb_init(const char *cpu_model)
{
    MicroBlazeCPU *cpu;

    cpu = MICROBLAZE_CPU(object_new(TYPE_MICROBLAZE_CPU));

    object_property_set_bool(OBJECT(cpu), true, "realized", NULL);

    return cpu;
}

void mb_tcg_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    env_debug = tcg_global_mem_new(cpu_env,
                    offsetof(CPUMBState, debug),
                    "debug0");
    env_iflags = tcg_global_mem_new(cpu_env,
                    offsetof(CPUMBState, iflags),
                    "iflags");
    env_imm = tcg_global_mem_new(cpu_env,
                    offsetof(CPUMBState, imm),
                    "imm");
    env_btarget = tcg_global_mem_new(cpu_env,
                     offsetof(CPUMBState, btarget),
                     "btarget");
    env_btaken = tcg_global_mem_new(cpu_env,
                     offsetof(CPUMBState, btaken),
                     "btaken");
    env_res_addr = tcg_global_mem_new(cpu_env,
                     offsetof(CPUMBState, res_addr),
                     "res_addr");
    env_res_val = tcg_global_mem_new(cpu_env,
                     offsetof(CPUMBState, res_val),
                     "res_val");
    for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
        cpu_R[i] = tcg_global_mem_new(cpu_env,
                          offsetof(CPUMBState, regs[i]),
                          regnames[i]);
    }
    for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
        cpu_SR[i] = tcg_global_mem_new(cpu_env,
                          offsetof(CPUMBState, sregs[i]),
                          special_regnames[i]);
    }
}
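
/*
 * Each global registered above lets generated code touch the matching
 * CPUMBState field in place.  As an illustrative (hypothetical) use:
 *
 *     tcg_gen_mov_tl(cpu_R[1], cpu_R[2]);
 *
 * emits a TCG move whose operands are backed by
 * offsetof(CPUMBState, regs[1]) and regs[2], so the translator needs no
 * explicit load/store of the register file.
 */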

void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
                          target_ulong *data)
{
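    /* tcg_gen_insn_start() in the translation loop records only the
       insn's PC, so the PC is the only piece of state to restore.  */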
    env->sregs[SR_PC] = data[0];
}