qemu/target/openrisc/translate.c
   1/*
   2 * OpenRISC translation
   3 *
   4 * Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com>
   5 *                         Feng Gao <gf91597@gmail.com>
   6 *
   7 * This library is free software; you can redistribute it and/or
   8 * modify it under the terms of the GNU Lesser General Public
   9 * License as published by the Free Software Foundation; either
  10 * version 2 of the License, or (at your option) any later version.
  11 *
  12 * This library is distributed in the hope that it will be useful,
  13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  15 * Lesser General Public License for more details.
  16 *
  17 * You should have received a copy of the GNU Lesser General Public
  18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
  19 */
  20
  21#include "qemu/osdep.h"
  22#include "cpu.h"
  23#include "exec/exec-all.h"
  24#include "disas/disas.h"
  25#include "tcg-op.h"
  26#include "qemu-common.h"
  27#include "qemu/log.h"
  28#include "qemu/bitops.h"
  29#include "exec/cpu_ldst.h"
  30#include "exec/translator.h"
  31
  32#include "exec/helper-proto.h"
  33#include "exec/helper-gen.h"
  34
  35#include "trace-tcg.h"
  36#include "exec/log.h"
  37
  38#define LOG_DIS(str, ...) \
  39    qemu_log_mask(CPU_LOG_TB_IN_ASM, "%08x: " str, dc->pc, ## __VA_ARGS__)
  40
  41/* is_jmp field values */
  42#define DISAS_JUMP    DISAS_TARGET_0 /* only pc was modified dynamically */
  43#define DISAS_UPDATE  DISAS_TARGET_1 /* cpu state was modified dynamically */
  44#define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */
  45
  46typedef struct DisasContext {
  47    TranslationBlock *tb;
  48    target_ulong pc;
  49    uint32_t is_jmp;
  50    uint32_t mem_idx;
  51    uint32_t tb_flags;
  52    uint32_t delayed_branch;
  53    bool singlestep_enabled;
  54} DisasContext;
  55
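/* TCG globals for the architectural state the translator touches
   directly: the GPR file, pc/ppc, the jmp_pc branch latch, the unpacked
   SR flags, the atomic reservation, fpcsr and the MAC accumulator.
   cpu_R0 keeps a handle on the real R0 global so that cpu_R[0] can be
   temporarily replaced with a constant zero (see TB_FLAGS_R0_0 below).  */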
  56static TCGv cpu_sr;
  57static TCGv cpu_R[32];
  58static TCGv cpu_R0;
  59static TCGv cpu_pc;
  60static TCGv jmp_pc;            /* l.jr/l.jalr temp pc */
  61static TCGv cpu_ppc;
  62static TCGv cpu_sr_f;           /* bf/bnf, F flag taken */
  63static TCGv cpu_sr_cy;          /* carry (unsigned overflow) */
  64static TCGv cpu_sr_ov;          /* signed overflow */
  65static TCGv cpu_lock_addr;
  66static TCGv cpu_lock_value;
  67static TCGv_i32 fpcsr;
  68static TCGv_i64 cpu_mac;        /* MACHI:MACLO */
  69static TCGv_i32 cpu_dflag;
  70#include "exec/gen-icount.h"
  71
  72void openrisc_translate_init(void)
  73{
  74    static const char * const regnames[] = {
  75        "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
  76        "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
  77        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
  78        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
  79    };
  80    int i;
  81
  82    cpu_sr = tcg_global_mem_new(cpu_env,
  83                                offsetof(CPUOpenRISCState, sr), "sr");
  84    cpu_dflag = tcg_global_mem_new_i32(cpu_env,
  85                                       offsetof(CPUOpenRISCState, dflag),
  86                                       "dflag");
  87    cpu_pc = tcg_global_mem_new(cpu_env,
  88                                offsetof(CPUOpenRISCState, pc), "pc");
  89    cpu_ppc = tcg_global_mem_new(cpu_env,
  90                                 offsetof(CPUOpenRISCState, ppc), "ppc");
  91    jmp_pc = tcg_global_mem_new(cpu_env,
  92                                offsetof(CPUOpenRISCState, jmp_pc), "jmp_pc");
  93    cpu_sr_f = tcg_global_mem_new(cpu_env,
  94                                  offsetof(CPUOpenRISCState, sr_f), "sr_f");
  95    cpu_sr_cy = tcg_global_mem_new(cpu_env,
  96                                   offsetof(CPUOpenRISCState, sr_cy), "sr_cy");
  97    cpu_sr_ov = tcg_global_mem_new(cpu_env,
  98                                   offsetof(CPUOpenRISCState, sr_ov), "sr_ov");
  99    cpu_lock_addr = tcg_global_mem_new(cpu_env,
 100                                       offsetof(CPUOpenRISCState, lock_addr),
 101                                       "lock_addr");
 102    cpu_lock_value = tcg_global_mem_new(cpu_env,
 103                                        offsetof(CPUOpenRISCState, lock_value),
 104                                        "lock_value");
 105    fpcsr = tcg_global_mem_new_i32(cpu_env,
 106                                   offsetof(CPUOpenRISCState, fpcsr),
 107                                   "fpcsr");
 108    cpu_mac = tcg_global_mem_new_i64(cpu_env,
 109                                     offsetof(CPUOpenRISCState, mac),
 110                                     "mac");
 111    for (i = 0; i < 32; i++) {
 112        cpu_R[i] = tcg_global_mem_new(cpu_env,
 113                                      offsetof(CPUOpenRISCState,
 114                                               shadow_gpr[0][i]),
 115                                      regnames[i]);
 116    }
 117    cpu_R0 = cpu_R[0];
 118}
 119
 120static void gen_exception(DisasContext *dc, unsigned int excp)
 121{
 122    TCGv_i32 tmp = tcg_const_i32(excp);
 123    gen_helper_exception(cpu_env, tmp);
 124    tcg_temp_free_i32(tmp);
 125}
 126
 127static void gen_illegal_exception(DisasContext *dc)
 128{
 129    tcg_gen_movi_tl(cpu_pc, dc->pc);
 130    gen_exception(dc, EXCP_ILLEGAL);
 131    dc->is_jmp = DISAS_UPDATE;
 132}
 133
  134/* not used yet; enable it when we need or64.  */
 135/*#ifdef TARGET_OPENRISC64
 136static void check_ob64s(DisasContext *dc)
 137{
 138    if (!(dc->flags & CPUCFGR_OB64S)) {
 139        gen_illegal_exception(dc);
 140    }
 141}
 142
 143static void check_of64s(DisasContext *dc)
 144{
 145    if (!(dc->flags & CPUCFGR_OF64S)) {
 146        gen_illegal_exception(dc);
 147    }
 148}
 149
 150static void check_ov64s(DisasContext *dc)
 151{
 152    if (!(dc->flags & CPUCFGR_OV64S)) {
 153        gen_illegal_exception(dc);
 154    }
 155}
 156#endif*/
 157
 158/* We're about to write to REG.  On the off-chance that the user is
 159   writing to R0, re-instate the architectural register.  */
 160#define check_r0_write(reg)             \
 161    do {                                \
 162        if (unlikely(reg == 0)) {       \
 163            cpu_R[0] = cpu_R0;          \
 164        }                               \
 165    } while (0)
 166
 167static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
 168{
 169    if (unlikely(dc->singlestep_enabled)) {
 170        return false;
 171    }
 172
 173#ifndef CONFIG_USER_ONLY
 174    return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
 175#else
 176    return true;
 177#endif
 178}
 179
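/* Finish the TB with a jump to DEST.  The new pc is always stored; when
   the destination lies within the same guest page (always true for
   user-only builds) the TB is also chained via goto_tb, otherwise we
   return to the main loop, raising EXCP_DEBUG first if single-stepping.  */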
 180static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
 181{
 182    if (use_goto_tb(dc, dest)) {
 183        tcg_gen_movi_tl(cpu_pc, dest);
 184        tcg_gen_goto_tb(n);
 185        tcg_gen_exit_tb((uintptr_t)dc->tb + n);
 186    } else {
 187        tcg_gen_movi_tl(cpu_pc, dest);
 188        if (dc->singlestep_enabled) {
 189            gen_exception(dc, EXCP_DEBUG);
 190        }
 191        tcg_gen_exit_tb(0);
 192    }
 193}
 194
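/* Decode the jump and branch forms.  The target (selected by SR[F] for
   l.bf/l.bnf) is latched in jmp_pc and delayed_branch is set to 2, so
   that the delay-slot instruction is translated before the branch is
   committed in the main loop.  An l.jal whose target equals its own
   return address (pc + 8) is the PIC "load the pc" idiom and only
   writes the link register.  */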
 195static void gen_jump(DisasContext *dc, int32_t n26, uint32_t reg, uint32_t op0)
 196{
 197    target_ulong tmp_pc = dc->pc + n26 * 4;
 198
 199    switch (op0) {
 200    case 0x00:     /* l.j */
 201        tcg_gen_movi_tl(jmp_pc, tmp_pc);
 202        break;
 203    case 0x01:     /* l.jal */
 204        tcg_gen_movi_tl(cpu_R[9], dc->pc + 8);
 205        /* Optimize jal being used to load the PC for PIC.  */
 206        if (tmp_pc == dc->pc + 8) {
 207            return;
 208        }
 209        tcg_gen_movi_tl(jmp_pc, tmp_pc);
 210        break;
 211    case 0x03:     /* l.bnf */
 212    case 0x04:     /* l.bf  */
 213        {
 214            TCGv t_next = tcg_const_tl(dc->pc + 8);
 215            TCGv t_true = tcg_const_tl(tmp_pc);
 216            TCGv t_zero = tcg_const_tl(0);
 217
 218            tcg_gen_movcond_tl(op0 == 0x03 ? TCG_COND_EQ : TCG_COND_NE,
 219                               jmp_pc, cpu_sr_f, t_zero, t_true, t_next);
 220
 221            tcg_temp_free(t_next);
 222            tcg_temp_free(t_true);
 223            tcg_temp_free(t_zero);
 224        }
 225        break;
 226    case 0x11:     /* l.jr */
 227        tcg_gen_mov_tl(jmp_pc, cpu_R[reg]);
 228        break;
 229    case 0x12:     /* l.jalr */
 230        tcg_gen_movi_tl(cpu_R[9], (dc->pc + 8));
 231        tcg_gen_mov_tl(jmp_pc, cpu_R[reg]);
 232        break;
 233    default:
 234        gen_illegal_exception(dc);
 235        break;
 236    }
 237
 238    dc->delayed_branch = 2;
 239}
 240
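/* With SR[OVE] set, a set carry or overflow flag must raise an
   exception.  These wrappers call the corresponding helper, which tests
   the flag at run time; nothing is emitted when OVE is clear in the
   TB flags.  */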
 241static void gen_ove_cy(DisasContext *dc)
 242{
 243    if (dc->tb_flags & SR_OVE) {
 244        gen_helper_ove_cy(cpu_env);
 245    }
 246}
 247
 248static void gen_ove_ov(DisasContext *dc)
 249{
 250    if (dc->tb_flags & SR_OVE) {
 251        gen_helper_ove_ov(cpu_env);
 252    }
 253}
 254
 255static void gen_ove_cyov(DisasContext *dc)
 256{
 257    if (dc->tb_flags & SR_OVE) {
 258        gen_helper_ove_cyov(cpu_env);
 259    }
 260}
 261
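/* Arithmetic with flag computation.  cpu_sr_cy holds the carry
   (unsigned overflow) flag as 0 or 1, which lets l.addc reuse it
   directly as an addend; cpu_sr_ov records signed overflow in its
   sign bit.  */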
 262static void gen_add(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
 263{
 264    TCGv t0 = tcg_const_tl(0);
 265    TCGv res = tcg_temp_new();
 266
 267    tcg_gen_add2_tl(res, cpu_sr_cy, srca, t0, srcb, t0);
 268    tcg_gen_xor_tl(cpu_sr_ov, srca, srcb);
 269    tcg_gen_xor_tl(t0, res, srcb);
 270    tcg_gen_andc_tl(cpu_sr_ov, t0, cpu_sr_ov);
 271    tcg_temp_free(t0);
 272
 273    tcg_gen_mov_tl(dest, res);
 274    tcg_temp_free(res);
 275
 276    gen_ove_cyov(dc);
 277}
 278
 279static void gen_addc(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
 280{
 281    TCGv t0 = tcg_const_tl(0);
 282    TCGv res = tcg_temp_new();
 283
 284    tcg_gen_add2_tl(res, cpu_sr_cy, srca, t0, cpu_sr_cy, t0);
 285    tcg_gen_add2_tl(res, cpu_sr_cy, res, cpu_sr_cy, srcb, t0);
 286    tcg_gen_xor_tl(cpu_sr_ov, srca, srcb);
 287    tcg_gen_xor_tl(t0, res, srcb);
 288    tcg_gen_andc_tl(cpu_sr_ov, t0, cpu_sr_ov);
 289    tcg_temp_free(t0);
 290
 291    tcg_gen_mov_tl(dest, res);
 292    tcg_temp_free(res);
 293
 294    gen_ove_cyov(dc);
 295}
 296
 297static void gen_sub(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
 298{
 299    TCGv res = tcg_temp_new();
 300
 301    tcg_gen_sub_tl(res, srca, srcb);
 302    tcg_gen_xor_tl(cpu_sr_cy, srca, srcb);
 303    tcg_gen_xor_tl(cpu_sr_ov, res, srcb);
 304    tcg_gen_and_tl(cpu_sr_ov, cpu_sr_ov, cpu_sr_cy);
 305    tcg_gen_setcond_tl(TCG_COND_LTU, cpu_sr_cy, srca, srcb);
 306
 307    tcg_gen_mov_tl(dest, res);
 308    tcg_temp_free(res);
 309
 310    gen_ove_cyov(dc);
 311}
 312
 313static void gen_mul(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
 314{
 315    TCGv t0 = tcg_temp_new();
 316
 317    tcg_gen_muls2_tl(dest, cpu_sr_ov, srca, srcb);
 318    tcg_gen_sari_tl(t0, dest, TARGET_LONG_BITS - 1);
 319    tcg_gen_setcond_tl(TCG_COND_NE, cpu_sr_ov, cpu_sr_ov, t0);
 320    tcg_temp_free(t0);
 321
 322    tcg_gen_neg_tl(cpu_sr_ov, cpu_sr_ov);
 323    gen_ove_ov(dc);
 324}
 325
 326static void gen_mulu(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
 327{
 328    tcg_gen_muls2_tl(dest, cpu_sr_cy, srca, srcb);
 329    tcg_gen_setcondi_tl(TCG_COND_NE, cpu_sr_cy, cpu_sr_cy, 0);
 330
 331    gen_ove_cy(dc);
 332}
 333
 334static void gen_div(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
 335{
 336    TCGv t0 = tcg_temp_new();
 337
 338    tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_sr_ov, srcb, 0);
 339    /* The result of divide-by-zero is undefined.
  340       Suppress the host-side exception by dividing by 1.  */
 341    tcg_gen_or_tl(t0, srcb, cpu_sr_ov);
 342    tcg_gen_div_tl(dest, srca, t0);
 343    tcg_temp_free(t0);
 344
 345    tcg_gen_neg_tl(cpu_sr_ov, cpu_sr_ov);
 346    gen_ove_ov(dc);
 347}
 348
 349static void gen_divu(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
 350{
 351    TCGv t0 = tcg_temp_new();
 352
 353    tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_sr_cy, srcb, 0);
 354    /* The result of divide-by-zero is undefined.
  355       Suppress the host-side exception by dividing by 1.  */
 356    tcg_gen_or_tl(t0, srcb, cpu_sr_cy);
 357    tcg_gen_divu_tl(dest, srca, t0);
 358    tcg_temp_free(t0);
 359
 360    gen_ove_cy(dc);
 361}
 362
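/* l.muld/l.muldu: the full double-width product is written to the
   64-bit MACHI:MACLO pair (cpu_mac).  On a 32-bit target the 64-bit
   product cannot overflow, so the flag is simply cleared.  */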
 363static void gen_muld(DisasContext *dc, TCGv srca, TCGv srcb)
 364{
 365    TCGv_i64 t1 = tcg_temp_new_i64();
 366    TCGv_i64 t2 = tcg_temp_new_i64();
 367
 368    tcg_gen_ext_tl_i64(t1, srca);
 369    tcg_gen_ext_tl_i64(t2, srcb);
 370    if (TARGET_LONG_BITS == 32) {
 371        tcg_gen_mul_i64(cpu_mac, t1, t2);
 372        tcg_gen_movi_tl(cpu_sr_ov, 0);
 373    } else {
 374        TCGv_i64 high = tcg_temp_new_i64();
 375
 376        tcg_gen_muls2_i64(cpu_mac, high, t1, t2);
 377        tcg_gen_sari_i64(t1, cpu_mac, 63);
 378        tcg_gen_setcond_i64(TCG_COND_NE, t1, t1, high);
 379        tcg_temp_free_i64(high);
 380        tcg_gen_trunc_i64_tl(cpu_sr_ov, t1);
 381        tcg_gen_neg_tl(cpu_sr_ov, cpu_sr_ov);
 382
 383        gen_ove_ov(dc);
 384    }
 385    tcg_temp_free_i64(t1);
 386    tcg_temp_free_i64(t2);
 387}
 388
 389static void gen_muldu(DisasContext *dc, TCGv srca, TCGv srcb)
 390{
 391    TCGv_i64 t1 = tcg_temp_new_i64();
 392    TCGv_i64 t2 = tcg_temp_new_i64();
 393
 394    tcg_gen_extu_tl_i64(t1, srca);
 395    tcg_gen_extu_tl_i64(t2, srcb);
 396    if (TARGET_LONG_BITS == 32) {
 397        tcg_gen_mul_i64(cpu_mac, t1, t2);
 398        tcg_gen_movi_tl(cpu_sr_cy, 0);
 399    } else {
 400        TCGv_i64 high = tcg_temp_new_i64();
 401
 402        tcg_gen_mulu2_i64(cpu_mac, high, t1, t2);
 403        tcg_gen_setcondi_i64(TCG_COND_NE, high, high, 0);
 404        tcg_gen_trunc_i64_tl(cpu_sr_cy, high);
 405        tcg_temp_free_i64(high);
 406
 407        gen_ove_cy(dc);
 408    }
 409    tcg_temp_free_i64(t1);
 410    tcg_temp_free_i64(t2);
 411}
 412
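/* Multiply-accumulate: the product of the sign- or zero-extended
   operands is added to (l.mac/l.macu) or subtracted from (l.msb/l.msbu)
   cpu_mac, with the overflow or carry flag derived from the
   accumulation step only.  */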
 413static void gen_mac(DisasContext *dc, TCGv srca, TCGv srcb)
 414{
 415    TCGv_i64 t1 = tcg_temp_new_i64();
 416    TCGv_i64 t2 = tcg_temp_new_i64();
 417
 418    tcg_gen_ext_tl_i64(t1, srca);
 419    tcg_gen_ext_tl_i64(t2, srcb);
 420    tcg_gen_mul_i64(t1, t1, t2);
 421
  422    /* Note that overflow is only computed during the addition stage.  */
 423    tcg_gen_xor_i64(t2, cpu_mac, t1);
 424    tcg_gen_add_i64(cpu_mac, cpu_mac, t1);
 425    tcg_gen_xor_i64(t1, t1, cpu_mac);
 426    tcg_gen_andc_i64(t1, t1, t2);
 427    tcg_temp_free_i64(t2);
 428
 429#if TARGET_LONG_BITS == 32
 430    tcg_gen_extrh_i64_i32(cpu_sr_ov, t1);
 431#else
 432    tcg_gen_mov_i64(cpu_sr_ov, t1);
 433#endif
 434    tcg_temp_free_i64(t1);
 435
 436    gen_ove_ov(dc);
 437}
 438
 439static void gen_macu(DisasContext *dc, TCGv srca, TCGv srcb)
 440{
 441    TCGv_i64 t1 = tcg_temp_new_i64();
 442    TCGv_i64 t2 = tcg_temp_new_i64();
 443
 444    tcg_gen_extu_tl_i64(t1, srca);
 445    tcg_gen_extu_tl_i64(t2, srcb);
 446    tcg_gen_mul_i64(t1, t1, t2);
 447    tcg_temp_free_i64(t2);
 448
  449    /* Note that overflow is only computed during the addition stage.  */
 450    tcg_gen_add_i64(cpu_mac, cpu_mac, t1);
 451    tcg_gen_setcond_i64(TCG_COND_LTU, t1, cpu_mac, t1);
 452    tcg_gen_trunc_i64_tl(cpu_sr_cy, t1);
 453    tcg_temp_free_i64(t1);
 454
 455    gen_ove_cy(dc);
 456}
 457
 458static void gen_msb(DisasContext *dc, TCGv srca, TCGv srcb)
 459{
 460    TCGv_i64 t1 = tcg_temp_new_i64();
 461    TCGv_i64 t2 = tcg_temp_new_i64();
 462
 463    tcg_gen_ext_tl_i64(t1, srca);
 464    tcg_gen_ext_tl_i64(t2, srcb);
 465    tcg_gen_mul_i64(t1, t1, t2);
 466
  467    /* Note that overflow is only computed during the subtraction stage.  */
 468    tcg_gen_xor_i64(t2, cpu_mac, t1);
 469    tcg_gen_sub_i64(cpu_mac, cpu_mac, t1);
 470    tcg_gen_xor_i64(t1, t1, cpu_mac);
 471    tcg_gen_and_i64(t1, t1, t2);
 472    tcg_temp_free_i64(t2);
 473
 474#if TARGET_LONG_BITS == 32
 475    tcg_gen_extrh_i64_i32(cpu_sr_ov, t1);
 476#else
 477    tcg_gen_mov_i64(cpu_sr_ov, t1);
 478#endif
 479    tcg_temp_free_i64(t1);
 480
 481    gen_ove_ov(dc);
 482}
 483
 484static void gen_msbu(DisasContext *dc, TCGv srca, TCGv srcb)
 485{
 486    TCGv_i64 t1 = tcg_temp_new_i64();
 487    TCGv_i64 t2 = tcg_temp_new_i64();
 488
 489    tcg_gen_extu_tl_i64(t1, srca);
 490    tcg_gen_extu_tl_i64(t2, srcb);
 491    tcg_gen_mul_i64(t1, t1, t2);
 492
  493    /* Note that overflow is only computed during the subtraction stage.  */
 494    tcg_gen_setcond_i64(TCG_COND_LTU, t2, cpu_mac, t1);
 495    tcg_gen_sub_i64(cpu_mac, cpu_mac, t1);
 496    tcg_gen_trunc_i64_tl(cpu_sr_cy, t2);
 497    tcg_temp_free_i64(t2);
 498    tcg_temp_free_i64(t1);
 499
 500    gen_ove_cy(dc);
 501}
 502
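/* Load-linked / store-conditional.  l.lwa records the address and the
   loaded value; l.swa then performs a compare-and-swap against that
   recorded value and sets SR[F] on success.  A mismatched address fails
   the store without touching memory.  */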
 503static void gen_lwa(DisasContext *dc, TCGv rd, TCGv ra, int32_t ofs)
 504{
 505    TCGv ea = tcg_temp_new();
 506
 507    tcg_gen_addi_tl(ea, ra, ofs);
 508    tcg_gen_qemu_ld_tl(rd, ea, dc->mem_idx, MO_TEUL);
 509    tcg_gen_mov_tl(cpu_lock_addr, ea);
 510    tcg_gen_mov_tl(cpu_lock_value, rd);
 511    tcg_temp_free(ea);
 512}
 513
 514static void gen_swa(DisasContext *dc, int b, TCGv ra, int32_t ofs)
 515{
 516    TCGv ea, val;
 517    TCGLabel *lab_fail, *lab_done;
 518
 519    ea = tcg_temp_new();
 520    tcg_gen_addi_tl(ea, ra, ofs);
 521
 522    /* For TB_FLAGS_R0_0, the branch below invalidates the temporary assigned
 523       to cpu_R[0].  Since l.swa is quite often immediately followed by a
 524       branch, don't bother reallocating; finish the TB using the "real" R0.
 525       This also takes care of RB input across the branch.  */
 526    cpu_R[0] = cpu_R0;
 527
 528    lab_fail = gen_new_label();
 529    lab_done = gen_new_label();
 530    tcg_gen_brcond_tl(TCG_COND_NE, ea, cpu_lock_addr, lab_fail);
 531    tcg_temp_free(ea);
 532
 533    val = tcg_temp_new();
 534    tcg_gen_atomic_cmpxchg_tl(val, cpu_lock_addr, cpu_lock_value,
 535                              cpu_R[b], dc->mem_idx, MO_TEUL);
 536    tcg_gen_setcond_tl(TCG_COND_EQ, cpu_sr_f, val, cpu_lock_value);
 537    tcg_temp_free(val);
 538
 539    tcg_gen_br(lab_done);
 540
 541    gen_set_label(lab_fail);
 542    tcg_gen_movi_tl(cpu_sr_f, 0);
 543
 544    gen_set_label(lab_done);
 545    tcg_gen_movi_tl(cpu_lock_addr, -1);
 546}
 547
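/* Decode the register-register ALU group (major opcode 0x38): add/sub,
   logic, shifts and rotates, sign/zero extension, cmov, ff1/fl1, and
   the multiply/divide subgroup.  */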
 548static void dec_calc(DisasContext *dc, uint32_t insn)
 549{
 550    uint32_t op0, op1, op2;
 551    uint32_t ra, rb, rd;
 552    op0 = extract32(insn, 0, 4);
 553    op1 = extract32(insn, 8, 2);
 554    op2 = extract32(insn, 6, 2);
 555    ra = extract32(insn, 16, 5);
 556    rb = extract32(insn, 11, 5);
 557    rd = extract32(insn, 21, 5);
 558
 559    switch (op1) {
 560    case 0:
 561        switch (op0) {
 562        case 0x0: /* l.add */
 563            LOG_DIS("l.add r%d, r%d, r%d\n", rd, ra, rb);
 564            gen_add(dc, cpu_R[rd], cpu_R[ra], cpu_R[rb]);
 565            return;
 566
 567        case 0x1: /* l.addc */
 568            LOG_DIS("l.addc r%d, r%d, r%d\n", rd, ra, rb);
 569            gen_addc(dc, cpu_R[rd], cpu_R[ra], cpu_R[rb]);
 570            return;
 571
 572        case 0x2: /* l.sub */
 573            LOG_DIS("l.sub r%d, r%d, r%d\n", rd, ra, rb);
 574            gen_sub(dc, cpu_R[rd], cpu_R[ra], cpu_R[rb]);
 575            return;
 576
 577        case 0x3: /* l.and */
 578            LOG_DIS("l.and r%d, r%d, r%d\n", rd, ra, rb);
 579            tcg_gen_and_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]);
 580            return;
 581
 582        case 0x4: /* l.or */
 583            LOG_DIS("l.or r%d, r%d, r%d\n", rd, ra, rb);
 584            tcg_gen_or_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]);
 585            return;
 586
 587        case 0x5: /* l.xor */
 588            LOG_DIS("l.xor r%d, r%d, r%d\n", rd, ra, rb);
 589            tcg_gen_xor_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]);
 590            return;
 591
 592        case 0x8:
 593            switch (op2) {
 594            case 0: /* l.sll */
 595                LOG_DIS("l.sll r%d, r%d, r%d\n", rd, ra, rb);
 596                tcg_gen_shl_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]);
 597                return;
 598            case 1: /* l.srl */
 599                LOG_DIS("l.srl r%d, r%d, r%d\n", rd, ra, rb);
 600                tcg_gen_shr_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]);
 601                return;
 602            case 2: /* l.sra */
 603                LOG_DIS("l.sra r%d, r%d, r%d\n", rd, ra, rb);
 604                tcg_gen_sar_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]);
 605                return;
 606            case 3: /* l.ror */
 607                LOG_DIS("l.ror r%d, r%d, r%d\n", rd, ra, rb);
 608                tcg_gen_rotr_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]);
 609                return;
 610            }
 611            break;
 612
 613        case 0xc:
 614            switch (op2) {
 615            case 0: /* l.exths */
 616                LOG_DIS("l.exths r%d, r%d\n", rd, ra);
 617                tcg_gen_ext16s_tl(cpu_R[rd], cpu_R[ra]);
 618                return;
 619            case 1: /* l.extbs */
 620                LOG_DIS("l.extbs r%d, r%d\n", rd, ra);
 621                tcg_gen_ext8s_tl(cpu_R[rd], cpu_R[ra]);
 622                return;
 623            case 2: /* l.exthz */
 624                LOG_DIS("l.exthz r%d, r%d\n", rd, ra);
 625                tcg_gen_ext16u_tl(cpu_R[rd], cpu_R[ra]);
 626                return;
 627            case 3: /* l.extbz */
 628                LOG_DIS("l.extbz r%d, r%d\n", rd, ra);
 629                tcg_gen_ext8u_tl(cpu_R[rd], cpu_R[ra]);
 630                return;
 631            }
 632            break;
 633
 634        case 0xd:
 635            switch (op2) {
 636            case 0: /* l.extws */
 637                LOG_DIS("l.extws r%d, r%d\n", rd, ra);
 638                tcg_gen_ext32s_tl(cpu_R[rd], cpu_R[ra]);
 639                return;
 640            case 1: /* l.extwz */
 641                LOG_DIS("l.extwz r%d, r%d\n", rd, ra);
 642                tcg_gen_ext32u_tl(cpu_R[rd], cpu_R[ra]);
 643                return;
 644            }
 645            break;
 646
 647        case 0xe: /* l.cmov */
 648            LOG_DIS("l.cmov r%d, r%d, r%d\n", rd, ra, rb);
 649            {
 650                TCGv zero = tcg_const_tl(0);
 651                tcg_gen_movcond_tl(TCG_COND_NE, cpu_R[rd], cpu_sr_f, zero,
 652                                   cpu_R[ra], cpu_R[rb]);
 653                tcg_temp_free(zero);
 654            }
 655            return;
 656
 657        case 0xf: /* l.ff1 */
 658            LOG_DIS("l.ff1 r%d, r%d, r%d\n", rd, ra, rb);
 659            tcg_gen_ctzi_tl(cpu_R[rd], cpu_R[ra], -1);
 660            tcg_gen_addi_tl(cpu_R[rd], cpu_R[rd], 1);
 661            return;
 662        }
 663        break;
 664
 665    case 1:
 666        switch (op0) {
 667        case 0xf: /* l.fl1 */
 668            LOG_DIS("l.fl1 r%d, r%d, r%d\n", rd, ra, rb);
 669            tcg_gen_clzi_tl(cpu_R[rd], cpu_R[ra], TARGET_LONG_BITS);
 670            tcg_gen_subfi_tl(cpu_R[rd], TARGET_LONG_BITS, cpu_R[rd]);
 671            return;
 672        }
 673        break;
 674
 675    case 2:
 676        break;
 677
 678    case 3:
 679        switch (op0) {
 680        case 0x6: /* l.mul */
 681            LOG_DIS("l.mul r%d, r%d, r%d\n", rd, ra, rb);
 682            gen_mul(dc, cpu_R[rd], cpu_R[ra], cpu_R[rb]);
 683            return;
 684
 685        case 0x7: /* l.muld */
 686            LOG_DIS("l.muld r%d, r%d\n", ra, rb);
 687            gen_muld(dc, cpu_R[ra], cpu_R[rb]);
 688            break;
 689
 690        case 0x9: /* l.div */
 691            LOG_DIS("l.div r%d, r%d, r%d\n", rd, ra, rb);
 692            gen_div(dc, cpu_R[rd], cpu_R[ra], cpu_R[rb]);
 693            return;
 694
 695        case 0xa: /* l.divu */
 696            LOG_DIS("l.divu r%d, r%d, r%d\n", rd, ra, rb);
 697            gen_divu(dc, cpu_R[rd], cpu_R[ra], cpu_R[rb]);
 698            return;
 699
 700        case 0xb: /* l.mulu */
 701            LOG_DIS("l.mulu r%d, r%d, r%d\n", rd, ra, rb);
 702            gen_mulu(dc, cpu_R[rd], cpu_R[ra], cpu_R[rb]);
 703            return;
 704
 705        case 0xc: /* l.muldu */
 706            LOG_DIS("l.muldu r%d, r%d\n", ra, rb);
 707            gen_muldu(dc, cpu_R[ra], cpu_R[rb]);
 708            return;
 709        }
 710        break;
 711    }
 712    gen_illegal_exception(dc);
 713}
 714
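/* Decode everything that is not handled by one of the dedicated
   decoders: jumps and branches, l.rfe, the load/store and
   ALU-with-immediate forms, mfspr/mtspr and the l.cust* reservations.  */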
 715static void dec_misc(DisasContext *dc, uint32_t insn)
 716{
 717    uint32_t op0, op1;
 718    uint32_t ra, rb, rd;
 719    uint32_t L6, K5, K16, K5_11;
 720    int32_t I16, I5_11, N26;
 721    TCGMemOp mop;
 722    TCGv t0;
 723
 724    op0 = extract32(insn, 26, 6);
 725    op1 = extract32(insn, 24, 2);
 726    ra = extract32(insn, 16, 5);
 727    rb = extract32(insn, 11, 5);
 728    rd = extract32(insn, 21, 5);
 729    L6 = extract32(insn, 5, 6);
 730    K5 = extract32(insn, 0, 5);
 731    K16 = extract32(insn, 0, 16);
 732    I16 = (int16_t)K16;
 733    N26 = sextract32(insn, 0, 26);
 734    K5_11 = (extract32(insn, 21, 5) << 11) | extract32(insn, 0, 11);
 735    I5_11 = (int16_t)K5_11;
 736
 737    switch (op0) {
 738    case 0x00:    /* l.j */
 739        LOG_DIS("l.j %d\n", N26);
 740        gen_jump(dc, N26, 0, op0);
 741        break;
 742
 743    case 0x01:    /* l.jal */
 744        LOG_DIS("l.jal %d\n", N26);
 745        gen_jump(dc, N26, 0, op0);
 746        break;
 747
 748    case 0x03:    /* l.bnf */
 749        LOG_DIS("l.bnf %d\n", N26);
 750        gen_jump(dc, N26, 0, op0);
 751        break;
 752
 753    case 0x04:    /* l.bf */
 754        LOG_DIS("l.bf %d\n", N26);
 755        gen_jump(dc, N26, 0, op0);
 756        break;
 757
 758    case 0x05:
 759        switch (op1) {
 760        case 0x01:    /* l.nop */
 761            LOG_DIS("l.nop %d\n", I16);
 762            break;
 763
 764        default:
 765            gen_illegal_exception(dc);
 766            break;
 767        }
 768        break;
 769
 770    case 0x11:    /* l.jr */
 771        LOG_DIS("l.jr r%d\n", rb);
  772        gen_jump(dc, 0, rb, op0);
  773        break;
 774
 775    case 0x12:    /* l.jalr */
 776        LOG_DIS("l.jalr r%d\n", rb);
 777        gen_jump(dc, 0, rb, op0);
 778        break;
 779
 780    case 0x13:    /* l.maci */
 781        LOG_DIS("l.maci r%d, %d\n", ra, I16);
 782        t0 = tcg_const_tl(I16);
 783        gen_mac(dc, cpu_R[ra], t0);
 784        tcg_temp_free(t0);
 785        break;
 786
 787    case 0x09:    /* l.rfe */
 788        LOG_DIS("l.rfe\n");
 789        {
 790#if defined(CONFIG_USER_ONLY)
 791            return;
 792#else
 793            if (dc->mem_idx == MMU_USER_IDX) {
 794                gen_illegal_exception(dc);
 795                return;
 796            }
 797            gen_helper_rfe(cpu_env);
 798            dc->is_jmp = DISAS_UPDATE;
 799#endif
 800        }
 801        break;
 802
 803    case 0x1b: /* l.lwa */
 804        LOG_DIS("l.lwa r%d, r%d, %d\n", rd, ra, I16);
 805        check_r0_write(rd);
 806        gen_lwa(dc, cpu_R[rd], cpu_R[ra], I16);
 807        break;
 808
 809    case 0x1c:    /* l.cust1 */
 810        LOG_DIS("l.cust1\n");
 811        break;
 812
 813    case 0x1d:    /* l.cust2 */
 814        LOG_DIS("l.cust2\n");
 815        break;
 816
 817    case 0x1e:    /* l.cust3 */
 818        LOG_DIS("l.cust3\n");
 819        break;
 820
 821    case 0x1f:    /* l.cust4 */
 822        LOG_DIS("l.cust4\n");
 823        break;
 824
 825    case 0x3c:    /* l.cust5 */
 826        LOG_DIS("l.cust5 r%d, r%d, r%d, %d, %d\n", rd, ra, rb, L6, K5);
 827        break;
 828
 829    case 0x3d:    /* l.cust6 */
 830        LOG_DIS("l.cust6\n");
 831        break;
 832
 833    case 0x3e:    /* l.cust7 */
 834        LOG_DIS("l.cust7\n");
 835        break;
 836
 837    case 0x3f:    /* l.cust8 */
 838        LOG_DIS("l.cust8\n");
 839        break;
 840
  841/* not used yet; enable it when we need or64.  */
 842/*#ifdef TARGET_OPENRISC64
 843    case 0x20:     l.ld
 844        LOG_DIS("l.ld r%d, r%d, %d\n", rd, ra, I16);
 845        check_ob64s(dc);
 846        mop = MO_TEQ;
 847        goto do_load;
 848#endif*/
 849
 850    case 0x21:    /* l.lwz */
 851        LOG_DIS("l.lwz r%d, r%d, %d\n", rd, ra, I16);
 852        mop = MO_TEUL;
 853        goto do_load;
 854
 855    case 0x22:    /* l.lws */
 856        LOG_DIS("l.lws r%d, r%d, %d\n", rd, ra, I16);
 857        mop = MO_TESL;
 858        goto do_load;
 859
 860    case 0x23:    /* l.lbz */
 861        LOG_DIS("l.lbz r%d, r%d, %d\n", rd, ra, I16);
 862        mop = MO_UB;
 863        goto do_load;
 864
 865    case 0x24:    /* l.lbs */
 866        LOG_DIS("l.lbs r%d, r%d, %d\n", rd, ra, I16);
 867        mop = MO_SB;
 868        goto do_load;
 869
 870    case 0x25:    /* l.lhz */
 871        LOG_DIS("l.lhz r%d, r%d, %d\n", rd, ra, I16);
 872        mop = MO_TEUW;
 873        goto do_load;
 874
 875    case 0x26:    /* l.lhs */
 876        LOG_DIS("l.lhs r%d, r%d, %d\n", rd, ra, I16);
 877        mop = MO_TESW;
 878        goto do_load;
 879
 880    do_load:
 881        check_r0_write(rd);
 882        t0 = tcg_temp_new();
 883        tcg_gen_addi_tl(t0, cpu_R[ra], I16);
 884        tcg_gen_qemu_ld_tl(cpu_R[rd], t0, dc->mem_idx, mop);
 885        tcg_temp_free(t0);
 886        break;
 887
 888    case 0x27:    /* l.addi */
 889        LOG_DIS("l.addi r%d, r%d, %d\n", rd, ra, I16);
 890        check_r0_write(rd);
 891        t0 = tcg_const_tl(I16);
 892        gen_add(dc, cpu_R[rd], cpu_R[ra], t0);
 893        tcg_temp_free(t0);
 894        break;
 895
 896    case 0x28:    /* l.addic */
 897        LOG_DIS("l.addic r%d, r%d, %d\n", rd, ra, I16);
 898        check_r0_write(rd);
 899        t0 = tcg_const_tl(I16);
 900        gen_addc(dc, cpu_R[rd], cpu_R[ra], t0);
 901        tcg_temp_free(t0);
 902        break;
 903
 904    case 0x29:    /* l.andi */
 905        LOG_DIS("l.andi r%d, r%d, %d\n", rd, ra, K16);
 906        check_r0_write(rd);
 907        tcg_gen_andi_tl(cpu_R[rd], cpu_R[ra], K16);
 908        break;
 909
 910    case 0x2a:    /* l.ori */
 911        LOG_DIS("l.ori r%d, r%d, %d\n", rd, ra, K16);
 912        check_r0_write(rd);
 913        tcg_gen_ori_tl(cpu_R[rd], cpu_R[ra], K16);
 914        break;
 915
 916    case 0x2b:    /* l.xori */
 917        LOG_DIS("l.xori r%d, r%d, %d\n", rd, ra, I16);
 918        check_r0_write(rd);
 919        tcg_gen_xori_tl(cpu_R[rd], cpu_R[ra], I16);
 920        break;
 921
 922    case 0x2c:    /* l.muli */
 923        LOG_DIS("l.muli r%d, r%d, %d\n", rd, ra, I16);
 924        check_r0_write(rd);
 925        t0 = tcg_const_tl(I16);
 926        gen_mul(dc, cpu_R[rd], cpu_R[ra], t0);
 927        tcg_temp_free(t0);
 928        break;
 929
 930    case 0x2d:    /* l.mfspr */
 931        LOG_DIS("l.mfspr r%d, r%d, %d\n", rd, ra, K16);
 932        check_r0_write(rd);
 933        {
 934#if defined(CONFIG_USER_ONLY)
 935            return;
 936#else
 937            TCGv_i32 ti = tcg_const_i32(K16);
 938            if (dc->mem_idx == MMU_USER_IDX) {
 939                gen_illegal_exception(dc);
 940                return;
 941            }
 942            gen_helper_mfspr(cpu_R[rd], cpu_env, cpu_R[rd], cpu_R[ra], ti);
 943            tcg_temp_free_i32(ti);
 944#endif
 945        }
 946        break;
 947
 948    case 0x30:    /* l.mtspr */
 949        LOG_DIS("l.mtspr r%d, r%d, %d\n", ra, rb, K5_11);
 950        {
 951#if defined(CONFIG_USER_ONLY)
 952            return;
 953#else
 954            TCGv_i32 im = tcg_const_i32(K5_11);
 955            if (dc->mem_idx == MMU_USER_IDX) {
 956                gen_illegal_exception(dc);
 957                return;
 958            }
 959            gen_helper_mtspr(cpu_env, cpu_R[ra], cpu_R[rb], im);
 960            tcg_temp_free_i32(im);
 961#endif
 962        }
 963        break;
 964
 965    case 0x33: /* l.swa */
 966        LOG_DIS("l.swa r%d, r%d, %d\n", ra, rb, I5_11);
 967        gen_swa(dc, rb, cpu_R[ra], I5_11);
 968        break;
 969
  970/* not used yet; enable it when we need or64.  */
 971/*#ifdef TARGET_OPENRISC64
 972    case 0x34:     l.sd
 973        LOG_DIS("l.sd r%d, r%d, %d\n", ra, rb, I5_11);
 974        check_ob64s(dc);
 975        mop = MO_TEQ;
 976        goto do_store;
 977#endif*/
 978
 979    case 0x35:    /* l.sw */
 980        LOG_DIS("l.sw r%d, r%d, %d\n", ra, rb, I5_11);
 981        mop = MO_TEUL;
 982        goto do_store;
 983
 984    case 0x36:    /* l.sb */
 985        LOG_DIS("l.sb r%d, r%d, %d\n", ra, rb, I5_11);
 986        mop = MO_UB;
 987        goto do_store;
 988
 989    case 0x37:    /* l.sh */
 990        LOG_DIS("l.sh r%d, r%d, %d\n", ra, rb, I5_11);
 991        mop = MO_TEUW;
 992        goto do_store;
 993
 994    do_store:
 995        {
 996            TCGv t0 = tcg_temp_new();
 997            tcg_gen_addi_tl(t0, cpu_R[ra], I5_11);
 998            tcg_gen_qemu_st_tl(cpu_R[rb], t0, dc->mem_idx, mop);
 999            tcg_temp_free(t0);
1000        }
1001        break;
1002
1003    default:
1004        gen_illegal_exception(dc);
1005        break;
1006    }
1007}
1008
1009static void dec_mac(DisasContext *dc, uint32_t insn)
1010{
1011    uint32_t op0;
1012    uint32_t ra, rb;
1013    op0 = extract32(insn, 0, 4);
1014    ra = extract32(insn, 16, 5);
1015    rb = extract32(insn, 11, 5);
1016
1017    switch (op0) {
1018    case 0x0001:    /* l.mac */
1019        LOG_DIS("l.mac r%d, r%d\n", ra, rb);
1020        gen_mac(dc, cpu_R[ra], cpu_R[rb]);
1021        break;
1022
1023    case 0x0002:    /* l.msb */
1024        LOG_DIS("l.msb r%d, r%d\n", ra, rb);
1025        gen_msb(dc, cpu_R[ra], cpu_R[rb]);
1026        break;
1027
1028    case 0x0003:    /* l.macu */
1029        LOG_DIS("l.macu r%d, r%d\n", ra, rb);
1030        gen_macu(dc, cpu_R[ra], cpu_R[rb]);
1031        break;
1032
1033    case 0x0004:    /* l.msbu */
1034        LOG_DIS("l.msbu r%d, r%d\n", ra, rb);
1035        gen_msbu(dc, cpu_R[ra], cpu_R[rb]);
1036        break;
1037
1038    default:
1039        gen_illegal_exception(dc);
1040        break;
 1041    }
1042}
1043
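/* Decode the shift/rotate-by-immediate group (major opcode 0x2e); the
   shift amount is masked to the target word size.  */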
1044static void dec_logic(DisasContext *dc, uint32_t insn)
1045{
1046    uint32_t op0;
1047    uint32_t rd, ra, L6, S6;
1048    op0 = extract32(insn, 6, 2);
1049    rd = extract32(insn, 21, 5);
1050    ra = extract32(insn, 16, 5);
1051    L6 = extract32(insn, 0, 6);
1052    S6 = L6 & (TARGET_LONG_BITS - 1);
1053
1054    check_r0_write(rd);
1055    switch (op0) {
1056    case 0x00:    /* l.slli */
1057        LOG_DIS("l.slli r%d, r%d, %d\n", rd, ra, L6);
1058        tcg_gen_shli_tl(cpu_R[rd], cpu_R[ra], S6);
1059        break;
1060
1061    case 0x01:    /* l.srli */
1062        LOG_DIS("l.srli r%d, r%d, %d\n", rd, ra, L6);
1063        tcg_gen_shri_tl(cpu_R[rd], cpu_R[ra], S6);
1064        break;
1065
1066    case 0x02:    /* l.srai */
1067        LOG_DIS("l.srai r%d, r%d, %d\n", rd, ra, L6);
1068        tcg_gen_sari_tl(cpu_R[rd], cpu_R[ra], S6);
1069        break;
1070
1071    case 0x03:    /* l.rori */
1072        LOG_DIS("l.rori r%d, r%d, %d\n", rd, ra, L6);
1073        tcg_gen_rotri_tl(cpu_R[rd], cpu_R[ra], S6);
1074        break;
1075
1076    default:
1077        gen_illegal_exception(dc);
1078        break;
1079    }
1080}
1081
1082static void dec_M(DisasContext *dc, uint32_t insn)
1083{
1084    uint32_t op0;
1085    uint32_t rd;
1086    uint32_t K16;
1087    op0 = extract32(insn, 16, 1);
1088    rd = extract32(insn, 21, 5);
1089    K16 = extract32(insn, 0, 16);
1090
1091    check_r0_write(rd);
1092    switch (op0) {
1093    case 0x0:    /* l.movhi */
1094        LOG_DIS("l.movhi  r%d, %d\n", rd, K16);
1095        tcg_gen_movi_tl(cpu_R[rd], (K16 << 16));
1096        break;
1097
1098    case 0x1:    /* l.macrc */
1099        LOG_DIS("l.macrc  r%d\n", rd);
1100        tcg_gen_trunc_i64_tl(cpu_R[rd], cpu_mac);
1101        tcg_gen_movi_i64(cpu_mac, 0);
1102        break;
1103
1104    default:
1105        gen_illegal_exception(dc);
1106        break;
1107    }
1108}
1109
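/* Decode the register-register set-flag comparisons (l.sf*), which
   leave their result in SR[F] for use by l.bf/l.bnf and l.cmov.  */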
1110static void dec_comp(DisasContext *dc, uint32_t insn)
1111{
1112    uint32_t op0;
1113    uint32_t ra, rb;
1114
1115    op0 = extract32(insn, 21, 5);
1116    ra = extract32(insn, 16, 5);
1117    rb = extract32(insn, 11, 5);
1118
1119    /* unsigned integers  */
1120    tcg_gen_ext32u_tl(cpu_R[ra], cpu_R[ra]);
1121    tcg_gen_ext32u_tl(cpu_R[rb], cpu_R[rb]);
1122
1123    switch (op0) {
1124    case 0x0:    /* l.sfeq */
1125        LOG_DIS("l.sfeq  r%d, r%d\n", ra, rb);
1126        tcg_gen_setcond_tl(TCG_COND_EQ, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
1127        break;
1128
1129    case 0x1:    /* l.sfne */
1130        LOG_DIS("l.sfne  r%d, r%d\n", ra, rb);
1131        tcg_gen_setcond_tl(TCG_COND_NE, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
1132        break;
1133
1134    case 0x2:    /* l.sfgtu */
1135        LOG_DIS("l.sfgtu  r%d, r%d\n", ra, rb);
1136        tcg_gen_setcond_tl(TCG_COND_GTU, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
1137        break;
1138
1139    case 0x3:    /* l.sfgeu */
1140        LOG_DIS("l.sfgeu  r%d, r%d\n", ra, rb);
1141        tcg_gen_setcond_tl(TCG_COND_GEU, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
1142        break;
1143
1144    case 0x4:    /* l.sfltu */
1145        LOG_DIS("l.sfltu  r%d, r%d\n", ra, rb);
1146        tcg_gen_setcond_tl(TCG_COND_LTU, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
1147        break;
1148
1149    case 0x5:    /* l.sfleu */
1150        LOG_DIS("l.sfleu  r%d, r%d\n", ra, rb);
1151        tcg_gen_setcond_tl(TCG_COND_LEU, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
1152        break;
1153
1154    case 0xa:    /* l.sfgts */
1155        LOG_DIS("l.sfgts  r%d, r%d\n", ra, rb);
1156        tcg_gen_setcond_tl(TCG_COND_GT, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
1157        break;
1158
1159    case 0xb:    /* l.sfges */
1160        LOG_DIS("l.sfges  r%d, r%d\n", ra, rb);
1161        tcg_gen_setcond_tl(TCG_COND_GE, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
1162        break;
1163
1164    case 0xc:    /* l.sflts */
1165        LOG_DIS("l.sflts  r%d, r%d\n", ra, rb);
1166        tcg_gen_setcond_tl(TCG_COND_LT, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
1167        break;
1168
1169    case 0xd:    /* l.sfles */
1170        LOG_DIS("l.sfles  r%d, r%d\n", ra, rb);
1171        tcg_gen_setcond_tl(TCG_COND_LE, cpu_sr_f, cpu_R[ra], cpu_R[rb]);
1172        break;
1173
1174    default:
1175        gen_illegal_exception(dc);
1176        break;
1177    }
1178}
1179
1180static void dec_compi(DisasContext *dc, uint32_t insn)
1181{
1182    uint32_t op0, ra;
1183    int32_t I16;
1184
1185    op0 = extract32(insn, 21, 5);
1186    ra = extract32(insn, 16, 5);
1187    I16 = sextract32(insn, 0, 16);
1188
1189    switch (op0) {
1190    case 0x0:    /* l.sfeqi */
1191        LOG_DIS("l.sfeqi  r%d, %d\n", ra, I16);
1192        tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_sr_f, cpu_R[ra], I16);
1193        break;
1194
1195    case 0x1:    /* l.sfnei */
1196        LOG_DIS("l.sfnei  r%d, %d\n", ra, I16);
1197        tcg_gen_setcondi_tl(TCG_COND_NE, cpu_sr_f, cpu_R[ra], I16);
1198        break;
1199
1200    case 0x2:    /* l.sfgtui */
1201        LOG_DIS("l.sfgtui  r%d, %d\n", ra, I16);
1202        tcg_gen_setcondi_tl(TCG_COND_GTU, cpu_sr_f, cpu_R[ra], I16);
1203        break;
1204
1205    case 0x3:    /* l.sfgeui */
1206        LOG_DIS("l.sfgeui  r%d, %d\n", ra, I16);
1207        tcg_gen_setcondi_tl(TCG_COND_GEU, cpu_sr_f, cpu_R[ra], I16);
1208        break;
1209
1210    case 0x4:    /* l.sfltui */
1211        LOG_DIS("l.sfltui  r%d, %d\n", ra, I16);
1212        tcg_gen_setcondi_tl(TCG_COND_LTU, cpu_sr_f, cpu_R[ra], I16);
1213        break;
1214
1215    case 0x5:    /* l.sfleui */
1216        LOG_DIS("l.sfleui  r%d, %d\n", ra, I16);
1217        tcg_gen_setcondi_tl(TCG_COND_LEU, cpu_sr_f, cpu_R[ra], I16);
1218        break;
1219
1220    case 0xa:    /* l.sfgtsi */
1221        LOG_DIS("l.sfgtsi  r%d, %d\n", ra, I16);
1222        tcg_gen_setcondi_tl(TCG_COND_GT, cpu_sr_f, cpu_R[ra], I16);
1223        break;
1224
1225    case 0xb:    /* l.sfgesi */
1226        LOG_DIS("l.sfgesi  r%d, %d\n", ra, I16);
1227        tcg_gen_setcondi_tl(TCG_COND_GE, cpu_sr_f, cpu_R[ra], I16);
1228        break;
1229
1230    case 0xc:    /* l.sfltsi */
1231        LOG_DIS("l.sfltsi  r%d, %d\n", ra, I16);
1232        tcg_gen_setcondi_tl(TCG_COND_LT, cpu_sr_f, cpu_R[ra], I16);
1233        break;
1234
1235    case 0xd:    /* l.sflesi */
1236        LOG_DIS("l.sflesi  r%d, %d\n", ra, I16);
1237        tcg_gen_setcondi_tl(TCG_COND_LE, cpu_sr_f, cpu_R[ra], I16);
1238        break;
1239
1240    default:
1241        gen_illegal_exception(dc);
1242        break;
1243    }
1244}
1245
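/* Decode l.sys, l.trap and the synchronization instructions.  Only
   l.msync emits a memory barrier; l.csync and l.psync are no-ops
   under TCG.  */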
1246static void dec_sys(DisasContext *dc, uint32_t insn)
1247{
1248    uint32_t op0;
1249    uint32_t K16;
1250
1251    op0 = extract32(insn, 16, 10);
1252    K16 = extract32(insn, 0, 16);
1253
1254    switch (op0) {
1255    case 0x000:    /* l.sys */
1256        LOG_DIS("l.sys %d\n", K16);
1257        tcg_gen_movi_tl(cpu_pc, dc->pc);
1258        gen_exception(dc, EXCP_SYSCALL);
1259        dc->is_jmp = DISAS_UPDATE;
1260        break;
1261
1262    case 0x100:    /* l.trap */
1263        LOG_DIS("l.trap %d\n", K16);
1264        tcg_gen_movi_tl(cpu_pc, dc->pc);
1265        gen_exception(dc, EXCP_TRAP);
1266        break;
1267
1268    case 0x300:    /* l.csync */
1269        LOG_DIS("l.csync\n");
1270        break;
1271
1272    case 0x200:    /* l.msync */
1273        LOG_DIS("l.msync\n");
1274        tcg_gen_mb(TCG_MO_ALL);
1275        break;
1276
1277    case 0x270:    /* l.psync */
1278        LOG_DIS("l.psync\n");
1279        break;
1280
1281    default:
1282        gen_illegal_exception(dc);
1283        break;
1284    }
1285}
1286
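/* Decode the single-precision floating-point group (major opcode 0x32).
   The double-precision forms remain commented out pending or64 support.  */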
1287static void dec_float(DisasContext *dc, uint32_t insn)
1288{
1289    uint32_t op0;
1290    uint32_t ra, rb, rd;
1291    op0 = extract32(insn, 0, 8);
1292    ra = extract32(insn, 16, 5);
1293    rb = extract32(insn, 11, 5);
1294    rd = extract32(insn, 21, 5);
1295
1296    switch (op0) {
1297    case 0x00:    /* lf.add.s */
1298        LOG_DIS("lf.add.s r%d, r%d, r%d\n", rd, ra, rb);
1299        check_r0_write(rd);
1300        gen_helper_float_add_s(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
1301        break;
1302
1303    case 0x01:    /* lf.sub.s */
1304        LOG_DIS("lf.sub.s r%d, r%d, r%d\n", rd, ra, rb);
1305        check_r0_write(rd);
1306        gen_helper_float_sub_s(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
1307        break;
1308
1309    case 0x02:    /* lf.mul.s */
1310        LOG_DIS("lf.mul.s r%d, r%d, r%d\n", rd, ra, rb);
1311        check_r0_write(rd);
1312        gen_helper_float_mul_s(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
1313        break;
1314
1315    case 0x03:    /* lf.div.s */
1316        LOG_DIS("lf.div.s r%d, r%d, r%d\n", rd, ra, rb);
1317        check_r0_write(rd);
1318        gen_helper_float_div_s(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
1319        break;
1320
1321    case 0x04:    /* lf.itof.s */
1322        LOG_DIS("lf.itof r%d, r%d\n", rd, ra);
1323        check_r0_write(rd);
1324        gen_helper_itofs(cpu_R[rd], cpu_env, cpu_R[ra]);
1325        break;
1326
1327    case 0x05:    /* lf.ftoi.s */
1328        LOG_DIS("lf.ftoi r%d, r%d\n", rd, ra);
1329        check_r0_write(rd);
1330        gen_helper_ftois(cpu_R[rd], cpu_env, cpu_R[ra]);
1331        break;
1332
1333    case 0x06:    /* lf.rem.s */
1334        LOG_DIS("lf.rem.s r%d, r%d, r%d\n", rd, ra, rb);
1335        check_r0_write(rd);
1336        gen_helper_float_rem_s(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
1337        break;
1338
1339    case 0x07:    /* lf.madd.s */
1340        LOG_DIS("lf.madd.s r%d, r%d, r%d\n", rd, ra, rb);
1341        check_r0_write(rd);
1342        gen_helper_float_madd_s(cpu_R[rd], cpu_env, cpu_R[rd],
1343                                cpu_R[ra], cpu_R[rb]);
1344        break;
1345
1346    case 0x08:    /* lf.sfeq.s */
1347        LOG_DIS("lf.sfeq.s r%d, r%d\n", ra, rb);
1348        gen_helper_float_eq_s(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
1349        break;
1350
1351    case 0x09:    /* lf.sfne.s */
1352        LOG_DIS("lf.sfne.s r%d, r%d\n", ra, rb);
1353        gen_helper_float_ne_s(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
1354        break;
1355
1356    case 0x0a:    /* lf.sfgt.s */
1357        LOG_DIS("lf.sfgt.s r%d, r%d\n", ra, rb);
1358        gen_helper_float_gt_s(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
1359        break;
1360
1361    case 0x0b:    /* lf.sfge.s */
1362        LOG_DIS("lf.sfge.s r%d, r%d\n", ra, rb);
1363        gen_helper_float_ge_s(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
1364        break;
1365
1366    case 0x0c:    /* lf.sflt.s */
1367        LOG_DIS("lf.sflt.s r%d, r%d\n", ra, rb);
1368        gen_helper_float_lt_s(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
1369        break;
1370
1371    case 0x0d:    /* lf.sfle.s */
1372        LOG_DIS("lf.sfle.s r%d, r%d\n", ra, rb);
1373        gen_helper_float_le_s(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
1374        break;
1375
 1376/* not used yet; enable it when we need or64.  */
1377/*#ifdef TARGET_OPENRISC64
1378    case 0x10:     lf.add.d
1379        LOG_DIS("lf.add.d r%d, r%d, r%d\n", rd, ra, rb);
1380        check_of64s(dc);
1381        check_r0_write(rd);
1382        gen_helper_float_add_d(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
1383        break;
1384
1385    case 0x11:     lf.sub.d
1386        LOG_DIS("lf.sub.d r%d, r%d, r%d\n", rd, ra, rb);
1387        check_of64s(dc);
1388        check_r0_write(rd);
1389        gen_helper_float_sub_d(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
1390        break;
1391
1392    case 0x12:     lf.mul.d
1393        LOG_DIS("lf.mul.d r%d, r%d, r%d\n", rd, ra, rb);
1394        check_of64s(dc);
1395        check_r0_write(rd);
1396        gen_helper_float_mul_d(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
1397        break;
1398
1399    case 0x13:     lf.div.d
1400        LOG_DIS("lf.div.d r%d, r%d, r%d\n", rd, ra, rb);
1401        check_of64s(dc);
1402        check_r0_write(rd);
1403        gen_helper_float_div_d(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
1404        break;
1405
1406    case 0x14:     lf.itof.d
1407        LOG_DIS("lf.itof r%d, r%d\n", rd, ra);
1408        check_of64s(dc);
1409        check_r0_write(rd);
1410        gen_helper_itofd(cpu_R[rd], cpu_env, cpu_R[ra]);
1411        break;
1412
1413    case 0x15:     lf.ftoi.d
1414        LOG_DIS("lf.ftoi r%d, r%d\n", rd, ra);
1415        check_of64s(dc);
1416        check_r0_write(rd);
1417        gen_helper_ftoid(cpu_R[rd], cpu_env, cpu_R[ra]);
1418        break;
1419
1420    case 0x16:     lf.rem.d
1421        LOG_DIS("lf.rem.d r%d, r%d, r%d\n", rd, ra, rb);
1422        check_of64s(dc);
1423        check_r0_write(rd);
1424        gen_helper_float_rem_d(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
1425        break;
1426
1427    case 0x17:     lf.madd.d
1428        LOG_DIS("lf.madd.d r%d, r%d, r%d\n", rd, ra, rb);
1429        check_of64s(dc);
1430        check_r0_write(rd);
1431        gen_helper_float_madd_d(cpu_R[rd], cpu_env, cpu_R[rd],
1432                                cpu_R[ra], cpu_R[rb]);
1433        break;
1434
1435    case 0x18:     lf.sfeq.d
1436        LOG_DIS("lf.sfeq.d r%d, r%d\n", ra, rb);
1437        check_of64s(dc);
1438        gen_helper_float_eq_d(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
1439        break;
1440
1441    case 0x1a:     lf.sfgt.d
1442        LOG_DIS("lf.sfgt.d r%d, r%d\n", ra, rb);
1443        check_of64s(dc);
1444        gen_helper_float_gt_d(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
1445        break;
1446
1447    case 0x1b:     lf.sfge.d
1448        LOG_DIS("lf.sfge.d r%d, r%d\n", ra, rb);
1449        check_of64s(dc);
1450        gen_helper_float_ge_d(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
1451        break;
1452
1453    case 0x19:     lf.sfne.d
1454        LOG_DIS("lf.sfne.d r%d, r%d\n", ra, rb);
1455        check_of64s(dc);
1456        gen_helper_float_ne_d(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
1457        break;
1458
1459    case 0x1c:     lf.sflt.d
1460        LOG_DIS("lf.sflt.d r%d, r%d\n", ra, rb);
1461        check_of64s(dc);
1462        gen_helper_float_lt_d(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
1463        break;
1464
1465    case 0x1d:     lf.sfle.d
1466        LOG_DIS("lf.sfle.d r%d, r%d\n", ra, rb);
1467        check_of64s(dc);
1468        gen_helper_float_le_d(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]);
1469        break;
1470#endif*/
1471
1472    default:
1473        gen_illegal_exception(dc);
1474        break;
1475    }
1476}
1477
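/* Fetch one instruction and dispatch on the 6-bit major opcode.  Groups
   without a dedicated decoder fall through to dec_misc.  */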
1478static void disas_openrisc_insn(DisasContext *dc, OpenRISCCPU *cpu)
1479{
1480    uint32_t op0;
1481    uint32_t insn;
1482    insn = cpu_ldl_code(&cpu->env, dc->pc);
1483    op0 = extract32(insn, 26, 6);
1484
1485    switch (op0) {
1486    case 0x06:
1487        dec_M(dc, insn);
1488        break;
1489
1490    case 0x08:
1491        dec_sys(dc, insn);
1492        break;
1493
1494    case 0x2e:
1495        dec_logic(dc, insn);
1496        break;
1497
1498    case 0x2f:
1499        dec_compi(dc, insn);
1500        break;
1501
1502    case 0x31:
1503        dec_mac(dc, insn);
1504        break;
1505
1506    case 0x32:
1507        dec_float(dc, insn);
1508        break;
1509
1510    case 0x38:
1511        dec_calc(dc, insn);
1512        break;
1513
1514    case 0x39:
1515        dec_comp(dc, insn);
1516        break;
1517
1518    default:
1519        dec_misc(dc, insn);
1520        break;
1521    }
1522}
1523
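/* Translate one block of guest code.  The loop below emits one
   insn_start per instruction, recording the pc, the delay-slot flag and
   a not-first-insn marker that restore_state_to_opc uses to rebuild ppc,
   and stops at page boundaries, the TB size limit, breakpoints, or once
   a branch and its delay slot have both been emitted.  */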
1524void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
1525{
1526    CPUOpenRISCState *env = cs->env_ptr;
1527    OpenRISCCPU *cpu = openrisc_env_get_cpu(env);
1528    struct DisasContext ctx, *dc = &ctx;
1529    uint32_t pc_start;
1530    uint32_t next_page_start;
1531    int num_insns;
1532    int max_insns;
1533
1534    pc_start = tb->pc;
1535    dc->tb = tb;
1536
1537    dc->is_jmp = DISAS_NEXT;
1538    dc->pc = pc_start;
1539    dc->mem_idx = cpu_mmu_index(&cpu->env, false);
1540    dc->tb_flags = tb->flags;
1541    dc->delayed_branch = (dc->tb_flags & TB_FLAGS_DFLAG) != 0;
1542    dc->singlestep_enabled = cs->singlestep_enabled;
1543
1544    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
1545    num_insns = 0;
1546    max_insns = tb_cflags(tb) & CF_COUNT_MASK;
1547
1548    if (max_insns == 0) {
1549        max_insns = CF_COUNT_MASK;
1550    }
1551    if (max_insns > TCG_MAX_INSNS) {
1552        max_insns = TCG_MAX_INSNS;
1553    }
1554
1555    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
1556        && qemu_log_in_addr_range(pc_start)) {
1557        qemu_log_lock();
1558        qemu_log("----------------\n");
1559        qemu_log("IN: %s\n", lookup_symbol(pc_start));
1560    }
1561
1562    gen_tb_start(tb);
1563
1564    /* Allow the TCG optimizer to see that R0 == 0,
1565       when it's true, which is the common case.  */
1566    if (dc->tb_flags & TB_FLAGS_R0_0) {
1567        cpu_R[0] = tcg_const_tl(0);
1568    } else {
1569        cpu_R[0] = cpu_R0;
1570    }
1571
1572    do {
1573        tcg_gen_insn_start(dc->pc, (dc->delayed_branch ? 1 : 0)
1574                           | (num_insns ? 2 : 0));
1575        num_insns++;
1576
1577        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
1578            tcg_gen_movi_tl(cpu_pc, dc->pc);
1579            gen_exception(dc, EXCP_DEBUG);
1580            dc->is_jmp = DISAS_UPDATE;
1581            /* The address covered by the breakpoint must be included in
 1582               [tb->pc, tb->pc + tb->size) in order for it to be
1583               properly cleared -- thus we increment the PC here so that
1584               the logic setting tb->size below does the right thing.  */
1585            dc->pc += 4;
1586            break;
1587        }
1588
1589        if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
1590            gen_io_start();
1591        }
1592        disas_openrisc_insn(dc, cpu);
1593        dc->pc = dc->pc + 4;
1594
1595        /* delay slot */
1596        if (dc->delayed_branch) {
1597            dc->delayed_branch--;
1598            if (!dc->delayed_branch) {
1599                tcg_gen_mov_tl(cpu_pc, jmp_pc);
1600                tcg_gen_discard_tl(jmp_pc);
1601                dc->is_jmp = DISAS_UPDATE;
1602                break;
1603            }
1604        }
1605    } while (!dc->is_jmp
1606             && !tcg_op_buf_full()
1607             && !cs->singlestep_enabled
1608             && !singlestep
1609             && (dc->pc < next_page_start)
1610             && num_insns < max_insns);
1611
1612    if (tb_cflags(tb) & CF_LAST_IO) {
1613        gen_io_end();
1614    }
1615
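    /* If the delay-slot state at the end of the TB differs from what the
       TB flags recorded, write it back so the next TB lookup sees the
       correct dflag.  */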
1616    if ((dc->tb_flags & TB_FLAGS_DFLAG ? 1 : 0) != (dc->delayed_branch != 0)) {
1617        tcg_gen_movi_i32(cpu_dflag, dc->delayed_branch != 0);
1618    }
1619
1620    tcg_gen_movi_tl(cpu_ppc, dc->pc - 4);
1621    if (dc->is_jmp == DISAS_NEXT) {
1622        dc->is_jmp = DISAS_UPDATE;
1623        tcg_gen_movi_tl(cpu_pc, dc->pc);
1624    }
1625    if (unlikely(cs->singlestep_enabled)) {
1626        gen_exception(dc, EXCP_DEBUG);
1627    } else {
1628        switch (dc->is_jmp) {
1629        case DISAS_NEXT:
1630            gen_goto_tb(dc, 0, dc->pc);
1631            break;
1632        default:
1633        case DISAS_JUMP:
1634            break;
1635        case DISAS_UPDATE:
1636            /* indicate that the hash table must be used
1637               to find the next TB */
1638            tcg_gen_exit_tb(0);
1639            break;
1640        case DISAS_TB_JUMP:
1641            /* nothing more to generate */
1642            break;
1643        }
1644    }
1645
1646    gen_tb_end(tb, num_insns);
1647
1648    tb->size = dc->pc - pc_start;
1649    tb->icount = num_insns;
1650
1651    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
1652        && qemu_log_in_addr_range(pc_start)) {
1653        log_target_disas(cs, pc_start, tb->size);
1654        qemu_log("\n");
1655        qemu_log_unlock();
1656    }
1657}
1658
1659void openrisc_cpu_dump_state(CPUState *cs, FILE *f,
1660                             fprintf_function cpu_fprintf,
1661                             int flags)
1662{
1663    OpenRISCCPU *cpu = OPENRISC_CPU(cs);
1664    CPUOpenRISCState *env = &cpu->env;
1665    int i;
1666
1667    cpu_fprintf(f, "PC=%08x\n", env->pc);
1668    for (i = 0; i < 32; ++i) {
1669        cpu_fprintf(f, "R%02d=%08x%c", i, cpu_get_gpr(env, i),
1670                    (i % 4) == 3 ? '\n' : ' ');
1671    }
1672}
1673
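/* Rebuild pc, dflag and (when the second word says it is valid) ppc from
   the values recorded by tcg_gen_insn_start in the translation loop.  */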
1674void restore_state_to_opc(CPUOpenRISCState *env, TranslationBlock *tb,
1675                          target_ulong *data)
1676{
1677    env->pc = data[0];
1678    env->dflag = data[1] & 1;
1679    if (data[1] & 2) {
1680        env->ppc = env->pc - 4;
1681    }
1682}
1683