qemu/target/riscv/translate.c
/*
 * RISC-V emulation for qemu: main translation routines.
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "tcg-op.h"
#include "disas/disas.h"
#include "exec/cpu_ldst.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "exec/translator.h"
#include "exec/log.h"

#include "instmap.h"

/* global register indices */
static TCGv cpu_gpr[32], cpu_pc;
static TCGv_i64 cpu_fpr[32]; /* assume F and D extensions */
static TCGv load_res;
static TCGv load_val;

#include "exec/gen-icount.h"

typedef struct DisasContext {
    DisasContextBase base;
    /* pc_succ_insn points to the instruction following base.pc_next */
    target_ulong pc_succ_insn;
    uint32_t opcode;
    uint32_t flags;
    uint32_t mem_idx;
    /* Remember the rounding mode encoded in the previous fp instruction,
       which we have already installed into env->fp_status.  Or -1 for
       no previous fp instruction.  Note that we exit the TB when writing
       to any system register, which includes CSR_FRM, so we do not have
       to reset this known value.  */
    int frm;
} DisasContext;

/* convert riscv funct3 to qemu memop for load/store */
static const int tcg_memop_lookup[8] = {
    [0 ... 7] = -1,
    [0] = MO_SB,
    [1] = MO_TESW,
    [2] = MO_TESL,
    [4] = MO_UB,
    [5] = MO_TEUW,
#ifdef TARGET_RISCV64
    [3] = MO_TEQ,
    [6] = MO_TEUL,
#endif
};

#ifdef TARGET_RISCV64
#define CASE_OP_32_64(X) case X: case glue(X, W)
#else
#define CASE_OP_32_64(X) case X
#endif

static void generate_exception(DisasContext *ctx, int excp)
{
    tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
    TCGv_i32 helper_tmp = tcg_const_i32(excp);
    gen_helper_raise_exception(cpu_env, helper_tmp);
    tcg_temp_free_i32(helper_tmp);
    ctx->base.is_jmp = DISAS_NORETURN;
}

static void generate_exception_mbadaddr(DisasContext *ctx, int excp)
{
    tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
    tcg_gen_st_tl(cpu_pc, cpu_env, offsetof(CPURISCVState, badaddr));
    TCGv_i32 helper_tmp = tcg_const_i32(excp);
    gen_helper_raise_exception(cpu_env, helper_tmp);
    tcg_temp_free_i32(helper_tmp);
    ctx->base.is_jmp = DISAS_NORETURN;
}

static void gen_exception_debug(void)
{
    TCGv_i32 helper_tmp = tcg_const_i32(EXCP_DEBUG);
    gen_helper_raise_exception(cpu_env, helper_tmp);
    tcg_temp_free_i32(helper_tmp);
}

static void gen_exception_illegal(DisasContext *ctx)
{
    generate_exception(ctx, RISCV_EXCP_ILLEGAL_INST);
}

static void gen_exception_inst_addr_mis(DisasContext *ctx)
{
    generate_exception_mbadaddr(ctx, RISCV_EXCP_INST_ADDR_MIS);
}

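/* Direct TB chaining is used only when we are not single-stepping and, for
 * system emulation, when the destination lies on the same guest page as the
 * current TB; otherwise we fall back to an indirect lookup.
 */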
static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
{
    if (unlikely(ctx->base.singlestep_enabled)) {
        return false;
    }

#ifndef CONFIG_USER_ONLY
    return (ctx->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
{
    if (use_goto_tb(ctx, dest)) {
        /* chaining is only allowed when the jump is to the same page */
        tcg_gen_goto_tb(n);
        tcg_gen_movi_tl(cpu_pc, dest);
        tcg_gen_exit_tb(ctx->base.tb, n);
    } else {
        tcg_gen_movi_tl(cpu_pc, dest);
        if (ctx->base.singlestep_enabled) {
            gen_exception_debug();
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
    }
}

/* Wrapper for getting reg values - need to check if reg is zero since
 * cpu_gpr[0] is not actually allocated
 */
static inline void gen_get_gpr(TCGv t, int reg_num)
{
    if (reg_num == 0) {
        tcg_gen_movi_tl(t, 0);
    } else {
        tcg_gen_mov_tl(t, cpu_gpr[reg_num]);
    }
}

/* Wrapper for setting reg values - need to check if reg is zero since
 * cpu_gpr[0] is not actually allocated. This is more for safety purposes,
 * since we usually avoid calling the OP_TYPE_gen function if we see a write to
 * $zero
 */
static inline void gen_set_gpr(int reg_num_dst, TCGv t)
{
    if (reg_num_dst != 0) {
        tcg_gen_mov_tl(cpu_gpr[reg_num_dst], t);
    }
}

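/* High half of a signed x unsigned multiply: perform an unsigned widening
 * multiply, then subtract arg2 from the high half whenever arg1 is negative
 * (the arithmetic shift of arg1 by XLEN-1 yields an all-ones mask in that
 * case, selecting arg2; otherwise it selects 0).
 */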
static void gen_mulhsu(TCGv ret, TCGv arg1, TCGv arg2)
{
    TCGv rl = tcg_temp_new();
    TCGv rh = tcg_temp_new();

    tcg_gen_mulu2_tl(rl, rh, arg1, arg2);
    /* fix up the high half when arg1 is negative */
    tcg_gen_sari_tl(rl, arg1, TARGET_LONG_BITS - 1);
    tcg_gen_and_tl(rl, rl, arg2);
    tcg_gen_sub_tl(ret, rh, rl);

    tcg_temp_free(rl);
    tcg_temp_free(rh);
}

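/* Floating-point sign injection (FSGNJ/FSGNJN/FSGNJX), selected by rm.
 * 'min' is the sign-bit mask for the operand width (INT32_MIN for single,
 * INT64_MIN for double) and also selects how many magnitude bits the deposit
 * copies from rs1; the sign (and any NaN-boxing bits) come from the second
 * operand.  rs1 == rs2 is special-cased as FMOV/FNEG/FABS.
 */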
static void gen_fsgnj(DisasContext *ctx, uint32_t rd, uint32_t rs1,
    uint32_t rs2, int rm, uint64_t min)
{
    switch (rm) {
    case 0: /* fsgnj */
        if (rs1 == rs2) { /* FMOV */
            tcg_gen_mov_i64(cpu_fpr[rd], cpu_fpr[rs1]);
        } else {
            tcg_gen_deposit_i64(cpu_fpr[rd], cpu_fpr[rs2], cpu_fpr[rs1],
                                0, min == INT32_MIN ? 31 : 63);
        }
        break;
    case 1: /* fsgnjn */
        if (rs1 == rs2) { /* FNEG */
            tcg_gen_xori_i64(cpu_fpr[rd], cpu_fpr[rs1], min);
        } else {
            TCGv_i64 t0 = tcg_temp_new_i64();
            tcg_gen_not_i64(t0, cpu_fpr[rs2]);
            tcg_gen_deposit_i64(cpu_fpr[rd], t0, cpu_fpr[rs1],
                                0, min == INT32_MIN ? 31 : 63);
            tcg_temp_free_i64(t0);
        }
        break;
    case 2: /* fsgnjx */
        if (rs1 == rs2) { /* FABS */
            tcg_gen_andi_i64(cpu_fpr[rd], cpu_fpr[rs1], ~min);
        } else {
            TCGv_i64 t0 = tcg_temp_new_i64();
            tcg_gen_andi_i64(t0, cpu_fpr[rs2], min);
            tcg_gen_xor_i64(cpu_fpr[rd], cpu_fpr[rs1], t0);
            tcg_temp_free_i64(t0);
        }
        break;
    default:
        gen_exception_illegal(ctx);
    }
}

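/* Register-register integer operations (base ISA plus the M extension).
 * Division and remainder are open-coded with movcond fixups so that the
 * divide-by-zero and signed-overflow results required by the ISA are
 * produced without issuing a trapping host division.
 */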
static void gen_arith(DisasContext *ctx, uint32_t opc, int rd, int rs1,
        int rs2)
{
    TCGv source1, source2, cond1, cond2, zeroreg, resultopt1;
    source1 = tcg_temp_new();
    source2 = tcg_temp_new();
    gen_get_gpr(source1, rs1);
    gen_get_gpr(source2, rs2);

    switch (opc) {
    CASE_OP_32_64(OPC_RISC_ADD):
        tcg_gen_add_tl(source1, source1, source2);
        break;
    CASE_OP_32_64(OPC_RISC_SUB):
        tcg_gen_sub_tl(source1, source1, source2);
        break;
#if defined(TARGET_RISCV64)
    case OPC_RISC_SLLW:
        tcg_gen_andi_tl(source2, source2, 0x1F);
        tcg_gen_shl_tl(source1, source1, source2);
        break;
#endif
    case OPC_RISC_SLL:
        tcg_gen_andi_tl(source2, source2, TARGET_LONG_BITS - 1);
        tcg_gen_shl_tl(source1, source1, source2);
        break;
    case OPC_RISC_SLT:
        tcg_gen_setcond_tl(TCG_COND_LT, source1, source1, source2);
        break;
    case OPC_RISC_SLTU:
        tcg_gen_setcond_tl(TCG_COND_LTU, source1, source1, source2);
        break;
    case OPC_RISC_XOR:
        tcg_gen_xor_tl(source1, source1, source2);
        break;
#if defined(TARGET_RISCV64)
    case OPC_RISC_SRLW:
        /* clear upper 32 */
        tcg_gen_ext32u_tl(source1, source1);
        tcg_gen_andi_tl(source2, source2, 0x1F);
        tcg_gen_shr_tl(source1, source1, source2);
        break;
#endif
    case OPC_RISC_SRL:
        tcg_gen_andi_tl(source2, source2, TARGET_LONG_BITS - 1);
        tcg_gen_shr_tl(source1, source1, source2);
        break;
#if defined(TARGET_RISCV64)
    case OPC_RISC_SRAW:
        /* Sign-extend the low 32 bits first so the full-width arithmetic
           shift below behaves like a 32-bit one. */
        tcg_gen_ext32s_tl(source1, source1);
        tcg_gen_andi_tl(source2, source2, 0x1F);
        tcg_gen_sar_tl(source1, source1, source2);
        break;
#endif
    case OPC_RISC_SRA:
        tcg_gen_andi_tl(source2, source2, TARGET_LONG_BITS - 1);
        tcg_gen_sar_tl(source1, source1, source2);
        break;
    case OPC_RISC_OR:
        tcg_gen_or_tl(source1, source1, source2);
        break;
    case OPC_RISC_AND:
        tcg_gen_and_tl(source1, source1, source2);
        break;
    CASE_OP_32_64(OPC_RISC_MUL):
        tcg_gen_mul_tl(source1, source1, source2);
        break;
    case OPC_RISC_MULH:
        tcg_gen_muls2_tl(source2, source1, source1, source2);
        break;
    case OPC_RISC_MULHSU:
        gen_mulhsu(source1, source1, source2);
        break;
    case OPC_RISC_MULHU:
        tcg_gen_mulu2_tl(source2, source1, source1, source2);
        break;
#if defined(TARGET_RISCV64)
    case OPC_RISC_DIVW:
        tcg_gen_ext32s_tl(source1, source1);
        tcg_gen_ext32s_tl(source2, source2);
        /* fall through to DIV */
#endif
    case OPC_RISC_DIV:
        /* Handle by altering the args to tcg_gen_div to produce the required
         * results: for overflow, keep source1 in source1 and put 1 in source2;
         * for division by zero, put -1 in source1 and 1 in source2, giving
         * the required -1 result. */
        cond1 = tcg_temp_new();
        cond2 = tcg_temp_new();
        zeroreg = tcg_const_tl(0);
        resultopt1 = tcg_temp_new();

        tcg_gen_movi_tl(resultopt1, (target_ulong)-1);
        tcg_gen_setcondi_tl(TCG_COND_EQ, cond2, source2, (target_ulong)(~0L));
        tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source1,
                            ((target_ulong)1) << (TARGET_LONG_BITS - 1));
        tcg_gen_and_tl(cond1, cond1, cond2); /* cond1 = overflow */
        tcg_gen_setcondi_tl(TCG_COND_EQ, cond2, source2, 0); /* cond2 = div 0 */
        /* if div by zero, set source1 to -1, otherwise don't change */
        tcg_gen_movcond_tl(TCG_COND_EQ, source1, cond2, zeroreg, source1,
                resultopt1);
        /* if overflow or div by zero, set source2 to 1, else don't change */
        tcg_gen_or_tl(cond1, cond1, cond2);
        tcg_gen_movi_tl(resultopt1, (target_ulong)1);
        tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond1, zeroreg, source2,
                resultopt1);
        tcg_gen_div_tl(source1, source1, source2);

        tcg_temp_free(cond1);
        tcg_temp_free(cond2);
        tcg_temp_free(zeroreg);
        tcg_temp_free(resultopt1);
        break;
#if defined(TARGET_RISCV64)
    case OPC_RISC_DIVUW:
        tcg_gen_ext32u_tl(source1, source1);
        tcg_gen_ext32u_tl(source2, source2);
        /* fall through to DIVU */
#endif
    case OPC_RISC_DIVU:
        cond1 = tcg_temp_new();
        zeroreg = tcg_const_tl(0);
        resultopt1 = tcg_temp_new();

        tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source2, 0);
        tcg_gen_movi_tl(resultopt1, (target_ulong)-1);
        tcg_gen_movcond_tl(TCG_COND_EQ, source1, cond1, zeroreg, source1,
                resultopt1);
        tcg_gen_movi_tl(resultopt1, (target_ulong)1);
        tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond1, zeroreg, source2,
                resultopt1);
        tcg_gen_divu_tl(source1, source1, source2);

        tcg_temp_free(cond1);
        tcg_temp_free(zeroreg);
        tcg_temp_free(resultopt1);
        break;
#if defined(TARGET_RISCV64)
    case OPC_RISC_REMW:
        tcg_gen_ext32s_tl(source1, source1);
        tcg_gen_ext32s_tl(source2, source2);
        /* fall through to REM */
#endif
    case OPC_RISC_REM:
        cond1 = tcg_temp_new();
        cond2 = tcg_temp_new();
        zeroreg = tcg_const_tl(0);
        resultopt1 = tcg_temp_new();

        tcg_gen_movi_tl(resultopt1, 1L);
        tcg_gen_setcondi_tl(TCG_COND_EQ, cond2, source2, (target_ulong)-1);
        tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source1,
                            (target_ulong)1 << (TARGET_LONG_BITS - 1));
        tcg_gen_and_tl(cond2, cond1, cond2); /* cond2 = overflow */
        tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source2, 0); /* cond1 = div 0 */
        /* if overflow or div by zero, set source2 to 1, else don't change */
        tcg_gen_or_tl(cond2, cond1, cond2);
        tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond2, zeroreg, source2,
                resultopt1);
        tcg_gen_rem_tl(resultopt1, source1, source2);
        /* if div by zero, just return the original dividend */
        tcg_gen_movcond_tl(TCG_COND_EQ, source1, cond1, zeroreg, resultopt1,
                source1);

        tcg_temp_free(cond1);
        tcg_temp_free(cond2);
        tcg_temp_free(zeroreg);
        tcg_temp_free(resultopt1);
        break;
#if defined(TARGET_RISCV64)
    case OPC_RISC_REMUW:
        tcg_gen_ext32u_tl(source1, source1);
        tcg_gen_ext32u_tl(source2, source2);
        /* fall through to REMU */
#endif
    case OPC_RISC_REMU:
        cond1 = tcg_temp_new();
        zeroreg = tcg_const_tl(0);
        resultopt1 = tcg_temp_new();

        tcg_gen_movi_tl(resultopt1, (target_ulong)1);
        tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source2, 0);
        tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond1, zeroreg, source2,
                resultopt1);
        tcg_gen_remu_tl(resultopt1, source1, source2);
        /* if div by zero, just return the original dividend */
        tcg_gen_movcond_tl(TCG_COND_EQ, source1, cond1, zeroreg, resultopt1,
                source1);

        tcg_temp_free(cond1);
        tcg_temp_free(zeroreg);
        tcg_temp_free(resultopt1);
        break;
    default:
        gen_exception_illegal(ctx);
        return;
    }

    if (opc & 0x8) { /* sign extend for W instructions */
        tcg_gen_ext32s_tl(source1, source1);
    }

    gen_set_gpr(rd, source1);
    tcg_temp_free(source1);
    tcg_temp_free(source2);
}

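/* Register-immediate integer operations.  For the right-shift encodings,
 * bit 10 of the immediate distinguishes arithmetic from logical shifts;
 * out-of-range shift amounts decode as illegal instructions.
 */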
static void gen_arith_imm(DisasContext *ctx, uint32_t opc, int rd,
        int rs1, target_long imm)
{
    TCGv source1 = tcg_temp_new();
    int shift_len = TARGET_LONG_BITS;
    int shift_a;

    gen_get_gpr(source1, rs1);

    switch (opc) {
    case OPC_RISC_ADDI:
#if defined(TARGET_RISCV64)
    case OPC_RISC_ADDIW:
#endif
        tcg_gen_addi_tl(source1, source1, imm);
        break;
    case OPC_RISC_SLTI:
        tcg_gen_setcondi_tl(TCG_COND_LT, source1, source1, imm);
        break;
    case OPC_RISC_SLTIU:
        tcg_gen_setcondi_tl(TCG_COND_LTU, source1, source1, imm);
        break;
    case OPC_RISC_XORI:
        tcg_gen_xori_tl(source1, source1, imm);
        break;
    case OPC_RISC_ORI:
        tcg_gen_ori_tl(source1, source1, imm);
        break;
    case OPC_RISC_ANDI:
        tcg_gen_andi_tl(source1, source1, imm);
        break;
#if defined(TARGET_RISCV64)
    case OPC_RISC_SLLIW:
        shift_len = 32;
        /* FALLTHRU */
#endif
    case OPC_RISC_SLLI:
        if (imm >= shift_len) {
            goto do_illegal;
        }
        tcg_gen_shli_tl(source1, source1, imm);
        break;
#if defined(TARGET_RISCV64)
    case OPC_RISC_SHIFT_RIGHT_IW:
        shift_len = 32;
        /* FALLTHRU */
#endif
    case OPC_RISC_SHIFT_RIGHT_I:
        /* bit 10 of the immediate selects arithmetic (SRAI) vs logical (SRLI) */
        shift_a = imm & 0x400;
        imm &= 0x3ff;
        if (imm >= shift_len) {
            goto do_illegal;
        }
        if (imm != 0) {
            if (shift_a) {
                /* SRAI[W] */
                tcg_gen_sextract_tl(source1, source1, imm, shift_len - imm);
            } else {
                /* SRLI[W] */
                tcg_gen_extract_tl(source1, source1, imm, shift_len - imm);
            }
            /* No further sign-extension needed for W instructions.  */
            opc &= ~0x8;
        }
        break;
    default:
    do_illegal:
        gen_exception_illegal(ctx);
        return;
    }

    if (opc & 0x8) { /* sign-extend for W instructions */
        tcg_gen_ext32s_tl(source1, source1);
    }

    gen_set_gpr(rd, source1);
    tcg_temp_free(source1);
}

static void gen_jal(CPURISCVState *env, DisasContext *ctx, int rd,
                    target_ulong imm)
{
    target_ulong next_pc;

    /* check misaligned: */
    next_pc = ctx->base.pc_next + imm;
    if (!riscv_has_ext(env, RVC)) {
        if ((next_pc & 0x3) != 0) {
            gen_exception_inst_addr_mis(ctx);
            return;
        }
    }
    if (rd != 0) {
        tcg_gen_movi_tl(cpu_gpr[rd], ctx->pc_succ_insn);
    }

    gen_goto_tb(ctx, 0, next_pc); /* must use this for safety */
    ctx->base.is_jmp = DISAS_NORETURN;
}

static void gen_jalr(CPURISCVState *env, DisasContext *ctx, uint32_t opc,
                     int rd, int rs1, target_long imm)
{
    /* no chaining with JALR */
    TCGLabel *misaligned = NULL;
    TCGv t0 = tcg_temp_new();

    switch (opc) {
    case OPC_RISC_JALR:
        gen_get_gpr(cpu_pc, rs1);
        tcg_gen_addi_tl(cpu_pc, cpu_pc, imm);
        tcg_gen_andi_tl(cpu_pc, cpu_pc, (target_ulong)-2);

        if (!riscv_has_ext(env, RVC)) {
            misaligned = gen_new_label();
            tcg_gen_andi_tl(t0, cpu_pc, 0x2);
            tcg_gen_brcondi_tl(TCG_COND_NE, t0, 0x0, misaligned);
        }

        if (rd != 0) {
            tcg_gen_movi_tl(cpu_gpr[rd], ctx->pc_succ_insn);
        }
        tcg_gen_lookup_and_goto_ptr();

        if (misaligned) {
            gen_set_label(misaligned);
            gen_exception_inst_addr_mis(ctx);
        }
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    default:
        gen_exception_illegal(ctx);
        break;
    }
    tcg_temp_free(t0);
}

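/* Conditional branches: the not-taken path falls through to the next
 * instruction, while the taken path either raises an address-misaligned
 * exception (when the C extension is absent and the target is not 4-byte
 * aligned) or jumps to pc + bimm.
 */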
static void gen_branch(CPURISCVState *env, DisasContext *ctx, uint32_t opc,
                       int rs1, int rs2, target_long bimm)
{
    TCGLabel *l = gen_new_label();
    TCGv source1, source2;
    source1 = tcg_temp_new();
    source2 = tcg_temp_new();
    gen_get_gpr(source1, rs1);
    gen_get_gpr(source2, rs2);

    switch (opc) {
    case OPC_RISC_BEQ:
        tcg_gen_brcond_tl(TCG_COND_EQ, source1, source2, l);
        break;
    case OPC_RISC_BNE:
        tcg_gen_brcond_tl(TCG_COND_NE, source1, source2, l);
        break;
    case OPC_RISC_BLT:
        tcg_gen_brcond_tl(TCG_COND_LT, source1, source2, l);
        break;
    case OPC_RISC_BGE:
        tcg_gen_brcond_tl(TCG_COND_GE, source1, source2, l);
        break;
    case OPC_RISC_BLTU:
        tcg_gen_brcond_tl(TCG_COND_LTU, source1, source2, l);
        break;
    case OPC_RISC_BGEU:
        tcg_gen_brcond_tl(TCG_COND_GEU, source1, source2, l);
        break;
    default:
        gen_exception_illegal(ctx);
        return;
    }
    tcg_temp_free(source1);
    tcg_temp_free(source2);

    gen_goto_tb(ctx, 1, ctx->pc_succ_insn);
    gen_set_label(l); /* branch taken */
    if (!riscv_has_ext(env, RVC) && ((ctx->base.pc_next + bimm) & 0x3)) {
        /* misaligned */
        gen_exception_inst_addr_mis(ctx);
    } else {
        gen_goto_tb(ctx, 0, ctx->base.pc_next + bimm);
    }
    ctx->base.is_jmp = DISAS_NORETURN;
}

static void gen_load(DisasContext *ctx, uint32_t opc, int rd, int rs1,
        target_long imm)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    gen_get_gpr(t0, rs1);
    tcg_gen_addi_tl(t0, t0, imm);
    int memop = tcg_memop_lookup[(opc >> 12) & 0x7];

    if (memop < 0) {
        gen_exception_illegal(ctx);
        return;
    }

    tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, memop);
    gen_set_gpr(rd, t1);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
}

static void gen_store(DisasContext *ctx, uint32_t opc, int rs1, int rs2,
        target_long imm)
{
    TCGv t0 = tcg_temp_new();
    TCGv dat = tcg_temp_new();
    gen_get_gpr(t0, rs1);
    tcg_gen_addi_tl(t0, t0, imm);
    gen_get_gpr(dat, rs2);
    int memop = tcg_memop_lookup[(opc >> 12) & 0x7];

    if (memop < 0) {
        gen_exception_illegal(ctx);
        return;
    }

    tcg_gen_qemu_st_tl(dat, t0, ctx->mem_idx, memop);
    tcg_temp_free(t0);
    tcg_temp_free(dat);
}

static void gen_fp_load(DisasContext *ctx, uint32_t opc, int rd,
        int rs1, target_long imm)
{
    TCGv t0;

    if (!(ctx->flags & TB_FLAGS_FP_ENABLE)) {
        gen_exception_illegal(ctx);
        return;
    }

    t0 = tcg_temp_new();
    gen_get_gpr(t0, rs1);
    tcg_gen_addi_tl(t0, t0, imm);

    switch (opc) {
    case OPC_RISC_FLW:
        tcg_gen_qemu_ld_i64(cpu_fpr[rd], t0, ctx->mem_idx, MO_TEUL);
        /* RISC-V requires NaN-boxing of narrower width floating point values */
        tcg_gen_ori_i64(cpu_fpr[rd], cpu_fpr[rd], 0xffffffff00000000ULL);
        break;
    case OPC_RISC_FLD:
        tcg_gen_qemu_ld_i64(cpu_fpr[rd], t0, ctx->mem_idx, MO_TEQ);
        break;
    default:
        gen_exception_illegal(ctx);
        break;
    }
    tcg_temp_free(t0);
}

static void gen_fp_store(DisasContext *ctx, uint32_t opc, int rs1,
        int rs2, target_long imm)
{
    TCGv t0;

    if (!(ctx->flags & TB_FLAGS_FP_ENABLE)) {
        gen_exception_illegal(ctx);
        return;
    }

    t0 = tcg_temp_new();
    gen_get_gpr(t0, rs1);
    tcg_gen_addi_tl(t0, t0, imm);

    switch (opc) {
    case OPC_RISC_FSW:
        tcg_gen_qemu_st_i64(cpu_fpr[rs2], t0, ctx->mem_idx, MO_TEUL);
        break;
    case OPC_RISC_FSD:
        tcg_gen_qemu_st_i64(cpu_fpr[rs2], t0, ctx->mem_idx, MO_TEQ);
        break;
    default:
        gen_exception_illegal(ctx);
        break;
    }

    tcg_temp_free(t0);
}

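/* A-extension handling: LR records the reservation address and loaded value
 * in load_res/load_val; SC is emulated with a compare-and-swap against that
 * recorded value, and the remaining AMOs map directly onto the TCG atomic
 * fetch-op primitives.
 */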
static void gen_atomic(DisasContext *ctx, uint32_t opc,
                      int rd, int rs1, int rs2)
{
    TCGv src1, src2, dat;
    TCGLabel *l1, *l2;
    TCGMemOp mop;
    bool aq, rl;

    /* Extract the size of the atomic operation.  */
    switch (extract32(opc, 12, 3)) {
    case 2: /* 32-bit */
        mop = MO_ALIGN | MO_TESL;
        break;
#if defined(TARGET_RISCV64)
    case 3: /* 64-bit */
        mop = MO_ALIGN | MO_TEQ;
        break;
#endif
    default:
        gen_exception_illegal(ctx);
        return;
    }
    rl = extract32(opc, 25, 1);
    aq = extract32(opc, 26, 1);

    src1 = tcg_temp_new();
    src2 = tcg_temp_new();

    switch (MASK_OP_ATOMIC_NO_AQ_RL_SZ(opc)) {
    case OPC_RISC_LR:
        /* Put addr in load_res, data in load_val.  */
        gen_get_gpr(src1, rs1);
        if (rl) {
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
        }
        tcg_gen_qemu_ld_tl(load_val, src1, ctx->mem_idx, mop);
        if (aq) {
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
        }
        tcg_gen_mov_tl(load_res, src1);
        gen_set_gpr(rd, load_val);
        break;

    case OPC_RISC_SC:
        l1 = gen_new_label();
        l2 = gen_new_label();
        dat = tcg_temp_new();

        gen_get_gpr(src1, rs1);
        tcg_gen_brcond_tl(TCG_COND_NE, load_res, src1, l1);

        gen_get_gpr(src2, rs2);
        /* Note that the TCG atomic primitives are sequentially consistent,
           so we can ignore AQ/RL along this path.  */
        tcg_gen_atomic_cmpxchg_tl(src1, load_res, load_val, src2,
                                  ctx->mem_idx, mop);
        tcg_gen_setcond_tl(TCG_COND_NE, dat, src1, load_val);
        gen_set_gpr(rd, dat);
        tcg_gen_br(l2);

        gen_set_label(l1);
        /* Address comparison failure.  However, we still need to
           provide the memory barrier implied by AQ/RL.  */
        tcg_gen_mb(TCG_MO_ALL + aq * TCG_BAR_LDAQ + rl * TCG_BAR_STRL);
        tcg_gen_movi_tl(dat, 1);
        gen_set_gpr(rd, dat);

        gen_set_label(l2);
        tcg_temp_free(dat);
        break;

    case OPC_RISC_AMOSWAP:
        /* Note that the TCG atomic primitives are sequentially consistent,
           so we can ignore AQ/RL along this path.  */
        gen_get_gpr(src1, rs1);
        gen_get_gpr(src2, rs2);
        tcg_gen_atomic_xchg_tl(src2, src1, src2, ctx->mem_idx, mop);
        gen_set_gpr(rd, src2);
        break;
    case OPC_RISC_AMOADD:
        gen_get_gpr(src1, rs1);
        gen_get_gpr(src2, rs2);
        tcg_gen_atomic_fetch_add_tl(src2, src1, src2, ctx->mem_idx, mop);
        gen_set_gpr(rd, src2);
        break;
    case OPC_RISC_AMOXOR:
        gen_get_gpr(src1, rs1);
        gen_get_gpr(src2, rs2);
        tcg_gen_atomic_fetch_xor_tl(src2, src1, src2, ctx->mem_idx, mop);
        gen_set_gpr(rd, src2);
        break;
    case OPC_RISC_AMOAND:
        gen_get_gpr(src1, rs1);
        gen_get_gpr(src2, rs2);
        tcg_gen_atomic_fetch_and_tl(src2, src1, src2, ctx->mem_idx, mop);
        gen_set_gpr(rd, src2);
        break;
    case OPC_RISC_AMOOR:
        gen_get_gpr(src1, rs1);
        gen_get_gpr(src2, rs2);
        tcg_gen_atomic_fetch_or_tl(src2, src1, src2, ctx->mem_idx, mop);
        gen_set_gpr(rd, src2);
        break;
    case OPC_RISC_AMOMIN:
        gen_get_gpr(src1, rs1);
        gen_get_gpr(src2, rs2);
        tcg_gen_atomic_fetch_smin_tl(src2, src1, src2, ctx->mem_idx, mop);
        gen_set_gpr(rd, src2);
        break;
    case OPC_RISC_AMOMAX:
        gen_get_gpr(src1, rs1);
        gen_get_gpr(src2, rs2);
        tcg_gen_atomic_fetch_smax_tl(src2, src1, src2, ctx->mem_idx, mop);
        gen_set_gpr(rd, src2);
        break;
    case OPC_RISC_AMOMINU:
        gen_get_gpr(src1, rs1);
        gen_get_gpr(src2, rs2);
        tcg_gen_atomic_fetch_umin_tl(src2, src1, src2, ctx->mem_idx, mop);
        gen_set_gpr(rd, src2);
        break;
    case OPC_RISC_AMOMAXU:
        gen_get_gpr(src1, rs1);
        gen_get_gpr(src2, rs2);
        tcg_gen_atomic_fetch_umax_tl(src2, src1, src2, ctx->mem_idx, mop);
        gen_set_gpr(rd, src2);
        break;

    default:
        gen_exception_illegal(ctx);
        break;
    }

    tcg_temp_free(src1);
    tcg_temp_free(src2);
}

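/* Install the rounding mode encoded in the instruction into env->fp_status,
 * but only when it differs from the mode cached in ctx->frm; writes to
 * CSR_FRM end the TB, so the cached value stays valid within a TB.
 */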
static void gen_set_rm(DisasContext *ctx, int rm)
{
    TCGv_i32 t0;

    if (ctx->frm == rm) {
        return;
    }
    ctx->frm = rm;
    t0 = tcg_const_i32(rm);
    gen_helper_set_rounding_mode(cpu_env, t0);
    tcg_temp_free_i32(t0);
}

static void gen_fp_fmadd(DisasContext *ctx, uint32_t opc, int rd,
                         int rs1, int rs2, int rs3, int rm)
{
    switch (opc) {
    case OPC_RISC_FMADD_S:
        gen_set_rm(ctx, rm);
        gen_helper_fmadd_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
                           cpu_fpr[rs2], cpu_fpr[rs3]);
        break;
    case OPC_RISC_FMADD_D:
        gen_set_rm(ctx, rm);
        gen_helper_fmadd_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
                           cpu_fpr[rs2], cpu_fpr[rs3]);
        break;
    default:
        gen_exception_illegal(ctx);
        break;
    }
}

static void gen_fp_fmsub(DisasContext *ctx, uint32_t opc, int rd,
                         int rs1, int rs2, int rs3, int rm)
{
    switch (opc) {
    case OPC_RISC_FMSUB_S:
        gen_set_rm(ctx, rm);
        gen_helper_fmsub_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
                           cpu_fpr[rs2], cpu_fpr[rs3]);
        break;
    case OPC_RISC_FMSUB_D:
        gen_set_rm(ctx, rm);
        gen_helper_fmsub_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
                           cpu_fpr[rs2], cpu_fpr[rs3]);
        break;
    default:
        gen_exception_illegal(ctx);
        break;
    }
}

static void gen_fp_fnmsub(DisasContext *ctx, uint32_t opc, int rd,
                          int rs1, int rs2, int rs3, int rm)
{
    switch (opc) {
    case OPC_RISC_FNMSUB_S:
        gen_set_rm(ctx, rm);
        gen_helper_fnmsub_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
                            cpu_fpr[rs2], cpu_fpr[rs3]);
        break;
    case OPC_RISC_FNMSUB_D:
        gen_set_rm(ctx, rm);
        gen_helper_fnmsub_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
                            cpu_fpr[rs2], cpu_fpr[rs3]);
        break;
    default:
        gen_exception_illegal(ctx);
        break;
    }
}

static void gen_fp_fnmadd(DisasContext *ctx, uint32_t opc, int rd,
                          int rs1, int rs2, int rs3, int rm)
{
    switch (opc) {
    case OPC_RISC_FNMADD_S:
        gen_set_rm(ctx, rm);
        gen_helper_fnmadd_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
                            cpu_fpr[rs2], cpu_fpr[rs3]);
        break;
    case OPC_RISC_FNMADD_D:
        gen_set_rm(ctx, rm);
        gen_helper_fnmadd_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1],
                            cpu_fpr[rs2], cpu_fpr[rs3]);
        break;
    default:
        gen_exception_illegal(ctx);
        break;
    }
}

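/* Non-fused single- and double-precision FP operations.  For several
 * encodings the rm field is reused as a sub-opcode (min/max, compares,
 * FMV/FCLASS), and rs2 selects the integer width of FCVT conversions.
 */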
static void gen_fp_arith(DisasContext *ctx, uint32_t opc, int rd,
                         int rs1, int rs2, int rm)
{
    TCGv t0 = NULL;

    if (!(ctx->flags & TB_FLAGS_FP_ENABLE)) {
        goto do_illegal;
    }

    switch (opc) {
    case OPC_RISC_FADD_S:
        gen_set_rm(ctx, rm);
        gen_helper_fadd_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        break;
    case OPC_RISC_FSUB_S:
        gen_set_rm(ctx, rm);
        gen_helper_fsub_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        break;
    case OPC_RISC_FMUL_S:
        gen_set_rm(ctx, rm);
        gen_helper_fmul_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        break;
    case OPC_RISC_FDIV_S:
        gen_set_rm(ctx, rm);
        gen_helper_fdiv_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        break;
    case OPC_RISC_FSQRT_S:
        gen_set_rm(ctx, rm);
        gen_helper_fsqrt_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1]);
        break;
    case OPC_RISC_FSGNJ_S:
        gen_fsgnj(ctx, rd, rs1, rs2, rm, INT32_MIN);
        break;

    case OPC_RISC_FMIN_S:
        /* also handles: OPC_RISC_FMAX_S */
        switch (rm) {
        case 0x0:
            gen_helper_fmin_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        case 0x1:
            gen_helper_fmax_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        default:
            goto do_illegal;
        }
        break;

    case OPC_RISC_FEQ_S:
        /* also handles: OPC_RISC_FLT_S, OPC_RISC_FLE_S */
        t0 = tcg_temp_new();
        switch (rm) {
        case 0x0:
            gen_helper_fle_s(t0, cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        case 0x1:
            gen_helper_flt_s(t0, cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        case 0x2:
            gen_helper_feq_s(t0, cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        default:
            goto do_illegal;
        }
        gen_set_gpr(rd, t0);
        tcg_temp_free(t0);
        break;

    case OPC_RISC_FCVT_W_S:
        /* also OPC_RISC_FCVT_WU_S, OPC_RISC_FCVT_L_S, OPC_RISC_FCVT_LU_S */
        t0 = tcg_temp_new();
        switch (rs2) {
        case 0: /* FCVT_W_S */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_w_s(t0, cpu_env, cpu_fpr[rs1]);
            break;
        case 1: /* FCVT_WU_S */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_wu_s(t0, cpu_env, cpu_fpr[rs1]);
            break;
#if defined(TARGET_RISCV64)
        case 2: /* FCVT_L_S */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_l_s(t0, cpu_env, cpu_fpr[rs1]);
            break;
        case 3: /* FCVT_LU_S */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_lu_s(t0, cpu_env, cpu_fpr[rs1]);
            break;
#endif
        default:
            goto do_illegal;
        }
        gen_set_gpr(rd, t0);
        tcg_temp_free(t0);
        break;

    case OPC_RISC_FCVT_S_W:
        /* also OPC_RISC_FCVT_S_WU, OPC_RISC_FCVT_S_L, OPC_RISC_FCVT_S_LU */
        t0 = tcg_temp_new();
        gen_get_gpr(t0, rs1);
        switch (rs2) {
        case 0: /* FCVT_S_W */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_s_w(cpu_fpr[rd], cpu_env, t0);
            break;
        case 1: /* FCVT_S_WU */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_s_wu(cpu_fpr[rd], cpu_env, t0);
            break;
#if defined(TARGET_RISCV64)
        case 2: /* FCVT_S_L */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_s_l(cpu_fpr[rd], cpu_env, t0);
            break;
        case 3: /* FCVT_S_LU */
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_s_lu(cpu_fpr[rd], cpu_env, t0);
            break;
#endif
        default:
            goto do_illegal;
        }
        tcg_temp_free(t0);
        break;

    case OPC_RISC_FMV_X_S:
        /* also OPC_RISC_FCLASS_S */
        t0 = tcg_temp_new();
        switch (rm) {
        case 0: /* FMV */
#if defined(TARGET_RISCV64)
            tcg_gen_ext32s_tl(t0, cpu_fpr[rs1]);
#else
            tcg_gen_extrl_i64_i32(t0, cpu_fpr[rs1]);
#endif
            break;
        case 1:
            gen_helper_fclass_s(t0, cpu_fpr[rs1]);
            break;
        default:
            goto do_illegal;
        }
        gen_set_gpr(rd, t0);
        tcg_temp_free(t0);
        break;

    case OPC_RISC_FMV_S_X:
        t0 = tcg_temp_new();
        gen_get_gpr(t0, rs1);
#if defined(TARGET_RISCV64)
        tcg_gen_mov_i64(cpu_fpr[rd], t0);
#else
        tcg_gen_extu_i32_i64(cpu_fpr[rd], t0);
#endif
        tcg_temp_free(t0);
        break;

    /* double */
    case OPC_RISC_FADD_D:
        gen_set_rm(ctx, rm);
        gen_helper_fadd_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        break;
    case OPC_RISC_FSUB_D:
        gen_set_rm(ctx, rm);
        gen_helper_fsub_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        break;
    case OPC_RISC_FMUL_D:
        gen_set_rm(ctx, rm);
        gen_helper_fmul_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        break;
    case OPC_RISC_FDIV_D:
        gen_set_rm(ctx, rm);
        gen_helper_fdiv_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
        break;
    case OPC_RISC_FSQRT_D:
        gen_set_rm(ctx, rm);
        gen_helper_fsqrt_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1]);
        break;
    case OPC_RISC_FSGNJ_D:
        gen_fsgnj(ctx, rd, rs1, rs2, rm, INT64_MIN);
        break;

    case OPC_RISC_FMIN_D:
        /* also OPC_RISC_FMAX_D */
        switch (rm) {
        case 0:
            gen_helper_fmin_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        case 1:
            gen_helper_fmax_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        default:
            goto do_illegal;
        }
        break;

    case OPC_RISC_FCVT_S_D:
        switch (rs2) {
        case 1:
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_s_d(cpu_fpr[rd], cpu_env, cpu_fpr[rs1]);
            break;
        default:
            goto do_illegal;
        }
        break;

    case OPC_RISC_FCVT_D_S:
        switch (rs2) {
        case 0:
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_d_s(cpu_fpr[rd], cpu_env, cpu_fpr[rs1]);
            break;
        default:
            goto do_illegal;
        }
        break;

    case OPC_RISC_FEQ_D:
        /* also OPC_RISC_FLT_D, OPC_RISC_FLE_D */
        t0 = tcg_temp_new();
        switch (rm) {
        case 0:
            gen_helper_fle_d(t0, cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        case 1:
            gen_helper_flt_d(t0, cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        case 2:
            gen_helper_feq_d(t0, cpu_env, cpu_fpr[rs1], cpu_fpr[rs2]);
            break;
        default:
            goto do_illegal;
        }
        gen_set_gpr(rd, t0);
        tcg_temp_free(t0);
        break;

    case OPC_RISC_FCVT_W_D:
        /* also OPC_RISC_FCVT_WU_D, OPC_RISC_FCVT_L_D, OPC_RISC_FCVT_LU_D */
        t0 = tcg_temp_new();
        switch (rs2) {
        case 0:
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_w_d(t0, cpu_env, cpu_fpr[rs1]);
            break;
        case 1:
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_wu_d(t0, cpu_env, cpu_fpr[rs1]);
            break;
#if defined(TARGET_RISCV64)
        case 2:
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_l_d(t0, cpu_env, cpu_fpr[rs1]);
            break;
        case 3:
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_lu_d(t0, cpu_env, cpu_fpr[rs1]);
            break;
#endif
        default:
            goto do_illegal;
        }
        gen_set_gpr(rd, t0);
        tcg_temp_free(t0);
        break;

    case OPC_RISC_FCVT_D_W:
        /* also OPC_RISC_FCVT_D_WU, OPC_RISC_FCVT_D_L, OPC_RISC_FCVT_D_LU */
        t0 = tcg_temp_new();
        gen_get_gpr(t0, rs1);
        switch (rs2) {
        case 0:
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_d_w(cpu_fpr[rd], cpu_env, t0);
            break;
        case 1:
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_d_wu(cpu_fpr[rd], cpu_env, t0);
            break;
#if defined(TARGET_RISCV64)
        case 2:
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_d_l(cpu_fpr[rd], cpu_env, t0);
            break;
        case 3:
            gen_set_rm(ctx, rm);
            gen_helper_fcvt_d_lu(cpu_fpr[rd], cpu_env, t0);
            break;
#endif
        default:
            goto do_illegal;
        }
        tcg_temp_free(t0);
        break;

    case OPC_RISC_FMV_X_D:
        /* also OPC_RISC_FCLASS_D */
        switch (rm) {
#if defined(TARGET_RISCV64)
        case 0: /* FMV */
            gen_set_gpr(rd, cpu_fpr[rs1]);
            break;
#endif
        case 1:
            t0 = tcg_temp_new();
            gen_helper_fclass_d(t0, cpu_fpr[rs1]);
            gen_set_gpr(rd, t0);
            tcg_temp_free(t0);
            break;
        default:
            goto do_illegal;
        }
        break;

#if defined(TARGET_RISCV64)
    case OPC_RISC_FMV_D_X:
        t0 = tcg_temp_new();
        gen_get_gpr(t0, rs1);
        tcg_gen_mov_tl(cpu_fpr[rd], t0);
        tcg_temp_free(t0);
        break;
#endif

    default:
    do_illegal:
        if (t0) {
            tcg_temp_free(t0);
        }
        gen_exception_illegal(ctx);
        break;
    }
}

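/* SYSTEM-major opcodes: ECALL/EBREAK, the xRET/WFI/SFENCE privileged
 * encodings, and the CSR read/write forms.  CSR accesses end the translation
 * block, since a CSR write may change state (such as the privilege mode)
 * that determines the mmu_index used for further translation.
 */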
static void gen_system(CPURISCVState *env, DisasContext *ctx, uint32_t opc,
                      int rd, int rs1, int csr)
{
    TCGv source1, csr_store, dest, rs1_pass, imm_rs1;
    source1 = tcg_temp_new();
    csr_store = tcg_temp_new();
    dest = tcg_temp_new();
    rs1_pass = tcg_temp_new();
    imm_rs1 = tcg_temp_new();
    gen_get_gpr(source1, rs1);
    tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
    tcg_gen_movi_tl(rs1_pass, rs1);
    tcg_gen_movi_tl(csr_store, csr); /* copy into temp reg to feed to helper */

#ifndef CONFIG_USER_ONLY
    /* Extract funct7 value and check whether it matches SFENCE.VMA */
    if ((opc == OPC_RISC_ECALL) && ((csr >> 5) == 9)) {
        if (env->priv_ver == PRIV_VERSION_1_10_0) {
            /* sfence.vma */
            /* TODO: handle ASID specific fences */
            gen_helper_tlb_flush(cpu_env);
            return;
        } else {
            gen_exception_illegal(ctx);
        }
    }
#endif

    switch (opc) {
    case OPC_RISC_ECALL:
        switch (csr) {
        case 0x0: /* ECALL */
            /* always generates U-level ECALL, fixed in do_interrupt handler */
            generate_exception(ctx, RISCV_EXCP_U_ECALL);
            tcg_gen_exit_tb(NULL, 0); /* no chaining */
            ctx->base.is_jmp = DISAS_NORETURN;
            break;
        case 0x1: /* EBREAK */
            generate_exception(ctx, RISCV_EXCP_BREAKPOINT);
            tcg_gen_exit_tb(NULL, 0); /* no chaining */
            ctx->base.is_jmp = DISAS_NORETURN;
            break;
#ifndef CONFIG_USER_ONLY
        case 0x002: /* URET */
            gen_exception_illegal(ctx);
            break;
        case 0x102: /* SRET */
            if (riscv_has_ext(env, RVS)) {
                gen_helper_sret(cpu_pc, cpu_env, cpu_pc);
                tcg_gen_exit_tb(NULL, 0); /* no chaining */
                ctx->base.is_jmp = DISAS_NORETURN;
            } else {
                gen_exception_illegal(ctx);
            }
            break;
        case 0x202: /* HRET */
            gen_exception_illegal(ctx);
            break;
        case 0x302: /* MRET */
            gen_helper_mret(cpu_pc, cpu_env, cpu_pc);
            tcg_gen_exit_tb(NULL, 0); /* no chaining */
            ctx->base.is_jmp = DISAS_NORETURN;
            break;
        case 0x7b2: /* DRET */
            gen_exception_illegal(ctx);
            break;
        case 0x105: /* WFI */
            tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn);
            gen_helper_wfi(cpu_env);
            break;
        case 0x104: /* SFENCE.VM */
            if (env->priv_ver <= PRIV_VERSION_1_09_1) {
                gen_helper_tlb_flush(cpu_env);
            } else {
                gen_exception_illegal(ctx);
            }
            break;
#endif
        default:
            gen_exception_illegal(ctx);
            break;
        }
        break;
    default:
        tcg_gen_movi_tl(imm_rs1, rs1);
        gen_io_start();
        switch (opc) {
        case OPC_RISC_CSRRW:
            gen_helper_csrrw(dest, cpu_env, source1, csr_store);
            break;
        case OPC_RISC_CSRRS:
            gen_helper_csrrs(dest, cpu_env, source1, csr_store, rs1_pass);
            break;
        case OPC_RISC_CSRRC:
            gen_helper_csrrc(dest, cpu_env, source1, csr_store, rs1_pass);
            break;
        case OPC_RISC_CSRRWI:
            gen_helper_csrrw(dest, cpu_env, imm_rs1, csr_store);
            break;
        case OPC_RISC_CSRRSI:
            gen_helper_csrrs(dest, cpu_env, imm_rs1, csr_store, rs1_pass);
            break;
        case OPC_RISC_CSRRCI:
            gen_helper_csrrc(dest, cpu_env, imm_rs1, csr_store, rs1_pass);
            break;
        default:
            gen_exception_illegal(ctx);
            return;
        }
        gen_io_end();
        gen_set_gpr(rd, dest);
        /* end tb since we may be changing priv modes, to get mmu_index right */
        tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn);
        tcg_gen_exit_tb(NULL, 0); /* no chaining */
        ctx->base.is_jmp = DISAS_NORETURN;
        break;
    }
    tcg_temp_free(source1);
    tcg_temp_free(csr_store);
    tcg_temp_free(dest);
    tcg_temp_free(rs1_pass);
    tcg_temp_free(imm_rs1);
}

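/* Quadrant 0 of the compressed (RVC) encoding: C.ADDI4SPN plus the
 * register-based compressed loads and stores, each expanded to its
 * 32-bit equivalent.
 */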
1402static void decode_RV32_64C0(DisasContext *ctx)
1403{
1404    uint8_t funct3 = extract32(ctx->opcode, 13, 3);
1405    uint8_t rd_rs2 = GET_C_RS2S(ctx->opcode);
1406    uint8_t rs1s = GET_C_RS1S(ctx->opcode);
1407
1408    switch (funct3) {
1409    case 0:
1410        /* illegal */
1411        if (ctx->opcode == 0) {
1412            gen_exception_illegal(ctx);
1413        } else {
1414            /* C.ADDI4SPN -> addi rd', x2, zimm[9:2]*/
1415            gen_arith_imm(ctx, OPC_RISC_ADDI, rd_rs2, 2,
1416                          GET_C_ADDI4SPN_IMM(ctx->opcode));
1417        }
1418        break;
1419    case 1:
1420        /* C.FLD -> fld rd', offset[7:3](rs1')*/
1421        gen_fp_load(ctx, OPC_RISC_FLD, rd_rs2, rs1s,
1422                    GET_C_LD_IMM(ctx->opcode));
1423        /* C.LQ(RV128) */
1424        break;
1425    case 2:
1426        /* C.LW -> lw rd', offset[6:2](rs1') */
1427        gen_load(ctx, OPC_RISC_LW, rd_rs2, rs1s,
1428                 GET_C_LW_IMM(ctx->opcode));
1429        break;
1430    case 3:
1431#if defined(TARGET_RISCV64)
1432        /* C.LD(RV64/128) -> ld rd', offset[7:3](rs1')*/
1433        gen_load(ctx, OPC_RISC_LD, rd_rs2, rs1s,
1434                 GET_C_LD_IMM(ctx->opcode));
1435#else
1436        /* C.FLW (RV32) -> flw rd', offset[6:2](rs1')*/
1437        gen_fp_load(ctx, OPC_RISC_FLW, rd_rs2, rs1s,
1438                    GET_C_LW_IMM(ctx->opcode));
1439#endif
1440        break;
1441    case 4:
1442        /* reserved */
1443        gen_exception_illegal(ctx);
1444        break;
1445    case 5:
1446        /* C.FSD(RV32/64) -> fsd rs2', offset[7:3](rs1') */
1447        gen_fp_store(ctx, OPC_RISC_FSD, rs1s, rd_rs2,
1448                     GET_C_LD_IMM(ctx->opcode));
1449        /* C.SQ (RV128) */
1450        break;
1451    case 6:
1452        /* C.SW -> sw rs2', offset[6:2](rs1')*/
1453        gen_store(ctx, OPC_RISC_SW, rs1s, rd_rs2,
1454                  GET_C_LW_IMM(ctx->opcode));
1455        break;
1456    case 7:
1457#if defined(TARGET_RISCV64)
1458        /* C.SD (RV64/128) -> sd rs2', offset[7:3](rs1')*/
1459        gen_store(ctx, OPC_RISC_SD, rs1s, rd_rs2,
1460                  GET_C_LD_IMM(ctx->opcode));
1461#else
1462        /* C.FSW (RV32) -> fsw rs2', offset[6:2](rs1')*/
1463        gen_fp_store(ctx, OPC_RISC_FSW, rs1s, rd_rs2,
1464                     GET_C_LW_IMM(ctx->opcode));
1465#endif
1466        break;
1467    }
1468}
1469
1470static void decode_RV32_64C1(CPURISCVState *env, DisasContext *ctx)
1471{
1472    uint8_t funct3 = extract32(ctx->opcode, 13, 3);
1473    uint8_t rd_rs1 = GET_C_RS1(ctx->opcode);
1474    uint8_t rs1s, rs2s;
1475    uint8_t funct2;
1476
1477    switch (funct3) {
1478    case 0:
1479        /* C.ADDI -> addi rd, rd, nzimm[5:0] */
1480        gen_arith_imm(ctx, OPC_RISC_ADDI, rd_rs1, rd_rs1,
1481                      GET_C_IMM(ctx->opcode));
1482        break;
1483    case 1:
1484#if defined(TARGET_RISCV64)
1485        /* C.ADDIW (RV64/128) -> addiw rd, rd, imm[5:0]*/
1486        gen_arith_imm(ctx, OPC_RISC_ADDIW, rd_rs1, rd_rs1,
1487                      GET_C_IMM(ctx->opcode));
1488#else
1489        /* C.JAL(RV32) -> jal x1, offset[11:1] */
1490        gen_jal(env, ctx, 1, GET_C_J_IMM(ctx->opcode));
1491#endif
1492        break;
1493    case 2:
1494        /* C.LI -> addi rd, x0, imm[5:0]*/
1495        gen_arith_imm(ctx, OPC_RISC_ADDI, rd_rs1, 0, GET_C_IMM(ctx->opcode));
1496        break;
1497    case 3:
1498        if (rd_rs1 == 2) {
1499            /* C.ADDI16SP -> addi x2, x2, nzimm[9:4]*/
1500            gen_arith_imm(ctx, OPC_RISC_ADDI, 2, 2,
1501                          GET_C_ADDI16SP_IMM(ctx->opcode));
1502        } else if (rd_rs1 != 0) {
1503            /* C.LUI (rs1/rd =/= {0,2}) -> lui rd, nzimm[17:12]*/
1504            tcg_gen_movi_tl(cpu_gpr[rd_rs1],
1505                            GET_C_IMM(ctx->opcode) << 12);
1506        }
1507        break;
1508    case 4:
1509        funct2 = extract32(ctx->opcode, 10, 2);
1510        rs1s = GET_C_RS1S(ctx->opcode);
1511        switch (funct2) {
1512        case 0: /* C.SRLI(RV32) -> srli rd', rd', shamt[5:0] */
1513            gen_arith_imm(ctx, OPC_RISC_SHIFT_RIGHT_I, rs1s, rs1s,
1514                               GET_C_ZIMM(ctx->opcode));
1515            /* C.SRLI64(RV128) */
1516            break;
1517        case 1:
1518            /* C.SRAI -> srai rd', rd', shamt[5:0]*/
1519            gen_arith_imm(ctx, OPC_RISC_SHIFT_RIGHT_I, rs1s, rs1s,
1520                            GET_C_ZIMM(ctx->opcode) | 0x400);
1521            /* C.SRAI64(RV128) */
1522            break;
1523        case 2:
1524            /* C.ANDI -> andi rd', rd', imm[5:0]*/
1525            gen_arith_imm(ctx, OPC_RISC_ANDI, rs1s, rs1s,
1526                          GET_C_IMM(ctx->opcode));
1527            break;
1528        case 3:
1529            funct2 = extract32(ctx->opcode, 5, 2);
1530            rs2s = GET_C_RS2S(ctx->opcode);
1531            switch (funct2) {
1532            case 0:
1533                /* C.SUB -> sub rd', rd', rs2' */
1534                if (extract32(ctx->opcode, 12, 1) == 0) {
1535                    gen_arith(ctx, OPC_RISC_SUB, rs1s, rs1s, rs2s);
1536                }
1537#if defined(TARGET_RISCV64)
1538                else {
1539                    gen_arith(ctx, OPC_RISC_SUBW, rs1s, rs1s, rs2s);
1540                }
1541#endif
1542                break;
1543            case 1:
1544                /* C.XOR -> xor rs1', rs1', rs2' */
1545                if (extract32(ctx->opcode, 12, 1) == 0) {
1546                    gen_arith(ctx, OPC_RISC_XOR, rs1s, rs1s, rs2s);
1547                }
1548#if defined(TARGET_RISCV64)
1549                else {
1550                    /* C.ADDW (RV64/128) */
1551                    gen_arith(ctx, OPC_RISC_ADDW, rs1s, rs1s, rs2s);
1552                }
1553#endif
1554                break;
1555            case 2:
1556                /* C.OR -> or rs1', rs1', rs2' */
1557                gen_arith(ctx, OPC_RISC_OR, rs1s, rs1s, rs2s);
1558                break;
1559            case 3:
1560                /* C.AND -> and rs1', rs1', rs2' */
1561                gen_arith(ctx, OPC_RISC_AND, rs1s, rs1s, rs2s);
1562                break;
1563            }
1564            break;
1565        }
1566        break;
1567    case 5:
1568        /* C.J -> jal x0, offset[11:1]*/
1569        gen_jal(env, ctx, 0, GET_C_J_IMM(ctx->opcode));
1570        break;
1571    case 6:
1572        /* C.BEQZ -> beq rs1', x0, offset[8:1]*/
1573        rs1s = GET_C_RS1S(ctx->opcode);
1574        gen_branch(env, ctx, OPC_RISC_BEQ, rs1s, 0, GET_C_B_IMM(ctx->opcode));
1575        break;
1576    case 7:
1577        /* C.BNEZ -> bne rs1', x0, offset[8:1]*/
1578        rs1s = GET_C_RS1S(ctx->opcode);
1579        gen_branch(env, ctx, OPC_RISC_BNE, rs1s, 0, GET_C_B_IMM(ctx->opcode));
1580        break;
1581    }
1582}
1583
static void decode_RV32_64C2(CPURISCVState *env, DisasContext *ctx)
{
    uint8_t rd, rs2;
    uint8_t funct3 = extract32(ctx->opcode, 13, 3);

    rd = GET_RD(ctx->opcode);

    switch (funct3) {
    case 0: /* C.SLLI -> slli rd, rd, shamt[5:0]
               C.SLLI64 -> */
        gen_arith_imm(ctx, OPC_RISC_SLLI, rd, rd, GET_C_ZIMM(ctx->opcode));
        break;
    case 1: /* C.FLDSP(RV32/64DC) -> fld rd, offset[8:3](x2) */
        gen_fp_load(ctx, OPC_RISC_FLD, rd, 2, GET_C_LDSP_IMM(ctx->opcode));
        break;
    case 2: /* C.LWSP -> lw rd, offset[7:2](x2) */
        gen_load(ctx, OPC_RISC_LW, rd, 2, GET_C_LWSP_IMM(ctx->opcode));
        break;
    case 3:
#if defined(TARGET_RISCV64)
        /* C.LDSP(RV64C) -> ld rd, offset[8:3](x2) */
        gen_load(ctx, OPC_RISC_LD, rd, 2, GET_C_LDSP_IMM(ctx->opcode));
#else
        /* C.FLWSP(RV32FC) -> flw rd, offset[7:2](x2) */
        gen_fp_load(ctx, OPC_RISC_FLW, rd, 2, GET_C_LWSP_IMM(ctx->opcode));
#endif
        break;
    case 4:
        rs2 = GET_C_RS2(ctx->opcode);

        if (extract32(ctx->opcode, 12, 1) == 0) {
            if (rs2 == 0) {
                /* C.JR -> jalr x0, rs1, 0 */
                gen_jalr(env, ctx, OPC_RISC_JALR, 0, rd, 0);
            } else {
                /* C.MV -> add rd, x0, rs2 */
                gen_arith(ctx, OPC_RISC_ADD, rd, 0, rs2);
            }
        } else {
            if (rd == 0) {
                /* C.EBREAK -> ebreak */
                gen_system(env, ctx, OPC_RISC_ECALL, 0, 0, 0x1);
            } else {
                if (rs2 == 0) {
                    /* C.JALR -> jalr x1, rs1, 0 */
                    gen_jalr(env, ctx, OPC_RISC_JALR, 1, rd, 0);
                } else {
                    /* C.ADD -> add rd, rd, rs2 */
                    gen_arith(ctx, OPC_RISC_ADD, rd, rd, rs2);
                }
            }
        }
        break;
    case 5:
        /* C.FSDSP -> fsd rs2, offset[8:3](x2) */
        gen_fp_store(ctx, OPC_RISC_FSD, 2, GET_C_RS2(ctx->opcode),
                     GET_C_SDSP_IMM(ctx->opcode));
        /* C.SQSP (RV128 only) would reuse this encoding */
        break;
    case 6: /* C.SWSP -> sw rs2, offset[7:2](x2) */
        gen_store(ctx, OPC_RISC_SW, 2, GET_C_RS2(ctx->opcode),
                  GET_C_SWSP_IMM(ctx->opcode));
        break;
    case 7:
#if defined(TARGET_RISCV64)
        /* C.SDSP(RV64/128) -> sd rs2, offset[8:3](x2) */
        gen_store(ctx, OPC_RISC_SD, 2, GET_C_RS2(ctx->opcode),
                  GET_C_SDSP_IMM(ctx->opcode));
#else
        /* C.FSWSP(RV32) -> fsw rs2, offset[7:2](x2) */
        gen_fp_store(ctx, OPC_RISC_FSW, 2, GET_C_RS2(ctx->opcode),
                     GET_C_SWSP_IMM(ctx->opcode));
#endif
        break;
    }
}

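/*
 * Dispatch a 16-bit compressed instruction to the decoder for its
 * quadrant, selected by the two least-significant opcode bits.
 */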
static void decode_RV32_64C(CPURISCVState *env, DisasContext *ctx)
{
    uint8_t op = extract32(ctx->opcode, 0, 2);

    switch (op) {
    case 0:
        decode_RV32_64C0(ctx);
        break;
    case 1:
        decode_RV32_64C1(env, ctx);
        break;
    case 2:
        decode_RV32_64C2(env, ctx);
        break;
    }
}

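/* Decode a full 32-bit instruction, dispatching on the major opcode. */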
static void decode_RV32_64G(CPURISCVState *env, DisasContext *ctx)
{
    int rs1;
    int rs2;
    int rd;
    uint32_t op;
    target_long imm;

    /* We do not do a misaligned address check here: the address should
     * never be misaligned at this point. Instructions that set the PC must
     * do the check, since epc must be the address of the instruction that
     * caused us to perform the misaligned instruction fetch. */

    op = MASK_OP_MAJOR(ctx->opcode);
    rs1 = GET_RS1(ctx->opcode);
    rs2 = GET_RS2(ctx->opcode);
    rd = GET_RD(ctx->opcode);
    imm = GET_IMM(ctx->opcode);

    switch (op) {
    case OPC_RISC_LUI:
        if (rd == 0) {
            break; /* NOP */
        }
        tcg_gen_movi_tl(cpu_gpr[rd], sextract64(ctx->opcode, 12, 20) << 12);
        break;
    case OPC_RISC_AUIPC:
        if (rd == 0) {
            break; /* NOP */
        }
        tcg_gen_movi_tl(cpu_gpr[rd], (sextract64(ctx->opcode, 12, 20) << 12) +
                        ctx->base.pc_next);
        break;
    case OPC_RISC_JAL:
        imm = GET_JAL_IMM(ctx->opcode);
        gen_jal(env, ctx, rd, imm);
        break;
    case OPC_RISC_JALR:
        gen_jalr(env, ctx, MASK_OP_JALR(ctx->opcode), rd, rs1, imm);
        break;
    case OPC_RISC_BRANCH:
        gen_branch(env, ctx, MASK_OP_BRANCH(ctx->opcode), rs1, rs2,
                   GET_B_IMM(ctx->opcode));
        break;
    case OPC_RISC_LOAD:
        gen_load(ctx, MASK_OP_LOAD(ctx->opcode), rd, rs1, imm);
        break;
    case OPC_RISC_STORE:
        gen_store(ctx, MASK_OP_STORE(ctx->opcode), rs1, rs2,
                  GET_STORE_IMM(ctx->opcode));
        break;
    case OPC_RISC_ARITH_IMM:
#if defined(TARGET_RISCV64)
    case OPC_RISC_ARITH_IMM_W:
#endif
        if (rd == 0) {
            break; /* NOP */
        }
        gen_arith_imm(ctx, MASK_OP_ARITH_IMM(ctx->opcode), rd, rs1, imm);
        break;
    case OPC_RISC_ARITH:
#if defined(TARGET_RISCV64)
    case OPC_RISC_ARITH_W:
#endif
        if (rd == 0) {
            break; /* NOP */
        }
        gen_arith(ctx, MASK_OP_ARITH(ctx->opcode), rd, rs1, rs2);
        break;
    case OPC_RISC_FP_LOAD:
        gen_fp_load(ctx, MASK_OP_FP_LOAD(ctx->opcode), rd, rs1, imm);
        break;
    case OPC_RISC_FP_STORE:
        gen_fp_store(ctx, MASK_OP_FP_STORE(ctx->opcode), rs1, rs2,
                     GET_STORE_IMM(ctx->opcode));
        break;
    case OPC_RISC_ATOMIC:
        gen_atomic(ctx, MASK_OP_ATOMIC(ctx->opcode), rd, rs1, rs2);
        break;
    case OPC_RISC_FMADD:
        gen_fp_fmadd(ctx, MASK_OP_FP_FMADD(ctx->opcode), rd, rs1, rs2,
                     GET_RS3(ctx->opcode), GET_RM(ctx->opcode));
        break;
    case OPC_RISC_FMSUB:
        gen_fp_fmsub(ctx, MASK_OP_FP_FMSUB(ctx->opcode), rd, rs1, rs2,
                     GET_RS3(ctx->opcode), GET_RM(ctx->opcode));
        break;
    case OPC_RISC_FNMSUB:
        gen_fp_fnmsub(ctx, MASK_OP_FP_FNMSUB(ctx->opcode), rd, rs1, rs2,
                      GET_RS3(ctx->opcode), GET_RM(ctx->opcode));
        break;
    case OPC_RISC_FNMADD:
        gen_fp_fnmadd(ctx, MASK_OP_FP_FNMADD(ctx->opcode), rd, rs1, rs2,
                      GET_RS3(ctx->opcode), GET_RM(ctx->opcode));
        break;
    case OPC_RISC_FP_ARITH:
        gen_fp_arith(ctx, MASK_OP_FP_ARITH(ctx->opcode), rd, rs1, rs2,
                     GET_RM(ctx->opcode));
        break;
    case OPC_RISC_FENCE:
        if (ctx->opcode & 0x1000) {
            /* FENCE_I is a no-op in QEMU,
             * however we need to end the translation block */
            tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn);
            tcg_gen_exit_tb(NULL, 0);
            ctx->base.is_jmp = DISAS_NORETURN;
        } else {
            /* FENCE is a full memory barrier. */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        break;
    case OPC_RISC_SYSTEM:
        gen_system(env, ctx, MASK_OP_SYSTEM(ctx->opcode), rd, rs1,
                   (ctx->opcode & 0xFFF00000) >> 20);
        break;
    default:
        gen_exception_illegal(ctx);
        break;
    }
}

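/*
 * Decode one instruction at ctx->base.pc_next.  A 16-bit compressed
 * instruction is identified by its two low bits being anything other
 * than 3; otherwise the instruction is the full 32 bits wide.
 */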
static void decode_opc(CPURISCVState *env, DisasContext *ctx)
{
    /* check for compressed insn */
    if (extract32(ctx->opcode, 0, 2) != 3) {
        if (!riscv_has_ext(env, RVC)) {
            gen_exception_illegal(ctx);
        } else {
            ctx->pc_succ_insn = ctx->base.pc_next + 2;
            decode_RV32_64C(env, ctx);
        }
    } else {
        ctx->pc_succ_insn = ctx->base.pc_next + 4;
        decode_RV32_64G(env, ctx);
    }
}

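/*
 * Translator hook: set up the per-TB DisasContext state (MMU index taken
 * from the TB flags, unknown FP rounding mode) before the first
 * instruction is translated.
 */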
static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    ctx->pc_succ_insn = ctx->base.pc_first;
    ctx->flags = ctx->base.tb->flags;
    ctx->mem_idx = ctx->base.tb->flags & TB_FLAGS_MMU_MASK;
    ctx->frm = -1;  /* unknown rounding mode */
}

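/* Nothing to do at the start of a translation block. */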
static void riscv_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}

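/* Note the guest PC at the start of each translated instruction. */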
static void riscv_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(ctx->base.pc_next);
}

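/* Raise a debug exception when translation reaches a guest breakpoint. */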
static bool riscv_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
                                      const CPUBreakpoint *bp)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
    ctx->base.is_jmp = DISAS_NORETURN;
    gen_exception_debug();
    /* The address covered by the breakpoint must be included in
       [tb->pc, tb->pc + tb->size) in order for it to be properly
       cleared -- thus we increment the PC here so that the logic
       setting tb->size below does the right thing.  */
    ctx->base.pc_next += 4;
    return true;
}

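/*
 * Fetch and translate one instruction, then stop the block early if the
 * next instruction would fall outside the page containing the start of
 * the TB.
 */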
static void riscv_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPURISCVState *env = cpu->env_ptr;

    ctx->opcode = cpu_ldl_code(env, ctx->base.pc_next);
    decode_opc(env, ctx);
    ctx->base.pc_next = ctx->pc_succ_insn;

    if (ctx->base.is_jmp == DISAS_NEXT) {
        target_ulong page_start;

        page_start = ctx->base.pc_first & TARGET_PAGE_MASK;
        if (ctx->base.pc_next - page_start >= TARGET_PAGE_SIZE) {
            ctx->base.is_jmp = DISAS_TOO_MANY;
        }
    }
}

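/* Emit the code that ends the translation block. */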
static void riscv_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    switch (ctx->base.is_jmp) {
    case DISAS_TOO_MANY:
        gen_goto_tb(ctx, 0, ctx->base.pc_next);
        break;
    case DISAS_NORETURN:
        break;
    default:
        g_assert_not_reached();
    }
}

static void riscv_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
{
    qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
    log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size);
}

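/* Hooks used by the common translator loop. */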
static const TranslatorOps riscv_tr_ops = {
    .init_disas_context = riscv_tr_init_disas_context,
    .tb_start           = riscv_tr_tb_start,
    .insn_start         = riscv_tr_insn_start,
    .breakpoint_check   = riscv_tr_breakpoint_check,
    .translate_insn     = riscv_tr_translate_insn,
    .tb_stop            = riscv_tr_tb_stop,
    .disas_log          = riscv_tr_disas_log,
};

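/* Entry point: translate one guest translation block into TCG ops. */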
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
{
    DisasContext ctx;

    translator_loop(&riscv_tr_ops, &ctx.base, cs, tb);
}

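/*
 * Allocate the TCG globals that mirror the CPURISCVState fields used by
 * the translator: the integer and FP register files, pc, and the
 * load-reserved address/value used by the atomic (A) instructions.
 */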
void riscv_translate_init(void)
{
    int i;

    /*
     * cpu_gpr[0] is a placeholder for the zero register. Do not use it.
     * Use the gen_set_gpr and gen_get_gpr helper functions when accessing
     * registers, unless you specifically block reads/writes to reg 0.
     */
    cpu_gpr[0] = NULL;

    for (i = 1; i < 32; i++) {
        cpu_gpr[i] = tcg_global_mem_new(cpu_env,
            offsetof(CPURISCVState, gpr[i]), riscv_int_regnames[i]);
    }

    for (i = 0; i < 32; i++) {
        cpu_fpr[i] = tcg_global_mem_new_i64(cpu_env,
            offsetof(CPURISCVState, fpr[i]), riscv_fpr_regnames[i]);
    }

    cpu_pc = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, pc), "pc");
    load_res = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, load_res),
                                  "load_res");
    load_val = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, load_val),
                                  "load_val");
}