qemu/target/riscv/translate.c
/*
 * RISC-V emulation for qemu: main translation routines.
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "tcg-op.h"
#include "disas/disas.h"
#include "exec/cpu_ldst.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "exec/translator.h"
#include "exec/log.h"

#include "instmap.h"

/* global register indices */
static TCGv cpu_gpr[32], cpu_pc;
static TCGv_i64 cpu_fpr[32]; /* assume F and D extensions */
static TCGv load_res;
static TCGv load_val;

#include "exec/gen-icount.h"

typedef struct DisasContext {
    DisasContextBase base;
    /* pc_succ_insn points to the instruction following base.pc_next */
    target_ulong pc_succ_insn;
    target_ulong priv_ver;
    uint32_t opcode;
    uint32_t mstatus_fs;
    uint32_t misa;
    uint32_t mem_idx;
    /* Remember the rounding mode encoded in the previous fp instruction,
       which we have already installed into env->fp_status.  Or -1 for
       no previous fp instruction.  Note that we exit the TB when writing
       to any system register, which includes CSR_FRM, so we do not have
       to reset this known value.  */
    int frm;
    bool ext_ifencei;
} DisasContext;

#ifdef TARGET_RISCV64
/* convert riscv funct3 to qemu memop for load/store */
static const int tcg_memop_lookup[8] = {
    [0 ... 7] = -1,
    [0] = MO_SB,
    [1] = MO_TESW,
    [2] = MO_TESL,
    [3] = MO_TEQ,
    [4] = MO_UB,
    [5] = MO_TEUW,
    [6] = MO_TEUL,
};
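/* funct3 == 7 is left at -1 (there is no such RV64 load/store encoding);
 * the callers below raise an illegal instruction exception for negative
 * entries. */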
#endif

#ifdef TARGET_RISCV64
#define CASE_OP_32_64(X) case X: case glue(X, W)
#else
#define CASE_OP_32_64(X) case X
#endif

static inline bool has_ext(DisasContext *ctx, uint32_t ext)
{
    return ctx->misa & ext;
}

static void generate_exception(DisasContext *ctx, int excp)
{
    tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
    TCGv_i32 helper_tmp = tcg_const_i32(excp);
    gen_helper_raise_exception(cpu_env, helper_tmp);
    tcg_temp_free_i32(helper_tmp);
    ctx->base.is_jmp = DISAS_NORETURN;
}

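/* As generate_exception(), but additionally store the current PC into
 * env->badaddr so the exception path can report a faulting address. */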
static void generate_exception_mbadaddr(DisasContext *ctx, int excp)
{
    tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
    tcg_gen_st_tl(cpu_pc, cpu_env, offsetof(CPURISCVState, badaddr));
    TCGv_i32 helper_tmp = tcg_const_i32(excp);
    gen_helper_raise_exception(cpu_env, helper_tmp);
    tcg_temp_free_i32(helper_tmp);
    ctx->base.is_jmp = DISAS_NORETURN;
}

static void gen_exception_debug(void)
{
    TCGv_i32 helper_tmp = tcg_const_i32(EXCP_DEBUG);
    gen_helper_raise_exception(cpu_env, helper_tmp);
    tcg_temp_free_i32(helper_tmp);
}

/* Wrapper around tcg_gen_exit_tb that handles single stepping */
static void exit_tb(DisasContext *ctx)
{
    if (ctx->base.singlestep_enabled) {
        gen_exception_debug();
    } else {
        tcg_gen_exit_tb(NULL, 0);
    }
}

/* Wrapper around tcg_gen_lookup_and_goto_ptr that handles single stepping */
static void lookup_and_goto_ptr(DisasContext *ctx)
{
    if (ctx->base.singlestep_enabled) {
        gen_exception_debug();
    } else {
        tcg_gen_lookup_and_goto_ptr();
    }
}

static void gen_exception_illegal(DisasContext *ctx)
{
    generate_exception(ctx, RISCV_EXCP_ILLEGAL_INST);
}

static void gen_exception_inst_addr_mis(DisasContext *ctx)
{
    generate_exception_mbadaddr(ctx, RISCV_EXCP_INST_ADDR_MIS);
}

static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
{
    if (unlikely(ctx->base.singlestep_enabled)) {
        return false;
    }

#ifndef CONFIG_USER_ONLY
    return (ctx->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
{
    if (use_goto_tb(ctx, dest)) {
        /* chaining is only allowed when the jump is to the same page */
        tcg_gen_goto_tb(n);
        tcg_gen_movi_tl(cpu_pc, dest);

        /* No need to check for single stepping here as use_goto_tb() will
         * return false in case of single stepping.
         */
        tcg_gen_exit_tb(ctx->base.tb, n);
    } else {
        tcg_gen_movi_tl(cpu_pc, dest);
        lookup_and_goto_ptr(ctx);
    }
}

/* Wrapper for getting reg values - need to check if reg is zero since
 * cpu_gpr[0] is not actually allocated
 */
static inline void gen_get_gpr(TCGv t, int reg_num)
{
    if (reg_num == 0) {
        tcg_gen_movi_tl(t, 0);
    } else {
        tcg_gen_mov_tl(t, cpu_gpr[reg_num]);
    }
}

/* Wrapper for setting reg values - need to check if reg is zero since
 * cpu_gpr[0] is not actually allocated. This is mostly for safety, since we
 * usually avoid calling the OP_TYPE_gen function if we see a write to
 * $zero.
 */
static inline void gen_set_gpr(int reg_num_dst, TCGv t)
{
    if (reg_num_dst != 0) {
        tcg_gen_mov_tl(cpu_gpr[reg_num_dst], t);
    }
}

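/*
 * MULHSU: upper XLEN bits of (signed)arg1 * (unsigned)arg2.
 * Compute the full unsigned product first.  If arg1 is negative, its signed
 * value is (unsigned)arg1 - 2^XLEN, so the true upper half is the unsigned
 * upper half minus arg2; the arithmetic shift below builds an all-ones mask
 * in that case and selects arg2 for the subtraction.
 */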
static void gen_mulhsu(TCGv ret, TCGv arg1, TCGv arg2)
{
    TCGv rl = tcg_temp_new();
    TCGv rh = tcg_temp_new();

    tcg_gen_mulu2_tl(rl, rh, arg1, arg2);
    /* fix up for one negative */
    tcg_gen_sari_tl(rl, arg1, TARGET_LONG_BITS - 1);
    tcg_gen_and_tl(rl, rl, arg2);
    tcg_gen_sub_tl(ret, rh, rl);

    tcg_temp_free(rl);
    tcg_temp_free(rh);
}

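/*
 * The M-extension division helpers below implement the RISC-V corner cases
 * without trapping: signed division by zero yields -1, unsigned division by
 * zero yields all-ones, signed overflow (most-negative value divided by -1)
 * yields the dividend, and the remainder of a division by zero is the
 * dividend itself.  Each helper patches its operands so the underlying TCG
 * division never sees the problematic inputs.
 */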
static void gen_div(TCGv ret, TCGv source1, TCGv source2)
{
    TCGv cond1, cond2, zeroreg, resultopt1;
    /*
     * Handle by altering args to tcg_gen_div to produce req'd results:
     * For overflow: want source1 in source1 and 1 in source2
     * For div by zero: want -1 in source1 and 1 in source2 -> -1 result
     */
    cond1 = tcg_temp_new();
    cond2 = tcg_temp_new();
    zeroreg = tcg_const_tl(0);
    resultopt1 = tcg_temp_new();

    tcg_gen_movi_tl(resultopt1, (target_ulong)-1);
    tcg_gen_setcondi_tl(TCG_COND_EQ, cond2, source2, (target_ulong)(~0L));
    tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source1,
                        ((target_ulong)1) << (TARGET_LONG_BITS - 1));
    tcg_gen_and_tl(cond1, cond1, cond2); /* cond1 = overflow */
    tcg_gen_setcondi_tl(TCG_COND_EQ, cond2, source2, 0); /* cond2 = div 0 */
    /* if div by zero, set source1 to -1, otherwise don't change */
    tcg_gen_movcond_tl(TCG_COND_EQ, source1, cond2, zeroreg, source1,
            resultopt1);
    /* if overflow or div by zero, set source2 to 1, else don't change */
    tcg_gen_or_tl(cond1, cond1, cond2);
    tcg_gen_movi_tl(resultopt1, (target_ulong)1);
    tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond1, zeroreg, source2,
            resultopt1);
    tcg_gen_div_tl(ret, source1, source2);

    tcg_temp_free(cond1);
    tcg_temp_free(cond2);
    tcg_temp_free(zeroreg);
    tcg_temp_free(resultopt1);
}

static void gen_divu(TCGv ret, TCGv source1, TCGv source2)
{
    TCGv cond1, zeroreg, resultopt1;
    cond1 = tcg_temp_new();

    zeroreg = tcg_const_tl(0);
    resultopt1 = tcg_temp_new();

    tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source2, 0);
    tcg_gen_movi_tl(resultopt1, (target_ulong)-1);
    tcg_gen_movcond_tl(TCG_COND_EQ, source1, cond1, zeroreg, source1,
            resultopt1);
    tcg_gen_movi_tl(resultopt1, (target_ulong)1);
    tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond1, zeroreg, source2,
            resultopt1);
    tcg_gen_divu_tl(ret, source1, source2);

    tcg_temp_free(cond1);
    tcg_temp_free(zeroreg);
    tcg_temp_free(resultopt1);
}

static void gen_rem(TCGv ret, TCGv source1, TCGv source2)
{
    TCGv cond1, cond2, zeroreg, resultopt1;

    cond1 = tcg_temp_new();
    cond2 = tcg_temp_new();
    zeroreg = tcg_const_tl(0);
    resultopt1 = tcg_temp_new();

    tcg_gen_movi_tl(resultopt1, 1L);
    tcg_gen_setcondi_tl(TCG_COND_EQ, cond2, source2, (target_ulong)-1);
    tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source1,
                        (target_ulong)1 << (TARGET_LONG_BITS - 1));
    tcg_gen_and_tl(cond2, cond1, cond2); /* cond2 = overflow */
    tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source2, 0); /* cond1 = div 0 */
    /* if overflow or div by zero, set source2 to 1, else don't change */
    tcg_gen_or_tl(cond2, cond1, cond2);
    tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond2, zeroreg, source2,
            resultopt1);
    tcg_gen_rem_tl(resultopt1, source1, source2);
    /* if div by zero, just return the original dividend */
    tcg_gen_movcond_tl(TCG_COND_EQ, ret, cond1, zeroreg, resultopt1,
            source1);

    tcg_temp_free(cond1);
    tcg_temp_free(cond2);
    tcg_temp_free(zeroreg);
    tcg_temp_free(resultopt1);
}

static void gen_remu(TCGv ret, TCGv source1, TCGv source2)
{
    TCGv cond1, zeroreg, resultopt1;
    cond1 = tcg_temp_new();
    zeroreg = tcg_const_tl(0);
    resultopt1 = tcg_temp_new();

    tcg_gen_movi_tl(resultopt1, (target_ulong)1);
    tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source2, 0);
    tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond1, zeroreg, source2,
            resultopt1);
    tcg_gen_remu_tl(resultopt1, source1, source2);
    /* if div by zero, just return the original dividend */
    tcg_gen_movcond_tl(TCG_COND_EQ, ret, cond1, zeroreg, resultopt1,
            source1);

    tcg_temp_free(cond1);
    tcg_temp_free(zeroreg);
    tcg_temp_free(resultopt1);
}

static void gen_jal(DisasContext *ctx, int rd, target_ulong imm)
{
    target_ulong next_pc;

    /* check misaligned: */
    next_pc = ctx->base.pc_next + imm;
    if (!has_ext(ctx, RVC)) {
        if ((next_pc & 0x3) != 0) {
            gen_exception_inst_addr_mis(ctx);
            return;
        }
    }
    if (rd != 0) {
        tcg_gen_movi_tl(cpu_gpr[rd], ctx->pc_succ_insn);
    }

    gen_goto_tb(ctx, 0, ctx->base.pc_next + imm); /* must use this for safety */
    ctx->base.is_jmp = DISAS_NORETURN;
}

#ifdef TARGET_RISCV64
static void gen_load_c(DisasContext *ctx, uint32_t opc, int rd, int rs1,
        target_long imm)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    gen_get_gpr(t0, rs1);
    tcg_gen_addi_tl(t0, t0, imm);
    int memop = tcg_memop_lookup[(opc >> 12) & 0x7];

    if (memop < 0) {
        gen_exception_illegal(ctx);
        return;
    }

    tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, memop);
    gen_set_gpr(rd, t1);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
}

static void gen_store_c(DisasContext *ctx, uint32_t opc, int rs1, int rs2,
        target_long imm)
{
    TCGv t0 = tcg_temp_new();
    TCGv dat = tcg_temp_new();
    gen_get_gpr(t0, rs1);
    tcg_gen_addi_tl(t0, t0, imm);
    gen_get_gpr(dat, rs2);
    int memop = tcg_memop_lookup[(opc >> 12) & 0x7];

    if (memop < 0) {
        gen_exception_illegal(ctx);
        return;
    }

    tcg_gen_qemu_st_tl(dat, t0, ctx->mem_idx, memop);
    tcg_temp_free(t0);
    tcg_temp_free(dat);
}
#endif

#ifndef CONFIG_USER_ONLY
/* The states of mstatus_fs are:
 * 0 = disabled, 1 = initial, 2 = clean, 3 = dirty
 * We will have already diagnosed disabled state,
 * and need to turn initial/clean into dirty.
 */
static void mark_fs_dirty(DisasContext *ctx)
{
    TCGv tmp;
    if (ctx->mstatus_fs == MSTATUS_FS) {
        return;
    }
    /* Remember the state change for the rest of the TB.  */
    ctx->mstatus_fs = MSTATUS_FS;

    tmp = tcg_temp_new();
    tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus));
    tcg_gen_ori_tl(tmp, tmp, MSTATUS_FS);
    tcg_gen_st_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus));
    tcg_temp_free(tmp);
}
#else
static inline void mark_fs_dirty(DisasContext *ctx) { }
#endif

#if !defined(TARGET_RISCV64)
static void gen_fp_load(DisasContext *ctx, uint32_t opc, int rd,
        int rs1, target_long imm)
{
    TCGv t0;

    if (ctx->mstatus_fs == 0) {
        gen_exception_illegal(ctx);
        return;
    }

    t0 = tcg_temp_new();
    gen_get_gpr(t0, rs1);
    tcg_gen_addi_tl(t0, t0, imm);

    switch (opc) {
    case OPC_RISC_FLW:
        if (!has_ext(ctx, RVF)) {
            goto do_illegal;
        }
        tcg_gen_qemu_ld_i64(cpu_fpr[rd], t0, ctx->mem_idx, MO_TEUL);
        /* RISC-V requires NaN-boxing of narrower width floating point values */
        tcg_gen_ori_i64(cpu_fpr[rd], cpu_fpr[rd], 0xffffffff00000000ULL);
        break;
    case OPC_RISC_FLD:
        if (!has_ext(ctx, RVD)) {
            goto do_illegal;
        }
        tcg_gen_qemu_ld_i64(cpu_fpr[rd], t0, ctx->mem_idx, MO_TEQ);
        break;
    do_illegal:
    default:
        gen_exception_illegal(ctx);
        break;
    }
    tcg_temp_free(t0);

    mark_fs_dirty(ctx);
}

static void gen_fp_store(DisasContext *ctx, uint32_t opc, int rs1,
        int rs2, target_long imm)
{
    TCGv t0;

    if (ctx->mstatus_fs == 0) {
        gen_exception_illegal(ctx);
        return;
    }

    t0 = tcg_temp_new();
    gen_get_gpr(t0, rs1);
    tcg_gen_addi_tl(t0, t0, imm);

    switch (opc) {
    case OPC_RISC_FSW:
        if (!has_ext(ctx, RVF)) {
            goto do_illegal;
        }
        tcg_gen_qemu_st_i64(cpu_fpr[rs2], t0, ctx->mem_idx, MO_TEUL);
        break;
    case OPC_RISC_FSD:
        if (!has_ext(ctx, RVD)) {
            goto do_illegal;
        }
        tcg_gen_qemu_st_i64(cpu_fpr[rs2], t0, ctx->mem_idx, MO_TEQ);
        break;
    do_illegal:
    default:
        gen_exception_illegal(ctx);
        break;
    }

    tcg_temp_free(t0);
}
#endif

static void gen_set_rm(DisasContext *ctx, int rm)
{
    TCGv_i32 t0;

    if (ctx->frm == rm) {
        return;
    }
    ctx->frm = rm;
    t0 = tcg_const_i32(rm);
    gen_helper_set_rounding_mode(cpu_env, t0);
    tcg_temp_free_i32(t0);
}

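/*
 * Remnant of the hand-written RVC decoder: decode_opc() falls back to it
 * when the decodetree 16-bit decoder does not match, and only the quadrant-0
 * cases handled below (C.FLW/C.LD and C.FSW/C.SD) remain.
 */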
static void decode_RV32_64C0(DisasContext *ctx)
{
    uint8_t funct3 = extract32(ctx->opcode, 13, 3);
    uint8_t rd_rs2 = GET_C_RS2S(ctx->opcode);
    uint8_t rs1s = GET_C_RS1S(ctx->opcode);

    switch (funct3) {
    case 3:
#if defined(TARGET_RISCV64)
        /* C.LD(RV64/128) -> ld rd', offset[7:3](rs1')*/
        gen_load_c(ctx, OPC_RISC_LD, rd_rs2, rs1s,
                 GET_C_LD_IMM(ctx->opcode));
#else
        /* C.FLW (RV32) -> flw rd', offset[6:2](rs1')*/
        gen_fp_load(ctx, OPC_RISC_FLW, rd_rs2, rs1s,
                    GET_C_LW_IMM(ctx->opcode));
#endif
        break;
    case 7:
#if defined(TARGET_RISCV64)
        /* C.SD (RV64/128) -> sd rs2', offset[7:3](rs1')*/
        gen_store_c(ctx, OPC_RISC_SD, rs1s, rd_rs2,
                  GET_C_LD_IMM(ctx->opcode));
#else
        /* C.FSW (RV32) -> fsw rs2', offset[6:2](rs1')*/
        gen_fp_store(ctx, OPC_RISC_FSW, rs1s, rd_rs2,
                     GET_C_LW_IMM(ctx->opcode));
#endif
        break;
    }
}

static void decode_RV32_64C(DisasContext *ctx)
{
    uint8_t op = extract32(ctx->opcode, 0, 2);

    switch (op) {
    case 0:
        decode_RV32_64C0(ctx);
        break;
    }
}

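/*
 * Field-transform helpers referenced (via !function) by the decodetree
 * generated decoders included below: ex_shift_N() scales an immediate by
 * 2^N, ex_rvc_register() maps a 3-bit compressed register field onto
 * x8-x15, and ex_rvc_shifti() handles the RVC shift-amount encoding.
 */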
#define EX_SH(amount) \
    static int ex_shift_##amount(DisasContext *ctx, int imm) \
    {                                         \
        return imm << amount;                 \
    }
EX_SH(1)
EX_SH(2)
EX_SH(3)
EX_SH(4)
EX_SH(12)

#define REQUIRE_EXT(ctx, ext) do { \
    if (!has_ext(ctx, ext)) {      \
        return false;              \
    }                              \
} while (0)

static int ex_rvc_register(DisasContext *ctx, int reg)
{
    return 8 + reg;
}

static int ex_rvc_shifti(DisasContext *ctx, int imm)
{
    /* For RV128 a shamt of 0 means a shift by 64. */
    return imm ? imm : 64;
}

/* Include the auto-generated decoder for 32 bit insn */
#include "decode_insn32.inc.c"

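/*
 * Shared helpers for the register-immediate arithmetic translations: the
 * _fn variant hands the immediate straight to an immediate-taking TCG
 * generator, while the _tl variant first materialises it in a temporary
 * for generators that only take register operands.
 */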
static bool gen_arith_imm_fn(DisasContext *ctx, arg_i *a,
                             void (*func)(TCGv, TCGv, target_long))
{
    TCGv source1;
    source1 = tcg_temp_new();

    gen_get_gpr(source1, a->rs1);

    (*func)(source1, source1, a->imm);

    gen_set_gpr(a->rd, source1);
    tcg_temp_free(source1);
    return true;
}

static bool gen_arith_imm_tl(DisasContext *ctx, arg_i *a,
                             void (*func)(TCGv, TCGv, TCGv))
{
    TCGv source1, source2;
    source1 = tcg_temp_new();
    source2 = tcg_temp_new();

    gen_get_gpr(source1, a->rs1);
    tcg_gen_movi_tl(source2, a->imm);

    (*func)(source1, source1, source2);

    gen_set_gpr(a->rd, source1);
    tcg_temp_free(source1);
    tcg_temp_free(source2);
    return true;
}

#ifdef TARGET_RISCV64
static void gen_addw(TCGv ret, TCGv arg1, TCGv arg2)
{
    tcg_gen_add_tl(ret, arg1, arg2);
    tcg_gen_ext32s_tl(ret, ret);
}

static void gen_subw(TCGv ret, TCGv arg1, TCGv arg2)
{
    tcg_gen_sub_tl(ret, arg1, arg2);
    tcg_gen_ext32s_tl(ret, ret);
}

static void gen_mulw(TCGv ret, TCGv arg1, TCGv arg2)
{
    tcg_gen_mul_tl(ret, arg1, arg2);
    tcg_gen_ext32s_tl(ret, ret);
}

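/*
 * The RV64 *W division forms operate on the low 32 bits of their operands:
 * sign-extend them for DIVW/REMW (zero-extend for DIVUW/REMUW below) and
 * sign-extend the 32-bit result back to the full register width.
 */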
static bool gen_arith_div_w(DisasContext *ctx, arg_r *a,
                            void(*func)(TCGv, TCGv, TCGv))
{
    TCGv source1, source2;
    source1 = tcg_temp_new();
    source2 = tcg_temp_new();

    gen_get_gpr(source1, a->rs1);
    gen_get_gpr(source2, a->rs2);
    tcg_gen_ext32s_tl(source1, source1);
    tcg_gen_ext32s_tl(source2, source2);

    (*func)(source1, source1, source2);

    tcg_gen_ext32s_tl(source1, source1);
    gen_set_gpr(a->rd, source1);
    tcg_temp_free(source1);
    tcg_temp_free(source2);
    return true;
}

static bool gen_arith_div_uw(DisasContext *ctx, arg_r *a,
                            void(*func)(TCGv, TCGv, TCGv))
{
    TCGv source1, source2;
    source1 = tcg_temp_new();
    source2 = tcg_temp_new();

    gen_get_gpr(source1, a->rs1);
    gen_get_gpr(source2, a->rs2);
    tcg_gen_ext32u_tl(source1, source1);
    tcg_gen_ext32u_tl(source2, source2);

    (*func)(source1, source1, source2);

    tcg_gen_ext32s_tl(source1, source1);
    gen_set_gpr(a->rd, source1);
    tcg_temp_free(source1);
    tcg_temp_free(source2);
    return true;
}

#endif

static bool gen_arith(DisasContext *ctx, arg_r *a,
                      void(*func)(TCGv, TCGv, TCGv))
{
    TCGv source1, source2;
    source1 = tcg_temp_new();
    source2 = tcg_temp_new();

    gen_get_gpr(source1, a->rs1);
    gen_get_gpr(source2, a->rs2);

    (*func)(source1, source1, source2);

    gen_set_gpr(a->rd, source1);
    tcg_temp_free(source1);
    tcg_temp_free(source2);
    return true;
}

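/*
 * Register-register shifts use only the low log2(XLEN) bits of rs2 as the
 * shift amount, hence the mask with TARGET_LONG_BITS - 1 below.
 */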
static bool gen_shift(DisasContext *ctx, arg_r *a,
                        void(*func)(TCGv, TCGv, TCGv))
{
    TCGv source1 = tcg_temp_new();
    TCGv source2 = tcg_temp_new();

    gen_get_gpr(source1, a->rs1);
    gen_get_gpr(source2, a->rs2);

    tcg_gen_andi_tl(source2, source2, TARGET_LONG_BITS - 1);
    (*func)(source1, source1, source2);

    gen_set_gpr(a->rd, source1);
    tcg_temp_free(source1);
    tcg_temp_free(source2);
    return true;
}

/* Include insn module translation function */
#include "insn_trans/trans_rvi.inc.c"
#include "insn_trans/trans_rvm.inc.c"
#include "insn_trans/trans_rva.inc.c"
#include "insn_trans/trans_rvf.inc.c"
#include "insn_trans/trans_rvd.inc.c"
#include "insn_trans/trans_privileged.inc.c"

/* Include the auto-generated decoder for 16 bit insn */
#include "decode_insn16.inc.c"

static void decode_opc(DisasContext *ctx)
{
    /* check for compressed insn */
    if (extract32(ctx->opcode, 0, 2) != 3) {
        if (!has_ext(ctx, RVC)) {
            gen_exception_illegal(ctx);
        } else {
            ctx->pc_succ_insn = ctx->base.pc_next + 2;
            if (!decode_insn16(ctx, ctx->opcode)) {
                /* fall back to old decoder */
                decode_RV32_64C(ctx);
            }
        }
    } else {
        ctx->pc_succ_insn = ctx->base.pc_next + 4;
        if (!decode_insn32(ctx, ctx->opcode)) {
            gen_exception_illegal(ctx);
        }
    }
}

static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPURISCVState *env = cs->env_ptr;
    RISCVCPU *cpu = RISCV_CPU(cs);

    ctx->pc_succ_insn = ctx->base.pc_first;
    ctx->mem_idx = ctx->base.tb->flags & TB_FLAGS_MMU_MASK;
    ctx->mstatus_fs = ctx->base.tb->flags & TB_FLAGS_MSTATUS_FS;
    ctx->priv_ver = env->priv_ver;
    ctx->misa = env->misa;
    ctx->frm = -1;  /* unknown rounding mode */
    ctx->ext_ifencei = cpu->cfg.ext_ifencei;
}

static void riscv_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}

static void riscv_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(ctx->base.pc_next);
}

static bool riscv_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
                                      const CPUBreakpoint *bp)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
    ctx->base.is_jmp = DISAS_NORETURN;
    gen_exception_debug();
    /* The address covered by the breakpoint must be included in
       [tb->pc, tb->pc + tb->size) in order for it to be properly
       cleared -- thus we increment the PC here so that the logic
       setting tb->size below does the right thing.  */
    ctx->base.pc_next += 4;
    return true;
}

static void riscv_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPURISCVState *env = cpu->env_ptr;

    ctx->opcode = translator_ldl(env, ctx->base.pc_next);
    decode_opc(ctx);
    ctx->base.pc_next = ctx->pc_succ_insn;

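    /*
     * If translation is still falling through, stop the TB once the next
     * instruction would start beyond the page containing the TB's first
     * instruction, so a TB never runs past its starting page.
     */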
    if (ctx->base.is_jmp == DISAS_NEXT) {
        target_ulong page_start;

        page_start = ctx->base.pc_first & TARGET_PAGE_MASK;
        if (ctx->base.pc_next - page_start >= TARGET_PAGE_SIZE) {
            ctx->base.is_jmp = DISAS_TOO_MANY;
        }
    }
}

static void riscv_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    switch (ctx->base.is_jmp) {
    case DISAS_TOO_MANY:
        gen_goto_tb(ctx, 0, ctx->base.pc_next);
        break;
    case DISAS_NORETURN:
        break;
    default:
        g_assert_not_reached();
    }
}

static void riscv_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
{
    qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
    log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size);
}

static const TranslatorOps riscv_tr_ops = {
    .init_disas_context = riscv_tr_init_disas_context,
    .tb_start           = riscv_tr_tb_start,
    .insn_start         = riscv_tr_insn_start,
    .breakpoint_check   = riscv_tr_breakpoint_check,
    .translate_insn     = riscv_tr_translate_insn,
    .tb_stop            = riscv_tr_tb_stop,
    .disas_log          = riscv_tr_disas_log,
};

void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
{
    DisasContext ctx;

    translator_loop(&riscv_tr_ops, &ctx.base, cs, tb, max_insns);
}

void riscv_translate_init(void)
{
    int i;

    /* cpu_gpr[0] is a placeholder for the zero register. Do not use it. */
    /* Use the gen_set_gpr and gen_get_gpr helper functions when accessing */
    /* registers, unless you specifically block reads/writes to reg 0 */
    cpu_gpr[0] = NULL;

    for (i = 1; i < 32; i++) {
        cpu_gpr[i] = tcg_global_mem_new(cpu_env,
            offsetof(CPURISCVState, gpr[i]), riscv_int_regnames[i]);
    }

    for (i = 0; i < 32; i++) {
        cpu_fpr[i] = tcg_global_mem_new_i64(cpu_env,
            offsetof(CPURISCVState, fpr[i]), riscv_fpr_regnames[i]);
    }

    cpu_pc = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, pc), "pc");
    load_res = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, load_res),
                             "load_res");
    load_val = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, load_val),
                             "load_val");
}