qemu/target-alpha/translate.c
/*
 *  Alpha emulation cpu translation for qemu.
 *
 *  Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "tcg-op.h"

#include "helper.h"
#define GEN_HELPER 1
#include "helper.h"

#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif

typedef struct DisasContext DisasContext;
struct DisasContext {
    struct TranslationBlock *tb;
    uint64_t pc;
    int mem_idx;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;

    /* implver value for this CPU.  */
    int implver;

    bool singlestep_enabled;
};

/* Return values from translate_one, indicating the state of the TB.
   Note that zero indicates that we are not exiting the TB.  */

typedef enum {
    NO_EXIT,

    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,

    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,

    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,

    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;

/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_st_addr;
static TCGv cpu_lock_value;
static TCGv cpu_unique;
#ifndef CONFIG_USER_ONLY
static TCGv cpu_sysval;
static TCGv cpu_usp;
#endif

/* register names */
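/* Sized for the names generated below: "ir0".."ir9" take 4 bytes each
   including the NUL, "ir10".."ir30" take 5, and the "fir" names take
   one byte more apiece.  */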
static char cpu_reg_names[10*4+21*5 + 10*5+21*6];

#include "exec/gen-icount.h"

void alpha_translate_init(void)
{
    int i;
    char *p;
    static int done_init = 0;

    if (done_init)
        return;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    p = cpu_reg_names;
    for (i = 0; i < 31; i++) {
        sprintf(p, "ir%d", i);
        cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUAlphaState, ir[i]), p);
        p += (i < 10) ? 4 : 5;

        sprintf(p, "fir%d", i);
        cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUAlphaState, fir[i]), p);
        p += (i < 10) ? 5 : 6;
    }

    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
                                    offsetof(CPUAlphaState, pc), "pc");

    cpu_lock_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUAlphaState, lock_addr),
                                           "lock_addr");
    cpu_lock_st_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                              offsetof(CPUAlphaState, lock_st_addr),
                                              "lock_st_addr");
    cpu_lock_value = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUAlphaState, lock_value),
                                            "lock_value");

    cpu_unique = tcg_global_mem_new_i64(TCG_AREG0,
                                        offsetof(CPUAlphaState, unique), "unique");
#ifndef CONFIG_USER_ONLY
    cpu_sysval = tcg_global_mem_new_i64(TCG_AREG0,
                                        offsetof(CPUAlphaState, sysval), "sysval");
    cpu_usp = tcg_global_mem_new_i64(TCG_AREG0,
                                     offsetof(CPUAlphaState, usp), "usp");
#endif

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"

    done_init = 1;
}

static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(cpu_env, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}

static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    gen_excp_1(exception, error_code);
    return EXIT_NORETURN;
}

static inline ExitStatus gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}

static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld64(tmp, t1, flags);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld32s(t0, t1, flags);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld64(t0, t1, flags);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

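/* Common helper for memory-format loads: RA receives the data, RB plus
   the sign-extended 16-bit displacement forms the address, FP selects
   the floating-point register file, and CLEAR masks the low three
   address bits, as LDQ_U requires.  */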
static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, int fp,
                                int clear)
{
    TCGv addr, va;

    /* LDQ_U with ra $31 is UNOP.  Various other loads are forms of
       prefetch, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear) {
            tcg_gen_andi_i64(addr, addr, ~0x7);
        }
    } else {
        if (clear) {
            disp16 &= ~0x7;
        }
        tcg_gen_movi_i64(addr, disp16);
    }

    va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(addr);
}

static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st64(tmp, t1, flags);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, int fp,
                                 int clear)
{
    TCGv addr, va;

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear) {
            tcg_gen_andi_i64(addr, addr, ~0x7);
        }
    } else {
        if (clear) {
            disp16 &= ~0x7;
        }
        tcg_gen_movi_i64(addr, disp16);
    }

    if (ra == 31) {
        va = tcg_const_i64(0);
    } else {
        va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    }
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(addr);
    if (ra == 31) {
        tcg_temp_free(va);
    }
}

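/* STL_C/STQ_C.  This consumes the lock state recorded by the LDx_L
   loads above; the system-mode sequence below is a plain
   load-compare-store, which suffices only because system-mode TCG
   runs single-threaded.  */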
static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                        int32_t disp16, int quad)
{
    TCGv addr;

    if (ra == 31) {
        /* ??? Don't bother storing anything.  The user can't tell
           the difference, since the zero register always reads zero.  */
        return NO_EXIT;
    }

#if defined(CONFIG_USER_ONLY)
    addr = cpu_lock_st_addr;
#else
    addr = tcg_temp_local_new();
#endif

    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
    } else {
        tcg_gen_movi_i64(addr, disp16);
    }

#if defined(CONFIG_USER_ONLY)
    /* ??? This is handled via a complicated version of compare-and-swap
       in the cpu_loop.  Hopefully one day we'll have a real CAS opcode
       in TCG so that this isn't necessary.  */
    return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
#else
    /* ??? In system mode we are never multi-threaded, so CAS can be
       implemented via a non-atomic load-compare-store sequence.  */
    {
        int lab_fail, lab_done;
        TCGv val;

        lab_fail = gen_new_label();
        lab_done = gen_new_label();
        tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);

        val = tcg_temp_new();
        if (quad) {
            tcg_gen_qemu_ld64(val, addr, ctx->mem_idx);
        } else {
            tcg_gen_qemu_ld32s(val, addr, ctx->mem_idx);
        }
        tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);

        if (quad) {
            tcg_gen_qemu_st64(cpu_ir[ra], addr, ctx->mem_idx);
        } else {
            tcg_gen_qemu_st32(cpu_ir[ra], addr, ctx->mem_idx);
        }
        tcg_gen_movi_i64(cpu_ir[ra], 1);
        tcg_gen_br(lab_done);

        gen_set_label(lab_fail);
        tcg_gen_movi_i64(cpu_ir[ra], 0);

        gen_set_label(lab_done);
        tcg_gen_movi_i64(cpu_lock_addr, -1);

        tcg_temp_free(addr);
        return NO_EXIT;
    }
#endif
}

static int use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    /* Check for the dest on the same page as the start of the TB.  We
       also want to suppress goto_tb in the case of single-stepping and IO.  */
    return (((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0
            && !ctx->singlestep_enabled
            && !(ctx->tb->cflags & CF_LAST_IO));
}

static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
    }

    /* Notice branch-to-next; used to initialize RA with the PC.  */
    if (disp == 0) {
        return NO_EXIT;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return EXIT_PC_UPDATED;
    }
}

static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                     TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);
    int lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb + 1);

        return EXIT_GOTO_TB;
    } else {
        TCGv_i64 z = tcg_const_i64(0);
        TCGv_i64 d = tcg_const_i64(dest);
        TCGv_i64 p = tcg_const_i64(ctx->pc);

        tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);

        tcg_temp_free_i64(z);
        tcg_temp_free_i64(d);
        tcg_temp_free_i64(p);
        return EXIT_PC_UPDATED;
    }
}

static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                            int32_t disp, int mask)
{
    TCGv cmp_tmp;

    if (unlikely(ra == 31)) {
        cmp_tmp = tcg_const_i64(0);
    } else {
        cmp_tmp = tcg_temp_new();
        if (mask) {
            tcg_gen_andi_i64(cmp_tmp, cpu_ir[ra], 1);
        } else {
            tcg_gen_mov_i64(cmp_tmp, cpu_ir[ra]);
        }
    }

    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}

/* Fold -0.0 for comparison with COND.  */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
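        /* setcond yields 1 unless src is exactly 0x8000000000000000
           (-0.0); negating that produces an all-ones mask, so every
           value passes through unchanged except -0.0, which becomes
           +0.0.  */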
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}

static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                             int32_t disp)
{
    TCGv cmp_tmp;

    if (unlikely(ra == 31)) {
        /* Very uncommon case, but easier to optimize it to an integer
           comparison than continuing with the floating point comparison.  */
        return gen_bcond(ctx, cond, ra, disp, 0);
    }

    cmp_tmp = tcg_temp_new();
    gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}

static void gen_cmov(TCGCond cond, int ra, int rb, int rc,
                     int islit, uint8_t lit, int mask)
{
    TCGv_i64 c1, z, v1;

    if (unlikely(rc == 31)) {
        return;
    }

    if (ra == 31) {
        /* Very uncommon case - Do not bother to optimize.  */
        c1 = tcg_const_i64(0);
    } else if (mask) {
        c1 = tcg_const_i64(1);
        tcg_gen_and_i64(c1, c1, cpu_ir[ra]);
    } else {
        c1 = cpu_ir[ra];
    }
    if (islit) {
        v1 = tcg_const_i64(lit);
    } else {
        v1 = cpu_ir[rb];
    }
    z = tcg_const_i64(0);

    tcg_gen_movcond_i64(cond, cpu_ir[rc], c1, z, v1, cpu_ir[rc]);

    tcg_temp_free_i64(z);
    if (ra == 31 || mask) {
        tcg_temp_free_i64(c1);
    }
    if (islit) {
        tcg_temp_free_i64(v1);
    }
}

static void gen_fcmov(TCGCond cond, int ra, int rb, int rc)
{
    TCGv_i64 c1, z, v1;

    if (unlikely(rc == 31)) {
        return;
    }

    c1 = tcg_temp_new_i64();
    if (unlikely(ra == 31)) {
        tcg_gen_movi_i64(c1, 0);
    } else {
        gen_fold_mzero(cond, c1, cpu_fir[ra]);
    }
    if (rb == 31) {
        v1 = tcg_const_i64(0);
    } else {
        v1 = cpu_fir[rb];
    }
    z = tcg_const_i64(0);

    tcg_gen_movcond_i64(cond, cpu_fir[rc], c1, z, v1, cpu_fir[rc]);

    tcg_temp_free_i64(z);
    tcg_temp_free_i64(c1);
    if (rb == 31) {
        tcg_temp_free_i64(v1);
    }
}

#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */
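/* Note that QUAL_U and QUAL_V share bit 0x100: an instruction produces
   either a floating-point or an integer result, never both, so the same
   fn11 bit can encode both qualifiers.  */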

static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}

static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}

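/* Fetch the value of an fp input register.  The ieee_input helpers are
   called only for their side effect of validating the operand; they are
   skipped when software completion (/S) is requested.  */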
static TCGv gen_ieee_input(int reg, int fn11, int is_cmp)
{
    TCGv val;
    if (reg == 31) {
        val = tcg_const_i64(0);
    } else {
        if ((fn11 & QUAL_S) == 0) {
            if (is_cmp) {
                gen_helper_ieee_input_cmp(cpu_env, cpu_fir[reg]);
            } else {
                gen_helper_ieee_input(cpu_env, cpu_fir[reg]);
            }
        }
        val = tcg_temp_new();
        tcg_gen_mov_i64(val, cpu_fir[reg]);
    }
    return val;
}

static void gen_fp_exc_clear(void)
{
#if defined(CONFIG_SOFTFLOAT_INLINE)
    TCGv_i32 zero = tcg_const_i32(0);
    tcg_gen_st8_i32(zero, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_exception_flags));
    tcg_temp_free_i32(zero);
#else
    gen_helper_fp_exc_clear(cpu_env);
#endif
}

static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */
    TCGv_i32 exc = tcg_temp_new_i32();
    TCGv_i32 reg;

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_ld8u_i32(exc, cpu_env,
                     offsetof(CPUAlphaState, fp_status.float_exception_flags));
#else
    gen_helper_fp_exc_get(exc, cpu_env);
#endif

    if (ignore) {
        tcg_gen_andi_i32(exc, exc, ~ignore);
    }

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);

    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(cpu_env, exc, reg);
    } else {
        gen_helper_fp_exc_raise(cpu_env, exc, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(exc);
}

static inline void gen_fp_exc_raise(int rc, int fn11)
{
    gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
}

static void gen_fcvtlq(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        /* The arithmetic right shift here, plus the sign-extended mask below
           yields a sign-extended result without an explicit ext32s_i64.  */
        tcg_gen_sari_i64(tmp, cpu_fir[rb], 32);
        tcg_gen_shri_i64(cpu_fir[rc], cpu_fir[rb], 29);
        tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rc], 0x3fffffff);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}

static void gen_fcvtql(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        tcg_gen_andi_i64(tmp, cpu_fir[rb], 0xC0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rb], 0x3FFFFFFF);
        tcg_gen_shli_i64(tmp, tmp, 32);
        tcg_gen_shli_i64(cpu_fir[rc], cpu_fir[rc], 29);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}

static void gen_fcvtql_v(DisasContext *ctx, int rb, int rc)
{
    if (rb != 31) {
        int lab = gen_new_label();
        TCGv tmp = tcg_temp_new();

        tcg_gen_ext32s_i64(tmp, cpu_fir[rb]);
        tcg_gen_brcond_i64(TCG_COND_EQ, tmp, cpu_fir[rb], lab);
        gen_excp(ctx, EXCP_ARITH, EXC_M_IOV);

        gen_set_label(lab);
    }
    gen_fcvtql(rb, rc);
}

#define FARITH2(name)                                                   \
    static inline void glue(gen_f, name)(int rb, int rc)                \
    {                                                                   \
        if (unlikely(rc == 31)) {                                       \
            return;                                                     \
        }                                                               \
        if (rb != 31) {                                                 \
            gen_helper_ ## name(cpu_fir[rc], cpu_env, cpu_fir[rb]);     \
        } else {                                                        \
            TCGv tmp = tcg_const_i64(0);                                \
            gen_helper_ ## name(cpu_fir[rc], cpu_env, tmp);             \
            tcg_temp_free(tmp);                                         \
        }                                                               \
    }

/* ??? VAX instruction qualifiers ignored.  */
FARITH2(sqrtf)
FARITH2(sqrtg)
FARITH2(cvtgf)
FARITH2(cvtgq)
FARITH2(cvtqf)
FARITH2(cvtqg)

static void gen_ieee_arith2(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], cpu_env, vb);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)

static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb;
    int ignore = 0;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    /* No need to set flushzero, since we have an integer output.  */
    gen_fp_exc_clear();
    vb = gen_ieee_input(rb, fn11, 0);

    /* Almost all integer conversions use cropped rounding, and most
       also do not have integer overflow enabled.  Special case that.  */
    switch (fn11) {
    case QUAL_RM_C:
        gen_helper_cvttq_c(cpu_fir[rc], cpu_env, vb);
        break;
    case QUAL_V | QUAL_RM_C:
    case QUAL_S | QUAL_V | QUAL_RM_C:
        ignore = float_flag_inexact;
        /* FALLTHRU */
    case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
        gen_helper_cvttq_svic(cpu_fir[rc], cpu_env, vb);
        break;
    default:
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(cpu_fir[rc], cpu_env, vb);
        ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
        ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
        break;
    }
    tcg_temp_free(vb);

    gen_fp_exc_raise_ignore(rc, fn11, ignore);
}

static void gen_ieee_intcvt(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);

    if (rb == 31) {
        vb = tcg_const_i64(0);
    } else {
        vb = cpu_fir[rb];
    }

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        gen_fp_exc_clear();
        helper(cpu_fir[rc], cpu_env, vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(cpu_fir[rc], cpu_env, vb);
    }

    if (rb == 31) {
        tcg_temp_free(vb);
    }
}

#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)

static void gen_cpys_internal(int ra, int rb, int rc, int inv_a, uint64_t mask)
{
    TCGv va, vb, vmask;
    int za = 0, zb = 0;

    if (unlikely(rc == 31)) {
        return;
    }

    vmask = tcg_const_i64(mask);

    TCGV_UNUSED_I64(va);
    if (ra == 31) {
        if (inv_a) {
            va = vmask;
        } else {
            za = 1;
        }
    } else {
        va = tcg_temp_new_i64();
        tcg_gen_mov_i64(va, cpu_fir[ra]);
        if (inv_a) {
            tcg_gen_andc_i64(va, vmask, va);
        } else {
            tcg_gen_and_i64(va, va, vmask);
        }
    }

    TCGV_UNUSED_I64(vb);
    if (rb == 31) {
        zb = 1;
    } else {
        vb = tcg_temp_new_i64();
        tcg_gen_andc_i64(vb, cpu_fir[rb], vmask);
    }

    switch (za << 1 | zb) {
    case 0 | 0:
        tcg_gen_or_i64(cpu_fir[rc], va, vb);
        break;
    case 0 | 1:
        tcg_gen_mov_i64(cpu_fir[rc], va);
        break;
    case 2 | 0:
        tcg_gen_mov_i64(cpu_fir[rc], vb);
        break;
    case 2 | 1:
        tcg_gen_movi_i64(cpu_fir[rc], 0);
        break;
    }

    tcg_temp_free(vmask);
    if (ra != 31) {
        tcg_temp_free(va);
    }
    if (rb != 31) {
        tcg_temp_free(vb);
    }
}

static inline void gen_fcpys(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0x8000000000000000ULL);
}

static inline void gen_fcpysn(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 1, 0x8000000000000000ULL);
}

static inline void gen_fcpyse(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0xFFF0000000000000ULL);
}

#define FARITH3(name)                                                   \
    static inline void glue(gen_f, name)(int ra, int rb, int rc)        \
    {                                                                   \
        TCGv va, vb;                                                    \
                                                                        \
        if (unlikely(rc == 31)) {                                       \
            return;                                                     \
        }                                                               \
        if (ra == 31) {                                                 \
            va = tcg_const_i64(0);                                      \
        } else {                                                        \
            va = cpu_fir[ra];                                           \
        }                                                               \
        if (rb == 31) {                                                 \
            vb = tcg_const_i64(0);                                      \
        } else {                                                        \
            vb = cpu_fir[rb];                                           \
        }                                                               \
                                                                        \
        gen_helper_ ## name(cpu_fir[rc], cpu_env, va, vb);              \
                                                                        \
        if (ra == 31) {                                                 \
            tcg_temp_free(va);                                          \
        }                                                               \
        if (rb == 31) {                                                 \
            tcg_temp_free(vb);                                          \
        }                                                               \
    }

/* ??? VAX instruction qualifiers ignored.  */
FARITH3(addf)
FARITH3(subf)
FARITH3(mulf)
FARITH3(divf)
FARITH3(addg)
FARITH3(subg)
FARITH3(mulg)
FARITH3(divg)
FARITH3(cmpgeq)
FARITH3(cmpglt)
FARITH3(cmpgle)

static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 0);
    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], cpu_env, va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)

static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 1);
    vb = gen_ieee_input(rb, fn11, 1);
    helper(cpu_fir[rc], cpu_env, va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)

static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1)
            mask |= 0xffull << (i * 8);
    }
    return mask;
}

/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
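/* For example, a byte mask of 0x81 selects bytes 0 and 7:
   zapnot_mask(0x81) == 0xff000000000000ffull, so ZAPNOT with that
   literal reduces to an AND with that constant.  */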
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}

static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], lit);
    else
        gen_helper_zapnot(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}

static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~lit);
    else
        gen_helper_zap(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}


/* EXTWH, EXTLH, EXTQH */
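/* The xxxH forms position the high part of an unaligned datum: the
   source is shifted left by 64 - 8*(addr & 7) bits.  Masking the shift
   count with 0x3f turns the degenerate 64-bit shift for (addr & 7) == 0
   into a shift by zero.  */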
static void gen_ext_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            lit = (64 - (lit & 7) * 8) & 0x3f;
            tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit);
        } else {
            TCGv tmp1 = tcg_temp_new();
            tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp1, tmp1, 3);
            tcg_gen_neg_i64(tmp1, tmp1);
            tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
            tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
            tcg_temp_free(tmp1);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}

/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
        } else {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp, tmp, 3);
            tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
            tcg_temp_free(tmp);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}

/* INSWH, INSLH, INSQH */
static void gen_ins_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31) || (islit && (lit & 7) == 0))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            /* Note that we have handled the (lit & 7) == 0 case above.  */
            tcg_gen_shri_i64(cpu_ir[rc], tmp, 64 - (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();

            /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
               Do this portably by splitting the shift into two parts:
               shift_count-1 and 1.  Arrange for the -1 by using
               ones-complement instead of twos-complement in the negation:
               ~((B & 7) * 8) & 63.  */

            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_not_i64(shift, shift);
            tcg_gen_andi_i64(shift, shift, 0x3f);

            tcg_gen_shr_i64(cpu_ir[rc], tmp, shift);
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[rc], 1);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}

/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           the same number of byte slots as the data and apply the zap
           at the end.  This is equivalent to simply performing the zap
           first and shifting afterward.  */
        gen_zapnoti(tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            tcg_gen_shli_i64(cpu_ir[rc], tmp, (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();
            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_shl_i64(cpu_ir[rc], tmp, shift);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}

/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit) {
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~((B & 7) * 8) & 63.  */

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}

/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit) {
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}

/* Code to call arith3 helpers */
#define ARITH3(name)                                                  \
static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,\
                                    uint8_t lit)                      \
{                                                                     \
    if (unlikely(rc == 31))                                           \
        return;                                                       \
                                                                      \
    if (ra != 31) {                                                   \
        if (islit) {                                                  \
            TCGv tmp = tcg_const_i64(lit);                            \
            gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp);         \
            tcg_temp_free(tmp);                                       \
        } else                                                        \
            gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);  \
    } else {                                                          \
        TCGv tmp1 = tcg_const_i64(0);                                 \
        if (islit) {                                                  \
            TCGv tmp2 = tcg_const_i64(lit);                           \
            gen_helper_ ## name(cpu_ir[rc], tmp1, tmp2);              \
            tcg_temp_free(tmp2);                                      \
        } else                                                        \
            gen_helper_ ## name(cpu_ir[rc], tmp1, cpu_ir[rb]);        \
        tcg_temp_free(tmp1);                                          \
    }                                                                 \
}
ARITH3(cmpbge)
ARITH3(minub8)
ARITH3(minsb8)
ARITH3(minuw4)
ARITH3(minsw4)
ARITH3(maxub8)
ARITH3(maxsb8)
ARITH3(maxuw4)
ARITH3(maxsw4)
ARITH3(perr)

/* Code to call arith3 helpers that also take the env argument */
#define ARITH3_EX(name)                                                 \
    static inline void glue(gen_, name)(int ra, int rb, int rc,         \
                                        int islit, uint8_t lit)         \
    {                                                                   \
        if (unlikely(rc == 31)) {                                       \
            return;                                                     \
        }                                                               \
        if (ra != 31) {                                                 \
            if (islit) {                                                \
                TCGv tmp = tcg_const_i64(lit);                          \
                gen_helper_ ## name(cpu_ir[rc], cpu_env,                \
                                    cpu_ir[ra], tmp);                   \
                tcg_temp_free(tmp);                                     \
            } else {                                                    \
                gen_helper_ ## name(cpu_ir[rc], cpu_env,                \
                                    cpu_ir[ra], cpu_ir[rb]);            \
            }                                                           \
        } else {                                                        \
            TCGv tmp1 = tcg_const_i64(0);                               \
            if (islit) {                                                \
                TCGv tmp2 = tcg_const_i64(lit);                         \
                gen_helper_ ## name(cpu_ir[rc], cpu_env, tmp1, tmp2);   \
                tcg_temp_free(tmp2);                                    \
            } else {                                                    \
                gen_helper_ ## name(cpu_ir[rc], cpu_env, tmp1, cpu_ir[rb]); \
            }                                                           \
            tcg_temp_free(tmp1);                                        \
        }                                                               \
    }
ARITH3_EX(addlv)
ARITH3_EX(sublv)
ARITH3_EX(addqv)
ARITH3_EX(subqv)
ARITH3_EX(mullv)
ARITH3_EX(mulqv)

#define MVIOP2(name)                                    \
static inline void glue(gen_, name)(int rb, int rc)     \
{                                                       \
    if (unlikely(rc == 31))                             \
        return;                                         \
    if (unlikely(rb == 31))                             \
        tcg_gen_movi_i64(cpu_ir[rc], 0);                \
    else                                                \
        gen_helper_ ## name(cpu_ir[rc], cpu_ir[rb]);    \
}
MVIOP2(pklb)
MVIOP2(pkwb)
MVIOP2(unpkbl)
MVIOP2(unpkbw)

static void gen_cmp(TCGCond cond, int ra, int rb, int rc,
                    int islit, uint8_t lit)
{
    TCGv va, vb;

    if (unlikely(rc == 31)) {
        return;
    }

    if (ra == 31) {
        va = tcg_const_i64(0);
    } else {
        va = cpu_ir[ra];
    }
    if (islit) {
        vb = tcg_const_i64(lit);
    } else {
        vb = cpu_ir[rb];
    }

    tcg_gen_setcond_i64(cond, cpu_ir[rc], va, vb);

    if (ra == 31) {
        tcg_temp_free(va);
    }
    if (islit) {
        tcg_temp_free(vb);
    }
}

static void gen_rx(int ra, int set)
{
    TCGv_i32 tmp;

    if (ra != 31) {
        tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, offsetof(CPUAlphaState, intr_flag));
    }

    tmp = tcg_const_i32(set);
    tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag));
    tcg_temp_free_i32(tmp);
}

static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode.  Many of these are trivial accesses
       to internal cpu registers.  */

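    /* Throughout, arguments arrive in $16 (IR_A0) and results are
       returned in $0 (IR_V0).  */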
    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        switch (palcode) {
        case 0x86:
            /* IMB */
            /* No-op inside QEMU.  */
            break;
        case 0x9E:
            /* RDUNIQUE */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_unique);
            break;
        case 0x9F:
            /* WRUNIQUE */
            tcg_gen_mov_i64(cpu_unique, cpu_ir[IR_A0]);
            break;
        default:
            return gen_excp(ctx, EXCP_CALL_PAL, palcode & 0xbf);
        }
        return NO_EXIT;
    }

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {
        switch (palcode) {
        case 0x01:
            /* CFLUSH */
            /* No-op inside QEMU.  */
            break;
        case 0x02:
            /* DRAINA */
            /* No-op inside QEMU.  */
            break;
        case 0x2D:
            /* WRVPTPTR */
            tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env, offsetof(CPUAlphaState, vptptr));
            break;
        case 0x31:
            /* WRVAL */
            tcg_gen_mov_i64(cpu_sysval, cpu_ir[IR_A0]);
            break;
        case 0x32:
            /* RDVAL */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_sysval);
            break;

        case 0x35: {
            /* SWPIPL */
            TCGv tmp;

            /* Note that we already know we're in kernel mode, so we know
               that PS only contains the 3 IPL bits.  */
            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUAlphaState, ps));

            /* But make sure to store only the 3 IPL bits from the user.  */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[IR_A0], PS_INT_MASK);
            tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, ps));
            tcg_temp_free(tmp);
            break;
        }

        case 0x36:
            /* RDPS */
            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUAlphaState, ps));
            break;
        case 0x38:
            /* WRUSP */
            tcg_gen_mov_i64(cpu_usp, cpu_ir[IR_A0]);
            break;
        case 0x3A:
            /* RDUSP */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_usp);
            break;
        case 0x3C:
            /* WHAMI */
            tcg_gen_ld32s_i64(cpu_ir[IR_V0], cpu_env,
                -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
            break;

        default:
            return gen_excp(ctx, EXCP_CALL_PAL, palcode & 0x3f);
        }
        return NO_EXIT;
    }
#endif

    return gen_invalid(ctx);
}

#ifndef CONFIG_USER_ONLY

#define PR_BYTE         0x100000
#define PR_LONG         0x200000
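/* cpu_pr_data tags each processor-register offset with an access-size
   flag in these high bits; CPUAlphaState is far smaller than 1MB, so
   the flags cannot collide with a real offset.  */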
1602
1603static int cpu_pr_data(int pr)
1604{
1605    switch (pr) {
1606    case  0: return offsetof(CPUAlphaState, ps) | PR_BYTE;
1607    case  1: return offsetof(CPUAlphaState, fen) | PR_BYTE;
1608    case  2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
1609    case  3: return offsetof(CPUAlphaState, trap_arg0);
1610    case  4: return offsetof(CPUAlphaState, trap_arg1);
1611    case  5: return offsetof(CPUAlphaState, trap_arg2);
1612    case  6: return offsetof(CPUAlphaState, exc_addr);
1613    case  7: return offsetof(CPUAlphaState, palbr);
1614    case  8: return offsetof(CPUAlphaState, ptbr);
1615    case  9: return offsetof(CPUAlphaState, vptptr);
1616    case 10: return offsetof(CPUAlphaState, unique);
1617    case 11: return offsetof(CPUAlphaState, sysval);
1618    case 12: return offsetof(CPUAlphaState, usp);
1619
1620    case 32 ... 39:
1621        return offsetof(CPUAlphaState, shadow[pr - 32]);
1622    case 40 ... 63:
1623        return offsetof(CPUAlphaState, scratch[pr - 40]);
1624
1625    case 251:
1626        return offsetof(CPUAlphaState, alarm_expire);
1627    }
1628    return 0;
1629}
1630
1631static ExitStatus gen_mfpr(int ra, int regno)
1632{
1633    int data = cpu_pr_data(regno);
1634
1635    /* In our emulated PALcode, these processor registers have no
1636       side effects from reading.  */
1637    if (ra == 31) {
1638        return NO_EXIT;
1639    }
1640
1641    /* Special help for VMTIME and WALLTIME.  */
1642    if (regno == 250 || regno == 249) {
1643        void (*helper)(TCGv) = gen_helper_get_walltime;
1644        if (regno == 249) {
1645            helper = gen_helper_get_vmtime;
1646        }
1647        if (use_icount) {
1648            gen_io_start();
1649            helper(cpu_ir[ra]);
1650            gen_io_end();
1651            return EXIT_PC_STALE;
1652        } else {
1653            helper(cpu_ir[ra]);
1654            return NO_EXIT;
1655        }
1656    }
1657
1658    /* The basic registers are data only, and unknown registers
1659       are read-zero, write-ignore.  */
1660    if (data == 0) {
1661        tcg_gen_movi_i64(cpu_ir[ra], 0);
1662    } else if (data & PR_BYTE) {
1663        tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, data & ~PR_BYTE);
1664    } else if (data & PR_LONG) {
1665        tcg_gen_ld32s_i64(cpu_ir[ra], cpu_env, data & ~PR_LONG);
1666    } else {
1667        tcg_gen_ld_i64(cpu_ir[ra], cpu_env, data);
1668    }
1669    return NO_EXIT;
1670}
1671
1672static ExitStatus gen_mtpr(DisasContext *ctx, int rb, int regno)
1673{
1674    TCGv tmp;
1675    int data;
1676
1677    if (rb == 31) {
1678        tmp = tcg_const_i64(0);
1679    } else {
1680        tmp = cpu_ir[rb];
1681    }
1682
1683    switch (regno) {
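        /* Register numbers 251-255 trigger an operation on write rather
           than storing data; everything else funnels through
           cpu_pr_data.  */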
1684    case 255:
1685        /* TBIA */
1686        gen_helper_tbia(cpu_env);
1687        break;
1688
1689    case 254:
1690        /* TBIS */
1691        gen_helper_tbis(cpu_env, tmp);
1692        break;
1693
1694    case 253:
1695        /* WAIT */
1696        tmp = tcg_const_i64(1);
1697        tcg_gen_st32_i64(tmp, cpu_env, -offsetof(AlphaCPU, env) +
1698                                       offsetof(CPUState, halted));
1699        return gen_excp(ctx, EXCP_HLT, 0);
1700
1701    case 252:
1702        /* HALT */
1703        gen_helper_halt(tmp);
1704        return EXIT_PC_STALE;
1705
1706    case 251:
1707        /* ALARM */
1708        gen_helper_set_alarm(cpu_env, tmp);
1709        break;
1710
1711    default:
1712        /* The basic registers are data only, and unknown registers
1713           are read-zero, write-ignore.  */
1714        data = cpu_pr_data(regno);
1715        if (data != 0) {
1716            if (data & PR_BYTE) {
1717                tcg_gen_st8_i64(tmp, cpu_env, data & ~PR_BYTE);
1718            } else if (data & PR_LONG) {
1719                tcg_gen_st32_i64(tmp, cpu_env, data & ~PR_LONG);
1720            } else {
1721                tcg_gen_st_i64(tmp, cpu_env, data);
1722            }
1723        }
1724        break;
1725    }
1726
1727    if (rb == 31) {
1728        tcg_temp_free(tmp);
1729    }
1730
1731    return NO_EXIT;
1732}
1733#endif /* !CONFIG_USER_ONLY */
1734
1735static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
1736{
1737    uint32_t palcode;
1738    int32_t disp21, disp16;
1739#ifndef CONFIG_USER_ONLY
1740    int32_t disp12;
1741#endif
1742    uint16_t fn11;
1743    uint8_t opc, ra, rb, rc, fpfn, fn7, islit, real_islit;
1744    uint8_t lit;
1745    ExitStatus ret;
1746
1747    /* Decode all instruction fields */
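        /* Every Alpha instruction is 32 bits wide: the major opcode is in
           bits [31:26], with ra in [25:21], rb in [20:16] and rc in [4:0].
           Operate formats place a literal flag in bit 12 and an 8-bit
           literal in [20:13]; memory formats carry a 16-bit displacement
           in [15:0] and branch formats a 21-bit displacement in [20:0].
           The shift-left/arithmetic-shift-right pairs below sign-extend
           disp21 and disp12 from their top bits; fn11 and fn7 are the
           operate-format function fields, and fpfn is the low 6 bits
           of fn11.  */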
1748    opc = insn >> 26;
1749    ra = (insn >> 21) & 0x1F;
1750    rb = (insn >> 16) & 0x1F;
1751    rc = insn & 0x1F;
1752    real_islit = islit = (insn >> 12) & 1;
1753    if (rb == 31 && !islit) {
1754        islit = 1;
1755        lit = 0;
1756    } else
1757        lit = (insn >> 13) & 0xFF;
1758    palcode = insn & 0x03FFFFFF;
1759    disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
1760    disp16 = (int16_t)(insn & 0x0000FFFF);
1761#ifndef CONFIG_USER_ONLY
1762    disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
1763#endif
1764    fn11 = (insn >> 5) & 0x000007FF;
1765    fpfn = fn11 & 0x3F;
1766    fn7 = (insn >> 5) & 0x0000007F;
1767    LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
1768              opc, ra, rb, rc, disp16);
1769
1770    ret = NO_EXIT;
1771    switch (opc) {
1772    case 0x00:
1773        /* CALL_PAL */
1774        ret = gen_call_pal(ctx, palcode);
1775        break;
1776    case 0x01:
1777        /* OPC01 */
1778        goto invalid_opc;
1779    case 0x02:
1780        /* OPC02 */
1781        goto invalid_opc;
1782    case 0x03:
1783        /* OPC03 */
1784        goto invalid_opc;
1785    case 0x04:
1786        /* OPC04 */
1787        goto invalid_opc;
1788    case 0x05:
1789        /* OPC05 */
1790        goto invalid_opc;
1791    case 0x06:
1792        /* OPC06 */
1793        goto invalid_opc;
1794    case 0x07:
1795        /* OPC07 */
1796        goto invalid_opc;
1797    case 0x08:
1798        /* LDA */
1799        if (likely(ra != 31)) {
1800            if (rb != 31)
1801                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
1802            else
1803                tcg_gen_movi_i64(cpu_ir[ra], disp16);
1804        }
1805        break;
1806    case 0x09:
1807        /* LDAH */
1808        if (likely(ra != 31)) {
1809            if (rb != 31)
1810                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
1811            else
1812                tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
1813        }
1814        break;
1815    case 0x0A:
1816        /* LDBU */
1817        if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
1818            gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
1819            break;
1820        }
1821        goto invalid_opc;
1822    case 0x0B:
1823        /* LDQ_U */
1824        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
1825        break;
1826    case 0x0C:
1827        /* LDWU */
1828        if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
1829            gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
1830            break;
1831        }
1832        goto invalid_opc;
1833    case 0x0D:
1834        /* STW */
1835        gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
1836        break;
1837    case 0x0E:
1838        /* STB */
1839        gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
1840        break;
1841    case 0x0F:
1842        /* STQ_U */
1843        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
1844        break;
1845    case 0x10:
1846        switch (fn7) {
1847        case 0x00:
1848            /* ADDL */
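                /* Longword (32-bit) arithmetic is performed in 64 bits
                   and the result canonicalized with tcg_gen_ext32s_i64,
                   since Alpha keeps longword results in registers as the
                   sign-extension of bits [31:0].  The same pattern
                   repeats for the other longword operations below.  */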
1849            if (likely(rc != 31)) {
1850                if (ra != 31) {
1851                    if (islit) {
1852                        tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1853                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1854                    } else {
1855                        tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1856                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1857                    }
1858                } else {
1859                    if (islit)
1860                        tcg_gen_movi_i64(cpu_ir[rc], lit);
1861                    else
1862                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
1863                }
1864            }
1865            break;
1866        case 0x02:
1867            /* S4ADDL */
1868            if (likely(rc != 31)) {
1869                if (ra != 31) {
1870                    TCGv tmp = tcg_temp_new();
1871                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1872                    if (islit)
1873                        tcg_gen_addi_i64(tmp, tmp, lit);
1874                    else
1875                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
1876                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1877                    tcg_temp_free(tmp);
1878                } else {
1879                    if (islit)
1880                        tcg_gen_movi_i64(cpu_ir[rc], lit);
1881                    else
1882                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
1883                }
1884            }
1885            break;
1886        case 0x09:
1887            /* SUBL */
1888            if (likely(rc != 31)) {
1889                if (ra != 31) {
1890                    if (islit)
1891                        tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1892                    else
1893                        tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1894                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1895                } else {
1896                    if (islit)
1897                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
1898                    else {
1899                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1900                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                        }
1901                }
1902            }
1903            break;
1904        case 0x0B:
1905            /* S4SUBL */
1906            if (likely(rc != 31)) {
1907                if (ra != 31) {
1908                    TCGv tmp = tcg_temp_new();
1909                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1910                    if (islit)
1911                        tcg_gen_subi_i64(tmp, tmp, lit);
1912                    else
1913                        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
1914                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1915                    tcg_temp_free(tmp);
1916                } else {
1917                    if (islit)
1918                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
1919                    else {
1920                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1921                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1922                    }
1923                }
1924            }
1925            break;
1926        case 0x0F:
1927            /* CMPBGE */
1928            gen_cmpbge(ra, rb, rc, islit, lit);
1929            break;
1930        case 0x12:
1931            /* S8ADDL */
1932            if (likely(rc != 31)) {
1933                if (ra != 31) {
1934                    TCGv tmp = tcg_temp_new();
1935                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1936                    if (islit)
1937                        tcg_gen_addi_i64(tmp, tmp, lit);
1938                    else
1939                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
1940                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1941                    tcg_temp_free(tmp);
1942                } else {
1943                    if (islit)
1944                        tcg_gen_movi_i64(cpu_ir[rc], lit);
1945                    else
1946                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
1947                }
1948            }
1949            break;
1950        case 0x1B:
1951            /* S8SUBL */
1952            if (likely(rc != 31)) {
1953                if (ra != 31) {
1954                    TCGv tmp = tcg_temp_new();
1955                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1956                    if (islit)
1957                        tcg_gen_subi_i64(tmp, tmp, lit);
1958                    else
1959                        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
1960                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1961                    tcg_temp_free(tmp);
1962                } else {
1963                    if (islit)
1964                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
1965                    else {
1966                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1967                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1968                    }
1969                }
1970            }
1971            break;
1972        case 0x1D:
1973            /* CMPULT */
1974            gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);
1975            break;
1976        case 0x20:
1977            /* ADDQ */
1978            if (likely(rc != 31)) {
1979                if (ra != 31) {
1980                    if (islit)
1981                        tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1982                    else
1983                        tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1984                } else {
1985                    if (islit)
1986                        tcg_gen_movi_i64(cpu_ir[rc], lit);
1987                    else
1988                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1989                }
1990            }
1991            break;
1992        case 0x22:
1993            /* S4ADDQ */
1994            if (likely(rc != 31)) {
1995                if (ra != 31) {
1996                    TCGv tmp = tcg_temp_new();
1997                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1998                    if (islit)
1999                        tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
2000                    else
2001                        tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
2002                    tcg_temp_free(tmp);
2003                } else {
2004                    if (islit)
2005                        tcg_gen_movi_i64(cpu_ir[rc], lit);
2006                    else
2007                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
2008                }
2009            }
2010            break;
2011        case 0x29:
2012            /* SUBQ */
2013            if (likely(rc != 31)) {
2014                if (ra != 31) {
2015                    if (islit)
2016                        tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
2017                    else
2018                        tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2019                } else {
2020                    if (islit)
2021                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
2022                    else
2023                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
2024                }
2025            }
2026            break;
2027        case 0x2B:
2028            /* S4SUBQ */
2029            if (likely(rc != 31)) {
2030                if (ra != 31) {
2031                    TCGv tmp = tcg_temp_new();
2032                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
2033                    if (islit)
2034                        tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
2035                    else
2036                        tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
2037                    tcg_temp_free(tmp);
2038                } else {
2039                    if (islit)
2040                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
2041                    else
2042                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
2043                }
2044            }
2045            break;
2046        case 0x2D:
2047            /* CMPEQ */
2048            gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);
2049            break;
2050        case 0x32:
2051            /* S8ADDQ */
2052            if (likely(rc != 31)) {
2053                if (ra != 31) {
2054                    TCGv tmp = tcg_temp_new();
2055                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
2056                    if (islit)
2057                        tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
2058                    else
2059                        tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
2060                    tcg_temp_free(tmp);
2061                } else {
2062                    if (islit)
2063                        tcg_gen_movi_i64(cpu_ir[rc], lit);
2064                    else
2065                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
2066                }
2067            }
2068            break;
2069        case 0x3B:
2070            /* S8SUBQ */
2071            if (likely(rc != 31)) {
2072                if (ra != 31) {
2073                    TCGv tmp = tcg_temp_new();
2074                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
2075                    if (islit)
2076                        tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
2077                    else
2078                        tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
2079                    tcg_temp_free(tmp);
2080                } else {
2081                    if (islit)
2082                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
2083                    else
2084                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
2085                }
2086            }
2087            break;
2088        case 0x3D:
2089            /* CMPULE */
2090            gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);
2091            break;
2092        case 0x40:
2093            /* ADDL/V */
2094            gen_addlv(ra, rb, rc, islit, lit);
2095            break;
2096        case 0x49:
2097            /* SUBL/V */
2098            gen_sublv(ra, rb, rc, islit, lit);
2099            break;
2100        case 0x4D:
2101            /* CMPLT */
2102            gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);
2103            break;
2104        case 0x60:
2105            /* ADDQ/V */
2106            gen_addqv(ra, rb, rc, islit, lit);
2107            break;
2108        case 0x69:
2109            /* SUBQ/V */
2110            gen_subqv(ra, rb, rc, islit, lit);
2111            break;
2112        case 0x6D:
2113            /* CMPLE */
2114            gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
2115            break;
2116        default:
2117            goto invalid_opc;
2118        }
2119        break;
2120    case 0x11:
2121        switch (fn7) {
2122        case 0x00:
2123            /* AND */
2124            if (likely(rc != 31)) {
2125                if (ra == 31)
2126                    tcg_gen_movi_i64(cpu_ir[rc], 0);
2127                else if (islit)
2128                    tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
2129                else
2130                    tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2131            }
2132            break;
2133        case 0x08:
2134            /* BIC */
2135            if (likely(rc != 31)) {
2136                if (ra != 31) {
2137                    if (islit)
2138                        tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
2139                    else
2140                        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2141                } else
2142                    tcg_gen_movi_i64(cpu_ir[rc], 0);
2143            }
2144            break;
2145        case 0x14:
2146            /* CMOVLBS */
2147            gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);
2148            break;
2149        case 0x16:
2150            /* CMOVLBC */
2151            gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
2152            break;
2153        case 0x20:
2154            /* BIS */
2155            if (likely(rc != 31)) {
2156                if (ra != 31) {
2157                    if (islit)
2158                        tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
2159                    else
2160                        tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2161                } else {
2162                    if (islit)
2163                        tcg_gen_movi_i64(cpu_ir[rc], lit);
2164                    else
2165                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
2166                }
2167            }
2168            break;
2169        case 0x24:
2170            /* CMOVEQ */
2171            gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
2172            break;
2173        case 0x26:
2174            /* CMOVNE */
2175            gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);
2176            break;
2177        case 0x28:
2178            /* ORNOT */
2179            if (likely(rc != 31)) {
2180                if (ra != 31) {
2181                    if (islit)
2182                        tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
2183                    else
2184                        tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2185                } else {
2186                    if (islit)
2187                        tcg_gen_movi_i64(cpu_ir[rc], ~lit);
2188                    else
2189                        tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
2190                }
2191            }
2192            break;
2193        case 0x40:
2194            /* XOR */
2195            if (likely(rc != 31)) {
2196                if (ra != 31) {
2197                    if (islit)
2198                        tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
2199                    else
2200                        tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2201                } else {
2202                    if (islit)
2203                        tcg_gen_movi_i64(cpu_ir[rc], lit);
2204                    else
2205                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
2206                }
2207            }
2208            break;
2209        case 0x44:
2210            /* CMOVLT */
2211            gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);
2212            break;
2213        case 0x46:
2214            /* CMOVGE */
2215            gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);
2216            break;
2217        case 0x48:
2218            /* EQV */
2219            if (likely(rc != 31)) {
2220                if (ra != 31) {
2221                    if (islit)
2222                        tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
2223                    else
2224                        tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2225                } else {
2226                    if (islit)
2227                        tcg_gen_movi_i64(cpu_ir[rc], ~lit);
2228                    else
2229                        tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
2230                }
2231            }
2232            break;
2233        case 0x61:
2234            /* AMASK */
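                /* The amask bits cached in tb->flags describe which
                   architecture extensions this CPU implements.  AMASK
                   clears those bits from the source operand, so a zero
                   bit in the result tells software that the
                   corresponding feature is present.  */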
2235            if (likely(rc != 31)) {
2236                uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT;
2237
2238                if (islit) {
2239                    tcg_gen_movi_i64(cpu_ir[rc], lit & ~amask);
2240                } else {
2241                    tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rb], ~amask);
2242                }
2243            }
2244            break;
2245        case 0x64:
2246            /* CMOVLE */
2247            gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);
2248            break;
2249        case 0x66:
2250            /* CMOVGT */
2251            gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);
2252            break;
2253        case 0x6C:
2254            /* IMPLVER */
2255            if (rc != 31) {
2256                tcg_gen_movi_i64(cpu_ir[rc], ctx->implver);
2257            }
2258            break;
2259        default:
2260            goto invalid_opc;
2261        }
2262        break;
2263    case 0x12:
2264        switch (fn7) {
2265        case 0x02:
2266            /* MSKBL */
2267            gen_msk_l(ra, rb, rc, islit, lit, 0x01);
2268            break;
2269        case 0x06:
2270            /* EXTBL */
2271            gen_ext_l(ra, rb, rc, islit, lit, 0x01);
2272            break;
2273        case 0x0B:
2274            /* INSBL */
2275            gen_ins_l(ra, rb, rc, islit, lit, 0x01);
2276            break;
2277        case 0x12:
2278            /* MSKWL */
2279            gen_msk_l(ra, rb, rc, islit, lit, 0x03);
2280            break;
2281        case 0x16:
2282            /* EXTWL */
2283            gen_ext_l(ra, rb, rc, islit, lit, 0x03);
2284            break;
2285        case 0x1B:
2286            /* INSWL */
2287            gen_ins_l(ra, rb, rc, islit, lit, 0x03);
2288            break;
2289        case 0x22:
2290            /* MSKLL */
2291            gen_msk_l(ra, rb, rc, islit, lit, 0x0f);
2292            break;
2293        case 0x26:
2294            /* EXTLL */
2295            gen_ext_l(ra, rb, rc, islit, lit, 0x0f);
2296            break;
2297        case 0x2B:
2298            /* INSLL */
2299            gen_ins_l(ra, rb, rc, islit, lit, 0x0f);
2300            break;
2301        case 0x30:
2302            /* ZAP */
2303            gen_zap(ra, rb, rc, islit, lit);
2304            break;
2305        case 0x31:
2306            /* ZAPNOT */
2307            gen_zapnot(ra, rb, rc, islit, lit);
2308            break;
2309        case 0x32:
2310            /* MSKQL */
2311            gen_msk_l(ra, rb, rc, islit, lit, 0xff);
2312            break;
2313        case 0x34:
2314            /* SRL */
2315            if (likely(rc != 31)) {
2316                if (ra != 31) {
2317                    if (islit)
2318                        tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
2319                    else {
2320                        TCGv shift = tcg_temp_new();
2321                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2322                        tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
2323                        tcg_temp_free(shift);
2324                    }
2325                } else
2326                    tcg_gen_movi_i64(cpu_ir[rc], 0);
2327            }
2328            break;
2329        case 0x36:
2330            /* EXTQL */
2331            gen_ext_l(ra, rb, rc, islit, lit, 0xff);
2332            break;
2333        case 0x39:
2334            /* SLL */
2335            if (likely(rc != 31)) {
2336                if (ra != 31) {
2337                    if (islit)
2338                        tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
2339                    else {
2340                        TCGv shift = tcg_temp_new();
2341                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2342                        tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
2343                        tcg_temp_free(shift);
2344                    }
2345                } else
2346                    tcg_gen_movi_i64(cpu_ir[rc], 0);
2347            }
2348            break;
2349        case 0x3B:
2350            /* INSQL */
2351            gen_ins_l(ra, rb, rc, islit, lit, 0xff);
2352            break;
2353        case 0x3C:
2354            /* SRA */
2355            if (likely(rc != 31)) {
2356                if (ra != 31) {
2357                    if (islit)
2358                        tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
2359                    else {
2360                        TCGv shift = tcg_temp_new();
2361                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2362                        tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
2363                        tcg_temp_free(shift);
2364                    }
2365                } else
2366                    tcg_gen_movi_i64(cpu_ir[rc], 0);
2367            }
2368            break;
2369        case 0x52:
2370            /* MSKWH */
2371            gen_msk_h(ra, rb, rc, islit, lit, 0x03);
2372            break;
2373        case 0x57:
2374            /* INSWH */
2375            gen_ins_h(ra, rb, rc, islit, lit, 0x03);
2376            break;
2377        case 0x5A:
2378            /* EXTWH */
2379            gen_ext_h(ra, rb, rc, islit, lit, 0x03);
2380            break;
2381        case 0x62:
2382            /* MSKLH */
2383            gen_msk_h(ra, rb, rc, islit, lit, 0x0f);
2384            break;
2385        case 0x67:
2386            /* INSLH */
2387            gen_ins_h(ra, rb, rc, islit, lit, 0x0f);
2388            break;
2389        case 0x6A:
2390            /* EXTLH */
2391            gen_ext_h(ra, rb, rc, islit, lit, 0x0f);
2392            break;
2393        case 0x72:
2394            /* MSKQH */
2395            gen_msk_h(ra, rb, rc, islit, lit, 0xff);
2396            break;
2397        case 0x77:
2398            /* INSQH */
2399            gen_ins_h(ra, rb, rc, islit, lit, 0xff);
2400            break;
2401        case 0x7A:
2402            /* EXTQH */
2403            gen_ext_h(ra, rb, rc, islit, lit, 0xff);
2404            break;
2405        default:
2406            goto invalid_opc;
2407        }
2408        break;
2409    case 0x13:
2410        switch (fn7) {
2411        case 0x00:
2412            /* MULL */
2413            if (likely(rc != 31)) {
2414                if (ra == 31)
2415                    tcg_gen_movi_i64(cpu_ir[rc], 0);
2416                else {
2417                    if (islit)
2418                        tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
2419                    else
2420                        tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2421                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
2422                }
2423            }
2424            break;
2425        case 0x20:
2426            /* MULQ */
2427            if (likely(rc != 31)) {
2428                if (ra == 31)
2429                    tcg_gen_movi_i64(cpu_ir[rc], 0);
2430                else if (islit)
2431                    tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
2432                else
2433                    tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2434            }
2435            break;
2436                if (unlikely(rc == 31)) {
2437            /* UMULH */
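                /* tcg_gen_mulu2_i64 computes the full 128-bit unsigned
                   product into a (low, high) pair; the low half is
                   discarded and only the high 64 bits are kept, which
                   is exactly what UMULH returns.  */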
2438            {
2439                TCGv low;
2440                if (unlikely(rc == 31)){
2441                    break;
2442                }
2443                if (ra == 31) {
2444                    tcg_gen_movi_i64(cpu_ir[rc], 0);
2445                    break;
2446                }
2447                low = tcg_temp_new();
2448                if (islit) {
2449                    tcg_gen_movi_i64(low, lit);
2450                    tcg_gen_mulu2_i64(low, cpu_ir[rc], cpu_ir[ra], low);
2451                } else {
2452                    tcg_gen_mulu2_i64(low, cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2453                }
2454                tcg_temp_free(low);
2455            }
2456            break;
2457        case 0x40:
2458            /* MULL/V */
2459            gen_mullv(ra, rb, rc, islit, lit);
2460            break;
2461        case 0x60:
2462            /* MULQ/V */
2463            gen_mulqv(ra, rb, rc, islit, lit);
2464            break;
2465        default:
2466            goto invalid_opc;
2467        }
2468        break;
2469    case 0x14:
2470        switch (fpfn) { /* fn11 & 0x3F */
2471        case 0x04:
2472            /* ITOFS */
2473            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
2474                goto invalid_opc;
2475            }
2476            if (likely(rc != 31)) {
2477                if (ra != 31) {
2478                    TCGv_i32 tmp = tcg_temp_new_i32();
2479                    tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
2480                    gen_helper_memory_to_s(cpu_fir[rc], tmp);
2481                    tcg_temp_free_i32(tmp);
2482                } else
2483                    tcg_gen_movi_i64(cpu_fir[rc], 0);
2484            }
2485            break;
2486        case 0x0A:
2487            /* SQRTF */
2488            if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2489                gen_fsqrtf(rb, rc);
2490                break;
2491            }
2492            goto invalid_opc;
2493        case 0x0B:
2494            /* SQRTS */
2495            if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2496                gen_fsqrts(ctx, rb, rc, fn11);
2497                break;
2498            }
2499            goto invalid_opc;
2500        case 0x14:
2501            /* ITOFF */
2502            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
2503                goto invalid_opc;
2504            }
2505            if (likely(rc != 31)) {
2506                if (ra != 31) {
2507                    TCGv_i32 tmp = tcg_temp_new_i32();
2508                    tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
2509                    gen_helper_memory_to_f(cpu_fir[rc], tmp);
2510                    tcg_temp_free_i32(tmp);
2511                } else
2512                    tcg_gen_movi_i64(cpu_fir[rc], 0);
2513            }
2514            break;
2515        case 0x24:
2516            /* ITOFT */
2517            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
2518                goto invalid_opc;
2519            }
2520            if (likely(rc != 31)) {
2521                if (ra != 31)
2522                    tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
2523                else
2524                    tcg_gen_movi_i64(cpu_fir[rc], 0);
2525            }
2526            break;
2527        case 0x2A:
2528            /* SQRTG */
2529            if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2530                gen_fsqrtg(rb, rc);
2531                break;
2532            }
2533            goto invalid_opc;
2534        case 0x2B:
2535            /* SQRTT */
2536            if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2537                gen_fsqrtt(ctx, rb, rc, fn11);
2538                break;
2539            }
2540            goto invalid_opc;
2541        default:
2542            goto invalid_opc;
2543        }
2544        break;
2545    case 0x15:
2546        /* VAX floating point */
2547        /* XXX: rounding mode and trap are ignored (!) */
2548        switch (fpfn) { /* fn11 & 0x3F */
2549        case 0x00:
2550            /* ADDF */
2551            gen_faddf(ra, rb, rc);
2552            break;
2553        case 0x01:
2554            /* SUBF */
2555            gen_fsubf(ra, rb, rc);
2556            break;
2557        case 0x02:
2558            /* MULF */
2559            gen_fmulf(ra, rb, rc);
2560            break;
2561        case 0x03:
2562            /* DIVF */
2563            gen_fdivf(ra, rb, rc);
2564            break;
2565        case 0x1E:
2566            /* CVTDG */
2567#if 0 // TODO
2568            gen_fcvtdg(rb, rc);
2569#else
2570            goto invalid_opc;
2571#endif
2572            break;
2573        case 0x20:
2574            /* ADDG */
2575            gen_faddg(ra, rb, rc);
2576            break;
2577        case 0x21:
2578            /* SUBG */
2579            gen_fsubg(ra, rb, rc);
2580            break;
2581        case 0x22:
2582            /* MULG */
2583            gen_fmulg(ra, rb, rc);
2584            break;
2585        case 0x23:
2586            /* DIVG */
2587            gen_fdivg(ra, rb, rc);
2588            break;
2589        case 0x25:
2590            /* CMPGEQ */
2591            gen_fcmpgeq(ra, rb, rc);
2592            break;
2593        case 0x26:
2594            /* CMPGLT */
2595            gen_fcmpglt(ra, rb, rc);
2596            break;
2597        case 0x27:
2598            /* CMPGLE */
2599            gen_fcmpgle(ra, rb, rc);
2600            break;
2601        case 0x2C:
2602            /* CVTGF */
2603            gen_fcvtgf(rb, rc);
2604            break;
2605        case 0x2D:
2606            /* CVTGD */
2607#if 0 // TODO
2608            gen_fcvtgd(rb, rc);
2609#else
2610            goto invalid_opc;
2611#endif
2612            break;
2613        case 0x2F:
2614            /* CVTGQ */
2615            gen_fcvtgq(rb, rc);
2616            break;
2617        case 0x3C:
2618            /* CVTQF */
2619            gen_fcvtqf(rb, rc);
2620            break;
2621        case 0x3E:
2622            /* CVTQG */
2623            gen_fcvtqg(rb, rc);
2624            break;
2625        default:
2626            goto invalid_opc;
2627        }
2628        break;
2629    case 0x16:
2630        /* IEEE floating-point */
2631        switch (fpfn) { /* fn11 & 0x3F */
2632        case 0x00:
2633            /* ADDS */
2634            gen_fadds(ctx, ra, rb, rc, fn11);
2635            break;
2636        case 0x01:
2637            /* SUBS */
2638            gen_fsubs(ctx, ra, rb, rc, fn11);
2639            break;
2640        case 0x02:
2641            /* MULS */
2642            gen_fmuls(ctx, ra, rb, rc, fn11);
2643            break;
2644        case 0x03:
2645            /* DIVS */
2646            gen_fdivs(ctx, ra, rb, rc, fn11);
2647            break;
2648        case 0x20:
2649            /* ADDT */
2650            gen_faddt(ctx, ra, rb, rc, fn11);
2651            break;
2652        case 0x21:
2653            /* SUBT */
2654            gen_fsubt(ctx, ra, rb, rc, fn11);
2655            break;
2656        case 0x22:
2657            /* MULT */
2658            gen_fmult(ctx, ra, rb, rc, fn11);
2659            break;
2660        case 0x23:
2661            /* DIVT */
2662            gen_fdivt(ctx, ra, rb, rc, fn11);
2663            break;
2664        case 0x24:
2665            /* CMPTUN */
2666            gen_fcmptun(ctx, ra, rb, rc, fn11);
2667            break;
2668        case 0x25:
2669            /* CMPTEQ */
2670            gen_fcmpteq(ctx, ra, rb, rc, fn11);
2671            break;
2672        case 0x26:
2673            /* CMPTLT */
2674            gen_fcmptlt(ctx, ra, rb, rc, fn11);
2675            break;
2676        case 0x27:
2677            /* CMPTLE */
2678            gen_fcmptle(ctx, ra, rb, rc, fn11);
2679            break;
2680        case 0x2C:
2681            if (fn11 == 0x2AC || fn11 == 0x6AC) {
2682                /* CVTST */
2683                gen_fcvtst(ctx, rb, rc, fn11);
2684            } else {
2685                /* CVTTS */
2686                gen_fcvtts(ctx, rb, rc, fn11);
2687            }
2688            break;
2689        case 0x2F:
2690            /* CVTTQ */
2691            gen_fcvttq(ctx, rb, rc, fn11);
2692            break;
2693        case 0x3C:
2694            /* CVTQS */
2695            gen_fcvtqs(ctx, rb, rc, fn11);
2696            break;
2697        case 0x3E:
2698            /* CVTQT */
2699            gen_fcvtqt(ctx, rb, rc, fn11);
2700            break;
2701        default:
2702            goto invalid_opc;
2703        }
2704        break;
2705    case 0x17:
2706        switch (fn11) {
2707        case 0x010:
2708            /* CVTLQ */
2709            gen_fcvtlq(rb, rc);
2710            break;
2711        case 0x020:
2712            if (likely(rc != 31)) {
2713                if (ra == rb) {
2714                    /* FMOV */
2715                    if (ra == 31)
2716                        tcg_gen_movi_i64(cpu_fir[rc], 0);
2717                    else
2718                        tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
2719                } else {
2720                    /* CPYS */
2721                    gen_fcpys(ra, rb, rc);
2722                }
2723            }
2724            break;
2725        case 0x021:
2726            /* CPYSN */
2727            gen_fcpysn(ra, rb, rc);
2728            break;
2729        case 0x022:
2730            /* CPYSE */
2731            gen_fcpyse(ra, rb, rc);
2732            break;
2733        case 0x024:
2734            /* MT_FPCR */
2735            if (likely(ra != 31))
2736                gen_helper_store_fpcr(cpu_env, cpu_fir[ra]);
2737            else {
2738                TCGv tmp = tcg_const_i64(0);
2739                gen_helper_store_fpcr(cpu_env, tmp);
2740                tcg_temp_free(tmp);
2741            }
2742            break;
2743        case 0x025:
2744            /* MF_FPCR */
2745            if (likely(ra != 31))
2746                gen_helper_load_fpcr(cpu_fir[ra], cpu_env);
2747            break;
2748        case 0x02A:
2749            /* FCMOVEQ */
2750            gen_fcmov(TCG_COND_EQ, ra, rb, rc);
2751            break;
2752        case 0x02B:
2753            /* FCMOVNE */
2754            gen_fcmov(TCG_COND_NE, ra, rb, rc);
2755            break;
2756        case 0x02C:
2757            /* FCMOVLT */
2758            gen_fcmov(TCG_COND_LT, ra, rb, rc);
2759            break;
2760        case 0x02D:
2761            /* FCMOVGE */
2762            gen_fcmov(TCG_COND_GE, ra, rb, rc);
2763            break;
2764        case 0x02E:
2765            /* FCMOVLE */
2766            gen_fcmov(TCG_COND_LE, ra, rb, rc);
2767            break;
2768        case 0x02F:
2769            /* FCMOVGT */
2770            gen_fcmov(TCG_COND_GT, ra, rb, rc);
2771            break;
2772        case 0x030:
2773            /* CVTQL */
2774            gen_fcvtql(rb, rc);
2775            break;
2776        case 0x130:
2777            /* CVTQL/V */
2778        case 0x530:
2779            /* CVTQL/SV */
2780            /* ??? I'm pretty sure there's nothing that /sv needs to do that
2781               /v doesn't do.  The only thing I can think of is that /sv is a
2782               valid instruction merely for completeness in the ISA.  */
2783            gen_fcvtql_v(ctx, rb, rc);
2784            break;
2785        default:
2786            goto invalid_opc;
2787        }
2788        break;
2789    case 0x18:
2790        switch ((uint16_t)disp16) {
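            /* Opcode 0x18 is mostly serialization barriers and prefetch
               hints.  Under QEMU's sequential TCG execution model these
               have no observable effect, so they can be implemented as
               no-ops.  */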
2791        case 0x0000:
2792            /* TRAPB */
2793            /* No-op.  */
2794            break;
2795        case 0x0400:
2796            /* EXCB */
2797            /* No-op.  */
2798            break;
2799        case 0x4000:
2800            /* MB */
2801            /* No-op */
2802            break;
2803        case 0x4400:
2804            /* WMB */
2805            /* No-op */
2806            break;
2807        case 0x8000:
2808            /* FETCH */
2809            /* No-op */
2810            break;
2811        case 0xA000:
2812            /* FETCH_M */
2813            /* No-op */
2814            break;
2815        case 0xC000:
2816            /* RPCC */
2817            if (ra != 31) {
2818                if (use_icount) {
2819                    gen_io_start();
2820                    gen_helper_load_pcc(cpu_ir[ra], cpu_env);
2821                    gen_io_end();
2822                    ret = EXIT_PC_STALE;
2823                } else {
2824                    gen_helper_load_pcc(cpu_ir[ra], cpu_env);
2825                }
2826            }
2827            break;
2828        case 0xE000:
2829            /* RC */
2830            gen_rx(ra, 0);
2831            break;
2832        case 0xE800:
2833            /* ECB */
2834            break;
2835        case 0xF000:
2836            /* RS */
2837            gen_rx(ra, 1);
2838            break;
2839        case 0xF800:
2840            /* WH64 */
2841            /* No-op */
2842            break;
2843        default:
2844            goto invalid_opc;
2845        }
2846        break;
2847    case 0x19:
2848        /* HW_MFPR (PALcode) */
2849#ifndef CONFIG_USER_ONLY
2850        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
2851            return gen_mfpr(ra, insn & 0xffff);
2852        }
2853#endif
2854        goto invalid_opc;
2855    case 0x1A:
2856        /* JMP, JSR, RET, JSR_COROUTINE.  These only differ by the branch
2857           prediction stack action, which of course we don't implement.  */
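            /* The target address is forced to instruction alignment by
               clearing its low two bits, and ra (when not R31) receives
               the return address: ctx->pc has already been advanced past
               this instruction.  */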
2858        if (rb != 31) {
2859            tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
2860        } else {
2861            tcg_gen_movi_i64(cpu_pc, 0);
2862        }
2863        if (ra != 31) {
2864            tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
2865        }
2866        ret = EXIT_PC_UPDATED;
2867        break;
2868    case 0x1B:
2869        /* HW_LD (PALcode) */
2870#ifndef CONFIG_USER_ONLY
2871        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
2872            TCGv addr;
2873
2874            if (ra == 31) {
2875                break;
2876            }
2877
2878            addr = tcg_temp_new();
2879            if (rb != 31)
2880                tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
2881            else
2882                tcg_gen_movi_i64(addr, disp12);
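                /* The function field in bits [15:12] selects the access
                   type; within each pair of cases, bit 0 selects
                   longword (clear) versus quadword (set).  */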
2883            switch ((insn >> 12) & 0xF) {
2884            case 0x0:
2885                /* Longword physical access (hw_ldl/p) */
2886                gen_helper_ldl_phys(cpu_ir[ra], addr);
2887                break;
2888            case 0x1:
2889                /* Quadword physical access (hw_ldq/p) */
2890                gen_helper_ldq_phys(cpu_ir[ra], addr);
2891                break;
2892            case 0x2:
2893                /* Longword physical access with lock (hw_ldl_l/p) */
2894                gen_helper_ldl_l_phys(cpu_ir[ra], cpu_env, addr);
2895                break;
2896            case 0x3:
2897                /* Quadword physical access with lock (hw_ldq_l/p) */
2898                gen_helper_ldq_l_phys(cpu_ir[ra], cpu_env, addr);
2899                break;
2900            case 0x4:
2901                /* Longword virtual PTE fetch (hw_ldl/v) */
2902                goto invalid_opc;
2903            case 0x5:
2904                /* Quadword virtual PTE fetch (hw_ldq/v) */
2905                goto invalid_opc;
2907            case 0x6:
2908                /* Invalid */
2909                goto invalid_opc;
2910            case 0x7:
2911                /* Invalid */
2912                goto invalid_opc;
2913            case 0x8:
2914                /* Longword virtual access (hw_ldl) */
2915                goto invalid_opc;
2916            case 0x9:
2917                /* Quadword virtual access (hw_ldq) */
2918                goto invalid_opc;
2919            case 0xA:
2920                /* Longword virtual access with protection check (hw_ldl/w) */
2921                tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_KERNEL_IDX);
2922                break;
2923            case 0xB:
2924                /* Quadword virtual access with protection check (hw_ldq/w) */
2925                tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_KERNEL_IDX);
2926                break;
2927            case 0xC:
2928                /* Longword virtual access with alt access mode (hw_ldl/a) */
2929                goto invalid_opc;
2930            case 0xD:
2931                /* Quadword virtual access with alt access mode (hw_ldq/a) */
2932                goto invalid_opc;
2933            case 0xE:
2934                /* Longword virtual access with alternate access mode and
2935                   protection checks (hw_ldl/wa) */
2936                tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_USER_IDX);
2937                break;
2938            case 0xF:
2939                /* Quadword virtual access with alternate access mode and
2940                   protection checks (hw_ldq/wa) */
2941                tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_USER_IDX);
2942                break;
2943            }
2944            tcg_temp_free(addr);
2945            break;
2946        }
2947#endif
2948        goto invalid_opc;
2949    case 0x1C:
2950        switch (fn7) {
2951        case 0x00:
2952            /* SEXTB */
2953            if ((ctx->tb->flags & TB_FLAGS_AMASK_BWX) == 0) {
2954                goto invalid_opc;
2955            }
2956            if (likely(rc != 31)) {
2957                if (islit)
2958                    tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
2959                else
2960                    tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);
2961            }
2962            break;
2963        case 0x01:
2964            /* SEXTW */
2965            if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
2966                if (likely(rc != 31)) {
2967                    if (islit) {
2968                        tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
2969                    } else {
2970                        tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
2971                    }
2972                }
2973                break;
2974            }
2975            goto invalid_opc;
2976        case 0x30:
2977            /* CTPOP */
2978            if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
2979                if (likely(rc != 31)) {
2980                    if (islit) {
2981                        tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
2982                    } else {
2983                        gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
2984                    }
2985                }
2986                break;
2987            }
2988            goto invalid_opc;
2989        case 0x31:
2990            /* PERR */
2991            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
2992                gen_perr(ra, rb, rc, islit, lit);
2993                break;
2994            }
2995            goto invalid_opc;
2996        case 0x32:
2997            /* CTLZ */
2998            if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
2999                if (likely(rc != 31)) {
3000                    if (islit) {
3001                        tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
3002                    } else {
3003                        gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
3004                    }
3005                }
3006                break;
3007            }
3008            goto invalid_opc;
3009        case 0x33:
3010            /* CTTZ */
3011            if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
3012                if (likely(rc != 31)) {
3013                    if (islit) {
3014                        tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
3015                    } else {
3016                        gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
3017                    }
3018                }
3019                break;
3020            }
3021            goto invalid_opc;
3022        case 0x34:
3023            /* UNPKBW */
3024            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3025                if (real_islit || ra != 31) {
3026                    goto invalid_opc;
3027                }
3028                gen_unpkbw(rb, rc);
3029                break;
3030            }
3031            goto invalid_opc;
3032        case 0x35:
3033            /* UNPKBL */
3034            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3035                if (real_islit || ra != 31) {
3036                    goto invalid_opc;
3037                }
3038                gen_unpkbl(rb, rc);
3039                break;
3040            }
3041            goto invalid_opc;
3042        case 0x36:
3043            /* PKWB */
3044            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3045                if (real_islit || ra != 31) {
3046                    goto invalid_opc;
3047                }
3048                gen_pkwb(rb, rc);
3049                break;
3050            }
3051            goto invalid_opc;
3052        case 0x37:
3053            /* PKLB */
3054            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3055                if (real_islit || ra != 31) {
3056                    goto invalid_opc;
3057                }
3058                gen_pklb(rb, rc);
3059                break;
3060            }
3061            goto invalid_opc;
3062        case 0x38:
3063            /* MINSB8 */
3064            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3065                gen_minsb8(ra, rb, rc, islit, lit);
3066                break;
3067            }
3068            goto invalid_opc;
3069        case 0x39:
3070            /* MINSW4 */
3071            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3072                gen_minsw4(ra, rb, rc, islit, lit);
3073                break;
3074            }
3075            goto invalid_opc;
3076        case 0x3A:
3077            /* MINUB8 */
3078            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3079                gen_minub8(ra, rb, rc, islit, lit);
3080                break;
3081            }
3082            goto invalid_opc;
3083        case 0x3B:
3084            /* MINUW4 */
3085            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3086                gen_minuw4(ra, rb, rc, islit, lit);
3087                break;
3088            }
3089            goto invalid_opc;
3090        case 0x3C:
3091            /* MAXUB8 */
3092            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3093                gen_maxub8(ra, rb, rc, islit, lit);
3094                break;
3095            }
3096            goto invalid_opc;
3097        case 0x3D:
3098            /* MAXUW4 */
3099            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3100                gen_maxuw4(ra, rb, rc, islit, lit);
3101                break;
3102            }
3103            goto invalid_opc;
3104        case 0x3E:
3105            /* MAXSB8 */
3106            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3107                gen_maxsb8(ra, rb, rc, islit, lit);
3108                break;
3109            }
3110            goto invalid_opc;
3111        case 0x3F:
3112            /* MAXSW4 */
3113            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3114                gen_maxsw4(ra, rb, rc, islit, lit);
3115                break;
3116            }
3117            goto invalid_opc;
3118        case 0x70:
3119            /* FTOIT */
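                /* FTOIT and FTOIS below copy raw bits from the FP
                   register file into the integer file: FTOIT is a plain
                   64-bit move, while FTOIS narrows through the S-format
                   memory representation.  */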
3120            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
3121                goto invalid_opc;
3122            }
3123            if (likely(rc != 31)) {
3124                if (ra != 31)
3125                    tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
3126                else
3127                    tcg_gen_movi_i64(cpu_ir[rc], 0);
3128            }
3129            break;
3130        case 0x78:
3131            /* FTOIS */
3132            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
3133                goto invalid_opc;
3134            }
3135            if (rc != 31) {
3136                TCGv_i32 tmp1 = tcg_temp_new_i32();
3137                if (ra != 31)
3138                    gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
3139                else {
3140                    TCGv tmp2 = tcg_const_i64(0);
3141                    gen_helper_s_to_memory(tmp1, tmp2);
3142                    tcg_temp_free(tmp2);
3143                }
3144                tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
3145                tcg_temp_free_i32(tmp1);
3146            }
3147            break;
3148        default:
3149            goto invalid_opc;
3150        }
3151        break;
3152    case 0x1D:
3153        /* HW_MTPR (PALcode) */
3154#ifndef CONFIG_USER_ONLY
3155        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
3156            return gen_mtpr(ctx, rb, insn & 0xffff);
3157        }
3158#endif
3159        goto invalid_opc;
3160    case 0x1E:
3161        /* HW_RET (PALcode) */
3162#ifndef CONFIG_USER_ONLY
3163        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
3164            if (rb == 31) {
3165                /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
3166                   address from EXC_ADDR.  This turns out to be useful for our
3167                   emulation PALcode, so continue to accept it.  */
3168                TCGv tmp = tcg_temp_new();
3169                tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
3170                gen_helper_hw_ret(cpu_env, tmp);
3171                tcg_temp_free(tmp);
3172            } else {
3173                gen_helper_hw_ret(cpu_env, cpu_ir[rb]);
3174            }
3175            ret = EXIT_PC_UPDATED;
3176            break;
3177        }
3178#endif
3179        goto invalid_opc;
3180    case 0x1F:
3181        /* HW_ST (PALcode) */
3182#ifndef CONFIG_USER_ONLY
3183        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
3184            TCGv addr, val;
3185            addr = tcg_temp_new();
3186            if (rb != 31)
3187                tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
3188            else
3189                tcg_gen_movi_i64(addr, disp12);
3190            if (ra != 31)
3191                val = cpu_ir[ra];
3192            else {
3193                val = tcg_temp_new();
3194                tcg_gen_movi_i64(val, 0);
3195            }
3196            switch ((insn >> 12) & 0xF) {
3197            case 0x0:
3198                /* Longword physical access */
3199                gen_helper_stl_phys(addr, val);
3200                break;
3201            case 0x1:
3202                /* Quadword physical access */
3203                gen_helper_stq_phys(addr, val);
3204                break;
3205            case 0x2:
3206                /* Longword physical access with lock */
3207                gen_helper_stl_c_phys(val, cpu_env, addr, val);
3208                break;
3209            case 0x3:
3210                /* Quadword physical access with lock */
3211                gen_helper_stq_c_phys(val, cpu_env, addr, val);
3212                break;
3213            case 0x4:
3214                /* Longword virtual access */
3215                goto invalid_opc;
3216            case 0x5:
3217                /* Quadword virtual access */
3218                goto invalid_opc;
3219            case 0x6:
3220                /* Invalid */
3221                goto invalid_opc;
3222            case 0x7:
3223                /* Invalid */
3224                goto invalid_opc;
3225            case 0x8:
3226                /* Invalid */
3227                goto invalid_opc;
3228            case 0x9:
3229                /* Invalid */
3230                goto invalid_opc;
3231            case 0xA:
3232                /* Invalid */
3233                goto invalid_opc;
3234            case 0xB:
3235                /* Invalid */
3236                goto invalid_opc;
3237            case 0xC:
3238                /* Longword virtual access with alternate access mode */
3239                goto invalid_opc;
3240            case 0xD:
3241                /* Quadword virtual access with alternate access mode */
3242                goto invalid_opc;
3243            case 0xE:
3244                /* Invalid */
3245                goto invalid_opc;
3246            case 0xF:
3247                /* Invalid */
3248                goto invalid_opc;
3249            }
3250            if (ra == 31)
3251                tcg_temp_free(val);
3252            tcg_temp_free(addr);
3253            break;
3254        }
3255#endif
3256        goto invalid_opc;
3257    case 0x20:
3258        /* LDF */
3259        gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
3260        break;
3261    case 0x21:
3262        /* LDG */
3263        gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
3264        break;
3265    case 0x22:
3266        /* LDS */
3267        gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
3268        break;
3269    case 0x23:
3270        /* LDT */
3271        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
3272        break;
3273    case 0x24:
3274        /* STF */
3275        gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
3276        break;
3277    case 0x25:
3278        /* STG */
3279        gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
3280        break;
3281    case 0x26:
3282        /* STS */
3283        gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
3284        break;
3285    case 0x27:
3286        /* STT */
3287        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
3288        break;
3289    case 0x28:
3290        /* LDL */
3291        gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
3292        break;
3293    case 0x29:
3294        /* LDQ */
3295        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
3296        break;
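    /* LDx_L and STx_C (0x2A-0x2B and 0x2E-0x2F below) are Alpha's atomic
       primitives.  A typical guest sequence, shown for illustration:

           retry:  ldq_l   $1, 0($2)       # load-locked
                   addq    $1, 1, $1
                   stq_c   $1, 0($2)       # $1 <- 1 on success, 0 on failure
                   beq     $1, retry

       The ld*_l generators record the address and loaded value in
       cpu_lock_addr/cpu_lock_value; gen_store_conditional re-checks
       them when the store executes.  */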
3297    case 0x2A:
3298        /* LDL_L */
3299        gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
3300        break;
3301    case 0x2B:
3302        /* LDQ_L */
3303        gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
3304        break;
3305    case 0x2C:
3306        /* STL */
3307        gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
3308        break;
3309    case 0x2D:
3310        /* STQ */
3311        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
3312        break;
3313    case 0x2E:
3314        /* STL_C */
3315        ret = gen_store_conditional(ctx, ra, rb, disp16, 0);
3316        break;
3317    case 0x2F:
3318        /* STQ_C */
3319        ret = gen_store_conditional(ctx, ra, rb, disp16, 1);
3320        break;
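    /* All branch formats from here on use a signed 21-bit displacement
       counted in instruction words: the target is ctx->pc + disp21 * 4,
       where ctx->pc already points at the next instruction.  BR and BSR
       also write that return address into Ra; BLBC/BLBS pass a final
       argument of 1 so that only the low bit of Ra is tested.  */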
3321    case 0x30:
3322        /* BR */
3323        ret = gen_bdirect(ctx, ra, disp21);
3324        break;
3325    case 0x31: /* FBEQ */
3326        ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
3327        break;
3328    case 0x32: /* FBLT */
3329        ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
3330        break;
3331    case 0x33: /* FBLE */
3332        ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
3333        break;
3334    case 0x34:
3335        /* BSR */
3336        ret = gen_bdirect(ctx, ra, disp21);
3337        break;
3338    case 0x35: /* FBNE */
3339        ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
3340        break;
3341    case 0x36: /* FBGE */
3342        ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
3343        break;
3344    case 0x37: /* FBGT */
3345        ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
3346        break;
3347    case 0x38:
3348        /* BLBC */
3349        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
3350        break;
3351    case 0x39:
3352        /* BEQ */
3353        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
3354        break;
3355    case 0x3A:
3356        /* BLT */
3357        ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
3358        break;
3359    case 0x3B:
3360        /* BLE */
3361        ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
3362        break;
3363    case 0x3C:
3364        /* BLBS */
3365        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
3366        break;
3367    case 0x3D:
3368        /* BNE */
3369        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
3370        break;
3371    case 0x3E:
3372        /* BGE */
3373        ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
3374        break;
3375    case 0x3F:
3376        /* BGT */
3377        ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
3378        break;
    invalid_opc:
        ret = gen_invalid(ctx);
        break;
3382    }
3383
3384    return ret;
3385}
3386
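/* Translate a block of guest code starting at tb->pc.  The same routine
   serves two callers: the normal translation pass, and (with search_pc
   set) a re-translation of an existing TB that only records, for every
   generated op, which guest PC produced it, so that an exception taken
   mid-TB can be unwound to a precise guest state.  */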
3387static inline void gen_intermediate_code_internal(AlphaCPU *cpu,
3388                                                  TranslationBlock *tb,
3389                                                  bool search_pc)
3390{
3391    CPUState *cs = CPU(cpu);
3392    CPUAlphaState *env = &cpu->env;
3393    DisasContext ctx, *ctxp = &ctx;
3394    target_ulong pc_start;
3395    uint32_t insn;
3396    uint16_t *gen_opc_end;
3397    CPUBreakpoint *bp;
3398    int j, lj = -1;
3399    ExitStatus ret;
3400    int num_insns;
3401    int max_insns;
3402
3403    pc_start = tb->pc;
3404    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
3405
3406    ctx.tb = tb;
3407    ctx.pc = pc_start;
3408    ctx.mem_idx = cpu_mmu_index(env);
3409    ctx.implver = env->implver;
3410    ctx.singlestep_enabled = cs->singlestep_enabled;
3411
3412    /* ??? Every TB begins with unset rounding mode, to be initialized on
3413       the first fp insn of the TB.  Alternatively we could define a proper
3414       default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
3415       to reset the FP_STATUS to that default at the end of any TB that
3416       changes the default.  We could even (gasp) dynamically figure out
3417       what default would be most efficient given the running program.  */
3418    ctx.tb_rm = -1;
3419    /* Similarly for flush-to-zero.  */
3420    ctx.tb_ftz = -1;
3421
3422    num_insns = 0;
3423    max_insns = tb->cflags & CF_COUNT_MASK;
3424    if (max_insns == 0)
3425        max_insns = CF_COUNT_MASK;
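    /* CF_COUNT_MASK carries the requested instruction budget when icount
       is in use; zero means no request, so allow the maximum.  */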
3426
3427    gen_tb_start();
3428    do {
3429        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
3430            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
3431                if (bp->pc == ctx.pc) {
3432                    gen_excp(&ctx, EXCP_DEBUG, 0);
3433                    break;
3434                }
3435            }
3436        }
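        /* When re-translating for exception unwind, record the guest PC
           and instruction count for the ops about to be generated; the
           gaps are zero-filled so every op index has an entry.  */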
3437        if (search_pc) {
3438            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
3439            if (lj < j) {
3440                lj++;
3441                while (lj < j)
3442                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
3443            }
3444            tcg_ctx.gen_opc_pc[lj] = ctx.pc;
3445            tcg_ctx.gen_opc_instr_start[lj] = 1;
3446            tcg_ctx.gen_opc_icount[lj] = num_insns;
3447        }
3448        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
3449            gen_io_start();
3450        insn = cpu_ldl_code(env, ctx.pc);
3451        num_insns++;
3452
3453        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
3454            tcg_gen_debug_insn_start(ctx.pc);
3455        }
3456
3457        ctx.pc += 4;
3458        ret = translate_one(ctxp, insn);
3459
3460        /* If we reach a page boundary, are single stepping,
3461           or exhaust instruction count, stop generation.  (A TB never
           spans a guest page boundary, so that unmapping or rewriting a
           page invalidates a well-defined set of TBs.)  */
3462        if (ret == NO_EXIT
3463            && ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0
3464                || tcg_ctx.gen_opc_ptr >= gen_opc_end
3465                || num_insns >= max_insns
3466                || singlestep
3467                || ctx.singlestep_enabled)) {
3468            ret = EXIT_PC_STALE;
3469        }
3470    } while (ret == NO_EXIT);
3471
3472    if (tb->cflags & CF_LAST_IO) {
3473        gen_io_end();
3474    }
3475
3476    switch (ret) {
3477    case EXIT_GOTO_TB:
3478    case EXIT_NORETURN:
3479        break;
3480    case EXIT_PC_STALE:
3481        tcg_gen_movi_i64(cpu_pc, ctx.pc);
3482        /* FALLTHRU */
3483    case EXIT_PC_UPDATED:
3484        if (ctx.singlestep_enabled) {
3485            gen_excp_1(EXCP_DEBUG, 0);
3486        } else {
3487            tcg_gen_exit_tb(0);
3488        }
3489        break;
3490    default:
3491        abort();
3492    }
3493
3494    gen_tb_end(tb, num_insns);
3495    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
3496    if (search_pc) {
3497        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
3498        lj++;
3499        while (lj <= j)
3500            tcg_ctx.gen_opc_instr_start[lj++] = 0;
3501    } else {
3502        tb->size = ctx.pc - pc_start;
3503        tb->icount = num_insns;
3504    }
3505
3506#ifdef DEBUG_DISAS
3507    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
3508        qemu_log("IN: %s\n", lookup_symbol(pc_start));
3509        log_target_disas(env, pc_start, ctx.pc - pc_start, 1);
3510        qemu_log("\n");
3511    }
3512#endif
3513}
3514
3515void gen_intermediate_code(CPUAlphaState *env, struct TranslationBlock *tb)
3516{
3517    gen_intermediate_code_internal(alpha_env_get_cpu(env), tb, false);
3518}
3519
3520void gen_intermediate_code_pc(CPUAlphaState *env, struct TranslationBlock *tb)
3521{
3522    gen_intermediate_code_internal(alpha_env_get_cpu(env), tb, true);
3523}
3524
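/* Called after a search_pc re-translation (see above) to restore the
   guest PC for the op index at which the exception was raised.  */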
3525void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb, int pc_pos)
3526{
3527    env->pc = tcg_ctx.gen_opc_pc[pc_pos];
3528}
3529