/* qemu/target/alpha/translate.c */
   1/*
   2 *  Alpha emulation cpu translation for qemu.
   3 *
   4 *  Copyright (c) 2007 Jocelyn Mayer
   5 *
   6 * This library is free software; you can redistribute it and/or
   7 * modify it under the terms of the GNU Lesser General Public
   8 * License as published by the Free Software Foundation; either
   9 * version 2.1 of the License, or (at your option) any later version.
  10 *
  11 * This library is distributed in the hope that it will be useful,
  12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  14 * Lesser General Public License for more details.
  15 *
  16 * You should have received a copy of the GNU Lesser General Public
  17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
  18 */
  19
  20#include "qemu/osdep.h"
  21#include "cpu.h"
  22#include "sysemu/cpus.h"
  23#include "sysemu/cpu-timers.h"
  24#include "disas/disas.h"
  25#include "qemu/host-utils.h"
  26#include "exec/exec-all.h"
  27#include "tcg/tcg-op.h"
  28#include "exec/cpu_ldst.h"
  29#include "exec/helper-proto.h"
  30#include "exec/helper-gen.h"
  31#include "trace-tcg.h"
  32#include "exec/translator.h"
  33#include "exec/log.h"
  34
  35
/* Define ALPHA_DEBUG_DISAS to enable per-instruction translation logging.  */
#undef ALPHA_DEBUG_DISAS
/* Store softfloat control fields directly rather than via helper calls;
   see gen_qual_roundmode/gen_qual_flushzero below.  */
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
  44
typedef struct DisasContext DisasContext;
/* Per-translation-block state threaded through all of the generators.  */
struct DisasContext {
    DisasContextBase base;

#ifndef CONFIG_USER_ONLY
    /* PALcode base register value.  NOTE(review): not referenced in this
       chunk; presumably used for CALL_PAL dispatch — confirm in caller.  */
    uint64_t palbr;
#endif
    /* CPU flag bits captured for this TB (e.g. ENV_FLAG_PS_USER).  */
    uint32_t tbflags;
    /* MMU index for ordinary data loads and stores.  */
    int mem_idx;

    /* implver and amask values for this CPU.  */
    int implver;
    int amask;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;

    /* The set of registers active in the current context.  */
    TCGv *ir;

    /* Temporaries for $31 and $f31 as source and destination.  */
    TCGv zero;
    TCGv sink;
    /* Temporary for immediate constants.  */
    TCGv lit;
};
  73
/* Target-specific return values from translate_one, indicating the
   state of the TB.  Note that DISAS_NEXT indicates that we are not
   exiting the TB.  NOTE(review): the precise chaining behavior for each
   value is decided by the tb_stop hook, outside this chunk.  */
#define DISAS_PC_UPDATED_NOCHAIN  DISAS_TARGET_0
#define DISAS_PC_UPDATED          DISAS_TARGET_1
#define DISAS_PC_STALE            DISAS_TARGET_2
  80
/* global register indexes */
static TCGv cpu_std_ir[31];     /* integer registers, normal mode */
static TCGv cpu_fir[31];        /* floating-point registers */
static TCGv cpu_pc;
static TCGv cpu_lock_addr;      /* LL/SC lock address; -1 when no lock held */
static TCGv cpu_lock_value;     /* value loaded at cpu_lock_addr */

#ifndef CONFIG_USER_ONLY
/* Integer register set with the PALmode shadow registers spliced in.  */
static TCGv cpu_pal_ir[31];
#endif
  91
  92#include "exec/gen-icount.h"
  93
/* Allocate the TCG globals backing the architectural CPU state.
   Must run once before any translation takes place.  */
void alpha_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUAlphaState, V) }

    typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        DEF_VAR(pc),
        DEF_VAR(lock_addr),
        DEF_VAR(lock_value),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char greg_names[31][4] = {
        "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
        "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
        "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
        "t10", "t11", "ra", "t12", "at", "gp", "sp"
    };
    static const char freg_names[31][4] = {
        "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
        "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
        "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
        "f24", "f25", "f26", "f27", "f28", "f29", "f30"
    };
#ifndef CONFIG_USER_ONLY
    static const char shadow_names[8][8] = {
        "pal_t7", "pal_s0", "pal_s1", "pal_s2",
        "pal_s3", "pal_s4", "pal_s5", "pal_t11"
    };
#endif

    int i;

    for (i = 0; i < 31; i++) {
        cpu_std_ir[i] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState, ir[i]),
                                               greg_names[i]);
    }

    for (i = 0; i < 31; i++) {
        cpu_fir[i] = tcg_global_mem_new_i64(cpu_env,
                                            offsetof(CPUAlphaState, fir[i]),
                                            freg_names[i]);
    }

#ifndef CONFIG_USER_ONLY
    /* The PALmode register file is the standard one, except that the
       shadow registers replace r8-r14 (t7, s0-s5) and r25 (t11).  */
    memcpy(cpu_pal_ir, cpu_std_ir, sizeof(cpu_pal_ir));
    for (i = 0; i < 8; i++) {
        int r = (i == 7 ? 25 : i + 8);
        cpu_pal_ir[r] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState,
                                                        shadow[i]),
                                               shadow_names[i]);
    }
#endif

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new_i64(cpu_env, v->ofs, v->name);
    }
}
 157
 158static TCGv load_zero(DisasContext *ctx)
 159{
 160    if (!ctx->zero) {
 161        ctx->zero = tcg_const_i64(0);
 162    }
 163    return ctx->zero;
 164}
 165
 166static TCGv dest_sink(DisasContext *ctx)
 167{
 168    if (!ctx->sink) {
 169        ctx->sink = tcg_temp_new();
 170    }
 171    return ctx->sink;
 172}
 173
/* Release the per-instruction temporaries lazily allocated by
   load_zero, dest_sink and load_gpr_lit.  */
static void free_context_temps(DisasContext *ctx)
{
    if (ctx->sink) {
        /* Mark the sink value dead before freeing the temp.  */
        tcg_gen_discard_i64(ctx->sink);
        tcg_temp_free(ctx->sink);
        ctx->sink = NULL;
    }
    if (ctx->zero) {
        tcg_temp_free(ctx->zero);
        ctx->zero = NULL;
    }
    if (ctx->lit) {
        tcg_temp_free(ctx->lit);
        ctx->lit = NULL;
    }
}
 190
 191static TCGv load_gpr(DisasContext *ctx, unsigned reg)
 192{
 193    if (likely(reg < 31)) {
 194        return ctx->ir[reg];
 195    } else {
 196        return load_zero(ctx);
 197    }
 198}
 199
 200static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
 201                         uint8_t lit, bool islit)
 202{
 203    if (islit) {
 204        ctx->lit = tcg_const_i64(lit);
 205        return ctx->lit;
 206    } else if (likely(reg < 31)) {
 207        return ctx->ir[reg];
 208    } else {
 209        return load_zero(ctx);
 210    }
 211}
 212
 213static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
 214{
 215    if (likely(reg < 31)) {
 216        return ctx->ir[reg];
 217    } else {
 218        return dest_sink(ctx);
 219    }
 220}
 221
 222static TCGv load_fpr(DisasContext *ctx, unsigned reg)
 223{
 224    if (likely(reg < 31)) {
 225        return cpu_fir[reg];
 226    } else {
 227        return load_zero(ctx);
 228    }
 229}
 230
 231static TCGv dest_fpr(DisasContext *ctx, unsigned reg)
 232{
 233    if (likely(reg < 31)) {
 234        return cpu_fir[reg];
 235    } else {
 236        return dest_sink(ctx);
 237    }
 238}
 239
 240static int get_flag_ofs(unsigned shift)
 241{
 242    int ofs = offsetof(CPUAlphaState, flags);
 243#ifdef HOST_WORDS_BIGENDIAN
 244    ofs += 3 - (shift / 8);
 245#else
 246    ofs += shift / 8;
 247#endif
 248    return ofs;
 249}
 250
/* Load, zero-extended, the flags byte containing bit SHIFT into VAL.  */
static void ld_flag_byte(TCGv val, unsigned shift)
{
    tcg_gen_ld8u_i64(val, cpu_env, get_flag_ofs(shift));
}
 255
/* Store the low byte of VAL into the flags byte containing bit SHIFT.  */
static void st_flag_byte(TCGv val, unsigned shift)
{
    tcg_gen_st8_i64(val, cpu_env, get_flag_ofs(shift));
}
 260
/* Emit a call to the exception helper with the given exception number
   and error code; control does not return to the generated code.  */
static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(cpu_env, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}
 271
/* Flush the current PC to env, then raise EXCEPTION; ends the TB.  */
static DisasJumpType gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
    gen_excp_1(exception, error_code);
    return DISAS_NORETURN;
}
 278
/* Raise an illegal-instruction (OPCDEC) exception.  */
static inline DisasJumpType gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}
 283
/* LDF: load 32 bits and convert VAX F-float memory format to the
   64-bit register format via helper.  */
static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}
 291
/* LDG: load 64 bits and convert VAX G-float memory format to the
   register format via helper.  */
static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld_i64(tmp, t1, flags, MO_LEQ);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}
 299
/* LDS: load 32 bits and expand IEEE single memory format to the
   64-bit register format via helper.  */
static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}
 307
/* LDL_L: load-locked longword; records the lock address and value
   for a subsequent store-conditional.  */
static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LESL);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}
 314
/* LDQ_L: load-locked quadword; records the lock address and value
   for a subsequent store-conditional.  */
static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LEQ);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}
 321
/* Generate a memory load of register RA from [RB + disp16].  FP selects
   the fp register file; CLEAR masks the low three address bits (LDx_U).  */
static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, bool fp,
                                bool clear)
{
    TCGv tmp, addr, va;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    /* ra != 31 was checked above, so direct indexing is safe.  */
    va = (fp ? cpu_fir[ra] : ctx->ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}
 354
/* STF: convert register format to VAX F-float memory format and
   store 32 bits.  */
static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}
 362
/* STG: convert register format to VAX G-float memory format and
   store 64 bits.  */
static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st_i64(tmp, t1, flags, MO_LEQ);
    tcg_temp_free(tmp);
}
 370
/* STS: compress the 64-bit register format to IEEE single and
   store 32 bits.  */
static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}
 378
/* Generate a memory store of register RA to [RB + disp16].  FP selects
   the fp register file; CLEAR masks the low three address bits (STx_U).  */
static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, bool fp,
                                 bool clear)
{
    TCGv tmp, addr, va;

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? load_fpr(ctx, ra) : load_gpr(ctx, ra));
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}
 404
/* STL_C/STQ_C: store-conditional, emulated with an atomic cmpxchg
   against the remembered lock value.  RA receives 1 on success and
   0 on failure; the lock is invalidated either way.  */
static DisasJumpType gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                           int32_t disp16, int mem_idx,
                                           MemOp op)
{
    TCGLabel *lab_fail, *lab_done;
    TCGv addr, val;

    addr = tcg_temp_new_i64();
    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
    free_context_temps(ctx);

    /* Fail immediately if the computed address does not match the
       address saved by the load-locked.  */
    lab_fail = gen_new_label();
    lab_done = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);
    tcg_temp_free_i64(addr);

    /* Atomically store the new value only if memory still holds the
       value observed by the load-locked.  */
    val = tcg_temp_new_i64();
    tcg_gen_atomic_cmpxchg_i64(val, cpu_lock_addr, cpu_lock_value,
                               load_gpr(ctx, ra), mem_idx, op);
    free_context_temps(ctx);

    if (ra != 31) {
        /* Success iff the cmpxchg saw the expected old value.  */
        tcg_gen_setcond_i64(TCG_COND_EQ, ctx->ir[ra], val, cpu_lock_value);
    }
    tcg_temp_free_i64(val);
    tcg_gen_br(lab_done);

    gen_set_label(lab_fail);
    if (ra != 31) {
        tcg_gen_movi_i64(ctx->ir[ra], 0);
    }

    gen_set_label(lab_done);
    /* Any store-conditional clears the lock.  */
    tcg_gen_movi_i64(cpu_lock_addr, -1);
    return DISAS_NEXT;
}
 441
 442static bool in_superpage(DisasContext *ctx, int64_t addr)
 443{
 444#ifndef CONFIG_USER_ONLY
 445    return ((ctx->tbflags & ENV_FLAG_PS_USER) == 0
 446            && addr >> TARGET_VIRT_ADDR_SPACE_BITS == -1
 447            && ((addr >> 41) & 3) == 2);
 448#else
 449    return false;
 450#endif
 451}
 452
 453static bool use_exit_tb(DisasContext *ctx)
 454{
 455    return ((tb_cflags(ctx->base.tb) & CF_LAST_IO)
 456            || ctx->base.singlestep_enabled
 457            || singlestep);
 458}
 459
 460static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
 461{
 462    /* Suppress goto_tb in the case of single-steping and IO.  */
 463    if (unlikely(use_exit_tb(ctx))) {
 464        return false;
 465    }
 466#ifndef CONFIG_USER_ONLY
 467    /* If the destination is in the superpage, the page perms can't change.  */
 468    if (in_superpage(ctx, dest)) {
 469        return true;
 470    }
 471    /* Check for the dest on the same page as the start of the TB.  */
 472    return ((ctx->base.tb->pc ^ dest) & TARGET_PAGE_MASK) == 0;
 473#else
 474    return true;
 475#endif
 476}
 477
 478static DisasJumpType gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
 479{
 480    uint64_t dest = ctx->base.pc_next + (disp << 2);
 481
 482    if (ra != 31) {
 483        tcg_gen_movi_i64(ctx->ir[ra], ctx->base.pc_next);
 484    }
 485
 486    /* Notice branch-to-next; used to initialize RA with the PC.  */
 487    if (disp == 0) {
 488        return 0;
 489    } else if (use_goto_tb(ctx, dest)) {
 490        tcg_gen_goto_tb(0);
 491        tcg_gen_movi_i64(cpu_pc, dest);
 492        tcg_gen_exit_tb(ctx->base.tb, 0);
 493        return DISAS_NORETURN;
 494    } else {
 495        tcg_gen_movi_i64(cpu_pc, dest);
 496        return DISAS_PC_UPDATED;
 497    }
 498}
 499
/* Conditional branch to PC-relative DISP taken when COND holds for CMP
   against zero.  Uses a two-way goto_tb when chaining is allowed,
   otherwise a movcond on cpu_pc.  */
static DisasJumpType gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                        TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->base.pc_next + (disp << 2);
    TCGLabel *lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        /* Fallthrough path: chain to the next insn.  */
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
        tcg_gen_exit_tb(ctx->base.tb, 0);

        /* Taken path: chain to the branch target.  */
        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb(ctx->base.tb, 1);

        return DISAS_NORETURN;
    } else {
        TCGv_i64 z = tcg_const_i64(0);
        TCGv_i64 d = tcg_const_i64(dest);
        TCGv_i64 p = tcg_const_i64(ctx->base.pc_next);

        /* cpu_pc = (cmp COND 0) ? dest : next_pc  */
        tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);

        tcg_temp_free_i64(z);
        tcg_temp_free_i64(d);
        tcg_temp_free_i64(p);
        return DISAS_PC_UPDATED;
    }
}
 532
/* Integer conditional branch.  MASK nonzero tests only the low bit of
   RA (BLBC/BLBS); otherwise the full register is compared with zero.  */
static DisasJumpType gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                               int32_t disp, int mask)
{
    if (mask) {
        TCGv tmp = tcg_temp_new();
        DisasJumpType ret;

        tcg_gen_andi_i64(tmp, load_gpr(ctx, ra), 1);
        ret = gen_bcond_internal(ctx, cond, tmp, disp);
        tcg_temp_free(tmp);
        return ret;
    }
    return gen_bcond_internal(ctx, cond, load_gpr(ctx, ra), disp);
}
 547
/* Fold -0.0 for comparison with COND.  */

/* Write into DEST a value that, compared against zero with COND, yields
   the same result as comparing SRC as an fp value where -0.0 == +0.0.  */
static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;    /* bit pattern of -0.0 */

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}
 579
/* Floating-point conditional branch: compare RA (with -0.0 folded)
   against zero and branch to PC-relative DISP.  */
static DisasJumpType gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                                int32_t disp)
{
    TCGv cmp_tmp = tcg_temp_new();
    DisasJumpType ret;

    gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra));
    ret = gen_bcond_internal(ctx, cond, cmp_tmp, disp);
    tcg_temp_free(cmp_tmp);
    return ret;
}
 591
/* FCMOVxx: if RA (with -0.0 folded) satisfies COND against zero,
   move RB into RC, else leave RC unchanged.  */
static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc)
{
    TCGv_i64 va, vb, z;

    z = load_zero(ctx);
    vb = load_fpr(ctx, rb);
    va = tcg_temp_new();
    gen_fold_mzero(cond, va, load_fpr(ctx, ra));

    tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc));

    tcg_temp_free(va);
}
 605
/* FP instruction qualifier bits, as encoded in the fn11 field.  */
#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

/* QUAL_U and QUAL_V deliberately share a bit; which applies depends on
   whether the output is fp or integer (see gen_fp_exc_raise).  */
#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */
 616
/* Emit code to establish the rounding mode requested by FN11, unless
   that mode is already in effect for this TB.  */
static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    /* fn11 was masked to QUAL_RM_MASK above, so the switch is total.  */
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        /* Dynamic rounding: read the mode from the FPCR at run time.  */
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
 656
/* Emit code to establish the flush-to-zero setting implied by the
   underflow qualifier in FN11, unless already in effect for this TB.  */
static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
 686
/* Return the fp source operand REG for an IEEE operation, emitting the
   input checks appropriate to the software-completion qualifier.
   IS_CMP selects the comparison variant of the non-S check.  */
static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp)
{
    TCGv val;

    if (unlikely(reg == 31)) {
        val = load_zero(ctx);
    } else {
        val = cpu_fir[reg];
        if ((fn11 & QUAL_S) == 0) {
            if (is_cmp) {
                gen_helper_ieee_input_cmp(cpu_env, val);
            } else {
                gen_helper_ieee_input(cpu_env, val);
            }
        } else {
#ifndef CONFIG_USER_ONLY
            /* In system mode, raise exceptions for denormals like real
               hardware.  In user mode, proceed as if the OS completion
               handler is handling the denormal as per spec.  */
            gen_helper_ieee_input_s(cpu_env, val);
#endif
        }
    }
    return val;
}
 712
/* Emit the post-operation fp exception check for destination RC,
   masking out the exception classes disabled by FN11's qualifiers.  */
static void gen_fp_exc_raise(int rc, int fn11)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */
    TCGv_i32 reg, ign;
    uint32_t ignore = 0;

    if (!(fn11 & QUAL_U)) {
        /* Note that QUAL_U == QUAL_V, so ignore either.  */
        ignore |= FPCR_UNF | FPCR_IOV;
    }
    if (!(fn11 & QUAL_I)) {
        ignore |= FPCR_INE;
    }
    ign = tcg_const_i32(ignore);

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);
    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(cpu_env, ign, reg);
    } else {
        gen_helper_fp_exc_raise(cpu_env, ign, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(ign);
}
 747
/* CVTLQ: convert the longword held in fp register format (bits spread
   over <63:62,58:29>) in VB to a sign-extended quadword in VC.  */
static void gen_cvtlq(TCGv vc, TCGv vb)
{
    TCGv tmp = tcg_temp_new();

    /* The arithmetic right shift here, plus the sign-extended mask below
       yields a sign-extended result without an explicit ext32s_i64.  */
    tcg_gen_shri_i64(tmp, vb, 29);
    tcg_gen_sari_i64(vc, vb, 32);
    tcg_gen_deposit_i64(vc, vc, tmp, 0, 30);

    tcg_temp_free(tmp);
}
 760
/* Emit a two-operand IEEE operation RC = helper(RB), applying the
   rounding/flush qualifiers and the post-op exception check.  */
static void gen_ieee_arith2(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    vb = gen_ieee_input(ctx, rb, fn11, 0);
    helper(dest_fpr(ctx, rc), cpu_env, vb);

    gen_fp_exc_raise(rc, fn11);
}
 775
/* Instantiate a gen_NAME wrapper around gen_ieee_arith2 for each
   two-operand IEEE helper.  */
#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)
 786
/* CVTTQ: convert T-float to a quadword integer.  */
static void gen_cvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb, vc;

    /* No need to set flushzero, since we have an integer output.  */
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);

    /* Almost all integer conversions use cropped rounding;
       special case that.  */
    if ((fn11 & QUAL_RM_MASK) == QUAL_RM_C) {
        gen_helper_cvttq_c(vc, cpu_env, vb);
    } else {
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(vc, cpu_env, vb);
    }
    gen_fp_exc_raise(rc, fn11);
}
 805
 806static void gen_ieee_intcvt(DisasContext *ctx,
 807                            void (*helper)(TCGv, TCGv_ptr, TCGv),
 808                            int rb, int rc, int fn11)
 809{
 810    TCGv vb, vc;
 811
 812    gen_qual_roundmode(ctx, fn11);
 813    vb = load_fpr(ctx, rb);
 814    vc = dest_fpr(ctx, rc);
 815
 816    /* The only exception that can be raised by integer conversion
 817       is inexact.  Thus we only need to worry about exceptions when
 818       inexact handling is requested.  */
 819    if (fn11 & QUAL_I) {
 820        helper(vc, cpu_env, vb);
 821        gen_fp_exc_raise(rc, fn11);
 822    } else {
 823        helper(vc, cpu_env, vb);
 824    }
 825}
 826
/* Instantiate a gen_NAME wrapper around gen_ieee_intcvt for each
   integer-conversion helper.  */
#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)
 835
/* VC = (VA & MASK) | (VB & ~MASK), with VA complemented first when
   INV_A is set.  Building block for the sign-copy insns.  */
static void gen_cpy_mask(TCGv vc, TCGv va, TCGv vb, bool inv_a, uint64_t mask)
{
    TCGv vmask = tcg_const_i64(mask);
    TCGv tmp = tcg_temp_new_i64();

    if (inv_a) {
        tcg_gen_andc_i64(tmp, vmask, va);
    } else {
        tcg_gen_and_i64(tmp, va, vmask);
    }

    tcg_gen_andc_i64(vc, vb, vmask);
    tcg_gen_or_i64(vc, vc, tmp);

    tcg_temp_free(vmask);
    tcg_temp_free(tmp);
}
 853
/* Emit a three-operand IEEE operation RC = helper(RA, RB), applying
   the rounding/flush qualifiers and the post-op exception check.  */
static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    va = gen_ieee_input(ctx, ra, fn11, 0);
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}
 870
/* Instantiate a gen_NAME wrapper around gen_ieee_arith3 for each
   three-operand IEEE arithmetic helper.  */
#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)
 885
/* Emit an IEEE comparison RC = helper(RA, RB).  No rounding/flush
   qualifiers apply; inputs use the comparison-specific check.  */
static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    va = gen_ieee_input(ctx, ra, fn11, 1);
    vb = gen_ieee_input(ctx, rb, fn11, 1);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}
 899
/* Instantiate a gen_NAME wrapper around gen_ieee_compare for each
   T-float comparison helper.  */
#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)
 910
 911static inline uint64_t zapnot_mask(uint8_t lit)
 912{
 913    uint64_t mask = 0;
 914    int i;
 915
 916    for (i = 0; i < 8; ++i) {
 917        if ((lit >> i) & 1) {
 918            mask |= 0xffull << (i * 8);
 919        }
 920    }
 921    return mask;
 922}
 923
/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    /* Special-case the common masks that map to simple extensions.  */
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}
 950
/* EXTWH, EXTLH, EXTQH */
/* Extract-high: shift VA left by (64 - 8*RB) bits, then zap with
   BYTE_MASK.  BYTE_MASK determines the field width per insn.  */
static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (64 - lit * 8) & 0x3f;
        int len = cto32(byte_mask) * 8;
        if (pos < len) {
            tcg_gen_deposit_z_i64(vc, va, pos, len - pos);
        } else {
            tcg_gen_movi_i64(vc, 0);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        /* Shift count is (64 - 8*(RB & 7)) mod 64, computed as the
           negation of RB*8 masked to six bits.  */
        tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3);
        tcg_gen_neg_i64(tmp, tmp);
        tcg_gen_andi_i64(tmp, tmp, 0x3f);
        tcg_gen_shl_i64(vc, va, tmp);
        tcg_temp_free(tmp);
    }
    gen_zapnoti(vc, vc, byte_mask);
}
 973
/* EXTBL, EXTWL, EXTLL, EXTQL */
/* Extract-low: shift VA right by 8*(RB & 7) bits, then zap with
   BYTE_MASK.  */
static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos + len >= 64) {
            /* Clamp so that extract stays within the 64-bit word.  */
            len = 64 - pos;
        }
        tcg_gen_extract_i64(vc, va, pos, len);
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(tmp, tmp, 3);
        tcg_gen_shr_i64(vc, va, tmp);
        tcg_temp_free(tmp);
        gen_zapnoti(vc, vc, byte_mask);
    }
}
 994
/* INSWH, INSLH, INSQH */
/* Insert-high: position the zapped VA field as the high part that an
   unaligned store sequence would place at address RB.  */
static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = 64 - (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos < len) {
            tcg_gen_extract_i64(vc, va, pos, len - pos);
        } else {
            /* Byte offset 0: the high part is empty.  */
            tcg_gen_movi_i64(vc, 0);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        TCGv shift = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, va, byte_mask);

        /* If (B & 7) == 0, we need to shift by 64 and leave a zero.  Do this
           portably by splitting the shift into two parts: shift_count-1 and 1.
           Arrange for the -1 by using ones-complement instead of
           twos-complement in the negation: ~(B * 8) & 63.  */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);

        tcg_gen_shr_i64(vc, tmp, shift);
        tcg_gen_shri_i64(vc, vc, 1);
        tcg_temp_free(shift);
        tcg_temp_free(tmp);
    }
}
1032
1033/* INSBL, INSWL, INSLL, INSQL */
1034static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
1035                      uint8_t lit, uint8_t byte_mask)
1036{
1037    if (islit) {
1038        int pos = (lit & 7) * 8;
1039        int len = cto32(byte_mask) * 8;
1040        if (pos + len > 64) {
1041            len = 64 - pos;
1042        }
1043        tcg_gen_deposit_z_i64(vc, va, pos, len);
1044    } else {
1045        TCGv tmp = tcg_temp_new();
1046        TCGv shift = tcg_temp_new();
1047
1048        /* The instruction description has us left-shift the byte mask
1049           and extract bits <15:8> and apply that zap at the end.  This
1050           is equivalent to simply performing the zap first and shifting
1051           afterward.  */
1052        gen_zapnoti(tmp, va, byte_mask);
1053
1054        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
1055        tcg_gen_shli_i64(shift, shift, 3);
1056        tcg_gen_shl_i64(vc, tmp, shift);
1057        tcg_temp_free(shift);
1058        tcg_temp_free(tmp);
1059    }
1060}
1061
/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    /* Clear the bytes of VA covered by the high half of the shifted
       byte mask.  */
    if (islit) {
        /* Constant case: shift the byte mask, keep bits <15:8>, and
           zap (clear) exactly those bytes.  */
        gen_zapnoti(vc, va, ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~(B * 8) & 63.  */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        /* Clear the selected bytes out of VA.  */
        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
1094
1095/* MSKBL, MSKWL, MSKLL, MSKQL */
1096static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
1097                      uint8_t lit, uint8_t byte_mask)
1098{
1099    if (islit) {
1100        gen_zapnoti(vc, va, ~(byte_mask << (lit & 7)));
1101    } else {
1102        TCGv shift = tcg_temp_new();
1103        TCGv mask = tcg_temp_new();
1104
1105        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
1106        tcg_gen_shli_i64(shift, shift, 3);
1107        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
1108        tcg_gen_shl_i64(mask, mask, shift);
1109
1110        tcg_gen_andc_i64(vc, va, mask);
1111
1112        tcg_temp_free(mask);
1113        tcg_temp_free(shift);
1114    }
1115}
1116
1117static void gen_rx(DisasContext *ctx, int ra, int set)
1118{
1119    TCGv tmp;
1120
1121    if (ra != 31) {
1122        ld_flag_byte(ctx->ir[ra], ENV_FLAG_RX_SHIFT);
1123    }
1124
1125    tmp = tcg_const_i64(set);
1126    st_flag_byte(ctx->ir[ra], ENV_FLAG_RX_SHIFT);
1127    tcg_temp_free(tmp);
1128}
1129
/* Translate a CALL_PAL instruction.  PALCODE is the 26-bit function
   field.  Trivial entry points are emulated inline; everything else
   dispatches to the emulated PALcode image at ctx->palbr (system
   mode) or raises EXCP_CALL_PAL (user mode).  Returns the
   DisasJumpType that ends or continues the TB accordingly.  */
static DisasJumpType gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode.  Many of these are trivial access
       to internal cpu registers.  */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        switch (palcode) {
        case 0x86:
            /* IMB */
            /* No-op inside QEMU.  */
            break;
        case 0x9E:
            /* RDUNIQUE */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        case 0x9F:
            /* WRUNIQUE */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        default:
            /* Reduce to the 6-bit entry index before dispatching to
               the real PALcode (the 0x80 bit selects the second
               entry-point region below).  */
            palcode &= 0xbf;
            goto do_call_pal;
        }
        return DISAS_NEXT;
    }

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tbflags & ENV_FLAG_PS_USER) == 0) {
        switch (palcode) {
        case 0x01:
            /* CFLUSH */
            /* No-op inside QEMU.  */
            break;
        case 0x02:
            /* DRAINA */
            /* No-op inside QEMU.  */
            break;
        case 0x2D:
            /* WRVPTPTR */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, vptptr));
            break;
        case 0x31:
            /* WRVAL */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;
        case 0x32:
            /* RDVAL */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;

        case 0x35:
            /* SWPIPL */
            /* Note that we already know we're in kernel mode, so we know
               that PS only contains the 3 IPL bits.  */
            ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT);

            /* But make sure and store only the 3 IPL bits from the user.  */
            {
                TCGv tmp = tcg_temp_new();
                tcg_gen_andi_i64(tmp, ctx->ir[IR_A0], PS_INT_MASK);
                st_flag_byte(tmp, ENV_FLAG_PS_SHIFT);
                tcg_temp_free(tmp);
            }

            /* Allow interrupts to be recognized right away.  */
            tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
            return DISAS_PC_UPDATED_NOCHAIN;

        case 0x36:
            /* RDPS */
            ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT);
            break;

        case 0x38:
            /* WRUSP */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3A:
            /* RDUSP */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3C:
            /* WHAMI */
            /* cpu_index lives in the CPUState that precedes env;
               hence the negative offset from env.  */
            tcg_gen_ld32s_i64(ctx->ir[IR_V0], cpu_env,
                -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
            break;

        case 0x3E:
            /* WTINT */
            /* Mark the cpu halted and exit with EXCP_HALTED so the
               main loop puts it to sleep; v0 = 0 interval ticks.  */
            {
                TCGv_i32 tmp = tcg_const_i32(1);
                tcg_gen_st_i32(tmp, cpu_env, -offsetof(AlphaCPU, env) +
                                             offsetof(CPUState, halted));
                tcg_temp_free_i32(tmp);
            }
            tcg_gen_movi_i64(ctx->ir[IR_V0], 0);
            return gen_excp(ctx, EXCP_HALTED, 0);

        default:
            palcode &= 0x3f;
            goto do_call_pal;
        }
        return DISAS_NEXT;
    }
#endif
    return gen_invalid(ctx);

 do_call_pal:
#ifdef CONFIG_USER_ONLY
    return gen_excp(ctx, EXCP_CALL_PAL, palcode);
#else
    {
        TCGv tmp = tcg_temp_new();
        uint64_t exc_addr = ctx->base.pc_next;
        uint64_t entry = ctx->palbr;

        /* Record the return address; bit 0 of exc_addr doubles as the
           "was in PALmode" flag.  Otherwise enter PALmode now.  */
        if (ctx->tbflags & ENV_FLAG_PAL_MODE) {
            exc_addr |= 1;
        } else {
            tcg_gen_movi_i64(tmp, 1);
            st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT);
        }

        tcg_gen_movi_i64(tmp, exc_addr);
        tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
        tcg_temp_free(tmp);

        /* Each entry point gets 64 bytes; privileged entries start at
           palbr + 0x1000, unprivileged at palbr + 0x2000.  */
        entry += (palcode & 0x80
                  ? 0x2000 + (palcode - 0x80) * 64
                  : 0x1000 + palcode * 64);

        /* Since the destination is running in PALmode, we don't really
           need the page permissions check.  We'll see the existence of
           the page when we create the TB, and we'll flush all TBs if
           we change the PAL base register.  */
        if (!use_exit_tb(ctx)) {
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(cpu_pc, entry);
            tcg_gen_exit_tb(ctx->base.tb, 0);
            return DISAS_NORETURN;
        } else {
            tcg_gen_movi_i64(cpu_pc, entry);
            return DISAS_PC_UPDATED;
        }
    }
#endif
}
1286
1287#ifndef CONFIG_USER_ONLY
1288
1289#define PR_LONG         0x200000
1290
1291static int cpu_pr_data(int pr)
1292{
1293    switch (pr) {
1294    case  2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
1295    case  3: return offsetof(CPUAlphaState, trap_arg0);
1296    case  4: return offsetof(CPUAlphaState, trap_arg1);
1297    case  5: return offsetof(CPUAlphaState, trap_arg2);
1298    case  6: return offsetof(CPUAlphaState, exc_addr);
1299    case  7: return offsetof(CPUAlphaState, palbr);
1300    case  8: return offsetof(CPUAlphaState, ptbr);
1301    case  9: return offsetof(CPUAlphaState, vptptr);
1302    case 10: return offsetof(CPUAlphaState, unique);
1303    case 11: return offsetof(CPUAlphaState, sysval);
1304    case 12: return offsetof(CPUAlphaState, usp);
1305
1306    case 40 ... 63:
1307        return offsetof(CPUAlphaState, scratch[pr - 40]);
1308
1309    case 251:
1310        return offsetof(CPUAlphaState, alarm_expire);
1311    }
1312    return 0;
1313}
1314
/* HW_MFPR: emit code reading processor register REGNO into VA.
   Returns DISAS_PC_STALE when an icount-sensitive timer read forces
   the TB to end, otherwise DISAS_NEXT.  */
static DisasJumpType gen_mfpr(DisasContext *ctx, TCGv va, int regno)
{
    void (*helper)(TCGv);
    int data;

    switch (regno) {
    case 32 ... 39:
        /* Accessing the "non-shadow" general registers.  */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(va, cpu_std_ir[regno]);
        break;

    case 250: /* WALLTIME */
        helper = gen_helper_get_walltime;
        goto do_helper;
    case 249: /* VMTIME */
        helper = gen_helper_get_vmtime;
    do_helper:
        /* With instruction counting enabled, a timer read counts as
           I/O: bracket it with gen_io_start and end the TB.  */
        if (icount_enabled()) {
            gen_io_start();
            helper(va);
            return DISAS_PC_STALE;
        } else {
            helper(va);
        }
        break;

    case 0: /* PS */
        ld_flag_byte(va, ENV_FLAG_PS_SHIFT);
        break;
    case 1: /* FEN */
        ld_flag_byte(va, ENV_FLAG_FEN_SHIFT);
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data == 0) {
            tcg_gen_movi_i64(va, 0);
        } else if (data & PR_LONG) {
            /* 32-bit field, sign-extended on load.  */
            tcg_gen_ld32s_i64(va, cpu_env, data & ~PR_LONG);
        } else {
            tcg_gen_ld_i64(va, cpu_env, data);
        }
        break;
    }

    return DISAS_NEXT;
}
1365
/* HW_MTPR: emit code writing VB to processor register REGNO.
   Returns DISAS_PC_STALE (or ends the TB via gen_excp) for writes
   with global side effects, otherwise DISAS_NEXT.  */
static DisasJumpType gen_mtpr(DisasContext *ctx, TCGv vb, int regno)
{
    int data;

    switch (regno) {
    case 255:
        /* TBIA */
        gen_helper_tbia(cpu_env);
        break;

    case 254:
        /* TBIS */
        gen_helper_tbis(cpu_env, vb);
        break;

    case 253:
        /* WAIT */
        /* Mark the cpu halted and leave the execution loop.  */
        {
            TCGv_i32 tmp = tcg_const_i32(1);
            tcg_gen_st_i32(tmp, cpu_env, -offsetof(AlphaCPU, env) +
                                         offsetof(CPUState, halted));
            tcg_temp_free_i32(tmp);
        }
        return gen_excp(ctx, EXCP_HALTED, 0);

    case 252:
        /* HALT */
        gen_helper_halt(vb);
        return DISAS_PC_STALE;

    case 251:
        /* ALARM */
        gen_helper_set_alarm(cpu_env, vb);
        break;

    case 7:
        /* PALBR */
        tcg_gen_st_i64(vb, cpu_env, offsetof(CPUAlphaState, palbr));
        /* Changing the PAL base register implies un-chaining all of the TBs
           that ended with a CALL_PAL.  Since the base register usually only
           changes during boot, flushing everything works well.  */
        gen_helper_tb_flush(cpu_env);
        return DISAS_PC_STALE;

    case 32 ... 39:
        /* Accessing the "non-shadow" general registers.  */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(cpu_std_ir[regno], vb);
        break;

    case 0: /* PS */
        st_flag_byte(vb, ENV_FLAG_PS_SHIFT);
        break;
    case 1: /* FEN */
        st_flag_byte(vb, ENV_FLAG_FEN_SHIFT);
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data != 0) {
            if (data & PR_LONG) {
                /* 32-bit field: store the low 32 bits only.  */
                tcg_gen_st32_i64(vb, cpu_env, data & ~PR_LONG);
            } else {
                tcg_gen_st_i64(vb, cpu_env, data);
            }
        }
        break;
    }

    return DISAS_NEXT;
}
1439#endif /* !USER_ONLY*/
1440
/* Decode helpers for translate_one: each bails out to invalid_opc
   when its precondition fails.  */

/* The instruction must not use the literal-operand encoding.  */
#define REQUIRE_NO_LIT                          \
    do {                                        \
        if (real_islit) {                       \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

/* The cpu must implement the given AMASK architecture extension.  */
#define REQUIRE_AMASK(FLAG)                     \
    do {                                        \
        if ((ctx->amask & AMASK_##FLAG) == 0) { \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

/* The given TB flag must be set (e.g. mode bits).  */
#define REQUIRE_TB_FLAG(FLAG)                   \
    do {                                        \
        if ((ctx->tbflags & (FLAG)) == 0) {     \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

/* The given register field must be 31 (the zero register).  */
#define REQUIRE_REG_31(WHICH)                   \
    do {                                        \
        if (WHICH != 31) {                      \
            goto invalid_opc;                   \
        }                                       \
    } while (0)
1468
1469static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
1470{
1471    int32_t disp21, disp16, disp12 __attribute__((unused));
1472    uint16_t fn11;
1473    uint8_t opc, ra, rb, rc, fpfn, fn7, lit;
1474    bool islit, real_islit;
1475    TCGv va, vb, vc, tmp, tmp2;
1476    TCGv_i32 t32;
1477    DisasJumpType ret;
1478
1479    /* Decode all instruction fields */
1480    opc = extract32(insn, 26, 6);
1481    ra = extract32(insn, 21, 5);
1482    rb = extract32(insn, 16, 5);
1483    rc = extract32(insn, 0, 5);
1484    real_islit = islit = extract32(insn, 12, 1);
1485    lit = extract32(insn, 13, 8);
1486
1487    disp21 = sextract32(insn, 0, 21);
1488    disp16 = sextract32(insn, 0, 16);
1489    disp12 = sextract32(insn, 0, 12);
1490
1491    fn11 = extract32(insn, 5, 11);
1492    fpfn = extract32(insn, 5, 6);
1493    fn7 = extract32(insn, 5, 7);
1494
1495    if (rb == 31 && !islit) {
1496        islit = true;
1497        lit = 0;
1498    }
1499
1500    ret = DISAS_NEXT;
1501    switch (opc) {
1502    case 0x00:
1503        /* CALL_PAL */
1504        ret = gen_call_pal(ctx, insn & 0x03ffffff);
1505        break;
1506    case 0x01:
1507        /* OPC01 */
1508        goto invalid_opc;
1509    case 0x02:
1510        /* OPC02 */
1511        goto invalid_opc;
1512    case 0x03:
1513        /* OPC03 */
1514        goto invalid_opc;
1515    case 0x04:
1516        /* OPC04 */
1517        goto invalid_opc;
1518    case 0x05:
1519        /* OPC05 */
1520        goto invalid_opc;
1521    case 0x06:
1522        /* OPC06 */
1523        goto invalid_opc;
1524    case 0x07:
1525        /* OPC07 */
1526        goto invalid_opc;
1527
1528    case 0x09:
1529        /* LDAH */
1530        disp16 = (uint32_t)disp16 << 16;
1531        /* fall through */
1532    case 0x08:
1533        /* LDA */
1534        va = dest_gpr(ctx, ra);
1535        /* It's worth special-casing immediate loads.  */
1536        if (rb == 31) {
1537            tcg_gen_movi_i64(va, disp16);
1538        } else {
1539            tcg_gen_addi_i64(va, load_gpr(ctx, rb), disp16);
1540        }
1541        break;
1542
1543    case 0x0A:
1544        /* LDBU */
1545        REQUIRE_AMASK(BWX);
1546        gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
1547        break;
1548    case 0x0B:
1549        /* LDQ_U */
1550        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
1551        break;
1552    case 0x0C:
1553        /* LDWU */
1554        REQUIRE_AMASK(BWX);
1555        gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
1556        break;
1557    case 0x0D:
1558        /* STW */
1559        REQUIRE_AMASK(BWX);
1560        gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
1561        break;
1562    case 0x0E:
1563        /* STB */
1564        REQUIRE_AMASK(BWX);
1565        gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
1566        break;
1567    case 0x0F:
1568        /* STQ_U */
1569        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
1570        break;
1571
1572    case 0x10:
1573        vc = dest_gpr(ctx, rc);
1574        vb = load_gpr_lit(ctx, rb, lit, islit);
1575
1576        if (ra == 31) {
1577            if (fn7 == 0x00) {
1578                /* Special case ADDL as SEXTL.  */
1579                tcg_gen_ext32s_i64(vc, vb);
1580                break;
1581            }
1582            if (fn7 == 0x29) {
1583                /* Special case SUBQ as NEGQ.  */
1584                tcg_gen_neg_i64(vc, vb);
1585                break;
1586            }
1587        }
1588
1589        va = load_gpr(ctx, ra);
1590        switch (fn7) {
1591        case 0x00:
1592            /* ADDL */
1593            tcg_gen_add_i64(vc, va, vb);
1594            tcg_gen_ext32s_i64(vc, vc);
1595            break;
1596        case 0x02:
1597            /* S4ADDL */
1598            tmp = tcg_temp_new();
1599            tcg_gen_shli_i64(tmp, va, 2);
1600            tcg_gen_add_i64(tmp, tmp, vb);
1601            tcg_gen_ext32s_i64(vc, tmp);
1602            tcg_temp_free(tmp);
1603            break;
1604        case 0x09:
1605            /* SUBL */
1606            tcg_gen_sub_i64(vc, va, vb);
1607            tcg_gen_ext32s_i64(vc, vc);
1608            break;
1609        case 0x0B:
1610            /* S4SUBL */
1611            tmp = tcg_temp_new();
1612            tcg_gen_shli_i64(tmp, va, 2);
1613            tcg_gen_sub_i64(tmp, tmp, vb);
1614            tcg_gen_ext32s_i64(vc, tmp);
1615            tcg_temp_free(tmp);
1616            break;
1617        case 0x0F:
1618            /* CMPBGE */
1619            if (ra == 31) {
1620                /* Special case 0 >= X as X == 0.  */
1621                gen_helper_cmpbe0(vc, vb);
1622            } else {
1623                gen_helper_cmpbge(vc, va, vb);
1624            }
1625            break;
1626        case 0x12:
1627            /* S8ADDL */
1628            tmp = tcg_temp_new();
1629            tcg_gen_shli_i64(tmp, va, 3);
1630            tcg_gen_add_i64(tmp, tmp, vb);
1631            tcg_gen_ext32s_i64(vc, tmp);
1632            tcg_temp_free(tmp);
1633            break;
1634        case 0x1B:
1635            /* S8SUBL */
1636            tmp = tcg_temp_new();
1637            tcg_gen_shli_i64(tmp, va, 3);
1638            tcg_gen_sub_i64(tmp, tmp, vb);
1639            tcg_gen_ext32s_i64(vc, tmp);
1640            tcg_temp_free(tmp);
1641            break;
1642        case 0x1D:
1643            /* CMPULT */
1644            tcg_gen_setcond_i64(TCG_COND_LTU, vc, va, vb);
1645            break;
1646        case 0x20:
1647            /* ADDQ */
1648            tcg_gen_add_i64(vc, va, vb);
1649            break;
1650        case 0x22:
1651            /* S4ADDQ */
1652            tmp = tcg_temp_new();
1653            tcg_gen_shli_i64(tmp, va, 2);
1654            tcg_gen_add_i64(vc, tmp, vb);
1655            tcg_temp_free(tmp);
1656            break;
1657        case 0x29:
1658            /* SUBQ */
1659            tcg_gen_sub_i64(vc, va, vb);
1660            break;
1661        case 0x2B:
1662            /* S4SUBQ */
1663            tmp = tcg_temp_new();
1664            tcg_gen_shli_i64(tmp, va, 2);
1665            tcg_gen_sub_i64(vc, tmp, vb);
1666            tcg_temp_free(tmp);
1667            break;
1668        case 0x2D:
1669            /* CMPEQ */
1670            tcg_gen_setcond_i64(TCG_COND_EQ, vc, va, vb);
1671            break;
1672        case 0x32:
1673            /* S8ADDQ */
1674            tmp = tcg_temp_new();
1675            tcg_gen_shli_i64(tmp, va, 3);
1676            tcg_gen_add_i64(vc, tmp, vb);
1677            tcg_temp_free(tmp);
1678            break;
1679        case 0x3B:
1680            /* S8SUBQ */
1681            tmp = tcg_temp_new();
1682            tcg_gen_shli_i64(tmp, va, 3);
1683            tcg_gen_sub_i64(vc, tmp, vb);
1684            tcg_temp_free(tmp);
1685            break;
1686        case 0x3D:
1687            /* CMPULE */
1688            tcg_gen_setcond_i64(TCG_COND_LEU, vc, va, vb);
1689            break;
1690        case 0x40:
1691            /* ADDL/V */
1692            tmp = tcg_temp_new();
1693            tcg_gen_ext32s_i64(tmp, va);
1694            tcg_gen_ext32s_i64(vc, vb);
1695            tcg_gen_add_i64(tmp, tmp, vc);
1696            tcg_gen_ext32s_i64(vc, tmp);
1697            gen_helper_check_overflow(cpu_env, vc, tmp);
1698            tcg_temp_free(tmp);
1699            break;
1700        case 0x49:
1701            /* SUBL/V */
1702            tmp = tcg_temp_new();
1703            tcg_gen_ext32s_i64(tmp, va);
1704            tcg_gen_ext32s_i64(vc, vb);
1705            tcg_gen_sub_i64(tmp, tmp, vc);
1706            tcg_gen_ext32s_i64(vc, tmp);
1707            gen_helper_check_overflow(cpu_env, vc, tmp);
1708            tcg_temp_free(tmp);
1709            break;
1710        case 0x4D:
1711            /* CMPLT */
1712            tcg_gen_setcond_i64(TCG_COND_LT, vc, va, vb);
1713            break;
1714        case 0x60:
1715            /* ADDQ/V */
1716            tmp = tcg_temp_new();
1717            tmp2 = tcg_temp_new();
1718            tcg_gen_eqv_i64(tmp, va, vb);
1719            tcg_gen_mov_i64(tmp2, va);
1720            tcg_gen_add_i64(vc, va, vb);
1721            tcg_gen_xor_i64(tmp2, tmp2, vc);
1722            tcg_gen_and_i64(tmp, tmp, tmp2);
1723            tcg_gen_shri_i64(tmp, tmp, 63);
1724            tcg_gen_movi_i64(tmp2, 0);
1725            gen_helper_check_overflow(cpu_env, tmp, tmp2);
1726            tcg_temp_free(tmp);
1727            tcg_temp_free(tmp2);
1728            break;
1729        case 0x69:
1730            /* SUBQ/V */
1731            tmp = tcg_temp_new();
1732            tmp2 = tcg_temp_new();
1733            tcg_gen_xor_i64(tmp, va, vb);
1734            tcg_gen_mov_i64(tmp2, va);
1735            tcg_gen_sub_i64(vc, va, vb);
1736            tcg_gen_xor_i64(tmp2, tmp2, vc);
1737            tcg_gen_and_i64(tmp, tmp, tmp2);
1738            tcg_gen_shri_i64(tmp, tmp, 63);
1739            tcg_gen_movi_i64(tmp2, 0);
1740            gen_helper_check_overflow(cpu_env, tmp, tmp2);
1741            tcg_temp_free(tmp);
1742            tcg_temp_free(tmp2);
1743            break;
1744        case 0x6D:
1745            /* CMPLE */
1746            tcg_gen_setcond_i64(TCG_COND_LE, vc, va, vb);
1747            break;
1748        default:
1749            goto invalid_opc;
1750        }
1751        break;
1752
1753    case 0x11:
1754        if (fn7 == 0x20) {
1755            if (rc == 31) {
1756                /* Special case BIS as NOP.  */
1757                break;
1758            }
1759            if (ra == 31) {
1760                /* Special case BIS as MOV.  */
1761                vc = dest_gpr(ctx, rc);
1762                if (islit) {
1763                    tcg_gen_movi_i64(vc, lit);
1764                } else {
1765                    tcg_gen_mov_i64(vc, load_gpr(ctx, rb));
1766                }
1767                break;
1768            }
1769        }
1770
1771        vc = dest_gpr(ctx, rc);
1772        vb = load_gpr_lit(ctx, rb, lit, islit);
1773
1774        if (fn7 == 0x28 && ra == 31) {
1775            /* Special case ORNOT as NOT.  */
1776            tcg_gen_not_i64(vc, vb);
1777            break;
1778        }
1779
1780        va = load_gpr(ctx, ra);
1781        switch (fn7) {
1782        case 0x00:
1783            /* AND */
1784            tcg_gen_and_i64(vc, va, vb);
1785            break;
1786        case 0x08:
1787            /* BIC */
1788            tcg_gen_andc_i64(vc, va, vb);
1789            break;
1790        case 0x14:
1791            /* CMOVLBS */
1792            tmp = tcg_temp_new();
1793            tcg_gen_andi_i64(tmp, va, 1);
1794            tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp, load_zero(ctx),
1795                                vb, load_gpr(ctx, rc));
1796            tcg_temp_free(tmp);
1797            break;
1798        case 0x16:
1799            /* CMOVLBC */
1800            tmp = tcg_temp_new();
1801            tcg_gen_andi_i64(tmp, va, 1);
1802            tcg_gen_movcond_i64(TCG_COND_EQ, vc, tmp, load_zero(ctx),
1803                                vb, load_gpr(ctx, rc));
1804            tcg_temp_free(tmp);
1805            break;
1806        case 0x20:
1807            /* BIS */
1808            tcg_gen_or_i64(vc, va, vb);
1809            break;
1810        case 0x24:
1811            /* CMOVEQ */
1812            tcg_gen_movcond_i64(TCG_COND_EQ, vc, va, load_zero(ctx),
1813                                vb, load_gpr(ctx, rc));
1814            break;
1815        case 0x26:
1816            /* CMOVNE */
1817            tcg_gen_movcond_i64(TCG_COND_NE, vc, va, load_zero(ctx),
1818                                vb, load_gpr(ctx, rc));
1819            break;
1820        case 0x28:
1821            /* ORNOT */
1822            tcg_gen_orc_i64(vc, va, vb);
1823            break;
1824        case 0x40:
1825            /* XOR */
1826            tcg_gen_xor_i64(vc, va, vb);
1827            break;
1828        case 0x44:
1829            /* CMOVLT */
1830            tcg_gen_movcond_i64(TCG_COND_LT, vc, va, load_zero(ctx),
1831                                vb, load_gpr(ctx, rc));
1832            break;
1833        case 0x46:
1834            /* CMOVGE */
1835            tcg_gen_movcond_i64(TCG_COND_GE, vc, va, load_zero(ctx),
1836                                vb, load_gpr(ctx, rc));
1837            break;
1838        case 0x48:
1839            /* EQV */
1840            tcg_gen_eqv_i64(vc, va, vb);
1841            break;
1842        case 0x61:
1843            /* AMASK */
1844            REQUIRE_REG_31(ra);
1845            tcg_gen_andi_i64(vc, vb, ~ctx->amask);
1846            break;
1847        case 0x64:
1848            /* CMOVLE */
1849            tcg_gen_movcond_i64(TCG_COND_LE, vc, va, load_zero(ctx),
1850                                vb, load_gpr(ctx, rc));
1851            break;
1852        case 0x66:
1853            /* CMOVGT */
1854            tcg_gen_movcond_i64(TCG_COND_GT, vc, va, load_zero(ctx),
1855                                vb, load_gpr(ctx, rc));
1856            break;
1857        case 0x6C:
1858            /* IMPLVER */
1859            REQUIRE_REG_31(ra);
1860            tcg_gen_movi_i64(vc, ctx->implver);
1861            break;
1862        default:
1863            goto invalid_opc;
1864        }
1865        break;
1866
1867    case 0x12:
1868        vc = dest_gpr(ctx, rc);
1869        va = load_gpr(ctx, ra);
1870        switch (fn7) {
1871        case 0x02:
1872            /* MSKBL */
1873            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x01);
1874            break;
1875        case 0x06:
1876            /* EXTBL */
1877            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x01);
1878            break;
1879        case 0x0B:
1880            /* INSBL */
1881            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x01);
1882            break;
1883        case 0x12:
1884            /* MSKWL */
1885            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x03);
1886            break;
1887        case 0x16:
1888            /* EXTWL */
1889            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x03);
1890            break;
1891        case 0x1B:
1892            /* INSWL */
1893            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x03);
1894            break;
1895        case 0x22:
1896            /* MSKLL */
1897            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x0f);
1898            break;
1899        case 0x26:
1900            /* EXTLL */
1901            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x0f);
1902            break;
1903        case 0x2B:
1904            /* INSLL */
1905            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x0f);
1906            break;
1907        case 0x30:
1908            /* ZAP */
1909            if (islit) {
1910                gen_zapnoti(vc, va, ~lit);
1911            } else {
1912                gen_helper_zap(vc, va, load_gpr(ctx, rb));
1913            }
1914            break;
1915        case 0x31:
1916            /* ZAPNOT */
1917            if (islit) {
1918                gen_zapnoti(vc, va, lit);
1919            } else {
1920                gen_helper_zapnot(vc, va, load_gpr(ctx, rb));
1921            }
1922            break;
1923        case 0x32:
1924            /* MSKQL */
1925            gen_msk_l(ctx, vc, va, rb, islit, lit, 0xff);
1926            break;
1927        case 0x34:
1928            /* SRL */
1929            if (islit) {
1930                tcg_gen_shri_i64(vc, va, lit & 0x3f);
1931            } else {
1932                tmp = tcg_temp_new();
1933                vb = load_gpr(ctx, rb);
1934                tcg_gen_andi_i64(tmp, vb, 0x3f);
1935                tcg_gen_shr_i64(vc, va, tmp);
1936                tcg_temp_free(tmp);
1937            }
1938            break;
1939        case 0x36:
1940            /* EXTQL */
1941            gen_ext_l(ctx, vc, va, rb, islit, lit, 0xff);
1942            break;
1943        case 0x39:
1944            /* SLL */
1945            if (islit) {
1946                tcg_gen_shli_i64(vc, va, lit & 0x3f);
1947            } else {
1948                tmp = tcg_temp_new();
1949                vb = load_gpr(ctx, rb);
1950                tcg_gen_andi_i64(tmp, vb, 0x3f);
1951                tcg_gen_shl_i64(vc, va, tmp);
1952                tcg_temp_free(tmp);
1953            }
1954            break;
1955        case 0x3B:
1956            /* INSQL */
1957            gen_ins_l(ctx, vc, va, rb, islit, lit, 0xff);
1958            break;
1959        case 0x3C:
1960            /* SRA */
1961            if (islit) {
1962                tcg_gen_sari_i64(vc, va, lit & 0x3f);
1963            } else {
1964                tmp = tcg_temp_new();
1965                vb = load_gpr(ctx, rb);
1966                tcg_gen_andi_i64(tmp, vb, 0x3f);
1967                tcg_gen_sar_i64(vc, va, tmp);
1968                tcg_temp_free(tmp);
1969            }
1970            break;
1971        case 0x52:
1972            /* MSKWH */
1973            gen_msk_h(ctx, vc, va, rb, islit, lit, 0x03);
1974            break;
1975        case 0x57:
1976            /* INSWH */
1977            gen_ins_h(ctx, vc, va, rb, islit, lit, 0x03);
1978            break;
1979        case 0x5A:
1980            /* EXTWH */
1981            gen_ext_h(ctx, vc, va, rb, islit, lit, 0x03);
1982            break;
1983        case 0x62:
1984            /* MSKLH */
1985            gen_msk_h(ctx, vc, va, rb, islit, lit, 0x0f);
1986            break;
1987        case 0x67:
1988            /* INSLH */
1989            gen_ins_h(ctx, vc, va, rb, islit, lit, 0x0f);
1990            break;
1991        case 0x6A:
1992            /* EXTLH */
1993            gen_ext_h(ctx, vc, va, rb, islit, lit, 0x0f);
1994            break;
1995        case 0x72:
1996            /* MSKQH */
1997            gen_msk_h(ctx, vc, va, rb, islit, lit, 0xff);
1998            break;
1999        case 0x77:
2000            /* INSQH */
2001            gen_ins_h(ctx, vc, va, rb, islit, lit, 0xff);
2002            break;
2003        case 0x7A:
2004            /* EXTQH */
2005            gen_ext_h(ctx, vc, va, rb, islit, lit, 0xff);
2006            break;
2007        default:
2008            goto invalid_opc;
2009        }
2010        break;
2011
2012    case 0x13:
2013        vc = dest_gpr(ctx, rc);
2014        vb = load_gpr_lit(ctx, rb, lit, islit);
2015        va = load_gpr(ctx, ra);
2016        switch (fn7) {
2017        case 0x00:
2018            /* MULL */
2019            tcg_gen_mul_i64(vc, va, vb);
2020            tcg_gen_ext32s_i64(vc, vc);
2021            break;
2022        case 0x20:
2023            /* MULQ */
2024            tcg_gen_mul_i64(vc, va, vb);
2025            break;
2026        case 0x30:
2027            /* UMULH */
2028            tmp = tcg_temp_new();
2029            tcg_gen_mulu2_i64(tmp, vc, va, vb);
2030            tcg_temp_free(tmp);
2031            break;
2032        case 0x40:
2033            /* MULL/V */
2034            tmp = tcg_temp_new();
2035            tcg_gen_ext32s_i64(tmp, va);
2036            tcg_gen_ext32s_i64(vc, vb);
2037            tcg_gen_mul_i64(tmp, tmp, vc);
2038            tcg_gen_ext32s_i64(vc, tmp);
2039            gen_helper_check_overflow(cpu_env, vc, tmp);
2040            tcg_temp_free(tmp);
2041            break;
2042        case 0x60:
2043            /* MULQ/V */
2044            tmp = tcg_temp_new();
2045            tmp2 = tcg_temp_new();
2046            tcg_gen_muls2_i64(vc, tmp, va, vb);
2047            tcg_gen_sari_i64(tmp2, vc, 63);
2048            gen_helper_check_overflow(cpu_env, tmp, tmp2);
2049            tcg_temp_free(tmp);
2050            tcg_temp_free(tmp2);
2051            break;
2052        default:
2053            goto invalid_opc;
2054        }
2055        break;
2056
2057    case 0x14:
2058        REQUIRE_AMASK(FIX);
2059        vc = dest_fpr(ctx, rc);
2060        switch (fpfn) { /* fn11 & 0x3F */
2061        case 0x04:
2062            /* ITOFS */
2063            REQUIRE_REG_31(rb);
2064            t32 = tcg_temp_new_i32();
2065            va = load_gpr(ctx, ra);
2066            tcg_gen_extrl_i64_i32(t32, va);
2067            gen_helper_memory_to_s(vc, t32);
2068            tcg_temp_free_i32(t32);
2069            break;
2070        case 0x0A:
2071            /* SQRTF */
2072            REQUIRE_REG_31(ra);
2073            vb = load_fpr(ctx, rb);
2074            gen_helper_sqrtf(vc, cpu_env, vb);
2075            break;
2076        case 0x0B:
2077            /* SQRTS */
2078            REQUIRE_REG_31(ra);
2079            gen_sqrts(ctx, rb, rc, fn11);
2080            break;
2081        case 0x14:
2082            /* ITOFF */
2083            REQUIRE_REG_31(rb);
2084            t32 = tcg_temp_new_i32();
2085            va = load_gpr(ctx, ra);
2086            tcg_gen_extrl_i64_i32(t32, va);
2087            gen_helper_memory_to_f(vc, t32);
2088            tcg_temp_free_i32(t32);
2089            break;
2090        case 0x24:
2091            /* ITOFT */
2092            REQUIRE_REG_31(rb);
2093            va = load_gpr(ctx, ra);
2094            tcg_gen_mov_i64(vc, va);
2095            break;
2096        case 0x2A:
2097            /* SQRTG */
2098            REQUIRE_REG_31(ra);
2099            vb = load_fpr(ctx, rb);
2100            gen_helper_sqrtg(vc, cpu_env, vb);
2101            break;
2102        case 0x02B:
2103            /* SQRTT */
2104            REQUIRE_REG_31(ra);
2105            gen_sqrtt(ctx, rb, rc, fn11);
2106            break;
2107        default:
2108            goto invalid_opc;
2109        }
2110        break;
2111
2112    case 0x15:
2113        /* VAX floating point */
2114        /* XXX: rounding mode and trap are ignored (!) */
2115        vc = dest_fpr(ctx, rc);
2116        vb = load_fpr(ctx, rb);
2117        va = load_fpr(ctx, ra);
2118        switch (fpfn) { /* fn11 & 0x3F */
2119        case 0x00:
2120            /* ADDF */
2121            gen_helper_addf(vc, cpu_env, va, vb);
2122            break;
2123        case 0x01:
2124            /* SUBF */
2125            gen_helper_subf(vc, cpu_env, va, vb);
2126            break;
2127        case 0x02:
2128            /* MULF */
2129            gen_helper_mulf(vc, cpu_env, va, vb);
2130            break;
2131        case 0x03:
2132            /* DIVF */
2133            gen_helper_divf(vc, cpu_env, va, vb);
2134            break;
2135        case 0x1E:
2136            /* CVTDG -- TODO */
2137            REQUIRE_REG_31(ra);
2138            goto invalid_opc;
2139        case 0x20:
2140            /* ADDG */
2141            gen_helper_addg(vc, cpu_env, va, vb);
2142            break;
2143        case 0x21:
2144            /* SUBG */
2145            gen_helper_subg(vc, cpu_env, va, vb);
2146            break;
2147        case 0x22:
2148            /* MULG */
2149            gen_helper_mulg(vc, cpu_env, va, vb);
2150            break;
2151        case 0x23:
2152            /* DIVG */
2153            gen_helper_divg(vc, cpu_env, va, vb);
2154            break;
2155        case 0x25:
2156            /* CMPGEQ */
2157            gen_helper_cmpgeq(vc, cpu_env, va, vb);
2158            break;
2159        case 0x26:
2160            /* CMPGLT */
2161            gen_helper_cmpglt(vc, cpu_env, va, vb);
2162            break;
2163        case 0x27:
2164            /* CMPGLE */
2165            gen_helper_cmpgle(vc, cpu_env, va, vb);
2166            break;
2167        case 0x2C:
2168            /* CVTGF */
2169            REQUIRE_REG_31(ra);
2170            gen_helper_cvtgf(vc, cpu_env, vb);
2171            break;
2172        case 0x2D:
2173            /* CVTGD -- TODO */
2174            REQUIRE_REG_31(ra);
2175            goto invalid_opc;
2176        case 0x2F:
2177            /* CVTGQ */
2178            REQUIRE_REG_31(ra);
2179            gen_helper_cvtgq(vc, cpu_env, vb);
2180            break;
2181        case 0x3C:
2182            /* CVTQF */
2183            REQUIRE_REG_31(ra);
2184            gen_helper_cvtqf(vc, cpu_env, vb);
2185            break;
2186        case 0x3E:
2187            /* CVTQG */
2188            REQUIRE_REG_31(ra);
2189            gen_helper_cvtqg(vc, cpu_env, vb);
2190            break;
2191        default:
2192            goto invalid_opc;
2193        }
2194        break;
2195
2196    case 0x16:
2197        /* IEEE floating-point */
2198        switch (fpfn) { /* fn11 & 0x3F */
2199        case 0x00:
2200            /* ADDS */
2201            gen_adds(ctx, ra, rb, rc, fn11);
2202            break;
2203        case 0x01:
2204            /* SUBS */
2205            gen_subs(ctx, ra, rb, rc, fn11);
2206            break;
2207        case 0x02:
2208            /* MULS */
2209            gen_muls(ctx, ra, rb, rc, fn11);
2210            break;
2211        case 0x03:
2212            /* DIVS */
2213            gen_divs(ctx, ra, rb, rc, fn11);
2214            break;
2215        case 0x20:
2216            /* ADDT */
2217            gen_addt(ctx, ra, rb, rc, fn11);
2218            break;
2219        case 0x21:
2220            /* SUBT */
2221            gen_subt(ctx, ra, rb, rc, fn11);
2222            break;
2223        case 0x22:
2224            /* MULT */
2225            gen_mult(ctx, ra, rb, rc, fn11);
2226            break;
2227        case 0x23:
2228            /* DIVT */
2229            gen_divt(ctx, ra, rb, rc, fn11);
2230            break;
2231        case 0x24:
2232            /* CMPTUN */
2233            gen_cmptun(ctx, ra, rb, rc, fn11);
2234            break;
2235        case 0x25:
2236            /* CMPTEQ */
2237            gen_cmpteq(ctx, ra, rb, rc, fn11);
2238            break;
2239        case 0x26:
2240            /* CMPTLT */
2241            gen_cmptlt(ctx, ra, rb, rc, fn11);
2242            break;
2243        case 0x27:
2244            /* CMPTLE */
2245            gen_cmptle(ctx, ra, rb, rc, fn11);
2246            break;
2247        case 0x2C:
2248            REQUIRE_REG_31(ra);
2249            if (fn11 == 0x2AC || fn11 == 0x6AC) {
2250                /* CVTST */
2251                gen_cvtst(ctx, rb, rc, fn11);
2252            } else {
2253                /* CVTTS */
2254                gen_cvtts(ctx, rb, rc, fn11);
2255            }
2256            break;
2257        case 0x2F:
2258            /* CVTTQ */
2259            REQUIRE_REG_31(ra);
2260            gen_cvttq(ctx, rb, rc, fn11);
2261            break;
2262        case 0x3C:
2263            /* CVTQS */
2264            REQUIRE_REG_31(ra);
2265            gen_cvtqs(ctx, rb, rc, fn11);
2266            break;
2267        case 0x3E:
2268            /* CVTQT */
2269            REQUIRE_REG_31(ra);
2270            gen_cvtqt(ctx, rb, rc, fn11);
2271            break;
2272        default:
2273            goto invalid_opc;
2274        }
2275        break;
2276
2277    case 0x17:
2278        switch (fn11) {
2279        case 0x010:
2280            /* CVTLQ */
2281            REQUIRE_REG_31(ra);
2282            vc = dest_fpr(ctx, rc);
2283            vb = load_fpr(ctx, rb);
2284            gen_cvtlq(vc, vb);
2285            break;
2286        case 0x020:
2287            /* CPYS */
2288            if (rc == 31) {
2289                /* Special case CPYS as FNOP.  */
2290            } else {
2291                vc = dest_fpr(ctx, rc);
2292                va = load_fpr(ctx, ra);
2293                if (ra == rb) {
2294                    /* Special case CPYS as FMOV.  */
2295                    tcg_gen_mov_i64(vc, va);
2296                } else {
2297                    vb = load_fpr(ctx, rb);
2298                    gen_cpy_mask(vc, va, vb, 0, 0x8000000000000000ULL);
2299                }
2300            }
2301            break;
2302        case 0x021:
2303            /* CPYSN */
2304            vc = dest_fpr(ctx, rc);
2305            vb = load_fpr(ctx, rb);
2306            va = load_fpr(ctx, ra);
2307            gen_cpy_mask(vc, va, vb, 1, 0x8000000000000000ULL);
2308            break;
2309        case 0x022:
2310            /* CPYSE */
2311            vc = dest_fpr(ctx, rc);
2312            vb = load_fpr(ctx, rb);
2313            va = load_fpr(ctx, ra);
2314            gen_cpy_mask(vc, va, vb, 0, 0xFFF0000000000000ULL);
2315            break;
2316        case 0x024:
2317            /* MT_FPCR */
2318            va = load_fpr(ctx, ra);
2319            gen_helper_store_fpcr(cpu_env, va);
2320            if (ctx->tb_rm == QUAL_RM_D) {
2321                /* Re-do the copy of the rounding mode to fp_status
2322                   the next time we use dynamic rounding.  */
2323                ctx->tb_rm = -1;
2324            }
2325            break;
2326        case 0x025:
2327            /* MF_FPCR */
2328            va = dest_fpr(ctx, ra);
2329            gen_helper_load_fpcr(va, cpu_env);
2330            break;
2331        case 0x02A:
2332            /* FCMOVEQ */
2333            gen_fcmov(ctx, TCG_COND_EQ, ra, rb, rc);
2334            break;
2335        case 0x02B:
2336            /* FCMOVNE */
2337            gen_fcmov(ctx, TCG_COND_NE, ra, rb, rc);
2338            break;
2339        case 0x02C:
2340            /* FCMOVLT */
2341            gen_fcmov(ctx, TCG_COND_LT, ra, rb, rc);
2342            break;
2343        case 0x02D:
2344            /* FCMOVGE */
2345            gen_fcmov(ctx, TCG_COND_GE, ra, rb, rc);
2346            break;
2347        case 0x02E:
2348            /* FCMOVLE */
2349            gen_fcmov(ctx, TCG_COND_LE, ra, rb, rc);
2350            break;
2351        case 0x02F:
2352            /* FCMOVGT */
2353            gen_fcmov(ctx, TCG_COND_GT, ra, rb, rc);
2354            break;
2355        case 0x030: /* CVTQL */
2356        case 0x130: /* CVTQL/V */
2357        case 0x530: /* CVTQL/SV */
2358            REQUIRE_REG_31(ra);
2359            vc = dest_fpr(ctx, rc);
2360            vb = load_fpr(ctx, rb);
2361            gen_helper_cvtql(vc, cpu_env, vb);
2362            gen_fp_exc_raise(rc, fn11);
2363            break;
2364        default:
2365            goto invalid_opc;
2366        }
2367        break;
2368
2369    case 0x18:
2370        switch ((uint16_t)disp16) {
2371        case 0x0000:
2372            /* TRAPB */
2373            /* No-op.  */
2374            break;
2375        case 0x0400:
2376            /* EXCB */
2377            /* No-op.  */
2378            break;
2379        case 0x4000:
2380            /* MB */
2381            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
2382            break;
2383        case 0x4400:
2384            /* WMB */
2385            tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
2386            break;
2387        case 0x8000:
2388            /* FETCH */
2389            /* No-op */
2390            break;
2391        case 0xA000:
2392            /* FETCH_M */
2393            /* No-op */
2394            break;
2395        case 0xC000:
2396            /* RPCC */
2397            va = dest_gpr(ctx, ra);
2398            if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
2399                gen_io_start();
2400                gen_helper_load_pcc(va, cpu_env);
2401                ret = DISAS_PC_STALE;
2402            } else {
2403                gen_helper_load_pcc(va, cpu_env);
2404            }
2405            break;
2406        case 0xE000:
2407            /* RC */
2408            gen_rx(ctx, ra, 0);
2409            break;
2410        case 0xE800:
2411            /* ECB */
2412            break;
2413        case 0xF000:
2414            /* RS */
2415            gen_rx(ctx, ra, 1);
2416            break;
2417        case 0xF800:
2418            /* WH64 */
2419            /* No-op */
2420            break;
2421        case 0xFC00:
2422            /* WH64EN */
2423            /* No-op */
2424            break;
2425        default:
2426            goto invalid_opc;
2427        }
2428        break;
2429
2430    case 0x19:
2431        /* HW_MFPR (PALcode) */
2432#ifndef CONFIG_USER_ONLY
2433        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
2434        va = dest_gpr(ctx, ra);
2435        ret = gen_mfpr(ctx, va, insn & 0xffff);
2436        break;
2437#else
2438        goto invalid_opc;
2439#endif
2440
2441    case 0x1A:
2442        /* JMP, JSR, RET, JSR_COROUTINE.  These only differ by the branch
2443           prediction stack action, which of course we don't implement.  */
2444        vb = load_gpr(ctx, rb);
2445        tcg_gen_andi_i64(cpu_pc, vb, ~3);
2446        if (ra != 31) {
2447            tcg_gen_movi_i64(ctx->ir[ra], ctx->base.pc_next);
2448        }
2449        ret = DISAS_PC_UPDATED;
2450        break;
2451
2452    case 0x1B:
2453        /* HW_LD (PALcode) */
2454#ifndef CONFIG_USER_ONLY
2455        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
2456        {
2457            TCGv addr = tcg_temp_new();
2458            vb = load_gpr(ctx, rb);
2459            va = dest_gpr(ctx, ra);
2460
2461            tcg_gen_addi_i64(addr, vb, disp12);
2462            switch ((insn >> 12) & 0xF) {
2463            case 0x0:
2464                /* Longword physical access (hw_ldl/p) */
2465                tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL);
2466                break;
2467            case 0x1:
2468                /* Quadword physical access (hw_ldq/p) */
2469                tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEQ);
2470                break;
2471            case 0x2:
2472                /* Longword physical access with lock (hw_ldl_l/p) */
2473                gen_qemu_ldl_l(va, addr, MMU_PHYS_IDX);
2474                break;
2475            case 0x3:
2476                /* Quadword physical access with lock (hw_ldq_l/p) */
2477                gen_qemu_ldq_l(va, addr, MMU_PHYS_IDX);
2478                break;
2479            case 0x4:
2480                /* Longword virtual PTE fetch (hw_ldl/v) */
2481                goto invalid_opc;
2482            case 0x5:
2483                /* Quadword virtual PTE fetch (hw_ldq/v) */
2484                goto invalid_opc;
2485                break;
2486            case 0x6:
2487                /* Invalid */
2488                goto invalid_opc;
2489            case 0x7:
2490            /* Invalid */
2491                goto invalid_opc;
2492            case 0x8:
2493                /* Longword virtual access (hw_ldl) */
2494                goto invalid_opc;
2495            case 0x9:
2496                /* Quadword virtual access (hw_ldq) */
2497                goto invalid_opc;
2498            case 0xA:
2499                /* Longword virtual access with protection check (hw_ldl/w) */
2500                tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LESL);
2501                break;
2502            case 0xB:
2503                /* Quadword virtual access with protection check (hw_ldq/w) */
2504                tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LEQ);
2505                break;
2506            case 0xC:
2507                /* Longword virtual access with alt access mode (hw_ldl/a)*/
2508                goto invalid_opc;
2509            case 0xD:
2510                /* Quadword virtual access with alt access mode (hw_ldq/a) */
2511                goto invalid_opc;
2512            case 0xE:
2513                /* Longword virtual access with alternate access mode and
2514                   protection checks (hw_ldl/wa) */
2515                tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LESL);
2516                break;
2517            case 0xF:
2518                /* Quadword virtual access with alternate access mode and
2519                   protection checks (hw_ldq/wa) */
2520                tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEQ);
2521                break;
2522            }
2523            tcg_temp_free(addr);
2524            break;
2525        }
2526#else
2527        goto invalid_opc;
2528#endif
2529
2530    case 0x1C:
2531        vc = dest_gpr(ctx, rc);
2532        if (fn7 == 0x70) {
2533            /* FTOIT */
2534            REQUIRE_AMASK(FIX);
2535            REQUIRE_REG_31(rb);
2536            va = load_fpr(ctx, ra);
2537            tcg_gen_mov_i64(vc, va);
2538            break;
2539        } else if (fn7 == 0x78) {
2540            /* FTOIS */
2541            REQUIRE_AMASK(FIX);
2542            REQUIRE_REG_31(rb);
2543            t32 = tcg_temp_new_i32();
2544            va = load_fpr(ctx, ra);
2545            gen_helper_s_to_memory(t32, va);
2546            tcg_gen_ext_i32_i64(vc, t32);
2547            tcg_temp_free_i32(t32);
2548            break;
2549        }
2550
2551        vb = load_gpr_lit(ctx, rb, lit, islit);
2552        switch (fn7) {
2553        case 0x00:
2554            /* SEXTB */
2555            REQUIRE_AMASK(BWX);
2556            REQUIRE_REG_31(ra);
2557            tcg_gen_ext8s_i64(vc, vb);
2558            break;
2559        case 0x01:
2560            /* SEXTW */
2561            REQUIRE_AMASK(BWX);
2562            REQUIRE_REG_31(ra);
2563            tcg_gen_ext16s_i64(vc, vb);
2564            break;
2565        case 0x30:
2566            /* CTPOP */
2567            REQUIRE_AMASK(CIX);
2568            REQUIRE_REG_31(ra);
2569            REQUIRE_NO_LIT;
2570            tcg_gen_ctpop_i64(vc, vb);
2571            break;
2572        case 0x31:
2573            /* PERR */
2574            REQUIRE_AMASK(MVI);
2575            REQUIRE_NO_LIT;
2576            va = load_gpr(ctx, ra);
2577            gen_helper_perr(vc, va, vb);
2578            break;
2579        case 0x32:
2580            /* CTLZ */
2581            REQUIRE_AMASK(CIX);
2582            REQUIRE_REG_31(ra);
2583            REQUIRE_NO_LIT;
2584            tcg_gen_clzi_i64(vc, vb, 64);
2585            break;
2586        case 0x33:
2587            /* CTTZ */
2588            REQUIRE_AMASK(CIX);
2589            REQUIRE_REG_31(ra);
2590            REQUIRE_NO_LIT;
2591            tcg_gen_ctzi_i64(vc, vb, 64);
2592            break;
2593        case 0x34:
2594            /* UNPKBW */
2595            REQUIRE_AMASK(MVI);
2596            REQUIRE_REG_31(ra);
2597            REQUIRE_NO_LIT;
2598            gen_helper_unpkbw(vc, vb);
2599            break;
2600        case 0x35:
2601            /* UNPKBL */
2602            REQUIRE_AMASK(MVI);
2603            REQUIRE_REG_31(ra);
2604            REQUIRE_NO_LIT;
2605            gen_helper_unpkbl(vc, vb);
2606            break;
2607        case 0x36:
2608            /* PKWB */
2609            REQUIRE_AMASK(MVI);
2610            REQUIRE_REG_31(ra);
2611            REQUIRE_NO_LIT;
2612            gen_helper_pkwb(vc, vb);
2613            break;
2614        case 0x37:
2615            /* PKLB */
2616            REQUIRE_AMASK(MVI);
2617            REQUIRE_REG_31(ra);
2618            REQUIRE_NO_LIT;
2619            gen_helper_pklb(vc, vb);
2620            break;
2621        case 0x38:
2622            /* MINSB8 */
2623            REQUIRE_AMASK(MVI);
2624            va = load_gpr(ctx, ra);
2625            gen_helper_minsb8(vc, va, vb);
2626            break;
2627        case 0x39:
2628            /* MINSW4 */
2629            REQUIRE_AMASK(MVI);
2630            va = load_gpr(ctx, ra);
2631            gen_helper_minsw4(vc, va, vb);
2632            break;
2633        case 0x3A:
2634            /* MINUB8 */
2635            REQUIRE_AMASK(MVI);
2636            va = load_gpr(ctx, ra);
2637            gen_helper_minub8(vc, va, vb);
2638            break;
2639        case 0x3B:
2640            /* MINUW4 */
2641            REQUIRE_AMASK(MVI);
2642            va = load_gpr(ctx, ra);
2643            gen_helper_minuw4(vc, va, vb);
2644            break;
2645        case 0x3C:
2646            /* MAXUB8 */
2647            REQUIRE_AMASK(MVI);
2648            va = load_gpr(ctx, ra);
2649            gen_helper_maxub8(vc, va, vb);
2650            break;
2651        case 0x3D:
2652            /* MAXUW4 */
2653            REQUIRE_AMASK(MVI);
2654            va = load_gpr(ctx, ra);
2655            gen_helper_maxuw4(vc, va, vb);
2656            break;
2657        case 0x3E:
2658            /* MAXSB8 */
2659            REQUIRE_AMASK(MVI);
2660            va = load_gpr(ctx, ra);
2661            gen_helper_maxsb8(vc, va, vb);
2662            break;
2663        case 0x3F:
2664            /* MAXSW4 */
2665            REQUIRE_AMASK(MVI);
2666            va = load_gpr(ctx, ra);
2667            gen_helper_maxsw4(vc, va, vb);
2668            break;
2669        default:
2670            goto invalid_opc;
2671        }
2672        break;
2673
2674    case 0x1D:
2675        /* HW_MTPR (PALcode) */
2676#ifndef CONFIG_USER_ONLY
2677        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
2678        vb = load_gpr(ctx, rb);
2679        ret = gen_mtpr(ctx, vb, insn & 0xffff);
2680        break;
2681#else
2682        goto invalid_opc;
2683#endif
2684
2685    case 0x1E:
2686        /* HW_RET (PALcode) */
2687#ifndef CONFIG_USER_ONLY
2688        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
2689        if (rb == 31) {
2690            /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
2691               address from EXC_ADDR.  This turns out to be useful for our
2692               emulation PALcode, so continue to accept it.  */
2693            ctx->lit = vb = tcg_temp_new();
2694            tcg_gen_ld_i64(vb, cpu_env, offsetof(CPUAlphaState, exc_addr));
2695        } else {
2696            vb = load_gpr(ctx, rb);
2697        }
2698        tcg_gen_movi_i64(cpu_lock_addr, -1);
2699        tmp = tcg_temp_new();
2700        tcg_gen_movi_i64(tmp, 0);
2701        st_flag_byte(tmp, ENV_FLAG_RX_SHIFT);
2702        tcg_gen_andi_i64(tmp, vb, 1);
2703        st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT);
2704        tcg_temp_free(tmp);
2705        tcg_gen_andi_i64(cpu_pc, vb, ~3);
2706        /* Allow interrupts to be recognized right away.  */
2707        ret = DISAS_PC_UPDATED_NOCHAIN;
2708        break;
2709#else
2710        goto invalid_opc;
2711#endif
2712
2713    case 0x1F:
2714        /* HW_ST (PALcode) */
2715#ifndef CONFIG_USER_ONLY
2716        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
2717        {
2718            switch ((insn >> 12) & 0xF) {
2719            case 0x0:
2720                /* Longword physical access */
2721                va = load_gpr(ctx, ra);
2722                vb = load_gpr(ctx, rb);
2723                tmp = tcg_temp_new();
2724                tcg_gen_addi_i64(tmp, vb, disp12);
2725                tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LESL);
2726                tcg_temp_free(tmp);
2727                break;
2728            case 0x1:
2729                /* Quadword physical access */
2730                va = load_gpr(ctx, ra);
2731                vb = load_gpr(ctx, rb);
2732                tmp = tcg_temp_new();
2733                tcg_gen_addi_i64(tmp, vb, disp12);
2734                tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEQ);
2735                tcg_temp_free(tmp);
2736                break;
2737            case 0x2:
2738                /* Longword physical access with lock */
2739                ret = gen_store_conditional(ctx, ra, rb, disp12,
2740                                            MMU_PHYS_IDX, MO_LESL);
2741                break;
2742            case 0x3:
2743                /* Quadword physical access with lock */
2744                ret = gen_store_conditional(ctx, ra, rb, disp12,
2745                                            MMU_PHYS_IDX, MO_LEQ);
2746                break;
2747            case 0x4:
2748                /* Longword virtual access */
2749                goto invalid_opc;
2750            case 0x5:
2751                /* Quadword virtual access */
2752                goto invalid_opc;
2753            case 0x6:
2754                /* Invalid */
2755                goto invalid_opc;
2756            case 0x7:
2757                /* Invalid */
2758                goto invalid_opc;
2759            case 0x8:
2760                /* Invalid */
2761                goto invalid_opc;
2762            case 0x9:
2763                /* Invalid */
2764                goto invalid_opc;
2765            case 0xA:
2766                /* Invalid */
2767                goto invalid_opc;
2768            case 0xB:
2769                /* Invalid */
2770                goto invalid_opc;
2771            case 0xC:
2772                /* Longword virtual access with alternate access mode */
2773                goto invalid_opc;
2774            case 0xD:
2775                /* Quadword virtual access with alternate access mode */
2776                goto invalid_opc;
2777            case 0xE:
2778                /* Invalid */
2779                goto invalid_opc;
2780            case 0xF:
2781                /* Invalid */
2782                goto invalid_opc;
2783            }
2784            break;
2785        }
2786#else
2787        goto invalid_opc;
2788#endif
2789    case 0x20:
2790        /* LDF */
2791        gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
2792        break;
2793    case 0x21:
2794        /* LDG */
2795        gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
2796        break;
2797    case 0x22:
2798        /* LDS */
2799        gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
2800        break;
2801    case 0x23:
2802        /* LDT */
2803        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
2804        break;
2805    case 0x24:
2806        /* STF */
2807        gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
2808        break;
2809    case 0x25:
2810        /* STG */
2811        gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
2812        break;
2813    case 0x26:
2814        /* STS */
2815        gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
2816        break;
2817    case 0x27:
2818        /* STT */
2819        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
2820        break;
2821    case 0x28:
2822        /* LDL */
2823        gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
2824        break;
2825    case 0x29:
2826        /* LDQ */
2827        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
2828        break;
2829    case 0x2A:
2830        /* LDL_L */
2831        gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
2832        break;
2833    case 0x2B:
2834        /* LDQ_L */
2835        gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
2836        break;
2837    case 0x2C:
2838        /* STL */
2839        gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
2840        break;
2841    case 0x2D:
2842        /* STQ */
2843        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
2844        break;
2845    case 0x2E:
2846        /* STL_C */
2847        ret = gen_store_conditional(ctx, ra, rb, disp16,
2848                                    ctx->mem_idx, MO_LESL);
2849        break;
2850    case 0x2F:
2851        /* STQ_C */
2852        ret = gen_store_conditional(ctx, ra, rb, disp16,
2853                                    ctx->mem_idx, MO_LEQ);
2854        break;
2855    case 0x30:
2856        /* BR */
2857        ret = gen_bdirect(ctx, ra, disp21);
2858        break;
2859    case 0x31: /* FBEQ */
2860        ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
2861        break;
2862    case 0x32: /* FBLT */
2863        ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
2864        break;
2865    case 0x33: /* FBLE */
2866        ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
2867        break;
2868    case 0x34:
2869        /* BSR */
2870        ret = gen_bdirect(ctx, ra, disp21);
2871        break;
2872    case 0x35: /* FBNE */
2873        ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
2874        break;
2875    case 0x36: /* FBGE */
2876        ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
2877        break;
2878    case 0x37: /* FBGT */
2879        ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
2880        break;
2881    case 0x38:
2882        /* BLBC */
2883        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
2884        break;
2885    case 0x39:
2886        /* BEQ */
2887        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
2888        break;
2889    case 0x3A:
2890        /* BLT */
2891        ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
2892        break;
2893    case 0x3B:
2894        /* BLE */
2895        ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
2896        break;
2897    case 0x3C:
2898        /* BLBS */
2899        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
2900        break;
2901    case 0x3D:
2902        /* BNE */
2903        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
2904        break;
2905    case 0x3E:
2906        /* BGE */
2907        ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
2908        break;
2909    case 0x3F:
2910        /* BGT */
2911        ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
2912        break;
2913    invalid_opc:
2914        ret = gen_invalid(ctx);
2915        break;
2916    }
2917
2918    return ret;
2919}
2920
2921static void alpha_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
2922{
2923    DisasContext *ctx = container_of(dcbase, DisasContext, base);
2924    CPUAlphaState *env = cpu->env_ptr;
2925    int64_t bound, mask;
2926
2927    ctx->tbflags = ctx->base.tb->flags;
2928    ctx->mem_idx = cpu_mmu_index(env, false);
2929    ctx->implver = env->implver;
2930    ctx->amask = env->amask;
2931
2932#ifdef CONFIG_USER_ONLY
2933    ctx->ir = cpu_std_ir;
2934#else
2935    ctx->palbr = env->palbr;
2936    ctx->ir = (ctx->tbflags & ENV_FLAG_PAL_MODE ? cpu_pal_ir : cpu_std_ir);
2937#endif
2938
2939    /* ??? Every TB begins with unset rounding mode, to be initialized on
2940       the first fp insn of the TB.  Alternately we could define a proper
2941       default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
2942       to reset the FP_STATUS to that default at the end of any TB that
2943       changes the default.  We could even (gasp) dynamiclly figure out
2944       what default would be most efficient given the running program.  */
2945    ctx->tb_rm = -1;
2946    /* Similarly for flush-to-zero.  */
2947    ctx->tb_ftz = -1;
2948
2949    ctx->zero = NULL;
2950    ctx->sink = NULL;
2951    ctx->lit = NULL;
2952
2953    /* Bound the number of insns to execute to those left on the page.  */
2954    if (in_superpage(ctx, ctx->base.pc_first)) {
2955        mask = -1ULL << 41;
2956    } else {
2957        mask = TARGET_PAGE_MASK;
2958    }
2959    bound = -(ctx->base.pc_first | mask) / 4;
2960    ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
2961}
2962
/* TranslatorOps hook: nothing to emit at the start of a TB for Alpha.  */
static void alpha_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}
2966
/* TranslatorOps hook: record the insn's PC so it can be recovered by
   restore_state_to_opc (only the PC is saved; see data[0] there).  */
static void alpha_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    tcg_gen_insn_start(dcbase->pc_next);
}
2971
/* TranslatorOps hook: raise EXCP_DEBUG in place of the insn at a
   breakpoint.  Returns true to tell the translator loop the insn
   was consumed.  */
static bool alpha_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
                                      const CPUBreakpoint *bp)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    ctx->base.is_jmp = gen_excp(ctx, EXCP_DEBUG, 0);

    /* The address covered by the breakpoint must be included in
       [tb->pc, tb->pc + tb->size) in order to for it to be
       properly cleared -- thus we increment the PC here so that
       the logic setting tb->size below does the right thing.  */
    ctx->base.pc_next += 4;
    return true;
}
2986
2987static void alpha_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
2988{
2989    DisasContext *ctx = container_of(dcbase, DisasContext, base);
2990    CPUAlphaState *env = cpu->env_ptr;
2991    uint32_t insn = translator_ldl(env, ctx->base.pc_next);
2992
2993    ctx->base.pc_next += 4;
2994    ctx->base.is_jmp = translate_one(ctx, insn);
2995
2996    free_context_temps(ctx);
2997    translator_loop_temp_check(&ctx->base);
2998}
2999
/* TranslatorOps hook: emit the TB epilogue.  The cases deliberately
   cascade from the cheapest exit (direct goto_tb chaining) down to a
   plain exit_tb, each FALLTHRU adding the extra work the next-weaker
   exit requires.  */
static void alpha_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    switch (ctx->base.is_jmp) {
    case DISAS_NORETURN:
        /* The insn already ended the TB (e.g. via an exception).  */
        break;
    case DISAS_TOO_MANY:
        /* Insn budget exhausted; chain straight to the next TB when the
           target PC permits it.  */
        if (use_goto_tb(ctx, ctx->base.pc_next)) {
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
            tcg_gen_exit_tb(ctx->base.tb, 0);
        }
        /* FALLTHRU */
    case DISAS_PC_STALE:
        /* cpu_pc does not yet hold pc_next; synchronize it.  */
        tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
        /* FALLTHRU */
    case DISAS_PC_UPDATED:
        /* cpu_pc is correct; try an indirect TB lookup if allowed.  */
        if (!use_exit_tb(ctx)) {
            tcg_gen_lookup_and_goto_ptr();
            break;
        }
        /* FALLTHRU */
    case DISAS_PC_UPDATED_NOCHAIN:
        /* No chaining permitted: return to the main loop, raising
           EXCP_DEBUG first when single-stepping.  */
        if (ctx->base.singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG, 0);
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
        break;
    default:
        g_assert_not_reached();
    }
}
3034
/* TranslatorOps hook: log the guest disassembly of the TB just built,
   prefixed with the nearest symbol for pc_first.  */
static void alpha_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
{
    qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
    log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size);
}
3040
/* Hook table handed to translator_loop for the Alpha frontend.  */
static const TranslatorOps alpha_tr_ops = {
    .init_disas_context = alpha_tr_init_disas_context,
    .tb_start           = alpha_tr_tb_start,
    .insn_start         = alpha_tr_insn_start,
    .breakpoint_check   = alpha_tr_breakpoint_check,
    .translate_insn     = alpha_tr_translate_insn,
    .tb_stop            = alpha_tr_tb_stop,
    .disas_log          = alpha_tr_disas_log,
};
3050
/* Target entry point: translate one TB by driving the generic
   translator loop with the Alpha hook table above.  */
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
{
    DisasContext dc;
    translator_loop(&alpha_tr_ops, &dc.base, cpu, tb, max_insns);
}
3056
/* Restore CPU state from the values recorded at insn start; for Alpha
   only the PC was saved (by alpha_tr_insn_start), so data[0] is the PC.  */
void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->pc = data[0];
}
3062