qemu/target/hppa/translate.c
/*
 * HPPA emulation cpu translation for qemu.
 *
 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/log.h"

/* Since we have a distinction between register size and address size,
   we need to redefine all of these.  */

#undef TCGv
#undef tcg_temp_new
#undef tcg_global_mem_new
#undef tcg_temp_local_new
#undef tcg_temp_free

#if TARGET_LONG_BITS == 64
#define TCGv_tl              TCGv_i64
#define tcg_temp_new_tl      tcg_temp_new_i64
#define tcg_temp_free_tl     tcg_temp_free_i64
#if TARGET_REGISTER_BITS == 64
#define tcg_gen_extu_reg_tl  tcg_gen_mov_i64
#else
#define tcg_gen_extu_reg_tl  tcg_gen_extu_i32_i64
#endif
#else
#define TCGv_tl              TCGv_i32
#define tcg_temp_new_tl      tcg_temp_new_i32
#define tcg_temp_free_tl     tcg_temp_free_i32
#define tcg_gen_extu_reg_tl  tcg_gen_mov_i32
#endif

#if TARGET_REGISTER_BITS == 64
#define TCGv_reg             TCGv_i64

#define tcg_temp_new         tcg_temp_new_i64
#define tcg_global_mem_new   tcg_global_mem_new_i64
#define tcg_temp_local_new   tcg_temp_local_new_i64
#define tcg_temp_free        tcg_temp_free_i64

#define tcg_gen_movi_reg     tcg_gen_movi_i64
#define tcg_gen_mov_reg      tcg_gen_mov_i64
#define tcg_gen_ld8u_reg     tcg_gen_ld8u_i64
#define tcg_gen_ld8s_reg     tcg_gen_ld8s_i64
#define tcg_gen_ld16u_reg    tcg_gen_ld16u_i64
#define tcg_gen_ld16s_reg    tcg_gen_ld16s_i64
#define tcg_gen_ld32u_reg    tcg_gen_ld32u_i64
#define tcg_gen_ld32s_reg    tcg_gen_ld32s_i64
#define tcg_gen_ld_reg       tcg_gen_ld_i64
#define tcg_gen_st8_reg      tcg_gen_st8_i64
#define tcg_gen_st16_reg     tcg_gen_st16_i64
#define tcg_gen_st32_reg     tcg_gen_st32_i64
#define tcg_gen_st_reg       tcg_gen_st_i64
#define tcg_gen_add_reg      tcg_gen_add_i64
#define tcg_gen_addi_reg     tcg_gen_addi_i64
#define tcg_gen_sub_reg      tcg_gen_sub_i64
#define tcg_gen_neg_reg      tcg_gen_neg_i64
#define tcg_gen_subfi_reg    tcg_gen_subfi_i64
#define tcg_gen_subi_reg     tcg_gen_subi_i64
#define tcg_gen_and_reg      tcg_gen_and_i64
#define tcg_gen_andi_reg     tcg_gen_andi_i64
#define tcg_gen_or_reg       tcg_gen_or_i64
#define tcg_gen_ori_reg      tcg_gen_ori_i64
#define tcg_gen_xor_reg      tcg_gen_xor_i64
#define tcg_gen_xori_reg     tcg_gen_xori_i64
#define tcg_gen_not_reg      tcg_gen_not_i64
#define tcg_gen_shl_reg      tcg_gen_shl_i64
#define tcg_gen_shli_reg     tcg_gen_shli_i64
#define tcg_gen_shr_reg      tcg_gen_shr_i64
#define tcg_gen_shri_reg     tcg_gen_shri_i64
#define tcg_gen_sar_reg      tcg_gen_sar_i64
#define tcg_gen_sari_reg     tcg_gen_sari_i64
#define tcg_gen_brcond_reg   tcg_gen_brcond_i64
#define tcg_gen_brcondi_reg  tcg_gen_brcondi_i64
#define tcg_gen_setcond_reg  tcg_gen_setcond_i64
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i64
#define tcg_gen_mul_reg      tcg_gen_mul_i64
#define tcg_gen_muli_reg     tcg_gen_muli_i64
#define tcg_gen_div_reg      tcg_gen_div_i64
#define tcg_gen_rem_reg      tcg_gen_rem_i64
#define tcg_gen_divu_reg     tcg_gen_divu_i64
#define tcg_gen_remu_reg     tcg_gen_remu_i64
#define tcg_gen_discard_reg  tcg_gen_discard_i64
#define tcg_gen_trunc_reg_i32 tcg_gen_extrl_i64_i32
#define tcg_gen_trunc_i64_reg tcg_gen_mov_i64
#define tcg_gen_extu_i32_reg tcg_gen_extu_i32_i64
#define tcg_gen_ext_i32_reg  tcg_gen_ext_i32_i64
#define tcg_gen_extu_reg_i64 tcg_gen_mov_i64
#define tcg_gen_ext_reg_i64  tcg_gen_mov_i64
#define tcg_gen_ext8u_reg    tcg_gen_ext8u_i64
#define tcg_gen_ext8s_reg    tcg_gen_ext8s_i64
#define tcg_gen_ext16u_reg   tcg_gen_ext16u_i64
#define tcg_gen_ext16s_reg   tcg_gen_ext16s_i64
#define tcg_gen_ext32u_reg   tcg_gen_ext32u_i64
#define tcg_gen_ext32s_reg   tcg_gen_ext32s_i64
#define tcg_gen_bswap16_reg  tcg_gen_bswap16_i64
#define tcg_gen_bswap32_reg  tcg_gen_bswap32_i64
#define tcg_gen_bswap64_reg  tcg_gen_bswap64_i64
#define tcg_gen_concat_reg_i64 tcg_gen_concat32_i64
#define tcg_gen_andc_reg     tcg_gen_andc_i64
#define tcg_gen_eqv_reg      tcg_gen_eqv_i64
#define tcg_gen_nand_reg     tcg_gen_nand_i64
#define tcg_gen_nor_reg      tcg_gen_nor_i64
#define tcg_gen_orc_reg      tcg_gen_orc_i64
#define tcg_gen_clz_reg      tcg_gen_clz_i64
#define tcg_gen_ctz_reg      tcg_gen_ctz_i64
#define tcg_gen_clzi_reg     tcg_gen_clzi_i64
#define tcg_gen_ctzi_reg     tcg_gen_ctzi_i64
#define tcg_gen_clrsb_reg    tcg_gen_clrsb_i64
#define tcg_gen_ctpop_reg    tcg_gen_ctpop_i64
#define tcg_gen_rotl_reg     tcg_gen_rotl_i64
#define tcg_gen_rotli_reg    tcg_gen_rotli_i64
#define tcg_gen_rotr_reg     tcg_gen_rotr_i64
#define tcg_gen_rotri_reg    tcg_gen_rotri_i64
#define tcg_gen_deposit_reg  tcg_gen_deposit_i64
#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i64
#define tcg_gen_extract_reg  tcg_gen_extract_i64
#define tcg_gen_sextract_reg tcg_gen_sextract_i64
#define tcg_gen_extract2_reg tcg_gen_extract2_i64
#define tcg_const_reg        tcg_const_i64
#define tcg_const_local_reg  tcg_const_local_i64
#define tcg_constant_reg     tcg_constant_i64
#define tcg_gen_movcond_reg  tcg_gen_movcond_i64
#define tcg_gen_add2_reg     tcg_gen_add2_i64
#define tcg_gen_sub2_reg     tcg_gen_sub2_i64
#define tcg_gen_qemu_ld_reg  tcg_gen_qemu_ld_i64
#define tcg_gen_qemu_st_reg  tcg_gen_qemu_st_i64
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i64
#define tcg_gen_trunc_reg_ptr   tcg_gen_trunc_i64_ptr
#else
#define TCGv_reg             TCGv_i32
#define tcg_temp_new         tcg_temp_new_i32
#define tcg_global_mem_new   tcg_global_mem_new_i32
#define tcg_temp_local_new   tcg_temp_local_new_i32
#define tcg_temp_free        tcg_temp_free_i32

#define tcg_gen_movi_reg     tcg_gen_movi_i32
#define tcg_gen_mov_reg      tcg_gen_mov_i32
#define tcg_gen_ld8u_reg     tcg_gen_ld8u_i32
#define tcg_gen_ld8s_reg     tcg_gen_ld8s_i32
#define tcg_gen_ld16u_reg    tcg_gen_ld16u_i32
#define tcg_gen_ld16s_reg    tcg_gen_ld16s_i32
#define tcg_gen_ld32u_reg    tcg_gen_ld_i32
#define tcg_gen_ld32s_reg    tcg_gen_ld_i32
#define tcg_gen_ld_reg       tcg_gen_ld_i32
#define tcg_gen_st8_reg      tcg_gen_st8_i32
#define tcg_gen_st16_reg     tcg_gen_st16_i32
#define tcg_gen_st32_reg     tcg_gen_st32_i32
#define tcg_gen_st_reg       tcg_gen_st_i32
#define tcg_gen_add_reg      tcg_gen_add_i32
#define tcg_gen_addi_reg     tcg_gen_addi_i32
#define tcg_gen_sub_reg      tcg_gen_sub_i32
#define tcg_gen_neg_reg      tcg_gen_neg_i32
#define tcg_gen_subfi_reg    tcg_gen_subfi_i32
#define tcg_gen_subi_reg     tcg_gen_subi_i32
#define tcg_gen_and_reg      tcg_gen_and_i32
#define tcg_gen_andi_reg     tcg_gen_andi_i32
#define tcg_gen_or_reg       tcg_gen_or_i32
#define tcg_gen_ori_reg      tcg_gen_ori_i32
#define tcg_gen_xor_reg      tcg_gen_xor_i32
#define tcg_gen_xori_reg     tcg_gen_xori_i32
#define tcg_gen_not_reg      tcg_gen_not_i32
#define tcg_gen_shl_reg      tcg_gen_shl_i32
#define tcg_gen_shli_reg     tcg_gen_shli_i32
#define tcg_gen_shr_reg      tcg_gen_shr_i32
#define tcg_gen_shri_reg     tcg_gen_shri_i32
#define tcg_gen_sar_reg      tcg_gen_sar_i32
#define tcg_gen_sari_reg     tcg_gen_sari_i32
#define tcg_gen_brcond_reg   tcg_gen_brcond_i32
#define tcg_gen_brcondi_reg  tcg_gen_brcondi_i32
#define tcg_gen_setcond_reg  tcg_gen_setcond_i32
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i32
#define tcg_gen_mul_reg      tcg_gen_mul_i32
#define tcg_gen_muli_reg     tcg_gen_muli_i32
#define tcg_gen_div_reg      tcg_gen_div_i32
#define tcg_gen_rem_reg      tcg_gen_rem_i32
#define tcg_gen_divu_reg     tcg_gen_divu_i32
#define tcg_gen_remu_reg     tcg_gen_remu_i32
#define tcg_gen_discard_reg  tcg_gen_discard_i32
#define tcg_gen_trunc_reg_i32 tcg_gen_mov_i32
#define tcg_gen_trunc_i64_reg tcg_gen_extrl_i64_i32
#define tcg_gen_extu_i32_reg tcg_gen_mov_i32
#define tcg_gen_ext_i32_reg  tcg_gen_mov_i32
#define tcg_gen_extu_reg_i64 tcg_gen_extu_i32_i64
#define tcg_gen_ext_reg_i64  tcg_gen_ext_i32_i64
#define tcg_gen_ext8u_reg    tcg_gen_ext8u_i32
#define tcg_gen_ext8s_reg    tcg_gen_ext8s_i32
#define tcg_gen_ext16u_reg   tcg_gen_ext16u_i32
#define tcg_gen_ext16s_reg   tcg_gen_ext16s_i32
#define tcg_gen_ext32u_reg   tcg_gen_mov_i32
#define tcg_gen_ext32s_reg   tcg_gen_mov_i32
#define tcg_gen_bswap16_reg  tcg_gen_bswap16_i32
#define tcg_gen_bswap32_reg  tcg_gen_bswap32_i32
#define tcg_gen_concat_reg_i64 tcg_gen_concat_i32_i64
#define tcg_gen_andc_reg     tcg_gen_andc_i32
#define tcg_gen_eqv_reg      tcg_gen_eqv_i32
#define tcg_gen_nand_reg     tcg_gen_nand_i32
#define tcg_gen_nor_reg      tcg_gen_nor_i32
#define tcg_gen_orc_reg      tcg_gen_orc_i32
#define tcg_gen_clz_reg      tcg_gen_clz_i32
#define tcg_gen_ctz_reg      tcg_gen_ctz_i32
#define tcg_gen_clzi_reg     tcg_gen_clzi_i32
#define tcg_gen_ctzi_reg     tcg_gen_ctzi_i32
#define tcg_gen_clrsb_reg    tcg_gen_clrsb_i32
#define tcg_gen_ctpop_reg    tcg_gen_ctpop_i32
#define tcg_gen_rotl_reg     tcg_gen_rotl_i32
#define tcg_gen_rotli_reg    tcg_gen_rotli_i32
#define tcg_gen_rotr_reg     tcg_gen_rotr_i32
#define tcg_gen_rotri_reg    tcg_gen_rotri_i32
#define tcg_gen_deposit_reg  tcg_gen_deposit_i32
#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i32
#define tcg_gen_extract_reg  tcg_gen_extract_i32
#define tcg_gen_sextract_reg tcg_gen_sextract_i32
#define tcg_gen_extract2_reg tcg_gen_extract2_i32
#define tcg_const_reg        tcg_const_i32
#define tcg_const_local_reg  tcg_const_local_i32
#define tcg_constant_reg     tcg_constant_i32
#define tcg_gen_movcond_reg  tcg_gen_movcond_i32
#define tcg_gen_add2_reg     tcg_gen_add2_i32
#define tcg_gen_sub2_reg     tcg_gen_sub2_i32
#define tcg_gen_qemu_ld_reg  tcg_gen_qemu_ld_i32
#define tcg_gen_qemu_st_reg  tcg_gen_qemu_st_i32
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i32
#define tcg_gen_trunc_reg_ptr   tcg_gen_ext_i32_ptr
#endif /* TARGET_REGISTER_BITS */

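/* A DisasCond describes a comparison "a0 <c> a1" to be evaluated
   lazily.  TCG_COND_NEVER and TCG_COND_ALWAYS take no operands, and
   a1 is frequently the constant zero; see the cond_make_* helpers
   below.  */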
typedef struct DisasCond {
    TCGCond c;
    TCGv_reg a0, a1;
} DisasCond;

typedef struct DisasContext {
    DisasContextBase base;
    CPUState *cs;

    target_ureg iaoq_f;
    target_ureg iaoq_b;
    target_ureg iaoq_n;
    TCGv_reg iaoq_n_var;

    int ntempr, ntempl;
    TCGv_reg tempr[8];
    TCGv_tl  templ[4];

    DisasCond null_cond;
    TCGLabel *null_lab;

    uint32_t insn;
    uint32_t tb_flags;
    int mmu_idx;
    int privilege;
    bool psw_n_nonzero;

#ifdef CONFIG_USER_ONLY
    MemOp unalign;
#endif
} DisasContext;

#ifdef CONFIG_USER_ONLY
#define UNALIGN(C)  (C)->unalign
#else
#define UNALIGN(C)  0
#endif

/* Note that ssm/rsm instructions number PSW_W and PSW_E differently.  */
static int expand_sm_imm(DisasContext *ctx, int val)
{
    if (val & PSW_SM_E) {
        val = (val & ~PSW_SM_E) | PSW_E;
    }
    if (val & PSW_SM_W) {
        val = (val & ~PSW_SM_W) | PSW_W;
    }
    return val;
}

/* The space argument is inverted: 0 here explicitly selects sr0, rather
   than meaning "infer the space from the base register".  */
static int expand_sr3x(DisasContext *ctx, int val)
{
    return ~val;
}

/* Convert the M:A bits within a memory insn to the tri-state value
   we use for the final M.  */
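/* That is, M:A = 0x yields 0 (no update), M:A = 10 yields 1
   (post-modify), and M:A = 11 yields -1 (pre-modify).  */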
static int ma_to_m(DisasContext *ctx, int val)
{
    return val & 2 ? (val & 1 ? -1 : 1) : 0;
}

/* Convert the sign of the displacement to a pre or post-modify.  */
static int pos_to_m(DisasContext *ctx, int val)
{
    return val ? 1 : -1;
}

static int neg_to_m(DisasContext *ctx, int val)
{
    return val ? -1 : 1;
}

/* Used for branch targets and fp memory ops.  */
static int expand_shl2(DisasContext *ctx, int val)
{
    return val << 2;
}

/* Used for fp memory ops.  */
static int expand_shl3(DisasContext *ctx, int val)
{
    return val << 3;
}

/* Used for assemble_21.  */
static int expand_shl11(DisasContext *ctx, int val)
{
    return val << 11;
}


/* Include the auto-generated decoder.  */
#include "decode-insns.c.inc"

/* We are not using a goto_tb (for whatever reason), but have updated
   the iaq (for whatever reason), so don't do it again on exit.  */
#define DISAS_IAQ_N_UPDATED  DISAS_TARGET_0

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the iaq for the next instruction to be executed.  */
#define DISAS_IAQ_N_STALE    DISAS_TARGET_1

/* Similarly, but we want to return to the main loop immediately
   to recognize unmasked interrupts.  */
#define DISAS_IAQ_N_STALE_EXIT      DISAS_TARGET_2
#define DISAS_EXIT                  DISAS_TARGET_3

/* global register indexes */
static TCGv_reg cpu_gr[32];
static TCGv_i64 cpu_sr[4];
static TCGv_i64 cpu_srH;
static TCGv_reg cpu_iaoq_f;
static TCGv_reg cpu_iaoq_b;
static TCGv_i64 cpu_iasq_f;
static TCGv_i64 cpu_iasq_b;
static TCGv_reg cpu_sar;
static TCGv_reg cpu_psw_n;
static TCGv_reg cpu_psw_v;
static TCGv_reg cpu_psw_cb;
static TCGv_reg cpu_psw_cb_msb;

#include "exec/gen-icount.h"

void hppa_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUHPPAState, V) }

    typedef struct { TCGv_reg *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
        DEF_VAR(psw_n),
        DEF_VAR(psw_v),
        DEF_VAR(psw_cb),
        DEF_VAR(psw_cb_msb),
        DEF_VAR(iaoq_f),
        DEF_VAR(iaoq_b),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char gr_names[32][4] = {
        "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
        "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
    };
    /* SR[4-7] are not global registers so that we can index them.  */
    static const char sr_names[5][4] = {
        "sr0", "sr1", "sr2", "sr3", "srH"
    };

    int i;

    cpu_gr[0] = NULL;
    for (i = 1; i < 32; i++) {
        cpu_gr[i] = tcg_global_mem_new(cpu_env,
                                       offsetof(CPUHPPAState, gr[i]),
                                       gr_names[i]);
    }
    for (i = 0; i < 4; i++) {
        cpu_sr[i] = tcg_global_mem_new_i64(cpu_env,
                                           offsetof(CPUHPPAState, sr[i]),
                                           sr_names[i]);
    }
    cpu_srH = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPUHPPAState, sr[4]),
                                     sr_names[4]);

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new(cpu_env, v->ofs, v->name);
    }

    cpu_iasq_f = tcg_global_mem_new_i64(cpu_env,
                                        offsetof(CPUHPPAState, iasq_f),
                                        "iasq_f");
    cpu_iasq_b = tcg_global_mem_new_i64(cpu_env,
                                        offsetof(CPUHPPAState, iasq_b),
                                        "iasq_b");
}

static DisasCond cond_make_f(void)
{
    return (DisasCond){
        .c = TCG_COND_NEVER,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_t(void)
{
    return (DisasCond){
        .c = TCG_COND_ALWAYS,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_n(void)
{
    return (DisasCond){
        .c = TCG_COND_NE,
        .a0 = cpu_psw_n,
        .a1 = tcg_constant_reg(0)
    };
}

static DisasCond cond_make_0_tmp(TCGCond c, TCGv_reg a0)
{
    assert(c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    return (DisasCond){
        .c = c, .a0 = a0, .a1 = tcg_constant_reg(0)
    };
}

static DisasCond cond_make_0(TCGCond c, TCGv_reg a0)
{
    TCGv_reg tmp = tcg_temp_new();
    tcg_gen_mov_reg(tmp, a0);
    return cond_make_0_tmp(c, tmp);
}

static DisasCond cond_make(TCGCond c, TCGv_reg a0, TCGv_reg a1)
{
    DisasCond r = { .c = c };

    assert(c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    r.a0 = tcg_temp_new();
    tcg_gen_mov_reg(r.a0, a0);
    r.a1 = tcg_temp_new();
    tcg_gen_mov_reg(r.a1, a1);

    return r;
}

static void cond_free(DisasCond *cond)
{
    switch (cond->c) {
    default:
        if (cond->a0 != cpu_psw_n) {
            tcg_temp_free(cond->a0);
        }
        tcg_temp_free(cond->a1);
        cond->a0 = NULL;
        cond->a1 = NULL;
        /* fallthru */
    case TCG_COND_ALWAYS:
        cond->c = TCG_COND_NEVER;
        break;
    case TCG_COND_NEVER:
        break;
    }
}

static TCGv_reg get_temp(DisasContext *ctx)
{
    unsigned i = ctx->ntempr++;
    g_assert(i < ARRAY_SIZE(ctx->tempr));
    return ctx->tempr[i] = tcg_temp_new();
}

#ifndef CONFIG_USER_ONLY
static TCGv_tl get_temp_tl(DisasContext *ctx)
{
    unsigned i = ctx->ntempl++;
    g_assert(i < ARRAY_SIZE(ctx->templ));
    return ctx->templ[i] = tcg_temp_new_tl();
}
#endif

static TCGv_reg load_const(DisasContext *ctx, target_sreg v)
{
    TCGv_reg t = get_temp(ctx);
    tcg_gen_movi_reg(t, v);
    return t;
}

static TCGv_reg load_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0) {
        TCGv_reg t = get_temp(ctx);
        tcg_gen_movi_reg(t, 0);
        return t;
    } else {
        return cpu_gr[reg];
    }
}

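/* For REG == 0, or while a nullification condition is pending, return
   a temporary; save_gpr will later commit or discard the value via a
   conditional move.  */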
static TCGv_reg dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
        return get_temp(ctx);
    } else {
        return cpu_gr[reg];
    }
}

static void save_or_nullify(DisasContext *ctx, TCGv_reg dest, TCGv_reg t)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        tcg_gen_movcond_reg(ctx->null_cond.c, dest, ctx->null_cond.a0,
                            ctx->null_cond.a1, dest, t);
    } else {
        tcg_gen_mov_reg(dest, t);
    }
}

static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_reg t)
{
    if (reg != 0) {
        save_or_nullify(ctx, cpu_gr[reg], t);
    }
}

#ifdef HOST_WORDS_BIGENDIAN
# define HI_OFS  0
# define LO_OFS  4
#else
# define HI_OFS  4
# define LO_OFS  0
#endif

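/* The 32-bit FP registers are the two halves of the 64-bit fr[]
   elements: bit 5 of RT selects the right (low) half, with HI_OFS and
   LO_OFS adjusting for host endianness.  */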
static TCGv_i32 load_frw_i32(unsigned rt)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, cpu_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
    return ret;
}

static TCGv_i32 load_frw0_i32(unsigned rt)
{
    if (rt == 0) {
        return tcg_const_i32(0);
    } else {
        return load_frw_i32(rt);
    }
}

static TCGv_i64 load_frw0_i64(unsigned rt)
{
    if (rt == 0) {
        return tcg_const_i64(0);
    } else {
        TCGv_i64 ret = tcg_temp_new_i64();
        tcg_gen_ld32u_i64(ret, cpu_env,
                          offsetof(CPUHPPAState, fr[rt & 31])
                          + (rt & 32 ? LO_OFS : HI_OFS));
        return ret;
    }
}

static void save_frw_i32(unsigned rt, TCGv_i32 val)
{
    tcg_gen_st_i32(val, cpu_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
}

#undef HI_OFS
#undef LO_OFS

static TCGv_i64 load_frd(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, cpu_env, offsetof(CPUHPPAState, fr[rt]));
    return ret;
}

static TCGv_i64 load_frd0(unsigned rt)
{
    if (rt == 0) {
        return tcg_const_i64(0);
    } else {
        return load_frd(rt);
    }
}

static void save_frd(unsigned rt, TCGv_i64 val)
{
    tcg_gen_st_i64(val, cpu_env, offsetof(CPUHPPAState, fr[rt]));
}

static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
{
#ifdef CONFIG_USER_ONLY
    tcg_gen_movi_i64(dest, 0);
#else
    if (reg < 4) {
        tcg_gen_mov_i64(dest, cpu_sr[reg]);
    } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        tcg_gen_mov_i64(dest, cpu_srH);
    } else {
        tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUHPPAState, sr[reg]));
    }
#endif
}

/* Skip over the implementation of an insn that has been nullified.
   Use this when the insn is too complex for a conditional move.  */
static void nullify_over(DisasContext *ctx)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        /* The always condition should have been handled in the main loop.  */
        assert(ctx->null_cond.c != TCG_COND_ALWAYS);

        ctx->null_lab = gen_new_label();

        /* If we're using PSW[N], copy it to a temp because... */
        if (ctx->null_cond.a0 == cpu_psw_n) {
            ctx->null_cond.a0 = tcg_temp_new();
            tcg_gen_mov_reg(ctx->null_cond.a0, cpu_psw_n);
        }
        /* ... we clear it before branching over the implementation,
           so that (1) it's clear after nullifying this insn and
           (2) if this insn nullifies the next, PSW[N] is valid.  */
        if (ctx->psw_n_nonzero) {
            ctx->psw_n_nonzero = false;
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }

        tcg_gen_brcond_reg(ctx->null_cond.c, ctx->null_cond.a0,
                           ctx->null_cond.a1, ctx->null_lab);
        cond_free(&ctx->null_cond);
    }
}

/* Save the current nullification state to PSW[N].  */
static void nullify_save(DisasContext *ctx)
{
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (ctx->psw_n_nonzero) {
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }
        return;
    }
    if (ctx->null_cond.a0 != cpu_psw_n) {
        tcg_gen_setcond_reg(ctx->null_cond.c, cpu_psw_n,
                            ctx->null_cond.a0, ctx->null_cond.a1);
        ctx->psw_n_nonzero = true;
    }
    cond_free(&ctx->null_cond);
}

/* Set PSW[N] to X.  The intention is that this is used immediately
   before a goto_tb/exit_tb, so that there is no fallthru path to other
   code within the TB.  Therefore we do not update psw_n_nonzero.  */
static void nullify_set(DisasContext *ctx, bool x)
{
    if (ctx->psw_n_nonzero || x) {
        tcg_gen_movi_reg(cpu_psw_n, x);
    }
}

/* Mark the end of an instruction that may have been nullified.
   This is the pair to nullify_over.  Always returns true so that
   it may be tail-called from a translate function.  */
static bool nullify_end(DisasContext *ctx)
{
    TCGLabel *null_lab = ctx->null_lab;
    DisasJumpType status = ctx->base.is_jmp;

    /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
       For UPDATED, we cannot update on the nullified path.  */
    assert(status != DISAS_IAQ_N_UPDATED);

    if (likely(null_lab == NULL)) {
        /* The current insn wasn't conditional or handled the condition
           applied to it without a branch, so the (new) setting of
           NULL_COND can be applied directly to the next insn.  */
        return true;
    }
    ctx->null_lab = NULL;

    if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
        /* The next instruction will be unconditional,
           and NULL_COND already reflects that.  */
        gen_set_label(null_lab);
    } else {
        /* The insn that we just executed is itself nullifying the next
           instruction.  Store the condition in the PSW[N] global.
           We asserted PSW[N] = 0 in nullify_over, so that after the
           label we have the proper value in place.  */
        nullify_save(ctx);
        gen_set_label(null_lab);
        ctx->null_cond = cond_make_n();
    }
    if (status == DISAS_NORETURN) {
        ctx->base.is_jmp = DISAS_NEXT;
    }
    return true;
}

static void copy_iaoq_entry(TCGv_reg dest, target_ureg ival, TCGv_reg vval)
{
    if (unlikely(ival == -1)) {
        tcg_gen_mov_reg(dest, vval);
    } else {
        tcg_gen_movi_reg(dest, ival);
    }
}

static inline target_ureg iaoq_dest(DisasContext *ctx, target_sreg disp)
{
    return ctx->iaoq_f + disp + 8;
}

static void gen_excp_1(int exception)
{
    gen_helper_excp(cpu_env, tcg_constant_i32(exception));
}

static void gen_excp(DisasContext *ctx, int exception)
{
    copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
    copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
    nullify_save(ctx);
    gen_excp_1(exception);
    ctx->base.is_jmp = DISAS_NORETURN;
}

static bool gen_excp_iir(DisasContext *ctx, int exc)
{
    nullify_over(ctx);
    tcg_gen_st_reg(tcg_constant_reg(ctx->insn),
                   cpu_env, offsetof(CPUHPPAState, cr[CR_IIR]));
    gen_excp(ctx, exc);
    return nullify_end(ctx);
}

static bool gen_illegal(DisasContext *ctx)
{
    return gen_excp_iir(ctx, EXCP_ILL);
}

#ifdef CONFIG_USER_ONLY
#define CHECK_MOST_PRIVILEGED(EXCP) \
    return gen_excp_iir(ctx, EXCP)
#else
#define CHECK_MOST_PRIVILEGED(EXCP) \
    do {                                     \
        if (ctx->privilege != 0) {           \
            return gen_excp_iir(ctx, EXCP);  \
        }                                    \
    } while (0)
#endif

static bool use_goto_tb(DisasContext *ctx, target_ureg dest)
{
    return translator_use_goto_tb(&ctx->base, dest);
}

/* If the next insn is to be nullified, and it's on the same page,
   and we're not attempting to set a breakpoint on it, then we can
   totally skip the nullified insn.  This avoids creating and
   executing a TB that merely branches to the next TB.  */
static bool use_nullify_skip(DisasContext *ctx)
{
    return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
            && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
}

static void gen_goto_tb(DisasContext *ctx, int which,
                        target_ureg f, target_ureg b)
{
    if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
        tcg_gen_goto_tb(which);
        tcg_gen_movi_reg(cpu_iaoq_f, f);
        tcg_gen_movi_reg(cpu_iaoq_b, b);
        tcg_gen_exit_tb(ctx->base.tb, which);
    } else {
        copy_iaoq_entry(cpu_iaoq_f, f, cpu_iaoq_b);
        copy_iaoq_entry(cpu_iaoq_b, b, ctx->iaoq_n_var);
        tcg_gen_lookup_and_goto_ptr();
    }
}

static bool cond_need_sv(int c)
{
    return c == 2 || c == 3 || c == 6;
}

static bool cond_need_cb(int c)
{
    return c == 4 || c == 5;
}

/*
 * Compute the conditional for arithmetic.  See Page 5-3, Table 5-1, of
 * the PA-RISC 1.1 Architecture Reference Manual for details.
 */

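/* The low bit of CF inverts the sense of the test: e.g. CF = 5 (0b101)
   selects condition 2 ("<") inverted, i.e. ">=".  */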
static DisasCond do_cond(unsigned cf, TCGv_reg res,
                         TCGv_reg cb_msb, TCGv_reg sv)
{
    DisasCond cond;
    TCGv_reg tmp;

    switch (cf >> 1) {
    case 0: /* Never / TR    (0 / 1) */
        cond = cond_make_f();
        break;
    case 1: /* = / <>        (Z / !Z) */
        cond = cond_make_0(TCG_COND_EQ, res);
        break;
    case 2: /* < / >=        (N ^ V / !(N ^ V)) */
        tmp = tcg_temp_new();
        tcg_gen_xor_reg(tmp, res, sv);
        cond = cond_make_0_tmp(TCG_COND_LT, tmp);
        break;
    case 3: /* <= / >        (N ^ V) | Z / !((N ^ V) | Z) */
        /*
         * Simplify:
         *   (N ^ V) | Z
         *   ((res < 0) ^ (sv < 0)) | !res
         *   ((res ^ sv) < 0) | !res
         *   (~(res ^ sv) >= 0) | !res
         *   !(~(res ^ sv) >> 31) | !res
         *   !(~(res ^ sv) >> 31 & res)
         */
        tmp = tcg_temp_new();
        tcg_gen_eqv_reg(tmp, res, sv);
        tcg_gen_sari_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
        tcg_gen_and_reg(tmp, tmp, res);
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 4: /* NUV / UV      (!C / C) */
        cond = cond_make_0(TCG_COND_EQ, cb_msb);
        break;
    case 5: /* ZNV / VNZ     (!C | Z / C & !Z) */
        tmp = tcg_temp_new();
        tcg_gen_neg_reg(tmp, cb_msb);
        tcg_gen_and_reg(tmp, tmp, res);
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 6: /* SV / NSV      (V / !V) */
        cond = cond_make_0(TCG_COND_LT, sv);
        break;
    case 7: /* OD / EV */
        tmp = tcg_temp_new();
        tcg_gen_andi_reg(tmp, res, 1);
        cond = cond_make_0_tmp(TCG_COND_NE, tmp);
        break;
    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/* Similar, but for the special case of subtraction without borrow, we
   can use the inputs directly.  This can allow other computation to be
   deleted as unused.  */

static DisasCond do_sub_cond(unsigned cf, TCGv_reg res,
                             TCGv_reg in1, TCGv_reg in2, TCGv_reg sv)
{
    DisasCond cond;

    switch (cf >> 1) {
    case 1: /* = / <> */
        cond = cond_make(TCG_COND_EQ, in1, in2);
        break;
    case 2: /* < / >= */
        cond = cond_make(TCG_COND_LT, in1, in2);
        break;
    case 3: /* <= / > */
        cond = cond_make(TCG_COND_LE, in1, in2);
        break;
    case 4: /* << / >>= */
        cond = cond_make(TCG_COND_LTU, in1, in2);
        break;
    case 5: /* <<= / >> */
        cond = cond_make(TCG_COND_LEU, in1, in2);
        break;
    default:
        return do_cond(cf, res, NULL, sv);
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/*
 * Similar, but for logicals, where the carry and overflow bits are not
 * computed, and use of them is undefined.
 *
 * Undefined or not, hardware does not trap.  It seems reasonable to
 * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's
 * how cases c={2,3} are treated.
 */

static DisasCond do_log_cond(unsigned cf, TCGv_reg res)
{
    switch (cf) {
    case 0:  /* never */
    case 9:  /* undef, C */
    case 11: /* undef, C & !Z */
    case 12: /* undef, V */
        return cond_make_f();

    case 1:  /* true */
    case 8:  /* undef, !C */
    case 10: /* undef, !C | Z */
    case 13: /* undef, !V */
        return cond_make_t();

    case 2:  /* == */
        return cond_make_0(TCG_COND_EQ, res);
    case 3:  /* <> */
        return cond_make_0(TCG_COND_NE, res);
    case 4:  /* < */
        return cond_make_0(TCG_COND_LT, res);
    case 5:  /* >= */
        return cond_make_0(TCG_COND_GE, res);
    case 6:  /* <= */
        return cond_make_0(TCG_COND_LE, res);
    case 7:  /* > */
        return cond_make_0(TCG_COND_GT, res);

    case 14: /* OD */
    case 15: /* EV */
        return do_cond(cf, res, NULL, NULL);

    default:
        g_assert_not_reached();
    }
}

/* Similar, but for shift/extract/deposit conditions.  */

static DisasCond do_sed_cond(unsigned orig, TCGv_reg res)
{
    unsigned c, f;

    /* Convert the compressed condition codes to standard.
       0-2 are the same as logicals (never, =, <), while 3 is OD.
       4-7 are the reverse of 0-3.  */
    c = orig & 3;
    if (c == 3) {
        c = 7;
    }
    f = (orig & 4) / 4;

    return do_log_cond(c * 2 + f, res);
}

/* Similar, but for unit conditions.  */

static DisasCond do_unit_cond(unsigned cf, TCGv_reg res,
                              TCGv_reg in1, TCGv_reg in2)
{
    DisasCond cond;
    TCGv_reg tmp, cb = NULL;

    if (cf & 8) {
        /* Since we want to test lots of carry-out bits all at once, do not
         * do our normal thing and compute carry-in of bit B+1 since that
         * leaves us with carry bits spread across two words.
         */
        cb = tcg_temp_new();
        tmp = tcg_temp_new();
        tcg_gen_or_reg(cb, in1, in2);
        tcg_gen_and_reg(tmp, in1, in2);
        tcg_gen_andc_reg(cb, cb, res);
        tcg_gen_or_reg(cb, cb, tmp);
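        /* cb now holds the carry-out of each bit: with res equal to
           in1 ^ in2 ^ cin, this is (in1 & in2) | ((in1 | in2) & ~res),
           the per-bit majority maj(in1, in2, cin).  */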
        tcg_temp_free(tmp);
    }

    switch (cf >> 1) {
    case 0: /* never / TR */
    case 1: /* undefined */
    case 5: /* undefined */
        cond = cond_make_f();
        break;

    case 2: /* SBZ / NBZ */
        /* See hasless(v,1) from
         * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
         */
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, 0x01010101u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, 0x80808080u);
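        /* tmp is nonzero iff some byte of res is zero.  */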
        cond = cond_make_0(TCG_COND_NE, tmp);
        tcg_temp_free(tmp);
        break;

    case 3: /* SHZ / NHZ */
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, 0x00010001u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        tcg_temp_free(tmp);
        break;

    case 4: /* SDC / NDC */
        tcg_gen_andi_reg(cb, cb, 0x88888888u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 6: /* SBC / NBC */
        tcg_gen_andi_reg(cb, cb, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 7: /* SHC / NHC */
        tcg_gen_andi_reg(cb, cb, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    default:
        g_assert_not_reached();
    }
    if (cf & 8) {
        tcg_temp_free(cb);
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/* Compute signed overflow for addition.  */
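/* Overflow requires the addends to share a sign which the result lacks:
   sv = (res ^ in1) & ~(in1 ^ in2) is negative exactly on overflow.  */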
static TCGv_reg do_add_sv(DisasContext *ctx, TCGv_reg res,
                          TCGv_reg in1, TCGv_reg in2)
{
    TCGv_reg sv = get_temp(ctx);
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_xor_reg(sv, res, in1);
    tcg_gen_xor_reg(tmp, in1, in2);
    tcg_gen_andc_reg(sv, sv, tmp);
    tcg_temp_free(tmp);

    return sv;
}

/* Compute signed overflow for subtraction.  */
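/* Here overflow requires the operands to differ in sign:
   sv = (res ^ in1) & (in1 ^ in2) is negative exactly on overflow.  */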
static TCGv_reg do_sub_sv(DisasContext *ctx, TCGv_reg res,
                          TCGv_reg in1, TCGv_reg in2)
{
    TCGv_reg sv = get_temp(ctx);
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_xor_reg(sv, res, in1);
    tcg_gen_xor_reg(tmp, in1, in2);
    tcg_gen_and_reg(sv, sv, tmp);
    tcg_temp_free(tmp);

    return sv;
}

static void do_add(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, unsigned shift, bool is_l,
                   bool is_tsv, bool is_tc, bool is_c, unsigned cf)
{
    TCGv_reg dest, cb, cb_msb, sv, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    cb = NULL;
    cb_msb = NULL;

    if (shift) {
        tmp = get_temp(ctx);
        tcg_gen_shli_reg(tmp, in1, shift);
        in1 = tmp;
    }

    if (!is_l || cond_need_cb(c)) {
        TCGv_reg zero = tcg_constant_reg(0);
        cb_msb = get_temp(ctx);
        tcg_gen_add2_reg(dest, cb_msb, in1, zero, in2, zero);
        if (is_c) {
            tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cpu_psw_cb_msb, zero);
        }
        if (!is_l) {
            cb = get_temp(ctx);
            tcg_gen_xor_reg(cb, in1, in2);
            tcg_gen_xor_reg(cb, cb, dest);
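            /* cb = in1 ^ in2 ^ dest recovers the per-bit carry-in.  */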
        }
    } else {
        tcg_gen_add_reg(dest, in1, in2);
        if (is_c) {
            tcg_gen_add_reg(dest, dest, cpu_psw_cb_msb);
        }
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_add_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            /* ??? Need to include overflow from shift.  */
            gen_helper_tsv(cpu_env, sv);
        }
    }

    /* Emit any conditional trap before any writeback.  */
    cond = do_cond(cf, dest, cb_msb, sv);
    if (is_tc) {
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(cpu_env, tmp);
        tcg_temp_free(tmp);
    }

    /* Write back the result.  */
    if (!is_l) {
        save_or_nullify(ctx, cpu_psw_cb, cb);
        save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    }
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_sh *a,
                       bool is_l, bool is_tsv, bool is_tc, bool is_c)
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l, is_tsv, is_tc, is_c, a->cf);
    return nullify_end(ctx);
}

static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a,
                       bool is_tsv, bool is_tc)
{
    TCGv_reg tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = load_const(ctx, a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf);
    return nullify_end(ctx);
}

static void do_sub(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, bool is_tsv, bool is_b,
                   bool is_tc, unsigned cf)
{
    TCGv_reg dest, sv, cb, cb_msb, zero, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    cb = tcg_temp_new();
    cb_msb = tcg_temp_new();

    zero = tcg_constant_reg(0);
    if (is_b) {
        /* DEST,C = IN1 + ~IN2 + C.  */
        tcg_gen_not_reg(cb, in2);
        tcg_gen_add2_reg(dest, cb_msb, in1, zero, cpu_psw_cb_msb, zero);
        tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cb, zero);
        tcg_gen_xor_reg(cb, cb, in1);
        tcg_gen_xor_reg(cb, cb, dest);
    } else {
        /* DEST,C = IN1 + ~IN2 + 1.  We can produce the same result in fewer
           operations by seeding the high word with 1 and subtracting.  */
        tcg_gen_movi_reg(cb_msb, 1);
        tcg_gen_sub2_reg(dest, cb_msb, in1, cb_msb, in2, zero);
        tcg_gen_eqv_reg(cb, in1, in2);
        tcg_gen_xor_reg(cb, cb, dest);
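        /* cb = ~(in1 ^ in2) ^ dest is the per-bit carry-in of the
           equivalent addition IN1 + ~IN2 + 1.  */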
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            gen_helper_tsv(cpu_env, sv);
        }
    }

    /* Compute the condition.  We cannot use the special case for borrow.  */
    if (!is_b) {
        cond = do_sub_cond(cf, dest, in1, in2, sv);
    } else {
        cond = do_cond(cf, dest, cb_msb, sv);
    }

    /* Emit any conditional trap before any writeback.  */
    if (is_tc) {
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(cpu_env, tmp);
        tcg_temp_free(tmp);
    }

    /* Write back the result.  */
    save_or_nullify(ctx, cpu_psw_cb, cb);
    save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);
    tcg_temp_free(cb);
    tcg_temp_free(cb_msb);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf *a,
                       bool is_tsv, bool is_b, bool is_tc)
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf);
    return nullify_end(ctx);
}

static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv)
{
    TCGv_reg tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = load_const(ctx, a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf);
    return nullify_end(ctx);
}

static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                      TCGv_reg in2, unsigned cf)
{
    TCGv_reg dest, sv;
    DisasCond cond;

    dest = tcg_temp_new();
    tcg_gen_sub_reg(dest, in1, in2);

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (cond_need_sv(cf >> 1)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    /* Form the condition for the compare.  */
    cond = do_sub_cond(cf, dest, in1, in2, sv);

    /* Clear.  */
    tcg_gen_movi_reg(dest, 0);
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static void do_log(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, unsigned cf,
                   void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg dest = dest_gpr(ctx, rt);

    /* Perform the operation, and writeback.  */
    fn(dest, in1, in2);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (cf) {
        ctx->null_cond = do_log_cond(cf, dest);
    }
}

static bool do_log_reg(DisasContext *ctx, arg_rrr_cf *a,
                       void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, fn);
    return nullify_end(ctx);
}

static void do_unit(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                    TCGv_reg in2, unsigned cf, bool is_tc,
                    void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg dest;
    DisasCond cond;

    if (cf == 0) {
        dest = dest_gpr(ctx, rt);
        fn(dest, in1, in2);
        save_gpr(ctx, rt, dest);
        cond_free(&ctx->null_cond);
    } else {
        dest = tcg_temp_new();
        fn(dest, in1, in2);

        cond = do_unit_cond(cf, dest, in1, in2);

        if (is_tc) {
            TCGv_reg tmp = tcg_temp_new();
            tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
            gen_helper_tcond(cpu_env, tmp);
            tcg_temp_free(tmp);
        }
        save_gpr(ctx, rt, dest);

        cond_free(&ctx->null_cond);
        ctx->null_cond = cond;
    }
}

#ifndef CONFIG_USER_ONLY
/* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
   from the top 2 bits of the base register.  There are a few system
   instructions that have a 3-bit space specifier, for which SR0 is
   not special.  To handle this, pass ~SP.  */
static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_reg base)
{
    TCGv_ptr ptr;
    TCGv_reg tmp;
    TCGv_i64 spc;

    if (sp != 0) {
        if (sp < 0) {
            sp = ~sp;
        }
        spc = get_temp_tl(ctx);
        load_spr(ctx, spc, sp);
        return spc;
    }
    if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        return cpu_srH;
    }

    ptr = tcg_temp_new_ptr();
    tmp = tcg_temp_new();
    spc = get_temp_tl(ctx);

    tcg_gen_shri_reg(tmp, base, TARGET_REGISTER_BITS - 5);
    tcg_gen_andi_reg(tmp, tmp, 030);
    tcg_gen_trunc_reg_ptr(ptr, tmp);
    tcg_temp_free(tmp);

    tcg_gen_add_ptr(ptr, ptr, cpu_env);
    tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));
    tcg_temp_free_ptr(ptr);

    return spc;
}
#endif

static void form_gva(DisasContext *ctx, TCGv_tl *pgva, TCGv_reg *pofs,
                     unsigned rb, unsigned rx, int scale, target_sreg disp,
                     unsigned sp, int modify, bool is_phys)
{
    TCGv_reg base = load_gpr(ctx, rb);
    TCGv_reg ofs;

    /* Note that RX is mutually exclusive with DISP.  */
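    /* Note also that pre-modify (-1) accesses use the updated address,
       while post-modify (+1) accesses use the original base.  */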
    if (rx) {
        ofs = get_temp(ctx);
        tcg_gen_shli_reg(ofs, cpu_gr[rx], scale);
        tcg_gen_add_reg(ofs, ofs, base);
    } else if (disp || modify) {
        ofs = get_temp(ctx);
        tcg_gen_addi_reg(ofs, base, disp);
    } else {
        ofs = base;
    }

    *pofs = ofs;
#ifdef CONFIG_USER_ONLY
    *pgva = (modify <= 0 ? ofs : base);
#else
    TCGv_tl addr = get_temp_tl(ctx);
    tcg_gen_extu_reg_tl(addr, modify <= 0 ? ofs : base);
    if (ctx->tb_flags & PSW_W) {
        tcg_gen_andi_tl(addr, addr, 0x3fffffffffffffffull);
    }
    if (!is_phys) {
        tcg_gen_or_tl(addr, addr, space_select(ctx, sp, base));
    }
    *pgva = addr;
#endif
}

/* Emit a memory load.  The modify parameter should be
 * < 0 for pre-modify,
 * > 0 for post-modify,
 * = 0 for no base register update.
 */
static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
                        unsigned rx, int scale, target_sreg disp,
                        unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
                        unsigned rx, int scale, target_sreg disp,
                        unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

#if TARGET_REGISTER_BITS == 64
#define do_load_reg   do_load_64
#define do_store_reg  do_store_64
#else
#define do_load_reg   do_load_32
#define do_store_reg  do_store_32
#endif

static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
                    unsigned rx, int scale, target_sreg disp,
                    unsigned sp, int modify, MemOp mop)
{
    TCGv_reg dest;

    nullify_over(ctx);

    if (modify == 0) {
        /* No base register update.  */
        dest = dest_gpr(ctx, rt);
    } else {
        /* Make sure if RT == RB, we see the result of the load.  */
        dest = get_temp(ctx);
    }
    do_load_reg(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
    save_gpr(ctx, rt, dest);

    return nullify_end(ctx);
}

static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, target_sreg disp,
                      unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i32();
    do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
    save_frw_i32(rt, tmp);
    tcg_temp_free_i32(tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(cpu_env);
    }

    return nullify_end(ctx);
}

static bool trans_fldw(DisasContext *ctx, arg_ldst *a)
{
    return do_floadw(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
                     a->disp, a->sp, a->m);
}

static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, target_sreg disp,
                      unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
    save_frd(rt, tmp);
    tcg_temp_free_i64(tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(cpu_env);
    }

    return nullify_end(ctx);
}

static bool trans_fldd(DisasContext *ctx, arg_ldst *a)
{
    return do_floadd(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
                     a->disp, a->sp, a->m);
}

static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
                     target_sreg disp, unsigned sp,
                     int modify, MemOp mop)
{
    nullify_over(ctx);
    do_store_reg(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
    return nullify_end(ctx);
}

static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = load_frw_i32(rt);
    do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
    tcg_temp_free_i32(tmp);

    return nullify_end(ctx);
}

static bool trans_fstw(DisasContext *ctx, arg_ldst *a)
{
    return do_fstorew(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
                      a->disp, a->sp, a->m);
}

static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = load_frd(rt);
    do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
    tcg_temp_free_i64(tmp);

    return nullify_end(ctx);
}

static bool trans_fstd(DisasContext *ctx, arg_ldst *a)
{
    return do_fstored(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
                      a->disp, a->sp, a->m);
}

static bool do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
{
    TCGv_i32 tmp;

    nullify_over(ctx);
    tmp = load_frw0_i32(ra);

    func(tmp, cpu_env, tmp);

    save_frw_i32(rt, tmp);
    tcg_temp_free_i32(tmp);
    return nullify_end(ctx);
}

static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    nullify_over(ctx);
    src = load_frd(ra);
    dst = tcg_temp_new_i32();

    func(dst, cpu_env, src);

    tcg_temp_free_i64(src);
    save_frw_i32(rt, dst);
    tcg_temp_free_i32(dst);
    return nullify_end(ctx);
}

static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
{
    TCGv_i64 tmp;

    nullify_over(ctx);
    tmp = load_frd0(ra);

    func(tmp, cpu_env, tmp);

    save_frd(rt, tmp);
    tcg_temp_free_i64(tmp);
    return nullify_end(ctx);
}

static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
{
    TCGv_i32 src;
    TCGv_i64 dst;

    nullify_over(ctx);
    src = load_frw0_i32(ra);
    dst = tcg_temp_new_i64();

    func(dst, cpu_env, src);

    tcg_temp_free_i32(src);
    save_frd(rt, dst);
    tcg_temp_free_i64(dst);
    return nullify_end(ctx);
}

static bool do_fop_weww(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
{
    TCGv_i32 a, b;

    nullify_over(ctx);
    a = load_frw0_i32(ra);
    b = load_frw0_i32(rb);

    func(a, cpu_env, a, b);

    tcg_temp_free_i32(b);
    save_frw_i32(rt, a);
    tcg_temp_free_i32(a);
    return nullify_end(ctx);
}

static bool do_fop_dedd(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
{
    TCGv_i64 a, b;

    nullify_over(ctx);
    a = load_frd0(ra);
    b = load_frd0(rb);

    func(a, cpu_env, a, b);

    tcg_temp_free_i64(b);
    save_frd(rt, a);
    tcg_temp_free_i64(a);
    return nullify_end(ctx);
}

/* Emit an unconditional branch to a direct target, which may or may not
   have already had nullification handled.  */
static bool do_dbranch(DisasContext *ctx, target_ureg dest,
                       unsigned link, bool is_n)
{
    if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        ctx->iaoq_n = dest;
        if (is_n) {
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
    } else {
        nullify_over(ctx);

        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }

        if (is_n && use_nullify_skip(ctx)) {
            nullify_set(ctx, 0);
            gen_goto_tb(ctx, 0, dest, dest + 4);
        } else {
            nullify_set(ctx, is_n);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
        }

        nullify_end(ctx);

        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
        ctx->base.is_jmp = DISAS_NORETURN;
    }
    return true;
}

/* Emit a conditional branch to a direct target.  If the branch itself
   is nullified, we should have already used nullify_over.  */
static bool do_cbranch(DisasContext *ctx, target_sreg disp, bool is_n,
                       DisasCond *cond)
{
    target_ureg dest = iaoq_dest(ctx, disp);
    TCGLabel *taken = NULL;
    TCGCond c = cond->c;
1827    bool n;
1828
1829    assert(ctx->null_cond.c == TCG_COND_NEVER);
1830
1831    /* Handle TRUE and NEVER as direct branches.  */
1832    if (c == TCG_COND_ALWAYS) {
1833        return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
1834    }
1835    if (c == TCG_COND_NEVER) {
1836        return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
1837    }
1838
1839    taken = gen_new_label();
1840    tcg_gen_brcond_reg(c, cond->a0, cond->a1, taken);
1841    cond_free(cond);
1842
1843    /* Not taken: Condition not satisfied; nullify on backward branches. */
1844    n = is_n && disp < 0;
1845    if (n && use_nullify_skip(ctx)) {
1846        nullify_set(ctx, 0);
1847        gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
1848    } else {
1849        if (!n && ctx->null_lab) {
1850            gen_set_label(ctx->null_lab);
1851            ctx->null_lab = NULL;
1852        }
1853        nullify_set(ctx, n);
1854        if (ctx->iaoq_n == -1) {
1855            /* The temporary iaoq_n_var died at the branch above.
1856               Regenerate it here instead of saving it.  */
1857            tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
1858        }
1859        gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
1860    }
1861
1862    gen_set_label(taken);
1863
1864    /* Taken: Condition satisfied; nullify on forward branches.  */
1865    n = is_n && disp >= 0;
1866    if (n && use_nullify_skip(ctx)) {
1867        nullify_set(ctx, 0);
1868        gen_goto_tb(ctx, 1, dest, dest + 4);
1869    } else {
1870        nullify_set(ctx, n);
1871        gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
1872    }
1873
1874    /* Not taken: the branch itself was nullified.  */
1875    if (ctx->null_lab) {
1876        gen_set_label(ctx->null_lab);
1877        ctx->null_lab = NULL;
1878        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
1879    } else {
1880        ctx->base.is_jmp = DISAS_NORETURN;
1881    }
1882    return true;
1883}
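/*
 * An illustration of the ",n" rule encoded by the two "n = is_n && disp"
 * tests above (the asm is a sketch from our reading of the architecture,
 * not taken from this file): a backward conditional branch nullifies its
 * delay slot only when NOT taken, so in
 *
 *     loop:   ...
 *             addib,<>,n  -1,%r4,loop   ; taken: delay slot executes
 *             ldw         0(%r5),%r6    ; nullified only on loop exit
 *
 * the delay slot runs on every iteration; a forward branch nullifies
 * only when taken, skipping the delay slot on the jump.
 */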
1884
1885/* Emit an unconditional branch to an indirect target.  This handles
1886   nullification of the branch itself.  */
1887static bool do_ibranch(DisasContext *ctx, TCGv_reg dest,
1888                       unsigned link, bool is_n)
1889{
1890    TCGv_reg a0, a1, next, tmp;
1891    TCGCond c;
1892
1893    assert(ctx->null_lab == NULL);
1894
1895    if (ctx->null_cond.c == TCG_COND_NEVER) {
1896        if (link != 0) {
1897            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1898        }
1899        next = get_temp(ctx);
1900        tcg_gen_mov_reg(next, dest);
1901        if (is_n) {
1902            if (use_nullify_skip(ctx)) {
1903                tcg_gen_mov_reg(cpu_iaoq_f, next);
1904                tcg_gen_addi_reg(cpu_iaoq_b, next, 4);
1905                nullify_set(ctx, 0);
1906                ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
1907                return true;
1908            }
1909            ctx->null_cond.c = TCG_COND_ALWAYS;
1910        }
1911        ctx->iaoq_n = -1;
1912        ctx->iaoq_n_var = next;
1913    } else if (is_n && use_nullify_skip(ctx)) {
1914        /* The (conditional) branch, B, nullifies the next insn, N,
1915           and we're allowed to skip executing N (no single-step or
1916           tracepoint in effect).  Since the goto_ptr that we must use
1917           for the indirect branch consumes no special resources, we
1918           can (conditionally) skip B and continue execution.  */
1919        /* The use_nullify_skip test implies we have a known control path.  */
1920        tcg_debug_assert(ctx->iaoq_b != -1);
1921        tcg_debug_assert(ctx->iaoq_n != -1);
1922
1923        /* We do have to handle the non-local temporary, DEST, before
1924           branching.  Since IAOQ_F is not really live at this point, we
1925           can simply store DEST optimistically.  Similarly with IAOQ_B.  */
1926        tcg_gen_mov_reg(cpu_iaoq_f, dest);
1927        tcg_gen_addi_reg(cpu_iaoq_b, dest, 4);
1928
1929        nullify_over(ctx);
1930        if (link != 0) {
1931            tcg_gen_movi_reg(cpu_gr[link], ctx->iaoq_n);
1932        }
1933        tcg_gen_lookup_and_goto_ptr();
1934        return nullify_end(ctx);
1935    } else {
1936        c = ctx->null_cond.c;
1937        a0 = ctx->null_cond.a0;
1938        a1 = ctx->null_cond.a1;
1939
1940        tmp = tcg_temp_new();
1941        next = get_temp(ctx);
1942
1943        copy_iaoq_entry(tmp, ctx->iaoq_n, ctx->iaoq_n_var);
1944        tcg_gen_movcond_reg(c, next, a0, a1, tmp, dest);
1945        ctx->iaoq_n = -1;
1946        ctx->iaoq_n_var = next;
1947
1948        if (link != 0) {
1949            tcg_gen_movcond_reg(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
1950        }
1951
1952        if (is_n) {
1953            /* The branch nullifies the next insn, which means the state of N
1954               after the branch is the inverse of the state of N that applied
1955               to the branch.  */
1956            tcg_gen_setcond_reg(tcg_invert_cond(c), cpu_psw_n, a0, a1);
1957            cond_free(&ctx->null_cond);
1958            ctx->null_cond = cond_make_n();
1959            ctx->psw_n_nonzero = true;
1960        } else {
1961            cond_free(&ctx->null_cond);
1962        }
1963    }
1964    return true;
1965}
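/* In the fallback case above, the movcond pair keeps everything on the
   straight-line path: if the branch insn is itself nullified, NEXT takes
   the fall-through address and the link register keeps its old value;
   otherwise NEXT takes DEST and the link is written.  */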
1966
1967/* Implement
1968 *    if (IAOQ_Front{30..31} < GR[b]{30..31})
1969 *      IAOQ_Next{30..31} ← GR[b]{30..31};
1970 *    else
1971 *      IAOQ_Next{30..31} ← IAOQ_Front{30..31};
1972 * which keeps the privilege level from being increased.
1973 */
1974static TCGv_reg do_ibranch_priv(DisasContext *ctx, TCGv_reg offset)
1975{
1976    TCGv_reg dest;
1977    switch (ctx->privilege) {
1978    case 0:
1979        /* Privilege 0 is maximum and is allowed to decrease.  */
1980        return offset;
1981    case 3:
1982        /* Privilege 3 is minimum and is never allowed to increase.  */
1983        dest = get_temp(ctx);
1984        tcg_gen_ori_reg(dest, offset, 3);
1985        break;
1986    default:
1987        dest = get_temp(ctx);
1988        tcg_gen_andi_reg(dest, offset, -4);
1989        tcg_gen_ori_reg(dest, dest, ctx->privilege);
1990        tcg_gen_movcond_reg(TCG_COND_GTU, dest, dest, offset, dest, offset);
1991        break;
1992    }
1993    return dest;
1994}
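/* A worked example of the clamp above, with illustrative numbers: at
   privilege 2, a target offset of 0x1000 (requesting priv 0) gives
   dest = (0x1000 & -4) | 2 = 0x1002; 0x1002 > 0x1000 unsigned, so the
   movcond keeps DEST and execution stays at priv 2.  A target of
   0x1003 (priv 3) exceeds 0x1002, so OFFSET wins and the decrease in
   privilege is allowed.  */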
1995
1996#ifdef CONFIG_USER_ONLY
1997/* On Linux, page zero is normally marked execute only + gateway.
1998   Therefore normal read or write is supposed to fail, but specific
1999   offsets have kernel code mapped to raise permissions to implement
2000   system calls.  Handling this via an explicit check here, rather
2001    than in the "be disp(sr2,r0)" instruction that probably sent us
2002   here, is the easiest way to handle the branch delay slot on the
2003   aforementioned BE.  */
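/* For orientation only: the usual way a guest reaches the 0x100 case
   below is the Linux syscall sequence (per the parisc ABI, sketched
   here rather than quoted from any source):
       ble   0x100(%sr2, %r0)    ; branch external into the gateway page
       ldi   __NR_xxx, %r20      ; syscall number, in the delay slot
 */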
2004static void do_page_zero(DisasContext *ctx)
2005{
2006    /* If by some means we get here with PSW[N]=1, that implies that
2007       the B,GATE instruction would be skipped, and we'd fault on the
2008       next insn within the privileged page.  */
2009    switch (ctx->null_cond.c) {
2010    case TCG_COND_NEVER:
2011        break;
2012    case TCG_COND_ALWAYS:
2013        tcg_gen_movi_reg(cpu_psw_n, 0);
2014        goto do_sigill;
2015    default:
2016        /* Since this is always the first (and only) insn within the
2017           TB, we should know the state of PSW[N] from TB->FLAGS.  */
2018        g_assert_not_reached();
2019    }
2020
2021    /* Check that we didn't arrive here via some means that allowed
2022       non-sequential instruction execution.  Normally the PSW[B] bit
2023       detects this by preventing the B,GATE instruction from executing
2024       under such conditions.  */
2025    if (ctx->iaoq_b != ctx->iaoq_f + 4) {
2026        goto do_sigill;
2027    }
2028
2029    switch (ctx->iaoq_f & -4) {
2030    case 0x00: /* Null pointer call */
2031        gen_excp_1(EXCP_IMP);
2032        ctx->base.is_jmp = DISAS_NORETURN;
2033        break;
2034
2035    case 0xb0: /* LWS */
2036        gen_excp_1(EXCP_SYSCALL_LWS);
2037        ctx->base.is_jmp = DISAS_NORETURN;
2038        break;
2039
2040    case 0xe0: /* SET_THREAD_POINTER */
2041        tcg_gen_st_reg(cpu_gr[26], cpu_env, offsetof(CPUHPPAState, cr[27]));
2042        tcg_gen_ori_reg(cpu_iaoq_f, cpu_gr[31], 3);
2043        tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
2044        ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
2045        break;
2046
2047    case 0x100: /* SYSCALL */
2048        gen_excp_1(EXCP_SYSCALL);
2049        ctx->base.is_jmp = DISAS_NORETURN;
2050        break;
2051
2052    default:
2053    do_sigill:
2054        gen_excp_1(EXCP_ILL);
2055        ctx->base.is_jmp = DISAS_NORETURN;
2056        break;
2057    }
2058}
2059#endif
2060
2061static bool trans_nop(DisasContext *ctx, arg_nop *a)
2062{
2063    cond_free(&ctx->null_cond);
2064    return true;
2065}
2066
2067static bool trans_break(DisasContext *ctx, arg_break *a)
2068{
2069    return gen_excp_iir(ctx, EXCP_BREAK);
2070}
2071
2072static bool trans_sync(DisasContext *ctx, arg_sync *a)
2073{
2074    /* No point in nullifying the memory barrier.  */
2075    tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
2076
2077    cond_free(&ctx->null_cond);
2078    return true;
2079}
2080
2081static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
2082{
2083    unsigned rt = a->t;
2084    TCGv_reg tmp = dest_gpr(ctx, rt);
2085    tcg_gen_movi_reg(tmp, ctx->iaoq_f);
2086    save_gpr(ctx, rt, tmp);
2087
2088    cond_free(&ctx->null_cond);
2089    return true;
2090}
2091
2092static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
2093{
2094    unsigned rt = a->t;
2095    unsigned rs = a->sp;
2096    TCGv_i64 t0 = tcg_temp_new_i64();
2097    TCGv_reg t1 = tcg_temp_new();
2098
2099    load_spr(ctx, t0, rs);
2100    tcg_gen_shri_i64(t0, t0, 32);
2101    tcg_gen_trunc_i64_reg(t1, t0);
2102
2103    save_gpr(ctx, rt, t1);
2104    tcg_temp_free(t1);
2105    tcg_temp_free_i64(t0);
2106
2107    cond_free(&ctx->null_cond);
2108    return true;
2109}
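/* Note the layout that MFSP above and MTSP below both rely on: the
   32-bit space identifier lives in the upper half of the 64-bit sr[]
   slot, hence the matching shifts by 32 on read and write.  */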
2110
2111static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
2112{
2113    unsigned rt = a->t;
2114    unsigned ctl = a->r;
2115    TCGv_reg tmp;
2116
2117    switch (ctl) {
2118    case CR_SAR:
2119#ifdef TARGET_HPPA64
2120        if (a->e == 0) {
2121            /* MFSAR without ,W masks low 5 bits.  */
2122            tmp = dest_gpr(ctx, rt);
2123            tcg_gen_andi_reg(tmp, cpu_sar, 31);
2124            save_gpr(ctx, rt, tmp);
2125            goto done;
2126        }
2127#endif
2128        save_gpr(ctx, rt, cpu_sar);
2129        goto done;
2130    case CR_IT: /* Interval Timer */
2131        /* FIXME: Respect PSW_S bit.  */
2132        nullify_over(ctx);
2133        tmp = dest_gpr(ctx, rt);
2134        if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
2135            gen_io_start();
2136            gen_helper_read_interval_timer(tmp);
2137            ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2138        } else {
2139            gen_helper_read_interval_timer(tmp);
2140        }
2141        save_gpr(ctx, rt, tmp);
2142        return nullify_end(ctx);
2143    case 26:
2144    case 27:
2145        break;
2146    default:
2147        /* All other control registers are privileged.  */
2148        CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2149        break;
2150    }
2151
2152    tmp = get_temp(ctx);
2153    tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2154    save_gpr(ctx, rt, tmp);
2155
2156 done:
2157    cond_free(&ctx->null_cond);
2158    return true;
2159}
2160
2161static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
2162{
2163    unsigned rr = a->r;
2164    unsigned rs = a->sp;
2165    TCGv_i64 t64;
2166
2167    if (rs >= 5) {
2168        CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2169    }
2170    nullify_over(ctx);
2171
2172    t64 = tcg_temp_new_i64();
2173    tcg_gen_extu_reg_i64(t64, load_gpr(ctx, rr));
2174    tcg_gen_shli_i64(t64, t64, 32);
2175
2176    if (rs >= 4) {
2177        tcg_gen_st_i64(t64, cpu_env, offsetof(CPUHPPAState, sr[rs]));
2178        ctx->tb_flags &= ~TB_FLAG_SR_SAME;
2179    } else {
2180        tcg_gen_mov_i64(cpu_sr[rs], t64);
2181    }
2182    tcg_temp_free_i64(t64);
2183
2184    return nullify_end(ctx);
2185}
2186
2187static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
2188{
2189    unsigned ctl = a->t;
2190    TCGv_reg reg;
2191    TCGv_reg tmp;
2192
2193    if (ctl == CR_SAR) {
2194        reg = load_gpr(ctx, a->r);
2195        tmp = tcg_temp_new();
2196        tcg_gen_andi_reg(tmp, reg, TARGET_REGISTER_BITS - 1);
2197        save_or_nullify(ctx, cpu_sar, tmp);
2198        tcg_temp_free(tmp);
2199
2200        cond_free(&ctx->null_cond);
2201        return true;
2202    }
2203
2204    /* All other control registers are privileged or read-only.  */
2205    CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2206
2207#ifndef CONFIG_USER_ONLY
2208    nullify_over(ctx);
2209    reg = load_gpr(ctx, a->r);
2210
2211    switch (ctl) {
2212    case CR_IT:
2213        gen_helper_write_interval_timer(cpu_env, reg);
2214        break;
2215    case CR_EIRR:
2216        gen_helper_write_eirr(cpu_env, reg);
2217        break;
2218    case CR_EIEM:
2219        gen_helper_write_eiem(cpu_env, reg);
2220        ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2221        break;
2222
2223    case CR_IIASQ:
2224    case CR_IIAOQ:
2225        /* FIXME: Respect PSW_Q bit */
2226        /* The write advances the queue and stores to the back element.  */
2227        tmp = get_temp(ctx);
2228        tcg_gen_ld_reg(tmp, cpu_env,
2229                       offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2230        tcg_gen_st_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2231        tcg_gen_st_reg(reg, cpu_env,
2232                       offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2233        break;
2234
2235    case CR_PID1:
2236    case CR_PID2:
2237    case CR_PID3:
2238    case CR_PID4:
2239        tcg_gen_st_reg(reg, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2240#ifndef CONFIG_USER_ONLY
2241        gen_helper_change_prot_id(cpu_env);
2242#endif
2243        break;
2244
2245    default:
2246        tcg_gen_st_reg(reg, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2247        break;
2248    }
2249    return nullify_end(ctx);
2250#endif
2251}
2252
2253static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
2254{
2255    TCGv_reg tmp = tcg_temp_new();
2256
2257    tcg_gen_not_reg(tmp, load_gpr(ctx, a->r));
2258    tcg_gen_andi_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
2259    save_or_nullify(ctx, cpu_sar, tmp);
2260    tcg_temp_free(tmp);
2261
2262    cond_free(&ctx->null_cond);
2263    return true;
2264}
2265
2266static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a)
2267{
2268    TCGv_reg dest = dest_gpr(ctx, a->t);
2269
2270#ifdef CONFIG_USER_ONLY
2271    /* We don't implement space registers in user mode. */
2272    tcg_gen_movi_reg(dest, 0);
2273#else
2274    TCGv_i64 t0 = tcg_temp_new_i64();
2275
2276    tcg_gen_mov_i64(t0, space_select(ctx, a->sp, load_gpr(ctx, a->b)));
2277    tcg_gen_shri_i64(t0, t0, 32);
2278    tcg_gen_trunc_i64_reg(dest, t0);
2279
2280    tcg_temp_free_i64(t0);
2281#endif
2282    save_gpr(ctx, a->t, dest);
2283
2284    cond_free(&ctx->null_cond);
2285    return true;
2286}
2287
2288static bool trans_rsm(DisasContext *ctx, arg_rsm *a)
2289{
2290    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2291#ifndef CONFIG_USER_ONLY
2292    TCGv_reg tmp;
2293
2294    nullify_over(ctx);
2295
2296    tmp = get_temp(ctx);
2297    tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw));
2298    tcg_gen_andi_reg(tmp, tmp, ~a->i);
2299    gen_helper_swap_system_mask(tmp, cpu_env, tmp);
2300    save_gpr(ctx, a->t, tmp);
2301
2302    /* Exit the TB to recognize new interrupts, e.g. PSW_M.  */
2303    ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2304    return nullify_end(ctx);
2305#endif
2306}
2307
2308static bool trans_ssm(DisasContext *ctx, arg_ssm *a)
2309{
2310    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2311#ifndef CONFIG_USER_ONLY
2312    TCGv_reg tmp;
2313
2314    nullify_over(ctx);
2315
2316    tmp = get_temp(ctx);
2317    tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw));
2318    tcg_gen_ori_reg(tmp, tmp, a->i);
2319    gen_helper_swap_system_mask(tmp, cpu_env, tmp);
2320    save_gpr(ctx, a->t, tmp);
2321
2322    /* Exit the TB to recognize new interrupts, e.g. PSW_I.  */
2323    ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2324    return nullify_end(ctx);
2325#endif
2326}
2327
2328static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a)
2329{
2330    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2331#ifndef CONFIG_USER_ONLY
2332    TCGv_reg tmp, reg;
2333    nullify_over(ctx);
2334
2335    reg = load_gpr(ctx, a->r);
2336    tmp = get_temp(ctx);
2337    gen_helper_swap_system_mask(tmp, cpu_env, reg);
2338
2339    /* Exit the TB to recognize new interrupts.  */
2340    ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2341    return nullify_end(ctx);
2342#endif
2343}
2344
2345static bool do_rfi(DisasContext *ctx, bool rfi_r)
2346{
2347    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2348#ifndef CONFIG_USER_ONLY
2349    nullify_over(ctx);
2350
2351    if (rfi_r) {
2352        gen_helper_rfi_r(cpu_env);
2353    } else {
2354        gen_helper_rfi(cpu_env);
2355    }
2356    /* Exit the TB to recognize new interrupts.  */
2357    tcg_gen_exit_tb(NULL, 0);
2358    ctx->base.is_jmp = DISAS_NORETURN;
2359
2360    return nullify_end(ctx);
2361#endif
2362}
2363
2364static bool trans_rfi(DisasContext *ctx, arg_rfi *a)
2365{
2366    return do_rfi(ctx, false);
2367}
2368
2369static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a)
2370{
2371    return do_rfi(ctx, true);
2372}
2373
2374static bool trans_halt(DisasContext *ctx, arg_halt *a)
2375{
2376    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2377#ifndef CONFIG_USER_ONLY
2378    nullify_over(ctx);
2379    gen_helper_halt(cpu_env);
2380    ctx->base.is_jmp = DISAS_NORETURN;
2381    return nullify_end(ctx);
2382#endif
2383}
2384
2385static bool trans_reset(DisasContext *ctx, arg_reset *a)
2386{
2387    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2388#ifndef CONFIG_USER_ONLY
2389    nullify_over(ctx);
2390    gen_helper_reset(cpu_env);
2391    ctx->base.is_jmp = DISAS_NORETURN;
2392    return nullify_end(ctx);
2393#endif
2394}
2395
2396static bool trans_getshadowregs(DisasContext *ctx, arg_getshadowregs *a)
2397{
2398    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2399#ifndef CONFIG_USER_ONLY
2400    nullify_over(ctx);
2401    gen_helper_getshadowregs(cpu_env);
2402    return nullify_end(ctx);
2403#endif
2404}
2405
2406static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
2407{
2408    if (a->m) {
2409        TCGv_reg dest = dest_gpr(ctx, a->b);
2410        TCGv_reg src1 = load_gpr(ctx, a->b);
2411        TCGv_reg src2 = load_gpr(ctx, a->x);
2412
2413        /* The only thing we need to do is the base register modification.  */
2414        tcg_gen_add_reg(dest, src1, src2);
2415        save_gpr(ctx, a->b, dest);
2416    }
2417    cond_free(&ctx->null_cond);
2418    return true;
2419}
2420
2421static bool trans_probe(DisasContext *ctx, arg_probe *a)
2422{
2423    TCGv_reg dest, ofs;
2424    TCGv_i32 level, want;
2425    TCGv_tl addr;
2426
2427    nullify_over(ctx);
2428
2429    dest = dest_gpr(ctx, a->t);
2430    form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2431
2432    if (a->imm) {
2433        level = tcg_constant_i32(a->ri);
2434    } else {
2435        level = tcg_temp_new_i32();
2436        tcg_gen_trunc_reg_i32(level, load_gpr(ctx, a->ri));
2437        tcg_gen_andi_i32(level, level, 3);
2438    }
2439    want = tcg_constant_i32(a->write ? PAGE_WRITE : PAGE_READ);
2440
2441    gen_helper_probe(dest, cpu_env, addr, level, want);
2442
2443    tcg_temp_free_i32(level);
2444
2445    save_gpr(ctx, a->t, dest);
2446    return nullify_end(ctx);
2447}
2448
2449static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a)
2450{
2451    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2452#ifndef CONFIG_USER_ONLY
2453    TCGv_tl addr;
2454    TCGv_reg ofs, reg;
2455
2456    nullify_over(ctx);
2457
2458    form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2459    reg = load_gpr(ctx, a->r);
2460    if (a->addr) {
2461        gen_helper_itlba(cpu_env, addr, reg);
2462    } else {
2463        gen_helper_itlbp(cpu_env, addr, reg);
2464    }
2465
2466    /* Exit TB for TLB change if mmu is enabled.  */
2467    if (ctx->tb_flags & PSW_C) {
2468        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2469    }
2470    return nullify_end(ctx);
2471#endif
2472}
2473
2474static bool trans_pxtlbx(DisasContext *ctx, arg_pxtlbx *a)
2475{
2476    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2477#ifndef CONFIG_USER_ONLY
2478    TCGv_tl addr;
2479    TCGv_reg ofs;
2480
2481    nullify_over(ctx);
2482
2483    form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2484    if (a->m) {
2485        save_gpr(ctx, a->b, ofs);
2486    }
2487    if (a->local) {
2488        gen_helper_ptlbe(cpu_env);
2489    } else {
2490        gen_helper_ptlb(cpu_env, addr);
2491    }
2492
2493    /* Exit TB for TLB change if mmu is enabled.  */
2494    if (ctx->tb_flags & PSW_C) {
2495        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2496    }
2497    return nullify_end(ctx);
2498#endif
2499}
2500
2501/*
2502 * Implement the pcxl and pcxl2 Fast TLB Insert instructions.
2503 * See
2504 *     https://parisc.wiki.kernel.org/images-parisc/a/a9/Pcxl2_ers.pdf
2505 *     page 13-9 (195/206)
2506 */
2507static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a)
2508{
2509    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2510#ifndef CONFIG_USER_ONLY
2511    TCGv_tl addr, atl, stl;
2512    TCGv_reg reg;
2513
2514    nullify_over(ctx);
2515
2516    /*
2517     * FIXME:
2518     *  if (not (pcxl or pcxl2))
2519     *    return gen_illegal(ctx);
2520     *
2521     * Note for future: these are 32-bit systems; no hppa64.
2522     */
2523
2524    atl = tcg_temp_new_tl();
2525    stl = tcg_temp_new_tl();
2526    addr = tcg_temp_new_tl();
2527
2528    tcg_gen_ld32u_i64(stl, cpu_env,
2529                      a->data ? offsetof(CPUHPPAState, cr[CR_ISR])
2530                      : offsetof(CPUHPPAState, cr[CR_IIASQ]));
2531    tcg_gen_ld32u_i64(atl, cpu_env,
2532                      a->data ? offsetof(CPUHPPAState, cr[CR_IOR])
2533                      : offsetof(CPUHPPAState, cr[CR_IIAOQ]));
2534    tcg_gen_shli_i64(stl, stl, 32);
2535    tcg_gen_or_tl(addr, atl, stl);
2536    tcg_temp_free_tl(atl);
2537    tcg_temp_free_tl(stl);
2538
2539    reg = load_gpr(ctx, a->r);
2540    if (a->addr) {
2541        gen_helper_itlba(cpu_env, addr, reg);
2542    } else {
2543        gen_helper_itlbp(cpu_env, addr, reg);
2544    }
2545    tcg_temp_free_tl(addr);
2546
2547    /* Exit TB for TLB change if mmu is enabled.  */
2548    if (ctx->tb_flags & PSW_C) {
2549        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2550    }
2551    return nullify_end(ctx);
2552#endif
2553}
2554
2555static bool trans_lpa(DisasContext *ctx, arg_ldst *a)
2556{
2557    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2558#ifndef CONFIG_USER_ONLY
2559    TCGv_tl vaddr;
2560    TCGv_reg ofs, paddr;
2561
2562    nullify_over(ctx);
2563
2564    form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2565
2566    paddr = tcg_temp_new();
2567    gen_helper_lpa(paddr, cpu_env, vaddr);
2568
2569    /* Note that physical address result overrides base modification.  */
2570    if (a->m) {
2571        save_gpr(ctx, a->b, ofs);
2572    }
2573    save_gpr(ctx, a->t, paddr);
2574    tcg_temp_free(paddr);
2575
2576    return nullify_end(ctx);
2577#endif
2578}
2579
2580static bool trans_lci(DisasContext *ctx, arg_lci *a)
2581{
2582    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2583
2584    /* The Coherence Index is an implementation-defined function of the
2585       physical address.  Two addresses with the same CI have a coherent
2586       view of the cache.  Our implementation returns 0 for all
2587       addresses, since the entire address space is coherent.  */
2588    save_gpr(ctx, a->t, tcg_constant_reg(0));
2589
2590    cond_free(&ctx->null_cond);
2591    return true;
2592}
2593
2594static bool trans_add(DisasContext *ctx, arg_rrr_cf_sh *a)
2595{
2596    return do_add_reg(ctx, a, false, false, false, false);
2597}
2598
2599static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_sh *a)
2600{
2601    return do_add_reg(ctx, a, true, false, false, false);
2602}
2603
2604static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
2605{
2606    return do_add_reg(ctx, a, false, true, false, false);
2607}
2608
2609static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_sh *a)
2610{
2611    return do_add_reg(ctx, a, false, false, false, true);
2612}
2613
2614static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
2615{
2616    return do_add_reg(ctx, a, false, true, false, true);
2617}
2618
2619static bool trans_sub(DisasContext *ctx, arg_rrr_cf *a)
2620{
2621    return do_sub_reg(ctx, a, false, false, false);
2622}
2623
2624static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf *a)
2625{
2626    return do_sub_reg(ctx, a, true, false, false);
2627}
2628
2629static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf *a)
2630{
2631    return do_sub_reg(ctx, a, false, false, true);
2632}
2633
2634static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf *a)
2635{
2636    return do_sub_reg(ctx, a, true, false, true);
2637}
2638
2639static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf *a)
2640{
2641    return do_sub_reg(ctx, a, false, true, false);
2642}
2643
2644static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf *a)
2645{
2646    return do_sub_reg(ctx, a, true, true, false);
2647}
2648
2649static bool trans_andcm(DisasContext *ctx, arg_rrr_cf *a)
2650{
2651    return do_log_reg(ctx, a, tcg_gen_andc_reg);
2652}
2653
2654static bool trans_and(DisasContext *ctx, arg_rrr_cf *a)
2655{
2656    return do_log_reg(ctx, a, tcg_gen_and_reg);
2657}
2658
2659static bool trans_or(DisasContext *ctx, arg_rrr_cf *a)
2660{
2661    if (a->cf == 0) {
2662        unsigned r2 = a->r2;
2663        unsigned r1 = a->r1;
2664        unsigned rt = a->t;
2665
2666        if (rt == 0) { /* NOP */
2667            cond_free(&ctx->null_cond);
2668            return true;
2669        }
2670        if (r2 == 0) { /* COPY */
2671            if (r1 == 0) {
2672                TCGv_reg dest = dest_gpr(ctx, rt);
2673                tcg_gen_movi_reg(dest, 0);
2674                save_gpr(ctx, rt, dest);
2675            } else {
2676                save_gpr(ctx, rt, cpu_gr[r1]);
2677            }
2678            cond_free(&ctx->null_cond);
2679            return true;
2680        }
2681#ifndef CONFIG_USER_ONLY
2682        /* These are QEMU extensions and are nops in the real architecture:
2683         *
2684         * or %r10,%r10,%r10 -- idle loop; wait for interrupt
2685         * or %r31,%r31,%r31 -- death loop; offline cpu
2686         *                      currently implemented as idle.
2687         */
2688        if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */
2689            /* No need to check for supervisor, as userland can only pause
2690               until the next timer interrupt.  */
2691            nullify_over(ctx);
2692
2693            /* Advance the instruction queue.  */
2694            copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
2695            copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
2696            nullify_set(ctx, 0);
2697
2698            /* Tell the qemu main loop to halt until this cpu has work.  */
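            /* (cpu_env points at HPPACPU.env, so the negative result of
               the offsetof difference below reaches back from env to
               CPUState.halted.)  */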
2699            tcg_gen_st_i32(tcg_constant_i32(1), cpu_env,
2700                           offsetof(CPUState, halted) - offsetof(HPPACPU, env));
2701            gen_excp_1(EXCP_HALTED);
2702            ctx->base.is_jmp = DISAS_NORETURN;
2703
2704            return nullify_end(ctx);
2705        }
2706#endif
2707    }
2708    return do_log_reg(ctx, a, tcg_gen_or_reg);
2709}
2710
2711static bool trans_xor(DisasContext *ctx, arg_rrr_cf *a)
2712{
2713    return do_log_reg(ctx, a, tcg_gen_xor_reg);
2714}
2715
2716static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf *a)
2717{
2718    TCGv_reg tcg_r1, tcg_r2;
2719
2720    if (a->cf) {
2721        nullify_over(ctx);
2722    }
2723    tcg_r1 = load_gpr(ctx, a->r1);
2724    tcg_r2 = load_gpr(ctx, a->r2);
2725    do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf);
2726    return nullify_end(ctx);
2727}
2728
2729static bool trans_uxor(DisasContext *ctx, arg_rrr_cf *a)
2730{
2731    TCGv_reg tcg_r1, tcg_r2;
2732
2733    if (a->cf) {
2734        nullify_over(ctx);
2735    }
2736    tcg_r1 = load_gpr(ctx, a->r1);
2737    tcg_r2 = load_gpr(ctx, a->r2);
2738    do_unit(ctx, a->t, tcg_r1, tcg_r2, a->cf, false, tcg_gen_xor_reg);
2739    return nullify_end(ctx);
2740}
2741
2742static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf *a, bool is_tc)
2743{
2744    TCGv_reg tcg_r1, tcg_r2, tmp;
2745
2746    if (a->cf) {
2747        nullify_over(ctx);
2748    }
2749    tcg_r1 = load_gpr(ctx, a->r1);
2750    tcg_r2 = load_gpr(ctx, a->r2);
2751    tmp = get_temp(ctx);
2752    tcg_gen_not_reg(tmp, tcg_r2);
2753    do_unit(ctx, a->t, tcg_r1, tmp, a->cf, is_tc, tcg_gen_add_reg);
2754    return nullify_end(ctx);
2755}
2756
2757static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf *a)
2758{
2759    return do_uaddcm(ctx, a, false);
2760}
2761
2762static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf *a)
2763{
2764    return do_uaddcm(ctx, a, true);
2765}
2766
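/* A sketch of the DCOR trick used below, per our reading rather than
 * the manual's wording: after the shift, PSW[CB] contributes one carry
 * bit per 4-bit digit; masking with 0x11111111 isolates those bits,
 * and multiplying by 6 expands each into the per-digit +/-6 correction
 * that packed-BCD arithmetic requires.
 */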
2767static bool do_dcor(DisasContext *ctx, arg_rr_cf *a, bool is_i)
2768{
2769    TCGv_reg tmp;
2770
2771    nullify_over(ctx);
2772
2773    tmp = get_temp(ctx);
2774    tcg_gen_shri_reg(tmp, cpu_psw_cb, 3);
2775    if (!is_i) {
2776        tcg_gen_not_reg(tmp, tmp);
2777    }
2778    tcg_gen_andi_reg(tmp, tmp, 0x11111111);
2779    tcg_gen_muli_reg(tmp, tmp, 6);
2780    do_unit(ctx, a->t, load_gpr(ctx, a->r), tmp, a->cf, false,
2781            is_i ? tcg_gen_add_reg : tcg_gen_sub_reg);
2782    return nullify_end(ctx);
2783}
2784
2785static bool trans_dcor(DisasContext *ctx, arg_rr_cf *a)
2786{
2787    return do_dcor(ctx, a, false);
2788}
2789
2790static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf *a)
2791{
2792    return do_dcor(ctx, a, true);
2793}
2794
2795static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
2796{
2797    TCGv_reg dest, add1, add2, addc, zero, in1, in2;
2798
2799    nullify_over(ctx);
2800
2801    in1 = load_gpr(ctx, a->r1);
2802    in2 = load_gpr(ctx, a->r2);
2803
2804    add1 = tcg_temp_new();
2805    add2 = tcg_temp_new();
2806    addc = tcg_temp_new();
2807    dest = tcg_temp_new();
2808    zero = tcg_constant_reg(0);
2809
2810    /* Form R1 << 1 | PSW[CB]{8}.  */
2811    tcg_gen_add_reg(add1, in1, in1);
2812    tcg_gen_add_reg(add1, add1, cpu_psw_cb_msb);
2813
2814    /* Add or subtract R2, depending on PSW[V].  Proper computation of
2815       carry{8} requires that we subtract via + ~R2 + 1, as described in
2816       the manual.  By extracting and masking V, we can produce the
2817       proper inputs to the addition without movcond.  */
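    /* Concretely, as an illustration of the lines below: when PSW[V]
       has its sign bit set, addc becomes -1, so add2 = in2 ^ -1 = ~in2
       and (addc & 1) supplies the +1, giving add1 + ~in2 + 1, i.e.
       add1 - in2; with the sign clear it is simply add1 + in2 + 0.  */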
2818    tcg_gen_sari_reg(addc, cpu_psw_v, TARGET_REGISTER_BITS - 1);
2819    tcg_gen_xor_reg(add2, in2, addc);
2820    tcg_gen_andi_reg(addc, addc, 1);
2821    /* ??? This is only correct for 32-bit.  */
2822    tcg_gen_add2_i32(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
2823    tcg_gen_add2_i32(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);
2824
2825    tcg_temp_free(addc);
2826
2827    /* Write back the result register.  */
2828    save_gpr(ctx, a->t, dest);
2829
2830    /* Write back PSW[CB].  */
2831    tcg_gen_xor_reg(cpu_psw_cb, add1, add2);
2832    tcg_gen_xor_reg(cpu_psw_cb, cpu_psw_cb, dest);
2833
2834    /* Write back PSW[V] for the division step.  */
2835    tcg_gen_neg_reg(cpu_psw_v, cpu_psw_cb_msb);
2836    tcg_gen_xor_reg(cpu_psw_v, cpu_psw_v, in2);
2837
2838    /* Install the new nullification.  */
2839    if (a->cf) {
2840        TCGv_reg sv = NULL;
2841        if (cond_need_sv(a->cf >> 1)) {
2842            /* ??? The lshift is supposed to contribute to overflow.  */
2843            sv = do_add_sv(ctx, dest, add1, add2);
2844        }
2845        ctx->null_cond = do_cond(a->cf, dest, cpu_psw_cb_msb, sv);
2846    }
2847
2848    tcg_temp_free(add1);
2849    tcg_temp_free(add2);
2850    tcg_temp_free(dest);
2851
2852    return nullify_end(ctx);
2853}
2854
2855static bool trans_addi(DisasContext *ctx, arg_rri_cf *a)
2856{
2857    return do_add_imm(ctx, a, false, false);
2858}
2859
2860static bool trans_addi_tsv(DisasContext *ctx, arg_rri_cf *a)
2861{
2862    return do_add_imm(ctx, a, true, false);
2863}
2864
2865static bool trans_addi_tc(DisasContext *ctx, arg_rri_cf *a)
2866{
2867    return do_add_imm(ctx, a, false, true);
2868}
2869
2870static bool trans_addi_tc_tsv(DisasContext *ctx, arg_rri_cf *a)
2871{
2872    return do_add_imm(ctx, a, true, true);
2873}
2874
2875static bool trans_subi(DisasContext *ctx, arg_rri_cf *a)
2876{
2877    return do_sub_imm(ctx, a, false);
2878}
2879
2880static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a)
2881{
2882    return do_sub_imm(ctx, a, true);
2883}
2884
2885static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf *a)
2886{
2887    TCGv_reg tcg_im, tcg_r2;
2888
2889    if (a->cf) {
2890        nullify_over(ctx);
2891    }
2892
2893    tcg_im = load_const(ctx, a->i);
2894    tcg_r2 = load_gpr(ctx, a->r);
2895    do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf);
2896
2897    return nullify_end(ctx);
2898}
2899
2900static bool trans_ld(DisasContext *ctx, arg_ldst *a)
2901{
2902    return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
2903                   a->disp, a->sp, a->m, a->size | MO_TE);
2904}
2905
2906static bool trans_st(DisasContext *ctx, arg_ldst *a)
2907{
2908    assert(a->x == 0 && a->scale == 0);
2909    return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE);
2910}
2911
2912static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
2913{
2914    MemOp mop = MO_TE | MO_ALIGN | a->size;
2915    TCGv_reg zero, dest, ofs;
2916    TCGv_tl addr;
2917
2918    nullify_over(ctx);
2919
2920    if (a->m) {
2921        /* Base register modification.  Make sure that if RT == RB,
2922           we see the result of the load.  */
2923        dest = get_temp(ctx);
2924    } else {
2925        dest = dest_gpr(ctx, a->t);
2926    }
2927
2928    form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? a->size : 0,
2929             a->disp, a->sp, a->m, ctx->mmu_idx == MMU_PHYS_IDX);
2930
2931    /*
2932     * For hppa1.1, LDCW is undefined unless aligned mod 16.
2933     * However, actual hardware succeeds when aligned mod 4.
2934     * Detect this case and log a GUEST_ERROR.
2935     *
2936     * TODO: HPPA64 relaxes the over-alignment requirement
2937     * with the ,co completer.
2938     */
2939    gen_helper_ldc_check(addr);
2940
2941    zero = tcg_constant_reg(0);
2942    tcg_gen_atomic_xchg_reg(dest, addr, zero, ctx->mmu_idx, mop);
2943
2944    if (a->m) {
2945        save_gpr(ctx, a->b, ofs);
2946    }
2947    save_gpr(ctx, a->t, dest);
2948
2949    return nullify_end(ctx);
2950}
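/* The atomic exchange with zero above is the whole of LDCW's contract:
   return the old memory word and leave zero behind, indivisibly.  */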
2951
2952static bool trans_stby(DisasContext *ctx, arg_stby *a)
2953{
2954    TCGv_reg ofs, val;
2955    TCGv_tl addr;
2956
2957    nullify_over(ctx);
2958
2959    form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
2960             ctx->mmu_idx == MMU_PHYS_IDX);
2961    val = load_gpr(ctx, a->r);
2962    if (a->a) {
2963        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2964            gen_helper_stby_e_parallel(cpu_env, addr, val);
2965        } else {
2966            gen_helper_stby_e(cpu_env, addr, val);
2967        }
2968    } else {
2969        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2970            gen_helper_stby_b_parallel(cpu_env, addr, val);
2971        } else {
2972            gen_helper_stby_b(cpu_env, addr, val);
2973        }
2974    }
2975    if (a->m) {
2976        tcg_gen_andi_reg(ofs, ofs, ~3);
2977        save_gpr(ctx, a->b, ofs);
2978    }
2979
2980    return nullify_end(ctx);
2981}
2982
2983static bool trans_lda(DisasContext *ctx, arg_ldst *a)
2984{
2985    int hold_mmu_idx = ctx->mmu_idx;
2986
2987    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2988    ctx->mmu_idx = MMU_PHYS_IDX;
2989    trans_ld(ctx, a);
2990    ctx->mmu_idx = hold_mmu_idx;
2991    return true;
2992}
2993
2994static bool trans_sta(DisasContext *ctx, arg_ldst *a)
2995{
2996    int hold_mmu_idx = ctx->mmu_idx;
2997
2998    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2999    ctx->mmu_idx = MMU_PHYS_IDX;
3000    trans_st(ctx, a);
3001    ctx->mmu_idx = hold_mmu_idx;
3002    return true;
3003}
3004
3005static bool trans_ldil(DisasContext *ctx, arg_ldil *a)
3006{
3007    TCGv_reg tcg_rt = dest_gpr(ctx, a->t);
3008
3009    tcg_gen_movi_reg(tcg_rt, a->i);
3010    save_gpr(ctx, a->t, tcg_rt);
3011    cond_free(&ctx->null_cond);
3012    return true;
3013}
3014
3015static bool trans_addil(DisasContext *ctx, arg_addil *a)
3016{
3017    TCGv_reg tcg_rt = load_gpr(ctx, a->r);
3018    TCGv_reg tcg_r1 = dest_gpr(ctx, 1);
3019
3020    tcg_gen_addi_reg(tcg_r1, tcg_rt, a->i);
3021    save_gpr(ctx, 1, tcg_r1);
3022    cond_free(&ctx->null_cond);
3023    return true;
3024}
3025
3026static bool trans_ldo(DisasContext *ctx, arg_ldo *a)
3027{
3028    TCGv_reg tcg_rt = dest_gpr(ctx, a->t);
3029
3030    /* Special case rb == 0, for the LDI pseudo-op.
3031       The COPY pseudo-op is handled for free within tcg_gen_addi_tl.  */
3032    if (a->b == 0) {
3033        tcg_gen_movi_reg(tcg_rt, a->i);
3034    } else {
3035        tcg_gen_addi_reg(tcg_rt, cpu_gr[a->b], a->i);
3036    }
3037    save_gpr(ctx, a->t, tcg_rt);
3038    cond_free(&ctx->null_cond);
3039    return true;
3040}
3041
3042static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_reg in1,
3043                    unsigned c, unsigned f, unsigned n, int disp)
3044{
3045    TCGv_reg dest, in2, sv;
3046    DisasCond cond;
3047
3048    in2 = load_gpr(ctx, r);
3049    dest = get_temp(ctx);
3050
3051    tcg_gen_sub_reg(dest, in1, in2);
3052
3053    sv = NULL;
3054    if (cond_need_sv(c)) {
3055        sv = do_sub_sv(ctx, dest, in1, in2);
3056    }
3057
3058    cond = do_sub_cond(c * 2 + f, dest, in1, in2, sv);
3059    return do_cbranch(ctx, disp, n, &cond);
3060}
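/* In do_cmpb and do_addb, f is the condition-negation bit from the
   insn encoding, so c * 2 + f indexes a condition table whose odd
   entries are the negated forms (per our reading of do_cond and
   do_sub_cond earlier in this file).  */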
3061
3062static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a)
3063{
3064    nullify_over(ctx);
3065    return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3066}
3067
3068static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a)
3069{
3070    nullify_over(ctx);
3071    return do_cmpb(ctx, a->r, load_const(ctx, a->i), a->c, a->f, a->n, a->disp);
3072}
3073
3074static bool do_addb(DisasContext *ctx, unsigned r, TCGv_reg in1,
3075                    unsigned c, unsigned f, unsigned n, int disp)
3076{
3077    TCGv_reg dest, in2, sv, cb_msb;
3078    DisasCond cond;
3079
3080    in2 = load_gpr(ctx, r);
3081    dest = tcg_temp_new();
3082    sv = NULL;
3083    cb_msb = NULL;
3084
3085    if (cond_need_cb(c)) {
3086        cb_msb = get_temp(ctx);
3087        tcg_gen_movi_reg(cb_msb, 0);
3088        tcg_gen_add2_reg(dest, cb_msb, in1, cb_msb, in2, cb_msb);
3089    } else {
3090        tcg_gen_add_reg(dest, in1, in2);
3091    }
3092    if (cond_need_sv(c)) {
3093        sv = do_add_sv(ctx, dest, in1, in2);
3094    }
3095
3096    cond = do_cond(c * 2 + f, dest, cb_msb, sv);
3097    save_gpr(ctx, r, dest);
3098    tcg_temp_free(dest);
3099    return do_cbranch(ctx, disp, n, &cond);
3100}
3101
3102static bool trans_addb(DisasContext *ctx, arg_addb *a)
3103{
3104    nullify_over(ctx);
3105    return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3106}
3107
3108static bool trans_addbi(DisasContext *ctx, arg_addbi *a)
3109{
3110    nullify_over(ctx);
3111    return do_addb(ctx, a->r, load_const(ctx, a->i), a->c, a->f, a->n, a->disp);
3112}
3113
3114static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a)
3115{
3116    TCGv_reg tmp, tcg_r;
3117    DisasCond cond;
3118
3119    nullify_over(ctx);
3120
3121    tmp = tcg_temp_new();
3122    tcg_r = load_gpr(ctx, a->r);
3123    tcg_gen_shl_reg(tmp, tcg_r, cpu_sar);
3124
3125    cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3126    tcg_temp_free(tmp);
3127    return do_cbranch(ctx, a->disp, a->n, &cond);
3128}
3129
3130static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
3131{
3132    TCGv_reg tmp, tcg_r;
3133    DisasCond cond;
3134
3135    nullify_over(ctx);
3136
3137    tmp = tcg_temp_new();
3138    tcg_r = load_gpr(ctx, a->r);
3139    tcg_gen_shli_reg(tmp, tcg_r, a->p);
3140
3141    cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3142    tcg_temp_free(tmp);
3143    return do_cbranch(ctx, a->disp, a->n, &cond);
3144}
3145
3146static bool trans_movb(DisasContext *ctx, arg_movb *a)
3147{
3148    TCGv_reg dest;
3149    DisasCond cond;
3150
3151    nullify_over(ctx);
3152
3153    dest = dest_gpr(ctx, a->r2);
3154    if (a->r1 == 0) {
3155        tcg_gen_movi_reg(dest, 0);
3156    } else {
3157        tcg_gen_mov_reg(dest, cpu_gr[a->r1]);
3158    }
3159
3160    cond = do_sed_cond(a->c, dest);
3161    return do_cbranch(ctx, a->disp, a->n, &cond);
3162}
3163
3164static bool trans_movbi(DisasContext *ctx, arg_movbi *a)
3165{
3166    TCGv_reg dest;
3167    DisasCond cond;
3168
3169    nullify_over(ctx);
3170
3171    dest = dest_gpr(ctx, a->r);
3172    tcg_gen_movi_reg(dest, a->i);
3173
3174    cond = do_sed_cond(a->c, dest);
3175    return do_cbranch(ctx, a->disp, a->n, &cond);
3176}
3177
3178static bool trans_shrpw_sar(DisasContext *ctx, arg_shrpw_sar *a)
3179{
3180    TCGv_reg dest;
3181
3182    if (a->c) {
3183        nullify_over(ctx);
3184    }
3185
3186    dest = dest_gpr(ctx, a->t);
3187    if (a->r1 == 0) {
3188        tcg_gen_ext32u_reg(dest, load_gpr(ctx, a->r2));
3189        tcg_gen_shr_reg(dest, dest, cpu_sar);
3190    } else if (a->r1 == a->r2) {
3191        TCGv_i32 t32 = tcg_temp_new_i32();
3192        tcg_gen_trunc_reg_i32(t32, load_gpr(ctx, a->r2));
3193        tcg_gen_rotr_i32(t32, t32, cpu_sar);
3194        tcg_gen_extu_i32_reg(dest, t32);
3195        tcg_temp_free_i32(t32);
3196    } else {
3197        TCGv_i64 t = tcg_temp_new_i64();
3198        TCGv_i64 s = tcg_temp_new_i64();
3199
3200        tcg_gen_concat_reg_i64(t, load_gpr(ctx, a->r2), load_gpr(ctx, a->r1));
3201        tcg_gen_extu_reg_i64(s, cpu_sar);
3202        tcg_gen_shr_i64(t, t, s);
3203        tcg_gen_trunc_i64_reg(dest, t);
3204
3205        tcg_temp_free_i64(t);
3206        tcg_temp_free_i64(s);
3207    }
3208    save_gpr(ctx, a->t, dest);
3209
3210    /* Install the new nullification.  */
3211    cond_free(&ctx->null_cond);
3212    if (a->c) {
3213        ctx->null_cond = do_sed_cond(a->c, dest);
3214    }
3215    return nullify_end(ctx);
3216}
3217
3218static bool trans_shrpw_imm(DisasContext *ctx, arg_shrpw_imm *a)
3219{
3220    unsigned sa = 31 - a->cpos;
3221    TCGv_reg dest, t2;
3222
3223    if (a->c) {
3224        nullify_over(ctx);
3225    }
3226
3227    dest = dest_gpr(ctx, a->t);
3228    t2 = load_gpr(ctx, a->r2);
3229    if (a->r1 == 0) {
3230        tcg_gen_extract_reg(dest, t2, sa, 32 - sa);
3231    } else if (TARGET_REGISTER_BITS == 32) {
3232        tcg_gen_extract2_reg(dest, t2, cpu_gr[a->r1], sa);
3233    } else if (a->r1 == a->r2) {
3234        TCGv_i32 t32 = tcg_temp_new_i32();
3235        tcg_gen_trunc_reg_i32(t32, t2);
3236        tcg_gen_rotri_i32(t32, t32, sa);
3237        tcg_gen_extu_i32_reg(dest, t32);
3238        tcg_temp_free_i32(t32);
3239    } else {
3240        TCGv_i64 t64 = tcg_temp_new_i64();
3241        tcg_gen_concat_reg_i64(t64, t2, cpu_gr[a->r1]);
3242        tcg_gen_shri_i64(t64, t64, sa);
3243        tcg_gen_trunc_i64_reg(dest, t64);
3244        tcg_temp_free_i64(t64);
3245    }
3246    save_gpr(ctx, a->t, dest);
3247
3248    /* Install the new nullification.  */
3249    cond_free(&ctx->null_cond);
3250    if (a->c) {
3251        ctx->null_cond = do_sed_cond(a->c, dest);
3252    }
3253    return nullify_end(ctx);
3254}
3255
3256static bool trans_extrw_sar(DisasContext *ctx, arg_extrw_sar *a)
3257{
3258    unsigned len = 32 - a->clen;
3259    TCGv_reg dest, src, tmp;
3260
3261    if (a->c) {
3262        nullify_over(ctx);
3263    }
3264
3265    dest = dest_gpr(ctx, a->t);
3266    src = load_gpr(ctx, a->r);
3267    tmp = tcg_temp_new();
3268
3269    /* Recall that SAR uses big-endian bit numbering.  */
3270    tcg_gen_xori_reg(tmp, cpu_sar, TARGET_REGISTER_BITS - 1);
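    /* With the register width a power of two, the xor above equals
       (width - 1) - SAR; e.g. for 32-bit, 31 ^ 5 == 26.  */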
3271    if (a->se) {
3272        tcg_gen_sar_reg(dest, src, tmp);
3273        tcg_gen_sextract_reg(dest, dest, 0, len);
3274    } else {
3275        tcg_gen_shr_reg(dest, src, tmp);
3276        tcg_gen_extract_reg(dest, dest, 0, len);
3277    }
3278    tcg_temp_free(tmp);
3279    save_gpr(ctx, a->t, dest);
3280
3281    /* Install the new nullification.  */
3282    cond_free(&ctx->null_cond);
3283    if (a->c) {
3284        ctx->null_cond = do_sed_cond(a->c, dest);
3285    }
3286    return nullify_end(ctx);
3287}
3288
3289static bool trans_extrw_imm(DisasContext *ctx, arg_extrw_imm *a)
3290{
3291    unsigned len = 32 - a->clen;
3292    unsigned cpos = 31 - a->pos;
3293    TCGv_reg dest, src;
3294
3295    if (a->c) {
3296        nullify_over(ctx);
3297    }
3298
3299    dest = dest_gpr(ctx, a->t);
3300    src = load_gpr(ctx, a->r);
3301    if (a->se) {
3302        tcg_gen_sextract_reg(dest, src, cpos, len);
3303    } else {
3304        tcg_gen_extract_reg(dest, src, cpos, len);
3305    }
3306    save_gpr(ctx, a->t, dest);
3307
3308    /* Install the new nullification.  */
3309    cond_free(&ctx->null_cond);
3310    if (a->c) {
3311        ctx->null_cond = do_sed_cond(a->c, dest);
3312    }
3313    return nullify_end(ctx);
3314}
3315
3316static bool trans_depwi_imm(DisasContext *ctx, arg_depwi_imm *a)
3317{
3318    unsigned len = 32 - a->clen;
3319    target_sreg mask0, mask1;
3320    TCGv_reg dest;
3321
3322    if (a->c) {
3323        nullify_over(ctx);
3324    }
3325    if (a->cpos + len > 32) {
3326        len = 32 - a->cpos;
3327    }
3328
3329    dest = dest_gpr(ctx, a->t);
3330    mask0 = deposit64(0, a->cpos, len, a->i);
3331    mask1 = deposit64(-1, a->cpos, len, a->i);
3332
3333    if (a->nz) {
3334        TCGv_reg src = load_gpr(ctx, a->t);
3335        if (mask1 != -1) {
3336            tcg_gen_andi_reg(dest, src, mask1);
3337            src = dest;
3338        }
3339        tcg_gen_ori_reg(dest, src, mask0);
3340    } else {
3341        tcg_gen_movi_reg(dest, mask0);
3342    }
3343    save_gpr(ctx, a->t, dest);
3344
3345    /* Install the new nullification.  */
3346    cond_free(&ctx->null_cond);
3347    if (a->c) {
3348        ctx->null_cond = do_sed_cond(a->c, dest);
3349    }
3350    return nullify_end(ctx);
3351}
3352
3353static bool trans_depw_imm(DisasContext *ctx, arg_depw_imm *a)
3354{
3355    unsigned rs = a->nz ? a->t : 0;
3356    unsigned len = 32 - a->clen;
3357    TCGv_reg dest, val;
3358
3359    if (a->c) {
3360        nullify_over(ctx);
3361    }
3362    if (a->cpos + len > 32) {
3363        len = 32 - a->cpos;
3364    }
3365
3366    dest = dest_gpr(ctx, a->t);
3367    val = load_gpr(ctx, a->r);
3368    if (rs == 0) {
3369        tcg_gen_deposit_z_reg(dest, val, a->cpos, len);
3370    } else {
3371        tcg_gen_deposit_reg(dest, cpu_gr[rs], val, a->cpos, len);
3372    }
3373    save_gpr(ctx, a->t, dest);
3374
3375    /* Install the new nullification.  */
3376    cond_free(&ctx->null_cond);
3377    if (a->c) {
3378        ctx->null_cond = do_sed_cond(a->c, dest);
3379    }
3380    return nullify_end(ctx);
3381}
3382
3383static bool do_depw_sar(DisasContext *ctx, unsigned rt, unsigned c,
3384                        unsigned nz, unsigned clen, TCGv_reg val)
3385{
3386    unsigned rs = nz ? rt : 0;
3387    unsigned len = 32 - clen;
3388    TCGv_reg mask, tmp, shift, dest;
3389    unsigned msb = 1U << (len - 1);
3390
3391    dest = dest_gpr(ctx, rt);
3392    shift = tcg_temp_new();
3393    tmp = tcg_temp_new();
3394
3395    /* Convert big-endian bit numbering in SAR to left-shift.  */
3396    tcg_gen_xori_reg(shift, cpu_sar, TARGET_REGISTER_BITS - 1);
3397
3398    mask = tcg_const_reg(msb + (msb - 1));
3399    tcg_gen_and_reg(tmp, val, mask);
3400    if (rs) {
3401        tcg_gen_shl_reg(mask, mask, shift);
3402        tcg_gen_shl_reg(tmp, tmp, shift);
3403        tcg_gen_andc_reg(dest, cpu_gr[rs], mask);
3404        tcg_gen_or_reg(dest, dest, tmp);
3405    } else {
3406        tcg_gen_shl_reg(dest, tmp, shift);
3407    }
3408    tcg_temp_free(shift);
3409    tcg_temp_free(mask);
3410    tcg_temp_free(tmp);
3411    save_gpr(ctx, rt, dest);
3412
3413    /* Install the new nullification.  */
3414    cond_free(&ctx->null_cond);
3415    if (c) {
3416        ctx->null_cond = do_sed_cond(c, dest);
3417    }
3418    return nullify_end(ctx);
3419}
3420
3421static bool trans_depw_sar(DisasContext *ctx, arg_depw_sar *a)
3422{
3423    if (a->c) {
3424        nullify_over(ctx);
3425    }
3426    return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, load_gpr(ctx, a->r));
3427}
3428
3429static bool trans_depwi_sar(DisasContext *ctx, arg_depwi_sar *a)
3430{
3431    if (a->c) {
3432        nullify_over(ctx);
3433    }
3434    return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, load_const(ctx, a->i));
3435}
3436
3437static bool trans_be(DisasContext *ctx, arg_be *a)
3438{
3439    TCGv_reg tmp;
3440
3441#ifdef CONFIG_USER_ONLY
3442    /* ??? It seems like there should be a good way of using
3443       "be disp(sr2, r0)", the canonical gateway entry mechanism,
3444       to our advantage.  But that appears to be inconvenient to
3445       manage alongside branch delay slots.  Therefore we handle
3446       entry into the gateway page via absolute address.  */
3447    /* Since we don't implement spaces, just branch.  Do notice the special
3448       case of "be disp(*,r0)" using a direct branch to disp, so that we can
3449       goto_tb to the TB containing the syscall.  */
3450    if (a->b == 0) {
3451        return do_dbranch(ctx, a->disp, a->l, a->n);
3452    }
3453#else
3454    nullify_over(ctx);
3455#endif
3456
3457    tmp = get_temp(ctx);
3458    tcg_gen_addi_reg(tmp, load_gpr(ctx, a->b), a->disp);
3459    tmp = do_ibranch_priv(ctx, tmp);
3460
3461#ifdef CONFIG_USER_ONLY
3462    return do_ibranch(ctx, tmp, a->l, a->n);
3463#else
3464    TCGv_i64 new_spc = tcg_temp_new_i64();
3465
3466    load_spr(ctx, new_spc, a->sp);
3467    if (a->l) {
3468        copy_iaoq_entry(cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
3469        tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f);
3470    }
3471    if (a->n && use_nullify_skip(ctx)) {
3472        tcg_gen_mov_reg(cpu_iaoq_f, tmp);
3473        tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
3474        tcg_gen_mov_i64(cpu_iasq_f, new_spc);
3475        tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
3476    } else {
3477        copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3478        if (ctx->iaoq_b == -1) {
3479            tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3480        }
3481        tcg_gen_mov_reg(cpu_iaoq_b, tmp);
3482        tcg_gen_mov_i64(cpu_iasq_b, new_spc);
3483        nullify_set(ctx, a->n);
3484    }
3485    tcg_temp_free_i64(new_spc);
3486    tcg_gen_lookup_and_goto_ptr();
3487    ctx->base.is_jmp = DISAS_NORETURN;
3488    return nullify_end(ctx);
3489#endif
3490}
3491
3492static bool trans_bl(DisasContext *ctx, arg_bl *a)
3493{
3494    return do_dbranch(ctx, iaoq_dest(ctx, a->disp), a->l, a->n);
3495}
3496
3497static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
3498{
3499    target_ureg dest = iaoq_dest(ctx, a->disp);
3500
3501    nullify_over(ctx);
3502
3503    /* Make sure the caller hasn't done something weird with the queue.
3504     * ??? This is not quite the same as the PSW[B] bit, which would be
3505     * expensive to track.  Real hardware will trap for
3506     *    b  gateway
3507     *    b  gateway+4  (in delay slot of first branch)
3508     * However, checking for a non-sequential instruction queue *will*
3509     * diagnose the security hole
3510     *    b  gateway
3511     *    b  evil
3512     * in which instructions at evil would run with increased privs.
3513     */
3514    if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) {
3515        return gen_illegal(ctx);
3516    }
3517
3518#ifndef CONFIG_USER_ONLY
3519    if (ctx->tb_flags & PSW_C) {
3520        CPUHPPAState *env = ctx->cs->env_ptr;
3521        int type = hppa_artype_for_page(env, ctx->base.pc_next);
3522        /* If we could not find a TLB entry, then we need to generate an
3523           ITLB miss exception so the kernel will provide it.
3524           The resulting TLB fill operation will invalidate this TB and
3525           we will re-translate, at which point we *will* be able to find
3526           the TLB entry and determine if this is in fact a gateway page.  */
3527        if (type < 0) {
3528            gen_excp(ctx, EXCP_ITLB_MISS);
3529            return true;
3530        }
3531        /* No change for non-gateway pages or for priv decrease.  */
3532        if (type >= 4 && type - 4 < ctx->privilege) {
3533            dest = deposit32(dest, 0, 2, type - 4);
3534        }
3535    } else {
3536        dest &= -4;  /* priv = 0 */
3537    }
3538#endif
3539
    if (a->l) {
        TCGv_reg tmp = dest_gpr(ctx, a->l);
        if (ctx->privilege < 3) {
            tcg_gen_andi_reg(tmp, tmp, -4);
        }
        tcg_gen_ori_reg(tmp, tmp, ctx->privilege);
        save_gpr(ctx, a->l, tmp);
    }

    return do_dbranch(ctx, dest, 0, a->n);
}

static bool trans_blr(DisasContext *ctx, arg_blr *a)
{
    if (a->x) {
        TCGv_reg tmp = get_temp(ctx);
        tcg_gen_shli_reg(tmp, load_gpr(ctx, a->x), 3);
        tcg_gen_addi_reg(tmp, tmp, ctx->iaoq_f + 8);
        /* The computation here never changes privilege level.  */
        return do_ibranch(ctx, tmp, a->l, a->n);
    } else {
        /* BLR R0,RX is a good way to load PC+8 into RX.  */
        return do_dbranch(ctx, ctx->iaoq_f + 8, a->l, a->n);
    }
}

static bool trans_bv(DisasContext *ctx, arg_bv *a)
{
    TCGv_reg dest;

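    /* The branch target is GR[b] plus GR[x] scaled by 8; with x == r0
       this degenerates to a plain indirect branch on GR[b].  */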
    if (a->x == 0) {
        dest = load_gpr(ctx, a->b);
    } else {
        dest = get_temp(ctx);
        tcg_gen_shli_reg(dest, load_gpr(ctx, a->x), 3);
        tcg_gen_add_reg(dest, dest, load_gpr(ctx, a->b));
    }
    dest = do_ibranch_priv(ctx, dest);
    return do_ibranch(ctx, dest, 0, a->n);
}

static bool trans_bve(DisasContext *ctx, arg_bve *a)
{
    TCGv_reg dest;

#ifdef CONFIG_USER_ONLY
    dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
    return do_ibranch(ctx, dest, a->l, a->n);
#else
    nullify_over(ctx);
    dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));

    copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
    if (ctx->iaoq_b == -1) {
        tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
    }
    copy_iaoq_entry(cpu_iaoq_b, -1, dest);
    tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
    if (a->l) {
        copy_iaoq_entry(cpu_gr[a->l], ctx->iaoq_n, ctx->iaoq_n_var);
    }
    nullify_set(ctx, a->n);
    tcg_gen_lookup_and_goto_ptr();
    ctx->base.is_jmp = DISAS_NORETURN;
    return nullify_end(ctx);
#endif
}

/*
 * Float class 0
 */
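/*
 * A note on naming: the do_fop_* helpers encode the operand sizes with
 * the destination first -- 'w' is a 32-bit word, 'd' a 64-bit double,
 * and the middle 'e' presumably stands for the cpu_env argument that is
 * threaded through to the helper.  Thus do_fop_wew maps word to word,
 * while do_fop_dew produces a double result from a word source.
 */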

static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_mov_i32(dst, src);
}

static bool trans_fcpy_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fcpy_f);
}

static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_mov_i64(dst, src);
}

static bool trans_fcpy_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fcpy_d);
}

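/* FABS, FNEG and FNEGABS below only manipulate the IEEE sign bit, so
   they are open-coded as integer and/xor/or instead of calling into
   the softfloat helpers.  */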
static void gen_fabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_andi_i32(dst, src, INT32_MAX);
}

static bool trans_fabs_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fabs_f);
}

static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_andi_i64(dst, src, INT64_MAX);
}

static bool trans_fabs_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fabs_d);
}

static bool trans_fsqrt_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fsqrt_s);
}

static bool trans_fsqrt_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fsqrt_d);
}

static bool trans_frnd_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_frnd_s);
}

static bool trans_frnd_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_frnd_d);
}

static void gen_fneg_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_xori_i32(dst, src, INT32_MIN);
}

static bool trans_fneg_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fneg_f);
}

static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_xori_i64(dst, src, INT64_MIN);
}

static bool trans_fneg_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fneg_d);
}

static void gen_fnegabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_ori_i32(dst, src, INT32_MIN);
}

static bool trans_fnegabs_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fnegabs_f);
}

static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_ori_i64(dst, src, INT64_MIN);
}

static bool trans_fnegabs_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fnegabs_d);
}

/*
 * Float class 1
 */
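/*
 * Conversions.  In the trans_fcnv_* names, f and d are single and
 * double precision floats, w and q are 32-bit and 64-bit integers
 * (uw/uq for the unsigned forms), and the _t_ forms map to the
 * FCNV,T truncating converts.
 */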

static bool trans_fcnv_d_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_s);
}

static bool trans_fcnv_f_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_d);
}

static bool trans_fcnv_w_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_w_s);
}

static bool trans_fcnv_q_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_dw_s);
}

static bool trans_fcnv_w_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_w_d);
}

static bool trans_fcnv_q_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_dw_d);
}

static bool trans_fcnv_f_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_w);
}

static bool trans_fcnv_d_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_w);
}

static bool trans_fcnv_f_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_dw);
}

static bool trans_fcnv_d_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_dw);
}

static bool trans_fcnv_t_f_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_w);
}

static bool trans_fcnv_t_d_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_w);
}

static bool trans_fcnv_t_f_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_dw);
}

static bool trans_fcnv_t_d_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_dw);
}

static bool trans_fcnv_uw_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_uw_s);
}

static bool trans_fcnv_uq_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_udw_s);
}

static bool trans_fcnv_uw_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_uw_d);
}

static bool trans_fcnv_uq_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_udw_d);
}

static bool trans_fcnv_f_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_uw);
}

static bool trans_fcnv_d_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_uw);
}

static bool trans_fcnv_f_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_udw);
}

static bool trans_fcnv_d_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_udw);
}

static bool trans_fcnv_t_f_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_uw);
}

static bool trans_fcnv_t_d_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_uw);
}

static bool trans_fcnv_t_f_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_udw);
}

static bool trans_fcnv_t_d_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_udw);
}

/*
 * Float class 2
 */

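/* FCMP produces no explicit result: the helper records the comparison
   outcome in the fr0 status shadow, which FTEST below then inspects.  */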
static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a)
{
    TCGv_i32 ta, tb, tc, ty;

    nullify_over(ctx);

    ta = load_frw0_i32(a->r1);
    tb = load_frw0_i32(a->r2);
    ty = tcg_constant_i32(a->y);
    tc = tcg_constant_i32(a->c);

    gen_helper_fcmp_s(cpu_env, ta, tb, ty, tc);

    tcg_temp_free_i32(ta);
    tcg_temp_free_i32(tb);

    return nullify_end(ctx);
}

static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a)
{
    TCGv_i64 ta, tb;
    TCGv_i32 tc, ty;

    nullify_over(ctx);

    ta = load_frd0(a->r1);
    tb = load_frd0(a->r2);
    ty = tcg_constant_i32(a->y);
    tc = tcg_constant_i32(a->c);

    gen_helper_fcmp_d(cpu_env, ta, tb, ty, tc);

    tcg_temp_free_i64(ta);
    tcg_temp_free_i64(tb);

    return nullify_end(ctx);
}

static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
{
    TCGv_reg t;

    nullify_over(ctx);

    t = get_temp(ctx);
    tcg_gen_ld32u_reg(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));

    if (a->y == 1) {
        int mask;
        bool inv = false;

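        /* Multiple-condition tests.  Each mask selects the C bit
           (0x4000000) together with some number of older comparison
           results queued in the fr0 status shadow.  */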
        switch (a->c) {
        case 0: /* simple */
            tcg_gen_andi_reg(t, t, 0x4000000);
            ctx->null_cond = cond_make_0(TCG_COND_NE, t);
            goto done;
        case 2: /* rej */
            inv = true;
            /* fallthru */
        case 1: /* acc */
            mask = 0x43ff800;
            break;
        case 6: /* rej8 */
            inv = true;
            /* fallthru */
        case 5: /* acc8 */
            mask = 0x43f8000;
            break;
        case 9: /* acc6 */
            mask = 0x43e0000;
            break;
        case 13: /* acc4 */
            mask = 0x4380000;
            break;
        case 17: /* acc2 */
            mask = 0x4200000;
            break;
        default:
            gen_illegal(ctx);
            return true;
        }
        if (inv) {
            TCGv_reg c = load_const(ctx, mask);
            tcg_gen_or_reg(t, t, c);
            ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
        } else {
            tcg_gen_andi_reg(t, t, mask);
            ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
        }
    } else {
        unsigned cbit = (a->y ^ 1) - 1;

        tcg_gen_extract_reg(t, t, 21 - cbit, 1);
        ctx->null_cond = cond_make_0(TCG_COND_NE, t);
        tcg_temp_free(t);
    }

 done:
    return nullify_end(ctx);
}

/*
 * Float class 3
 */

static bool trans_fadd_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fadd_s);
}

static bool trans_fadd_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fadd_d);
}

static bool trans_fsub_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fsub_s);
}

static bool trans_fsub_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fsub_d);
}

static bool trans_fmpy_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_s);
}

static bool trans_fmpy_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_d);
}

static bool trans_fdiv_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_s);
}

static bool trans_fdiv_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_d);
}

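/* XMPYU: unsigned 32x32 -> 64-bit multiply within the FP register
   file.  load_frw0_i64 loads the 32-bit operands zero-extended, so a
   single 64-bit multiply yields the full product.  */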
static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a)
{
    TCGv_i64 x, y;

    nullify_over(ctx);

    x = load_frw0_i64(a->r1);
    y = load_frw0_i64(a->r2);
    tcg_gen_mul_i64(x, x, y);
    save_frd(a->t, x);
    tcg_temp_free_i64(x);
    tcg_temp_free_i64(y);

    return nullify_end(ctx);
}

/* Convert the fmpyadd single-precision register encodings to standard.  */
static inline int fmpyadd_s_reg(unsigned r)
{
    return (r & 16) * 2 + 16 + (r & 15);
}
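/* For example, 0x02 maps to 18 (one word of fr18) and 0x12 maps to
   50 == 32 + 18 (the other word of fr18): the low four bits select
   one of fr16..fr31 and bit 4 selects the 32-bit half.  */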

static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
{
    int tm = fmpyadd_s_reg(a->tm);
    int ra = fmpyadd_s_reg(a->ra);
    int ta = fmpyadd_s_reg(a->ta);
    int rm2 = fmpyadd_s_reg(a->rm2);
    int rm1 = fmpyadd_s_reg(a->rm1);

    nullify_over(ctx);

    do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
    do_fop_weww(ctx, ta, ta, ra,
                is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);

    return nullify_end(ctx);
}

static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_s(ctx, a, false);
}

static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_s(ctx, a, true);
}

static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
{
    nullify_over(ctx);

    do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
    do_fop_dedd(ctx, a->ta, a->ta, a->ra,
                is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);

    return nullify_end(ctx);
}

static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_d(ctx, a, false);
}

static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_d(ctx, a, true);
}

static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a)
{
    TCGv_i32 x, y, z;

    nullify_over(ctx);
    x = load_frw0_i32(a->rm1);
    y = load_frw0_i32(a->rm2);
    z = load_frw0_i32(a->ra3);

    if (a->neg) {
        gen_helper_fmpynfadd_s(x, cpu_env, x, y, z);
    } else {
        gen_helper_fmpyfadd_s(x, cpu_env, x, y, z);
    }

    tcg_temp_free_i32(y);
    tcg_temp_free_i32(z);
    save_frw_i32(a->t, x);
    tcg_temp_free_i32(x);
    return nullify_end(ctx);
}

static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a)
{
    TCGv_i64 x, y, z;

    nullify_over(ctx);
    x = load_frd0(a->rm1);
    y = load_frd0(a->rm2);
    z = load_frd0(a->ra3);

    if (a->neg) {
        gen_helper_fmpynfadd_d(x, cpu_env, x, y, z);
    } else {
        gen_helper_fmpyfadd_d(x, cpu_env, x, y, z);
    }

    tcg_temp_free_i64(y);
    tcg_temp_free_i64(z);
    save_frd(a->t, x);
    tcg_temp_free_i64(x);
    return nullify_end(ctx);
}

static bool trans_diag(DisasContext *ctx, arg_diag *a)
{
    qemu_log_mask(LOG_UNIMP, "DIAG opcode ignored\n");
    cond_free(&ctx->null_cond);
    return true;
}

static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    int bound;

    ctx->cs = cs;
    ctx->tb_flags = ctx->base.tb->flags;

#ifdef CONFIG_USER_ONLY
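    /* User mode always runs at the least privileged level; that level
       doubles as the MMU index and is carried in the low bits of the
       IAOQ values below.  */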
    ctx->privilege = MMU_USER_IDX;
    ctx->mmu_idx = MMU_USER_IDX;
    ctx->iaoq_f = ctx->base.pc_first | MMU_USER_IDX;
    ctx->iaoq_b = ctx->base.tb->cs_base | MMU_USER_IDX;
    ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
#else
    ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
    ctx->mmu_idx = (ctx->tb_flags & PSW_D ? ctx->privilege : MMU_PHYS_IDX);

    /* Recover the IAOQ values from the GVA + PRIV.  */
    uint64_t cs_base = ctx->base.tb->cs_base;
    uint64_t iasq_f = cs_base & ~0xffffffffull;
    int32_t diff = cs_base;

    ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
    ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
#endif
    ctx->iaoq_n = -1;
    ctx->iaoq_n_var = NULL;

    /* Bound the number of instructions by those left on the page.  */
    bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
    ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
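    /* E.g. with 4 KiB pages and pc_first == 0x1ff8, the OR above
       yields -8, so bound == 2: only the two insns remaining on the
       page may be translated into this TB.  */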

    ctx->ntempr = 0;
    ctx->ntempl = 0;
    memset(ctx->tempr, 0, sizeof(ctx->tempr));
    memset(ctx->templ, 0, sizeof(ctx->templ));
}

static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    /* Seed the nullification status from PSW[N], as saved in TB->FLAGS.  */
    ctx->null_cond = cond_make_f();
    ctx->psw_n_nonzero = false;
    if (ctx->tb_flags & PSW_N) {
        ctx->null_cond.c = TCG_COND_ALWAYS;
        ctx->psw_n_nonzero = true;
    }
    ctx->null_lab = NULL;
}

static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
}

static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUHPPAState *env = cs->env_ptr;
    DisasJumpType ret;
    int i, n;

    /* Execute one insn.  */
#ifdef CONFIG_USER_ONLY
    if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
        do_page_zero(ctx);
        ret = ctx->base.is_jmp;
        assert(ret != DISAS_NEXT);
    } else
#endif
    {
        /* Always fetch the insn, even if nullified, so that we check
           the page permissions for execute.  */
        uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);

        /* Set up the IA queue for the next insn.
           This will be overwritten by a branch.  */
        if (ctx->iaoq_b == -1) {
            ctx->iaoq_n = -1;
            ctx->iaoq_n_var = get_temp(ctx);
            tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
        } else {
            ctx->iaoq_n = ctx->iaoq_b + 4;
            ctx->iaoq_n_var = NULL;
        }

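        /* A nullification condition of ALWAYS means this insn is
           statically known to be nullified: skip the decode entirely
           and reset the condition for the next insn.  */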
        if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
            ctx->null_cond.c = TCG_COND_NEVER;
            ret = DISAS_NEXT;
        } else {
            ctx->insn = insn;
            if (!decode(ctx, insn)) {
                gen_illegal(ctx);
            }
            ret = ctx->base.is_jmp;
            assert(ctx->null_lab == NULL);
        }
    }

    /* Free any temporaries allocated.  */
    for (i = 0, n = ctx->ntempr; i < n; ++i) {
        tcg_temp_free(ctx->tempr[i]);
        ctx->tempr[i] = NULL;
    }
    for (i = 0, n = ctx->ntempl; i < n; ++i) {
        tcg_temp_free_tl(ctx->templ[i]);
        ctx->templ[i] = NULL;
    }
    ctx->ntempr = 0;
    ctx->ntempl = 0;

    /* Advance the insn queue.  Note that this check also detects
       a privilege change within the instruction queue.  */
    if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
        if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
            && use_goto_tb(ctx, ctx->iaoq_b)
            && (ctx->null_cond.c == TCG_COND_NEVER
                || ctx->null_cond.c == TCG_COND_ALWAYS)) {
            nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
            ctx->base.is_jmp = ret = DISAS_NORETURN;
        } else {
            ctx->base.is_jmp = ret = DISAS_IAQ_N_STALE;
        }
    }
    ctx->iaoq_f = ctx->iaoq_b;
    ctx->iaoq_b = ctx->iaoq_n;
    ctx->base.pc_next += 4;

    switch (ret) {
    case DISAS_NORETURN:
    case DISAS_IAQ_N_UPDATED:
        break;

    case DISAS_NEXT:
    case DISAS_IAQ_N_STALE:
    case DISAS_IAQ_N_STALE_EXIT:
        if (ctx->iaoq_f == -1) {
            tcg_gen_mov_reg(cpu_iaoq_f, cpu_iaoq_b);
            copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
#ifndef CONFIG_USER_ONLY
            tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
#endif
            nullify_save(ctx);
            ctx->base.is_jmp = (ret == DISAS_IAQ_N_STALE_EXIT
                                ? DISAS_EXIT
                                : DISAS_IAQ_N_UPDATED);
        } else if (ctx->iaoq_b == -1) {
            tcg_gen_mov_reg(cpu_iaoq_b, ctx->iaoq_n_var);
        }
        break;

    default:
        g_assert_not_reached();
    }
}

static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    DisasJumpType is_jmp = ctx->base.is_jmp;

    switch (is_jmp) {
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
    case DISAS_IAQ_N_STALE:
    case DISAS_IAQ_N_STALE_EXIT:
        copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
        copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
        nullify_save(ctx);
        /* FALLTHRU */
    case DISAS_IAQ_N_UPDATED:
        if (is_jmp != DISAS_IAQ_N_STALE_EXIT) {
            tcg_gen_lookup_and_goto_ptr();
            break;
        }
        /* FALLTHRU */
    case DISAS_EXIT:
        tcg_gen_exit_tb(NULL, 0);
        break;
    default:
        g_assert_not_reached();
    }
}

static void hppa_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
{
    target_ulong pc = dcbase->pc_first;

#ifdef CONFIG_USER_ONLY
    switch (pc) {
    case 0x00:
        qemu_log("IN:\n0x00000000:  (null)\n");
        return;
    case 0xb0:
        qemu_log("IN:\n0x000000b0:  light-weight-syscall\n");
        return;
    case 0xe0:
        qemu_log("IN:\n0x000000e0:  set-thread-pointer-syscall\n");
        return;
    case 0x100:
        qemu_log("IN:\n0x00000100:  syscall\n");
        return;
    }
#endif

    qemu_log("IN: %s\n", lookup_symbol(pc));
    log_target_disas(cs, pc, dcbase->tb->size);
}

static const TranslatorOps hppa_tr_ops = {
    .init_disas_context = hppa_tr_init_disas_context,
    .tb_start           = hppa_tr_tb_start,
    .insn_start         = hppa_tr_insn_start,
    .translate_insn     = hppa_tr_translate_insn,
    .tb_stop            = hppa_tr_tb_stop,
    .disas_log          = hppa_tr_disas_log,
};

void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
{
    DisasContext ctx;
    translator_loop(&hppa_tr_ops, &ctx.base, cs, tb, max_insns);
}

void restore_state_to_opc(CPUHPPAState *env, TranslationBlock *tb,
                          target_ulong *data)
{
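    /* data[] holds the iaoq_f/iaoq_b pair recorded for each insn by
       hppa_tr_insn_start via tcg_gen_insn_start.  */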
    env->iaoq_f = data[0];
    if (data[1] != (target_ureg)-1) {
        env->iaoq_b = data[1];
    }
    /* Since we were executing the instruction at IAOQ_F, and took some
       sort of action that provoked the cpu_restore_state, we can infer
       that the instruction was not nullified.  */
    env->psw_n = 0;
}