qemu/target/hppa/translate.c
/*
 * HPPA emulation cpu translation for qemu.
 *
 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "trace-tcg.h"
#include "exec/log.h"

/* Since we have a distinction between register size and address size,
   we need to redefine all of these.  */
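
/* (Illustrative: with TARGET_REGISTER_BITS == 64 the tcg_gen_*_reg ops
   below resolve to their _i64 forms, otherwise to the _i32 forms;
   TCGv_tl tracks TARGET_LONG_BITS the same way.)  */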

#undef TCGv
#undef tcg_temp_new
#undef tcg_global_reg_new
#undef tcg_global_mem_new
#undef tcg_temp_local_new
#undef tcg_temp_free

#if TARGET_LONG_BITS == 64
#define TCGv_tl              TCGv_i64
#define tcg_temp_new_tl      tcg_temp_new_i64
#define tcg_temp_free_tl     tcg_temp_free_i64
#if TARGET_REGISTER_BITS == 64
#define tcg_gen_extu_reg_tl  tcg_gen_mov_i64
#else
#define tcg_gen_extu_reg_tl  tcg_gen_extu_i32_i64
#endif
#else
#define TCGv_tl              TCGv_i32
#define tcg_temp_new_tl      tcg_temp_new_i32
#define tcg_temp_free_tl     tcg_temp_free_i32
#define tcg_gen_extu_reg_tl  tcg_gen_mov_i32
#endif

#if TARGET_REGISTER_BITS == 64
#define TCGv_reg             TCGv_i64

#define tcg_temp_new         tcg_temp_new_i64
#define tcg_global_reg_new   tcg_global_reg_new_i64
#define tcg_global_mem_new   tcg_global_mem_new_i64
#define tcg_temp_local_new   tcg_temp_local_new_i64
#define tcg_temp_free        tcg_temp_free_i64

#define tcg_gen_movi_reg     tcg_gen_movi_i64
#define tcg_gen_mov_reg      tcg_gen_mov_i64
#define tcg_gen_ld8u_reg     tcg_gen_ld8u_i64
#define tcg_gen_ld8s_reg     tcg_gen_ld8s_i64
#define tcg_gen_ld16u_reg    tcg_gen_ld16u_i64
#define tcg_gen_ld16s_reg    tcg_gen_ld16s_i64
#define tcg_gen_ld32u_reg    tcg_gen_ld32u_i64
#define tcg_gen_ld32s_reg    tcg_gen_ld32s_i64
#define tcg_gen_ld_reg       tcg_gen_ld_i64
#define tcg_gen_st8_reg      tcg_gen_st8_i64
#define tcg_gen_st16_reg     tcg_gen_st16_i64
#define tcg_gen_st32_reg     tcg_gen_st32_i64
#define tcg_gen_st_reg       tcg_gen_st_i64
#define tcg_gen_add_reg      tcg_gen_add_i64
#define tcg_gen_addi_reg     tcg_gen_addi_i64
#define tcg_gen_sub_reg      tcg_gen_sub_i64
#define tcg_gen_neg_reg      tcg_gen_neg_i64
#define tcg_gen_subfi_reg    tcg_gen_subfi_i64
#define tcg_gen_subi_reg     tcg_gen_subi_i64
#define tcg_gen_and_reg      tcg_gen_and_i64
#define tcg_gen_andi_reg     tcg_gen_andi_i64
#define tcg_gen_or_reg       tcg_gen_or_i64
#define tcg_gen_ori_reg      tcg_gen_ori_i64
#define tcg_gen_xor_reg      tcg_gen_xor_i64
#define tcg_gen_xori_reg     tcg_gen_xori_i64
#define tcg_gen_not_reg      tcg_gen_not_i64
#define tcg_gen_shl_reg      tcg_gen_shl_i64
#define tcg_gen_shli_reg     tcg_gen_shli_i64
#define tcg_gen_shr_reg      tcg_gen_shr_i64
#define tcg_gen_shri_reg     tcg_gen_shri_i64
#define tcg_gen_sar_reg      tcg_gen_sar_i64
#define tcg_gen_sari_reg     tcg_gen_sari_i64
#define tcg_gen_brcond_reg   tcg_gen_brcond_i64
#define tcg_gen_brcondi_reg  tcg_gen_brcondi_i64
#define tcg_gen_setcond_reg  tcg_gen_setcond_i64
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i64
#define tcg_gen_mul_reg      tcg_gen_mul_i64
#define tcg_gen_muli_reg     tcg_gen_muli_i64
#define tcg_gen_div_reg      tcg_gen_div_i64
#define tcg_gen_rem_reg      tcg_gen_rem_i64
#define tcg_gen_divu_reg     tcg_gen_divu_i64
#define tcg_gen_remu_reg     tcg_gen_remu_i64
#define tcg_gen_discard_reg  tcg_gen_discard_i64
#define tcg_gen_trunc_reg_i32 tcg_gen_extrl_i64_i32
#define tcg_gen_trunc_i64_reg tcg_gen_mov_i64
#define tcg_gen_extu_i32_reg tcg_gen_extu_i32_i64
#define tcg_gen_ext_i32_reg  tcg_gen_ext_i32_i64
#define tcg_gen_extu_reg_i64 tcg_gen_mov_i64
#define tcg_gen_ext_reg_i64  tcg_gen_mov_i64
#define tcg_gen_ext8u_reg    tcg_gen_ext8u_i64
#define tcg_gen_ext8s_reg    tcg_gen_ext8s_i64
#define tcg_gen_ext16u_reg   tcg_gen_ext16u_i64
#define tcg_gen_ext16s_reg   tcg_gen_ext16s_i64
#define tcg_gen_ext32u_reg   tcg_gen_ext32u_i64
#define tcg_gen_ext32s_reg   tcg_gen_ext32s_i64
#define tcg_gen_bswap16_reg  tcg_gen_bswap16_i64
#define tcg_gen_bswap32_reg  tcg_gen_bswap32_i64
#define tcg_gen_bswap64_reg  tcg_gen_bswap64_i64
#define tcg_gen_concat_reg_i64 tcg_gen_concat32_i64
#define tcg_gen_andc_reg     tcg_gen_andc_i64
#define tcg_gen_eqv_reg      tcg_gen_eqv_i64
#define tcg_gen_nand_reg     tcg_gen_nand_i64
#define tcg_gen_nor_reg      tcg_gen_nor_i64
#define tcg_gen_orc_reg      tcg_gen_orc_i64
#define tcg_gen_clz_reg      tcg_gen_clz_i64
#define tcg_gen_ctz_reg      tcg_gen_ctz_i64
#define tcg_gen_clzi_reg     tcg_gen_clzi_i64
#define tcg_gen_ctzi_reg     tcg_gen_ctzi_i64
#define tcg_gen_clrsb_reg    tcg_gen_clrsb_i64
#define tcg_gen_ctpop_reg    tcg_gen_ctpop_i64
#define tcg_gen_rotl_reg     tcg_gen_rotl_i64
#define tcg_gen_rotli_reg    tcg_gen_rotli_i64
#define tcg_gen_rotr_reg     tcg_gen_rotr_i64
#define tcg_gen_rotri_reg    tcg_gen_rotri_i64
#define tcg_gen_deposit_reg  tcg_gen_deposit_i64
#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i64
#define tcg_gen_extract_reg  tcg_gen_extract_i64
#define tcg_gen_sextract_reg tcg_gen_sextract_i64
#define tcg_const_reg        tcg_const_i64
#define tcg_const_local_reg  tcg_const_local_i64
#define tcg_gen_movcond_reg  tcg_gen_movcond_i64
#define tcg_gen_add2_reg     tcg_gen_add2_i64
#define tcg_gen_sub2_reg     tcg_gen_sub2_i64
#define tcg_gen_qemu_ld_reg  tcg_gen_qemu_ld_i64
#define tcg_gen_qemu_st_reg  tcg_gen_qemu_st_i64
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i64
#define tcg_gen_trunc_reg_ptr   tcg_gen_trunc_i64_ptr
#else
#define TCGv_reg             TCGv_i32
#define tcg_temp_new         tcg_temp_new_i32
#define tcg_global_reg_new   tcg_global_reg_new_i32
#define tcg_global_mem_new   tcg_global_mem_new_i32
#define tcg_temp_local_new   tcg_temp_local_new_i32
#define tcg_temp_free        tcg_temp_free_i32

#define tcg_gen_movi_reg     tcg_gen_movi_i32
#define tcg_gen_mov_reg      tcg_gen_mov_i32
#define tcg_gen_ld8u_reg     tcg_gen_ld8u_i32
#define tcg_gen_ld8s_reg     tcg_gen_ld8s_i32
#define tcg_gen_ld16u_reg    tcg_gen_ld16u_i32
#define tcg_gen_ld16s_reg    tcg_gen_ld16s_i32
#define tcg_gen_ld32u_reg    tcg_gen_ld_i32
#define tcg_gen_ld32s_reg    tcg_gen_ld_i32
#define tcg_gen_ld_reg       tcg_gen_ld_i32
#define tcg_gen_st8_reg      tcg_gen_st8_i32
#define tcg_gen_st16_reg     tcg_gen_st16_i32
#define tcg_gen_st32_reg     tcg_gen_st32_i32
#define tcg_gen_st_reg       tcg_gen_st_i32
#define tcg_gen_add_reg      tcg_gen_add_i32
#define tcg_gen_addi_reg     tcg_gen_addi_i32
#define tcg_gen_sub_reg      tcg_gen_sub_i32
#define tcg_gen_neg_reg      tcg_gen_neg_i32
#define tcg_gen_subfi_reg    tcg_gen_subfi_i32
#define tcg_gen_subi_reg     tcg_gen_subi_i32
#define tcg_gen_and_reg      tcg_gen_and_i32
#define tcg_gen_andi_reg     tcg_gen_andi_i32
#define tcg_gen_or_reg       tcg_gen_or_i32
#define tcg_gen_ori_reg      tcg_gen_ori_i32
#define tcg_gen_xor_reg      tcg_gen_xor_i32
#define tcg_gen_xori_reg     tcg_gen_xori_i32
#define tcg_gen_not_reg      tcg_gen_not_i32
#define tcg_gen_shl_reg      tcg_gen_shl_i32
#define tcg_gen_shli_reg     tcg_gen_shli_i32
#define tcg_gen_shr_reg      tcg_gen_shr_i32
#define tcg_gen_shri_reg     tcg_gen_shri_i32
#define tcg_gen_sar_reg      tcg_gen_sar_i32
#define tcg_gen_sari_reg     tcg_gen_sari_i32
#define tcg_gen_brcond_reg   tcg_gen_brcond_i32
#define tcg_gen_brcondi_reg  tcg_gen_brcondi_i32
#define tcg_gen_setcond_reg  tcg_gen_setcond_i32
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i32
#define tcg_gen_mul_reg      tcg_gen_mul_i32
#define tcg_gen_muli_reg     tcg_gen_muli_i32
#define tcg_gen_div_reg      tcg_gen_div_i32
#define tcg_gen_rem_reg      tcg_gen_rem_i32
#define tcg_gen_divu_reg     tcg_gen_divu_i32
#define tcg_gen_remu_reg     tcg_gen_remu_i32
#define tcg_gen_discard_reg  tcg_gen_discard_i32
#define tcg_gen_trunc_reg_i32 tcg_gen_mov_i32
#define tcg_gen_trunc_i64_reg tcg_gen_extrl_i64_i32
#define tcg_gen_extu_i32_reg tcg_gen_mov_i32
#define tcg_gen_ext_i32_reg  tcg_gen_mov_i32
#define tcg_gen_extu_reg_i64 tcg_gen_extu_i32_i64
#define tcg_gen_ext_reg_i64  tcg_gen_ext_i32_i64
#define tcg_gen_ext8u_reg    tcg_gen_ext8u_i32
#define tcg_gen_ext8s_reg    tcg_gen_ext8s_i32
#define tcg_gen_ext16u_reg   tcg_gen_ext16u_i32
#define tcg_gen_ext16s_reg   tcg_gen_ext16s_i32
#define tcg_gen_ext32u_reg   tcg_gen_mov_i32
#define tcg_gen_ext32s_reg   tcg_gen_mov_i32
#define tcg_gen_bswap16_reg  tcg_gen_bswap16_i32
#define tcg_gen_bswap32_reg  tcg_gen_bswap32_i32
#define tcg_gen_concat_reg_i64 tcg_gen_concat_i32_i64
#define tcg_gen_andc_reg     tcg_gen_andc_i32
#define tcg_gen_eqv_reg      tcg_gen_eqv_i32
#define tcg_gen_nand_reg     tcg_gen_nand_i32
#define tcg_gen_nor_reg      tcg_gen_nor_i32
#define tcg_gen_orc_reg      tcg_gen_orc_i32
#define tcg_gen_clz_reg      tcg_gen_clz_i32
#define tcg_gen_ctz_reg      tcg_gen_ctz_i32
#define tcg_gen_clzi_reg     tcg_gen_clzi_i32
#define tcg_gen_ctzi_reg     tcg_gen_ctzi_i32
#define tcg_gen_clrsb_reg    tcg_gen_clrsb_i32
#define tcg_gen_ctpop_reg    tcg_gen_ctpop_i32
#define tcg_gen_rotl_reg     tcg_gen_rotl_i32
#define tcg_gen_rotli_reg    tcg_gen_rotli_i32
#define tcg_gen_rotr_reg     tcg_gen_rotr_i32
#define tcg_gen_rotri_reg    tcg_gen_rotri_i32
#define tcg_gen_deposit_reg  tcg_gen_deposit_i32
#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i32
#define tcg_gen_extract_reg  tcg_gen_extract_i32
#define tcg_gen_sextract_reg tcg_gen_sextract_i32
#define tcg_const_reg        tcg_const_i32
#define tcg_const_local_reg  tcg_const_local_i32
#define tcg_gen_movcond_reg  tcg_gen_movcond_i32
#define tcg_gen_add2_reg     tcg_gen_add2_i32
#define tcg_gen_sub2_reg     tcg_gen_sub2_i32
#define tcg_gen_qemu_ld_reg  tcg_gen_qemu_ld_i32
#define tcg_gen_qemu_st_reg  tcg_gen_qemu_st_i32
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i32
#define tcg_gen_trunc_reg_ptr   tcg_gen_ext_i32_ptr
#endif /* TARGET_REGISTER_BITS */

typedef struct DisasCond {
    TCGCond c;
    TCGv_reg a0, a1;
    bool a0_is_n;
    bool a1_is_0;
} DisasCond;
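
/* Convention used by cond_prep/cond_free below: a0_is_n means a0 aliases
   the cpu_psw_n global and must not be freed; a1_is_0 means a1 is an
   implicit zero that cond_prep materializes on demand.  */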

typedef struct DisasContext {
    DisasContextBase base;
    CPUState *cs;

    target_ureg iaoq_f;
    target_ureg iaoq_b;
    target_ureg iaoq_n;
    TCGv_reg iaoq_n_var;

    int ntempr, ntempl;
    TCGv_reg tempr[8];
    TCGv_tl  templ[4];

    DisasCond null_cond;
    TCGLabel *null_lab;

    uint32_t insn;
    uint32_t tb_flags;
    int mmu_idx;
    int privilege;
    bool psw_n_nonzero;
} DisasContext;
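
/* The iaoq_f/iaoq_b fields mirror the front and back of the HPPA
   instruction address offset queue, with iaoq_n the offset of the insn
   after the back element; the value -1 in these fields means "not a
   compile-time constant, use the corresponding TCG variable" (see
   copy_iaoq_entry below).  */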

/* Note that ssm/rsm instructions number PSW_W and PSW_E differently.  */
static int expand_sm_imm(int val)
{
    if (val & PSW_SM_E) {
        val = (val & ~PSW_SM_E) | PSW_E;
    }
    if (val & PSW_SM_W) {
        val = (val & ~PSW_SM_W) | PSW_W;
    }
    return val;
}
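
/* For example (illustrative): an immediate with PSW_SM_E set is rewritten
   so the architectural PSW_E bit is set instead, and likewise for
   PSW_SM_W -> PSW_W; all other bits pass through unchanged.  */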

/* An inverted space register field indicates that 0 means sr0, rather
   than a space inferred from the base register.  */
static int expand_sr3x(int val)
{
    return ~val;
}

/* Convert the M:A bits within a memory insn to the tri-state value
   we use for the final M.  */
static int ma_to_m(int val)
{
    return val & 2 ? (val & 1 ? -1 : 1) : 0;
}
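
/* That is, with M in bit 1 and A in bit 0:
 *   M=0      -> 0   no base register update
 *   M=1 A=0  -> 1   post-modify
 *   M=1 A=1  -> -1  pre-modify
 * matching the <0 / >0 / ==0 convention documented above do_load_32.  */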

/* Convert the sign of the displacement to a pre or post-modify.  */
static int pos_to_m(int val)
{
    return val ? 1 : -1;
}

static int neg_to_m(int val)
{
    return val ? -1 : 1;
}

/* Used for branch targets and fp memory ops.  */
static int expand_shl2(int val)
{
    return val << 2;
}

/* Used for fp memory ops.  */
static int expand_shl3(int val)
{
    return val << 3;
}

/* Used for assemble_21.  */
static int expand_shl11(int val)
{
    return val << 11;
}


/* Include the auto-generated decoder.  */
#include "decode.inc.c"

/* We are not using a goto_tb (for whatever reason), but have updated
   the iaq (for whatever reason), so don't do it again on exit.  */
#define DISAS_IAQ_N_UPDATED  DISAS_TARGET_0

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the iaq for the next instruction to be executed.  */
#define DISAS_IAQ_N_STALE    DISAS_TARGET_1

/* Similarly, but we want to return to the main loop immediately
   to recognize unmasked interrupts.  */
#define DISAS_IAQ_N_STALE_EXIT      DISAS_TARGET_2
#define DISAS_EXIT                  DISAS_TARGET_3

/* global register indexes */
static TCGv_reg cpu_gr[32];
static TCGv_i64 cpu_sr[4];
static TCGv_i64 cpu_srH;
static TCGv_reg cpu_iaoq_f;
static TCGv_reg cpu_iaoq_b;
static TCGv_i64 cpu_iasq_f;
static TCGv_i64 cpu_iasq_b;
static TCGv_reg cpu_sar;
static TCGv_reg cpu_psw_n;
static TCGv_reg cpu_psw_v;
static TCGv_reg cpu_psw_cb;
static TCGv_reg cpu_psw_cb_msb;

#include "exec/gen-icount.h"

void hppa_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUHPPAState, V) }

    typedef struct { TCGv_reg *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
        DEF_VAR(psw_n),
        DEF_VAR(psw_v),
        DEF_VAR(psw_cb),
        DEF_VAR(psw_cb_msb),
        DEF_VAR(iaoq_f),
        DEF_VAR(iaoq_b),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char gr_names[32][4] = {
        "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
        "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
    };
    /* SR[4-7] are not global registers so that we can index them.  */
    static const char sr_names[5][4] = {
        "sr0", "sr1", "sr2", "sr3", "srH"
    };

    int i;

    cpu_gr[0] = NULL;
    for (i = 1; i < 32; i++) {
        cpu_gr[i] = tcg_global_mem_new(cpu_env,
                                       offsetof(CPUHPPAState, gr[i]),
                                       gr_names[i]);
    }
    for (i = 0; i < 4; i++) {
        cpu_sr[i] = tcg_global_mem_new_i64(cpu_env,
                                           offsetof(CPUHPPAState, sr[i]),
                                           sr_names[i]);
    }
    cpu_srH = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPUHPPAState, sr[4]),
                                     sr_names[4]);

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new(cpu_env, v->ofs, v->name);
    }

    cpu_iasq_f = tcg_global_mem_new_i64(cpu_env,
                                        offsetof(CPUHPPAState, iasq_f),
                                        "iasq_f");
    cpu_iasq_b = tcg_global_mem_new_i64(cpu_env,
                                        offsetof(CPUHPPAState, iasq_b),
                                        "iasq_b");
}

static DisasCond cond_make_f(void)
{
    return (DisasCond){
        .c = TCG_COND_NEVER,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_t(void)
{
    return (DisasCond){
        .c = TCG_COND_ALWAYS,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_n(void)
{
    return (DisasCond){
        .c = TCG_COND_NE,
        .a0 = cpu_psw_n,
        .a0_is_n = true,
        .a1 = NULL,
        .a1_is_0 = true
    };
}

static DisasCond cond_make_0_tmp(TCGCond c, TCGv_reg a0)
{
    assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    return (DisasCond){
        .c = c, .a0 = a0, .a1_is_0 = true
    };
}

static DisasCond cond_make_0(TCGCond c, TCGv_reg a0)
{
    TCGv_reg tmp = tcg_temp_new();
    tcg_gen_mov_reg(tmp, a0);
    return cond_make_0_tmp(c, tmp);
}

static DisasCond cond_make(TCGCond c, TCGv_reg a0, TCGv_reg a1)
{
    DisasCond r = { .c = c };

    assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    r.a0 = tcg_temp_new();
    tcg_gen_mov_reg(r.a0, a0);
    r.a1 = tcg_temp_new();
    tcg_gen_mov_reg(r.a1, a1);

    return r;
}

static void cond_prep(DisasCond *cond)
{
    if (cond->a1_is_0) {
        cond->a1_is_0 = false;
        cond->a1 = tcg_const_reg(0);
    }
}

static void cond_free(DisasCond *cond)
{
    switch (cond->c) {
    default:
        if (!cond->a0_is_n) {
            tcg_temp_free(cond->a0);
        }
        if (!cond->a1_is_0) {
            tcg_temp_free(cond->a1);
        }
        cond->a0_is_n = false;
        cond->a1_is_0 = false;
        cond->a0 = NULL;
        cond->a1 = NULL;
        /* fallthru */
    case TCG_COND_ALWAYS:
        cond->c = TCG_COND_NEVER;
        break;
    case TCG_COND_NEVER:
        break;
    }
}

static TCGv_reg get_temp(DisasContext *ctx)
{
    unsigned i = ctx->ntempr++;
    g_assert(i < ARRAY_SIZE(ctx->tempr));
    return ctx->tempr[i] = tcg_temp_new();
}

#ifndef CONFIG_USER_ONLY
static TCGv_tl get_temp_tl(DisasContext *ctx)
{
    unsigned i = ctx->ntempl++;
    g_assert(i < ARRAY_SIZE(ctx->templ));
    return ctx->templ[i] = tcg_temp_new_tl();
}
#endif

static TCGv_reg load_const(DisasContext *ctx, target_sreg v)
{
    TCGv_reg t = get_temp(ctx);
    tcg_gen_movi_reg(t, v);
    return t;
}

static TCGv_reg load_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0) {
        TCGv_reg t = get_temp(ctx);
        tcg_gen_movi_reg(t, 0);
        return t;
    } else {
        return cpu_gr[reg];
    }
}

static TCGv_reg dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
        return get_temp(ctx);
    } else {
        return cpu_gr[reg];
    }
}

static void save_or_nullify(DisasContext *ctx, TCGv_reg dest, TCGv_reg t)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        cond_prep(&ctx->null_cond);
        tcg_gen_movcond_reg(ctx->null_cond.c, dest, ctx->null_cond.a0,
                            ctx->null_cond.a1, dest, t);
    } else {
        tcg_gen_mov_reg(dest, t);
    }
}

static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_reg t)
{
    if (reg != 0) {
        save_or_nullify(ctx, cpu_gr[reg], t);
    }
}

#ifdef HOST_WORDS_BIGENDIAN
# define HI_OFS  0
# define LO_OFS  4
#else
# define HI_OFS  4
# define LO_OFS  0
#endif

static TCGv_i32 load_frw_i32(unsigned rt)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, cpu_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
    return ret;
}

static TCGv_i32 load_frw0_i32(unsigned rt)
{
    if (rt == 0) {
        return tcg_const_i32(0);
    } else {
        return load_frw_i32(rt);
    }
}

static TCGv_i64 load_frw0_i64(unsigned rt)
{
    if (rt == 0) {
        return tcg_const_i64(0);
    } else {
        TCGv_i64 ret = tcg_temp_new_i64();
        tcg_gen_ld32u_i64(ret, cpu_env,
                          offsetof(CPUHPPAState, fr[rt & 31])
                          + (rt & 32 ? LO_OFS : HI_OFS));
        return ret;
    }
}

static void save_frw_i32(unsigned rt, TCGv_i32 val)
{
    tcg_gen_st_i32(val, cpu_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
}

#undef HI_OFS
#undef LO_OFS

static TCGv_i64 load_frd(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, cpu_env, offsetof(CPUHPPAState, fr[rt]));
    return ret;
}

static TCGv_i64 load_frd0(unsigned rt)
{
    if (rt == 0) {
        return tcg_const_i64(0);
    } else {
        return load_frd(rt);
    }
}

static void save_frd(unsigned rt, TCGv_i64 val)
{
    tcg_gen_st_i64(val, cpu_env, offsetof(CPUHPPAState, fr[rt]));
}

static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
{
#ifdef CONFIG_USER_ONLY
    tcg_gen_movi_i64(dest, 0);
#else
    if (reg < 4) {
        tcg_gen_mov_i64(dest, cpu_sr[reg]);
    } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        tcg_gen_mov_i64(dest, cpu_srH);
    } else {
        tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUHPPAState, sr[reg]));
    }
#endif
}

/* Skip over the implementation of an insn that has been nullified.
   Use this when the insn is too complex for a conditional move.  */
static void nullify_over(DisasContext *ctx)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        /* The always condition should have been handled in the main loop.  */
        assert(ctx->null_cond.c != TCG_COND_ALWAYS);

        ctx->null_lab = gen_new_label();
        cond_prep(&ctx->null_cond);

        /* If we're using PSW[N], copy it to a temp because... */
        if (ctx->null_cond.a0_is_n) {
            ctx->null_cond.a0_is_n = false;
            ctx->null_cond.a0 = tcg_temp_new();
            tcg_gen_mov_reg(ctx->null_cond.a0, cpu_psw_n);
        }
        /* ... we clear it before branching over the implementation,
           so that (1) it's clear after nullifying this insn and
           (2) if this insn nullifies the next, PSW[N] is valid.  */
        if (ctx->psw_n_nonzero) {
            ctx->psw_n_nonzero = false;
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }

        tcg_gen_brcond_reg(ctx->null_cond.c, ctx->null_cond.a0,
                           ctx->null_cond.a1, ctx->null_lab);
        cond_free(&ctx->null_cond);
    }
}

/* Save the current nullification state to PSW[N].  */
static void nullify_save(DisasContext *ctx)
{
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (ctx->psw_n_nonzero) {
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }
        return;
    }
    if (!ctx->null_cond.a0_is_n) {
        cond_prep(&ctx->null_cond);
        tcg_gen_setcond_reg(ctx->null_cond.c, cpu_psw_n,
                            ctx->null_cond.a0, ctx->null_cond.a1);
        ctx->psw_n_nonzero = true;
    }
    cond_free(&ctx->null_cond);
}

/* Set PSW[N] to X.  The intention is that this is used immediately
   before a goto_tb/exit_tb, so that there is no fallthru path to other
   code within the TB.  Therefore we do not update psw_n_nonzero.  */
static void nullify_set(DisasContext *ctx, bool x)
{
    if (ctx->psw_n_nonzero || x) {
        tcg_gen_movi_reg(cpu_psw_n, x);
    }
}

/* Mark the end of an instruction that may have been nullified.
   This is the pair to nullify_over.  Always returns true so that
   it may be tail-called from a translate function.  */
static bool nullify_end(DisasContext *ctx)
{
    TCGLabel *null_lab = ctx->null_lab;
    DisasJumpType status = ctx->base.is_jmp;

    /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
       For UPDATED, we cannot update on the nullified path.  */
    assert(status != DISAS_IAQ_N_UPDATED);

    if (likely(null_lab == NULL)) {
        /* The current insn wasn't conditional or handled the condition
           applied to it without a branch, so the (new) setting of
           NULL_COND can be applied directly to the next insn.  */
        return true;
    }
    ctx->null_lab = NULL;

    if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
        /* The next instruction will be unconditional,
           and NULL_COND already reflects that.  */
        gen_set_label(null_lab);
    } else {
        /* The insn that we just executed is itself nullifying the next
           instruction.  Store the condition in the PSW[N] global.
           We asserted PSW[N] = 0 in nullify_over, so that after the
           label we have the proper value in place.  */
        nullify_save(ctx);
        gen_set_label(null_lab);
        ctx->null_cond = cond_make_n();
    }
    if (status == DISAS_NORETURN) {
        ctx->base.is_jmp = DISAS_NEXT;
    }
    return true;
}

static void copy_iaoq_entry(TCGv_reg dest, target_ureg ival, TCGv_reg vval)
{
    if (unlikely(ival == -1)) {
        tcg_gen_mov_reg(dest, vval);
    } else {
        tcg_gen_movi_reg(dest, ival);
    }
}

static inline target_ureg iaoq_dest(DisasContext *ctx, target_sreg disp)
{
    return ctx->iaoq_f + disp + 8;
}

static void gen_excp_1(int exception)
{
    TCGv_i32 t = tcg_const_i32(exception);
    gen_helper_excp(cpu_env, t);
    tcg_temp_free_i32(t);
}

static void gen_excp(DisasContext *ctx, int exception)
{
    copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
    copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
    nullify_save(ctx);
    gen_excp_1(exception);
    ctx->base.is_jmp = DISAS_NORETURN;
}

static bool gen_excp_iir(DisasContext *ctx, int exc)
{
    TCGv_reg tmp;

    nullify_over(ctx);
    tmp = tcg_const_reg(ctx->insn);
    tcg_gen_st_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[CR_IIR]));
    tcg_temp_free(tmp);
    gen_excp(ctx, exc);
    return nullify_end(ctx);
}

static bool gen_illegal(DisasContext *ctx)
{
    return gen_excp_iir(ctx, EXCP_ILL);
}

#ifdef CONFIG_USER_ONLY
#define CHECK_MOST_PRIVILEGED(EXCP) \
    return gen_excp_iir(ctx, EXCP)
#else
#define CHECK_MOST_PRIVILEGED(EXCP) \
    do {                                     \
        if (ctx->privilege != 0) {           \
            return gen_excp_iir(ctx, EXCP);  \
        }                                    \
    } while (0)
#endif

static bool use_goto_tb(DisasContext *ctx, target_ureg dest)
{
    /* Suppress goto_tb for page crossing, IO, or single-stepping.  */
    return !(((ctx->base.pc_first ^ dest) & TARGET_PAGE_MASK)
             || (tb_cflags(ctx->base.tb) & CF_LAST_IO)
             || ctx->base.singlestep_enabled);
}

/* If the next insn is to be nullified, and it's on the same page,
   and we're not attempting to set a breakpoint on it, then we can
   totally skip the nullified insn.  This avoids creating and
   executing a TB that merely branches to the next TB.  */
static bool use_nullify_skip(DisasContext *ctx)
{
    return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
            && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
}

static void gen_goto_tb(DisasContext *ctx, int which,
                        target_ureg f, target_ureg b)
{
    if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
        tcg_gen_goto_tb(which);
        tcg_gen_movi_reg(cpu_iaoq_f, f);
        tcg_gen_movi_reg(cpu_iaoq_b, b);
        tcg_gen_exit_tb(ctx->base.tb, which);
    } else {
        copy_iaoq_entry(cpu_iaoq_f, f, cpu_iaoq_b);
        copy_iaoq_entry(cpu_iaoq_b, b, ctx->iaoq_n_var);
        if (ctx->base.singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
    }
}

static bool cond_need_sv(int c)
{
    return c == 2 || c == 3 || c == 6;
}

static bool cond_need_cb(int c)
{
    return c == 4 || c == 5;
}

/*
 * Compute conditional for arithmetic.  See Page 5-3, Table 5-1, of
 * the PA-RISC 1.1 Architecture Reference Manual for details.
 */

static DisasCond do_cond(unsigned cf, TCGv_reg res,
                         TCGv_reg cb_msb, TCGv_reg sv)
{
    DisasCond cond;
    TCGv_reg tmp;

    switch (cf >> 1) {
    case 0: /* Never / TR    (0 / 1) */
        cond = cond_make_f();
        break;
    case 1: /* = / <>        (Z / !Z) */
        cond = cond_make_0(TCG_COND_EQ, res);
        break;
    case 2: /* < / >=        (N ^ V / !(N ^ V)) */
        tmp = tcg_temp_new();
        tcg_gen_xor_reg(tmp, res, sv);
        cond = cond_make_0_tmp(TCG_COND_LT, tmp);
        break;
    case 3: /* <= / >        (N ^ V) | Z / !((N ^ V) | Z) */
        /*
         * Simplify:
         *   (N ^ V) | Z
         *   ((res < 0) ^ (sv < 0)) | !res
         *   ((res ^ sv) < 0) | !res
         *   (~(res ^ sv) >= 0) | !res
         *   !(~(res ^ sv) >> 31) | !res
         *   !(~(res ^ sv) >> 31 & res)
         */
        tmp = tcg_temp_new();
        tcg_gen_eqv_reg(tmp, res, sv);
        tcg_gen_sari_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
        tcg_gen_and_reg(tmp, tmp, res);
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 4: /* NUV / UV      (!C / C) */
        cond = cond_make_0(TCG_COND_EQ, cb_msb);
        break;
    case 5: /* ZNV / VNZ     (!C | Z / C & !Z) */
        tmp = tcg_temp_new();
        tcg_gen_neg_reg(tmp, cb_msb);
        tcg_gen_and_reg(tmp, tmp, res);
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 6: /* SV / NSV      (V / !V) */
        cond = cond_make_0(TCG_COND_LT, sv);
        break;
    case 7: /* OD / EV */
        tmp = tcg_temp_new();
        tcg_gen_andi_reg(tmp, res, 1);
        cond = cond_make_0_tmp(TCG_COND_NE, tmp);
        break;
    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}
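
/* Note on the CF encoding used above: the low bit of CF selects between a
   test and its inverse, e.g. cf=2 tests Z (=) while cf=3 tests !Z (<>),
   with cf=0/1 the never/always pair.  */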

/* Similar, but for the special case of subtraction without borrow, we
   can use the inputs directly.  This can allow other computation to be
   deleted as unused.  */

static DisasCond do_sub_cond(unsigned cf, TCGv_reg res,
                             TCGv_reg in1, TCGv_reg in2, TCGv_reg sv)
{
    DisasCond cond;

    switch (cf >> 1) {
    case 1: /* = / <> */
        cond = cond_make(TCG_COND_EQ, in1, in2);
        break;
    case 2: /* < / >= */
        cond = cond_make(TCG_COND_LT, in1, in2);
        break;
    case 3: /* <= / > */
        cond = cond_make(TCG_COND_LE, in1, in2);
        break;
    case 4: /* << / >>= */
        cond = cond_make(TCG_COND_LTU, in1, in2);
        break;
    case 5: /* <<= / >> */
        cond = cond_make(TCG_COND_LEU, in1, in2);
        break;
    default:
        return do_cond(cf, res, NULL, sv);
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/*
 * Similar, but for logicals, where the carry and overflow bits are not
 * computed, and use of them is undefined.
 *
 * Undefined or not, hardware does not trap.  It seems reasonable to
 * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's
 * how cases c={2,3} are treated.
 */

static DisasCond do_log_cond(unsigned cf, TCGv_reg res)
{
    switch (cf) {
    case 0:  /* never */
    case 9:  /* undef, C */
    case 11: /* undef, C & !Z */
    case 12: /* undef, V */
        return cond_make_f();

    case 1:  /* true */
    case 8:  /* undef, !C */
    case 10: /* undef, !C | Z */
    case 13: /* undef, !V */
        return cond_make_t();

    case 2:  /* == */
        return cond_make_0(TCG_COND_EQ, res);
    case 3:  /* <> */
        return cond_make_0(TCG_COND_NE, res);
    case 4:  /* < */
        return cond_make_0(TCG_COND_LT, res);
    case 5:  /* >= */
        return cond_make_0(TCG_COND_GE, res);
    case 6:  /* <= */
        return cond_make_0(TCG_COND_LE, res);
    case 7:  /* > */
        return cond_make_0(TCG_COND_GT, res);

    case 14: /* OD */
    case 15: /* EV */
        return do_cond(cf, res, NULL, NULL);

    default:
        g_assert_not_reached();
    }
}

/* Similar, but for shift/extract/deposit conditions.  */

static DisasCond do_sed_cond(unsigned orig, TCGv_reg res)
{
    unsigned c, f;

    /* Convert the compressed condition codes to standard.
       0-2 are the same as logicals (nv,<,<=), while 3 is OD.
       4-7 are the reverse of 0-3.  */
    c = orig & 3;
    if (c == 3) {
        c = 7;
    }
    f = (orig & 4) / 4;

    return do_log_cond(c * 2 + f, res);
}
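
/* Worked example (illustrative): orig=3 yields c=7, f=0, i.e.
   do_log_cond(14), the OD test; orig=7 yields c=7, f=1, i.e.
   do_log_cond(15), EV.  */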

/* Similar, but for unit conditions.  */

static DisasCond do_unit_cond(unsigned cf, TCGv_reg res,
                              TCGv_reg in1, TCGv_reg in2)
{
    DisasCond cond;
    TCGv_reg tmp, cb = NULL;

    if (cf & 8) {
        /* Since we want to test lots of carry-out bits all at once, do not
         * do our normal thing and compute carry-in of bit B+1 since that
         * leaves us with carry bits spread across two words.
         */
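        /* Per-bit identity used below (full-adder carry out):
           carry_out(i) = (in1 & in2) | ((in1 | in2) & ~res), per bit i,
           which is exactly what the or/and/andc/or sequence builds in CB.  */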
        cb = tcg_temp_new();
        tmp = tcg_temp_new();
        tcg_gen_or_reg(cb, in1, in2);
        tcg_gen_and_reg(tmp, in1, in2);
        tcg_gen_andc_reg(cb, cb, res);
        tcg_gen_or_reg(cb, cb, tmp);
        tcg_temp_free(tmp);
    }

    switch (cf >> 1) {
    case 0: /* never / TR */
    case 1: /* undefined */
    case 5: /* undefined */
        cond = cond_make_f();
        break;

    case 2: /* SBZ / NBZ */
        /* See hasless(v,1) from
         * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
         */
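        /* Per byte B, (B - 1) & ~B & 0x80 is nonzero only for B == 0;
           a borrow ripples into the byte above only when the byte below
           was itself zero, so the combined test is nonzero exactly when
           RES contains a zero byte.  */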
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, 0x01010101u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        tcg_temp_free(tmp);
        break;

    case 3: /* SHZ / NHZ */
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, 0x00010001u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        tcg_temp_free(tmp);
        break;

    case 4: /* SDC / NDC */
        tcg_gen_andi_reg(cb, cb, 0x88888888u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 6: /* SBC / NBC */
        tcg_gen_andi_reg(cb, cb, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 7: /* SHC / NHC */
        tcg_gen_andi_reg(cb, cb, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    default:
        g_assert_not_reached();
    }
    if (cf & 8) {
        tcg_temp_free(cb);
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/* Compute signed overflow for addition.  */
static TCGv_reg do_add_sv(DisasContext *ctx, TCGv_reg res,
                          TCGv_reg in1, TCGv_reg in2)
{
    TCGv_reg sv = get_temp(ctx);
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_xor_reg(sv, res, in1);
    tcg_gen_xor_reg(tmp, in1, in2);
    tcg_gen_andc_reg(sv, sv, tmp);
    tcg_temp_free(tmp);

    return sv;
}
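
/* I.e. SV's sign bit is (res ^ in1) & ~(in1 ^ in2): signed overflow on
   addition occurs exactly when the operands have the same sign and the
   result's sign differs from them.  */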

/* Compute signed overflow for subtraction.  */
static TCGv_reg do_sub_sv(DisasContext *ctx, TCGv_reg res,
                          TCGv_reg in1, TCGv_reg in2)
{
    TCGv_reg sv = get_temp(ctx);
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_xor_reg(sv, res, in1);
    tcg_gen_xor_reg(tmp, in1, in2);
    tcg_gen_and_reg(sv, sv, tmp);
    tcg_temp_free(tmp);

    return sv;
}
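
/* Likewise, for subtraction the sign bit of (res ^ in1) & (in1 ^ in2) is
   set exactly when the operands' signs differ and the result's sign
   differs from IN1.  */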

static void do_add(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, unsigned shift, bool is_l,
                   bool is_tsv, bool is_tc, bool is_c, unsigned cf)
{
    TCGv_reg dest, cb, cb_msb, sv, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    cb = NULL;
    cb_msb = NULL;

    if (shift) {
        tmp = get_temp(ctx);
        tcg_gen_shli_reg(tmp, in1, shift);
        in1 = tmp;
    }

    if (!is_l || cond_need_cb(c)) {
        TCGv_reg zero = tcg_const_reg(0);
        cb_msb = get_temp(ctx);
        tcg_gen_add2_reg(dest, cb_msb, in1, zero, in2, zero);
        if (is_c) {
            tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cpu_psw_cb_msb, zero);
        }
        tcg_temp_free(zero);
        if (!is_l) {
            cb = get_temp(ctx);
            tcg_gen_xor_reg(cb, in1, in2);
            tcg_gen_xor_reg(cb, cb, dest);
        }
    } else {
        tcg_gen_add_reg(dest, in1, in2);
        if (is_c) {
            tcg_gen_add_reg(dest, dest, cpu_psw_cb_msb);
        }
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_add_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            /* ??? Need to include overflow from shift.  */
            gen_helper_tsv(cpu_env, sv);
        }
    }

    /* Emit any conditional trap before any writeback.  */
    cond = do_cond(cf, dest, cb_msb, sv);
    if (is_tc) {
        cond_prep(&cond);
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(cpu_env, tmp);
        tcg_temp_free(tmp);
    }

    /* Write back the result.  */
    if (!is_l) {
        save_or_nullify(ctx, cpu_psw_cb, cb);
        save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    }
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_sh *a,
                       bool is_l, bool is_tsv, bool is_tc, bool is_c)
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l, is_tsv, is_tc, is_c, a->cf);
    return nullify_end(ctx);
}

static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a,
                       bool is_tsv, bool is_tc)
{
    TCGv_reg tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = load_const(ctx, a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf);
    return nullify_end(ctx);
}

static void do_sub(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, bool is_tsv, bool is_b,
                   bool is_tc, unsigned cf)
{
    TCGv_reg dest, sv, cb, cb_msb, zero, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    cb = tcg_temp_new();
    cb_msb = tcg_temp_new();

    zero = tcg_const_reg(0);
    if (is_b) {
        /* DEST,C = IN1 + ~IN2 + C.  */
        tcg_gen_not_reg(cb, in2);
        tcg_gen_add2_reg(dest, cb_msb, in1, zero, cpu_psw_cb_msb, zero);
        tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cb, zero);
        tcg_gen_xor_reg(cb, cb, in1);
        tcg_gen_xor_reg(cb, cb, dest);
    } else {
        /* DEST,C = IN1 + ~IN2 + 1.  We can produce the same result in fewer
           operations by seeding the high word with 1 and subtracting.  */
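        /* That is, the double-word {1, IN1} minus {0, IN2}: the low word
           becomes IN1 - IN2 while the high word becomes 1 - borrow, which
           equals the carry out of IN1 + ~IN2 + 1.  */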
        tcg_gen_movi_reg(cb_msb, 1);
        tcg_gen_sub2_reg(dest, cb_msb, in1, cb_msb, in2, zero);
        tcg_gen_eqv_reg(cb, in1, in2);
        tcg_gen_xor_reg(cb, cb, dest);
    }
    tcg_temp_free(zero);

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            gen_helper_tsv(cpu_env, sv);
        }
    }

    /* Compute the condition.  We cannot use the special case for borrow.  */
    if (!is_b) {
        cond = do_sub_cond(cf, dest, in1, in2, sv);
    } else {
        cond = do_cond(cf, dest, cb_msb, sv);
    }

    /* Emit any conditional trap before any writeback.  */
    if (is_tc) {
        cond_prep(&cond);
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(cpu_env, tmp);
        tcg_temp_free(tmp);
    }

    /* Write back the result.  */
    save_or_nullify(ctx, cpu_psw_cb, cb);
    save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf *a,
                       bool is_tsv, bool is_b, bool is_tc)
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf);
    return nullify_end(ctx);
}

static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv)
{
    TCGv_reg tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = load_const(ctx, a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf);
    return nullify_end(ctx);
}

static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                      TCGv_reg in2, unsigned cf)
{
    TCGv_reg dest, sv;
    DisasCond cond;

    dest = tcg_temp_new();
    tcg_gen_sub_reg(dest, in1, in2);

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (cond_need_sv(cf >> 1)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    /* Form the condition for the compare.  */
    cond = do_sub_cond(cf, dest, in1, in2, sv);

    /* Clear.  */
    tcg_gen_movi_reg(dest, 0);
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static void do_log(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, unsigned cf,
                   void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg dest = dest_gpr(ctx, rt);

    /* Perform the operation, and writeback.  */
    fn(dest, in1, in2);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (cf) {
        ctx->null_cond = do_log_cond(cf, dest);
    }
}

static bool do_log_reg(DisasContext *ctx, arg_rrr_cf *a,
                       void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, fn);
    return nullify_end(ctx);
}

static void do_unit(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                    TCGv_reg in2, unsigned cf, bool is_tc,
                    void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg dest;
    DisasCond cond;

    if (cf == 0) {
        dest = dest_gpr(ctx, rt);
        fn(dest, in1, in2);
        save_gpr(ctx, rt, dest);
        cond_free(&ctx->null_cond);
    } else {
        dest = tcg_temp_new();
        fn(dest, in1, in2);

        cond = do_unit_cond(cf, dest, in1, in2);

        if (is_tc) {
            TCGv_reg tmp = tcg_temp_new();
            cond_prep(&cond);
            tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
            gen_helper_tcond(cpu_env, tmp);
            tcg_temp_free(tmp);
        }
        save_gpr(ctx, rt, dest);

        cond_free(&ctx->null_cond);
        ctx->null_cond = cond;
    }
}

#ifndef CONFIG_USER_ONLY
/* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
   from the top 2 bits of the base register.  There are a few system
   instructions that have a 3-bit space specifier, for which SR0 is
   not special.  To handle this, pass ~SP.  */
static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_reg base)
{
    TCGv_ptr ptr;
    TCGv_reg tmp;
    TCGv_i64 spc;

    if (sp != 0) {
        if (sp < 0) {
            sp = ~sp;
        }
        spc = get_temp_tl(ctx);
        load_spr(ctx, spc, sp);
        return spc;
    }
    if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        return cpu_srH;
    }

    ptr = tcg_temp_new_ptr();
    tmp = tcg_temp_new();
    spc = get_temp_tl(ctx);

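    /* Extract the top two bits of BASE as a byte offset into the i64
       sr[] array (top2 * 8, via the shift and the 030 mask), so that the
       load below fetches sr[4 + top2]; SR4-SR7 follow sr[4] in memory.  */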
    tcg_gen_shri_reg(tmp, base, TARGET_REGISTER_BITS - 5);
    tcg_gen_andi_reg(tmp, tmp, 030);
    tcg_gen_trunc_reg_ptr(ptr, tmp);
    tcg_temp_free(tmp);

    tcg_gen_add_ptr(ptr, ptr, cpu_env);
    tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));
    tcg_temp_free_ptr(ptr);

    return spc;
}
#endif

static void form_gva(DisasContext *ctx, TCGv_tl *pgva, TCGv_reg *pofs,
                     unsigned rb, unsigned rx, int scale, target_sreg disp,
                     unsigned sp, int modify, bool is_phys)
{
    TCGv_reg base = load_gpr(ctx, rb);
    TCGv_reg ofs;

    /* Note that RX is mutually exclusive with DISP.  */
    if (rx) {
        ofs = get_temp(ctx);
        tcg_gen_shli_reg(ofs, cpu_gr[rx], scale);
        tcg_gen_add_reg(ofs, ofs, base);
    } else if (disp || modify) {
        ofs = get_temp(ctx);
        tcg_gen_addi_reg(ofs, base, disp);
    } else {
        ofs = base;
    }

    *pofs = ofs;
#ifdef CONFIG_USER_ONLY
    *pgva = (modify <= 0 ? ofs : base);
#else
    TCGv_tl addr = get_temp_tl(ctx);
    tcg_gen_extu_reg_tl(addr, modify <= 0 ? ofs : base);
    if (ctx->tb_flags & PSW_W) {
        tcg_gen_andi_tl(addr, addr, 0x3fffffffffffffffull);
    }
    if (!is_phys) {
        tcg_gen_or_tl(addr, addr, space_select(ctx, sp, base));
    }
    *pgva = addr;
#endif
}
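
/* Note (an assumption made explicit): the OR that forms the address above
   works because the space values returned by space_select keep the space
   id in the upper bits of the i64, above the offset bits.  */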

/* Emit a memory load.  The modify parameter should be
 * < 0 for pre-modify,
 * > 0 for post-modify,
 * = 0 for no base register update.
 */
static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify, TCGMemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop);
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify, TCGMemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop);
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
                        unsigned rx, int scale, target_sreg disp,
                        unsigned sp, int modify, TCGMemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop);
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
                        unsigned rx, int scale, target_sreg disp,
                        unsigned sp, int modify, TCGMemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop);
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

#if TARGET_REGISTER_BITS == 64
#define do_load_reg   do_load_64
#define do_store_reg  do_store_64
#else
#define do_load_reg   do_load_32
#define do_store_reg  do_store_32
#endif

static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
                    unsigned rx, int scale, target_sreg disp,
                    unsigned sp, int modify, TCGMemOp mop)
{
    TCGv_reg dest;

    nullify_over(ctx);

    if (modify == 0) {
        /* No base register update.  */
        dest = dest_gpr(ctx, rt);
    } else {
        /* Make sure if RT == RB, we see the result of the load.  */
        dest = get_temp(ctx);
    }
    do_load_reg(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
    save_gpr(ctx, rt, dest);

    return nullify_end(ctx);
}

static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, target_sreg disp,
                      unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i32();
    do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
    save_frw_i32(rt, tmp);
    tcg_temp_free_i32(tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(cpu_env);
    }

    return nullify_end(ctx);
}

static bool trans_fldw(DisasContext *ctx, arg_ldst *a)
{
    return do_floadw(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
                     a->disp, a->sp, a->m);
}

static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, target_sreg disp,
                      unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEQ);
    save_frd(rt, tmp);
    tcg_temp_free_i64(tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(cpu_env);
    }

    return nullify_end(ctx);
}

static bool trans_fldd(DisasContext *ctx, arg_ldst *a)
{
    return do_floadd(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
                     a->disp, a->sp, a->m);
}

static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
                     target_sreg disp, unsigned sp,
                     int modify, TCGMemOp mop)
{
    nullify_over(ctx);
    do_store_reg(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
    return nullify_end(ctx);
}

static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = load_frw_i32(rt);
    do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
    tcg_temp_free_i32(tmp);

    return nullify_end(ctx);
}

static bool trans_fstw(DisasContext *ctx, arg_ldst *a)
{
    return do_fstorew(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
                      a->disp, a->sp, a->m);
}

static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = load_frd(rt);
    do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEQ);
    tcg_temp_free_i64(tmp);

    return nullify_end(ctx);
}

static bool trans_fstd(DisasContext *ctx, arg_ldst *a)
{
    return do_fstored(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
                      a->disp, a->sp, a->m);
}

static bool do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
{
    TCGv_i32 tmp;

    nullify_over(ctx);
    tmp = load_frw0_i32(ra);

    func(tmp, cpu_env, tmp);

    save_frw_i32(rt, tmp);
    tcg_temp_free_i32(tmp);
    return nullify_end(ctx);
}

static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    nullify_over(ctx);
    src = load_frd(ra);
    dst = tcg_temp_new_i32();

    func(dst, cpu_env, src);

    tcg_temp_free_i64(src);
    save_frw_i32(rt, dst);
    tcg_temp_free_i32(dst);
    return nullify_end(ctx);
}

static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
{
    TCGv_i64 tmp;

    nullify_over(ctx);
    tmp = load_frd0(ra);

    func(tmp, cpu_env, tmp);

    save_frd(rt, tmp);
    tcg_temp_free_i64(tmp);
    return nullify_end(ctx);
}

static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
{
    TCGv_i32 src;
    TCGv_i64 dst;

    nullify_over(ctx);
    src = load_frw0_i32(ra);
    dst = tcg_temp_new_i64();

    func(dst, cpu_env, src);

    tcg_temp_free_i32(src);
    save_frd(rt, dst);
    tcg_temp_free_i64(dst);
    return nullify_end(ctx);
}

static bool do_fop_weww(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
{
    TCGv_i32 a, b;

    nullify_over(ctx);
    a = load_frw0_i32(ra);
    b = load_frw0_i32(rb);

    func(a, cpu_env, a, b);

    tcg_temp_free_i32(b);
    save_frw_i32(rt, a);
    tcg_temp_free_i32(a);
    return nullify_end(ctx);
}

static bool do_fop_dedd(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
{
    TCGv_i64 a, b;

    nullify_over(ctx);
    a = load_frd0(ra);
    b = load_frd0(rb);

    func(a, cpu_env, a, b);

    tcg_temp_free_i64(b);
    save_frd(rt, a);
    tcg_temp_free_i64(a);
    return nullify_end(ctx);
}

/* Emit an unconditional branch to a direct target, which may or may not
   have already had nullification handled.  */
static bool do_dbranch(DisasContext *ctx, target_ureg dest,
                       unsigned link, bool is_n)
{
    if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        ctx->iaoq_n = dest;
        if (is_n) {
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
    } else {
        nullify_over(ctx);

        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }

        if (is_n && use_nullify_skip(ctx)) {
            nullify_set(ctx, 0);
            gen_goto_tb(ctx, 0, dest, dest + 4);
        } else {
            nullify_set(ctx, is_n);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
        }
1835        nullify_end(ctx);
1836
1837        nullify_set(ctx, 0);
1838        gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
1839        ctx->base.is_jmp = DISAS_NORETURN;
1840    }
1841    return true;
1842}
1843
1844/* Emit a conditional branch to a direct target.  If the branch itself
1845   is nullified, we should have already used nullify_over.  */
1846static bool do_cbranch(DisasContext *ctx, target_sreg disp, bool is_n,
1847                       DisasCond *cond)
1848{
1849    target_ureg dest = iaoq_dest(ctx, disp);
1850    TCGLabel *taken = NULL;
1851    TCGCond c = cond->c;
1852    bool n;
1853
1854    assert(ctx->null_cond.c == TCG_COND_NEVER);
1855
1856    /* Handle ALWAYS (TRUE) and NEVER as direct branches.  */
1857    if (c == TCG_COND_ALWAYS) {
1858        return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
1859    }
1860    if (c == TCG_COND_NEVER) {
1861        return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
1862    }
1863
1864    taken = gen_new_label();
1865    cond_prep(cond);
1866    tcg_gen_brcond_reg(c, cond->a0, cond->a1, taken);
1867    cond_free(cond);
1868
1869    /* Not taken: Condition not satisfied; nullify on backward branches. */
1870    n = is_n && disp < 0;
1871    if (n && use_nullify_skip(ctx)) {
1872        nullify_set(ctx, 0);
1873        gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
1874    } else {
1875        if (!n && ctx->null_lab) {
1876            gen_set_label(ctx->null_lab);
1877            ctx->null_lab = NULL;
1878        }
1879        nullify_set(ctx, n);
1880        if (ctx->iaoq_n == -1) {
1881            /* The temporary iaoq_n_var died at the branch above.
1882               Regenerate it here instead of saving it.  */
1883            tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
1884        }
1885        gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
1886    }
1887
1888    gen_set_label(taken);
1889
1890    /* Taken: Condition satisfied; nullify on forward branches.  */
1891    n = is_n && disp >= 0;
1892    if (n && use_nullify_skip(ctx)) {
1893        nullify_set(ctx, 0);
1894        gen_goto_tb(ctx, 1, dest, dest + 4);
1895    } else {
1896        nullify_set(ctx, n);
1897        gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
1898    }
1899
1900    /* Not taken: the branch itself was nullified.  */
1901    if (ctx->null_lab) {
1902        gen_set_label(ctx->null_lab);
1903        ctx->null_lab = NULL;
1904        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
1905    } else {
1906        ctx->base.is_jmp = DISAS_NORETURN;
1907    }
1908    return true;
1909}
1910
1911/* Emit an unconditional branch to an indirect target.  This handles
1912   nullification of the branch itself.  */
1913static bool do_ibranch(DisasContext *ctx, TCGv_reg dest,
1914                       unsigned link, bool is_n)
1915{
1916    TCGv_reg a0, a1, next, tmp;
1917    TCGCond c;
1918
1919    assert(ctx->null_lab == NULL);
1920
1921    if (ctx->null_cond.c == TCG_COND_NEVER) {
1922        if (link != 0) {
1923            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1924        }
1925        next = get_temp(ctx);
1926        tcg_gen_mov_reg(next, dest);
1927        if (is_n) {
1928            if (use_nullify_skip(ctx)) {
1929                tcg_gen_mov_reg(cpu_iaoq_f, next);
1930                tcg_gen_addi_reg(cpu_iaoq_b, next, 4);
1931                nullify_set(ctx, 0);
1932                ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
1933                return true;
1934            }
1935            ctx->null_cond.c = TCG_COND_ALWAYS;
1936        }
1937        ctx->iaoq_n = -1;
1938        ctx->iaoq_n_var = next;
1939    } else if (is_n && use_nullify_skip(ctx)) {
1940        /* The (conditional) branch, B, nullifies the next insn, N,
1941           and we're allowed to skip execution of N (no single-step or
1942           tracepoint in effect).  Since the goto_ptr that we must use
1943           for the indirect branch consumes no special resources, we
1944           can (conditionally) skip B and continue execution.  */
1945        /* The use_nullify_skip test implies we have a known control path.  */
1946        tcg_debug_assert(ctx->iaoq_b != -1);
1947        tcg_debug_assert(ctx->iaoq_n != -1);
1948
1949        /* We do have to handle the non-local temporary, DEST, before
1950           branching.  Since IAOQ_F is not really live at this point, we
1951           can simply store DEST optimistically.  Similarly with IAOQ_B.  */
1952        tcg_gen_mov_reg(cpu_iaoq_f, dest);
1953        tcg_gen_addi_reg(cpu_iaoq_b, dest, 4);
1954
1955        nullify_over(ctx);
1956        if (link != 0) {
1957            tcg_gen_movi_reg(cpu_gr[link], ctx->iaoq_n);
1958        }
1959        tcg_gen_lookup_and_goto_ptr();
1960        return nullify_end(ctx);
1961    } else {
1962        cond_prep(&ctx->null_cond);
1963        c = ctx->null_cond.c;
1964        a0 = ctx->null_cond.a0;
1965        a1 = ctx->null_cond.a1;
1966
1967        tmp = tcg_temp_new();
1968        next = get_temp(ctx);
1969
1970        copy_iaoq_entry(tmp, ctx->iaoq_n, ctx->iaoq_n_var);
1971        tcg_gen_movcond_reg(c, next, a0, a1, tmp, dest);
1972        ctx->iaoq_n = -1;
1973        ctx->iaoq_n_var = next;
1974
1975        if (link != 0) {
1976            tcg_gen_movcond_reg(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
1977        }
1978
1979        if (is_n) {
1980            /* The branch nullifies the next insn, which means the state of N
1981               after the branch is the inverse of the state of N that applied
1982               to the branch.  */
1983            tcg_gen_setcond_reg(tcg_invert_cond(c), cpu_psw_n, a0, a1);
1984            cond_free(&ctx->null_cond);
1985            ctx->null_cond = cond_make_n();
1986            ctx->psw_n_nonzero = true;
1987        } else {
1988            cond_free(&ctx->null_cond);
1989        }
1990    }
1991    return true;
1992}
1993
1994/* Implement
1995 *    if (IAOQ_Front{30..31} < GR[b]{30..31})
1996 *      IAOQ_Next{30..31} ← GR[b]{30..31};
1997 *    else
1998 *      IAOQ_Next{30..31} ← IAOQ_Front{30..31};
1999 * which keeps the privilege level from being increased.
2000 */
2001static TCGv_reg do_ibranch_priv(DisasContext *ctx, TCGv_reg offset)
2002{
2003    TCGv_reg dest;
2004    switch (ctx->privilege) {
2005    case 0:
2006        /* Privilege 0 is maximum and is allowed to decrease.  */
2007        return offset;
2008    case 3:
2009        /* Privilege 3 is minimum and is never allowed to increase.  */
2010        dest = get_temp(ctx);
2011        tcg_gen_ori_reg(dest, offset, 3);
2012        break;
2013    default:
2014        dest = get_temp(ctx);
2015        tcg_gen_andi_reg(dest, offset, -4);
2016        tcg_gen_ori_reg(dest, dest, ctx->privilege);
2017        tcg_gen_movcond_reg(TCG_COND_GTU, dest, dest, offset, dest, offset);
2018        break;
2019    }
2020    return dest;
2021}
2022
2023#ifdef CONFIG_USER_ONLY
2024/* On Linux, page zero is normally marked execute only + gateway.
2025   Therefore normal read or write is supposed to fail, but specific
2026   offsets have kernel code mapped to raise permissions to implement
2027   system calls.  Handling this via an explicit check here, rather
2028   than in the "be disp(sr2,r0)" instruction that probably sent us
2029   here, is the easiest way to handle the branch delay slot on the
2030   aforementioned BE.  */
2031static void do_page_zero(DisasContext *ctx)
2032{
2033    /* If by some means we get here with PSW[N]=1, that implies that
2034       the B,GATE instruction would be skipped, and we'd fault on the
2035       next insn within the privileged page.  */
2036    switch (ctx->null_cond.c) {
2037    case TCG_COND_NEVER:
2038        break;
2039    case TCG_COND_ALWAYS:
2040        tcg_gen_movi_reg(cpu_psw_n, 0);
2041        goto do_sigill;
2042    default:
2043        /* Since this is always the first (and only) insn within the
2044           TB, we should know the state of PSW[N] from TB->FLAGS.  */
2045        g_assert_not_reached();
2046    }
2047
2048    /* Check that we didn't arrive here via some means that allowed
2049       non-sequential instruction execution.  Normally the PSW[B] bit
2050       detects this by disallowing the B,GATE instruction to execute
2051       under such conditions.  */
2052    if (ctx->iaoq_b != ctx->iaoq_f + 4) {
2053        goto do_sigill;
2054    }
2055
2056    switch (ctx->iaoq_f & -4) {
2057    case 0x00: /* Null pointer call */
2058        gen_excp_1(EXCP_IMP);
2059        ctx->base.is_jmp = DISAS_NORETURN;
2060        break;
2061
2062    case 0xb0: /* LWS */
2063        gen_excp_1(EXCP_SYSCALL_LWS);
2064        ctx->base.is_jmp = DISAS_NORETURN;
2065        break;
2066
2067    case 0xe0: /* SET_THREAD_POINTER */
2068        tcg_gen_st_reg(cpu_gr[26], cpu_env, offsetof(CPUHPPAState, cr[27]));
2069        tcg_gen_ori_reg(cpu_iaoq_f, cpu_gr[31], 3);
2070        tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
2071        ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
2072        break;
2073
2074    case 0x100: /* SYSCALL */
2075        gen_excp_1(EXCP_SYSCALL);
2076        ctx->base.is_jmp = DISAS_NORETURN;
2077        break;
2078
2079    default:
2080    do_sigill:
2081        gen_excp_1(EXCP_ILL);
2082        ctx->base.is_jmp = DISAS_NORETURN;
2083        break;
2084    }
2085}
2086#endif
2087
2088static bool trans_nop(DisasContext *ctx, arg_nop *a)
2089{
2090    cond_free(&ctx->null_cond);
2091    return true;
2092}
2093
2094static bool trans_break(DisasContext *ctx, arg_break *a)
2095{
2096    return gen_excp_iir(ctx, EXCP_BREAK);
2097}
2098
2099static bool trans_sync(DisasContext *ctx, arg_sync *a)
2100{
2101    /* No point in nullifying the memory barrier.  */
2102    tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
2103
2104    cond_free(&ctx->null_cond);
2105    return true;
2106}
2107
2108static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
2109{
2110    unsigned rt = a->t;
2111    TCGv_reg tmp = dest_gpr(ctx, rt);
2112    tcg_gen_movi_reg(tmp, ctx->iaoq_f);
2113    save_gpr(ctx, rt, tmp);
2114
2115    cond_free(&ctx->null_cond);
2116    return true;
2117}
2118
2119static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
2120{
2121    unsigned rt = a->t;
2122    unsigned rs = a->sp;
2123    TCGv_i64 t0 = tcg_temp_new_i64();
2124    TCGv_reg t1 = tcg_temp_new();
2125
2126    load_spr(ctx, t0, rs);
2127    tcg_gen_shri_i64(t0, t0, 32);
2128    tcg_gen_trunc_i64_reg(t1, t0);
2129
2130    save_gpr(ctx, rt, t1);
2131    tcg_temp_free(t1);
2132    tcg_temp_free_i64(t0);
2133
2134    cond_free(&ctx->null_cond);
2135    return true;
2136}
2137
2138static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
2139{
2140    unsigned rt = a->t;
2141    unsigned ctl = a->r;
2142    TCGv_reg tmp;
2143
2144    switch (ctl) {
2145    case CR_SAR:
2146#ifdef TARGET_HPPA64
2147        if (a->e == 0) {
2148            /* MFSAR without ,W masks low 5 bits.  */
2149            tmp = dest_gpr(ctx, rt);
2150            tcg_gen_andi_reg(tmp, cpu_sar, 31);
2151            save_gpr(ctx, rt, tmp);
2152            goto done;
2153        }
2154#endif
2155        save_gpr(ctx, rt, cpu_sar);
2156        goto done;
2157    case CR_IT: /* Interval Timer */
2158        /* FIXME: Respect PSW_S bit.  */
2159        nullify_over(ctx);
2160        tmp = dest_gpr(ctx, rt);
2161        if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
2162            gen_io_start();
2163            gen_helper_read_interval_timer(tmp);
2164            gen_io_end();
2165            ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2166        } else {
2167            gen_helper_read_interval_timer(tmp);
2168        }
2169        save_gpr(ctx, rt, tmp);
2170        return nullify_end(ctx);
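    /* CR26 and CR27 are specified as readable at any privilege level;
       Linux uses CR27 as the TLS pointer (cf. the SET_THREAD_POINTER
       entry in do_page_zero), so no privilege check is made here.  */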
2171    case 26:
2172    case 27:
2173        break;
2174    default:
2175        /* All other control registers are privileged.  */
2176        CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2177        break;
2178    }
2179
2180    tmp = get_temp(ctx);
2181    tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2182    save_gpr(ctx, rt, tmp);
2183
2184 done:
2185    cond_free(&ctx->null_cond);
2186    return true;
2187}
2188
2189static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
2190{
2191    unsigned rr = a->r;
2192    unsigned rs = a->sp;
2193    TCGv_i64 t64;
2194
2195    if (rs >= 5) {
2196        CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2197    }
2198    nullify_over(ctx);
2199
2200    t64 = tcg_temp_new_i64();
2201    tcg_gen_extu_reg_i64(t64, load_gpr(ctx, rr));
2202    tcg_gen_shli_i64(t64, t64, 32);
2203
2204    if (rs >= 4) {
2205        tcg_gen_st_i64(t64, cpu_env, offsetof(CPUHPPAState, sr[rs]));
2206        ctx->tb_flags &= ~TB_FLAG_SR_SAME;
2207    } else {
2208        tcg_gen_mov_i64(cpu_sr[rs], t64);
2209    }
2210    tcg_temp_free_i64(t64);
2211
2212    return nullify_end(ctx);
2213}
2214
2215static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
2216{
2217    unsigned ctl = a->t;
2218    TCGv_reg reg = load_gpr(ctx, a->r);
2219    TCGv_reg tmp;
2220
2221    if (ctl == CR_SAR) {
2222        tmp = tcg_temp_new();
2223        tcg_gen_andi_reg(tmp, reg, TARGET_REGISTER_BITS - 1);
2224        save_or_nullify(ctx, cpu_sar, tmp);
2225        tcg_temp_free(tmp);
2226
2227        cond_free(&ctx->null_cond);
2228        return true;
2229    }
2230
2231    /* All other control registers are privileged or read-only.  */
2232    CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2233
2234#ifndef CONFIG_USER_ONLY
2235    nullify_over(ctx);
2236    switch (ctl) {
2237    case CR_IT:
2238        gen_helper_write_interval_timer(cpu_env, reg);
2239        break;
2240    case CR_EIRR:
2241        gen_helper_write_eirr(cpu_env, reg);
2242        break;
2243    case CR_EIEM:
2244        gen_helper_write_eiem(cpu_env, reg);
2245        ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2246        break;
2247
2248    case CR_IIASQ:
2249    case CR_IIAOQ:
2250        /* FIXME: Respect PSW_Q bit */
2251        /* The write advances the queue and stores to the back element.  */
2252        tmp = get_temp(ctx);
2253        tcg_gen_ld_reg(tmp, cpu_env,
2254                       offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2255        tcg_gen_st_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2256        tcg_gen_st_reg(reg, cpu_env,
2257                       offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2258        break;
2259
2260    case CR_PID1:
2261    case CR_PID2:
2262    case CR_PID3:
2263    case CR_PID4:
2264        tcg_gen_st_reg(reg, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2265#ifndef CONFIG_USER_ONLY
2266        gen_helper_change_prot_id(cpu_env);
2267#endif
2268        break;
2269
2270    default:
2271        tcg_gen_st_reg(reg, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2272        break;
2273    }
2274    return nullify_end(ctx);
2275#endif
2276}
2277
2278static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
2279{
2280    TCGv_reg tmp = tcg_temp_new();
2281
2282    tcg_gen_not_reg(tmp, load_gpr(ctx, a->r));
2283    tcg_gen_andi_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
2284    save_or_nullify(ctx, cpu_sar, tmp);
2285    tcg_temp_free(tmp);
2286
2287    cond_free(&ctx->null_cond);
2288    return true;
2289}
2290
2291static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a)
2292{
2293    TCGv_reg dest = dest_gpr(ctx, a->t);
2294
2295#ifdef CONFIG_USER_ONLY
2296    /* We don't implement space registers in user mode. */
2297    tcg_gen_movi_reg(dest, 0);
2298#else
2299    TCGv_i64 t0 = tcg_temp_new_i64();
2300
2301    tcg_gen_mov_i64(t0, space_select(ctx, a->sp, load_gpr(ctx, a->b)));
2302    tcg_gen_shri_i64(t0, t0, 32);
2303    tcg_gen_trunc_i64_reg(dest, t0);
2304
2305    tcg_temp_free_i64(t0);
2306#endif
2307    save_gpr(ctx, a->t, dest);
2308
2309    cond_free(&ctx->null_cond);
2310    return true;
2311}
2312
2313static bool trans_rsm(DisasContext *ctx, arg_rsm *a)
2314{
2315    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2316#ifndef CONFIG_USER_ONLY
2317    TCGv_reg tmp;
2318
2319    nullify_over(ctx);
2320
2321    tmp = get_temp(ctx);
2322    tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw));
2323    tcg_gen_andi_reg(tmp, tmp, ~a->i);
2324    gen_helper_swap_system_mask(tmp, cpu_env, tmp);
2325    save_gpr(ctx, a->t, tmp);
2326
2327    /* Exit the TB to recognize new interrupts, e.g. PSW_M.  */
2328    ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2329    return nullify_end(ctx);
2330#endif
2331}
2332
2333static bool trans_ssm(DisasContext *ctx, arg_ssm *a)
2334{
2335    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2336#ifndef CONFIG_USER_ONLY
2337    TCGv_reg tmp;
2338
2339    nullify_over(ctx);
2340
2341    tmp = get_temp(ctx);
2342    tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw));
2343    tcg_gen_ori_reg(tmp, tmp, a->i);
2344    gen_helper_swap_system_mask(tmp, cpu_env, tmp);
2345    save_gpr(ctx, a->t, tmp);
2346
2347    /* Exit the TB to recognize new interrupts, e.g. PSW_I.  */
2348    ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2349    return nullify_end(ctx);
2350#endif
2351}
2352
2353static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a)
2354{
2355    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2356#ifndef CONFIG_USER_ONLY
2357    TCGv_reg tmp, reg;
2358    nullify_over(ctx);
2359
2360    reg = load_gpr(ctx, a->r);
2361    tmp = get_temp(ctx);
2362    gen_helper_swap_system_mask(tmp, cpu_env, reg);
2363
2364    /* Exit the TB to recognize new interrupts.  */
2365    ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2366    return nullify_end(ctx);
2367#endif
2368}
2369
2370static bool do_rfi(DisasContext *ctx, bool rfi_r)
2371{
2372    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2373#ifndef CONFIG_USER_ONLY
2374    nullify_over(ctx);
2375
2376    if (rfi_r) {
2377        gen_helper_rfi_r(cpu_env);
2378    } else {
2379        gen_helper_rfi(cpu_env);
2380    }
2381    /* Exit the TB to recognize new interrupts.  */
2382    if (ctx->base.singlestep_enabled) {
2383        gen_excp_1(EXCP_DEBUG);
2384    } else {
2385        tcg_gen_exit_tb(NULL, 0);
2386    }
2387    ctx->base.is_jmp = DISAS_NORETURN;
2388
2389    return nullify_end(ctx);
2390#endif
2391}
2392
2393static bool trans_rfi(DisasContext *ctx, arg_rfi *a)
2394{
2395    return do_rfi(ctx, false);
2396}
2397
2398static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a)
2399{
2400    return do_rfi(ctx, true);
2401}
2402
2403static bool trans_halt(DisasContext *ctx, arg_halt *a)
2404{
2405    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2406#ifndef CONFIG_USER_ONLY
2407    nullify_over(ctx);
2408    gen_helper_halt(cpu_env);
2409    ctx->base.is_jmp = DISAS_NORETURN;
2410    return nullify_end(ctx);
2411#endif
2412}
2413
2414static bool trans_reset(DisasContext *ctx, arg_reset *a)
2415{
2416    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2417#ifndef CONFIG_USER_ONLY
2418    nullify_over(ctx);
2419    gen_helper_reset(cpu_env);
2420    ctx->base.is_jmp = DISAS_NORETURN;
2421    return nullify_end(ctx);
2422#endif
2423}
2424
2425static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
2426{
2427    if (a->m) {
2428        TCGv_reg dest = dest_gpr(ctx, a->b);
2429        TCGv_reg src1 = load_gpr(ctx, a->b);
2430        TCGv_reg src2 = load_gpr(ctx, a->x);
2431
2432        /* The only thing we need to do is the base register modification.  */
2433        tcg_gen_add_reg(dest, src1, src2);
2434        save_gpr(ctx, a->b, dest);
2435    }
2436    cond_free(&ctx->null_cond);
2437    return true;
2438}
2439
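/* PROBE[I] tests whether a read or write at a given privilege level
   would be permitted, without performing the access.  The helper
   consults the page protections and yields 1 (allowed) or 0 in the
   target register.  */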
2440static bool trans_probe(DisasContext *ctx, arg_probe *a)
2441{
2442    TCGv_reg dest, ofs;
2443    TCGv_i32 level, want;
2444    TCGv_tl addr;
2445
2446    nullify_over(ctx);
2447
2448    dest = dest_gpr(ctx, a->t);
2449    form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2450
2451    if (a->imm) {
2452        level = tcg_const_i32(a->ri);
2453    } else {
2454        level = tcg_temp_new_i32();
2455        tcg_gen_trunc_reg_i32(level, load_gpr(ctx, a->ri));
2456        tcg_gen_andi_i32(level, level, 3);
2457    }
2458    want = tcg_const_i32(a->write ? PAGE_WRITE : PAGE_READ);
2459
2460    gen_helper_probe(dest, cpu_env, addr, level, want);
2461
2462    tcg_temp_free_i32(want);
2463    tcg_temp_free_i32(level);
2464
2465    save_gpr(ctx, a->t, dest);
2466    return nullify_end(ctx);
2467}
2468
2469static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a)
2470{
2471    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2472#ifndef CONFIG_USER_ONLY
2473    TCGv_tl addr;
2474    TCGv_reg ofs, reg;
2475
2476    nullify_over(ctx);
2477
2478    form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2479    reg = load_gpr(ctx, a->r);
2480    if (a->addr) {
2481        gen_helper_itlba(cpu_env, addr, reg);
2482    } else {
2483        gen_helper_itlbp(cpu_env, addr, reg);
2484    }
2485
2486    /* Exit TB for TLB change if mmu is enabled.  */
2487    if (ctx->tb_flags & PSW_C) {
2488        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2489    }
2490    return nullify_end(ctx);
2491#endif
2492}
2493
2494static bool trans_pxtlbx(DisasContext *ctx, arg_pxtlbx *a)
2495{
2496    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2497#ifndef CONFIG_USER_ONLY
2498    TCGv_tl addr;
2499    TCGv_reg ofs;
2500
2501    nullify_over(ctx);
2502
2503    form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2504    if (a->m) {
2505        save_gpr(ctx, a->b, ofs);
2506    }
2507    if (a->local) {
2508        gen_helper_ptlbe(cpu_env);
2509    } else {
2510        gen_helper_ptlb(cpu_env, addr);
2511    }
2512
2513    /* Exit TB for TLB change if mmu is enabled.  */
2514    if (ctx->tb_flags & PSW_C) {
2515        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2516    }
2517    return nullify_end(ctx);
2518#endif
2519}
2520
2521static bool trans_lpa(DisasContext *ctx, arg_ldst *a)
2522{
2523    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2524#ifndef CONFIG_USER_ONLY
2525    TCGv_tl vaddr;
2526    TCGv_reg ofs, paddr;
2527
2528    nullify_over(ctx);
2529
2530    form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2531
2532    paddr = tcg_temp_new();
2533    gen_helper_lpa(paddr, cpu_env, vaddr);
2534
2535    /* Note that physical address result overrides base modification.  */
2536    if (a->m) {
2537        save_gpr(ctx, a->b, ofs);
2538    }
2539    save_gpr(ctx, a->t, paddr);
2540    tcg_temp_free(paddr);
2541
2542    return nullify_end(ctx);
2543#endif
2544}
2545
2546static bool trans_lci(DisasContext *ctx, arg_lci *a)
2547{
2548    TCGv_reg ci;
2549
2550    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2551
2552    /* The Coherence Index is an implementation-defined function of the
2553       physical address.  Two addresses with the same CI have a coherent
2554       view of the cache.  Our implementation returns 0 for all addresses,
2555       since the entire address space is coherent.  */
2556    ci = tcg_const_reg(0);
2557    save_gpr(ctx, a->t, ci);
2558    tcg_temp_free(ci);
2559
2560    cond_free(&ctx->null_cond);
2561    return true;
2562}
2563
2564static bool trans_add(DisasContext *ctx, arg_rrr_cf_sh *a)
2565{
2566    return do_add_reg(ctx, a, false, false, false, false);
2567}
2568
2569static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_sh *a)
2570{
2571    return do_add_reg(ctx, a, true, false, false, false);
2572}
2573
2574static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
2575{
2576    return do_add_reg(ctx, a, false, true, false, false);
2577}
2578
2579static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_sh *a)
2580{
2581    return do_add_reg(ctx, a, false, false, false, true);
2582}
2583
2584static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
2585{
2586    return do_add_reg(ctx, a, false, true, false, true);
2587}
2588
2589static bool trans_sub(DisasContext *ctx, arg_rrr_cf *a)
2590{
2591    return do_sub_reg(ctx, a, false, false, false);
2592}
2593
2594static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf *a)
2595{
2596    return do_sub_reg(ctx, a, true, false, false);
2597}
2598
2599static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf *a)
2600{
2601    return do_sub_reg(ctx, a, false, false, true);
2602}
2603
2604static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf *a)
2605{
2606    return do_sub_reg(ctx, a, true, false, true);
2607}
2608
2609static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf *a)
2610{
2611    return do_sub_reg(ctx, a, false, true, false);
2612}
2613
2614static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf *a)
2615{
2616    return do_sub_reg(ctx, a, true, true, false);
2617}
2618
2619static bool trans_andcm(DisasContext *ctx, arg_rrr_cf *a)
2620{
2621    return do_log_reg(ctx, a, tcg_gen_andc_reg);
2622}
2623
2624static bool trans_and(DisasContext *ctx, arg_rrr_cf *a)
2625{
2626    return do_log_reg(ctx, a, tcg_gen_and_reg);
2627}
2628
2629static bool trans_or(DisasContext *ctx, arg_rrr_cf *a)
2630{
2631    if (a->cf == 0) {
2632        unsigned r2 = a->r2;
2633        unsigned r1 = a->r1;
2634        unsigned rt = a->t;
2635
2636        if (rt == 0) { /* NOP */
2637            cond_free(&ctx->null_cond);
2638            return true;
2639        }
2640        if (r2 == 0) { /* COPY */
2641            if (r1 == 0) {
2642                TCGv_reg dest = dest_gpr(ctx, rt);
2643                tcg_gen_movi_reg(dest, 0);
2644                save_gpr(ctx, rt, dest);
2645            } else {
2646                save_gpr(ctx, rt, cpu_gr[r1]);
2647            }
2648            cond_free(&ctx->null_cond);
2649            return true;
2650        }
2651#ifndef CONFIG_USER_ONLY
2652        /* These are QEMU extensions and are nops in the real architecture:
2653         *
2654         * or %r10,%r10,%r10 -- idle loop; wait for interrupt
2655         * or %r31,%r31,%r31 -- death loop; offline cpu
2656         *                      currently implemented as idle.
2657         */
2658        if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */
2659            TCGv_i32 tmp;
2660
2661            /* No need to check for supervisor, as userland can only pause
2662               until the next timer interrupt.  */
2663            nullify_over(ctx);
2664
2665            /* Advance the instruction queue.  */
2666            copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
2667            copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
2668            nullify_set(ctx, 0);
2669
2670            /* Tell the qemu main loop to halt until this cpu has work.  */
2671            tmp = tcg_const_i32(1);
2672            tcg_gen_st_i32(tmp, cpu_env, -offsetof(HPPACPU, env) +
2673                                         offsetof(CPUState, halted));
2674            tcg_temp_free_i32(tmp);
2675            gen_excp_1(EXCP_HALTED);
2676            ctx->base.is_jmp = DISAS_NORETURN;
2677
2678            return nullify_end(ctx);
2679        }
2680#endif
2681    }
2682    return do_log_reg(ctx, a, tcg_gen_or_reg);
2683}
2684
2685static bool trans_xor(DisasContext *ctx, arg_rrr_cf *a)
2686{
2687    return do_log_reg(ctx, a, tcg_gen_xor_reg);
2688}
2689
2690static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf *a)
2691{
2692    TCGv_reg tcg_r1, tcg_r2;
2693
2694    if (a->cf) {
2695        nullify_over(ctx);
2696    }
2697    tcg_r1 = load_gpr(ctx, a->r1);
2698    tcg_r2 = load_gpr(ctx, a->r2);
2699    do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf);
2700    return nullify_end(ctx);
2701}
2702
2703static bool trans_uxor(DisasContext *ctx, arg_rrr_cf *a)
2704{
2705    TCGv_reg tcg_r1, tcg_r2;
2706
2707    if (a->cf) {
2708        nullify_over(ctx);
2709    }
2710    tcg_r1 = load_gpr(ctx, a->r1);
2711    tcg_r2 = load_gpr(ctx, a->r2);
2712    do_unit(ctx, a->t, tcg_r1, tcg_r2, a->cf, false, tcg_gen_xor_reg);
2713    return nullify_end(ctx);
2714}
2715
2716static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf *a, bool is_tc)
2717{
2718    TCGv_reg tcg_r1, tcg_r2, tmp;
2719
2720    if (a->cf) {
2721        nullify_over(ctx);
2722    }
2723    tcg_r1 = load_gpr(ctx, a->r1);
2724    tcg_r2 = load_gpr(ctx, a->r2);
2725    tmp = get_temp(ctx);
2726    tcg_gen_not_reg(tmp, tcg_r2);
2727    do_unit(ctx, a->t, tcg_r1, tmp, a->cf, is_tc, tcg_gen_add_reg);
2728    return nullify_end(ctx);
2729}
2730
2731static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf *a)
2732{
2733    return do_uaddcm(ctx, a, false);
2734}
2735
2736static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf *a)
2737{
2738    return do_uaddcm(ctx, a, true);
2739}
2740
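/* DCOR and IDCOR implement the decimal-correct step of BCD addition.
   A BCD add is performed as a binary ADD with each digit pre-biased
   by 6; afterwards the bias must be removed from every digit that
   did not generate a nibble carry.  E.g. BCD 15 + 27: 0x15 + 0x27 +
   0x66 = 0xa2, with a carry out of digit 0 only, so subtracting 6
   from digit 1 yields the correct result 0x42.  The shift/mask of
   PSW[CB] below extracts one carry bit per digit, and the multiply
   by 6 forms the per-digit correction constant.  */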
2741static bool do_dcor(DisasContext *ctx, arg_rr_cf *a, bool is_i)
2742{
2743    TCGv_reg tmp;
2744
2745    nullify_over(ctx);
2746
2747    tmp = get_temp(ctx);
2748    tcg_gen_shri_reg(tmp, cpu_psw_cb, 3);
2749    if (!is_i) {
2750        tcg_gen_not_reg(tmp, tmp);
2751    }
2752    tcg_gen_andi_reg(tmp, tmp, 0x11111111);
2753    tcg_gen_muli_reg(tmp, tmp, 6);
2754    do_unit(ctx, a->t, load_gpr(ctx, a->r), tmp, a->cf, false,
2755            is_i ? tcg_gen_add_reg : tcg_gen_sub_reg);
2756    return nullify_end(ctx);
2757}
2758
2759static bool trans_dcor(DisasContext *ctx, arg_rr_cf *a)
2760{
2761    return do_dcor(ctx, a, false);
2762}
2763
2764static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf *a)
2765{
2766    return do_dcor(ctx, a, true);
2767}
2768
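/* DS is the PA-RISC divide-step primitive: each execution computes
   one bit of quotient, so a full 32-bit division is open-coded as a
   setup instruction followed by 32 consecutive DS steps, per the
   division sequences in the architecture manual.  */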
2769static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
2770{
2771    TCGv_reg dest, add1, add2, addc, zero, in1, in2;
2772
2773    nullify_over(ctx);
2774
2775    in1 = load_gpr(ctx, a->r1);
2776    in2 = load_gpr(ctx, a->r2);
2777
2778    add1 = tcg_temp_new();
2779    add2 = tcg_temp_new();
2780    addc = tcg_temp_new();
2781    dest = tcg_temp_new();
2782    zero = tcg_const_reg(0);
2783
2784    /* Form R1 << 1 | PSW[CB]{8}.  */
2785    tcg_gen_add_reg(add1, in1, in1);
2786    tcg_gen_add_reg(add1, add1, cpu_psw_cb_msb);
2787
2788    /* Add or subtract R2, depending on PSW[V].  Proper computation of
2789       carry{8} requires that we subtract via + ~R2 + 1, as described in
2790       the manual.  By extracting and masking V, we can produce the
2791       proper inputs to the addition without movcond.  */
2792    tcg_gen_sari_reg(addc, cpu_psw_v, TARGET_REGISTER_BITS - 1);
2793    tcg_gen_xor_reg(add2, in2, addc);
2794    tcg_gen_andi_reg(addc, addc, 1);
2795    /* ??? This is only correct for 32-bit.  */
2796    tcg_gen_add2_i32(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
2797    tcg_gen_add2_i32(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);
2798
2799    tcg_temp_free(addc);
2800    tcg_temp_free(zero);
2801
2802    /* Write back the result register.  */
2803    save_gpr(ctx, a->t, dest);
2804
2805    /* Write back PSW[CB].  */
2806    tcg_gen_xor_reg(cpu_psw_cb, add1, add2);
2807    tcg_gen_xor_reg(cpu_psw_cb, cpu_psw_cb, dest);
2808
2809    /* Write back PSW[V] for the division step.  */
2810    tcg_gen_neg_reg(cpu_psw_v, cpu_psw_cb_msb);
2811    tcg_gen_xor_reg(cpu_psw_v, cpu_psw_v, in2);
2812
2813    /* Install the new nullification.  */
2814    if (a->cf) {
2815        TCGv_reg sv = NULL;
2816        if (cond_need_sv(a->cf >> 1)) {
2817            /* ??? The lshift is supposed to contribute to overflow.  */
2818            sv = do_add_sv(ctx, dest, add1, add2);
2819        }
2820        ctx->null_cond = do_cond(a->cf, dest, cpu_psw_cb_msb, sv);
2821    }
2822
2823    tcg_temp_free(add1);
2824    tcg_temp_free(add2);
2825    tcg_temp_free(dest);
2826
2827    return nullify_end(ctx);
2828}
2829
2830static bool trans_addi(DisasContext *ctx, arg_rri_cf *a)
2831{
2832    return do_add_imm(ctx, a, false, false);
2833}
2834
2835static bool trans_addi_tsv(DisasContext *ctx, arg_rri_cf *a)
2836{
2837    return do_add_imm(ctx, a, true, false);
2838}
2839
2840static bool trans_addi_tc(DisasContext *ctx, arg_rri_cf *a)
2841{
2842    return do_add_imm(ctx, a, false, true);
2843}
2844
2845static bool trans_addi_tc_tsv(DisasContext *ctx, arg_rri_cf *a)
2846{
2847    return do_add_imm(ctx, a, true, true);
2848}
2849
2850static bool trans_subi(DisasContext *ctx, arg_rri_cf *a)
2851{
2852    return do_sub_imm(ctx, a, false);
2853}
2854
2855static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a)
2856{
2857    return do_sub_imm(ctx, a, true);
2858}
2859
2860static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf *a)
2861{
2862    TCGv_reg tcg_im, tcg_r2;
2863
2864    if (a->cf) {
2865        nullify_over(ctx);
2866    }
2867
2868    tcg_im = load_const(ctx, a->i);
2869    tcg_r2 = load_gpr(ctx, a->r);
2870    do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf);
2871
2872    return nullify_end(ctx);
2873}
2874
2875static bool trans_ld(DisasContext *ctx, arg_ldst *a)
2876{
2877    return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
2878                   a->disp, a->sp, a->m, a->size | MO_TE);
2879}
2880
2881static bool trans_st(DisasContext *ctx, arg_ldst *a)
2882{
2883    assert(a->x == 0 && a->scale == 0);
2884    return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE);
2885}
2886
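/* LDCW, load and clear, is the architecture's one atomic
   read-modify-write primitive, used to build spinlocks: it returns
   the old memory value and zeroes the location, which maps directly
   onto an atomic exchange with zero.  The architecture requires the
   address to be 16-byte aligned, hence MO_ALIGN_16.  */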
2887static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
2888{
2889    TCGMemOp mop = MO_TEUL | MO_ALIGN_16 | a->size;
2890    TCGv_reg zero, dest, ofs;
2891    TCGv_tl addr;
2892
2893    nullify_over(ctx);
2894
2895    if (a->m) {
2896        /* Base register modification.  Make sure that if RT == RB,
2897           we see the result of the load.  */
2898        dest = get_temp(ctx);
2899    } else {
2900        dest = dest_gpr(ctx, a->t);
2901    }
2902
2903    form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? a->size : 0,
2904             a->disp, a->sp, a->m, ctx->mmu_idx == MMU_PHYS_IDX);
2905    zero = tcg_const_reg(0);
2906    tcg_gen_atomic_xchg_reg(dest, addr, zero, ctx->mmu_idx, mop);
2907    if (a->m) {
2908        save_gpr(ctx, a->b, ofs);
2909    }
2910    save_gpr(ctx, a->t, dest);
2911
2912    return nullify_end(ctx);
2913}
2914
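/* STBY, store bytes, writes only part of a word so that unaligned
   block copies can handle the ragged edges: the "begin" form (a == 0)
   stores from the given address through the end of the word, while
   the "ending" form (a == 1) stores the bytes of the word before the
   address.  Separate helpers are used in parallel (MTTCG) mode so
   that the partial store is performed atomically.  */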
2915static bool trans_stby(DisasContext *ctx, arg_stby *a)
2916{
2917    TCGv_reg ofs, val;
2918    TCGv_tl addr;
2919
2920    nullify_over(ctx);
2921
2922    form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
2923             ctx->mmu_idx == MMU_PHYS_IDX);
2924    val = load_gpr(ctx, a->r);
2925    if (a->a) {
2926        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2927            gen_helper_stby_e_parallel(cpu_env, addr, val);
2928        } else {
2929            gen_helper_stby_e(cpu_env, addr, val);
2930        }
2931    } else {
2932        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2933            gen_helper_stby_b_parallel(cpu_env, addr, val);
2934        } else {
2935            gen_helper_stby_b(cpu_env, addr, val);
2936        }
2937    }
2938    if (a->m) {
2939        tcg_gen_andi_reg(ofs, ofs, ~3);
2940        save_gpr(ctx, a->b, ofs);
2941    }
2942
2943    return nullify_end(ctx);
2944}
2945
2946static bool trans_lda(DisasContext *ctx, arg_ldst *a)
2947{
2948    int hold_mmu_idx = ctx->mmu_idx;
2949
2950    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2951    ctx->mmu_idx = MMU_PHYS_IDX;
2952    trans_ld(ctx, a);
2953    ctx->mmu_idx = hold_mmu_idx;
2954    return true;
2955}
2956
2957static bool trans_sta(DisasContext *ctx, arg_ldst *a)
2958{
2959    int hold_mmu_idx = ctx->mmu_idx;
2960
2961    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2962    ctx->mmu_idx = MMU_PHYS_IDX;
2963    trans_st(ctx, a);
2964    ctx->mmu_idx = hold_mmu_idx;
2965    return true;
2966}
2967
2968static bool trans_ldil(DisasContext *ctx, arg_ldil *a)
2969{
2970    TCGv_reg tcg_rt = dest_gpr(ctx, a->t);
2971
2972    tcg_gen_movi_reg(tcg_rt, a->i);
2973    save_gpr(ctx, a->t, tcg_rt);
2974    cond_free(&ctx->null_cond);
2975    return true;
2976}
2977
2978static bool trans_addil(DisasContext *ctx, arg_addil *a)
2979{
2980    TCGv_reg tcg_rt = load_gpr(ctx, a->r);
2981    TCGv_reg tcg_r1 = dest_gpr(ctx, 1);
2982
2983    tcg_gen_addi_reg(tcg_r1, tcg_rt, a->i);
2984    save_gpr(ctx, 1, tcg_r1);
2985    cond_free(&ctx->null_cond);
2986    return true;
2987}
2988
2989static bool trans_ldo(DisasContext *ctx, arg_ldo *a)
2990{
2991    TCGv_reg tcg_rt = dest_gpr(ctx, a->t);
2992
2993    /* Special case rb == 0, for the LDI pseudo-op.
2994       The COPY pseudo-op is handled for free within tcg_gen_addi_reg.  */
2995    if (a->b == 0) {
2996        tcg_gen_movi_reg(tcg_rt, a->i);
2997    } else {
2998        tcg_gen_addi_reg(tcg_rt, cpu_gr[a->b], a->i);
2999    }
3000    save_gpr(ctx, a->t, tcg_rt);
3001    cond_free(&ctx->null_cond);
3002    return true;
3003}
3004
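/* Compare and branch: the subtraction in1 - in2 is computed only to
   evaluate the branch condition -- dest is a temporary and is never
   written back -- and the resulting condition feeds do_cbranch.  */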
3005static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_reg in1,
3006                    unsigned c, unsigned f, unsigned n, int disp)
3007{
3008    TCGv_reg dest, in2, sv;
3009    DisasCond cond;
3010
3011    in2 = load_gpr(ctx, r);
3012    dest = get_temp(ctx);
3013
3014    tcg_gen_sub_reg(dest, in1, in2);
3015
3016    sv = NULL;
3017    if (cond_need_sv(c)) {
3018        sv = do_sub_sv(ctx, dest, in1, in2);
3019    }
3020
3021    cond = do_sub_cond(c * 2 + f, dest, in1, in2, sv);
3022    return do_cbranch(ctx, disp, n, &cond);
3023}
3024
3025static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a)
3026{
3027    nullify_over(ctx);
3028    return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3029}
3030
3031static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a)
3032{
3033    nullify_over(ctx);
3034    return do_cmpb(ctx, a->r, load_const(ctx, a->i), a->c, a->f, a->n, a->disp);
3035}
3036
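/* Add and branch: unlike CMPB, the sum is written back to GR[r],
   so ADDB/ADDIB serve as combined counter-update-and-branch loop
   primitives (e.g. an ADDIB,-1 count-down loop).  */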
3037static bool do_addb(DisasContext *ctx, unsigned r, TCGv_reg in1,
3038                    unsigned c, unsigned f, unsigned n, int disp)
3039{
3040    TCGv_reg dest, in2, sv, cb_msb;
3041    DisasCond cond;
3042
3043    in2 = load_gpr(ctx, r);
3044    dest = tcg_temp_new();
3045    sv = NULL;
3046    cb_msb = NULL;
3047
3048    if (cond_need_cb(c)) {
3049        cb_msb = get_temp(ctx);
3050        tcg_gen_movi_reg(cb_msb, 0);
3051        tcg_gen_add2_reg(dest, cb_msb, in1, cb_msb, in2, cb_msb);
3052    } else {
3053        tcg_gen_add_reg(dest, in1, in2);
3054    }
3055    if (cond_need_sv(c)) {
3056        sv = do_add_sv(ctx, dest, in1, in2);
3057    }
3058
3059    cond = do_cond(c * 2 + f, dest, cb_msb, sv);
3060    save_gpr(ctx, r, dest);
3061    tcg_temp_free(dest);
3062    return do_cbranch(ctx, disp, n, &cond);
3063}
3064
3065static bool trans_addb(DisasContext *ctx, arg_addb *a)
3066{
3067    nullify_over(ctx);
3068    return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3069}
3070
3071static bool trans_addbi(DisasContext *ctx, arg_addbi *a)
3072{
3073    nullify_over(ctx);
3074    return do_addb(ctx, a->r, load_const(ctx, a->i), a->c, a->f, a->n, a->disp);
3075}
3076
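/* Branch on bit: shifting left by SAR (or by a fixed amount, below)
   moves the selected bit into the sign position, so the test reduces
   to a sign check -- LT branches when the bit is set, GE when it is
   clear.  */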
3077static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a)
3078{
3079    TCGv_reg tmp, tcg_r;
3080    DisasCond cond;
3081
3082    nullify_over(ctx);
3083
3084    tmp = tcg_temp_new();
3085    tcg_r = load_gpr(ctx, a->r);
3086    tcg_gen_shl_reg(tmp, tcg_r, cpu_sar);
3087
3088    cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3089    tcg_temp_free(tmp);
3090    return do_cbranch(ctx, a->disp, a->n, &cond);
3091}
3092
3093static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
3094{
3095    TCGv_reg tmp, tcg_r;
3096    DisasCond cond;
3097
3098    nullify_over(ctx);
3099
3100    tmp = tcg_temp_new();
3101    tcg_r = load_gpr(ctx, a->r);
3102    tcg_gen_shli_reg(tmp, tcg_r, a->p);
3103
3104    cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3105    tcg_temp_free(tmp);
3106    return do_cbranch(ctx, a->disp, a->n, &cond);
3107}
3108
3109static bool trans_movb(DisasContext *ctx, arg_movb *a)
3110{
3111    TCGv_reg dest;
3112    DisasCond cond;
3113
3114    nullify_over(ctx);
3115
3116    dest = dest_gpr(ctx, a->r2);
3117    if (a->r1 == 0) {
3118        tcg_gen_movi_reg(dest, 0);
3119    } else {
3120        tcg_gen_mov_reg(dest, cpu_gr[a->r1]);
3121    }
3122
3123    cond = do_sed_cond(a->c, dest);
3124    return do_cbranch(ctx, a->disp, a->n, &cond);
3125}
3126
3127static bool trans_movbi(DisasContext *ctx, arg_movbi *a)
3128{
3129    TCGv_reg dest;
3130    DisasCond cond;
3131
3132    nullify_over(ctx);
3133
3134    dest = dest_gpr(ctx, a->r);
3135    tcg_gen_movi_reg(dest, a->i);
3136
3137    cond = do_sed_cond(a->c, dest);
3138    return do_cbranch(ctx, a->disp, a->n, &cond);
3139}
3140
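/* SHRPW, shift right pair word: R1:R2 are concatenated into a 64-bit
   value which is shifted right, keeping the low 32 bits.  The special
   cases fall out naturally: r1 == 0 is a plain logical shift and
   r1 == r2 a 32-bit rotate right.  */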
3141static bool trans_shrpw_sar(DisasContext *ctx, arg_shrpw_sar *a)
3142{
3143    TCGv_reg dest;
3144
3145    if (a->c) {
3146        nullify_over(ctx);
3147    }
3148
3149    dest = dest_gpr(ctx, a->t);
3150    if (a->r1 == 0) {
3151        tcg_gen_ext32u_reg(dest, load_gpr(ctx, a->r2));
3152        tcg_gen_shr_reg(dest, dest, cpu_sar);
3153    } else if (a->r1 == a->r2) {
3154        TCGv_i32 t32 = tcg_temp_new_i32();
3155        tcg_gen_trunc_reg_i32(t32, load_gpr(ctx, a->r2));
3156        tcg_gen_rotr_i32(t32, t32, cpu_sar);
3157        tcg_gen_extu_i32_reg(dest, t32);
3158        tcg_temp_free_i32(t32);
3159    } else {
3160        TCGv_i64 t = tcg_temp_new_i64();
3161        TCGv_i64 s = tcg_temp_new_i64();
3162
3163        tcg_gen_concat_reg_i64(t, load_gpr(ctx, a->r2), load_gpr(ctx, a->r1));
3164        tcg_gen_extu_reg_i64(s, cpu_sar);
3165        tcg_gen_shr_i64(t, t, s);
3166        tcg_gen_trunc_i64_reg(dest, t);
3167
3168        tcg_temp_free_i64(t);
3169        tcg_temp_free_i64(s);
3170    }
3171    save_gpr(ctx, a->t, dest);
3172
3173    /* Install the new nullification.  */
3174    cond_free(&ctx->null_cond);
3175    if (a->c) {
3176        ctx->null_cond = do_sed_cond(a->c, dest);
3177    }
3178    return nullify_end(ctx);
3179}
3180
3181static bool trans_shrpw_imm(DisasContext *ctx, arg_shrpw_imm *a)
3182{
3183    unsigned sa = 31 - a->cpos;
3184    TCGv_reg dest, t2;
3185
3186    if (a->c) {
3187        nullify_over(ctx);
3188    }
3189
3190    dest = dest_gpr(ctx, a->t);
3191    t2 = load_gpr(ctx, a->r2);
3192    if (a->r1 == a->r2) {
3193        TCGv_i32 t32 = tcg_temp_new_i32();
3194        tcg_gen_trunc_reg_i32(t32, t2);
3195        tcg_gen_rotri_i32(t32, t32, sa);
3196        tcg_gen_extu_i32_reg(dest, t32);
3197        tcg_temp_free_i32(t32);
3198    } else if (a->r1 == 0) {
3199        tcg_gen_extract_reg(dest, t2, sa, 32 - sa);
3200    } else {
3201        TCGv_reg t0 = tcg_temp_new();
3202        tcg_gen_extract_reg(t0, t2, sa, 32 - sa);
3203        tcg_gen_deposit_reg(dest, t0, cpu_gr[a->r1], 32 - sa, sa);
3204        tcg_temp_free(t0);
3205    }
3206    save_gpr(ctx, a->t, dest);
3207
3208    /* Install the new nullification.  */
3209    cond_free(&ctx->null_cond);
3210    if (a->c) {
3211        ctx->null_cond = do_sed_cond(a->c, dest);
3212    }
3213    return nullify_end(ctx);
3214}
3215
3216static bool trans_extrw_sar(DisasContext *ctx, arg_extrw_sar *a)
3217{
3218    unsigned len = 32 - a->clen;
3219    TCGv_reg dest, src, tmp;
3220
3221    if (a->c) {
3222        nullify_over(ctx);
3223    }
3224
3225    dest = dest_gpr(ctx, a->t);
3226    src = load_gpr(ctx, a->r);
3227    tmp = tcg_temp_new();
3228
3229    /* Recall that SAR uses big-endian bit numbering.  */
3230    tcg_gen_xori_reg(tmp, cpu_sar, TARGET_REGISTER_BITS - 1);
3231    if (a->se) {
3232        tcg_gen_sar_reg(dest, src, tmp);
3233        tcg_gen_sextract_reg(dest, dest, 0, len);
3234    } else {
3235        tcg_gen_shr_reg(dest, src, tmp);
3236        tcg_gen_extract_reg(dest, dest, 0, len);
3237    }
3238    tcg_temp_free(tmp);
3239    save_gpr(ctx, a->t, dest);
3240
3241    /* Install the new nullification.  */
3242    cond_free(&ctx->null_cond);
3243    if (a->c) {
3244        ctx->null_cond = do_sed_cond(a->c, dest);
3245    }
3246    return nullify_end(ctx);
3247}
3248
3249static bool trans_extrw_imm(DisasContext *ctx, arg_extrw_imm *a)
3250{
3251    unsigned len = 32 - a->clen;
3252    unsigned cpos = 31 - a->pos;
3253    TCGv_reg dest, src;
3254
3255    if (a->c) {
3256        nullify_over(ctx);
3257    }
3258
3259    dest = dest_gpr(ctx, a->t);
3260    src = load_gpr(ctx, a->r);
3261    if (a->se) {
3262        tcg_gen_sextract_reg(dest, src, cpos, len);
3263    } else {
3264        tcg_gen_extract_reg(dest, src, cpos, len);
3265    }
3266    save_gpr(ctx, a->t, dest);
3267
3268    /* Install the new nullification.  */
3269    cond_free(&ctx->null_cond);
3270    if (a->c) {
3271        ctx->null_cond = do_sed_cond(a->c, dest);
3272    }
3273    return nullify_end(ctx);
3274}
3275
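/* Deposit of an immediate reduces to constant masks: mask0 is the
   immediate placed in the target field (zeros elsewhere), mask1 the
   immediate surrounded by ones, so the merging form computes
   (src & mask1) | mask0, and the zeroing form (nz == 0) is just a
   move of mask0.  */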
3276static bool trans_depwi_imm(DisasContext *ctx, arg_depwi_imm *a)
3277{
3278    unsigned len = 32 - a->clen;
3279    target_sreg mask0, mask1;
3280    TCGv_reg dest;
3281
3282    if (a->c) {
3283        nullify_over(ctx);
3284    }
3285    if (a->cpos + len > 32) {
3286        len = 32 - a->cpos;
3287    }
3288
3289    dest = dest_gpr(ctx, a->t);
3290    mask0 = deposit64(0, a->cpos, len, a->i);
3291    mask1 = deposit64(-1, a->cpos, len, a->i);
3292
3293    if (a->nz) {
3294        TCGv_reg src = load_gpr(ctx, a->t);
3295        if (mask1 != -1) {
3296            tcg_gen_andi_reg(dest, src, mask1);
3297            src = dest;
3298        }
3299        tcg_gen_ori_reg(dest, src, mask0);
3300    } else {
3301        tcg_gen_movi_reg(dest, mask0);
3302    }
3303    save_gpr(ctx, a->t, dest);
3304
3305    /* Install the new nullification.  */
3306    cond_free(&ctx->null_cond);
3307    if (a->c) {
3308        ctx->null_cond = do_sed_cond(a->c, dest);
3309    }
3310    return nullify_end(ctx);
3311}
3312
3313static bool trans_depw_imm(DisasContext *ctx, arg_depw_imm *a)
3314{
3315    unsigned rs = a->nz ? a->t : 0;
3316    unsigned len = 32 - a->clen;
3317    TCGv_reg dest, val;
3318
3319    if (a->c) {
3320        nullify_over(ctx);
3321    }
3322    if (a->cpos + len > 32) {
3323        len = 32 - a->cpos;
3324    }
3325
3326    dest = dest_gpr(ctx, a->t);
3327    val = load_gpr(ctx, a->r);
3328    if (rs == 0) {
3329        tcg_gen_deposit_z_reg(dest, val, a->cpos, len);
3330    } else {
3331        tcg_gen_deposit_reg(dest, cpu_gr[rs], val, a->cpos, len);
3332    }
3333    save_gpr(ctx, a->t, dest);
3334
3335    /* Install the new nullification.  */
3336    cond_free(&ctx->null_cond);
3337    if (a->c) {
3338        ctx->null_cond = do_sed_cond(a->c, dest);
3339    }
3340    return nullify_end(ctx);
3341}
3342
3343static bool do_depw_sar(DisasContext *ctx, unsigned rt, unsigned c,
3344                        unsigned nz, unsigned clen, TCGv_reg val)
3345{
3346    unsigned rs = nz ? rt : 0;
3347    unsigned len = 32 - clen;
3348    TCGv_reg mask, tmp, shift, dest;
3349    unsigned msb = 1U << (len - 1);
3350
3351    if (c) {
3352        nullify_over(ctx);
3353    }
3354
3355    dest = dest_gpr(ctx, rt);
3356    shift = tcg_temp_new();
3357    tmp = tcg_temp_new();
3358
3359    /* Convert big-endian bit numbering in SAR to left-shift.  */
3360    tcg_gen_xori_reg(shift, cpu_sar, TARGET_REGISTER_BITS - 1);
3361
3362    mask = tcg_const_reg(msb + (msb - 1));
3363    tcg_gen_and_reg(tmp, val, mask);
3364    if (rs) {
3365        tcg_gen_shl_reg(mask, mask, shift);
3366        tcg_gen_shl_reg(tmp, tmp, shift);
3367        tcg_gen_andc_reg(dest, cpu_gr[rs], mask);
3368        tcg_gen_or_reg(dest, dest, tmp);
3369    } else {
3370        tcg_gen_shl_reg(dest, tmp, shift);
3371    }
3372    tcg_temp_free(shift);
3373    tcg_temp_free(mask);
3374    tcg_temp_free(tmp);
3375    save_gpr(ctx, rt, dest);
3376
3377    /* Install the new nullification.  */
3378    cond_free(&ctx->null_cond);
3379    if (c) {
3380        ctx->null_cond = do_sed_cond(c, dest);
3381    }
3382    return nullify_end(ctx);
3383}
3384
3385static bool trans_depw_sar(DisasContext *ctx, arg_depw_sar *a)
3386{
3387    return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, load_gpr(ctx, a->r));
3388}
3389
3390static bool trans_depwi_sar(DisasContext *ctx, arg_depwi_sar *a)
3391{
3392    return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, load_const(ctx, a->i));
3393}
3394
3395static bool trans_be(DisasContext *ctx, arg_be *a)
3396{
3397    TCGv_reg tmp;
3398
3399#ifdef CONFIG_USER_ONLY
3400    /* ??? It seems like there should be a good way of using
3401       "be disp(sr2, r0)", the canonical gateway entry mechanism
3402       to our advantage.  But that appears to be inconvenient to
3403       manage alongside branch delay slots.  Therefore we handle
3404       entry into the gateway page via absolute address.  */
3405    /* Since we don't implement spaces, just branch.  Note the special
3406       case of "be disp(*,r0)" using a direct branch to disp, so that we can
3407       goto_tb to the TB containing the syscall.  */
3408    if (a->b == 0) {
3409        return do_dbranch(ctx, a->disp, a->l, a->n);
3410    }
3411#else
3412    nullify_over(ctx);
3413#endif
3414
3415    tmp = get_temp(ctx);
3416    tcg_gen_addi_reg(tmp, load_gpr(ctx, a->b), a->disp);
3417    tmp = do_ibranch_priv(ctx, tmp);
3418
3419#ifdef CONFIG_USER_ONLY
3420    return do_ibranch(ctx, tmp, a->l, a->n);
3421#else
3422    TCGv_i64 new_spc = tcg_temp_new_i64();
3423
3424    load_spr(ctx, new_spc, a->sp);
3425    if (a->l) {
3426        copy_iaoq_entry(cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
3427        tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f);
3428    }
3429    if (a->n && use_nullify_skip(ctx)) {
3430        tcg_gen_mov_reg(cpu_iaoq_f, tmp);
3431        tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
3432        tcg_gen_mov_i64(cpu_iasq_f, new_spc);
3433        tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
3434    } else {
3435        copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3436        if (ctx->iaoq_b == -1) {
3437            tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3438        }
3439        tcg_gen_mov_reg(cpu_iaoq_b, tmp);
3440        tcg_gen_mov_i64(cpu_iasq_b, new_spc);
3441        nullify_set(ctx, a->n);
3442    }
3443    tcg_temp_free_i64(new_spc);
3444    tcg_gen_lookup_and_goto_ptr();
3445    ctx->base.is_jmp = DISAS_NORETURN;
3446    return nullify_end(ctx);
3447#endif
3448}
3449
3450static bool trans_bl(DisasContext *ctx, arg_bl *a)
3451{
3452    return do_dbranch(ctx, iaoq_dest(ctx, a->disp), a->l, a->n);
3453}
3454
3455static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
3456{
3457    target_ureg dest = iaoq_dest(ctx, a->disp);
3458
3459    nullify_over(ctx);
3460
3461    /* Make sure the caller hasn't done something weird with the queue.
3462     * ??? This is not quite the same as the PSW[B] bit, which would be
3463     * expensive to track.  Real hardware will trap for
3464     *    b  gateway
3465     *    b  gateway+4  (in delay slot of first branch)
3466     * However, checking for a non-sequential instruction queue *will*
3467     * diagnose the security hole
3468     *    b  gateway
3469     *    b  evil
3470     * in which instructions at evil would run with increased privs.
3471     */
3472    if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) {
3473        return gen_illegal(ctx);
3474    }
3475
3476#ifndef CONFIG_USER_ONLY
3477    if (ctx->tb_flags & PSW_C) {
3478        CPUHPPAState *env = ctx->cs->env_ptr;
3479        int type = hppa_artype_for_page(env, ctx->base.pc_next);
3480        /* If we could not find a TLB entry, then we need to generate an
3481           ITLB miss exception so the kernel will provide it.
3482           The resulting TLB fill operation will invalidate this TB and
3483           we will re-translate, at which point we *will* be able to find
3484           the TLB entry and determine if this is in fact a gateway page.  */
3485        if (type < 0) {
3486            gen_excp(ctx, EXCP_ITLB_MISS);
3487            return true;
3488        }
3489        /* No change for non-gateway pages or for priv decrease.  */
3490        if (type >= 4 && type - 4 < ctx->privilege) {
3491            dest = deposit32(dest, 0, 2, type - 4);
3492        }
3493    } else {
3494        dest &= -4;  /* priv = 0 */
3495    }
3496#endif
3497
3498    if (a->l) {
3499        TCGv_reg tmp = dest_gpr(ctx, a->l);
3500        if (ctx->privilege < 3) {
3501            tcg_gen_andi_reg(tmp, tmp, -4);
3502        }
3503        tcg_gen_ori_reg(tmp, tmp, ctx->privilege);
3504        save_gpr(ctx, a->l, tmp);
3505    }
3506
3507    return do_dbranch(ctx, dest, 0, a->n);
3508}
3509
3510static bool trans_blr(DisasContext *ctx, arg_blr *a)
3511{
3512    if (a->x) {
3513        TCGv_reg tmp = get_temp(ctx);
3514        tcg_gen_shli_reg(tmp, load_gpr(ctx, a->x), 3);
3515        tcg_gen_addi_reg(tmp, tmp, ctx->iaoq_f + 8);
3516        /* The computation here never changes privilege level.  */
3517        return do_ibranch(ctx, tmp, a->l, a->n);
3518    } else {
3519        /* BLR R0,RX is a good way to load PC+8 into RX.  */
3520        return do_dbranch(ctx, ctx->iaoq_f + 8, a->l, a->n);
3521    }
3522}
3523
3524static bool trans_bv(DisasContext *ctx, arg_bv *a)
3525{
3526    TCGv_reg dest;
3527
3528    if (a->x == 0) {
3529        dest = load_gpr(ctx, a->b);
3530    } else {
3531        dest = get_temp(ctx);
3532        tcg_gen_shli_reg(dest, load_gpr(ctx, a->x), 3);
3533        tcg_gen_add_reg(dest, dest, load_gpr(ctx, a->b));
3534    }
3535    dest = do_ibranch_priv(ctx, dest);
3536    return do_ibranch(ctx, dest, 0, a->n);
3537}
3538
3539static bool trans_bve(DisasContext *ctx, arg_bve *a)
3540{
3541    TCGv_reg dest;
3542
3543#ifdef CONFIG_USER_ONLY
3544    dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3545    return do_ibranch(ctx, dest, a->l, a->n);
3546#else
3547    nullify_over(ctx);
3548    dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3549
3550    copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3551    if (ctx->iaoq_b == -1) {
3552        tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3553    }
3554    copy_iaoq_entry(cpu_iaoq_b, -1, dest);
3555    tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
3556    if (a->l) {
3557        copy_iaoq_entry(cpu_gr[a->l], ctx->iaoq_n, ctx->iaoq_n_var);
3558    }
3559    nullify_set(ctx, a->n);
3560    tcg_gen_lookup_and_goto_ptr();
3561    ctx->base.is_jmp = DISAS_NORETURN;
3562    return nullify_end(ctx);
3563#endif
3564}
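
/*
 * A note on the system-mode path above: BVE may change the instruction
 * address space, so it cannot go through the common do_ibranch helper.
 * Instead the queue is advanced by hand: the old back entry
 * (iaoq_b/iasq_b) becomes the new front, and dest together with its
 * space_select() result becomes the new back, before leaving the TB
 * via lookup_and_goto_ptr.
 */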
3565
3566/*
3567 * Float class 0
3568 */
3569
3570static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3571{
3572    tcg_gen_mov_i32(dst, src);
3573}
3574
3575static bool trans_fcpy_f(DisasContext *ctx, arg_fclass01 *a)
3576{
3577    return do_fop_wew(ctx, a->t, a->r, gen_fcpy_f);
3578}
3579
3580static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3581{
3582    tcg_gen_mov_i64(dst, src);
3583}
3584
3585static bool trans_fcpy_d(DisasContext *ctx, arg_fclass01 *a)
3586{
3587    return do_fop_ded(ctx, a->t, a->r, gen_fcpy_d);
3588}
3589
3590static void gen_fabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3591{
3592    tcg_gen_andi_i32(dst, src, INT32_MAX);
3593}
3594
3595static bool trans_fabs_f(DisasContext *ctx, arg_fclass01 *a)
3596{
3597    return do_fop_wew(ctx, a->t, a->r, gen_fabs_f);
3598}
3599
3600static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3601{
3602    tcg_gen_andi_i64(dst, src, INT64_MAX);
3603}
3604
3605static bool trans_fabs_d(DisasContext *ctx, arg_fclass01 *a)
3606{
3607    return do_fop_ded(ctx, a->t, a->r, gen_fabs_d);
3608}
3609
3610static bool trans_fsqrt_f(DisasContext *ctx, arg_fclass01 *a)
3611{
3612    return do_fop_wew(ctx, a->t, a->r, gen_helper_fsqrt_s);
3613}
3614
3615static bool trans_fsqrt_d(DisasContext *ctx, arg_fclass01 *a)
3616{
3617    return do_fop_ded(ctx, a->t, a->r, gen_helper_fsqrt_d);
3618}
3619
3620static bool trans_frnd_f(DisasContext *ctx, arg_fclass01 *a)
3621{
3622    return do_fop_wew(ctx, a->t, a->r, gen_helper_frnd_s);
3623}
3624
3625static bool trans_frnd_d(DisasContext *ctx, arg_fclass01 *a)
3626{
3627    return do_fop_ded(ctx, a->t, a->r, gen_helper_frnd_d);
3628}
3629
3630static void gen_fneg_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3631{
3632    tcg_gen_xori_i32(dst, src, INT32_MIN);
3633}
3634
3635static bool trans_fneg_f(DisasContext *ctx, arg_fclass01 *a)
3636{
3637    return do_fop_wew(ctx, a->t, a->r, gen_fneg_f);
3638}
3639
3640static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3641{
3642    tcg_gen_xori_i64(dst, src, INT64_MIN);
3643}
3644
3645static bool trans_fneg_d(DisasContext *ctx, arg_fclass01 *a)
3646{
3647    return do_fop_ded(ctx, a->t, a->r, gen_fneg_d);
3648}
3649
3650static void gen_fnegabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3651{
3652    tcg_gen_ori_i32(dst, src, INT32_MIN);
3653}
3654
3655static bool trans_fnegabs_f(DisasContext *ctx, arg_fclass01 *a)
3656{
3657    return do_fop_wew(ctx, a->t, a->r, gen_fnegabs_f);
3658}
3659
3660static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3661{
3662    tcg_gen_ori_i64(dst, src, INT64_MIN);
3663}
3664
3665static bool trans_fnegabs_d(DisasContext *ctx, arg_fclass01 *a)
3666{
3667    return do_fop_ded(ctx, a->t, a->r, gen_fnegabs_d);
3668}
3669
3670/*
3671 * Float class 1
3672 */
3673
3674static bool trans_fcnv_d_f(DisasContext *ctx, arg_fclass01 *a)
3675{
3676    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_s);
3677}
3678
3679static bool trans_fcnv_f_d(DisasContext *ctx, arg_fclass01 *a)
3680{
3681    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_d);
3682}
3683
3684static bool trans_fcnv_w_f(DisasContext *ctx, arg_fclass01 *a)
3685{
3686    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_w_s);
3687}
3688
3689static bool trans_fcnv_q_f(DisasContext *ctx, arg_fclass01 *a)
3690{
3691    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_dw_s);
3692}
3693
3694static bool trans_fcnv_w_d(DisasContext *ctx, arg_fclass01 *a)
3695{
3696    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_w_d);
3697}
3698
3699static bool trans_fcnv_q_d(DisasContext *ctx, arg_fclass01 *a)
3700{
3701    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_dw_d);
3702}
3703
3704static bool trans_fcnv_f_w(DisasContext *ctx, arg_fclass01 *a)
3705{
3706    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_w);
3707}
3708
3709static bool trans_fcnv_d_w(DisasContext *ctx, arg_fclass01 *a)
3710{
3711    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_w);
3712}
3713
3714static bool trans_fcnv_f_q(DisasContext *ctx, arg_fclass01 *a)
3715{
3716    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_dw);
3717}
3718
3719static bool trans_fcnv_d_q(DisasContext *ctx, arg_fclass01 *a)
3720{
3721    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_dw);
3722}
3723
3724static bool trans_fcnv_t_f_w(DisasContext *ctx, arg_fclass01 *a)
3725{
3726    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_w);
3727}
3728
3729static bool trans_fcnv_t_d_w(DisasContext *ctx, arg_fclass01 *a)
3730{
3731    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_w);
3732}
3733
3734static bool trans_fcnv_t_f_q(DisasContext *ctx, arg_fclass01 *a)
3735{
3736    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_dw);
3737}
3738
3739static bool trans_fcnv_t_d_q(DisasContext *ctx, arg_fclass01 *a)
3740{
3741    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_dw);
3742}
3743
3744static bool trans_fcnv_uw_f(DisasContext *ctx, arg_fclass01 *a)
3745{
3746    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_uw_s);
3747}
3748
3749static bool trans_fcnv_uq_f(DisasContext *ctx, arg_fclass01 *a)
3750{
3751    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_udw_s);
3752}
3753
3754static bool trans_fcnv_uw_d(DisasContext *ctx, arg_fclass01 *a)
3755{
3756    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_uw_d);
3757}
3758
3759static bool trans_fcnv_uq_d(DisasContext *ctx, arg_fclass01 *a)
3760{
3761    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_udw_d);
3762}
3763
3764static bool trans_fcnv_f_uw(DisasContext *ctx, arg_fclass01 *a)
3765{
3766    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_uw);
3767}
3768
3769static bool trans_fcnv_d_uw(DisasContext *ctx, arg_fclass01 *a)
3770{
3771    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_uw);
3772}
3773
3774static bool trans_fcnv_f_uq(DisasContext *ctx, arg_fclass01 *a)
3775{
3776    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_udw);
3777}
3778
3779static bool trans_fcnv_d_uq(DisasContext *ctx, arg_fclass01 *a)
3780{
3781    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_udw);
3782}
3783
3784static bool trans_fcnv_t_f_uw(DisasContext *ctx, arg_fclass01 *a)
3785{
3786    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_uw);
3787}
3788
3789static bool trans_fcnv_t_d_uw(DisasContext *ctx, arg_fclass01 *a)
3790{
3791    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_uw);
3792}
3793
3794static bool trans_fcnv_t_f_uq(DisasContext *ctx, arg_fclass01 *a)
3795{
3796    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_udw);
3797}
3798
3799static bool trans_fcnv_t_d_uq(DisasContext *ctx, arg_fclass01 *a)
3800{
3801    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_udw);
3802}
3803
3804/*
3805 * Float class 2
3806 */
3807
3808static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a)
3809{
3810    TCGv_i32 ta, tb, tc, ty;
3811
3812    nullify_over(ctx);
3813
3814    ta = load_frw0_i32(a->r1);
3815    tb = load_frw0_i32(a->r2);
3816    ty = tcg_const_i32(a->y);
3817    tc = tcg_const_i32(a->c);
3818
3819    gen_helper_fcmp_s(cpu_env, ta, tb, ty, tc);
3820
3821    tcg_temp_free_i32(ta);
3822    tcg_temp_free_i32(tb);
3823    tcg_temp_free_i32(ty);
3824    tcg_temp_free_i32(tc);
3825
3826    return nullify_end(ctx);
3827}
3828
3829static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a)
3830{
3831    TCGv_i64 ta, tb;
3832    TCGv_i32 tc, ty;
3833
3834    nullify_over(ctx);
3835
3836    ta = load_frd0(a->r1);
3837    tb = load_frd0(a->r2);
3838    ty = tcg_const_i32(a->y);
3839    tc = tcg_const_i32(a->c);
3840
3841    gen_helper_fcmp_d(cpu_env, ta, tb, ty, tc);
3842
3843    tcg_temp_free_i64(ta);
3844    tcg_temp_free_i64(tb);
3845    tcg_temp_free_i32(ty);
3846    tcg_temp_free_i32(tc);
3847
3848    return nullify_end(ctx);
3849}
3850
3851static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
3852{
3853    TCGv_reg t;
3854
3855    nullify_over(ctx);
3856
3857    t = get_temp(ctx);
3858    tcg_gen_ld32u_reg(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));
3859
3860    if (a->y == 1) {
3861        int mask;
3862        bool inv = false;
3863
3864        switch (a->c) {
3865        case 0: /* simple */
3866            tcg_gen_andi_reg(t, t, 0x4000000);
3867            ctx->null_cond = cond_make_0(TCG_COND_NE, t);
3868            goto done;
3869        case 2: /* rej */
3870            inv = true;
3871            /* fallthru */
3872        case 1: /* acc */
3873            mask = 0x43ff800;
3874            break;
3875        case 6: /* rej8 */
3876            inv = true;
3877            /* fallthru */
3878        case 5: /* acc8 */
3879            mask = 0x43f8000;
3880            break;
3881        case 9: /* acc6 */
3882            mask = 0x43e0000;
3883            break;
3884        case 13: /* acc4 */
3885            mask = 0x4380000;
3886            break;
3887        case 17: /* acc2 */
3888            mask = 0x4200000;
3889            break;
3890        default:
3891            gen_illegal(ctx);
3892            return true;
3893        }
3894        if (inv) {
3895            TCGv_reg c = load_const(ctx, mask);
3896            tcg_gen_or_reg(t, t, c);
3897            ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
3898        } else {
3899            tcg_gen_andi_reg(t, t, mask);
3900            ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
3901        }
3902    } else {
3903        unsigned cbit = (a->y ^ 1) - 1;
3904
3905        tcg_gen_extract_reg(t, t, 21 - cbit, 1);
3906        ctx->null_cond = cond_make_0(TCG_COND_NE, t);
3907        tcg_temp_free(t);
3908    }
3909
3910 done:
3911    return nullify_end(ctx);
3912}
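
/*
 * A sketch of the masks above (descriptive, not authoritative): t holds
 * the fr0 status shadow; 0x4000000 is the single C bit tested by the
 * "simple" condition, while the wider constants OR that bit with the
 * compare-queue bits consulted by the acc/rej conditions, each narrower
 * mask dropping low-order queue bits (acc -> acc8 -> acc6 -> acc4 ->
 * acc2).
 */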
3913
3914/*
3915 * Float class 3
3916 */
3917
3918static bool trans_fadd_f(DisasContext *ctx, arg_fclass3 *a)
3919{
3920    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fadd_s);
3921}
3922
3923static bool trans_fadd_d(DisasContext *ctx, arg_fclass3 *a)
3924{
3925    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fadd_d);
3926}
3927
3928static bool trans_fsub_f(DisasContext *ctx, arg_fclass3 *a)
3929{
3930    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fsub_s);
3931}
3932
3933static bool trans_fsub_d(DisasContext *ctx, arg_fclass3 *a)
3934{
3935    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fsub_d);
3936}
3937
3938static bool trans_fmpy_f(DisasContext *ctx, arg_fclass3 *a)
3939{
3940    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_s);
3941}
3942
3943static bool trans_fmpy_d(DisasContext *ctx, arg_fclass3 *a)
3944{
3945    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_d);
3946}
3947
3948static bool trans_fdiv_f(DisasContext *ctx, arg_fclass3 *a)
3949{
3950    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_s);
3951}
3952
3953static bool trans_fdiv_d(DisasContext *ctx, arg_fclass3 *a)
3954{
3955    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_d);
3956}
3957
3958static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a)
3959{
3960    TCGv_i64 x, y;
3961
3962    nullify_over(ctx);
3963
3964    x = load_frw0_i64(a->r1);
3965    y = load_frw0_i64(a->r2);
3966    tcg_gen_mul_i64(x, x, y);
3967    save_frd(a->t, x);
3968    tcg_temp_free_i64(x);
3969    tcg_temp_free_i64(y);
3970
3971    return nullify_end(ctx);
3972}
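
/*
 * XMPYU note: load_frw0_i64() returns the 32-bit single-precision
 * operand zero-extended to 64 bits, so the single i64 multiply above
 * produces the full 32x32 -> 64 unsigned product, which save_frd()
 * stores to a double register.
 */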
3973
3974/* Convert the fmpyadd single-precision register encodings to standard.  */
3975static inline int fmpyadd_s_reg(unsigned r)
3976{
3977    return (r & 16) * 2 + 16 + (r & 15);
3978}
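
/*
 * Worked example for the mapping above (values follow directly from
 * the arithmetic): r = 0..15 selects registers 16..31 and r = 16..31
 * selects 48..63, matching the rt & 31 / rt & 32 half-register
 * convention used by the load_frw and save_frw helpers, e.g.
 *     fmpyadd_s_reg(5)  == 21
 *     fmpyadd_s_reg(21) == 53
 */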
3979
3980static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
3981{
3982    int tm = fmpyadd_s_reg(a->tm);
3983    int ra = fmpyadd_s_reg(a->ra);
3984    int ta = fmpyadd_s_reg(a->ta);
3985    int rm2 = fmpyadd_s_reg(a->rm2);
3986    int rm1 = fmpyadd_s_reg(a->rm1);
3987
3988    nullify_over(ctx);
3989
3990    do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
3991    do_fop_weww(ctx, ta, ta, ra,
3992                is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);
3993
3994    return nullify_end(ctx);
3995}
3996
3997static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
3998{
3999    return do_fmpyadd_s(ctx, a, false);
4000}
4001
4002static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
4003{
4004    return do_fmpyadd_s(ctx, a, true);
4005}
4006
4007static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
4008{
4009    nullify_over(ctx);
4010
4011    do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
4012    do_fop_dedd(ctx, a->ta, a->ta, a->ra,
4013                is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);
4014
4015    return nullify_end(ctx);
4016}
4017
4018static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a)
4019{
4020    return do_fmpyadd_d(ctx, a, false);
4021}
4022
4023static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a)
4024{
4025    return do_fmpyadd_d(ctx, a, true);
4026}
4027
4028static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a)
4029{
4030    TCGv_i32 x, y, z;
4031
4032    nullify_over(ctx);
4033    x = load_frw0_i32(a->rm1);
4034    y = load_frw0_i32(a->rm2);
4035    z = load_frw0_i32(a->ra3);
4036
4037    if (a->neg) {
4038        gen_helper_fmpynfadd_s(x, cpu_env, x, y, z);
4039    } else {
4040        gen_helper_fmpyfadd_s(x, cpu_env, x, y, z);
4041    }
4042
4043    tcg_temp_free_i32(y);
4044    tcg_temp_free_i32(z);
4045    save_frw_i32(a->t, x);
4046    tcg_temp_free_i32(x);
4047    return nullify_end(ctx);
4048}
4049
4050static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a)
4051{
4052    TCGv_i64 x, y, z;
4053
4054    nullify_over(ctx);
4055    x = load_frd0(a->rm1);
4056    y = load_frd0(a->rm2);
4057    z = load_frd0(a->ra3);
4058
4059    if (a->neg) {
4060        gen_helper_fmpynfadd_d(x, cpu_env, x, y, z);
4061    } else {
4062        gen_helper_fmpyfadd_d(x, cpu_env, x, y, z);
4063    }
4064
4065    tcg_temp_free_i64(y);
4066    tcg_temp_free_i64(z);
4067    save_frd(a->t, x);
4068    tcg_temp_free_i64(x);
4069    return nullify_end(ctx);
4070}
4071
4072static bool trans_diag(DisasContext *ctx, arg_diag *a)
4073{
4074    qemu_log_mask(LOG_UNIMP, "DIAG opcode ignored\n");
4075    cond_free(&ctx->null_cond);
4076    return true;
4077}
4078
4079static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
4080{
4081    DisasContext *ctx = container_of(dcbase, DisasContext, base);
4082    int bound;
4083
4084    ctx->cs = cs;
4085    ctx->tb_flags = ctx->base.tb->flags;
4086
4087#ifdef CONFIG_USER_ONLY
4088    ctx->privilege = MMU_USER_IDX;
4089    ctx->mmu_idx = MMU_USER_IDX;
4090    ctx->iaoq_f = ctx->base.pc_first | MMU_USER_IDX;
4091    ctx->iaoq_b = ctx->base.tb->cs_base | MMU_USER_IDX;
4092#else
4093    ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
4094    ctx->mmu_idx = (ctx->tb_flags & PSW_D ? ctx->privilege : MMU_PHYS_IDX);
4095
4096    /* Recover the IAOQ values from the GVA + PRIV.  */
4097    uint64_t cs_base = ctx->base.tb->cs_base;
4098    uint64_t iasq_f = cs_base & ~0xffffffffull;
4099    int32_t diff = cs_base;
4100
4101    ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
4102    ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
4103#endif
4104    ctx->iaoq_n = -1;
4105    ctx->iaoq_n_var = NULL;
4106
4107    /* Bound the number of instructions by those left on the page.  */
4108    bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
4109    ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
4110
4111    ctx->ntempr = 0;
4112    ctx->ntempl = 0;
4113    memset(ctx->tempr, 0, sizeof(ctx->tempr));
4114    memset(ctx->templ, 0, sizeof(ctx->templ));
4115}
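
/*
 * A sketch of the encoding recovered above: in system mode, cs_base
 * carries IASQ_Front in its high 32 bits and the signed
 * IAOQ_Back - IAOQ_Front difference in its low 32 bits (zero when the
 * back of the queue is not known), while the privilege level is
 * re-added from tb->flags as the low bits of iaoq_f.  For the page
 * bound, with 4 KiB pages and a pc_first ending in 0xff8:
 *     bound = -(0xff8 | 0xfffff000) / 4 = -(-8) / 4 = 2
 * i.e. two insns remain before the page crossing.
 */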
4116
4117static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
4118{
4119    DisasContext *ctx = container_of(dcbase, DisasContext, base);
4120
4121    /* Seed the nullification status from PSW[N], as saved in TB->FLAGS.  */
4122    ctx->null_cond = cond_make_f();
4123    ctx->psw_n_nonzero = false;
4124    if (ctx->tb_flags & PSW_N) {
4125        ctx->null_cond.c = TCG_COND_ALWAYS;
4126        ctx->psw_n_nonzero = true;
4127    }
4128    ctx->null_lab = NULL;
4129}
4130
4131static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
4132{
4133    DisasContext *ctx = container_of(dcbase, DisasContext, base);
4134
4135    tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
4136}
4137
4138static bool hppa_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
4139                                      const CPUBreakpoint *bp)
4140{
4141    DisasContext *ctx = container_of(dcbase, DisasContext, base);
4142
4143    gen_excp(ctx, EXCP_DEBUG);
4144    ctx->base.pc_next += 4;
4145    return true;
4146}
4147
4148static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
4149{
4150    DisasContext *ctx = container_of(dcbase, DisasContext, base);
4151    CPUHPPAState *env = cs->env_ptr;
4152    DisasJumpType ret;
4153    int i, n;
4154
4155    /* Execute one insn.  */
4156#ifdef CONFIG_USER_ONLY
4157    if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
4158        do_page_zero(ctx);
4159        ret = ctx->base.is_jmp;
4160        assert(ret != DISAS_NEXT);
4161    } else
4162#endif
4163    {
4164        /* Always fetch the insn, even if nullified, so that we check
4165           the page permissions for execute.  */
4166        uint32_t insn = cpu_ldl_code(env, ctx->base.pc_next);
4167
4168        /* Set up the IA queue for the next insn.
4169           This will be overwritten by a branch.  */
4170        if (ctx->iaoq_b == -1) {
4171            ctx->iaoq_n = -1;
4172            ctx->iaoq_n_var = get_temp(ctx);
4173            tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
4174        } else {
4175            ctx->iaoq_n = ctx->iaoq_b + 4;
4176            ctx->iaoq_n_var = NULL;
4177        }
4178
4179        if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
4180            ctx->null_cond.c = TCG_COND_NEVER;
4181            ret = DISAS_NEXT;
4182        } else {
4183            ctx->insn = insn;
4184            if (!decode(ctx, insn)) {
4185                gen_illegal(ctx);
4186            }
4187            ret = ctx->base.is_jmp;
4188            assert(ctx->null_lab == NULL);
4189        }
4190    }
4191
4192    /* Free any temporaries allocated.  */
4193    for (i = 0, n = ctx->ntempr; i < n; ++i) {
4194        tcg_temp_free(ctx->tempr[i]);
4195        ctx->tempr[i] = NULL;
4196    }
4197    for (i = 0, n = ctx->ntempl; i < n; ++i) {
4198        tcg_temp_free_tl(ctx->templ[i]);
4199        ctx->templ[i] = NULL;
4200    }
4201    ctx->ntempr = 0;
4202    ctx->ntempl = 0;
4203
4204    /* Advance the insn queue.  Note that this check also detects
4205       a privilege change within the instruction queue.  */
4206    if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
4207        if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
4208            && use_goto_tb(ctx, ctx->iaoq_b)
4209            && (ctx->null_cond.c == TCG_COND_NEVER
4210                || ctx->null_cond.c == TCG_COND_ALWAYS)) {
4211            nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
4212            gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
4213            ctx->base.is_jmp = ret = DISAS_NORETURN;
4214        } else {
4215            ctx->base.is_jmp = ret = DISAS_IAQ_N_STALE;
4216        }
4217    }
4218    ctx->iaoq_f = ctx->iaoq_b;
4219    ctx->iaoq_b = ctx->iaoq_n;
4220    ctx->base.pc_next += 4;
4221
4222    switch (ret) {
4223    case DISAS_NORETURN:
4224    case DISAS_IAQ_N_UPDATED:
4225        break;
4226
4227    case DISAS_NEXT:
4228    case DISAS_IAQ_N_STALE:
4229    case DISAS_IAQ_N_STALE_EXIT:
4230        if (ctx->iaoq_f == -1) {
4231            tcg_gen_mov_reg(cpu_iaoq_f, cpu_iaoq_b);
4232            copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
4233#ifndef CONFIG_USER_ONLY
4234            tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
4235#endif
4236            nullify_save(ctx);
4237            ctx->base.is_jmp = (ret == DISAS_IAQ_N_STALE_EXIT
4238                                ? DISAS_EXIT
4239                                : DISAS_IAQ_N_UPDATED);
4240        } else if (ctx->iaoq_b == -1) {
4241            tcg_gen_mov_reg(cpu_iaoq_b, ctx->iaoq_n_var);
4242        }
4243        break;
4244
4245    default:
4246        g_assert_not_reached();
4247    }
4248}
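
/*
 * A note on the advance above: after every insn the two-entry address
 * queue slides forward (iaoq_f <- iaoq_b, iaoq_b <- iaoq_n), with -1
 * standing for "not known at translation time"; the DISAS_IAQ_N_*
 * return codes record how stale the copies in cpu_iaoq_f/b are, and
 * the switch at the end of the function writes them back as needed.
 */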
4249
4250static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
4251{
4252    DisasContext *ctx = container_of(dcbase, DisasContext, base);
4253    DisasJumpType is_jmp = ctx->base.is_jmp;
4254
4255    switch (is_jmp) {
4256    case DISAS_NORETURN:
4257        break;
4258    case DISAS_TOO_MANY:
4259    case DISAS_IAQ_N_STALE:
4260    case DISAS_IAQ_N_STALE_EXIT:
4261        copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
4262        copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
4263        nullify_save(ctx);
4264        /* FALLTHRU */
4265    case DISAS_IAQ_N_UPDATED:
4266        if (ctx->base.singlestep_enabled) {
4267            gen_excp_1(EXCP_DEBUG);
4268        } else if (is_jmp != DISAS_IAQ_N_STALE_EXIT) {
4269            tcg_gen_lookup_and_goto_ptr();
4270        }
4271        /* FALLTHRU */
4272    case DISAS_EXIT:
4273        tcg_gen_exit_tb(NULL, 0);
4274        break;
4275    default:
4276        g_assert_not_reached();
4277    }
4278}
4279
4280static void hppa_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
4281{
4282    target_ulong pc = dcbase->pc_first;
4283
4284#ifdef CONFIG_USER_ONLY
4285    switch (pc) {
4286    case 0x00:
4287        qemu_log("IN:\n0x00000000:  (null)\n");
4288        return;
4289    case 0xb0:
4290        qemu_log("IN:\n0x000000b0:  light-weight-syscall\n");
4291        return;
4292    case 0xe0:
4293        qemu_log("IN:\n0x000000e0:  set-thread-pointer-syscall\n");
4294        return;
4295    case 0x100:
4296        qemu_log("IN:\n0x00000100:  syscall\n");
4297        return;
4298    }
4299#endif
4300
4301    qemu_log("IN: %s\n", lookup_symbol(pc));
4302    log_target_disas(cs, pc, dcbase->tb->size);
4303}
4304
4305static const TranslatorOps hppa_tr_ops = {
4306    .init_disas_context = hppa_tr_init_disas_context,
4307    .tb_start           = hppa_tr_tb_start,
4308    .insn_start         = hppa_tr_insn_start,
4309    .breakpoint_check   = hppa_tr_breakpoint_check,
4310    .translate_insn     = hppa_tr_translate_insn,
4311    .tb_stop            = hppa_tr_tb_stop,
4312    .disas_log          = hppa_tr_disas_log,
4313};
4314
4315void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
4316{
4318    DisasContext ctx;
4319    translator_loop(&hppa_tr_ops, &ctx.base, cs, tb);
4320}
4321
4322void restore_state_to_opc(CPUHPPAState *env, TranslationBlock *tb,
4323                          target_ulong *data)
4324{
4325    env->iaoq_f = data[0];
4326    if (data[1] != (target_ureg)-1) {
4327        env->iaoq_b = data[1];
4328    }
4329    /* Since we were executing the instruction at IAOQ_F, and took some
4330       sort of action that provoked the cpu_restore_state, we can infer
4331       that the instruction was not nullified.  */
4332    env->psw_n = 0;
4333}
4334