/* qemu/target/s390x/translate.c */
   1/*
   2 *  S/390 translation
   3 *
   4 *  Copyright (c) 2009 Ulrich Hecht
   5 *  Copyright (c) 2010 Alexander Graf
   6 *
   7 * This library is free software; you can redistribute it and/or
   8 * modify it under the terms of the GNU Lesser General Public
   9 * License as published by the Free Software Foundation; either
  10 * version 2 of the License, or (at your option) any later version.
  11 *
  12 * This library is distributed in the hope that it will be useful,
  13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  15 * Lesser General Public License for more details.
  16 *
  17 * You should have received a copy of the GNU Lesser General Public
  18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
  19 */
  20
/* #define DEBUG_INLINE_BRANCHES */
#define S390X_DEBUG_DISAS
/* #define S390X_DEBUG_DISAS_VERBOSE */

/* Verbose per-insn tracing; compiles away entirely unless enabled above.  */
#ifdef S390X_DEBUG_DISAS_VERBOSE
#  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
  30
  31#include "qemu/osdep.h"
  32#include "cpu.h"
  33#include "internal.h"
  34#include "disas/disas.h"
  35#include "exec/exec-all.h"
  36#include "tcg-op.h"
  37#include "qemu/log.h"
  38#include "qemu/host-utils.h"
  39#include "exec/cpu_ldst.h"
  40#include "exec/gen-icount.h"
  41#include "exec/helper-proto.h"
  42#include "exec/helper-gen.h"
  43
  44#include "trace-tcg.h"
  45#include "exec/translator.h"
  46#include "exec/log.h"
  47#include "qemu/atomic128.h"
  48
  49
/* Information that (most) every instruction needs to manipulate.  */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;

struct DisasContext {
    DisasContextBase base;
    /* Decode-table entry for the instruction currently being translated.  */
    const DisasInsn *insn;
    /* Decoded operand fields of the current instruction.  */
    DisasFields *fields;
    /* NOTE(review): presumably the target insn of EXECUTE when nonzero;
       confirm against the users of ex_value (not in view here).  */
    uint64_t ex_value;
    /*
     * During translate_one(), pc_tmp is used to determine the instruction
     * to be executed after base.pc_next - e.g. next sequential instruction
     * or a branch target.
     */
    uint64_t pc_tmp;
    /* Length of the current instruction; saved to env->int_pgm_ilen when
       raising a program exception (see gen_program_exception).  */
    uint32_t ilen;
    /* How the condition code of the last cc-setting insn is computed.  */
    enum cc_op cc_op;
    /* NOTE(review): looks like a flag to raise a debug exception at TB
       end -- confirm with the tb_stop hook (outside this chunk).  */
    bool do_debug;
};
  70
/* Information carried about a condition to be evaluated.  */
typedef struct {
    TCGCond cond:8;
    /* Selects which arm of the union below is valid.  */
    bool is_64;
    /* When set, operand a (g1) / b (g2) is a TCG global and must not be
       freed -- see free_compare().  */
    bool g1;
    bool g2;
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;
  82
/* Hit/miss statistics per cc op, only kept for DEBUG_INLINE_BRANCHES.  */
#ifdef DEBUG_INLINE_BRANCHES
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif
  87
/*
 * Store the link value for return address PC into OUT, honoring the
 * current addressing mode:
 *  - 64-bit mode (both FLAG_MASK_32 and FLAG_MASK_64): the full address;
 *  - 31-bit mode (FLAG_MASK_32 only): PC with bit 2^31 (the addressing
 *    mode bit) set, deposited into the low 32 bits of OUT;
 *  - 24-bit mode (neither flag): PC deposited into the low 32 bits of
 *    OUT, upper half of OUT preserved.
 */
static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
{
    TCGv_i64 tmp;

    if (s->base.tb->flags & FLAG_MASK_32) {
        if (s->base.tb->flags & FLAG_MASK_64) {
            tcg_gen_movi_i64(out, pc);
            return;
        }
        pc |= 0x80000000;
    }
    assert(!(s->base.tb->flags & FLAG_MASK_64));
    tmp = tcg_const_i64(pc);
    tcg_gen_deposit_i64(out, out, tmp, 0, 32);
    tcg_temp_free_i64(tmp);
}
 104
/* TCG globals mapped onto CPUS390XState fields (see s390x_translate_init).  */
static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;
/* PER breaking-event address register (env->gbea).  */
static TCGv_i64 gbea;

/* Condition-code computation state; consumed by gen_op_calc_cc().  */
static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

/* Backing storage for register names: "r0".."r15" then "f0".."f15".  */
static char cpu_reg_names[32][4];
static TCGv_i64 regs[16];
static TCGv_i64 fregs[16];
 117
/* Allocate the TCG globals declared above, binding each to its slot in
   CPUS390XState.  Called once during target initialization.  */
void s390x_translate_init(void)
{
    int i;

    psw_addr = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");
    gbea = tcg_global_mem_new_i64(cpu_env,
                                  offsetof(CPUS390XState, gbea),
                                  "gbea");

    cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    /* General registers r0-r15.  */
    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }

    /* FP registers f0-f15 alias the first doubleword of each vector reg.  */
    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
        fregs[i] = tcg_global_mem_new(cpu_env,
                                      offsetof(CPUS390XState, vregs[i][0].d),
                                      cpu_reg_names[i + 16]);
    }
}
 155
 156static TCGv_i64 load_reg(int reg)
 157{
 158    TCGv_i64 r = tcg_temp_new_i64();
 159    tcg_gen_mov_i64(r, regs[reg]);
 160    return r;
 161}
 162
 163static TCGv_i64 load_freg32_i64(int reg)
 164{
 165    TCGv_i64 r = tcg_temp_new_i64();
 166    tcg_gen_shri_i64(r, fregs[reg], 32);
 167    return r;
 168}
 169
/* Write V to the full 64 bits of general register REG.  */
static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}
 174
/* Write V to the full 64 bits of FP register REG.  */
static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(fregs[reg], v);
}
 179
/* Write the low 32 bits of V into general register REG.  */
static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}
 185
/* Write the low 32 bits of V into the HIGH half of general register REG,
   keeping the low half.  */
static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}
 190
/* Write the low 32 bits of V into the high half of FP register REG,
   where short (32-bit) floats live.  */
static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
}
 195
/* Fetch the low half of a 128-bit helper result from env->retxl.  */
static void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}
 200
/* Synchronize psw.addr with the translator's notion of the current PC.  */
static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->base.pc_next);
}
 206
/*
 * Record an unconditional branch for PER (Program Event Recording):
 * update the breaking-event address (gbea) and, when PER tracing is
 * enabled for this TB, invoke the per_branch helper.  TO_NEXT means the
 * branch target is the next sequential insn (s->pc_tmp) rather than the
 * value already in psw_addr.  No-op in user-only builds.
 */
static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i64(gbea, s->base.pc_next);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_const_i64(s->pc_tmp) : psw_addr;
        gen_helper_per_branch(cpu_env, gbea, next_pc);
        if (to_next) {
            /* Only free the temp we allocated; psw_addr is a global.  */
            tcg_temp_free_i64(next_pc);
        }
    }
#endif
}
 221
/*
 * As per_branch, but for a branch taken only when COND(ARG1, ARG2) holds
 * at runtime.  With PER enabled, the helper call is skipped on the
 * not-taken path via an inverted-condition branch; without PER, gbea is
 * conditionally updated with movcond.  No-op in user-only builds.
 */
static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->base.pc_next);
        gen_helper_per_branch(cpu_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        TCGv_i64 pc = tcg_const_i64(s->base.pc_next);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
        tcg_temp_free_i64(pc);
    }
#endif
}
 241
/* Record the current insn as a PER breaking event (update gbea only).  */
static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->base.pc_next);
}
 246
 247static void update_cc_op(DisasContext *s)
 248{
 249    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
 250        tcg_gen_movi_i32(cc_op, s->cc_op);
 251    }
 252}
 253
/* Fetch a 2-byte insn unit at PC, zero-extended to 64 bits.  */
static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)cpu_lduw_code(env, pc);
}
 258
/* Fetch a 4-byte insn unit at PC, zero-extended to 64 bits.  */
static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
}
 263
/* Map the DAT and address-space-control bits of tb->flags to the MMU
   index used for data accesses.  DAT off means real addressing.  */
static int get_mem_index(DisasContext *s)
{
    if (!(s->base.tb->flags & FLAG_MASK_DAT)) {
        return MMU_REAL_IDX;
    }

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_PRIMARY_IDX;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_SECONDARY_IDX;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        return MMU_HOME_IDX;
    default:
        /* Access-register mode is not handled here.  */
        tcg_abort();
        break;
    }
}
 282
/* Emit a call to the generic exception helper with code EXCP.  */
static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}
 289
/*
 * Emit code to raise a program interrupt with the given CODE: record the
 * code and current insn length for the interrupt handler, bring psw.addr
 * and cc_op up to date, then raise EXCP_PGM.
 */
static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was.  */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    tmp = tcg_const_i32(s->ilen);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* update the psw */
    update_psw_addr(s);

    /* Save off cc.  */
    update_cc_op(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);
}
 312
/* Raise a PGM operation exception for an illegal/unknown opcode.  */
static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}
 317
/* Raise a data exception with data-exception code DXC.  */
static inline void gen_data_exception(uint8_t dxc)
{
    TCGv_i32 tmp = tcg_const_i32(dxc);
    gen_helper_data_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}
 324
/* Raise a data exception with DXC 0xff.  */
static inline void gen_trap(DisasContext *s)
{
    /* Set DXC to 0xff */
    gen_data_exception(0xff);
}
 330
/*
 * Compute the effective address base(B2) + index(X2) + displacement(D2)
 * into a new temporary.  Register number 0 means "no base/index" per the
 * architecture.  Outside 64-bit mode the result is wrapped to 31 bits.
 */
static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    bool need_31 = !(s->base.tb->flags & FLAG_MASK_64);

    /* Note that d2 is limited to 20 bits, signed.  If we crop negative
       displacements early we create larger immedate addends.  */

    /* Note that addi optimizes the imm==0 case.  */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        tcg_gen_addi_i64(tmp, tmp, d2);
    } else if (b2) {
        tcg_gen_addi_i64(tmp, regs[b2], d2);
    } else if (x2) {
        tcg_gen_addi_i64(tmp, regs[x2], d2);
    } else {
        /* Constant-only address: wrap at translation time for free.  */
        if (need_31) {
            d2 &= 0x7fffffff;
            need_31 = false;
        }
        tcg_gen_movi_i64(tmp, d2);
    }
    if (need_31) {
        tcg_gen_andi_i64(tmp, tmp, 0x7fffffff);
    }

    return tmp;
}
 360
/* True if cc_src/cc_dst/cc_vr may hold data needed to compute the cc.
   Constant cc values (ops <= 3), STATIC and DYNAMIC do not use them.  */
static inline bool live_cc_data(DisasContext *s)
{
    return (s->cc_op != CC_OP_DYNAMIC
            && s->cc_op != CC_OP_STATIC
            && s->cc_op > 3);
}
 367
/* Set the cc to constant VAL (0..3), discarding any pending cc data.  */
static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}
 377
/* Record a one-operand cc computation: method OP over DST.  */
static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}
 387
/* Record a two-operand cc computation: method OP over SRC and DST.  */
static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}
 398
/* Record a three-operand cc computation: method OP over SRC, DST, VR.  */
static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}
 407
/* cc = (VAL != 0), computed lazily via CC_OP_NZ.  */
static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}
 412
/* cc from a 32-bit float result, computed lazily via CC_OP_NZ_F32.  */
static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
}
 417
/* cc from a 64-bit float result, computed lazily via CC_OP_NZ_F64.  */
static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
}
 422
/* cc from a 128-bit float result (high/low halves), via CC_OP_NZ_F128.  */
static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
}
 427
/* CC value is in env->cc_op; discard any lazily-tracked cc data.  */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}
 438
/* calculates cc into cc_op */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = NULL;
    TCGv_i64 dummy = NULL;

    /* First switch: decide which helper arguments must be materialized.
       The 3-argument arithmetic ops need a constant cc_op; everything
       else that calls the helper needs a dummy zero operand as filler.  */
    switch (s->cc_op) {
    default:
        dummy = tcg_const_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        local_cc_op = tcg_const_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        /* No helper call needed, or cc_op comes from env.  */
        break;
    }

    /* Second switch: emit the actual computation into cc_op.  */
    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA_32:
    case CC_OP_SLA_64:
    case CC_OP_NZ_F128:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    if (local_cc_op) {
        tcg_temp_free_i32(local_cc_op);
    }
    if (dummy) {
        tcg_temp_free_i64(dummy);
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
 544
 545static bool use_exit_tb(DisasContext *s)
 546{
 547    return s->base.singlestep_enabled ||
 548            (tb_cflags(s->base.tb) & CF_LAST_IO) ||
 549            (s->base.tb->flags & FLAG_MASK_PER);
 550}
 551
/* May we chain directly to DEST with goto_tb?  Requires exit_tb not to
   be forced; in system mode DEST must also share a page with either the
   TB start or the current insn.  */
static bool use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (unlikely(use_exit_tb(s))) {
        return false;
    }
#ifndef CONFIG_USER_ONLY
    return (dest & TARGET_PAGE_MASK) == (s->base.tb->pc & TARGET_PAGE_MASK) ||
           (dest & TARGET_PAGE_MASK) == (s->base.pc_next & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
 564
/* Statistics hook: a branch could not be inlined for this cc op.
   Compiles to nothing without DEBUG_INLINE_BRANCHES.  */
static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}
 571
/* Statistics hook: a branch was successfully inlined for this cc op.
   Compiles to nothing without DEBUG_INLINE_BRANCHES.  */
static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}
 578
/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.  Indexed by the 4-bit branch
   mask; entries come in pairs because mask bit 0 (CC=3) is ignored.  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};
 591
/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible.  Mask bits 1 and 0
   (CC=2, CC=3) are therefore ignored.  */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER,
    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE,
    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
 604
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  When the cc method
   allows it, the branch is "inlined" as a direct comparison on the
   tracked operands; otherwise the cc is materialized and compared
   against the mask.  The caller must release C with free_compare().  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    /* Branch-always / branch-never need no operands at all.  */
    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op.  */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU_32:
    case CC_OP_ADDU_64:
        switch (mask) {
        case 8 | 2: /* vr == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* vr != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* no carry -> vr >= src */
            cond = TCG_COND_GEU;
            break;
        case 2 | 1: /* carry -> vr < src */
            cond = TCG_COND_LTU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_SUBU_32:
    case CC_OP_SUBU_64:
        /* Note that CC=0 is impossible; treat it as dont-care.  */
        switch (mask & 7) {
        case 2: /* zero -> op1 == op2 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* !zero -> op1 != op2 */
            cond = TCG_COND_NE;
            break;
        case 4: /* borrow (!carry) -> op1 < op2 */
            cond = TCG_COND_LTU;
            break;
        case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
            cond = TCG_COND_GEU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
    case CC_OP_SUBU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
    case CC_OP_SUBU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_vr);
        /* EQ/NE test the result against zero; GEU/LTU test carry
           via result vs first operand.  */
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            tcg_gen_movi_i32(c->u.s32.b, 0);
        } else {
            tcg_gen_extrl_i64_i32(c->u.s32.b, cc_src);
        }
        break;

    case CC_OP_ADDU_64:
        c->u.s64.a = cc_vr;
        c->g1 = true;
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            c->u.s64.b = tcg_const_i64(0);
        } else {
            c->u.s64.b = cc_src;
            c->g2 = true;
        }
        break;

    case CC_OP_STATIC:
        /* cc is already materialized in cc_op; compare it against the
           mask, using a direct comparison where a single mask pattern
           allows it.  */
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask.  */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
 895
/* Release the temporaries allocated by disas_jcc; operands flagged as
   TCG globals (g1/g2) are not freed.  */
static void free_compare(DisasCompare *c)
{
    if (!c->g1) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.a);
        } else {
            tcg_temp_free_i32(c->u.s32.a);
        }
    }
    if (!c->g2) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.b);
        } else {
            tcg_temp_free_i32(c->u.s32.b);
        }
    }
}
 913
/* ====================================================================== */
/* Define the insn format enumeration.  */
/* Each F* line in insn-format.def expands to one FMT_<name> enumerator;
   the field arguments are ignored in this first expansion.  */
#define F0(N)                         FMT_##N,
#define F1(N, X1)                     F0(N)
#define F2(N, X1, X2)                 F0(N)
#define F3(N, X1, X2, X3)             F0(N)
#define F4(N, X1, X2, X3, X4)         F0(N)
#define F5(N, X1, X2, X3, X4, X5)     F0(N)

typedef enum {
#include "insn-format.def"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
 933
/* Define a structure to hold the decoded fields.  We'll store each inside
   an array indexed by an enum.  In order to conserve memory, we'll arrange
   for fields that do not exist at the same time to overlap, thus the "C"
   for compact.  For checking purposes there is an "O" for original index
   as well that will be applied to availability bitmaps.  */

/* One bit per distinct field kind; used in the presentO bitmap.  */
enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5
};

/* Compact slot for each field; fields that never coexist in one format
   share a slot.  */
enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,

    FLD_C_d2 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,

    NUM_C_FIELD = 7
};

struct DisasFields {
    /* The raw instruction bytes, left-justified in 64 bits.  */
    uint64_t raw_insn;
    /* Primary and secondary opcode bytes.  */
    unsigned op:8;
    unsigned op2:8;
    /* Bitmaps of which compact slots / original field kinds are valid.  */
    unsigned presentC:16;
    unsigned int presentO;
    /* Extracted field values, indexed by DisasFieldIndexC.  */
    int c[NUM_C_FIELD];
};
1002
/* This is the way fields are to be accessed out of DisasFields.  */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)

/* True if field kind C was present in the decoded instruction.  */
static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
{
    return (f->presentO >> c) & 1;
}

/* Fetch the value of field O from its compact slot C; the field must
   have been decoded (asserted).  */
static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(f, o));
    return f->c[c];
}
1018
/* Describe the layout of each field in each format.  */
typedef struct DisasField {
    /* Bit offset of the field within the instruction.  */
    unsigned int beg:8;
    /* Width of the field in bits.  */
    unsigned int size:8;
    /* Extraction type code used by the layout macros below --
       NOTE(review): the decoder that interprets it is outside this
       chunk; confirm its meaning there.  */
    unsigned int type:2;
    unsigned int indexC:6;
    enum DisasFieldIndexO indexO:8;
} DisasField;

/* Per-format list of field descriptors, one slot per compact index.  */
typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;
1031
/* Field-layout building blocks: each expands to one or more DisasField
   initializers { beg, size, type, indexC, indexO } for register (R),
   mask (M), base+displacement (BD), base+index+displacement (BXD),
   long-displacement variants (BDL/BXDL), immediate (I) and length (L)
   fields.  insn-format.def is expanded a second time here to build the
   table, indexed by DisasFormat.  */
#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }

#define F0(N)                     { { } },
#define F1(N, X1)                 { { X1 } },
#define F2(N, X1, X2)             { { X1, X2 } },
#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.def"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef R
#undef M
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L
1072
/* Generally, we'll extract operands into this structures, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */
typedef struct {
    /* g_* is set when the corresponding TCGv aliases a global (e.g. a
       cpu register) and therefore must not be modified in place --
       see the assert in op_andi.  */
    bool g_out, g_out2, g_in1, g_in2;
    /* Operand values; out/out2 (and in1/in2) are used as a pair for
       128-bit results, e.g. op_absf128.  */
    TCGv_i64 out, out2, in1, in2;
    TCGv_i64 addr1;         /* computed effective address, when needed */
} DisasOps;
1081
/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */

#define SPEC_r1_even    1   /* r1 must name an even register */
#define SPEC_r2_even    2   /* r2 must name an even register */
#define SPEC_r3_even    4   /* r3 must name an even register */
#define SPEC_r1_f128    8   /* r1: presumably a valid 128-bit fp pair */
#define SPEC_r2_f128    16  /* r2: presumably a valid 128-bit fp pair */

/* Return values from translate_one, indicating the state of the TB.  */

/* We are not using a goto_tb (for whatever reason), but have updated
   the PC (for whatever reason), so there's no need to do it again on
   exiting the TB.  */
#define DISAS_PC_UPDATED        DISAS_TARGET_0

/* We have emitted one or more goto_tb.  No fixup required.  */
#define DISAS_GOTO_TB           DISAS_TARGET_1

/* We have updated the PC and CC values.  */
#define DISAS_PC_CC_UPDATED     DISAS_TARGET_2

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the PC for the next instruction to be executed.  */
#define DISAS_PC_STALE          DISAS_TARGET_3

/* We are exiting the TB to the main loop.  */
#define DISAS_PC_STALE_NOCHAIN  DISAS_TARGET_4


/* Instruction flags */
#define IF_AFP1     0x0001      /* r1 is a fp reg for HFP/FPS instructions */
#define IF_AFP2     0x0002      /* r2 is a fp reg for HFP/FPS instructions */
#define IF_AFP3     0x0004      /* r3 is a fp reg for HFP/FPS instructions */
#define IF_BFP      0x0008      /* binary floating point instruction */
#define IF_DFP      0x0010      /* decimal floating point instruction */
#define IF_PRIV     0x0020      /* privileged instruction */
1122
/*
 * Static decode-table entry describing one instruction: its opcode,
 * format, constraints, and the set of callbacks that together emit
 * the TCG code for it.
 */
struct DisasInsn {
    unsigned opc:16;        /* opcode value */
    unsigned flags:16;      /* IF_* instruction flags above */
    DisasFormat fmt:8;      /* instruction format -- presumably indexes
                               format_info[]; the consumer is not in view */
    unsigned fac:8;         /* required facility -- TODO confirm encoding */
    unsigned spec:8;        /* SPEC_* operand constraints */

    const char *name;       /* mnemonic, for logging/disassembly */

    /* Pre-process arguments before HELP_OP.  */
    void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);

    /*
     * Post-process output after HELP_OP.
     * Note that these are not called if HELP_OP returns DISAS_NORETURN.
     */
    void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);

    /* Implement the operation itself.  */
    DisasJumpType (*help_op)(DisasContext *, DisasOps *);

    /* Per-instruction private data, interpreted by the callbacks
       (e.g. a MemOp, a shift/size pair, or a condition selector).  */
    uint64_t data;
};
1149
1150/* ====================================================================== */
1151/* Miscellaneous helpers, used by several operations.  */
1152
1153static void help_l2_shift(DisasContext *s, DisasFields *f,
1154                          DisasOps *o, int mask)
1155{
1156    int b2 = get_field(f, b2);
1157    int d2 = get_field(f, d2);
1158
1159    if (b2 == 0) {
1160        o->in2 = tcg_const_i64(d2 & mask);
1161    } else {
1162        o->in2 = get_address(s, 0, b2, d2);
1163        tcg_gen_andi_i64(o->in2, o->in2, mask);
1164    }
1165}
1166
/*
 * Emit an unconditional branch to the known address DEST.  Prefers TB
 * chaining via goto_tb when use_goto_tb allows it; otherwise stores the
 * destination into psw_addr and lets the caller exit the TB.
 */
static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->pc_tmp) {
        /* Branch to the immediately following insn: nothing to emit.  */
        per_branch(s, true);
        return DISAS_NEXT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        per_breaking_event(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb(s->base.tb, 0);
        return DISAS_GOTO_TB;
    } else {
        /* Indirect exit: just update the PSW address.  */
        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    }
}
1186
/*
 * Emit a conditional branch described by compare C.  The taken target is
 * pc_next + 2 * IMM when IS_IMM, else the address in CDEST.  Chooses the
 * best exit strategy (double goto_tb, single goto_tb, or movcond on
 * psw_addr) depending on what use_goto_tb permits.  Always consumes
 * (frees) the compare before returning.
 */
static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
                                 bool is_imm, int imm, TCGv_i64 cdest)
{
    DisasJumpType ret;
    /* Immediate branch offsets are counted in halfwords.  */
    uint64_t dest = s->base.pc_next + 2 * imm;
    TCGLabel *lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = DISAS_NEXT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->pc_tmp) {
            /* Branch to next.  */
            per_branch(s, true);
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (!cdest) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = DISAS_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->pc_tmp)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            /* Branch taken.  */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb(s->base.tb, 1);

            ret = DISAS_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            /* Branch taken: psw_addr already holds cdest (indirect case),
               or is overwritten with the immediate destination here.  */
            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = DISAS_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_const_i64(s->pc_tmp);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            /* Widen the 32-bit compare to 64 bits, so a single movcond
               against zero can select the new PSW address.  */
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            /* cdest was allocated above in this case; release it.  */
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = DISAS_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}
1317
1318/* ====================================================================== */
1319/* The operations.  These perform the bulk of the work for any insn,
1320   usually after the operands have been loaded and output initialized.  */
1321
1322static DisasJumpType op_abs(DisasContext *s, DisasOps *o)
1323{
1324    TCGv_i64 z, n;
1325    z = tcg_const_i64(0);
1326    n = tcg_temp_new_i64();
1327    tcg_gen_neg_i64(n, o->in2);
1328    tcg_gen_movcond_i64(TCG_COND_LT, o->out, o->in2, z, n, o->in2);
1329    tcg_temp_free_i64(n);
1330    tcg_temp_free_i64(z);
1331    return DISAS_NEXT;
1332}
1333
/* f32 absolute value: clear bit 31, the sign bit of a single-precision
   value held in the low 32 bits here (the high bits are cleared too).  */
static DisasJumpType op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return DISAS_NEXT;
}
1339
/* f64 absolute value: clear bit 63, the double-precision sign bit.  */
static DisasJumpType op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return DISAS_NEXT;
}
1345
/* f128 absolute value: clear the sign bit in the high half (in1),
   copy the low half (in2) unchanged.  */
static DisasJumpType op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}
1352
/* Integer addition; the CC is produced later by the cout handler.  */
static DisasJumpType op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
1358
/* Add with carry: out = in1 + in2 + carry-from-previous-CC.  */
static DisasJumpType op_addc(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 carry;

    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* The carry flag is the msb of CC, therefore the branch mask that would
       create that comparison is 3.  Feeding the generated comparison to
       setcond produces the carry flag that we desire.  */
    disas_jcc(s, &cmp, 3);
    carry = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        /* 32-bit compare: widen the 0/1 result to 64 bits.  */
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(carry, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_add_i64(o->out, o->out, carry);
    tcg_temp_free_i64(carry);
    return DISAS_NEXT;
}
1385
/*
 * Add to storage (ASI family): in-memory add of o->in2 at o->addr1.
 * insn->data carries the MemOp for the access.  With STFLE_45 the
 * add is performed atomically; otherwise as load/add/store.
 */
static DisasJumpType op_asi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_STFLE_45)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_STFLE_45)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
1406
/* BFP add, 32-bit: implemented entirely in the helper.  */
static DisasJumpType op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* BFP add, 64-bit.  */
static DisasJumpType op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* BFP add, 128-bit; the low half of the result comes back via
   return_low128.  */
static DisasJumpType op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
1425
/* Bitwise AND; the CC is produced later by the cout handler.  */
static DisasJumpType op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
1431
/*
 * AND immediate against one 16/32-bit slice of a register (NIHH et al --
 * presumed from the shift/size scheme).  insn->data packs
 * (size << 8) | shift, selecting which bits of in1 the immediate hits.
 */
static DisasJumpType op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    /* in2 is about to be clobbered; it must not alias a global.  */
    assert(!o->g_in2);
    /* Position the immediate and set all bits outside the field, so the
       single AND below leaves the rest of in1 untouched.  */
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
1448
/*
 * AND to storage (NI family): in-memory AND of o->in2 at o->addr1,
 * with insn->data as the MemOp.  Atomic when the interlocked-access
 * facility 2 is present, else load/and/store.
 */
static DisasJumpType op_ni(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
1469
1470static DisasJumpType op_bas(DisasContext *s, DisasOps *o)
1471{
1472    pc_to_link_info(o->out, s, s->pc_tmp);
1473    if (o->in2) {
1474        tcg_gen_mov_i64(psw_addr, o->in2);
1475        per_branch(s, false);
1476        return DISAS_PC_UPDATED;
1477    } else {
1478        return DISAS_NEXT;
1479    }
1480}
1481
/*
 * Store BAL-style link information into o->out.  In 31/64-bit mode this
 * is the plain return address.  In 24-bit mode the high byte of the low
 * word is synthesized: ILC in bits 30-31 (ilen/2), CC in bits 28-29,
 * program-mask bits (from psw_mask) in bits 24-27, address in bits 0-23.
 */
static void save_link_info(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t;

    if (s->base.tb->flags & (FLAG_MASK_32 | FLAG_MASK_64)) {
        pc_to_link_info(o->out, s, s->pc_tmp);
        return;
    }
    /* Materialize the CC so it can be merged into the link word.  */
    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, 0xffffffff00000000ull);
    tcg_gen_ori_i64(o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp);
    t = tcg_temp_new_i64();
    /* Extract the program-mask bits from the PSW into bits 24-27.  */
    tcg_gen_shri_i64(t, psw_mask, 16);
    tcg_gen_andi_i64(t, t, 0x0f000000);
    tcg_gen_or_i64(o->out, o->out, t);
    /* Merge the condition code into bits 28-29.  */
    tcg_gen_extu_i32_i64(t, cc_op);
    tcg_gen_shli_i64(t, t, 28);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);
}
1502
1503static DisasJumpType op_bal(DisasContext *s, DisasOps *o)
1504{
1505    save_link_info(s, o);
1506    if (o->in2) {
1507        tcg_gen_mov_i64(psw_addr, o->in2);
1508        per_branch(s, false);
1509        return DISAS_PC_UPDATED;
1510    } else {
1511        return DISAS_NEXT;
1512    }
1513}
1514
/* Branch-and-save with a relative immediate target (halfword scaled).  */
static DisasJumpType op_basi(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    return help_goto_direct(s, s->base.pc_next + 2 * get_field(s->fields, i2));
}
1520
1521static DisasJumpType op_bc(DisasContext *s, DisasOps *o)
1522{
1523    int m1 = get_field(s->fields, m1);
1524    bool is_imm = have_field(s->fields, i2);
1525    int imm = is_imm ? get_field(s->fields, i2) : 0;
1526    DisasCompare c;
1527
1528    /* BCR with R2 = 0 causes no branching */
1529    if (have_field(s->fields, r2) && get_field(s->fields, r2) == 0) {
1530        if (m1 == 14) {
1531            /* Perform serialization */
1532            /* FIXME: check for fast-BCR-serialization facility */
1533            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1534        }
1535        if (m1 == 15) {
1536            /* Perform serialization */
1537            /* FIXME: perform checkpoint-synchronisation */
1538            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1539        }
1540        return DISAS_NEXT;
1541    }
1542
1543    disas_jcc(s, &c, m1);
1544    return help_branch(s, &c, is_imm, imm, o->in2);
1545}
1546
/* BRANCH ON COUNT (32-bit): decrement the low 32 bits of r1 and
   branch if the decremented value is non-zero.  */
static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    /* Branch taken iff (r1.low - 1) != 0.  */
    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1570
/* BRANCH ON COUNT HIGH: like op_bct32 but decrements the high 32 bits
   of r1; the target is always a relative immediate.  */
static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int imm = get_field(s->fields, i2);
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    /* Bring the high half down, decrement, and store it back.  */
    tcg_gen_shri_i64(t, regs[r1], 32);
    tcg_gen_subi_i64(t, t, 1);
    store_reg32h_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, 1, imm, o->in2);
}
1594
/* BRANCH ON COUNT (64-bit): decrement r1 in place and branch if
   non-zero.  r1 is compared directly, hence g1 = true (global).  */
static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1613
/*
 * BRANCH ON INDEX (32-bit, BXH/BXLE style): add r3 to r1, then compare
 * the sum against the odd register of the r3 pair (r3 | 1).
 * insn->data selects the sense: set -> LE, clear -> GT.
 */
static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
    /* Write the updated index back before branching.  */
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1639
/*
 * BRANCH ON INDEX (64-bit): as op_bx32 but updating r1 in place.
 * When r1 is the comparand register (r3 | 1), the add below would
 * clobber it, so its value is copied into a temp first.
 */
static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    if (r1 == (r3 | 1)) {
        /* Snapshot the comparand before the in-place add clobbers it.  */
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1665
/*
 * Compare-and-branch family: compare in1 with in2 using the condition
 * selected by m3 (taken unsigned when insn->data is set), branching to
 * the relative immediate i4 or to the b4/d4 address.
 */
static DisasJumpType op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s->fields, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    /* Both operands are caller-owned; do not free them.  */
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s->fields, i4);
    if (is_imm) {
        imm = get_field(s->fields, i4);
    } else {
        imm = 0;
        /* Indirect form: compute the branch target address into out.  */
        o->out = get_address(s, 0, get_field(s->fields, b4),
                             get_field(s->fields, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}
1691
/* BFP compares (32/64/128-bit): the helper computes the CC directly.  */
static DisasJumpType op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* 128-bit compare takes both halves of each operand.  */
static DisasJumpType op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
1712
/*
 * BFP-to-integer conversions.  Naming (per the mnemonics): cf*/cg* are
 * presumably signed 32/64-bit results, clf*/clg* their logical
 * (unsigned) counterparts; the source size is e/d/x for 32/64/128 bits.
 * The m3 modifier field is forwarded to the helper (rounding control --
 * the helper is not in view; confirm there), and the CC is derived from
 * the source FP value.
 */
static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

/* 128-bit sources pass both halves (in1:in2).  */
static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}
1820
/*
 * Integer-to-BFP conversions; the *l* variants take unsigned sources
 * (per the mnemonics).  No CC is set.  128-bit results return their
 * low half via return_low128.
 */
static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cegb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return DISAS_NEXT;
}

static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return DISAS_NEXT;
}

static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_celgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return DISAS_NEXT;
}

static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return DISAS_NEXT;
}

static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return DISAS_NEXT;
}
1870
/*
 * CHECKSUM: the helper does the work and sets the CC; it returns the
 * number of bytes processed in LEN, which is used here to advance the
 * r2/r2+1 address/length register pair.
 */
static DisasJumpType op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    /* The accumulated checksum comes back via the low-128 mechanism.  */
    return_low128(o->out);

    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);

    return DISAS_NEXT;
}
1886
/*
 * COMPARE LOGICAL (character): the l1 field encodes length - 1.
 * Power-of-two lengths are compared inline with a pair of loads and
 * an unsigned compare; anything else goes through the helper.
 */
static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s->fields, l1);
    TCGv_i32 vl;

    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        /* General case: byte-wise compare in the helper.  */
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return DISAS_NEXT;
    }
    /* Inline case: unsigned compare of the two loaded values.  */
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return DISAS_NEXT;
}
1919
1920static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
1921{
1922    int r1 = get_field(s->fields, r1);
1923    int r2 = get_field(s->fields, r2);
1924    TCGv_i32 t1, t2;
1925
1926    /* r1 and r2 must be even.  */
1927    if (r1 & 1 || r2 & 1) {
1928        gen_program_exception(s, PGM_SPECIFICATION);
1929        return DISAS_NORETURN;
1930    }
1931
1932    t1 = tcg_const_i32(r1);
1933    t2 = tcg_const_i32(r2);
1934    gen_helper_clcl(cc_op, cpu_env, t1, t2);
1935    tcg_temp_free_i32(t1);
1936    tcg_temp_free_i32(t2);
1937    set_cc_static(s);
1938    return DISAS_NEXT;
1939}
1940
1941static DisasJumpType op_clcle(DisasContext *s, DisasOps *o)
1942{
1943    int r1 = get_field(s->fields, r1);
1944    int r3 = get_field(s->fields, r3);
1945    TCGv_i32 t1, t3;
1946
1947    /* r1 and r3 must be even.  */
1948    if (r1 & 1 || r3 & 1) {
1949        gen_program_exception(s, PGM_SPECIFICATION);
1950        return DISAS_NORETURN;
1951    }
1952
1953    t1 = tcg_const_i32(r1);
1954    t3 = tcg_const_i32(r3);
1955    gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
1956    tcg_temp_free_i32(t1);
1957    tcg_temp_free_i32(t3);
1958    set_cc_static(s);
1959    return DISAS_NEXT;
1960}
1961
/* COMPARE LOGICAL LONG UNICODE: as op_clcle, implemented in a helper;
   r1 and r3 name register pairs and must be even.  */
static DisasJumpType op_clclu(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even.  */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return DISAS_NEXT;
}
1982
/* COMPARE LOGICAL UNDER MASK: the helper compares the m3-selected
   bytes of in1 (narrowed to 32 bits) against memory and sets the CC.  */
static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, o->in1);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(m3);
    return DISAS_NEXT;
}
1994
/* COMPARE LOGICAL STRING: helper-implemented; regs[0] is passed in
   (presumably the termination character -- see the helper).  Updated
   operand addresses come back via in1 and the low-128 mechanism.  */
static DisasJumpType op_clst(DisasContext *s, DisasOps *o)
{
    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return DISAS_NEXT;
}
2002
/* COPY SIGN: out = sign bit of in1 combined with magnitude of in2.  */
static DisasJumpType op_cps(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t = tcg_temp_new_i64();
    tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);     /* sign of in1 */
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull); /* |in2| */
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);
    return DISAS_NEXT;
}
2012
static DisasJumpType op_cs(DisasContext *s, DisasOps *o)
{
    /* Compare and swap; the memory-operand width is in s->insn->data.  */
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    TCGv_i64 addr, cc;

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value).  */

    addr = get_address(s, 0, b2, d2);
    tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
                               get_mem_index(s), s->insn->data | MO_ALIGN);
    tcg_temp_free_i64(addr);

    /* Are the memory and expected values (un)equal?  Note that this setcond
       produces the output CC value, thus the NE sense of the test.  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
    tcg_gen_extrl_i64_i32(cc_op, cc);
    tcg_temp_free_i64(cc);
    set_cc_static(s);

    return DISAS_NEXT;
}
2037
static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
{
    /* 128-bit compare double and swap.  */
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    DisasJumpType ret = DISAS_NEXT;
    TCGv_i64 addr;
    TCGv_i32 t_r1, t_r3;

    /* Note that R1:R1+1 = expected value and R3:R3+1 = new value.  */
    addr = get_address(s, 0, b2, d2);
    t_r1 = tcg_const_i32(r1);
    t_r3 = tcg_const_i32(r3);
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        /* Serial context: no atomicity required.  */
        gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
    } else if (HAVE_CMPXCHG128) {
        gen_helper_cdsg_parallel(cpu_env, addr, t_r1, t_r3);
    } else {
        /* No host 128-bit cmpxchg: fall back to the EXCP_ATOMIC path.  */
        gen_helper_exit_atomic(cpu_env);
        ret = DISAS_NORETURN;
    }
    tcg_temp_free_i64(addr);
    tcg_temp_free_i32(t_r1);
    tcg_temp_free_i32(t_r3);

    set_cc_static(s);
    return ret;
}
2067
2068static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
2069{
2070    int r3 = get_field(s->fields, r3);
2071    TCGv_i32 t_r3 = tcg_const_i32(r3);
2072
2073    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
2074        gen_helper_csst_parallel(cc_op, cpu_env, t_r3, o->addr1, o->in2);
2075    } else {
2076        gen_helper_csst(cc_op, cpu_env, t_r3, o->addr1, o->in2);
2077    }
2078    tcg_temp_free_i32(t_r3);
2079
2080    set_cc_static(s);
2081    return DISAS_NEXT;
2082}
2083
2084#ifndef CONFIG_USER_ONLY
static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
{
    /* Compare and swap and purge; operand width comes from insn->data.  */
    TCGMemOp mop = s->insn->data;
    TCGv_i64 addr, old, cc;
    TCGLabel *lab = gen_new_label();

    /* Note that in1 = R1 (zero-extended expected value),
       out = R1 (original reg), out2 = R1+1 (new value).  */

    addr = tcg_temp_new_i64();
    old = tcg_temp_new_i64();
    /* Mask off the low address bits per the operand size.  */
    tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
    tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
                               get_mem_index(s), mop | MO_ALIGN);
    tcg_temp_free_i64(addr);

    /* Are the memory and expected values (un)equal?  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
    tcg_gen_extrl_i64_i32(cc_op, cc);

    /* Write back the output now, so that it happens before the
       following branch, so that we don't need local temps.  */
    if ((mop & MO_SIZE) == MO_32) {
        tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
    } else {
        tcg_gen_mov_i64(o->out, old);
    }
    tcg_temp_free_i64(old);

    /* If the comparison was equal, and the LSB of R2 was set,
       then we need to flush the TLB (for all cpus).  */
    tcg_gen_xori_i64(cc, cc, 1);
    tcg_gen_and_i64(cc, cc, o->in2);
    tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
    tcg_temp_free_i64(cc);

    gen_helper_purge(cpu_env);
    gen_set_label(lab);

    return DISAS_NEXT;
}
2127#endif
2128
2129static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
2130{
2131    TCGv_i64 t1 = tcg_temp_new_i64();
2132    TCGv_i32 t2 = tcg_temp_new_i32();
2133    tcg_gen_extrl_i64_i32(t2, o->in1);
2134    gen_helper_cvd(t1, t2);
2135    tcg_temp_free_i32(t2);
2136    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
2137    tcg_temp_free_i64(t1);
2138    return DISAS_NEXT;
2139}
2140
2141static DisasJumpType op_ct(DisasContext *s, DisasOps *o)
2142{
2143    int m3 = get_field(s->fields, m3);
2144    TCGLabel *lab = gen_new_label();
2145    TCGCond c;
2146
2147    c = tcg_invert_cond(ltgt_cond[m3]);
2148    if (s->insn->data) {
2149        c = tcg_unsigned_cond(c);
2150    }
2151    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);
2152
2153    /* Trap.  */
2154    gen_trap(s);
2155
2156    gen_set_label(lab);
2157    return DISAS_NEXT;
2158}
2159
static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
{
    /* Unicode conversion family (CU12/CU14/CU21/CU24/CU41/CU42);
       insn->data encodes which variant.  */
    int m3 = get_field(s->fields, m3);
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i32 tr1, tr2, chk;

    /* R1 and R2 must both be even.  */
    if ((r1 | r2) & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    /* Without the ETF3 enhancement, the m3 modifier is ignored.  */
    if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
        m3 = 0;
    }

    tr1 = tcg_const_i32(r1);
    tr2 = tcg_const_i32(r2);
    chk = tcg_const_i32(m3);

    switch (s->insn->data) {
    case 12:
        gen_helper_cu12(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 14:
        gen_helper_cu14(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 21:
        gen_helper_cu21(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 24:
        gen_helper_cu24(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 41:
        gen_helper_cu41(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 42:
        gen_helper_cu42(cc_op, cpu_env, tr1, tr2, chk);
        break;
    default:
        g_assert_not_reached();
    }

    tcg_temp_free_i32(tr1);
    tcg_temp_free_i32(tr2);
    tcg_temp_free_i32(chk);
    set_cc_static(s);
    return DISAS_NEXT;
}
2209
2210#ifndef CONFIG_USER_ONLY
2211static DisasJumpType op_diag(DisasContext *s, DisasOps *o)
2212{
2213    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2214    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2215    TCGv_i32 func_code = tcg_const_i32(get_field(s->fields, i2));
2216
2217    gen_helper_diag(cpu_env, r1, r3, func_code);
2218
2219    tcg_temp_free_i32(func_code);
2220    tcg_temp_free_i32(r3);
2221    tcg_temp_free_i32(r1);
2222    return DISAS_NEXT;
2223}
2224#endif
2225
/* 32-bit signed divide; the helper returns one half of the quotient/
   remainder pair directly and the other via the low-128 path.  */
static DisasJumpType op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}

/* 32-bit unsigned divide.  */
static DisasJumpType op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}

/* 64-bit signed divide.  */
static DisasJumpType op_divs64(DisasContext *s, DisasOps *o)
{
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}

/* 128/64-bit unsigned divide; the dividend is the out:out2 pair.  */
static DisasJumpType op_divu64(DisasContext *s, DisasOps *o)
{
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}

/* BFP divide, short format.  */
static DisasJumpType op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* BFP divide, long format.  */
static DisasJumpType op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* BFP divide, extended format; low half returned via the low-128 path.  */
static DisasJumpType op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
2272
/* Extract access register r2 into the output.  */
static DisasJumpType op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
    return DISAS_NEXT;
}

static DisasJumpType op_ecag(DisasContext *s, DisasOps *o)
{
    /* No cache information provided.  */
    tcg_gen_movi_i64(o->out, -1);
    return DISAS_NEXT;
}

/* Extract the floating-point control register.  */
static DisasJumpType op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return DISAS_NEXT;
}
2292
2293static DisasJumpType op_epsw(DisasContext *s, DisasOps *o)
2294{
2295    int r1 = get_field(s->fields, r1);
2296    int r2 = get_field(s->fields, r2);
2297    TCGv_i64 t = tcg_temp_new_i64();
2298
2299    /* Note the "subsequently" in the PoO, which implies a defined result
2300       if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
2301    tcg_gen_shri_i64(t, psw_mask, 32);
2302    store_reg32_i64(r1, t);
2303    if (r2 != 0) {
2304        store_reg32_i64(r2, psw_mask);
2305    }
2306
2307    tcg_temp_free_i64(t);
2308    return DISAS_NEXT;
2309}
2310
static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
{
    /* EXECUTE: run the target instruction at in2, optionally modified
       by register r1.  */
    int r1 = get_field(s->fields, r1);
    TCGv_i32 ilen;
    TCGv_i64 v1;

    /* Nested EXECUTE is not allowed.  */
    if (unlikely(s->ex_value)) {
        gen_program_exception(s, PGM_EXECUTE);
        return DISAS_NORETURN;
    }

    update_psw_addr(s);
    update_cc_op(s);

    /* r1 == 0 means "no modification"; pass a zero constant.  */
    if (r1 == 0) {
        v1 = tcg_const_i64(0);
    } else {
        v1 = regs[r1];
    }

    ilen = tcg_const_i32(s->ilen);
    gen_helper_ex(cpu_env, ilen, v1, o->in2);
    tcg_temp_free_i32(ilen);

    /* Only free v1 when it was a constant, not a live register.  */
    if (r1 == 0) {
        tcg_temp_free_i64(v1);
    }

    return DISAS_PC_CC_UPDATED;
}
2342
2343static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
2344{
2345    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2346    gen_helper_fieb(o->out, cpu_env, o->in2, m3);
2347    tcg_temp_free_i32(m3);
2348    return DISAS_NEXT;
2349}
2350
2351static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
2352{
2353    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2354    gen_helper_fidb(o->out, cpu_env, o->in2, m3);
2355    tcg_temp_free_i32(m3);
2356    return DISAS_NEXT;
2357}
2358
2359static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
2360{
2361    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2362    gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m3);
2363    return_low128(o->out2);
2364    tcg_temp_free_i32(m3);
2365    return DISAS_NEXT;
2366}
2367
static DisasJumpType op_flogr(DisasContext *s, DisasOps *o)
{
    /* Find leftmost one.  */
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation.  */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64.  */
    tcg_gen_clzi_i64(o->out, o->in2, 64);

    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing.  */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return DISAS_NEXT;
}
2387
2388static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
2389{
2390    int m3 = get_field(s->fields, m3);
2391    int pos, len, base = s->insn->data;
2392    TCGv_i64 tmp = tcg_temp_new_i64();
2393    uint64_t ccm;
2394
2395    switch (m3) {
2396    case 0xf:
2397        /* Effectively a 32-bit load.  */
2398        tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2399        len = 32;
2400        goto one_insert;
2401
2402    case 0xc:
2403    case 0x6:
2404    case 0x3:
2405        /* Effectively a 16-bit load.  */
2406        tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2407        len = 16;
2408        goto one_insert;
2409
2410    case 0x8:
2411    case 0x4:
2412    case 0x2:
2413    case 0x1:
2414        /* Effectively an 8-bit load.  */
2415        tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2416        len = 8;
2417        goto one_insert;
2418
2419    one_insert:
2420        pos = base + ctz32(m3) * 8;
2421        tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2422        ccm = ((1ull << len) - 1) << pos;
2423        break;
2424
2425    default:
2426        /* This is going to be a sequence of loads and inserts.  */
2427        pos = base + 32 - 8;
2428        ccm = 0;
2429        while (m3) {
2430            if (m3 & 0x8) {
2431                tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2432                tcg_gen_addi_i64(o->in2, o->in2, 1);
2433                tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2434                ccm |= 0xff << pos;
2435            }
2436            m3 = (m3 << 1) & 0xf;
2437            pos -= 8;
2438        }
2439        break;
2440    }
2441
2442    tcg_gen_movi_i64(tmp, ccm);
2443    gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2444    tcg_temp_free_i64(tmp);
2445    return DISAS_NEXT;
2446}
2447
2448static DisasJumpType op_insi(DisasContext *s, DisasOps *o)
2449{
2450    int shift = s->insn->data & 0xff;
2451    int size = s->insn->data >> 8;
2452    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
2453    return DISAS_NEXT;
2454}
2455
static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
{
    /* Insert program mask: build one byte from the program mask
       (psw_mask bits 40-43) and the materialized CC, and deposit it
       at bits 24-31 of r1, leaving the rest of the register intact.  */
    TCGv_i64 t1, t2;

    gen_op_calc_cc(s);
    t1 = tcg_temp_new_i64();
    tcg_gen_extract_i64(t1, psw_mask, 40, 4);
    t2 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(t2, cc_op);
    /* t1 = (cc << 4) | program_mask.  */
    tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
    tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return DISAS_NEXT;
}
2471
2472#ifndef CONFIG_USER_ONLY
2473static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
2474{
2475    TCGv_i32 m4;
2476
2477    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2478        m4 = tcg_const_i32(get_field(s->fields, m4));
2479    } else {
2480        m4 = tcg_const_i32(0);
2481    }
2482    gen_helper_idte(cpu_env, o->in1, o->in2, m4);
2483    tcg_temp_free_i32(m4);
2484    return DISAS_NEXT;
2485}
2486
2487static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
2488{
2489    TCGv_i32 m4;
2490
2491    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2492        m4 = tcg_const_i32(get_field(s->fields, m4));
2493    } else {
2494        m4 = tcg_const_i32(0);
2495    }
2496    gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
2497    tcg_temp_free_i32(m4);
2498    return DISAS_NEXT;
2499}
2500
/* Insert storage key extended: helper reads the key for address in2.  */
static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
{
    gen_helper_iske(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}
2506#endif
2507
static DisasJumpType op_msa(DisasContext *s, DisasOps *o)
{
    /* Message-security-assist family; insn->data holds the function
       type.  The switch validates register-pair constraints, with each
       stricter case falling through into the weaker checks below it.  */
    int r1 = have_field(s->fields, r1) ? get_field(s->fields, r1) : 0;
    int r2 = have_field(s->fields, r2) ? get_field(s->fields, r2) : 0;
    int r3 = have_field(s->fields, r3) ? get_field(s->fields, r3) : 0;
    TCGv_i32 t_r1, t_r2, t_r3, type;

    switch (s->insn->data) {
    case S390_FEAT_TYPE_KMCTR:
        /* r3 must be an even, nonzero register.  */
        if (r3 & 1 || !r3) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_PPNO:
    case S390_FEAT_TYPE_KMF:
    case S390_FEAT_TYPE_KMC:
    case S390_FEAT_TYPE_KMO:
    case S390_FEAT_TYPE_KM:
        /* r1 must be an even, nonzero register.  */
        if (r1 & 1 || !r1) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_KMAC:
    case S390_FEAT_TYPE_KIMD:
    case S390_FEAT_TYPE_KLMD:
        /* r2 must be an even, nonzero register.  */
        if (r2 & 1 || !r2) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_PCKMO:
    case S390_FEAT_TYPE_PCC:
        break;
    default:
        g_assert_not_reached();
    };

    t_r1 = tcg_const_i32(r1);
    t_r2 = tcg_const_i32(r2);
    t_r3 = tcg_const_i32(r3);
    type = tcg_const_i32(s->insn->data);
    gen_helper_msa(cc_op, cpu_env, t_r1, t_r2, t_r3, type);
    set_cc_static(s);
    tcg_temp_free_i32(t_r1);
    tcg_temp_free_i32(t_r2);
    tcg_temp_free_i32(t_r3);
    tcg_temp_free_i32(type);
    return DISAS_NEXT;
}
2559
/* BFP compare-and-signal, short format; helper sets the CC.  */
static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
{
    gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* BFP compare-and-signal, long format.  */
static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
{
    gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* BFP compare-and-signal, extended format (register pairs).  */
static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
{
    gen_helper_kxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
2580
static DisasJumpType op_laa(DisasContext *s, DisasOps *o)
{
    /* The real output is the original value in memory, which the atomic
       fetch-add returns into in2 (in2 initially holds the address).  */
    tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the addition for setting CC.  */
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
2591
static DisasJumpType op_lan(DisasContext *s, DisasOps *o)
{
    /* The real output is the original value in memory, which the atomic
       fetch-and returns into in2 (in2 initially holds the address).  */
    tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the AND for setting CC.  */
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
2602
static DisasJumpType op_lao(DisasContext *s, DisasOps *o)
{
    /* The real output is the original value in memory, which the atomic
       fetch-or returns into in2 (in2 initially holds the address).  */
    tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                s->insn->data | MO_ALIGN);
    /* However, we need to recompute the OR for setting CC.  */
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
2613
static DisasJumpType op_lax(DisasContext *s, DisasOps *o)
{
    /* The real output is the original value in memory, which the atomic
       fetch-xor returns into in2 (in2 initially holds the address).  */
    tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the XOR for setting CC.  */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
2624
/* LDEB: lengthen short BFP to long.  */
static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}

/* LEDB: round long BFP to short.  */
static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
{
    gen_helper_ledb(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}

/* LDXB: round extended BFP (in1:in2 pair) to long.  */
static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* LEXB: round extended BFP (in1:in2 pair) to short.  */
static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
{
    gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* LXDB: lengthen long BFP to extended; low half via the low-128 path.  */
static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}

/* LXEB: lengthen short BFP to extended; low half via the low-128 path.  */
static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
2662
/* Load 31 bits and zero-extend (mask to the low 31 bits).  */
static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return DISAS_NEXT;
}

/* Sign-extending byte load from the address in in2.  */
static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

/* Zero-extending byte load.  */
static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

/* Sign-extending halfword load.  */
static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

/* Zero-extending halfword load.  */
static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

/* Sign-extending word load.  */
static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

/* Zero-extending word load.  */
static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

/* Doubleword load.  */
static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
2710
2711static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
2712{
2713    TCGLabel *lab = gen_new_label();
2714    store_reg32_i64(get_field(s->fields, r1), o->in2);
2715    /* The value is stored even in case of trap. */
2716    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2717    gen_trap(s);
2718    gen_set_label(lab);
2719    return DISAS_NEXT;
2720}
2721
2722static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
2723{
2724    TCGLabel *lab = gen_new_label();
2725    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2726    /* The value is stored even in case of trap. */
2727    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2728    gen_trap(s);
2729    gen_set_label(lab);
2730    return DISAS_NEXT;
2731}
2732
2733static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
2734{
2735    TCGLabel *lab = gen_new_label();
2736    store_reg32h_i64(get_field(s->fields, r1), o->in2);
2737    /* The value is stored even in case of trap. */
2738    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2739    gen_trap(s);
2740    gen_set_label(lab);
2741    return DISAS_NEXT;
2742}
2743
2744static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
2745{
2746    TCGLabel *lab = gen_new_label();
2747    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2748    /* The value is stored even in case of trap. */
2749    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2750    gen_trap(s);
2751    gen_set_label(lab);
2752    return DISAS_NEXT;
2753}
2754
2755static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
2756{
2757    TCGLabel *lab = gen_new_label();
2758    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2759    /* The value is stored even in case of trap. */
2760    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2761    gen_trap(s);
2762    gen_set_label(lab);
2763    return DISAS_NEXT;
2764}
2765
static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
{
    /* Load on condition: out = m3-condition ? in2 : in1.  */
    DisasCompare c;

    disas_jcc(s, &c, get_field(s->fields, m3));

    if (c.is_64) {
        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
                            o->in2, o->in1);
        free_compare(&c);
    } else {
        /* 32-bit comparison: materialize the condition as 0/1, widen it,
           and select against zero.  */
        TCGv_i32 t32 = tcg_temp_new_i32();
        TCGv_i64 t, z;

        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
        free_compare(&c);

        t = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t, t32);
        tcg_temp_free_i32(t32);

        z = tcg_const_i64(0);
        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
        tcg_temp_free_i64(t);
        tcg_temp_free_i64(z);
    }

    return DISAS_NEXT;
}
2795
2796#ifndef CONFIG_USER_ONLY
2797static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
2798{
2799    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2800    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2801    gen_helper_lctl(cpu_env, r1, o->in2, r3);
2802    tcg_temp_free_i32(r1);
2803    tcg_temp_free_i32(r3);
2804    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
2805    return DISAS_PC_STALE_NOCHAIN;
2806}
2807
2808static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
2809{
2810    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2811    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2812    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
2813    tcg_temp_free_i32(r1);
2814    tcg_temp_free_i32(r3);
2815    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
2816    return DISAS_PC_STALE_NOCHAIN;
2817}
2818
/* Load real address; helper performs the translation and sets the CC.  */
static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
{
    gen_helper_lra(o->out, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* Load program parameter: store in2 into env->pp.  */
static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
{
    tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
    return DISAS_NEXT;
}
2831
static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
{
    /* Load (short-format) PSW: two 32-bit words at in2, with doubleword
       alignment enforced on the first load.  */
    TCGv_i64 t1, t2;

    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
                        MO_TEUL | MO_ALIGN_8);
    tcg_gen_addi_i64(o->in2, o->in2, 4);
    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK.  */
    tcg_gen_shli_i64(t1, t1, 32);
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return DISAS_NORETURN;
}
2851
static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
{
    /* Load (extended-format) PSW: two 64-bit words at in2, with
       doubleword alignment enforced on the first load.  */
    TCGv_i64 t1, t2;

    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
                        MO_TEQ | MO_ALIGN_8);
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return DISAS_NORETURN;
}
2869#endif
2870
2871static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
2872{
2873    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2874    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2875    gen_helper_lam(cpu_env, r1, o->in2, r3);
2876    tcg_temp_free_i32(r1);
2877    tcg_temp_free_i32(r3);
2878    return DISAS_NEXT;
2879}
2880
static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
{
    /* Load multiple (32-bit): registers r1 through r3 (wrapping mod 16)
       from consecutive words starting at in2.  The first and last loads
       are issued up front so any page fault fires before registers are
       modified.  */
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t1);
        tcg_temp_free(t1);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
    store_reg32_i64(r1, t1);
    store_reg32_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t2);
        tcg_temp_free(t1);
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t1);
    }
    tcg_temp_free(t2);
    tcg_temp_free(t1);

    return DISAS_NEXT;
}
2926
static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
{
    /* Load multiple high: like op_lm32 but targeting the high 32 bits
       of each register.  Same fault-first ordering.  */
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t1);
        tcg_temp_free(t1);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
    store_reg32h_i64(r1, t1);
    store_reg32h_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t2);
        tcg_temp_free(t1);
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t1);
    }
    tcg_temp_free(t2);
    tcg_temp_free(t1);

    return DISAS_NEXT;
}
2972
static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
{
    /* Load multiple (64-bit): registers r1 through r3 (wrapping mod 16)
       from consecutive doublewords starting at in2, with the first and
       last loads issued up front to surface page faults early.  */
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
    tcg_gen_mov_i64(regs[r1], t1);
    tcg_temp_free(t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t1);
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t1, 8);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t1);
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
    }
    tcg_temp_free(t1);

    return DISAS_NEXT;
}
3013
/* LOAD PAIR DISJOINT: load two aligned operands from independent addresses,
   presenting them as if fetched interlocked.  s->insn->data selects the
   access size (TCGMemOp).  CC is set to 0 (pair obtained interlocked). */
static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 a1, a2;
    TCGMemOp mop = s->insn->data;

    /* In a parallel context, stop the world and single step.  */
    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        update_psw_addr(s);
        update_cc_op(s);
        gen_exception(EXCP_ATOMIC);
        return DISAS_NORETURN;
    }

    /* In a serial context, perform the two loads ... */
    a1 = get_address(s, 0, get_field(s->fields, b1), get_field(s->fields, d1));
    a2 = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
    tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
    tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
    tcg_temp_free_i64(a1);
    tcg_temp_free_i64(a2);

    /* ... and indicate that we performed them while interlocked.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}
3039
/* LOAD PAIR FROM QUADWORD: atomically load a 16-byte operand into an even/odd
   register pair.  Uses the non-atomic helper when not in a parallel context,
   the atomic helper when 128-bit atomics are available, and otherwise falls
   back to exiting to the slow path. */
static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
{
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        gen_helper_lpq(o->out, cpu_env, o->in2);
    } else if (HAVE_ATOMIC128) {
        gen_helper_lpq_parallel(o->out, cpu_env, o->in2);
    } else {
        gen_helper_exit_atomic(cpu_env);
        return DISAS_NORETURN;
    }
    return_low128(o->out2);
    return DISAS_NEXT;
}
3053
3054#ifndef CONFIG_USER_ONLY
/* LOAD USING REAL ADDRESS (32-bit): defer to the lura helper. */
static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
{
    gen_helper_lura(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}
3060
/* LOAD USING REAL ADDRESS (64-bit): defer to the lurag helper. */
static DisasJumpType op_lurag(DisasContext *s, DisasOps *o)
{
    gen_helper_lurag(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}
3066#endif
3067
/* LOAD AND ZERO RIGHTMOST BYTE: copy in2 with the low 8 bits cleared. */
static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, -256);
    return DISAS_NEXT;
}
3073
/* Generic register move: transfer ownership of the in2 temp to out, so the
   common output writeback stores it.  in2 is cleared to avoid a double free. */
static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    o->g_out = o->g_in2;
    o->in2 = NULL;
    o->g_in2 = false;
    return DISAS_NEXT;
}
3082
/* Move in2 to out (as op_mov2) and additionally set access register 1
   according to the current address-space control from the TB flags:
   0 for primary, 1 for the access-register mode indicator, 2 for home,
   or a copy of the b2 access register in secondary mode. */
static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s->fields, b2);
    TCGv ar1 = tcg_temp_new_i64();

    o->out = o->in2;
    o->g_out = o->g_in2;
    o->in2 = NULL;
    o->g_in2 = false;

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 0);
        break;
    case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 1);
        break;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        if (b2) {
            tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
        } else {
            /* Base register 0 means no access register to copy.  */
            tcg_gen_movi_i64(ar1, 0);
        }
        break;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 2);
        break;
    }

    tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
    tcg_temp_free_i64(ar1);

    return DISAS_NEXT;
}
3117
/* Move a register pair: transfer ownership of both input temps (in1/in2)
   to the outputs (out/out2), clearing the inputs to avoid double frees. */
static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
{
    o->out = o->in1;
    o->out2 = o->in2;
    o->g_out = o->g_in1;
    o->g_out2 = o->g_in2;
    o->in1 = NULL;
    o->in2 = NULL;
    o->g_in1 = o->g_in2 = false;
    return DISAS_NEXT;
}
3129
3130static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
3131{
3132    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3133    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
3134    tcg_temp_free_i32(l);
3135    return DISAS_NEXT;
3136}
3137
3138static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
3139{
3140    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3141    gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
3142    tcg_temp_free_i32(l);
3143    return DISAS_NEXT;
3144}
3145
3146static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
3147{
3148    int r1 = get_field(s->fields, r1);
3149    int r2 = get_field(s->fields, r2);
3150    TCGv_i32 t1, t2;
3151
3152    /* r1 and r2 must be even.  */
3153    if (r1 & 1 || r2 & 1) {
3154        gen_program_exception(s, PGM_SPECIFICATION);
3155        return DISAS_NORETURN;
3156    }
3157
3158    t1 = tcg_const_i32(r1);
3159    t2 = tcg_const_i32(r2);
3160    gen_helper_mvcl(cc_op, cpu_env, t1, t2);
3161    tcg_temp_free_i32(t1);
3162    tcg_temp_free_i32(t2);
3163    set_cc_static(s);
3164    return DISAS_NEXT;
3165}
3166
3167static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
3168{
3169    int r1 = get_field(s->fields, r1);
3170    int r3 = get_field(s->fields, r3);
3171    TCGv_i32 t1, t3;
3172
3173    /* r1 and r3 must be even.  */
3174    if (r1 & 1 || r3 & 1) {
3175        gen_program_exception(s, PGM_SPECIFICATION);
3176        return DISAS_NORETURN;
3177    }
3178
3179    t1 = tcg_const_i32(r1);
3180    t3 = tcg_const_i32(r3);
3181    gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
3182    tcg_temp_free_i32(t1);
3183    tcg_temp_free_i32(t3);
3184    set_cc_static(s);
3185    return DISAS_NEXT;
3186}
3187
3188static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
3189{
3190    int r1 = get_field(s->fields, r1);
3191    int r3 = get_field(s->fields, r3);
3192    TCGv_i32 t1, t3;
3193
3194    /* r1 and r3 must be even.  */
3195    if (r1 & 1 || r3 & 1) {
3196        gen_program_exception(s, PGM_SPECIFICATION);
3197        return DISAS_NORETURN;
3198    }
3199
3200    t1 = tcg_const_i32(r1);
3201    t3 = tcg_const_i32(r3);
3202    gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
3203    tcg_temp_free_i32(t1);
3204    tcg_temp_free_i32(t3);
3205    set_cc_static(s);
3206    return DISAS_NEXT;
3207}
3208
/* MOVE WITH OPTIONAL SPECIFICATIONS (MVCOS): helper performs the move with
   the operand-access controls in r3; CC comes back in cc_op. */
static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);
    set_cc_static(s);
    return DISAS_NEXT;
}
3216
3217#ifndef CONFIG_USER_ONLY
/* MOVE TO PRIMARY (MVCP).  Note: in this SS-d format the register number
   occupies the l1 field position, hence get_field(..., l1) for r1. */
static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3225
/* MOVE TO SECONDARY (MVCS).  As with MVCP, the register number is encoded
   in the l1 field position of the SS-d format. */
static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3233#endif
3234
3235static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
3236{
3237    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3238    gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
3239    tcg_temp_free_i32(l);
3240    return DISAS_NEXT;
3241}
3242
3243static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
3244{
3245    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3246    gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
3247    tcg_temp_free_i32(l);
3248    return DISAS_NEXT;
3249}
3250
/* MOVE PAGE (MVPG): helper moves one page; r0 supplies the option bits.
   CC comes back in cc_op. */
static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
{
    gen_helper_mvpg(cc_op, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3257
/* MOVE STRING (MVST): r0 holds the terminating character.  The helper
   returns the updated first-operand address in o->in1 and the updated
   second-operand address in the low 128 return (written back to in2). */
static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
{
    gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return DISAS_NEXT;
}
3265
3266static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
3267{
3268    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3269    gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
3270    tcg_temp_free_i32(l);
3271    return DISAS_NEXT;
3272}
3273
/* MULTIPLY (single-width): out = in1 * in2, low 64 bits. */
static DisasJumpType op_mul(DisasContext *s, DisasOps *o)
{
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3279
/* MULTIPLY (double-width): 64x64->128 unsigned; high half in out,
   low half in out2. */
static DisasJumpType op_mul128(DisasContext *s, DisasOps *o)
{
    tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3285
/* MULTIPLY (short BFP): out = in1 * in2 via softfloat helper. */
static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
{
    gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3291
/* MULTIPLY (short -> long BFP): widen both operands, then multiply. */
static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3297
/* MULTIPLY (long BFP): out = in1 * in2 via softfloat helper. */
static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3303
/* MULTIPLY (extended BFP): 128-bit operands are passed and returned as
   two 64-bit halves; the low half comes back via return_low128. */
static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
3310
/* MULTIPLY (long -> extended BFP): widen and multiply; 128-bit result
   returned as two halves. */
static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
3317
3318static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
3319{
3320    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
3321    gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
3322    tcg_temp_free_i64(r3);
3323    return DISAS_NEXT;
3324}
3325
/* MULTIPLY AND ADD (long BFP): out = in1 * in2 + f(r3). */
static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return DISAS_NEXT;
}
3332
3333static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
3334{
3335    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
3336    gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
3337    tcg_temp_free_i64(r3);
3338    return DISAS_NEXT;
3339}
3340
/* MULTIPLY AND SUBTRACT (long BFP): out = in1 * in2 - f(r3). */
static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return DISAS_NEXT;
}
3347
3348static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
3349{
3350    TCGv_i64 z, n;
3351    z = tcg_const_i64(0);
3352    n = tcg_temp_new_i64();
3353    tcg_gen_neg_i64(n, o->in2);
3354    tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
3355    tcg_temp_free_i64(n);
3356    tcg_temp_free_i64(z);
3357    return DISAS_NEXT;
3358}
3359
/* LOAD NEGATIVE (short BFP): force the sign bit on (bit 31). */
static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
    return DISAS_NEXT;
}
3365
/* LOAD NEGATIVE (long BFP): force the sign bit on (bit 63). */
static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
    return DISAS_NEXT;
}
3371
/* LOAD NEGATIVE (extended BFP): force the sign bit in the high half,
   pass the low half through unchanged. */
static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}
3378
3379static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
3380{
3381    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3382    gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
3383    tcg_temp_free_i32(l);
3384    set_cc_static(s);
3385    return DISAS_NEXT;
3386}
3387
/* LOAD COMPLEMENT: out = -in2 (two's complement). */
static DisasJumpType op_neg(DisasContext *s, DisasOps *o)
{
    tcg_gen_neg_i64(o->out, o->in2);
    return DISAS_NEXT;
}
3393
/* LOAD COMPLEMENT (short BFP): flip the sign bit (bit 31). */
static DisasJumpType op_negf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
    return DISAS_NEXT;
}
3399
/* LOAD COMPLEMENT (long BFP): flip the sign bit (bit 63). */
static DisasJumpType op_negf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
    return DISAS_NEXT;
}
3405
/* LOAD COMPLEMENT (extended BFP): flip the sign bit in the high half,
   pass the low half through unchanged. */
static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}
3412
3413static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
3414{
3415    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3416    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
3417    tcg_temp_free_i32(l);
3418    set_cc_static(s);
3419    return DISAS_NEXT;
3420}
3421
/* OR: out = in1 | in2. */
static DisasJumpType op_or(DisasContext *s, DisasOps *o)
{
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3427
/* OR IMMEDIATE (OIHH et al.): OR a 16/32-bit immediate into one field of
   the register.  s->insn->data packs the field size (high byte) and the
   shift to the field position (low byte).  CC reflects only the bits of
   the targeted field. */
static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    /* in2 is mutated in place, so it must be a local temp.  */
    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
3443
/* OR to storage (OI/OIY/OSI style): read-modify-write a memory operand.
   With the interlocked-access facility the OR is done atomically in
   memory; otherwise it is a plain load / OR / store sequence.  In both
   cases o->out is recomputed so the caller can derive the CC. */
static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                    s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
3464
3465static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
3466{
3467    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3468    gen_helper_pack(cpu_env, l, o->addr1, o->in2);
3469    tcg_temp_free_i32(l);
3470    return DISAS_NEXT;
3471}
3472
3473static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
3474{
3475    int l2 = get_field(s->fields, l2) + 1;
3476    TCGv_i32 l;
3477
3478    /* The length must not exceed 32 bytes.  */
3479    if (l2 > 32) {
3480        gen_program_exception(s, PGM_SPECIFICATION);
3481        return DISAS_NORETURN;
3482    }
3483    l = tcg_const_i32(l2);
3484    gen_helper_pka(cpu_env, o->addr1, o->in2, l);
3485    tcg_temp_free_i32(l);
3486    return DISAS_NEXT;
3487}
3488
3489static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
3490{
3491    int l2 = get_field(s->fields, l2) + 1;
3492    TCGv_i32 l;
3493
3494    /* The length must be even and should not exceed 64 bytes.  */
3495    if ((l2 & 1) || (l2 > 64)) {
3496        gen_program_exception(s, PGM_SPECIFICATION);
3497        return DISAS_NORETURN;
3498    }
3499    l = tcg_const_i32(l2);
3500    gen_helper_pku(cpu_env, o->addr1, o->in2, l);
3501    tcg_temp_free_i32(l);
3502    return DISAS_NEXT;
3503}
3504
/* POPULATION COUNT (POPCNT): per-byte bit counts via helper. */
static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o)
{
    gen_helper_popcnt(o->out, o->in2);
    return DISAS_NEXT;
}
3510
3511#ifndef CONFIG_USER_ONLY
/* PURGE TLB (PTLB): flush translation lookaside buffers via helper. */
static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
{
    gen_helper_ptlb(cpu_env);
    return DISAS_NEXT;
}
3517#endif
3518
/* ROTATE THEN INSERT SELECTED BITS (RISBG/RISBGN/RISBHG/RISBLG):
   rotate in2 left by i5, then insert bits i3..i4 of the result into out.
   Bit 0x80 of i4 requests zeroing the non-selected bits.  Where possible
   the operation is lowered to a single extract or deposit. */
static DisasJumpType op_risbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s->fields, i3);
    int i4 = get_field(s->fields, i4);
    int i5 = get_field(s->fields, i5);
    int do_zero = i4 & 0x80;
    uint64_t mask, imask, pmask;
    int pos, len, rot;

    /* Adjust the arguments for the specific insn.  */
    /* PMASK restricts the affected half of the register for the
       high/low variants.  */
    switch (s->fields->op2) {
    case 0x55: /* risbg */
    case 0x59: /* risbgn */
        i3 &= 63;
        i4 &= 63;
        pmask = ~0;
        break;
    case 0x5d: /* risbhg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0xffffffff00000000ull;
        break;
    case 0x51: /* risblg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0x00000000ffffffffull;
        break;
    default:
        g_assert_not_reached();
    }

    /* MASK is the set of bits to be inserted from R2.
       Take care for I3/I4 wraparound.  */
    mask = pmask >> i3;
    if (i3 <= i4) {
        mask ^= pmask >> i4 >> 1;
    } else {
        mask |= ~(pmask >> i4 >> 1);
    }
    mask &= pmask;

    /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
       insns, we need to keep the other half of the register.  */
    imask = ~mask | ~pmask;
    if (do_zero) {
        imask = ~pmask;
    }

    len = i4 - i3 + 1;
    pos = 63 - i4;
    rot = i5 & 63;
    if (s->fields->op2 == 0x5d) {
        pos += 32;
    }

    /* In some cases we can implement this with extract.  */
    if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
        tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
        return DISAS_NEXT;
    }

    /* In some cases we can implement this with deposit.  */
    if (len > 0 && (imask == 0 || ~mask == imask)) {
        /* Note that we rotate the bits to be inserted to the lsb, not to
           the position as described in the PoO.  */
        rot = (rot - pos) & 63;
    } else {
        /* pos < 0 marks "deposit not applicable" for the code below.  */
        pos = -1;
    }

    /* Rotate the input as necessary.  */
    tcg_gen_rotli_i64(o->in2, o->in2, rot);

    /* Insert the selected bits into the output.  */
    if (pos >= 0) {
        if (imask == 0) {
            tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
        } else {
            tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
        }
    } else if (imask == 0) {
        tcg_gen_andi_i64(o->out, o->in2, mask);
    } else {
        /* General case: merge the selected bits of in2 with the kept
           bits of out.  */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_andi_i64(o->out, o->out, imask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
    }
    return DISAS_NEXT;
}
3608
/* ROTATE THEN {AND,OR,XOR} SELECTED BITS (RNSBG/ROSBG/RXSBG):
   rotate in2 left by i5 and combine bits i3..i4 with out; bit 0x80 of i3
   selects the test-only form (result discarded, only CC set). */
static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s->fields, i3);
    int i4 = get_field(s->fields, i4);
    int i5 = get_field(s->fields, i5);
    uint64_t mask;

    /* If this is a test-only form, arrange to discard the result.  */
    if (i3 & 0x80) {
        o->out = tcg_temp_new_i64();
        o->g_out = false;
    }

    i3 &= 63;
    i4 &= 63;
    i5 &= 63;

    /* MASK is the set of bits to be operated on from R2.
       Take care for I3/I4 wraparound.  */
    mask = ~0ull >> i3;
    if (i3 <= i4) {
        mask ^= ~0ull >> i4 >> 1;
    } else {
        mask |= ~(~0ull >> i4 >> 1);
    }

    /* Rotate the input as necessary.  */
    tcg_gen_rotli_i64(o->in2, o->in2, i5);

    /* Operate.  */
    /* Non-selected bits are made neutral for the chosen operation
       (1 for AND, 0 for OR/XOR) so out's other bits are preserved.  */
    switch (s->fields->op2) {
    case 0x55: /* AND */
        tcg_gen_ori_i64(o->in2, o->in2, ~mask);
        tcg_gen_and_i64(o->out, o->out, o->in2);
        break;
    case 0x56: /* OR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
        break;
    case 0x57: /* XOR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_xor_i64(o->out, o->out, o->in2);
        break;
    default:
        abort();
    }

    /* Set the CC.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
3661
/* Byte-reverse the low 16 bits of in2. */
static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap16_i64(o->out, o->in2);
    return DISAS_NEXT;
}
3667
/* Byte-reverse the low 32 bits of in2. */
static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap32_i64(o->out, o->in2);
    return DISAS_NEXT;
}
3673
/* Byte-reverse all 64 bits of in2. */
static DisasJumpType op_rev64(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap64_i64(o->out, o->in2);
    return DISAS_NEXT;
}
3679
3680static DisasJumpType op_rll32(DisasContext *s, DisasOps *o)
3681{
3682    TCGv_i32 t1 = tcg_temp_new_i32();
3683    TCGv_i32 t2 = tcg_temp_new_i32();
3684    TCGv_i32 to = tcg_temp_new_i32();
3685    tcg_gen_extrl_i64_i32(t1, o->in1);
3686    tcg_gen_extrl_i64_i32(t2, o->in2);
3687    tcg_gen_rotl_i32(to, t1, t2);
3688    tcg_gen_extu_i32_i64(o->out, to);
3689    tcg_temp_free_i32(t1);
3690    tcg_temp_free_i32(t2);
3691    tcg_temp_free_i32(to);
3692    return DISAS_NEXT;
3693}
3694
/* ROTATE LEFT SINGLE LOGICAL (RLLG, 64-bit). */
static DisasJumpType op_rll64(DisasContext *s, DisasOps *o)
{
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3700
3701#ifndef CONFIG_USER_ONLY
/* RESET REFERENCE BIT EXTENDED (RRBE): helper sets the CC in cc_op. */
static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
{
    gen_helper_rrbe(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3708
/* SET ADDRESS SPACE CONTROL (FAST) (SAC/SACF). */
static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
{
    gen_helper_sacf(cpu_env, o->in2);
    /* Addressing mode has changed, so end the block.  */
    return DISAS_PC_STALE;
}
3715#endif
3716
/* SET ADDRESSING MODE (SAM24/SAM31/SAM64): s->insn->data encodes the new
   mode (0/1/3); the PSW extended-addressing bits are updated and the
   block is ended since the execution mode changed. */
static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
{
    int sam = s->insn->data;
    TCGv_i64 tsam;
    uint64_t mask;

    /* MASK is the set of address bits representable in the new mode.  */
    switch (sam) {
    case 0:
        mask = 0xffffff;
        break;
    case 1:
        mask = 0x7fffffff;
        break;
    default:
        mask = -1;
        break;
    }

    /* Bizarre but true, we check the address of the current insn for the
       specification exception, not the next to be executed.  Thus the PoO
       documents that Bad Things Happen two bytes before the end.  */
    if (s->base.pc_next & ~mask) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    s->pc_tmp &= mask;

    tsam = tcg_const_i64(sam);
    tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
    tcg_temp_free_i64(tsam);

    /* Always exit the TB, since we (may have) changed execution mode.  */
    return DISAS_PC_STALE;
}
3751
/* SET ACCESS (SAR): store the low 32 bits of in2 into access register r1. */
static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
    return DISAS_NEXT;
}
3758
/* SUBTRACT (short BFP): out = in1 - in2 via softfloat helper. */
static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
{
    gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3764
/* SUBTRACT (long BFP): out = in1 - in2 via softfloat helper. */
static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3770
/* SUBTRACT (extended BFP): 128-bit operands passed/returned as halves. */
static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
3777
/* SQUARE ROOT (short BFP). */
static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqeb(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}
3783
/* SQUARE ROOT (long BFP). */
static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqdb(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}
3789
/* SQUARE ROOT (extended BFP): 128-bit result returned as two halves. */
static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
3796
3797#ifndef CONFIG_USER_ONLY
/* SERVICE CALL (SCLP): helper sets the CC in cc_op. */
static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
{
    gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
    set_cc_static(s);
    return DISAS_NEXT;
}
3804
3805static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
3806{
3807    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3808    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3809    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3);
3810    set_cc_static(s);
3811    tcg_temp_free_i32(r1);
3812    tcg_temp_free_i32(r3);
3813    return DISAS_NEXT;
3814}
3815#endif
3816
/* STORE ON CONDITION (STOC/STOCG/STOCFH): store r1 (32-bit, 64-bit, or
   the high word) only when the m3 condition holds.  Implemented by
   branching over the store when the inverted condition is true.
   s->insn->data selects the variant. */
static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    TCGv_i64 a, h;
    TCGLabel *lab;
    int r1;

    disas_jcc(s, &c, get_field(s->fields, m3));

    /* We want to store when the condition is fulfilled, so branch
       out when it's not */
    c.cond = tcg_invert_cond(c.cond);

    lab = gen_new_label();
    if (c.is_64) {
        tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
    } else {
        tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
    }
    free_compare(&c);

    r1 = get_field(s->fields, r1);
    a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
    switch (s->insn->data) {
    case 1: /* STOCG */
        tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
        break;
    case 0: /* STOC */
        tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
        break;
    case 2: /* STOCFH */
        /* Store the high 32 bits of the register.  */
        h = tcg_temp_new_i64();
        tcg_gen_shri_i64(h, regs[r1], 32);
        tcg_gen_qemu_st32(h, a, get_mem_index(s));
        tcg_temp_free_i64(h);
        break;
    default:
        g_assert_not_reached();
    }
    tcg_temp_free_i64(a);

    gen_set_label(lab);
    return DISAS_NEXT;
}
3861
/* SHIFT LEFT SINGLE (arithmetic, SLA/SLAG): s->insn->data is the sign-bit
   index (31 or 63).  The shift leaves the sign bit untouched; the CC is
   computed from the pre-shift value and count. */
static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
{
    uint64_t sign = 1ull << s->insn->data;
    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged.  */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return DISAS_NEXT;
}
3875
/* SHIFT LEFT SINGLE LOGICAL. */
static DisasJumpType op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3881
/* SHIFT RIGHT SINGLE (arithmetic). */
static DisasJumpType op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3887
/* SHIFT RIGHT SINGLE LOGICAL. */
static DisasJumpType op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3893
/* SET FPC (SFPC): install a new floating-point control register. */
static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
{
    gen_helper_sfpc(cpu_env, o->in2);
    return DISAS_NEXT;
}
3899
/* SET FPC AND SIGNAL (SFASR). */
static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
{
    gen_helper_sfas(cpu_env, o->in2);
    return DISAS_NEXT;
}
3905
/* SET ROUNDING MODE (SRNM/SRNMB/SRNMT): deposit the new rounding-mode
   bits into the FPC at the field selected by the opcode, then reinstall
   the FPC via the sfpc helper so fpu_status picks up the change. */
static DisasJumpType op_srnm(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s->fields, b2);
    int d2 = get_field(s->fields, d2);
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    int mask, pos, len;

    /* POS/LEN locate the target bit field within the FPC.  */
    switch (s->fields->op2) {
    case 0x99: /* SRNM */
        pos = 0, len = 2;
        break;
    case 0xb8: /* SRNMB */
        pos = 0, len = 3;
        break;
    case 0xb9: /* SRNMT */
        pos = 4, len = 3;
        break;
    default:
        tcg_abort();
    }
    mask = (1 << len) - 1;

    /* Insert the value into the appropriate field of the FPC.  */
    if (b2 == 0) {
        /* Effective address is just the displacement: fold at translate
           time.  */
        tcg_gen_movi_i64(t1, d2 & mask);
    } else {
        tcg_gen_addi_i64(t1, regs[b2], d2);
        tcg_gen_andi_i64(t1, t1, mask);
    }
    tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_deposit_i64(t2, t2, t1, pos, len);
    tcg_temp_free_i64(t1);

    /* Then install the new FPC to set the rounding mode in fpu_status.  */
    gen_helper_sfpc(cpu_env, t2);
    tcg_temp_free_i64(t2);
    return DISAS_NEXT;
}
3945
/* SET PROGRAM MASK (SPM): load the CC from bits 2-3 of the operand's
   byte 32-39 and the program mask from bits 4-7 into the PSW. */
static DisasJumpType op_spm(DisasContext *s, DisasOps *o)
{
    /* CC is in bits 28-29 of the 64-bit register image.  */
    tcg_gen_extrl_i64_i32(cc_op, o->in1);
    tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
    set_cc_static(s);

    /* Program mask occupies bits 24-27; deposit into the PSW mask.  */
    tcg_gen_shri_i64(o->in1, o->in1, 24);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
    return DISAS_NEXT;
}
3956
/* EXTRACT CPU TIME (ECTG): GR0 = first operand - CPU timer, GR1 = second
   operand address, and r3 receives the doubleword at the third-operand
   address.  All operands are fetched before any register is modified so
   a fault leaves state unchanged. */
static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
{
    int b1 = get_field(s->fields, b1);
    int d1 = get_field(s->fields, d1);
    int b2 = get_field(s->fields, b2);
    int d2 = get_field(s->fields, d2);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* fetch all operands first */
    o->in1 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in1, regs[b1], d1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in2, regs[b2], d2);
    o->addr1 = get_address(s, 0, r3, 0);

    /* load the third operand into r3 before modifying anything */
    tcg_gen_qemu_ld64(regs[r3], o->addr1, get_mem_index(s));

    /* subtract CPU timer from first operand and store in GR0 */
    gen_helper_stpt(tmp, cpu_env);
    tcg_gen_sub_i64(regs[0], o->in1, tmp);

    /* store second operand in GR1 */
    tcg_gen_mov_i64(regs[1], o->in2);

    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
3986
3987#ifndef CONFIG_USER_ONLY
/* SET PSW KEY FROM ADDRESS (SPKA): bits 24-27 of the address become the
   PSW key. */
static DisasJumpType op_spka(DisasContext *s, DisasOps *o)
{
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
    return DISAS_NEXT;
}
3994
/* SET STORAGE KEY EXTENDED (SSKE). */
static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
{
    gen_helper_sske(cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
4000
/* SET SYSTEM MASK (SSM): replace PSW bits 0-7 with the operand byte. */
static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
{
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return DISAS_PC_STALE_NOCHAIN;
}
4007
/* STORE CPU ADDRESS (STAP): return the core id. */
static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, core_id));
    return DISAS_NEXT;
}
4013
/* STORE CLOCK (STCK): read the TOD clock via helper. */
static DisasJumpType op_stck(DisasContext *s, DisasOps *o)
{
    gen_helper_stck(o->out, cpu_env);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}
4021
/* STORE CLOCK EXTENDED (STCKE): store a 16-byte clock value - the 64-bit
   TOD shifted into a 104-bit position, with the TOD programmable register
   in the low bytes. */
static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
{
    TCGv_i64 c1 = tcg_temp_new_i64();
    TCGv_i64 c2 = tcg_temp_new_i64();
    TCGv_i64 todpr = tcg_temp_new_i64();
    gen_helper_stck(c1, cpu_env);
    /* 16 bit value store in an uint32_t (only valid bits set) */
    tcg_gen_ld32u_i64(todpr, cpu_env, offsetof(CPUS390XState, todpr));
    /* Shift the 64-bit value into its place as a zero-extended
       104-bit value.  Note that "bit positions 64-103 are always
       non-zero so that they compare differently to STCK"; we set
       the least significant bit to 1.  */
    tcg_gen_shli_i64(c2, c1, 56);
    tcg_gen_shri_i64(c1, c1, 8);
    tcg_gen_ori_i64(c2, c2, 0x10000);
    tcg_gen_or_i64(c2, c2, todpr);
    tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
    tcg_temp_free_i64(c1);
    tcg_temp_free_i64(c2);
    tcg_temp_free_i64(todpr);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}
4048
/* SET CLOCK (SCK): load the aligned doubleword operand and pass it to the
   helper, which sets the CC in cc_op. */
static DisasJumpType op_sck(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
    gen_helper_sck(cc_op, cpu_env, o->in1);
    set_cc_static(s);
    return DISAS_NEXT;
}
4056
4057static DisasJumpType op_sckc(DisasContext *s, DisasOps *o)
4058{
4059    gen_helper_sckc(cpu_env, o->in2);
4060    return DISAS_NEXT;
4061}
4062
4063static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o)
4064{
4065    gen_helper_sckpf(cpu_env, regs[0]);
4066    return DISAS_NEXT;
4067}
4068
4069static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
4070{
4071    gen_helper_stckc(o->out, cpu_env);
4072    return DISAS_NEXT;
4073}
4074
/* STCTG: store control registers r1..r3 (64-bit) to the address in in2.  */
static DisasJumpType op_stctg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    gen_helper_stctg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}

/* STCTL: store control registers r1..r3 (32-bit) to the address in in2.  */
static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    gen_helper_stctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}
4094
/* STIDP: store the CPU id from env.  */
static DisasJumpType op_stidp(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));
    return DISAS_NEXT;
}

/* SPT: set the CPU timer.  */
static DisasJumpType op_spt(DisasContext *s, DisasOps *o)
{
    gen_helper_spt(cpu_env, o->in2);
    return DISAS_NEXT;
}

/* STFL: store the facility list at its fixed low-core location.  */
static DisasJumpType op_stfl(DisasContext *s, DisasOps *o)
{
    gen_helper_stfl(cpu_env);
    return DISAS_NEXT;
}

/* STPT: store the CPU timer.  */
static DisasJumpType op_stpt(DisasContext *s, DisasOps *o)
{
    gen_helper_stpt(o->out, cpu_env);
    return DISAS_NEXT;
}

/* STSI: store system information; function/selector come from r0/r1.  */
static DisasJumpType op_stsi(DisasContext *s, DisasOps *o)
{
    gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SPX: set the prefix register.  */
static DisasJumpType op_spx(DisasContext *s, DisasOps *o)
{
    gen_helper_spx(cpu_env, o->in2);
    return DISAS_NEXT;
}
4131
/* I/O channel-subsystem instructions.  These all delegate to helpers;
   the subchannel id is conventionally passed in r1 (and r2 for SCHM).
   Helpers that produce a condition code are followed by set_cc_static.  */

static DisasJumpType op_xsch(DisasContext *s, DisasOps *o)
{
    gen_helper_xsch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_csch(DisasContext *s, DisasOps *o)
{
    gen_helper_csch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_hsch(DisasContext *s, DisasOps *o)
{
    gen_helper_hsch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_msch(DisasContext *s, DisasOps *o)
{
    gen_helper_msch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_rchp(DisasContext *s, DisasOps *o)
{
    gen_helper_rchp(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_rsch(DisasContext *s, DisasOps *o)
{
    gen_helper_rsch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_sal(DisasContext *s, DisasOps *o)
{
    gen_helper_sal(cpu_env, regs[1]);
    return DISAS_NEXT;
}

static DisasJumpType op_schm(DisasContext *s, DisasOps *o)
{
    gen_helper_schm(cpu_env, regs[1], regs[2], o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_siga(DisasContext *s, DisasOps *o)
{
    /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
    gen_op_movi_cc(s, 3);
    return DISAS_NEXT;
}

static DisasJumpType op_stcps(DisasContext *s, DisasOps *o)
{
    /* The instruction is suppressed if not provided. */
    return DISAS_NEXT;
}

static DisasJumpType op_ssch(DisasContext *s, DisasOps *o)
{
    gen_helper_ssch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_stsch(DisasContext *s, DisasOps *o)
{
    gen_helper_stsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o)
{
    gen_helper_stcrw(cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_tpi(DisasContext *s, DisasOps *o)
{
    gen_helper_tpi(cc_op, cpu_env, o->addr1);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_tsch(DisasContext *s, DisasOps *o)
{
    gen_helper_tsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_chsc(DisasContext *s, DisasOps *o)
{
    gen_helper_chsc(cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4240
/* STPX: store the prefix register; mask to the architected bits.  */
static DisasJumpType op_stpx(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
    return DISAS_NEXT;
}
4247
/* STNSM/STOSM: store the current system-mask byte, THEN and/or the
   immediate into it.  The opcode (0xac = STNSM) selects AND vs OR.  */
static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s->fields, i2);
    TCGv_i64 t;

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place.  */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
    tcg_temp_free_i64(t);

    if (s->fields->op == 0xac) {
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }

    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return DISAS_PC_STALE_NOCHAIN;
}
4271
/* STURA: store 32 bits using the real address in in2.  */
static DisasJumpType op_stura(DisasContext *s, DisasOps *o)
{
    gen_helper_stura(cpu_env, o->in2, o->in1);
    return DISAS_NEXT;
}

/* STURG: store 64 bits using the real address in in2.  */
static DisasJumpType op_sturg(DisasContext *s, DisasOps *o)
{
    gen_helper_sturg(cpu_env, o->in2, o->in1);
    return DISAS_NEXT;
}
4283#endif
4284
/* STFLE: store the facility list extended; the helper sets the CC.  */
static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
{
    gen_helper_stfle(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4291
/* Simple stores of in1 to the address in in2, by access size.  */

static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
4315
/* STAM: store access registers r1..r3 to the address in in2.  */
static DisasJumpType op_stam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    gen_helper_stam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}
4325
/* STCM: store the register bytes selected by mask m3.  Contiguous
   masks become a single sized store; sparse masks fall back to a
   sequence of byte stores.  insn->data supplies the base bit offset
   (distinguishing the STCM/STCMH variants).  */
static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int pos, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Position of the lowest selected byte within the register.  */
    pos = base + ctz32(m3) * 8;
    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
        break;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
        break;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
        break;

    default:
        /* This is going to be a sequence of shifts and stores.  */
        pos = base + 32 - 8;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_shri_i64(tmp, o->in1, pos);
                tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
                /* Advance the destination only for selected bytes.  */
                tcg_gen_addi_i64(o->in2, o->in2, 1);
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
4374
/* STM/STMG: store multiple registers r1..r3 (wrapping mod 16) to
   successive locations; insn->data gives the element size (4 or 8).  */
static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int size = s->insn->data;
    TCGv_i64 tsize = tcg_const_i64(size);

    while (1) {
        if (size == 8) {
            tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
        } else {
            tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
        }
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, tsize);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(tsize);
    return DISAS_NEXT;
}
4398
/* STMH: store the high halves of registers r1..r3 (wrapping mod 16)
   to successive words.  The shift left by 32 positions the high half
   where the 32-bit store will pick it up.  */
static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_const_i64(4);
    TCGv_i64 t32 = tcg_const_i64(32);

    while (1) {
        tcg_gen_shl_i64(t, regs[r1], t32);
        tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t);
    tcg_temp_free_i64(t4);
    tcg_temp_free_i64(t32);
    return DISAS_NEXT;
}
4422
/* STPQ: store a 16-byte pair.  In parallel context this needs a true
   128-bit atomic store; fall back to exit_atomic when unavailable.  */
static DisasJumpType op_stpq(DisasContext *s, DisasOps *o)
{
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        gen_helper_stpq(cpu_env, o->in2, o->out2, o->out);
    } else if (HAVE_ATOMIC128) {
        gen_helper_stpq_parallel(cpu_env, o->in2, o->out2, o->out);
    } else {
        gen_helper_exit_atomic(cpu_env);
        return DISAS_NORETURN;
    }
    return DISAS_NEXT;
}
4435
/* SRST: search string; the helper updates r1/r2 and sets the CC.  */
static DisasJumpType op_srst(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));

    gen_helper_srst(cpu_env, r1, r2);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SRSTU: search string unicode; the helper updates r1/r2 and sets the CC.  */
static DisasJumpType op_srstu(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));

    gen_helper_srstu(cpu_env, r1, r2);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4461
/* Subtraction: out = in1 - in2.  CC handling is done by cout hooks.  */
static DisasJumpType op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
4467
/* Subtract with borrow: out = in1 - in2 - borrow, where the borrow is
   derived from the previous condition code.  */
static DisasJumpType op_subb(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 borrow;

    tcg_gen_sub_i64(o->out, o->in1, o->in2);

    /* The !borrow flag is the msb of CC.  Since we want the inverse of
       that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4.  */
    disas_jcc(s, &cmp, 8 | 4);
    borrow = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        /* 32-bit comparison: compute in 32 bits, then widen the 0/1
           result for the 64-bit subtraction below.  */
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(borrow, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_sub_i64(o->out, o->out, borrow);
    tcg_temp_free_i64(borrow);
    return DISAS_NEXT;
}
4493
/* SVC: supervisor call.  Record the SVC code and instruction length
   in env, then raise the exception; psw.addr/cc must be synced first
   so the interrupt state is precise.  */
static DisasJumpType op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    update_psw_addr(s);
    update_cc_op(s);

    t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
    tcg_temp_free_i32(t);

    t = tcg_const_i32(s->ilen);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
    tcg_temp_free_i32(t);

    gen_exception(EXCP_SVC);
    return DISAS_NORETURN;
}
4512
4513static DisasJumpType op_tam(DisasContext *s, DisasOps *o)
4514{
4515    int cc = 0;
4516
4517    cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0;
4518    cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0;
4519    gen_op_movi_cc(s, cc);
4520    return DISAS_NEXT;
4521}
4522
/* TCEB/TCDB/TCXB: test data class for short/long/extended BFP; the
   helper computes the CC from the value and the class mask.  */

static DisasJumpType op_tceb(DisasContext *s, DisasOps *o)
{
    gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o)
{
    /* The 128-bit value is passed as the out/out2 pair.  */
    gen_helper_tcxb(cc_op, cpu_env, o->out, o->out2, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4543
4544#ifndef CONFIG_USER_ONLY
4545
/* TB: test block; the helper clears/tests the page and sets the CC.  */
static DisasJumpType op_testblock(DisasContext *s, DisasOps *o)
{
    gen_helper_testblock(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TPROT: test protection; the helper sets the CC.  */
static DisasJumpType op_tprot(DisasContext *s, DisasOps *o)
{
    gen_helper_tprot(cc_op, cpu_env, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4559
4560#endif
4561
/* TP: test decimal; operand length is l1 + 1 bytes.  */
static DisasJumpType op_tp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l1 = tcg_const_i32(get_field(s->fields, l1) + 1);
    gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
    tcg_temp_free_i32(l1);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TR: translate the bytes at addr1 using the table at in2.  */
static DisasJumpType op_tr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_tr(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TRE: translate extended; returns updated address/length pair.  */
static DisasJumpType op_tre(DisasContext *s, DisasOps *o)
{
    gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TRT: translate and test; the helper sets the CC.  */
static DisasJumpType op_trt(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TRTR: translate and test reverse; the helper sets the CC.  */
static DisasJumpType op_trtr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}
4605
/* TROO/TROT/TRTO/TRTT: translate one/two to one/two.  The low two
   opcode bits encode the source/destination character sizes; m3 bit 0
   (with the ETF2-ENH facility) requests "no test character".  The
   test character otherwise comes from r0, truncated to the source
   character width.  */
static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
    TCGv_i32 sizes = tcg_const_i32(s->insn->opc & 3);
    TCGv_i32 tst = tcg_temp_new_i32();
    int m3 = get_field(s->fields, m3);

    /* Without the facility, m3 is ignored.  */
    if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
        m3 = 0;
    }
    if (m3 & 1) {
        /* -1 signals "no test character" to the helper.  */
        tcg_gen_movi_i32(tst, -1);
    } else {
        tcg_gen_extrl_i64_i32(tst, regs[0]);
        if (s->insn->opc & 3) {
            tcg_gen_ext8u_i32(tst, tst);
        } else {
            tcg_gen_ext16u_i32(tst, tst);
        }
    }
    gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    tcg_temp_free_i32(sizes);
    tcg_temp_free_i32(tst);
    set_cc_static(s);
    return DISAS_NEXT;
}
4636
/* TS: test and set.  Atomically exchange the byte with 0xff and set
   the CC from bit 0 (the msb) of the old value.  */
static DisasJumpType op_ts(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_const_i32(0xff);
    tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
    tcg_gen_extract_i32(cc_op, t1, 7, 1);
    tcg_temp_free_i32(t1);
    set_cc_static(s);
    return DISAS_NEXT;
}
4646
/* UNPK: unpack packed decimal at in2 into zoned format at addr1.  */
static DisasJumpType op_unpk(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

/* UNPKA: unpack ASCII; specification exception on over-long operands.  */
static DisasJumpType op_unpka(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s->fields, l1) + 1;
    TCGv_i32 l;

    /* The length must not exceed 32 bytes.  */
    if (l1 > 32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_const_i32(l1);
    gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* UNPKU: unpack Unicode; specification exception on bad lengths.  */
static DisasJumpType op_unpku(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s->fields, l1) + 1;
    TCGv_i32 l;

    /* The length must be even and should not exceed 64 bytes.  */
    if ((l1 & 1) || (l1 > 64)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_const_i32(l1);
    gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}
4688
4689
/* XC: exclusive-or the l1+1 bytes at (b2,d2) into (b1,d1).  When the
   operands are identical the result is all zeros, so we inline a
   memset-to-zero for short lengths; otherwise defer to the helper.  */
static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
{
    int d1 = get_field(s->fields, d1);
    int d2 = get_field(s->fields, d2);
    int b1 = get_field(s->fields, b1);
    int b2 = get_field(s->fields, b2);
    int l = get_field(s->fields, l1);
    TCGv_i32 t32;

    o->addr1 = get_address(s, 0, b1, d1);

    /* If the addresses are identical, this is a store/memset of zero.  */
    if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
        o->in2 = tcg_const_i64(0);

        /* Emit descending-size stores, advancing addr1 between them.  */
        l++;
        while (l >= 8) {
            tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
            l -= 8;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 8);
            }
        }
        if (l >= 4) {
            tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
            l -= 4;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 4);
            }
        }
        if (l >= 2) {
            tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
            l -= 2;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 2);
            }
        }
        if (l) {
            tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
        }
        /* Result is zero, so CC is 0.  */
        gen_op_movi_cc(s, 0);
        return DISAS_NEXT;
    }

    /* But in general we'll defer to a helper.  */
    o->in2 = get_address(s, 0, b2, d2);
    t32 = tcg_const_i32(l);
    gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
    tcg_temp_free_i32(t32);
    set_cc_static(s);
    return DISAS_NEXT;
}
4742
/* Exclusive-or: out = in1 ^ in2.  CC handling is done by cout hooks.  */
static DisasJumpType op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* XOR with immediate field: insn->data packs the field width (high
   byte) and bit offset (low byte) of the portion being modified.  */
static DisasJumpType op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    /* in2 is modified in place, so it must not be a global.  */
    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
4764
/* XI and friends: exclusive-or an immediate into memory.  With the
   interlocked-access facility the update is done atomically in memory;
   otherwise as a load/modify/store sequence.  insn->data carries the
   memory-op size/ordering flags.  */
static DisasJumpType op_xi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
4785
/* Produce a constant zero result (single register).  */
static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    return DISAS_NEXT;
}

/* Produce a constant zero result in a register pair.  out2 aliases
   out, so mark it global-like to suppress a double free.  */
static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    o->out2 = o->out;
    o->g_out2 = true;
    return DISAS_NEXT;
}
4799
4800#ifndef CONFIG_USER_ONLY
/* zPCI instructions.  All delegate to helpers; those that produce a
   condition code are followed by set_cc_static.  */

static DisasJumpType op_clp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));

    gen_helper_clp(cpu_env, r2);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));

    gen_helper_pcilg(cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));

    gen_helper_pcistg(cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    /* The access register number comes from the b2 field.  */
    TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));

    gen_helper_stpcifc(cpu_env, r1, o->addr1, ar);
    tcg_temp_free_i32(ar);
    tcg_temp_free_i32(r1);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_sic(DisasContext *s, DisasOps *o)
{
    gen_helper_sic(cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));

    gen_helper_rpcit(cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));

    gen_helper_pcistb(cpu_env, r1, r3, o->addr1, ar);
    tcg_temp_free_i32(ar);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));

    gen_helper_mpcifc(cpu_env, r1, o->addr1, ar);
    tcg_temp_free_i32(ar);
    tcg_temp_free_i32(r1);
    set_cc_static(s);
    return DISAS_NEXT;
}
4890#endif
4891
4892/* ====================================================================== */
4893/* The "Cc OUTput" generators.  Given the generated output (and in some cases
4894   the original inputs), update the various cc data structures in order to
4895   be able to compute the new condition code.  */
4896
/* Each cout_* records the inputs/output with the CC_OP_* code that
   tells the lazy CC machinery how to recompute the condition code.  */

static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}

static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}

static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}

static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}

static void cout_addu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
}

static void cout_addu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
}

static void cout_addc32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
}

static void cout_addc64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
}

static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}

static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}

static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}

static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}

static void cout_f32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
}

static void cout_f64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
}

static void cout_f128(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
}

static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}

static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}

static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}

static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}

static void cout_nz32(DisasContext *s, DisasOps *o)
{
    /* Only the low 32 bits of the result are significant.  */
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}

static void cout_nz64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
}

static void cout_s32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
}

static void cout_s64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
}

static void cout_subs32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
}

static void cout_subs64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
}

static void cout_subu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
}

static void cout_subu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
}

static void cout_subb32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
}

static void cout_subb64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
}

static void cout_tm32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
}

static void cout_tm64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
}
5052
5053/* ====================================================================== */
5054/* The "PREParation" generators.  These initialize the DisasOps.OUT fields
5055   with the TCG register to which we will write.  Used in combination with
5056   the "wout" generators, in some cases we need a new temporary, and in
5057   some cases we can write to a TCG global.  */
5058
/* Allocate a fresh temporary for the output.  */
static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}
#define SPEC_prep_new 0

/* Allocate fresh temporaries for an output pair.  */
static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}
#define SPEC_prep_new_P 0

/* Write directly into general register r1 (a TCG global).  */
static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = regs[get_field(f, r1)];
    o->g_out = true;
}
#define SPEC_prep_r1 0

/* Write directly into the even/odd register pair r1, r1+1.  */
static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = regs[r1];
    o->out2 = regs[r1 + 1];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_r1_P SPEC_r1_even

/* Write directly into floating-point register r1.  */
static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = fregs[get_field(f, r1)];
    o->g_out = true;
}
#define SPEC_prep_f1 0

/* Write directly into the 128-bit FP register pair r1, r1+2.  */
static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[r1 + 2];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_x1 SPEC_r1_f128
5103
5104/* ====================================================================== */
5105/* The "Write OUTput" generators.  These generally perform some non-trivial
5106   copy of data to TCG globals, or to main memory.  The trivial cases are
5107   generally handled by having a "prep" generator install the TCG global
5108   as the destination of the operation.  */
5109
/* Store the full output into general register r1.  */
static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->out);
}
#define SPEC_wout_r1 0

/* Store only the low 8 bits of the output into r1.  */
static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}
#define SPEC_wout_r1_8 0

/* Store only the low 16 bits of the output into r1.  */
static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}
#define SPEC_wout_r1_16 0

/* Store the output into the low 32 bits of r1.  */
static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_r1_32 0

/* Store the output into the high 32 bits of r1.  */
static void wout_r1_32h(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32h_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_r1_32h 0

/* Store the output pair into the low halves of r1 and r1+1.  */
static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64(r1 + 1, o->out2);
}
#define SPEC_wout_r1_P32 SPEC_r1_even

/* Split the 64-bit output across the low halves of r1 (high word)
   and r1+1 (low word).  Note that o->out is clobbered.  */
static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1 + 1, o->out);
    tcg_gen_shri_i64(o->out, o->out, 32);
    store_reg32_i64(r1, o->out);
}
#define SPEC_wout_r1_D32 SPEC_r1_even

/* Store the output pair into the low halves of r3 and r3+1.  */
static void wout_r3_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    store_reg32_i64(r3, o->out);
    store_reg32_i64(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P32 SPEC_r3_even

/* Store the output pair into r3 and r3+1.  */
static void wout_r3_P64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    store_reg(r3, o->out);
    store_reg(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P64 SPEC_r3_even

/* Store the output as a short (32-bit) FP value into f-register r1.  */
static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_e1 0

/* Store the output as a long (64-bit) FP value into f-register r1.  */
static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg(get_field(f, r1), o->out);
}
#define SPEC_wout_f1 0
5186
5187static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
5188{
5189    int f1 = get_field(s->fields, r1);
5190    store_freg(f1, o->out);
5191    store_freg(f1 + 2, o->out2);
5192}
5193#define SPEC_wout_x1 SPEC_r1_f128
5194
/* Store the 32-bit output into R1, but only when R1 != R2
   (used by move-type insns where R1 == R2 is a no-op).  */
static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_reg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_r1r2_32 0

/* Store the short FP output into F1, but only when F1 != F2.  */
static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_freg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_e1e2 0

/* Store 1 byte of the output to memory at the first-operand address.  */
static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_8 0

/* Store 2 bytes of the output to memory at the first-operand address.  */
static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_16 0

#ifndef CONFIG_USER_ONLY
/* As wout_m1_16, but with an alignment check (system emulation only).  */
static void wout_m1_16a(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN);
}
#define SPEC_wout_m1_16a 0
#endif

/* Store 4 bytes of the output to memory at the first-operand address.  */
static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_32 0

#ifndef CONFIG_USER_ONLY
/* As wout_m1_32, but with an alignment check (system emulation only).  */
static void wout_m1_32a(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_wout_m1_32a 0
#endif

/* Store 8 bytes of the output to memory at the first-operand address.  */
static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_64 0

#ifndef CONFIG_USER_ONLY
/* As wout_m1_64, but with an alignment check (system emulation only).  */
static void wout_m1_64a(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
}
#define SPEC_wout_m1_64a 0
#endif

/* Store 4 bytes of the output to memory at the address held in in2.  */
static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
}
#define SPEC_wout_m2_32 0

/* Copy the in2 operand (not the output) into general register R1.  */
static void wout_in2_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->in2);
}
#define SPEC_wout_in2_r1 0

/* Copy the low 32 bits of the in2 operand into the low half of R1.  */
static void wout_in2_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->in2);
}
#define SPEC_wout_in2_r1_32 0
5276
5277/* ====================================================================== */
5278/* The "INput 1" generators.  These load the first operand to an insn.  */
5279
/* Load general register R1 into a fresh temporary as operand 1.  */
static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1));
}
#define SPEC_in1_r1 0

/* Use general register R1 (a TCG global) directly as operand 1.  */
static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_r1_o 0

/* Operand 1 = sign-extended low 32 bits of R1.  */
static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32s 0

/* Operand 1 = zero-extended low 32 bits of R1.  */
static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32u 0

/* Operand 1 = high 32 bits of R1, shifted down (high-word insns).  */
static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
}
#define SPEC_in1_r1_sr32 0

/* Operand 1 = the odd register of the R1/R1+1 pair.  */
static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1) + 1);
}
#define SPEC_in1_r1p1 SPEC_r1_even

/* Operand 1 = sign-extended low 32 bits of R1+1.  */
static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32s SPEC_r1_even

/* Operand 1 = zero-extended low 32 bits of R1+1.  */
static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32u SPEC_r1_even

/* Operand 1 = 64-bit value assembled from the 32-bit halves of the
   R1/R1+1 pair (R1 high, R1+1 low).  */
static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}
#define SPEC_in1_r1_D32 SPEC_r1_even

/* Operand 1 = general register R2, copied into a temporary.  */
static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r2));
}
#define SPEC_in1_r2 0

/* Operand 1 = high 32 bits of R2, shifted down.  */
static void in1_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r2)], 32);
}
#define SPEC_in1_r2_sr32 0

/* Operand 1 = general register R3, copied into a temporary.  */
static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r3));
}
#define SPEC_in1_r3 0

/* Use general register R3 (a TCG global) directly as operand 1.  */
static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_r3_o 0

/* Operand 1 = sign-extended low 32 bits of R3.  */
static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32s 0

/* Operand 1 = zero-extended low 32 bits of R3.  */
static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32u 0

/* Operand 1 = 64-bit value assembled from the 32-bit halves of the
   R3/R3+1 pair (R3 high, R3+1 low).  */
static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
}
#define SPEC_in1_r3_D32 SPEC_r3_even

/* Operand 1 = short (32-bit) FP register F1.  */
static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(f, r1));
}
#define SPEC_in1_e1 0

/* Use long FP register F1 (a TCG global) directly as operand 1.  */
static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_f1_o 0
5402
/* "Load" the 128-bit FP register pair F1/F1+2.
   NOTE(review): unlike the other in1 generators, this installs the
   registers into o->out/o->out2 rather than o->in1/o->in2 — apparently
   so that 128-bit ops can treat the x1 pair as an in/out operand.
   Confirm against the insn table before changing.  */
static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[r1 + 2];
    o->g_out = o->g_out2 = true;
}
#define SPEC_in1_x1_o SPEC_r1_f128
5411
/* Use long FP register F3 (a TCG global) directly as operand 1.  */
static void in1_f3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_f3_o 0

/* Compute the first-operand effective address from B1+D1 into addr1.  */
static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
}
#define SPEC_in1_la1 0

/* Compute the second-operand effective address (X2+B2+D2) into addr1.  */
static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in1_la2 0

/* Operand 1 = zero-extended byte loaded from the first-operand address.  */
static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_8u 0

/* Operand 1 = sign-extended halfword from the first-operand address.  */
static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16s 0

/* Operand 1 = zero-extended halfword from the first-operand address.  */
static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16u 0

/* Operand 1 = sign-extended word from the first-operand address.  */
static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32s 0

/* Operand 1 = zero-extended word from the first-operand address.  */
static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32u 0

/* Operand 1 = doubleword loaded from the first-operand address.  */
static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_64 0
5479
5480/* ====================================================================== */
5481/* The "INput 2" generators.  These load the second operand to an insn.  */
5482
/* Use general register R1 (a TCG global) directly as operand 2.  */
static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r1)];
    o->g_in2 = true;
}
#define SPEC_in2_r1_o 0

/* Operand 2 = zero-extended low 16 bits of R1.  */
static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_16u 0

/* Operand 2 = zero-extended low 32 bits of R1.  */
static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_32u 0

/* Operand 2 = 64-bit value assembled from the 32-bit halves of the
   R1/R1+1 pair (R1 high, R1+1 low).  */
static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
}
#define SPEC_in2_r1_D32 SPEC_r1_even

/* Operand 2 = general register R2, copied into a temporary.  */
static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r2));
}
#define SPEC_in2_r2 0

/* Use general register R2 (a TCG global) directly as operand 2.  */
static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_r2_o 0

/* Operand 2 = R2, but left NULL when R2 is 0 (e.g. branch-on-register
   forms where register 0 means "no operand").  */
static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}
#define SPEC_in2_r2_nz 0

/* Operand 2 = sign-extended low 8 bits of R2.  */
static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8s 0

/* Operand 2 = zero-extended low 8 bits of R2.  */
static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8u 0

/* Operand 2 = sign-extended low 16 bits of R2.  */
static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16s 0

/* Operand 2 = zero-extended low 16 bits of R2.  */
static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16u 0

/* Operand 2 = general register R3, copied into a temporary.  */
static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r3));
}
#define SPEC_in2_r3 0

/* Operand 2 = high 32 bits of R3, shifted down.  */
static void in2_r3_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(f, r3)], 32);
}
#define SPEC_in2_r3_sr32 0

/* Operand 2 = sign-extended low 32 bits of R2.  */
static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32s 0

/* Operand 2 = zero-extended low 32 bits of R2.  */
static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32u 0

/* Operand 2 = high 32 bits of R2, shifted down.  */
static void in2_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(f, r2)], 32);
}
#define SPEC_in2_r2_sr32 0
5595
/* Operand 2 = short (32-bit) FP register F2.  */
static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(f, r2));
}
#define SPEC_in2_e2 0

/* Use long FP register F2 (a TCG global) directly as operand 2.  */
static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = fregs[get_field(f, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_f2_o 0

/* Install the 128-bit FP register pair F2/F2+2 as operands 1 and 2.  */
static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    o->in1 = fregs[r2];
    o->in2 = fregs[r2 + 2];
    o->g_in1 = o->g_in2 = true;
}
#define SPEC_in2_x2_o SPEC_r2_f128

/* Operand 2 = address computed from register R2 used as a base
   (register-as-address, e.g. for BALR-style insns).  */
static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = get_address(s, 0, get_field(f, r2), 0);
}
#define SPEC_in2_ra2 0

/* Operand 2 = second-operand effective address X2+B2+D2.  */
static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in2_a2 0

/* Operand 2 = PC-relative address: current insn address plus the
   halfword-scaled signed I2 immediate.  */
static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->base.pc_next + (int64_t)get_field(f, i2) * 2);
}
#define SPEC_in2_ri2 0

/* Operand 2 = shift amount masked to 5 bits (32-bit shifts).  */
static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 31);
}
#define SPEC_in2_sh32 0

/* Operand 2 = shift amount masked to 6 bits (64-bit shifts).  */
static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 63);
}
#define SPEC_in2_sh64 0
5648
/* The in2_m2_* generators compute the second-operand address via
   in2_a2 and then replace o->in2 in place with the value loaded from
   that address, at the indicated width and extension.  */

/* Operand 2 = zero-extended byte from the second-operand address.  */
static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_8u 0

/* Operand 2 = sign-extended halfword from the second-operand address.  */
static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16s 0

/* Operand 2 = zero-extended halfword from the second-operand address.  */
static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16u 0

/* Operand 2 = sign-extended word from the second-operand address.  */
static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32s 0

/* Operand 2 = zero-extended word from the second-operand address.  */
static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32u 0

#ifndef CONFIG_USER_ONLY
/* As in2_m2_32u, but with an alignment check (system emulation only).  */
static void in2_m2_32ua(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_in2_m2_32ua 0
#endif

/* Operand 2 = doubleword loaded from the second-operand address.  */
static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_64 0

#ifndef CONFIG_USER_ONLY
/* As in2_m2_64, but with an alignment check (system emulation only).  */
static void in2_m2_64a(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEQ | MO_ALIGN);
}
#define SPEC_in2_m2_64a 0
#endif

/* The in2_mri2_* generators load through a PC-relative address
   computed by in2_ri2 (relative-long memory operands).  */

/* Operand 2 = zero-extended halfword at the PC-relative address.  */
static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_16u 0

/* Operand 2 = sign-extended word at the PC-relative address.  */
static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32s 0

/* Operand 2 = zero-extended word at the PC-relative address.  */
static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32u 0

/* Operand 2 = doubleword at the PC-relative address.  */
static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_64 0

/* Operand 2 = the I2 immediate, as extracted (sign per field type).  */
static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(get_field(f, i2));
}
#define SPEC_in2_i2 0

/* Operand 2 = I2 immediate truncated/zero-extended to 8 bits.  */
static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
}
#define SPEC_in2_i2_8u 0

/* Operand 2 = I2 immediate truncated/zero-extended to 16 bits.  */
static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
}
#define SPEC_in2_i2_16u 0

/* Operand 2 = I2 immediate truncated/zero-extended to 32 bits.  */
static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
}
#define SPEC_in2_i2_32u 0

/* Operand 2 = 16-bit I2 immediate shifted left by insn->data bits
   (e.g. the *IHH/*IHL insert/load-halfword-immediate family).  */
static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0

/* Operand 2 = 32-bit I2 immediate shifted left by insn->data bits.  */
static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(f, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_32u_shl 0

#ifndef CONFIG_USER_ONLY
/* Operand 2 = the raw (left-aligned) instruction word itself; used by
   insns that need to inspect their own encoding.  */
static void in2_insn(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->fields->raw_insn);
}
#define SPEC_in2_insn 0
#endif
5782
5783/* ====================================================================== */
5784
5785/* Find opc within the table of insns.  This is formulated as a switch
5786   statement so that (1) we get compile-time notice of cut-paste errors
5787   for duplicated opcodes, and (2) the compiler generates the binary
5788   search tree, rather than us having to post-process the table.  */
5789
/* C/D/F are thin wrappers over E that default the data (D) and/or
   flags (FL) arguments to zero.  */
#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0)

#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0)

#define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \
    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL)

/* First expansion of insn-data.def: one enumerator per insn, giving
   each a stable index into the insn_info[] table below.  */
#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM,

enum DisasInsnEnum {
#include "insn-data.def"
};

/* Second expansion: a DisasInsn initializer per insn, wiring the
   in1/in2/prep/wout/cout/op helper functions and their SPEC bits.  */
#undef E
#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) {                   \
    .opc = OPC,                                                             \
    .flags = FL,                                                            \
    .fmt = FMT_##FT,                                                        \
    .fac = FAC_##FC,                                                        \
    .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
    .name = #NM,                                                            \
    .help_in1 = in1_##I1,                                                   \
    .help_in2 = in2_##I2,                                                   \
    .help_prep = prep_##P,                                                  \
    .help_wout = wout_##W,                                                  \
    .help_cout = cout_##CC,                                                 \
    .help_op = op_##OP,                                                     \
    .data = D                                                               \
 },

/* Allow 0 to be used for NULL in the table below.  */
#define in1_0  NULL
#define in2_0  NULL
#define prep_0  NULL
#define wout_0  NULL
#define cout_0  NULL
#define op_0  NULL

/* And give the "no generator" case an empty SPEC mask.  */
#define SPEC_in1_0 0
#define SPEC_in2_0 0
#define SPEC_prep_0 0
#define SPEC_wout_0 0
5834
/* Give smaller names to the various facilities.  */
/* NOTE(review): "SUPPPORT" below is spelled with three P's to match the
   S390_FEAT_* name as declared elsewhere; do not "fix" it here alone.  */
#define FAC_Z           S390_FEAT_ZARCH
#define FAC_CASS        S390_FEAT_COMPARE_AND_SWAP_AND_STORE
#define FAC_DFP         S390_FEAT_DFP
#define FAC_DFPR        S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* DFP-rounding */
#define FAC_DO          S390_FEAT_STFLE_45 /* distinct-operands */
#define FAC_EE          S390_FEAT_EXECUTE_EXT
#define FAC_EI          S390_FEAT_EXTENDED_IMMEDIATE
#define FAC_FPE         S390_FEAT_FLOATING_POINT_EXT
#define FAC_FPSSH       S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPS-sign-handling */
#define FAC_FPRGR       S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPR-GR-transfer */
#define FAC_GIE         S390_FEAT_GENERAL_INSTRUCTIONS_EXT
#define FAC_HFP_MA      S390_FEAT_HFP_MADDSUB
#define FAC_HW          S390_FEAT_STFLE_45 /* high-word */
#define FAC_IEEEE_SIM   S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* IEEE-exception-simulation */
#define FAC_MIE         S390_FEAT_STFLE_49 /* misc-instruction-extensions */
#define FAC_LAT         S390_FEAT_STFLE_49 /* load-and-trap */
#define FAC_LOC         S390_FEAT_STFLE_45 /* load/store on condition 1 */
#define FAC_LOC2        S390_FEAT_STFLE_53 /* load/store on condition 2 */
#define FAC_LD          S390_FEAT_LONG_DISPLACEMENT
#define FAC_PC          S390_FEAT_STFLE_45 /* population count */
#define FAC_SCF         S390_FEAT_STORE_CLOCK_FAST
#define FAC_SFLE        S390_FEAT_STFLE
#define FAC_ILA         S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
#define FAC_MVCOS       S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
#define FAC_LPP         S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
#define FAC_DAT_ENH     S390_FEAT_DAT_ENH
#define FAC_E2          S390_FEAT_EXTENDED_TRANSLATION_2
#define FAC_EH          S390_FEAT_STFLE_49 /* execution-hint */
#define FAC_PPA         S390_FEAT_STFLE_49 /* processor-assist */
#define FAC_LZRB        S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
#define FAC_ETF3        S390_FEAT_EXTENDED_TRANSLATION_3
#define FAC_MSA         S390_FEAT_MSA /* message-security-assist facility */
#define FAC_MSA3        S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
#define FAC_MSA4        S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
#define FAC_MSA5        S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
#define FAC_ECT         S390_FEAT_EXTRACT_CPU_TIME
#define FAC_PCI         S390_FEAT_ZPCI /* z/PCI facility */
#define FAC_AIS         S390_FEAT_ADAPTER_INT_SUPPRESSION
5874
/* The full table of instruction descriptors, produced by the second
   expansion of insn-data.def via the E() macro above.  */
static const DisasInsn insn_info[] = {
#include "insn-data.def"
};

#undef E
/* Third expansion: switch cases mapping opcode -> descriptor pointer.  */
#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \
    case OPC: return &insn_info[insn_ ## NM];

/* Map a combined (major << 8 | minor) opcode to its descriptor,
   or NULL when the opcode is not implemented.  */
static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}

#undef F
#undef E
#undef D
#undef C
5896
/* Extract a field from the insn.  The INSN should be left-aligned in
   the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principles of Operation.  */
5900
static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
{
    uint32_t r, m;

    /* A zero-sized field descriptor means "no field here".  */
    if (f->size == 0) {
        return;
    }

    /* Zero extract the field from the insn.  */
    r = (insn << f->beg) >> (64 - f->size);

    /* Sign-extend, or un-swap the field as necessary.  */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        assert(f->size <= 32);
        /* Standard sign-extension trick: XOR/subtract the sign bit.  */
        m = 1u << (f->size - 1);
        r = (r ^ m) - m;
        break;
    case 2: /* dl+dh split, signed 20 bit. */
        /* Reassemble the long-displacement value: the raw field holds
           DL in the upper 12 bits and the signed DH byte in the low 8;
           the result is (DH << 12) | DL, sign-extended via int8_t.  */
        r = ((int8_t)r << 12) | (r >> 8);
        break;
    default:
        abort();
    }

    /* Validate that the "compressed" encoding we selected above is valid.
       I.e. we haven't made two different original fields overlap.  */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;

    o->c[f->indexC] = r;
}
5936
5937/* Lookup the insn at the current PC, extracting the operands into O and
5938   returning the info struct for the insn.  Returns NULL for invalid insn.  */
5939
static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
                                     DisasFields *f)
{
    uint64_t insn, pc = s->base.pc_next;
    int op, op2, ilen;
    const DisasInsn *info;

    if (unlikely(s->ex_value)) {
        /* Drop the EX data now, so that it's clear on exception paths.  */
        TCGv_i64 zero = tcg_const_i64(0);
        tcg_gen_st_i64(zero, cpu_env, offsetof(CPUS390XState, ex_value));
        tcg_temp_free_i64(zero);

        /* Extract the values saved by EXECUTE: the target insn bytes
           left-aligned in the upper 48 bits, and its length in the
           low nibble.  */
        insn = s->ex_value & 0xffffffffffff0000ull;
        ilen = s->ex_value & 0xf;
        op = insn >> 56;
    } else {
        /* Read the first halfword; its high byte determines the total
           instruction length (2, 4 or 6 bytes), then re-read at the
           full width, left-aligning the result in INSN.  */
        insn = ld_code2(env, pc);
        op = (insn >> 8) & 0xff;
        ilen = get_ilen(op);
        switch (ilen) {
        case 2:
            insn = insn << 48;
            break;
        case 4:
            insn = ld_code4(env, pc) << 32;
            break;
        case 6:
            insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
            break;
        default:
            g_assert_not_reached();
        }
    }
    s->pc_tmp = s->base.pc_next + ilen;
    s->ilen = ilen;

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0. */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE, IE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        /* Secondary opcode is the second byte.  */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        /* Secondary opcode is the low nibble of the second byte.  */
        op2 = (insn << 12) >> 60;
        break;
    case 0xc5: /* MII */
    case 0xc7: /* SMI */
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        /* No secondary opcode at all.  */
        op2 = 0;
        break;
    default:
        /* Default: secondary opcode at bit 40 (byte 5).  */
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(f, 0, sizeof(*f));
    f->raw_insn = insn;
    f->op = op;
    f->op2 = op2;

    /* Lookup the instruction.  */
    info = lookup_opc(op << 8 | op2);

    /* If we found it, extract the operands.  */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(f, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
6040
/* True if REG is an additional-FP register, i.e. anything other than
   the base registers 0, 2, 4 and 6 (odd-numbered, or above 6).  */
static bool is_afp_reg(int reg)
{
    return (reg & 1) || reg > 6;
}
6045
/* True if REG can name the first register of a 128-bit FP pair.
   Valid: 0,1,4,5,8,9,12,13 — exactly those with bit 1 clear.  */
static bool is_fp_pair(int reg)
{
    return (reg & 0x2) == 0;
}
6051
/*
 * Decode and translate the single instruction at s->base.pc_next.
 *
 * Returns the resulting DisasJumpType: DISAS_NEXT when translation may
 * continue with the following instruction, DISAS_NORETURN when an
 * exception was generated, or whatever the per-insn handler returned.
 * On return, s->base.pc_next has been advanced past this instruction.
 */
static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    DisasJumpType ret = DISAS_NEXT;
    DisasFields f;
    DisasOps o;

    /* Search for the insn in the table.  */
    insn = extract_insn(env, s, &f);

    /* Not found means unimplemented/illegal opcode.  */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      f.op, f.op2);
        gen_illegal_opcode(s);
        return DISAS_NORETURN;
    }

#ifndef CONFIG_USER_ONLY
    /* Let the PER (Program Event Recording) helper see this fetch.  */
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 addr = tcg_const_i64(s->base.pc_next);
        gen_helper_per_ifetch(cpu_env, addr);
        tcg_temp_free_i64(addr);
    }
#endif

    /* process flags */
    if (insn->flags) {
        /* privileged instruction */
        if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) {
            gen_program_exception(s, PGM_PRIVILEGED);
            return DISAS_NORETURN;
        }

        /* if AFP is not enabled, instructions and registers are forbidden */
        if (!(s->base.tb->flags & FLAG_MASK_AFP)) {
            /* dxc selects the data-exception code reported to the guest;
               later checks deliberately override earlier ones.  */
            uint8_t dxc = 0;

            if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(&f, r1))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(&f, r2))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(&f, r3))) {
                dxc = 1;
            }
            if (insn->flags & IF_BFP) {
                dxc = 2;
            }
            if (insn->flags & IF_DFP) {
                dxc = 3;
            }
            if (dxc) {
                gen_data_exception(dxc);
                return DISAS_NORETURN;
            }
        }
    }

    /* Check for insn specification exceptions.  */
    if (insn->spec) {
        if ((insn->spec & SPEC_r1_even && get_field(&f, r1) & 1) ||
            (insn->spec & SPEC_r2_even && get_field(&f, r2) & 1) ||
            (insn->spec & SPEC_r3_even && get_field(&f, r3) & 1) ||
            (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(&f, r1))) ||
            (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(&f, r2)))) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
    }

    /* Set up the structures we use to communicate with the helpers. */
    s->insn = insn;
    s->fields = &f;
    o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
    o.out = NULL;
    o.out2 = NULL;
    o.in1 = NULL;
    o.in2 = NULL;
    o.addr1 = NULL;

    /* Implement the instruction: load inputs, prepare outputs, run the
       operation, then write back results and the condition code.  */
    if (insn->help_in1) {
        insn->help_in1(s, &f, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &f, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &f, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    /* Skip write-back when the op already ended the TB via an exception.  */
    if (ret != DISAS_NORETURN) {
        if (insn->help_wout) {
            insn->help_wout(s, &f, &o);
        }
        if (insn->help_cout) {
            insn->help_cout(s, &o);
        }
    }

    /* Free any temporaries created by the helpers; the g_* flags mark
       values that are globals rather than temps and must not be freed.  */
    if (o.out && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (o.out2 && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (o.in1 && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (o.in2 && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (o.addr1) {
        tcg_temp_free_i64(o.addr1);
    }

#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        /* An exception might be triggered, save PSW if not already done.  */
        if (ret == DISAS_NEXT || ret == DISAS_PC_STALE) {
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
        }

        /* Call the helper to check for a possible PER exception.  */
        gen_helper_per_check_exception(cpu_env);
    }
#endif

    /* Advance to the next instruction.  */
    s->base.pc_next = s->pc_tmp;
    return ret;
}
6189
6190static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
6191{
6192    DisasContext *dc = container_of(dcbase, DisasContext, base);
6193
6194    /* 31-bit mode */
6195    if (!(dc->base.tb->flags & FLAG_MASK_64)) {
6196        dc->base.pc_first &= 0x7fffffff;
6197        dc->base.pc_next = dc->base.pc_first;
6198    }
6199
6200    dc->cc_op = CC_OP_DYNAMIC;
6201    dc->ex_value = dc->base.tb->cs_base;
6202    dc->do_debug = dc->base.singlestep_enabled;
6203}
6204
/* Per-TB start hook: nothing to do for s390x; intentionally empty.  */
static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}
6208
6209static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
6210{
6211    DisasContext *dc = container_of(dcbase, DisasContext, base);
6212
6213    tcg_gen_insn_start(dc->base.pc_next, dc->cc_op);
6214}
6215
6216static bool s390x_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
6217                                      const CPUBreakpoint *bp)
6218{
6219    DisasContext *dc = container_of(dcbase, DisasContext, base);
6220
6221    dc->base.is_jmp = DISAS_PC_STALE;
6222    dc->do_debug = true;
6223    /* The address covered by the breakpoint must be included in
6224       [tb->pc, tb->pc + tb->size) in order to for it to be
6225       properly cleared -- thus we increment the PC here so that
6226       the logic setting tb->size does the right thing.  */
6227    dc->base.pc_next += 2;
6228    return true;
6229}
6230
6231static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
6232{
6233    CPUS390XState *env = cs->env_ptr;
6234    DisasContext *dc = container_of(dcbase, DisasContext, base);
6235
6236    dc->base.is_jmp = translate_one(env, dc);
6237    if (dc->base.is_jmp == DISAS_NEXT) {
6238        uint64_t page_start;
6239
6240        page_start = dc->base.pc_first & TARGET_PAGE_MASK;
6241        if (dc->base.pc_next - page_start >= TARGET_PAGE_SIZE || dc->ex_value) {
6242            dc->base.is_jmp = DISAS_TOO_MANY;
6243        }
6244    }
6245}
6246
/*
 * Finalize a translation block: depending on how translation ended,
 * synchronize psw_addr and the cc op back to env, then emit the exit.
 * The cases deliberately cascade from "nothing synced" down to
 * "everything synced"; the FALLTHRUs are intentional.
 */
static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    switch (dc->base.is_jmp) {
    case DISAS_GOTO_TB:
    case DISAS_NORETURN:
        /* Exit already emitted (goto_tb or exception); nothing to do.  */
        break;
    case DISAS_TOO_MANY:
    case DISAS_PC_STALE:
    case DISAS_PC_STALE_NOCHAIN:
        update_psw_addr(dc);
        /* FALLTHRU */
    case DISAS_PC_UPDATED:
        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
           cc op type is in env */
        update_cc_op(dc);
        /* FALLTHRU */
    case DISAS_PC_CC_UPDATED:
        /* Exit the TB, either by raising a debug exception or by return.  */
        if (dc->do_debug) {
            gen_exception(EXCP_DEBUG);
        } else if (use_exit_tb(dc) ||
                   dc->base.is_jmp == DISAS_PC_STALE_NOCHAIN) {
            tcg_gen_exit_tb(NULL, 0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        break;
    default:
        g_assert_not_reached();
    }
}
6280
6281static void s390x_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
6282{
6283    DisasContext *dc = container_of(dcbase, DisasContext, base);
6284
6285    if (unlikely(dc->ex_value)) {
6286        /* ??? Unfortunately log_target_disas can't use host memory.  */
6287        qemu_log("IN: EXECUTE %016" PRIx64, dc->ex_value);
6288    } else {
6289        qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
6290        log_target_disas(cs, dc->base.pc_first, dc->base.tb->size);
6291    }
6292}
6293
/* Hook table consumed by the generic translator_loop().  */
static const TranslatorOps s390x_tr_ops = {
    .init_disas_context = s390x_tr_init_disas_context,
    .tb_start           = s390x_tr_tb_start,
    .insn_start         = s390x_tr_insn_start,
    .breakpoint_check   = s390x_tr_breakpoint_check,
    .translate_insn     = s390x_tr_translate_insn,
    .tb_stop            = s390x_tr_tb_stop,
    .disas_log          = s390x_tr_disas_log,
};
6303
6304void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
6305{
6306    DisasContext dc;
6307
6308    translator_loop(&s390x_tr_ops, &dc.base, cs, tb);
6309}
6310
6311void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
6312                          target_ulong *data)
6313{
6314    int cc_op = data[1];
6315    env->psw.addr = data[0];
6316    if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
6317        env->cc_op = cc_op;
6318    }
6319}
6320