/* qemu/target/s390x/translate.c */
   1/*
   2 *  S/390 translation
   3 *
   4 *  Copyright (c) 2009 Ulrich Hecht
   5 *  Copyright (c) 2010 Alexander Graf
   6 *
   7 * This library is free software; you can redistribute it and/or
   8 * modify it under the terms of the GNU Lesser General Public
   9 * License as published by the Free Software Foundation; either
  10 * version 2.1 of the License, or (at your option) any later version.
  11 *
  12 * This library is distributed in the hope that it will be useful,
  13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  15 * Lesser General Public License for more details.
  16 *
  17 * You should have received a copy of the GNU Lesser General Public
  18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
  19 */
  20
/* #define DEBUG_INLINE_BRANCHES */
#define S390X_DEBUG_DISAS
/* #define S390X_DEBUG_DISAS_VERBOSE */

/* LOG_DISAS() expands to qemu_log() only in verbose-disas builds;
   otherwise it compiles away to nothing.  */
#ifdef S390X_DEBUG_DISAS_VERBOSE
#  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
  30
  31#include "qemu/osdep.h"
  32#include "cpu.h"
  33#include "internal.h"
  34#include "disas/disas.h"
  35#include "exec/exec-all.h"
  36#include "tcg/tcg-op.h"
  37#include "tcg/tcg-op-gvec.h"
  38#include "qemu/log.h"
  39#include "qemu/host-utils.h"
  40#include "exec/cpu_ldst.h"
  41#include "exec/gen-icount.h"
  42#include "exec/helper-proto.h"
  43#include "exec/helper-gen.h"
  44
  45#include "trace-tcg.h"
  46#include "exec/translator.h"
  47#include "exec/log.h"
  48#include "qemu/atomic128.h"
  49
  50
/* Information that (most) every instruction needs to manipulate.  */
/* Forward typedefs; the struct definitions follow below (DisasInsn is
   defined elsewhere in this file).  */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;
  55
/*
 * Define a structure to hold the decoded fields.  We'll store each inside
 * an array indexed by an enum.  In order to conserve memory, we'll arrange
 * for fields that do not exist at the same time to overlap, thus the "C"
 * for compact.  For checking purposes there is an "O" for original index
 * as well that will be applied to availability bitmaps.
 */

/* Original (non-overlapping) field indices; each value is used as a bit
   position in DisasFields.presentO (see have_field1).  */
enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_m5,
    FLD_O_m6,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5,
    FLD_O_v1,
    FLD_O_v2,
    FLD_O_v3,
    FLD_O_v4,
};
  92
/* Compact storage slots.  Fields that can never coexist in one format
   share a slot in DisasFields.c[]; NUM_C_FIELD is the array size.  */
enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,
    FLD_C_v1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,
    FLD_C_v3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,
    FLD_C_v4 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,
    FLD_C_m5 = 4,

    FLD_C_d2 = 5,
    FLD_C_m6 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,
    FLD_C_v2 = 6,

    NUM_C_FIELD = 7
};
 129
/* The decoded operand fields of one instruction.  */
struct DisasFields {
    uint64_t raw_insn;      /* the undecoded instruction bits */
    unsigned op:8;          /* opcode bytes — filled in by the decoder
                               (outside this view); presumably primary /
                               secondary opcode.  */
    unsigned op2:8;
    unsigned presentC:16;   /* bitmap over compact slots (FLD_C_*) */
    unsigned int presentO;  /* bitmap over original indices (FLD_O_*);
                               tested by have_field1() */
    int c[NUM_C_FIELD];     /* decoded field values, indexed by FLD_C_* */
};
 138
/* Per-translation-block disassembly state.  */
struct DisasContext {
    DisasContextBase base;      /* common translator state (pc_next, tb, ...) */
    const DisasInsn *insn;      /* decode-table entry for the current insn */
    DisasFields fields;         /* decoded operand fields of the current insn */
    uint64_t ex_value;          /* NOTE(review): presumably the pending
                                   EXECUTE target value — confirm against
                                   the decoder below this chunk.  */
    /*
     * During translate_one(), pc_tmp is used to determine the instruction
     * to be executed after base.pc_next - e.g. next sequential instruction
     * or a branch target.
     */
    uint64_t pc_tmp;
    uint32_t ilen;              /* length of the current insn in bytes;
                                   stored for program exceptions */
    enum cc_op cc_op;           /* how the condition code was last computed */
    bool do_debug;
};
 154
/* Information carried about a condition to be evaluated.  */
typedef struct {
    TCGCond cond:8;     /* TCG comparison to apply to a and b */
    bool is_64;         /* selects the s64 vs s32 member of u */
    bool g1;            /* a is a global TCG value; do not free it */
    bool g2;            /* b is a global TCG value; do not free it */
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;
 166
/* Per-cc_op counters for the branch-inlining statistics
   (see account_inline_branch / account_noninline_branch).  */
#ifdef DEBUG_INLINE_BRANCHES
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif
 171
/*
 * Deposit the "link" (return address) information for PC into OUT,
 * honoring the current addressing mode:
 *  - 64-bit mode (FLAG_MASK_32 | FLAG_MASK_64): the full address;
 *  - 31-bit mode (FLAG_MASK_32 only): the address with bit 0x80000000
 *    set, written to the low 32 bits of OUT;
 *  - otherwise (presumably 24-bit mode): the address as-is, written to
 *    the low 32 bits of OUT.
 * In the non-64-bit cases the high half of OUT is left unchanged.
 */
static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
{
    TCGv_i64 tmp;

    if (s->base.tb->flags & FLAG_MASK_32) {
        if (s->base.tb->flags & FLAG_MASK_64) {
            tcg_gen_movi_i64(out, pc);
            return;
        }
        /* 31-bit mode: link addresses carry the addressing-mode bit.  */
        pc |= 0x80000000;
    }
    assert(!(s->base.tb->flags & FLAG_MASK_64));
    tmp = tcg_const_i64(pc);
    /* Replace only the low 32 bits; the high half is preserved.  */
    tcg_gen_deposit_i64(out, out, tmp, 0, 32);
    tcg_temp_free_i64(tmp);
}
 188
/* TCG globals backed by fields of CPUS390XState (see s390x_translate_init).  */
static TCGv_i64 psw_addr;   /* psw.addr */
static TCGv_i64 psw_mask;   /* psw.mask */
static TCGv_i64 gbea;       /* breaking-event address (PER) */

static TCGv_i32 cc_op;      /* cc_op / materialized cc value */
static TCGv_i64 cc_src;     /* inputs of the deferred cc computation */
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

static char cpu_reg_names[16][4];   /* backing storage for "r0".."r15" names */
static TCGv_i64 regs[16];           /* general registers */
 200
 201void s390x_translate_init(void)
 202{
 203    int i;
 204
 205    psw_addr = tcg_global_mem_new_i64(cpu_env,
 206                                      offsetof(CPUS390XState, psw.addr),
 207                                      "psw_addr");
 208    psw_mask = tcg_global_mem_new_i64(cpu_env,
 209                                      offsetof(CPUS390XState, psw.mask),
 210                                      "psw_mask");
 211    gbea = tcg_global_mem_new_i64(cpu_env,
 212                                  offsetof(CPUS390XState, gbea),
 213                                  "gbea");
 214
 215    cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
 216                                   "cc_op");
 217    cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
 218                                    "cc_src");
 219    cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
 220                                    "cc_dst");
 221    cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
 222                                   "cc_vr");
 223
 224    for (i = 0; i < 16; i++) {
 225        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
 226        regs[i] = tcg_global_mem_new(cpu_env,
 227                                     offsetof(CPUS390XState, regs[i]),
 228                                     cpu_reg_names[i]);
 229    }
 230}
 231
/* Byte offset of the full 16-byte vector register REG in CPUS390XState.  */
static inline int vec_full_reg_offset(uint8_t reg)
{
    g_assert(reg < 32);
    return offsetof(CPUS390XState, vregs[reg][0]);
}
 237
/* Byte offset of element ENR (of size ES) of vector register REG, as laid
   out on the host.  */
static inline int vec_reg_offset(uint8_t reg, uint8_t enr, MemOp es)
{
    /* Convert element size (es) - e.g. MO_8 - to bytes */
    const uint8_t bytes = 1 << es;
    int offs = enr * bytes;

    /*
     * vregs[n][0] is the lowest 8 byte and vregs[n][1] the highest 8 byte
     * of the 16 byte vector, on both, little and big endian systems.
     *
     * Big Endian (target/possible host)
     * B:  [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
     * HW: [     0][     1][     2][     3] - [     4][     5][     6][     7]
     * W:  [             0][             1] - [             2][             3]
     * DW: [                             0] - [                             1]
     *
     * Little Endian (possible host)
     * B:  [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
     * HW: [     3][     2][     1][     0] - [     7][     6][     5][     4]
     * W:  [             1][             0] - [             3][             2]
     * DW: [                             0] - [                             1]
     *
     * For 16 byte elements, the two 8 byte halves will not form a host
     * int128 if the host is little endian, since they're in the wrong order.
     * Some operations (e.g. xor) do not care. For operations like addition,
     * the two 8 byte elements have to be loaded separately. Let's force all
     * 16 byte operations to handle it in a special way.
     */
    g_assert(es <= MO_64);
#ifndef HOST_WORDS_BIGENDIAN
    /* Flip the element position within its 8-byte doubleword to map the
       big-endian element number onto the little-endian host layout.  */
    offs ^= (8 - bytes);
#endif
    return offs + vec_full_reg_offset(reg);
}
 272
/* Byte offset of 64-bit FP register REG (doubleword 0 of the vreg).  */
static inline int freg64_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_64);
}

/* Byte offset of 32-bit FP register REG (word element 0 of the vreg).  */
static inline int freg32_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_32);
}
 284
/* Return a new temporary holding a copy of general register REG.  */
static TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}

/* Return a new temporary holding 64-bit FP register REG.  */
static TCGv_i64 load_freg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld_i64(r, cpu_env, freg64_offset(reg));
    return r;
}

/* Return a new temporary holding 32-bit FP register REG,
   zero-extended to 64 bits.  */
static TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld32u_i64(r, cpu_env, freg32_offset(reg));
    return r;
}
 307
/* Write V to general register REG (all 64 bits).  */
static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

/* Write V to 64-bit FP register REG.  */
static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_st_i64(v, cpu_env, freg64_offset(reg));
}

/* Write the low 32 bits of V to the low half of register REG.  */
static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

/* Write the low 32 bits of V to the HIGH half of register REG.  */
static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

/* Write the low 32 bits of V to 32-bit FP register REG.  */
static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_st32_i64(v, cpu_env, freg32_offset(reg));
}

/* Load the secondary 64-bit helper return value from env->retxl.  */
static void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}
 338
/* Flush the translation-time PC (base.pc_next) out to psw.addr.  */
static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->base.pc_next);
}
 344
/*
 * Record a branch from the current insn in the breaking-event-address
 * register and, when PER is active, invoke the per_branch helper.
 * TO_NEXT selects s->pc_tmp (the following insn) instead of psw_addr
 * as the branch destination.  No-op for user-only builds.
 */
static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i64(gbea, s->base.pc_next);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_const_i64(s->pc_tmp) : psw_addr;
        gen_helper_per_branch(cpu_env, gbea, next_pc);
        if (to_next) {
            /* psw_addr is a global; free only the temp we created.  */
            tcg_temp_free_i64(next_pc);
        }
    }
#endif
}
 359
/*
 * Conditional variant of per_branch: record the branch only when
 * COND(ARG1, ARG2) holds.  With PER active this also calls the
 * per_branch helper under the condition; otherwise gbea is updated
 * with a movcond.  No-op for user-only builds.
 */
static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        /* Branch over the helper call when the condition is false.  */
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->base.pc_next);
        gen_helper_per_branch(cpu_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        TCGv_i64 pc = tcg_const_i64(s->base.pc_next);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
        tcg_temp_free_i64(pc);
    }
#endif
}
 379
/* Record the current insn address as the PER breaking-event address.  */
static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->base.pc_next);
}
 384
 385static void update_cc_op(DisasContext *s)
 386{
 387    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
 388        tcg_gen_movi_i32(cc_op, s->cc_op);
 389    }
 390}
 391
 392static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
 393{
 394    return (uint64_t)cpu_lduw_code(env, pc);
 395}
 396
 397static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
 398{
 399    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
 400}
 401
/*
 * Select the MMU index for memory accesses of the current translation:
 * real-mode when DAT is disabled, otherwise the index that matches the
 * PSW address-space control (primary/secondary/home).
 */
static int get_mem_index(DisasContext *s)
{
#ifdef CONFIG_USER_ONLY
    return MMU_USER_IDX;
#else
    if (!(s->base.tb->flags & FLAG_MASK_DAT)) {
        return MMU_REAL_IDX;
    }

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_PRIMARY_IDX;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_SECONDARY_IDX;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        return MMU_HOME_IDX;
    default:
        /* FLAG_MASK_ASC covers exactly the three cases above.  */
        tcg_abort();
        break;
    }
#endif
}
 424
 425static void gen_exception(int excp)
 426{
 427    TCGv_i32 tmp = tcg_const_i32(excp);
 428    gen_helper_exception(cpu_env, tmp);
 429    tcg_temp_free_i32(tmp);
 430}
 431
/*
 * Raise a program exception with interruption code CODE: record the
 * code and the current instruction length for the interrupt handler,
 * synchronize psw.addr and the cc, then trigger EXCP_PGM.
 */
static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was.  */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    tmp = tcg_const_i32(s->ilen);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* update the psw */
    update_psw_addr(s);

    /* Save off cc.  */
    update_cc_op(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);
}
 454
/* Raise a PGM_OPERATION (illegal opcode) program exception.  */
static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}

/* Raise a data exception with data-exception code DXC.  */
static inline void gen_data_exception(uint8_t dxc)
{
    TCGv_i32 tmp = tcg_const_i32(dxc);
    gen_helper_data_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

/* Raise the trap used for unassigned/invalid FP operations.  */
static inline void gen_trap(DisasContext *s)
{
    /* Set DXC to 0xff */
    gen_data_exception(0xff);
}
 472
/*
 * dst = src + imm, wrapped to the current addressing mode: in 31-bit
 * mode the result is masked to 31 bits, in 24-bit mode to 24 bits;
 * 64-bit mode keeps the full result.
 */
static void gen_addi_and_wrap_i64(DisasContext *s, TCGv_i64 dst, TCGv_i64 src,
                                  int64_t imm)
{
    tcg_gen_addi_i64(dst, src, imm);
    if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_andi_i64(dst, dst, 0x7fffffff);
        } else {
            tcg_gen_andi_i64(dst, dst, 0x00ffffff);
        }
    }
}
 485
/*
 * Compute the effective address base(b2) + index(x2) + displacement(d2),
 * wrapped to the current addressing mode.  Register number 0 means "no
 * base/index".  Returns a new temporary owned by the caller.
 */
static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /*
     * Note that d2 is limited to 20 bits, signed.  If we crop negative
     * displacements early we create larger immedate addends.
     */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        gen_addi_and_wrap_i64(s, tmp, tmp, d2);
    } else if (b2) {
        gen_addi_and_wrap_i64(s, tmp, regs[b2], d2);
    } else if (x2) {
        gen_addi_and_wrap_i64(s, tmp, regs[x2], d2);
    } else if (!(s->base.tb->flags & FLAG_MASK_64)) {
        /* No base or index: just the displacement, masked to the mode.  */
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_movi_i64(tmp, d2 & 0x7fffffff);
        } else {
            tcg_gen_movi_i64(tmp, d2 & 0x00ffffff);
        }
    } else {
        tcg_gen_movi_i64(tmp, d2);
    }

    return tmp;
}
 513
 514static inline bool live_cc_data(DisasContext *s)
 515{
 516    return (s->cc_op != CC_OP_DYNAMIC
 517            && s->cc_op != CC_OP_STATIC
 518            && s->cc_op > 3);
 519}
 520
/* Set the cc to the constant VAL (0..3), discarding any pending data.  */
static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        /* Tell TCG the old cc inputs are dead.  */
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}
 530
/* Defer a cc computation OP that depends only on DST.  */
static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

/* Defer a cc computation OP that depends on SRC and DST.  */
static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

/* Defer a cc computation OP that depends on SRC, DST and VR.  */
static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}
 560
/* cc = (VAL != 0), 64-bit integer flavor.  */
static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

/* cc from a 32-bit FP result in VAL.  */
static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
}

/* cc from a 64-bit FP result in VAL.  */
static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
}

/* cc from a 128-bit FP result in VH:VL.  */
static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
}
 580
/* CC value is in env->cc_op */
/* Mark the cc as already materialized, discarding any pending data.  */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}
 591
/* calculates cc into cc_op */
/*
 * Materialize the deferred condition code: emit whatever computation
 * s->cc_op describes into the cc_op global, then mark the cc static.
 * The first switch only sets up the helper arguments (a constant cc_op
 * and a dummy operand where needed); the second emits the computation.
 */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = NULL;
    TCGv_i64 dummy = NULL;

    switch (s->cc_op) {
    default:
        /* Ops with fewer than 3 real arguments need a dummy operand.  */
        dummy = tcg_const_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        local_cc_op = tcg_const_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        /* No helper call needed (constant or already materialized).  */
        break;
    }

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
    case CC_OP_LCBB:
    case CC_OP_MULS_32:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ADDU:
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA_32:
    case CC_OP_SLA_64:
    case CC_OP_SUBU:
    case CC_OP_NZ_F128:
    case CC_OP_VC:
    case CC_OP_MULS_64:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    if (local_cc_op) {
        tcg_temp_free_i32(local_cc_op);
    }
    if (dummy) {
        tcg_temp_free_i64(dummy);
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
 687
 688static bool use_exit_tb(DisasContext *s)
 689{
 690    return s->base.singlestep_enabled ||
 691            (tb_cflags(s->base.tb) & CF_LAST_IO) ||
 692            (s->base.tb->flags & FLAG_MASK_PER);
 693}
 694
/*
 * True when a direct (goto_tb) jump to DEST is allowed: we must not be
 * forced to exit the TB, and (system mode) DEST must lie on the same
 * page as the TB start or the current insn.  User mode has no such
 * page restriction.
 */
static bool use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (unlikely(use_exit_tb(s))) {
        return false;
    }
#ifndef CONFIG_USER_ONLY
    return (dest & TARGET_PAGE_MASK) == (s->base.tb->pc & TARGET_PAGE_MASK) ||
           (dest & TARGET_PAGE_MASK) == (s->base.pc_next & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
 707
/* Count a branch we could NOT inline (debug statistics only).  */
static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

/* Count a branch we successfully inlined (debug statistics only).  */
static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}
 721
/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.  */
/* Indexed by the 4-bit branch mask; pairs differ only in the CC=3 bit,
   which is ignored here.  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};

/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible.  */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER,
    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE,
    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
 747
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    /* All-ones / all-zeroes mask: branch always / never; no operands
       are actually needed, so alias both to the cc_op global.  */
    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op.  */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        /* Same table, but the comparison is unsigned.  */
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        switch (mask) {
        case 8 | 2: /* result == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* result != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* !carry (borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_EQ : TCG_COND_NE;
            break;
        case 2 | 1: /* carry (!borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_NE : TCG_COND_EQ;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        /* Compare (src & dst) against zero.  */
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        c->is_64 = true;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        switch (mask) {
        case 8 | 2:
        case 4 | 1: /* result */
            c->u.s64.a = cc_dst;
            break;
        case 8 | 4:
        case 2 | 1: /* carry */
            c->u.s64.a = cc_src;
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case CC_OP_STATIC:
        /* The cc value is materialized in cc_op; decode MASK into a
           comparison against a constant (or a parity test).  */
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask.  */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
1010
1011static void free_compare(DisasCompare *c)
1012{
1013    if (!c->g1) {
1014        if (c->is_64) {
1015            tcg_temp_free_i64(c->u.s64.a);
1016        } else {
1017            tcg_temp_free_i32(c->u.s32.a);
1018        }
1019    }
1020    if (!c->g2) {
1021        if (c->is_64) {
1022            tcg_temp_free_i64(c->u.s64.b);
1023        } else {
1024            tcg_temp_free_i32(c->u.s32.b);
1025        }
1026    }
1027}
1028
/* ====================================================================== */
/* Define the insn format enumeration.  */
/* Each F1..F6 variant discards the operand descriptions and keeps only
   the FMT_ name, so including insn-format.def yields one enumerator per
   instruction format.  */
#define F0(N)                         FMT_##N,
#define F1(N, X1)                     F0(N)
#define F2(N, X1, X2)                 F0(N)
#define F3(N, X1, X2, X3)             F0(N)
#define F4(N, X1, X2, X3, X4)         F0(N)
#define F5(N, X1, X2, X3, X4, X5)     F0(N)
#define F6(N, X1, X2, X3, X4, X5, X6) F0(N)

typedef enum {
#include "insn-format.def"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6
1050
1051/* This is the way fields are to be accessed out of DisasFields.  */
1052#define have_field(S, F)  have_field1((S), FLD_O_##F)
1053#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)
1054
1055static bool have_field1(const DisasContext *s, enum DisasFieldIndexO c)
1056{
1057    return (s->fields.presentO >> c) & 1;
1058}
1059
/* Fetch the value of decoded field C.  The matching original-field
   index O must have been marked present by the decoder; asking for an
   absent field is a translator bug, hence the assert.  */
static int get_field1(const DisasContext *s, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(s, o));
    return s->fields.c[c];
}
1066
/* Describe the layout of each field in each format.  */
typedef struct DisasField {
    unsigned int beg:8;         /* first bit of the field within the insn */
    unsigned int size:8;        /* width of the field in bits */
    unsigned int type:2;        /* extraction type tag; see the R/M/V/I/L and
                                   BD* macros below (0 unsigned, 1 immediate,
                                   2 long displacement, 3 vector) */
    unsigned int indexC:6;      /* index into DisasFields.c[] for storage */
    enum DisasFieldIndexO indexO:8;  /* original-field index, for presentO */
} DisasField;

typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD]; /* per-format list of fields to extract */
} DisasFormatInfo;
1079
/* Each operand macro expands to one or more DisasField initializers of the
   form { beg, size, type, indexC, indexO }.  BXD/BXDL expand to base,
   index and displacement sub-fields of a single storage operand.  */
#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define V(N, B)       {  B,  4, 3, FLD_C_v##N, FLD_O_v##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }

/* Second expansion of insn-format.def: this time each format contributes
   its full operand layout table, indexed by the DisasFormat enum above.  */
#define F0(N)                     { { } },
#define F1(N, X1)                 { { X1 } },
#define F2(N, X1, X2)             { { X1, X2 } },
#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
#define F6(N, X1, X2, X3, X4, X5, X6)       { { X1, X2, X3, X4, X5, X6 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.def"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6
#undef R
#undef M
#undef V
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L
1124
/* Generally, we'll extract operands into this structures, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */
typedef struct {
    bool g_out, g_out2, g_in1, g_in2;   /* true: value is a global, not a
                                           temp owned by this insn */
    TCGv_i64 out, out2, in1, in2;       /* operand values */
    TCGv_i64 addr1;                     /* computed first-operand address */
} DisasOps;
1133
/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */

/* Bit flags; a DisasInsn may combine several in its .spec member.  */
#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16

/* Return values from translate_one, indicating the state of the TB.  */

/* We are not using a goto_tb (for whatever reason), but have updated
   the PC (for whatever reason), so there's no need to do it again on
   exiting the TB.  */
#define DISAS_PC_UPDATED        DISAS_TARGET_0

/* We have emitted one or more goto_tb.  No fixup required.  */
#define DISAS_GOTO_TB           DISAS_TARGET_1

/* We have updated the PC and CC values.  */
#define DISAS_PC_CC_UPDATED     DISAS_TARGET_2

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the PC for the next instruction to be executed.  */
#define DISAS_PC_STALE          DISAS_TARGET_3

/* We are exiting the TB to the main loop.  */
#define DISAS_PC_STALE_NOCHAIN  DISAS_TARGET_4


/* Instruction flags */
#define IF_AFP1     0x0001      /* r1 is a fp reg for HFP/FPS instructions */
#define IF_AFP2     0x0002      /* r2 is a fp reg for HFP/FPS instructions */
#define IF_AFP3     0x0004      /* r3 is a fp reg for HFP/FPS instructions */
#define IF_BFP      0x0008      /* binary floating point instruction */
#define IF_DFP      0x0010      /* decimal floating point instruction */
#define IF_PRIV     0x0020      /* privileged instruction */
#define IF_VEC      0x0040      /* vector instruction */
#define IF_IO       0x0080      /* input/output instruction */

/* One row of the decode table: identifies an opcode and the set of
   helper callbacks that together translate it.  */
struct DisasInsn {
    unsigned opc:16;            /* opcode bits used for dispatch */
    unsigned flags:16;          /* IF_* flags above */
    DisasFormat fmt:8;          /* operand-field layout (FMT_*) */
    unsigned fac:8;             /* required facility */
    unsigned spec:8;            /* SPEC_* operand constraints */

    const char *name;

    /* Pre-process arguments before HELP_OP.  */
    void (*help_in1)(DisasContext *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasOps *);

    /*
     * Post-process output after HELP_OP.
     * Note that these are not called if HELP_OP returns DISAS_NORETURN.
     */
    void (*help_wout)(DisasContext *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);

    /* Implement the operation itself.  */
    DisasJumpType (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;              /* insn-specific constant passed to helpers */
};
1203
1204/* ====================================================================== */
1205/* Miscellaneous helpers, used by several operations.  */
1206
1207static void help_l2_shift(DisasContext *s, DisasOps *o, int mask)
1208{
1209    int b2 = get_field(s, b2);
1210    int d2 = get_field(s, d2);
1211
1212    if (b2 == 0) {
1213        o->in2 = tcg_const_i64(d2 & mask);
1214    } else {
1215        o->in2 = get_address(s, 0, b2, d2);
1216        tcg_gen_andi_i64(o->in2, o->in2, mask);
1217    }
1218}
1219
1220static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
1221{
1222    if (dest == s->pc_tmp) {
1223        per_branch(s, true);
1224        return DISAS_NEXT;
1225    }
1226    if (use_goto_tb(s, dest)) {
1227        update_cc_op(s);
1228        per_breaking_event(s);
1229        tcg_gen_goto_tb(0);
1230        tcg_gen_movi_i64(psw_addr, dest);
1231        tcg_gen_exit_tb(s->base.tb, 0);
1232        return DISAS_GOTO_TB;
1233    } else {
1234        tcg_gen_movi_i64(psw_addr, dest);
1235        per_branch(s, false);
1236        return DISAS_PC_UPDATED;
1237    }
1238}
1239
/* Emit a conditional branch guarded by *C.  The target is either
   base.pc_next + 2*IMM (is_imm) or the address in CDEST.  Consumes *C
   (freed at egress) and, when is_imm is false, expects CDEST to be a
   temp owned by the caller.  */
static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
                                 bool is_imm, int imm, TCGv_i64 cdest)
{
    DisasJumpType ret;
    uint64_t dest = s->base.pc_next + 2 * imm;
    TCGLabel *lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = DISAS_NEXT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->pc_tmp) {
            /* Branch to next.  */
            per_branch(s, true);
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (!cdest) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = DISAS_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->pc_tmp)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            /* Branch taken.  */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb(s->base.tb, 1);

            ret = DISAS_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = DISAS_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_const_i64(s->pc_tmp);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            /* Widen the 32-bit comparison result so a single 64-bit
               movcond can select between the two destinations.  */
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = DISAS_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}
1370
1371/* ====================================================================== */
1372/* The operations.  These perform the bulk of the work for any insn,
1373   usually after the operands have been loaded and output initialized.  */
1374
/* out = |in2| (load positive, integer forms).  */
static DisasJumpType op_abs(DisasContext *s, DisasOps *o)
{
    tcg_gen_abs_i64(o->out, o->in2);
    return DISAS_NEXT;
}
1380
/* Clear bit 31: sign bit of a float32 held in the low 32 bits of in2.  */
static DisasJumpType op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return DISAS_NEXT;
}
1386
/* Clear bit 63: sign bit of a float64 in in2.  */
static DisasJumpType op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return DISAS_NEXT;
}
1392
/* 128-bit float abs: clear the sign bit in the high doubleword (in1),
   pass the low doubleword (in2) through unchanged.  */
static DisasJumpType op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}
1399
/* out = in1 + in2; CC is produced separately by the cout helper.  */
static DisasJumpType op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
1405
/* 64-bit unsigned add: out = in1 + in2 with the carry-out captured
   in cc_src (0 or 1) via the double-word add.  */
static DisasJumpType op_addu64(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    return DISAS_NEXT;
}
1412
1413/* Compute carry into cc_src. */
static void compute_carry(DisasContext *s)
{
    switch (s->cc_op) {
    case CC_OP_ADDU:
        /* The carry value is already in cc_src (1,0). */
        break;
    case CC_OP_SUBU:
        /* NOTE(review): cc_src appears to hold the borrow as 0/-1 here,
           so +1 yields the carry (1/0) — confirm against the CC_OP_SUBU
           producers.  */
        tcg_gen_addi_i64(cc_src, cc_src, 1);
        break;
    default:
        /* Materialize the CC value, then fall through to extract it.  */
        gen_op_calc_cc(s);
        /* fall through */
    case CC_OP_STATIC:
        /* The carry flag is the msb of CC; compute into cc_src. */
        tcg_gen_extu_i32_i64(cc_src, cc_op);
        tcg_gen_shri_i64(cc_src, cc_src, 1);
        break;
    }
}
1433
/* 32-bit add with carry: out = in1 + in2 + carry(cc_src).  No carry-out
   capture needed here; CC comes from the 32-bit cout helper.  */
static DisasJumpType op_addc32(DisasContext *s, DisasOps *o)
{
    compute_carry(s);
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    tcg_gen_add_i64(o->out, o->out, cc_src);
    return DISAS_NEXT;
}
1441
/* 64-bit add with carry: out = in1 + carry + in2, accumulating the
   total carry-out into cc_src via two double-word adds.  */
static DisasJumpType op_addc64(DisasContext *s, DisasOps *o)
{
    compute_carry(s);

    TCGv_i64 zero = tcg_const_i64(0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, zero);
    tcg_gen_add2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);
    tcg_temp_free_i64(zero);

    return DISAS_NEXT;
}
1453
/* Add immediate to storage (ASI family; insn->data carries the memop).
   With the interlocked-access facility the update is done atomically,
   otherwise as a plain load/add/store.  */
static DisasJumpType op_asi(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
1475
/* Unsigned add immediate to storage: like op_asi, but the recomputed
   sum also captures the carry-out into cc_src for the CC.  */
static DisasJumpType op_asiu64(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
1498
/* BFP add, 32-bit: helper performs the IEEE add and exception checks.  */
static DisasJumpType op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* BFP add, 64-bit.  */
static DisasJumpType op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* BFP add, 128-bit; low half of the result comes back via return_low128.  */
static DisasJumpType op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
1517
/* out = in1 & in2; CC is produced separately by the cout helper.  */
static DisasJumpType op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
1523
/* AND a shifted immediate field into in1 (NIxx-style).  insn->data packs
   the field position (low byte = shift) and width (next byte = size);
   bits outside the field are preserved by OR-ing in ~mask.  */
static DisasJumpType op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
1540
/* AND into storage (NI family; insn->data carries the memop).  Atomic
   when the interlocked-access-2 facility is present, else load/and/store.  */
static DisasJumpType op_ni(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
1561
/* Branch and save: store link info in out, then branch to in2 if it is
   non-null (a zero branch register means no branch).  */
static DisasJumpType op_bas(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}
1573
/* Build BAL-style link information in o->out.  In 31/64-bit mode this is
   the ordinary link info; in 24-bit mode the high byte of the low word is
   ILC (bits 30-31), CC (bits 28-29) and the program mask from the PSW
   (bits 24-27), above the 24-bit return address.  */
static void save_link_info(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t;

    if (s->base.tb->flags & (FLAG_MASK_32 | FLAG_MASK_64)) {
        pc_to_link_info(o->out, s, s->pc_tmp);
        return;
    }
    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, 0xffffffff00000000ull);
    tcg_gen_ori_i64(o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp);
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 16);
    tcg_gen_andi_i64(t, t, 0x0f000000);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_gen_extu_i32_i64(t, cc_op);
    tcg_gen_shli_i64(t, t, 28);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);
}
1594
/* Branch and link: like op_bas, but with BAL-format link info.  */
static DisasJumpType op_bal(DisasContext *s, DisasOps *o)
{
    save_link_info(s, o);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}
1606
/* Branch relative and save: store link info, branch to pc + 2*i2.  */
static DisasJumpType op_basi(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    return help_goto_direct(s, s->base.pc_next + 2 * get_field(s, i2));
}
1612
/* Branch on condition (BC/BCR/BRC forms): branch per mask m1; the
   no-branch BCR encodings with m1 = 14/15 act as memory barriers.  */
static DisasJumpType op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s, m1);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;

    /* BCR with R2 = 0 causes no branching */
    if (have_field(s, r2) && get_field(s, r2) == 0) {
        if (m1 == 14) {
            /* Perform serialization */
            /* FIXME: check for fast-BCR-serialization facility */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        if (m1 == 15) {
            /* Perform serialization */
            /* FIXME: perform checkpoint-synchronisation */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        return DISAS_NEXT;
    }

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1638
/* Branch on count, 32-bit: decrement the low word of r1 and branch
   if the result is non-zero.  */
static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1662
/* Branch on count high: decrement the HIGH word of r1 and branch if
   non-zero.  Immediate-only form (is_imm is hardwired to 1 below).  */
static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int imm = get_field(s, i2);
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, regs[r1], 32);
    tcg_gen_subi_i64(t, t, 1);
    store_reg32h_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, 1, imm, o->in2);
}
1686
/* Branch on count, 64-bit: decrement r1 in place (hence g1 = true,
   the compare operand is the global register) and branch if non-zero.  */
static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1705
/* Branch on index, 32-bit (presumably BXH/BXLE — insn->data selects
   LE vs GT): r1 += r3, then compare the low word of r1 against the low
   word of the odd register of the r3 pair.  */
static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1731
/* Branch on index, 64-bit.  When r1 aliases the odd register of the r3
   pair, its pre-update value must be copied first, since the add below
   would clobber the comparand.  */
static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    if (r1 == (r3 | 1)) {
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1757
/* Compare and branch (relative or register-indirect target).  m3 picks
   the comparison via ltgt_cond[]; insn->data selects the unsigned
   variant.  Both compare operands are caller-owned (g1 = g2 = true).  */
static DisasJumpType op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s, i4);
    if (is_imm) {
        imm = get_field(s, i4);
    } else {
        imm = 0;
        o->out = get_address(s, 0, get_field(s, b4),
                             get_field(s, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}
1783
/* BFP compare, 32-bit: helper sets the CC directly.  */
static DisasJumpType op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* BFP compare, 64-bit.  */
static DisasJumpType op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* BFP compare, 128-bit (out/out2 and in1/in2 are the two halves).  */
static DisasJumpType op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
1804
/* Extract and validate the m3 (rounding mode) and m4 fields of an FP
   instruction, packed as m3 | m4 << 4 in a new i32 constant.  Fields
   not available without the floating-point-extension facility are
   forced to zero.  Returns NULL (after raising a specification
   exception) for an invalid rounding mode.  */
static TCGv_i32 fpinst_extract_m34(DisasContext *s, bool m3_with_fpe,
                                   bool m4_with_fpe)
{
    const bool fpe = s390_has_feat(S390_FEAT_FLOATING_POINT_EXT);
    uint8_t m3 = get_field(s, m3);
    uint8_t m4 = get_field(s, m4);

    /* m3 field was introduced with FPE */
    if (!fpe && m3_with_fpe) {
        m3 = 0;
    }
    /* m4 field was introduced with FPE */
    if (!fpe && m4_with_fpe) {
        m4 = 0;
    }

    /* Check for valid rounding modes. Mode 3 was introduced later. */
    if (m3 == 2 || m3 > 7 || (!fpe && m3 == 3)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return NULL;
    }

    return tcg_const_i32(deposit32(m3, 4, 4, m4));
}
1829
/* Convert BFP to signed integer (32/64-bit results from 32/64/128-bit
   sources).  m34 packs the rounding-mode fields; each op sets CC from
   the source value afterwards.  */

/* float32 -> int32 */
static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

/* float64 -> int32 */
static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

/* float128 (in1:in2) -> int32 */
static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}

/* float32 -> int64 */
static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

/* float64 -> int64 */
static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

/* float128 (in1:in2) -> int64 */
static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}
1907
/* Convert BFP to unsigned (logical) integer; same pattern as the signed
   conversions above, via the clf*/clg* helpers.  */

/* float32 -> uint32 */
static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

/* float64 -> uint32 */
static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

/* float128 (in1:in2) -> uint32 */
static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}

/* float32 -> uint64 */
static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

/* float64 -> uint64 */
static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

/* float128 (in1:in2) -> uint64 */
static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}
1985
1986static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
1987{
1988    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
1989
1990    if (!m34) {
1991        return DISAS_NORETURN;
1992    }
1993    gen_helper_cegb(o->out, cpu_env, o->in2, m34);
1994    tcg_temp_free_i32(m34);
1995    return DISAS_NEXT;
1996}
1997
1998static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
1999{
2000    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2001
2002    if (!m34) {
2003        return DISAS_NORETURN;
2004    }
2005    gen_helper_cdgb(o->out, cpu_env, o->in2, m34);
2006    tcg_temp_free_i32(m34);
2007    return DISAS_NEXT;
2008}
2009
2010static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
2011{
2012    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2013
2014    if (!m34) {
2015        return DISAS_NORETURN;
2016    }
2017    gen_helper_cxgb(o->out, cpu_env, o->in2, m34);
2018    tcg_temp_free_i32(m34);
2019    return_low128(o->out2);
2020    return DISAS_NEXT;
2021}
2022
2023static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
2024{
2025    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
2026
2027    if (!m34) {
2028        return DISAS_NORETURN;
2029    }
2030    gen_helper_celgb(o->out, cpu_env, o->in2, m34);
2031    tcg_temp_free_i32(m34);
2032    return DISAS_NEXT;
2033}
2034
/* CONVERT FROM LOGICAL to long BFP.  */
static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}
2046
/* CONVERT FROM LOGICAL to extended BFP; low half comes back via retxl.  */
static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxlgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return_low128(o->out2);
    return DISAS_NEXT;
}
2059
/*
 * CHECKSUM: the helper computes the checksum and the number of bytes
 * consumed (returned in 'len'); R2/R2+1 (address/length pair) are then
 * advanced/decremented by that count.  The checksum itself comes back
 * through the retxl slot into o->out.
 */
static DisasJumpType op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    return_low128(o->out);

    /* Consume the processed bytes from the R2 address/length pair.  */
    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);

    return DISAS_NEXT;
}
2075
/*
 * COMPARE LOGICAL (character): for the power-of-two lengths 1/2/4/8 the
 * comparison is inlined as two loads plus a dynamic CC computation;
 * any other length falls back to the byte-loop helper with a static CC.
 * Note l1 encodes length-1, hence the switch on l + 1.
 */
static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s, l1);
    TCGv_i32 vl;

    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        /* Arbitrary length: helper does the byte-wise compare.  */
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return DISAS_NEXT;
    }
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return DISAS_NEXT;
}
2108
/* COMPARE LOGICAL LONG: both register pairs must be even-numbered, else
   a specification exception; the helper does all the work.  */
static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i32 t1, t2;

    /* r1 and r2 must be even.  */
    if (r1 & 1 || r2 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t2 = tcg_const_i32(r2);
    gen_helper_clcl(cc_op, cpu_env, t1, t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    set_cc_static(s);
    return DISAS_NEXT;
}
2129
/* COMPARE LOGICAL LONG EXTENDED: like CLCL but with an R3 pair and a
   padding-byte operand (o->in2).  */
static DisasJumpType op_clcle(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even.  */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return DISAS_NEXT;
}
2150
/* COMPARE LOGICAL LONG UNICODE: identical structure to op_clcle with the
   two-byte-unit helper.  */
static DisasJumpType op_clclu(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even.  */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return DISAS_NEXT;
}
2171
/* COMPARE LOGICAL CHARACTERS UNDER MASK: helper compares the m3-selected
   bytes of the (truncated 32-bit) register against storage.  */
static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, o->in1);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(m3);
    return DISAS_NEXT;
}
2183
/* COMPARE LOGICAL STRING: helper scans up to the terminator in regs[0];
   updated second address comes back via retxl into o->in2.  */
static DisasJumpType op_clst(DisasContext *s, DisasOps *o)
{
    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return DISAS_NEXT;
}
2191
/* COPY SIGN: out = (in2 with its sign bit replaced by in1's sign bit).  */
static DisasJumpType op_cps(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t = tcg_temp_new_i64();
    tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);   /* sign of in1 */
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull); /* magnitude */
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);
    return DISAS_NEXT;
}
2201
/*
 * COMPARE AND SWAP (CS/CSY/CSG): implemented with TCG's atomic cmpxchg.
 * CC is 0 on successful exchange, 1 on mismatch, produced directly by
 * the NE setcond below.
 */
static DisasJumpType op_cs(DisasContext *s, DisasOps *o)
{
    int d2 = get_field(s, d2);
    int b2 = get_field(s, b2);
    TCGv_i64 addr, cc;

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value).  */

    addr = get_address(s, 0, b2, d2);
    tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
                               get_mem_index(s), s->insn->data | MO_ALIGN);
    tcg_temp_free_i64(addr);

    /* Are the memory and expected values (un)equal?  Note that this setcond
       produces the output CC value, thus the NE sense of the test.  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
    tcg_gen_extrl_i64_i32(cc_op, cc);
    tcg_temp_free_i64(cc);
    set_cc_static(s);

    return DISAS_NEXT;
}
2226
/*
 * COMPARE DOUBLE AND SWAP (CDSG, 128-bit): serial context uses the plain
 * helper; parallel context needs host cmpxchg16 support, otherwise we
 * punt to exit_atomic and restart the instruction exclusively.
 *
 * NOTE(review): the PoO requires r1 and r3 to be even (specification
 * exception otherwise); no check is visible here — presumably enforced
 * in the helpers or decode tables.  TODO: confirm.
 */
static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    int d2 = get_field(s, d2);
    int b2 = get_field(s, b2);
    DisasJumpType ret = DISAS_NEXT;
    TCGv_i64 addr;
    TCGv_i32 t_r1, t_r3;

    /* Note that R1:R1+1 = expected value and R3:R3+1 = new value.  */
    addr = get_address(s, 0, b2, d2);
    t_r1 = tcg_const_i32(r1);
    t_r3 = tcg_const_i32(r3);
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
    } else if (HAVE_CMPXCHG128) {
        gen_helper_cdsg_parallel(cpu_env, addr, t_r1, t_r3);
    } else {
        gen_helper_exit_atomic(cpu_env);
        ret = DISAS_NORETURN;
    }
    tcg_temp_free_i64(addr);
    tcg_temp_free_i32(t_r1);
    tcg_temp_free_i32(t_r3);

    set_cc_static(s);
    return ret;
}
2256
/* COMPARE AND SWAP AND STORE: helper pair selected by whether we are
   translating for a parallel context.  */
static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    TCGv_i32 t_r3 = tcg_const_i32(r3);

    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        gen_helper_csst_parallel(cc_op, cpu_env, t_r3, o->addr1, o->in2);
    } else {
        gen_helper_csst(cc_op, cpu_env, t_r3, o->addr1, o->in2);
    }
    tcg_temp_free_i32(t_r3);

    set_cc_static(s);
    return DISAS_NEXT;
}
2272
2273#ifndef CONFIG_USER_ONLY
/*
 * COMPARE AND SWAP AND PURGE (privileged): cmpxchg on the masked address,
 * then — only if the exchange succeeded AND bit 63 of R2 is set — flush
 * the TLB on all CPUs via the purge helper.
 */
static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
{
    MemOp mop = s->insn->data;
    TCGv_i64 addr, old, cc;
    TCGLabel *lab = gen_new_label();

    /* Note that in1 = R1 (zero-extended expected value),
       out = R1 (original reg), out2 = R1+1 (new value).  */

    addr = tcg_temp_new_i64();
    old = tcg_temp_new_i64();
    /* Mask the address down to the operand's natural alignment.  */
    tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
    tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
                               get_mem_index(s), mop | MO_ALIGN);
    tcg_temp_free_i64(addr);

    /* Are the memory and expected values (un)equal?  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
    tcg_gen_extrl_i64_i32(cc_op, cc);

    /* Write back the output now, so that it happens before the
       following branch, so that we don't need local temps.  */
    if ((mop & MO_SIZE) == MO_32) {
        tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
    } else {
        tcg_gen_mov_i64(o->out, old);
    }
    tcg_temp_free_i64(old);

    /* If the comparison was equal, and the LSB of R2 was set,
       then we need to flush the TLB (for all cpus).  */
    tcg_gen_xori_i64(cc, cc, 1);
    tcg_gen_and_i64(cc, cc, o->in2);
    tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
    tcg_temp_free_i64(cc);

    gen_helper_purge(cpu_env);
    gen_set_label(lab);

    return DISAS_NEXT;
}
2316#endif
2317
/* CONVERT TO DECIMAL: helper packs the 32-bit value into an 8-byte
   packed-decimal result, which is then stored at the second-operand
   address.  */
static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i32 t2 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t2, o->in1);
    gen_helper_cvd(t1, t2);
    tcg_temp_free_i32(t2);
    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
    tcg_temp_free_i64(t1);
    return DISAS_NEXT;
}
2329
/*
 * COMPARE AND TRAP: branch AROUND the trap when the inverted m3 condition
 * holds; insn->data selects the unsigned (logical) comparison variants.
 */
static DisasJumpType op_ct(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    TCGLabel *lab = gen_new_label();
    TCGCond c;

    /* Inverted: the branch skips the trap when the trap condition fails. */
    c = tcg_invert_cond(ltgt_cond[m3]);
    if (s->insn->data) {
        c = tcg_unsigned_cond(c);
    }
    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);

    /* Trap.  */
    gen_trap(s);

    gen_set_label(lab);
    return DISAS_NEXT;
}
2348
/*
 * CONVERT UTF: common front end for CU12/CU14/CU21/CU24/CU41/CU42; the
 * two-digit insn->data value encodes source/destination format and picks
 * the helper.  R1 and R2 must be even register pairs.  The m3
 * well-formedness-check flag is only honored with the ETF3 enhancement
 * facility; otherwise it is forced to zero.
 */
static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i32 tr1, tr2, chk;

    /* R1 and R2 must both be even.  */
    if ((r1 | r2) & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
        m3 = 0;
    }

    tr1 = tcg_const_i32(r1);
    tr2 = tcg_const_i32(r2);
    chk = tcg_const_i32(m3);

    switch (s->insn->data) {
    case 12:
        gen_helper_cu12(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 14:
        gen_helper_cu14(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 21:
        gen_helper_cu21(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 24:
        gen_helper_cu24(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 41:
        gen_helper_cu41(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 42:
        gen_helper_cu42(cc_op, cpu_env, tr1, tr2, chk);
        break;
    default:
        g_assert_not_reached();
    }

    tcg_temp_free_i32(tr1);
    tcg_temp_free_i32(tr2);
    tcg_temp_free_i32(chk);
    set_cc_static(s);
    return DISAS_NEXT;
}
2398
2399#ifndef CONFIG_USER_ONLY
/* DIAGNOSE (privileged, system-mode only): hypercall dispatch with the
   function code taken from the i2 immediate.  */
static DisasJumpType op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    TCGv_i32 func_code = tcg_const_i32(get_field(s, i2));

    gen_helper_diag(cpu_env, r1, r3, func_code);

    tcg_temp_free_i32(func_code);
    tcg_temp_free_i32(r3);
    tcg_temp_free_i32(r1);
    return DISAS_NEXT;
}
2413#endif
2414
/* DIVIDE (32-bit signed): quotient in o->out2 from the helper,
   remainder retrieved via retxl into o->out.  */
static DisasJumpType op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}
2421
/* DIVIDE LOGICAL (32-bit unsigned): same retxl pattern as op_divs32.  */
static DisasJumpType op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}
2428
/* DIVIDE SINGLE (64-bit signed): quotient in o->out2, remainder via
   retxl into o->out.  */
static DisasJumpType op_divs64(DisasContext *s, DisasOps *o)
{
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}
2435
/* DIVIDE LOGICAL (128/64): the 128-bit dividend is passed as the
   o->out:o->out2 high:low pair; remainder via retxl into o->out.  */
static DisasJumpType op_divu64(DisasContext *s, DisasOps *o)
{
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}
2442
/* DIVIDE (short BFP).  */
static DisasJumpType op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
2448
/* DIVIDE (long BFP).  */
static DisasJumpType op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
2454
/* DIVIDE (extended BFP): 128-bit operands passed as high:low pairs;
   low half of the result via retxl.  */
static DisasJumpType op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
2461
/* EXTRACT ACCESS REGISTER: zero-extending 32-bit load from aregs[r2].  */
static DisasJumpType op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
    return DISAS_NEXT;
}
2468
/* EXTRACT CPU ATTRIBUTE: we model no cache topology, so return all-ones.  */
static DisasJumpType op_ecag(DisasContext *s, DisasOps *o)
{
    /* No cache information provided.  */
    tcg_gen_movi_i64(o->out, -1);
    return DISAS_NEXT;
}
2475
/* EXTRACT FPC: zero-extending load of the FP control register.  */
static DisasJumpType op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return DISAS_NEXT;
}
2481
/* EXTRACT PSW: high word of the PSW mask into R1's low 32 bits; low word
   into R2 (when R2 != 0).  */
static DisasJumpType op_epsw(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i64 t = tcg_temp_new_i64();

    /* Note the "subsequently" in the PoO, which implies a defined result
       if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
    tcg_gen_shri_i64(t, psw_mask, 32);
    store_reg32_i64(r1, t);
    if (r2 != 0) {
        store_reg32_i64(r2, psw_mask);
    }

    tcg_temp_free_i64(t);
    return DISAS_NEXT;
}
2499
/*
 * EXECUTE: run the helper that fetches and stages the target instruction
 * (modified by bits of R1 unless R1 == 0).  PSW address and CC are
 * synced first because the executed instruction is translated in a
 * fresh, single-instruction TB.  Nested EXECUTE is architecturally
 * forbidden.
 */
static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    TCGv_i32 ilen;
    TCGv_i64 v1;

    /* Nested EXECUTE is not allowed.  */
    if (unlikely(s->ex_value)) {
        gen_program_exception(s, PGM_EXECUTE);
        return DISAS_NORETURN;
    }

    update_psw_addr(s);
    update_cc_op(s);

    /* R1 == 0 means "no modification"; use a zero constant.  */
    if (r1 == 0) {
        v1 = tcg_const_i64(0);
    } else {
        v1 = regs[r1];
    }

    ilen = tcg_const_i32(s->ilen);
    gen_helper_ex(cpu_env, ilen, v1, o->in2);
    tcg_temp_free_i32(ilen);

    /* Only free v1 when we allocated it; regs[] temps are global.  */
    if (r1 == 0) {
        tcg_temp_free_i64(v1);
    }

    return DISAS_PC_CC_UPDATED;
}
2531
/* LOAD FP INTEGER (short BFP): round to integer in the same format;
   m34 carries the rounding mode / inexact-suppression fields.  */
static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fieb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}
2543
/* LOAD FP INTEGER (long BFP).  */
static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fidb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}
2555
/* LOAD FP INTEGER (extended BFP): 128-bit result, low half via retxl.  */
static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m34);
    return_low128(o->out2);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}
2568
/*
 * FIND LEFTMOST ONE: R1 = number of leading zeros (64 when the input is
 * zero), R1+1 = input with the found bit cleared.  CC is computed from
 * the original input via CC_OP_FLOGR.
 */
static DisasJumpType op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation.  */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64.  */
    tcg_gen_clzi_i64(o->out, o->in2, 64);

    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing.  */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return DISAS_NEXT;
}
2588
2589static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
2590{
2591    int m3 = get_field(s, m3);
2592    int pos, len, base = s->insn->data;
2593    TCGv_i64 tmp = tcg_temp_new_i64();
2594    uint64_t ccm;
2595
2596    switch (m3) {
2597    case 0xf:
2598        /* Effectively a 32-bit load.  */
2599        tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2600        len = 32;
2601        goto one_insert;
2602
2603    case 0xc:
2604    case 0x6:
2605    case 0x3:
2606        /* Effectively a 16-bit load.  */
2607        tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2608        len = 16;
2609        goto one_insert;
2610
2611    case 0x8:
2612    case 0x4:
2613    case 0x2:
2614    case 0x1:
2615        /* Effectively an 8-bit load.  */
2616        tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2617        len = 8;
2618        goto one_insert;
2619
2620    one_insert:
2621        pos = base + ctz32(m3) * 8;
2622        tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2623        ccm = ((1ull << len) - 1) << pos;
2624        break;
2625
2626    default:
2627        /* This is going to be a sequence of loads and inserts.  */
2628        pos = base + 32 - 8;
2629        ccm = 0;
2630        while (m3) {
2631            if (m3 & 0x8) {
2632                tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2633                tcg_gen_addi_i64(o->in2, o->in2, 1);
2634                tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2635                ccm |= 0xff << pos;
2636            }
2637            m3 = (m3 << 1) & 0xf;
2638            pos -= 8;
2639        }
2640        break;
2641    }
2642
2643    tcg_gen_movi_i64(tmp, ccm);
2644    gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2645    tcg_temp_free_i64(tmp);
2646    return DISAS_NEXT;
2647}
2648
/* Generic immediate insert: shift and size are packed into insn->data
   as (size << 8) | shift.  */
static DisasJumpType op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return DISAS_NEXT;
}
2656
/*
 * INSERT PROGRAM MASK: build the byte (CC << 4) | program-mask — the
 * program mask comes from PSW mask bits 40-43, the CC from the
 * materialized cc_op — and deposit it into bits 24-31 of R1.
 */
static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    gen_op_calc_cc(s);
    t1 = tcg_temp_new_i64();
    tcg_gen_extract_i64(t1, psw_mask, 40, 4);
    t2 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(t2, cc_op);
    tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
    tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return DISAS_NEXT;
}
2672
2673#ifndef CONFIG_USER_ONLY
/* INVALIDATE DAT TABLE ENTRY (privileged): the m4 local-clearing hint is
   only meaningful with the local-TLB-clearing facility.  */
static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m4;

    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
        m4 = tcg_const_i32(get_field(s, m4));
    } else {
        m4 = tcg_const_i32(0);
    }
    gen_helper_idte(cpu_env, o->in1, o->in2, m4);
    tcg_temp_free_i32(m4);
    return DISAS_NEXT;
}
2687
/* INVALIDATE PAGE TABLE ENTRY (privileged): same m4 handling as IDTE.  */
static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m4;

    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
        m4 = tcg_const_i32(get_field(s, m4));
    } else {
        m4 = tcg_const_i32(0);
    }
    gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
    tcg_temp_free_i32(m4);
    return DISAS_NEXT;
}
2701
/* INSERT STORAGE KEY EXTENDED (privileged).  */
static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
{
    gen_helper_iske(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}
2707#endif
2708
2709static DisasJumpType op_msa(DisasContext *s, DisasOps *o)
2710{
2711    int r1 = have_field(s, r1) ? get_field(s, r1) : 0;
2712    int r2 = have_field(s, r2) ? get_field(s, r2) : 0;
2713    int r3 = have_field(s, r3) ? get_field(s, r3) : 0;
2714    TCGv_i32 t_r1, t_r2, t_r3, type;
2715
2716    switch (s->insn->data) {
2717    case S390_FEAT_TYPE_KMA:
2718        if (r3 == r1 || r3 == r2) {
2719            gen_program_exception(s, PGM_SPECIFICATION);
2720            return DISAS_NORETURN;
2721        }
2722        /* FALL THROUGH */
2723    case S390_FEAT_TYPE_KMCTR:
2724        if (r3 & 1 || !r3) {
2725            gen_program_exception(s, PGM_SPECIFICATION);
2726            return DISAS_NORETURN;
2727        }
2728        /* FALL THROUGH */
2729    case S390_FEAT_TYPE_PPNO:
2730    case S390_FEAT_TYPE_KMF:
2731    case S390_FEAT_TYPE_KMC:
2732    case S390_FEAT_TYPE_KMO:
2733    case S390_FEAT_TYPE_KM:
2734        if (r1 & 1 || !r1) {
2735            gen_program_exception(s, PGM_SPECIFICATION);
2736            return DISAS_NORETURN;
2737        }
2738        /* FALL THROUGH */
2739    case S390_FEAT_TYPE_KMAC:
2740    case S390_FEAT_TYPE_KIMD:
2741    case S390_FEAT_TYPE_KLMD:
2742        if (r2 & 1 || !r2) {
2743            gen_program_exception(s, PGM_SPECIFICATION);
2744            return DISAS_NORETURN;
2745        }
2746        /* FALL THROUGH */
2747    case S390_FEAT_TYPE_PCKMO:
2748    case S390_FEAT_TYPE_PCC:
2749        break;
2750    default:
2751        g_assert_not_reached();
2752    };
2753
2754    t_r1 = tcg_const_i32(r1);
2755    t_r2 = tcg_const_i32(r2);
2756    t_r3 = tcg_const_i32(r3);
2757    type = tcg_const_i32(s->insn->data);
2758    gen_helper_msa(cc_op, cpu_env, t_r1, t_r2, t_r3, type);
2759    set_cc_static(s);
2760    tcg_temp_free_i32(t_r1);
2761    tcg_temp_free_i32(t_r2);
2762    tcg_temp_free_i32(t_r3);
2763    tcg_temp_free_i32(type);
2764    return DISAS_NEXT;
2765}
2766
/* COMPARE AND SIGNAL (short BFP): signaling compare, CC from helper.  */
static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
{
    gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
2773
/* COMPARE AND SIGNAL (long BFP).  */
static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
{
    gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
2780
/* COMPARE AND SIGNAL (extended BFP): 128-bit operands as high:low pairs.  */
static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
{
    gen_helper_kxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
2787
/* LOAD AND ADD: atomic fetch-add; the fetched (original) memory value
   lands in o->in2 and becomes the architected R1 result.  */
static DisasJumpType op_laa(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory.  */
    tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the addition for setting CC.  */
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
2798
/* LOAD AND AND: atomic fetch-and; original memory value is the result.  */
static DisasJumpType op_lan(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory.  */
    tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
2809
/* LOAD AND OR: atomic fetch-or; original memory value is the result.  */
static DisasJumpType op_lao(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory.  */
    tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
2820
/* LOAD AND EXCLUSIVE OR: atomic fetch-xor; original memory value is the
   result.  */
static DisasJumpType op_lax(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory.  */
    tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
2831
/* LOAD LENGTHENED short BFP -> long BFP.  */
static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}
2837
/* LOAD ROUNDED long BFP -> short BFP, with m3/m4 rounding fields.  */
static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_ledb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}
2849
/* LOAD ROUNDED extended BFP -> long BFP.  */
static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}
2861
/* LOAD ROUNDED extended BFP -> short BFP.  */
static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_lexb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}
2873
/* LOAD LENGTHENED long BFP -> extended BFP; low half via retxl.  */
static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
2880
/* LOAD LENGTHENED short BFP -> extended BFP; low half via retxl.  */
static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
2887
/* Place a 32-bit value into the high half of the 64-bit FP register
   image (short FP values live in the upper word).  */
static DisasJumpType op_lde(DisasContext *s, DisasOps *o)
{
    tcg_gen_shli_i64(o->out, o->in2, 32);
    return DISAS_NEXT;
}
2893
/* LOAD LOGICAL THIRTY ONE BITS: mask to the low 31 bits.  */
static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return DISAS_NEXT;
}
2899
/* Sign-extending 8-bit load from o->in2.  */
static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
2905
/* Zero-extending 8-bit load from o->in2.  */
static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
2911
/* Sign-extending 16-bit load from o->in2.  */
static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
2917
/* Zero-extending 16-bit load from o->in2.  */
static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
2923
/* Sign-extending 32-bit load from o->in2.  */
static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
2929
/* Zero-extending 32-bit load from o->in2.  */
static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
2935
/* 64-bit load from o->in2.  */
static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
2941
/* LOAD AND TRAP (32-bit): store the loaded word into R1, then trap if
   it was zero.  */
static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32_i64(get_field(s, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
2952
/* LOAD AND TRAP (64-bit): load into the output, then trap if zero.  */
static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
2963
/* LOAD HIGH AND TRAP: store into the high half of R1, trap if zero.  */
static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32h_i64(get_field(s, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
2974
/* LOAD LOGICAL AND TRAP (32->64 zero-extended): trap if loaded zero.  */
static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
2985
/* LOAD LOGICAL THIRTY ONE BITS AND TRAP: mask to 31 bits, trap if zero.  */
static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
2996
/*
 * LOAD ON CONDITION: out = (m3 condition holds) ? in2 : in1, generated
 * branch-free with movcond.  32-bit comparisons go through a setcond
 * and widening first so the final movcond is always 64-bit.
 */
static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;

    disas_jcc(s, &c, get_field(s, m3));

    if (c.is_64) {
        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
                            o->in2, o->in1);
        free_compare(&c);
    } else {
        TCGv_i32 t32 = tcg_temp_new_i32();
        TCGv_i64 t, z;

        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
        free_compare(&c);

        /* Widen the 32-bit condition result for the 64-bit movcond.  */
        t = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t, t32);
        tcg_temp_free_i32(t32);

        z = tcg_const_i64(0);
        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
        tcg_temp_free_i64(t);
        tcg_temp_free_i64(z);
    }

    return DISAS_NEXT;
}
3026
3027#ifndef CONFIG_USER_ONLY
/* LOAD CONTROL (32-bit, privileged): helper loads CRs r1..r3 from
   storage; then force an exit to the main loop since control register
   changes can affect interrupt delivery.  */
static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_lctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return DISAS_PC_STALE_NOCHAIN;
}
3038
/* LOAD CONTROL (64-bit, privileged): as op_lctl but full-width CRs.  */
static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return DISAS_PC_STALE_NOCHAIN;
}
3049
/* LOAD REAL ADDRESS (privileged): DAT translation done in the helper,
   CC set from the translation outcome.  */
static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
{
    gen_helper_lra(o->out, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3056
/* LOAD PROGRAM PARAMETER (privileged): store operand into env->pp.  */
static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
{
    tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
    return DISAS_NEXT;
}
3062
/*
 * LOAD PSW (privileged): load the 8-byte short-format PSW (doubleword
 * aligned), widen the 32-bit mask half to the 64-bit internal format,
 * and install it.  Control never falls through — DISAS_NORETURN.
 */
static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    /* First word is the PSW mask; MO_ALIGN_8 enforces doubleword
       alignment of the whole operand.  */
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
                        MO_TEUL | MO_ALIGN_8);
    tcg_gen_addi_i64(o->in2, o->in2, 4);
    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK.  */
    tcg_gen_shli_i64(t1, t1, 32);
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return DISAS_NORETURN;
}
3082
static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
{
    /*
     * LOAD PSW EXTENDED: load a 16-byte z-format PSW (mask then address)
     * from o->in2 and install it; never falls through.
     */
    TCGv_i64 t1, t2;

    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    /* First doubleword (the mask) must be doubleword aligned. */
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
                        MO_TEQ | MO_ALIGN_8);
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return DISAS_NORETURN;
}
3100#endif
3101
3102static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
3103{
3104    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
3105    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
3106    gen_helper_lam(cpu_env, r1, o->in2, r3);
3107    tcg_temp_free_i32(r1);
3108    tcg_temp_free_i32(r3);
3109    return DISAS_NEXT;
3110}
3111
static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
{
    /*
     * LOAD MULTIPLE (32-bit): load words into r1..r3 (wrapping past r15)
     * starting at o->in2.  The first and last registers are loaded first
     * so any page fault occurs before partial register state is committed.
     */
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t1);
        tcg_temp_free(t1);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
    store_reg32_i64(r1, t1);
    store_reg32_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t2);
        tcg_temp_free(t1);
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t1);
    }
    tcg_temp_free(t2);
    tcg_temp_free(t1);

    return DISAS_NEXT;
}
3157
static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
{
    /*
     * LOAD MULTIPLE HIGH: like op_lm32, but the loaded words go into the
     * high halves of r1..r3.  First/last are loaded up-front so a page
     * fault fires before any register is modified.
     */
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t1);
        tcg_temp_free(t1);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
    store_reg32h_i64(r1, t1);
    store_reg32h_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t2);
        tcg_temp_free(t1);
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t1);
    }
    tcg_temp_free(t2);
    tcg_temp_free(t1);

    return DISAS_NEXT;
}
3203
static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
{
    /*
     * LOAD MULTIPLE (64-bit): load doublewords into r1..r3 (wrapping past
     * r15) from o->in2.  First and last registers are loaded before any
     * register is committed, so a page fault leaves state intact.
     */
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
    tcg_gen_mov_i64(regs[r1], t1);
    tcg_temp_free(t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t1);
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t1, 8);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t1);
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
    }
    tcg_temp_free(t1);

    return DISAS_NEXT;
}
3244
static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
{
    /*
     * LOAD PAIR DISJOINT: load two operands "interlocked".  We cannot do
     * that with two ordinary loads under MTTCG, so in a parallel context
     * we punt to the exclusive-execution slow path.
     */
    TCGv_i64 a1, a2;
    MemOp mop = s->insn->data;

    /* In a parallel context, stop the world and single step.  */
    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        update_psw_addr(s);
        update_cc_op(s);
        gen_exception(EXCP_ATOMIC);
        return DISAS_NORETURN;
    }

    /* In a serial context, perform the two loads ... */
    a1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
    a2 = get_address(s, 0, get_field(s, b2), get_field(s, d2));
    tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
    tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
    tcg_temp_free_i64(a1);
    tcg_temp_free_i64(a2);

    /* ... and indicate that we performed them while interlocked.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}
3270
static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
{
    /*
     * LOAD PAIR FROM QUADWORD: 16-byte atomic load.  Serial contexts use
     * the plain helper; parallel contexts need host 128-bit atomics, and
     * fall back to the stop-the-world EXCP_ATOMIC path without them.
     */
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        gen_helper_lpq(o->out, cpu_env, o->in2);
    } else if (HAVE_ATOMIC128) {
        gen_helper_lpq_parallel(o->out, cpu_env, o->in2);
    } else {
        gen_helper_exit_atomic(cpu_env);
        return DISAS_NORETURN;
    }
    return_low128(o->out2);
    return DISAS_NEXT;
}
3284
3285#ifndef CONFIG_USER_ONLY
static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
{
    /* LOAD USING REAL ADDRESS: bypass DAT via MMU_REAL_IDX; size in insn->data. */
    tcg_gen_qemu_ld_tl(o->out, o->in2, MMU_REAL_IDX, s->insn->data);
    return DISAS_NEXT;
}
3291#endif
3292
static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o)
{
    /* LOAD AND ZERO RIGHTMOST BYTE: clear the low 8 bits of the operand. */
    tcg_gen_andi_i64(o->out, o->in2, -256);
    return DISAS_NEXT;
}
3298
static DisasJumpType op_lcbb(DisasContext *s, DisasOps *o)
{
    /*
     * LOAD COUNT TO BLOCK BOUNDARY: out = min(16, bytes remaining until
     * the next block boundary of size 64 << m3).  m3 > 6 is invalid.
     */
    const int64_t block_size = (1ull << (get_field(s, m3) + 6));

    if (get_field(s, m3) > 6) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    /* -(addr | -block_size) == block_size - (addr % block_size). */
    tcg_gen_ori_i64(o->addr1, o->addr1, -block_size);
    tcg_gen_neg_i64(o->addr1, o->addr1);
    tcg_gen_movi_i64(o->out, 16);
    tcg_gen_umin_i64(o->out, o->out, o->addr1);
    gen_op_update1_cc_i64(s, CC_OP_LCBB, o->out);
    return DISAS_NEXT;
}
3315
static DisasJumpType op_mc(DisasContext *s, DisasOps *o)
{
    /*
     * MONITOR CALL: the monitor class in i2 must fit in 8 bits.  System
     * emulation routes it to the monitor-call helper; user mode treats it
     * as a no-op.
     */
#if !defined(CONFIG_USER_ONLY)
    TCGv_i32 i2;
#endif
    const uint16_t monitor_class = get_field(s, i2);

    if (monitor_class & 0xff00) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

#if !defined(CONFIG_USER_ONLY)
    i2 = tcg_const_i32(monitor_class);
    gen_helper_monitor_call(cpu_env, o->addr1, i2);
    tcg_temp_free_i32(i2);
#endif
    /* Defaults to a NOP. */
    return DISAS_NEXT;
}
3336
static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
{
    /* Move in2 to out by stealing the temp (no copy); in2 is consumed. */
    o->out = o->in2;
    o->g_out = o->g_in2;
    o->in2 = NULL;
    o->g_in2 = false;
    return DISAS_NEXT;
}
3345
static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
{
    /*
     * MVCDK/MVCSK-style move with access-register 1 setup: steal in2 as
     * the result, then set AR1 according to the current address-space
     * control so the helper resolves the right space.
     */
    int b2 = get_field(s, b2);
    TCGv ar1 = tcg_temp_new_i64();

    o->out = o->in2;
    o->g_out = o->g_in2;
    o->in2 = NULL;
    o->g_in2 = false;

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 0);
        break;
    case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 1);
        break;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        /* In access-register mode with b2 == 0, AR0 reads as zero. */
        if (b2) {
            tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
        } else {
            tcg_gen_movi_i64(ar1, 0);
        }
        break;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 2);
        break;
    }

    tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
    tcg_temp_free_i64(ar1);

    return DISAS_NEXT;
}
3380
static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
{
    /* Move the 128-bit pair (in1, in2) to (out, out2) by stealing temps. */
    o->out = o->in1;
    o->out2 = o->in2;
    o->g_out = o->g_in1;
    o->g_out2 = o->g_in2;
    o->in1 = NULL;
    o->in2 = NULL;
    o->g_in1 = o->g_in2 = false;
    return DISAS_NEXT;
}
3392
static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
{
    /* MOVE (character): copy l1+1 bytes from in2 to addr1 via helper. */
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}
3400
static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
{
    /* MOVE INVERSE: copy bytes with reversed order via helper. */
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}
3408
3409static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
3410{
3411    int r1 = get_field(s, r1);
3412    int r2 = get_field(s, r2);
3413    TCGv_i32 t1, t2;
3414
3415    /* r1 and r2 must be even.  */
3416    if (r1 & 1 || r2 & 1) {
3417        gen_program_exception(s, PGM_SPECIFICATION);
3418        return DISAS_NORETURN;
3419    }
3420
3421    t1 = tcg_const_i32(r1);
3422    t2 = tcg_const_i32(r2);
3423    gen_helper_mvcl(cc_op, cpu_env, t1, t2);
3424    tcg_temp_free_i32(t1);
3425    tcg_temp_free_i32(t2);
3426    set_cc_static(s);
3427    return DISAS_NEXT;
3428}
3429
3430static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
3431{
3432    int r1 = get_field(s, r1);
3433    int r3 = get_field(s, r3);
3434    TCGv_i32 t1, t3;
3435
3436    /* r1 and r3 must be even.  */
3437    if (r1 & 1 || r3 & 1) {
3438        gen_program_exception(s, PGM_SPECIFICATION);
3439        return DISAS_NORETURN;
3440    }
3441
3442    t1 = tcg_const_i32(r1);
3443    t3 = tcg_const_i32(r3);
3444    gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
3445    tcg_temp_free_i32(t1);
3446    tcg_temp_free_i32(t3);
3447    set_cc_static(s);
3448    return DISAS_NEXT;
3449}
3450
3451static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
3452{
3453    int r1 = get_field(s, r1);
3454    int r3 = get_field(s, r3);
3455    TCGv_i32 t1, t3;
3456
3457    /* r1 and r3 must be even.  */
3458    if (r1 & 1 || r3 & 1) {
3459        gen_program_exception(s, PGM_SPECIFICATION);
3460        return DISAS_NORETURN;
3461    }
3462
3463    t1 = tcg_const_i32(r1);
3464    t3 = tcg_const_i32(r3);
3465    gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
3466    tcg_temp_free_i32(t1);
3467    tcg_temp_free_i32(t3);
3468    set_cc_static(s);
3469    return DISAS_NEXT;
3470}
3471
static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
{
    /* MOVE WITH OPTIONAL SPECIFICATIONS: r3 holds the operation controls. */
    int r3 = get_field(s, r3);
    gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);
    set_cc_static(s);
    return DISAS_NEXT;
}
3479
3480#ifndef CONFIG_USER_ONLY
static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o)
{
    /* MOVE TO PRIMARY: the length register comes from the l1 field. */
    int r1 = get_field(s, l1);
    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3488
static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
{
    /* MOVE TO SECONDARY: the length register comes from the l1 field. */
    int r1 = get_field(s, l1);
    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3496#endif
3497
static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
{
    /* MOVE NUMERICS: copy only the low (numeric) nibbles via helper. */
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}
3505
static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
{
    /* MOVE WITH OFFSET: nibble-shifted move via helper. */
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}
3513
3514static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
3515{
3516    TCGv_i32 t1 = tcg_const_i32(get_field(s, r1));
3517    TCGv_i32 t2 = tcg_const_i32(get_field(s, r2));
3518
3519    gen_helper_mvpg(cc_op, cpu_env, regs[0], t1, t2);
3520    tcg_temp_free_i32(t1);
3521    tcg_temp_free_i32(t2);
3522    set_cc_static(s);
3523    return DISAS_NEXT;
3524}
3525
3526static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
3527{
3528    TCGv_i32 t1 = tcg_const_i32(get_field(s, r1));
3529    TCGv_i32 t2 = tcg_const_i32(get_field(s, r2));
3530
3531    gen_helper_mvst(cc_op, cpu_env, t1, t2);
3532    tcg_temp_free_i32(t1);
3533    tcg_temp_free_i32(t2);
3534    set_cc_static(s);
3535    return DISAS_NEXT;
3536}
3537
static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
{
    /* MOVE ZONES: copy only the high (zone) nibbles via helper. */
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}
3545
static DisasJumpType op_mul(DisasContext *s, DisasOps *o)
{
    /* MULTIPLY: low 64 bits of in1 * in2. */
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3551
static DisasJumpType op_mul128(DisasContext *s, DisasOps *o)
{
    /* MULTIPLY LOGICAL: full 128-bit unsigned product into out:out2. */
    tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3557
static DisasJumpType op_muls128(DisasContext *s, DisasOps *o)
{
    /* MULTIPLY SINGLE: full 128-bit signed product into out:out2. */
    tcg_gen_muls2_i64(o->out2, o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3563
static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
{
    /* MULTIPLY (short BFP): softfloat helper. */
    gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3569
static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
{
    /* MULTIPLY (short -> long BFP): softfloat helper. */
    gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3575
static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
{
    /* MULTIPLY (long BFP): softfloat helper. */
    gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3581
static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
{
    /* MULTIPLY (extended BFP): 128-bit result returned in out plus low128. */
    gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
3588
static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
{
    /* MULTIPLY (long -> extended BFP): 128-bit result via out plus low128. */
    gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
3595
static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
{
    /* MULTIPLY AND ADD (short BFP): out = in1 * in2 + f[r3]. */
    TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
    gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return DISAS_NEXT;
}
3603
static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
{
    /* MULTIPLY AND ADD (long BFP): out = in1 * in2 + f[r3]. */
    TCGv_i64 r3 = load_freg(get_field(s, r3));
    gen_helper_madb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return DISAS_NEXT;
}
3611
static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
{
    /* MULTIPLY AND SUBTRACT (short BFP): out = in1 * in2 - f[r3]. */
    TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
    gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return DISAS_NEXT;
}
3619
static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
{
    /* MULTIPLY AND SUBTRACT (long BFP): out = in1 * in2 - f[r3]. */
    TCGv_i64 r3 = load_freg(get_field(s, r3));
    gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return DISAS_NEXT;
}
3627
3628static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
3629{
3630    TCGv_i64 z, n;
3631    z = tcg_const_i64(0);
3632    n = tcg_temp_new_i64();
3633    tcg_gen_neg_i64(n, o->in2);
3634    tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
3635    tcg_temp_free_i64(n);
3636    tcg_temp_free_i64(z);
3637    return DISAS_NEXT;
3638}
3639
static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o)
{
    /* LOAD NEGATIVE (short BFP): force the sign bit (bit 31) on. */
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
    return DISAS_NEXT;
}
3645
static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o)
{
    /* LOAD NEGATIVE (long BFP): force the sign bit (bit 63) on. */
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
    return DISAS_NEXT;
}
3651
static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
{
    /* LOAD NEGATIVE (extended BFP): sign lives in the high doubleword. */
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}
3658
static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
{
    /* AND (character): storage-to-storage AND of l1+1 bytes; helper sets CC. */
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}
3667
static DisasJumpType op_neg(DisasContext *s, DisasOps *o)
{
    /* LOAD COMPLEMENT: out = -in2. */
    tcg_gen_neg_i64(o->out, o->in2);
    return DISAS_NEXT;
}
3673
static DisasJumpType op_negf32(DisasContext *s, DisasOps *o)
{
    /* LOAD COMPLEMENT (short BFP): flip the sign bit (bit 31). */
    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
    return DISAS_NEXT;
}
3679
static DisasJumpType op_negf64(DisasContext *s, DisasOps *o)
{
    /* LOAD COMPLEMENT (long BFP): flip the sign bit (bit 63). */
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
    return DISAS_NEXT;
}
3685
static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
{
    /* LOAD COMPLEMENT (extended BFP): sign lives in the high doubleword. */
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}
3692
static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
{
    /* OR (character): storage-to-storage OR of l1+1 bytes; helper sets CC. */
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}
3701
static DisasJumpType op_or(DisasContext *s, DisasOps *o)
{
    /* OR: out = in1 | in2. */
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3707
static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
{
    /*
     * OR IMMEDIATE on a sub-field of the register.  insn->data packs the
     * field's bit position (low byte) and width (next byte); the immediate
     * in in2 is shifted into position before being OR'd in.
     */
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
3723
static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
{
    /*
     * OR to memory.  Without interlocked-access-2 this is a plain
     * load/modify/store; with it, the OR is performed atomically in
     * memory and in1 receives the fetched old value.
     */
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                    s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
3744
static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
{
    /* PACK: convert zoned decimal at in2 to packed decimal at addr1. */
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_pack(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}
3752
3753static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
3754{
3755    int l2 = get_field(s, l2) + 1;
3756    TCGv_i32 l;
3757
3758    /* The length must not exceed 32 bytes.  */
3759    if (l2 > 32) {
3760        gen_program_exception(s, PGM_SPECIFICATION);
3761        return DISAS_NORETURN;
3762    }
3763    l = tcg_const_i32(l2);
3764    gen_helper_pka(cpu_env, o->addr1, o->in2, l);
3765    tcg_temp_free_i32(l);
3766    return DISAS_NEXT;
3767}
3768
3769static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
3770{
3771    int l2 = get_field(s, l2) + 1;
3772    TCGv_i32 l;
3773
3774    /* The length must be even and should not exceed 64 bytes.  */
3775    if ((l2 & 1) || (l2 > 64)) {
3776        gen_program_exception(s, PGM_SPECIFICATION);
3777        return DISAS_NORETURN;
3778    }
3779    l = tcg_const_i32(l2);
3780    gen_helper_pku(cpu_env, o->addr1, o->in2, l);
3781    tcg_temp_free_i32(l);
3782    return DISAS_NEXT;
3783}
3784
static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o)
{
    /* POPULATION COUNT: per-byte popcount via helper. */
    gen_helper_popcnt(o->out, o->in2);
    return DISAS_NEXT;
}
3790
3791#ifndef CONFIG_USER_ONLY
static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
{
    /* PURGE TLB. */
    gen_helper_ptlb(cpu_env);
    return DISAS_NEXT;
}
3797#endif
3798
static DisasJumpType op_risbg(DisasContext *s, DisasOps *o)
{
    /*
     * ROTATE THEN INSERT SELECTED BITS: rotate in2 left by i5, then insert
     * bits i3..i4 of the result into out (optionally zeroing the rest,
     * per the high bit of i4).  The RISBHG/RISBLG variants restrict the
     * operation to the high/low word.  Fast paths use extract/deposit
     * when the mask shape allows.
     */
    int i3 = get_field(s, i3);
    int i4 = get_field(s, i4);
    int i5 = get_field(s, i5);
    int do_zero = i4 & 0x80;
    uint64_t mask, imask, pmask;
    int pos, len, rot;

    /* Adjust the arguments for the specific insn.  */
    switch (s->fields.op2) {
    case 0x55: /* risbg */
    case 0x59: /* risbgn */
        i3 &= 63;
        i4 &= 63;
        pmask = ~0;
        break;
    case 0x5d: /* risbhg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0xffffffff00000000ull;
        break;
    case 0x51: /* risblg */
        i3 = (i3 & 31) + 32;
        i4 = (i4 & 31) + 32;
        pmask = 0x00000000ffffffffull;
        break;
    default:
        g_assert_not_reached();
    }

    /* MASK is the set of bits to be inserted from R2. */
    if (i3 <= i4) {
        /* [0...i3---i4...63] */
        mask = (-1ull >> i3) & (-1ull << (63 - i4));
    } else {
        /* [0---i4...i3---63] */
        mask = (-1ull >> i3) | (-1ull << (63 - i4));
    }
    /* For RISBLG/RISBHG, the wrapping is limited to the high/low doubleword. */
    mask &= pmask;

    /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
       insns, we need to keep the other half of the register.  */
    imask = ~mask | ~pmask;
    if (do_zero) {
        imask = ~pmask;
    }

    len = i4 - i3 + 1;
    pos = 63 - i4;
    rot = i5 & 63;

    /* In some cases we can implement this with extract.  */
    if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
        tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
        return DISAS_NEXT;
    }

    /* In some cases we can implement this with deposit.  */
    if (len > 0 && (imask == 0 || ~mask == imask)) {
        /* Note that we rotate the bits to be inserted to the lsb, not to
           the position as described in the PoO.  */
        rot = (rot - pos) & 63;
    } else {
        pos = -1;
    }

    /* Rotate the input as necessary.  */
    tcg_gen_rotli_i64(o->in2, o->in2, rot);

    /* Insert the selected bits into the output.  */
    if (pos >= 0) {
        if (imask == 0) {
            tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
        } else {
            tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
        }
    } else if (imask == 0) {
        tcg_gen_andi_i64(o->out, o->in2, mask);
    } else {
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_andi_i64(o->out, o->out, imask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
    }
    return DISAS_NEXT;
}
3886
static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
{
    /*
     * ROTATE THEN {AND,OR,XOR} SELECTED BITS: rotate in2 left by i5, apply
     * the boolean op (selected by op2) to bits i3..i4 of out, and set the
     * CC from just those bits.  The high bit of i3 requests test-only mode.
     */
    int i3 = get_field(s, i3);
    int i4 = get_field(s, i4);
    int i5 = get_field(s, i5);
    uint64_t mask;

    /* If this is a test-only form, arrange to discard the result.  */
    if (i3 & 0x80) {
        o->out = tcg_temp_new_i64();
        o->g_out = false;
    }

    i3 &= 63;
    i4 &= 63;
    i5 &= 63;

    /* MASK is the set of bits to be operated on from R2.
       Take care for I3/I4 wraparound.  */
    mask = ~0ull >> i3;
    if (i3 <= i4) {
        mask ^= ~0ull >> i4 >> 1;
    } else {
        mask |= ~(~0ull >> i4 >> 1);
    }

    /* Rotate the input as necessary.  */
    tcg_gen_rotli_i64(o->in2, o->in2, i5);

    /* Operate.  */
    switch (s->fields.op2) {
    case 0x54: /* AND */
        tcg_gen_ori_i64(o->in2, o->in2, ~mask);
        tcg_gen_and_i64(o->out, o->out, o->in2);
        break;
    case 0x56: /* OR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
        break;
    case 0x57: /* XOR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_xor_i64(o->out, o->out, o->in2);
        break;
    default:
        abort();
    }

    /* Set the CC.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
3939
static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
{
    /* LOAD REVERSED (16-bit): byte-swap the low halfword. */
    tcg_gen_bswap16_i64(o->out, o->in2);
    return DISAS_NEXT;
}
3945
static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
{
    /* LOAD REVERSED (32-bit): byte-swap the low word. */
    tcg_gen_bswap32_i64(o->out, o->in2);
    return DISAS_NEXT;
}
3951
static DisasJumpType op_rev64(DisasContext *s, DisasOps *o)
{
    /* LOAD REVERSED (64-bit): byte-swap the doubleword. */
    tcg_gen_bswap64_i64(o->out, o->in2);
    return DISAS_NEXT;
}
3957
3958static DisasJumpType op_rll32(DisasContext *s, DisasOps *o)
3959{
3960    TCGv_i32 t1 = tcg_temp_new_i32();
3961    TCGv_i32 t2 = tcg_temp_new_i32();
3962    TCGv_i32 to = tcg_temp_new_i32();
3963    tcg_gen_extrl_i64_i32(t1, o->in1);
3964    tcg_gen_extrl_i64_i32(t2, o->in2);
3965    tcg_gen_rotl_i32(to, t1, t2);
3966    tcg_gen_extu_i32_i64(o->out, to);
3967    tcg_temp_free_i32(t1);
3968    tcg_temp_free_i32(t2);
3969    tcg_temp_free_i32(to);
3970    return DISAS_NEXT;
3971}
3972
static DisasJumpType op_rll64(DisasContext *s, DisasOps *o)
{
    /* ROTATE LEFT SINGLE LOGICAL (64-bit). */
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3978
3979#ifndef CONFIG_USER_ONLY
static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
{
    /* RESET REFERENCE BIT EXTENDED: helper sets CC from the old key bits. */
    gen_helper_rrbe(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3986
static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
{
    /* SET ADDRESS SPACE CONTROL FAST. */
    gen_helper_sacf(cpu_env, o->in2);
    /* Addressing mode has changed, so end the block.  */
    return DISAS_PC_STALE;
}
3993#endif
3994
static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
{
    /*
     * SET ADDRESSING MODE (SAM24/31/64): insn->data selects the mode;
     * update PSW mask bits 31-32 and truncate the continuation address
     * to the new mode's width.
     */
    int sam = s->insn->data;
    TCGv_i64 tsam;
    uint64_t mask;

    switch (sam) {
    case 0:
        /* 24-bit mode */
        mask = 0xffffff;
        break;
    case 1:
        /* 31-bit mode */
        mask = 0x7fffffff;
        break;
    default:
        /* 64-bit mode */
        mask = -1;
        break;
    }

    /* Bizarre but true, we check the address of the current insn for the
       specification exception, not the next to be executed.  Thus the PoO
       documents that Bad Things Happen two bytes before the end.  */
    if (s->base.pc_next & ~mask) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    s->pc_tmp &= mask;

    tsam = tcg_const_i64(sam);
    tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
    tcg_temp_free_i64(tsam);

    /* Always exit the TB, since we (may have) changed execution mode.  */
    return DISAS_PC_STALE;
}
4029
static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
{
    /* SET ACCESS: store the low word of in2 into access register r1. */
    int r1 = get_field(s, r1);
    tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
    return DISAS_NEXT;
}
4036
static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
{
    /* SUBTRACT (short BFP): softfloat helper. */
    gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
4042
static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
{
    /* SUBTRACT (long BFP): softfloat helper. */
    gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
4048
static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
{
    /* SUBTRACT (extended BFP): 128-bit result via out plus low128. */
    gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
4055
static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
{
    /* SQUARE ROOT (short BFP): softfloat helper. */
    gen_helper_sqeb(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}
4061
static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
{
    /* SQUARE ROOT (long BFP): softfloat helper. */
    gen_helper_sqdb(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}
4067
static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o)
{
    /* SQUARE ROOT (extended BFP): 128-bit result via out plus low128. */
    gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
4074
4075#ifndef CONFIG_USER_ONLY
static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
{
    /* SERVICE CALL (SCLP): helper sets CC. */
    gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
    set_cc_static(s);
    return DISAS_NEXT;
}
4082
static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
{
    /* SIGNAL PROCESSOR: order/address in in2, registers r1/r3; helper sets CC. */
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3);
    set_cc_static(s);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}
4093#endif
4094
static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
{
    /*
     * STORE ON CONDITION (STOC/STOCG/STOCFH, selected by insn->data):
     * branch around the store when the m3 condition is not met.
     */
    DisasCompare c;
    TCGv_i64 a, h;
    TCGLabel *lab;
    int r1;

    disas_jcc(s, &c, get_field(s, m3));

    /* We want to store when the condition is fulfilled, so branch
       out when it's not */
    c.cond = tcg_invert_cond(c.cond);

    lab = gen_new_label();
    if (c.is_64) {
        tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
    } else {
        tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
    }
    free_compare(&c);

    r1 = get_field(s, r1);
    a = get_address(s, 0, get_field(s, b2), get_field(s, d2));
    switch (s->insn->data) {
    case 1: /* STOCG */
        tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
        break;
    case 0: /* STOC */
        tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
        break;
    case 2: /* STOCFH */
        /* Store the high word of r1. */
        h = tcg_temp_new_i64();
        tcg_gen_shri_i64(h, regs[r1], 32);
        tcg_gen_qemu_st32(h, a, get_mem_index(s));
        tcg_temp_free_i64(h);
        break;
    default:
        g_assert_not_reached();
    }
    tcg_temp_free_i64(a);

    gen_set_label(lab);
    return DISAS_NEXT;
}
4139
/* SHIFT LEFT SINGLE (SLA/SLAG): arithmetic left shift that preserves
   the sign bit; insn->data is the sign-bit position (31 or 63). */
static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
{
    uint64_t sign = 1ull << s->insn->data;
    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
    /* The cc is computed from the unshifted operands. */
    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged.  */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return DISAS_NEXT;
}
4153
/* SHIFT LEFT SINGLE LOGICAL: plain logical left shift. */
static DisasJumpType op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* SHIFT RIGHT SINGLE: arithmetic (sign-propagating) right shift. */
static DisasJumpType op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* SHIFT RIGHT SINGLE LOGICAL: zero-filling right shift. */
static DisasJumpType op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
4171
/* SET FPC: load the floating-point control register, via helper. */
static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
{
    gen_helper_sfpc(cpu_env, o->in2);
    return DISAS_NEXT;
}

/* SET FPC AND SIGNAL: as SFPC; signalling semantics live in the helper. */
static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
{
    gen_helper_sfas(cpu_env, o->in2);
    return DISAS_NEXT;
}
4183
/* SET BFP ROUNDING MODE (SRNM): 2-bit rounding mode from the address. */
static DisasJumpType op_srnm(DisasContext *s, DisasOps *o)
{
    /* Bits other than 62 and 63 are ignored. Bit 29 is set to zero. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0x3ull);
    gen_helper_srnm(cpu_env, o->addr1);
    return DISAS_NEXT;
}

/* SET BFP ROUNDING MODE (SRNMB): 8-bit rounding mode from the address. */
static DisasJumpType op_srnmb(DisasContext *s, DisasOps *o)
{
    /* Bits 0-55 are ignored. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0xffull);
    gen_helper_srnm(cpu_env, o->addr1);
    return DISAS_NEXT;
}

/* SET DFP ROUNDING MODE (SRNMT): write the 3-bit mode straight into
   the FPC field at bit 4 — no helper needed without DFP support. */
static DisasJumpType op_srnmt(DisasContext *s, DisasOps *o)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Bits other than 61-63 are ignored. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0x7ull);

    /* No need to call a helper, we don't implement dfp */
    tcg_gen_ld32u_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_deposit_i64(tmp, tmp, o->addr1, 4, 3);
    tcg_gen_st32_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));

    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
4215
/* SET PROGRAM MASK (SPM): the cc is taken from bits 28-29 (counting from
   bit 0 = lsb) of the operand; the 4-bit program mask from bits 24-27 is
   deposited into the PSW mask. */
static DisasJumpType op_spm(DisasContext *s, DisasOps *o)
{
    tcg_gen_extrl_i64_i32(cc_op, o->in1);
    tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
    set_cc_static(s);

    /* Align the program-mask bits with PSW_SHIFT_MASK_PM. */
    tcg_gen_shri_i64(o->in1, o->in1, 24);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
    return DISAS_NEXT;
}
4226
/* EXTRACT CPU TIME (ECTG): GR0 = first operand minus the CPU timer,
   GR1 = second-operand address, r3 = doubleword loaded from r3's address. */
static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
{
    int b1 = get_field(s, b1);
    int d1 = get_field(s, d1);
    int b2 = get_field(s, b2);
    int d2 = get_field(s, d2);
    int r3 = get_field(s, r3);
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* fetch all operands first */
    o->in1 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in1, regs[b1], d1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in2, regs[b2], d2);
    o->addr1 = tcg_temp_new_i64();
    gen_addi_and_wrap_i64(s, o->addr1, regs[r3], 0);

    /* load the third operand into r3 before modifying anything */
    tcg_gen_qemu_ld64(regs[r3], o->addr1, get_mem_index(s));

    /* subtract CPU timer from first operand and store in GR0 */
    gen_helper_stpt(tmp, cpu_env);
    tcg_gen_sub_i64(regs[0], o->in1, tmp);

    /* store second operand in GR1 */
    tcg_gen_mov_i64(regs[1], o->in2);

    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
4257
4258#ifndef CONFIG_USER_ONLY
/* SET PSW KEY FROM ADDRESS (SPKA): deposit the key bits from the
   second-operand address into the PSW key field. */
static DisasJumpType op_spka(DisasContext *s, DisasOps *o)
{
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
    return DISAS_NEXT;
}

/* SET STORAGE KEY EXTENDED (SSKE), via helper. */
static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
{
    gen_helper_sske(cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* SET SYSTEM MASK (SSM): replace the top 8 bits of the PSW mask. */
static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
{
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return DISAS_PC_STALE_NOCHAIN;
}

/* STORE CPU ADDRESS (STAP): return the core id of this cpu. */
static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, core_id));
    return DISAS_NEXT;
}
4284#endif
4285
/* STORE CLOCK (STCK): TOD clock value from the helper; cc forced to 0. */
static DisasJumpType op_stck(DisasContext *s, DisasOps *o)
{
    gen_helper_stck(o->out, cpu_env);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}
4293
/* STORE CLOCK EXTENDED (STCKE): store a 16-byte extended TOD value,
   built from the 64-bit clock plus the TOD programmable register. */
static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
{
    TCGv_i64 c1 = tcg_temp_new_i64();
    TCGv_i64 c2 = tcg_temp_new_i64();
    TCGv_i64 todpr = tcg_temp_new_i64();
    gen_helper_stck(c1, cpu_env);
    /* 16-bit value stored in a uint32_t (only valid bits set) */
    tcg_gen_ld32u_i64(todpr, cpu_env, offsetof(CPUS390XState, todpr));
    /* Shift the 64-bit value into its place as a zero-extended
       104-bit value.  Note that "bit positions 64-103 are always
       non-zero so that they compare differently to STCK"; we set
       the least significant bit to 1.  */
    tcg_gen_shli_i64(c2, c1, 56);
    tcg_gen_shri_i64(c1, c1, 8);
    tcg_gen_ori_i64(c2, c2, 0x10000);
    tcg_gen_or_i64(c2, c2, todpr);
    tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
    tcg_temp_free_i64(c1);
    tcg_temp_free_i64(c2);
    tcg_temp_free_i64(todpr);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}
4320
4321#ifndef CONFIG_USER_ONLY
/* SET CLOCK (SCK): load the aligned doubleword operand, then let the
   helper set the TOD clock and the cc. */
static DisasJumpType op_sck(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
    gen_helper_sck(cc_op, cpu_env, o->in1);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SET CLOCK COMPARATOR (SCKC), via helper. */
static DisasJumpType op_sckc(DisasContext *s, DisasOps *o)
{
    gen_helper_sckc(cpu_env, o->in2);
    return DISAS_NEXT;
}

/* SET CLOCK PROGRAMMABLE FIELD (SCKPF): operand implicitly in GR0. */
static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o)
{
    gen_helper_sckpf(cpu_env, regs[0]);
    return DISAS_NEXT;
}

/* STORE CLOCK COMPARATOR (STCKC), via helper. */
static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
{
    gen_helper_stckc(o->out, cpu_env);
    return DISAS_NEXT;
}

/* STORE CONTROL (STCTG): store 64-bit control registers r1..r3. */
static DisasJumpType op_stctg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_stctg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}

/* STORE CONTROL (STCTL): 32-bit variant of the above. */
static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_stctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}

/* STORE CPU ID (STIDP): return the cpuid word from env. */
static DisasJumpType op_stidp(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));
    return DISAS_NEXT;
}

/* SET CPU TIMER (SPT), via helper. */
static DisasJumpType op_spt(DisasContext *s, DisasOps *o)
{
    gen_helper_spt(cpu_env, o->in2);
    return DISAS_NEXT;
}

/* STORE FACILITY LIST (STFL), via helper; stores to low memory. */
static DisasJumpType op_stfl(DisasContext *s, DisasOps *o)
{
    gen_helper_stfl(cpu_env);
    return DISAS_NEXT;
}

/* STORE CPU TIMER (STPT), via helper. */
static DisasJumpType op_stpt(DisasContext *s, DisasOps *o)
{
    gen_helper_stpt(o->out, cpu_env);
    return DISAS_NEXT;
}

/* STORE SYSTEM INFORMATION (STSI): function code/selectors come from
   GR0/GR1; the helper sets the cc. */
static DisasJumpType op_stsi(DisasContext *s, DisasOps *o)
{
    gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}
4398
/* SET PREFIX (SPX), via helper. */
static DisasJumpType op_spx(DisasContext *s, DisasOps *o)
{
    gen_helper_spx(cpu_env, o->in2);
    return DISAS_NEXT;
}

/* CANCEL SUBCHANNEL (XSCH): subchannel designated by GR1, as with all
   of the channel-subsystem ops below; the helpers set the cc. */
static DisasJumpType op_xsch(DisasContext *s, DisasOps *o)
{
    gen_helper_xsch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* CLEAR SUBCHANNEL (CSCH). */
static DisasJumpType op_csch(DisasContext *s, DisasOps *o)
{
    gen_helper_csch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* HALT SUBCHANNEL (HSCH). */
static DisasJumpType op_hsch(DisasContext *s, DisasOps *o)
{
    gen_helper_hsch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* MODIFY SUBCHANNEL (MSCH): SCHIB address in in2. */
static DisasJumpType op_msch(DisasContext *s, DisasOps *o)
{
    gen_helper_msch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* RESET CHANNEL PATH (RCHP). */
static DisasJumpType op_rchp(DisasContext *s, DisasOps *o)
{
    gen_helper_rchp(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* RESUME SUBCHANNEL (RSCH). */
static DisasJumpType op_rsch(DisasContext *s, DisasOps *o)
{
    gen_helper_rsch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SET ADDRESS LIMIT (SAL): does not set the cc. */
static DisasJumpType op_sal(DisasContext *s, DisasOps *o)
{
    gen_helper_sal(cpu_env, regs[1]);
    return DISAS_NEXT;
}

/* SET CHANNEL MONITOR (SCHM): takes GR1, GR2 and the operand address. */
static DisasJumpType op_schm(DisasContext *s, DisasOps *o)
{
    gen_helper_schm(cpu_env, regs[1], regs[2], o->in2);
    return DISAS_NEXT;
}
4458
/* SIGNAL ADAPTER (SIGA): not implemented. */
static DisasJumpType op_siga(DisasContext *s, DisasOps *o)
{
    /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
    gen_op_movi_cc(s, 3);
    return DISAS_NEXT;
}

/* STORE CHANNEL PATH STATUS (STCPS): not implemented. */
static DisasJumpType op_stcps(DisasContext *s, DisasOps *o)
{
    /* The instruction is suppressed if not provided. */
    return DISAS_NEXT;
}

/* START SUBCHANNEL (SSCH): subchannel in GR1, ORB address in in2. */
static DisasJumpType op_ssch(DisasContext *s, DisasOps *o)
{
    gen_helper_ssch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* STORE SUBCHANNEL (STSCH): subchannel in GR1, SCHIB address in in2. */
static DisasJumpType op_stsch(DisasContext *s, DisasOps *o)
{
    gen_helper_stsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* STORE CHANNEL REPORT WORD (STCRW). */
static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o)
{
    gen_helper_stcrw(cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST PENDING INTERRUPTION (TPI). */
static DisasJumpType op_tpi(DisasContext *s, DisasOps *o)
{
    gen_helper_tpi(cc_op, cpu_env, o->addr1);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST SUBCHANNEL (TSCH): subchannel in GR1, IRB address in in2. */
static DisasJumpType op_tsch(DisasContext *s, DisasOps *o)
{
    gen_helper_tsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* CHANNEL SUBSYSTEM CALL (CHSC). */
static DisasJumpType op_chsc(DisasContext *s, DisasOps *o)
{
    gen_helper_chsc(cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* STORE PREFIX (STPX): mask down to the architected prefix bits. */
static DisasJumpType op_stpx(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
    return DISAS_NEXT;
}
4520
/* STORE THEN AND/OR SYSTEM MASK (STNSM opc 0xac / STOSM): store the old
   system-mask byte, then AND or OR the immediate into PSW bits 0-7. */
static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s, i2);
    TCGv_i64 t;

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place.  */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
    tcg_temp_free_i64(t);

    if (s->fields.op == 0xac) {
        /* STNSM: AND the immediate into the top byte only. */
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        /* STOSM: OR the immediate into the top byte. */
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }

    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return DISAS_PC_STALE_NOCHAIN;
}
4544
/* STORE USING REAL ADDRESS (STURA/STURG): store through the real-address
   MMU index; the memop (and thus the size) is carried in insn->data. */
static DisasJumpType op_stura(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->in1, o->in2, MMU_REAL_IDX, s->insn->data);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        /* PER storage-alteration event for real-address stores. */
        update_psw_addr(s);
        gen_helper_per_store_real(cpu_env);
    }
    return DISAS_NEXT;
}
4555#endif
4556
/* STORE FACILITY LIST EXTENDED (STFLE): the helper sets the cc. */
static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
{
    gen_helper_stfle(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4563
/* Generic 8-bit store of in1 to the address in in2. */
static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

/* Generic 16-bit store. */
static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

/* Generic 32-bit store. */
static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

/* Generic 64-bit store. */
static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
4587
/* STORE ACCESS MULTIPLE (STAM): access registers r1..r3, via helper. */
static DisasJumpType op_stam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_stam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}
4597
/* STORE CHARACTERS UNDER MASK (STCM et al): store the bytes of r1
   selected by mask m3 to successive bytes at the second-operand address.
   insn->data biases the source bit position (e.g. for the high-word
   variant).  Contiguous masks collapse into a single wider store. */
static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    int pos, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Position of the least-significant selected byte. */
    pos = base + ctz32(m3) * 8;
    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
        break;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
        break;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
        break;

    default:
        /* This is going to be a sequence of shifts and stores.  */
        pos = base + 32 - 8;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_shri_i64(tmp, o->in1, pos);
                tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
                tcg_gen_addi_i64(o->in2, o->in2, 1);
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
4646
4647static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
4648{
4649    int r1 = get_field(s, r1);
4650    int r3 = get_field(s, r3);
4651    int size = s->insn->data;
4652    TCGv_i64 tsize = tcg_const_i64(size);
4653
4654    while (1) {
4655        if (size == 8) {
4656            tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
4657        } else {
4658            tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
4659        }
4660        if (r1 == r3) {
4661            break;
4662        }
4663        tcg_gen_add_i64(o->in2, o->in2, tsize);
4664        r1 = (r1 + 1) & 15;
4665    }
4666
4667    tcg_temp_free_i64(tsize);
4668    return DISAS_NEXT;
4669}
4670
/* STORE MULTIPLE HIGH (STMH): store the high 32 bits of registers
   r1..r3 (wrapping past r15) to consecutive words at the address. */
static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_const_i64(4);
    TCGv_i64 t32 = tcg_const_i64(32);

    while (1) {
        /* Move the high word into position for the 32-bit store. */
        tcg_gen_shl_i64(t, regs[r1], t32);
        tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t);
    tcg_temp_free_i64(t4);
    tcg_temp_free_i64(t32);
    return DISAS_NEXT;
}
4694
/* STORE PAIR TO QUADWORD (STPQ): serial context uses the plain helper;
   parallel context needs 128-bit atomics, else fall back to the
   exit-to-atomic-slowpath mechanism. */
static DisasJumpType op_stpq(DisasContext *s, DisasOps *o)
{
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        gen_helper_stpq(cpu_env, o->in2, o->out2, o->out);
    } else if (HAVE_ATOMIC128) {
        gen_helper_stpq_parallel(cpu_env, o->in2, o->out2, o->out);
    } else {
        gen_helper_exit_atomic(cpu_env);
        return DISAS_NORETURN;
    }
    return DISAS_NEXT;
}
4707
/* SEARCH STRING (SRST): registers by number; the helper sets the cc. */
static DisasJumpType op_srst(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));

    gen_helper_srst(cpu_env, r1, r2);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SEARCH STRING UNICODE (SRSTU). */
static DisasJumpType op_srstu(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));

    gen_helper_srstu(cpu_env, r1, r2);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4733
/* Plain 64-bit subtraction: out = in1 - in2. */
static DisasJumpType op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* Unsigned 64-bit subtract, leaving the borrow in cc_src as 0 (no
   borrow) or -1 (borrow) — the form CC_OP_SUBU expects. */
static DisasJumpType op_subu64(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_sub2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    return DISAS_NEXT;
}
4746
/* Compute borrow (0, -1) into cc_src. */
static void compute_borrow(DisasContext *s)
{
    /* Note the deliberate fallthrough chain: unknown cc ops first
       materialize a static cc, the static path extracts the carry,
       and the carry form is finally converted to a borrow. */
    switch (s->cc_op) {
    case CC_OP_SUBU:
        /* The borrow value is already in cc_src (0,-1). */
        break;
    default:
        gen_op_calc_cc(s);
        /* fall through */
    case CC_OP_STATIC:
        /* The carry flag is the msb of CC; compute into cc_src. */
        tcg_gen_extu_i32_i64(cc_src, cc_op);
        tcg_gen_shri_i64(cc_src, cc_src, 1);
        /* fall through */
    case CC_OP_ADDU:
        /* Convert carry (1,0) to borrow (0,-1). */
        tcg_gen_subi_i64(cc_src, cc_src, 1);
        break;
    }
}
4768
/* SUBTRACT WITH BORROW, 32-bit: out = in1 - in2 + borrow(0,-1). */
static DisasJumpType op_subb32(DisasContext *s, DisasOps *o)
{
    compute_borrow(s);

    /* Borrow is {0, -1}, so add to subtract. */
    tcg_gen_add_i64(o->out, o->in1, cc_src);
    tcg_gen_sub_i64(o->out, o->out, o->in2);
    return DISAS_NEXT;
}

/* SUBTRACT WITH BORROW, 64-bit: 128-bit arithmetic so the new borrow
   lands back in cc_src. */
static DisasJumpType op_subb64(DisasContext *s, DisasOps *o)
{
    compute_borrow(s);

    /*
     * Borrow is {0, -1}, so add to subtract; replicate the
     * borrow input to produce 128-bit -1 for the addition.
     */
    TCGv_i64 zero = tcg_const_i64(0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, cc_src);
    tcg_gen_sub2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);
    tcg_temp_free_i64(zero);

    return DISAS_NEXT;
}
4794
/* SUPERVISOR CALL (SVC): record the svc code and instruction length in
   env, then raise the SVC exception — ends the TB. */
static DisasJumpType op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    /* The exception handler needs current psw.addr and cc. */
    update_psw_addr(s);
    update_cc_op(s);

    t = tcg_const_i32(get_field(s, i1) & 0xff);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
    tcg_temp_free_i32(t);

    t = tcg_const_i32(s->ilen);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
    tcg_temp_free_i32(t);

    gen_exception(EXCP_SVC);
    return DISAS_NORETURN;
}
4813
4814static DisasJumpType op_tam(DisasContext *s, DisasOps *o)
4815{
4816    int cc = 0;
4817
4818    cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0;
4819    cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0;
4820    gen_op_movi_cc(s, cc);
4821    return DISAS_NEXT;
4822}
4823
/* TEST DATA CLASS (TCEB), short BFP: helper sets the cc. */
static DisasJumpType op_tceb(DisasContext *s, DisasOps *o)
{
    gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST DATA CLASS (TCDB), long BFP. */
static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST DATA CLASS (TCXB), extended BFP: value in out/out2 pair. */
static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcxb(cc_op, cpu_env, o->out, o->out2, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4844
4845#ifndef CONFIG_USER_ONLY
4846
/* TEST BLOCK (TB): helper clears/tests the block and sets the cc. */
static DisasJumpType op_testblock(DisasContext *s, DisasOps *o)
{
    gen_helper_testblock(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST PROTECTION (TPROT): helper sets the cc. */
static DisasJumpType op_tprot(DisasContext *s, DisasOps *o)
{
    gen_helper_tprot(cc_op, cpu_env, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4860
4861#endif
4862
/* TEST DECIMAL (TP): operand length is l1 + 1 bytes; helper sets cc. */
static DisasJumpType op_tp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l1 = tcg_const_i32(get_field(s, l1) + 1);
    gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
    tcg_temp_free_i32(l1);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TRANSLATE (TR): translate l1 bytes at addr1 through the table at in2. */
static DisasJumpType op_tr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_tr(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TRANSLATE EXTENDED (TRE): helper returns an updated register pair
   (high in out, low retrieved via return_low128) and sets the cc. */
static DisasJumpType op_tre(DisasContext *s, DisasOps *o)
{
    gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TRANSLATE AND TEST (TRT): helper sets the cc. */
static DisasJumpType op_trt(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TRANSLATE AND TEST REVERSE (TRTR). */
static DisasJumpType op_trtr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}
4906
/* TRANSLATE ONE/TWO TO ONE/TWO (TROO/TROT/TRTO/TRTT): the low two
   opcode bits select source/destination element sizes.  The test byte
   or character comes from GR0, unless the ETF2-ENH m3 bit disables
   the test (signalled to the helper with tst == -1). */
static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
    TCGv_i32 sizes = tcg_const_i32(s->insn->opc & 3);
    TCGv_i32 tst = tcg_temp_new_i32();
    int m3 = get_field(s, m3);

    /* Without the enhancement facility, m3 is ignored. */
    if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
        m3 = 0;
    }
    if (m3 & 1) {
        tcg_gen_movi_i32(tst, -1);
    } else {
        tcg_gen_extrl_i64_i32(tst, regs[0]);
        if (s->insn->opc & 3) {
            tcg_gen_ext8u_i32(tst, tst);
        } else {
            tcg_gen_ext16u_i32(tst, tst);
        }
    }
    gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    tcg_temp_free_i32(sizes);
    tcg_temp_free_i32(tst);
    set_cc_static(s);
    return DISAS_NEXT;
}
4937
/* TEST AND SET (TS): atomically exchange the byte with 0xff; the cc is
   the former leftmost bit (bit 7 of the loaded byte). */
static DisasJumpType op_ts(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_const_i32(0xff);
    tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
    tcg_gen_extract_i32(cc_op, t1, 7, 1);
    tcg_temp_free_i32(t1);
    set_cc_static(s);
    return DISAS_NEXT;
}
4947
/* UNPACK (UNPK), via helper. */
static DisasJumpType op_unpk(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

/* UNPACK ASCII (UNPKA): specification exception if length > 32. */
static DisasJumpType op_unpka(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s, l1) + 1;
    TCGv_i32 l;

    /* The length must not exceed 32 bytes.  */
    if (l1 > 32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_const_i32(l1);
    gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* UNPACK UNICODE (UNPKU): length must be even and at most 64 bytes. */
static DisasJumpType op_unpku(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s, l1) + 1;
    TCGv_i32 l;

    /* The length must be even and should not exceed 64 bytes.  */
    if ((l1 & 1) || (l1 > 64)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_const_i32(l1);
    gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}
4989
4990
/* EXCLUSIVE OR (XC): when both operands designate the same storage,
   the result is all zeros, so emit an inline memset-to-zero for short
   lengths; otherwise defer to the helper, which also sets the cc. */
static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
{
    int d1 = get_field(s, d1);
    int d2 = get_field(s, d2);
    int b1 = get_field(s, b1);
    int b2 = get_field(s, b2);
    int l = get_field(s, l1);
    TCGv_i32 t32;

    o->addr1 = get_address(s, 0, b1, d1);

    /* If the addresses are identical, this is a store/memset of zero.  */
    if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
        o->in2 = tcg_const_i64(0);

        /* l is a length code (len - 1); emit the widest stores possible. */
        l++;
        while (l >= 8) {
            tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
            l -= 8;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 8);
            }
        }
        if (l >= 4) {
            tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
            l -= 4;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 4);
            }
        }
        if (l >= 2) {
            tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
            l -= 2;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 2);
            }
        }
        if (l) {
            tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
        }
        gen_op_movi_cc(s, 0);
        return DISAS_NEXT;
    }

    /* But in general we'll defer to a helper.  */
    o->in2 = get_address(s, 0, b2, d2);
    t32 = tcg_const_i32(l);
    gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
    tcg_temp_free_i32(t32);
    set_cc_static(s);
    return DISAS_NEXT;
}
5043
/* Plain 64-bit exclusive or. */
static DisasJumpType op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* XOR IMMEDIATE against a sub-field of the register: insn->data packs
   the field's bit offset (low byte) and width; only the manipulated
   bits feed the cc computation. */
static DisasJumpType op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
5065
/* XOR into memory (XI et al): without interlocked-access 2 this is a
   plain load/xor/store; with it, the xor is performed atomically in
   memory and the result recomputed locally only to derive the cc. */
static DisasJumpType op_xi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
5086
/* Produce a zero output operand. */
static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    return DISAS_NEXT;
}

/* Produce a zero output pair; out2 aliases out and must not be freed
   twice, hence g_out2. */
static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    o->out2 = o->out;
    o->g_out2 = true;
    return DISAS_NEXT;
}
5100
5101#ifndef CONFIG_USER_ONLY
/* CLP (PCI list/query), via helper; cc set by the helper. */
static DisasJumpType op_clp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));

    gen_helper_clp(cpu_env, r2);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* PCILG (PCI load), via helper. */
static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));

    gen_helper_pcilg(cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* PCISTG (PCI store), via helper. */
static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));

    gen_helper_pcistg(cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* STPCIFC (store PCI function controls), via helper; ar = access reg. */
static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 ar = tcg_const_i32(get_field(s, b2));

    gen_helper_stpcifc(cpu_env, r1, o->addr1, ar);
    tcg_temp_free_i32(ar);
    tcg_temp_free_i32(r1);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SIC (set interruption controls), via helper; no cc change. */
static DisasJumpType op_sic(DisasContext *s, DisasOps *o)
{
    gen_helper_sic(cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* RPCIT (refresh PCI translations), via helper. */
static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));

    gen_helper_rpcit(cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* PCISTB (PCI store block), via helper. */
static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    TCGv_i32 ar = tcg_const_i32(get_field(s, b2));

    gen_helper_pcistb(cpu_env, r1, r3, o->addr1, ar);
    tcg_temp_free_i32(ar);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* MPCIFC (modify PCI function controls), via helper. */
static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 ar = tcg_const_i32(get_field(s, b2));

    gen_helper_mpcifc(cpu_env, r1, o->addr1, ar);
    tcg_temp_free_i32(ar);
    tcg_temp_free_i32(r1);
    set_cc_static(s);
    return DISAS_NEXT;
}
5191#endif
5192
5193#include "translate_vx.c.inc"
5194
5195/* ====================================================================== */
5196/* The "Cc OUTput" generators.  Given the generated output (and in some cases
5197   the original inputs), update the various cc data structures in order to
5198   be able to compute the new condition code.  */
5199
static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}

static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}

static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}

static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}

/*
 * 32-bit unsigned add: split the 64-bit output so that bit 32 (the
 * carry-out) lands in cc_src and the low 32 bits (the result) in cc_dst.
 * NOTE(review): assumes the op generator computed the sum of the
 * zero-extended operands in 64 bits -- confirm against the add ops.
 */
static void cout_addu32(DisasContext *s, DisasOps *o)
{
    tcg_gen_shri_i64(cc_src, o->out, 32);
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, cc_dst);
}

/* 64-bit unsigned add: cc_src (the carry) must already have been set
   by the insn generator itself. */
static void cout_addu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, o->out);
}

static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}

static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}

static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}

static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}

static void cout_f32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
}

static void cout_f64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
}

static void cout_f128(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
}

static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}

static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}

static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}

static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}

/* Non-zero test of only the low 32 bits of the output.  */
static void cout_nz32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}

static void cout_nz64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
}

static void cout_s32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
}

static void cout_s64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
}

static void cout_subs32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
}

static void cout_subs64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
}

/*
 * 32-bit unsigned subtract done in 64 bits: on borrow the 64-bit result
 * is negative, so the arithmetic shift yields -1 (borrow) or 0 in
 * cc_src, while cc_dst receives the 32-bit result.
 */
static void cout_subu32(DisasContext *s, DisasOps *o)
{
    tcg_gen_sari_i64(cc_src, o->out, 32);
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, cc_dst);
}

/* 64-bit unsigned subtract: cc_src (the borrow indication) must already
   have been set by the insn generator itself. */
static void cout_subu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, o->out);
}

static void cout_tm32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
}

static void cout_tm64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
}

static void cout_muls32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_MULS_32, o->out);
}

static void cout_muls64(DisasContext *s, DisasOps *o)
{
    /* out contains "high" part, out2 contains "low" part of 128 bit result */
    gen_op_update2_cc_i64(s, CC_OP_MULS_64, o->out, o->out2);
}
5350
5351/* ====================================================================== */
5352/* The "PREParation" generators.  These initialize the DisasOps.OUT fields
5353   with the TCG register to which we will write.  Used in combination with
5354   the "wout" generators, in some cases we need a new temporary, and in
5355   some cases we can write to a TCG global.  */
5356
/* Allocate a fresh temporary for the output.  */
static void prep_new(DisasContext *s, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}
#define SPEC_prep_new 0

/* Allocate a fresh temporary pair for a double-width output.  */
static void prep_new_P(DisasContext *s, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}
#define SPEC_prep_new_P 0

/* Write directly into the r1 global; g_out flags it as a TCG global
   rather than a temporary. */
static void prep_r1(DisasContext *s, DisasOps *o)
{
    o->out = regs[get_field(s, r1)];
    o->g_out = true;
}
#define SPEC_prep_r1 0

/* Write directly into the even/odd register pair r1/r1+1 (SPEC enforces
   that r1 is even). */
static void prep_r1_P(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->out = regs[r1];
    o->out2 = regs[r1 + 1];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_r1_P SPEC_r1_even

/* Whenever we need x1 in addition to other inputs, we'll load it to out/out2 */
static void prep_x1(DisasContext *s, DisasOps *o)
{
    o->out = load_freg(get_field(s, r1));
    o->out2 = load_freg(get_field(s, r1) + 2);
}
#define SPEC_prep_x1 SPEC_r1_f128
5393
5394/* ====================================================================== */
5395/* The "Write OUTput" generators.  These generally perform some non-trivial
5396   copy of data to TCG globals, or to main memory.  The trivial cases are
5397   generally handled by having a "prep" generator install the TCG global
5398   as the destination of the operation.  */
5399
static void wout_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->out);
}
#define SPEC_wout_r1 0

static void wout_out2_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->out2);
}
#define SPEC_wout_out2_r1 0

/* Deposit only the low 8 bits of the output into r1, preserving the rest.  */
static void wout_r1_8(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}
#define SPEC_wout_r1_8 0

/* Deposit only the low 16 bits of the output into r1, preserving the rest.  */
static void wout_r1_16(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}
#define SPEC_wout_r1_16 0

static void wout_r1_32(DisasContext *s, DisasOps *o)
{
    store_reg32_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_r1_32 0

static void wout_r1_32h(DisasContext *s, DisasOps *o)
{
    store_reg32h_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_r1_32h 0

/* Store a 32-bit pair into the even/odd register pair r1/r1+1.  */
static void wout_r1_P32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64(r1 + 1, o->out2);
}
#define SPEC_wout_r1_P32 SPEC_r1_even

/* Split a 64-bit output across the even/odd pair: low half to r1+1,
   high half to r1.  Note this clobbers o->out (shifted in place). */
static void wout_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    store_reg32_i64(r1 + 1, o->out);
    tcg_gen_shri_i64(o->out, o->out, 32);
    store_reg32_i64(r1, o->out);
}
#define SPEC_wout_r1_D32 SPEC_r1_even

static void wout_r3_P32(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    store_reg32_i64(r3, o->out);
    store_reg32_i64(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P32 SPEC_r3_even

static void wout_r3_P64(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    store_reg(r3, o->out);
    store_reg(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P64 SPEC_r3_even

static void wout_e1(DisasContext *s, DisasOps *o)
{
    store_freg32_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_e1 0

static void wout_f1(DisasContext *s, DisasOps *o)
{
    store_freg(get_field(s, r1), o->out);
}
#define SPEC_wout_f1 0

/* Store an extended (128-bit) FP result into the register pair f1/f1+2.  */
static void wout_x1(DisasContext *s, DisasOps *o)
{
    int f1 = get_field(s, r1);
    store_freg(f1, o->out);
    store_freg(f1 + 2, o->out2);
}
#define SPEC_wout_x1 SPEC_r1_f128

/* Conditional store: skip the write entirely when r1 == r2.  */
static void wout_cond_r1r2_32(DisasContext *s, DisasOps *o)
{
    if (get_field(s, r1) != get_field(s, r2)) {
        store_reg32_i64(get_field(s, r1), o->out);
    }
}
#define SPEC_wout_cond_r1r2_32 0

static void wout_cond_e1e2(DisasContext *s, DisasOps *o)
{
    if (get_field(s, r1) != get_field(s, r2)) {
        store_freg32_i64(get_field(s, r1), o->out);
    }
}
#define SPEC_wout_cond_e1e2 0

static void wout_m1_8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_8 0

static void wout_m1_16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_16 0

#ifndef CONFIG_USER_ONLY
/* Alignment-checked variant (MO_ALIGN) of the 16-bit store.  */
static void wout_m1_16a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN);
}
#define SPEC_wout_m1_16a 0
#endif

static void wout_m1_32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_32 0

#ifndef CONFIG_USER_ONLY
/* Alignment-checked variant (MO_ALIGN) of the 32-bit store.  */
static void wout_m1_32a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_wout_m1_32a 0
#endif

static void wout_m1_64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_64 0

#ifndef CONFIG_USER_ONLY
/* Alignment-checked variant (MO_ALIGN) of the 64-bit store.  */
static void wout_m1_64a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
}
#define SPEC_wout_m1_64a 0
#endif

/* Store to the address computed into in2 (rather than addr1).  */
static void wout_m2_32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
}
#define SPEC_wout_m2_32 0

static void wout_in2_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->in2);
}
#define SPEC_wout_in2_r1 0

static void wout_in2_r1_32(DisasContext *s, DisasOps *o)
{
    store_reg32_i64(get_field(s, r1), o->in2);
}
#define SPEC_wout_in2_r1_32 0
5572
5573/* ====================================================================== */
5574/* The "INput 1" generators.  These load the first operand to an insn.  */
5575
static void in1_r1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r1));
}
#define SPEC_in1_r1 0

/* Use the r1 global directly (no copy); g_in1 flags it as a TCG global.  */
static void in1_r1_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_r1_o 0

static void in1_r1_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1)]);
}
#define SPEC_in1_r1_32s 0

static void in1_r1_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1)]);
}
#define SPEC_in1_r1_32u 0

/* The high 32 bits of r1 (shift right 32).  */
static void in1_r1_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r1)], 32);
}
#define SPEC_in1_r1_sr32 0

/* The odd register of the r1 even/odd pair.  */
static void in1_r1p1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r1) + 1);
}
#define SPEC_in1_r1p1 SPEC_r1_even

static void in1_r1p1_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r1) + 1];
    o->g_in1 = true;
}
#define SPEC_in1_r1p1_o SPEC_r1_even

static void in1_r1p1_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1) + 1]);
}
#define SPEC_in1_r1p1_32s SPEC_r1_even

static void in1_r1p1_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1) + 1]);
}
#define SPEC_in1_r1p1_32u SPEC_r1_even

/* Concatenate the even/odd pair into one 64-bit value: r1+1 supplies the
   low half, r1 the high half. */
static void in1_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}
#define SPEC_in1_r1_D32 SPEC_r1_even

static void in1_r2(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r2));
}
#define SPEC_in1_r2 0

static void in1_r2_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r2)], 32);
}
#define SPEC_in1_r2_sr32 0

static void in1_r2_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r2)]);
}
#define SPEC_in1_r2_32u 0

static void in1_r3(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r3));
}
#define SPEC_in1_r3 0

static void in1_r3_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_r3_o 0

static void in1_r3_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r3)]);
}
#define SPEC_in1_r3_32s 0

static void in1_r3_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r3)]);
}
#define SPEC_in1_r3_32u 0

static void in1_r3_D32(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
}
#define SPEC_in1_r3_D32 SPEC_r3_even

static void in1_e1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(s, r1));
}
#define SPEC_in1_e1 0

static void in1_f1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r1));
}
#define SPEC_in1_f1 0

/* Load the high double word of an extended (128-bit) format FP number */
static void in1_x2h(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r2));
}
#define SPEC_in1_x2h SPEC_r2_f128

static void in1_f3(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r3));
}
#define SPEC_in1_f3 0

/* Compute the first-operand effective address from b1/d1 into addr1.  */
static void in1_la1(DisasContext *s, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
}
#define SPEC_in1_la1 0

/* Compute the second-operand effective address (x2/b2/d2) into addr1.  */
static void in1_la2(DisasContext *s, DisasOps *o)
{
    int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
}
#define SPEC_in1_la2 0

/* The in1_m1_* variants compute addr1 and then load in1 from memory.  */
static void in1_m1_8u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_8u 0

static void in1_m1_16s(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16s 0

static void in1_m1_16u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16u 0

static void in1_m1_32s(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32s 0

static void in1_m1_32u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32u 0

static void in1_m1_64(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_64 0
5785
5786/* ====================================================================== */
5787/* The "INput 2" generators.  These load the second operand to an insn.  */
5788
/* Use the r1 global directly (no copy); g_in2 flags it as a TCG global.  */
static void in2_r1_o(DisasContext *s, DisasOps *o)
{
    o->in2 = regs[get_field(s, r1)];
    o->g_in2 = true;
}
#define SPEC_in2_r1_o 0

static void in2_r1_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r1)]);
}
#define SPEC_in2_r1_16u 0

static void in2_r1_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r1)]);
}
#define SPEC_in2_r1_32u 0

/* Concatenate the r1 even/odd pair: r1+1 supplies the low half.  */
static void in2_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
}
#define SPEC_in2_r1_D32 SPEC_r1_even

static void in2_r2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_reg(get_field(s, r2));
}
#define SPEC_in2_r2 0

static void in2_r2_o(DisasContext *s, DisasOps *o)
{
    o->in2 = regs[get_field(s, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_r2_o 0

/* Only load r2 when it is non-zero; otherwise in2 stays NULL.  */
static void in2_r2_nz(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}
#define SPEC_in2_r2_nz 0

static void in2_r2_8s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_8s 0

static void in2_r2_8u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_8u 0

static void in2_r2_16s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_16s 0

static void in2_r2_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_16u 0

static void in2_r3(DisasContext *s, DisasOps *o)
{
    o->in2 = load_reg(get_field(s, r3));
}
#define SPEC_in2_r3 0

/* The high 32 bits of r3.  */
static void in2_r3_sr32(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(s, r3)], 32);
}
#define SPEC_in2_r3_sr32 0

static void in2_r3_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r3)]);
}
#define SPEC_in2_r3_32u 0

static void in2_r2_32s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_32s 0

static void in2_r2_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_32u 0

/* The high 32 bits of r2.  */
static void in2_r2_sr32(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(s, r2)], 32);
}
#define SPEC_in2_r2_sr32 0

static void in2_e2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(s, r2));
}
#define SPEC_in2_e2 0

static void in2_f2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg(get_field(s, r2));
}
#define SPEC_in2_f2 0

/* Load the low double word of an extended (128-bit) format FP number */
static void in2_x2l(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg(get_field(s, r2) + 2);
}
#define SPEC_in2_x2l SPEC_r2_f128

/* Register value used as an address, wrapped for the addressing mode.  */
static void in2_ra2(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);

    /* Note: *don't* treat !r2 as 0, use the reg value. */
    o->in2 = tcg_temp_new_i64();
    gen_addi_and_wrap_i64(s, o->in2, regs[r2], 0);
}
#define SPEC_in2_ra2 0

/* Second-operand effective address from x2/b2/d2.  */
static void in2_a2(DisasContext *s, DisasOps *o)
{
    int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
    o->in2 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
}
#define SPEC_in2_a2 0

/* PC-relative address: i2 is a signed halfword offset from this insn.  */
static void in2_ri2(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->base.pc_next + (int64_t)get_field(s, i2) * 2);
}
#define SPEC_in2_ri2 0

/* Shift counts for 32- and 64-bit shifts; the mask limits the count.  */
static void in2_sh32(DisasContext *s, DisasOps *o)
{
    help_l2_shift(s, o, 31);
}
#define SPEC_in2_sh32 0

static void in2_sh64(DisasContext *s, DisasOps *o)
{
    help_l2_shift(s, o, 63);
}
#define SPEC_in2_sh64 0

/* The in2_m2_* variants compute the address into in2, then replace it
   with the value loaded from that address. */
static void in2_m2_8u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_8u 0

static void in2_m2_16s(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16s 0

static void in2_m2_16u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16u 0

static void in2_m2_32s(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32s 0

static void in2_m2_32u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32u 0

#ifndef CONFIG_USER_ONLY
/* Alignment-checked (MO_ALIGN) 32-bit load.  */
static void in2_m2_32ua(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_in2_m2_32ua 0
#endif

static void in2_m2_64(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_64 0

/* 64-bit load whose result is itself an address: wrap it afterwards.  */
static void in2_m2_64w(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
    gen_addi_and_wrap_i64(s, o->in2, o->in2, 0);
}
#define SPEC_in2_m2_64w 0

#ifndef CONFIG_USER_ONLY
/* Alignment-checked (MO_ALIGN) 64-bit load.  */
static void in2_m2_64a(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEQ | MO_ALIGN);
}
#define SPEC_in2_m2_64a 0
#endif

/* The in2_mri2_* variants load from a PC-relative address.  */
static void in2_mri2_16u(DisasContext *s, DisasOps *o)
{
    in2_ri2(s, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_16u 0

static void in2_mri2_32s(DisasContext *s, DisasOps *o)
{
    in2_ri2(s, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32s 0

static void in2_mri2_32u(DisasContext *s, DisasOps *o)
{
    in2_ri2(s, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32u 0

static void in2_mri2_64(DisasContext *s, DisasOps *o)
{
    in2_ri2(s, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_64 0

/* Immediate operand variants; the _shl forms scale by insn->data bits.  */
static void in2_i2(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_const_i64(get_field(s, i2));
}
#define SPEC_in2_i2 0

static void in2_i2_8u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint8_t)get_field(s, i2));
}
#define SPEC_in2_i2_8u 0

static void in2_i2_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint16_t)get_field(s, i2));
}
#define SPEC_in2_i2_16u 0

static void in2_i2_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint32_t)get_field(s, i2));
}
#define SPEC_in2_i2_32u 0

static void in2_i2_16u_shl(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(s, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0

static void in2_i2_32u_shl(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(s, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_32u_shl 0

#ifndef CONFIG_USER_ONLY
/* The raw instruction bytes themselves, as an operand (e.g. for EX).  */
static void in2_insn(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->fields.raw_insn);
}
#define SPEC_in2_insn 0
#endif
6104
6105/* ====================================================================== */
6106
6107/* Find opc within the table of insns.  This is formulated as a switch
6108   statement so that (1) we get compile-time notice of cut-paste errors
6109   for duplicated opcodes, and (2) the compiler generates the binary
6110   search tree, rather than us having to post-process the table.  */
6111
6112#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
6113    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0)
6114
6115#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
6116    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0)
6117
6118#define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \
6119    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL)
6120
6121#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM,
6122
6123enum DisasInsnEnum {
6124#include "insn-data.def"
6125};
6126
6127#undef E
6128#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) {                   \
6129    .opc = OPC,                                                             \
6130    .flags = FL,                                                            \
6131    .fmt = FMT_##FT,                                                        \
6132    .fac = FAC_##FC,                                                        \
6133    .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
6134    .name = #NM,                                                            \
6135    .help_in1 = in1_##I1,                                                   \
6136    .help_in2 = in2_##I2,                                                   \
6137    .help_prep = prep_##P,                                                  \
6138    .help_wout = wout_##W,                                                  \
6139    .help_cout = cout_##CC,                                                 \
6140    .help_op = op_##OP,                                                     \
6141    .data = D                                                               \
6142 },
6143
6144/* Allow 0 to be used for NULL in the table below.  */
6145#define in1_0  NULL
6146#define in2_0  NULL
6147#define prep_0  NULL
6148#define wout_0  NULL
6149#define cout_0  NULL
6150#define op_0  NULL
6151
6152#define SPEC_in1_0 0
6153#define SPEC_in2_0 0
6154#define SPEC_prep_0 0
6155#define SPEC_wout_0 0
6156
6157/* Give smaller names to the various facilities.  */
6158#define FAC_Z           S390_FEAT_ZARCH
6159#define FAC_CASS        S390_FEAT_COMPARE_AND_SWAP_AND_STORE
6160#define FAC_DFP         S390_FEAT_DFP
6161#define FAC_DFPR        S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* DFP-rounding */
6162#define FAC_DO          S390_FEAT_STFLE_45 /* distinct-operands */
6163#define FAC_EE          S390_FEAT_EXECUTE_EXT
6164#define FAC_EI          S390_FEAT_EXTENDED_IMMEDIATE
6165#define FAC_FPE         S390_FEAT_FLOATING_POINT_EXT
6166#define FAC_FPSSH       S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPS-sign-handling */
6167#define FAC_FPRGR       S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPR-GR-transfer */
6168#define FAC_GIE         S390_FEAT_GENERAL_INSTRUCTIONS_EXT
6169#define FAC_HFP_MA      S390_FEAT_HFP_MADDSUB
6170#define FAC_HW          S390_FEAT_STFLE_45 /* high-word */
6171#define FAC_IEEEE_SIM   S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* IEEE-exception-simulation */
6172#define FAC_MIE         S390_FEAT_STFLE_49 /* misc-instruction-extensions */
6173#define FAC_LAT         S390_FEAT_STFLE_49 /* load-and-trap */
6174#define FAC_LOC         S390_FEAT_STFLE_45 /* load/store on condition 1 */
6175#define FAC_LOC2        S390_FEAT_STFLE_53 /* load/store on condition 2 */
6176#define FAC_LD          S390_FEAT_LONG_DISPLACEMENT
6177#define FAC_PC          S390_FEAT_STFLE_45 /* population count */
6178#define FAC_SCF         S390_FEAT_STORE_CLOCK_FAST
6179#define FAC_SFLE        S390_FEAT_STFLE
6180#define FAC_ILA         S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
6181#define FAC_MVCOS       S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
6182#define FAC_LPP         S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
6183#define FAC_DAT_ENH     S390_FEAT_DAT_ENH
6184#define FAC_E2          S390_FEAT_EXTENDED_TRANSLATION_2
6185#define FAC_EH          S390_FEAT_STFLE_49 /* execution-hint */
6186#define FAC_PPA         S390_FEAT_STFLE_49 /* processor-assist */
6187#define FAC_LZRB        S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
6188#define FAC_ETF3        S390_FEAT_EXTENDED_TRANSLATION_3
6189#define FAC_MSA         S390_FEAT_MSA /* message-security-assist facility */
6190#define FAC_MSA3        S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
6191#define FAC_MSA4        S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
6192#define FAC_MSA5        S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
6193#define FAC_MSA8        S390_FEAT_MSA_EXT_8 /* msa-extension-8 facility */
6194#define FAC_ECT         S390_FEAT_EXTRACT_CPU_TIME
6195#define FAC_PCI         S390_FEAT_ZPCI /* z/PCI facility */
6196#define FAC_AIS         S390_FEAT_ADAPTER_INT_SUPPRESSION
6197#define FAC_V           S390_FEAT_VECTOR /* vector facility */
6198#define FAC_VE          S390_FEAT_VECTOR_ENH /* vector enhancements facility 1 */
6199#define FAC_MIE2        S390_FEAT_MISC_INSTRUCTION_EXT2 /* miscellaneous-instruction-extensions facility 2 */
6200
6201static const DisasInsn insn_info[] = {
6202#include "insn-data.def"
6203};
6204
6205#undef E
6206#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \
6207    case OPC: return &insn_info[insn_ ## NM];
6208
/* Map an opcode to its DisasInsn entry via the generated switch above;
   returns NULL for an invalid/unknown opcode. */
static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}
6217
6218#undef F
6219#undef E
6220#undef D
6221#undef C
6222
/* Extract a field from the insn.  The INSN should be left-aligned in
   the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principles of Operation.  */

static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
{
    uint32_t r, m;

    /* A zero-size field means "not present" in this format.  */
    if (f->size == 0) {
        return;
    }

    /* Zero extract the field from the insn.  */
    r = (insn << f->beg) >> (64 - f->size);

    /* Sign-extend, or un-swap the field as necessary.  */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        assert(f->size <= 32);
        /* Sign-extend from bit (size - 1) via the xor/subtract trick.  */
        m = 1u << (f->size - 1);
        r = (r ^ m) - m;
        break;
    case 2: /* dl+dh split, signed 20 bit. */
        /* The insn encodes DL before DH, so R holds DL in its top 12
           bits and DH in its low 8; reassemble as DH:DL, sign-extending
           from DH via the int8_t cast.  */
        r = ((int8_t)r << 12) | (r >> 8);
        break;
    case 3: /* MSB stored in RXB */
        g_assert(f->size == 4);
        /* The register number's fifth (most significant) bit lives in
           the RXB field, insn bits 36-39; which RXB bit applies depends
           on where the 4-bit field itself begins.  */
        switch (f->beg) {
        case 8:
            r |= extract64(insn, 63 - 36, 1) << 4;
            break;
        case 12:
            r |= extract64(insn, 63 - 37, 1) << 4;
            break;
        case 16:
            r |= extract64(insn, 63 - 38, 1) << 4;
            break;
        case 32:
            r |= extract64(insn, 63 - 39, 1) << 4;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    default:
        abort();
    }

    /* Validate that the "compressed" encoding we selected above is valid.
       I.e. we haven't made two different original fields overlap.  */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;

    o->c[f->indexC] = r;
}
6281
/* Lookup the insn at the current PC, extracting the operands into O and
   returning the info struct for the insn.  Returns NULL for invalid insn.  */

static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s)
{
    uint64_t insn, pc = s->base.pc_next;
    int op, op2, ilen;
    const DisasInsn *info;

    if (unlikely(s->ex_value)) {
        /* Drop the EX data now, so that it's clear on exception paths.  */
        TCGv_i64 zero = tcg_const_i64(0);
        tcg_gen_st_i64(zero, cpu_env, offsetof(CPUS390XState, ex_value));
        tcg_temp_free_i64(zero);

        /* Extract the values saved by EXECUTE: the insn bytes are
           left-aligned in ex_value and the low nibble holds the ILEN.  */
        insn = s->ex_value & 0xffffffffffff0000ull;
        ilen = s->ex_value & 0xf;
        op = insn >> 56;
    } else {
        /* Read the first halfword; its major opcode determines the
           instruction length, then left-align the full insn image.  */
        insn = ld_code2(env, pc);
        op = (insn >> 8) & 0xff;
        ilen = get_ilen(op);
        switch (ilen) {
        case 2:
            insn = insn << 48;
            break;
        case 4:
            insn = ld_code4(env, pc) << 32;
            break;
        case 6:
            insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
            break;
        default:
            g_assert_not_reached();
        }
    }
    /* pc_tmp is the address of the following instruction.  */
    s->pc_tmp = s->base.pc_next + ilen;
    s->ilen = ilen;

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0. */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE, IE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        op2 = (insn << 12) >> 60;
        break;
    case 0xc5: /* MII */
    case 0xc7: /* SMI */
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(&s->fields, 0, sizeof(s->fields));
    s->fields.raw_insn = insn;
    s->fields.op = op;
    s->fields.op2 = op2;

    /* Lookup the instruction.  */
    info = lookup_opc(op << 8 | op2);
    s->insn = info;

    /* If we found it, extract the operands.  */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(&s->fields, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
6385
static bool is_afp_reg(int reg)
{
    /* f0, f2, f4 and f6 exist without the AFP facility; any odd
       register, or one above 6, is an additional FP register.  */
    return (reg & 1) != 0 || reg >= 7;
}
6390
static bool is_fp_pair(int reg)
{
    /*
     * Registers that may name a 128-bit FP pair: 0,1,4,5,8,9,12,13.
     * These are exactly the register numbers with bit 1 clear.
     */
    return (reg & 0x2) == 0;
}
6396
/*
 * Decode and emit TCG code for a single instruction at s->base.pc_next.
 * Returns the resulting DisasJumpType; DISAS_NORETURN means an exception
 * (illegal opcode, privilege, data, or specification) has been raised.
 */
static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    DisasJumpType ret = DISAS_NEXT;
    DisasOps o = {};
    bool icount = false;

    /* Search for the insn in the table.  */
    insn = extract_insn(env, s);

    /* Emit insn_start now that we know the ILEN.  */
    tcg_gen_insn_start(s->base.pc_next, s->cc_op, s->ilen);

    /* Not found means unimplemented/illegal opcode.  */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      s->fields.op, s->fields.op2);
        gen_illegal_opcode(s);
        return DISAS_NORETURN;
    }

#ifndef CONFIG_USER_ONLY
    /* With PER tracing active, report the instruction fetch.  */
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 addr = tcg_const_i64(s->base.pc_next);
        gen_helper_per_ifetch(cpu_env, addr);
        tcg_temp_free_i64(addr);
    }
#endif

    /* process flags */
    if (insn->flags) {
        /* privileged instruction */
        if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) {
            gen_program_exception(s, PGM_PRIVILEGED);
            return DISAS_NORETURN;
        }

        /* if AFP is not enabled, instructions and registers are forbidden */
        if (!(s->base.tb->flags & FLAG_MASK_AFP)) {
            /* dxc selects the data-exception code; later checks take
               precedence over earlier ones.  */
            uint8_t dxc = 0;

            if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(s, r1))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(s, r2))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(s, r3))) {
                dxc = 1;
            }
            if (insn->flags & IF_BFP) {
                dxc = 2;
            }
            if (insn->flags & IF_DFP) {
                dxc = 3;
            }
            if (insn->flags & IF_VEC) {
                dxc = 0xfe;
            }
            if (dxc) {
                gen_data_exception(dxc);
                return DISAS_NORETURN;
            }
        }

        /* if vector instructions not enabled, executing them is forbidden */
        if (insn->flags & IF_VEC) {
            if (!((s->base.tb->flags & FLAG_MASK_VECTOR))) {
                gen_data_exception(0xfe);
                return DISAS_NORETURN;
            }
        }

        /* input/output is the special case for icount mode */
        if (unlikely(insn->flags & IF_IO)) {
            icount = tb_cflags(s->base.tb) & CF_USE_ICOUNT;
            if (icount) {
                gen_io_start();
            }
        }
    }

    /* Check for insn specification exceptions.  */
    if (insn->spec) {
        if ((insn->spec & SPEC_r1_even && get_field(s, r1) & 1) ||
            (insn->spec & SPEC_r2_even && get_field(s, r2) & 1) ||
            (insn->spec & SPEC_r3_even && get_field(s, r3) & 1) ||
            (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(s, r1))) ||
            (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(s, r2)))) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
    }

    /* Implement the instruction.  Each helper stage is optional: load
       inputs, prepare outputs, perform the operation, write results,
       then compute the condition code.  */
    if (insn->help_in1) {
        insn->help_in1(s, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (ret != DISAS_NORETURN) {
        if (insn->help_wout) {
            insn->help_wout(s, &o);
        }
        if (insn->help_cout) {
            insn->help_cout(s, &o);
        }
    }

    /* Free any temporaries created by the helpers (the g_* flags mark
       globals that must not be freed).  */
    if (o.out && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (o.out2 && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (o.in1 && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (o.in2 && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (o.addr1) {
        tcg_temp_free_i64(o.addr1);
    }

    /* io should be the last instruction in tb when icount is enabled */
    if (unlikely(icount && ret == DISAS_NEXT)) {
        ret = DISAS_PC_STALE;
    }

#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        /* An exception might be triggered, save PSW if not already done.  */
        if (ret == DISAS_NEXT || ret == DISAS_PC_STALE) {
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
        }

        /* Call the helper to check for a possible PER exception.  */
        gen_helper_per_check_exception(cpu_env);
    }
#endif

    /* Advance to the next instruction.  */
    s->base.pc_next = s->pc_tmp;
    return ret;
}
6551
6552static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
6553{
6554    DisasContext *dc = container_of(dcbase, DisasContext, base);
6555
6556    /* 31-bit mode */
6557    if (!(dc->base.tb->flags & FLAG_MASK_64)) {
6558        dc->base.pc_first &= 0x7fffffff;
6559        dc->base.pc_next = dc->base.pc_first;
6560    }
6561
6562    dc->cc_op = CC_OP_DYNAMIC;
6563    dc->ex_value = dc->base.tb->cs_base;
6564    dc->do_debug = dc->base.singlestep_enabled;
6565}
6566
static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
    /* No TB-start work is needed for s390x.  */
}
6570
static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    /* Nothing to do here: insn_start is emitted by translate_one,
       once the instruction length is known.  */
}
6574
6575static bool s390x_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
6576                                      const CPUBreakpoint *bp)
6577{
6578    DisasContext *dc = container_of(dcbase, DisasContext, base);
6579
6580    /*
6581     * Emit an insn_start to accompany the breakpoint exception.
6582     * The ILEN value is a dummy, since this does not result in
6583     * an s390x exception, but an internal qemu exception which
6584     * brings us back to interact with the gdbstub.
6585     */
6586    tcg_gen_insn_start(dc->base.pc_next, dc->cc_op, 2);
6587
6588    dc->base.is_jmp = DISAS_PC_STALE;
6589    dc->do_debug = true;
6590    /* The address covered by the breakpoint must be included in
6591       [tb->pc, tb->pc + tb->size) in order to for it to be
6592       properly cleared -- thus we increment the PC here so that
6593       the logic setting tb->size does the right thing.  */
6594    dc->base.pc_next += 2;
6595    return true;
6596}
6597
6598static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
6599{
6600    CPUS390XState *env = cs->env_ptr;
6601    DisasContext *dc = container_of(dcbase, DisasContext, base);
6602
6603    dc->base.is_jmp = translate_one(env, dc);
6604    if (dc->base.is_jmp == DISAS_NEXT) {
6605        uint64_t page_start;
6606
6607        page_start = dc->base.pc_first & TARGET_PAGE_MASK;
6608        if (dc->base.pc_next - page_start >= TARGET_PAGE_SIZE || dc->ex_value) {
6609            dc->base.is_jmp = DISAS_TOO_MANY;
6610        }
6611    }
6612}
6613
/* Translator hook: finish the TB -- flush PSW address and cc-op state
   as required by the exit reason, then emit the TB exit.  */
static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* Note the deliberate fallthrough cascade: each case adds the state
       flushing the cases below it still need.  */
    switch (dc->base.is_jmp) {
    case DISAS_GOTO_TB:
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
    case DISAS_PC_STALE:
    case DISAS_PC_STALE_NOCHAIN:
        update_psw_addr(dc);
        /* FALLTHRU */
    case DISAS_PC_UPDATED:
        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
           cc op type is in env */
        update_cc_op(dc);
        /* FALLTHRU */
    case DISAS_PC_CC_UPDATED:
        /* Exit the TB, either by raising a debug exception or by return.  */
        if (dc->do_debug) {
            gen_exception(EXCP_DEBUG);
        } else if (use_exit_tb(dc) ||
                   dc->base.is_jmp == DISAS_PC_STALE_NOCHAIN) {
            tcg_gen_exit_tb(NULL, 0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        break;
    default:
        g_assert_not_reached();
    }
}
6647
6648static void s390x_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
6649{
6650    DisasContext *dc = container_of(dcbase, DisasContext, base);
6651
6652    if (unlikely(dc->ex_value)) {
6653        /* ??? Unfortunately log_target_disas can't use host memory.  */
6654        qemu_log("IN: EXECUTE %016" PRIx64, dc->ex_value);
6655    } else {
6656        qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
6657        log_target_disas(cs, dc->base.pc_first, dc->base.tb->size);
6658    }
6659}
6660
/* Hook table wiring the s390x decoder into the generic translator loop.  */
static const TranslatorOps s390x_tr_ops = {
    .init_disas_context = s390x_tr_init_disas_context,
    .tb_start           = s390x_tr_tb_start,
    .insn_start         = s390x_tr_insn_start,
    .breakpoint_check   = s390x_tr_breakpoint_check,
    .translate_insn     = s390x_tr_translate_insn,
    .tb_stop            = s390x_tr_tb_stop,
    .disas_log          = s390x_tr_disas_log,
};
6670
6671void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
6672{
6673    DisasContext dc;
6674
6675    translator_loop(&s390x_tr_ops, &dc.base, cs, tb, max_insns);
6676}
6677
6678void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
6679                          target_ulong *data)
6680{
6681    int cc_op = data[1];
6682
6683    env->psw.addr = data[0];
6684
6685    /* Update the CC opcode if it is not already up-to-date.  */
6686    if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
6687        env->cc_op = cc_op;
6688    }
6689
6690    /* Record ILEN.  */
6691    env->int_pgm_ilen = data[2];
6692}
6693