/* qemu/target/s390x/tcg/translate.c */
   1/*
   2 *  S/390 translation
   3 *
   4 *  Copyright (c) 2009 Ulrich Hecht
   5 *  Copyright (c) 2010 Alexander Graf
   6 *
   7 * This library is free software; you can redistribute it and/or
   8 * modify it under the terms of the GNU Lesser General Public
   9 * License as published by the Free Software Foundation; either
  10 * version 2.1 of the License, or (at your option) any later version.
  11 *
  12 * This library is distributed in the hope that it will be useful,
  13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  15 * Lesser General Public License for more details.
  16 *
  17 * You should have received a copy of the GNU Lesser General Public
  18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
  19 */
  20
  21/* #define DEBUG_INLINE_BRANCHES */
  22#define S390X_DEBUG_DISAS
  23/* #define S390X_DEBUG_DISAS_VERBOSE */
  24
  25#ifdef S390X_DEBUG_DISAS_VERBOSE
  26#  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
  27#else
  28#  define LOG_DISAS(...) do { } while (0)
  29#endif
  30
  31#include "qemu/osdep.h"
  32#include "cpu.h"
  33#include "s390x-internal.h"
  34#include "disas/disas.h"
  35#include "exec/exec-all.h"
  36#include "tcg/tcg-op.h"
  37#include "tcg/tcg-op-gvec.h"
  38#include "qemu/log.h"
  39#include "qemu/host-utils.h"
  40#include "exec/cpu_ldst.h"
  41#include "exec/gen-icount.h"
  42#include "exec/helper-proto.h"
  43#include "exec/helper-gen.h"
  44
  45#include "exec/translator.h"
  46#include "exec/log.h"
  47#include "qemu/atomic128.h"
  48
  49
  50/* Information that (most) every instruction needs to manipulate.  */
  51typedef struct DisasContext DisasContext;
  52typedef struct DisasInsn DisasInsn;
  53typedef struct DisasFields DisasFields;
  54
  55/*
  56 * Define a structure to hold the decoded fields.  We'll store each inside
  57 * an array indexed by an enum.  In order to conserve memory, we'll arrange
  58 * for fields that do not exist at the same time to overlap, thus the "C"
  59 * for compact.  For checking purposes there is an "O" for original index
  60 * as well that will be applied to availability bitmaps.
  61 */
  62
/*
 * "Original" field indices, one per distinct instruction field name.
 * Each value is used as a bit position in DisasFields.presentO to
 * record which fields a decoded instruction actually provides.
 */
enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_m5,
    FLD_O_m6,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5,
    FLD_O_v1,
    FLD_O_v2,
    FLD_O_v3,
    FLD_O_v4,
};
  91
/*
 * "Compact" field indices.  Fields that never co-exist in a single
 * instruction format share a slot in DisasFields.c[], which is why
 * several enumerators map to the same value.
 */
enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,
    FLD_C_v1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,
    FLD_C_v3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,
    FLD_C_v4 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,
    FLD_C_m5 = 4,

    FLD_C_d2 = 5,
    FLD_C_m6 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,
    FLD_C_v2 = 6,

    /* Number of compact slots actually stored per instruction. */
    NUM_C_FIELD = 7
};
 128
/* Decoded fields of one instruction. */
struct DisasFields {
    uint64_t raw_insn;      /* the raw instruction bytes, left-aligned */
    unsigned op:8;          /* primary opcode */
    unsigned op2:8;         /* secondary opcode, where the format has one */
    unsigned presentC:16;   /* bitmap of valid compact (C) slots */
    unsigned int presentO;  /* bitmap of present original (O) fields */
    int c[NUM_C_FIELD];     /* extracted field values, indexed by FLD_C_* */
};

struct DisasContext {
    DisasContextBase base;
    const DisasInsn *insn;      /* insn currently being translated */
    TCGOp *insn_start;
    DisasFields fields;
    uint64_t ex_value;
    /*
     * During translate_one(), pc_tmp is used to determine the instruction
     * to be executed after base.pc_next - e.g. next sequential instruction
     * or a branch target.
     */
    uint64_t pc_tmp;
    uint32_t ilen;              /* length of the current insn, in bytes */
    enum cc_op cc_op;           /* symbolic condition-code state */
};
 153
/* Information carried about a condition to be evaluated.  */
typedef struct {
    TCGCond cond:8;
    bool is_64;     /* true: compare u.s64; false: compare u.s32 */
    bool g1;        /* operand a is a global temp - do not free it */
    bool g2;        /* operand b is a global temp - do not free it */
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;
 165
 166#ifdef DEBUG_INLINE_BRANCHES
 167static uint64_t inline_branch_hit[CC_OP_MAX];
 168static uint64_t inline_branch_miss[CC_OP_MAX];
 169#endif
 170
/*
 * Write the link information for PC into OUT, in the format implied by
 * the current addressing mode:
 *  - 64-bit (FLAG_MASK_32 and FLAG_MASK_64 set): the full 64-bit address;
 *  - 31-bit (FLAG_MASK_32 only): the address with the top bit of the
 *    32-bit field set, deposited into the low 32 bits of OUT;
 *  - 24-bit: the address as-is, deposited into the low 32 bits of OUT.
 * In the non-64-bit cases the high half of OUT is preserved.
 */
static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
{
    TCGv_i64 tmp;

    if (s->base.tb->flags & FLAG_MASK_32) {
        if (s->base.tb->flags & FLAG_MASK_64) {
            tcg_gen_movi_i64(out, pc);
            return;
        }
        pc |= 0x80000000;
    }
    assert(!(s->base.tb->flags & FLAG_MASK_64));
    tmp = tcg_const_i64(pc);
    tcg_gen_deposit_i64(out, out, tmp, 0, 32);
    tcg_temp_free_i64(tmp);
}
 187
/* TCG globals mapped onto CPUS390XState; created in s390x_translate_init. */
static TCGv_i64 psw_addr;       /* psw.addr */
static TCGv_i64 psw_mask;       /* psw.mask */
static TCGv_i64 gbea;           /* breaking-event address (PER) */

static TCGv_i32 cc_op;          /* current cc computation method / value */
static TCGv_i64 cc_src;         /* first cc operand */
static TCGv_i64 cc_dst;         /* second cc operand */
static TCGv_i64 cc_vr;          /* third cc operand (result) */

static char cpu_reg_names[16][4];   /* backing storage for "r0".."r15" */
static TCGv_i64 regs[16];           /* general registers r0-r15 */
 199
/*
 * Create the TCG globals backing the PSW, condition-code state and the
 * 16 general registers.  Called once at target init.
 */
void s390x_translate_init(void)
{
    int i;

    psw_addr = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");
    gbea = tcg_global_mem_new_i64(cpu_env,
                                  offsetof(CPUS390XState, gbea),
                                  "gbea");

    cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    /* The register names must outlive the TCG context, hence the
       static cpu_reg_names backing store.  */
    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }
}
 230
/* Return the env offset of the first byte of vector register REG. */
static inline int vec_full_reg_offset(uint8_t reg)
{
    g_assert(reg < 32);
    return offsetof(CPUS390XState, vregs[reg][0]);
}
 236
/*
 * Return the env offset of element ENR (of size ES) within vector
 * register REG, accounting for host endianness.
 */
static inline int vec_reg_offset(uint8_t reg, uint8_t enr, MemOp es)
{
    /* Convert element size (es) - e.g. MO_8 - to bytes */
    const uint8_t bytes = 1 << es;
    int offs = enr * bytes;

    /*
     * vregs[n][0] is the lowest 8 byte and vregs[n][1] the highest 8 byte
     * of the 16 byte vector, on both, little and big endian systems.
     *
     * Big Endian (target/possible host)
     * B:  [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
     * HW: [     0][     1][     2][     3] - [     4][     5][     6][     7]
     * W:  [             0][             1] - [             2][             3]
     * DW: [                             0] - [                             1]
     *
     * Little Endian (possible host)
     * B:  [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
     * HW: [     3][     2][     1][     0] - [     7][     6][     5][     4]
     * W:  [             1][             0] - [             3][             2]
     * DW: [                             0] - [                             1]
     *
     * For 16 byte elements, the two 8 byte halves will not form a host
     * int128 if the host is little endian, since they're in the wrong order.
     * Some operations (e.g. xor) do not care. For operations like addition,
     * the two 8 byte elements have to be loaded separately. Let's force all
     * 16 byte operations to handle it in a special way.
     */
    g_assert(es <= MO_64);
#ifndef HOST_WORDS_BIGENDIAN
    /* Flip the in-doubleword byte position per the tables above. */
    offs ^= (8 - bytes);
#endif
    return offs + vec_full_reg_offset(reg);
}
 271
/* FP register REG occupies the high doubleword of vector register REG. */
static inline int freg64_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_64);
}

/* Short (32-bit) FP register REG: the leftmost word of the FP register. */
static inline int freg32_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_32);
}
 283
 284static TCGv_i64 load_reg(int reg)
 285{
 286    TCGv_i64 r = tcg_temp_new_i64();
 287    tcg_gen_mov_i64(r, regs[reg]);
 288    return r;
 289}
 290
 291static TCGv_i64 load_freg(int reg)
 292{
 293    TCGv_i64 r = tcg_temp_new_i64();
 294
 295    tcg_gen_ld_i64(r, cpu_env, freg64_offset(reg));
 296    return r;
 297}
 298
 299static TCGv_i64 load_freg32_i64(int reg)
 300{
 301    TCGv_i64 r = tcg_temp_new_i64();
 302
 303    tcg_gen_ld32u_i64(r, cpu_env, freg32_offset(reg));
 304    return r;
 305}
 306
/* Store V into general register REG in its entirety. */
static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

/* Store V into FP register REG (64-bit). */
static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_st_i64(v, cpu_env, freg64_offset(reg));
}

static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

/* Store the low 32 bits of V into the high half of register REG. */
static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

/* Store the low 32 bits of V into short FP register REG. */
static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_st32_i64(v, cpu_env, freg32_offset(reg));
}

/* Fetch the low half of a 128-bit helper result from env->retxl. */
static void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}
 337
/* Synchronize the architectural psw.addr with the translation PC. */
static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->base.pc_next);
}
 343
/*
 * Record a branch for PER (Program Event Recording).  The breaking-event
 * address is updated unconditionally; the PER branch helper is invoked
 * only when PER is enabled in the TB flags.  TO_NEXT selects whether the
 * branch target is the next sequential insn (s->pc_tmp) or psw_addr.
 */
static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i64(gbea, s->base.pc_next);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_const_i64(s->pc_tmp) : psw_addr;
        gen_helper_per_branch(cpu_env, gbea, next_pc);
        if (to_next) {
            /* psw_addr is a global; only free the constant we created. */
            tcg_temp_free_i64(next_pc);
        }
    }
#endif
}
 358
/*
 * Record a conditional branch for PER: if COND holds for ARG1/ARG2, the
 * branch is considered taken.  With PER enabled, invoke the PER branch
 * helper on the taken path; otherwise just conditionally update the
 * breaking-event address via movcond.
 */
static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        /* Skip the PER helper on the not-taken path. */
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->base.pc_next);
        gen_helper_per_branch(cpu_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        TCGv_i64 pc = tcg_const_i64(s->base.pc_next);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
        tcg_temp_free_i64(pc);
    }
#endif
}
 378
/* Record the current PC as the PER breaking-event address. */
static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->base.pc_next);
}

/* Flush the symbolic cc_op into the cc_op global, unless it is already
   there (STATIC) or unknown (DYNAMIC). */
static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}
 390
 391static inline uint64_t ld_code2(CPUS390XState *env, DisasContext *s,
 392                                uint64_t pc)
 393{
 394    return (uint64_t)translator_lduw(env, &s->base, pc);
 395}
 396
 397static inline uint64_t ld_code4(CPUS390XState *env, DisasContext *s,
 398                                uint64_t pc)
 399{
 400    return (uint64_t)(uint32_t)translator_ldl(env, &s->base, pc);
 401}
 402
/*
 * Return the MMU index to use for data accesses of the current insn,
 * derived from the TB flags: real mode when DAT is off, otherwise the
 * PSW address-space control (primary/secondary/home).
 */
static int get_mem_index(DisasContext *s)
{
#ifdef CONFIG_USER_ONLY
    return MMU_USER_IDX;
#else
    if (!(s->base.tb->flags & FLAG_MASK_DAT)) {
        return MMU_REAL_IDX;
    }

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_PRIMARY_IDX;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_SECONDARY_IDX;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        return MMU_HOME_IDX;
    default:
        /* PSW_ASC_ACCREG is not a valid mode for instruction fetch. */
        tcg_abort();
        break;
    }
#endif
}
 425
 426static void gen_exception(int excp)
 427{
 428    TCGv_i32 tmp = tcg_const_i32(excp);
 429    gen_helper_exception(cpu_env, tmp);
 430    tcg_temp_free_i32(tmp);
 431}
 432
/*
 * Raise a program exception with the given CODE, after saving the pgm
 * interrupt parameters, the PSW address, and the cc state into env.
 */
static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was.  */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    tmp = tcg_const_i32(s->ilen);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* update the psw */
    update_psw_addr(s);

    /* Save off cc.  */
    update_cc_op(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);
}

/* Raise an operation exception (illegal opcode). */
static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}

/* Raise a data exception with data-exception code DXC. */
static inline void gen_data_exception(uint8_t dxc)
{
    TCGv_i32 tmp = tcg_const_i32(dxc);
    gen_helper_data_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static inline void gen_trap(DisasContext *s)
{
    /* Set DXC to 0xff */
    gen_data_exception(0xff);
}
 473
 474static void gen_addi_and_wrap_i64(DisasContext *s, TCGv_i64 dst, TCGv_i64 src,
 475                                  int64_t imm)
 476{
 477    tcg_gen_addi_i64(dst, src, imm);
 478    if (!(s->base.tb->flags & FLAG_MASK_64)) {
 479        if (s->base.tb->flags & FLAG_MASK_32) {
 480            tcg_gen_andi_i64(dst, dst, 0x7fffffff);
 481        } else {
 482            tcg_gen_andi_i64(dst, dst, 0x00ffffff);
 483        }
 484    }
 485}
 486
/*
 * Compute the effective address base(b2) + index(x2) + displacement(d2),
 * wrapped to the current addressing mode.  Returns a new temporary that
 * the caller must free.  Register number 0 means "no register".
 */
static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /*
     * Note that d2 is limited to 20 bits, signed.  If we crop negative
     * displacements early we create larger immedate addends.
     */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        gen_addi_and_wrap_i64(s, tmp, tmp, d2);
    } else if (b2) {
        gen_addi_and_wrap_i64(s, tmp, regs[b2], d2);
    } else if (x2) {
        gen_addi_and_wrap_i64(s, tmp, regs[x2], d2);
    } else if (!(s->base.tb->flags & FLAG_MASK_64)) {
        /* Pure displacement: wrap to 31 or 24 bits at translation time. */
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_movi_i64(tmp, d2 & 0x7fffffff);
        } else {
            tcg_gen_movi_i64(tmp, d2 & 0x00ffffff);
        }
    } else {
        tcg_gen_movi_i64(tmp, d2);
    }

    return tmp;
}
 514
 515static inline bool live_cc_data(DisasContext *s)
 516{
 517    return (s->cc_op != CC_OP_DYNAMIC
 518            && s->cc_op != CC_OP_STATIC
 519            && s->cc_op > 3);
 520}
 521
/* Set the cc to the constant VAL (0-3), discarding any live cc data. */
static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}

/* Defer a 1-operand cc computation: cc = OP(dst). */
static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

/* Defer a 2-operand cc computation: cc = OP(src, dst). */
static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

/* Defer a 3-operand cc computation: cc = OP(src, dst, vr). */
static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}

/* Set cc from a nonzero test of the 64-bit value VAL. */
static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}
 577
/*
 * Materialize the deferred condition code into the cc_op global by
 * calling the calc_cc helper with the right number of operands for
 * s->cc_op.  Afterwards the cc state is CC_OP_STATIC.
 */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = NULL;
    TCGv_i64 dummy = NULL;

    /* First pass: decide which constants the helper call needs. */
    switch (s->cc_op) {
    default:
        dummy = tcg_const_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        local_cc_op = tcg_const_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        /* No helper call needed (or cc_op comes from env). */
        break;
    }

    /* Second pass: emit the cc computation itself. */
    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
    case CC_OP_LCBB:
    case CC_OP_MULS_32:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ADDU:
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA_32:
    case CC_OP_SLA_64:
    case CC_OP_SUBU:
    case CC_OP_NZ_F128:
    case CC_OP_VC:
    case CC_OP_MULS_64:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    if (local_cc_op) {
        tcg_temp_free_i32(local_cc_op);
    }
    if (dummy) {
        tcg_temp_free_i64(dummy);
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
 673
/* True if a direct goto_tb to DEST is permitted; PER forces the slow path. */
static bool use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (unlikely(s->base.tb->flags & FLAG_MASK_PER)) {
        return false;
    }
    return translator_use_goto_tb(&s->base, dest);
}

/* Debug accounting: a branch on CC_OP that could not be inlined. */
static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

/* Debug accounting: a branch on CC_OP that was inlined. */
static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}
 695
/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.  Indexed by the 4-bit branch
   mask; entries come in pairs because the low mask bit (CC=3) is
   ignored.  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};

/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible.  */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER,
    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE,
    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
 721
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  Where the cc op and
   mask admit it, the comparison is expressed directly on the deferred
   cc operands (inline branch); otherwise the cc is materialized first
   and compared against the mask (non-inline branch).  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    /* Mask 15 = always, mask 0 = never; no operands needed. */
    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op.  */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:     /* all bits zero */
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:     /* some bits nonzero */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:     /* all inserted bytes zero */
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:     /* some inserted bits nonzero */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        switch (mask) {
        case 8 | 2: /* result == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* result != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* !carry (borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_EQ : TCG_COND_NE;
            break;
        case 2 | 1: /* carry (!borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_NE : TCG_COND_EQ;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        c->is_64 = true;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        switch (mask) {
        case 8 | 2:
        case 4 | 1: /* result */
            c->u.s64.a = cc_dst;
            break;
        case 8 | 4:
        case 2 | 1: /* carry */
            c->u.s64.a = cc_src;
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case CC_OP_STATIC:
        /* The cc value is already in cc_op; compare it against the mask. */
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask.  */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
 984
 985static void free_compare(DisasCompare *c)
 986{
 987    if (!c->g1) {
 988        if (c->is_64) {
 989            tcg_temp_free_i64(c->u.s64.a);
 990        } else {
 991            tcg_temp_free_i32(c->u.s32.a);
 992        }
 993    }
 994    if (!c->g2) {
 995        if (c->is_64) {
 996            tcg_temp_free_i64(c->u.s64.b);
 997        } else {
 998            tcg_temp_free_i32(c->u.s32.b);
 999        }
1000    }
1001}
1002
/* ====================================================================== */
/* Define the insn format enumeration.  Each F<n> macro expands a format
   entry from insn-format.def to a single FMT_<name> enumerator, ignoring
   the field descriptors.  */
#define F0(N)                         FMT_##N,
#define F1(N, X1)                     F0(N)
#define F2(N, X1, X2)                 F0(N)
#define F3(N, X1, X2, X3)             F0(N)
#define F4(N, X1, X2, X3, X4)         F0(N)
#define F5(N, X1, X2, X3, X4, X5)     F0(N)
#define F6(N, X1, X2, X3, X4, X5, X6) F0(N)

typedef enum {
#include "insn-format.def"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6
1024
/* This is the way fields are to be accessed out of DisasFields.  */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)

/* True if original field C was decoded for the current insn. */
static bool have_field1(const DisasContext *s, enum DisasFieldIndexO c)
{
    return (s->fields.presentO >> c) & 1;
}

/* Fetch the value of field O from its compact slot C; the field must
   be present. */
static int get_field1(const DisasContext *s, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(s, o));
    return s->fields.c[c];
}
1040
/* Describe the layout of each field in each format.  */
typedef struct DisasField {
    unsigned int beg:8;      /* first bit of the field within the insn */
    unsigned int size:8;     /* field width in bits */
    unsigned int type:2;     /* extraction rule: 0 plain, 1 immediate,
                                2 long displacement, 3 vector register --
                                NOTE(review): confirm against extract_field */
    unsigned int indexC:6;   /* storage slot in DisasFields.c[] (FLD_C_*) */
    enum DisasFieldIndexO indexO:8;  /* presence-bit position (FLD_O_*) */
} DisasField;

/* Per-format list of operand fields, built from insn-format.def.  */
typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;
1053
/* Operand-field constructors used by insn-format.def.  The first macro
   argument is the operand number, the rest are bit positions within the
   instruction: R = gpr, M = mask, V = vector reg, BD = base + 12-bit
   displacement, BXD adds an index register, BDL/BXDL use a 20-bit long
   displacement, I = immediate, L = length field.  */
#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define V(N, B)       {  B,  4, 3, FLD_C_v##N, FLD_O_v##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }

/* Re-expand insn-format.def, this time keeping the field descriptors,
   to build the per-format field-layout table.  */
#define F0(N)                     { { } },
#define F1(N, X1)                 { { X1 } },
#define F2(N, X1, X2)             { { X1, X2 } },
#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
#define F6(N, X1, X2, X3, X4, X5, X6)       { { X1, X2, X3, X4, X5, X6 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.def"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6
#undef R
#undef M
#undef V
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L
1098
/* Generally, we'll extract operands into this structures, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */
typedef struct {
    /* g_* flags mark the matching TCGv as a global (e.g. a cpu register
       in regs[]) that must not be freed by generic cleanup.  */
    bool g_out, g_out2, g_in1, g_in2;
    TCGv_i64 out, out2, in1, in2;  /* operand values */
    TCGv_i64 addr1;                /* effective address, when computed */
} DisasOps;
1107
/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */

#define SPEC_r1_even    1       /* r1 must be an even register number */
#define SPEC_r2_even    2       /* r2 must be an even register number */
#define SPEC_r3_even    4       /* r3 must be an even register number */
#define SPEC_r1_f128    8       /* r1 must name a valid 128-bit fp pair */
#define SPEC_r2_f128    16      /* r2 must name a valid 128-bit fp pair */

/* Return values from translate_one, indicating the state of the TB.  */

/* We are not using a goto_tb (for whatever reason), but have updated
   the PC (for whatever reason), so there's no need to do it again on
   exiting the TB.  */
#define DISAS_PC_UPDATED        DISAS_TARGET_0

/* We have emitted one or more goto_tb.  No fixup required.  */
#define DISAS_GOTO_TB           DISAS_TARGET_1

/* We have updated the PC and CC values.  */
#define DISAS_PC_CC_UPDATED     DISAS_TARGET_2

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the PC for the next instruction to be executed.  */
#define DISAS_PC_STALE          DISAS_TARGET_3

/* We are exiting the TB to the main loop.  */
#define DISAS_PC_STALE_NOCHAIN  DISAS_TARGET_4


/* Instruction flags */
#define IF_AFP1     0x0001      /* r1 is a fp reg for HFP/FPS instructions */
#define IF_AFP2     0x0002      /* r2 is a fp reg for HFP/FPS instructions */
#define IF_AFP3     0x0004      /* r3 is a fp reg for HFP/FPS instructions */
#define IF_BFP      0x0008      /* binary floating point instruction */
#define IF_DFP      0x0010      /* decimal floating point instruction */
#define IF_PRIV     0x0020      /* privileged instruction */
#define IF_VEC      0x0040      /* vector instruction */
#define IF_IO       0x0080      /* input/output instruction */
1150
/* Static description of one decoded instruction.  */
struct DisasInsn {
    unsigned opc:16;        /* opcode bits used to match the insn */
    unsigned flags:16;      /* IF_* flags */
    DisasFormat fmt:8;      /* instruction format (FMT_*) */
    unsigned fac:8;         /* required facility -- TODO confirm enum used */
    unsigned spec:8;        /* SPEC_* operand constraints */

    const char *name;       /* mnemonic, for logging/debug */

    /* Pre-process arguments before HELP_OP.  */
    void (*help_in1)(DisasContext *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasOps *);

    /*
     * Post-process output after HELP_OP.
     * Note that these are not called if HELP_OP returns DISAS_NORETURN.
     */
    void (*help_wout)(DisasContext *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);

    /* Implement the operation itself.  */
    DisasJumpType (*help_op)(DisasContext *, DisasOps *);

    /* Per-insn constant; reachable in the helpers as s->insn->data.  */
    uint64_t data;
};
1177
1178/* ====================================================================== */
1179/* Miscellaneous helpers, used by several operations.  */
1180
1181static void help_l2_shift(DisasContext *s, DisasOps *o, int mask)
1182{
1183    int b2 = get_field(s, b2);
1184    int d2 = get_field(s, d2);
1185
1186    if (b2 == 0) {
1187        o->in2 = tcg_const_i64(d2 & mask);
1188    } else {
1189        o->in2 = get_address(s, 0, b2, d2);
1190        tcg_gen_andi_i64(o->in2, o->in2, mask);
1191    }
1192}
1193
/*
 * Emit an unconditional branch to the known address DEST.  A branch to
 * the next sequential insn is a no-op (modulo PER); otherwise chain via
 * goto_tb when allowed, else just update psw_addr and exit.
 */
static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->pc_tmp) {
        per_branch(s, true);
        return DISAS_NEXT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        per_breaking_event(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb(s->base.tb, 0);
        return DISAS_GOTO_TB;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    }
}
1213
/*
 * Emit a conditional branch.  The target is either relative immediate
 * IMM (in halfwords from the insn address) when IS_IMM, or the address
 * in CDEST.  Consumes *c: free_compare is called on every exit path.
 */
static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
                                 bool is_imm, int imm, TCGv_i64 cdest)
{
    DisasJumpType ret;
    uint64_t dest = s->base.pc_next + 2 * imm;
    TCGLabel *lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = DISAS_NEXT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->pc_tmp) {
            /* Branch to next.  */
            per_branch(s, true);
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (!cdest) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = DISAS_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->pc_tmp)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            /* Branch taken.  */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb(s->base.tb, 1);

            ret = DISAS_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = DISAS_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_const_i64(s->pc_tmp);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            /* Widen the 32-bit comparison result so that a single
               64-bit movcond can select the new psw_addr.  */
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = DISAS_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}
1344
1345/* ====================================================================== */
1346/* The operations.  These perform the bulk of the work for any insn,
1347   usually after the operands have been loaded and output initialized.  */
1348
/* LOAD POSITIVE: out = |in2|.  */
static DisasJumpType op_abs(DisasContext *s, DisasOps *o)
{
    tcg_gen_abs_i64(o->out, o->in2);
    return DISAS_NEXT;
}
1354
/* LOAD POSITIVE (short BFP): clear the sign bit of the 32-bit value
   held in the low half of the register.  */
static DisasJumpType op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return DISAS_NEXT;
}
1360
/* LOAD POSITIVE (long BFP): clear the sign bit (bit 63).  */
static DisasJumpType op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return DISAS_NEXT;
}
1366
/* LOAD POSITIVE (extended BFP): clear the sign bit in the high
   doubleword, copy the low doubleword through unchanged.  */
static DisasJumpType op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}
1373
/* ADD: out = in1 + in2; CC handled by the cout helper.  */
static DisasJumpType op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
1379
/* ADD LOGICAL (64-bit): out = in1 + in2, with the carry-out left
   in cc_src (0 or 1) via the double-word add.  */
static DisasJumpType op_addu64(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    return DISAS_NEXT;
}
1386
/* Compute carry into cc_src as 0 or 1, based on the current cc_op.  */
static void compute_carry(DisasContext *s)
{
    switch (s->cc_op) {
    case CC_OP_ADDU:
        /* The carry value is already in cc_src (1,0). */
        break;
    case CC_OP_SUBU:
        /* NOTE(review): presumably cc_src holds borrow as -1/0 here,
           so +1 yields carry as 0/1 -- confirm against CC_OP_SUBU users. */
        tcg_gen_addi_i64(cc_src, cc_src, 1);
        break;
    default:
        /* Materialize the condition code into cc_op first.  */
        gen_op_calc_cc(s);
        /* fall through */
    case CC_OP_STATIC:
        /* The carry flag is the msb of CC; compute into cc_src. */
        tcg_gen_extu_i32_i64(cc_src, cc_op);
        tcg_gen_shri_i64(cc_src, cc_src, 1);
        break;
    }
}
1407
/* ADD LOGICAL WITH CARRY (32-bit): out = in1 + in2 + carry.  Carry-out
   for the CC is derived later from the 32-bit inputs/result.  */
static DisasJumpType op_addc32(DisasContext *s, DisasOps *o)
{
    compute_carry(s);
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    tcg_gen_add_i64(o->out, o->out, cc_src);
    return DISAS_NEXT;
}
1415
/* ADD LOGICAL WITH CARRY (64-bit): out = in1 + in2 + carry, chaining
   two double-word adds so the final carry-out lands in cc_src.  */
static DisasJumpType op_addc64(DisasContext *s, DisasOps *o)
{
    compute_carry(s);

    TCGv_i64 zero = tcg_const_i64(0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, zero);
    tcg_gen_add2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);
    tcg_temp_free_i64(zero);

    return DISAS_NEXT;
}
1427
/*
 * ADD immediate to storage.  Without STFLE_45 the update is a plain
 * load/add/store; with it the add is done atomically in memory.
 * s->insn->data supplies the memory-op for the access width.
 */
static DisasJumpType op_asi(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
1449
/*
 * ADD LOGICAL immediate to storage (64-bit).  Same atomic/non-atomic
 * split as op_asi, but the recomputation also captures the carry-out
 * into cc_src for the logical CC.
 */
static DisasJumpType op_asiu64(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
1472
/* ADD (short BFP): defer to the helper for IEEE semantics.  */
static DisasJumpType op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
1478
/* ADD (long BFP): defer to the helper for IEEE semantics.  */
static DisasJumpType op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
1484
/* ADD (extended BFP): helper returns the high half directly and the
   low half via the 128-bit return convention.  */
static DisasJumpType op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
1491
/* AND: out = in1 & in2; CC handled by the cout helper.  */
static DisasJumpType op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
1497
/*
 * AND IMMEDIATE against one 16-bit (or other sized) slice of a register.
 * s->insn->data packs the slice: low byte = bit shift, next byte = width.
 * Bits outside the slice are forced to 1 in the mask operand so they
 * pass through in1 unchanged; the CC reflects only the modified bits.
 */
static DisasJumpType op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
1514
/*
 * AND to storage.  With the interlocked-access facility the AND is done
 * atomically in memory; otherwise load/and/store.  In both cases the
 * result is recomputed locally so the CC can be set from it.
 */
static DisasJumpType op_ni(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
1535
1536static DisasJumpType op_bas(DisasContext *s, DisasOps *o)
1537{
1538    pc_to_link_info(o->out, s, s->pc_tmp);
1539    if (o->in2) {
1540        tcg_gen_mov_i64(psw_addr, o->in2);
1541        per_branch(s, false);
1542        return DISAS_PC_UPDATED;
1543    } else {
1544        return DISAS_NEXT;
1545    }
1546}
1547
/*
 * Build the BAL-style link information in o->out.  In 31/64-bit mode
 * this is just the ordinary link address; in 24-bit mode the high byte
 * of the low word carries ILC (bits 30-31 of the word), the program
 * mask (from psw_mask) and the condition code, with the return address
 * in the low 24 bits.
 */
static void save_link_info(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t;

    if (s->base.tb->flags & (FLAG_MASK_32 | FLAG_MASK_64)) {
        pc_to_link_info(o->out, s, s->pc_tmp);
        return;
    }
    gen_op_calc_cc(s);
    /* Keep the high half of the register, splice in ILC + address.  */
    tcg_gen_andi_i64(o->out, o->out, 0xffffffff00000000ull);
    tcg_gen_ori_i64(o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp);
    t = tcg_temp_new_i64();
    /* Program mask from the PSW into bits 24-27 of the low word.  */
    tcg_gen_shri_i64(t, psw_mask, 16);
    tcg_gen_andi_i64(t, t, 0x0f000000);
    tcg_gen_or_i64(o->out, o->out, t);
    /* Condition code into bits 28-29.  */
    tcg_gen_extu_i32_i64(t, cc_op);
    tcg_gen_shli_i64(t, t, 28);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);
}
1568
1569static DisasJumpType op_bal(DisasContext *s, DisasOps *o)
1570{
1571    save_link_info(s, o);
1572    if (o->in2) {
1573        tcg_gen_mov_i64(psw_addr, o->in2);
1574        per_branch(s, false);
1575        return DISAS_PC_UPDATED;
1576    } else {
1577        return DISAS_NEXT;
1578    }
1579}
1580
/* Relative branch-and-save: save the link, then branch to the
   halfword-relative immediate target (presumably BRAS/BRASL --
   confirm against the insn-data tables).  */
static DisasJumpType op_basi(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    return help_goto_direct(s, s->base.pc_next + 2 * get_field(s, i2));
}
1586
/*
 * BRANCH ON CONDITION (BC/BCR and relative forms).  BCR with r2=0
 * never branches, but mask 14/15 acts as a serialization point.
 */
static DisasJumpType op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s, m1);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;

    /* BCR with R2 = 0 causes no branching */
    if (have_field(s, r2) && get_field(s, r2) == 0) {
        if (m1 == 14) {
            /* Perform serialization */
            /* FIXME: check for fast-BCR-serialization facility */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        if (m1 == 15) {
            /* Perform serialization */
            /* FIXME: perform checkpoint-synchronisation */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        return DISAS_NEXT;
    }

    /* Translate the CC mask into a comparison and branch on it.  */
    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1612
/*
 * BRANCH ON COUNT (32-bit): decrement the low word of r1 and branch
 * if the decremented value is non-zero.
 */
static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1636
/*
 * BRANCH ON COUNT high (BRCTH): decrement the high word of r1 and
 * branch if the decremented value is non-zero.  Always relative.
 */
static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int imm = get_field(s, i2);
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, regs[r1], 32);
    tcg_gen_subi_i64(t, t, 1);
    store_reg32h_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, 1, imm, o->in2);
}
1660
/*
 * BRANCH ON COUNT (64-bit): decrement r1 in place (the register is a
 * global, hence g1 = true) and branch if non-zero.
 */
static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1679
/*
 * BRANCH ON INDEX (32-bit, BXH/BXLE family): r1 += r3, then compare
 * against the comparand in the odd register of the r3 pair (r3 | 1).
 * s->insn->data selects low-or-equal (LE) vs high (GT).
 */
static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1705
/*
 * BRANCH ON INDEX (64-bit, BXHG/BXLEG family): r1 += r3, compare
 * against r3 | 1.  When r1 is the comparand register itself, snapshot
 * its value before the add so the comparison uses the old value.
 */
static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    if (r1 == (r3 | 1)) {
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1731
/*
 * COMPARE AND BRANCH family: branch when in1 <cond> in2, with the
 * condition taken from the m3 mask; s->insn->data selects the unsigned
 * (COMPARE LOGICAL) variants.  The target is either a halfword-relative
 * immediate (i4) or a base+displacement address (b4/d4).
 */
static DisasJumpType op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s, i4);
    if (is_imm) {
        imm = get_field(s, i4);
    } else {
        imm = 0;
        o->out = get_address(s, 0, get_field(s, b4),
                             get_field(s, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}
1757
/* COMPARE (short BFP): helper computes the CC directly.  */
static DisasJumpType op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
1764
/* COMPARE (long BFP): helper computes the CC directly.  */
static DisasJumpType op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
1771
/* COMPARE (extended BFP): helper computes the CC directly.  */
static DisasJumpType op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
1778
/*
 * Extract and validate the m3 (rounding mode) and m4 fields of an FP
 * instruction, packed as deposit32(m3, 4, 4, m4).  Fields that predate
 * the floating-point-extension facility are forced to zero when FPE is
 * absent.  Returns NULL (after raising a specification exception) for
 * an invalid rounding mode.
 */
static TCGv_i32 fpinst_extract_m34(DisasContext *s, bool m3_with_fpe,
                                   bool m4_with_fpe)
{
    const bool fpe = s390_has_feat(S390_FEAT_FLOATING_POINT_EXT);
    uint8_t m3 = get_field(s, m3);
    uint8_t m4 = get_field(s, m4);

    /* m3 field was introduced with FPE */
    if (!fpe && m3_with_fpe) {
        m3 = 0;
    }
    /* m4 field was introduced with FPE */
    if (!fpe && m4_with_fpe) {
        m4 = 0;
    }

    /* Check for valid rounding modes. Mode 3 was introduced later. */
    if (m3 == 2 || m3 > 7 || (!fpe && m3 == 3)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return NULL;
    }

    return tcg_const_i32(deposit32(m3, 4, 4, m4));
}
1803
/* CONVERT TO FIXED (short BFP to 32-bit); helper sets the CC.  */
static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
1816
/* CONVERT TO FIXED (long BFP to 32-bit); helper sets the CC.  */
static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
1829
/* CONVERT TO FIXED (extended BFP to 32-bit); helper sets the CC.  */
static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
1842
/* CONVERT TO FIXED (short BFP to 64-bit); helper sets the CC.  */
static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
1855
/* CONVERT TO FIXED (long BFP to 64-bit); helper sets the CC.  */
static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
1868
/* CONVERT TO FIXED (extended BFP to 64-bit); helper sets the CC.  */
static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
1881
/* CONVERT TO LOGICAL (short BFP to 32-bit); helper sets the CC.  */
static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
1894
/* CONVERT TO LOGICAL (long BFP to 32-bit); helper sets the CC.  */
static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
1907
/* CONVERT TO LOGICAL (extended BFP to 32-bit); helper sets the CC.  */
static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
1920
/* CONVERT TO LOGICAL (short BFP to 64-bit); helper sets the CC.  */
static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
1933
/* CONVERT TO LOGICAL (long BFP to 64-bit); helper sets the CC.  */
static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
1946
/* CONVERT TO LOGICAL (extended BFP to 64-bit); helper sets the CC.  */
static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
1959
/* CONVERT FROM FIXED (64-bit to short BFP); no CC change.  */
static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cegb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}
1971
/* CONVERT FROM FIXED (64-bit to long BFP); no CC change.  */
static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}
1983
/* CONVERT FROM FIXED (64-bit to extended BFP); low half of the result
   is retrieved via the 128-bit return convention.  */
static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return_low128(o->out2);
    return DISAS_NEXT;
}
1996
/* CONVERT FROM LOGICAL (64-bit to short BFP); no CC change.  */
static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_celgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}
2008
/* CONVERT FROM LOGICAL (64-bit to long BFP); no CC change.  */
static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}
2020
/* CONVERT FROM LOGICAL (64-bit to extended BFP); low half of the
   result is retrieved via the 128-bit return convention.  */
static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxlgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return_low128(o->out2);
    return DISAS_NEXT;
}
2033
/*
 * CHECKSUM: the helper computes the checksum and the number of bytes
 * processed; the r2/r2+1 pair (address, length) is then advanced by
 * that amount, as the insn is interruptible/partial-completing.
 */
static DisasJumpType op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    return_low128(o->out);

    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);

    return DISAS_NEXT;
}
2049
/*
 * COMPARE LOGICAL (character).  For power-of-two lengths up to 8 the two
 * operands are loaded inline and CC is computed via the unsigned-compare
 * CC op; any other length falls back to the clc helper, which sets CC
 * itself.  Note l1 holds length-1, hence the switch on l + 1.
 */
static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s, l1);
    TCGv_i32 vl;

    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        /* General case: helper does the byte-wise compare and sets CC.  */
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return DISAS_NEXT;
    }
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return DISAS_NEXT;
}
2082
/*
 * COMPARE LOGICAL LONG.  Register pairs R1:R1+1 and R2:R2+1 hold the
 * operands, so both register numbers must be even; otherwise raise a
 * specification exception.  The helper does the work and sets CC.
 */
static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i32 t1, t2;

    /* r1 and r2 must be even.  */
    if (r1 & 1 || r2 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t2 = tcg_const_i32(r2);
    gen_helper_clcl(cc_op, cpu_env, t1, t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    set_cc_static(s);
    return DISAS_NEXT;
}
2103
/*
 * COMPARE LOGICAL LONG EXTENDED.  Like CLCL but the second operand comes
 * from in2 and the register pair is R1/R3; both must be even.
 */
static DisasJumpType op_clcle(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even.  */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return DISAS_NEXT;
}
2124
/*
 * COMPARE LOGICAL LONG UNICODE.  Same shape as op_clcle: validate that
 * the R1/R3 pairs are even, then defer to the helper which sets CC.
 */
static DisasJumpType op_clclu(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even.  */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return DISAS_NEXT;
}
2145
/*
 * COMPARE LOGICAL UNDER MASK.  Truncate in1 to 32 bits and let the clm
 * helper compare the masked bytes against memory at in2, setting CC.
 */
static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, o->in1);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(m3);
    return DISAS_NEXT;
}
2157
/*
 * COMPARE LOGICAL STRING.  The helper receives the terminator in regs[0]
 * and both operand addresses; the updated addresses come back through
 * in1 and the low-128 hook, and CC is set from the helper result.
 */
static DisasJumpType op_clst(DisasContext *s, DisasOps *o)
{
    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return DISAS_NEXT;
}
2165
/*
 * COPY SIGN: out = (sign bit of in1) | (magnitude of in2), done with two
 * masks and an OR on the raw 64-bit FP image.
 */
static DisasJumpType op_cps(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t = tcg_temp_new_i64();
    tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);
    return DISAS_NEXT;
}
2175
/*
 * COMPARE AND SWAP.  Implemented with TCG's atomic cmpxchg; the width is
 * carried in s->insn->data and alignment is enforced via MO_ALIGN.
 */
static DisasJumpType op_cs(DisasContext *s, DisasOps *o)
{
    int d2 = get_field(s, d2);
    int b2 = get_field(s, b2);
    TCGv_i64 addr, cc;

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value).  */

    addr = get_address(s, 0, b2, d2);
    tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
                               get_mem_index(s), s->insn->data | MO_ALIGN);
    tcg_temp_free_i64(addr);

    /* Are the memory and expected values (un)equal?  Note that this setcond
       produces the output CC value, thus the NE sense of the test.  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
    tcg_gen_extrl_i64_i32(cc_op, cc);
    tcg_temp_free_i64(cc);
    set_cc_static(s);

    return DISAS_NEXT;
}
2200
/*
 * COMPARE DOUBLE AND SWAP (128-bit).  Serial execution uses the plain
 * helper; parallel execution needs a host 128-bit cmpxchg, and if the
 * host lacks one we punt to the exclusive-execution slow path via
 * gen_helper_exit_atomic (which ends the TB).
 */
static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    int d2 = get_field(s, d2);
    int b2 = get_field(s, b2);
    DisasJumpType ret = DISAS_NEXT;
    TCGv_i64 addr;
    TCGv_i32 t_r1, t_r3;

    /* Note that R1:R1+1 = expected value and R3:R3+1 = new value.  */
    addr = get_address(s, 0, b2, d2);
    t_r1 = tcg_const_i32(r1);
    t_r3 = tcg_const_i32(r3);
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
    } else if (HAVE_CMPXCHG128) {
        gen_helper_cdsg_parallel(cpu_env, addr, t_r1, t_r3);
    } else {
        gen_helper_exit_atomic(cpu_env);
        ret = DISAS_NORETURN;
    }
    tcg_temp_free_i64(addr);
    tcg_temp_free_i32(t_r1);
    tcg_temp_free_i32(t_r3);

    set_cc_static(s);
    return ret;
}
2230
/*
 * COMPARE AND SWAP AND STORE.  Dispatch to the parallel or serial helper
 * depending on whether this TB runs under CF_PARALLEL; the helper sets CC.
 */
static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    TCGv_i32 t_r3 = tcg_const_i32(r3);

    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        gen_helper_csst_parallel(cc_op, cpu_env, t_r3, o->addr1, o->in2);
    } else {
        gen_helper_csst(cc_op, cpu_env, t_r3, o->addr1, o->in2);
    }
    tcg_temp_free_i32(t_r3);

    set_cc_static(s);
    return DISAS_NEXT;
}
2246
2247#ifndef CONFIG_USER_ONLY
/*
 * COMPARE AND SWAP AND PURGE.  A cmpxchg on the (size-aligned) address,
 * then on success-with-R2-LSB-set a global TLB purge via the helper.
 * The operand size (word or doubleword) is carried in s->insn->data.
 */
static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
{
    MemOp mop = s->insn->data;
    TCGv_i64 addr, old, cc;
    TCGLabel *lab = gen_new_label();

    /* Note that in1 = R1 (zero-extended expected value),
       out = R1 (original reg), out2 = R1+1 (new value).  */

    addr = tcg_temp_new_i64();
    old = tcg_temp_new_i64();
    /* Mask the address down to the operand-size boundary.  */
    tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
    tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
                               get_mem_index(s), mop | MO_ALIGN);
    tcg_temp_free_i64(addr);

    /* Are the memory and expected values (un)equal?  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
    tcg_gen_extrl_i64_i32(cc_op, cc);

    /* Write back the output now, so that it happens before the
       following branch, so that we don't need local temps.  */
    if ((mop & MO_SIZE) == MO_32) {
        tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
    } else {
        tcg_gen_mov_i64(o->out, old);
    }
    tcg_temp_free_i64(old);

    /* If the comparison was equal, and the LSB of R2 was set,
       then we need to flush the TLB (for all cpus).  */
    tcg_gen_xori_i64(cc, cc, 1);
    tcg_gen_and_i64(cc, cc, o->in2);
    tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
    tcg_temp_free_i64(cc);

    gen_helper_purge(cpu_env);
    gen_set_label(lab);

    return DISAS_NEXT;
}
2290#endif
2291
/*
 * CONVERT TO DECIMAL.  The helper produces the 64-bit packed-decimal
 * image of the low 32 bits of in1, which is then stored at in2.
 */
static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i32 t2 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t2, o->in1);
    gen_helper_cvd(t1, t2);
    tcg_temp_free_i32(t2);
    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
    tcg_temp_free_i64(t1);
    return DISAS_NEXT;
}
2303
/*
 * COMPARE AND TRAP.  Branch around the trap when the (inverted) m3
 * condition does NOT hold; s->insn->data selects the unsigned variant.
 */
static DisasJumpType op_ct(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    TCGLabel *lab = gen_new_label();
    TCGCond c;

    /* Invert so that the branch skips the trap on "no trap" cases.  */
    c = tcg_invert_cond(ltgt_cond[m3]);
    if (s->insn->data) {
        c = tcg_unsigned_cond(c);
    }
    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);

    /* Trap.  */
    gen_trap(s);

    gen_set_label(lab);
    return DISAS_NEXT;
}
2322
/*
 * CONVERT UTF (CU12/CU14/CU21/CU24/CU41/CU42).  The source/destination
 * encoding pair is encoded in s->insn->data; R1 and R2 are register
 * pairs and must be even.  The m3 "check" modifier is only honoured
 * when the ETF3 enhancement facility is present.
 */
static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i32 tr1, tr2, chk;

    /* R1 and R2 must both be even.  */
    if ((r1 | r2) & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
        m3 = 0;
    }

    tr1 = tcg_const_i32(r1);
    tr2 = tcg_const_i32(r2);
    chk = tcg_const_i32(m3);

    switch (s->insn->data) {
    case 12:
        gen_helper_cu12(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 14:
        gen_helper_cu14(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 21:
        gen_helper_cu21(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 24:
        gen_helper_cu24(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 41:
        gen_helper_cu41(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 42:
        gen_helper_cu42(cc_op, cpu_env, tr1, tr2, chk);
        break;
    default:
        g_assert_not_reached();
    }

    tcg_temp_free_i32(tr1);
    tcg_temp_free_i32(tr2);
    tcg_temp_free_i32(chk);
    set_cc_static(s);
    return DISAS_NEXT;
}
2372
2373#ifndef CONFIG_USER_ONLY
/* DIAGNOSE (privileged): pass r1, r3 and the i2 function code to the helper. */
static DisasJumpType op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    TCGv_i32 func_code = tcg_const_i32(get_field(s, i2));

    gen_helper_diag(cpu_env, r1, r3, func_code);

    tcg_temp_free_i32(func_code);
    tcg_temp_free_i32(r3);
    tcg_temp_free_i32(r1);
    return DISAS_NEXT;
}
2387#endif
2388
/* 32-bit signed divide via helper; second result comes back via low128. */
static DisasJumpType op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}
2395
/* 32-bit unsigned divide via helper; second result comes back via low128. */
static DisasJumpType op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}
2402
/* 64-bit signed divide via helper; second result comes back via low128. */
static DisasJumpType op_divs64(DisasContext *s, DisasOps *o)
{
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}
2409
/*
 * 128/64-bit unsigned divide: the dividend is the out:out2 pair, the
 * divisor is in2; second result comes back via low128.
 */
static DisasJumpType op_divu64(DisasContext *s, DisasOps *o)
{
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}
2416
/* Short-BFP divide: out = in1 / in2 via the deb helper. */
static DisasJumpType op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
2422
/* Long-BFP divide: out = in1 / in2 via the ddb helper. */
static DisasJumpType op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
2428
/*
 * Extended-BFP divide: 128-bit operands are passed as two 64-bit halves
 * (out:out2 and in1:in2); the low half of the result returns via low128.
 */
static DisasJumpType op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
2435
/* EXTRACT ACCESS: load access register r2 (32-bit, zero-extended) into out. */
static DisasJumpType op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
    return DISAS_NEXT;
}
2442
/* EXTRACT CPU ATTRIBUTE: we model no cache topology, so return all-ones. */
static DisasJumpType op_ecag(DisasContext *s, DisasOps *o)
{
    /* No cache information provided.  */
    tcg_gen_movi_i64(o->out, -1);
    return DISAS_NEXT;
}
2449
/* EXTRACT FPC: load the 32-bit FP control register into out. */
static DisasJumpType op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return DISAS_NEXT;
}
2455
/*
 * EXTRACT PSW: store the high word of the PSW mask into r1 and, if r2 is
 * non-zero, the low word into r2.
 */
static DisasJumpType op_epsw(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i64 t = tcg_temp_new_i64();

    /* Note the "subsequently" in the PoO, which implies a defined result
       if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
    tcg_gen_shri_i64(t, psw_mask, 32);
    store_reg32_i64(r1, t);
    if (r2 != 0) {
        store_reg32_i64(r2, psw_mask);
    }

    tcg_temp_free_i64(t);
    return DISAS_NEXT;
}
2473
/*
 * EXECUTE.  Rejects a nested EXECUTE (its target is itself an EXECUTE)
 * with a program exception, then hands the modifier value (R1, or zero)
 * and target address to the ex helper, which arranges for the modified
 * instruction to be translated next.  PSW address and cc_op are synced
 * first since the helper effectively ends this TB.
 */
static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    TCGv_i32 ilen;
    TCGv_i64 v1;

    /* Nested EXECUTE is not allowed.  */
    if (unlikely(s->ex_value)) {
        gen_program_exception(s, PGM_EXECUTE);
        return DISAS_NORETURN;
    }

    update_psw_addr(s);
    update_cc_op(s);

    if (r1 == 0) {
        v1 = tcg_const_i64(0);
    } else {
        v1 = regs[r1];
    }

    ilen = tcg_const_i32(s->ilen);
    gen_helper_ex(cpu_env, ilen, v1, o->in2);
    tcg_temp_free_i32(ilen);

    /* Only free v1 when it is a temp we allocated, not a global reg.  */
    if (r1 == 0) {
        tcg_temp_free_i64(v1);
    }

    return DISAS_PC_CC_UPDATED;
}
2505
/* Emit the fieb helper on in2 with the m3/m4 rounding modifier. */
static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    /* NULL m34: invalid modifier fields, exception already emitted.  */
    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fieb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}
2517
/* Emit the fidb helper on in2 with the m3/m4 rounding modifier. */
static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    /* NULL m34: invalid modifier fields, exception already emitted.  */
    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fidb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}
2529
/*
 * Emit the fixb helper on the 128-bit in1:in2 pair with the m3/m4
 * modifier; low half of the 128-bit result returns via low128.
 */
static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    /* NULL m34: invalid modifier fields, exception already emitted.  */
    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m34);
    return_low128(o->out2);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}
2542
/*
 * FIND LEFTMOST ONE.  R1 gets the leading-zero count (64 for input 0),
 * R1+1 gets the input with the found bit cleared.
 */
static DisasJumpType op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation.  */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64.  */
    tcg_gen_clzi_i64(o->out, o->in2, 64);

    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing.  */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return DISAS_NEXT;
}
2562
2563static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
2564{
2565    int m3 = get_field(s, m3);
2566    int pos, len, base = s->insn->data;
2567    TCGv_i64 tmp = tcg_temp_new_i64();
2568    uint64_t ccm;
2569
2570    switch (m3) {
2571    case 0xf:
2572        /* Effectively a 32-bit load.  */
2573        tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2574        len = 32;
2575        goto one_insert;
2576
2577    case 0xc:
2578    case 0x6:
2579    case 0x3:
2580        /* Effectively a 16-bit load.  */
2581        tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2582        len = 16;
2583        goto one_insert;
2584
2585    case 0x8:
2586    case 0x4:
2587    case 0x2:
2588    case 0x1:
2589        /* Effectively an 8-bit load.  */
2590        tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2591        len = 8;
2592        goto one_insert;
2593
2594    one_insert:
2595        pos = base + ctz32(m3) * 8;
2596        tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2597        ccm = ((1ull << len) - 1) << pos;
2598        break;
2599
2600    default:
2601        /* This is going to be a sequence of loads and inserts.  */
2602        pos = base + 32 - 8;
2603        ccm = 0;
2604        while (m3) {
2605            if (m3 & 0x8) {
2606                tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2607                tcg_gen_addi_i64(o->in2, o->in2, 1);
2608                tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2609                ccm |= 0xff << pos;
2610            }
2611            m3 = (m3 << 1) & 0xf;
2612            pos -= 8;
2613        }
2614        break;
2615    }
2616
2617    tcg_gen_movi_i64(tmp, ccm);
2618    gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2619    tcg_temp_free_i64(tmp);
2620    return DISAS_NEXT;
2621}
2622
/*
 * Generic bit-field insert: shift and size are packed into s->insn->data
 * (low byte = shift, high bits = field size); out = in1 with in2
 * deposited at [shift, shift+size).
 */
static DisasJumpType op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return DISAS_NEXT;
}
2630
/*
 * INSERT PROGRAM MASK.  Materialize cc_op, combine the 4-bit program
 * mask (PSW bits 40-43) with the CC, and deposit the byte into bits
 * 24-31 of the output register, leaving the rest of it untouched.
 */
static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    gen_op_calc_cc(s);
    t1 = tcg_temp_new_i64();
    tcg_gen_extract_i64(t1, psw_mask, 40, 4);
    t2 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(t2, cc_op);
    tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
    tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return DISAS_NEXT;
}
2646
2647#ifndef CONFIG_USER_ONLY
/*
 * INVALIDATE DAT TABLE ENTRY.  The m4 local-clearing control is only
 * passed through when the local-TLB-clearing facility exists; otherwise
 * force it to zero.
 */
static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m4;

    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
        m4 = tcg_const_i32(get_field(s, m4));
    } else {
        m4 = tcg_const_i32(0);
    }
    gen_helper_idte(cpu_env, o->in1, o->in2, m4);
    tcg_temp_free_i32(m4);
    return DISAS_NEXT;
}
2661
/*
 * INVALIDATE PAGE TABLE ENTRY.  Same m4 gating as op_idte: honour the
 * field only with the local-TLB-clearing facility.
 */
static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m4;

    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
        m4 = tcg_const_i32(get_field(s, m4));
    } else {
        m4 = tcg_const_i32(0);
    }
    gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
    tcg_temp_free_i32(m4);
    return DISAS_NEXT;
}
2675
/* INSERT STORAGE KEY EXTENDED: helper fetches the key for address in2. */
static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
{
    gen_helper_iske(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}
2681#endif
2682
/*
 * Message-security-assist instructions (KM, KMC, KIMD, ...).  The
 * facility type is carried in s->insn->data.  The switch below encodes
 * the per-type register constraints as a deliberate fall-through chain:
 * stricter types perform their extra checks and then fall into the
 * checks shared with the weaker types.  All real work is in the msa
 * helper, which also sets CC.
 */
static DisasJumpType op_msa(DisasContext *s, DisasOps *o)
{
    int r1 = have_field(s, r1) ? get_field(s, r1) : 0;
    int r2 = have_field(s, r2) ? get_field(s, r2) : 0;
    int r3 = have_field(s, r3) ? get_field(s, r3) : 0;
    TCGv_i32 t_r1, t_r2, t_r3, type;

    switch (s->insn->data) {
    case S390_FEAT_TYPE_KMA:
        /* KMA additionally requires r3 distinct from r1 and r2.  */
        if (r3 == r1 || r3 == r2) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_KMCTR:
        /* r3 must be a valid even register pair.  */
        if (r3 & 1 || !r3) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_PPNO:
    case S390_FEAT_TYPE_KMF:
    case S390_FEAT_TYPE_KMC:
    case S390_FEAT_TYPE_KMO:
    case S390_FEAT_TYPE_KM:
        /* r1 must be a valid even register pair.  */
        if (r1 & 1 || !r1) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_KMAC:
    case S390_FEAT_TYPE_KIMD:
    case S390_FEAT_TYPE_KLMD:
        /* r2 must be a valid even register pair.  */
        if (r2 & 1 || !r2) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_PCKMO:
    case S390_FEAT_TYPE_PCC:
        break;
    default:
        g_assert_not_reached();
    };

    t_r1 = tcg_const_i32(r1);
    t_r2 = tcg_const_i32(r2);
    t_r3 = tcg_const_i32(r3);
    type = tcg_const_i32(s->insn->data);
    gen_helper_msa(cc_op, cpu_env, t_r1, t_r2, t_r3, type);
    set_cc_static(s);
    tcg_temp_free_i32(t_r1);
    tcg_temp_free_i32(t_r2);
    tcg_temp_free_i32(t_r3);
    tcg_temp_free_i32(type);
    return DISAS_NEXT;
}
2740
/* Short-BFP compare-and-signal via the keb helper; CC from helper. */
static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
{
    gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
2747
/* Long-BFP compare-and-signal via the kdb helper; CC from helper. */
static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
{
    gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
2754
/*
 * Extended-BFP compare-and-signal: 128-bit operands passed as two
 * 64-bit halves (out:out2 and in1:in2); CC from helper.
 */
static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
{
    gen_helper_kxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
2761
/*
 * LOAD AND ADD (interlocked).  The atomic fetch-add yields the original
 * memory value (the architectural result); the sum is then recomputed
 * in 'out' purely so the CC hooks can use it.
 */
static DisasJumpType op_laa(DisasContext *s, DisasOps *o)
{
    /* The real output is the original value in memory, which the atomic
       op leaves in in2.  */
    tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the addition for setting CC.  */
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
2772
/*
 * LOAD AND AND (interlocked).  Same pattern as op_laa with AND: the
 * fetch-and returns the old memory value, the AND is redone for CC.
 */
static DisasJumpType op_lan(DisasContext *s, DisasOps *o)
{
    /* The real output is the original value in memory, which the atomic
       op leaves in in2.  */
    tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
2783
/*
 * LOAD AND OR (interlocked).  Same pattern as op_laa with OR.
 */
static DisasJumpType op_lao(DisasContext *s, DisasOps *o)
{
    /* The real output is the original value in memory, which the atomic
       op leaves in in2.  */
    tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
2794
/*
 * LOAD AND EXCLUSIVE OR (interlocked).  Same pattern as op_laa with XOR.
 */
static DisasJumpType op_lax(DisasContext *s, DisasOps *o)
{
    /* The real output is the original value in memory, which the atomic
       op leaves in in2.  */
    tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
2805
/* Lengthen short BFP to long via the ldeb helper. */
static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}
2811
/* Emit the ledb helper on in2 with the m3/m4 rounding modifier. */
static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    /* NULL m34: invalid modifier fields, exception already emitted.  */
    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_ledb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}
2823
/* Emit the ldxb helper on the 128-bit in1:in2 pair with the m3/m4 modifier. */
static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    /* NULL m34: invalid modifier fields, exception already emitted.  */
    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}
2835
/* Emit the lexb helper on the 128-bit in1:in2 pair with the m3/m4 modifier. */
static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    /* NULL m34: invalid modifier fields, exception already emitted.  */
    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_lexb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}
2847
/* Lengthen long BFP to extended; low 64 bits return via low128. */
static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
2854
/* Lengthen short BFP to extended; low 64 bits return via low128. */
static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
2861
/* Place a 32-bit FP image in the high half of the 64-bit FP register. */
static DisasJumpType op_lde(DisasContext *s, DisasOps *o)
{
    tcg_gen_shli_i64(o->out, o->in2, 32);
    return DISAS_NEXT;
}
2867
/* LOAD LOGICAL THIRTY ONE BITS: mask the value down to 31 bits. */
static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return DISAS_NEXT;
}
2873
/* Sign-extending 8-bit memory load into out. */
static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
2879
/* Zero-extending 8-bit memory load into out. */
static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
2885
/* Sign-extending 16-bit memory load into out. */
static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
2891
/* Zero-extending 16-bit memory load into out. */
static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
2897
/* Sign-extending 32-bit memory load into out. */
static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
2903
/* Zero-extending 32-bit memory load into out. */
static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
2909
/* 64-bit memory load into out. */
static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
2915
/* LOAD AND TRAP (32-bit): store the value, then trap if it was zero. */
static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32_i64(get_field(s, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
2926
/* LOAD AND TRAP (64-bit): load from memory, then trap if zero. */
static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
2937
/* LOAD HIGH AND TRAP: store into the register high word, trap if zero. */
static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32h_i64(get_field(s, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
2948
/* LOAD LOGICAL AND TRAP (32->64): zero-extending load, trap if zero. */
static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
2959
/* LOAD LOGICAL THIRTY ONE BITS AND TRAP: 31-bit mask, trap if zero. */
static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
2970
/*
 * LOAD ON CONDITION.  Decode the m3 mask into a DisasCompare; when the
 * comparison is natively 64-bit a single movcond suffices, otherwise
 * the 32-bit setcond result is widened and compared against zero.
 * in1 is the current register value (kept when the condition is false),
 * in2 is the new value.
 */
static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;

    disas_jcc(s, &c, get_field(s, m3));

    if (c.is_64) {
        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
                            o->in2, o->in1);
        free_compare(&c);
    } else {
        TCGv_i32 t32 = tcg_temp_new_i32();
        TCGv_i64 t, z;

        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
        free_compare(&c);

        t = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t, t32);
        tcg_temp_free_i32(t32);

        z = tcg_const_i64(0);
        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
        tcg_temp_free_i64(t);
        tcg_temp_free_i64(z);
    }

    return DISAS_NEXT;
}
3000
3001#ifndef CONFIG_USER_ONLY
/* LOAD CONTROL (32-bit): helper loads CRs r1..r3 from memory at in2. */
static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_lctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return DISAS_PC_STALE_NOCHAIN;
}
3012
/* LOAD CONTROL (64-bit): helper loads CRs r1..r3 from memory at in2. */
static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return DISAS_PC_STALE_NOCHAIN;
}
3023
/* LOAD REAL ADDRESS: address translation done by the lra helper; CC set. */
static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
{
    gen_helper_lra(o->out, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3030
/* LOAD PROGRAM PARAMETER: store in2 into env->pp. */
static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
{
    tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
    return DISAS_NEXT;
}
3036
/*
 * LOAD PSW (short, 8-byte format).  Load the doubleword-aligned mask
 * and address words, widen the 32-bit mask into the 64-bit internal
 * format, and let the load_psw helper install it; control never
 * returns to this TB.
 */
static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    /* First word must be doubleword aligned (MO_ALIGN_8).  */
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
                        MO_TEUL | MO_ALIGN_8);
    tcg_gen_addi_i64(o->in2, o->in2, 4);
    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK.  */
    tcg_gen_shli_i64(t1, t1, 32);
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return DISAS_NORETURN;
}
3056
/*
 * LOAD PSW EXTENDED (16-byte format): two 64-bit loads (first must be
 * doubleword aligned), then install via the load_psw helper.
 */
static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
                        MO_TEQ | MO_ALIGN_8);
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return DISAS_NORETURN;
}
3074#endif
3075
3076static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
3077{
3078    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
3079    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
3080    gen_helper_lam(cpu_env, r1, o->in2, r3);
3081    tcg_temp_free_i32(r1);
3082    tcg_temp_free_i32(r3);
3083    return DISAS_NEXT;
3084}
3085
static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
{
    /*
     * LM: load multiple 32-bit registers r1 through r3 (wrapping modulo 16)
     * from consecutive words starting at o->in2.  The first and last words
     * are loaded before any register is written so that a page fault leaves
     * all registers unmodified.
     */
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t1);
        tcg_temp_free(t1);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
    store_reg32_i64(r1, t1);
    store_reg32_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t2);
        tcg_temp_free(t1);
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t1);
    }
    tcg_temp_free(t2);
    tcg_temp_free(t1);

    return DISAS_NEXT;
}
3131
static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
{
    /*
     * LMH: like op_lm32, but the loaded words go to the high halves of the
     * 64-bit registers.  Fault ordering is identical: first and last words
     * are loaded before any register is modified.
     */
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t1);
        tcg_temp_free(t1);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
    store_reg32h_i64(r1, t1);
    store_reg32h_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t2);
        tcg_temp_free(t1);
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t1);
    }
    tcg_temp_free(t2);
    tcg_temp_free(t1);

    return DISAS_NEXT;
}
3177
static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
{
    /*
     * LMG: load multiple 64-bit registers r1 through r3 (wrapping modulo
     * 16) from consecutive doublewords at o->in2, with the same first/last
     * fault-ordering discipline as op_lm32.
     */
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
    tcg_gen_mov_i64(regs[r1], t1);
    tcg_temp_free(t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t1);
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t1, 8);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t1);
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
    }
    tcg_temp_free(t1);

    return DISAS_NEXT;
}
3218
static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
{
    /*
     * LPD/LPDG: load pair disjoint.  Two separate aligned loads that must
     * appear interlocked; operand size comes from insn->data.
     */
    TCGv_i64 a1, a2;
    MemOp mop = s->insn->data;

    /* In a parallel context, stop the world and single step.  */
    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        update_psw_addr(s);
        update_cc_op(s);
        gen_exception(EXCP_ATOMIC);
        return DISAS_NORETURN;
    }

    /* In a serial context, perform the two loads ... */
    a1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
    a2 = get_address(s, 0, get_field(s, b2), get_field(s, d2));
    tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
    tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
    tcg_temp_free_i64(a1);
    tcg_temp_free_i64(a2);

    /* ... and indicate that we performed them while interlocked.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}
3244
static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
{
    /*
     * LPQ: load a 16-byte quadword.  Uses the non-atomic helper when not
     * in a parallel context, the atomic128 helper when the host supports
     * it, and otherwise bails out to the serial atomic slow path.
     */
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        gen_helper_lpq(o->out, cpu_env, o->in2);
    } else if (HAVE_ATOMIC128) {
        gen_helper_lpq_parallel(o->out, cpu_env, o->in2);
    } else {
        gen_helper_exit_atomic(cpu_env);
        return DISAS_NORETURN;
    }
    /* The helper returns the low 64 bits via the cc helper slot. */
    return_low128(o->out2);
    return DISAS_NEXT;
}
3258
3259#ifndef CONFIG_USER_ONLY
static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
{
    /* LURA/LURAG: load using the real-address MMU index; size in insn->data. */
    tcg_gen_qemu_ld_tl(o->out, o->in2, MMU_REAL_IDX, s->insn->data);
    return DISAS_NEXT;
}
3265#endif
3266
3267static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o)
3268{
3269    tcg_gen_andi_i64(o->out, o->in2, -256);
3270    return DISAS_NEXT;
3271}
3272
static DisasJumpType op_lcbb(DisasContext *s, DisasOps *o)
{
    /*
     * LCBB: load count to block boundary.  Computes the number of bytes
     * from o->addr1 to the next block boundary (block size selected by m3
     * as 2^(m3+6)), capped at 16, and sets the CC from the result.
     */
    const int64_t block_size = (1ull << (get_field(s, m3) + 6));

    /* m3 > 6 would select an unsupported block size. */
    if (get_field(s, m3) > 6) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    /* -(addr | -block_size) == distance to the next boundary. */
    tcg_gen_ori_i64(o->addr1, o->addr1, -block_size);
    tcg_gen_neg_i64(o->addr1, o->addr1);
    tcg_gen_movi_i64(o->out, 16);
    tcg_gen_umin_i64(o->out, o->out, o->addr1);
    gen_op_update1_cc_i64(s, CC_OP_LCBB, o->out);
    return DISAS_NEXT;
}
3289
static DisasJumpType op_mc(DisasContext *s, DisasOps *o)
{
    /*
     * MC: monitor call.  The monitor class lives in the low byte of i2;
     * a nonzero high byte is a specification exception.  System emulation
     * forwards to the monitor-call helper; user emulation treats it as a
     * NOP.
     */
#if !defined(CONFIG_USER_ONLY)
    TCGv_i32 i2;
#endif
    const uint16_t monitor_class = get_field(s, i2);

    if (monitor_class & 0xff00) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

#if !defined(CONFIG_USER_ONLY)
    i2 = tcg_const_i32(monitor_class);
    gen_helper_monitor_call(cpu_env, o->addr1, i2);
    tcg_temp_free_i32(i2);
#endif
    /* Defaults to a NOP. */
    return DISAS_NEXT;
}
3310
static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
{
    /* Move in2 to out by stealing the temp (no copy is emitted). */
    o->out = o->in2;
    o->g_out = o->g_in2;
    o->in2 = NULL;
    o->g_in2 = false;
    return DISAS_NEXT;
}
3319
static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
{
    /*
     * Like op_mov2, but additionally set access register 1 according to
     * the current address-space control, as required by MVCOS-style
     * operand addressing (see the PoO for the per-ASC encodings below).
     */
    int b2 = get_field(s, b2);
    TCGv ar1 = tcg_temp_new_i64();

    o->out = o->in2;
    o->g_out = o->g_in2;
    o->in2 = NULL;
    o->g_in2 = false;

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 0);
        break;
    case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 1);
        break;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        /* In access-register via secondary mode, copy the base AR if any. */
        if (b2) {
            tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
        } else {
            tcg_gen_movi_i64(ar1, 0);
        }
        break;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 2);
        break;
    }

    tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
    tcg_temp_free_i64(ar1);

    return DISAS_NEXT;
}
3354
static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
{
    /* Move the (in1, in2) pair to (out, out2) by stealing both temps. */
    o->out = o->in1;
    o->out2 = o->in2;
    o->g_out = o->g_in1;
    o->g_out2 = o->g_in2;
    o->in1 = NULL;
    o->in2 = NULL;
    o->g_in1 = o->g_in2 = false;
    return DISAS_NEXT;
}
3366
static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
{
    /* MVC: move l1+1 bytes from o->in2 to o->addr1 via the helper. */
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
{
    /* MVCIN: move inverse — like MVC but with the source read backwards. */
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}
3382
3383static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
3384{
3385    int r1 = get_field(s, r1);
3386    int r2 = get_field(s, r2);
3387    TCGv_i32 t1, t2;
3388
3389    /* r1 and r2 must be even.  */
3390    if (r1 & 1 || r2 & 1) {
3391        gen_program_exception(s, PGM_SPECIFICATION);
3392        return DISAS_NORETURN;
3393    }
3394
3395    t1 = tcg_const_i32(r1);
3396    t2 = tcg_const_i32(r2);
3397    gen_helper_mvcl(cc_op, cpu_env, t1, t2);
3398    tcg_temp_free_i32(t1);
3399    tcg_temp_free_i32(t2);
3400    set_cc_static(s);
3401    return DISAS_NEXT;
3402}
3403
3404static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
3405{
3406    int r1 = get_field(s, r1);
3407    int r3 = get_field(s, r3);
3408    TCGv_i32 t1, t3;
3409
3410    /* r1 and r3 must be even.  */
3411    if (r1 & 1 || r3 & 1) {
3412        gen_program_exception(s, PGM_SPECIFICATION);
3413        return DISAS_NORETURN;
3414    }
3415
3416    t1 = tcg_const_i32(r1);
3417    t3 = tcg_const_i32(r3);
3418    gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
3419    tcg_temp_free_i32(t1);
3420    tcg_temp_free_i32(t3);
3421    set_cc_static(s);
3422    return DISAS_NEXT;
3423}
3424
3425static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
3426{
3427    int r1 = get_field(s, r1);
3428    int r3 = get_field(s, r3);
3429    TCGv_i32 t1, t3;
3430
3431    /* r1 and r3 must be even.  */
3432    if (r1 & 1 || r3 & 1) {
3433        gen_program_exception(s, PGM_SPECIFICATION);
3434        return DISAS_NORETURN;
3435    }
3436
3437    t1 = tcg_const_i32(r1);
3438    t3 = tcg_const_i32(r3);
3439    gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
3440    tcg_temp_free_i32(t1);
3441    tcg_temp_free_i32(t3);
3442    set_cc_static(s);
3443    return DISAS_NEXT;
3444}
3445
static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
{
    /* MVCOS: move with optional specifications; r3 holds the length. */
    int r3 = get_field(s, r3);
    gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);
    set_cc_static(s);
    return DISAS_NEXT;
}
3453
3454#ifndef CONFIG_USER_ONLY
static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o)
{
    /* MVCP: move to primary.  For this SSd format the l1 field holds r1,
       whose register supplies the true length. */
    int r1 = get_field(s, l1);
    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
{
    /* MVCS: move to secondary; same l1-holds-r1 field trick as MVCP. */
    int r1 = get_field(s, l1);
    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3470#endif
3471
static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
{
    /* MVN: move numerics (low nibbles) over l1+1 bytes. */
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
{
    /* MVO: move with offset (packed-decimal nibble shift). */
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}
3487
static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
{
    /* MVPG: move page; r0 carries the operational flags per the PoO. */
    TCGv_i32 t1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 t2 = tcg_const_i32(get_field(s, r2));

    gen_helper_mvpg(cc_op, cpu_env, regs[0], t1, t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
{
    /* MVST: move string terminated by the byte in r0. */
    TCGv_i32 t1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 t2 = tcg_const_i32(get_field(s, r2));

    gen_helper_mvst(cc_op, cpu_env, t1, t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3511
static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
{
    /* MVZ: move zones (high nibbles) over l1+1 bytes. */
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}
3519
static DisasJumpType op_mul(DisasContext *s, DisasOps *o)
{
    /* Integer multiply, low 64 bits only. */
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_mul128(DisasContext *s, DisasOps *o)
{
    /* Unsigned 64x64->128 multiply; high half to out, low half to out2. */
    tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_muls128(DisasContext *s, DisasOps *o)
{
    /* Signed 64x64->128 multiply; high half to out, low half to out2. */
    tcg_gen_muls2_i64(o->out2, o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
{
    /* MEEB: BFP multiply, short (32-bit) operands. */
    gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
{
    /* MDEB: BFP multiply, short operands with long (64-bit) result. */
    gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
{
    /* MDB: BFP multiply, long (64-bit) operands. */
    gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
{
    /* MXB: BFP multiply, extended (128-bit) operands in register pairs. */
    gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
{
    /* MXDB: BFP multiply, long operands with extended result. */
    gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
{
    /* MAEB: BFP multiply-and-add, short operands. */
    TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
    gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return DISAS_NEXT;
}

static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
{
    /* MADB: BFP multiply-and-add, long operands. */
    TCGv_i64 r3 = load_freg(get_field(s, r3));
    gen_helper_madb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return DISAS_NEXT;
}

static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
{
    /* MSEB: BFP multiply-and-subtract, short operands. */
    TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
    gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return DISAS_NEXT;
}

static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
{
    /* MSDB: BFP multiply-and-subtract, long operands. */
    TCGv_i64 r3 = load_freg(get_field(s, r3));
    gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return DISAS_NEXT;
}
3601
3602static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
3603{
3604    TCGv_i64 z, n;
3605    z = tcg_const_i64(0);
3606    n = tcg_temp_new_i64();
3607    tcg_gen_neg_i64(n, o->in2);
3608    tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
3609    tcg_temp_free_i64(n);
3610    tcg_temp_free_i64(z);
3611    return DISAS_NEXT;
3612}
3613
static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o)
{
    /* LNEBR: negative absolute of a short BFP value — force the sign bit. */
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
    return DISAS_NEXT;
}

static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o)
{
    /* LNDBR: negative absolute of a long BFP value — force the sign bit. */
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
    return DISAS_NEXT;
}

static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
{
    /* LNXBR: extended BFP — set the sign in the high half, copy the low. */
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}
3632
static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
{
    /* NC: storage-to-storage AND over l1+1 bytes; helper computes the CC. */
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}
3641
static DisasJumpType op_neg(DisasContext *s, DisasOps *o)
{
    /* Integer two's-complement negation. */
    tcg_gen_neg_i64(o->out, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_negf32(DisasContext *s, DisasOps *o)
{
    /* LCEBR: negate a short BFP value by flipping the sign bit. */
    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
    return DISAS_NEXT;
}

static DisasJumpType op_negf64(DisasContext *s, DisasOps *o)
{
    /* LCDBR: negate a long BFP value by flipping the sign bit. */
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
    return DISAS_NEXT;
}

static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
{
    /* LCXBR: extended BFP — flip the sign in the high half, copy the low. */
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}
3666
static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
{
    /* OC: storage-to-storage OR over l1+1 bytes; helper computes the CC. */
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_or(DisasContext *s, DisasOps *o)
{
    /* Register/register bitwise OR. */
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3681
static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
{
    /*
     * OR immediate into a sub-field of the register.  insn->data encodes
     * the field position (low byte = shift) and width (high bits = size).
     */
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
3697
static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
{
    /*
     * OI: OR immediate into storage.  With interlocked-access facility 2
     * the update is done atomically in memory; otherwise it is a plain
     * load/modify/store sequence.  Size/endianness come from insn->data.
     */
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                    s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
3718
static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
{
    /* PACK: convert zoned decimal at in2 to packed decimal at addr1. */
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_pack(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}
3726
static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
{
    /* PKA: pack ASCII.  l2 is a length-minus-one field, hence the +1. */
    int l2 = get_field(s, l2) + 1;
    TCGv_i32 l;

    /* The length must not exceed 32 bytes.  */
    if (l2 > 32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_const_i32(l2);
    gen_helper_pka(cpu_env, o->addr1, o->in2, l);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
{
    /* PKU: pack Unicode.  l2 is a length-minus-one field, hence the +1. */
    int l2 = get_field(s, l2) + 1;
    TCGv_i32 l;

    /* The length must be even and should not exceed 64 bytes.  */
    if ((l2 & 1) || (l2 > 64)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_const_i32(l2);
    gen_helper_pku(cpu_env, o->addr1, o->in2, l);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}
3758
static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o)
{
    /* POPCNT: population count, computed in the helper. */
    gen_helper_popcnt(o->out, o->in2);
    return DISAS_NEXT;
}
3764
3765#ifndef CONFIG_USER_ONLY
static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
{
    /* PTLB: purge the TLB. */
    gen_helper_ptlb(cpu_env);
    return DISAS_NEXT;
}
3771#endif
3772
static DisasJumpType op_risbg(DisasContext *s, DisasOps *o)
{
    /*
     * RISBG and friends: rotate in2 left by i5, then insert the bit range
     * i3..i4 of the result into in1 (or into zeros if the zero flag in i4
     * is set).  The masks are computed at translate time; fast paths use
     * extract/deposit where the geometry allows.
     */
    int i3 = get_field(s, i3);
    int i4 = get_field(s, i4);
    int i5 = get_field(s, i5);
    int do_zero = i4 & 0x80;
    uint64_t mask, imask, pmask;
    int pos, len, rot;

    /* Adjust the arguments for the specific insn.  */
    switch (s->fields.op2) {
    case 0x55: /* risbg */
    case 0x59: /* risbgn */
        i3 &= 63;
        i4 &= 63;
        pmask = ~0;
        break;
    case 0x5d: /* risbhg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0xffffffff00000000ull;
        break;
    case 0x51: /* risblg */
        i3 = (i3 & 31) + 32;
        i4 = (i4 & 31) + 32;
        pmask = 0x00000000ffffffffull;
        break;
    default:
        g_assert_not_reached();
    }

    /* MASK is the set of bits to be inserted from R2. */
    if (i3 <= i4) {
        /* [0...i3---i4...63] */
        mask = (-1ull >> i3) & (-1ull << (63 - i4));
    } else {
        /* [0---i4...i3---63] */
        mask = (-1ull >> i3) | (-1ull << (63 - i4));
    }
    /* For RISBLG/RISBHG, the wrapping is limited to the high/low doubleword. */
    mask &= pmask;

    /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
       insns, we need to keep the other half of the register.  */
    imask = ~mask | ~pmask;
    if (do_zero) {
        imask = ~pmask;
    }

    len = i4 - i3 + 1;
    pos = 63 - i4;
    rot = i5 & 63;

    /* In some cases we can implement this with extract.  */
    if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
        tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
        return DISAS_NEXT;
    }

    /* In some cases we can implement this with deposit.  */
    if (len > 0 && (imask == 0 || ~mask == imask)) {
        /* Note that we rotate the bits to be inserted to the lsb, not to
           the position as described in the PoO.  */
        rot = (rot - pos) & 63;
    } else {
        pos = -1;
    }

    /* Rotate the input as necessary.  */
    tcg_gen_rotli_i64(o->in2, o->in2, rot);

    /* Insert the selected bits into the output.  */
    if (pos >= 0) {
        if (imask == 0) {
            tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
        } else {
            tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
        }
    } else if (imask == 0) {
        tcg_gen_andi_i64(o->out, o->in2, mask);
    } else {
        /* General case: merge via explicit AND/OR masking. */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_andi_i64(o->out, o->out, imask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
    }
    return DISAS_NEXT;
}
3860
static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
{
    /*
     * RNSBG/ROSBG/RXSBG: rotate in2 left by i5, combine the selected bit
     * range i3..i4 with out using AND/OR/XOR (selected by op2), and set
     * the CC from the selected bits only.
     */
    int i3 = get_field(s, i3);
    int i4 = get_field(s, i4);
    int i5 = get_field(s, i5);
    uint64_t mask;

    /* If this is a test-only form, arrange to discard the result.  */
    if (i3 & 0x80) {
        o->out = tcg_temp_new_i64();
        o->g_out = false;
    }

    i3 &= 63;
    i4 &= 63;
    i5 &= 63;

    /* MASK is the set of bits to be operated on from R2.
       Take care for I3/I4 wraparound.  */
    mask = ~0ull >> i3;
    if (i3 <= i4) {
        mask ^= ~0ull >> i4 >> 1;
    } else {
        mask |= ~(~0ull >> i4 >> 1);
    }

    /* Rotate the input as necessary.  */
    tcg_gen_rotli_i64(o->in2, o->in2, i5);

    /* Operate.  */
    switch (s->fields.op2) {
    case 0x54: /* AND */
        /* Bits outside the mask must not be cleared: OR them in first. */
        tcg_gen_ori_i64(o->in2, o->in2, ~mask);
        tcg_gen_and_i64(o->out, o->out, o->in2);
        break;
    case 0x56: /* OR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
        break;
    case 0x57: /* XOR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_xor_i64(o->out, o->out, o->in2);
        break;
    default:
        abort();
    }

    /* Set the CC.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
3913
static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
{
    /* Byte-swap the low 16 bits; upper bits of out are zeroed. */
    tcg_gen_bswap16_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
    return DISAS_NEXT;
}

static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
{
    /* Byte-swap the low 32 bits; upper bits of out are zeroed. */
    tcg_gen_bswap32_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
    return DISAS_NEXT;
}

static DisasJumpType op_rev64(DisasContext *s, DisasOps *o)
{
    /* Byte-swap all 64 bits. */
    tcg_gen_bswap64_i64(o->out, o->in2);
    return DISAS_NEXT;
}
3931
3932static DisasJumpType op_rll32(DisasContext *s, DisasOps *o)
3933{
3934    TCGv_i32 t1 = tcg_temp_new_i32();
3935    TCGv_i32 t2 = tcg_temp_new_i32();
3936    TCGv_i32 to = tcg_temp_new_i32();
3937    tcg_gen_extrl_i64_i32(t1, o->in1);
3938    tcg_gen_extrl_i64_i32(t2, o->in2);
3939    tcg_gen_rotl_i32(to, t1, t2);
3940    tcg_gen_extu_i32_i64(o->out, to);
3941    tcg_temp_free_i32(t1);
3942    tcg_temp_free_i32(t2);
3943    tcg_temp_free_i32(to);
3944    return DISAS_NEXT;
3945}
3946
static DisasJumpType op_rll64(DisasContext *s, DisasOps *o)
{
    /* RLLG: 64-bit rotate left. */
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3952
3953#ifndef CONFIG_USER_ONLY
static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
{
    /* RRBE: reset reference bit extended; helper returns the old R/C bits
       in the CC. */
    gen_helper_rrbe(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
{
    /* SAC/SACF: set address-space control. */
    gen_helper_sacf(cpu_env, o->in2);
    /* Addressing mode has changed, so end the block.  */
    return DISAS_PC_STALE;
}
3967#endif
3968
static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
{
    /*
     * SAM24/SAM31/SAM64: set addressing mode.  insn->data selects the
     * mode (0/1/3), which is deposited into PSW mask bits 31-32; the
     * current PC must fit the new mode's address mask.
     */
    int sam = s->insn->data;
    TCGv_i64 tsam;
    uint64_t mask;

    switch (sam) {
    case 0:
        mask = 0xffffff;
        break;
    case 1:
        mask = 0x7fffffff;
        break;
    default:
        mask = -1;
        break;
    }

    /* Bizarre but true, we check the address of the current insn for the
       specification exception, not the next to be executed.  Thus the PoO
       documents that Bad Things Happen two bytes before the end.  */
    if (s->base.pc_next & ~mask) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    s->pc_tmp &= mask;

    tsam = tcg_const_i64(sam);
    tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
    tcg_temp_free_i64(tsam);

    /* Always exit the TB, since we (may have) changed execution mode.  */
    return DISAS_PC_STALE;
}
4003
static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
{
    /* SAR: set access register r1 from the low 32 bits of in2. */
    int r1 = get_field(s, r1);
    tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
    return DISAS_NEXT;
}
4010
static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
{
    /* SEB: BFP subtract, short (32-bit) operands. */
    gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
{
    /* SDB: BFP subtract, long (64-bit) operands. */
    gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
{
    /* SXB: BFP subtract, extended (128-bit) operands in register pairs. */
    gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
{
    /* SQEB: BFP square root, short operand. */
    gen_helper_sqeb(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
{
    /* SQDB: BFP square root, long operand. */
    gen_helper_sqdb(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o)
{
    /* SQXB: BFP square root, extended operand. */
    gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
4048
4049#ifndef CONFIG_USER_ONLY
static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
{
    /* SERVC: service call (SCLP); helper performs the call and sets CC. */
    gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
{
    /* SIGP: signal processor; order code/address in in2, params via r1/r3. */
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3);
    set_cc_static(s);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}
4067#endif
4068
static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
{
    /*
     * STOC/STOCG/STOCFH: store on condition.  Branches around the store
     * when the m3 condition is NOT met; insn->data selects which store
     * variant is emitted.
     */
    DisasCompare c;
    TCGv_i64 a, h;
    TCGLabel *lab;
    int r1;

    disas_jcc(s, &c, get_field(s, m3));

    /* We want to store when the condition is fulfilled, so branch
       out when it's not */
    c.cond = tcg_invert_cond(c.cond);

    lab = gen_new_label();
    if (c.is_64) {
        tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
    } else {
        tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
    }
    free_compare(&c);

    r1 = get_field(s, r1);
    a = get_address(s, 0, get_field(s, b2), get_field(s, d2));
    switch (s->insn->data) {
    case 1: /* STOCG */
        tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
        break;
    case 0: /* STOC */
        tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
        break;
    case 2: /* STOCFH */
        /* Store the high half of the register. */
        h = tcg_temp_new_i64();
        tcg_gen_shri_i64(h, regs[r1], 32);
        tcg_gen_qemu_st32(h, a, get_mem_index(s));
        tcg_temp_free_i64(h);
        break;
    default:
        g_assert_not_reached();
    }
    tcg_temp_free_i64(a);

    gen_set_label(lab);
    return DISAS_NEXT;
}
4113
static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
{
    /*
     * SLA/SLAG: shift left single (arithmetic).  insn->data is the sign
     * bit position (31 or 63), which also selects the CC computation.
     */
    uint64_t sign = 1ull << s->insn->data;
    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged.  */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return DISAS_NEXT;
}
4127
/* SHIFT LEFT SINGLE LOGICAL: plain 64-bit left shift of in1 by in2.  */
static DisasJumpType op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* SHIFT RIGHT SINGLE (arithmetic): 64-bit arithmetic right shift.  */
static DisasJumpType op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* SHIFT RIGHT SINGLE LOGICAL: 64-bit logical right shift.  */
static DisasJumpType op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
4145
/* SET FPC: load the floating-point control register from in2.  */
static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
{
    gen_helper_sfpc(cpu_env, o->in2);
    return DISAS_NEXT;
}

/* SET FPC AND SIGNAL: like SFPC, but may raise a simulated IEEE
   exception; handled entirely in the helper.  */
static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
{
    gen_helper_sfas(cpu_env, o->in2);
    return DISAS_NEXT;
}
4157
/* SET BFP ROUNDING MODE (2 bit).  */
static DisasJumpType op_srnm(DisasContext *s, DisasOps *o)
{
    /* Bits other than 62 and 63 are ignored. Bit 29 is set to zero. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0x3ull);
    gen_helper_srnm(cpu_env, o->addr1);
    return DISAS_NEXT;
}
4165
/* SET BFP ROUNDING MODE (3 bit): same helper as SRNM, wider mask.  */
static DisasJumpType op_srnmb(DisasContext *s, DisasOps *o)
{
    /* Bits 0-55 are ignored. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0xffull);
    gen_helper_srnm(cpu_env, o->addr1);
    return DISAS_NEXT;
}
4173
/* SET DFP ROUNDING MODE: write the 3-bit mode into FPC bits 4-6
   directly, since DFP arithmetic itself is not implemented.  */
static DisasJumpType op_srnmt(DisasContext *s, DisasOps *o)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Bits other than 61-63 are ignored. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0x7ull);

    /* No need to call a helper, we don't implement dfp */
    tcg_gen_ld32u_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_deposit_i64(tmp, tmp, o->addr1, 4, 3);
    tcg_gen_st32_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));

    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
4189
/* SET PROGRAM MASK: take the CC from bits 28-29 of the low word of r1
   and the 4-bit program mask from bits 24-27, depositing the latter
   into the PSW mask.  */
static DisasJumpType op_spm(DisasContext *s, DisasOps *o)
{
    tcg_gen_extrl_i64_i32(cc_op, o->in1);
    tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
    set_cc_static(s);

    tcg_gen_shri_i64(o->in1, o->in1, 24);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
    return DISAS_NEXT;
}
4200
/*
 * EXTRACT CPU TIME (ECTG): GR0 = first operand minus the CPU timer,
 * GR1 = second-operand address, R3 = doubleword fetched from the
 * address in R3.  All operand addresses are computed up front so a
 * fault cannot leave the registers partially updated.
 */
static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
{
    int b1 = get_field(s, b1);
    int d1 = get_field(s, d1);
    int b2 = get_field(s, b2);
    int d2 = get_field(s, d2);
    int r3 = get_field(s, r3);
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* fetch all operands first */
    o->in1 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in1, regs[b1], d1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in2, regs[b2], d2);
    o->addr1 = tcg_temp_new_i64();
    gen_addi_and_wrap_i64(s, o->addr1, regs[r3], 0);

    /* load the third operand into r3 before modifying anything */
    tcg_gen_qemu_ld64(regs[r3], o->addr1, get_mem_index(s));

    /* subtract CPU timer from first operand and store in GR0 */
    gen_helper_stpt(tmp, cpu_env);
    tcg_gen_sub_i64(regs[0], o->in1, tmp);

    /* store second operand in GR1 */
    tcg_gen_mov_i64(regs[1], o->in2);

    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
4231
4232#ifndef CONFIG_USER_ONLY
/* SET PSW KEY FROM ADDRESS: move bits 4-7 of in2 into the PSW key.  */
static DisasJumpType op_spka(DisasContext *s, DisasOps *o)
{
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
    return DISAS_NEXT;
}

/* SET STORAGE KEY EXTENDED: handled in the helper.  */
static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
{
    gen_helper_sske(cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* SET SYSTEM MASK: replace the top byte of the PSW mask with in2.  */
static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
{
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return DISAS_PC_STALE_NOCHAIN;
}

/* STORE CPU ADDRESS: return the core id of the current cpu.  */
static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, core_id));
    return DISAS_NEXT;
}
4258#endif
4259
/* STORE CLOCK: fetch the TOD clock via helper; always sets CC 0.  */
static DisasJumpType op_stck(DisasContext *s, DisasOps *o)
{
    gen_helper_stck(o->out, cpu_env);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}
4267
/*
 * STORE CLOCK EXTENDED: store the 64-bit TOD clock as a 128-bit
 * extended value (epoch byte + clock + programmable field).
 */
static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
{
    TCGv_i64 c1 = tcg_temp_new_i64();
    TCGv_i64 c2 = tcg_temp_new_i64();
    TCGv_i64 todpr = tcg_temp_new_i64();
    gen_helper_stck(c1, cpu_env);
    /* 16 bit value stored in a uint32_t (only valid bits set) */
    tcg_gen_ld32u_i64(todpr, cpu_env, offsetof(CPUS390XState, todpr));
    /* Shift the 64-bit value into its place as a zero-extended
       104-bit value.  Note that "bit positions 64-103 are always
       non-zero so that they compare differently to STCK"; we set
       the least significant bit to 1.  */
    tcg_gen_shli_i64(c2, c1, 56);
    tcg_gen_shri_i64(c1, c1, 8);
    tcg_gen_ori_i64(c2, c2, 0x10000);
    tcg_gen_or_i64(c2, c2, todpr);
    tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
    tcg_temp_free_i64(c1);
    tcg_temp_free_i64(c2);
    tcg_temp_free_i64(todpr);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}
4294
4295#ifndef CONFIG_USER_ONLY
/* SET CLOCK: load the aligned doubleword operand, then let the helper
   set the TOD clock and the condition code.  */
static DisasJumpType op_sck(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
    gen_helper_sck(cc_op, cpu_env, o->in1);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SET CLOCK COMPARATOR.  */
static DisasJumpType op_sckc(DisasContext *s, DisasOps *o)
{
    gen_helper_sckc(cpu_env, o->in2);
    return DISAS_NEXT;
}

/* SET CLOCK PROGRAMMABLE FIELD: takes its operand from GR0.  */
static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o)
{
    gen_helper_sckpf(cpu_env, regs[0]);
    return DISAS_NEXT;
}

/* STORE CLOCK COMPARATOR.  */
static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
{
    gen_helper_stckc(o->out, cpu_env);
    return DISAS_NEXT;
}
4321
/* STORE CONTROL (64-bit): store control registers r1..r3 at in2.  */
static DisasJumpType op_stctg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_stctg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}

/* STORE CONTROL (32-bit): as STCTG but 32-bit slots.  */
static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_stctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}
4341
/* STORE CPU ID: return the precomputed cpuid from env.  */
static DisasJumpType op_stidp(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));
    return DISAS_NEXT;
}

/* SET CPU TIMER.  */
static DisasJumpType op_spt(DisasContext *s, DisasOps *o)
{
    gen_helper_spt(cpu_env, o->in2);
    return DISAS_NEXT;
}

/* STORE FACILITY LIST: writes the facility bits to low core.  */
static DisasJumpType op_stfl(DisasContext *s, DisasOps *o)
{
    gen_helper_stfl(cpu_env);
    return DISAS_NEXT;
}

/* STORE CPU TIMER.  */
static DisasJumpType op_stpt(DisasContext *s, DisasOps *o)
{
    gen_helper_stpt(o->out, cpu_env);
    return DISAS_NEXT;
}
4365
/* STORE SYSTEM INFORMATION: function code in GR0, selectors in GR1;
   CC comes back from the helper.  */
static DisasJumpType op_stsi(DisasContext *s, DisasOps *o)
{
    gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SET PREFIX.  */
static DisasJumpType op_spx(DisasContext *s, DisasOps *o)
{
    gen_helper_spx(cpu_env, o->in2);
    return DISAS_NEXT;
}
4378
/*
 * Channel-subsystem instructions.  Per the architecture these take the
 * subchannel id implicitly in GR1 (and, where needed, further operands
 * in GR2 or memory); everything is delegated to the css helpers, which
 * also produce the condition code where one is set.
 */

/* CANCEL SUBCHANNEL.  */
static DisasJumpType op_xsch(DisasContext *s, DisasOps *o)
{
    gen_helper_xsch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* CLEAR SUBCHANNEL.  */
static DisasJumpType op_csch(DisasContext *s, DisasOps *o)
{
    gen_helper_csch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* HALT SUBCHANNEL.  */
static DisasJumpType op_hsch(DisasContext *s, DisasOps *o)
{
    gen_helper_hsch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* MODIFY SUBCHANNEL: operand 2 addresses the SCHIB.  */
static DisasJumpType op_msch(DisasContext *s, DisasOps *o)
{
    gen_helper_msch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* RESET CHANNEL PATH.  */
static DisasJumpType op_rchp(DisasContext *s, DisasOps *o)
{
    gen_helper_rchp(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* RESUME SUBCHANNEL.  */
static DisasJumpType op_rsch(DisasContext *s, DisasOps *o)
{
    gen_helper_rsch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SET ADDRESS LIMIT.  */
static DisasJumpType op_sal(DisasContext *s, DisasOps *o)
{
    gen_helper_sal(cpu_env, regs[1]);
    return DISAS_NEXT;
}

/* SET CHANNEL MONITOR: operands in GR1, GR2 and the second operand.  */
static DisasJumpType op_schm(DisasContext *s, DisasOps *o)
{
    gen_helper_schm(cpu_env, regs[1], regs[2], o->in2);
    return DISAS_NEXT;
}
4432
/* SIGNAL ADAPTER: not emulated.  */
static DisasJumpType op_siga(DisasContext *s, DisasOps *o)
{
    /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
    gen_op_movi_cc(s, 3);
    return DISAS_NEXT;
}

/* STORE CHANNEL PATH STATUS: not emulated.  */
static DisasJumpType op_stcps(DisasContext *s, DisasOps *o)
{
    /* The instruction is suppressed if not provided. */
    return DISAS_NEXT;
}

/* START SUBCHANNEL: operand 2 addresses the ORB.  */
static DisasJumpType op_ssch(DisasContext *s, DisasOps *o)
{
    gen_helper_ssch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* STORE SUBCHANNEL: operand 2 addresses the SCHIB to fill in.  */
static DisasJumpType op_stsch(DisasContext *s, DisasOps *o)
{
    gen_helper_stsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* STORE CHANNEL REPORT WORD.  */
static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o)
{
    gen_helper_stcrw(cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST PENDING INTERRUPTION.  */
static DisasJumpType op_tpi(DisasContext *s, DisasOps *o)
{
    gen_helper_tpi(cc_op, cpu_env, o->addr1);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST SUBCHANNEL: operand 2 addresses the IRB.  */
static DisasJumpType op_tsch(DisasContext *s, DisasOps *o)
{
    gen_helper_tsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* CHANNEL SUBSYSTEM CALL: operand addresses the command block.  */
static DisasJumpType op_chsc(DisasContext *s, DisasOps *o)
{
    gen_helper_chsc(cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4487
/* STORE PREFIX: return the prefix register, masked to its
   architecturally valid bits.  */
static DisasJumpType op_stpx(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
    return DISAS_NEXT;
}
4494
/*
 * STORE THEN AND/OR SYSTEM MASK (STNSM opcode 0xac / STOSM): store the
 * current system-mask byte, then AND or OR the immediate into it.
 */
static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s, i2);
    TCGv_i64 t;

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place.  */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
    tcg_temp_free_i64(t);

    if (s->fields.op == 0xac) {
        /* STNSM: AND the immediate into the system-mask byte.  */
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        /* STOSM: OR the immediate into the system-mask byte.  */
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }

    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return DISAS_PC_STALE_NOCHAIN;
}
4518
/* STORE USING REAL ADDRESS: store via the real-address MMU index;
   s->insn->data carries the MemOp size.  */
static DisasJumpType op_stura(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->in1, o->in2, MMU_REAL_IDX, s->insn->data);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        /* PER storage-alteration event for real-address stores.  */
        update_psw_addr(s);
        gen_helper_per_store_real(cpu_env);
    }
    return DISAS_NEXT;
}
4529#endif
4530
/* STORE FACILITY LIST EXTENDED: length in GR0, CC from the helper.  */
static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
{
    gen_helper_stfle(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4537
/* Generic stores of in1 to the address in in2, by width.  */

static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
4561
/* STORE ACCESS MULTIPLE: store access registers r1..r3 at in2.  */
static DisasJumpType op_stam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_stam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}
4571
/*
 * STORE CHARACTERS UNDER MASK: store the bytes of r1 selected by the
 * m3 mask to successive bytes at in2.  s->insn->data is the bit offset
 * of the word within the register that m3 refers to (0 for STCM,
 * 32 for STCMH).  Contiguous masks are turned into one wider store;
 * anything else falls back to per-byte stores.
 */
static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    int pos, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Bit position of the lowest selected byte within the register.  */
    pos = base + ctz32(m3) * 8;
    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
        break;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
        break;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
        break;

    default:
        /* This is going to be a sequence of shifts and stores.  */
        pos = base + 32 - 8;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_shri_i64(tmp, o->in1, pos);
                tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
                tcg_gen_addi_i64(o->in2, o->in2, 1);
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
4620
/*
 * STORE MULTIPLE: store registers r1..r3 (wrapping 15 -> 0) at in2.
 * s->insn->data is the element size in bytes (4 or 8).  The loop is
 * unrolled at translation time since r1/r3 are constants.
 */
static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    int size = s->insn->data;
    TCGv_i64 tsize = tcg_const_i64(size);

    while (1) {
        if (size == 8) {
            tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
        } else {
            tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
        }
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, tsize);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(tsize);
    return DISAS_NEXT;
}
4644
/*
 * STORE MULTIPLE HIGH: store the high words of registers r1..r3
 * (wrapping 15 -> 0) at successive words starting at in2.
 */
static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_const_i64(4);
    TCGv_i64 t32 = tcg_const_i64(32);

    while (1) {
        /* NOTE(review): the 32-bit store takes the LOW word of t, so
           this shift looks like it should be a right shift; the i64
           left shift by 32 leaves the high word in the low half only
           if tcg_gen_qemu_st32 reads bits 32-63 — verify.  */
        tcg_gen_shl_i64(t, regs[r1], t32);
        tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t);
    tcg_temp_free_i64(t4);
    tcg_temp_free_i64(t32);
    return DISAS_NEXT;
}
4668
/* STORE PAIR TO QUADWORD: atomic 16-byte store; falls back to
   exit_atomic when no 128-bit atomics are available in parallel mode.  */
static DisasJumpType op_stpq(DisasContext *s, DisasOps *o)
{
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        gen_helper_stpq(cpu_env, o->in2, o->out2, o->out);
    } else if (HAVE_ATOMIC128) {
        gen_helper_stpq_parallel(cpu_env, o->in2, o->out2, o->out);
    } else {
        gen_helper_exit_atomic(cpu_env);
        return DISAS_NORETURN;
    }
    return DISAS_NEXT;
}
4681
4682static DisasJumpType op_srst(DisasContext *s, DisasOps *o)
4683{
4684    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
4685    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
4686
4687    gen_helper_srst(cpu_env, r1, r2);
4688
4689    tcg_temp_free_i32(r1);
4690    tcg_temp_free_i32(r2);
4691    set_cc_static(s);
4692    return DISAS_NEXT;
4693}
4694
4695static DisasJumpType op_srstu(DisasContext *s, DisasOps *o)
4696{
4697    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
4698    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
4699
4700    gen_helper_srstu(cpu_env, r1, r2);
4701
4702    tcg_temp_free_i32(r1);
4703    tcg_temp_free_i32(r2);
4704    set_cc_static(s);
4705    return DISAS_NEXT;
4706}
4707
/* SUBTRACT: out = in1 - in2; CC is produced by the cout hook.  */
static DisasJumpType op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* SUBTRACT LOGICAL (64-bit): also compute the borrow into cc_src
   (0 or -1) via a double-word subtract for the CC.  */
static DisasJumpType op_subu64(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_sub2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    return DISAS_NEXT;
}
4720
/* Compute borrow (0, -1) into cc_src. */
static void compute_borrow(DisasContext *s)
{
    /* Note the deliberate fallthrough chain: the default case first
       materializes the CC, then the STATIC case extracts the carry
       bit, then the ADDU case converts carry to borrow.  */
    switch (s->cc_op) {
    case CC_OP_SUBU:
        /* The borrow value is already in cc_src (0,-1). */
        break;
    default:
        gen_op_calc_cc(s);
        /* fall through */
    case CC_OP_STATIC:
        /* The carry flag is the msb of CC; compute into cc_src. */
        tcg_gen_extu_i32_i64(cc_src, cc_op);
        tcg_gen_shri_i64(cc_src, cc_src, 1);
        /* fall through */
    case CC_OP_ADDU:
        /* Convert carry (1,0) to borrow (0,-1). */
        tcg_gen_subi_i64(cc_src, cc_src, 1);
        break;
    }
}
4742
4743static DisasJumpType op_subb32(DisasContext *s, DisasOps *o)
4744{
4745    compute_borrow(s);
4746
4747    /* Borrow is {0, -1}, so add to subtract. */
4748    tcg_gen_add_i64(o->out, o->in1, cc_src);
4749    tcg_gen_sub_i64(o->out, o->out, o->in2);
4750    return DISAS_NEXT;
4751}
4752
/* SUBTRACT WITH BORROW (64-bit): as op_subb32, but the borrow chain
   must be tracked across 128 bits so cc_src ends with the new borrow.  */
static DisasJumpType op_subb64(DisasContext *s, DisasOps *o)
{
    compute_borrow(s);

    /*
     * Borrow is {0, -1}, so add to subtract; replicate the
     * borrow input to produce 128-bit -1 for the addition.
     */
    TCGv_i64 zero = tcg_const_i64(0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, cc_src);
    tcg_gen_sub2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);
    tcg_temp_free_i64(zero);

    return DISAS_NEXT;
}
4768
/* SUPERVISOR CALL: record the SVC code and instruction length in env,
   then raise the SVC exception.  Ends the TB.  */
static DisasJumpType op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    /* PSW address/CC must be up to date before taking the exception.  */
    update_psw_addr(s);
    update_cc_op(s);

    t = tcg_const_i32(get_field(s, i1) & 0xff);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
    tcg_temp_free_i32(t);

    t = tcg_const_i32(s->ilen);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
    tcg_temp_free_i32(t);

    gen_exception(EXCP_SVC);
    return DISAS_NORETURN;
}
4787
4788static DisasJumpType op_tam(DisasContext *s, DisasOps *o)
4789{
4790    int cc = 0;
4791
4792    cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0;
4793    cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0;
4794    gen_op_movi_cc(s, cc);
4795    return DISAS_NEXT;
4796}
4797
/* TEST DATA CLASS (short BFP): CC from the helper.  */
static DisasJumpType op_tceb(DisasContext *s, DisasOps *o)
{
    gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST DATA CLASS (long BFP).  */
static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST DATA CLASS (extended BFP): 128-bit operand in out/out2.  */
static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcxb(cc_op, cpu_env, o->out, o->out2, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4818
4819#ifndef CONFIG_USER_ONLY
4820
/* TEST BLOCK: helper zeroes the page and sets the CC.  */
static DisasJumpType op_testblock(DisasContext *s, DisasOps *o)
{
    gen_helper_testblock(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST PROTECTION: CC from the helper.  */
static DisasJumpType op_tprot(DisasContext *s, DisasOps *o)
{
    gen_helper_tprot(cc_op, cpu_env, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4834
4835#endif
4836
/* TEST DECIMAL: length operand is l1 + 1 bytes; CC from the helper.  */
static DisasJumpType op_tp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l1 = tcg_const_i32(get_field(s, l1) + 1);
    gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
    tcg_temp_free_i32(l1);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TRANSLATE: byte-wise table translation done in the helper.  */
static DisasJumpType op_tr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_tr(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TRANSLATE EXTENDED: the helper returns a 128-bit result pair.  */
static DisasJumpType op_tre(DisasContext *s, DisasOps *o)
{
    gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TRANSLATE AND TEST.  */
static DisasJumpType op_trt(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TRANSLATE AND TEST REVERSE.  */
static DisasJumpType op_trtr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}
4880
/*
 * TRANSLATE ONE/TWO TO ONE/TWO (TROO/TROT/TRTO/TRTT): the low two
 * opcode bits encode the source/destination element sizes.  The test
 * character comes from GR0, truncated to the source element width,
 * unless m3 bit 0 (ETF2-enhancement) disables the test (-1 sentinel).
 */
static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
    TCGv_i32 sizes = tcg_const_i32(s->insn->opc & 3);
    TCGv_i32 tst = tcg_temp_new_i32();
    int m3 = get_field(s, m3);

    /* Without the ETF2-enhancement facility, m3 is ignored.  */
    if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
        m3 = 0;
    }
    if (m3 & 1) {
        tcg_gen_movi_i32(tst, -1);
    } else {
        tcg_gen_extrl_i64_i32(tst, regs[0]);
        if (s->insn->opc & 3) {
            tcg_gen_ext8u_i32(tst, tst);
        } else {
            tcg_gen_ext16u_i32(tst, tst);
        }
    }
    gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    tcg_temp_free_i32(sizes);
    tcg_temp_free_i32(tst);
    set_cc_static(s);
    return DISAS_NEXT;
}
4911
/* TEST AND SET: atomically exchange 0xff into the byte at in2; the CC
   is the leftmost (sign) bit of the old byte value.  */
static DisasJumpType op_ts(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_const_i32(0xff);
    tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
    tcg_gen_extract_i32(cc_op, t1, 7, 1);
    tcg_temp_free_i32(t1);
    set_cc_static(s);
    return DISAS_NEXT;
}
4921
/* UNPACK: decimal unpack, done entirely in the helper.  */
static DisasJumpType op_unpk(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

/* UNPACK ASCII: specification exception on over-long operands.  */
static DisasJumpType op_unpka(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s, l1) + 1;
    TCGv_i32 l;

    /* The length must not exceed 32 bytes.  */
    if (l1 > 32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_const_i32(l1);
    gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* UNPACK UNICODE: as UNPKA with a larger, even-length limit.  */
static DisasJumpType op_unpku(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s, l1) + 1;
    TCGv_i32 l;

    /* The length must be even and should not exceed 64 bytes.  */
    if ((l1 & 1) || (l1 > 64)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_const_i32(l1);
    gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}
4963
4964
/*
 * EXCLUSIVE OR (character): XOR of two storage operands.  When both
 * operands are the same short region, this is x ^ x == 0, so emit an
 * inline zeroing sequence instead of calling the helper.
 */
static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
{
    int d1 = get_field(s, d1);
    int d2 = get_field(s, d2);
    int b1 = get_field(s, b1);
    int b2 = get_field(s, b2);
    int l = get_field(s, l1);
    TCGv_i32 t32;

    o->addr1 = get_address(s, 0, b1, d1);

    /* If the addresses are identical, this is a store/memset of zero.  */
    if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
        o->in2 = tcg_const_i64(0);

        /* l is a length-minus-one field; emit stores from widest to
           narrowest to cover l+1 bytes.  */
        l++;
        while (l >= 8) {
            tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
            l -= 8;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 8);
            }
        }
        if (l >= 4) {
            tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
            l -= 4;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 4);
            }
        }
        if (l >= 2) {
            tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
            l -= 2;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 2);
            }
        }
        if (l) {
            tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
        }
        /* The result is all zero, hence CC 0.  */
        gen_op_movi_cc(s, 0);
        return DISAS_NEXT;
    }

    /* But in general we'll defer to a helper.  */
    o->in2 = get_address(s, 0, b2, d2);
    t32 = tcg_const_i32(l);
    gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
    tcg_temp_free_i32(t32);
    set_cc_static(s);
    return DISAS_NEXT;
}
5017
/* EXCLUSIVE OR (register forms): out = in1 ^ in2.  */
static DisasJumpType op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* EXCLUSIVE OR IMMEDIATE: XOR an immediate into one sub-field of the
   register.  s->insn->data packs the field width (high byte) and its
   bit offset (low byte).  */
static DisasJumpType op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
5039
/*
 * EXCLUSIVE OR (storage-immediate, XI/XIY): with interlocked-access-2
 * the XOR is performed atomically in memory; otherwise via a plain
 * load/modify/store.  s->insn->data is the MemOp for the access.
 */
static DisasJumpType op_xi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
5060
/* Produce a zero output operand.  */
static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    return DISAS_NEXT;
}

/* Produce a zero output pair; out2 aliases out, so mark it global
   to prevent a double free.  */
static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    o->out2 = o->out;
    o->g_out2 = true;
    return DISAS_NEXT;
}
5074
5075#ifndef CONFIG_USER_ONLY
/*
 * zPCI instructions: all delegated to the PCI helpers, which also
 * set the condition code where the instruction defines one.
 */

/* CALL LOGICAL PROCESSOR.  */
static DisasJumpType op_clp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));

    gen_helper_clp(cpu_env, r2);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* PCI LOAD.  */
static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));

    gen_helper_pcilg(cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* PCI STORE.  */
static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));

    gen_helper_pcistg(cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* STORE PCI FUNCTION CONTROLS: ar is the access register of b2.  */
static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 ar = tcg_const_i32(get_field(s, b2));

    gen_helper_stpcifc(cpu_env, r1, o->addr1, ar);
    tcg_temp_free_i32(ar);
    tcg_temp_free_i32(r1);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SET INTERRUPTION CONTROLS.  */
static DisasJumpType op_sic(DisasContext *s, DisasOps *o)
{
    gen_helper_sic(cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* REFRESH PCI TRANSLATIONS.  */
static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));

    gen_helper_rpcit(cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* PCI STORE BLOCK.  */
static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    TCGv_i32 ar = tcg_const_i32(get_field(s, b2));

    gen_helper_pcistb(cpu_env, r1, r3, o->addr1, ar);
    tcg_temp_free_i32(ar);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* MODIFY PCI FUNCTION CONTROLS.  */
static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 ar = tcg_const_i32(get_field(s, b2));

    gen_helper_mpcifc(cpu_env, r1, o->addr1, ar);
    tcg_temp_free_i32(ar);
    tcg_temp_free_i32(r1);
    set_cc_static(s);
    return DISAS_NEXT;
}
5165#endif
5166
5167#include "translate_vx.c.inc"
5168
5169/* ====================================================================== */
5170/* The "Cc OUTput" generators.  Given the generated output (and in some cases
5171   the original inputs), update the various cc data structures in order to
5172   be able to compute the new condition code.  */
5173
/* Each cout_* helper records the appropriate CC_OP plus the values it
   needs, deferring the actual CC computation until it is consumed.  */

static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}

static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}

static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}

static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}

/* 32-bit logical add: split the 64-bit result into carry-out (upper
   word) and result (lower word) for CC_OP_ADDU.  */
static void cout_addu32(DisasContext *s, DisasOps *o)
{
    tcg_gen_shri_i64(cc_src, o->out, 32);
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, cc_dst);
}

/* 64-bit logical add: carry was already computed into cc_src.  */
static void cout_addu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, o->out);
}

static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}

static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}

static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}

static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}

static void cout_f32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
}

static void cout_f64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
}

static void cout_f128(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
}

static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}

static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}

static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}
5255
5256static void cout_neg64(DisasContext *s, DisasOps *o)
5257{
5258    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
5259}
5260
5261static void cout_nz32(DisasContext *s, DisasOps *o)
5262{
5263    tcg_gen_ext32u_i64(cc_dst, o->out);
5264    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
5265}
5266
5267static void cout_nz64(DisasContext *s, DisasOps *o)
5268{
5269    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
5270}
5271
5272static void cout_s32(DisasContext *s, DisasOps *o)
5273{
5274    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
5275}
5276
5277static void cout_s64(DisasContext *s, DisasOps *o)
5278{
5279    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
5280}
5281
5282static void cout_subs32(DisasContext *s, DisasOps *o)
5283{
5284    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
5285}
5286
5287static void cout_subs64(DisasContext *s, DisasOps *o)
5288{
5289    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
5290}
5291
5292static void cout_subu32(DisasContext *s, DisasOps *o)
5293{
5294    tcg_gen_sari_i64(cc_src, o->out, 32);
5295    tcg_gen_ext32u_i64(cc_dst, o->out);
5296    gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, cc_dst);
5297}
5298
5299static void cout_subu64(DisasContext *s, DisasOps *o)
5300{
5301    gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, o->out);
5302}
5303
5304static void cout_tm32(DisasContext *s, DisasOps *o)
5305{
5306    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
5307}
5308
5309static void cout_tm64(DisasContext *s, DisasOps *o)
5310{
5311    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
5312}
5313
5314static void cout_muls32(DisasContext *s, DisasOps *o)
5315{
5316    gen_op_update1_cc_i64(s, CC_OP_MULS_32, o->out);
5317}
5318
5319static void cout_muls64(DisasContext *s, DisasOps *o)
5320{
5321    /* out contains "high" part, out2 contains "low" part of 128 bit result */
5322    gen_op_update2_cc_i64(s, CC_OP_MULS_64, o->out, o->out2);
5323}
5324
5325/* ====================================================================== */
5326/* The "PREParation" generators.  These initialize the DisasOps.OUT fields
5327   with the TCG register to which we will write.  Used in combination with
5328   the "wout" generators, in some cases we need a new temporary, and in
5329   some cases we can write to a TCG global.  */
5330
/* Allocate a fresh temporary for the single output. */
static void prep_new(DisasContext *s, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}
#define SPEC_prep_new 0

/* Allocate fresh temporaries for a paired (128-bit) output. */
static void prep_new_P(DisasContext *s, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}
#define SPEC_prep_new_P 0

/* Write directly into the r1 TCG global; g_out flags it as a global
   rather than a temporary. */
static void prep_r1(DisasContext *s, DisasOps *o)
{
    o->out = regs[get_field(s, r1)];
    o->g_out = true;
}
#define SPEC_prep_r1 0

/* Write directly into the even/odd register pair r1:r1+1
   (SPEC_r1_even enforces that r1 is even). */
static void prep_r1_P(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->out = regs[r1];
    o->out2 = regs[r1 + 1];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_r1_P SPEC_r1_even

/* Whenever we need x1 in addition to other inputs, we'll load it to out/out2 */
static void prep_x1(DisasContext *s, DisasOps *o)
{
    o->out = load_freg(get_field(s, r1));
    o->out2 = load_freg(get_field(s, r1) + 2);
}
#define SPEC_prep_x1 SPEC_r1_f128
5367
5368/* ====================================================================== */
5369/* The "Write OUTput" generators.  These generally perform some non-trivial
5370   copy of data to TCG globals, or to main memory.  The trivial cases are
5371   generally handled by having a "prep" generator install the TCG global
5372   as the destination of the operation.  */
5373
/* Store the full 64-bit result into r1. */
static void wout_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->out);
}
#define SPEC_wout_r1 0

/* Store the secondary output (out2) into r1. */
static void wout_out2_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->out2);
}
#define SPEC_wout_out2_r1 0

/* Insert only the low 8 bits of the result into r1. */
static void wout_r1_8(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}
#define SPEC_wout_r1_8 0

/* Insert only the low 16 bits of the result into r1. */
static void wout_r1_16(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}
#define SPEC_wout_r1_16 0

/* Store into the low 32 bits of r1, preserving the high half. */
static void wout_r1_32(DisasContext *s, DisasOps *o)
{
    store_reg32_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_r1_32 0

/* Store into the high 32 bits of r1 (high-word facility). */
static void wout_r1_32h(DisasContext *s, DisasOps *o)
{
    store_reg32h_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_r1_32h 0

/* Store two 32-bit results into the low halves of the pair r1:r1+1. */
static void wout_r1_P32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64(r1 + 1, o->out2);
}
#define SPEC_wout_r1_P32 SPEC_r1_even

/*
 * Split a 64-bit result across the pair: low 32 bits into r1+1, high
 * 32 bits into r1.  Note this clobbers o->out with the shift, which is
 * fine since wout runs after the result has been fully computed.
 */
static void wout_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    store_reg32_i64(r1 + 1, o->out);
    tcg_gen_shri_i64(o->out, o->out, 32);
    store_reg32_i64(r1, o->out);
}
#define SPEC_wout_r1_D32 SPEC_r1_even

/* As wout_r1_P32, but targeting the r3 pair. */
static void wout_r3_P32(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    store_reg32_i64(r3, o->out);
    store_reg32_i64(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P32 SPEC_r3_even

/* Store two full 64-bit results into the r3 pair. */
static void wout_r3_P64(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    store_reg(r3, o->out);
    store_reg(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P64 SPEC_r3_even

/* Store a short (32-bit) float result into f1. */
static void wout_e1(DisasContext *s, DisasOps *o)
{
    store_freg32_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_e1 0

/* Store a long (64-bit) float result into f1. */
static void wout_f1(DisasContext *s, DisasOps *o)
{
    store_freg(get_field(s, r1), o->out);
}
#define SPEC_wout_f1 0

/* Store an extended (128-bit) float result into the f1/f1+2 pair. */
static void wout_x1(DisasContext *s, DisasOps *o)
{
    int f1 = get_field(s, r1);
    store_freg(f1, o->out);
    store_freg(f1 + 2, o->out2);
}
#define SPEC_wout_x1 SPEC_r1_f128

/* Conditional write-back: skip the store when r1 and r2 coincide. */
static void wout_cond_r1r2_32(DisasContext *s, DisasOps *o)
{
    if (get_field(s, r1) != get_field(s, r2)) {
        store_reg32_i64(get_field(s, r1), o->out);
    }
}
#define SPEC_wout_cond_r1r2_32 0

static void wout_cond_e1e2(DisasContext *s, DisasOps *o)
{
    if (get_field(s, r1) != get_field(s, r2)) {
        store_freg32_i64(get_field(s, r1), o->out);
    }
}
#define SPEC_wout_cond_e1e2 0

/* Memory stores via the first-operand address computed into addr1. */
static void wout_m1_8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_8 0

static void wout_m1_16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_16 0

#ifndef CONFIG_USER_ONLY
/* Alignment-checked variant, used only in system mode. */
static void wout_m1_16a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN);
}
#define SPEC_wout_m1_16a 0
#endif

static void wout_m1_32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_32 0

#ifndef CONFIG_USER_ONLY
static void wout_m1_32a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_wout_m1_32a 0
#endif

static void wout_m1_64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_64 0

#ifndef CONFIG_USER_ONLY
static void wout_m1_64a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
}
#define SPEC_wout_m1_64a 0
#endif

/* Store via the second-operand address (left in in2 by the in2 hook). */
static void wout_m2_32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
}
#define SPEC_wout_m2_32 0

/* Copy the (possibly transformed) second input into r1. */
static void wout_in2_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->in2);
}
#define SPEC_wout_in2_r1 0

static void wout_in2_r1_32(DisasContext *s, DisasOps *o)
{
    store_reg32_i64(get_field(s, r1), o->in2);
}
#define SPEC_wout_in2_r1_32 0
5546
5547/* ====================================================================== */
5548/* The "INput 1" generators.  These load the first operand to an insn.  */
5549
/* Load r1 into a fresh temporary. */
static void in1_r1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r1));
}
#define SPEC_in1_r1 0

/* Alias the r1 TCG global directly (read-only use; g_in1 prevents
   treating it as a freeable temporary). */
static void in1_r1_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_r1_o 0

/* Sign-extended low 32 bits of r1. */
static void in1_r1_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1)]);
}
#define SPEC_in1_r1_32s 0

/* Zero-extended low 32 bits of r1. */
static void in1_r1_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1)]);
}
#define SPEC_in1_r1_32u 0

/* High 32 bits of r1, shifted down (high-word facility). */
static void in1_r1_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r1)], 32);
}
#define SPEC_in1_r1_sr32 0

/* The odd register of the even/odd pair r1:r1+1. */
static void in1_r1p1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r1) + 1);
}
#define SPEC_in1_r1p1 SPEC_r1_even

static void in1_r1p1_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r1) + 1];
    o->g_in1 = true;
}
#define SPEC_in1_r1p1_o SPEC_r1_even

static void in1_r1p1_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1) + 1]);
}
#define SPEC_in1_r1p1_32s SPEC_r1_even

static void in1_r1p1_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1) + 1]);
}
#define SPEC_in1_r1p1_32u SPEC_r1_even

/* Build a 64-bit value from the pair: r1 supplies the high 32 bits,
   r1+1 the low 32 bits (matching the split in wout_r1_D32). */
static void in1_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}
#define SPEC_in1_r1_D32 SPEC_r1_even

static void in1_r2(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r2));
}
#define SPEC_in1_r2 0

static void in1_r2_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r2)], 32);
}
#define SPEC_in1_r2_sr32 0

static void in1_r2_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r2)]);
}
#define SPEC_in1_r2_32u 0

static void in1_r3(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r3));
}
#define SPEC_in1_r3 0

static void in1_r3_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_r3_o 0

static void in1_r3_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r3)]);
}
#define SPEC_in1_r3_32s 0

static void in1_r3_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r3)]);
}
#define SPEC_in1_r3_32u 0

static void in1_r3_D32(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
}
#define SPEC_in1_r3_D32 SPEC_r3_even

/* Short (32-bit) float from f1. */
static void in1_e1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(s, r1));
}
#define SPEC_in1_e1 0

/* Long (64-bit) float from f1. */
static void in1_f1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r1));
}
#define SPEC_in1_f1 0

/* Load the high double word of an extended (128-bit) format FP number */
static void in1_x2h(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r2));
}
#define SPEC_in1_x2h SPEC_r2_f128

static void in1_f3(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r3));
}
#define SPEC_in1_f3 0

/* Compute the first-operand effective address (no memory access). */
static void in1_la1(DisasContext *s, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
}
#define SPEC_in1_la1 0

/* Second-operand address, including the index register when present. */
static void in1_la2(DisasContext *s, DisasOps *o)
{
    int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
}
#define SPEC_in1_la2 0

/* Memory loads: compute addr1, then load with the given width/sign. */
static void in1_m1_8u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_8u 0

static void in1_m1_16s(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16s 0

static void in1_m1_16u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16u 0

static void in1_m1_32s(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32s 0

static void in1_m1_32u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32u 0

static void in1_m1_64(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_64 0
5759
5760/* ====================================================================== */
5761/* The "INput 2" generators.  These load the second operand to an insn.  */
5762
/* Alias the r1 TCG global as the second input (read-only). */
static void in2_r1_o(DisasContext *s, DisasOps *o)
{
    o->in2 = regs[get_field(s, r1)];
    o->g_in2 = true;
}
#define SPEC_in2_r1_o 0

/* Zero-extended low 16 bits of r1. */
static void in2_r1_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r1)]);
}
#define SPEC_in2_r1_16u 0

/* Zero-extended low 32 bits of r1. */
static void in2_r1_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r1)]);
}
#define SPEC_in2_r1_32u 0

/* 64-bit value from the even/odd pair: r1 high, r1+1 low. */
static void in2_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
}
#define SPEC_in2_r1_D32 SPEC_r1_even

static void in2_r2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_reg(get_field(s, r2));
}
#define SPEC_in2_r2 0

static void in2_r2_o(DisasContext *s, DisasOps *o)
{
    o->in2 = regs[get_field(s, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_r2_o 0

/* Load r2 only when r2 != 0; otherwise in2 stays NULL. */
static void in2_r2_nz(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}
#define SPEC_in2_r2_nz 0

/* Sign/zero extensions of the low 8/16 bits of r2. */
static void in2_r2_8s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_8s 0

static void in2_r2_8u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_8u 0

static void in2_r2_16s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_16s 0

static void in2_r2_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_16u 0

static void in2_r3(DisasContext *s, DisasOps *o)
{
    o->in2 = load_reg(get_field(s, r3));
}
#define SPEC_in2_r3 0

/* High 32 bits of r3, shifted down. */
static void in2_r3_sr32(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(s, r3)], 32);
}
#define SPEC_in2_r3_sr32 0

static void in2_r3_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r3)]);
}
#define SPEC_in2_r3_32u 0

static void in2_r2_32s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_32s 0

static void in2_r2_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_32u 0

static void in2_r2_sr32(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(s, r2)], 32);
}
#define SPEC_in2_r2_sr32 0

/* Short (32-bit) float from f2. */
static void in2_e2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(s, r2));
}
#define SPEC_in2_e2 0

/* Long (64-bit) float from f2. */
static void in2_f2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg(get_field(s, r2));
}
#define SPEC_in2_f2 0

/* Load the low double word of an extended (128-bit) format FP number */
static void in2_x2l(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg(get_field(s, r2) + 2);
}
#define SPEC_in2_x2l SPEC_r2_f128

static void in2_ra2(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);

    /* Note: *don't* treat !r2 as 0, use the reg value. */
    o->in2 = tcg_temp_new_i64();
    gen_addi_and_wrap_i64(s, o->in2, regs[r2], 0);
}
#define SPEC_in2_ra2 0

/* Second-operand effective address, placed in in2 (no memory access). */
static void in2_a2(DisasContext *s, DisasOps *o)
{
    int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
    o->in2 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
}
#define SPEC_in2_a2 0

/* PC-relative address: pc + 2 * i2 (i2 counts halfwords). */
static void in2_ri2(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->base.pc_next + (int64_t)get_field(s, i2) * 2);
}
#define SPEC_in2_ri2 0

/* Shift amounts, masked to 31 or 63 bits by help_l2_shift. */
static void in2_sh32(DisasContext *s, DisasOps *o)
{
    help_l2_shift(s, o, 31);
}
#define SPEC_in2_sh32 0

static void in2_sh64(DisasContext *s, DisasOps *o)
{
    help_l2_shift(s, o, 63);
}
#define SPEC_in2_sh64 0

/* Memory loads: the address computed by in2_a2 is overwritten in place
   by the value loaded from it. */
static void in2_m2_8u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_8u 0

static void in2_m2_16s(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16s 0

static void in2_m2_16u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16u 0

static void in2_m2_32s(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32s 0

static void in2_m2_32u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32u 0

#ifndef CONFIG_USER_ONLY
/* Alignment-checked variant, system mode only. */
static void in2_m2_32ua(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_in2_m2_32ua 0
#endif

static void in2_m2_64(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_64 0

/* As in2_m2_64, but the loaded value is itself an address and is
   wrapped to the current addressing mode. */
static void in2_m2_64w(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
    gen_addi_and_wrap_i64(s, o->in2, o->in2, 0);
}
#define SPEC_in2_m2_64w 0

#ifndef CONFIG_USER_ONLY
static void in2_m2_64a(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEQ | MO_ALIGN);
}
#define SPEC_in2_m2_64a 0
#endif

/* PC-relative loads: address from in2_ri2, then load through it. */
static void in2_mri2_16u(DisasContext *s, DisasOps *o)
{
    in2_ri2(s, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_16u 0

static void in2_mri2_32s(DisasContext *s, DisasOps *o)
{
    in2_ri2(s, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32s 0

static void in2_mri2_32u(DisasContext *s, DisasOps *o)
{
    in2_ri2(s, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32u 0

static void in2_mri2_64(DisasContext *s, DisasOps *o)
{
    in2_ri2(s, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_64 0

/* Immediates: i2 as-is, or narrowed to an unsigned 8/16/32-bit value. */
static void in2_i2(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_const_i64(get_field(s, i2));
}
#define SPEC_in2_i2 0

static void in2_i2_8u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint8_t)get_field(s, i2));
}
#define SPEC_in2_i2_8u 0

static void in2_i2_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint16_t)get_field(s, i2));
}
#define SPEC_in2_i2_16u 0

static void in2_i2_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint32_t)get_field(s, i2));
}
#define SPEC_in2_i2_32u 0

/* Immediate shifted left by an insn-specific amount (s->insn->data),
   e.g. for the *HIGH immediate forms. */
static void in2_i2_16u_shl(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(s, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0

static void in2_i2_32u_shl(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(s, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_32u_shl 0

#ifndef CONFIG_USER_ONLY
/* The raw instruction image itself, for ops that need to inspect it. */
static void in2_insn(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->fields.raw_insn);
}
#define SPEC_in2_insn 0
#endif
6078
6079/* ====================================================================== */
6080
6081/* Find opc within the table of insns.  This is formulated as a switch
6082   statement so that (1) we get compile-time notice of cut-paste errors
6083   for duplicated opcodes, and (2) the compiler generates the binary
6084   search tree, rather than us having to post-process the table.  */
6085
6086#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
6087    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0)
6088
6089#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
6090    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0)
6091
6092#define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \
6093    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL)
6094
6095#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM,
6096
6097enum DisasInsnEnum {
6098#include "insn-data.def"
6099};
6100
6101#undef E
6102#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) {                   \
6103    .opc = OPC,                                                             \
6104    .flags = FL,                                                            \
6105    .fmt = FMT_##FT,                                                        \
6106    .fac = FAC_##FC,                                                        \
6107    .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
6108    .name = #NM,                                                            \
6109    .help_in1 = in1_##I1,                                                   \
6110    .help_in2 = in2_##I2,                                                   \
6111    .help_prep = prep_##P,                                                  \
6112    .help_wout = wout_##W,                                                  \
6113    .help_cout = cout_##CC,                                                 \
6114    .help_op = op_##OP,                                                     \
6115    .data = D                                                               \
6116 },
6117
6118/* Allow 0 to be used for NULL in the table below.  */
6119#define in1_0  NULL
6120#define in2_0  NULL
6121#define prep_0  NULL
6122#define wout_0  NULL
6123#define cout_0  NULL
6124#define op_0  NULL
6125
6126#define SPEC_in1_0 0
6127#define SPEC_in2_0 0
6128#define SPEC_prep_0 0
6129#define SPEC_wout_0 0
6130
6131/* Give smaller names to the various facilities.  */
6132#define FAC_Z           S390_FEAT_ZARCH
6133#define FAC_CASS        S390_FEAT_COMPARE_AND_SWAP_AND_STORE
6134#define FAC_DFP         S390_FEAT_DFP
6135#define FAC_DFPR        S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* DFP-rounding */
6136#define FAC_DO          S390_FEAT_STFLE_45 /* distinct-operands */
6137#define FAC_EE          S390_FEAT_EXECUTE_EXT
6138#define FAC_EI          S390_FEAT_EXTENDED_IMMEDIATE
6139#define FAC_FPE         S390_FEAT_FLOATING_POINT_EXT
6140#define FAC_FPSSH       S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPS-sign-handling */
6141#define FAC_FPRGR       S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPR-GR-transfer */
6142#define FAC_GIE         S390_FEAT_GENERAL_INSTRUCTIONS_EXT
6143#define FAC_HFP_MA      S390_FEAT_HFP_MADDSUB
6144#define FAC_HW          S390_FEAT_STFLE_45 /* high-word */
6145#define FAC_IEEEE_SIM   S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* IEEE-exception-simulation */
6146#define FAC_MIE         S390_FEAT_STFLE_49 /* misc-instruction-extensions */
6147#define FAC_LAT         S390_FEAT_STFLE_49 /* load-and-trap */
6148#define FAC_LOC         S390_FEAT_STFLE_45 /* load/store on condition 1 */
6149#define FAC_LOC2        S390_FEAT_STFLE_53 /* load/store on condition 2 */
6150#define FAC_LD          S390_FEAT_LONG_DISPLACEMENT
6151#define FAC_PC          S390_FEAT_STFLE_45 /* population count */
6152#define FAC_SCF         S390_FEAT_STORE_CLOCK_FAST
6153#define FAC_SFLE        S390_FEAT_STFLE
6154#define FAC_ILA         S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
6155#define FAC_MVCOS       S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
6156#define FAC_LPP         S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
6157#define FAC_DAT_ENH     S390_FEAT_DAT_ENH
6158#define FAC_E2          S390_FEAT_EXTENDED_TRANSLATION_2
6159#define FAC_EH          S390_FEAT_STFLE_49 /* execution-hint */
6160#define FAC_PPA         S390_FEAT_STFLE_49 /* processor-assist */
6161#define FAC_LZRB        S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
6162#define FAC_ETF3        S390_FEAT_EXTENDED_TRANSLATION_3
6163#define FAC_MSA         S390_FEAT_MSA /* message-security-assist facility */
6164#define FAC_MSA3        S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
6165#define FAC_MSA4        S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
6166#define FAC_MSA5        S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
6167#define FAC_MSA8        S390_FEAT_MSA_EXT_8 /* msa-extension-8 facility */
6168#define FAC_ECT         S390_FEAT_EXTRACT_CPU_TIME
6169#define FAC_PCI         S390_FEAT_ZPCI /* z/PCI facility */
6170#define FAC_AIS         S390_FEAT_ADAPTER_INT_SUPPRESSION
6171#define FAC_V           S390_FEAT_VECTOR /* vector facility */
6172#define FAC_VE          S390_FEAT_VECTOR_ENH /* vector enhancements facility 1 */
6173#define FAC_MIE2        S390_FEAT_MISC_INSTRUCTION_EXT2 /* miscellaneous-instruction-extensions facility 2 */
6174
6175static const DisasInsn insn_info[] = {
6176#include "insn-data.def"
6177};
6178
6179#undef E
6180#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \
6181    case OPC: return &insn_info[insn_ ## NM];
6182
/*
 * Map an opcode to its DisasInsn descriptor, or NULL if unknown.
 * The E macro (redefined just above) expands each insn-data.def entry
 * into a "case OPC: return &insn_info[...];" arm, so the compiler both
 * rejects duplicate opcodes and builds the search tree for us.
 */
static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}
6191
6192#undef F
6193#undef E
6194#undef D
6195#undef C
6196
/* Extract a field from the insn.  The INSN should be left-aligned in
   the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principles of Operation.  */
6200
/*
 * Decode one operand field described by F from the (left-aligned)
 * instruction image INSN into the compressed field array in O.
 */
static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
{
    uint32_t r, m;

    /* Size 0 marks an absent field; nothing to store. */
    if (f->size == 0) {
        return;
    }

    /* Zero extract the field from the insn.  */
    r = (insn << f->beg) >> (64 - f->size);

    /* Sign-extend, or un-swap the field as necessary.  */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        assert(f->size <= 32);
        /* Classic sign-extension trick: xor with the sign bit, then
           subtract it. */
        m = 1u << (f->size - 1);
        r = (r ^ m) - m;
        break;
    case 2: /* dl+dh split, signed 20 bit. */
        /* The raw field is DL (12 bits) followed by DH (8 bits);
           reassemble as the sign-extended 20-bit value DH:DL. */
        r = ((int8_t)r << 12) | (r >> 8);
        break;
    case 3: /* MSB stored in RXB */
        /* Vector insns keep the fifth register bit in the RXB byte
           (insn bits 36-39); which RXB bit applies depends on where
           the 4-bit field sits in the insn. */
        g_assert(f->size == 4);
        switch (f->beg) {
        case 8:
            r |= extract64(insn, 63 - 36, 1) << 4;
            break;
        case 12:
            r |= extract64(insn, 63 - 37, 1) << 4;
            break;
        case 16:
            r |= extract64(insn, 63 - 38, 1) << 4;
            break;
        case 32:
            r |= extract64(insn, 63 - 39, 1) << 4;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    default:
        abort();
    }

    /*
     * Validate that the "compressed" encoding we selected above is valid.
     * I.e. we haven't made two different original fields overlap.
     */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;

    o->c[f->indexC] = r;
}
6257
/* Lookup the insn at the current PC, extracting the operands into O and
   returning the info struct for the insn.  Returns NULL for invalid insn.  */

static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s)
{
    uint64_t insn, pc = s->base.pc_next;
    int op, op2, ilen;
    const DisasInsn *info;

    if (unlikely(s->ex_value)) {
        /* Drop the EX data now, so that it's clear on exception paths.  */
        TCGv_i64 zero = tcg_const_i64(0);
        tcg_gen_st_i64(zero, cpu_env, offsetof(CPUS390XState, ex_value));
        tcg_temp_free_i64(zero);

        /* Extract the values saved by EXECUTE.  */
        insn = s->ex_value & 0xffffffffffff0000ull;
        ilen = s->ex_value & 0xf;
        op = insn >> 56;
    } else {
        insn = ld_code2(env, s, pc);
        op = (insn >> 8) & 0xff;
        ilen = get_ilen(op);
        /* Left-align the insn in the 64-bit word, the layout that
           extract_field expects.  */
        switch (ilen) {
        case 2:
            insn = insn << 48;
            break;
        case 4:
            insn = ld_code4(env, s, pc) << 32;
            break;
        case 6:
            insn = (insn << 48) | (ld_code4(env, s, pc + 2) << 16);
            break;
        default:
            g_assert_not_reached();
        }
    }
    /* Record the address of the following insn and this insn's length.  */
    s->pc_tmp = s->base.pc_next + ilen;
    s->ilen = ilen;

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0. */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE, IE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        op2 = (insn << 8) >> 56;   /* op2 in the second byte */
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        op2 = (insn << 12) >> 60;  /* op2 in the low nibble of byte 1 */
        break;
    case 0xc5: /* MII */
    case 0xc7: /* SMI */
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        op2 = 0;                   /* no secondary opcode */
        break;
    default:
        op2 = (insn << 40) >> 56;  /* op2 at bit 40 (last byte of 6-byte insn) */
        break;
    }

    memset(&s->fields, 0, sizeof(s->fields));
    s->fields.raw_insn = insn;
    s->fields.op = op;
    s->fields.op2 = op2;

    /* Lookup the instruction.  */
    info = lookup_opc(op << 8 | op2);
    s->insn = info;

    /* If we found it, extract the operands.  */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(&s->fields, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
6361
/* True unless REG is one of the four original FP registers (0, 2, 4, 6);
   all other register numbers require the AFP facility.  */
static bool is_afp_reg(int reg)
{
    return (reg & 1) != 0 || reg > 6;
}
6366
/* True when REG may name a 128-bit FP register pair.
   Valid pair starts are 0,1,4,5,8,9,12,13 — exactly those numbers
   with bit 1 clear.  */
static bool is_fp_pair(int reg)
{
    return (reg & 0x2) == 0;
}
6372
/*
 * Decode and translate a single instruction at the current PC.
 * Returns the resulting DisasJumpType (DISAS_NEXT to keep translating,
 * DISAS_NORETURN after generating an exception, ...) and advances
 * s->base.pc_next past the insn.
 */
static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    DisasJumpType ret = DISAS_NEXT;
    DisasOps o = {};
    bool icount = false;

    /* Search for the insn in the table.  */
    insn = extract_insn(env, s);

    /* Update insn_start now that we know the ILEN.  */
    tcg_set_insn_start_param(s->insn_start, 2, s->ilen);

    /* Not found means unimplemented/illegal opcode.  */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      s->fields.op, s->fields.op2);
        gen_illegal_opcode(s);
        ret = DISAS_NORETURN;
        goto out;
    }

#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        /* PER tracing: report the fetch address of every insn.  */
        TCGv_i64 addr = tcg_const_i64(s->base.pc_next);
        gen_helper_per_ifetch(cpu_env, addr);
        tcg_temp_free_i64(addr);
    }
#endif

    /* process flags */
    if (insn->flags) {
        /* privileged instruction */
        if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) {
            gen_program_exception(s, PGM_PRIVILEGED);
            ret = DISAS_NORETURN;
            goto out;
        }

        /* if AFP is not enabled, instructions and registers are forbidden */
        if (!(s->base.tb->flags & FLAG_MASK_AFP)) {
            /* Data-exception code: 1 = AFP register, 2 = BFP insn,
               3 = DFP insn, 0xfe = vector insn; 0 = no exception.  */
            uint8_t dxc = 0;

            if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(s, r1))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(s, r2))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(s, r3))) {
                dxc = 1;
            }
            if (insn->flags & IF_BFP) {
                dxc = 2;
            }
            if (insn->flags & IF_DFP) {
                dxc = 3;
            }
            if (insn->flags & IF_VEC) {
                dxc = 0xfe;
            }
            if (dxc) {
                gen_data_exception(dxc);
                ret = DISAS_NORETURN;
                goto out;
            }
        }

        /* if vector instructions not enabled, executing them is forbidden */
        if (insn->flags & IF_VEC) {
            if (!((s->base.tb->flags & FLAG_MASK_VECTOR))) {
                gen_data_exception(0xfe);
                ret = DISAS_NORETURN;
                goto out;
            }
        }

        /* input/output is the special case for icount mode */
        if (unlikely(insn->flags & IF_IO)) {
            icount = tb_cflags(s->base.tb) & CF_USE_ICOUNT;
            if (icount) {
                gen_io_start();
            }
        }
    }

    /* Check for insn specification exceptions.  */
    if (insn->spec) {
        if ((insn->spec & SPEC_r1_even && get_field(s, r1) & 1) ||
            (insn->spec & SPEC_r2_even && get_field(s, r2) & 1) ||
            (insn->spec & SPEC_r3_even && get_field(s, r3) & 1) ||
            (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(s, r1))) ||
            (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(s, r2)))) {
            gen_program_exception(s, PGM_SPECIFICATION);
            ret = DISAS_NORETURN;
            goto out;
        }
    }

    /* Implement the instruction.  Each stage (load inputs, prepare,
       operate, write outputs, set CC) is optional per descriptor.  */
    if (insn->help_in1) {
        insn->help_in1(s, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    /* Skip write-back and CC stages if the op raised an exception.  */
    if (ret != DISAS_NORETURN) {
        if (insn->help_wout) {
            insn->help_wout(s, &o);
        }
        if (insn->help_cout) {
            insn->help_cout(s, &o);
        }
    }

    /* Free any temporaries created by the helpers.  */
    if (o.out && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (o.out2 && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (o.in1 && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (o.in2 && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (o.addr1) {
        tcg_temp_free_i64(o.addr1);
    }

    /* io should be the last instruction in tb when icount is enabled */
    if (unlikely(icount && ret == DISAS_NEXT)) {
        ret = DISAS_PC_STALE;
    }

#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        /* An exception might be triggered, save PSW if not already done.  */
        if (ret == DISAS_NEXT || ret == DISAS_PC_STALE) {
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
        }

        /* Call the helper to check for a possible PER exception.  */
        gen_helper_per_check_exception(cpu_env);
    }
#endif

out:
    /* Advance to the next instruction.  */
    s->base.pc_next = s->pc_tmp;
    return ret;
}
6533
6534static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
6535{
6536    DisasContext *dc = container_of(dcbase, DisasContext, base);
6537
6538    /* 31-bit mode */
6539    if (!(dc->base.tb->flags & FLAG_MASK_64)) {
6540        dc->base.pc_first &= 0x7fffffff;
6541        dc->base.pc_next = dc->base.pc_first;
6542    }
6543
6544    dc->cc_op = CC_OP_DYNAMIC;
6545    dc->ex_value = dc->base.tb->cs_base;
6546}
6547
/* TranslatorOps hook: no per-TB start work is needed for s390x.  */
static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}
6551
6552static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
6553{
6554    DisasContext *dc = container_of(dcbase, DisasContext, base);
6555
6556    /* Delay the set of ilen until we've read the insn. */
6557    tcg_gen_insn_start(dc->base.pc_next, dc->cc_op, 0);
6558    dc->insn_start = tcg_last_op();
6559}
6560
6561static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
6562{
6563    CPUS390XState *env = cs->env_ptr;
6564    DisasContext *dc = container_of(dcbase, DisasContext, base);
6565
6566    dc->base.is_jmp = translate_one(env, dc);
6567    if (dc->base.is_jmp == DISAS_NEXT) {
6568        uint64_t page_start;
6569
6570        page_start = dc->base.pc_first & TARGET_PAGE_MASK;
6571        if (dc->base.pc_next - page_start >= TARGET_PAGE_SIZE || dc->ex_value) {
6572            dc->base.is_jmp = DISAS_TOO_MANY;
6573        }
6574    }
6575}
6576
/* TranslatorOps hook: emit the TB epilogue according to how the TB
   ended.  The cases deliberately cascade: each later label assumes
   strictly less state still needs flushing to env.  */
static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    switch (dc->base.is_jmp) {
    case DISAS_GOTO_TB:
    case DISAS_NORETURN:
        /* The TB already ends in a jump or exception; nothing to add.  */
        break;
    case DISAS_TOO_MANY:
    case DISAS_PC_STALE:
    case DISAS_PC_STALE_NOCHAIN:
        update_psw_addr(dc);
        /* FALLTHRU */
    case DISAS_PC_UPDATED:
        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
           cc op type is in env */
        update_cc_op(dc);
        /* FALLTHRU */
    case DISAS_PC_CC_UPDATED:
        /* Exit the TB, either by raising a debug exception or by return.  */
        if ((dc->base.tb->flags & FLAG_MASK_PER) ||
             dc->base.is_jmp == DISAS_PC_STALE_NOCHAIN) {
            tcg_gen_exit_tb(NULL, 0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        break;
    default:
        g_assert_not_reached();
    }
}
6608
6609static void s390x_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
6610{
6611    DisasContext *dc = container_of(dcbase, DisasContext, base);
6612
6613    if (unlikely(dc->ex_value)) {
6614        /* ??? Unfortunately log_target_disas can't use host memory.  */
6615        qemu_log("IN: EXECUTE %016" PRIx64, dc->ex_value);
6616    } else {
6617        qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
6618        log_target_disas(cs, dc->base.pc_first, dc->base.tb->size);
6619    }
6620}
6621
/* Callback table wired into the generic translator loop.  */
static const TranslatorOps s390x_tr_ops = {
    .init_disas_context = s390x_tr_init_disas_context,
    .tb_start           = s390x_tr_tb_start,
    .insn_start         = s390x_tr_insn_start,
    .translate_insn     = s390x_tr_translate_insn,
    .tb_stop            = s390x_tr_tb_stop,
    .disas_log          = s390x_tr_disas_log,
};
6630
/* Entry point from the TCG core: translate one TB via the generic loop.  */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
{
    DisasContext dc;

    translator_loop(&s390x_tr_ops, &dc.base, cs, tb, max_insns);
}
6637
6638void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
6639                          target_ulong *data)
6640{
6641    int cc_op = data[1];
6642
6643    env->psw.addr = data[0];
6644
6645    /* Update the CC opcode if it is not already up-to-date.  */
6646    if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
6647        env->cc_op = cc_op;
6648    }
6649
6650    /* Record ILEN.  */
6651    env->int_pgm_ilen = data[2];
6652}
6653