/* qemu/target/s390x/tcg/translate.c */
   1/*
   2 *  S/390 translation
   3 *
   4 *  Copyright (c) 2009 Ulrich Hecht
   5 *  Copyright (c) 2010 Alexander Graf
   6 *
   7 * This library is free software; you can redistribute it and/or
   8 * modify it under the terms of the GNU Lesser General Public
   9 * License as published by the Free Software Foundation; either
  10 * version 2.1 of the License, or (at your option) any later version.
  11 *
  12 * This library is distributed in the hope that it will be useful,
  13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  15 * Lesser General Public License for more details.
  16 *
  17 * You should have received a copy of the GNU Lesser General Public
  18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
  19 */
  20
/*
 * Compile-time debug switches.  DEBUG_INLINE_BRANCHES additionally keeps
 * hit/miss statistics for conditional branches that could be evaluated
 * inline (see inline_branch_hit[]/inline_branch_miss[] below).
 */
/* #define DEBUG_INLINE_BRANCHES */
#define S390X_DEBUG_DISAS
/* #define S390X_DEBUG_DISAS_VERBOSE */

/* LOG_DISAS() compiles to nothing unless verbose disas debugging is on. */
#ifdef S390X_DEBUG_DISAS_VERBOSE
#  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
  30
  31#include "qemu/osdep.h"
  32#include "cpu.h"
  33#include "s390x-internal.h"
  34#include "disas/disas.h"
  35#include "exec/exec-all.h"
  36#include "tcg/tcg-op.h"
  37#include "tcg/tcg-op-gvec.h"
  38#include "qemu/log.h"
  39#include "qemu/host-utils.h"
  40#include "exec/cpu_ldst.h"
  41#include "exec/gen-icount.h"
  42#include "exec/helper-proto.h"
  43#include "exec/helper-gen.h"
  44
  45#include "exec/translator.h"
  46#include "exec/log.h"
  47#include "qemu/atomic128.h"
  48
  49
/* Information that (most) every instruction needs to manipulate.  */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;

/*
 * Define a structure to hold the decoded fields.  We'll store each inside
 * an array indexed by an enum.  In order to conserve memory, we'll arrange
 * for fields that do not exist at the same time to overlap, thus the "C"
 * for compact.  For checking purposes there is an "O" for original index
 * as well that will be applied to availability bitmaps.
 */

/*
 * "Original" field indices.  Each value doubles as a bit position within
 * DisasFields.presentO (see have_field1()).
 */
enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_m5,
    FLD_O_m6,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5,
    FLD_O_v1,
    FLD_O_v2,
    FLD_O_v3,
    FLD_O_v4,
};
  91
/*
 * "Compact" field indices.  Fields that can never occur in the same
 * instruction format share a storage slot in DisasFields.c[], which is
 * why several FLD_C_* values coincide.
 */
enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,
    FLD_C_v1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,
    FLD_C_v3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,
    FLD_C_v4 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,
    FLD_C_m5 = 4,

    FLD_C_d2 = 5,
    FLD_C_m6 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,
    FLD_C_v2 = 6,

    NUM_C_FIELD = 7
};
 128
/* The decoded operand fields of a single instruction. */
struct DisasFields {
    uint64_t raw_insn;      /* the raw instruction bytes */
    unsigned op:8;          /* primary opcode byte */
    unsigned op2:8;         /* secondary opcode byte, where the format has one */
    unsigned presentC:16;   /* bitmap: which compact slots in c[] are valid */
    unsigned int presentO;  /* bitmap over enum DisasFieldIndexO: fields decoded */
    int c[NUM_C_FIELD];     /* decoded field values, compact-indexed */
};
 137
/* Per-instruction/per-TB disassembly state. */
struct DisasContext {
    DisasContextBase base;      /* common translator state */
    const DisasInsn *insn;      /* descriptor of the insn being translated */
    DisasFields fields;         /* decoded operand fields of that insn */
    /* NOTE(review): presumably the insn image supplied by EXECUTE;
       confirm against the translation loop (not visible here). */
    uint64_t ex_value;
    /*
     * During translate_one(), pc_tmp is used to determine the instruction
     * to be executed after base.pc_next - e.g. next sequential instruction
     * or a branch target.
     */
    uint64_t pc_tmp;
    uint32_t ilen;              /* length in bytes of the current insn */
    enum cc_op cc_op;           /* how the cc is (lazily) computed */
    bool do_debug;
};
 153
/* Information carried about a condition to be evaluated.  */
typedef struct {
    TCGCond cond:8;     /* TCG comparison to apply between a and b */
    bool is_64;         /* true: use u.s64; false: use u.s32 */
    bool g1;            /* a is a global; do not free it in free_compare() */
    bool g2;            /* likewise for b */
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;

#ifdef DEBUG_INLINE_BRANCHES
/* Per-cc_op counters: branches evaluated inline vs. via the cc helper. */
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif
 170
 171static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
 172{
 173    TCGv_i64 tmp;
 174
 175    if (s->base.tb->flags & FLAG_MASK_32) {
 176        if (s->base.tb->flags & FLAG_MASK_64) {
 177            tcg_gen_movi_i64(out, pc);
 178            return;
 179        }
 180        pc |= 0x80000000;
 181    }
 182    assert(!(s->base.tb->flags & FLAG_MASK_64));
 183    tmp = tcg_const_i64(pc);
 184    tcg_gen_deposit_i64(out, out, tmp, 0, 32);
 185    tcg_temp_free_i64(tmp);
 186}
 187
/* TCG globals mapping the guest PSW. */
static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;
/* PER breaking-event address. */
static TCGv_i64 gbea;

/* Lazy condition-code state: the operation plus up to three operands. */
static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

/* General registers r0..r15 and backing storage for their TCG names. */
static char cpu_reg_names[16][4];
static TCGv_i64 regs[16];
 199
 200void s390x_translate_init(void)
 201{
 202    int i;
 203
 204    psw_addr = tcg_global_mem_new_i64(cpu_env,
 205                                      offsetof(CPUS390XState, psw.addr),
 206                                      "psw_addr");
 207    psw_mask = tcg_global_mem_new_i64(cpu_env,
 208                                      offsetof(CPUS390XState, psw.mask),
 209                                      "psw_mask");
 210    gbea = tcg_global_mem_new_i64(cpu_env,
 211                                  offsetof(CPUS390XState, gbea),
 212                                  "gbea");
 213
 214    cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
 215                                   "cc_op");
 216    cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
 217                                    "cc_src");
 218    cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
 219                                    "cc_dst");
 220    cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
 221                                   "cc_vr");
 222
 223    for (i = 0; i < 16; i++) {
 224        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
 225        regs[i] = tcg_global_mem_new(cpu_env,
 226                                     offsetof(CPUS390XState, regs[i]),
 227                                     cpu_reg_names[i]);
 228    }
 229}
 230
 231static inline int vec_full_reg_offset(uint8_t reg)
 232{
 233    g_assert(reg < 32);
 234    return offsetof(CPUS390XState, vregs[reg][0]);
 235}
 236
 237static inline int vec_reg_offset(uint8_t reg, uint8_t enr, MemOp es)
 238{
 239    /* Convert element size (es) - e.g. MO_8 - to bytes */
 240    const uint8_t bytes = 1 << es;
 241    int offs = enr * bytes;
 242
 243    /*
 244     * vregs[n][0] is the lowest 8 byte and vregs[n][1] the highest 8 byte
 245     * of the 16 byte vector, on both, little and big endian systems.
 246     *
 247     * Big Endian (target/possible host)
 248     * B:  [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
 249     * HW: [     0][     1][     2][     3] - [     4][     5][     6][     7]
 250     * W:  [             0][             1] - [             2][             3]
 251     * DW: [                             0] - [                             1]
 252     *
 253     * Little Endian (possible host)
 254     * B:  [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
 255     * HW: [     3][     2][     1][     0] - [     7][     6][     5][     4]
 256     * W:  [             1][             0] - [             3][             2]
 257     * DW: [                             0] - [                             1]
 258     *
 259     * For 16 byte elements, the two 8 byte halves will not form a host
 260     * int128 if the host is little endian, since they're in the wrong order.
 261     * Some operations (e.g. xor) do not care. For operations like addition,
 262     * the two 8 byte elements have to be loaded separately. Let's force all
 263     * 16 byte operations to handle it in a special way.
 264     */
 265    g_assert(es <= MO_64);
 266#ifndef HOST_WORDS_BIGENDIAN
 267    offs ^= (8 - bytes);
 268#endif
 269    return offs + vec_full_reg_offset(reg);
 270}
 271
 272static inline int freg64_offset(uint8_t reg)
 273{
 274    g_assert(reg < 16);
 275    return vec_reg_offset(reg, 0, MO_64);
 276}
 277
 278static inline int freg32_offset(uint8_t reg)
 279{
 280    g_assert(reg < 16);
 281    return vec_reg_offset(reg, 0, MO_32);
 282}
 283
 284static TCGv_i64 load_reg(int reg)
 285{
 286    TCGv_i64 r = tcg_temp_new_i64();
 287    tcg_gen_mov_i64(r, regs[reg]);
 288    return r;
 289}
 290
 291static TCGv_i64 load_freg(int reg)
 292{
 293    TCGv_i64 r = tcg_temp_new_i64();
 294
 295    tcg_gen_ld_i64(r, cpu_env, freg64_offset(reg));
 296    return r;
 297}
 298
 299static TCGv_i64 load_freg32_i64(int reg)
 300{
 301    TCGv_i64 r = tcg_temp_new_i64();
 302
 303    tcg_gen_ld32u_i64(r, cpu_env, freg32_offset(reg));
 304    return r;
 305}
 306
 307static void store_reg(int reg, TCGv_i64 v)
 308{
 309    tcg_gen_mov_i64(regs[reg], v);
 310}
 311
 312static void store_freg(int reg, TCGv_i64 v)
 313{
 314    tcg_gen_st_i64(v, cpu_env, freg64_offset(reg));
 315}
 316
 317static void store_reg32_i64(int reg, TCGv_i64 v)
 318{
 319    /* 32 bit register writes keep the upper half */
 320    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
 321}
 322
 323static void store_reg32h_i64(int reg, TCGv_i64 v)
 324{
 325    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
 326}
 327
 328static void store_freg32_i64(int reg, TCGv_i64 v)
 329{
 330    tcg_gen_st32_i64(v, cpu_env, freg32_offset(reg));
 331}
 332
 333static void return_low128(TCGv_i64 dest)
 334{
 335    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
 336}
 337
 338static void update_psw_addr(DisasContext *s)
 339{
 340    /* psw.addr */
 341    tcg_gen_movi_i64(psw_addr, s->base.pc_next);
 342}
 343
/*
 * Note a taken branch for PER: record the branch origin in gbea and,
 * with PER enabled, invoke the PER branch helper.  TO_NEXT selects the
 * next sequential instruction (s->pc_tmp) as the branch target; otherwise
 * psw_addr is assumed to already hold the target.
 */
static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    /* The breaking-event address is the address of the branch itself. */
    tcg_gen_movi_i64(gbea, s->base.pc_next);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_const_i64(s->pc_tmp) : psw_addr;
        gen_helper_per_branch(cpu_env, gbea, next_pc);
        if (to_next) {
            /* Only the constant we allocated above needs freeing. */
            tcg_temp_free_i64(next_pc);
        }
    }
#endif
}
 358
/*
 * As per_branch(), but for a branch taken only when COND(arg1, arg2)
 * holds; the PER helper (and the gbea update) must be skipped when the
 * branch falls through.
 */
static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        /* Branch around the helper call when the branch is not taken. */
        TCGLabel *lab = gen_new_label();
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->base.pc_next);
        gen_helper_per_branch(cpu_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        /* PER not enabled: just track gbea, conditionally. */
        TCGv_i64 pc = tcg_const_i64(s->base.pc_next);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
        tcg_temp_free_i64(pc);
    }
#endif
}
 378
 379static void per_breaking_event(DisasContext *s)
 380{
 381    tcg_gen_movi_i64(gbea, s->base.pc_next);
 382}
 383
 384static void update_cc_op(DisasContext *s)
 385{
 386    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
 387        tcg_gen_movi_i32(cc_op, s->cc_op);
 388    }
 389}
 390
 391static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
 392{
 393    return (uint64_t)cpu_lduw_code(env, pc);
 394}
 395
 396static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
 397{
 398    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
 399}
 400
/* Select the MMU index to use for data accesses in the current context. */
static int get_mem_index(DisasContext *s)
{
#ifdef CONFIG_USER_ONLY
    return MMU_USER_IDX;
#else
    if (!(s->base.tb->flags & FLAG_MASK_DAT)) {
        /* DAT is off: accesses use real addresses. */
        return MMU_REAL_IDX;
    }

    /* With DAT on, the PSW address-space control selects the space. */
    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_PRIMARY_IDX;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_SECONDARY_IDX;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        return MMU_HOME_IDX;
    default:
        /* No other ASC value is expected to reach translation. */
        tcg_abort();
        break;
    }
#endif
}
 423
 424static void gen_exception(int excp)
 425{
 426    TCGv_i32 tmp = tcg_const_i32(excp);
 427    gen_helper_exception(cpu_env, tmp);
 428    tcg_temp_free_i32(tmp);
 429}
 430
/*
 * Raise a program exception with the given CODE: record the code and
 * instruction length in env, synchronize psw.addr and the cc, then
 * trigger EXCP_PGM.
 */
static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was.  */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    /* ...and the length of the faulting instruction. */
    tmp = tcg_const_i32(s->ilen);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* update the psw */
    update_psw_addr(s);

    /* Save off cc.  */
    update_cc_op(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);
}
 453
 454static inline void gen_illegal_opcode(DisasContext *s)
 455{
 456    gen_program_exception(s, PGM_OPERATION);
 457}
 458
 459static inline void gen_data_exception(uint8_t dxc)
 460{
 461    TCGv_i32 tmp = tcg_const_i32(dxc);
 462    gen_helper_data_exception(cpu_env, tmp);
 463    tcg_temp_free_i32(tmp);
 464}
 465
 466static inline void gen_trap(DisasContext *s)
 467{
 468    /* Set DXC to 0xff */
 469    gen_data_exception(0xff);
 470}
 471
 472static void gen_addi_and_wrap_i64(DisasContext *s, TCGv_i64 dst, TCGv_i64 src,
 473                                  int64_t imm)
 474{
 475    tcg_gen_addi_i64(dst, src, imm);
 476    if (!(s->base.tb->flags & FLAG_MASK_64)) {
 477        if (s->base.tb->flags & FLAG_MASK_32) {
 478            tcg_gen_andi_i64(dst, dst, 0x7fffffff);
 479        } else {
 480            tcg_gen_andi_i64(dst, dst, 0x00ffffff);
 481        }
 482    }
 483}
 484
 485static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
 486{
 487    TCGv_i64 tmp = tcg_temp_new_i64();
 488
 489    /*
 490     * Note that d2 is limited to 20 bits, signed.  If we crop negative
 491     * displacements early we create larger immedate addends.
 492     */
 493    if (b2 && x2) {
 494        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
 495        gen_addi_and_wrap_i64(s, tmp, tmp, d2);
 496    } else if (b2) {
 497        gen_addi_and_wrap_i64(s, tmp, regs[b2], d2);
 498    } else if (x2) {
 499        gen_addi_and_wrap_i64(s, tmp, regs[x2], d2);
 500    } else if (!(s->base.tb->flags & FLAG_MASK_64)) {
 501        if (s->base.tb->flags & FLAG_MASK_32) {
 502            tcg_gen_movi_i64(tmp, d2 & 0x7fffffff);
 503        } else {
 504            tcg_gen_movi_i64(tmp, d2 & 0x00ffffff);
 505        }
 506    } else {
 507        tcg_gen_movi_i64(tmp, d2);
 508    }
 509
 510    return tmp;
 511}
 512
 513static inline bool live_cc_data(DisasContext *s)
 514{
 515    return (s->cc_op != CC_OP_DYNAMIC
 516            && s->cc_op != CC_OP_STATIC
 517            && s->cc_op > 3);
 518}
 519
 520static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
 521{
 522    if (live_cc_data(s)) {
 523        tcg_gen_discard_i64(cc_src);
 524        tcg_gen_discard_i64(cc_dst);
 525        tcg_gen_discard_i64(cc_vr);
 526    }
 527    s->cc_op = CC_OP_CONST0 + val;
 528}
 529
/* cc = OP(dst): record a one-operand lazy cc computation. */
static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        /* The operands not used by OP are dead. */
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

/* cc = OP(src, dst): record a two-operand lazy cc computation. */
static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

/* cc = OP(src, dst, vr): record a three-operand lazy cc computation. */
static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}
 559
 560static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
 561{
 562    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
 563}
 564
 565/* CC value is in env->cc_op */
 566static void set_cc_static(DisasContext *s)
 567{
 568    if (live_cc_data(s)) {
 569        tcg_gen_discard_i64(cc_src);
 570        tcg_gen_discard_i64(cc_dst);
 571        tcg_gen_discard_i64(cc_vr);
 572    }
 573    s->cc_op = CC_OP_STATIC;
 574}
 575
/* calculates cc into cc_op */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = NULL;
    TCGv_i64 dummy = NULL;

    /* First pass: allocate only the helper arguments actually needed. */
    switch (s->cc_op) {
    default:
        /* The 1- and 2-argument cases need a zero placeholder. */
        dummy = tcg_const_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        local_cc_op = tcg_const_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        /* No helper call below; no arguments needed. */
        break;
    }

    /* Second pass: compute the cc. */
    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
    case CC_OP_LCBB:
    case CC_OP_MULS_32:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ADDU:
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA_32:
    case CC_OP_SLA_64:
    case CC_OP_SUBU:
    case CC_OP_NZ_F128:
    case CC_OP_VC:
    case CC_OP_MULS_64:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    /* Release whatever the first pass allocated. */
    if (local_cc_op) {
        tcg_temp_free_i32(local_cc_op);
    }
    if (dummy) {
        tcg_temp_free_i64(dummy);
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
 671
/*
 * May this TB chain directly to DEST?  Not with PER active, where
 * branches are handled via helper calls (see per_branch()).
 */
static bool use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (unlikely(s->base.tb->flags & FLAG_MASK_PER)) {
        return false;
    }
    return translator_use_goto_tb(&s->base, dest);
}

/* Count a branch that had to fall back to the cc helper. */
static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

/* Count a branch evaluated inline for this cc_op. */
static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}
 693
/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.
   Indexed by the 4-bit instruction mask: bit 8 selects CC0 (EQ), bit 4
   CC1 (LT), bit 2 CC2 (GT); bit 1 (CC3) is a don't-care, hence the
   entries come in identical pairs.  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};

/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible.
   Here mask bits 2 and 1 (CC2/CC3) are both don't-cares, so the entries
   come in identical groups of four.  */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER,
    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE,
    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
 719
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.
   MASK is the 4-bit branch mask: bit 8 selects CC0, bit 4 CC1,
   bit 2 CC2, bit 1 CC3 (see the CC_OP_STATIC cases below).  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    /* All-ones/all-zeros masks are unconditional; no cc inspection. */
    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op.  */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        switch (mask) {
        case 8 | 2: /* result == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* result != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* !carry (borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_EQ : TCG_COND_NE;
            break;
        case 2 | 1: /* carry (!borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_NE : TCG_COND_EQ;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        /* Compare (cc_src & cc_dst) against zero. */
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        /* cc_dst holds the result, cc_src the carry/borrow indication. */
        c->is_64 = true;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        switch (mask) {
        case 8 | 2:
        case 4 | 1: /* result */
            c->u.s64.a = cc_dst;
            break;
        case 8 | 4:
        case 2 | 1: /* carry */
            c->u.s64.a = cc_src;
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case CC_OP_STATIC:
        /* Compare env->cc_op (0..3) directly against the mask bits. */
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask.  */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
 982
 983static void free_compare(DisasCompare *c)
 984{
 985    if (!c->g1) {
 986        if (c->is_64) {
 987            tcg_temp_free_i64(c->u.s64.a);
 988        } else {
 989            tcg_temp_free_i32(c->u.s32.a);
 990        }
 991    }
 992    if (!c->g2) {
 993        if (c->is_64) {
 994            tcg_temp_free_i64(c->u.s64.b);
 995        } else {
 996            tcg_temp_free_i32(c->u.s32.b);
 997        }
 998    }
 999}
1000
/* ====================================================================== */
/* Define the insn format enumeration.  */
/* Each FMT_<name> is generated from insn-format.def; the F1..F6 variants
   discard the field descriptions and keep only the format name.  */
#define F0(N)                         FMT_##N,
#define F1(N, X1)                     F0(N)
#define F2(N, X1, X2)                 F0(N)
#define F3(N, X1, X2, X3)             F0(N)
#define F4(N, X1, X2, X3, X4)         F0(N)
#define F5(N, X1, X2, X3, X4, X5)     F0(N)
#define F6(N, X1, X2, X3, X4, X5, X6) F0(N)

typedef enum {
#include "insn-format.def"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6
1022
/* This is the way fields are to be accessed out of DisasFields.  */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)

/* Was original field C decoded for this insn?  Tests the presentO bitmap. */
static bool have_field1(const DisasContext *s, enum DisasFieldIndexO c)
{
    return (s->fields.presentO >> c) & 1;
}

/* Fetch compact slot C, asserting that original field O was decoded. */
static int get_field1(const DisasContext *s, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(s, o));
    return s->fields.c[c];
}
1038
/* Describe the layout of each field in each format.  */
typedef struct DisasField {
    unsigned int beg:8;         /* first bit of the field within the insn */
    unsigned int size:8;        /* field width in bits */
    unsigned int type:2;        /* extraction type; values match the R/I/BDL/V
                                   macros below — exact decoding is elsewhere */
    unsigned int indexC:6;      /* compact storage slot (DisasFieldIndexC) */
    enum DisasFieldIndexO indexO:8; /* presence bit (DisasFieldIndexO) */
} DisasField;

/* Field layout of one instruction format: up to NUM_C_FIELD fields. */
typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;
1051
/*
 * One initializer {beg, size, type, indexC, indexO} per operand field:
 *   R/M     register/mask nibbles (type 0)
 *   V       vector register nibble (type 3)
 *   BD/BXD  base(+index) plus 12-bit displacement (type 0)
 *   BDL/BXDL  same, but with a 20-bit "long" displacement (type 2)
 *   I       immediate (type 1); L  length field (type 0)
 */
#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define V(N, B)       {  B,  4, 3, FLD_C_v##N, FLD_O_v##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }

/* Re-expand insn-format.def, this time into the per-format field table.  */
#define F0(N)                     { { } },
#define F1(N, X1)                 { { X1 } },
#define F2(N, X1, X2)             { { X1, X2 } },
#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
#define F6(N, X1, X2, X3, X4, X5, X6)       { { X1, X2, X3, X4, X5, X6 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.def"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6
#undef R
#undef M
#undef V
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L
1096
/* Generally, we'll extract operands into this structures, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */
typedef struct {
    bool g_out, g_out2, g_in1, g_in2; /* operand aliases a global TCG value */
    TCGv_i64 out, out2, in1, in2;     /* inputs/outputs of the operation */
    TCGv_i64 addr1;                   /* computed first-operand address */
} DisasOps;
1105
/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */

#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16

/* Return values from translate_one, indicating the state of the TB.  */

/* We are not using a goto_tb (for whatever reason), but have updated
   the PC (for whatever reason), so there's no need to do it again on
   exiting the TB.  */
#define DISAS_PC_UPDATED        DISAS_TARGET_0

/* We have emitted one or more goto_tb.  No fixup required.  */
#define DISAS_GOTO_TB           DISAS_TARGET_1

/* We have updated the PC and CC values.  */
#define DISAS_PC_CC_UPDATED     DISAS_TARGET_2

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the PC for the next instruction to be executed.  */
#define DISAS_PC_STALE          DISAS_TARGET_3

/* We are exiting the TB to the main loop.  */
#define DISAS_PC_STALE_NOCHAIN  DISAS_TARGET_4


/* Instruction flags */
#define IF_AFP1     0x0001      /* r1 is a fp reg for HFP/FPS instructions */
#define IF_AFP2     0x0002      /* r2 is a fp reg for HFP/FPS instructions */
#define IF_AFP3     0x0004      /* r3 is a fp reg for HFP/FPS instructions */
#define IF_BFP      0x0008      /* binary floating point instruction */
#define IF_DFP      0x0010      /* decimal floating point instruction */
#define IF_PRIV     0x0020      /* privileged instruction */
#define IF_VEC      0x0040      /* vector instruction */
#define IF_IO       0x0080      /* input/output instruction */
1148
/* Static description of one translated instruction.  */
struct DisasInsn {
    unsigned opc:16;    /* opcode, including any sub-opcode bits */
    unsigned flags:16;  /* IF_* flags above */
    DisasFormat fmt:8;  /* instruction format (FMT_*) */
    unsigned fac:8;     /* presumably required facility; see insn-data.def */
    unsigned spec:8;    /* SPEC_* operand constraints */

    const char *name;

    /* Pre-process arguments before HELP_OP.  */
    void (*help_in1)(DisasContext *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasOps *);

    /*
     * Post-process output after HELP_OP.
     * Note that these are not called if HELP_OP returns DISAS_NORETURN.
     */
    void (*help_wout)(DisasContext *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);

    /* Implement the operation itself.  */
    DisasJumpType (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;      /* per-insn constant passed through s->insn->data */
};
1175
1176/* ====================================================================== */
1177/* Miscellaneous helpers, used by several operations.  */
1178
1179static void help_l2_shift(DisasContext *s, DisasOps *o, int mask)
1180{
1181    int b2 = get_field(s, b2);
1182    int d2 = get_field(s, d2);
1183
1184    if (b2 == 0) {
1185        o->in2 = tcg_const_i64(d2 & mask);
1186    } else {
1187        o->in2 = get_address(s, 0, b2, d2);
1188        tcg_gen_andi_i64(o->in2, o->in2, mask);
1189    }
1190}
1191
/*
 * Emit an unconditional branch to a known destination address.  Uses a
 * chained goto_tb when allowed, otherwise stores the new PC and exits
 * to the main loop.
 */
static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->pc_tmp) {
        /* Branch to the next sequential insn is a no-op.  */
        per_branch(s, true);
        return DISAS_NEXT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        /* PER breaking-event handling before leaving the TB.  */
        per_breaking_event(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb(s->base.tb, 0);
        return DISAS_GOTO_TB;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    }
}
1211
/*
 * Emit a conditional (or degenerate) branch described by *c.  If IS_IMM,
 * the target is pc + 2 * IMM (a relative halfword offset); otherwise it
 * is the dynamic address CDEST.  The compare *c is consumed (freed) on
 * all paths.
 */
static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
                                 bool is_imm, int imm, TCGv_i64 cdest)
{
    DisasJumpType ret;
    uint64_t dest = s->base.pc_next + 2 * imm;
    TCGLabel *lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = DISAS_NEXT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->pc_tmp) {
            /* Branch to next.  */
            per_branch(s, true);
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (!cdest) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = DISAS_PC_UPDATED;
            goto egress;
        }
    }

    /* A genuinely conditional branch from here on.  */
    if (use_goto_tb(s, s->pc_tmp)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            /* Branch taken.  */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb(s->base.tb, 1);

            ret = DISAS_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = DISAS_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_const_i64(s->pc_tmp);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            /* Widen the 32-bit compare to 64 bits for the movcond.  */
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = DISAS_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}
1342
1343/* ====================================================================== */
1344/* The operations.  These perform the bulk of the work for any insn,
1345   usually after the operands have been loaded and output initialized.  */
1346
/* Load positive: 64-bit absolute value.  */
static DisasJumpType op_abs(DisasContext *s, DisasOps *o)
{
    tcg_gen_abs_i64(o->out, o->in2);
    return DISAS_NEXT;
}

/* Load positive (short FP): clear the sign bit of a 32-bit value.  */
static DisasJumpType op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return DISAS_NEXT;
}

/* Load positive (long FP): clear the sign bit of a 64-bit value.  */
static DisasJumpType op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return DISAS_NEXT;
}

/* Load positive (extended FP): sign bit lives in the high doubleword.  */
static DisasJumpType op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}

/* 64-bit signed add.  */
static DisasJumpType op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* 64-bit unsigned add; the carry-out lands in cc_src (0 or 1).  */
static DisasJumpType op_addu64(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    return DISAS_NEXT;
}
1384
/* Compute carry into cc_src. */
static void compute_carry(DisasContext *s)
{
    switch (s->cc_op) {
    case CC_OP_ADDU:
        /* The carry value is already in cc_src (1,0). */
        break;
    case CC_OP_SUBU:
        /* cc_src presumably holds the borrow (0,-1); +1 yields carry (1,0). */
        tcg_gen_addi_i64(cc_src, cc_src, 1);
        break;
    default:
        /* Materialize the CC first, then extract the carry from it. */
        gen_op_calc_cc(s);
        /* fall through */
    case CC_OP_STATIC:
        /* The carry flag is the msb of CC; compute into cc_src. */
        tcg_gen_extu_i32_i64(cc_src, cc_op);
        tcg_gen_shri_i64(cc_src, cc_src, 1);
        break;
    }
}
1405
/* 32-bit add with carry: sum the operands plus the incoming carry.  */
static DisasJumpType op_addc32(DisasContext *s, DisasOps *o)
{
    compute_carry(s);
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    tcg_gen_add_i64(o->out, o->out, cc_src);
    return DISAS_NEXT;
}

/* 64-bit add with carry; the carry-out is accumulated in cc_src.  */
static DisasJumpType op_addc64(DisasContext *s, DisasOps *o)
{
    compute_carry(s);

    /* Two add2 steps: in1 + carry-in, then + in2, collecting carry-out. */
    TCGv_i64 zero = tcg_const_i64(0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, zero);
    tcg_gen_add2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);
    tcg_temp_free_i64(zero);

    return DISAS_NEXT;
}
1425
/*
 * Add signed immediate to storage.  With STFLE bit 45 the update is
 * performed atomically in memory; otherwise as a load/add/store sequence.
 */
static DisasJumpType op_asi(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

/* Unsigned variant of op_asi; additionally captures the carry in cc_src.  */
static DisasJumpType op_asiu64(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
1470
/* Add short BFP (32-bit), via helper for exception/rounding handling.  */
static DisasJumpType op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* Add long BFP (64-bit).  */
static DisasJumpType op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* Add extended BFP (128-bit); low half returned via return_low128.  */
static DisasJumpType op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}

/* 64-bit bitwise AND.  */
static DisasJumpType op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
1495
/*
 * AND immediate into one sub-field of a register (e.g. NIHH/NILL).
 * insn->data encodes the field's shift (low byte) and width (high bits).
 * The immediate is shifted into place and all other bits are forced to 1,
 * so the AND only affects the selected field.
 */
static DisasJumpType op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
1512
/*
 * AND to storage.  Atomic in-memory update when the
 * interlocked-access facility 2 is available.
 */
static DisasJumpType op_ni(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
1533
/*
 * Branch and save: store the link information, then branch to the
 * address in in2.  A null in2 (R2 = 0) saves the link but does not branch.
 */
static DisasJumpType op_bas(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}
1545
/*
 * Build the BAL-style link information in o->out.  In 31/64-bit mode
 * this is the plain link address.  In 24-bit mode the high byte of the
 * link word carries ILC (bits 30-31 of the low word), CC (bits 28-29)
 * and the program mask (bits 24-27) alongside the 24-bit address.
 */
static void save_link_info(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t;

    if (s->base.tb->flags & (FLAG_MASK_32 | FLAG_MASK_64)) {
        pc_to_link_info(o->out, s, s->pc_tmp);
        return;
    }
    gen_op_calc_cc(s);
    /* Keep the high half of the register; insert ILC and return address. */
    tcg_gen_andi_i64(o->out, o->out, 0xffffffff00000000ull);
    tcg_gen_ori_i64(o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp);
    t = tcg_temp_new_i64();
    /* Program mask from the PSW into bits 24-27. */
    tcg_gen_shri_i64(t, psw_mask, 16);
    tcg_gen_andi_i64(t, t, 0x0f000000);
    tcg_gen_or_i64(o->out, o->out, t);
    /* Condition code into bits 28-29. */
    tcg_gen_extu_i32_i64(t, cc_op);
    tcg_gen_shli_i64(t, t, 28);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);
}
1566
/* Branch and link: like op_bas but with the 24-bit-aware link format.  */
static DisasJumpType op_bal(DisasContext *s, DisasOps *o)
{
    save_link_info(s, o);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}

/* Branch relative and save: save the link, then branch by 2 * i2.  */
static DisasJumpType op_basi(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    return help_goto_direct(s, s->base.pc_next + 2 * get_field(s, i2));
}
1584
1585static DisasJumpType op_bc(DisasContext *s, DisasOps *o)
1586{
1587    int m1 = get_field(s, m1);
1588    bool is_imm = have_field(s, i2);
1589    int imm = is_imm ? get_field(s, i2) : 0;
1590    DisasCompare c;
1591
1592    /* BCR with R2 = 0 causes no branching */
1593    if (have_field(s, r2) && get_field(s, r2) == 0) {
1594        if (m1 == 14) {
1595            /* Perform serialization */
1596            /* FIXME: check for fast-BCR-serialization facility */
1597            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1598        }
1599        if (m1 == 15) {
1600            /* Perform serialization */
1601            /* FIXME: perform checkpoint-synchronisation */
1602            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1603        }
1604        return DISAS_NEXT;
1605    }
1606
1607    disas_jcc(s, &c, m1);
1608    return help_branch(s, &c, is_imm, imm, o->in2);
1609}
1610
/*
 * Branch on count (32-bit): decrement the low word of r1 and branch
 * if the result is nonzero.
 */
static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    /* Write back only the low 32 bits of the decremented value. */
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1634
/*
 * Branch on count high: decrement the high word of r1 and branch if
 * nonzero.  Only the relative-immediate form exists.
 */
static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int imm = get_field(s, i2);
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    /* Operate on the high 32 bits of the register. */
    tcg_gen_shri_i64(t, regs[r1], 32);
    tcg_gen_subi_i64(t, t, 1);
    store_reg32h_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, 1, imm, o->in2);
}
1658
/*
 * Branch on count (64-bit): decrement r1 in place and branch if the
 * result is nonzero.  The register itself is used as the comparand,
 * hence g1 = true (do not free it).
 */
static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1677
/*
 * Branch on index (32-bit, BXH/BXLE): add r3 to r1, compare the sum
 * against the odd register of the r3 pair.  insn->data selects the
 * condition: nonzero -> LE (branch on low or equal), zero -> GT (high).
 */
static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    /* Snapshot the comparand before r1 is overwritten (r1 may alias it). */
    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1703
/*
 * Branch on index (64-bit, BXHG/BXLEG).  When r1 aliases the comparand
 * register (r3 | 1), its value must be copied before the add clobbers it.
 */
static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    if (r1 == (r3 | 1)) {
        /* r1 aliases the comparand: take a private copy. */
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1729
/*
 * Compare and branch (CRJ/CGRJ/CRB/... family): compare in1 with in2
 * under the mask in m3, branching either relative (i4) or to a computed
 * address (b4/d4).  insn->data selects unsigned comparison.
 */
static DisasJumpType op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s, i4);
    if (is_imm) {
        imm = get_field(s, i4);
    } else {
        imm = 0;
        /* Branch target comes from base/displacement instead. */
        o->out = get_address(s, 0, get_field(s, b4),
                             get_field(s, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}
1755
/* Compare short BFP; CC computed by the helper.  */
static DisasJumpType op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* Compare long BFP.  */
static DisasJumpType op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* Compare extended (128-bit) BFP.  */
static DisasJumpType op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
1776
/*
 * Extract and validate the m3 (rounding mode) and m4 fields of a
 * floating-point instruction, packing them into a single i32 as
 * m4 << 4 | m3.  Returns NULL after raising a specification
 * exception if the rounding mode is invalid.
 */
static TCGv_i32 fpinst_extract_m34(DisasContext *s, bool m3_with_fpe,
                                   bool m4_with_fpe)
{
    const bool fpe = s390_has_feat(S390_FEAT_FLOATING_POINT_EXT);
    uint8_t m3 = get_field(s, m3);
    uint8_t m4 = get_field(s, m4);

    /* m3 field was introduced with FPE */
    if (!fpe && m3_with_fpe) {
        m3 = 0;
    }
    /* m4 field was introduced with FPE */
    if (!fpe && m4_with_fpe) {
        m4 = 0;
    }

    /* Check for valid rounding modes. Mode 3 was introduced later. */
    if (m3 == 2 || m3 > 7 || (!fpe && m3 == 3)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return NULL;
    }

    return tcg_const_i32(deposit32(m3, 4, 4, m4));
}
1801
/* CONVERT TO FIXED: short BFP to 32-bit signed integer.  */
static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* CONVERT TO FIXED: long BFP to 32-bit signed integer.  */
static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* CONVERT TO FIXED: extended BFP to 32-bit signed integer.  */
static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* CONVERT TO FIXED: short BFP to 64-bit signed integer.  */
static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* CONVERT TO FIXED: long BFP to 64-bit signed integer.  */
static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* CONVERT TO FIXED: extended BFP to 64-bit signed integer.  */
static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
1879
/* CONVERT TO LOGICAL: short BFP to 32-bit unsigned integer.  */
static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* CONVERT TO LOGICAL: long BFP to 32-bit unsigned integer.  */
static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* CONVERT TO LOGICAL: extended BFP to 32-bit unsigned integer.  */
static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* CONVERT TO LOGICAL: short BFP to 64-bit unsigned integer.  */
static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* CONVERT TO LOGICAL: long BFP to 64-bit unsigned integer.  */
static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* CONVERT TO LOGICAL: extended BFP to 64-bit unsigned integer.  */
static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
1957
/* CONVERT FROM FIXED: 64-bit signed integer to short BFP.  */
static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cegb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

/* CONVERT FROM FIXED: 64-bit signed integer to long BFP.  */
static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

/* CONVERT FROM FIXED: 64-bit signed integer to extended BFP.  */
static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return_low128(o->out2);
    return DISAS_NEXT;
}

/* CONVERT FROM LOGICAL: 64-bit unsigned integer to short BFP.  */
static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_celgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

/* CONVERT FROM LOGICAL: 64-bit unsigned integer to long BFP.  */
static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

/* CONVERT FROM LOGICAL: 64-bit unsigned integer to extended BFP.  */
static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxlgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return_low128(o->out2);
    return DISAS_NEXT;
}
2031
/*
 * CHECKSUM: the helper accumulates the checksum and returns the number
 * of bytes processed, which is used to advance the second-operand
 * address (r2) and decrement its length (r2 + 1).
 */
static DisasJumpType op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    return_low128(o->out);

    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);

    return DISAS_NEXT;
}
2047
/*
 * COMPARE LOGICAL (character): for power-of-two lengths of 1/2/4/8
 * bytes (encoded length l is one less), inline the two loads and set
 * the CC from an unsigned comparison; otherwise call the helper.
 */
static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s, l1);
    TCGv_i32 vl;

    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        /* General case: byte-wise comparison in the helper. */
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return DISAS_NEXT;
    }
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return DISAS_NEXT;
}
2080
2081static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
2082{
2083    int r1 = get_field(s, r1);
2084    int r2 = get_field(s, r2);
2085    TCGv_i32 t1, t2;
2086
2087    /* r1 and r2 must be even.  */
2088    if (r1 & 1 || r2 & 1) {
2089        gen_program_exception(s, PGM_SPECIFICATION);
2090        return DISAS_NORETURN;
2091    }
2092
2093    t1 = tcg_const_i32(r1);
2094    t2 = tcg_const_i32(r2);
2095    gen_helper_clcl(cc_op, cpu_env, t1, t2);
2096    tcg_temp_free_i32(t1);
2097    tcg_temp_free_i32(t2);
2098    set_cc_static(s);
2099    return DISAS_NEXT;
2100}
2101
2102static DisasJumpType op_clcle(DisasContext *s, DisasOps *o)
2103{
2104    int r1 = get_field(s, r1);
2105    int r3 = get_field(s, r3);
2106    TCGv_i32 t1, t3;
2107
2108    /* r1 and r3 must be even.  */
2109    if (r1 & 1 || r3 & 1) {
2110        gen_program_exception(s, PGM_SPECIFICATION);
2111        return DISAS_NORETURN;
2112    }
2113
2114    t1 = tcg_const_i32(r1);
2115    t3 = tcg_const_i32(r3);
2116    gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
2117    tcg_temp_free_i32(t1);
2118    tcg_temp_free_i32(t3);
2119    set_cc_static(s);
2120    return DISAS_NEXT;
2121}
2122
2123static DisasJumpType op_clclu(DisasContext *s, DisasOps *o)
2124{
2125    int r1 = get_field(s, r1);
2126    int r3 = get_field(s, r3);
2127    TCGv_i32 t1, t3;
2128
2129    /* r1 and r3 must be even.  */
2130    if (r1 & 1 || r3 & 1) {
2131        gen_program_exception(s, PGM_SPECIFICATION);
2132        return DISAS_NORETURN;
2133    }
2134
2135    t1 = tcg_const_i32(r1);
2136    t3 = tcg_const_i32(r3);
2137    gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
2138    tcg_temp_free_i32(t1);
2139    tcg_temp_free_i32(t3);
2140    set_cc_static(s);
2141    return DISAS_NEXT;
2142}
2143
/* COMPARE LOGICAL CHARACTERS UNDER MASK (CLM): compare the bytes of R1
   selected by mask M3 against successive bytes at the second operand. */
static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, o->in1);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(m3);
    return DISAS_NEXT;
}
2155
/* COMPARE LOGICAL STRING (CLST): regs[0] holds the terminator byte.
   The helper returns the updated first address; the second comes back
   via the low-128 mechanism. */
static DisasJumpType op_clst(DisasContext *s, DisasOps *o)
{
    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return DISAS_NEXT;
}
2163
2164static DisasJumpType op_cps(DisasContext *s, DisasOps *o)
2165{
2166    TCGv_i64 t = tcg_temp_new_i64();
2167    tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
2168    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
2169    tcg_gen_or_i64(o->out, o->out, t);
2170    tcg_temp_free_i64(t);
2171    return DISAS_NEXT;
2172}
2173
/* COMPARE AND SWAP (CS/CSY/CSG): atomic compare-exchange at the second
   operand address; the operand size is in s->insn->data. */
static DisasJumpType op_cs(DisasContext *s, DisasOps *o)
{
    int d2 = get_field(s, d2);
    int b2 = get_field(s, b2);
    TCGv_i64 addr, cc;

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value).  */

    addr = get_address(s, 0, b2, d2);
    tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
                               get_mem_index(s), s->insn->data | MO_ALIGN);
    tcg_temp_free_i64(addr);

    /* Are the memory and expected values (un)equal?  Note that this setcond
       produces the output CC value, thus the NE sense of the test.  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
    tcg_gen_extrl_i64_i32(cc_op, cc);
    tcg_temp_free_i64(cc);
    set_cc_static(s);

    return DISAS_NEXT;
}
2198
/* COMPARE DOUBLE AND SWAP GRANDE (CDSG): 128-bit compare-and-swap.
   Uses the parallel (cmpxchg128) helper under MTTCG when available,
   otherwise falls back to exiting to the slow atomic path. */
static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    int d2 = get_field(s, d2);
    int b2 = get_field(s, b2);
    DisasJumpType ret = DISAS_NEXT;
    TCGv_i64 addr;
    TCGv_i32 t_r1, t_r3;

    /* Note that R1:R1+1 = expected value and R3:R3+1 = new value.  */
    addr = get_address(s, 0, b2, d2);
    t_r1 = tcg_const_i32(r1);
    t_r3 = tcg_const_i32(r3);
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
    } else if (HAVE_CMPXCHG128) {
        gen_helper_cdsg_parallel(cpu_env, addr, t_r1, t_r3);
    } else {
        /* No host 128-bit cmpxchg: restart this insn exclusively. */
        gen_helper_exit_atomic(cpu_env);
        ret = DISAS_NORETURN;
    }
    tcg_temp_free_i64(addr);
    tcg_temp_free_i32(t_r1);
    tcg_temp_free_i32(t_r3);

    set_cc_static(s);
    return ret;
}
2228
/* COMPARE AND SWAP AND STORE (CSST): fully handled in the helper; a
   separate parallel variant is used under MTTCG. */
static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    TCGv_i32 t_r3 = tcg_const_i32(r3);

    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        gen_helper_csst_parallel(cc_op, cpu_env, t_r3, o->addr1, o->in2);
    } else {
        gen_helper_csst(cc_op, cpu_env, t_r3, o->addr1, o->in2);
    }
    tcg_temp_free_i32(t_r3);

    set_cc_static(s);
    return DISAS_NEXT;
}
2244
2245#ifndef CONFIG_USER_ONLY
/* COMPARE AND SWAP AND PURGE (CSP/CSPG, privileged): compare-and-swap at
   the R2 address; on success with R2 bit 63 set, purge the TLB. */
static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
{
    MemOp mop = s->insn->data;
    TCGv_i64 addr, old, cc;
    TCGLabel *lab = gen_new_label();

    /* Note that in1 = R1 (zero-extended expected value),
       out = R1 (original reg), out2 = R1+1 (new value).  */

    addr = tcg_temp_new_i64();
    old = tcg_temp_new_i64();
    /* The low address bits select purge options, not bytes; mask them off. */
    tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
    tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
                               get_mem_index(s), mop | MO_ALIGN);
    tcg_temp_free_i64(addr);

    /* Are the memory and expected values (un)equal?  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
    tcg_gen_extrl_i64_i32(cc_op, cc);

    /* Write back the output now, so that it happens before the
       following branch, so that we don't need local temps.  */
    if ((mop & MO_SIZE) == MO_32) {
        tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
    } else {
        tcg_gen_mov_i64(o->out, old);
    }
    tcg_temp_free_i64(old);

    /* If the comparison was equal, and the LSB of R2 was set,
       then we need to flush the TLB (for all cpus).  */
    tcg_gen_xori_i64(cc, cc, 1);
    tcg_gen_and_i64(cc, cc, o->in2);
    tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
    tcg_temp_free_i64(cc);

    gen_helper_purge(cpu_env);
    gen_set_label(lab);

    return DISAS_NEXT;
}
2288#endif
2289
/* CONVERT TO DECIMAL (CVD): convert the low 32 bits of R1 to packed
   decimal and store the 8-byte result at the second operand address. */
static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i32 t2 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t2, o->in1);
    gen_helper_cvd(t1, t2);
    tcg_temp_free_i32(t2);
    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
    tcg_temp_free_i64(t1);
    return DISAS_NEXT;
}
2301
/* COMPARE AND TRAP: trap if the m3-selected comparison of in1 and in2
   holds.  s->insn->data selects the unsigned (logical) variants. */
static DisasJumpType op_ct(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    TCGLabel *lab = gen_new_label();
    TCGCond c;

    /* Branch AROUND the trap when the condition does NOT hold. */
    c = tcg_invert_cond(ltgt_cond[m3]);
    if (s->insn->data) {
        c = tcg_unsigned_cond(c);
    }
    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);

    /* Trap.  */
    gen_trap(s);

    gen_set_label(lab);
    return DISAS_NEXT;
}
2320
/* CONVERT UTF (CU12/CU14/CU21/CU24/CU41/CU42): Unicode conversions
   between UTF-8/16/32.  s->insn->data encodes source/destination
   formats as two decimal digits (e.g. 12 = UTF-1(8) to UTF-2(16)). */
static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i32 tr1, tr2, chk;

    /* R1 and R2 must both be even.  */
    if ((r1 | r2) & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    /* The well-formedness check flag in m3 only exists with ETF3-ENH. */
    if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
        m3 = 0;
    }

    tr1 = tcg_const_i32(r1);
    tr2 = tcg_const_i32(r2);
    chk = tcg_const_i32(m3);

    switch (s->insn->data) {
    case 12:
        gen_helper_cu12(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 14:
        gen_helper_cu14(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 21:
        gen_helper_cu21(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 24:
        gen_helper_cu24(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 41:
        gen_helper_cu41(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 42:
        gen_helper_cu42(cc_op, cpu_env, tr1, tr2, chk);
        break;
    default:
        g_assert_not_reached();
    }

    tcg_temp_free_i32(tr1);
    tcg_temp_free_i32(tr2);
    tcg_temp_free_i32(chk);
    set_cc_static(s);
    return DISAS_NEXT;
}
2370
2371#ifndef CONFIG_USER_ONLY
/* DIAGNOSE (privileged): hypervisor call; the function code comes from
   the i2 field and the helper dispatches on it. */
static DisasJumpType op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    TCGv_i32 func_code = tcg_const_i32(get_field(s, i2));

    gen_helper_diag(cpu_env, r1, r3, func_code);

    tcg_temp_free_i32(func_code);
    tcg_temp_free_i32(r3);
    tcg_temp_free_i32(r1);
    return DISAS_NEXT;
}
2385#endif
2386
/* DIVIDE (32-bit signed): helper returns remainder here, quotient via
   the low-128 mechanism into the odd register of the pair. */
static DisasJumpType op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}
2393
/* DIVIDE LOGICAL (32-bit unsigned): remainder in out2, quotient via
   the low-128 mechanism. */
static DisasJumpType op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}
2400
/* DIVIDE SINGLE (64-bit signed, DSG/DSGF): remainder in out2, quotient
   via the low-128 mechanism. */
static DisasJumpType op_divs64(DisasContext *s, DisasOps *o)
{
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}
2407
/* DIVIDE LOGICAL (64-bit unsigned, DLG/DLGR): 128-bit dividend in
   out:out2 divided by in2; remainder in out2, quotient via low-128. */
static DisasJumpType op_divu64(DisasContext *s, DisasOps *o)
{
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}
2414
/* DIVIDE (short BFP, DEB/DEBR). */
static DisasJumpType op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
2420
/* DIVIDE (long BFP, DDB/DDBR). */
static DisasJumpType op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
2426
/* DIVIDE (extended BFP, DXBR): 128-bit operands are passed as two
   64-bit halves; the low half of the result returns via low-128. */
static DisasJumpType op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
2433
/* EXTRACT ACCESS (EAR): copy access register R2 into the output. */
static DisasJumpType op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
    return DISAS_NEXT;
}
2440
/* EXTRACT CPU ATTRIBUTE (ECAG): we emulate no cache topology, so every
   query reports "not provided" (-1). */
static DisasJumpType op_ecag(DisasContext *s, DisasOps *o)
{
    /* No cache information provided.  */
    tcg_gen_movi_i64(o->out, -1);
    return DISAS_NEXT;
}
2447
/* EXTRACT FPC (EFPC): read the floating-point control register. */
static DisasJumpType op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return DISAS_NEXT;
}
2453
/* EXTRACT PSW (EPSW): R1 gets PSW bits 0-31, and if R2 is nonzero, R2
   gets bits 32-63. */
static DisasJumpType op_epsw(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i64 t = tcg_temp_new_i64();

    /* Note the "subsequently" in the PoO, which implies a defined result
       if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
    tcg_gen_shri_i64(t, psw_mask, 32);
    store_reg32_i64(r1, t);
    if (r2 != 0) {
        store_reg32_i64(r2, psw_mask);
    }

    tcg_temp_free_i64(t);
    return DISAS_NEXT;
}
2471
/* EXECUTE (EX/EXRL): run the instruction at the second-operand address
   with its second byte OR'ed with the low byte of R1.  The helper
   fetches and stashes the target insn; translation restarts on it. */
static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    TCGv_i32 ilen;
    TCGv_i64 v1;

    /* Nested EXECUTE is not allowed.  */
    if (unlikely(s->ex_value)) {
        gen_program_exception(s, PGM_EXECUTE);
        return DISAS_NORETURN;
    }

    /* The helper may fault or change control flow; make the PSW address
       and cc state architecturally visible first. */
    update_psw_addr(s);
    update_cc_op(s);

    /* R1 == 0 means "no modification byte". */
    if (r1 == 0) {
        v1 = tcg_const_i64(0);
    } else {
        v1 = regs[r1];
    }

    ilen = tcg_const_i32(s->ilen);
    gen_helper_ex(cpu_env, ilen, v1, o->in2);
    tcg_temp_free_i32(ilen);

    if (r1 == 0) {
        tcg_temp_free_i64(v1);
    }

    return DISAS_PC_CC_UPDATED;
}
2503
/* LOAD FP INTEGER (short BFP, FIEBR/FIEBRA): round to integral value. */
static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
{
    /* NULL m34 means the modifier check already raised an exception. */
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fieb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}
2515
/* LOAD FP INTEGER (long BFP, FIDBR/FIDBRA): round to integral value. */
static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
{
    /* NULL m34 means the modifier check already raised an exception. */
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fidb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}
2527
/* LOAD FP INTEGER (extended BFP, FIXBR/FIXBRA): round to integral value;
   the 128-bit result's low half returns via low-128. */
static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
{
    /* NULL m34 means the modifier check already raised an exception. */
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m34);
    return_low128(o->out2);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}
2540
/* FIND LEFTMOST ONE (FLOGR): R1 = bit position of the leftmost one (or
   64 if none); R1+1 = input with that bit cleared. */
static DisasJumpType op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation.  */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64.  */
    tcg_gen_clzi_i64(o->out, o->in2, 64);

    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing.  */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out)
;
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return DISAS_NEXT;
}
2560
2561static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
2562{
2563    int m3 = get_field(s, m3);
2564    int pos, len, base = s->insn->data;
2565    TCGv_i64 tmp = tcg_temp_new_i64();
2566    uint64_t ccm;
2567
2568    switch (m3) {
2569    case 0xf:
2570        /* Effectively a 32-bit load.  */
2571        tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2572        len = 32;
2573        goto one_insert;
2574
2575    case 0xc:
2576    case 0x6:
2577    case 0x3:
2578        /* Effectively a 16-bit load.  */
2579        tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2580        len = 16;
2581        goto one_insert;
2582
2583    case 0x8:
2584    case 0x4:
2585    case 0x2:
2586    case 0x1:
2587        /* Effectively an 8-bit load.  */
2588        tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2589        len = 8;
2590        goto one_insert;
2591
2592    one_insert:
2593        pos = base + ctz32(m3) * 8;
2594        tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2595        ccm = ((1ull << len) - 1) << pos;
2596        break;
2597
2598    default:
2599        /* This is going to be a sequence of loads and inserts.  */
2600        pos = base + 32 - 8;
2601        ccm = 0;
2602        while (m3) {
2603            if (m3 & 0x8) {
2604                tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2605                tcg_gen_addi_i64(o->in2, o->in2, 1);
2606                tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2607                ccm |= 0xff << pos;
2608            }
2609            m3 = (m3 << 1) & 0xf;
2610            pos -= 8;
2611        }
2612        break;
2613    }
2614
2615    tcg_gen_movi_i64(tmp, ccm);
2616    gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2617    tcg_temp_free_i64(tmp);
2618    return DISAS_NEXT;
2619}
2620
/* Insert immediate: deposit in2 into in1 at the position/size packed
   into s->insn->data (low byte = shift, high byte = field width). */
static DisasJumpType op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return DISAS_NEXT;
}
2628
/* INSERT PROGRAM MASK (IPM): place the CC and program mask into bits
   24-31 of R1 (CC in bits 28-29 of the stored byte). */
static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    /* Materialize the current CC value before reading it. */
    gen_op_calc_cc(s);
    t1 = tcg_temp_new_i64();
    /* Program mask from PSW bits 40-43 ... */
    tcg_gen_extract_i64(t1, psw_mask, 40, 4);
    t2 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(t2, cc_op);
    /* ... combined with CC, then deposited into byte 3 of the register. */
    tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
    tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return DISAS_NEXT;
}
2644
2645#ifndef CONFIG_USER_ONLY
/* INVALIDATE DAT TABLE ENTRY (IDTE, privileged).  The local-clearing m4
   field is only honored with the local-TLB-clearing facility. */
static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m4;

    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
        m4 = tcg_const_i32(get_field(s, m4));
    } else {
        m4 = tcg_const_i32(0);
    }
    gen_helper_idte(cpu_env, o->in1, o->in2, m4);
    tcg_temp_free_i32(m4);
    return DISAS_NEXT;
}
2659
/* INVALIDATE PAGE TABLE ENTRY (IPTE, privileged).  The local-clearing m4
   field is only honored with the local-TLB-clearing facility. */
static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m4;

    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
        m4 = tcg_const_i32(get_field(s, m4));
    } else {
        m4 = tcg_const_i32(0);
    }
    gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
    tcg_temp_free_i32(m4);
    return DISAS_NEXT;
}
2673
/* INSERT STORAGE KEY EXTENDED (ISKE, privileged). */
static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
{
    gen_helper_iske(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}
2679#endif
2680
/* Message-security-assist family (KM, KMC, KMAC, KIMD, KLMD, KMCTR,
   KMF, KMO, KMA, PCKMO, PCC, PPNO): validate register-pair constraints
   per instruction type, then dispatch to the common helper. */
static DisasJumpType op_msa(DisasContext *s, DisasOps *o)
{
    int r1 = have_field(s, r1) ? get_field(s, r1) : 0;
    int r2 = have_field(s, r2) ? get_field(s, r2) : 0;
    int r3 = have_field(s, r3) ? get_field(s, r3) : 0;
    TCGv_i32 t_r1, t_r2, t_r3, type;

    /* Each case adds its own constraint and falls through to the checks
       shared with the less-restrictive instruction types below. */
    switch (s->insn->data) {
    case S390_FEAT_TYPE_KMA:
        if (r3 == r1 || r3 == r2) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_KMCTR:
        if (r3 & 1 || !r3) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_PPNO:
    case S390_FEAT_TYPE_KMF:
    case S390_FEAT_TYPE_KMC:
    case S390_FEAT_TYPE_KMO:
    case S390_FEAT_TYPE_KM:
        if (r1 & 1 || !r1) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_KMAC:
    case S390_FEAT_TYPE_KIMD:
    case S390_FEAT_TYPE_KLMD:
        if (r2 & 1 || !r2) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_PCKMO:
    case S390_FEAT_TYPE_PCC:
        break;
    default:
        g_assert_not_reached();
    };

    t_r1 = tcg_const_i32(r1);
    t_r2 = tcg_const_i32(r2);
    t_r3 = tcg_const_i32(r3);
    type = tcg_const_i32(s->insn->data);
    gen_helper_msa(cc_op, cpu_env, t_r1, t_r2, t_r3, type);
    set_cc_static(s);
    tcg_temp_free_i32(t_r1);
    tcg_temp_free_i32(t_r2);
    tcg_temp_free_i32(t_r3);
    tcg_temp_free_i32(type);
    return DISAS_NEXT;
}
2738
/* COMPARE AND SIGNAL (short BFP, KEB/KEBR). */
static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
{
    gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
2745
/* COMPARE AND SIGNAL (long BFP, KDB/KDBR). */
static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
{
    gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
2752
/* COMPARE AND SIGNAL (extended BFP, KXBR): 128-bit operands passed as
   two 64-bit halves each. */
static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
{
    gen_helper_kxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
2759
/* LOAD AND ADD (LAA/LAAG): interlocked fetch-and-add. */
static DisasJumpType op_laa(DisasContext *s, DisasOps *o)
{
    /* The atomic op leaves the ORIGINAL memory value in in2; that is the
       architectural result written back to R1. */
    tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the addition for setting CC.  */
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
2770
/* LOAD AND AND (LAN/LANG): interlocked fetch-and-and. */
static DisasJumpType op_lan(DisasContext *s, DisasOps *o)
{
    /* The atomic op leaves the ORIGINAL memory value in in2; that is the
       architectural result written back to R1. */
    tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
2781
/* LOAD AND OR (LAO/LAOG): interlocked fetch-and-or. */
static DisasJumpType op_lao(DisasContext *s, DisasOps *o)
{
    /* The atomic op leaves the ORIGINAL memory value in in2; that is the
       architectural result written back to R1. */
    tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
2792
/* LOAD AND EXCLUSIVE OR (LAX/LAXG): interlocked fetch-and-xor. */
static DisasJumpType op_lax(DisasContext *s, DisasOps *o)
{
    /* The atomic op leaves the ORIGINAL memory value in in2; that is the
       architectural result written back to R1. */
    tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
2803
/* LOAD LENGTHENED (LDEB/LDEBR): short BFP to long BFP. */
static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}
2809
/* LOAD ROUNDED (LEDBR/LEDBRA): long BFP to short BFP. */
static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
{
    /* NULL m34 means the modifier check already raised an exception. */
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_ledb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}
2821
/* LOAD ROUNDED (LDXBR/LDXBRA): extended BFP to long BFP. */
static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
{
    /* NULL m34 means the modifier check already raised an exception. */
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}
2833
/* LOAD ROUNDED (LEXBR/LEXBRA): extended BFP to short BFP. */
static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
{
    /* NULL m34 means the modifier check already raised an exception. */
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_lexb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}
2845
/* LOAD LENGTHENED (LXDB/LXDBR): long BFP to extended BFP; low half of
   the 128-bit result returns via low-128. */
static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
2852
/* LOAD LENGTHENED (LXEB/LXEBR): short BFP to extended BFP; low half of
   the 128-bit result returns via low-128. */
static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
2859
/* LOAD (short, LDE/LDER): place the 32-bit operand in the high half of
   the FP register, leaving the low half as emitted. */
static DisasJumpType op_lde(DisasContext *s, DisasOps *o)
{
    tcg_gen_shli_i64(o->out, o->in2, 32);
    return DISAS_NEXT;
}
2865
/* LOAD LOGICAL THIRTY ONE BITS (LLGT/LLGTR): mask to 31 bits. */
static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return DISAS_NEXT;
}
2871
/* Load a sign-extended byte from the second-operand address. */
static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
2877
/* Load a zero-extended byte from the second-operand address. */
static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
2883
/* Load a sign-extended halfword from the second-operand address. */
static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
2889
/* Load a zero-extended halfword from the second-operand address. */
static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
2895
/* Load a sign-extended word from the second-operand address. */
static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
2901
/* Load a zero-extended word from the second-operand address. */
static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
2907
/* Load a doubleword from the second-operand address. */
static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
2913
/* LOAD AND TRAP (LAT): store the loaded 32-bit value, then trap if it
   was zero. */
static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32_i64(get_field(s, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
2924
/* LOAD AND TRAP (LGAT, 64-bit): load, then trap if the value was zero. */
static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
2935
/* LOAD HIGH AND TRAP (LFHAT): store into the high half of R1, then
   trap if the value was zero. */
static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32h_i64(get_field(s, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
2945}
2946
/* LOAD LOGICAL AND TRAP (LLGFAT): zero-extending 32-bit load, then
   trap if the value was zero. */
static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
2957
/* LOAD LOGICAL THIRTY ONE BITS AND TRAP (LLGTAT): mask to 31 bits,
   then trap if the result was zero. */
static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
2968
/* LOAD ON CONDITION (LOC/LOCG/...): out = in2 if the m3 condition
   holds, else in1 (the old value), implemented as a movcond. */
static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;

    disas_jcc(s, &c, get_field(s, m3));

    if (c.is_64) {
        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
                            o->in2, o->in1);
        free_compare(&c);
    } else {
        /* 32-bit comparison: widen the condition result to 64 bits and
           select against zero. */
        TCGv_i32 t32 = tcg_temp_new_i32();
        TCGv_i64 t, z;

        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
        free_compare(&c);

        t = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t, t32);
        tcg_temp_free_i32(t32);

        z = tcg_const_i64(0);
        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
        tcg_temp_free_i64(t);
        tcg_temp_free_i64(z);
    }

    return DISAS_NEXT;
}
2998
2999#ifndef CONFIG_USER_ONLY
/* LOAD CONTROL (LCTL, privileged): load 32-bit control registers R1-R3. */
static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_lctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return DISAS_PC_STALE_NOCHAIN;
}
3010
/* LOAD CONTROL (LCTLG, privileged): load 64-bit control registers R1-R3. */
static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return DISAS_PC_STALE_NOCHAIN;
}
3021
/* LOAD REAL ADDRESS (LRA, privileged): translate a virtual address. */
static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
{
    gen_helper_lra(o->out, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3028
/* LOAD PROGRAM PARAMETER (LPP, privileged). */
static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
{
    tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
    return DISAS_NEXT;
}
3034
/* LOAD PSW (LPSW, privileged): load an 8-byte short-format PSW from
   memory; never returns to the current TB. */
static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    /* The 8-byte operand must be doubleword aligned. */
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
                        MO_TEUL | MO_ALIGN_8);
    tcg_gen_addi_i64(o->in2, o->in2, 4);
    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK.  */
    tcg_gen_shli_i64(t1, t1, 32);
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return DISAS_NORETURN;
}
3054
/* LOAD PSW EXTENDED (LPSWE, privileged): load a 16-byte PSW from
   memory; never returns to the current TB. */
static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    /* The 16-byte operand must be doubleword aligned. */
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
                        MO_TEQ | MO_ALIGN_8);
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return DISAS_NORETURN;
}
3072#endif
3073
/* LOAD ACCESS MULTIPLE (LAM): load access registers R1 through R3. */
static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_lam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}
3083
/* LOAD MULTIPLE (32-bit): load the low halves of registers r1..r3
   (wrapping modulo 16) from successive words at in2.  The first and
   last words are loaded before any register is written so that a page
   fault leaves all registers unmodified. */
static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t1);
        tcg_temp_free(t1);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
    store_reg32_i64(r1, t1);
    store_reg32_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t2);
        tcg_temp_free(t1);
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t1);
    }
    tcg_temp_free(t2);
    tcg_temp_free(t1);

    return DISAS_NEXT;
}
3129
/* LOAD MULTIPLE HIGH: same structure as op_lm32, but each loaded word
   is written to the high half of the target register
   (store_reg32h_i64). */
static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t1);
        tcg_temp_free(t1);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
    store_reg32h_i64(r1, t1);
    store_reg32h_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t2);
        tcg_temp_free(t1);
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t1);
    }
    tcg_temp_free(t2);
    tcg_temp_free(t1);

    return DISAS_NEXT;
}
3175
/* LOAD MULTIPLE (64-bit): load full registers r1..r3 (wrapping modulo
   16) from successive doublewords at in2.  As with op_lm32, the first
   and last values are loaded before any register is written so a page
   fault leaves the register file untouched. */
static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
    /* Commit r1 only after the last load succeeded. */
    tcg_gen_mov_i64(regs[r1], t1);
    tcg_temp_free(t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t1);
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t1, 8);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t1);
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
    }
    tcg_temp_free(t1);

    return DISAS_NEXT;
}
3216
/* LOAD PAIR DISJOINT: load two aligned operands from independent
   addresses (b1+d1, b2+d2).  The operand size comes from insn->data.
   The interlocked semantics are only emulated by restarting the insn
   with the world stopped when other CPUs run in parallel. */
static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 a1, a2;
    MemOp mop = s->insn->data;

    /* In a parallel context, stop the world and single step.  */
    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        update_psw_addr(s);
        update_cc_op(s);
        gen_exception(EXCP_ATOMIC);
        return DISAS_NORETURN;
    }

    /* In a serial context, perform the two loads ... */
    a1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
    a2 = get_address(s, 0, get_field(s, b2), get_field(s, d2));
    tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
    tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
    tcg_temp_free_i64(a1);
    tcg_temp_free_i64(a2);

    /* ... and indicate that we performed them while interlocked.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}
3242
3243static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
3244{
3245    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
3246        gen_helper_lpq(o->out, cpu_env, o->in2);
3247    } else if (HAVE_ATOMIC128) {
3248        gen_helper_lpq_parallel(o->out, cpu_env, o->in2);
3249    } else {
3250        gen_helper_exit_atomic(cpu_env);
3251        return DISAS_NORETURN;
3252    }
3253    return_low128(o->out2);
3254    return DISAS_NEXT;
3255}
3256
3257#ifndef CONFIG_USER_ONLY
/* LOAD USING REAL ADDRESS: load through the real-address MMU index,
   bypassing DAT; the operand size comes from insn->data. */
static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_tl(o->out, o->in2, MMU_REAL_IDX, s->insn->data);
    return DISAS_NEXT;
}
3263#endif
3264
/* LOAD AND ZERO RIGHTMOST BYTE: copy in2 with its low 8 bits cleared. */
static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, -256);
    return DISAS_NEXT;
}
3270
/* LOAD COUNT TO BLOCK BOUNDARY: out = min(16, bytes remaining before
   the next 2^(m3+6) boundary after addr1); CC is derived from out. */
static DisasJumpType op_lcbb(DisasContext *s, DisasOps *o)
{
    const int64_t block_size = (1ull << (get_field(s, m3) + 6));

    /* m3 > 6 would select an unsupported block size. */
    if (get_field(s, m3) > 6) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    /* -(addr | -block_size) == block_size - (addr % block_size). */
    tcg_gen_ori_i64(o->addr1, o->addr1, -block_size);
    tcg_gen_neg_i64(o->addr1, o->addr1);
    tcg_gen_movi_i64(o->out, 16);
    tcg_gen_umin_i64(o->out, o->out, o->addr1);
    gen_op_update1_cc_i64(s, CC_OP_LCBB, o->out);
    return DISAS_NEXT;
}
3287
/* MONITOR CALL: a specification exception if the class (i2) has bits
   set outside the low byte; otherwise hand off to the monitor_call
   helper in system mode, and a NOP in user mode. */
static DisasJumpType op_mc(DisasContext *s, DisasOps *o)
{
#if !defined(CONFIG_USER_ONLY)
    TCGv_i32 i2;
#endif
    const uint16_t monitor_class = get_field(s, i2);

    if (monitor_class & 0xff00) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

#if !defined(CONFIG_USER_ONLY)
    i2 = tcg_const_i32(monitor_class);
    gen_helper_monitor_call(cpu_env, o->addr1, i2);
    tcg_temp_free_i32(i2);
#endif
    /* Defaults to a NOP. */
    return DISAS_NEXT;
}
3308
/* Generic move: steal in2 as the output temp instead of copying it,
   transferring ownership (the g_* flag tracks whether it is a global). */
static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    o->g_out = o->g_in2;
    o->in2 = NULL;
    o->g_in2 = false;
    return DISAS_NEXT;
}
3317
/* Move with update of access register 1 (MVCDK/MVCSK style operand
   setup): steal in2 as the output, then set AR1 according to the
   current address-space-control mode in the TB flags. */
static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s, b2);
    TCGv ar1 = tcg_temp_new_i64();

    o->out = o->in2;
    o->g_out = o->g_in2;
    o->in2 = NULL;
    o->g_in2 = false;

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 0);
        break;
    case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 1);
        break;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        /* In access-register mode with secondary ASC, copy the access
           register selected by b2; b2 == 0 yields 0. */
        if (b2) {
            tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
        } else {
            tcg_gen_movi_i64(ar1, 0);
        }
        break;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 2);
        break;
    }

    tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
    tcg_temp_free_i64(ar1);

    return DISAS_NEXT;
}
3352
/* Generic 128-bit move: steal both input temps as the output pair,
   transferring ownership like op_mov2. */
static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
{
    o->out = o->in1;
    o->out2 = o->in2;
    o->g_out = o->g_in1;
    o->g_out2 = o->g_in2;
    o->in1 = NULL;
    o->in2 = NULL;
    o->g_in1 = o->g_in2 = false;
    return DISAS_NEXT;
}
3364
/* MOVE (character): copy l1+1 bytes from in2 to addr1 via helper. */
static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

/* MOVE INVERSE: like MVC but the mvcin helper reverses byte order. */
static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}
3380
/* MOVE LONG: register pairs r1 and r2 must be even; the copy itself,
   including the CC result, is performed by the mvcl helper. */
static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i32 t1, t2;

    /* r1 and r2 must be even.  */
    if (r1 & 1 || r2 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t2 = tcg_const_i32(r2);
    gen_helper_mvcl(cc_op, cpu_env, t1, t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3401
/* MOVE LONG EXTENDED: register pairs r1 and r3 must be even; the pad
   byte arrives in in2 and the work is done by the mvcle helper. */
static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even.  */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return DISAS_NEXT;
}
3422
/* MOVE LONG UNICODE: same shape as op_mvcle, using the mvclu helper. */
static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even.  */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return DISAS_NEXT;
}
3443
/* MOVE WITH OPTIONAL SPECIFICATIONS: length in r3, handled entirely by
   the mvcos helper, which also sets the CC. */
static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);
    set_cc_static(s);
    return DISAS_NEXT;
}
3451
3452#ifndef CONFIG_USER_ONLY
/* MOVE TO PRIMARY: the length register index is encoded in the l1
   field; the cross-space copy and CC are handled by the mvcp helper. */
static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, l1);
    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3460
/* MOVE TO SECONDARY: counterpart of op_mvcp, using the mvcs helper. */
static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, l1);
    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3468#endif
3469
/* MOVE NUMERICS: copy only the low (numeric) nibbles via helper. */
static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

/* MOVE WITH OFFSET: nibble-shifted decimal move via helper. */
static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}
3485
/* MOVE PAGE: operands are addressed by r1/r2; r0 supplies the option
   bits.  Page copy and CC come from the mvpg helper. */
static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 t2 = tcg_const_i32(get_field(s, r2));

    gen_helper_mvpg(cc_op, cpu_env, regs[0], t1, t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3497
/* MOVE STRING: copy up to the terminator held in r0; the register
   updates and CC are handled by the mvst helper. */
static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 t2 = tcg_const_i32(get_field(s, r2));

    gen_helper_mvst(cc_op, cpu_env, t1, t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3509
/* MOVE ZONES: copy only the high (zone) nibbles via helper. */
static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}
3517
/* MULTIPLY: 64x64 -> low 64 bits. */
static DisasJumpType op_mul(DisasContext *s, DisasOps *o)
{
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* MULTIPLY LOGICAL: unsigned 64x64 -> 128-bit result in out:out2. */
static DisasJumpType op_mul128(DisasContext *s, DisasOps *o)
{
    tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* MULTIPLY SINGLE: signed 64x64 -> 128-bit result in out:out2. */
static DisasJumpType op_muls128(DisasContext *s, DisasOps *o)
{
    tcg_gen_muls2_i64(o->out2, o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* MULTIPLY (short BFP): via softfloat helper. */
static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
{
    gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* MULTIPLY (short -> long BFP): via softfloat helper. */
static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* MULTIPLY (long BFP): via softfloat helper. */
static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* MULTIPLY (extended BFP): 128-bit operands, low half via low128. */
static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}

/* MULTIPLY (long -> extended BFP): low half via low128. */
static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}

/* MULTIPLY AND ADD (short BFP): out = in1 * in2 + f[r3]. */
static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
    gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return DISAS_NEXT;
}

/* MULTIPLY AND ADD (long BFP). */
static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg(get_field(s, r3));
    gen_helper_madb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return DISAS_NEXT;
}

/* MULTIPLY AND SUBTRACT (short BFP). */
static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
    gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return DISAS_NEXT;
}

/* MULTIPLY AND SUBTRACT (long BFP). */
static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg(get_field(s, r3));
    gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return DISAS_NEXT;
}
3599
3600static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
3601{
3602    TCGv_i64 z, n;
3603    z = tcg_const_i64(0);
3604    n = tcg_temp_new_i64();
3605    tcg_gen_neg_i64(n, o->in2);
3606    tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
3607    tcg_temp_free_i64(n);
3608    tcg_temp_free_i64(z);
3609    return DISAS_NEXT;
3610}
3611
/* LOAD NEGATIVE (short BFP): force the sign bit of the 32-bit value. */
static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
    return DISAS_NEXT;
}

/* LOAD NEGATIVE (long BFP): force the sign bit of the 64-bit value. */
static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
    return DISAS_NEXT;
}

/* LOAD NEGATIVE (extended BFP): sign bit lives in the high half. */
static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}
3630
/* AND (character): byte-wise AND of two storage operands via helper,
   which also produces the CC. */
static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}
3639
/* LOAD COMPLEMENT: out = -in2. */
static DisasJumpType op_neg(DisasContext *s, DisasOps *o)
{
    tcg_gen_neg_i64(o->out, o->in2);
    return DISAS_NEXT;
}

/* LOAD COMPLEMENT (short BFP): flip the 32-bit sign bit. */
static DisasJumpType op_negf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
    return DISAS_NEXT;
}

/* LOAD COMPLEMENT (long BFP): flip the 64-bit sign bit. */
static DisasJumpType op_negf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
    return DISAS_NEXT;
}

/* LOAD COMPLEMENT (extended BFP): sign bit is in the high half. */
static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}
3664
/* OR (character): byte-wise OR of two storage operands via helper,
   which also produces the CC. */
static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}
3673
/* OR: out = in1 | in2. */
static DisasJumpType op_or(DisasContext *s, DisasOps *o)
{
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3679
/* OR IMMEDIATE into a sub-field of a register: insn->data encodes the
   field's bit position (low byte) and width (high bits).  The CC is
   computed from the affected field only. */
static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    /* in2 must be a local temp we can clobber. */
    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
3695
/* OR to memory: load-or-store, done atomically when the
   interlocked-access facility 2 is available.  insn->data carries the
   MemOp for the access size. */
static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                    s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
3716
/* PACK: convert zoned to packed decimal via helper. */
static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_pack(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}
3724
/* PACK ASCII: source length l2+1 is limited to 32 bytes; the
   conversion itself is done by the pka helper. */
static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
{
    int l2 = get_field(s, l2) + 1;
    TCGv_i32 l;

    /* The length must not exceed 32 bytes.  */
    if (l2 > 32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_const_i32(l2);
    gen_helper_pka(cpu_env, o->addr1, o->in2, l);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}
3740
/* PACK UNICODE: source length l2+1 must be even and at most 64 bytes;
   the conversion itself is done by the pku helper. */
static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
{
    int l2 = get_field(s, l2) + 1;
    TCGv_i32 l;

    /* The length must be even and should not exceed 64 bytes.  */
    if ((l2 & 1) || (l2 > 64)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_const_i32(l2);
    gen_helper_pku(cpu_env, o->addr1, o->in2, l);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}
3756
/* POPULATION COUNT: per-byte bit counts, computed by the helper. */
static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o)
{
    gen_helper_popcnt(o->out, o->in2);
    return DISAS_NEXT;
}
3762
3763#ifndef CONFIG_USER_ONLY
/* PURGE TLB: flush translations via helper. */
static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
{
    gen_helper_ptlb(cpu_env);
    return DISAS_NEXT;
}
3769#endif
3770
/* ROTATE THEN INSERT SELECTED BITS (RISBG/RISBGN/RISBHG/RISBLG):
   rotate in2 left by i5 and insert bits i3..i4 of the result into in1.
   Bit 0x80 of i4 ("zero remaining") clears the untouched bits instead
   of keeping them.  Where possible this is lowered to a single
   extract or deposit TCG op; otherwise mask-and-merge is emitted. */
static DisasJumpType op_risbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s, i3);
    int i4 = get_field(s, i4);
    int i5 = get_field(s, i5);
    int do_zero = i4 & 0x80;
    uint64_t mask, imask, pmask;
    int pos, len, rot;

    /* Adjust the arguments for the specific insn.  */
    switch (s->fields.op2) {
    case 0x55: /* risbg */
    case 0x59: /* risbgn */
        i3 &= 63;
        i4 &= 63;
        pmask = ~0;
        break;
    case 0x5d: /* risbhg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0xffffffff00000000ull;
        break;
    case 0x51: /* risblg */
        i3 = (i3 & 31) + 32;
        i4 = (i4 & 31) + 32;
        pmask = 0x00000000ffffffffull;
        break;
    default:
        g_assert_not_reached();
    }

    /* MASK is the set of bits to be inserted from R2. */
    if (i3 <= i4) {
        /* [0...i3---i4...63] */
        mask = (-1ull >> i3) & (-1ull << (63 - i4));
    } else {
        /* [0---i4...i3---63] */
        mask = (-1ull >> i3) | (-1ull << (63 - i4));
    }
    /* For RISBLG/RISBHG, the wrapping is limited to the high/low doubleword. */
    mask &= pmask;

    /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
       insns, we need to keep the other half of the register.  */
    imask = ~mask | ~pmask;
    if (do_zero) {
        imask = ~pmask;
    }

    len = i4 - i3 + 1;
    pos = 63 - i4;
    rot = i5 & 63;

    /* In some cases we can implement this with extract.  */
    if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
        tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
        return DISAS_NEXT;
    }

    /* In some cases we can implement this with deposit.  */
    if (len > 0 && (imask == 0 || ~mask == imask)) {
        /* Note that we rotate the bits to be inserted to the lsb, not to
           the position as described in the PoO.  */
        rot = (rot - pos) & 63;
    } else {
        pos = -1;
    }

    /* Rotate the input as necessary.  */
    tcg_gen_rotli_i64(o->in2, o->in2, rot);

    /* Insert the selected bits into the output.  */
    if (pos >= 0) {
        if (imask == 0) {
            tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
        } else {
            tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
        }
    } else if (imask == 0) {
        tcg_gen_andi_i64(o->out, o->in2, mask);
    } else {
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_andi_i64(o->out, o->out, imask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
    }
    return DISAS_NEXT;
}
3858
/* ROTATE THEN <AND|OR|XOR> SELECTED BITS (RNSBG/ROSBG/RXSBG):
   rotate in2 left by i5, combine bits i3..i4 with in1 using the
   boolean op selected by the opcode, and set the CC from the selected
   bits.  Bit 0x80 of i3 makes this test-only (result discarded). */
static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s, i3);
    int i4 = get_field(s, i4);
    int i5 = get_field(s, i5);
    uint64_t mask;

    /* If this is a test-only form, arrange to discard the result.  */
    if (i3 & 0x80) {
        o->out = tcg_temp_new_i64();
        o->g_out = false;
    }

    i3 &= 63;
    i4 &= 63;
    i5 &= 63;

    /* MASK is the set of bits to be operated on from R2.
       Take care for I3/I4 wraparound.  */
    mask = ~0ull >> i3;
    if (i3 <= i4) {
        mask ^= ~0ull >> i4 >> 1;
    } else {
        mask |= ~(~0ull >> i4 >> 1);
    }

    /* Rotate the input as necessary.  */
    tcg_gen_rotli_i64(o->in2, o->in2, i5);

    /* Operate.  */
    switch (s->fields.op2) {
    case 0x54: /* AND */
        tcg_gen_ori_i64(o->in2, o->in2, ~mask);
        tcg_gen_and_i64(o->out, o->out, o->in2);
        break;
    case 0x56: /* OR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
        break;
    case 0x57: /* XOR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_xor_i64(o->out, o->out, o->in2);
        break;
    default:
        abort();
    }

    /* Set the CC.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
3911
/* Byte-swap the low 16 bits; upper bits of out are zeroed. */
static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap16_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
    return DISAS_NEXT;
}

/* Byte-swap the low 32 bits; upper bits of out are zeroed. */
static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap32_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
    return DISAS_NEXT;
}

/* Byte-swap the full 64 bits. */
static DisasJumpType op_rev64(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap64_i64(o->out, o->in2);
    return DISAS_NEXT;
}
3929
3930static DisasJumpType op_rll32(DisasContext *s, DisasOps *o)
3931{
3932    TCGv_i32 t1 = tcg_temp_new_i32();
3933    TCGv_i32 t2 = tcg_temp_new_i32();
3934    TCGv_i32 to = tcg_temp_new_i32();
3935    tcg_gen_extrl_i64_i32(t1, o->in1);
3936    tcg_gen_extrl_i64_i32(t2, o->in2);
3937    tcg_gen_rotl_i32(to, t1, t2);
3938    tcg_gen_extu_i32_i64(o->out, to);
3939    tcg_temp_free_i32(t1);
3940    tcg_temp_free_i32(t2);
3941    tcg_temp_free_i32(to);
3942    return DISAS_NEXT;
3943}
3944
/* ROTATE LEFT SINGLE LOGICAL (64-bit). */
static DisasJumpType op_rll64(DisasContext *s, DisasOps *o)
{
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3950
3951#ifndef CONFIG_USER_ONLY
/* RESET REFERENCE BIT EXTENDED: via helper, which also sets the CC. */
static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
{
    gen_helper_rrbe(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3958
/* SET ADDRESS SPACE CONTROL (FAST): via helper; must end the TB since
   subsequent code may translate addresses differently. */
static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
{
    gen_helper_sacf(cpu_env, o->in2);
    /* Addressing mode has changed, so end the block.  */
    return DISAS_PC_STALE;
}
3965#endif
3966
/* SET ADDRESSING MODE (SAM24/SAM31/SAM64): insn->data selects the
   mode (0/1/3); deposit it into bits 31-32 of the PSW mask and end
   the TB since translation depends on the addressing mode. */
static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
{
    int sam = s->insn->data;
    TCGv_i64 tsam;
    uint64_t mask;

    switch (sam) {
    case 0:
        /* 24-bit mode. */
        mask = 0xffffff;
        break;
    case 1:
        /* 31-bit mode. */
        mask = 0x7fffffff;
        break;
    default:
        /* 64-bit mode. */
        mask = -1;
        break;
    }

    /* Bizarre but true, we check the address of the current insn for the
       specification exception, not the next to be executed.  Thus the PoO
       documents that Bad Things Happen two bytes before the end.  */
    if (s->base.pc_next & ~mask) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    s->pc_tmp &= mask;

    tsam = tcg_const_i64(sam);
    tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
    tcg_temp_free_i64(tsam);

    /* Always exit the TB, since we (may have) changed execution mode.  */
    return DISAS_PC_STALE;
}
4001
/* SET ACCESS: store the low 32 bits of in2 into access register r1. */
static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
    return DISAS_NEXT;
}
4008
/* SUBTRACT (short BFP): via softfloat helper. */
static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
{
    gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* SUBTRACT (long BFP): via softfloat helper. */
static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* SUBTRACT (extended BFP): 128-bit operands, low half via low128. */
static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}

/* SQUARE ROOT (short BFP). */
static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqeb(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}

/* SQUARE ROOT (long BFP). */
static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqdb(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}

/* SQUARE ROOT (extended BFP): low half via low128. */
static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
4046
4047#ifndef CONFIG_USER_ONLY
/* SERVICE CALL: via helper, which also sets the CC. */
static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
{
    gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
    set_cc_static(s);
    return DISAS_NEXT;
}
4054
/* SIGNAL PROCESSOR: via helper, which also sets the CC. */
static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3);
    set_cc_static(s);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}
4065#endif
4066
/* STORE ON CONDITION (STOC/STOCG/STOCFH, selected by insn->data):
   branch around the store when the condition in m3 is NOT met. */
static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    TCGv_i64 a, h;
    TCGLabel *lab;
    int r1;

    disas_jcc(s, &c, get_field(s, m3));

    /* We want to store when the condition is fulfilled, so branch
       out when it's not */
    c.cond = tcg_invert_cond(c.cond);

    lab = gen_new_label();
    if (c.is_64) {
        tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
    } else {
        tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
    }
    free_compare(&c);

    r1 = get_field(s, r1);
    a = get_address(s, 0, get_field(s, b2), get_field(s, d2));
    switch (s->insn->data) {
    case 1: /* STOCG */
        tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
        break;
    case 0: /* STOC */
        tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
        break;
    case 2: /* STOCFH */
        /* Store the high half of the register. */
        h = tcg_temp_new_i64();
        tcg_gen_shri_i64(h, regs[r1], 32);
        tcg_gen_qemu_st32(h, a, get_mem_index(s));
        tcg_temp_free_i64(h);
        break;
    default:
        g_assert_not_reached();
    }
    tcg_temp_free_i64(a);

    gen_set_label(lab);
    return DISAS_NEXT;
}
4111
/* SHIFT LEFT SINGLE (arithmetic): insn->data is the sign-bit position
   (31 or 63), which selects the CC computation width. */
static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
{
    uint64_t sign = 1ull << s->insn->data;
    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged.  */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return DISAS_NEXT;
}
4125
/* SHIFT LEFT SINGLE LOGICAL. */
static DisasJumpType op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* SHIFT RIGHT SINGLE (arithmetic). */
static DisasJumpType op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* SHIFT RIGHT SINGLE LOGICAL. */
static DisasJumpType op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
4143
/* SET FPC: load the floating-point-control register from in2.  */
static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
{
    gen_helper_sfpc(cpu_env, o->in2);
    return DISAS_NEXT;
}

/* SET FPC AND SIGNAL: like SFPC, but may also raise a simulated IEEE
   exception (handled entirely inside the helper).  */
static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
{
    gen_helper_sfas(cpu_env, o->in2);
    return DISAS_NEXT;
}

/* SET BFP ROUNDING MODE, 2-bit form.  */
static DisasJumpType op_srnm(DisasContext *s, DisasOps *o)
{
    /* Bits other than 62 and 63 are ignored. Bit 29 is set to zero. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0x3ull);
    gen_helper_srnm(cpu_env, o->addr1);
    return DISAS_NEXT;
}
4163
/* SET BFP ROUNDING MODE, 3-bit form.  */
static DisasJumpType op_srnmb(DisasContext *s, DisasOps *o)
{
    /* Bits 0-55 are ignored. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0xffull);
    gen_helper_srnm(cpu_env, o->addr1);
    return DISAS_NEXT;
}

/* SET DFP ROUNDING MODE: store the 3-bit mode into the DFP rounding
   field of the FPC (bits 4-6).  */
static DisasJumpType op_srnmt(DisasContext *s, DisasOps *o)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Bits other than 61-63 are ignored. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0x7ull);

    /* No need to call a helper, we don't implement dfp */
    tcg_gen_ld32u_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_deposit_i64(tmp, tmp, o->addr1, 4, 3);
    tcg_gen_st32_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));

    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
4187
/*
 * SET PROGRAM MASK: the CC comes from bits 28-29 of the low word of r1,
 * and the 4-bit program mask (bits 24-27) is deposited into the PSW.
 */
static DisasJumpType op_spm(DisasContext *s, DisasOps *o)
{
    tcg_gen_extrl_i64_i32(cc_op, o->in1);
    tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
    set_cc_static(s);

    /* Bits 24-27 of in1 become the PSW program mask.  */
    tcg_gen_shri_i64(o->in1, o->in1, 24);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
    return DISAS_NEXT;
}
4198
/*
 * EXTRACT CPU TIME.  Loads the third operand into r3, stores
 * (first operand - CPU timer) into GR0 and the second operand address
 * value into GR1.  All operands are fetched before any register is
 * modified so a fault leaves the registers unchanged.
 */
static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
{
    int b1 = get_field(s, b1);
    int d1 = get_field(s, d1);
    int b2 = get_field(s, b2);
    int d2 = get_field(s, d2);
    int r3 = get_field(s, r3);
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* fetch all operands first */
    o->in1 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in1, regs[b1], d1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in2, regs[b2], d2);
    o->addr1 = tcg_temp_new_i64();
    gen_addi_and_wrap_i64(s, o->addr1, regs[r3], 0);

    /* load the third operand into r3 before modifying anything */
    tcg_gen_qemu_ld64(regs[r3], o->addr1, get_mem_index(s));

    /* subtract CPU timer from first operand and store in GR0 */
    gen_helper_stpt(tmp, cpu_env);
    tcg_gen_sub_i64(regs[0], o->in1, tmp);

    /* store second operand in GR1 */
    tcg_gen_mov_i64(regs[1], o->in2);

    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
4229
4230#ifndef CONFIG_USER_ONLY
/* SET PSW KEY FROM ADDRESS: bits of in2 become the PSW access key.  */
static DisasJumpType op_spka(DisasContext *s, DisasOps *o)
{
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
    return DISAS_NEXT;
}

/* SET STORAGE KEY EXTENDED.  */
static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
{
    gen_helper_sske(cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* SET SYSTEM MASK: replace the leftmost PSW byte (bits 56-63 here,
   i.e. the system-mask byte) with in2.  */
static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
{
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return DISAS_PC_STALE_NOCHAIN;
}

/* STORE CPU ADDRESS: read the core id from the CPU state.  */
static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, core_id));
    return DISAS_NEXT;
}
4256#endif
4257
/* STORE CLOCK.  */
static DisasJumpType op_stck(DisasContext *s, DisasOps *o)
{
    gen_helper_stck(o->out, cpu_env);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}

/* STORE CLOCK EXTENDED: 16-byte result = extended TOD value plus the
   TOD programmable register in the low bytes.  */
static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
{
    TCGv_i64 c1 = tcg_temp_new_i64();
    TCGv_i64 c2 = tcg_temp_new_i64();
    TCGv_i64 todpr = tcg_temp_new_i64();
    gen_helper_stck(c1, cpu_env);
    /* 16-bit value stored in a uint32_t (only valid bits set) */
    tcg_gen_ld32u_i64(todpr, cpu_env, offsetof(CPUS390XState, todpr));
    /* Shift the 64-bit value into its place as a zero-extended
       104-bit value.  Note that "bit positions 64-103 are always
       non-zero so that they compare differently to STCK"; we set
       the least significant bit to 1.  */
    tcg_gen_shli_i64(c2, c1, 56);
    tcg_gen_shri_i64(c1, c1, 8);
    tcg_gen_ori_i64(c2, c2, 0x10000);
    tcg_gen_or_i64(c2, c2, todpr);
    tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
    tcg_temp_free_i64(c1);
    tcg_temp_free_i64(c2);
    tcg_temp_free_i64(todpr);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}
4292
4293#ifndef CONFIG_USER_ONLY
/* SET CLOCK: load the new TOD value (8 bytes, aligned) and hand it to
   the helper; the CC comes back from the helper.  */
static DisasJumpType op_sck(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
    gen_helper_sck(cc_op, cpu_env, o->in1);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SET CLOCK COMPARATOR.  */
static DisasJumpType op_sckc(DisasContext *s, DisasOps *o)
{
    gen_helper_sckc(cpu_env, o->in2);
    return DISAS_NEXT;
}

/* SET CLOCK PROGRAMMABLE FIELD: value taken from GR0.  */
static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o)
{
    gen_helper_sckpf(cpu_env, regs[0]);
    return DISAS_NEXT;
}

/* STORE CLOCK COMPARATOR.  */
static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
{
    gen_helper_stckc(o->out, cpu_env);
    return DISAS_NEXT;
}

/* STORE CONTROL (64-bit): store control registers r1..r3 at in2.  */
static DisasJumpType op_stctg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_stctg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}

/* STORE CONTROL (32-bit): store control registers r1..r3 at in2.  */
static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_stctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}

/* STORE CPU ID.  */
static DisasJumpType op_stidp(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));
    return DISAS_NEXT;
}

/* SET CPU TIMER.  */
static DisasJumpType op_spt(DisasContext *s, DisasOps *o)
{
    gen_helper_spt(cpu_env, o->in2);
    return DISAS_NEXT;
}

/* STORE FACILITY LIST (at a fixed low-core location, via helper).  */
static DisasJumpType op_stfl(DisasContext *s, DisasOps *o)
{
    gen_helper_stfl(cpu_env);
    return DISAS_NEXT;
}

/* STORE CPU TIMER.  */
static DisasJumpType op_stpt(DisasContext *s, DisasOps *o)
{
    gen_helper_stpt(o->out, cpu_env);
    return DISAS_NEXT;
}
4363
/* STORE SYSTEM INFORMATION: function code/selectors come from GR0/GR1;
   CC is produced by the helper.  */
static DisasJumpType op_stsi(DisasContext *s, DisasOps *o)
{
    gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SET PREFIX.  */
static DisasJumpType op_spx(DisasContext *s, DisasOps *o)
{
    gen_helper_spx(cpu_env, o->in2);
    return DISAS_NEXT;
}
4376
/*
 * Channel-subsystem I/O instructions.  These all delegate to helpers;
 * the subchannel id is conventionally taken from GR1 and the CC (where
 * the instruction sets one) is produced by the helper.
 */

/* CANCEL SUBCHANNEL.  */
static DisasJumpType op_xsch(DisasContext *s, DisasOps *o)
{
    gen_helper_xsch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* CLEAR SUBCHANNEL.  */
static DisasJumpType op_csch(DisasContext *s, DisasOps *o)
{
    gen_helper_csch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* HALT SUBCHANNEL.  */
static DisasJumpType op_hsch(DisasContext *s, DisasOps *o)
{
    gen_helper_hsch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* MODIFY SUBCHANNEL: in2 addresses the SCHIB.  */
static DisasJumpType op_msch(DisasContext *s, DisasOps *o)
{
    gen_helper_msch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* RESET CHANNEL PATH.  */
static DisasJumpType op_rchp(DisasContext *s, DisasOps *o)
{
    gen_helper_rchp(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* RESUME SUBCHANNEL.  */
static DisasJumpType op_rsch(DisasContext *s, DisasOps *o)
{
    gen_helper_rsch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SET ADDRESS LIMIT (no CC).  */
static DisasJumpType op_sal(DisasContext *s, DisasOps *o)
{
    gen_helper_sal(cpu_env, regs[1]);
    return DISAS_NEXT;
}

/* SET CHANNEL MONITOR (no CC).  */
static DisasJumpType op_schm(DisasContext *s, DisasOps *o)
{
    gen_helper_schm(cpu_env, regs[1], regs[2], o->in2);
    return DISAS_NEXT;
}

/* SIGNAL ADAPTER: not implemented.  */
static DisasJumpType op_siga(DisasContext *s, DisasOps *o)
{
    /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
    gen_op_movi_cc(s, 3);
    return DISAS_NEXT;
}

/* STORE CHANNEL PATH STATUS: not implemented.  */
static DisasJumpType op_stcps(DisasContext *s, DisasOps *o)
{
    /* The instruction is suppressed if not provided. */
    return DISAS_NEXT;
}

/* START SUBCHANNEL: in2 addresses the ORB.  */
static DisasJumpType op_ssch(DisasContext *s, DisasOps *o)
{
    gen_helper_ssch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* STORE SUBCHANNEL: in2 addresses the SCHIB destination.  */
static DisasJumpType op_stsch(DisasContext *s, DisasOps *o)
{
    gen_helper_stsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* STORE CHANNEL REPORT WORD.  */
static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o)
{
    gen_helper_stcrw(cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST PENDING INTERRUPTION.  */
static DisasJumpType op_tpi(DisasContext *s, DisasOps *o)
{
    gen_helper_tpi(cc_op, cpu_env, o->addr1);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST SUBCHANNEL: in2 addresses the IRB destination.  */
static DisasJumpType op_tsch(DisasContext *s, DisasOps *o)
{
    gen_helper_tsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* CHANNEL SUBSYSTEM CALL.  */
static DisasJumpType op_chsc(DisasContext *s, DisasOps *o)
{
    gen_helper_chsc(cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4485
/* STORE PREFIX: read the prefix register, masking to the architected
   bit positions.  */
static DisasJumpType op_stpx(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
    return DISAS_NEXT;
}
4492
/*
 * STORE THEN AND/OR SYSTEM MASK (STNSM opcode 0xac, else STOSM).
 * Stores the current system-mask byte at addr1, then ANDs or ORs the
 * immediate i2 into the high byte of the PSW mask.
 */
static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s, i2);
    TCGv_i64 t;

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place.  */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
    tcg_temp_free_i64(t);

    if (s->fields.op == 0xac) {
        /* STNSM: AND the immediate into the system-mask byte.  */
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        /* STOSM: OR the immediate into the system-mask byte.  */
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }

    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return DISAS_PC_STALE_NOCHAIN;
}
4516
/* STORE USING REAL ADDRESS (STURA/STURG): insn->data carries the MemOp
   size; the store bypasses DAT via MMU_REAL_IDX.  */
static DisasJumpType op_stura(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->in1, o->in2, MMU_REAL_IDX, s->insn->data);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        /* PER storage-alteration event for real-address stores.  */
        update_psw_addr(s);
        gen_helper_per_store_real(cpu_env);
    }
    return DISAS_NEXT;
}
4527#endif
4528
/* STORE FACILITY LIST EXTENDED; CC from the helper.  */
static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
{
    gen_helper_stfle(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* Generic 1-byte store of in1 at address in2.  */
static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

/* Generic 2-byte store of in1 at address in2.  */
static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

/* Generic 4-byte store of in1 at address in2.  */
static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

/* Generic 8-byte store of in1 at address in2.  */
static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
4559
/* STORE ACCESS MULTIPLE: store access registers r1..r3 at in2.  */
static DisasJumpType op_stam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_stam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}
4569
/*
 * STORE CHARACTERS UNDER MASK.  m3 selects which bytes of the (32-bit
 * wide) register field are stored; insn->data is the bit offset of that
 * field within the 64-bit register.  Contiguous masks are emitted as a
 * single sized store; sparse masks fall back to byte-by-byte stores.
 */
static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    int pos, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Bit position of the lowest selected byte.  */
    pos = base + ctz32(m3) * 8;
    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
        break;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
        break;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
        break;

    default:
        /* This is going to be a sequence of shifts and stores.  */
        pos = base + 32 - 8;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_shri_i64(tmp, o->in1, pos);
                tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
                tcg_gen_addi_i64(o->in2, o->in2, 1);
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
4618
/*
 * STORE MULTIPLE (STM/STMG): store registers r1..r3 (wrapping mod 16)
 * to consecutive memory; insn->data is the per-register size (4 or 8).
 */
static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    int size = s->insn->data;
    TCGv_i64 tsize = tcg_const_i64(size);

    while (1) {
        if (size == 8) {
            tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
        } else {
            tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
        }
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, tsize);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(tsize);
    return DISAS_NEXT;
}
4642
/*
 * STORE MULTIPLE HIGH: store the high 32 bits of registers r1..r3
 * (wrapping mod 16) to consecutive words at in2.
 */
static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_const_i64(4);
    TCGv_i64 t32 = tcg_const_i64(32);

    while (1) {
        /* Move the high half down; qemu_st32 stores the low 32 bits.  */
        tcg_gen_shl_i64(t, regs[r1], t32);
        tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t);
    tcg_temp_free_i64(t4);
    tcg_temp_free_i64(t32);
    return DISAS_NEXT;
}
4666
/* STORE PAIR TO QUADWORD: use the atomic helper when running in
   parallel context and the host has 128-bit atomics; otherwise bail
   out to the serialized slow path.  */
static DisasJumpType op_stpq(DisasContext *s, DisasOps *o)
{
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        gen_helper_stpq(cpu_env, o->in2, o->out2, o->out);
    } else if (HAVE_ATOMIC128) {
        gen_helper_stpq_parallel(cpu_env, o->in2, o->out2, o->out);
    } else {
        /* No way to do this atomically on this host: restart the
           instruction with exclusive execution.  */
        gen_helper_exit_atomic(cpu_env);
        return DISAS_NORETURN;
    }
    return DISAS_NEXT;
}
4679
/* SEARCH STRING; CC from the helper.  */
static DisasJumpType op_srst(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));

    gen_helper_srst(cpu_env, r1, r2);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SEARCH STRING UNICODE; CC from the helper.  */
static DisasJumpType op_srstu(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));

    gen_helper_srstu(cpu_env, r1, r2);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4705
/* Generic subtraction: out = in1 - in2.  */
static DisasJumpType op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* 64-bit logical subtraction; leaves the borrow in cc_src as 0 (no
   borrow) or -1 (borrow), matching the CC_OP_SUBU convention.  */
static DisasJumpType op_subu64(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_sub2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    return DISAS_NEXT;
}
4718
/* Compute borrow (0, -1) into cc_src.  The switch intentionally falls
   through: the generic path computes the carry bit into cc_src as (1,0)
   and then shares the ADDU conversion to borrow form.  */
static void compute_borrow(DisasContext *s)
{
    switch (s->cc_op) {
    case CC_OP_SUBU:
        /* The borrow value is already in cc_src (0,-1). */
        break;
    default:
        gen_op_calc_cc(s);
        /* fall through */
    case CC_OP_STATIC:
        /* The carry flag is the msb of CC; compute into cc_src. */
        tcg_gen_extu_i32_i64(cc_src, cc_op);
        tcg_gen_shri_i64(cc_src, cc_src, 1);
        /* fall through */
    case CC_OP_ADDU:
        /* Convert carry (1,0) to borrow (0,-1). */
        tcg_gen_subi_i64(cc_src, cc_src, 1);
        break;
    }
}
4740
/* SUBTRACT WITH BORROW, 32-bit: out = in1 - in2 + borrow(0,-1).  */
static DisasJumpType op_subb32(DisasContext *s, DisasOps *o)
{
    compute_borrow(s);

    /* Borrow is {0, -1}, so add to subtract. */
    tcg_gen_add_i64(o->out, o->in1, cc_src);
    tcg_gen_sub_i64(o->out, o->out, o->in2);
    return DISAS_NEXT;
}

/* SUBTRACT WITH BORROW, 64-bit: a 128-bit add/sub pair so the new
   borrow lands back in cc_src.  */
static DisasJumpType op_subb64(DisasContext *s, DisasOps *o)
{
    compute_borrow(s);

    /*
     * Borrow is {0, -1}, so add to subtract; replicate the
     * borrow input to produce 128-bit -1 for the addition.
     */
    TCGv_i64 zero = tcg_const_i64(0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, cc_src);
    tcg_gen_sub2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);
    tcg_temp_free_i64(zero);

    return DISAS_NEXT;
}
4766
/*
 * SUPERVISOR CALL: commit psw.addr and cc_op, record the SVC code and
 * instruction length for the interrupt handler, then raise EXCP_SVC.
 */
static DisasJumpType op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    update_psw_addr(s);
    update_cc_op(s);

    t = tcg_const_i32(get_field(s, i1) & 0xff);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
    tcg_temp_free_i32(t);

    t = tcg_const_i32(s->ilen);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
    tcg_temp_free_i32(t);

    gen_exception(EXCP_SVC);
    return DISAS_NORETURN;
}
4785
4786static DisasJumpType op_tam(DisasContext *s, DisasOps *o)
4787{
4788    int cc = 0;
4789
4790    cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0;
4791    cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0;
4792    gen_op_movi_cc(s, cc);
4793    return DISAS_NEXT;
4794}
4795
/* TEST DATA CLASS, short BFP; CC from the helper.  */
static DisasJumpType op_tceb(DisasContext *s, DisasOps *o)
{
    gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST DATA CLASS, long BFP; CC from the helper.  */
static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST DATA CLASS, extended BFP: the 128-bit value is in out/out2.  */
static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcxb(cc_op, cpu_env, o->out, o->out2, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4816
4817#ifndef CONFIG_USER_ONLY
4818
/* TEST BLOCK; CC from the helper.  */
static DisasJumpType op_testblock(DisasContext *s, DisasOps *o)
{
    gen_helper_testblock(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST PROTECTION; CC from the helper.  */
static DisasJumpType op_tprot(DisasContext *s, DisasOps *o)
{
    gen_helper_tprot(cc_op, cpu_env, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4832
4833#endif
4834
/* TEST DECIMAL: l1 is a length-minus-one field, hence the +1.  */
static DisasJumpType op_tp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l1 = tcg_const_i32(get_field(s, l1) + 1);
    gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
    tcg_temp_free_i32(l1);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TRANSLATE: table at in2, operand at addr1, length l1.  */
static DisasJumpType op_tr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_tr(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TRANSLATE EXTENDED; the helper returns a 128-bit result pair.  */
static DisasJumpType op_tre(DisasContext *s, DisasOps *o)
{
    gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TRANSLATE AND TEST; CC from the helper.  */
static DisasJumpType op_trt(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TRANSLATE AND TEST REVERSE; CC from the helper.  */
static DisasJumpType op_trtr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}
4878
/*
 * TRANSLATE ONE/TWO TO ONE/TWO (TROO/TROT/TRTO/TRTT): the low two bits
 * of the opcode select the source/destination element sizes, which are
 * passed to the helper via 'sizes'.  The test character comes from GR0,
 * unless m3 bit 0 requests that the test be skipped (ETF2-ENH only).
 */
static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
    TCGv_i32 sizes = tcg_const_i32(s->insn->opc & 3);
    TCGv_i32 tst = tcg_temp_new_i32();
    int m3 = get_field(s, m3);

    if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
        /* Without the facility the m3 field is treated as zero.  */
        m3 = 0;
    }
    if (m3 & 1) {
        /* -1 signals "no test character" to the helper.  */
        tcg_gen_movi_i32(tst, -1);
    } else {
        tcg_gen_extrl_i64_i32(tst, regs[0]);
        if (s->insn->opc & 3) {
            tcg_gen_ext8u_i32(tst, tst);
        } else {
            tcg_gen_ext16u_i32(tst, tst);
        }
    }
    gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    tcg_temp_free_i32(sizes);
    tcg_temp_free_i32(tst);
    set_cc_static(s);
    return DISAS_NEXT;
}
4909
/* TEST AND SET: atomically exchange the byte at in2 with 0xff; the CC
   is bit 7 (the leftmost bit) of the previous byte value.  */
static DisasJumpType op_ts(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_const_i32(0xff);
    tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
    tcg_gen_extract_i32(cc_op, t1, 7, 1);
    tcg_temp_free_i32(t1);
    set_cc_static(s);
    return DISAS_NEXT;
}
4919
/* UNPACK (decimal).  */
static DisasJumpType op_unpk(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

/* UNPACK ASCII; specification-checks the length before emitting the
   helper call.  */
static DisasJumpType op_unpka(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s, l1) + 1;
    TCGv_i32 l;

    /* The length must not exceed 32 bytes.  */
    if (l1 > 32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_const_i32(l1);
    gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* UNPACK UNICODE; specification-checks the length before emitting the
   helper call.  */
static DisasJumpType op_unpku(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s, l1) + 1;
    TCGv_i32 l;

    /* The length must be even and should not exceed 64 bytes.  */
    if ((l1 & 1) || (l1 > 64)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_const_i32(l1);
    gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}
4961
4962
/*
 * EXCLUSIVE OR (character).  XC of an operand with itself is the
 * classic idiom for clearing storage, so short same-address forms are
 * inlined as stores of zero; everything else goes through the helper,
 * which also produces the CC.
 */
static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
{
    int d1 = get_field(s, d1);
    int d2 = get_field(s, d2);
    int b1 = get_field(s, b1);
    int b2 = get_field(s, b2);
    int l = get_field(s, l1);
    TCGv_i32 t32;

    o->addr1 = get_address(s, 0, b1, d1);

    /* If the addresses are identical, this is a store/memset of zero.  */
    if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
        o->in2 = tcg_const_i64(0);

        /* l is a length-minus-one field; emit stores from widest to
           narrowest to cover l+1 bytes.  */
        l++;
        while (l >= 8) {
            tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
            l -= 8;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 8);
            }
        }
        if (l >= 4) {
            tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
            l -= 4;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 4);
            }
        }
        if (l >= 2) {
            tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
            l -= 2;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 2);
            }
        }
        if (l) {
            tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
        }
        /* The result is all zero, so the CC is always 0.  */
        gen_op_movi_cc(s, 0);
        return DISAS_NEXT;
    }

    /* But in general we'll defer to a helper.  */
    o->in2 = get_address(s, 0, b2, d2);
    t32 = tcg_const_i32(l);
    gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
    tcg_temp_free_i32(t32);
    set_cc_static(s);
    return DISAS_NEXT;
}
5015
/* Generic exclusive or: out = in1 ^ in2.  */
static DisasJumpType op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* XOR IMMEDIATE against a sub-field of the register: insn->data packs
   the bit shift (low byte) and the field width (next byte).  */
static DisasJumpType op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
5037
/*
 * XOR to storage (XI and friends): with the interlocked-access
 * facility this becomes an atomic fetch-xor; otherwise it is a plain
 * load/xor/store sequence.  insn->data carries the MemOp.
 */
static DisasJumpType op_xi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
5058
/* Produce a zero output operand.  */
static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    return DISAS_NEXT;
}

/* Produce a zero output pair; out2 aliases out, and g_out2 is set so
   the generic cleanup doesn't free the shared temp twice.  */
static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    o->out2 = o->out;
    o->g_out2 = true;
    return DISAS_NEXT;
}
5072
5073#ifndef CONFIG_USER_ONLY
/*
 * zPCI instructions.  All are implemented by helpers; those that set a
 * CC have it produced by the helper.
 */

/* CALL LOGICAL PROCESSOR.  */
static DisasJumpType op_clp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));

    gen_helper_clp(cpu_env, r2);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* PCI LOAD.  */
static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));

    gen_helper_pcilg(cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* PCI STORE.  */
static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));

    gen_helper_pcistg(cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* STORE PCI FUNCTION CONTROLS.  */
static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 ar = tcg_const_i32(get_field(s, b2));

    gen_helper_stpcifc(cpu_env, r1, o->addr1, ar);
    tcg_temp_free_i32(ar);
    tcg_temp_free_i32(r1);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SET INTERRUPTION CONTROLS (no CC).  */
static DisasJumpType op_sic(DisasContext *s, DisasOps *o)
{
    gen_helper_sic(cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* REFRESH PCI TRANSLATIONS.  */
static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));

    gen_helper_rpcit(cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* PCI STORE BLOCK.  */
static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    TCGv_i32 ar = tcg_const_i32(get_field(s, b2));

    gen_helper_pcistb(cpu_env, r1, r3, o->addr1, ar);
    tcg_temp_free_i32(ar);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* MODIFY PCI FUNCTION CONTROLS.  */
static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 ar = tcg_const_i32(get_field(s, b2));

    gen_helper_mpcifc(cpu_env, r1, o->addr1, ar);
    tcg_temp_free_i32(ar);
    tcg_temp_free_i32(r1);
    set_cc_static(s);
    return DISAS_NEXT;
}
5163#endif
5164
5165#include "translate_vx.c.inc"
5166
5167/* ====================================================================== */
5168/* The "Cc OUTput" generators.  Given the generated output (and in some cases
5169   the original inputs), update the various cc data structures in order to
5170   be able to compute the new condition code.  */
5171
/* CC from |out|, 32-bit.  */
static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}

/* CC from |out|, 64-bit.  */
static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}

/* CC for signed 32-bit addition, from both inputs and the result.  */
static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}

/* CC for signed 64-bit addition, from both inputs and the result.  */
static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}

/* CC for unsigned 32-bit addition: split the 64-bit result into the
   carry-out (high word -> cc_src) and the value (low word -> cc_dst).  */
static void cout_addu32(DisasContext *s, DisasOps *o)
{
    tcg_gen_shri_i64(cc_src, o->out, 32);
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, cc_dst);
}

/* CC for unsigned 64-bit addition; the carry is already in cc_src.  */
static void cout_addu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, o->out);
}

/* CC for signed 32-bit compare.  */
static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}

/* CC for signed 64-bit compare.  */
static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}

/* CC for unsigned 32-bit compare.  */
static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}

/* CC for unsigned 64-bit compare.  */
static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}

/* CC from a short BFP result.  */
static void cout_f32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
}

/* CC from a long BFP result.  */
static void cout_f64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
}

/* CC from an extended BFP result pair.  */
static void cout_f128(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
}

/* CC from -|out|, 32-bit.  */
static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}

/* CC from -|out|, 64-bit.  */
static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}

/* CC from a 32-bit complement (LOAD COMPLEMENT style) result.  */
static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}

/* CC from a 64-bit complement result.  */
static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}
5258
5259static void cout_nz32(DisasContext *s, DisasOps *o)
5260{
5261    tcg_gen_ext32u_i64(cc_dst, o->out);
5262    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
5263}
5264
5265static void cout_nz64(DisasContext *s, DisasOps *o)
5266{
5267    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
5268}
5269
5270static void cout_s32(DisasContext *s, DisasOps *o)
5271{
5272    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
5273}
5274
5275static void cout_s64(DisasContext *s, DisasOps *o)
5276{
5277    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
5278}
5279
5280static void cout_subs32(DisasContext *s, DisasOps *o)
5281{
5282    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
5283}
5284
5285static void cout_subs64(DisasContext *s, DisasOps *o)
5286{
5287    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
5288}
5289
5290static void cout_subu32(DisasContext *s, DisasOps *o)
5291{
5292    tcg_gen_sari_i64(cc_src, o->out, 32);
5293    tcg_gen_ext32u_i64(cc_dst, o->out);
5294    gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, cc_dst);
5295}
5296
5297static void cout_subu64(DisasContext *s, DisasOps *o)
5298{
5299    gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, o->out);
5300}
5301
5302static void cout_tm32(DisasContext *s, DisasOps *o)
5303{
5304    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
5305}
5306
5307static void cout_tm64(DisasContext *s, DisasOps *o)
5308{
5309    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
5310}
5311
5312static void cout_muls32(DisasContext *s, DisasOps *o)
5313{
5314    gen_op_update1_cc_i64(s, CC_OP_MULS_32, o->out);
5315}
5316
5317static void cout_muls64(DisasContext *s, DisasOps *o)
5318{
5319    /* out contains "high" part, out2 contains "low" part of 128 bit result */
5320    gen_op_update2_cc_i64(s, CC_OP_MULS_64, o->out, o->out2);
5321}
5322
5323/* ====================================================================== */
5324/* The "PREParation" generators.  These initialize the DisasOps.OUT fields
5325   with the TCG register to which we will write.  Used in combination with
5326   the "wout" generators, in some cases we need a new temporary, and in
5327   some cases we can write to a TCG global.  */
5328
/* Allocate a fresh temporary for the single output.  */
static void prep_new(DisasContext *s, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}
#define SPEC_prep_new 0

/* Allocate fresh temporaries for an output pair.  */
static void prep_new_P(DisasContext *s, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}
#define SPEC_prep_new_P 0

/* Write directly into the r1 global; g_out marks out as a global so it
   is not freed like a temporary.  */
static void prep_r1(DisasContext *s, DisasOps *o)
{
    o->out = regs[get_field(s, r1)];
    o->g_out = true;
}
#define SPEC_prep_r1 0

/* Write directly into the even/odd register pair r1/r1+1.  */
static void prep_r1_P(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->out = regs[r1];
    o->out2 = regs[r1 + 1];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_r1_P SPEC_r1_even

/* Whenever we need x1 in addition to other inputs, we'll load it to out/out2 */
static void prep_x1(DisasContext *s, DisasOps *o)
{
    o->out = load_freg(get_field(s, r1));
    o->out2 = load_freg(get_field(s, r1) + 2);
}
#define SPEC_prep_x1 SPEC_r1_f128
5365
5366/* ====================================================================== */
5367/* The "Write OUTput" generators.  These generally perform some non-trivial
5368   copy of data to TCG globals, or to main memory.  The trivial cases are
5369   generally handled by having a "prep" generator install the TCG global
5370   as the destination of the operation.  */
5371
static void wout_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->out);
}
#define SPEC_wout_r1 0

static void wout_out2_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->out2);
}
#define SPEC_wout_out2_r1 0

/* Replace only the low byte of r1, preserving the rest of the register.  */
static void wout_r1_8(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}
#define SPEC_wout_r1_8 0

/* Replace only the low halfword of r1.  */
static void wout_r1_16(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}
#define SPEC_wout_r1_16 0

static void wout_r1_32(DisasContext *s, DisasOps *o)
{
    store_reg32_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_r1_32 0

static void wout_r1_32h(DisasContext *s, DisasOps *o)
{
    store_reg32h_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_r1_32h 0

/* Store out/out2 into the low halves of the even/odd pair r1/r1+1.  */
static void wout_r1_P32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64(r1 + 1, o->out2);
}
#define SPEC_wout_r1_P32 SPEC_r1_even

/* Split a 64-bit result across the even/odd pair: low half to r1+1,
   high half to r1.  Note: clobbers o->out with the shifted value.  */
static void wout_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    store_reg32_i64(r1 + 1, o->out);
    tcg_gen_shri_i64(o->out, o->out, 32);
    store_reg32_i64(r1, o->out);
}
#define SPEC_wout_r1_D32 SPEC_r1_even

static void wout_r3_P32(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    store_reg32_i64(r3, o->out);
    store_reg32_i64(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P32 SPEC_r3_even

static void wout_r3_P64(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    store_reg(r3, o->out);
    store_reg(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P64 SPEC_r3_even

static void wout_e1(DisasContext *s, DisasOps *o)
{
    store_freg32_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_e1 0

static void wout_f1(DisasContext *s, DisasOps *o)
{
    store_freg(get_field(s, r1), o->out);
}
#define SPEC_wout_f1 0

/* Store a 128-bit FP result into the register pair f1/f1+2.  */
static void wout_x1(DisasContext *s, DisasOps *o)
{
    int f1 = get_field(s, r1);
    store_freg(f1, o->out);
    store_freg(f1 + 2, o->out2);
}
#define SPEC_wout_x1 SPEC_r1_f128

/* Store only when r1 and r2 name different registers.  */
static void wout_cond_r1r2_32(DisasContext *s, DisasOps *o)
{
    if (get_field(s, r1) != get_field(s, r2)) {
        store_reg32_i64(get_field(s, r1), o->out);
    }
}
#define SPEC_wout_cond_r1r2_32 0

static void wout_cond_e1e2(DisasContext *s, DisasOps *o)
{
    if (get_field(s, r1) != get_field(s, r2)) {
        store_freg32_i64(get_field(s, r1), o->out);
    }
}
#define SPEC_wout_cond_e1e2 0

static void wout_m1_8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_8 0

static void wout_m1_16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_16 0

#ifndef CONFIG_USER_ONLY
/* Alignment-checked store variant (system mode only).  */
static void wout_m1_16a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN);
}
#define SPEC_wout_m1_16a 0
#endif

static void wout_m1_32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_32 0

#ifndef CONFIG_USER_ONLY
static void wout_m1_32a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_wout_m1_32a 0
#endif

static void wout_m1_64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_64 0

#ifndef CONFIG_USER_ONLY
static void wout_m1_64a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
}
#define SPEC_wout_m1_64a 0
#endif

/* Store to the address computed into in2 (second-operand address).  */
static void wout_m2_32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
}
#define SPEC_wout_m2_32 0

static void wout_in2_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->in2);
}
#define SPEC_wout_in2_r1 0

static void wout_in2_r1_32(DisasContext *s, DisasOps *o)
{
    store_reg32_i64(get_field(s, r1), o->in2);
}
#define SPEC_wout_in2_r1_32 0
5544
5545/* ====================================================================== */
5546/* The "INput 1" generators.  These load the first operand to an insn.  */
5547
static void in1_r1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r1));
}
#define SPEC_in1_r1 0

/* Use the r1 global directly; g_in1 marks in1 as a global, not a temp.  */
static void in1_r1_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_r1_o 0

static void in1_r1_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1)]);
}
#define SPEC_in1_r1_32s 0

static void in1_r1_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1)]);
}
#define SPEC_in1_r1_32u 0

/* High 32 bits of r1, shifted down.  */
static void in1_r1_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r1)], 32);
}
#define SPEC_in1_r1_sr32 0

/* Odd register of the even/odd pair r1/r1+1.  */
static void in1_r1p1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r1) + 1);
}
#define SPEC_in1_r1p1 SPEC_r1_even

static void in1_r1p1_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r1) + 1];
    o->g_in1 = true;
}
#define SPEC_in1_r1p1_o SPEC_r1_even

static void in1_r1p1_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1) + 1]);
}
#define SPEC_in1_r1p1_32s SPEC_r1_even

static void in1_r1p1_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1) + 1]);
}
#define SPEC_in1_r1p1_32u SPEC_r1_even

/* 64-bit value from the even/odd pair: r1 is the high half, r1+1 low.  */
static void in1_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}
#define SPEC_in1_r1_D32 SPEC_r1_even

static void in1_r2(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r2));
}
#define SPEC_in1_r2 0

static void in1_r2_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r2)], 32);
}
#define SPEC_in1_r2_sr32 0

static void in1_r2_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r2)]);
}
#define SPEC_in1_r2_32u 0

static void in1_r3(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r3));
}
#define SPEC_in1_r3 0

static void in1_r3_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_r3_o 0

static void in1_r3_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r3)]);
}
#define SPEC_in1_r3_32s 0

static void in1_r3_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r3)]);
}
#define SPEC_in1_r3_32u 0

static void in1_r3_D32(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
}
#define SPEC_in1_r3_D32 SPEC_r3_even

static void in1_e1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(s, r1));
}
#define SPEC_in1_e1 0

static void in1_f1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r1));
}
#define SPEC_in1_f1 0

/* Load the high double word of an extended (128-bit) format FP number */
static void in1_x2h(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r2));
}
#define SPEC_in1_x2h SPEC_r2_f128

static void in1_f3(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r3));
}
#define SPEC_in1_f3 0

/* Compute the first-operand effective address (b1 + d1) into addr1.  */
static void in1_la1(DisasContext *s, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
}
#define SPEC_in1_la1 0

/* Compute the second-operand effective address (x2 + b2 + d2) into addr1.  */
static void in1_la2(DisasContext *s, DisasOps *o)
{
    int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
}
#define SPEC_in1_la2 0

/* The in1_m1_* helpers compute addr1 via in1_la1 and then load from it.  */
static void in1_m1_8u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_8u 0

static void in1_m1_16s(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16s 0

static void in1_m1_16u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16u 0

static void in1_m1_32s(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32s 0

static void in1_m1_32u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32u 0

static void in1_m1_64(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_64 0
5757
5758/* ====================================================================== */
5759/* The "INput 2" generators.  These load the second operand to an insn.  */
5760
/* Use the r1 global directly; g_in2 marks in2 as a global, not a temp.  */
static void in2_r1_o(DisasContext *s, DisasOps *o)
{
    o->in2 = regs[get_field(s, r1)];
    o->g_in2 = true;
}
#define SPEC_in2_r1_o 0

static void in2_r1_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r1)]);
}
#define SPEC_in2_r1_16u 0

static void in2_r1_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r1)]);
}
#define SPEC_in2_r1_32u 0

/* 64-bit value from the even/odd pair: r1 is the high half, r1+1 low.  */
static void in2_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
}
#define SPEC_in2_r1_D32 SPEC_r1_even

static void in2_r2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_reg(get_field(s, r2));
}
#define SPEC_in2_r2 0

static void in2_r2_o(DisasContext *s, DisasOps *o)
{
    o->in2 = regs[get_field(s, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_r2_o 0

/* Load r2 only when nonzero; otherwise o->in2 is left unset.  */
static void in2_r2_nz(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}
#define SPEC_in2_r2_nz 0

static void in2_r2_8s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_8s 0

static void in2_r2_8u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_8u 0

static void in2_r2_16s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_16s 0

static void in2_r2_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_16u 0

static void in2_r3(DisasContext *s, DisasOps *o)
{
    o->in2 = load_reg(get_field(s, r3));
}
#define SPEC_in2_r3 0

static void in2_r3_sr32(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(s, r3)], 32);
}
#define SPEC_in2_r3_sr32 0

static void in2_r3_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r3)]);
}
#define SPEC_in2_r3_32u 0

static void in2_r2_32s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_32s 0

static void in2_r2_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_32u 0

static void in2_r2_sr32(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(s, r2)], 32);
}
#define SPEC_in2_r2_sr32 0

static void in2_e2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(s, r2));
}
#define SPEC_in2_e2 0

static void in2_f2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg(get_field(s, r2));
}
#define SPEC_in2_f2 0

/* Load the low double word of an extended (128-bit) format FP number */
static void in2_x2l(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg(get_field(s, r2) + 2);
}
#define SPEC_in2_x2l SPEC_r2_f128

/* Register value used as an address, wrapped per the addressing mode.  */
static void in2_ra2(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);

    /* Note: *don't* treat !r2 as 0, use the reg value. */
    o->in2 = tcg_temp_new_i64();
    gen_addi_and_wrap_i64(s, o->in2, regs[r2], 0);
}
#define SPEC_in2_ra2 0

/* Second-operand effective address x2 + b2 + d2.  */
static void in2_a2(DisasContext *s, DisasOps *o)
{
    int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
    o->in2 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
}
#define SPEC_in2_a2 0

/* PC-relative address: current insn address plus doubled signed i2.  */
static void in2_ri2(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->base.pc_next + (int64_t)get_field(s, i2) * 2);
}
#define SPEC_in2_ri2 0

/* Shift amounts, masked to the type width by help_l2_shift.  */
static void in2_sh32(DisasContext *s, DisasOps *o)
{
    help_l2_shift(s, o, 31);
}
#define SPEC_in2_sh32 0

static void in2_sh64(DisasContext *s, DisasOps *o)
{
    help_l2_shift(s, o, 63);
}
#define SPEC_in2_sh64 0
5934
/* The in2_m2_* helpers compute the a2 address into in2 and then load
   through it, reusing in2 as both address and result.  */
static void in2_m2_8u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_8u 0

static void in2_m2_16s(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16s 0

static void in2_m2_16u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16u 0

static void in2_m2_32s(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32s 0

static void in2_m2_32u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32u 0

#ifndef CONFIG_USER_ONLY
/* Alignment-checked load variant (system mode only).  */
static void in2_m2_32ua(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_in2_m2_32ua 0
#endif

static void in2_m2_64(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_64 0

/* As in2_m2_64, but re-wrap the loaded value per the addressing mode.  */
static void in2_m2_64w(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
    gen_addi_and_wrap_i64(s, o->in2, o->in2, 0);
}
#define SPEC_in2_m2_64w 0

#ifndef CONFIG_USER_ONLY
static void in2_m2_64a(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEQ | MO_ALIGN);
}
#define SPEC_in2_m2_64a 0
#endif

/* The in2_mri2_* helpers load from a PC-relative address (in2_ri2).  */
static void in2_mri2_16u(DisasContext *s, DisasOps *o)
{
    in2_ri2(s, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_16u 0

static void in2_mri2_32s(DisasContext *s, DisasOps *o)
{
    in2_ri2(s, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32s 0

static void in2_mri2_32u(DisasContext *s, DisasOps *o)
{
    in2_ri2(s, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32u 0

static void in2_mri2_64(DisasContext *s, DisasOps *o)
{
    in2_ri2(s, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_64 0

/* Immediate operands, with various zero-extensions of the i2 field.  */
static void in2_i2(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_const_i64(get_field(s, i2));
}
#define SPEC_in2_i2 0

static void in2_i2_8u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint8_t)get_field(s, i2));
}
#define SPEC_in2_i2_8u 0

static void in2_i2_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint16_t)get_field(s, i2));
}
#define SPEC_in2_i2_16u 0

static void in2_i2_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint32_t)get_field(s, i2));
}
#define SPEC_in2_i2_32u 0

/* Immediate shifted left by the per-insn data value.  */
static void in2_i2_16u_shl(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(s, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0

static void in2_i2_32u_shl(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(s, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_32u_shl 0

#ifndef CONFIG_USER_ONLY
/* The raw instruction bits, for helpers that need the whole insn.  */
static void in2_insn(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->fields.raw_insn);
}
#define SPEC_in2_insn 0
#endif
6076
6077/* ====================================================================== */
6078
6079/* Find opc within the table of insns.  This is formulated as a switch
6080   statement so that (1) we get compile-time notice of cut-paste errors
6081   for duplicated opcodes, and (2) the compiler generates the binary
6082   search tree, rather than us having to post-process the table.  */
6083
/* C/D/F are shorthands for E with defaulted data (D) and flags (FL).  */
#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0)

#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0)

#define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \
    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL)

/* First expansion of insn-data.def: build an enum of insn indices.  */
#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM,

enum DisasInsnEnum {
#include "insn-data.def"
};

/* Second expansion: build the DisasInsn initializer for each entry.  */
#undef E
#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) {                   \
    .opc = OPC,                                                             \
    .flags = FL,                                                            \
    .fmt = FMT_##FT,                                                        \
    .fac = FAC_##FC,                                                        \
    .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
    .name = #NM,                                                            \
    .help_in1 = in1_##I1,                                                   \
    .help_in2 = in2_##I2,                                                   \
    .help_prep = prep_##P,                                                  \
    .help_wout = wout_##W,                                                  \
    .help_cout = cout_##CC,                                                 \
    .help_op = op_##OP,                                                     \
    .data = D                                                               \
 },

/* Allow 0 to be used for NULL in the table below.  */
#define in1_0  NULL
#define in2_0  NULL
#define prep_0  NULL
#define wout_0  NULL
#define cout_0  NULL
#define op_0  NULL

#define SPEC_in1_0 0
#define SPEC_in2_0 0
#define SPEC_prep_0 0
#define SPEC_wout_0 0
6128
6129/* Give smaller names to the various facilities.  */
6130#define FAC_Z           S390_FEAT_ZARCH
6131#define FAC_CASS        S390_FEAT_COMPARE_AND_SWAP_AND_STORE
6132#define FAC_DFP         S390_FEAT_DFP
6133#define FAC_DFPR        S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* DFP-rounding */
6134#define FAC_DO          S390_FEAT_STFLE_45 /* distinct-operands */
6135#define FAC_EE          S390_FEAT_EXECUTE_EXT
6136#define FAC_EI          S390_FEAT_EXTENDED_IMMEDIATE
6137#define FAC_FPE         S390_FEAT_FLOATING_POINT_EXT
6138#define FAC_FPSSH       S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPS-sign-handling */
6139#define FAC_FPRGR       S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPR-GR-transfer */
6140#define FAC_GIE         S390_FEAT_GENERAL_INSTRUCTIONS_EXT
6141#define FAC_HFP_MA      S390_FEAT_HFP_MADDSUB
6142#define FAC_HW          S390_FEAT_STFLE_45 /* high-word */
6143#define FAC_IEEEE_SIM   S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* IEEE-exception-simulation */
6144#define FAC_MIE         S390_FEAT_STFLE_49 /* misc-instruction-extensions */
6145#define FAC_LAT         S390_FEAT_STFLE_49 /* load-and-trap */
6146#define FAC_LOC         S390_FEAT_STFLE_45 /* load/store on condition 1 */
6147#define FAC_LOC2        S390_FEAT_STFLE_53 /* load/store on condition 2 */
6148#define FAC_LD          S390_FEAT_LONG_DISPLACEMENT
6149#define FAC_PC          S390_FEAT_STFLE_45 /* population count */
6150#define FAC_SCF         S390_FEAT_STORE_CLOCK_FAST
6151#define FAC_SFLE        S390_FEAT_STFLE
6152#define FAC_ILA         S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
6153#define FAC_MVCOS       S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
6154#define FAC_LPP         S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
6155#define FAC_DAT_ENH     S390_FEAT_DAT_ENH
6156#define FAC_E2          S390_FEAT_EXTENDED_TRANSLATION_2
6157#define FAC_EH          S390_FEAT_STFLE_49 /* execution-hint */
6158#define FAC_PPA         S390_FEAT_STFLE_49 /* processor-assist */
6159#define FAC_LZRB        S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
6160#define FAC_ETF3        S390_FEAT_EXTENDED_TRANSLATION_3
6161#define FAC_MSA         S390_FEAT_MSA /* message-security-assist facility */
6162#define FAC_MSA3        S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
6163#define FAC_MSA4        S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
6164#define FAC_MSA5        S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
6165#define FAC_MSA8        S390_FEAT_MSA_EXT_8 /* msa-extension-8 facility */
6166#define FAC_ECT         S390_FEAT_EXTRACT_CPU_TIME
6167#define FAC_PCI         S390_FEAT_ZPCI /* z/PCI facility */
6168#define FAC_AIS         S390_FEAT_ADAPTER_INT_SUPPRESSION
6169#define FAC_V           S390_FEAT_VECTOR /* vector facility */
6170#define FAC_VE          S390_FEAT_VECTOR_ENH /* vector enhancements facility 1 */
6171#define FAC_MIE2        S390_FEAT_MISC_INSTRUCTION_EXT2 /* miscellaneous-instruction-extensions facility 2 */
6172
/* One DisasInsn descriptor per entry of insn-data.def.  */
static const DisasInsn insn_info[] = {
#include "insn-data.def"
};
6176
6177#undef E
6178#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \
6179    case OPC: return &insn_info[insn_ ## NM];
6180
/* Map an opcode to its insn_info entry via the generated switch;
   returns NULL for an unknown opcode.  */
static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}
6189
6190#undef F
6191#undef E
6192#undef D
6193#undef C
6194
6195/* Extract a field from the insn.  The INSN should be left-aligned in
6196   the uint64_t so that we can more easily utilize the big-bit-endian
6197   definitions we extract from the Principals of Operation.  */
6198
/*
 * Extract one operand field F from the left-aligned instruction word INSN
 * into O.  Field type selects post-processing: 0 = unsigned, 1 = signed,
 * 2 = 20-bit displacement split into dl (low 12) and dh (high 8),
 * 3 = vector register number whose fifth (MSB) bit lives in the RXB byte
 * (instruction bits 36-39).
 */
static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
{
    uint32_t r, m;

    if (f->size == 0) {
        return;
    }

    /* Zero extract the field from the insn.  */
    r = (insn << f->beg) >> (64 - f->size);

    /* Sign-extend, or un-swap the field as necessary.  */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        assert(f->size <= 32);
        /* Sign-extend via xor/subtract of the sign bit.  */
        m = 1u << (f->size - 1);
        r = (r ^ m) - m;
        break;
    case 2: /* dl+dh split, signed 20 bit. */
        /* Low 8 bits extracted are dh; shift them above the 12-bit dl.  */
        r = ((int8_t)r << 12) | (r >> 8);
        break;
    case 3: /* MSB stored in RXB */
        g_assert(f->size == 4);
        /* Which RXB bit applies depends on the field's position.  */
        switch (f->beg) {
        case 8:
            r |= extract64(insn, 63 - 36, 1) << 4;
            break;
        case 12:
            r |= extract64(insn, 63 - 37, 1) << 4;
            break;
        case 16:
            r |= extract64(insn, 63 - 38, 1) << 4;
            break;
        case 32:
            r |= extract64(insn, 63 - 39, 1) << 4;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    default:
        abort();
    }

    /*
     * Validate that the "compressed" encoding we selected above is valid.
     * I.e. we haven't made two different original fields overlap.
     */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;

    o->c[f->indexC] = r;
}
6255
/* Lookup the insn at the current PC, extracting the operands into O and
   returning the info struct for the insn.  Returns NULL for invalid insn.  */

static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s)
{
    uint64_t insn, pc = s->base.pc_next;
    int op, op2, ilen;
    const DisasInsn *info;

    if (unlikely(s->ex_value)) {
        /* Drop the EX data now, so that it's clear on exception paths.  */
        TCGv_i64 zero = tcg_const_i64(0);
        tcg_gen_st_i64(zero, cpu_env, offsetof(CPUS390XState, ex_value));
        tcg_temp_free_i64(zero);

        /* Extract the values saved by EXECUTE: insn text in the high
           48 bits, instruction length in the low 4 bits.  */
        insn = s->ex_value & 0xffffffffffff0000ull;
        ilen = s->ex_value & 0xf;
        op = insn >> 56;
    } else {
        /* Read the insn from guest memory, left-aligning it in INSN.  */
        insn = ld_code2(env, pc);
        op = (insn >> 8) & 0xff;
        ilen = get_ilen(op);
        switch (ilen) {
        case 2:
            insn = insn << 48;
            break;
        case 4:
            insn = ld_code4(env, pc) << 32;
            break;
        case 6:
            insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
            break;
        default:
            g_assert_not_reached();
        }
    }
    /* Record the next sequential PC and this insn's length.  */
    s->pc_tmp = s->base.pc_next + ilen;
    s->ilen = ilen;

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0. */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE, IE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        /* Secondary opcode is the second byte (bits 8-15).  */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        /* Secondary opcode is the low nibble of the second byte.  */
        op2 = (insn << 12) >> 60;
        break;
    case 0xc5: /* MII */
    case 0xc7: /* SMI */
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        /* No secondary opcode at all for these formats.  */
        op2 = 0;
        break;
    default:
        /* Default: secondary opcode is the byte at bit 40.  */
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(&s->fields, 0, sizeof(s->fields));
    s->fields.raw_insn = insn;
    s->fields.op = op;
    s->fields.op2 = op2;

    /* Lookup the instruction.  */
    info = lookup_opc(op << 8 | op2);
    s->insn = info;

    /* If we found it, extract the operands.  */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(&s->fields, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
6359
/* True if REG is an additional floating-point register, i.e. anything
   other than the original four FPRs 0, 2, 4 and 6.  */
static bool is_afp_reg(int reg)
{
    return (reg & 1) != 0 || reg > 6;
}
6364
/* True if REG can start a 128-bit FP register pair.
   Valid starts are 0,1,4,5,8,9,12,13 — exactly those with bit 1 clear.  */
static bool is_fp_pair(int reg)
{
    return (reg & 0x2) == 0;
}
6370
/* Decode and emit TCG code for the single insn at s->base.pc_next.
   Returns the DisasJumpType describing how translation continues.  */
static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    DisasJumpType ret = DISAS_NEXT;
    DisasOps o = {};
    bool icount = false;

    /* Search for the insn in the table.  */
    insn = extract_insn(env, s);

    /* Emit insn_start now that we know the ILEN.  */
    tcg_gen_insn_start(s->base.pc_next, s->cc_op, s->ilen);

    /* Not found means unimplemented/illegal opcode.  */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      s->fields.op, s->fields.op2);
        gen_illegal_opcode(s);
        ret = DISAS_NORETURN;
        goto out;
    }

#ifndef CONFIG_USER_ONLY
    /* With PER tracing active, report this instruction fetch.  */
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 addr = tcg_const_i64(s->base.pc_next);
        gen_helper_per_ifetch(cpu_env, addr);
        tcg_temp_free_i64(addr);
    }
#endif

    /* process flags */
    if (insn->flags) {
        /* privileged instruction */
        if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) {
            gen_program_exception(s, PGM_PRIVILEGED);
            ret = DISAS_NORETURN;
            goto out;
        }

        /* if AFP is not enabled, instructions and registers are forbidden */
        if (!(s->base.tb->flags & FLAG_MASK_AFP)) {
            uint8_t dxc = 0;  /* data-exception code; 0 means no exception */

            if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(s, r1))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(s, r2))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(s, r3))) {
                dxc = 1;
            }
            if (insn->flags & IF_BFP) {
                dxc = 2;
            }
            if (insn->flags & IF_DFP) {
                dxc = 3;
            }
            if (insn->flags & IF_VEC) {
                dxc = 0xfe;
            }
            if (dxc) {
                gen_data_exception(dxc);
                ret = DISAS_NORETURN;
                goto out;
            }
        }

        /* if vector instructions not enabled, executing them is forbidden */
        if (insn->flags & IF_VEC) {
            if (!((s->base.tb->flags & FLAG_MASK_VECTOR))) {
                gen_data_exception(0xfe);
                ret = DISAS_NORETURN;
                goto out;
            }
        }

        /* input/output is the special case for icount mode */
        if (unlikely(insn->flags & IF_IO)) {
            icount = tb_cflags(s->base.tb) & CF_USE_ICOUNT;
            if (icount) {
                gen_io_start();
            }
        }
    }

    /* Check for insn specification exceptions.  */
    if (insn->spec) {
        if ((insn->spec & SPEC_r1_even && get_field(s, r1) & 1) ||
            (insn->spec & SPEC_r2_even && get_field(s, r2) & 1) ||
            (insn->spec & SPEC_r3_even && get_field(s, r3) & 1) ||
            (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(s, r1))) ||
            (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(s, r2)))) {
            gen_program_exception(s, PGM_SPECIFICATION);
            ret = DISAS_NORETURN;
            goto out;
        }
    }

    /* Implement the instruction.  Each helper stage is optional and runs
       only when the decode table provides it.  */
    if (insn->help_in1) {
        insn->help_in1(s, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    /* Skip write-back and CC computation when the op did not complete.  */
    if (ret != DISAS_NORETURN) {
        if (insn->help_wout) {
            insn->help_wout(s, &o);
        }
        if (insn->help_cout) {
            insn->help_cout(s, &o);
        }
    }

    /* Free any temporaries created by the helpers.  */
    if (o.out && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (o.out2 && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (o.in1 && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (o.in2 && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (o.addr1) {
        tcg_temp_free_i64(o.addr1);
    }

    /* io should be the last instruction in tb when icount is enabled */
    if (unlikely(icount && ret == DISAS_NEXT)) {
        ret = DISAS_PC_STALE;
    }

#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        /* An exception might be triggered, save PSW if not already done.  */
        if (ret == DISAS_NEXT || ret == DISAS_PC_STALE) {
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
        }

        /* Call the helper to check for a possible PER exception.  */
        gen_helper_per_check_exception(cpu_env);
    }
#endif

out:
    /* Advance to the next instruction.  */
    s->base.pc_next = s->pc_tmp;
    return ret;
}
6531
6532static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
6533{
6534    DisasContext *dc = container_of(dcbase, DisasContext, base);
6535
6536    /* 31-bit mode */
6537    if (!(dc->base.tb->flags & FLAG_MASK_64)) {
6538        dc->base.pc_first &= 0x7fffffff;
6539        dc->base.pc_next = dc->base.pc_first;
6540    }
6541
6542    dc->cc_op = CC_OP_DYNAMIC;
6543    dc->ex_value = dc->base.tb->cs_base;
6544    dc->do_debug = dc->base.singlestep_enabled;
6545}
6546
static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
    /* No TB-start work is needed for s390x.  */
}
6550
static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    /* insn_start is emitted from translate_one, once the ILEN is known.  */
}
6554
6555static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
6556{
6557    CPUS390XState *env = cs->env_ptr;
6558    DisasContext *dc = container_of(dcbase, DisasContext, base);
6559
6560    dc->base.is_jmp = translate_one(env, dc);
6561    if (dc->base.is_jmp == DISAS_NEXT) {
6562        uint64_t page_start;
6563
6564        page_start = dc->base.pc_first & TARGET_PAGE_MASK;
6565        if (dc->base.pc_next - page_start >= TARGET_PAGE_SIZE || dc->ex_value) {
6566            dc->base.is_jmp = DISAS_TOO_MANY;
6567        }
6568    }
6569}
6570
/* Translator hook: emit the end-of-TB sequence appropriate for the way
   the TB terminated.  The cases deliberately fall through: each later
   case needs progressively less state written back to env.  */
static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    switch (dc->base.is_jmp) {
    case DISAS_GOTO_TB:
    case DISAS_NORETURN:
        /* The TB has already been ended by the insn itself.  */
        break;
    case DISAS_TOO_MANY:
    case DISAS_PC_STALE:
    case DISAS_PC_STALE_NOCHAIN:
        update_psw_addr(dc);
        /* FALLTHRU */
    case DISAS_PC_UPDATED:
        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
           cc op type is in env */
        update_cc_op(dc);
        /* FALLTHRU */
    case DISAS_PC_CC_UPDATED:
        /* Exit the TB, either by raising a debug exception or by return.  */
        if (dc->do_debug) {
            gen_exception(EXCP_DEBUG);
        } else if ((dc->base.tb->flags & FLAG_MASK_PER) ||
                   dc->base.is_jmp == DISAS_PC_STALE_NOCHAIN) {
            /* PER and NOCHAIN exits must not chain to the next TB.  */
            tcg_gen_exit_tb(NULL, 0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        break;
    default:
        g_assert_not_reached();
    }
}
6604
6605static void s390x_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
6606{
6607    DisasContext *dc = container_of(dcbase, DisasContext, base);
6608
6609    if (unlikely(dc->ex_value)) {
6610        /* ??? Unfortunately log_target_disas can't use host memory.  */
6611        qemu_log("IN: EXECUTE %016" PRIx64, dc->ex_value);
6612    } else {
6613        qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
6614        log_target_disas(cs, dc->base.pc_first, dc->base.tb->size);
6615    }
6616}
6617
/* Hooks wiring the s390x front end into the generic translator loop.  */
static const TranslatorOps s390x_tr_ops = {
    .init_disas_context = s390x_tr_init_disas_context,
    .tb_start           = s390x_tr_tb_start,
    .insn_start         = s390x_tr_insn_start,
    .translate_insn     = s390x_tr_translate_insn,
    .tb_stop            = s390x_tr_tb_stop,
    .disas_log          = s390x_tr_disas_log,
};
6626
/* Main translation entry point: drive the generic translator loop with
   the s390x hooks to fill TB with TCG ops.  */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
{
    DisasContext dc;

    translator_loop(&s390x_tr_ops, &dc.base, cs, tb, max_insns);
}
6633
6634void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
6635                          target_ulong *data)
6636{
6637    int cc_op = data[1];
6638
6639    env->psw.addr = data[0];
6640
6641    /* Update the CC opcode if it is not already up-to-date.  */
6642    if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
6643        env->cc_op = cc_op;
6644    }
6645
6646    /* Record ILEN.  */
6647    env->int_pgm_ilen = data[2];
6648}
6649