/* qemu/target/s390x/tcg/translate.c */
   1/*
   2 *  S/390 translation
   3 *
   4 *  Copyright (c) 2009 Ulrich Hecht
   5 *  Copyright (c) 2010 Alexander Graf
   6 *
   7 * This library is free software; you can redistribute it and/or
   8 * modify it under the terms of the GNU Lesser General Public
   9 * License as published by the Free Software Foundation; either
  10 * version 2.1 of the License, or (at your option) any later version.
  11 *
  12 * This library is distributed in the hope that it will be useful,
  13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  15 * Lesser General Public License for more details.
  16 *
  17 * You should have received a copy of the GNU Lesser General Public
  18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
  19 */
  20
  21/* #define DEBUG_INLINE_BRANCHES */
  22#define S390X_DEBUG_DISAS
  23/* #define S390X_DEBUG_DISAS_VERBOSE */
  24
  25#ifdef S390X_DEBUG_DISAS_VERBOSE
  26#  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
  27#else
  28#  define LOG_DISAS(...) do { } while (0)
  29#endif
  30
  31#include "qemu/osdep.h"
  32#include "cpu.h"
  33#include "s390x-internal.h"
  34#include "disas/disas.h"
  35#include "exec/exec-all.h"
  36#include "tcg/tcg-op.h"
  37#include "tcg/tcg-op-gvec.h"
  38#include "qemu/log.h"
  39#include "qemu/host-utils.h"
  40#include "exec/cpu_ldst.h"
  41#include "exec/gen-icount.h"
  42#include "exec/helper-proto.h"
  43#include "exec/helper-gen.h"
  44
  45#include "exec/translator.h"
  46#include "exec/log.h"
  47#include "qemu/atomic128.h"
  48
  49
  50/* Information that (most) every instruction needs to manipulate.  */
  51typedef struct DisasContext DisasContext;
  52typedef struct DisasInsn DisasInsn;
  53typedef struct DisasFields DisasFields;
  54
  55/*
  56 * Define a structure to hold the decoded fields.  We'll store each inside
  57 * an array indexed by an enum.  In order to conserve memory, we'll arrange
  58 * for fields that do not exist at the same time to overlap, thus the "C"
  59 * for compact.  For checking purposes there is an "O" for original index
  60 * as well that will be applied to availability bitmaps.
  61 */
  62
  63enum DisasFieldIndexO {
  64    FLD_O_r1,
  65    FLD_O_r2,
  66    FLD_O_r3,
  67    FLD_O_m1,
  68    FLD_O_m3,
  69    FLD_O_m4,
  70    FLD_O_m5,
  71    FLD_O_m6,
  72    FLD_O_b1,
  73    FLD_O_b2,
  74    FLD_O_b4,
  75    FLD_O_d1,
  76    FLD_O_d2,
  77    FLD_O_d4,
  78    FLD_O_x2,
  79    FLD_O_l1,
  80    FLD_O_l2,
  81    FLD_O_i1,
  82    FLD_O_i2,
  83    FLD_O_i3,
  84    FLD_O_i4,
  85    FLD_O_i5,
  86    FLD_O_v1,
  87    FLD_O_v2,
  88    FLD_O_v3,
  89    FLD_O_v4,
  90};
  91
  92enum DisasFieldIndexC {
  93    FLD_C_r1 = 0,
  94    FLD_C_m1 = 0,
  95    FLD_C_b1 = 0,
  96    FLD_C_i1 = 0,
  97    FLD_C_v1 = 0,
  98
  99    FLD_C_r2 = 1,
 100    FLD_C_b2 = 1,
 101    FLD_C_i2 = 1,
 102
 103    FLD_C_r3 = 2,
 104    FLD_C_m3 = 2,
 105    FLD_C_i3 = 2,
 106    FLD_C_v3 = 2,
 107
 108    FLD_C_m4 = 3,
 109    FLD_C_b4 = 3,
 110    FLD_C_i4 = 3,
 111    FLD_C_l1 = 3,
 112    FLD_C_v4 = 3,
 113
 114    FLD_C_i5 = 4,
 115    FLD_C_d1 = 4,
 116    FLD_C_m5 = 4,
 117
 118    FLD_C_d2 = 5,
 119    FLD_C_m6 = 5,
 120
 121    FLD_C_d4 = 6,
 122    FLD_C_x2 = 6,
 123    FLD_C_l2 = 6,
 124    FLD_C_v2 = 6,
 125
 126    NUM_C_FIELD = 7
 127};
 128
/*
 * Decoded fields of one instruction.  raw_insn holds the raw instruction
 * bytes, op/op2 the (extended) opcode.  c[] stores field values indexed
 * by the compact index; presentC and presentO are bitmaps telling which
 * compact/original field indices are valid (see have_field1/get_field1).
 */
struct DisasFields {
    uint64_t raw_insn;
    unsigned op:8;
    unsigned op2:8;
    unsigned presentC:16;
    unsigned int presentO;
    int c[NUM_C_FIELD];
};
 137
/* Per-translation-block state threaded through the whole translator.  */
struct DisasContext {
    DisasContextBase base;
    const DisasInsn *insn;      /* insn currently being translated */
    TCGOp *insn_start;
    DisasFields fields;         /* decoded operand fields of insn */
    uint64_t ex_value;          /* NOTE(review): presumably EXECUTE state — confirm in users */
    /*
     * During translate_one(), pc_tmp is used to determine the instruction
     * to be executed after base.pc_next - e.g. next sequential instruction
     * or a branch target.
     */
    uint64_t pc_tmp;
    uint32_t ilen;              /* length of the current insn (stored on pgm exceptions) */
    enum cc_op cc_op;           /* tracked state of condition-code computation */
    bool exit_to_mainloop;
};
 154
/* Information carried about a condition to be evaluated.  */
typedef struct {
    TCGCond cond:8;
    bool is_64;     /* true: operands in u.s64, false: in u.s32 */
    bool g1;        /* operand a is a global; free_compare must not free it */
    bool g2;        /* operand b is a global; free_compare must not free it */
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;
 166
 167#ifdef DEBUG_INLINE_BRANCHES
 168static uint64_t inline_branch_hit[CC_OP_MAX];
 169static uint64_t inline_branch_miss[CC_OP_MAX];
 170#endif
 171
/*
 * Write the link value for address PC into OUT, honoring the current
 * addressing mode: in 64-bit mode the full address; in 31-bit mode the
 * address with bit 31 set (the addressing-mode bit), deposited into the
 * low 32 bits of OUT; in 24-bit mode just the address in the low 32 bits.
 */
static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
{
    TCGv_i64 tmp;

    if (s->base.tb->flags & FLAG_MASK_32) {
        if (s->base.tb->flags & FLAG_MASK_64) {
            /* 64-bit mode: the link value is the address itself.  */
            tcg_gen_movi_i64(out, pc);
            return;
        }
        /* 31-bit mode: record the addressing mode in bit 31.  */
        pc |= 0x80000000;
    }
    assert(!(s->base.tb->flags & FLAG_MASK_64));
    /* 24/31-bit mode: only the low 32 bits of OUT are replaced.  */
    tmp = tcg_const_i64(pc);
    tcg_gen_deposit_i64(out, out, tmp, 0, 32);
    tcg_temp_free_i64(tmp);
}
 188
 189static TCGv_i64 psw_addr;
 190static TCGv_i64 psw_mask;
 191static TCGv_i64 gbea;
 192
 193static TCGv_i32 cc_op;
 194static TCGv_i64 cc_src;
 195static TCGv_i64 cc_dst;
 196static TCGv_i64 cc_vr;
 197
 198static char cpu_reg_names[16][4];
 199static TCGv_i64 regs[16];
 200
/*
 * Allocate the TCG globals mirroring guest-visible CPU state: the PSW
 * address and mask, the PER breaking-event address (gbea), the four
 * condition-code tracking fields, and the 16 general registers.
 */
void s390x_translate_init(void)
{
    int i;

    psw_addr = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");
    gbea = tcg_global_mem_new_i64(cpu_env,
                                  offsetof(CPUS390XState, gbea),
                                  "gbea");

    cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    /* General registers r0..r15, named "r0".."r15" for TCG dumps.  */
    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }
}
 231
/* Byte offset of vector register REG (0..31) within CPUS390XState.  */
static inline int vec_full_reg_offset(uint8_t reg)
{
    g_assert(reg < 32);
    return offsetof(CPUS390XState, vregs[reg][0]);
}

/*
 * Byte offset of element ENR of size ES within vector register REG,
 * adjusted for host endianness (see table below).
 */
static inline int vec_reg_offset(uint8_t reg, uint8_t enr, MemOp es)
{
    /* Convert element size (es) - e.g. MO_8 - to bytes */
    const uint8_t bytes = 1 << es;
    int offs = enr * bytes;

    /*
     * vregs[n][0] is the lowest 8 byte and vregs[n][1] the highest 8 byte
     * of the 16 byte vector, on both, little and big endian systems.
     *
     * Big Endian (target/possible host)
     * B:  [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
     * HW: [     0][     1][     2][     3] - [     4][     5][     6][     7]
     * W:  [             0][             1] - [             2][             3]
     * DW: [                             0] - [                             1]
     *
     * Little Endian (possible host)
     * B:  [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
     * HW: [     3][     2][     1][     0] - [     7][     6][     5][     4]
     * W:  [             1][             0] - [             3][             2]
     * DW: [                             0] - [                             1]
     *
     * For 16 byte elements, the two 8 byte halves will not form a host
     * int128 if the host is little endian, since they're in the wrong order.
     * Some operations (e.g. xor) do not care. For operations like addition,
     * the two 8 byte elements have to be loaded separately. Let's force all
     * 16 byte operations to handle it in a special way.
     */
    g_assert(es <= MO_64);
#if !HOST_BIG_ENDIAN
    offs ^= (8 - bytes);
#endif
    return offs + vec_full_reg_offset(reg);
}

/* Offset of the 64-bit float register REG (low doubleword of vreg REG).  */
static inline int freg64_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_64);
}

/* Offset of the 32-bit float register REG (element 0, word-sized).  */
static inline int freg32_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_32);
}
 284
/* Return a new temporary holding the 64-bit value of general reg REG.  */
static TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}

/* Return a new temporary holding the 64-bit float register REG.  */
static TCGv_i64 load_freg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld_i64(r, cpu_env, freg64_offset(reg));
    return r;
}

/* Return a new temporary holding the 32-bit float reg REG, zero-extended.  */
static TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld32u_i64(r, cpu_env, freg32_offset(reg));
    return r;
}

/* Store all 64 bits of V into general register REG.  */
static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

/* Store V into the 64-bit float register REG.  */
static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_st_i64(v, cpu_env, freg64_offset(reg));
}

/* Store the low 32 bits of V into general register REG.  */
static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

/* Store the low 32 bits of V into the high half of general register REG.  */
static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

/* Store the low 32 bits of V into the 32-bit float register REG.  */
static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_st32_i64(v, cpu_env, freg32_offset(reg));
}

/* Copy env->retxl (low half of a 128-bit helper result) into DEST.  */
static void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}

/* Flush the current translation address to the psw.addr global.  */
static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->base.pc_next);
}
 344
/*
 * Record a branch for PER: always update the breaking-event address and,
 * when PER is enabled for this TB, invoke the per_branch helper.
 * TO_NEXT selects the following sequential instruction (s->pc_tmp) as
 * the branch target instead of psw_addr.  No-op in user-only builds.
 */
static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i64(gbea, s->base.pc_next);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_const_i64(s->pc_tmp) : psw_addr;
        gen_helper_per_branch(cpu_env, gbea, next_pc);
        if (to_next) {
            /* Only free the temporary we allocated, not the psw_addr global.  */
            tcg_temp_free_i64(next_pc);
        }
    }
#endif
}
 359
/*
 * As per_branch, but for a branch taken only when COND(arg1, arg2) holds.
 * With PER enabled, the helper call is skipped via an inline inverse
 * branch; otherwise gbea alone is conditionally updated with movcond.
 * No-op in user-only builds.
 */
static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        /* Jump over the PER bookkeeping when the branch is not taken.  */
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->base.pc_next);
        gen_helper_per_branch(cpu_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        TCGv_i64 pc = tcg_const_i64(s->base.pc_next);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
        tcg_temp_free_i64(pc);
    }
#endif
}
 379
/* Record the current instruction address as the PER breaking event.  */
static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->base.pc_next);
}

/*
 * Flush the tracked cc state to the cc_op global.  Skipped for DYNAMIC
 * and STATIC, where cc_op already holds the needed value.
 */
static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}
 391
/* Fetch 2 instruction bytes at PC via the translator loader.  */
static inline uint64_t ld_code2(CPUS390XState *env, DisasContext *s,
                                uint64_t pc)
{
    return (uint64_t)translator_lduw(env, &s->base, pc);
}

/* Fetch 4 instruction bytes at PC, zero-extended to 64 bits.  */
static inline uint64_t ld_code4(CPUS390XState *env, DisasContext *s,
                                uint64_t pc)
{
    return (uint64_t)(uint32_t)translator_ldl(env, &s->base, pc);
}
 403
/*
 * Return the MMU index for memory accesses in the current context:
 * always MMU_USER_IDX for user-only builds; otherwise real mode when
 * DAT is disabled, else the index matching the PSW address-space control.
 */
static int get_mem_index(DisasContext *s)
{
#ifdef CONFIG_USER_ONLY
    return MMU_USER_IDX;
#else
    if (!(s->base.tb->flags & FLAG_MASK_DAT)) {
        return MMU_REAL_IDX;
    }

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_PRIMARY_IDX;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_SECONDARY_IDX;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        return MMU_HOME_IDX;
    default:
        /* ASC access-register mode is not reachable here.  */
        tcg_abort();
        break;
    }
#endif
}
 426
/* Generate a call to the exception helper raising EXCP.  */
static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

/*
 * Raise a program exception CODE: store the code and instruction length
 * into env, synchronize psw.addr and the cc state, then trap.
 */
static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was.  */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    tmp = tcg_const_i32(s->ilen);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* update the psw */
    update_psw_addr(s);

    /* Save off cc.  */
    update_cc_op(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);
}

/* Raise a PGM_OPERATION (illegal opcode) program exception.  */
static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}

/* Raise a data exception with data-exception code DXC.  */
static inline void gen_data_exception(uint8_t dxc)
{
    TCGv_i32 tmp = tcg_const_i32(dxc);
    gen_helper_data_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static inline void gen_trap(DisasContext *s)
{
    /* Set DXC to 0xff */
    gen_data_exception(0xff);
}
 474
 475static void gen_addi_and_wrap_i64(DisasContext *s, TCGv_i64 dst, TCGv_i64 src,
 476                                  int64_t imm)
 477{
 478    tcg_gen_addi_i64(dst, src, imm);
 479    if (!(s->base.tb->flags & FLAG_MASK_64)) {
 480        if (s->base.tb->flags & FLAG_MASK_32) {
 481            tcg_gen_andi_i64(dst, dst, 0x7fffffff);
 482        } else {
 483            tcg_gen_andi_i64(dst, dst, 0x00ffffff);
 484        }
 485    }
 486}
 487
/*
 * Compute the effective address base(b2) + index(x2) + displacement(d2)
 * into a new temporary, wrapped to the current addressing mode.
 * Register number 0 means "no base/index register".
 */
static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /*
     * Note that d2 is limited to 20 bits, signed.  If we crop negative
     * displacements early we create larger immediate addends.
     */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        gen_addi_and_wrap_i64(s, tmp, tmp, d2);
    } else if (b2) {
        gen_addi_and_wrap_i64(s, tmp, regs[b2], d2);
    } else if (x2) {
        gen_addi_and_wrap_i64(s, tmp, regs[x2], d2);
    } else if (!(s->base.tb->flags & FLAG_MASK_64)) {
        /* Displacement only: mask to the 24/31-bit address space.  */
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_movi_i64(tmp, d2 & 0x7fffffff);
        } else {
            tcg_gen_movi_i64(tmp, d2 & 0x00ffffff);
        }
    } else {
        tcg_gen_movi_i64(tmp, d2);
    }

    return tmp;
}
 515
 516static inline bool live_cc_data(DisasContext *s)
 517{
 518    return (s->cc_op != CC_OP_DYNAMIC
 519            && s->cc_op != CC_OP_STATIC
 520            && s->cc_op > 3);
 521}
 522
/* Set the cc state to the constant VAL (0..3), discarding stale data.  */
static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}

/* Record a one-operand cc computation OP with operand DST.  */
static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

/* Record a two-operand cc computation OP with operands SRC and DST.  */
static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

/* Record a three-operand cc computation OP with operands SRC, DST, VR.  */
static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}

/* Set cc from a nonzero test of the 64-bit value VAL.  */
static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}
 578
/* calculates cc into cc_op */
/*
 * The first switch allocates the helper arguments needed for this cc_op
 * (a dummy for unused slots); the second emits the computation itself,
 * either inline for constants/static or via the calc_cc helper with the
 * right number of live arguments.  Afterwards the cc state is STATIC.
 */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = NULL;
    TCGv_i64 dummy = NULL;

    switch (s->cc_op) {
    default:
        /* Ops with fewer than 3 arguments need a dummy placeholder.  */
        dummy = tcg_const_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        local_cc_op = tcg_const_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        /* No helper call needed (or cc_op passed dynamically below).  */
        break;
    }

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
    case CC_OP_LCBB:
    case CC_OP_MULS_32:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ADDU:
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA:
    case CC_OP_SUBU:
    case CC_OP_NZ_F128:
    case CC_OP_VC:
    case CC_OP_MULS_64:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    if (local_cc_op) {
        tcg_temp_free_i32(local_cc_op);
    }
    if (dummy) {
        tcg_temp_free_i64(dummy);
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
 673
/*
 * Whether a direct TB link to DEST may be used.  Disallowed under PER,
 * which must observe every branch; otherwise defer to the generic check.
 */
static bool use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (unlikely(s->base.tb->flags & FLAG_MASK_PER)) {
        return false;
    }
    return translator_use_goto_tb(&s->base, dest);
}

/* Debug counter: a branch on CC_OP that could not be inlined.  */
static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

/* Debug counter: a branch on CC_OP that was inlined.  */
static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}
 695
 696/* Table of mask values to comparison codes, given a comparison as input.
 697   For such, CC=3 should not be possible.  */
 698static const TCGCond ltgt_cond[16] = {
 699    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
 700    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
 701    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
 702    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
 703    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
 704    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
 705    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
 706    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
 707};
 708
 709/* Table of mask values to comparison codes, given a logic op as input.
 710   For such, only CC=0 and CC=1 should be possible.  */
 711static const TCGCond nz_cond[16] = {
 712    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
 713    TCG_COND_NEVER, TCG_COND_NEVER,
 714    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
 715    TCG_COND_NE, TCG_COND_NE,
 716    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
 717    TCG_COND_EQ, TCG_COND_EQ,
 718    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
 719    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
 720};
 721
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    /* Trivially always/never: compare cc_op with itself.  */
    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op.  */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        /* Same table, but with the unsigned flavor of each comparison.  */
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        /* Test-under-mask: only all-zero / not-all-zero are inlineable.  */
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        switch (mask) {
        case 8 | 2: /* result == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* result != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* !carry (borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_EQ : TCG_COND_NE;
            break;
        case 2 | 1: /* carry (!borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_NE : TCG_COND_EQ;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        /* Compare cc_dst (a global, hence g1) against zero.  */
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        /* Compare the masked value (src & dst) against zero.  */
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        /* Compare either the result (cc_dst) or the carry (cc_src)
           against zero, as selected by the mask above.  */
        c->is_64 = true;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        switch (mask) {
        case 8 | 2:
        case 4 | 1: /* result */
            c->u.s64.a = cc_dst;
            break;
        case 8 | 4:
        case 2 | 1: /* carry */
            c->u.s64.a = cc_src;
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case CC_OP_STATIC:
        /* The cc value is already in cc_op; derive cond and operand b
           from the mask, special-casing the single- and three-bit masks.  */
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask.  */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
 984
 985static void free_compare(DisasCompare *c)
 986{
 987    if (!c->g1) {
 988        if (c->is_64) {
 989            tcg_temp_free_i64(c->u.s64.a);
 990        } else {
 991            tcg_temp_free_i32(c->u.s32.a);
 992        }
 993    }
 994    if (!c->g2) {
 995        if (c->is_64) {
 996            tcg_temp_free_i64(c->u.s64.b);
 997        } else {
 998            tcg_temp_free_i32(c->u.s32.b);
 999        }
1000    }
1001}
1002
1003/* ====================================================================== */
1004/* Define the insn format enumeration.  */
1005#define F0(N)                         FMT_##N,
1006#define F1(N, X1)                     F0(N)
1007#define F2(N, X1, X2)                 F0(N)
1008#define F3(N, X1, X2, X3)             F0(N)
1009#define F4(N, X1, X2, X3, X4)         F0(N)
1010#define F5(N, X1, X2, X3, X4, X5)     F0(N)
1011#define F6(N, X1, X2, X3, X4, X5, X6) F0(N)
1012
1013typedef enum {
1014#include "insn-format.h.inc"
1015} DisasFormat;
1016
1017#undef F0
1018#undef F1
1019#undef F2
1020#undef F3
1021#undef F4
1022#undef F5
1023#undef F6
1024
/* This is the way fields are to be accessed out of DisasFields.  */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)

/* Test the presentO bitmap for original field index C.  */
static bool have_field1(const DisasContext *s, enum DisasFieldIndexO c)
{
    return (s->fields.presentO >> c) & 1;
}

/* Fetch compact field C, asserting that original field O was decoded.  */
static int get_field1(const DisasContext *s, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(s, o));
    return s->fields.c[c];
}
1040
/* Describe the layout of each field in each format.  */
typedef struct DisasField {
    unsigned int beg:8;     /* first bit of the field within the insn */
    unsigned int size:8;    /* field width in bits */
    unsigned int type:2;    /* 0 plain, 1 immediate, 2 long displacement,
                               3 vector -- per the layout macros below */
    unsigned int indexC:6;  /* index into DisasFields.c[] */
    enum DisasFieldIndexO indexO:8;  /* original operand-field index */
} DisasField;

/* One field-layout table per instruction format.  */
typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;
1053
/*
 * Field-layout helper macros: each expands to one or more DisasField
 * initializers { beg, size, type, indexC, indexO }.  Expanding
 * insn-format.h.inc with the F<n> macros then builds the per-format
 * field table, indexed by DisasFormat.
 */
#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define V(N, B)       {  B,  4, 3, FLD_C_v##N, FLD_O_v##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }

#define F0(N)                     { { } },
#define F1(N, X1)                 { { X1 } },
#define F2(N, X1, X2)             { { X1, X2 } },
#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
#define F6(N, X1, X2, X3, X4, X5, X6)       { { X1, X2, X3, X4, X5, X6 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.h.inc"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6
#undef R
#undef M
#undef V
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L
1098
/* Generally, we'll extract operands into this structures, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */
typedef struct {
    bool g_out, g_out2, g_in1, g_in2;  /* true: value is a global, do not free */
    TCGv_i64 out, out2, in1, in2;
    TCGv_i64 addr1;                    /* effective address of operand 1 */
} DisasOps;

/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */

#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16
1119
/* Return values from translate_one, indicating the state of the TB.  */

/* We are not using a goto_tb (for whatever reason), but have updated
   the PC (for whatever reason), so there's no need to do it again on
   exiting the TB.  */
#define DISAS_PC_UPDATED        DISAS_TARGET_0

/* We have updated the PC and CC values.  */
#define DISAS_PC_CC_UPDATED     DISAS_TARGET_2


/* Instruction flags, stored in DisasInsn.flags.  */
#define IF_AFP1     0x0001      /* r1 is a fp reg for HFP/FPS instructions */
#define IF_AFP2     0x0002      /* r2 is a fp reg for HFP/FPS instructions */
#define IF_AFP3     0x0004      /* r3 is a fp reg for HFP/FPS instructions */
#define IF_BFP      0x0008      /* binary floating point instruction */
#define IF_DFP      0x0010      /* decimal floating point instruction */
#define IF_PRIV     0x0020      /* privileged instruction */
#define IF_VEC      0x0040      /* vector instruction */
#define IF_IO       0x0080      /* input/output instruction */
1140
/* Static per-opcode description of an instruction.  */
struct DisasInsn {
    unsigned opc:16;        /* opcode */
    unsigned flags:16;      /* IF_* flags above */
    DisasFormat fmt:8;      /* instruction format (FMT_*) */
    unsigned fac:8;         /* required facility -- TODO confirm encoding */
    unsigned spec:8;        /* SPEC_* operand-constraint bits */

    const char *name;

    /* Pre-process arguments before HELP_OP.  */
    void (*help_in1)(DisasContext *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasOps *);

    /*
     * Post-process output after HELP_OP.
     * Note that these are not called if HELP_OP returns DISAS_NORETURN.
     */
    void (*help_wout)(DisasContext *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);

    /* Implement the operation itself.  */
    DisasJumpType (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;          /* opaque per-insn datum passed via s->insn->data */
};
1167
1168/* ====================================================================== */
1169/* Miscellaneous helpers, used by several operations.  */
1170
/*
 * Unconditional branch to an immediate destination.  A branch to the
 * next sequential insn is a fall-through; otherwise use a direct TB
 * link when use_goto_tb allows it, else just update psw_addr and exit.
 */
static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->pc_tmp) {
        per_branch(s, true);
        return DISAS_NEXT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        per_breaking_event(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb(s->base.tb, 0);
        return DISAS_NORETURN;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    }
}
1190
/*
 * Emit a conditional branch governed by *c, either to an immediate
 * halfword offset relative to the insn (is_imm) or to the address held
 * in cdest.  Frees *c (via free_compare) on every exit path.
 */
static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
                                 bool is_imm, int imm, TCGv_i64 cdest)
{
    DisasJumpType ret;
    uint64_t dest = s->base.pc_next + (int64_t)imm * 2;
    TCGLabel *lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = DISAS_NEXT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->pc_tmp) {
            /* Branch to next.  */
            per_branch(s, true);
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (!cdest) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = DISAS_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->pc_tmp)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            /* Branch taken.  */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb(s->base.tb, 1);

            ret = DISAS_NORETURN;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = DISAS_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_const_i64(s->pc_tmp);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            /* Widen the 32-bit comparison to 64 bits for the movcond.  */
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = DISAS_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}
1321
1322/* ====================================================================== */
1323/* The operations.  These perform the bulk of the work for any insn,
1324   usually after the operands have been loaded and output initialized.  */
1325
/* Absolute value: out = |in2|.  */
static DisasJumpType op_abs(DisasContext *s, DisasOps *o)
{
    tcg_gen_abs_i64(o->out, o->in2);
    return DISAS_NEXT;
}
1331
/* 32-bit FP absolute value: keep only the low 31 bits of in2,
   clearing the float32 sign bit (and the unused high word).  */
static DisasJumpType op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return DISAS_NEXT;
}
1337
/* 64-bit FP absolute value: clear the sign (top) bit of in2.  */
static DisasJumpType op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return DISAS_NEXT;
}
1343
/* 128-bit FP absolute value: clear the sign bit in the high half (in1),
   copy the low half (in2) through unchanged.  */
static DisasJumpType op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}
1350
/* out = in1 + in2.  */
static DisasJumpType op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
1356
/* Unsigned 64-bit add: out = in1 + in2, leaving the carry-out
   (0 or 1) in cc_src for the ADDU cc computation.  */
static DisasJumpType op_addu64(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    return DISAS_NEXT;
}
1363
/* Compute carry into cc_src. */
static void compute_carry(DisasContext *s)
{
    switch (s->cc_op) {
    case CC_OP_ADDU:
        /* The carry value is already in cc_src (1,0). */
        break;
    case CC_OP_SUBU:
        /* cc_src holds the borrow; +1 converts it to a carry.
           (Presumably borrow is 0/-1 so this yields 1/0 -- verify
           against the SUBU cc helper.)  */
        tcg_gen_addi_i64(cc_src, cc_src, 1);
        break;
    default:
        gen_op_calc_cc(s);
        /* fall through */
    case CC_OP_STATIC:
        /* The carry flag is the msb of CC; compute into cc_src. */
        tcg_gen_extu_i32_i64(cc_src, cc_op);
        tcg_gen_shri_i64(cc_src, cc_src, 1);
        break;
    }
}
1384
/* Add with carry (32-bit): out = in1 + in2 + carry-in from cc_src.  */
static DisasJumpType op_addc32(DisasContext *s, DisasOps *o)
{
    compute_carry(s);
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    tcg_gen_add_i64(o->out, o->out, cc_src);
    return DISAS_NEXT;
}
1392
/* Add with carry (64-bit): two double-word adds so the carry-out of
   in1 + carry-in + in2 is accumulated back into cc_src.  */
static DisasJumpType op_addc64(DisasContext *s, DisasOps *o)
{
    compute_carry(s);

    TCGv_i64 zero = tcg_const_i64(0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, zero);
    tcg_gen_add2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);
    tcg_temp_free_i64(zero);

    return DISAS_NEXT;
}
1404
/*
 * Add to storage: add in2 to the memory operand at addr1.  Without the
 * STFLE_45 facility this is a plain load/add/store sequence; with it,
 * the add is performed atomically in memory.  Either way the sum is
 * recomputed into out so CC can be set from it afterwards.
 */
static DisasJumpType op_asi(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
1426
/*
 * Unsigned add to storage: like op_asi, but the recomputation uses
 * add2 so the carry-out lands in cc_src for the unsigned CC.
 */
static DisasJumpType op_asiu64(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
1449
/* ADD (short BFP), via helper.  */
static DisasJumpType op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
1455
/* ADD (long BFP), via helper.  */
static DisasJumpType op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
1461
/* ADD (extended BFP): 128-bit operands as out:out2 and in1:in2;
   the low half of the result comes back via return_low128.  */
static DisasJumpType op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
1468
/* out = in1 & in2.  */
static DisasJumpType op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
1474
/*
 * AND with an immediate placed at a sub-field of the register.
 * insn->data encodes the placement: low byte = shift, next byte =
 * field width.  Bits outside the field are forced to 1 in the mask so
 * they pass through in1 unchanged; CC reflects only the field bits.
 */
static DisasJumpType op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
1491
/* out = in1 & ~in2.  */
static DisasJumpType op_andc(DisasContext *s, DisasOps *o)
{
    tcg_gen_andc_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
1497
/* out = in1 | ~in2.  */
static DisasJumpType op_orc(DisasContext *s, DisasOps *o)
{
    tcg_gen_orc_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
1503
/* out = ~(in1 & in2).  */
static DisasJumpType op_nand(DisasContext *s, DisasOps *o)
{
    tcg_gen_nand_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
1509
/* out = ~(in1 | in2).  */
static DisasJumpType op_nor(DisasContext *s, DisasOps *o)
{
    tcg_gen_nor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
1515
/* out = ~(in1 ^ in2).  */
static DisasJumpType op_nxor(DisasContext *s, DisasOps *o)
{
    tcg_gen_eqv_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
1521
1522static DisasJumpType op_ni(DisasContext *s, DisasOps *o)
1523{
1524    o->in1 = tcg_temp_new_i64();
1525
1526    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
1527        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
1528    } else {
1529        /* Perform the atomic operation in memory. */
1530        tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
1531                                     s->insn->data);
1532    }
1533
1534    /* Recompute also for atomic case: needed for setting CC. */
1535    tcg_gen_and_i64(o->out, o->in1, o->in2);
1536
1537    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
1538        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
1539    }
1540    return DISAS_NEXT;
1541}
1542
/* Branch and save: store the link info in out, then branch to in2.
   A NULL in2 means no branch target, so just fall through.  */
static DisasJumpType op_bas(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}
1554
/*
 * Build BAL-style link information in o->out.  In 31/64-bit mode this
 * is just the plain PC link.  In 24-bit mode the low word is composed
 * of the insn length code (ilen/2 in bits 30-31), four bits taken from
 * psw_mask (presumably the program mask -- verify against the PSW
 * layout), the CC in bits 28-29, and the return address.
 */
static void save_link_info(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t;

    if (s->base.tb->flags & (FLAG_MASK_32 | FLAG_MASK_64)) {
        pc_to_link_info(o->out, s, s->pc_tmp);
        return;
    }
    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, 0xffffffff00000000ull);
    tcg_gen_ori_i64(o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp);
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 16);
    tcg_gen_andi_i64(t, t, 0x0f000000);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_gen_extu_i32_i64(t, cc_op);
    tcg_gen_shli_i64(t, t, 28);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);
}
1575
/* Branch and link: like op_bas but with BAL-style link info.  */
static DisasJumpType op_bal(DisasContext *s, DisasOps *o)
{
    save_link_info(s, o);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}
1587
/* Branch and save, immediate target (halfword offset in i2).  */
static DisasJumpType op_basi(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    return help_goto_direct(s, s->base.pc_next + (int64_t)get_field(s, i2) * 2);
}
1593
/* Branch on condition: mask m1 against the CC, branching either to the
   immediate i2 or to the address in in2.  */
static DisasJumpType op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s, m1);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;

    /* BCR with R2 = 0 causes no branching */
    if (have_field(s, r2) && get_field(s, r2) == 0) {
        if (m1 == 14) {
            /* Perform serialization */
            /* FIXME: check for fast-BCR-serialization facility */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        if (m1 == 15) {
            /* Perform serialization */
            /* FIXME: perform checkpoint-synchronisation */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        return DISAS_NEXT;
    }

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1619
/* Branch on count (32-bit): decrement the low word of r1 and branch
   if the decremented value is non-zero.  */
static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1643
/* Branch on count, high: like op_bct32 but the counter lives in the
   high word of r1; the target is always the immediate i2.  */
static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int imm = get_field(s, i2);
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, regs[r1], 32);
    tcg_gen_subi_i64(t, t, 1);
    store_reg32h_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, 1, imm, o->in2);
}
1667
/* Branch on count (64-bit): decrement r1 in place and branch if
   non-zero.  regs[r1] is a global, hence g1 = true.  */
static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1686
/* Branch on index (32-bit): r1 += r3, then compare the new low word of
   r1 with the low word of r3|1.  insn->data selects the sense:
   LE (branch-on-low-or-equal) vs GT (branch-on-high).  */
static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1712
/* Branch on index (64-bit).  When r1 == r3|1 the add below would
   clobber the comparand, so snapshot it into a temp first.  */
static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    if (r1 == (r3 | 1)) {
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1738
/* Compare and branch: compare in1 with in2 using the relation encoded
   in m3 (made unsigned when insn->data is set), branching to the
   immediate i4 or to the b4/d4 address.  */
static DisasJumpType op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s, i4);
    if (is_imm) {
        imm = get_field(s, i4);
    } else {
        imm = 0;
        o->out = get_address(s, 0, get_field(s, b4),
                             get_field(s, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}
1764
/* COMPARE (short BFP): helper sets cc_op directly.  */
static DisasJumpType op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
1771
/* COMPARE (long BFP): helper sets cc_op directly.  */
static DisasJumpType op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
1778
/* COMPARE (extended BFP): 128-bit operands as out:out2 and in1:in2.  */
static DisasJumpType op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
1785
/*
 * Extract and validate the m3 (rounding mode) and m4 fields of an FP
 * instruction, packed as deposit32(m3, 4, 4, m4).  Fields that only
 * exist with the floating-point-extension facility are forced to zero
 * without it.  Returns NULL after raising a specification exception
 * when the rounding mode is invalid; callers must then return
 * DISAS_NORETURN.
 */
static TCGv_i32 fpinst_extract_m34(DisasContext *s, bool m3_with_fpe,
                                   bool m4_with_fpe)
{
    const bool fpe = s390_has_feat(S390_FEAT_FLOATING_POINT_EXT);
    uint8_t m3 = get_field(s, m3);
    uint8_t m4 = get_field(s, m4);

    /* m3 field was introduced with FPE */
    if (!fpe && m3_with_fpe) {
        m3 = 0;
    }
    /* m4 field was introduced with FPE */
    if (!fpe && m4_with_fpe) {
        m4 = 0;
    }

    /* Check for valid rounding modes. Mode 3 was introduced later. */
    if (m3 == 2 || m3 > 7 || (!fpe && m3 == 3)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return NULL;
    }

    return tcg_const_i32(deposit32(m3, 4, 4, m4));
}
1810
/*
 * CONVERT TO FIXED: BFP (e=short, d=long, x=extended source) to a
 * signed integer (cf* -> 32-bit, cg* -> 64-bit results, per the insn
 * names).  m34 packs the rounding mode; a NULL m34 means a
 * specification exception was already raised.  The helpers set CC.
 */
static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
1888
/*
 * CONVERT TO LOGICAL: BFP to an unsigned integer (clf* -> 32-bit,
 * clg* -> 64-bit results, per the insn names).  Same m34/NULL contract
 * as the signed variants above; helpers set CC.
 */
static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    set_cc_static(s);
    return DISAS_NEXT;
}
1966
/*
 * CONVERT FROM FIXED: signed integer to BFP (short/long/extended
 * result, per the insn names).  No CC is set; the extended variant
 * returns its low half via return_low128.
 */
static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cegb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return_low128(o->out2);
    return DISAS_NEXT;
}
2003
/*
 * CONVERT FROM LOGICAL: unsigned integer to BFP (short/long/extended
 * result, per the insn names).  No CC is set; the extended variant
 * returns its low half via return_low128.
 */
static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_celgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxlgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return_low128(o->out2);
    return DISAS_NEXT;
}
2040
/* CHECKSUM: the helper computes the checksum and returns the number of
   bytes processed in len; r2/r2+1 (address/length pair) are then
   advanced/decremented by that amount.  The accumulated checksum comes
   back via return_low128.  */
static DisasJumpType op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    return_low128(o->out);

    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);

    return DISAS_NEXT;
}
2056
/*
 * CLC: compare logical (memory to memory).  For power-of-two lengths
 * up to 8 bytes, perform the two loads inline and let the dynamic CC
 * machinery compute the unsigned comparison; otherwise defer to the
 * byte-loop helper, which sets CC itself.
 */
static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
{
    /* The l1 field encodes length - 1.  */
    int l = get_field(s, l1);
    TCGv_i32 vl;

    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        /* Odd sizes: the helper compares byte by byte and returns CC.  */
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return DISAS_NEXT;
    }
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return DISAS_NEXT;
}
2089
2090static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
2091{
2092    int r1 = get_field(s, r1);
2093    int r2 = get_field(s, r2);
2094    TCGv_i32 t1, t2;
2095
2096    /* r1 and r2 must be even.  */
2097    if (r1 & 1 || r2 & 1) {
2098        gen_program_exception(s, PGM_SPECIFICATION);
2099        return DISAS_NORETURN;
2100    }
2101
2102    t1 = tcg_const_i32(r1);
2103    t2 = tcg_const_i32(r2);
2104    gen_helper_clcl(cc_op, cpu_env, t1, t2);
2105    tcg_temp_free_i32(t1);
2106    tcg_temp_free_i32(t2);
2107    set_cc_static(s);
2108    return DISAS_NEXT;
2109}
2110
/*
 * CLCLE: compare logical long extended.  R1 and R3 name register
 * pairs and must be even; otherwise raise a specification exception.
 */
static DisasJumpType op_clcle(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even.  */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return DISAS_NEXT;
}
2131
/*
 * CLCLU: compare logical long unicode.  Same shape as CLCLE:
 * R1 and R3 name register pairs and must be even.
 */
static DisasJumpType op_clclu(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even.  */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return DISAS_NEXT;
}
2152
/* CLM: compare logical characters under mask; the helper sets CC.  */
static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();
    /* Only the low 32 bits of R1 participate.  */
    tcg_gen_extrl_i64_i32(t1, o->in1);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(m3);
    return DISAS_NEXT;
}
2164
/*
 * CLST: compare logical string, terminated by the character in R0.
 * The helper returns the updated first address in in1 and the updated
 * second address in the low 128 half, written back into in2.
 */
static DisasJumpType op_clst(DisasContext *s, DisasOps *o)
{
    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return DISAS_NEXT;
}
2172
2173static DisasJumpType op_cps(DisasContext *s, DisasOps *o)
2174{
2175    TCGv_i64 t = tcg_temp_new_i64();
2176    tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
2177    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
2178    tcg_gen_or_i64(o->out, o->out, t);
2179    tcg_temp_free_i64(t);
2180    return DISAS_NEXT;
2181}
2182
/*
 * CS/CSY/CSG: compare and swap.  Implemented with an atomic cmpxchg;
 * CC is derived from whether the memory value matched the expected
 * value.  The operand size is encoded in insn->data.
 */
static DisasJumpType op_cs(DisasContext *s, DisasOps *o)
{
    int d2 = get_field(s, d2);
    int b2 = get_field(s, b2);
    TCGv_i64 addr, cc;

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value).  */

    addr = get_address(s, 0, b2, d2);
    tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
                               get_mem_index(s), s->insn->data | MO_ALIGN);
    tcg_temp_free_i64(addr);

    /* Are the memory and expected values (un)equal?  Note that this setcond
       produces the output CC value, thus the NE sense of the test.  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
    tcg_gen_extrl_i64_i32(cc_op, cc);
    tcg_temp_free_i64(cc);
    set_cc_static(s);

    return DISAS_NEXT;
}
2207
/*
 * CDSG: compare double and swap (128-bit).  Uses the serial helper
 * outside of parallel context, the cmpxchg128 helper when the host
 * supports it, and otherwise bails out to the exclusive-execution
 * slow path.
 */
static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    int d2 = get_field(s, d2);
    int b2 = get_field(s, b2);
    DisasJumpType ret = DISAS_NEXT;
    TCGv_i64 addr;
    TCGv_i32 t_r1, t_r3;

    /* Note that R1:R1+1 = expected value and R3:R3+1 = new value.  */
    addr = get_address(s, 0, b2, d2);
    t_r1 = tcg_const_i32(r1);
    t_r3 = tcg_const_i32(r3);
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
    } else if (HAVE_CMPXCHG128) {
        gen_helper_cdsg_parallel(cpu_env, addr, t_r1, t_r3);
    } else {
        /* Restart this insn under exclusive execution of one cpu.  */
        gen_helper_exit_atomic(cpu_env);
        ret = DISAS_NORETURN;
    }
    tcg_temp_free_i64(addr);
    tcg_temp_free_i32(t_r1);
    tcg_temp_free_i32(t_r3);

    set_cc_static(s);
    return ret;
}
2237
/* CSST: compare and swap and store; helper variant depends on whether
   we are translating for parallel execution.  */
static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    TCGv_i32 t_r3 = tcg_const_i32(r3);

    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        gen_helper_csst_parallel(cc_op, cpu_env, t_r3, o->addr1, o->in2);
    } else {
        gen_helper_csst(cc_op, cpu_env, t_r3, o->addr1, o->in2);
    }
    tcg_temp_free_i32(t_r3);

    set_cc_static(s);
    return DISAS_NEXT;
}
2253
2254#ifndef CONFIG_USER_ONLY
/*
 * CSP/CSPG: compare and swap and purge.  A compare-and-swap whose
 * success, combined with the LSB of R2, may additionally require a
 * global TLB flush (the purge helper).
 */
static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
{
    MemOp mop = s->insn->data;
    TCGv_i64 addr, old, cc;
    TCGLabel *lab = gen_new_label();

    /* Note that in1 = R1 (zero-extended expected value),
       out = R1 (original reg), out2 = R1+1 (new value).  */

    addr = tcg_temp_new_i64();
    old = tcg_temp_new_i64();
    /* Low bits of the second operand address are ignored (mask to
       the operand size).  */
    tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
    tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
                               get_mem_index(s), mop | MO_ALIGN);
    tcg_temp_free_i64(addr);

    /* Are the memory and expected values (un)equal?  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
    tcg_gen_extrl_i64_i32(cc_op, cc);

    /* Write back the output now, so that it happens before the
       following branch, so that we don't need local temps.  */
    if ((mop & MO_SIZE) == MO_32) {
        tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
    } else {
        tcg_gen_mov_i64(o->out, old);
    }
    tcg_temp_free_i64(old);

    /* If the comparison was equal, and the LSB of R2 was set,
       then we need to flush the TLB (for all cpus).  */
    tcg_gen_xori_i64(cc, cc, 1);
    tcg_gen_and_i64(cc, cc, o->in2);
    tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
    tcg_temp_free_i64(cc);

    gen_helper_purge(cpu_env);
    gen_set_label(lab);

    return DISAS_NEXT;
}
2297#endif
2298
/* CVD: convert the 32-bit value in R1 to packed decimal and store the
   8-byte result at the second-operand address.  */
static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i32 t2 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t2, o->in1);
    gen_helper_cvd(t1, t2);
    tcg_temp_free_i32(t2);
    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
    tcg_temp_free_i64(t1);
    return DISAS_NEXT;
}
2310
/*
 * Compare-and-trap family.  Branch around the trap when the condition
 * encoded in m3 does NOT hold (hence the inverted condition);
 * insn->data selects the unsigned (logical) variant.
 */
static DisasJumpType op_ct(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    TCGLabel *lab = gen_new_label();
    TCGCond c;

    c = tcg_invert_cond(ltgt_cond[m3]);
    if (s->insn->data) {
        c = tcg_unsigned_cond(c);
    }
    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);

    /* Trap.  */
    gen_trap(s);

    gen_set_label(lab);
    return DISAS_NEXT;
}
2329
/*
 * CU12/CU14/CU21/CU24/CU41/CU42: unicode conversion instructions.
 * insn->data encodes which conversion (source/destination widths);
 * the m3 well-formedness-check flag is only honored with the
 * ETF3-enhancement facility.
 */
static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i32 tr1, tr2, chk;

    /* R1 and R2 must both be even.  */
    if ((r1 | r2) & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
        m3 = 0;
    }

    tr1 = tcg_const_i32(r1);
    tr2 = tcg_const_i32(r2);
    chk = tcg_const_i32(m3);

    switch (s->insn->data) {
    case 12:
        gen_helper_cu12(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 14:
        gen_helper_cu14(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 21:
        gen_helper_cu21(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 24:
        gen_helper_cu24(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 41:
        gen_helper_cu41(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 42:
        gen_helper_cu42(cc_op, cpu_env, tr1, tr2, chk);
        break;
    default:
        g_assert_not_reached();
    }

    tcg_temp_free_i32(tr1);
    tcg_temp_free_i32(tr2);
    tcg_temp_free_i32(chk);
    set_cc_static(s);
    return DISAS_NEXT;
}
2379
2380#ifndef CONFIG_USER_ONLY
/* DIAGNOSE: forward r1, r3 and the function code (i2 field) to the
   helper, which dispatches on the code.  */
static DisasJumpType op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    TCGv_i32 func_code = tcg_const_i32(get_field(s, i2));

    gen_helper_diag(cpu_env, r1, r3, func_code);

    tcg_temp_free_i32(func_code);
    tcg_temp_free_i32(r3);
    tcg_temp_free_i32(r1);
    return DISAS_NEXT;
}
2394#endif
2395
/* 32-bit signed divide; quotient/remainder pair comes back via
   out2 and the low 128 half.  */
static DisasJumpType op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}
2402
/* 32-bit unsigned divide; quotient/remainder pair comes back via
   out2 and the low 128 half.  */
static DisasJumpType op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}
2409
/* 64-bit signed divide; quotient/remainder pair comes back via
   out2 and the low 128 half.  */
static DisasJumpType op_divs64(DisasContext *s, DisasOps *o)
{
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}
2416
/* 64-bit unsigned divide.  Unlike the other variants, the 128-bit
   dividend is passed as the out/out2 pair rather than in1/in2.  */
static DisasJumpType op_divu64(DisasContext *s, DisasOps *o)
{
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}
2423
/* DEB(R): short BFP divide.  */
static DisasJumpType op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
2429
/* DDB(R): long BFP divide.  */
static DisasJumpType op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
2435
/* DXB(R): extended (128-bit) BFP divide; the dividend is the
   out/out2 pair, and the 128-bit quotient is returned the same way.  */
static DisasJumpType op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
2442
/* EAR: extract access register r2 into the output register.  */
static DisasJumpType op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
    return DISAS_NEXT;
}
2449
/* ECAG: extract CPU attribute.  */
static DisasJumpType op_ecag(DisasContext *s, DisasOps *o)
{
    /* No cache information provided.  */
    tcg_gen_movi_i64(o->out, -1);
    return DISAS_NEXT;
}
2456
/* EFPC: extract the floating-point control register.  */
static DisasJumpType op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return DISAS_NEXT;
}
2462
/* EPSW: extract the PSW; high half into R1, low half into R2 (when
   R2 is non-zero).  */
static DisasJumpType op_epsw(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r2 = get_field(s, r2);
    TCGv_i64 t = tcg_temp_new_i64();

    /* Note the "subsequently" in the PoO, which implies a defined result
       if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
    tcg_gen_shri_i64(t, psw_mask, 32);
    store_reg32_i64(r1, t);
    if (r2 != 0) {
        store_reg32_i64(r2, psw_mask);
    }

    tcg_temp_free_i64(t);
    return DISAS_NEXT;
}
2480
/*
 * EX/EXRL: execute the instruction at the second-operand address,
 * modified by bits of R1.  The helper fetches and stashes the target
 * instruction; translation then restarts at the current PC, hence
 * DISAS_PC_CC_UPDATED.
 */
static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    TCGv_i32 ilen;
    TCGv_i64 v1;

    /* Nested EXECUTE is not allowed.  */
    if (unlikely(s->ex_value)) {
        gen_program_exception(s, PGM_EXECUTE);
        return DISAS_NORETURN;
    }

    update_psw_addr(s);
    update_cc_op(s);

    /* R1 == 0 means "no modification"; substitute a zero constant.  */
    if (r1 == 0) {
        v1 = tcg_const_i64(0);
    } else {
        v1 = regs[r1];
    }

    ilen = tcg_const_i32(s->ilen);
    gen_helper_ex(cpu_env, ilen, v1, o->in2);
    tcg_temp_free_i32(ilen);

    /* Only free v1 if we allocated it above (not a live register).  */
    if (r1 == 0) {
        tcg_temp_free_i64(v1);
    }

    return DISAS_PC_CC_UPDATED;
}
2512
/* FIEBR(A): load FP integer, short BFP; m34 encodes rounding mode.  */
static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
{
    /* NULL means invalid m3/m4 fields; exception already raised.  */
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fieb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}
2524
/* FIDBR(A): load FP integer, long BFP; m34 encodes rounding mode.  */
static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
{
    /* NULL means invalid m3/m4 fields; exception already raised.  */
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fidb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}
2536
/* FIXBR(A): load FP integer, extended BFP; 128-bit result in out/out2.  */
static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
{
    /* NULL means invalid m3/m4 fields; exception already raised.  */
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m34);
    return_low128(o->out2);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}
2549
/* FLOGR: find leftmost one.  R1 receives the bit position (or 64 for
   zero input); R1+1 receives the input with that bit cleared.  */
static DisasJumpType op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation.  */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64.  */
    tcg_gen_clzi_i64(o->out, o->in2, 64);

    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing.  */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    /* cc_dst still holds a copy of the original input here.  */
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return DISAS_NEXT;
}
2569
/*
 * ICM/ICMH/ICMY: insert characters under mask.  Contiguous masks that
 * form a whole 8/16/32-bit field are handled with a single load and
 * deposit; other masks are decomposed into byte loads.  CC is computed
 * from the inserted bits (ccm is the mask of bits that were written).
 */
static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    /* insn->data is the bit offset of the 32-bit field being targeted
       (distinguishes ICM from ICMH).  */
    int pos, len, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();
    uint64_t ccm;

    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit load.  */
        tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
        len = 32;
        goto one_insert;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit load.  */
        tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
        len = 16;
        goto one_insert;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit load.  */
        tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
        len = 8;
        goto one_insert;

    one_insert:
        pos = base + ctz32(m3) * 8;
        tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
        ccm = ((1ull << len) - 1) << pos;
        break;

    default:
        /* This is going to be a sequence of loads and inserts.  */
        pos = base + 32 - 8;
        ccm = 0;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
                tcg_gen_addi_i64(o->in2, o->in2, 1);
                tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
                ccm |= 0xffull << pos;
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }

    tcg_gen_movi_i64(tmp, ccm);
    gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
2629
2630static DisasJumpType op_insi(DisasContext *s, DisasOps *o)
2631{
2632    int shift = s->insn->data & 0xff;
2633    int size = s->insn->data >> 8;
2634    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
2635    return DISAS_NEXT;
2636}
2637
/*
 * IPM: insert program mask.  Build the byte { program mask, CC } and
 * deposit it into bits 24..31 of R1; the CC must be materialized first.
 */
static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    gen_op_calc_cc(s);
    t1 = tcg_temp_new_i64();
    /* Program mask lives in bits 40..43 of the 64-bit PSW mask.  */
    tcg_gen_extract_i64(t1, psw_mask, 40, 4);
    t2 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(t2, cc_op);
    tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
    tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return DISAS_NEXT;
}
2653
2654#ifndef CONFIG_USER_ONLY
2655static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
2656{
2657    TCGv_i32 m4;
2658
2659    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2660        m4 = tcg_const_i32(get_field(s, m4));
2661    } else {
2662        m4 = tcg_const_i32(0);
2663    }
2664    gen_helper_idte(cpu_env, o->in1, o->in2, m4);
2665    tcg_temp_free_i32(m4);
2666    return DISAS_NEXT;
2667}
2668
2669static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
2670{
2671    TCGv_i32 m4;
2672
2673    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2674        m4 = tcg_const_i32(get_field(s, m4));
2675    } else {
2676        m4 = tcg_const_i32(0);
2677    }
2678    gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
2679    tcg_temp_free_i32(m4);
2680    return DISAS_NEXT;
2681}
2682
/* ISKE: insert storage key extended.  */
static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
{
    gen_helper_iske(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}
2688#endif
2689
2690static DisasJumpType op_msa(DisasContext *s, DisasOps *o)
2691{
2692    int r1 = have_field(s, r1) ? get_field(s, r1) : 0;
2693    int r2 = have_field(s, r2) ? get_field(s, r2) : 0;
2694    int r3 = have_field(s, r3) ? get_field(s, r3) : 0;
2695    TCGv_i32 t_r1, t_r2, t_r3, type;
2696
2697    switch (s->insn->data) {
2698    case S390_FEAT_TYPE_KMA:
2699        if (r3 == r1 || r3 == r2) {
2700            gen_program_exception(s, PGM_SPECIFICATION);
2701            return DISAS_NORETURN;
2702        }
2703        /* FALL THROUGH */
2704    case S390_FEAT_TYPE_KMCTR:
2705        if (r3 & 1 || !r3) {
2706            gen_program_exception(s, PGM_SPECIFICATION);
2707            return DISAS_NORETURN;
2708        }
2709        /* FALL THROUGH */
2710    case S390_FEAT_TYPE_PPNO:
2711    case S390_FEAT_TYPE_KMF:
2712    case S390_FEAT_TYPE_KMC:
2713    case S390_FEAT_TYPE_KMO:
2714    case S390_FEAT_TYPE_KM:
2715        if (r1 & 1 || !r1) {
2716            gen_program_exception(s, PGM_SPECIFICATION);
2717            return DISAS_NORETURN;
2718        }
2719        /* FALL THROUGH */
2720    case S390_FEAT_TYPE_KMAC:
2721    case S390_FEAT_TYPE_KIMD:
2722    case S390_FEAT_TYPE_KLMD:
2723        if (r2 & 1 || !r2) {
2724            gen_program_exception(s, PGM_SPECIFICATION);
2725            return DISAS_NORETURN;
2726        }
2727        /* FALL THROUGH */
2728    case S390_FEAT_TYPE_PCKMO:
2729    case S390_FEAT_TYPE_PCC:
2730        break;
2731    default:
2732        g_assert_not_reached();
2733    };
2734
2735    t_r1 = tcg_const_i32(r1);
2736    t_r2 = tcg_const_i32(r2);
2737    t_r3 = tcg_const_i32(r3);
2738    type = tcg_const_i32(s->insn->data);
2739    gen_helper_msa(cc_op, cpu_env, t_r1, t_r2, t_r3, type);
2740    set_cc_static(s);
2741    tcg_temp_free_i32(t_r1);
2742    tcg_temp_free_i32(t_r2);
2743    tcg_temp_free_i32(t_r3);
2744    tcg_temp_free_i32(type);
2745    return DISAS_NEXT;
2746}
2747
/* KEB(R): compare and signal, short BFP; helper sets CC.  */
static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
{
    gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
2754
/* KDB(R): compare and signal, long BFP; helper sets CC.  */
static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
{
    gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
2761
/* KXB(R): compare and signal, extended BFP; helper sets CC.  */
static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
{
    gen_helper_kxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
2768
/* LAA(L)(G): load and add.  */
static DisasJumpType op_laa(DisasContext *s, DisasOps *o)
{
    /* The atomic op returns the original memory value into in2; that
       value is also the instruction's register output.  */
    tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the addition for setting CC.  */
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
2779
/* LAN(G): load and and.  */
static DisasJumpType op_lan(DisasContext *s, DisasOps *o)
{
    /* The atomic op returns the original memory value into in2; that
       value is also the instruction's register output.  */
    tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
2790
/* LAO(G): load and or.  */
static DisasJumpType op_lao(DisasContext *s, DisasOps *o)
{
    /* The atomic op returns the original memory value into in2; that
       value is also the instruction's register output.  */
    tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
2801
/* LAX(G): load and exclusive or.  */
static DisasJumpType op_lax(DisasContext *s, DisasOps *o)
{
    /* The atomic op returns the original memory value into in2; that
       value is also the instruction's register output.  */
    tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
2812
/* LDEB(R): lengthen short BFP to long BFP (always exact).  */
static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}
2818
/* LEDBR(A): round long BFP to short BFP; m34 encodes rounding mode.  */
static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
{
    /* NULL means invalid m3/m4 fields; exception already raised.  */
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_ledb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}
2830
/* LDXBR(A): round extended BFP to long BFP; m34 encodes rounding mode.  */
static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
{
    /* NULL means invalid m3/m4 fields; exception already raised.  */
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}
2842
/* LEXBR(A): round extended BFP to short BFP; m34 encodes rounding mode.  */
static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
{
    /* NULL means invalid m3/m4 fields; exception already raised.  */
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_lexb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}
2854
/* LXDB(R): lengthen long BFP to extended BFP; result in out/out2.  */
static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
2861
/* LXEB(R): lengthen short BFP to extended BFP; result in out/out2.  */
static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
2868
/* LDE(R): place the 32-bit value in the high half of the FP register.  */
static DisasJumpType op_lde(DisasContext *s, DisasOps *o)
{
    tcg_gen_shli_i64(o->out, o->in2, 32);
    return DISAS_NEXT;
}
2874
/* LLGT(R): load 31-bit value (clear all but bits 0..30).  */
static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return DISAS_NEXT;
}
2880
/* Sign-extending 8-bit load from the second-operand address.  */
static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
2886
/* Zero-extending 8-bit load from the second-operand address.  */
static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
2892
/* Sign-extending 16-bit load from the second-operand address.  */
static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
2898
/* Zero-extending 16-bit load from the second-operand address.  */
static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
2904
/* Sign-extending 32-bit load from the second-operand address.  */
static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
2910
/* Zero-extending 32-bit load from the second-operand address.  */
static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
2916
/* 64-bit load from the second-operand address.  */
static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
2922
/* LAT: load and trap (32-bit); trap if the loaded value is zero.  */
static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32_i64(get_field(s, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
2933
/* LGAT: load and trap (64-bit); trap if the loaded value is zero.  */
static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
2944
/* LFHAT: load high and trap; trap if the loaded value is zero.  */
static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32h_i64(get_field(s, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
2955
/* LLGFAT: load logical and trap; trap if the loaded value is zero.  */
static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
2966
/* LLGTAT: load logical 31-bit and trap; trap if the result is zero.  */
static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
2977
/*
 * LOAD/STORE ON CONDITION and SELECT: choose between in1 and in2
 * based on the condition in m3 (LOC*) or m4 (SELECT).  The condition
 * is materialized and folded into a movcond.
 */
static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;

    if (have_field(s, m3)) {
        /* LOAD * ON CONDITION */
        disas_jcc(s, &c, get_field(s, m3));
    } else {
        /* SELECT */
        disas_jcc(s, &c, get_field(s, m4));
    }

    if (c.is_64) {
        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
                            o->in2, o->in1);
        free_compare(&c);
    } else {
        /* 32-bit compare: widen the condition result to 64 bits and
           select against zero.  */
        TCGv_i32 t32 = tcg_temp_new_i32();
        TCGv_i64 t, z;

        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
        free_compare(&c);

        t = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t, t32);
        tcg_temp_free_i32(t32);

        z = tcg_const_i64(0);
        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
        tcg_temp_free_i64(t);
        tcg_temp_free_i64(z);
    }

    return DISAS_NEXT;
}
3013
3014#ifndef CONFIG_USER_ONLY
/* LCTL: load control registers r1..r3 (32-bit) from memory.  */
static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_lctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    s->exit_to_mainloop = true;
    return DISAS_TOO_MANY;
}
3026
/* LCTLG: load control registers r1..r3 (64-bit) from memory.  */
static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    s->exit_to_mainloop = true;
    return DISAS_TOO_MANY;
}
3038
/* LRA: load real address; helper does the translation and sets CC.  */
static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
{
    gen_helper_lra(o->out, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3045
/* LPP: load program parameter into env->pp.  */
static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
{
    tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
    return DISAS_NEXT;
}
3051
/* LPSW: load an 8-byte (short-format) PSW from memory.  */
static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    /* The mask word must be doubleword-aligned.  */
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
                        MO_TEUL | MO_ALIGN_8);
    tcg_gen_addi_i64(o->in2, o->in2, 4);
    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK.  */
    tcg_gen_shli_i64(t1, t1, 32);
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    /* Control flow is fully redirected by the new PSW.  */
    return DISAS_NORETURN;
}
3071
/* LPSWE: load a 16-byte (extended) PSW from memory.  */
static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    /* The mask doubleword must be doubleword-aligned.  */
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
                        MO_TEUQ | MO_ALIGN_8);
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    /* Control flow is fully redirected by the new PSW.  */
    return DISAS_NORETURN;
}
3089#endif
3090
/* LOAD ACCESS MULTIPLE: load access registers r1..r3 from memory
   via the helper.  */
static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_lam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}
3100
/* LOAD MULTIPLE (32-bit): load the low halves of registers r1..r3
   (wrapping from 15 to 0) from consecutive words at the operand address.
   The first and last words are loaded before any register is written so
   that a page fault leaves all registers unmodified.  */
static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t1);
        tcg_temp_free(t1);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
    store_reg32_i64(r1, t1);
    store_reg32_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t2);
        tcg_temp_free(t1);
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t1);
    }
    tcg_temp_free(t2);
    tcg_temp_free(t1);

    return DISAS_NEXT;
}
3146
/* LOAD MULTIPLE HIGH: as op_lm32, but the loaded words go into the
   high halves of registers r1..r3.  Same fault-ordering strategy:
   first and last words are loaded before any register write.  */
static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t1);
        tcg_temp_free(t1);
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
    store_reg32h_i64(r1, t1);
    store_reg32h_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t2);
        tcg_temp_free(t1);
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t1);
    }
    tcg_temp_free(t2);
    tcg_temp_free(t1);

    return DISAS_NEXT;
}
3192
/* LOAD MULTIPLE (64-bit, LMG): load full registers r1..r3 from
   consecutive doublewords.  The first value is staged in a temporary so
   that a fault on the last-register load leaves r1 unmodified.  */
static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
        return DISAS_NEXT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
    tcg_gen_mov_i64(regs[r1], t1);
    tcg_temp_free(t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t1);
        return DISAS_NEXT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t1, 8);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t1);
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
    }
    tcg_temp_free(t1);

    return DISAS_NEXT;
}
3233
/* LOAD PAIR DISJOINT: load two operands "interlocked".  Under parallel
   execution this cannot be done atomically with two loads, so the world
   is stopped and the insn retried serially.  */
static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 a1, a2;
    MemOp mop = s->insn->data;

    /* In a parallel context, stop the world and single step.  */
    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        update_psw_addr(s);
        update_cc_op(s);
        gen_exception(EXCP_ATOMIC);
        return DISAS_NORETURN;
    }

    /* In a serial context, perform the two loads ... */
    a1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
    a2 = get_address(s, 0, get_field(s, b2), get_field(s, d2));
    tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
    tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
    tcg_temp_free_i64(a1);
    tcg_temp_free_i64(a2);

    /* ... and indicate that we performed them while interlocked.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}
3259
/* LOAD PAIR FROM QUADWORD: atomic 128-bit load.  Use the non-atomic
   helper when not parallel, the atomic one if the host supports 128-bit
   atomics, otherwise fall back to stop-the-world.  */
static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
{
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        gen_helper_lpq(o->out, cpu_env, o->in2);
    } else if (HAVE_ATOMIC128) {
        gen_helper_lpq_parallel(o->out, cpu_env, o->in2);
    } else {
        gen_helper_exit_atomic(cpu_env);
        return DISAS_NORETURN;
    }
    return_low128(o->out2);
    return DISAS_NEXT;
}
3273
3274#ifndef CONFIG_USER_ONLY
/* LOAD USING REAL ADDRESS: load through the real-address MMU index,
   bypassing dynamic address translation.  Size comes from insn->data.  */
static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_tl(o->out, o->in2, MMU_REAL_IDX, s->insn->data);
    return DISAS_NEXT;
}
3280#endif
3281
/* LOAD AND ZERO RIGHTMOST BYTE: copy in2 with the low 8 bits cleared.  */
static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, -256);
    return DISAS_NEXT;
}
3287
3288static DisasJumpType op_lcbb(DisasContext *s, DisasOps *o)
3289{
3290    const int64_t block_size = (1ull << (get_field(s, m3) + 6));
3291
3292    if (get_field(s, m3) > 6) {
3293        gen_program_exception(s, PGM_SPECIFICATION);
3294        return DISAS_NORETURN;
3295    }
3296
3297    tcg_gen_ori_i64(o->addr1, o->addr1, -block_size);
3298    tcg_gen_neg_i64(o->addr1, o->addr1);
3299    tcg_gen_movi_i64(o->out, 16);
3300    tcg_gen_umin_i64(o->out, o->out, o->addr1);
3301    gen_op_update1_cc_i64(s, CC_OP_LCBB, o->out);
3302    return DISAS_NEXT;
3303}
3304
/* MONITOR CALL: validate the monitor class and, for system emulation,
   invoke the helper; in user mode the insn is a NOP.  */
static DisasJumpType op_mc(DisasContext *s, DisasOps *o)
{
#if !defined(CONFIG_USER_ONLY)
    TCGv_i32 i2;
#endif
    const uint16_t monitor_class = get_field(s, i2);

    /* Only classes 0..255 are valid; higher bits are a specification
       exception.  */
    if (monitor_class & 0xff00) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

#if !defined(CONFIG_USER_ONLY)
    i2 = tcg_const_i32(monitor_class);
    gen_helper_monitor_call(cpu_env, o->addr1, i2);
    tcg_temp_free_i32(i2);
#endif
    /* Defaults to a NOP. */
    return DISAS_NEXT;
}
3325
/* Generic move: hand in2 over as the output.  Ownership (the "global"
   flag) is transferred and in2 cleared so it is not freed twice.  */
static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    o->g_out = o->g_in2;
    o->in2 = NULL;
    o->g_in2 = false;
    return DISAS_NEXT;
}
3334
/* MOVE (MVCDK/MVCSK-style address setup): move in2 to out and set
   access register 1 according to the current address-space control
   in the PSW.  */
static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s, b2);
    TCGv ar1 = tcg_temp_new_i64();

    /* Transfer ownership of in2 to out, as in op_mov2.  */
    o->out = o->in2;
    o->g_out = o->g_in2;
    o->in2 = NULL;
    o->g_in2 = false;

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 0);
        break;
    case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 1);
        break;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        /* In access-register mode with b2 != 0, copy the access register
           selected by the base field; b2 == 0 implies AR 0.  */
        if (b2) {
            tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
        } else {
            tcg_gen_movi_i64(ar1, 0);
        }
        break;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 2);
        break;
    }

    tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
    tcg_temp_free_i64(ar1);

    return DISAS_NEXT;
}
3369
/* Move a register pair: transfer in1/in2 (and ownership) to out/out2.  */
static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
{
    o->out = o->in1;
    o->out2 = o->in2;
    o->g_out = o->g_in1;
    o->g_out2 = o->g_in2;
    o->in1 = NULL;
    o->in2 = NULL;
    o->g_in1 = o->g_in2 = false;
    return DISAS_NEXT;
}
3381
/* MOVE (MVC): byte copy of l1+1 bytes via helper.  */
static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

/* MOVE RIGHT TO LEFT: length comes from register 0.  */
static DisasJumpType op_mvcrl(DisasContext *s, DisasOps *o)
{
    gen_helper_mvcrl(cpu_env, regs[0], o->addr1, o->in2);
    return DISAS_NEXT;
}

/* MOVE INVERSE: copy with the byte order reversed, via helper.  */
static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}
3403
3404static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
3405{
3406    int r1 = get_field(s, r1);
3407    int r2 = get_field(s, r2);
3408    TCGv_i32 t1, t2;
3409
3410    /* r1 and r2 must be even.  */
3411    if (r1 & 1 || r2 & 1) {
3412        gen_program_exception(s, PGM_SPECIFICATION);
3413        return DISAS_NORETURN;
3414    }
3415
3416    t1 = tcg_const_i32(r1);
3417    t2 = tcg_const_i32(r2);
3418    gen_helper_mvcl(cc_op, cpu_env, t1, t2);
3419    tcg_temp_free_i32(t1);
3420    tcg_temp_free_i32(t2);
3421    set_cc_static(s);
3422    return DISAS_NEXT;
3423}
3424
/* MOVE LONG EXTENDED: like MVCL but with a pad byte from the third
   operand; r1 and r3 name even/odd register pairs.  */
static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even.  */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return DISAS_NEXT;
}
3445
/* MOVE LONG UNICODE: double-byte variant of MVCLE; r1 and r3 name
   even/odd register pairs.  */
static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even.  */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return DISAS_NEXT;
}
3466
/* MOVE WITH OPTIONAL SPECIFICATIONS: the operand-access controls come
   from register r3; the helper sets the CC.  */
static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);
    set_cc_static(s);
    return DISAS_NEXT;
}
3474
3475#ifndef CONFIG_USER_ONLY
/* MOVE TO PRIMARY: cross-address-space move; the length register index
   is carried in the l1 field.  The helper sets the CC.  */
static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, l1);
    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* MOVE TO SECONDARY: counterpart of MVCP in the other direction.  */
static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, l1);
    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3491#endif
3492
/* MOVE NUMERICS: copy only the low (digit) nibbles, via helper.  */
static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

/* MOVE WITH OFFSET: nibble-shifted decimal move, via helper.  */
static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}
3508
/* MOVE PAGE: copy one page; flags come from register 0.  The helper
   sets the CC.  */
static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 t2 = tcg_const_i32(get_field(s, r2));

    gen_helper_mvpg(cc_op, cpu_env, regs[0], t1, t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* MOVE STRING: copy up to the terminating byte; the helper sets the CC.  */
static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 t2 = tcg_const_i32(get_field(s, r2));

    gen_helper_mvst(cc_op, cpu_env, t1, t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3532
/* MOVE ZONES: copy only the high (zone) nibbles, via helper.  */
static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}
3540
/* MULTIPLY: low 64 bits of the product.  */
static DisasJumpType op_mul(DisasContext *s, DisasOps *o)
{
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* MULTIPLY LOGICAL: full 128-bit unsigned product in out:out2.  */
static DisasJumpType op_mul128(DisasContext *s, DisasOps *o)
{
    tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* MULTIPLY SINGLE: full 128-bit signed product in out:out2.  */
static DisasJumpType op_muls128(DisasContext *s, DisasOps *o)
{
    tcg_gen_muls2_i64(o->out2, o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3558
/* MULTIPLY (short BFP): 32-bit x 32-bit -> 32-bit, via helper.  */
static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
{
    gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* MULTIPLY (short -> long BFP): widening multiply, via helper.  */
static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* MULTIPLY (long BFP): 64-bit x 64-bit -> 64-bit, via helper.  */
static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* MULTIPLY (extended BFP): 128-bit operands in register pairs.  */
static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}

/* MULTIPLY (long -> extended BFP): widening multiply.  */
static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
3590
/* MULTIPLY AND ADD (short BFP): out = in1 * in2 + f(r3).  */
static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
    gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return DISAS_NEXT;
}

/* MULTIPLY AND ADD (long BFP).  */
static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg(get_field(s, r3));
    gen_helper_madb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return DISAS_NEXT;
}

/* MULTIPLY AND SUBTRACT (short BFP).  */
static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
    gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return DISAS_NEXT;
}

/* MULTIPLY AND SUBTRACT (long BFP).  */
static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg(get_field(s, r3));
    gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return DISAS_NEXT;
}
3622
/* LOAD NEGATIVE: out = -|in2|.  The movcond selects the negated value
   when in2 >= 0, otherwise in2 (already negative) is kept.  */
static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
{
    TCGv_i64 z, n;
    z = tcg_const_i64(0);
    n = tcg_temp_new_i64();
    tcg_gen_neg_i64(n, o->in2);
    tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
    tcg_temp_free_i64(n);
    tcg_temp_free_i64(z);
    return DISAS_NEXT;
}

/* LOAD NEGATIVE (short BFP): force the sign bit of the 32-bit value.  */
static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
    return DISAS_NEXT;
}

/* LOAD NEGATIVE (long BFP): force the sign bit of the 64-bit value.  */
static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
    return DISAS_NEXT;
}

/* LOAD NEGATIVE (extended BFP): force the sign bit in the high half,
   copy the low half unchanged.  */
static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}
3653
/* AND (character, NC): storage-to-storage AND of l1+1 bytes; the helper
   produces the CC.  */
static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}
3662
/* LOAD COMPLEMENT: out = -in2.  */
static DisasJumpType op_neg(DisasContext *s, DisasOps *o)
{
    tcg_gen_neg_i64(o->out, o->in2);
    return DISAS_NEXT;
}

/* LOAD COMPLEMENT (short BFP): flip the sign bit of the 32-bit value.  */
static DisasJumpType op_negf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
    return DISAS_NEXT;
}

/* LOAD COMPLEMENT (long BFP): flip the sign bit of the 64-bit value.  */
static DisasJumpType op_negf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
    return DISAS_NEXT;
}

/* LOAD COMPLEMENT (extended BFP): flip the sign bit in the high half,
   copy the low half unchanged.  */
static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}
3687
/* OR (character, OC): storage-to-storage OR of l1+1 bytes; the helper
   produces the CC.  */
static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* OR: out = in1 | in2.  */
static DisasJumpType op_or(DisasContext *s, DisasOps *o)
{
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3702
/* OR IMMEDIATE (OIHH etc.): OR an immediate into a sub-field of the
   register.  insn->data encodes the field's bit position (low byte)
   and width (high byte).  */
static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    /* We are about to clobber in2 in place, so it must not be a global.  */
    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
3718
/* OR to memory (OI/OIY): with the interlocked-access facility the OR is
   done atomically in memory; otherwise it is a load/modify/store.  In
   both cases the result is recomputed locally for the CC.  */
static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                    s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
3739
/* PACK: convert zoned to packed decimal, via helper.  */
static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_pack(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}
3747
3748static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
3749{
3750    int l2 = get_field(s, l2) + 1;
3751    TCGv_i32 l;
3752
3753    /* The length must not exceed 32 bytes.  */
3754    if (l2 > 32) {
3755        gen_program_exception(s, PGM_SPECIFICATION);
3756        return DISAS_NORETURN;
3757    }
3758    l = tcg_const_i32(l2);
3759    gen_helper_pka(cpu_env, o->addr1, o->in2, l);
3760    tcg_temp_free_i32(l);
3761    return DISAS_NEXT;
3762}
3763
/* PACK UNICODE: convert a Unicode decimal string to packed format.  */
static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
{
    int l2 = get_field(s, l2) + 1;
    TCGv_i32 l;

    /* The length must be even and should not exceed 64 bytes.  */
    if ((l2 & 1) || (l2 > 64)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_const_i32(l2);
    gen_helper_pku(cpu_env, o->addr1, o->in2, l);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}
3779
/* POPULATION COUNT: with m3 bit 8 and the misc-insn-ext3 facility the
   full 64-bit count is produced inline; otherwise the helper computes
   the original per-byte counts.  */
static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o)
{
    const uint8_t m3 = get_field(s, m3);

    if ((m3 & 8) && s390_has_feat(S390_FEAT_MISC_INSTRUCTION_EXT3)) {
        tcg_gen_ctpop_i64(o->out, o->in2);
    } else {
        gen_helper_popcnt(o->out, o->in2);
    }
    return DISAS_NEXT;
}
3791
3792#ifndef CONFIG_USER_ONLY
/* PURGE TLB, via helper.  */
static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
{
    gen_helper_ptlb(cpu_env);
    return DISAS_NEXT;
}
3798#endif
3799
/* ROTATE THEN INSERT SELECTED BITS (RISBG/RISBGN/RISBHG/RISBLG):
   rotate R2 left by i5 and insert bits i3..i4 of the result into R1,
   optionally zeroing the bits of R1 outside the selection (i4 bit 0x80).
   Implemented as extract/deposit where the masks allow, otherwise as
   explicit rotate-and-mask.  */
static DisasJumpType op_risbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s, i3);
    int i4 = get_field(s, i4);
    int i5 = get_field(s, i5);
    int do_zero = i4 & 0x80;
    uint64_t mask, imask, pmask;
    int pos, len, rot;

    /* Adjust the arguments for the specific insn.  */
    switch (s->fields.op2) {
    case 0x55: /* risbg */
    case 0x59: /* risbgn */
        i3 &= 63;
        i4 &= 63;
        pmask = ~0;
        break;
    case 0x5d: /* risbhg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0xffffffff00000000ull;
        break;
    case 0x51: /* risblg */
        i3 = (i3 & 31) + 32;
        i4 = (i4 & 31) + 32;
        pmask = 0x00000000ffffffffull;
        break;
    default:
        g_assert_not_reached();
    }

    /* MASK is the set of bits to be inserted from R2. */
    if (i3 <= i4) {
        /* [0...i3---i4...63] */
        mask = (-1ull >> i3) & (-1ull << (63 - i4));
    } else {
        /* [0---i4...i3---63] */
        mask = (-1ull >> i3) | (-1ull << (63 - i4));
    }
    /* For RISBLG/RISBHG, the wrapping is limited to the high/low doubleword. */
    mask &= pmask;

    /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
       insns, we need to keep the other half of the register.  */
    imask = ~mask | ~pmask;
    if (do_zero) {
        imask = ~pmask;
    }

    /* Note that len may be negative on a wrapped selection (i3 > i4);
       the len > 0 tests below reject that for extract/deposit.  */
    len = i4 - i3 + 1;
    pos = 63 - i4;
    rot = i5 & 63;

    /* In some cases we can implement this with extract.  */
    if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
        tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
        return DISAS_NEXT;
    }

    /* In some cases we can implement this with deposit.  */
    if (len > 0 && (imask == 0 || ~mask == imask)) {
        /* Note that we rotate the bits to be inserted to the lsb, not to
           the position as described in the PoO.  */
        rot = (rot - pos) & 63;
    } else {
        pos = -1;
    }

    /* Rotate the input as necessary.  */
    tcg_gen_rotli_i64(o->in2, o->in2, rot);

    /* Insert the selected bits into the output.  */
    if (pos >= 0) {
        if (imask == 0) {
            tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
        } else {
            tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
        }
    } else if (imask == 0) {
        tcg_gen_andi_i64(o->out, o->in2, mask);
    } else {
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_andi_i64(o->out, o->out, imask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
    }
    return DISAS_NEXT;
}
3887
/* ROTATE THEN <AND|OR|XOR> SELECTED BITS (RNSBG/ROSBG/RXSBG): rotate R2
   left by i5 and combine bits i3..i4 with R1 using the operation chosen
   by the opcode.  i3 bit 0x80 selects the test-only form (R1 unchanged,
   only the CC is set).  */
static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s, i3);
    int i4 = get_field(s, i4);
    int i5 = get_field(s, i5);
    uint64_t mask;

    /* If this is a test-only form, arrange to discard the result.  */
    if (i3 & 0x80) {
        o->out = tcg_temp_new_i64();
        o->g_out = false;
    }

    i3 &= 63;
    i4 &= 63;
    i5 &= 63;

    /* MASK is the set of bits to be operated on from R2.
       Take care for I3/I4 wraparound.  */
    mask = ~0ull >> i3;
    if (i3 <= i4) {
        mask ^= ~0ull >> i4 >> 1;
    } else {
        mask |= ~(~0ull >> i4 >> 1);
    }

    /* Rotate the input as necessary.  */
    tcg_gen_rotli_i64(o->in2, o->in2, i5);

    /* Operate.  */
    switch (s->fields.op2) {
    case 0x54: /* AND */
        /* Bits outside the mask must not affect the AND: force them set.  */
        tcg_gen_ori_i64(o->in2, o->in2, ~mask);
        tcg_gen_and_i64(o->out, o->out, o->in2);
        break;
    case 0x56: /* OR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
        break;
    case 0x57: /* XOR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_xor_i64(o->out, o->out, o->in2);
        break;
    default:
        abort();
    }

    /* Set the CC.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
3940
/* Byte-swap the low 16 bits, zero-extending the result.  */
static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap16_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
    return DISAS_NEXT;
}

/* Byte-swap the low 32 bits, zero-extending the result.  */
static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap32_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
    return DISAS_NEXT;
}

/* Byte-swap the full 64-bit value.  */
static DisasJumpType op_rev64(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap64_i64(o->out, o->in2);
    return DISAS_NEXT;
}
3958
/* ROTATE LEFT SINGLE LOGICAL (32-bit): rotate the low 32 bits of in1 by
   in2, zero-extending the result into out.  */
static DisasJumpType op_rll32(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    TCGv_i32 to = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, o->in1);
    tcg_gen_extrl_i64_i32(t2, o->in2);
    tcg_gen_rotl_i32(to, t1, t2);
    tcg_gen_extu_i32_i64(o->out, to);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    tcg_temp_free_i32(to);
    return DISAS_NEXT;
}

/* ROTATE LEFT SINGLE LOGICAL (64-bit).  */
static DisasJumpType op_rll64(DisasContext *s, DisasOps *o)
{
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3979
3980#ifndef CONFIG_USER_ONLY
/* RESET REFERENCE BIT EXTENDED: the helper sets the CC.  */
static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
{
    gen_helper_rrbe(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SET ADDRESS SPACE CONTROL FAST, via helper.  */
static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
{
    gen_helper_sacf(cpu_env, o->in2);
    /* Addressing mode has changed, so end the block.  */
    return DISAS_TOO_MANY;
}
3994#endif
3995
/* SET ADDRESSING MODE: insn->data selects 24-bit (0), 31-bit (1) or
   64-bit mode; update the PSW mask bits 31-32 accordingly.  */
static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
{
    int sam = s->insn->data;
    TCGv_i64 tsam;
    uint64_t mask;

    /* MASK is the set of address bits representable in the new mode.  */
    switch (sam) {
    case 0:
        mask = 0xffffff;
        break;
    case 1:
        mask = 0x7fffffff;
        break;
    default:
        mask = -1;
        break;
    }

    /* Bizarre but true, we check the address of the current insn for the
       specification exception, not the next to be executed.  Thus the PoO
       documents that Bad Things Happen two bytes before the end.  */
    if (s->base.pc_next & ~mask) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    s->pc_tmp &= mask;

    tsam = tcg_const_i64(sam);
    tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
    tcg_temp_free_i64(tsam);

    /* Always exit the TB, since we (may have) changed execution mode.  */
    return DISAS_TOO_MANY;
}
4030
/* SET ACCESS REGISTER: store the low 32 bits of in2 into AR r1.  */
static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
    return DISAS_NEXT;
}
4037
/* SUBTRACT (short BFP), via helper.  */
static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
{
    gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* SUBTRACT (long BFP), via helper.  */
static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* SUBTRACT (extended BFP): 128-bit operands in register pairs.  */
static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}

/* SQUARE ROOT (short BFP), via helper.  */
static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqeb(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}

/* SQUARE ROOT (long BFP), via helper.  */
static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqdb(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}

/* SQUARE ROOT (extended BFP), via helper.  */
static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
4075
4076#ifndef CONFIG_USER_ONLY
/* SERVICE CALL (SCLP): the helper sets the CC.  */
static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
{
    gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SIGNAL PROCESSOR: the helper performs the order and sets the CC.  */
static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3);
    set_cc_static(s);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}
4094#endif
4095
/* STORE ON CONDITION (STOC/STOCG/STOCFH): store r1 (or its high half)
   only when the m3 condition holds, implemented by branching around the
   store when the inverted condition is true.  */
static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    TCGv_i64 a, h;
    TCGLabel *lab;
    int r1;

    disas_jcc(s, &c, get_field(s, m3));

    /* We want to store when the condition is fulfilled, so branch
       out when it's not */
    c.cond = tcg_invert_cond(c.cond);

    lab = gen_new_label();
    if (c.is_64) {
        tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
    } else {
        tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
    }
    free_compare(&c);

    r1 = get_field(s, r1);
    a = get_address(s, 0, get_field(s, b2), get_field(s, d2));
    /* insn->data distinguishes the three store variants.  */
    switch (s->insn->data) {
    case 1: /* STOCG */
        tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
        break;
    case 0: /* STOC */
        tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
        break;
    case 2: /* STOCFH */
        h = tcg_temp_new_i64();
        tcg_gen_shri_i64(h, regs[r1], 32);
        tcg_gen_qemu_st32(h, a, get_mem_index(s));
        tcg_temp_free_i64(h);
        break;
    default:
        g_assert_not_reached();
    }
    tcg_temp_free_i64(a);

    gen_set_label(lab);
    return DISAS_NEXT;
}
4140
/*
 * SHIFT LEFT SINGLE/ARITHMETIC (SLA family).  insn->data is the bit index
 * of the sign bit (31 for the 32-bit form, 63 for the 64-bit form).
 */
static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t;
    uint64_t sign = 1ull << s->insn->data;
    if (s->insn->data == 31) {
        /* For the 32-bit form, position the operand in the high half so
           the CC helper can detect overflow out of bit 31.  */
        t = tcg_temp_new_i64();
        tcg_gen_shli_i64(t, o->in1, 32);
    } else {
        t = o->in1;
    }
    gen_op_update2_cc_i64(s, CC_OP_SLA, t, o->in2);
    if (s->insn->data == 31) {
        tcg_temp_free_i64(t);
    }
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged.  */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return DISAS_NEXT;
}
4163
/* SHIFT LEFT SINGLE LOGICAL: plain 64-bit shift; operand widths are
   arranged by the decode tables. */
static DisasJumpType op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
4169
/* SHIFT RIGHT SINGLE (arithmetic). */
static DisasJumpType op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
4175
/* SHIFT RIGHT SINGLE LOGICAL. */
static DisasJumpType op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
4181
/* SFPC: SET FPC from the second operand. */
static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
{
    gen_helper_sfpc(cpu_env, o->in2);
    return DISAS_NEXT;
}
4187
/* SFAS: SET FPC AND SIGNAL via helper (may raise a simulated exception). */
static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
{
    gen_helper_sfas(cpu_env, o->in2);
    return DISAS_NEXT;
}
4193
/* SRNM: SET BFP ROUNDING MODE (2-bit form). */
static DisasJumpType op_srnm(DisasContext *s, DisasOps *o)
{
    /* Bits other than 62 and 63 are ignored. Bit 29 is set to zero. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0x3ull);
    gen_helper_srnm(cpu_env, o->addr1);
    return DISAS_NEXT;
}
4201
/* SRNMB: SET BFP ROUNDING MODE (3-bit form). */
static DisasJumpType op_srnmb(DisasContext *s, DisasOps *o)
{
    /* Bits 0-55 are ignored. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0xffull);
    gen_helper_srnm(cpu_env, o->addr1);
    return DISAS_NEXT;
}
4209
/* SRNMT: SET DFP ROUNDING MODE.  Updates the DRM field of the FPC inline. */
static DisasJumpType op_srnmt(DisasContext *s, DisasOps *o)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Bits other than 61-63 are ignored. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0x7ull);

    /* No need to call a helper, we don't implement dfp */
    tcg_gen_ld32u_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_deposit_i64(tmp, tmp, o->addr1, 4, 3);
    tcg_gen_st32_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));

    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
4225
/* SPM: SET PROGRAM MASK.  Loads the CC from bits 2-3 and the program
   mask from bits 4-7 of the first operand's byte 4 (bits 28-31/24-27
   of the 64-bit register image). */
static DisasJumpType op_spm(DisasContext *s, DisasOps *o)
{
    /* Extract the new condition code from bits 28-29 of in1.  */
    tcg_gen_extrl_i64_i32(cc_op, o->in1);
    tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
    set_cc_static(s);

    /* Deposit the 4-bit program mask into the PSW.  */
    tcg_gen_shri_i64(o->in1, o->in1, 24);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
    return DISAS_NEXT;
}
4236
/*
 * ECTG: EXTRACT CPU TIME.  GR0 <- first operand minus CPU timer,
 * GR1 <- second operand, GR r3 <- doubleword at address in GR r3.
 */
static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
{
    int b1 = get_field(s, b1);
    int d1 = get_field(s, d1);
    int b2 = get_field(s, b2);
    int d2 = get_field(s, d2);
    int r3 = get_field(s, r3);
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* fetch all operands first */
    o->in1 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in1, regs[b1], d1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in2, regs[b2], d2);
    o->addr1 = tcg_temp_new_i64();
    gen_addi_and_wrap_i64(s, o->addr1, regs[r3], 0);

    /* load the third operand into r3 before modifying anything */
    tcg_gen_qemu_ld64(regs[r3], o->addr1, get_mem_index(s));

    /* subtract CPU timer from first operand and store in GR0 */
    gen_helper_stpt(tmp, cpu_env);
    tcg_gen_sub_i64(regs[0], o->in1, tmp);

    /* store second operand in GR1 */
    tcg_gen_mov_i64(regs[1], o->in2);

    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
4267
4268#ifndef CONFIG_USER_ONLY
/* SPKA: SET PSW KEY FROM ADDRESS.  The key comes from address bits 56-59. */
static DisasJumpType op_spka(DisasContext *s, DisasOps *o)
{
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
    return DISAS_NEXT;
}
4275
/* SSKE: SET STORAGE KEY EXTENDED via helper. */
static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
{
    gen_helper_sske(cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
4281
/* SSM: SET SYSTEM MASK -- replace the top byte of the PSW mask. */
static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
{
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    s->exit_to_mainloop = true;
    return DISAS_TOO_MANY;
}
4289
/* STAP: STORE CPU ADDRESS -- the core id serves as the CPU address. */
static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, core_id));
    return DISAS_NEXT;
}
4295#endif
4296
/* STCK: STORE CLOCK. */
static DisasJumpType op_stck(DisasContext *s, DisasOps *o)
{
    gen_helper_stck(o->out, cpu_env);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}
4304
/* STCKE: STORE CLOCK EXTENDED -- 16-byte store of the extended TOD value. */
static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
{
    TCGv_i64 c1 = tcg_temp_new_i64();
    TCGv_i64 c2 = tcg_temp_new_i64();
    TCGv_i64 todpr = tcg_temp_new_i64();
    gen_helper_stck(c1, cpu_env);
    /* 16 bit value stored in a uint32_t (only valid bits set) */
    tcg_gen_ld32u_i64(todpr, cpu_env, offsetof(CPUS390XState, todpr));
    /* Shift the 64-bit value into its place as a zero-extended
       104-bit value.  Note that "bit positions 64-103 are always
       non-zero so that they compare differently to STCK"; we set
       the least significant bit to 1.  */
    tcg_gen_shli_i64(c2, c1, 56);
    tcg_gen_shri_i64(c1, c1, 8);
    tcg_gen_ori_i64(c2, c2, 0x10000);
    tcg_gen_or_i64(c2, c2, todpr);
    tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
    tcg_temp_free_i64(c1);
    tcg_temp_free_i64(c2);
    tcg_temp_free_i64(todpr);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}
4331
4332#ifndef CONFIG_USER_ONLY
/* SCK: SET CLOCK.  The helper sets the CC. */
static DisasJumpType op_sck(DisasContext *s, DisasOps *o)
{
    gen_helper_sck(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4339
/* SCKC: SET CLOCK COMPARATOR. */
static DisasJumpType op_sckc(DisasContext *s, DisasOps *o)
{
    gen_helper_sckc(cpu_env, o->in2);
    return DISAS_NEXT;
}
4345
/* SCKPF: SET CLOCK PROGRAMMABLE FIELD, taken from GR0. */
static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o)
{
    gen_helper_sckpf(cpu_env, regs[0]);
    return DISAS_NEXT;
}
4351
/* STCKC: STORE CLOCK COMPARATOR. */
static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
{
    gen_helper_stckc(o->out, cpu_env);
    return DISAS_NEXT;
}
4357
/* STCTG: STORE CONTROL (64-bit), registers r1..r3 to memory at in2. */
static DisasJumpType op_stctg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_stctg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}
4367
/* STCTL: STORE CONTROL (32-bit), registers r1..r3 to memory at in2. */
static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_stctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}
4377
/* STIDP: STORE CPU ID. */
static DisasJumpType op_stidp(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));
    return DISAS_NEXT;
}
4383
/* SPT: SET CPU TIMER. */
static DisasJumpType op_spt(DisasContext *s, DisasOps *o)
{
    gen_helper_spt(cpu_env, o->in2);
    return DISAS_NEXT;
}
4389
/* STFL: STORE FACILITY LIST (to low core; done entirely in the helper). */
static DisasJumpType op_stfl(DisasContext *s, DisasOps *o)
{
    gen_helper_stfl(cpu_env);
    return DISAS_NEXT;
}
4395
/* STPT: STORE CPU TIMER. */
static DisasJumpType op_stpt(DisasContext *s, DisasOps *o)
{
    gen_helper_stpt(o->out, cpu_env);
    return DISAS_NEXT;
}
4401
/* STSI: STORE SYSTEM INFORMATION.  Function code/selectors come from
   GR0 and GR1; the helper sets the CC. */
static DisasJumpType op_stsi(DisasContext *s, DisasOps *o)
{
    gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}
4408
/* SPX: SET PREFIX. */
static DisasJumpType op_spx(DisasContext *s, DisasOps *o)
{
    gen_helper_spx(cpu_env, o->in2);
    return DISAS_NEXT;
}
4414
/* XSCH: CANCEL SUBCHANNEL; subchannel id in GR1, CC set by helper. */
static DisasJumpType op_xsch(DisasContext *s, DisasOps *o)
{
    gen_helper_xsch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}
4421
/* CSCH: CLEAR SUBCHANNEL; subchannel id in GR1, CC set by helper. */
static DisasJumpType op_csch(DisasContext *s, DisasOps *o)
{
    gen_helper_csch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}
4428
/* HSCH: HALT SUBCHANNEL; subchannel id in GR1, CC set by helper. */
static DisasJumpType op_hsch(DisasContext *s, DisasOps *o)
{
    gen_helper_hsch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}
4435
/* MSCH: MODIFY SUBCHANNEL; SCHIB address in in2, CC set by helper. */
static DisasJumpType op_msch(DisasContext *s, DisasOps *o)
{
    gen_helper_msch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4442
/* RCHP: RESET CHANNEL PATH; channel path id in GR1, CC set by helper. */
static DisasJumpType op_rchp(DisasContext *s, DisasOps *o)
{
    gen_helper_rchp(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}
4449
/* RSCH: RESUME SUBCHANNEL; subchannel id in GR1, CC set by helper. */
static DisasJumpType op_rsch(DisasContext *s, DisasOps *o)
{
    gen_helper_rsch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}
4456
/* SAL: SET ADDRESS LIMIT; operand in GR1. */
static DisasJumpType op_sal(DisasContext *s, DisasOps *o)
{
    gen_helper_sal(cpu_env, regs[1]);
    return DISAS_NEXT;
}
4462
/* SCHM: SET CHANNEL MONITOR; operands in GR1, GR2 and the effective address. */
static DisasJumpType op_schm(DisasContext *s, DisasOps *o)
{
    gen_helper_schm(cpu_env, regs[1], regs[2], o->in2);
    return DISAS_NEXT;
}
4468
/* SIGA: SIGNAL ADAPTER -- not implemented; report "not operational". */
static DisasJumpType op_siga(DisasContext *s, DisasOps *o)
{
    /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
    gen_op_movi_cc(s, 3);
    return DISAS_NEXT;
}
4475
/* STCPS: STORE CHANNEL PATH STATUS -- intentionally a no-op here. */
static DisasJumpType op_stcps(DisasContext *s, DisasOps *o)
{
    /* The instruction is suppressed if not provided. */
    return DISAS_NEXT;
}
4481
/* SSCH: START SUBCHANNEL; ORB address in in2, CC set by helper. */
static DisasJumpType op_ssch(DisasContext *s, DisasOps *o)
{
    gen_helper_ssch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4488
/* STSCH: STORE SUBCHANNEL; SCHIB address in in2, CC set by helper. */
static DisasJumpType op_stsch(DisasContext *s, DisasOps *o)
{
    gen_helper_stsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4495
/* STCRW: STORE CHANNEL REPORT WORD; CC set by helper. */
static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o)
{
    gen_helper_stcrw(cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4502
/* TPI: TEST PENDING INTERRUPTION; CC set by helper. */
static DisasJumpType op_tpi(DisasContext *s, DisasOps *o)
{
    gen_helper_tpi(cc_op, cpu_env, o->addr1);
    set_cc_static(s);
    return DISAS_NEXT;
}
4509
/* TSCH: TEST SUBCHANNEL; IRB address in in2, CC set by helper. */
static DisasJumpType op_tsch(DisasContext *s, DisasOps *o)
{
    gen_helper_tsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4516
/* CHSC: CHANNEL SUBSYSTEM CALL; command block address in in2. */
static DisasJumpType op_chsc(DisasContext *s, DisasOps *o)
{
    gen_helper_chsc(cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4523
/* STPX: STORE PREFIX.  Mask down to the architected prefix bits. */
static DisasJumpType op_stpx(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
    return DISAS_NEXT;
}
4530
/*
 * STNSM / STOSM: STORE THEN AND/OR SYSTEM MASK.
 * Opcode 0xac is STNSM (AND the mask), otherwise STOSM (OR the mask).
 */
static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s, i2);
    TCGv_i64 t;

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place.  */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
    tcg_temp_free_i64(t);

    if (s->fields.op == 0xac) {
        /* STNSM: clear the system-mask bits that are zero in i2.  */
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        /* STOSM: set the system-mask bits that are one in i2.  */
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }

    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    s->exit_to_mainloop = true;
    return DISAS_TOO_MANY;
}
4555
/* STURA / STURG: STORE USING REAL ADDRESS.  insn->data carries the MemOp
   size; the store bypasses DAT via MMU_REAL_IDX. */
static DisasJumpType op_stura(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->in1, o->in2, MMU_REAL_IDX, s->insn->data);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        /* PER storage-alteration event for real-address stores.  */
        update_psw_addr(s);
        gen_helper_per_store_real(cpu_env);
    }
    return DISAS_NEXT;
}
4566#endif
4567
/* STFLE: STORE FACILITY LIST EXTENDED; CC set by helper. */
static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
{
    gen_helper_stfle(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4574
/* Byte store of in1 at address in2. */
static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
4580
/* Halfword store of in1 at address in2. */
static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
4586
/* Word store of in1 at address in2. */
static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
4592
/* Doubleword store of in1 at address in2. */
static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
4598
/* STAM: STORE ACCESS MULTIPLE (access registers r1..r3). */
static DisasJumpType op_stam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    gen_helper_stam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}
4608
/*
 * STCM / STCMH / STCMY: STORE CHARACTERS UNDER MASK.  m3 selects which
 * bytes of the 32-bit field (at bit offset "base" within r1) are stored
 * to successive memory bytes.  Contiguous masks become a single wider
 * store; anything else is emitted byte by byte.
 */
static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s, m3);
    int pos, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Bit position of the lowest selected byte within the register.  */
    pos = base + ctz32(m3) * 8;
    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
        break;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
        break;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
        break;

    default:
        /* This is going to be a sequence of shifts and stores.  */
        pos = base + 32 - 8;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_shri_i64(tmp, o->in1, pos);
                tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
                tcg_gen_addi_i64(o->in2, o->in2, 1);
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
4657
/*
 * STM / STMG: STORE MULTIPLE of registers r1..r3 (wrapping at 15).
 * insn->data is the per-register store size (4 or 8 bytes); the loop
 * is unrolled at translation time.
 */
static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    int size = s->insn->data;
    TCGv_i64 tsize = tcg_const_i64(size);

    while (1) {
        if (size == 8) {
            tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
        } else {
            tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
        }
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, tsize);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(tsize);
    return DISAS_NEXT;
}
4681
/*
 * STMH: STORE MULTIPLE HIGH -- store the high 32 bits of registers
 * r1..r3 (wrapping at 15) to successive words.  The shift moves the
 * high half into position for the truncating 32-bit store.
 */
static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    TCGv_i64 t = tcg_temp_new_i64();
    TCGv_i64 t4 = tcg_const_i64(4);
    TCGv_i64 t32 = tcg_const_i64(32);

    while (1) {
        tcg_gen_shl_i64(t, regs[r1], t32);
        tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, t4);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(t);
    tcg_temp_free_i64(t4);
    tcg_temp_free_i64(t32);
    return DISAS_NEXT;
}
4705
/* STPQ: STORE PAIR TO QUADWORD.  Needs a 128-bit atomic store when
   translating for parallel execution; bail to the slow path otherwise. */
static DisasJumpType op_stpq(DisasContext *s, DisasOps *o)
{
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        gen_helper_stpq(cpu_env, o->in2, o->out2, o->out);
    } else if (HAVE_ATOMIC128) {
        gen_helper_stpq_parallel(cpu_env, o->in2, o->out2, o->out);
    } else {
        /* No host 128-bit atomics: restart this insn single-threaded.  */
        gen_helper_exit_atomic(cpu_env);
        return DISAS_NORETURN;
    }
    return DISAS_NEXT;
}
4718
/* SRST: SEARCH STRING; register pair updated and CC set by the helper. */
static DisasJumpType op_srst(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));

    gen_helper_srst(cpu_env, r1, r2);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4731
/* SRSTU: SEARCH STRING UNICODE; register pair updated and CC set by helper. */
static DisasJumpType op_srstu(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));

    gen_helper_srstu(cpu_env, r1, r2);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4744
/* Plain subtraction; CC handling is done by the cout hooks. */
static DisasJumpType op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
4750
/* 64-bit logical subtraction; the borrow (0/-1) lands in cc_src for
   the CC_OP_SUBU condition-code computation. */
static DisasJumpType op_subu64(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_sub2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    return DISAS_NEXT;
}
4757
/* Compute borrow (0, -1) into cc_src.
   Note the deliberate switch ordering: the default case falls through
   into CC_OP_STATIC, which in turn falls through into CC_OP_ADDU's
   carry-to-borrow conversion.  */
static void compute_borrow(DisasContext *s)
{
    switch (s->cc_op) {
    case CC_OP_SUBU:
        /* The borrow value is already in cc_src (0,-1). */
        break;
    default:
        /* Materialize the CC first, then treat it as CC_OP_STATIC.  */
        gen_op_calc_cc(s);
        /* fall through */
    case CC_OP_STATIC:
        /* The carry flag is the msb of CC; compute into cc_src. */
        tcg_gen_extu_i32_i64(cc_src, cc_op);
        tcg_gen_shri_i64(cc_src, cc_src, 1);
        /* fall through */
    case CC_OP_ADDU:
        /* Convert carry (1,0) to borrow (0,-1). */
        tcg_gen_subi_i64(cc_src, cc_src, 1);
        break;
    }
}
4779
/* 32-bit SUBTRACT WITH BORROW (SLB). */
static DisasJumpType op_subb32(DisasContext *s, DisasOps *o)
{
    compute_borrow(s);

    /* Borrow is {0, -1}, so add to subtract. */
    tcg_gen_add_i64(o->out, o->in1, cc_src);
    tcg_gen_sub_i64(o->out, o->out, o->in2);
    return DISAS_NEXT;
}
4789
/* 64-bit SUBTRACT WITH BORROW (SLBG); tracks the outgoing borrow in cc_src. */
static DisasJumpType op_subb64(DisasContext *s, DisasOps *o)
{
    compute_borrow(s);

    /*
     * Borrow is {0, -1}, so add to subtract; replicate the
     * borrow input to produce 128-bit -1 for the addition.
     */
    TCGv_i64 zero = tcg_const_i64(0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, cc_src);
    tcg_gen_sub2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);
    tcg_temp_free_i64(zero);

    return DISAS_NEXT;
}
4805
/* SVC: SUPERVISOR CALL.  Record the SVC number and instruction length
   in the CPU state, then raise the exception. */
static DisasJumpType op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    /* Commit PSW address and CC before leaving the TB.  */
    update_psw_addr(s);
    update_cc_op(s);

    t = tcg_const_i32(get_field(s, i1) & 0xff);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
    tcg_temp_free_i32(t);

    t = tcg_const_i32(s->ilen);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
    tcg_temp_free_i32(t);

    gen_exception(EXCP_SVC);
    return DISAS_NORETURN;
}
4824
4825static DisasJumpType op_tam(DisasContext *s, DisasOps *o)
4826{
4827    int cc = 0;
4828
4829    cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0;
4830    cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0;
4831    gen_op_movi_cc(s, cc);
4832    return DISAS_NEXT;
4833}
4834
/* TCEB: TEST DATA CLASS (short BFP); CC set by helper. */
static DisasJumpType op_tceb(DisasContext *s, DisasOps *o)
{
    gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4841
/* TCDB: TEST DATA CLASS (long BFP); CC set by helper. */
static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4848
/* TCXB: TEST DATA CLASS (extended BFP, 128-bit operand in out/out2). */
static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcxb(cc_op, cpu_env, o->out, o->out2, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4855
4856#ifndef CONFIG_USER_ONLY
4857
/* TB: TEST BLOCK; CC set by helper. */
static DisasJumpType op_testblock(DisasContext *s, DisasOps *o)
{
    gen_helper_testblock(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4864
/* TPROT: TEST PROTECTION; CC set by helper. */
static DisasJumpType op_tprot(DisasContext *s, DisasOps *o)
{
    gen_helper_tprot(cc_op, cpu_env, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4871
4872#endif
4873
/* TP: TEST DECIMAL.  The length field l1 is encoded as length-1. */
static DisasJumpType op_tp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l1 = tcg_const_i32(get_field(s, l1) + 1);
    gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
    tcg_temp_free_i32(l1);
    set_cc_static(s);
    return DISAS_NEXT;
}
4882
/* TR: TRANSLATE l1+1 bytes at addr1 using the table at in2. */
static DisasJumpType op_tr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_tr(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}
4891
/* TRE: TRANSLATE EXTENDED; helper returns the updated register pair,
   low half via return_low128, and sets the CC. */
static DisasJumpType op_tre(DisasContext *s, DisasOps *o)
{
    gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4899
/* TRT: TRANSLATE AND TEST; CC set by helper. */
static DisasJumpType op_trt(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}
4908
/* TRTR: TRANSLATE AND TEST REVERSE; CC set by helper. */
static DisasJumpType op_trtr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}
4917
/*
 * TROO/TROT/TRTO/TRTT: TRANSLATE ONE/TWO TO ONE/TWO.  The low two opcode
 * bits select source/destination element sizes.  With the ETF2-enhancement
 * facility, m3 bit 0 disables the test-character comparison; otherwise the
 * test character is taken from GR0 (truncated to the element width).
 */
static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
    TCGv_i32 sizes = tcg_const_i32(s->insn->opc & 3);
    TCGv_i32 tst = tcg_temp_new_i32();
    int m3 = get_field(s, m3);

    if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
        /* Without the facility, m3 must be treated as zero.  */
        m3 = 0;
    }
    if (m3 & 1) {
        /* -1 signals "no test character" to the helper.  */
        tcg_gen_movi_i32(tst, -1);
    } else {
        tcg_gen_extrl_i64_i32(tst, regs[0]);
        if (s->insn->opc & 3) {
            tcg_gen_ext8u_i32(tst, tst);
        } else {
            tcg_gen_ext16u_i32(tst, tst);
        }
    }
    gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    tcg_temp_free_i32(sizes);
    tcg_temp_free_i32(tst);
    set_cc_static(s);
    return DISAS_NEXT;
}
4948
/* TS: TEST AND SET.  Atomically exchange 0xff into the byte; the old
   byte's leftmost bit becomes the CC (0 or 1). */
static DisasJumpType op_ts(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_const_i32(0xff);
    tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
    tcg_gen_extract_i32(cc_op, t1, 7, 1);
    tcg_temp_free_i32(t1);
    set_cc_static(s);
    return DISAS_NEXT;
}
4958
/* UNPK: UNPACK (packed decimal to zoned). */
static DisasJumpType op_unpk(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
    gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}
4966
/* UNPKA: UNPACK ASCII.  Specification exception for lengths over 32. */
static DisasJumpType op_unpka(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s, l1) + 1;
    TCGv_i32 l;

    /* The length must not exceed 32 bytes.  */
    if (l1 > 32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_const_i32(l1);
    gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}
4983
/* UNPKU: UNPACK UNICODE.  Specification exception for odd lengths or
   lengths over 64. */
static DisasJumpType op_unpku(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s, l1) + 1;
    TCGv_i32 l;

    /* The length must be even and should not exceed 64 bytes.  */
    if ((l1 & 1) || (l1 > 64)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_const_i32(l1);
    gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}
5000
5001
/*
 * XC: EXCLUSIVE OR (character).  When both operands are the same
 * location, x ^ x == 0, so the common idiom "XC a,a" is emitted as an
 * inline zeroing store sequence (for lengths up to 32 bytes); anything
 * else goes through the byte-wise helper, which sets the CC.
 */
static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
{
    int d1 = get_field(s, d1);
    int d2 = get_field(s, d2);
    int b1 = get_field(s, b1);
    int b2 = get_field(s, b2);
    int l = get_field(s, l1);
    TCGv_i32 t32;

    o->addr1 = get_address(s, 0, b1, d1);

    /* If the addresses are identical, this is a store/memset of zero.  */
    if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
        o->in2 = tcg_const_i64(0);

        /* l is encoded as length-1; emit stores from widest to narrowest.  */
        l++;
        while (l >= 8) {
            tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
            l -= 8;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 8);
            }
        }
        if (l >= 4) {
            tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
            l -= 4;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 4);
            }
        }
        if (l >= 2) {
            tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
            l -= 2;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 2);
            }
        }
        if (l) {
            tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
        }
        /* Result of x ^ x is zero, hence CC 0.  */
        gen_op_movi_cc(s, 0);
        return DISAS_NEXT;
    }

    /* But in general we'll defer to a helper.  */
    o->in2 = get_address(s, 0, b2, d2);
    t32 = tcg_const_i32(l);
    gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
    tcg_temp_free_i32(t32);
    set_cc_static(s);
    return DISAS_NEXT;
}
5054
/* Register/memory XOR; CC handling is done by the cout hooks. */
static DisasJumpType op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
5060
/* XIHF/XILF-style immediate XOR into a sub-field of the register.
   insn->data packs the field size (high byte) and bit shift (low byte). */
static DisasJumpType op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    /* in2 is modified in place, so it must not be a global.  */
    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
5076
/* XI: XOR immediate into storage.  With the interlocked-access facility
   the read-modify-write is a single atomic op; otherwise load/xor/store. */
static DisasJumpType op_xi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
5097
/* Produce a zero output operand. */
static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    return DISAS_NEXT;
}
5103
/* Produce a zero output register pair; out2 aliases out, so mark it
   "global" to prevent a double free. */
static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    o->out2 = o->out;
    o->g_out2 = true;
    return DISAS_NEXT;
}
5111
5112#ifndef CONFIG_USER_ONLY
/* CLP: zPCI CALL LOGICAL PROCESSOR; CC set by helper. */
static DisasJumpType op_clp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));

    gen_helper_clp(cpu_env, r2);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}
5122
/* PCILG: zPCI LOAD; CC set by helper. */
static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));

    gen_helper_pcilg(cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}
5134
/* PCISTG: zPCI STORE; CC set by helper. */
static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));

    gen_helper_pcistg(cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}
5146
/* STPCIFC: zPCI STORE PCI FUNCTION CONTROLS; CC set by helper. */
static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 ar = tcg_const_i32(get_field(s, b2));

    gen_helper_stpcifc(cpu_env, r1, o->addr1, ar);
    tcg_temp_free_i32(ar);
    tcg_temp_free_i32(r1);
    set_cc_static(s);
    return DISAS_NEXT;
}
5158
/* SIC: SET INTERRUPTION CONTROLS (AIS facility). */
static DisasJumpType op_sic(DisasContext *s, DisasOps *o)
{
    gen_helper_sic(cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
5164
/* RPCIT: zPCI REFRESH PCI TRANSLATIONS; CC set by helper. */
static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));

    gen_helper_rpcit(cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}
5176
/* PCISTB: zPCI STORE BLOCK; CC set by helper. */
static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
    TCGv_i32 ar = tcg_const_i32(get_field(s, b2));

    gen_helper_pcistb(cpu_env, r1, r3, o->addr1, ar);
    tcg_temp_free_i32(ar);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return DISAS_NEXT;
}
5190
/* MPCIFC: zPCI MODIFY PCI FUNCTION CONTROLS; CC set by helper. */
static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
    TCGv_i32 ar = tcg_const_i32(get_field(s, b2));

    gen_helper_mpcifc(cpu_env, r1, o->addr1, ar);
    tcg_temp_free_i32(ar);
    tcg_temp_free_i32(r1);
    set_cc_static(s);
    return DISAS_NEXT;
}
5202#endif
5203
5204#include "translate_vx.c.inc"
5205
5206/* ====================================================================== */
5207/* The "Cc OUTput" generators.  Given the generated output (and in some cases
5208   the original inputs), update the various cc data structures in order to
5209   be able to compute the new condition code.  */
5210
static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}

static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}

static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}

static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}

/* 32-bit unsigned add was computed in 64 bits, so bit 32 of out is the
   carry: cc_src <- carry (0/1), cc_dst <- 32-bit result. */
static void cout_addu32(DisasContext *s, DisasOps *o)
{
    tcg_gen_shri_i64(cc_src, o->out, 32);
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, cc_dst);
}

/* 64-bit unsigned add: assumes the op generator already left the carry
   in cc_src. */
static void cout_addu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, o->out);
}

static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}

static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}

static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}

static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}

static void cout_f32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
}

static void cout_f64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
}

/* 128-bit FP result: both halves feed the NZ computation. */
static void cout_f128(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
}

static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}

static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}

static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}

static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}

/* Non-zero test on the low 32 bits only; high garbage is discarded. */
static void cout_nz32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}

static void cout_nz64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
}

static void cout_s32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
}

static void cout_s64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
}

static void cout_subs32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
}

static void cout_subs64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
}

/* 32-bit unsigned subtract computed in 64 bits: an arithmetic shift
   replicates the borrow through the high half, so cc_src becomes
   0 (no borrow) or -1 (borrow); cc_dst gets the 32-bit result. */
static void cout_subu32(DisasContext *s, DisasOps *o)
{
    tcg_gen_sari_i64(cc_src, o->out, 32);
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, cc_dst);
}

/* 64-bit unsigned subtract: assumes the op generator already left the
   borrow indication in cc_src. */
static void cout_subu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, o->out);
}

static void cout_tm32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
}

static void cout_tm64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
}

static void cout_muls32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_MULS_32, o->out);
}

static void cout_muls64(DisasContext *s, DisasOps *o)
{
    /* out contains "high" part, out2 contains "low" part of 128 bit result */
    gen_op_update2_cc_i64(s, CC_OP_MULS_64, o->out, o->out2);
}
5361
5362/* ====================================================================== */
5363/* The "PREParation" generators.  These initialize the DisasOps.OUT fields
5364   with the TCG register to which we will write.  Used in combination with
5365   the "wout" generators, in some cases we need a new temporary, and in
5366   some cases we can write to a TCG global.  */
5367
/* Allocate a fresh temporary for the output. */
static void prep_new(DisasContext *s, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}
#define SPEC_prep_new 0

/* Allocate a fresh temporary pair for a double-width output. */
static void prep_new_P(DisasContext *s, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}
#define SPEC_prep_new_P 0

/* Write directly into the r1 TCG global; g_out marks it as a global
   so it is not freed like a temporary. */
static void prep_r1(DisasContext *s, DisasOps *o)
{
    o->out = regs[get_field(s, r1)];
    o->g_out = true;
}
#define SPEC_prep_r1 0

/* Write directly into the even/odd register pair r1, r1+1. */
static void prep_r1_P(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->out = regs[r1];
    o->out2 = regs[r1 + 1];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_r1_P SPEC_r1_even

/* Whenever we need x1 in addition to other inputs, we'll load it to out/out2 */
static void prep_x1(DisasContext *s, DisasOps *o)
{
    o->out = load_freg(get_field(s, r1));
    o->out2 = load_freg(get_field(s, r1) + 2);
}
#define SPEC_prep_x1 SPEC_r1_f128
5404
5405/* ====================================================================== */
5406/* The "Write OUTput" generators.  These generally perform some non-trivial
5407   copy of data to TCG globals, or to main memory.  The trivial cases are
5408   generally handled by having a "prep" generator install the TCG global
5409   as the destination of the operation.  */
5410
static void wout_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->out);
}
#define SPEC_wout_r1 0

static void wout_out2_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->out2);
}
#define SPEC_wout_out2_r1 0

/* Deposit the low 8 bits of out into r1, preserving the other bits. */
static void wout_r1_8(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}
#define SPEC_wout_r1_8 0

/* Deposit the low 16 bits of out into r1, preserving the other bits. */
static void wout_r1_16(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}
#define SPEC_wout_r1_16 0

static void wout_r1_32(DisasContext *s, DisasOps *o)
{
    store_reg32_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_r1_32 0

static void wout_r1_32h(DisasContext *s, DisasOps *o)
{
    store_reg32h_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_r1_32h 0

/* Store out/out2 as 32-bit values into the even/odd pair r1, r1+1. */
static void wout_r1_P32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64(r1 + 1, o->out2);
}
#define SPEC_wout_r1_P32 SPEC_r1_even

/* Split a 64-bit result across the even/odd pair: the high 32 bits go
   to r1, the low 32 bits to r1+1. */
static void wout_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    TCGv_i64 t = tcg_temp_new_i64();
    store_reg32_i64(r1 + 1, o->out);
    tcg_gen_shri_i64(t, o->out, 32);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);
}
#define SPEC_wout_r1_D32 SPEC_r1_even

static void wout_r3_P32(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    store_reg32_i64(r3, o->out);
    store_reg32_i64(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P32 SPEC_r3_even

static void wout_r3_P64(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    store_reg(r3, o->out);
    store_reg(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P64 SPEC_r3_even

static void wout_e1(DisasContext *s, DisasOps *o)
{
    store_freg32_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_e1 0

static void wout_f1(DisasContext *s, DisasOps *o)
{
    store_freg(get_field(s, r1), o->out);
}
#define SPEC_wout_f1 0

/* Store a 128-bit FP result into the register pair f1, f1+2. */
static void wout_x1(DisasContext *s, DisasOps *o)
{
    int f1 = get_field(s, r1);
    store_freg(f1, o->out);
    store_freg(f1 + 2, o->out2);
}
#define SPEC_wout_x1 SPEC_r1_f128

/* Store only when r1 and r2 name different registers. */
static void wout_cond_r1r2_32(DisasContext *s, DisasOps *o)
{
    if (get_field(s, r1) != get_field(s, r2)) {
        store_reg32_i64(get_field(s, r1), o->out);
    }
}
#define SPEC_wout_cond_r1r2_32 0

/* Store only when r1 and r2 name different FP registers. */
static void wout_cond_e1e2(DisasContext *s, DisasOps *o)
{
    if (get_field(s, r1) != get_field(s, r2)) {
        store_freg32_i64(get_field(s, r1), o->out);
    }
}
#define SPEC_wout_cond_e1e2 0

static void wout_m1_8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_8 0

static void wout_m1_16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_16 0

#ifndef CONFIG_USER_ONLY
/* Alignment-checked store variant (system emulation only). */
static void wout_m1_16a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN);
}
#define SPEC_wout_m1_16a 0
#endif

static void wout_m1_32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_32 0

#ifndef CONFIG_USER_ONLY
/* Alignment-checked store variant (system emulation only). */
static void wout_m1_32a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_wout_m1_32a 0
#endif

static void wout_m1_64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_64 0

#ifndef CONFIG_USER_ONLY
/* Alignment-checked store variant (system emulation only). */
static void wout_m1_64a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUQ | MO_ALIGN);
}
#define SPEC_wout_m1_64a 0
#endif

/* Store to the address held in in2 (second-operand address). */
static void wout_m2_32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
}
#define SPEC_wout_m2_32 0

static void wout_in2_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->in2);
}
#define SPEC_wout_in2_r1 0

static void wout_in2_r1_32(DisasContext *s, DisasOps *o)
{
    store_reg32_i64(get_field(s, r1), o->in2);
}
#define SPEC_wout_in2_r1_32 0
5585
5586/* ====================================================================== */
5587/* The "INput 1" generators.  These load the first operand to an insn.  */
5588
static void in1_r1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r1));
}
#define SPEC_in1_r1 0

/* Use the r1 TCG global in place (no copy); g_in1 marks it so it is
   not freed like a temporary. */
static void in1_r1_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_r1_o 0

static void in1_r1_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1)]);
}
#define SPEC_in1_r1_32s 0

static void in1_r1_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1)]);
}
#define SPEC_in1_r1_32u 0

/* High 32 bits of r1, shifted down into the low half. */
static void in1_r1_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r1)], 32);
}
#define SPEC_in1_r1_sr32 0

/* The odd register of the even/odd pair starting at r1. */
static void in1_r1p1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r1) + 1);
}
#define SPEC_in1_r1p1 SPEC_r1_even

static void in1_r1p1_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r1) + 1];
    o->g_in1 = true;
}
#define SPEC_in1_r1p1_o SPEC_r1_even

static void in1_r1p1_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1) + 1]);
}
#define SPEC_in1_r1p1_32s SPEC_r1_even

static void in1_r1p1_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1) + 1]);
}
#define SPEC_in1_r1p1_32u SPEC_r1_even

/* Concatenate the even/odd pair into one 64-bit value: r1 supplies the
   high 32 bits, r1+1 the low 32 bits. */
static void in1_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}
#define SPEC_in1_r1_D32 SPEC_r1_even

static void in1_r2(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r2));
}
#define SPEC_in1_r2 0

static void in1_r2_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r2)], 32);
}
#define SPEC_in1_r2_sr32 0

static void in1_r2_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r2)]);
}
#define SPEC_in1_r2_32u 0

static void in1_r3(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r3));
}
#define SPEC_in1_r3 0

static void in1_r3_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_r3_o 0

static void in1_r3_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r3)]);
}
#define SPEC_in1_r3_32s 0

static void in1_r3_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r3)]);
}
#define SPEC_in1_r3_32u 0

static void in1_r3_D32(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
}
#define SPEC_in1_r3_D32 SPEC_r3_even

static void in1_r3_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r3)], 32);
}
#define SPEC_in1_r3_sr32 0

static void in1_e1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(s, r1));
}
#define SPEC_in1_e1 0

static void in1_f1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r1));
}
#define SPEC_in1_f1 0

/* Load the high double word of an extended (128-bit) format FP number */
static void in1_x2h(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r2));
}
#define SPEC_in1_x2h SPEC_r2_f128

static void in1_f3(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r3));
}
#define SPEC_in1_f3 0

/* Compute the first-operand effective address into addr1; no access. */
static void in1_la1(DisasContext *s, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
}
#define SPEC_in1_la1 0

/* Second-operand effective address (with optional index x2), placed in
   addr1 for insns that use it as the first operand. */
static void in1_la2(DisasContext *s, DisasOps *o)
{
    int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
}
#define SPEC_in1_la2 0

static void in1_m1_8u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_8u 0

static void in1_m1_16s(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16s 0

static void in1_m1_16u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16u 0

static void in1_m1_32s(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32s 0

static void in1_m1_32u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32u 0

static void in1_m1_64(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_64 0
5805
5806/* ====================================================================== */
5807/* The "INput 2" generators.  These load the second operand to an insn.  */
5808
/* Use the r1 TCG global in place (no copy); g_in2 marks it so it is
   not freed like a temporary. */
static void in2_r1_o(DisasContext *s, DisasOps *o)
{
    o->in2 = regs[get_field(s, r1)];
    o->g_in2 = true;
}
#define SPEC_in2_r1_o 0

static void in2_r1_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r1)]);
}
#define SPEC_in2_r1_16u 0

static void in2_r1_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r1)]);
}
#define SPEC_in2_r1_32u 0

static void in2_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
}
#define SPEC_in2_r1_D32 SPEC_r1_even

static void in2_r2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_reg(get_field(s, r2));
}
#define SPEC_in2_r2 0

static void in2_r2_o(DisasContext *s, DisasOps *o)
{
    o->in2 = regs[get_field(s, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_r2_o 0

/* Load r2, but leave in2 NULL when r2 == 0, for insns that give
   register 0 a special meaning in this position. */
static void in2_r2_nz(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}
#define SPEC_in2_r2_nz 0

static void in2_r2_8s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_8s 0

static void in2_r2_8u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_8u 0

static void in2_r2_16s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_16s 0

static void in2_r2_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_16u 0

static void in2_r3(DisasContext *s, DisasOps *o)
{
    o->in2 = load_reg(get_field(s, r3));
}
#define SPEC_in2_r3 0

/* High 32 bits of r3, shifted down into the low half. */
static void in2_r3_sr32(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(s, r3)], 32);
}
#define SPEC_in2_r3_sr32 0

static void in2_r3_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r3)]);
}
#define SPEC_in2_r3_32u 0

static void in2_r2_32s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_32s 0

static void in2_r2_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_32u 0

static void in2_r2_sr32(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(s, r2)], 32);
}
#define SPEC_in2_r2_sr32 0

static void in2_e2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(s, r2));
}
#define SPEC_in2_e2 0

static void in2_f2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg(get_field(s, r2));
}
#define SPEC_in2_f2 0

/* Load the low double word of an extended (128-bit) format FP number */
static void in2_x2l(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg(get_field(s, r2) + 2);
}
#define SPEC_in2_x2l SPEC_r2_f128

/* Register used as an address, wrapped per the current addressing mode. */
static void in2_ra2(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);

    /* Note: *don't* treat !r2 as 0, use the reg value. */
    o->in2 = tcg_temp_new_i64();
    gen_addi_and_wrap_i64(s, o->in2, regs[r2], 0);
}
#define SPEC_in2_ra2 0

/* Second-operand effective address (with optional index x2). */
static void in2_a2(DisasContext *s, DisasOps *o)
{
    int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
    o->in2 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
}
#define SPEC_in2_a2 0

/* PC-relative address: pc + 2 * i2 (i2 counts halfwords). */
static void in2_ri2(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->base.pc_next + (int64_t)get_field(s, i2) * 2);
}
#define SPEC_in2_ri2 0

/* Shift count: d2(b2) reduced to the low 6 bits (0..63). */
static void in2_sh(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s, b2);
    int d2 = get_field(s, d2);

    if (b2 == 0) {
        o->in2 = tcg_const_i64(d2 & 0x3f);
    } else {
        o->in2 = get_address(s, 0, b2, d2);
        tcg_gen_andi_i64(o->in2, o->in2, 0x3f);
    }
}
#define SPEC_in2_sh 0

static void in2_m2_8u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_8u 0

static void in2_m2_16s(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16s 0

static void in2_m2_16u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16u 0

static void in2_m2_32s(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32s 0

static void in2_m2_32u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32u 0

#ifndef CONFIG_USER_ONLY
/* Alignment-checked load variant (system emulation only). */
static void in2_m2_32ua(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_in2_m2_32ua 0
#endif

static void in2_m2_64(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_64 0

/* 64-bit load whose result is then wrapped per the addressing mode. */
static void in2_m2_64w(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
    gen_addi_and_wrap_i64(s, o->in2, o->in2, 0);
}
#define SPEC_in2_m2_64w 0

#ifndef CONFIG_USER_ONLY
/* Alignment-checked load variant (system emulation only). */
static void in2_m2_64a(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ | MO_ALIGN);
}
#define SPEC_in2_m2_64a 0
#endif

/* Loads through a PC-relative address (see in2_ri2). */
static void in2_mri2_16u(DisasContext *s, DisasOps *o)
{
    in2_ri2(s, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_16u 0

static void in2_mri2_32s(DisasContext *s, DisasOps *o)
{
    in2_ri2(s, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32s 0

static void in2_mri2_32u(DisasContext *s, DisasOps *o)
{
    in2_ri2(s, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32u 0

static void in2_mri2_64(DisasContext *s, DisasOps *o)
{
    in2_ri2(s, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_64 0

/* Immediate operands, with various zero-extensions of the i2 field. */
static void in2_i2(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_const_i64(get_field(s, i2));
}
#define SPEC_in2_i2 0

static void in2_i2_8u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint8_t)get_field(s, i2));
}
#define SPEC_in2_i2_8u 0

static void in2_i2_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint16_t)get_field(s, i2));
}
#define SPEC_in2_i2_16u 0

static void in2_i2_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_const_i64((uint32_t)get_field(s, i2));
}
#define SPEC_in2_i2_32u 0

/* Zero-extended i2 shifted left by the per-insn data amount. */
static void in2_i2_16u_shl(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(s, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0

static void in2_i2_32u_shl(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(s, i2);
    o->in2 = tcg_const_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_32u_shl 0

#ifndef CONFIG_USER_ONLY
/* The raw instruction bits themselves, as an operand for helpers. */
static void in2_insn(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->fields.raw_insn);
}
#define SPEC_in2_insn 0
#endif
6126
6127/* ====================================================================== */
6128
6129/* Find opc within the table of insns.  This is formulated as a switch
6130   statement so that (1) we get compile-time notice of cut-paste errors
6131   for duplicated opcodes, and (2) the compiler generates the binary
6132   search tree, rather than us having to post-process the table.  */
6133
6134#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
6135    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0)
6136
6137#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
6138    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0)
6139
6140#define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \
6141    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL)
6142
6143#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM,
6144
6145enum DisasInsnEnum {
6146#include "insn-data.h.inc"
6147};
6148
6149#undef E
6150#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) {                   \
6151    .opc = OPC,                                                             \
6152    .flags = FL,                                                            \
6153    .fmt = FMT_##FT,                                                        \
6154    .fac = FAC_##FC,                                                        \
6155    .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
6156    .name = #NM,                                                            \
6157    .help_in1 = in1_##I1,                                                   \
6158    .help_in2 = in2_##I2,                                                   \
6159    .help_prep = prep_##P,                                                  \
6160    .help_wout = wout_##W,                                                  \
6161    .help_cout = cout_##CC,                                                 \
6162    .help_op = op_##OP,                                                     \
6163    .data = D                                                               \
6164 },
6165
6166/* Allow 0 to be used for NULL in the table below.  */
6167#define in1_0  NULL
6168#define in2_0  NULL
6169#define prep_0  NULL
6170#define wout_0  NULL
6171#define cout_0  NULL
6172#define op_0  NULL
6173
6174#define SPEC_in1_0 0
6175#define SPEC_in2_0 0
6176#define SPEC_prep_0 0
6177#define SPEC_wout_0 0
6178
6179/* Give smaller names to the various facilities.  */
6180#define FAC_Z           S390_FEAT_ZARCH
6181#define FAC_CASS        S390_FEAT_COMPARE_AND_SWAP_AND_STORE
6182#define FAC_DFP         S390_FEAT_DFP
6183#define FAC_DFPR        S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* DFP-rounding */
6184#define FAC_DO          S390_FEAT_STFLE_45 /* distinct-operands */
6185#define FAC_EE          S390_FEAT_EXECUTE_EXT
6186#define FAC_EI          S390_FEAT_EXTENDED_IMMEDIATE
6187#define FAC_FPE         S390_FEAT_FLOATING_POINT_EXT
6188#define FAC_FPSSH       S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* FPS-sign-handling */
6189#define FAC_FPRGR       S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* FPR-GR-transfer */
6190#define FAC_GIE         S390_FEAT_GENERAL_INSTRUCTIONS_EXT
6191#define FAC_HFP_MA      S390_FEAT_HFP_MADDSUB
6192#define FAC_HW          S390_FEAT_STFLE_45 /* high-word */
6193#define FAC_IEEEE_SIM   S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* IEEE-exception-simulation */
6194#define FAC_MIE         S390_FEAT_STFLE_49 /* misc-instruction-extensions */
6195#define FAC_LAT         S390_FEAT_STFLE_49 /* load-and-trap */
6196#define FAC_LOC         S390_FEAT_STFLE_45 /* load/store on condition 1 */
6197#define FAC_LOC2        S390_FEAT_STFLE_53 /* load/store on condition 2 */
6198#define FAC_LD          S390_FEAT_LONG_DISPLACEMENT
6199#define FAC_PC          S390_FEAT_STFLE_45 /* population count */
6200#define FAC_SCF         S390_FEAT_STORE_CLOCK_FAST
6201#define FAC_SFLE        S390_FEAT_STFLE
6202#define FAC_ILA         S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
6203#define FAC_MVCOS       S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
6204#define FAC_LPP         S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
6205#define FAC_DAT_ENH     S390_FEAT_DAT_ENH
6206#define FAC_E2          S390_FEAT_EXTENDED_TRANSLATION_2
6207#define FAC_EH          S390_FEAT_STFLE_49 /* execution-hint */
6208#define FAC_PPA         S390_FEAT_STFLE_49 /* processor-assist */
6209#define FAC_LZRB        S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
6210#define FAC_ETF3        S390_FEAT_EXTENDED_TRANSLATION_3
6211#define FAC_MSA         S390_FEAT_MSA /* message-security-assist facility */
6212#define FAC_MSA3        S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
6213#define FAC_MSA4        S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
6214#define FAC_MSA5        S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
6215#define FAC_MSA8        S390_FEAT_MSA_EXT_8 /* msa-extension-8 facility */
6216#define FAC_ECT         S390_FEAT_EXTRACT_CPU_TIME
6217#define FAC_PCI         S390_FEAT_ZPCI /* z/PCI facility */
6218#define FAC_AIS         S390_FEAT_ADAPTER_INT_SUPPRESSION
6219#define FAC_V           S390_FEAT_VECTOR /* vector facility */
6220#define FAC_VE          S390_FEAT_VECTOR_ENH  /* vector enhancements facility 1 */
6221#define FAC_VE2         S390_FEAT_VECTOR_ENH2 /* vector enhancements facility 2 */
6222#define FAC_MIE2        S390_FEAT_MISC_INSTRUCTION_EXT2 /* miscellaneous-instruction-extensions facility 2 */
6223#define FAC_MIE3        S390_FEAT_MISC_INSTRUCTION_EXT3 /* miscellaneous-instruction-extensions facility 3 */
6224
/* The decode table: one DisasInsn entry per instruction, generated by
   expanding insn-data.h.inc through the C/D/E/F macros defined above
   (out of view here).  */
static const DisasInsn insn_info[] = {
#include "insn-data.h.inc"
};

#undef E
/* Redefine E so that a second expansion of insn-data.h.inc produces one
   switch case per instruction, mapping the combined opcode to its
   insn_info[] entry.  */
#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \
    case OPC: return &insn_info[insn_ ## NM];

/* Look up the decode-table entry for a combined (op << 8 | op2) opcode.
   Returns NULL for an unknown opcode.  */
static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.h.inc"
    default:
        return NULL;
    }
}

/* Done with the instruction table; drop the generator macros.  */
#undef F
#undef E
#undef D
#undef C
6246
/* Extract a field from the insn.  The INSN should be left-aligned in
   the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principals of Operation.  */

static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
{
    uint32_t r, m;

    /* A zero-sized field marks an unused operand slot in the format.  */
    if (f->size == 0) {
        return;
    }

    /* Zero extract the field from the insn.  */
    r = (insn << f->beg) >> (64 - f->size);

    /* Sign-extend, or un-swap the field as necessary.  */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        assert(f->size <= 32);
        /* Sign-extend from bit (size - 1) via the xor/subtract trick.  */
        m = 1u << (f->size - 1);
        r = (r ^ m) - m;
        break;
    case 2: /* dl+dh split, signed 20 bit. */
        /* The raw field reads as DL(12):DH(8); reassemble into the
           signed 20-bit displacement DH:DL.  */
        r = ((int8_t)r << 12) | (r >> 8);
        break;
    case 3: /* MSB stored in RXB */
        /* Vector register number: bit 4 of the 5-bit register number
           lives in the RXB byte (insn bits 36-39), one bit per operand
           position, selected by where the low 4 bits sit (f->beg).  */
        g_assert(f->size == 4);
        switch (f->beg) {
        case 8:
            r |= extract64(insn, 63 - 36, 1) << 4;
            break;
        case 12:
            r |= extract64(insn, 63 - 37, 1) << 4;
            break;
        case 16:
            r |= extract64(insn, 63 - 38, 1) << 4;
            break;
        case 32:
            r |= extract64(insn, 63 - 39, 1) << 4;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    default:
        abort();
    }

    /*
     * Validate that the "compressed" encoding we selected above is valid.
     * I.e. we haven't made two different original fields overlap.
     */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;

    o->c[f->indexC] = r;
}
6307
/* Lookup the insn at the current PC, extracting the operands into O and
   returning the info struct for the insn.  Returns NULL for invalid insn.
   Also records the insn length in s->ilen and the address of the next
   insn in s->pc_tmp.  */

static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s)
{
    uint64_t insn, pc = s->base.pc_next;
    int op, op2, ilen;
    const DisasInsn *info;

    if (unlikely(s->ex_value)) {
        /* Drop the EX data now, so that it's clear on exception paths.  */
        TCGv_i64 zero = tcg_const_i64(0);
        int i;
        tcg_gen_st_i64(zero, cpu_env, offsetof(CPUS390XState, ex_value));
        tcg_temp_free_i64(zero);

        /* Extract the values saved by EXECUTE: the (modified) insn bytes
           in the high 48 bits, the ilen in the low nibble.  */
        insn = s->ex_value & 0xffffffffffff0000ull;
        ilen = s->ex_value & 0xf;
        /* register insn bytes with translator so plugins work */
        for (i = 0; i < ilen; i++) {
            uint8_t byte = extract64(insn, 56 - (i * 8), 8);
            translator_fake_ldb(byte, pc + i);
        }
        op = insn >> 56;
    } else {
        /* The first opcode byte determines the insn length; read further
           halfwords as needed and left-align the insn in the uint64_t,
           as extract_field expects.  */
        insn = ld_code2(env, s, pc);
        op = (insn >> 8) & 0xff;
        ilen = get_ilen(op);
        switch (ilen) {
        case 2:
            insn = insn << 48;
            break;
        case 4:
            insn = ld_code4(env, s, pc) << 32;
            break;
        case 6:
            insn = (insn << 48) | (ld_code4(env, s, pc + 2) << 16);
            break;
        default:
            g_assert_not_reached();
        }
    }
    s->pc_tmp = s->base.pc_next + ilen;
    s->ilen = ilen;

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0. */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE, IE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        op2 = (insn << 12) >> 60;
        break;
    case 0xc5: /* MII */
    case 0xc7: /* SMI */
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(&s->fields, 0, sizeof(s->fields));
    s->fields.raw_insn = insn;
    s->fields.op = op;
    s->fields.op2 = op2;

    /* Lookup the instruction.  */
    info = lookup_opc(op << 8 | op2);
    s->insn = info;

    /* If we found it, extract the operands.  */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(&s->fields, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
6417
/* Return true if REG names an additional (AFP) floating point register,
   i.e. anything outside the basic set f0, f2, f4 and f6.  */
static bool is_afp_reg(int reg)
{
    return (reg & 1) != 0 || reg > 6;
}
6422
/* Return true if REG is a valid first register of a 128-bit FP pair.
   The valid values are 0,1,4,5,8,9,12,13 -- exactly those register
   numbers with bit 1 clear.  */
static bool is_fp_pair(int reg)
{
    return (reg & 0x2) == 0;
}
6428
/*
 * Decode and translate the single instruction at s->base.pc_next.
 * Emits TCG code for the insn -- or for the appropriate program
 * exception -- and returns the resulting DisasJumpType.  On all
 * paths, s->base.pc_next is advanced past the insn.
 */
static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    DisasJumpType ret = DISAS_NEXT;
    DisasOps o = {};
    bool icount = false;

    /* Search for the insn in the table.  */
    insn = extract_insn(env, s);

    /* Update insn_start now that we know the ILEN.  */
    tcg_set_insn_start_param(s->insn_start, 2, s->ilen);

    /* Not found means unimplemented/illegal opcode.  */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      s->fields.op, s->fields.op2);
        gen_illegal_opcode(s);
        ret = DISAS_NORETURN;
        goto out;
    }

#ifndef CONFIG_USER_ONLY
    /* Report the instruction fetch to PER before executing the insn.  */
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 addr = tcg_const_i64(s->base.pc_next);
        gen_helper_per_ifetch(cpu_env, addr);
        tcg_temp_free_i64(addr);
    }
#endif

    /* process flags */
    if (insn->flags) {
        /* privileged instruction */
        if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) {
            gen_program_exception(s, PGM_PRIVILEGED);
            ret = DISAS_NORETURN;
            goto out;
        }

        /* if AFP is not enabled, instructions and registers are forbidden */
        if (!(s->base.tb->flags & FLAG_MASK_AFP)) {
            uint8_t dxc = 0;

            if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(s, r1))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(s, r2))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(s, r3))) {
                dxc = 1;
            }
            if (insn->flags & IF_BFP) {
                dxc = 2;
            }
            if (insn->flags & IF_DFP) {
                dxc = 3;
            }
            if (insn->flags & IF_VEC) {
                dxc = 0xfe;
            }
            /* Any non-zero DXC means a data exception is required.  */
            if (dxc) {
                gen_data_exception(dxc);
                ret = DISAS_NORETURN;
                goto out;
            }
        }

        /* if vector instructions not enabled, executing them is forbidden */
        if (insn->flags & IF_VEC) {
            if (!((s->base.tb->flags & FLAG_MASK_VECTOR))) {
                gen_data_exception(0xfe);
                ret = DISAS_NORETURN;
                goto out;
            }
        }

        /* input/output is the special case for icount mode */
        if (unlikely(insn->flags & IF_IO)) {
            icount = tb_cflags(s->base.tb) & CF_USE_ICOUNT;
            if (icount) {
                gen_io_start();
            }
        }
    }

    /* Check for insn specification exceptions.  */
    if (insn->spec) {
        if ((insn->spec & SPEC_r1_even && get_field(s, r1) & 1) ||
            (insn->spec & SPEC_r2_even && get_field(s, r2) & 1) ||
            (insn->spec & SPEC_r3_even && get_field(s, r3) & 1) ||
            (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(s, r1))) ||
            (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(s, r2)))) {
            gen_program_exception(s, PGM_SPECIFICATION);
            ret = DISAS_NORETURN;
            goto out;
        }
    }

    /* Implement the instruction.  Each helper stage is optional; the
       in1/in2/prep stages load operands into O, help_op does the work,
       and wout/cout store results and the condition code.  */
    if (insn->help_in1) {
        insn->help_in1(s, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    /* Skip the writeback stages if the op already raised an exception.  */
    if (ret != DISAS_NORETURN) {
        if (insn->help_wout) {
            insn->help_wout(s, &o);
        }
        if (insn->help_cout) {
            insn->help_cout(s, &o);
        }
    }

    /* Free any temporaries created by the helpers; the g_* flags mark
       globals that must not be freed.  */
    if (o.out && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (o.out2 && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (o.in1 && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (o.in2 && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (o.addr1) {
        tcg_temp_free_i64(o.addr1);
    }

    /* io should be the last instruction in tb when icount is enabled */
    if (unlikely(icount && ret == DISAS_NEXT)) {
        ret = DISAS_TOO_MANY;
    }

#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        /* An exception might be triggered, save PSW if not already done.  */
        if (ret == DISAS_NEXT || ret == DISAS_TOO_MANY) {
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
        }

        /* Call the helper to check for a possible PER exception.  */
        gen_helper_per_check_exception(cpu_env);
    }
#endif

out:
    /* Advance to the next instruction.  */
    s->base.pc_next = s->pc_tmp;
    return ret;
}
6589
/* TranslatorOps hook: set up the per-TB DisasContext state.  */
static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* 31-bit mode: mask the start address down to 31 bits before use.  */
    if (!(dc->base.tb->flags & FLAG_MASK_64)) {
        dc->base.pc_first &= 0x7fffffff;
        dc->base.pc_next = dc->base.pc_first;
    }

    dc->cc_op = CC_OP_DYNAMIC;
    /* A non-zero cs_base carries the insn saved by EXECUTE.  */
    dc->ex_value = dc->base.tb->cs_base;
    /* PER tracing and EX emulation both require returning to the
       main loop rather than chaining TBs.  */
    dc->exit_to_mainloop = (dc->base.tb->flags & FLAG_MASK_PER) || dc->ex_value;
}
6604
/* TranslatorOps hook: nothing to emit at the start of a TB.  */
static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}
6608
/* TranslatorOps hook: emit the insn_start op for the next insn.  */
static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* Delay the set of ilen until we've read the insn. */
    tcg_gen_insn_start(dc->base.pc_next, dc->cc_op, 0);
    /* Remember the op so translate_one can patch in the real ilen.  */
    dc->insn_start = tcg_last_op();
}
6617
6618static target_ulong get_next_pc(CPUS390XState *env, DisasContext *s,
6619                                uint64_t pc)
6620{
6621    uint64_t insn = cpu_lduw_code(env, pc);
6622
6623    return pc + get_ilen((insn >> 8) & 0xff);
6624}
6625
/* TranslatorOps hook: translate one insn and decide whether the TB
   must end here.  */
static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    CPUS390XState *env = cs->env_ptr;
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    dc->base.is_jmp = translate_one(env, dc);
    if (dc->base.is_jmp == DISAS_NEXT) {
        /* End the TB after an EXECUTE target, or if either the next
           insn or the one after it would start on a different page.  */
        if (dc->ex_value ||
            !is_same_page(dcbase, dc->base.pc_next) ||
            !is_same_page(dcbase, get_next_pc(env, dc, dc->base.pc_next))) {
            dc->base.is_jmp = DISAS_TOO_MANY;
        }
    }
}
6640
/* TranslatorOps hook: emit the TB epilogue according to how the
   translation loop ended.  Note the deliberate case fallthroughs:
   each later case needs a subset of the earlier cases' updates.  */
static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    switch (dc->base.is_jmp) {
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
        update_psw_addr(dc);
        /* FALLTHRU */
    case DISAS_PC_UPDATED:
        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
           cc op type is in env */
        update_cc_op(dc);
        /* FALLTHRU */
    case DISAS_PC_CC_UPDATED:
        /* Exit the TB, either by raising a debug exception or by return.  */
        if (dc->exit_to_mainloop) {
            tcg_gen_exit_tb(NULL, 0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        break;
    default:
        g_assert_not_reached();
    }
}
6668
6669static void s390x_tr_disas_log(const DisasContextBase *dcbase,
6670                               CPUState *cs, FILE *logfile)
6671{
6672    DisasContext *dc = container_of(dcbase, DisasContext, base);
6673
6674    if (unlikely(dc->ex_value)) {
6675        /* ??? Unfortunately target_disas can't use host memory.  */
6676        fprintf(logfile, "IN: EXECUTE %016" PRIx64, dc->ex_value);
6677    } else {
6678        fprintf(logfile, "IN: %s\n", lookup_symbol(dc->base.pc_first));
6679        target_disas(logfile, cs, dc->base.pc_first, dc->base.tb->size);
6680    }
6681}
6682
/* Hooks consumed by the generic translator_loop.  */
static const TranslatorOps s390x_tr_ops = {
    .init_disas_context = s390x_tr_init_disas_context,
    .tb_start           = s390x_tr_tb_start,
    .insn_start         = s390x_tr_insn_start,
    .translate_insn     = s390x_tr_translate_insn,
    .tb_stop            = s390x_tr_tb_stop,
    .disas_log          = s390x_tr_disas_log,
};
6691
/* Entry point for TB translation: drive the generic translator loop
   with the s390x hooks above.  */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext dc;

    translator_loop(cs, tb, max_insns, pc, host_pc, &s390x_tr_ops, &dc.base);
}
6699
6700void s390x_restore_state_to_opc(CPUState *cs,
6701                                const TranslationBlock *tb,
6702                                const uint64_t *data)
6703{
6704    S390CPU *cpu = S390_CPU(cs);
6705    CPUS390XState *env = &cpu->env;
6706    int cc_op = data[1];
6707
6708    env->psw.addr = data[0];
6709
6710    /* Update the CC opcode if it is not already up-to-date.  */
6711    if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
6712        env->cc_op = cc_op;
6713    }
6714
6715    /* Record ILEN.  */
6716    env->int_pgm_ilen = data[2];
6717}
6718