qemu/target/s390x/translate.c
/*
 *  S/390 translation
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2010 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/* #define DEBUG_INLINE_BRANCHES */
#define S390X_DEBUG_DISAS
/* #define S390X_DEBUG_DISAS_VERBOSE */

#ifdef S390X_DEBUG_DISAS_VERBOSE
#  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif

#include "qemu/osdep.h"
#include "cpu.h"
#include "internal.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "tcg-op-gvec.h"
#include "qemu/log.h"
#include "qemu/host-utils.h"
#include "exec/cpu_ldst.h"
#include "exec/gen-icount.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/translator.h"
#include "exec/log.h"
#include "qemu/atomic128.h"


/* Information that (most) every instruction needs to manipulate.  */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;

struct DisasContext {
    DisasContextBase base;
    const DisasInsn *insn;
    DisasFields *fields;
    uint64_t ex_value;
    /*
     * During translate_one(), pc_tmp is used to determine the instruction
     * to be executed after base.pc_next - e.g. next sequential instruction
     * or a branch target.
     */
    uint64_t pc_tmp;
    uint32_t ilen;
    enum cc_op cc_op;
    bool do_debug;
};

/* Information carried about a condition to be evaluated.  */
typedef struct {
    TCGCond cond:8;
    bool is_64;
    bool g1;
    bool g2;
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;

#ifdef DEBUG_INLINE_BRANCHES
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif

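/*
 * Compute the "link information" stored by branch-and-save style
 * instructions: the full next-instruction address in 64-bit mode, the
 * address with bit 32 set as the addressing-mode indicator in 31-bit
 * mode, and otherwise the 24-bit address deposited into the low word
 * while the high word of the register is preserved.
 */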
static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
{
    TCGv_i64 tmp;

    if (s->base.tb->flags & FLAG_MASK_32) {
        if (s->base.tb->flags & FLAG_MASK_64) {
            tcg_gen_movi_i64(out, pc);
            return;
        }
        pc |= 0x80000000;
    }
    assert(!(s->base.tb->flags & FLAG_MASK_64));
    tmp = tcg_const_i64(pc);
    tcg_gen_deposit_i64(out, out, tmp, 0, 32);
    tcg_temp_free_i64(tmp);
}

static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;
static TCGv_i64 gbea;

static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

static char cpu_reg_names[16][4];
static TCGv_i64 regs[16];

void s390x_translate_init(void)
{
    int i;

    psw_addr = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");
    gbea = tcg_global_mem_new_i64(cpu_env,
                                  offsetof(CPUS390XState, gbea),
                                  "gbea");

    cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }
}

static inline int vec_full_reg_offset(uint8_t reg)
{
    g_assert(reg < 32);
    return offsetof(CPUS390XState, vregs[reg][0].d);
}

static inline int vec_reg_offset(uint8_t reg, uint8_t enr, TCGMemOp es)
{
    /* Convert element size (es) - e.g. MO_8 - to bytes */
    const uint8_t bytes = 1 << es;
    int offs = enr * bytes;

    /*
     * vregs[n][0] is the lowest 8 bytes and vregs[n][1] the highest 8 bytes
     * of the 16 byte vector, on both little and big endian systems.
     *
     * Big Endian (target/possible host)
     * B:  [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
     * HW: [     0][     1][     2][     3] - [     4][     5][     6][     7]
     * W:  [             0][             1] - [             2][             3]
     * DW: [                             0] - [                             1]
     *
     * Little Endian (possible host)
     * B:  [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
     * HW: [     3][     2][     1][     0] - [     7][     6][     5][     4]
     * W:  [             1][             0] - [             3][             2]
     * DW: [                             0] - [                             1]
     *
     * For 16 byte elements, the two 8 byte halves will not form a host
     * int128 if the host is little endian, since they're in the wrong order.
     * Some operations (e.g. xor) do not care. For operations like addition,
     * the two 8 byte elements have to be loaded separately. Let's force all
     * 16 byte operations to handle it in a special way.
     */
    g_assert(es <= MO_64);
#ifndef HOST_WORDS_BIGENDIAN
    offs ^= (8 - bytes);
#endif
    return offs + vec_full_reg_offset(reg);
}

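/*
 * The 16 floating-point registers overlay the first 16 vector registers:
 * FPR n is the leftmost doubleword of vector register n, so both offset
 * helpers below simply address element 0 of the corresponding vector.
 */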
static inline int freg64_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_64);
}

static inline int freg32_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_32);
}

static TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}

static TCGv_i64 load_freg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld_i64(r, cpu_env, freg64_offset(reg));
    return r;
}

static TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld32u_i64(r, cpu_env, freg32_offset(reg));
    return r;
}

static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_st_i64(v, cpu_env, freg64_offset(reg));
}

static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_st32_i64(v, cpu_env, freg32_offset(reg));
}

static void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}

static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->base.pc_next);
}

static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i64(gbea, s->base.pc_next);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_const_i64(s->pc_tmp) : psw_addr;
        gen_helper_per_branch(cpu_env, gbea, next_pc);
        if (to_next) {
            tcg_temp_free_i64(next_pc);
        }
    }
#endif
}

static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->base.pc_next);
        gen_helper_per_branch(cpu_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        TCGv_i64 pc = tcg_const_i64(s->base.pc_next);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
        tcg_temp_free_i64(pc);
    }
#endif
}

static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->base.pc_next);
}

static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}

static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)cpu_lduw_code(env, pc);
}

static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
}

static int get_mem_index(DisasContext *s)
{
    if (!(s->base.tb->flags & FLAG_MASK_DAT)) {
        return MMU_REAL_IDX;
    }

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_PRIMARY_IDX;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_SECONDARY_IDX;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        return MMU_HOME_IDX;
    default:
        tcg_abort();
        break;
    }
}

static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was.  */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    tmp = tcg_const_i32(s->ilen);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* update the psw */
    update_psw_addr(s);

    /* Save off cc.  */
    update_cc_op(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);
}

static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}

static inline void gen_data_exception(uint8_t dxc)
{
    TCGv_i32 tmp = tcg_const_i32(dxc);
    gen_helper_data_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static inline void gen_trap(DisasContext *s)
{
    /* Set DXC to 0xff */
    gen_data_exception(0xff);
}

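/*
 * Add an immediate to SRC and wrap the result according to the current
 * addressing mode: the full 64 bits in 64-bit mode, the low 31 bits in
 * 31-bit mode, and the low 24 bits in 24-bit mode.
 */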
static void gen_addi_and_wrap_i64(DisasContext *s, TCGv_i64 dst, TCGv_i64 src,
                                  int64_t imm)
{
    tcg_gen_addi_i64(dst, src, imm);
    if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_andi_i64(dst, dst, 0x7fffffff);
        } else {
            tcg_gen_andi_i64(dst, dst, 0x00ffffff);
        }
    }
}

static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /*
     * Note that d2 is limited to 20 bits, signed.  If we crop negative
     * displacements early we create larger immediate addends.
     */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        gen_addi_and_wrap_i64(s, tmp, tmp, d2);
    } else if (b2) {
        gen_addi_and_wrap_i64(s, tmp, regs[b2], d2);
    } else if (x2) {
        gen_addi_and_wrap_i64(s, tmp, regs[x2], d2);
    } else if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_movi_i64(tmp, d2 & 0x7fffffff);
        } else {
            tcg_gen_movi_i64(tmp, d2 & 0x00ffffff);
        }
    } else {
        tcg_gen_movi_i64(tmp, d2);
    }

    return tmp;
}

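/*
 * cc_src/cc_dst/cc_vr only hold live data for the computed CC ops;
 * CC_OP_CONST0..CONST3 (values 0..3, hence the "> 3" test below),
 * CC_OP_STATIC and CC_OP_DYNAMIC keep the condition code in env->cc_op
 * instead, so the TCG globals may be discarded before being clobbered.
 */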
static inline bool live_cc_data(DisasContext *s)
{
    return (s->cc_op != CC_OP_DYNAMIC
            && s->cc_op != CC_OP_STATIC
            && s->cc_op > 3);
}

static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}

static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}

static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
}

static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
}

static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
}

/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}

/* calculates cc into cc_op */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = NULL;
    TCGv_i64 dummy = NULL;

    switch (s->cc_op) {
    default:
        dummy = tcg_const_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        local_cc_op = tcg_const_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        break;
    }

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
    case CC_OP_LCBB:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA_32:
    case CC_OP_SLA_64:
    case CC_OP_NZ_F128:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    if (local_cc_op) {
        tcg_temp_free_i32(local_cc_op);
    }
    if (dummy) {
        tcg_temp_free_i64(dummy);
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}

static bool use_exit_tb(DisasContext *s)
{
    return s->base.singlestep_enabled ||
            (tb_cflags(s->base.tb) & CF_LAST_IO) ||
            (s->base.tb->flags & FLAG_MASK_PER);
}

static bool use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (unlikely(use_exit_tb(s))) {
        return false;
    }
#ifndef CONFIG_USER_ONLY
    return (dest & TARGET_PAGE_MASK) == (s->base.tb->pc & TARGET_PAGE_MASK) ||
           (dest & TARGET_PAGE_MASK) == (s->base.pc_next & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}

/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};
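
/*
 * The 4-bit branch mask is indexed by CC: mask bit 8 selects CC 0,
 * bit 4 selects CC 1, bit 2 selects CC 2 and bit 1 selects CC 3.  For
 * the comparison ops above, CC 0/1/2 mean equal/low/high; the entries
 * come in pairs because the CC=3 bit is a don't-care, marked 'x' in
 * the comments.
 */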

/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible.  */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER,
    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE,
    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};

/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op.  */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU_32:
    case CC_OP_ADDU_64:
        switch (mask) {
        case 8 | 2: /* vr == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* vr != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* no carry -> vr >= src */
            cond = TCG_COND_GEU;
            break;
        case 2 | 1: /* carry -> vr < src */
            cond = TCG_COND_LTU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_SUBU_32:
    case CC_OP_SUBU_64:
        /* Note that CC=0 is impossible; treat it as don't-care.  */
        switch (mask & 7) {
        case 2: /* zero -> op1 == op2 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* !zero -> op1 != op2 */
            cond = TCG_COND_NE;
            break;
        case 4: /* borrow (!carry) -> op1 < op2 */
            cond = TCG_COND_LTU;
            break;
        case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
            cond = TCG_COND_GEU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
    case CC_OP_SUBU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
    case CC_OP_SUBU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_vr);
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            tcg_gen_movi_i32(c->u.s32.b, 0);
        } else {
            tcg_gen_extrl_i64_i32(c->u.s32.b, cc_src);
        }
        break;

    case CC_OP_ADDU_64:
        c->u.s64.a = cc_vr;
        c->g1 = true;
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            c->u.s64.b = tcg_const_i64(0);
        } else {
            c->u.s64.b = cc_src;
            c->g2 = true;
        }
        break;

    case CC_OP_STATIC:
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask.  */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}

static void free_compare(DisasCompare *c)
{
    if (!c->g1) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.a);
        } else {
            tcg_temp_free_i32(c->u.s32.a);
        }
    }
    if (!c->g2) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.b);
        } else {
            tcg_temp_free_i32(c->u.s32.b);
        }
    }
}

/* ====================================================================== */
/* Define the insn format enumeration.  */
#define F0(N)                         FMT_##N,
#define F1(N, X1)                     F0(N)
#define F2(N, X1, X2)                 F0(N)
#define F3(N, X1, X2, X3)             F0(N)
#define F4(N, X1, X2, X3, X4)         F0(N)
#define F5(N, X1, X2, X3, X4, X5)     F0(N)
#define F6(N, X1, X2, X3, X4, X5, X6) F0(N)

typedef enum {
#include "insn-format.def"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6

/* Define a structure to hold the decoded fields.  We'll store each inside
   an array indexed by an enum.  In order to conserve memory, we'll arrange
   for fields that do not exist at the same time to overlap, thus the "C"
   for compact.  For checking purposes there is an "O" for original index
   as well that will be applied to availability bitmaps.  */

enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_m5,
    FLD_O_m6,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5,
    FLD_O_v1,
    FLD_O_v2,
    FLD_O_v3,
    FLD_O_v4,
};

enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,
    FLD_C_v1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,
    FLD_C_v3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,
    FLD_C_v4 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,
    FLD_C_m5 = 4,

    FLD_C_d2 = 5,
    FLD_C_m6 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,
    FLD_C_v2 = 6,

    NUM_C_FIELD = 7
};

struct DisasFields {
    uint64_t raw_insn;
    unsigned op:8;
    unsigned op2:8;
    unsigned presentC:16;
    unsigned int presentO;
    int c[NUM_C_FIELD];
};

/* This is the way fields are to be accessed out of DisasFields.  */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)

static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
{
    return (f->presentO >> c) & 1;
}

static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(f, o));
    return f->c[c];
}

/* Describe the layout of each field in each format.  */
typedef struct DisasField {
    unsigned int beg:8;
    unsigned int size:8;
    unsigned int type:2;
    unsigned int indexC:6;
    enum DisasFieldIndexO indexO:8;
} DisasField;

typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;

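/*
 * Each initializer below is { first bit, bit length, type, compact index,
 * original index }.  Judging from the macro usage, the type distinguishes
 * plain unsigned fields (0), signed immediates (1), 20-bit long
 * displacements stored as DL+DH halves (2), and vector register numbers
 * whose fifth bit lives in the RXB field (3).
 */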
#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define V(N, B)       {  B,  4, 3, FLD_C_v##N, FLD_O_v##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }

#define F0(N)                     { { } },
#define F1(N, X1)                 { { X1 } },
#define F2(N, X1, X2)             { { X1, X2 } },
#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
#define F6(N, X1, X2, X3, X4, X5, X6)       { { X1, X2, X3, X4, X5, X6 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.def"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6
#undef R
#undef M
#undef V
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L

/* Generally, we'll extract operands into this structure, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */
typedef struct {
    bool g_out, g_out2, g_in1, g_in2;
    TCGv_i64 out, out2, in1, in2;
    TCGv_i64 addr1;
} DisasOps;

/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */

#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16

/* Return values from translate_one, indicating the state of the TB.  */

/* We are not using a goto_tb (for whatever reason), but have updated
   the PC (for whatever reason), so there's no need to do it again on
   exiting the TB.  */
#define DISAS_PC_UPDATED        DISAS_TARGET_0

/* We have emitted one or more goto_tb.  No fixup required.  */
#define DISAS_GOTO_TB           DISAS_TARGET_1

/* We have updated the PC and CC values.  */
#define DISAS_PC_CC_UPDATED     DISAS_TARGET_2

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the PC for the next instruction to be executed.  */
#define DISAS_PC_STALE          DISAS_TARGET_3

/* We are exiting the TB to the main loop.  */
#define DISAS_PC_STALE_NOCHAIN  DISAS_TARGET_4


/* Instruction flags */
#define IF_AFP1     0x0001      /* r1 is a fp reg for HFP/FPS instructions */
#define IF_AFP2     0x0002      /* r2 is a fp reg for HFP/FPS instructions */
#define IF_AFP3     0x0004      /* r3 is a fp reg for HFP/FPS instructions */
#define IF_BFP      0x0008      /* binary floating point instruction */
#define IF_DFP      0x0010      /* decimal floating point instruction */
#define IF_PRIV     0x0020      /* privileged instruction */
#define IF_VEC      0x0040      /* vector instruction */

struct DisasInsn {
    unsigned opc:16;
    unsigned flags:16;
    DisasFormat fmt:8;
    unsigned fac:8;
    unsigned spec:8;

    const char *name;

    /* Pre-process arguments before HELP_OP.  */
    void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);

    /*
     * Post-process output after HELP_OP.
     * Note that these are not called if HELP_OP returns DISAS_NORETURN.
     */
    void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);

    /* Implement the operation itself.  */
    DisasJumpType (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;
};

/* ====================================================================== */
/* Miscellaneous helpers, used by several operations.  */

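/*
 * Load the shift count for a shift instruction into in2: a b2/d2
 * address calculation whose result is truncated by MASK so that only
 * the bits relevant for the shift width remain.
 */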
static void help_l2_shift(DisasContext *s, DisasFields *f,
                          DisasOps *o, int mask)
{
    int b2 = get_field(f, b2);
    int d2 = get_field(f, d2);

    if (b2 == 0) {
        o->in2 = tcg_const_i64(d2 & mask);
    } else {
        o->in2 = get_address(s, 0, b2, d2);
        tcg_gen_andi_i64(o->in2, o->in2, mask);
    }
}

static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->pc_tmp) {
        per_branch(s, true);
        return DISAS_NEXT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        per_breaking_event(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb(s->base.tb, 0);
        return DISAS_GOTO_TB;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    }
}

static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
                                 bool is_imm, int imm, TCGv_i64 cdest)
{
    DisasJumpType ret;
    uint64_t dest = s->base.pc_next + 2 * imm;
    TCGLabel *lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = DISAS_NEXT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->pc_tmp) {
            /* Branch to next.  */
            per_branch(s, true);
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (!cdest) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = DISAS_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->pc_tmp)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            /* Branch taken.  */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb(s->base.tb, 1);

            ret = DISAS_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = DISAS_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_const_i64(s->pc_tmp);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = DISAS_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}

/* ====================================================================== */
/* The operations.  These perform the bulk of the work for any insn,
   usually after the operands have been loaded and output initialized.  */

static DisasJumpType op_abs(DisasContext *s, DisasOps *o)
{
    TCGv_i64 z, n;
    z = tcg_const_i64(0);
    n = tcg_temp_new_i64();
    tcg_gen_neg_i64(n, o->in2);
    tcg_gen_movcond_i64(TCG_COND_LT, o->out, o->in2, z, n, o->in2);
    tcg_temp_free_i64(n);
    tcg_temp_free_i64(z);
    return DISAS_NEXT;
}

static DisasJumpType op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_addc(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 carry;

    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* The carry flag is the msb of CC, therefore the branch mask that would
       create that comparison is 3.  Feeding the generated comparison to
       setcond produces the carry flag that we desire.  */
    disas_jcc(s, &cmp, 3);
    carry = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(carry, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_add_i64(o->out, o->out, carry);
    tcg_temp_free_i64(carry);
    return DISAS_NEXT;
}

static DisasJumpType op_asi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_STFLE_45)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_STFLE_45)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}

static DisasJumpType op_ni(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_bas(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}

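/*
 * In 24-bit mode BAL saves more than the return address: the low word
 * of r1 receives the instruction-length code in bits 30-31, the
 * condition code in bits 28-29, the program mask in bits 24-27 and the
 * 24-bit return address below that.  The extended addressing modes
 * reuse pc_to_link_info instead.
 */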
1568static void save_link_info(DisasContext *s, DisasOps *o)
1569{
1570    TCGv_i64 t;
1571
1572    if (s->base.tb->flags & (FLAG_MASK_32 | FLAG_MASK_64)) {
1573        pc_to_link_info(o->out, s, s->pc_tmp);
1574        return;
1575    }
1576    gen_op_calc_cc(s);
1577    tcg_gen_andi_i64(o->out, o->out, 0xffffffff00000000ull);
1578    tcg_gen_ori_i64(o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp);
1579    t = tcg_temp_new_i64();
1580    tcg_gen_shri_i64(t, psw_mask, 16);
1581    tcg_gen_andi_i64(t, t, 0x0f000000);
1582    tcg_gen_or_i64(o->out, o->out, t);
1583    tcg_gen_extu_i32_i64(t, cc_op);
1584    tcg_gen_shli_i64(t, t, 28);
1585    tcg_gen_or_i64(o->out, o->out, t);
1586    tcg_temp_free_i64(t);
1587}
1588
1589static DisasJumpType op_bal(DisasContext *s, DisasOps *o)
1590{
1591    save_link_info(s, o);
1592    if (o->in2) {
1593        tcg_gen_mov_i64(psw_addr, o->in2);
1594        per_branch(s, false);
1595        return DISAS_PC_UPDATED;
1596    } else {
1597        return DISAS_NEXT;
1598    }
1599}
1600
1601static DisasJumpType op_basi(DisasContext *s, DisasOps *o)
1602{
1603    pc_to_link_info(o->out, s, s->pc_tmp);
1604    return help_goto_direct(s, s->base.pc_next + 2 * get_field(s->fields, i2));
1605}
1606
1607static DisasJumpType op_bc(DisasContext *s, DisasOps *o)
1608{
1609    int m1 = get_field(s->fields, m1);
1610    bool is_imm = have_field(s->fields, i2);
1611    int imm = is_imm ? get_field(s->fields, i2) : 0;
1612    DisasCompare c;
1613
1614    /* BCR with R2 = 0 causes no branching */
1615    if (have_field(s->fields, r2) && get_field(s->fields, r2) == 0) {
1616        if (m1 == 14) {
1617            /* Perform serialization */
1618            /* FIXME: check for fast-BCR-serialization facility */
1619            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1620        }
1621        if (m1 == 15) {
1622            /* Perform serialization */
1623            /* FIXME: perform checkpoint-synchronisation */
1624            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1625        }
1626        return DISAS_NEXT;
1627    }
1628
1629    disas_jcc(s, &c, m1);
1630    return help_branch(s, &c, is_imm, imm, o->in2);
1631}
1632
1633static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
1634{
1635    int r1 = get_field(s->fields, r1);
1636    bool is_imm = have_field(s->fields, i2);
1637    int imm = is_imm ? get_field(s->fields, i2) : 0;
1638    DisasCompare c;
1639    TCGv_i64 t;
1640
1641    c.cond = TCG_COND_NE;
1642    c.is_64 = false;
1643    c.g1 = false;
1644    c.g2 = false;
1645
1646    t = tcg_temp_new_i64();
1647    tcg_gen_subi_i64(t, regs[r1], 1);
1648    store_reg32_i64(r1, t);
1649    c.u.s32.a = tcg_temp_new_i32();
1650    c.u.s32.b = tcg_const_i32(0);
1651    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1652    tcg_temp_free_i64(t);
1653
1654    return help_branch(s, &c, is_imm, imm, o->in2);
1655}
1656
1657static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
1658{
1659    int r1 = get_field(s->fields, r1);
1660    int imm = get_field(s->fields, i2);
1661    DisasCompare c;
1662    TCGv_i64 t;
1663
1664    c.cond = TCG_COND_NE;
1665    c.is_64 = false;
1666    c.g1 = false;
1667    c.g2 = false;
1668
1669    t = tcg_temp_new_i64();
1670    tcg_gen_shri_i64(t, regs[r1], 32);
1671    tcg_gen_subi_i64(t, t, 1);
1672    store_reg32h_i64(r1, t);
1673    c.u.s32.a = tcg_temp_new_i32();
1674    c.u.s32.b = tcg_const_i32(0);
1675    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1676    tcg_temp_free_i64(t);
1677
1678    return help_branch(s, &c, 1, imm, o->in2);
1679}
1680
1681static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
1682{
1683    int r1 = get_field(s->fields, r1);
1684    bool is_imm = have_field(s->fields, i2);
1685    int imm = is_imm ? get_field(s->fields, i2) : 0;
1686    DisasCompare c;
1687
1688    c.cond = TCG_COND_NE;
1689    c.is_64 = true;
1690    c.g1 = true;
1691    c.g2 = false;
1692
1693    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
1694    c.u.s64.a = regs[r1];
1695    c.u.s64.b = tcg_const_i64(0);
1696
1697    return help_branch(s, &c, is_imm, imm, o->in2);
1698}
1699
1700static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
1701{
1702    int r1 = get_field(s->fields, r1);
1703    int r3 = get_field(s->fields, r3);
1704    bool is_imm = have_field(s->fields, i2);
1705    int imm = is_imm ? get_field(s->fields, i2) : 0;
1706    DisasCompare c;
1707    TCGv_i64 t;
1708
1709    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1710    c.is_64 = false;
1711    c.g1 = false;
1712    c.g2 = false;
1713
1714    t = tcg_temp_new_i64();
1715    tcg_gen_add_i64(t, regs[r1], regs[r3]);
1716    c.u.s32.a = tcg_temp_new_i32();
1717    c.u.s32.b = tcg_temp_new_i32();
1718    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1719    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
1720    store_reg32_i64(r1, t);
1721    tcg_temp_free_i64(t);
1722
1723    return help_branch(s, &c, is_imm, imm, o->in2);
1724}
1725
1726static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
1727{
1728    int r1 = get_field(s->fields, r1);
1729    int r3 = get_field(s->fields, r3);
1730    bool is_imm = have_field(s->fields, i2);
1731    int imm = is_imm ? get_field(s->fields, i2) : 0;
1732    DisasCompare c;
1733
1734    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1735    c.is_64 = true;
1736
1737    if (r1 == (r3 | 1)) {
1738        c.u.s64.b = load_reg(r3 | 1);
1739        c.g2 = false;
1740    } else {
1741        c.u.s64.b = regs[r3 | 1];
1742        c.g2 = true;
1743    }
1744
1745    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
1746    c.u.s64.a = regs[r1];
1747    c.g1 = true;
1748
1749    return help_branch(s, &c, is_imm, imm, o->in2);
1750}
1751
1752static DisasJumpType op_cj(DisasContext *s, DisasOps *o)
1753{
1754    int imm, m3 = get_field(s->fields, m3);
1755    bool is_imm;
1756    DisasCompare c;
1757
1758    c.cond = ltgt_cond[m3];
1759    if (s->insn->data) {
1760        c.cond = tcg_unsigned_cond(c.cond);
1761    }
1762    c.is_64 = c.g1 = c.g2 = true;
1763    c.u.s64.a = o->in1;
1764    c.u.s64.b = o->in2;
1765
1766    is_imm = have_field(s->fields, i4);
1767    if (is_imm) {
1768        imm = get_field(s->fields, i4);
1769    } else {
1770        imm = 0;
1771        o->out = get_address(s, 0, get_field(s->fields, b4),
1772                             get_field(s->fields, d4));
1773    }
1774
1775    return help_branch(s, &c, is_imm, imm, o->out);
1776}
1777
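/*
 * COMPARE AND BRANCH: ltgt_cond[m3] maps the m3 mask to a signed
 * comparison and a non-zero insn->data switches it to the unsigned
 * variants (CLRJ and friends).  When the i4 field is absent, the target
 * is the b4/d4 base-displacement form rather than a relative immediate.
 */
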
1778static DisasJumpType op_ceb(DisasContext *s, DisasOps *o)
1779{
1780    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
1781    set_cc_static(s);
1782    return DISAS_NEXT;
1783}
1784
1785static DisasJumpType op_cdb(DisasContext *s, DisasOps *o)
1786{
1787    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
1788    set_cc_static(s);
1789    return DISAS_NEXT;
1790}
1791
1792static DisasJumpType op_cxb(DisasContext *s, DisasOps *o)
1793{
1794    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
1795    set_cc_static(s);
1796    return DISAS_NEXT;
1797}
1798
1799static TCGv_i32 fpinst_extract_m34(DisasContext *s, bool m3_with_fpe,
1800                                   bool m4_with_fpe)
1801{
1802    const bool fpe = s390_has_feat(S390_FEAT_FLOATING_POINT_EXT);
1803    uint8_t m3 = get_field(s->fields, m3);
1804    uint8_t m4 = get_field(s->fields, m4);
1805
1806    /* m3 field was introduced with FPE */
1807    if (!fpe && m3_with_fpe) {
1808        m3 = 0;
1809    }
1810    /* m4 field was introduced with FPE */
1811    if (!fpe && m4_with_fpe) {
1812        m4 = 0;
1813    }
1814
1815    /* Check for valid rounding modes. Mode 3 was introduced with FPE. */
1816    if (m3 == 2 || m3 > 7 || (!fpe && m3 == 3)) {
1817        gen_program_exception(s, PGM_SPECIFICATION);
1818        return NULL;
1819    }
1820
1821    return tcg_const_i32(deposit32(m3, 4, 4, m4));
1822}
1823
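/*
 * A sketch of the packing used above: deposit32(m3, 4, 4, m4) places m3
 * in bits 0-3 and m4 in bits 4-7, so e.g. m3 = 5 (round toward zero)
 * with m4 = 1 becomes 0x15.  A helper receiving m34 would typically
 * unpack it as below (names here are illustrative only):
 *
 *     uint8_t m3 = extract32(m34, 0, 4);
 *     uint8_t m4 = extract32(m34, 4, 4);
 */
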
1824static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
1825{
1826    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1827
1828    if (!m34) {
1829        return DISAS_NORETURN;
1830    }
1831    gen_helper_cfeb(o->out, cpu_env, o->in2, m34);
1832    tcg_temp_free_i32(m34);
1833    gen_set_cc_nz_f32(s, o->in2);
1834    return DISAS_NEXT;
1835}
1836
1837static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o)
1838{
1839    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1840
1841    if (!m34) {
1842        return DISAS_NORETURN;
1843    }
1844    gen_helper_cfdb(o->out, cpu_env, o->in2, m34);
1845    tcg_temp_free_i32(m34);
1846    gen_set_cc_nz_f64(s, o->in2);
1847    return DISAS_NEXT;
1848}
1849
1850static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o)
1851{
1852    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1853
1854    if (!m34) {
1855        return DISAS_NORETURN;
1856    }
1857    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m34);
1858    tcg_temp_free_i32(m34);
1859    gen_set_cc_nz_f128(s, o->in1, o->in2);
1860    return DISAS_NEXT;
1861}
1862
1863static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o)
1864{
1865    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1866
1867    if (!m34) {
1868        return DISAS_NORETURN;
1869    }
1870    gen_helper_cgeb(o->out, cpu_env, o->in2, m34);
1871    tcg_temp_free_i32(m34);
1872    gen_set_cc_nz_f32(s, o->in2);
1873    return DISAS_NEXT;
1874}
1875
1876static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o)
1877{
1878    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1879
1880    if (!m34) {
1881        return DISAS_NORETURN;
1882    }
1883    gen_helper_cgdb(o->out, cpu_env, o->in2, m34);
1884    tcg_temp_free_i32(m34);
1885    gen_set_cc_nz_f64(s, o->in2);
1886    return DISAS_NEXT;
1887}
1888
1889static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o)
1890{
1891    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1892
1893    if (!m34) {
1894        return DISAS_NORETURN;
1895    }
1896    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m34);
1897    tcg_temp_free_i32(m34);
1898    gen_set_cc_nz_f128(s, o->in1, o->in2);
1899    return DISAS_NEXT;
1900}
1901
1902static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o)
1903{
1904    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1905
1906    if (!m34) {
1907        return DISAS_NORETURN;
1908    }
1909    gen_helper_clfeb(o->out, cpu_env, o->in2, m34);
1910    tcg_temp_free_i32(m34);
1911    gen_set_cc_nz_f32(s, o->in2);
1912    return DISAS_NEXT;
1913}
1914
1915static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o)
1916{
1917    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1918
1919    if (!m34) {
1920        return DISAS_NORETURN;
1921    }
1922    gen_helper_clfdb(o->out, cpu_env, o->in2, m34);
1923    tcg_temp_free_i32(m34);
1924    gen_set_cc_nz_f64(s, o->in2);
1925    return DISAS_NEXT;
1926}
1927
1928static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o)
1929{
1930    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1931
1932    if (!m34) {
1933        return DISAS_NORETURN;
1934    }
1935    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m34);
1936    tcg_temp_free_i32(m34);
1937    gen_set_cc_nz_f128(s, o->in1, o->in2);
1938    return DISAS_NEXT;
1939}
1940
1941static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o)
1942{
1943    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1944
1945    if (!m34) {
1946        return DISAS_NORETURN;
1947    }
1948    gen_helper_clgeb(o->out, cpu_env, o->in2, m34);
1949    tcg_temp_free_i32(m34);
1950    gen_set_cc_nz_f32(s, o->in2);
1951    return DISAS_NEXT;
1952}
1953
1954static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o)
1955{
1956    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1957
1958    if (!m34) {
1959        return DISAS_NORETURN;
1960    }
1961    gen_helper_clgdb(o->out, cpu_env, o->in2, m34);
1962    tcg_temp_free_i32(m34);
1963    gen_set_cc_nz_f64(s, o->in2);
1964    return DISAS_NEXT;
1965}
1966
1967static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
1968{
1969    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1970
1971    if (!m34) {
1972        return DISAS_NORETURN;
1973    }
1974    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m34);
1975    tcg_temp_free_i32(m34);
1976    gen_set_cc_nz_f128(s, o->in1, o->in2);
1977    return DISAS_NEXT;
1978}
1979
1980static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
1981{
1982    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
1983
1984    if (!m34) {
1985        return DISAS_NORETURN;
1986    }
1987    gen_helper_cegb(o->out, cpu_env, o->in2, m34);
1988    tcg_temp_free_i32(m34);
1989    return DISAS_NEXT;
1990}
1991
1992static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
1993{
1994    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
1995
1996    if (!m34) {
1997        return DISAS_NORETURN;
1998    }
1999    gen_helper_cdgb(o->out, cpu_env, o->in2, m34);
2000    tcg_temp_free_i32(m34);
2001    return DISAS_NEXT;
2002}
2003
2004static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
2005{
2006    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2007
2008    if (!m34) {
2009        return DISAS_NORETURN;
2010    }
2011    gen_helper_cxgb(o->out, cpu_env, o->in2, m34);
2012    tcg_temp_free_i32(m34);
2013    return_low128(o->out2);
2014    return DISAS_NEXT;
2015}
2016
2017static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
2018{
2019    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
2020
2021    if (!m34) {
2022        return DISAS_NORETURN;
2023    }
2024    gen_helper_celgb(o->out, cpu_env, o->in2, m34);
2025    tcg_temp_free_i32(m34);
2026    return DISAS_NEXT;
2027}
2028
2029static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
2030{
2031    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
2032
2033    if (!m34) {
2034        return DISAS_NORETURN;
2035    }
2036    gen_helper_cdlgb(o->out, cpu_env, o->in2, m34);
2037    tcg_temp_free_i32(m34);
2038    return DISAS_NEXT;
2039}
2040
2041static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
2042{
2043    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
2044
2045    if (!m34) {
2046        return DISAS_NORETURN;
2047    }
2048    gen_helper_cxlgb(o->out, cpu_env, o->in2, m34);
2049    tcg_temp_free_i32(m34);
2050    return_low128(o->out2);
2051    return DISAS_NEXT;
2052}
2053
2054static DisasJumpType op_cksm(DisasContext *s, DisasOps *o)
2055{
2056    int r2 = get_field(s->fields, r2);
2057    TCGv_i64 len = tcg_temp_new_i64();
2058
2059    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
2060    set_cc_static(s);
2061    return_low128(o->out);
2062
2063    tcg_gen_add_i64(regs[r2], regs[r2], len);
2064    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
2065    tcg_temp_free_i64(len);
2066
2067    return DISAS_NEXT;
2068}
2069
2070static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
2071{
2072    int l = get_field(s->fields, l1);
2073    TCGv_i32 vl;
2074
2075    switch (l + 1) {
2076    case 1:
2077        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
2078        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
2079        break;
2080    case 2:
2081        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
2082        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
2083        break;
2084    case 4:
2085        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
2086        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
2087        break;
2088    case 8:
2089        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
2090        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
2091        break;
2092    default:
2093        vl = tcg_const_i32(l);
2094        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
2095        tcg_temp_free_i32(vl);
2096        set_cc_static(s);
2097        return DISAS_NEXT;
2098    }
2099    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
2100    return DISAS_NEXT;
2101}
2102
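/*
 * For the power-of-two sizes above, CLC is inlined as two loads plus a
 * CC_OP_LTUGTU_64 comparison; e.g. an L field of 3 (operand length
 * l + 1 == 4) takes the 32-bit case.  Every other length falls back to
 * the out-of-line helper.
 */
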
2103static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
2104{
2105    int r1 = get_field(s->fields, r1);
2106    int r2 = get_field(s->fields, r2);
2107    TCGv_i32 t1, t2;
2108
2109    /* r1 and r2 must be even.  */
2110    if (r1 & 1 || r2 & 1) {
2111        gen_program_exception(s, PGM_SPECIFICATION);
2112        return DISAS_NORETURN;
2113    }
2114
2115    t1 = tcg_const_i32(r1);
2116    t2 = tcg_const_i32(r2);
2117    gen_helper_clcl(cc_op, cpu_env, t1, t2);
2118    tcg_temp_free_i32(t1);
2119    tcg_temp_free_i32(t2);
2120    set_cc_static(s);
2121    return DISAS_NEXT;
2122}
2123
2124static DisasJumpType op_clcle(DisasContext *s, DisasOps *o)
2125{
2126    int r1 = get_field(s->fields, r1);
2127    int r3 = get_field(s->fields, r3);
2128    TCGv_i32 t1, t3;
2129
2130    /* r1 and r3 must be even.  */
2131    if (r1 & 1 || r3 & 1) {
2132        gen_program_exception(s, PGM_SPECIFICATION);
2133        return DISAS_NORETURN;
2134    }
2135
2136    t1 = tcg_const_i32(r1);
2137    t3 = tcg_const_i32(r3);
2138    gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
2139    tcg_temp_free_i32(t1);
2140    tcg_temp_free_i32(t3);
2141    set_cc_static(s);
2142    return DISAS_NEXT;
2143}
2144
2145static DisasJumpType op_clclu(DisasContext *s, DisasOps *o)
2146{
2147    int r1 = get_field(s->fields, r1);
2148    int r3 = get_field(s->fields, r3);
2149    TCGv_i32 t1, t3;
2150
2151    /* r1 and r3 must be even.  */
2152    if (r1 & 1 || r3 & 1) {
2153        gen_program_exception(s, PGM_SPECIFICATION);
2154        return DISAS_NORETURN;
2155    }
2156
2157    t1 = tcg_const_i32(r1);
2158    t3 = tcg_const_i32(r3);
2159    gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
2160    tcg_temp_free_i32(t1);
2161    tcg_temp_free_i32(t3);
2162    set_cc_static(s);
2163    return DISAS_NEXT;
2164}
2165
2166static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
2167{
2168    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2169    TCGv_i32 t1 = tcg_temp_new_i32();
2170    tcg_gen_extrl_i64_i32(t1, o->in1);
2171    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
2172    set_cc_static(s);
2173    tcg_temp_free_i32(t1);
2174    tcg_temp_free_i32(m3);
2175    return DISAS_NEXT;
2176}
2177
2178static DisasJumpType op_clst(DisasContext *s, DisasOps *o)
2179{
2180    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
2181    set_cc_static(s);
2182    return_low128(o->in2);
2183    return DISAS_NEXT;
2184}
2185
2186static DisasJumpType op_cps(DisasContext *s, DisasOps *o)
2187{
2188    TCGv_i64 t = tcg_temp_new_i64();
2189    tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
2190    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
2191    tcg_gen_or_i64(o->out, o->out, t);
2192    tcg_temp_free_i64(t);
2193    return DISAS_NEXT;
2194}
2195
2196static DisasJumpType op_cs(DisasContext *s, DisasOps *o)
2197{
2198    int d2 = get_field(s->fields, d2);
2199    int b2 = get_field(s->fields, b2);
2200    TCGv_i64 addr, cc;
2201
2202    /* Note that in1 = R3 (new value) and
2203       in2 = (zero-extended) R1 (expected value).  */
2204
2205    addr = get_address(s, 0, b2, d2);
2206    tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
2207                               get_mem_index(s), s->insn->data | MO_ALIGN);
2208    tcg_temp_free_i64(addr);
2209
2210    /* Are the memory and expected values (un)equal?  Note that this setcond
2211       produces the output CC value, thus the NE sense of the test.  */
2212    cc = tcg_temp_new_i64();
2213    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
2214    tcg_gen_extrl_i64_i32(cc_op, cc);
2215    tcg_temp_free_i64(cc);
2216    set_cc_static(s);
2217
2218    return DISAS_NEXT;
2219}
2220
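/*
 * Worked example of the CC derivation above: if the memory operand
 * equals the expected value, the cmpxchg stores the new value and the
 * setcond yields 0, i.e. CC 0; otherwise memory is left unchanged, out
 * receives the current memory value, and CC is 1.
 */
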
2221static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
2222{
2223    int r1 = get_field(s->fields, r1);
2224    int r3 = get_field(s->fields, r3);
2225    int d2 = get_field(s->fields, d2);
2226    int b2 = get_field(s->fields, b2);
2227    DisasJumpType ret = DISAS_NEXT;
2228    TCGv_i64 addr;
2229    TCGv_i32 t_r1, t_r3;
2230
2231    /* Note that R1:R1+1 = expected value and R3:R3+1 = new value.  */
2232    addr = get_address(s, 0, b2, d2);
2233    t_r1 = tcg_const_i32(r1);
2234    t_r3 = tcg_const_i32(r3);
2235    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
2236        gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
2237    } else if (HAVE_CMPXCHG128) {
2238        gen_helper_cdsg_parallel(cpu_env, addr, t_r1, t_r3);
2239    } else {
2240        gen_helper_exit_atomic(cpu_env);
2241        ret = DISAS_NORETURN;
2242    }
2243    tcg_temp_free_i64(addr);
2244    tcg_temp_free_i32(t_r1);
2245    tcg_temp_free_i32(t_r3);
2246
2247    set_cc_static(s);
2248    return ret;
2249}
2250
2251static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
2252{
2253    int r3 = get_field(s->fields, r3);
2254    TCGv_i32 t_r3 = tcg_const_i32(r3);
2255
2256    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
2257        gen_helper_csst_parallel(cc_op, cpu_env, t_r3, o->addr1, o->in2);
2258    } else {
2259        gen_helper_csst(cc_op, cpu_env, t_r3, o->addr1, o->in2);
2260    }
2261    tcg_temp_free_i32(t_r3);
2262
2263    set_cc_static(s);
2264    return DISAS_NEXT;
2265}
2266
2267#ifndef CONFIG_USER_ONLY
2268static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
2269{
2270    TCGMemOp mop = s->insn->data;
2271    TCGv_i64 addr, old, cc;
2272    TCGLabel *lab = gen_new_label();
2273
2274    /* Note that in1 = R1 (zero-extended expected value),
2275       out = R1 (original reg), out2 = R1+1 (new value).  */
2276
2277    addr = tcg_temp_new_i64();
2278    old = tcg_temp_new_i64();
2279    tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
2280    tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
2281                               get_mem_index(s), mop | MO_ALIGN);
2282    tcg_temp_free_i64(addr);
2283
2284    /* Are the memory and expected values (un)equal?  */
2285    cc = tcg_temp_new_i64();
2286    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
2287    tcg_gen_extrl_i64_i32(cc_op, cc);
2288
2289    /* Write back the output now, before the branch below, so that
2290       we don't need local temps.  */
2291    if ((mop & MO_SIZE) == MO_32) {
2292        tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
2293    } else {
2294        tcg_gen_mov_i64(o->out, old);
2295    }
2296    tcg_temp_free_i64(old);
2297
2298    /* If the comparison was equal, and the LSB of R2 was set,
2299       then we need to flush the TLB (for all cpus).  */
2300    tcg_gen_xori_i64(cc, cc, 1);
2301    tcg_gen_and_i64(cc, cc, o->in2);
2302    tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
2303    tcg_temp_free_i64(cc);
2304
2305    gen_helper_purge(cpu_env);
2306    gen_set_label(lab);
2307
2308    return DISAS_NEXT;
2309}
2310#endif
2311
2312static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
2313{
2314    TCGv_i64 t1 = tcg_temp_new_i64();
2315    TCGv_i32 t2 = tcg_temp_new_i32();
2316    tcg_gen_extrl_i64_i32(t2, o->in1);
2317    gen_helper_cvd(t1, t2);
2318    tcg_temp_free_i32(t2);
2319    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
2320    tcg_temp_free_i64(t1);
2321    return DISAS_NEXT;
2322}
2323
2324static DisasJumpType op_ct(DisasContext *s, DisasOps *o)
2325{
2326    int m3 = get_field(s->fields, m3);
2327    TCGLabel *lab = gen_new_label();
2328    TCGCond c;
2329
2330    c = tcg_invert_cond(ltgt_cond[m3]);
2331    if (s->insn->data) {
2332        c = tcg_unsigned_cond(c);
2333    }
2334    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);
2335
2336    /* Trap.  */
2337    gen_trap(s);
2338
2339    gen_set_label(lab);
2340    return DISAS_NEXT;
2341}
2342
2343static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
2344{
2345    int m3 = get_field(s->fields, m3);
2346    int r1 = get_field(s->fields, r1);
2347    int r2 = get_field(s->fields, r2);
2348    TCGv_i32 tr1, tr2, chk;
2349
2350    /* R1 and R2 must both be even.  */
2351    if ((r1 | r2) & 1) {
2352        gen_program_exception(s, PGM_SPECIFICATION);
2353        return DISAS_NORETURN;
2354    }
2355    if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
2356        m3 = 0;
2357    }
2358
2359    tr1 = tcg_const_i32(r1);
2360    tr2 = tcg_const_i32(r2);
2361    chk = tcg_const_i32(m3);
2362
2363    switch (s->insn->data) {
2364    case 12:
2365        gen_helper_cu12(cc_op, cpu_env, tr1, tr2, chk);
2366        break;
2367    case 14:
2368        gen_helper_cu14(cc_op, cpu_env, tr1, tr2, chk);
2369        break;
2370    case 21:
2371        gen_helper_cu21(cc_op, cpu_env, tr1, tr2, chk);
2372        break;
2373    case 24:
2374        gen_helper_cu24(cc_op, cpu_env, tr1, tr2, chk);
2375        break;
2376    case 41:
2377        gen_helper_cu41(cc_op, cpu_env, tr1, tr2, chk);
2378        break;
2379    case 42:
2380        gen_helper_cu42(cc_op, cpu_env, tr1, tr2, chk);
2381        break;
2382    default:
2383        g_assert_not_reached();
2384    }
2385
2386    tcg_temp_free_i32(tr1);
2387    tcg_temp_free_i32(tr2);
2388    tcg_temp_free_i32(chk);
2389    set_cc_static(s);
2390    return DISAS_NEXT;
2391}
2392
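/*
 * insn->data encodes the source and destination code unit sizes, with
 * 1/2/4 standing for the 1-, 2- and 4-byte encodings: e.g. 12 selects
 * CU12 (convert UTF-8 to UTF-16) and 42 selects CU42 (convert UTF-32
 * to UTF-16).
 */
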
2393#ifndef CONFIG_USER_ONLY
2394static DisasJumpType op_diag(DisasContext *s, DisasOps *o)
2395{
2396    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2397    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2398    TCGv_i32 func_code = tcg_const_i32(get_field(s->fields, i2));
2399
2400    gen_helper_diag(cpu_env, r1, r3, func_code);
2401
2402    tcg_temp_free_i32(func_code);
2403    tcg_temp_free_i32(r3);
2404    tcg_temp_free_i32(r1);
2405    return DISAS_NEXT;
2406}
2407#endif
2408
2409static DisasJumpType op_divs32(DisasContext *s, DisasOps *o)
2410{
2411    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
2412    return_low128(o->out);
2413    return DISAS_NEXT;
2414}
2415
2416static DisasJumpType op_divu32(DisasContext *s, DisasOps *o)
2417{
2418    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
2419    return_low128(o->out);
2420    return DISAS_NEXT;
2421}
2422
2423static DisasJumpType op_divs64(DisasContext *s, DisasOps *o)
2424{
2425    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
2426    return_low128(o->out);
2427    return DISAS_NEXT;
2428}
2429
2430static DisasJumpType op_divu64(DisasContext *s, DisasOps *o)
2431{
2432    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
2433    return_low128(o->out);
2434    return DISAS_NEXT;
2435}
2436
2437static DisasJumpType op_deb(DisasContext *s, DisasOps *o)
2438{
2439    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
2440    return DISAS_NEXT;
2441}
2442
2443static DisasJumpType op_ddb(DisasContext *s, DisasOps *o)
2444{
2445    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
2446    return DISAS_NEXT;
2447}
2448
2449static DisasJumpType op_dxb(DisasContext *s, DisasOps *o)
2450{
2451    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2452    return_low128(o->out2);
2453    return DISAS_NEXT;
2454}
2455
2456static DisasJumpType op_ear(DisasContext *s, DisasOps *o)
2457{
2458    int r2 = get_field(s->fields, r2);
2459    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
2460    return DISAS_NEXT;
2461}
2462
2463static DisasJumpType op_ecag(DisasContext *s, DisasOps *o)
2464{
2465    /* No cache information provided.  */
2466    tcg_gen_movi_i64(o->out, -1);
2467    return DISAS_NEXT;
2468}
2469
2470static DisasJumpType op_efpc(DisasContext *s, DisasOps *o)
2471{
2472    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
2473    return DISAS_NEXT;
2474}
2475
2476static DisasJumpType op_epsw(DisasContext *s, DisasOps *o)
2477{
2478    int r1 = get_field(s->fields, r1);
2479    int r2 = get_field(s->fields, r2);
2480    TCGv_i64 t = tcg_temp_new_i64();
2481
2482    /* Note the "subsequently" in the PoO, which implies a defined result
2483       if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
2484    tcg_gen_shri_i64(t, psw_mask, 32);
2485    store_reg32_i64(r1, t);
2486    if (r2 != 0) {
2487        store_reg32_i64(r2, psw_mask);
2488    }
2489
2490    tcg_temp_free_i64(t);
2491    return DISAS_NEXT;
2492}
2493
2494static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
2495{
2496    int r1 = get_field(s->fields, r1);
2497    TCGv_i32 ilen;
2498    TCGv_i64 v1;
2499
2500    /* Nested EXECUTE is not allowed.  */
2501    if (unlikely(s->ex_value)) {
2502        gen_program_exception(s, PGM_EXECUTE);
2503        return DISAS_NORETURN;
2504    }
2505
2506    update_psw_addr(s);
2507    update_cc_op(s);
2508
2509    if (r1 == 0) {
2510        v1 = tcg_const_i64(0);
2511    } else {
2512        v1 = regs[r1];
2513    }
2514
2515    ilen = tcg_const_i32(s->ilen);
2516    gen_helper_ex(cpu_env, ilen, v1, o->in2);
2517    tcg_temp_free_i32(ilen);
2518
2519    if (r1 == 0) {
2520        tcg_temp_free_i64(v1);
2521    }
2522
2523    return DISAS_PC_CC_UPDATED;
2524}
2525
2526static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
2527{
2528    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2529
2530    if (!m34) {
2531        return DISAS_NORETURN;
2532    }
2533    gen_helper_fieb(o->out, cpu_env, o->in2, m34);
2534    tcg_temp_free_i32(m34);
2535    return DISAS_NEXT;
2536}
2537
2538static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
2539{
2540    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2541
2542    if (!m34) {
2543        return DISAS_NORETURN;
2544    }
2545    gen_helper_fidb(o->out, cpu_env, o->in2, m34);
2546    tcg_temp_free_i32(m34);
2547    return DISAS_NEXT;
2548}
2549
2550static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
2551{
2552    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2553
2554    if (!m34) {
2555        return DISAS_NORETURN;
2556    }
2557    gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m34);
2558    return_low128(o->out2);
2559    tcg_temp_free_i32(m34);
2560    return DISAS_NEXT;
2561}
2562
2563static DisasJumpType op_flogr(DisasContext *s, DisasOps *o)
2564{
2565    /* We'll use the original input for cc computation, since we get to
2566       compare that against 0, which ought to be better than comparing
2567       the real output against 64.  It also lets cc_dst be a convenient
2568       temporary during our computation.  */
2569    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);
2570
2571    /* R1 = IN ? CLZ(IN) : 64.  */
2572    tcg_gen_clzi_i64(o->out, o->in2, 64);
2573
2574    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
2575       value by 64, which is undefined.  But since the shift is 64 iff the
2576       input is zero, we still get the correct result after and'ing.  */
2577    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
2578    tcg_gen_shr_i64(o->out2, o->out2, o->out);
2579    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
2580    return DISAS_NEXT;
2581}
2582
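/*
 * Worked example: for IN = 0x0000400000000001, CLZ gives R1 = 17, the
 * found bit is 0x8000000000000000 >> 17 = 0x0000400000000000, and
 * R1 + 1 = IN & ~(found bit) = 1.  For IN = 0 the shift count is 64,
 * but the and with cc_dst (== 0) still yields the correct 0.
 */
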
2583static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
2584{
2585    int m3 = get_field(s->fields, m3);
2586    int pos, len, base = s->insn->data;
2587    TCGv_i64 tmp = tcg_temp_new_i64();
2588    uint64_t ccm;
2589
2590    switch (m3) {
2591    case 0xf:
2592        /* Effectively a 32-bit load.  */
2593        tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2594        len = 32;
2595        goto one_insert;
2596
2597    case 0xc:
2598    case 0x6:
2599    case 0x3:
2600        /* Effectively a 16-bit load.  */
2601        tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2602        len = 16;
2603        goto one_insert;
2604
2605    case 0x8:
2606    case 0x4:
2607    case 0x2:
2608    case 0x1:
2609        /* Effectively an 8-bit load.  */
2610        tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2611        len = 8;
2612        goto one_insert;
2613
2614    one_insert:
2615        pos = base + ctz32(m3) * 8;
2616        tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2617        ccm = ((1ull << len) - 1) << pos;
2618        break;
2619
2620    default:
2621        /* This is going to be a sequence of loads and inserts.  */
2622        pos = base + 32 - 8;
2623        ccm = 0;
2624        while (m3) {
2625            if (m3 & 0x8) {
2626                tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2627                tcg_gen_addi_i64(o->in2, o->in2, 1);
2628                tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2629                ccm |= 0xffull << pos;
2630            }
2631            m3 = (m3 << 1) & 0xf;
2632            pos -= 8;
2633        }
2634        break;
2635    }
2636
2637    tcg_gen_movi_i64(tmp, ccm);
2638    gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2639    tcg_temp_free_i64(tmp);
2640    return DISAS_NEXT;
2641}
2642
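/*
 * Worked example for the generic case above: ICM with m3 = 0xa (binary
 * 1010) loads two bytes, inserting the first at bit position base + 24
 * and the second at base + 8, giving
 * ccm = (0xffull << (base + 24)) | (0xffull << (base + 8)); base is 0
 * for ICM/ICMY and 32 for ICMH.
 */
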
2643static DisasJumpType op_insi(DisasContext *s, DisasOps *o)
2644{
2645    int shift = s->insn->data & 0xff;
2646    int size = s->insn->data >> 8;
2647    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
2648    return DISAS_NEXT;
2649}
2650
2651static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
2652{
2653    TCGv_i64 t1, t2;
2654
2655    gen_op_calc_cc(s);
2656    t1 = tcg_temp_new_i64();
2657    tcg_gen_extract_i64(t1, psw_mask, 40, 4);
2658    t2 = tcg_temp_new_i64();
2659    tcg_gen_extu_i32_i64(t2, cc_op);
2660    tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
2661    tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);
2662    tcg_temp_free_i64(t1);
2663    tcg_temp_free_i64(t2);
2664    return DISAS_NEXT;
2665}
2666
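/*
 * Net effect of the deposits above: bits 24-31 of r1 receive
 * (cc << 4) | program_mask, so IPM with CC = 2 and a program mask of 3
 * writes 0x23 into that byte and leaves the remaining bits of r1
 * intact.
 */
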
2667#ifndef CONFIG_USER_ONLY
2668static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
2669{
2670    TCGv_i32 m4;
2671
2672    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2673        m4 = tcg_const_i32(get_field(s->fields, m4));
2674    } else {
2675        m4 = tcg_const_i32(0);
2676    }
2677    gen_helper_idte(cpu_env, o->in1, o->in2, m4);
2678    tcg_temp_free_i32(m4);
2679    return DISAS_NEXT;
2680}
2681
2682static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
2683{
2684    TCGv_i32 m4;
2685
2686    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2687        m4 = tcg_const_i32(get_field(s->fields, m4));
2688    } else {
2689        m4 = tcg_const_i32(0);
2690    }
2691    gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
2692    tcg_temp_free_i32(m4);
2693    return DISAS_NEXT;
2694}
2695
2696static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
2697{
2698    gen_helper_iske(o->out, cpu_env, o->in2);
2699    return DISAS_NEXT;
2700}
2701#endif
2702
2703static DisasJumpType op_msa(DisasContext *s, DisasOps *o)
2704{
2705    int r1 = have_field(s->fields, r1) ? get_field(s->fields, r1) : 0;
2706    int r2 = have_field(s->fields, r2) ? get_field(s->fields, r2) : 0;
2707    int r3 = have_field(s->fields, r3) ? get_field(s->fields, r3) : 0;
2708    TCGv_i32 t_r1, t_r2, t_r3, type;
2709
2710    switch (s->insn->data) {
2711    case S390_FEAT_TYPE_KMCTR:
2712        if (r3 & 1 || !r3) {
2713            gen_program_exception(s, PGM_SPECIFICATION);
2714            return DISAS_NORETURN;
2715        }
2716        /* FALL THROUGH */
2717    case S390_FEAT_TYPE_PPNO:
2718    case S390_FEAT_TYPE_KMF:
2719    case S390_FEAT_TYPE_KMC:
2720    case S390_FEAT_TYPE_KMO:
2721    case S390_FEAT_TYPE_KM:
2722        if (r1 & 1 || !r1) {
2723            gen_program_exception(s, PGM_SPECIFICATION);
2724            return DISAS_NORETURN;
2725        }
2726        /* FALL THROUGH */
2727    case S390_FEAT_TYPE_KMAC:
2728    case S390_FEAT_TYPE_KIMD:
2729    case S390_FEAT_TYPE_KLMD:
2730        if (r2 & 1 || !r2) {
2731            gen_program_exception(s, PGM_SPECIFICATION);
2732            return DISAS_NORETURN;
2733        }
2734        /* FALL THROUGH */
2735    case S390_FEAT_TYPE_PCKMO:
2736    case S390_FEAT_TYPE_PCC:
2737        break;
2738    default:
2739        g_assert_not_reached();
2740    }
2741
2742    t_r1 = tcg_const_i32(r1);
2743    t_r2 = tcg_const_i32(r2);
2744    t_r3 = tcg_const_i32(r3);
2745    type = tcg_const_i32(s->insn->data);
2746    gen_helper_msa(cc_op, cpu_env, t_r1, t_r2, t_r3, type);
2747    set_cc_static(s);
2748    tcg_temp_free_i32(t_r1);
2749    tcg_temp_free_i32(t_r2);
2750    tcg_temp_free_i32(t_r3);
2751    tcg_temp_free_i32(type);
2752    return DISAS_NEXT;
2753}
2754
2755static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
2756{
2757    gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
2758    set_cc_static(s);
2759    return DISAS_NEXT;
2760}
2761
2762static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
2763{
2764    gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
2765    set_cc_static(s);
2766    return DISAS_NEXT;
2767}
2768
2769static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
2770{
2771    gen_helper_kxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
2772    set_cc_static(s);
2773    return DISAS_NEXT;
2774}
2775
2776static DisasJumpType op_laa(DisasContext *s, DisasOps *o)
2777{
2778    /* The real output is the original value in memory, which the
2779       atomic ADD leaves in in2.  */
2780    tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2781                                 s->insn->data | MO_ALIGN);
2782    /* However, we need to recompute the addition for setting CC.  */
2783    tcg_gen_add_i64(o->out, o->in1, o->in2);
2784    return DISAS_NEXT;
2785}
2786
2787static DisasJumpType op_lan(DisasContext *s, DisasOps *o)
2788{
2789    /* The real output is the original value in memory, which the
2790       atomic AND leaves in in2.  */
2791    tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2792                                 s->insn->data | MO_ALIGN);
2793    /* However, we need to recompute the operation for setting CC.  */
2794    tcg_gen_and_i64(o->out, o->in1, o->in2);
2795    return DISAS_NEXT;
2796}
2797
2798static DisasJumpType op_lao(DisasContext *s, DisasOps *o)
2799{
2800    /* The real output is the original value in memory, which the
2801       atomic OR leaves in in2.  */
2802    tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2803                                s->insn->data | MO_ALIGN);
2804    /* However, we need to recompute the operation for setting CC.  */
2805    tcg_gen_or_i64(o->out, o->in1, o->in2);
2806    return DISAS_NEXT;
2807}
2808
2809static DisasJumpType op_lax(DisasContext *s, DisasOps *o)
2810{
2811    /* The real output is the original value in memory, which the
2812       atomic XOR leaves in in2.  */
2813    tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2814                                 s->insn->data | MO_ALIGN);
2815    /* However, we need to recompute the operation for setting CC.  */
2816    tcg_gen_xor_i64(o->out, o->in1, o->in2);
2817    return DISAS_NEXT;
2818}
2819
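/*
 * All four LOAD AND <op> handlers above share one shape; as a sketch:
 *
 *     tcg_gen_atomic_fetch_<op>_i64(in2, in2, in1, ...);  old value -> in2
 *     tcg_gen_<op>_i64(out, in1, in2);                    redo <op> for CC
 *
 * The atomic operation both updates memory and yields the old value,
 * which is the architected result; the second operation exists only to
 * feed the CC computation.
 */
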
2820static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
2821{
2822    gen_helper_ldeb(o->out, cpu_env, o->in2);
2823    return DISAS_NEXT;
2824}
2825
2826static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
2827{
2828    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2829
2830    if (!m34) {
2831        return DISAS_NORETURN;
2832    }
2833    gen_helper_ledb(o->out, cpu_env, o->in2, m34);
2834    tcg_temp_free_i32(m34);
2835    return DISAS_NEXT;
2836}
2837
2838static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
2839{
2840    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2841
2842    if (!m34) {
2843        return DISAS_NORETURN;
2844    }
2845    gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2, m34);
2846    tcg_temp_free_i32(m34);
2847    return DISAS_NEXT;
2848}
2849
2850static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
2851{
2852    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2853
2854    if (!m34) {
2855        return DISAS_NORETURN;
2856    }
2857    gen_helper_lexb(o->out, cpu_env, o->in1, o->in2, m34);
2858    tcg_temp_free_i32(m34);
2859    return DISAS_NEXT;
2860}
2861
2862static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
2863{
2864    gen_helper_lxdb(o->out, cpu_env, o->in2);
2865    return_low128(o->out2);
2866    return DISAS_NEXT;
2867}
2868
2869static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
2870{
2871    gen_helper_lxeb(o->out, cpu_env, o->in2);
2872    return_low128(o->out2);
2873    return DISAS_NEXT;
2874}
2875
2876static DisasJumpType op_lde(DisasContext *s, DisasOps *o)
2877{
2878    tcg_gen_shli_i64(o->out, o->in2, 32);
2879    return DISAS_NEXT;
2880}
2881
2882static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
2883{
2884    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2885    return DISAS_NEXT;
2886}
2887
2888static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
2889{
2890    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
2891    return DISAS_NEXT;
2892}
2893
2894static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
2895{
2896    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
2897    return DISAS_NEXT;
2898}
2899
2900static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
2901{
2902    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
2903    return DISAS_NEXT;
2904}
2905
2906static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
2907{
2908    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
2909    return DISAS_NEXT;
2910}
2911
2912static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
2913{
2914    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
2915    return DISAS_NEXT;
2916}
2917
2918static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
2919{
2920    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2921    return DISAS_NEXT;
2922}
2923
2924static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
2925{
2926    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2927    return DISAS_NEXT;
2928}
2929
2930static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
2931{
2932    TCGLabel *lab = gen_new_label();
2933    store_reg32_i64(get_field(s->fields, r1), o->in2);
2934    /* The value is stored even in case of trap. */
2935    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2936    gen_trap(s);
2937    gen_set_label(lab);
2938    return DISAS_NEXT;
2939}
2940
2941static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
2942{
2943    TCGLabel *lab = gen_new_label();
2944    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2945    /* The value is stored even in case of trap. */
2946    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2947    gen_trap(s);
2948    gen_set_label(lab);
2949    return DISAS_NEXT;
2950}
2951
2952static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
2953{
2954    TCGLabel *lab = gen_new_label();
2955    store_reg32h_i64(get_field(s->fields, r1), o->in2);
2956    /* The value is stored even in case of trap. */
2957    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2958    gen_trap(s);
2959    gen_set_label(lab);
2960    return DISAS_NEXT;
2961}
2962
2963static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
2964{
2965    TCGLabel *lab = gen_new_label();
2966    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2967    /* The value is stored even in case of trap. */
2968    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2969    gen_trap(s);
2970    gen_set_label(lab);
2971    return DISAS_NEXT;
2972}
2973
2974static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
2975{
2976    TCGLabel *lab = gen_new_label();
2977    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2978    /* The value is stored even in case of trap. */
2979    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2980    gen_trap(s);
2981    gen_set_label(lab);
2982    return DISAS_NEXT;
2983}
2984
2985static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
2986{
2987    DisasCompare c;
2988
2989    disas_jcc(s, &c, get_field(s->fields, m3));
2990
2991    if (c.is_64) {
2992        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
2993                            o->in2, o->in1);
2994        free_compare(&c);
2995    } else {
2996        TCGv_i32 t32 = tcg_temp_new_i32();
2997        TCGv_i64 t, z;
2998
2999        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
3000        free_compare(&c);
3001
3002        t = tcg_temp_new_i64();
3003        tcg_gen_extu_i32_i64(t, t32);
3004        tcg_temp_free_i32(t32);
3005
3006        z = tcg_const_i64(0);
3007        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
3008        tcg_temp_free_i64(t);
3009        tcg_temp_free_i64(z);
3010    }
3011
3012    return DISAS_NEXT;
3013}
3014
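/*
 * The 32-bit path materializes the condition with setcond and then
 * tests it against zero, since movcond_i64 cannot consume a 32-bit
 * comparison directly.  E.g. LOCGR r1,r2,8 replaces r1 with r2 only
 * when CC == 0 (mask bit 8).
 */
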
3015#ifndef CONFIG_USER_ONLY
3016static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
3017{
3018    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3019    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3020    gen_helper_lctl(cpu_env, r1, o->in2, r3);
3021    tcg_temp_free_i32(r1);
3022    tcg_temp_free_i32(r3);
3023    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
3024    return DISAS_PC_STALE_NOCHAIN;
3025}
3026
3027static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
3028{
3029    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3030    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3031    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
3032    tcg_temp_free_i32(r1);
3033    tcg_temp_free_i32(r3);
3034    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
3035    return DISAS_PC_STALE_NOCHAIN;
3036}
3037
3038static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
3039{
3040    gen_helper_lra(o->out, cpu_env, o->in2);
3041    set_cc_static(s);
3042    return DISAS_NEXT;
3043}
3044
3045static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
3046{
3047    tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
3048    return DISAS_NEXT;
3049}
3050
3051static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
3052{
3053    TCGv_i64 t1, t2;
3054
3055    per_breaking_event(s);
3056
3057    t1 = tcg_temp_new_i64();
3058    t2 = tcg_temp_new_i64();
3059    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
3060                        MO_TEUL | MO_ALIGN_8);
3061    tcg_gen_addi_i64(o->in2, o->in2, 4);
3062    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
3063    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK.  */
3064    tcg_gen_shli_i64(t1, t1, 32);
3065    gen_helper_load_psw(cpu_env, t1, t2);
3066    tcg_temp_free_i64(t1);
3067    tcg_temp_free_i64(t2);
3068    return DISAS_NORETURN;
3069}
3070
3071static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
3072{
3073    TCGv_i64 t1, t2;
3074
3075    per_breaking_event(s);
3076
3077    t1 = tcg_temp_new_i64();
3078    t2 = tcg_temp_new_i64();
3079    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
3080                        MO_TEQ | MO_ALIGN_8);
3081    tcg_gen_addi_i64(o->in2, o->in2, 8);
3082    tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
3083    gen_helper_load_psw(cpu_env, t1, t2);
3084    tcg_temp_free_i64(t1);
3085    tcg_temp_free_i64(t2);
3086    return DISAS_NORETURN;
3087}
3088#endif
3089
3090static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
3091{
3092    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3093    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3094    gen_helper_lam(cpu_env, r1, o->in2, r3);
3095    tcg_temp_free_i32(r1);
3096    tcg_temp_free_i32(r3);
3097    return DISAS_NEXT;
3098}
3099
3100static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
3101{
3102    int r1 = get_field(s->fields, r1);
3103    int r3 = get_field(s->fields, r3);
3104    TCGv_i64 t1, t2;
3105
3106    /* Only one register to read. */
3107    t1 = tcg_temp_new_i64();
3108    if (unlikely(r1 == r3)) {
3109        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3110        store_reg32_i64(r1, t1);
3111        tcg_temp_free(t1);
3112        return DISAS_NEXT;
3113    }
3114
3115    /* First load the values of the first and last registers to trigger
3116       possible page faults. */
3117    t2 = tcg_temp_new_i64();
3118    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3119    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
3120    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
3121    store_reg32_i64(r1, t1);
3122    store_reg32_i64(r3, t2);
3123
3124    /* Only two registers to read. */
3125    if (((r1 + 1) & 15) == r3) {
3126        tcg_temp_free(t2);
3127        tcg_temp_free(t1);
3128        return DISAS_NEXT;
3129    }
3130
3131    /* Then load the remaining registers; no page fault can occur. */
3132    r3 = (r3 - 1) & 15;
3133    tcg_gen_movi_i64(t2, 4);
3134    while (r1 != r3) {
3135        r1 = (r1 + 1) & 15;
3136        tcg_gen_add_i64(o->in2, o->in2, t2);
3137        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3138        store_reg32_i64(r1, t1);
3139    }
3140    tcg_temp_free(t2);
3141    tcg_temp_free(t1);
3142
3143    return DISAS_NEXT;
3144}
3145
3146static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
3147{
3148    int r1 = get_field(s->fields, r1);
3149    int r3 = get_field(s->fields, r3);
3150    TCGv_i64 t1, t2;
3151
3152    /* Only one register to read. */
3153    t1 = tcg_temp_new_i64();
3154    if (unlikely(r1 == r3)) {
3155        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3156        store_reg32h_i64(r1, t1);
3157        tcg_temp_free(t1);
3158        return DISAS_NEXT;
3159    }
3160
3161    /* First load the values of the first and last registers to trigger
3162       possible page faults. */
3163    t2 = tcg_temp_new_i64();
3164    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3165    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
3166    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
3167    store_reg32h_i64(r1, t1);
3168    store_reg32h_i64(r3, t2);
3169
3170    /* Only two registers to read. */
3171    if (((r1 + 1) & 15) == r3) {
3172        tcg_temp_free(t2);
3173        tcg_temp_free(t1);
3174        return DISAS_NEXT;
3175    }
3176
3177    /* Then load the remaining registers; no page fault can occur. */
3178    r3 = (r3 - 1) & 15;
3179    tcg_gen_movi_i64(t2, 4);
3180    while (r1 != r3) {
3181        r1 = (r1 + 1) & 15;
3182        tcg_gen_add_i64(o->in2, o->in2, t2);
3183        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3184        store_reg32h_i64(r1, t1);
3185    }
3186    tcg_temp_free(t2);
3187    tcg_temp_free(t1);
3188
3189    return DISAS_NEXT;
3190}
3191
3192static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
3193{
3194    int r1 = get_field(s->fields, r1);
3195    int r3 = get_field(s->fields, r3);
3196    TCGv_i64 t1, t2;
3197
3198    /* Only one register to read. */
3199    if (unlikely(r1 == r3)) {
3200        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
3201        return DISAS_NEXT;
3202    }
3203
3204    /* First load the values of the first and last registers to trigger
3205       possible page faults. */
3206    t1 = tcg_temp_new_i64();
3207    t2 = tcg_temp_new_i64();
3208    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
3209    tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
3210    tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
3211    tcg_gen_mov_i64(regs[r1], t1);
3212    tcg_temp_free(t2);
3213
3214    /* Only two registers to read. */
3215    if (((r1 + 1) & 15) == r3) {
3216        tcg_temp_free(t1);
3217        return DISAS_NEXT;
3218    }
3219
3220    /* Then load the remaining registers; no page fault can occur. */
3221    r3 = (r3 - 1) & 15;
3222    tcg_gen_movi_i64(t1, 8);
3223    while (r1 != r3) {
3224        r1 = (r1 + 1) & 15;
3225        tcg_gen_add_i64(o->in2, o->in2, t1);
3226        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
3227    }
3228    tcg_temp_free(t1);
3229
3230    return DISAS_NEXT;
3231}
3232
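/*
 * Example of the wrap-around handling shared by the LM variants above:
 * LMG %r14,%r2,... loads r14, r15, r0, r1 and r2.  Touching the first
 * and last registers' memory up front means any page fault is taken
 * before a register has been modified, so the instruction can restart
 * cleanly.
 */
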
3233static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
3234{
3235    TCGv_i64 a1, a2;
3236    TCGMemOp mop = s->insn->data;
3237
3238    /* In a parallel context, stop the world and single step.  */
3239    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
3240        update_psw_addr(s);
3241        update_cc_op(s);
3242        gen_exception(EXCP_ATOMIC);
3243        return DISAS_NORETURN;
3244    }
3245
3246    /* In a serial context, perform the two loads ... */
3247    a1 = get_address(s, 0, get_field(s->fields, b1), get_field(s->fields, d1));
3248    a2 = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
3249    tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
3250    tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
3251    tcg_temp_free_i64(a1);
3252    tcg_temp_free_i64(a2);
3253
3254    /* ... and indicate that we performed them while interlocked.  */
3255    gen_op_movi_cc(s, 0);
3256    return DISAS_NEXT;
3257}
3258
3259static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
3260{
3261    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
3262        gen_helper_lpq(o->out, cpu_env, o->in2);
3263    } else if (HAVE_ATOMIC128) {
3264        gen_helper_lpq_parallel(o->out, cpu_env, o->in2);
3265    } else {
3266        gen_helper_exit_atomic(cpu_env);
3267        return DISAS_NORETURN;
3268    }
3269    return_low128(o->out2);
3270    return DISAS_NEXT;
3271}
3272
3273#ifndef CONFIG_USER_ONLY
3274static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
3275{
3276    gen_helper_lura(o->out, cpu_env, o->in2);
3277    return DISAS_NEXT;
3278}
3279
3280static DisasJumpType op_lurag(DisasContext *s, DisasOps *o)
3281{
3282    gen_helper_lurag(o->out, cpu_env, o->in2);
3283    return DISAS_NEXT;
3284}
3285#endif
3286
3287static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o)
3288{
3289    tcg_gen_andi_i64(o->out, o->in2, -256);
3290    return DISAS_NEXT;
3291}
3292
3293static DisasJumpType op_lcbb(DisasContext *s, DisasOps *o)
3294{
3295    const int64_t block_size = (1ull << (get_field(s->fields, m3) + 6));
3296
3297    if (get_field(s->fields, m3) > 6) {
3298        gen_program_exception(s, PGM_SPECIFICATION);
3299        return DISAS_NORETURN;
3300    }
3301
3302    tcg_gen_ori_i64(o->addr1, o->addr1, -block_size);
3303    tcg_gen_neg_i64(o->addr1, o->addr1);
3304    tcg_gen_movi_i64(o->out, 16);
3305    tcg_gen_umin_i64(o->out, o->out, o->addr1);
3306    gen_op_update1_cc_i64(s, CC_OP_LCBB, o->out);
3307    return DISAS_NEXT;
3308}
3309
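/*
 * The arithmetic above computes min(16, block_size - (addr % block_size))
 * without a division: or'ing in -block_size and negating yields the
 * distance to the next boundary.  E.g. m3 = 0 selects 64-byte blocks;
 * for an address ending in 0x3d the distance is 3, so the result is 3
 * and, being less than 16, CC is set to 3.
 */
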
3310static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
3311{
3312    o->out = o->in2;
3313    o->g_out = o->g_in2;
3314    o->in2 = NULL;
3315    o->g_in2 = false;
3316    return DISAS_NEXT;
3317}
3318
3319static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
3320{
3321    int b2 = get_field(s->fields, b2);
3322    TCGv ar1 = tcg_temp_new_i64();
3323
3324    o->out = o->in2;
3325    o->g_out = o->g_in2;
3326    o->in2 = NULL;
3327    o->g_in2 = false;
3328
3329    switch (s->base.tb->flags & FLAG_MASK_ASC) {
3330    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
3331        tcg_gen_movi_i64(ar1, 0);
3332        break;
3333    case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
3334        tcg_gen_movi_i64(ar1, 1);
3335        break;
3336    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
3337        if (b2) {
3338            tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
3339        } else {
3340            tcg_gen_movi_i64(ar1, 0);
3341        }
3342        break;
3343    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
3344        tcg_gen_movi_i64(ar1, 2);
3345        break;
3346    }
3347
3348    tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
3349    tcg_temp_free_i64(ar1);
3350
3351    return DISAS_NEXT;
3352}
3353
3354static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
3355{
3356    o->out = o->in1;
3357    o->out2 = o->in2;
3358    o->g_out = o->g_in1;
3359    o->g_out2 = o->g_in2;
3360    o->in1 = NULL;
3361    o->in2 = NULL;
3362    o->g_in1 = o->g_in2 = false;
3363    return DISAS_NEXT;
3364}
3365
3366static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
3367{
3368    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3369    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
3370    tcg_temp_free_i32(l);
3371    return DISAS_NEXT;
3372}
3373
3374static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
3375{
3376    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3377    gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
3378    tcg_temp_free_i32(l);
3379    return DISAS_NEXT;
3380}
3381
3382static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
3383{
3384    int r1 = get_field(s->fields, r1);
3385    int r2 = get_field(s->fields, r2);
3386    TCGv_i32 t1, t2;
3387
3388    /* r1 and r2 must be even.  */
3389    if (r1 & 1 || r2 & 1) {
3390        gen_program_exception(s, PGM_SPECIFICATION);
3391        return DISAS_NORETURN;
3392    }
3393
3394    t1 = tcg_const_i32(r1);
3395    t2 = tcg_const_i32(r2);
3396    gen_helper_mvcl(cc_op, cpu_env, t1, t2);
3397    tcg_temp_free_i32(t1);
3398    tcg_temp_free_i32(t2);
3399    set_cc_static(s);
3400    return DISAS_NEXT;
3401}
3402
3403static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
3404{
3405    int r1 = get_field(s->fields, r1);
3406    int r3 = get_field(s->fields, r3);
3407    TCGv_i32 t1, t3;
3408
3409    /* r1 and r3 must be even.  */
3410    if (r1 & 1 || r3 & 1) {
3411        gen_program_exception(s, PGM_SPECIFICATION);
3412        return DISAS_NORETURN;
3413    }
3414
3415    t1 = tcg_const_i32(r1);
3416    t3 = tcg_const_i32(r3);
3417    gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
3418    tcg_temp_free_i32(t1);
3419    tcg_temp_free_i32(t3);
3420    set_cc_static(s);
3421    return DISAS_NEXT;
3422}
3423
3424static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
3425{
3426    int r1 = get_field(s->fields, r1);
3427    int r3 = get_field(s->fields, r3);
3428    TCGv_i32 t1, t3;
3429
3430    /* r1 and r3 must be even.  */
3431    if (r1 & 1 || r3 & 1) {
3432        gen_program_exception(s, PGM_SPECIFICATION);
3433        return DISAS_NORETURN;
3434    }
3435
3436    t1 = tcg_const_i32(r1);
3437    t3 = tcg_const_i32(r3);
3438    gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
3439    tcg_temp_free_i32(t1);
3440    tcg_temp_free_i32(t3);
3441    set_cc_static(s);
3442    return DISAS_NEXT;
3443}
3444
3445static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
3446{
3447    int r3 = get_field(s->fields, r3);
3448    gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);
3449    set_cc_static(s);
3450    return DISAS_NEXT;
3451}
3452
3453#ifndef CONFIG_USER_ONLY
3454static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o)
3455{
3456    int r1 = get_field(s->fields, l1);
3457    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
3458    set_cc_static(s);
3459    return DISAS_NEXT;
3460}
3461
3462static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
3463{
3464    int r1 = get_field(s->fields, l1);
3465    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
3466    set_cc_static(s);
3467    return DISAS_NEXT;
3468}
3469#endif
3470
3471static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
3472{
3473    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3474    gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
3475    tcg_temp_free_i32(l);
3476    return DISAS_NEXT;
3477}
3478
3479static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
3480{
3481    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3482    gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
3483    tcg_temp_free_i32(l);
3484    return DISAS_NEXT;
3485}
3486
3487static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
3488{
3489    gen_helper_mvpg(cc_op, cpu_env, regs[0], o->in1, o->in2);
3490    set_cc_static(s);
3491    return DISAS_NEXT;
3492}
3493
3494static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
3495{
3496    gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
3497    set_cc_static(s);
3498    return_low128(o->in2);
3499    return DISAS_NEXT;
3500}
3501
3502static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
3503{
3504    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3505    gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
3506    tcg_temp_free_i32(l);
3507    return DISAS_NEXT;
3508}
3509
3510static DisasJumpType op_mul(DisasContext *s, DisasOps *o)
3511{
3512    tcg_gen_mul_i64(o->out, o->in1, o->in2);
3513    return DISAS_NEXT;
3514}
3515
3516static DisasJumpType op_mul128(DisasContext *s, DisasOps *o)
3517{
3518    tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
3519    return DISAS_NEXT;
3520}
3521
3522static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
3523{
3524    gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
3525    return DISAS_NEXT;
3526}
3527
3528static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
3529{
3530    gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
3531    return DISAS_NEXT;
3532}
3533
3534static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
3535{
3536    gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
3537    return DISAS_NEXT;
3538}
3539
3540static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
3541{
3542    gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
3543    return_low128(o->out2);
3544    return DISAS_NEXT;
3545}
3546
3547static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
3548{
3549    gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
3550    return_low128(o->out2);
3551    return DISAS_NEXT;
3552}
3553
3554static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
3555{
3556    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
3557    gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
3558    tcg_temp_free_i64(r3);
3559    return DISAS_NEXT;
3560}
3561
3562static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
3563{
3564    TCGv_i64 r3 = load_freg(get_field(s->fields, r3));
3565    gen_helper_madb(o->out, cpu_env, o->in1, o->in2, r3);
3566    tcg_temp_free_i64(r3);
3567    return DISAS_NEXT;
3568}
3569
3570static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
3571{
3572    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
3573    gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
3574    tcg_temp_free_i64(r3);
3575    return DISAS_NEXT;
3576}
3577
3578static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
3579{
3580    TCGv_i64 r3 = load_freg(get_field(s->fields, r3));
3581    gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, r3);
3582    tcg_temp_free_i64(r3);
3583    return DISAS_NEXT;
3584}
3585
3586static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
3587{
3588    TCGv_i64 z, n;
3589    z = tcg_const_i64(0);
3590    n = tcg_temp_new_i64();
3591    tcg_gen_neg_i64(n, o->in2);
3592    tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
3593    tcg_temp_free_i64(n);
3594    tcg_temp_free_i64(z);
3595    return DISAS_NEXT;
3596}
3597
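/*
 * LOAD NEGATIVE via movcond: when in2 >= 0 the negated copy is
 * selected, otherwise in2 itself, so e.g. both 5 and -5 yield -5.
 */
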
3598static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o)
3599{
3600    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
3601    return DISAS_NEXT;
3602}
3603
3604static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o)
3605{
3606    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
3607    return DISAS_NEXT;
3608}
3609
3610static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
3611{
3612    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
3613    tcg_gen_mov_i64(o->out2, o->in2);
3614    return DISAS_NEXT;
3615}
3616
3617static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
3618{
3619    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3620    gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
3621    tcg_temp_free_i32(l);
3622    set_cc_static(s);
3623    return DISAS_NEXT;
3624}
3625
3626static DisasJumpType op_neg(DisasContext *s, DisasOps *o)
3627{
3628    tcg_gen_neg_i64(o->out, o->in2);
3629    return DISAS_NEXT;
3630}
3631
3632static DisasJumpType op_negf32(DisasContext *s, DisasOps *o)
3633{
3634    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
3635    return DISAS_NEXT;
3636}
3637
3638static DisasJumpType op_negf64(DisasContext *s, DisasOps *o)
3639{
3640    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
3641    return DISAS_NEXT;
3642}
3643
3644static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
3645{
3646    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
3647    tcg_gen_mov_i64(o->out2, o->in2);
3648    return DISAS_NEXT;
3649}
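
/*
 * Illustrative sketch (editor's addition, not part of QEMU): the LOAD
 * COMPLEMENT / LOAD NEGATIVE float ops above never call into softfloat;
 * they operate on the raw IEEE-754 bit pattern, flipping or forcing the
 * sign bit.  The same trick on host doubles:
 */
#if 0
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static double f64_negate(double x)
{
    uint64_t bits;
    memcpy(&bits, &x, sizeof(bits));
    bits ^= 0x8000000000000000ull;      /* flip the sign: op_negf64 */
    memcpy(&x, &bits, sizeof(x));
    return x;
}

static double f64_nabs(double x)
{
    uint64_t bits;
    memcpy(&bits, &x, sizeof(bits));
    bits |= 0x8000000000000000ull;      /* force the sign: op_nabsf64 */
    memcpy(&x, &bits, sizeof(x));
    return x;
}

int main(void)
{
    printf("%g %g\n", f64_negate(1.5), f64_nabs(2.5));  /* -1.5 -2.5 */
    return 0;
}
#endif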
3650
3651static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
3652{
3653    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3654    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
3655    tcg_temp_free_i32(l);
3656    set_cc_static(s);
3657    return DISAS_NEXT;
3658}
3659
3660static DisasJumpType op_or(DisasContext *s, DisasOps *o)
3661{
3662    tcg_gen_or_i64(o->out, o->in1, o->in2);
3663    return DISAS_NEXT;
3664}
3665
3666static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
3667{
3668    int shift = s->insn->data & 0xff;
3669    int size = s->insn->data >> 8;
3670    uint64_t mask = ((1ull << size) - 1) << shift;
3671
3672    assert(!o->g_in2);
3673    tcg_gen_shli_i64(o->in2, o->in2, shift);
3674    tcg_gen_or_i64(o->out, o->in1, o->in2);
3675
3676    /* Produce the CC from only the bits manipulated.  */
3677    tcg_gen_andi_i64(cc_dst, o->out, mask);
3678    set_cc_nz_u64(s, cc_dst);
3679    return DISAS_NEXT;
3680}
3681
3682static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
3683{
3684    o->in1 = tcg_temp_new_i64();
3685
3686    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3687        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
3688    } else {
3689        /* Perform the atomic operation in memory. */
3690        tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
3691                                    s->insn->data);
3692    }
3693
3694    /* Recompute for the atomic case as well: needed for setting the CC. */
3695    tcg_gen_or_i64(o->out, o->in1, o->in2);
3696
3697    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3698        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
3699    }
3700    return DISAS_NEXT;
3701}
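
/*
 * Illustrative sketch (editor's addition, not part of QEMU): op_oi has
 * two code paths.  Without the interlocked-access facility the
 * read-modify-write is emitted as a separate load and store; with it, a
 * single atomic fetch-or is used, and the OR is then redone on the
 * fetched value purely to derive the CC.  In C11 terms:
 */
#if 0
#include <stdatomic.h>
#include <stdint.h>

static int or_in_memory(_Atomic uint64_t *mem, uint64_t val, int interlocked)
{
    uint64_t old;

    if (interlocked) {
        old = atomic_fetch_or(mem, val);        /* one atomic RMW */
    } else {
        old = atomic_load_explicit(mem, memory_order_relaxed);
        atomic_store_explicit(mem, old | val, memory_order_relaxed);
    }
    /* Recomputed in both cases, as above, to produce the CC. */
    return (old | val) != 0;            /* CC 0: zero, CC 1: nonzero */
}
#endif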
3702
3703static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
3704{
3705    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3706    gen_helper_pack(cpu_env, l, o->addr1, o->in2);
3707    tcg_temp_free_i32(l);
3708    return DISAS_NEXT;
3709}
3710
3711static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
3712{
3713    int l2 = get_field(s->fields, l2) + 1;
3714    TCGv_i32 l;
3715
3716    /* The length must not exceed 32 bytes.  */
3717    if (l2 > 32) {
3718        gen_program_exception(s, PGM_SPECIFICATION);
3719        return DISAS_NORETURN;
3720    }
3721    l = tcg_const_i32(l2);
3722    gen_helper_pka(cpu_env, o->addr1, o->in2, l);
3723    tcg_temp_free_i32(l);
3724    return DISAS_NEXT;
3725}
3726
3727static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
3728{
3729    int l2 = get_field(s->fields, l2) + 1;
3730    TCGv_i32 l;
3731
3732    /* The length must be even and must not exceed 64 bytes.  */
3733    if ((l2 & 1) || (l2 > 64)) {
3734        gen_program_exception(s, PGM_SPECIFICATION);
3735        return DISAS_NORETURN;
3736    }
3737    l = tcg_const_i32(l2);
3738    gen_helper_pku(cpu_env, o->addr1, o->in2, l);
3739    tcg_temp_free_i32(l);
3740    return DISAS_NEXT;
3741}
3742
3743static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o)
3744{
3745    gen_helper_popcnt(o->out, o->in2);
3746    return DISAS_NEXT;
3747}
3748
3749#ifndef CONFIG_USER_ONLY
3750static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
3751{
3752    gen_helper_ptlb(cpu_env);
3753    return DISAS_NEXT;
3754}
3755#endif
3756
3757static DisasJumpType op_risbg(DisasContext *s, DisasOps *o)
3758{
3759    int i3 = get_field(s->fields, i3);
3760    int i4 = get_field(s->fields, i4);
3761    int i5 = get_field(s->fields, i5);
3762    int do_zero = i4 & 0x80;
3763    uint64_t mask, imask, pmask;
3764    int pos, len, rot;
3765
3766    /* Adjust the arguments for the specific insn.  */
3767    switch (s->fields->op2) {
3768    case 0x55: /* risbg */
3769    case 0x59: /* risbgn */
3770        i3 &= 63;
3771        i4 &= 63;
3772        pmask = ~0;
3773        break;
3774    case 0x5d: /* risbhg */
3775        i3 &= 31;
3776        i4 &= 31;
3777        pmask = 0xffffffff00000000ull;
3778        break;
3779    case 0x51: /* risblg */
3780        i3 &= 31;
3781        i4 &= 31;
3782        pmask = 0x00000000ffffffffull;
3783        break;
3784    default:
3785        g_assert_not_reached();
3786    }
3787
3788    /* MASK is the set of bits to be inserted from R2.
3789       Take care of the I3/I4 wraparound.  */
3790    mask = pmask >> i3;
3791    if (i3 <= i4) {
3792        mask ^= pmask >> i4 >> 1;
3793    } else {
3794        mask |= ~(pmask >> i4 >> 1);
3795    }
3796    mask &= pmask;
3797
3798    /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
3799       insns, we need to keep the other half of the register.  */
3800    imask = ~mask | ~pmask;
3801    if (do_zero) {
3802        imask = ~pmask;
3803    }
3804
3805    len = i4 - i3 + 1;
3806    pos = 63 - i4;
3807    rot = i5 & 63;
3808    if (s->fields->op2 == 0x5d) {
3809        pos += 32;
3810    }
3811
3812    /* In some cases we can implement this with extract.  */
3813    if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
3814        tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
3815        return DISAS_NEXT;
3816    }
3817
3818    /* In some cases we can implement this with deposit.  */
3819    if (len > 0 && (imask == 0 || ~mask == imask)) {
3820        /* Note that we rotate the bits to be inserted to the lsb, not to
3821           the position as described in the PoO.  */
3822        rot = (rot - pos) & 63;
3823    } else {
3824        pos = -1;
3825    }
3826
3827    /* Rotate the input as necessary.  */
3828    tcg_gen_rotli_i64(o->in2, o->in2, rot);
3829
3830    /* Insert the selected bits into the output.  */
3831    if (pos >= 0) {
3832        if (imask == 0) {
3833            tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
3834        } else {
3835            tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
3836        }
3837    } else if (imask == 0) {
3838        tcg_gen_andi_i64(o->out, o->in2, mask);
3839    } else {
3840        tcg_gen_andi_i64(o->in2, o->in2, mask);
3841        tcg_gen_andi_i64(o->out, o->out, imask);
3842        tcg_gen_or_i64(o->out, o->out, o->in2);
3843    }
3844    return DISAS_NEXT;
3845}
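
/*
 * Illustrative sketch (editor's addition, not part of QEMU): the
 * trickiest part of op_risbg is the wraparound mask.  For i3 <= i4 the
 * XOR clears everything below the end of the field; for i3 > i4 the
 * selected field wraps around both ends of the register.  Standalone:
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static uint64_t risbg_mask(int i3, int i4)
{
    uint64_t pmask = ~0ull;             /* risbg/risbgn: whole register */
    uint64_t mask = pmask >> i3;

    if (i3 <= i4) {
        mask ^= pmask >> i4 >> 1;       /* contiguous field i3..i4 */
    } else {
        mask |= ~(pmask >> i4 >> 1);    /* wrapped: i3..63 plus 0..i4 */
    }
    return mask & pmask;
}

int main(void)
{
    /* IBM bit numbering: bit 0 is the most significant bit. */
    printf("%016llx\n", (unsigned long long)risbg_mask(8, 15));
    /* -> 00ff000000000000 */
    printf("%016llx\n", (unsigned long long)risbg_mask(60, 3));
    /* -> f00000000000000f (wraps around the register) */
    return 0;
}
#endif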
3846
3847static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
3848{
3849    int i3 = get_field(s->fields, i3);
3850    int i4 = get_field(s->fields, i4);
3851    int i5 = get_field(s->fields, i5);
3852    uint64_t mask;
3853
3854    /* If this is a test-only form, arrange to discard the result.  */
3855    if (i3 & 0x80) {
3856        o->out = tcg_temp_new_i64();
3857        o->g_out = false;
3858    }
3859
3860    i3 &= 63;
3861    i4 &= 63;
3862    i5 &= 63;
3863
3864    /* MASK is the set of bits to be operated on from R2.
3865       Take care of the I3/I4 wraparound.  */
3866    mask = ~0ull >> i3;
3867    if (i3 <= i4) {
3868        mask ^= ~0ull >> i4 >> 1;
3869    } else {
3870        mask |= ~(~0ull >> i4 >> 1);
3871    }
3872
3873    /* Rotate the input as necessary.  */
3874    tcg_gen_rotli_i64(o->in2, o->in2, i5);
3875
3876    /* Operate.  */
3877    switch (s->fields->op2) {
3878    case 0x55: /* AND */
3879        tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3880        tcg_gen_and_i64(o->out, o->out, o->in2);
3881        break;
3882    case 0x56: /* OR */
3883        tcg_gen_andi_i64(o->in2, o->in2, mask);
3884        tcg_gen_or_i64(o->out, o->out, o->in2);
3885        break;
3886    case 0x57: /* XOR */
3887        tcg_gen_andi_i64(o->in2, o->in2, mask);
3888        tcg_gen_xor_i64(o->out, o->out, o->in2);
3889        break;
3890    default:
3891        abort();
3892    }
3893
3894    /* Set the CC.  */
3895    tcg_gen_andi_i64(cc_dst, o->out, mask);
3896    set_cc_nz_u64(s, cc_dst);
3897    return DISAS_NEXT;
3898}
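
/*
 * Illustrative sketch (editor's addition, not part of QEMU): what
 * R{N,O,X}SBG computes once the mask above is in hand, reduced to host
 * C for the OR variant.  Only the masked bits of the rotated r2
 * participate, and only they feed the CC.
 */
#if 0
#include <stdint.h>

static uint64_t rosbg(uint64_t r1, uint64_t r2, int i5, uint64_t mask,
                      int *cc)
{
    uint64_t rot = i5 ? (r2 << i5) | (r2 >> (64 - i5)) : r2;
    uint64_t res = r1 | (rot & mask);   /* AND/XOR forms are analogous */

    *cc = (res & mask) != 0;
    return res;
}
#endif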
3899
3900static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
3901{
3902    tcg_gen_bswap16_i64(o->out, o->in2);
3903    return DISAS_NEXT;
3904}
3905
3906static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
3907{
3908    tcg_gen_bswap32_i64(o->out, o->in2);
3909    return DISAS_NEXT;
3910}
3911
3912static DisasJumpType op_rev64(DisasContext *s, DisasOps *o)
3913{
3914    tcg_gen_bswap64_i64(o->out, o->in2);
3915    return DISAS_NEXT;
3916}
3917
3918static DisasJumpType op_rll32(DisasContext *s, DisasOps *o)
3919{
3920    TCGv_i32 t1 = tcg_temp_new_i32();
3921    TCGv_i32 t2 = tcg_temp_new_i32();
3922    TCGv_i32 to = tcg_temp_new_i32();
3923    tcg_gen_extrl_i64_i32(t1, o->in1);
3924    tcg_gen_extrl_i64_i32(t2, o->in2);
3925    tcg_gen_rotl_i32(to, t1, t2);
3926    tcg_gen_extu_i32_i64(o->out, to);
3927    tcg_temp_free_i32(t1);
3928    tcg_temp_free_i32(t2);
3929    tcg_temp_free_i32(to);
3930    return DISAS_NEXT;
3931}
3932
3933static DisasJumpType op_rll64(DisasContext *s, DisasOps *o)
3934{
3935    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
3936    return DISAS_NEXT;
3937}
3938
3939#ifndef CONFIG_USER_ONLY
3940static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
3941{
3942    gen_helper_rrbe(cc_op, cpu_env, o->in2);
3943    set_cc_static(s);
3944    return DISAS_NEXT;
3945}
3946
3947static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
3948{
3949    gen_helper_sacf(cpu_env, o->in2);
3950    /* Addressing mode has changed, so end the block.  */
3951    return DISAS_PC_STALE;
3952}
3953#endif
3954
3955static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
3956{
3957    int sam = s->insn->data;
3958    TCGv_i64 tsam;
3959    uint64_t mask;
3960
3961    switch (sam) {
3962    case 0:
3963        mask = 0xffffff;
3964        break;
3965    case 1:
3966        mask = 0x7fffffff;
3967        break;
3968    default:
3969        mask = -1;
3970        break;
3971    }
3972
3973    /* Bizarre but true, we check the address of the current insn for the
3974       specification exception, not the next to be executed.  Thus the PoO
3975       documents that Bad Things Happen two bytes before the end.  */
3976    if (s->base.pc_next & ~mask) {
3977        gen_program_exception(s, PGM_SPECIFICATION);
3978        return DISAS_NORETURN;
3979    }
3980    s->pc_tmp &= mask;
3981
3982    tsam = tcg_const_i64(sam);
3983    tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
3984    tcg_temp_free_i64(tsam);
3985
3986    /* Always exit the TB, since we (may have) changed execution mode.  */
3987    return DISAS_PC_STALE;
3988}
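
/*
 * Illustrative sketch (editor's addition, not part of QEMU): the three
 * addressing modes and the mask op_sam applies.  SAM24/SAM31/SAM64
 * correspond to insn->data values 0/1/3, which is also the two-bit
 * field deposited into PSW bits 31-32.
 */
#if 0
#include <stdint.h>

static uint64_t sam_mask(int sam)
{
    switch (sam) {
    case 0:  return 0xffffffull;        /* 24-bit addressing */
    case 1:  return 0x7fffffffull;      /* 31-bit addressing */
    default: return ~0ull;              /* 64-bit addressing */
    }
}

/* The specification check uses the address of the SAM insn itself. */
static int sam_specification(uint64_t insn_addr, int sam)
{
    return (insn_addr & ~sam_mask(sam)) != 0;
}
#endif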
3989
3990static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
3991{
3992    int r1 = get_field(s->fields, r1);
3993    tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
3994    return DISAS_NEXT;
3995}
3996
3997static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
3998{
3999    gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
4000    return DISAS_NEXT;
4001}
4002
4003static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
4004{
4005    gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
4006    return DISAS_NEXT;
4007}
4008
4009static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
4010{
4011    gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
4012    return_low128(o->out2);
4013    return DISAS_NEXT;
4014}
4015
4016static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
4017{
4018    gen_helper_sqeb(o->out, cpu_env, o->in2);
4019    return DISAS_NEXT;
4020}
4021
4022static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
4023{
4024    gen_helper_sqdb(o->out, cpu_env, o->in2);
4025    return DISAS_NEXT;
4026}
4027
4028static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o)
4029{
4030    gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
4031    return_low128(o->out2);
4032    return DISAS_NEXT;
4033}
4034
4035#ifndef CONFIG_USER_ONLY
4036static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
4037{
4038    gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
4039    set_cc_static(s);
4040    return DISAS_NEXT;
4041}
4042
4043static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
4044{
4045    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4046    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
4047    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3);
4048    set_cc_static(s);
4049    tcg_temp_free_i32(r1);
4050    tcg_temp_free_i32(r3);
4051    return DISAS_NEXT;
4052}
4053#endif
4054
4055static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
4056{
4057    DisasCompare c;
4058    TCGv_i64 a, h;
4059    TCGLabel *lab;
4060    int r1;
4061
4062    disas_jcc(s, &c, get_field(s->fields, m3));
4063
4064    /* We want to store when the condition is fulfilled, so branch
4065       out when it's not.  */
4066    c.cond = tcg_invert_cond(c.cond);
4067
4068    lab = gen_new_label();
4069    if (c.is_64) {
4070        tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
4071    } else {
4072        tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
4073    }
4074    free_compare(&c);
4075
4076    r1 = get_field(s->fields, r1);
4077    a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
4078    switch (s->insn->data) {
4079    case 1: /* STOCG */
4080        tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
4081        break;
4082    case 0: /* STOC */
4083        tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
4084        break;
4085    case 2: /* STOCFH */
4086        h = tcg_temp_new_i64();
4087        tcg_gen_shri_i64(h, regs[r1], 32);
4088        tcg_gen_qemu_st32(h, a, get_mem_index(s));
4089        tcg_temp_free_i64(h);
4090        break;
4091    default:
4092        g_assert_not_reached();
4093    }
4094    tcg_temp_free_i64(a);
4095
4096    gen_set_label(lab);
4097    return DISAS_NEXT;
4098}
4099
4100static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
4101{
4102    uint64_t sign = 1ull << s->insn->data;
4103    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
4104    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
4105    tcg_gen_shl_i64(o->out, o->in1, o->in2);
4106    /* The arithmetic left shift is curious in that it does not affect
4107       the sign bit.  Copy that over from the source unchanged.  */
4108    tcg_gen_andi_i64(o->out, o->out, ~sign);
4109    tcg_gen_andi_i64(o->in1, o->in1, sign);
4110    tcg_gen_or_i64(o->out, o->out, o->in1);
4111    return DISAS_NEXT;
4112}
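
/*
 * Illustrative sketch (editor's addition, not part of QEMU): SHIFT LEFT
 * SINGLE in plain C for the 32-bit case, mirroring the mask dance in
 * op_sla.  The sign bit of the source survives unchanged no matter what
 * is shifted into or out of the magnitude (shift assumed < 32 here).
 */
#if 0
#include <stdint.h>

static int32_t sla32(int32_t val, unsigned shift)
{
    uint32_t sign = (uint32_t)val & 0x80000000u;
    uint32_t magnitude = ((uint32_t)val << shift) & 0x7fffffffu;

    return (int32_t)(magnitude | sign);
}
#endif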
4113
4114static DisasJumpType op_sll(DisasContext *s, DisasOps *o)
4115{
4116    tcg_gen_shl_i64(o->out, o->in1, o->in2);
4117    return DISAS_NEXT;
4118}
4119
4120static DisasJumpType op_sra(DisasContext *s, DisasOps *o)
4121{
4122    tcg_gen_sar_i64(o->out, o->in1, o->in2);
4123    return DISAS_NEXT;
4124}
4125
4126static DisasJumpType op_srl(DisasContext *s, DisasOps *o)
4127{
4128    tcg_gen_shr_i64(o->out, o->in1, o->in2);
4129    return DISAS_NEXT;
4130}
4131
4132static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
4133{
4134    gen_helper_sfpc(cpu_env, o->in2);
4135    return DISAS_NEXT;
4136}
4137
4138static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
4139{
4140    gen_helper_sfas(cpu_env, o->in2);
4141    return DISAS_NEXT;
4142}
4143
4144static DisasJumpType op_srnm(DisasContext *s, DisasOps *o)
4145{
4146    /* Bits other than 62 and 63 are ignored. Bit 29 is set to zero. */
4147    tcg_gen_andi_i64(o->addr1, o->addr1, 0x3ull);
4148    gen_helper_srnm(cpu_env, o->addr1);
4149    return DISAS_NEXT;
4150}
4151
4152static DisasJumpType op_srnmb(DisasContext *s, DisasOps *o)
4153{
4154    /* Bits 0-55 are ignored. */
4155    tcg_gen_andi_i64(o->addr1, o->addr1, 0xffull);
4156    gen_helper_srnm(cpu_env, o->addr1);
4157    return DISAS_NEXT;
4158}
4159
4160static DisasJumpType op_srnmt(DisasContext *s, DisasOps *o)
4161{
4162    TCGv_i64 tmp = tcg_temp_new_i64();
4163
4164    /* Bits other than 61-63 are ignored. */
4165    tcg_gen_andi_i64(o->addr1, o->addr1, 0x7ull);
4166
4167    /* No need to call a helper: we don't implement DFP.  */
4168    tcg_gen_ld32u_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
4169    tcg_gen_deposit_i64(tmp, tmp, o->addr1, 4, 3);
4170    tcg_gen_st32_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
4171
4172    tcg_temp_free_i64(tmp);
4173    return DISAS_NEXT;
4174}
4175
4176static DisasJumpType op_spm(DisasContext *s, DisasOps *o)
4177{
4178    tcg_gen_extrl_i64_i32(cc_op, o->in1);
4179    tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
4180    set_cc_static(s);
4181
4182    tcg_gen_shri_i64(o->in1, o->in1, 24);
4183    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
4184    return DISAS_NEXT;
4185}
4186
4187static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
4188{
4189    int b1 = get_field(s->fields, b1);
4190    int d1 = get_field(s->fields, d1);
4191    int b2 = get_field(s->fields, b2);
4192    int d2 = get_field(s->fields, d2);
4193    int r3 = get_field(s->fields, r3);
4194    TCGv_i64 tmp = tcg_temp_new_i64();
4195
4196    /* fetch all operands first */
4197    o->in1 = tcg_temp_new_i64();
4198    tcg_gen_addi_i64(o->in1, regs[b1], d1);
4199    o->in2 = tcg_temp_new_i64();
4200    tcg_gen_addi_i64(o->in2, regs[b2], d2);
4201    o->addr1 = get_address(s, 0, r3, 0);
4202
4203    /* load the third operand into r3 before modifying anything */
4204    tcg_gen_qemu_ld64(regs[r3], o->addr1, get_mem_index(s));
4205
4206    /* subtract CPU timer from first operand and store in GR0 */
4207    gen_helper_stpt(tmp, cpu_env);
4208    tcg_gen_sub_i64(regs[0], o->in1, tmp);
4209
4210    /* store second operand in GR1 */
4211    tcg_gen_mov_i64(regs[1], o->in2);
4212
4213    tcg_temp_free_i64(tmp);
4214    return DISAS_NEXT;
4215}
4216
4217#ifndef CONFIG_USER_ONLY
4218static DisasJumpType op_spka(DisasContext *s, DisasOps *o)
4219{
4220    tcg_gen_shri_i64(o->in2, o->in2, 4);
4221    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
4222    return DISAS_NEXT;
4223}
4224
4225static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
4226{
4227    gen_helper_sske(cpu_env, o->in1, o->in2);
4228    return DISAS_NEXT;
4229}
4230
4231static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
4232{
4233    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
4234    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
4235    return DISAS_PC_STALE_NOCHAIN;
4236}
4237
4238static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
4239{
4240    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, core_id));
4241    return DISAS_NEXT;
4242}
4243#endif
4244
4245static DisasJumpType op_stck(DisasContext *s, DisasOps *o)
4246{
4247    gen_helper_stck(o->out, cpu_env);
4248    /* ??? We don't implement clock states.  */
4249    gen_op_movi_cc(s, 0);
4250    return DISAS_NEXT;
4251}
4252
4253static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
4254{
4255    TCGv_i64 c1 = tcg_temp_new_i64();
4256    TCGv_i64 c2 = tcg_temp_new_i64();
4257    TCGv_i64 todpr = tcg_temp_new_i64();
4258    gen_helper_stck(c1, cpu_env);
4259    /* 16-bit value stored in a uint32_t (only valid bits set) */
4260    tcg_gen_ld32u_i64(todpr, cpu_env, offsetof(CPUS390XState, todpr));
4261    /* Shift the 64-bit value into its place as a zero-extended
4262       104-bit value.  Note that "bit positions 64-103 are always
4263       non-zero so that they compare differently to STCK"; we set
4264       the least significant bit to 1.  */
4265    tcg_gen_shli_i64(c2, c1, 56);
4266    tcg_gen_shri_i64(c1, c1, 8);
4267    tcg_gen_ori_i64(c2, c2, 0x10000);
4268    tcg_gen_or_i64(c2, c2, todpr);
4269    tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
4270    tcg_gen_addi_i64(o->in2, o->in2, 8);
4271    tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
4272    tcg_temp_free_i64(c1);
4273    tcg_temp_free_i64(c2);
4274    tcg_temp_free_i64(todpr);
4275    /* ??? We don't implement clock states.  */
4276    gen_op_movi_cc(s, 0);
4277    return DISAS_NEXT;
4278}
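
/*
 * Illustrative sketch (editor's addition, not part of QEMU): the
 * doubleword layout op_stcke produces.  The 64-bit TOD value is slid
 * right by one byte into the 16-byte extended format, one bit of the
 * second doubleword is forced on so that bit positions 64-103 are never
 * all zero, and the programmable field lands in the low 16 bits.
 */
#if 0
#include <stdint.h>

static void stcke_layout(uint64_t tod, uint64_t todpr,
                         uint64_t *dw0, uint64_t *dw1)
{
    *dw0 = tod >> 8;                    /* epoch byte 0, then TOD bits */
    *dw1 = (tod << 56)                  /* low 8 TOD bits */
         | 0x10000                      /* the always-nonzero marker */
         | todpr;                       /* TOD programmable register */
}
#endif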
4279
4280#ifndef CONFIG_USER_ONLY
4281static DisasJumpType op_sck(DisasContext *s, DisasOps *o)
4282{
4283    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
4284    gen_helper_sck(cc_op, cpu_env, o->in1);
4285    set_cc_static(s);
4286    return DISAS_NEXT;
4287}
4288
4289static DisasJumpType op_sckc(DisasContext *s, DisasOps *o)
4290{
4291    gen_helper_sckc(cpu_env, o->in2);
4292    return DISAS_NEXT;
4293}
4294
4295static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o)
4296{
4297    gen_helper_sckpf(cpu_env, regs[0]);
4298    return DISAS_NEXT;
4299}
4300
4301static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
4302{
4303    gen_helper_stckc(o->out, cpu_env);
4304    return DISAS_NEXT;
4305}
4306
4307static DisasJumpType op_stctg(DisasContext *s, DisasOps *o)
4308{
4309    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4310    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
4311    gen_helper_stctg(cpu_env, r1, o->in2, r3);
4312    tcg_temp_free_i32(r1);
4313    tcg_temp_free_i32(r3);
4314    return DISAS_NEXT;
4315}
4316
4317static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
4318{
4319    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4320    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
4321    gen_helper_stctl(cpu_env, r1, o->in2, r3);
4322    tcg_temp_free_i32(r1);
4323    tcg_temp_free_i32(r3);
4324    return DISAS_NEXT;
4325}
4326
4327static DisasJumpType op_stidp(DisasContext *s, DisasOps *o)
4328{
4329    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));
4330    return DISAS_NEXT;
4331}
4332
4333static DisasJumpType op_spt(DisasContext *s, DisasOps *o)
4334{
4335    gen_helper_spt(cpu_env, o->in2);
4336    return DISAS_NEXT;
4337}
4338
4339static DisasJumpType op_stfl(DisasContext *s, DisasOps *o)
4340{
4341    gen_helper_stfl(cpu_env);
4342    return DISAS_NEXT;
4343}
4344
4345static DisasJumpType op_stpt(DisasContext *s, DisasOps *o)
4346{
4347    gen_helper_stpt(o->out, cpu_env);
4348    return DISAS_NEXT;
4349}
4350
4351static DisasJumpType op_stsi(DisasContext *s, DisasOps *o)
4352{
4353    gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
4354    set_cc_static(s);
4355    return DISAS_NEXT;
4356}
4357
4358static DisasJumpType op_spx(DisasContext *s, DisasOps *o)
4359{
4360    gen_helper_spx(cpu_env, o->in2);
4361    return DISAS_NEXT;
4362}
4363
4364static DisasJumpType op_xsch(DisasContext *s, DisasOps *o)
4365{
4366    gen_helper_xsch(cpu_env, regs[1]);
4367    set_cc_static(s);
4368    return DISAS_NEXT;
4369}
4370
4371static DisasJumpType op_csch(DisasContext *s, DisasOps *o)
4372{
4373    gen_helper_csch(cpu_env, regs[1]);
4374    set_cc_static(s);
4375    return DISAS_NEXT;
4376}
4377
4378static DisasJumpType op_hsch(DisasContext *s, DisasOps *o)
4379{
4380    gen_helper_hsch(cpu_env, regs[1]);
4381    set_cc_static(s);
4382    return DISAS_NEXT;
4383}
4384
4385static DisasJumpType op_msch(DisasContext *s, DisasOps *o)
4386{
4387    gen_helper_msch(cpu_env, regs[1], o->in2);
4388    set_cc_static(s);
4389    return DISAS_NEXT;
4390}
4391
4392static DisasJumpType op_rchp(DisasContext *s, DisasOps *o)
4393{
4394    gen_helper_rchp(cpu_env, regs[1]);
4395    set_cc_static(s);
4396    return DISAS_NEXT;
4397}
4398
4399static DisasJumpType op_rsch(DisasContext *s, DisasOps *o)
4400{
4401    gen_helper_rsch(cpu_env, regs[1]);
4402    set_cc_static(s);
4403    return DISAS_NEXT;
4404}
4405
4406static DisasJumpType op_sal(DisasContext *s, DisasOps *o)
4407{
4408    gen_helper_sal(cpu_env, regs[1]);
4409    return DISAS_NEXT;
4410}
4411
4412static DisasJumpType op_schm(DisasContext *s, DisasOps *o)
4413{
4414    gen_helper_schm(cpu_env, regs[1], regs[2], o->in2);
4415    return DISAS_NEXT;
4416}
4417
4418static DisasJumpType op_siga(DisasContext *s, DisasOps *o)
4419{
4420    /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
4421    gen_op_movi_cc(s, 3);
4422    return DISAS_NEXT;
4423}
4424
4425static DisasJumpType op_stcps(DisasContext *s, DisasOps *o)
4426{
4427    /* The instruction is suppressed if not provided. */
4428    return DISAS_NEXT;
4429}
4430
4431static DisasJumpType op_ssch(DisasContext *s, DisasOps *o)
4432{
4433    gen_helper_ssch(cpu_env, regs[1], o->in2);
4434    set_cc_static(s);
4435    return DISAS_NEXT;
4436}
4437
4438static DisasJumpType op_stsch(DisasContext *s, DisasOps *o)
4439{
4440    gen_helper_stsch(cpu_env, regs[1], o->in2);
4441    set_cc_static(s);
4442    return DISAS_NEXT;
4443}
4444
4445static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o)
4446{
4447    gen_helper_stcrw(cpu_env, o->in2);
4448    set_cc_static(s);
4449    return DISAS_NEXT;
4450}
4451
4452static DisasJumpType op_tpi(DisasContext *s, DisasOps *o)
4453{
4454    gen_helper_tpi(cc_op, cpu_env, o->addr1);
4455    set_cc_static(s);
4456    return DISAS_NEXT;
4457}
4458
4459static DisasJumpType op_tsch(DisasContext *s, DisasOps *o)
4460{
4461    gen_helper_tsch(cpu_env, regs[1], o->in2);
4462    set_cc_static(s);
4463    return DISAS_NEXT;
4464}
4465
4466static DisasJumpType op_chsc(DisasContext *s, DisasOps *o)
4467{
4468    gen_helper_chsc(cpu_env, o->in2);
4469    set_cc_static(s);
4470    return DISAS_NEXT;
4471}
4472
4473static DisasJumpType op_stpx(DisasContext *s, DisasOps *o)
4474{
4475    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
4476    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
4477    return DISAS_NEXT;
4478}
4479
4480static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
4481{
4482    uint64_t i2 = get_field(s->fields, i2);
4483    TCGv_i64 t;
4484
4485    /* It is important to do what the instruction name says: STORE THEN.
4486       If we let the output hook perform the store, a fault and restart
4487       would leave the wrong SYSTEM MASK in place.  */
4488    t = tcg_temp_new_i64();
4489    tcg_gen_shri_i64(t, psw_mask, 56);
4490    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
4491    tcg_temp_free_i64(t);
4492
4493    if (s->fields->op == 0xac) {
4494        tcg_gen_andi_i64(psw_mask, psw_mask,
4495                         (i2 << 56) | 0x00ffffffffffffffull);
4496    } else {
4497        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
4498    }
4499
4500    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
4501    return DISAS_PC_STALE_NOCHAIN;
4502}
4503
4504static DisasJumpType op_stura(DisasContext *s, DisasOps *o)
4505{
4506    gen_helper_stura(cpu_env, o->in2, o->in1);
4507    return DISAS_NEXT;
4508}
4509
4510static DisasJumpType op_sturg(DisasContext *s, DisasOps *o)
4511{
4512    gen_helper_sturg(cpu_env, o->in2, o->in1);
4513    return DISAS_NEXT;
4514}
4515#endif
4516
4517static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
4518{
4519    gen_helper_stfle(cc_op, cpu_env, o->in2);
4520    set_cc_static(s);
4521    return DISAS_NEXT;
4522}
4523
4524static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
4525{
4526    tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
4527    return DISAS_NEXT;
4528}
4529
4530static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
4531{
4532    tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
4533    return DISAS_NEXT;
4534}
4535
4536static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
4537{
4538    tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
4539    return DISAS_NEXT;
4540}
4541
4542static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
4543{
4544    tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
4545    return DISAS_NEXT;
4546}
4547
4548static DisasJumpType op_stam(DisasContext *s, DisasOps *o)
4549{
4550    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4551    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
4552    gen_helper_stam(cpu_env, r1, o->in2, r3);
4553    tcg_temp_free_i32(r1);
4554    tcg_temp_free_i32(r3);
4555    return DISAS_NEXT;
4556}
4557
4558static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
4559{
4560    int m3 = get_field(s->fields, m3);
4561    int pos, base = s->insn->data;
4562    TCGv_i64 tmp = tcg_temp_new_i64();
4563
4564    pos = base + ctz32(m3) * 8;
4565    switch (m3) {
4566    case 0xf:
4567        /* Effectively a 32-bit store.  */
4568        tcg_gen_shri_i64(tmp, o->in1, pos);
4569        tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
4570        break;
4571
4572    case 0xc:
4573    case 0x6:
4574    case 0x3:
4575        /* Effectively a 16-bit store.  */
4576        tcg_gen_shri_i64(tmp, o->in1, pos);
4577        tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
4578        break;
4579
4580    case 0x8:
4581    case 0x4:
4582    case 0x2:
4583    case 0x1:
4584        /* Effectively an 8-bit store.  */
4585        tcg_gen_shri_i64(tmp, o->in1, pos);
4586        tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
4587        break;
4588
4589    default:
4590        /* This is going to be a sequence of shifts and stores.  */
4591        pos = base + 32 - 8;
4592        while (m3) {
4593            if (m3 & 0x8) {
4594                tcg_gen_shri_i64(tmp, o->in1, pos);
4595                tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
4596                tcg_gen_addi_i64(o->in2, o->in2, 1);
4597            }
4598            m3 = (m3 << 1) & 0xf;
4599            pos -= 8;
4600        }
4601        break;
4602    }
4603    tcg_temp_free_i64(tmp);
4604    return DISAS_NEXT;
4605}
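
/*
 * Illustrative sketch (editor's addition, not part of QEMU): reference
 * semantics of STORE CHARACTERS UNDER MASK, which op_stcm reproduces
 * with three fast paths (contiguous masks become one 32/16/8-bit store)
 * plus the general shift-and-store loop.  'base' is the bit offset of
 * the source word in the register (assumed 0 for STCM, 32 for STCMH).
 */
#if 0
#include <stdint.h>

static void stcm_ref(uint64_t r1, int m3, int base, uint8_t *dst)
{
    int bit, pos = base + 32 - 8;

    for (bit = 3; bit >= 0; bit--, pos -= 8) {
        if (m3 & (1 << bit)) {
            *dst++ = (uint8_t)(r1 >> pos);  /* next selected byte */
        }
    }
}
#endif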
4606
4607static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
4608{
4609    int r1 = get_field(s->fields, r1);
4610    int r3 = get_field(s->fields, r3);
4611    int size = s->insn->data;
4612    TCGv_i64 tsize = tcg_const_i64(size);
4613
4614    while (1) {
4615        if (size == 8) {
4616            tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
4617        } else {
4618            tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
4619        }
4620        if (r1 == r3) {
4621            break;
4622        }
4623        tcg_gen_add_i64(o->in2, o->in2, tsize);
4624        r1 = (r1 + 1) & 15;
4625    }
4626
4627    tcg_temp_free_i64(tsize);
4628    return DISAS_NEXT;
4629}
4630
4631static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
4632{
4633    int r1 = get_field(s->fields, r1);
4634    int r3 = get_field(s->fields, r3);
4635    TCGv_i64 t = tcg_temp_new_i64();
4636    TCGv_i64 t4 = tcg_const_i64(4);
4637    TCGv_i64 t32 = tcg_const_i64(32);
4638
4639    while (1) {
4640        tcg_gen_shl_i64(t, regs[r1], t32);
4641        tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
4642        if (r1 == r3) {
4643            break;
4644        }
4645        tcg_gen_add_i64(o->in2, o->in2, t4);
4646        r1 = (r1 + 1) & 15;
4647    }
4648
4649    tcg_temp_free_i64(t);
4650    tcg_temp_free_i64(t4);
4651    tcg_temp_free_i64(t32);
4652    return DISAS_NEXT;
4653}
4654
4655static DisasJumpType op_stpq(DisasContext *s, DisasOps *o)
4656{
4657    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
4658        gen_helper_stpq(cpu_env, o->in2, o->out2, o->out);
4659    } else if (HAVE_ATOMIC128) {
4660        gen_helper_stpq_parallel(cpu_env, o->in2, o->out2, o->out);
4661    } else {
4662        gen_helper_exit_atomic(cpu_env);
4663        return DISAS_NORETURN;
4664    }
4665    return DISAS_NEXT;
4666}
4667
4668static DisasJumpType op_srst(DisasContext *s, DisasOps *o)
4669{
4670    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4671    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4672
4673    gen_helper_srst(cpu_env, r1, r2);
4674
4675    tcg_temp_free_i32(r1);
4676    tcg_temp_free_i32(r2);
4677    set_cc_static(s);
4678    return DISAS_NEXT;
4679}
4680
4681static DisasJumpType op_srstu(DisasContext *s, DisasOps *o)
4682{
4683    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4684    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4685
4686    gen_helper_srstu(cpu_env, r1, r2);
4687
4688    tcg_temp_free_i32(r1);
4689    tcg_temp_free_i32(r2);
4690    set_cc_static(s);
4691    return DISAS_NEXT;
4692}
4693
4694static DisasJumpType op_sub(DisasContext *s, DisasOps *o)
4695{
4696    tcg_gen_sub_i64(o->out, o->in1, o->in2);
4697    return DISAS_NEXT;
4698}
4699
4700static DisasJumpType op_subb(DisasContext *s, DisasOps *o)
4701{
4702    DisasCompare cmp;
4703    TCGv_i64 borrow;
4704
4705    tcg_gen_sub_i64(o->out, o->in1, o->in2);
4706
4707    /* The !borrow flag is the msb of CC.  Since we want the inverse of
4708       that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4.  */
4709    disas_jcc(s, &cmp, 8 | 4);
4710    borrow = tcg_temp_new_i64();
4711    if (cmp.is_64) {
4712        tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
4713    } else {
4714        TCGv_i32 t = tcg_temp_new_i32();
4715        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
4716        tcg_gen_extu_i32_i64(borrow, t);
4717        tcg_temp_free_i32(t);
4718    }
4719    free_compare(&cmp);
4720
4721    tcg_gen_sub_i64(o->out, o->out, borrow);
4722    tcg_temp_free_i64(borrow);
4723    return DISAS_NEXT;
4724}
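
/*
 * Illustrative sketch (editor's addition, not part of QEMU): recovering
 * the borrow for SUBTRACT LOGICAL WITH BORROW.  After a logical
 * subtraction the msb of the two-bit CC (i.e. CC 2 or 3) means "no
 * borrow", so asking disas_jcc for CC 0 or 1 (mask 8 | 4) yields the
 * borrow itself:
 */
#if 0
#include <stdint.h>

static uint64_t slb(uint64_t a, uint64_t b, int prev_cc)
{
    uint64_t borrow = (prev_cc & 2) ? 0 : 1;    /* CC 0/1 -> borrow */

    return a - b - borrow;
}
#endif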
4725
4726static DisasJumpType op_svc(DisasContext *s, DisasOps *o)
4727{
4728    TCGv_i32 t;
4729
4730    update_psw_addr(s);
4731    update_cc_op(s);
4732
4733    t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
4734    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
4735    tcg_temp_free_i32(t);
4736
4737    t = tcg_const_i32(s->ilen);
4738    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
4739    tcg_temp_free_i32(t);
4740
4741    gen_exception(EXCP_SVC);
4742    return DISAS_NORETURN;
4743}
4744
4745static DisasJumpType op_tam(DisasContext *s, DisasOps *o)
4746{
4747    int cc = 0;
4748
4749    cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0;
4750    cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0;
4751    gen_op_movi_cc(s, cc);
4752    return DISAS_NEXT;
4753}
4754
4755static DisasJumpType op_tceb(DisasContext *s, DisasOps *o)
4756{
4757    gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
4758    set_cc_static(s);
4759    return DISAS_NEXT;
4760}
4761
4762static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o)
4763{
4764    gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
4765    set_cc_static(s);
4766    return DISAS_NEXT;
4767}
4768
4769static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o)
4770{
4771    gen_helper_tcxb(cc_op, cpu_env, o->out, o->out2, o->in2);
4772    set_cc_static(s);
4773    return DISAS_NEXT;
4774}
4775
4776#ifndef CONFIG_USER_ONLY
4777
4778static DisasJumpType op_testblock(DisasContext *s, DisasOps *o)
4779{
4780    gen_helper_testblock(cc_op, cpu_env, o->in2);
4781    set_cc_static(s);
4782    return DISAS_NEXT;
4783}
4784
4785static DisasJumpType op_tprot(DisasContext *s, DisasOps *o)
4786{
4787    gen_helper_tprot(cc_op, cpu_env, o->addr1, o->in2);
4788    set_cc_static(s);
4789    return DISAS_NEXT;
4790}
4791
4792#endif
4793
4794static DisasJumpType op_tp(DisasContext *s, DisasOps *o)
4795{
4796    TCGv_i32 l1 = tcg_const_i32(get_field(s->fields, l1) + 1);
4797    gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
4798    tcg_temp_free_i32(l1);
4799    set_cc_static(s);
4800    return DISAS_NEXT;
4801}
4802
4803static DisasJumpType op_tr(DisasContext *s, DisasOps *o)
4804{
4805    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4806    gen_helper_tr(cpu_env, l, o->addr1, o->in2);
4807    tcg_temp_free_i32(l);
4808    set_cc_static(s);
4809    return DISAS_NEXT;
4810}
4811
4812static DisasJumpType op_tre(DisasContext *s, DisasOps *o)
4813{
4814    gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
4815    return_low128(o->out2);
4816    set_cc_static(s);
4817    return DISAS_NEXT;
4818}
4819
4820static DisasJumpType op_trt(DisasContext *s, DisasOps *o)
4821{
4822    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4823    gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
4824    tcg_temp_free_i32(l);
4825    set_cc_static(s);
4826    return DISAS_NEXT;
4827}
4828
4829static DisasJumpType op_trtr(DisasContext *s, DisasOps *o)
4830{
4831    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4832    gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2);
4833    tcg_temp_free_i32(l);
4834    set_cc_static(s);
4835    return DISAS_NEXT;
4836}
4837
4838static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
4839{
4840    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4841    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4842    TCGv_i32 sizes = tcg_const_i32(s->insn->opc & 3);
4843    TCGv_i32 tst = tcg_temp_new_i32();
4844    int m3 = get_field(s->fields, m3);
4845
4846    if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
4847        m3 = 0;
4848    }
4849    if (m3 & 1) {
4850        tcg_gen_movi_i32(tst, -1);
4851    } else {
4852        tcg_gen_extrl_i64_i32(tst, regs[0]);
4853        if (s->insn->opc & 3) {
4854            tcg_gen_ext8u_i32(tst, tst);
4855        } else {
4856            tcg_gen_ext16u_i32(tst, tst);
4857        }
4858    }
4859    gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);
4860
4861    tcg_temp_free_i32(r1);
4862    tcg_temp_free_i32(r2);
4863    tcg_temp_free_i32(sizes);
4864    tcg_temp_free_i32(tst);
4865    set_cc_static(s);
4866    return DISAS_NEXT;
4867}
4868
4869static DisasJumpType op_ts(DisasContext *s, DisasOps *o)
4870{
4871    TCGv_i32 t1 = tcg_const_i32(0xff);
4872    tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
4873    tcg_gen_extract_i32(cc_op, t1, 7, 1);
4874    tcg_temp_free_i32(t1);
4875    set_cc_static(s);
4876    return DISAS_NEXT;
4877}
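
/*
 * Illustrative sketch (editor's addition, not part of QEMU): TEST AND
 * SET in C11, the same shape as op_ts.  The byte is atomically replaced
 * with all ones and the CC comes from the old leftmost bit.
 */
#if 0
#include <stdatomic.h>
#include <stdint.h>

static int test_and_set(_Atomic uint8_t *lock)
{
    uint8_t old = atomic_exchange(lock, 0xff);

    return (old >> 7) & 1;          /* CC 0: was free, CC 1: was set */
}
#endif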
4878
4879static DisasJumpType op_unpk(DisasContext *s, DisasOps *o)
4880{
4881    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4882    gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
4883    tcg_temp_free_i32(l);
4884    return DISAS_NEXT;
4885}
4886
4887static DisasJumpType op_unpka(DisasContext *s, DisasOps *o)
4888{
4889    int l1 = get_field(s->fields, l1) + 1;
4890    TCGv_i32 l;
4891
4892    /* The length must not exceed 32 bytes.  */
4893    if (l1 > 32) {
4894        gen_program_exception(s, PGM_SPECIFICATION);
4895        return DISAS_NORETURN;
4896    }
4897    l = tcg_const_i32(l1);
4898    gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
4899    tcg_temp_free_i32(l);
4900    set_cc_static(s);
4901    return DISAS_NEXT;
4902}
4903
4904static DisasJumpType op_unpku(DisasContext *s, DisasOps *o)
4905{
4906    int l1 = get_field(s->fields, l1) + 1;
4907    TCGv_i32 l;
4908
4909    /* The length must be even and must not exceed 64 bytes.  */
4910    if ((l1 & 1) || (l1 > 64)) {
4911        gen_program_exception(s, PGM_SPECIFICATION);
4912        return DISAS_NORETURN;
4913    }
4914    l = tcg_const_i32(l1);
4915    gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
4916    tcg_temp_free_i32(l);
4917    set_cc_static(s);
4918    return DISAS_NEXT;
4919}
4920
4921
4922static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
4923{
4924    int d1 = get_field(s->fields, d1);
4925    int d2 = get_field(s->fields, d2);
4926    int b1 = get_field(s->fields, b1);
4927    int b2 = get_field(s->fields, b2);
4928    int l = get_field(s->fields, l1);
4929    TCGv_i32 t32;
4930
4931    o->addr1 = get_address(s, 0, b1, d1);
4932
4933    /* If the addresses are identical, this is a store/memset of zero.  */
4934    if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
4935        o->in2 = tcg_const_i64(0);
4936
4937        l++;
4938        while (l >= 8) {
4939            tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
4940            l -= 8;
4941            if (l > 0) {
4942                tcg_gen_addi_i64(o->addr1, o->addr1, 8);
4943            }
4944        }
4945        if (l >= 4) {
4946            tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
4947            l -= 4;
4948            if (l > 0) {
4949                tcg_gen_addi_i64(o->addr1, o->addr1, 4);
4950            }
4951        }
4952        if (l >= 2) {
4953            tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
4954            l -= 2;
4955            if (l > 0) {
4956                tcg_gen_addi_i64(o->addr1, o->addr1, 2);
4957            }
4958        }
4959        if (l) {
4960            tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
4961        }
4962        gen_op_movi_cc(s, 0);
4963        return DISAS_NEXT;
4964    }
4965
4966    /* But in general we'll defer to a helper.  */
4967    o->in2 = get_address(s, 0, b2, d2);
4968    t32 = tcg_const_i32(l);
4969    gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
4970    tcg_temp_free_i32(t32);
4971    set_cc_static(s);
4972    return DISAS_NEXT;
4973}
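
/*
 * Illustrative sketch (editor's addition, not part of QEMU): the store
 * sequence op_xc emits when both operands alias, i.e. XC used as a
 * memset(0).  Up to 32 bytes are cleared with the widest stores first
 * (the casts assume alignment; the generated TCG stores do not need it).
 */
#if 0
#include <stddef.h>
#include <stdint.h>

static void xc_clear(volatile uint8_t *p, size_t len)   /* len = l + 1 */
{
    while (len >= 8) {
        *(volatile uint64_t *)p = 0;  p += 8;  len -= 8;        /* st64 */
    }
    if (len >= 4) { *(volatile uint32_t *)p = 0;  p += 4;  len -= 4; }
    if (len >= 2) { *(volatile uint16_t *)p = 0;  p += 2;  len -= 2; }
    if (len)      { *p = 0; }
}
#endif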
4974
4975static DisasJumpType op_xor(DisasContext *s, DisasOps *o)
4976{
4977    tcg_gen_xor_i64(o->out, o->in1, o->in2);
4978    return DISAS_NEXT;
4979}
4980
4981static DisasJumpType op_xori(DisasContext *s, DisasOps *o)
4982{
4983    int shift = s->insn->data & 0xff;
4984    int size = s->insn->data >> 8;
4985    uint64_t mask = ((1ull << size) - 1) << shift;
4986
4987    assert(!o->g_in2);
4988    tcg_gen_shli_i64(o->in2, o->in2, shift);
4989    tcg_gen_xor_i64(o->out, o->in1, o->in2);
4990
4991    /* Produce the CC from only the bits manipulated.  */
4992    tcg_gen_andi_i64(cc_dst, o->out, mask);
4993    set_cc_nz_u64(s, cc_dst);
4994    return DISAS_NEXT;
4995}
4996
4997static DisasJumpType op_xi(DisasContext *s, DisasOps *o)
4998{
4999    o->in1 = tcg_temp_new_i64();
5000
5001    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
5002        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
5003    } else {
5004        /* Perform the atomic operation in memory. */
5005        tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
5006                                     s->insn->data);
5007    }
5008
5009    /* Recompute for the atomic case as well: needed for setting the CC. */
5010    tcg_gen_xor_i64(o->out, o->in1, o->in2);
5011
5012    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
5013        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
5014    }
5015    return DISAS_NEXT;
5016}
5017
5018static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
5019{
5020    o->out = tcg_const_i64(0);
5021    return DISAS_NEXT;
5022}
5023
5024static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
5025{
5026    o->out = tcg_const_i64(0);
5027    o->out2 = o->out;
5028    o->g_out2 = true;
5029    return DISAS_NEXT;
5030}
5031
5032#ifndef CONFIG_USER_ONLY
5033static DisasJumpType op_clp(DisasContext *s, DisasOps *o)
5034{
5035    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
5036
5037    gen_helper_clp(cpu_env, r2);
5038    tcg_temp_free_i32(r2);
5039    set_cc_static(s);
5040    return DISAS_NEXT;
5041}
5042
5043static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o)
5044{
5045    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
5046    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
5047
5048    gen_helper_pcilg(cpu_env, r1, r2);
5049    tcg_temp_free_i32(r1);
5050    tcg_temp_free_i32(r2);
5051    set_cc_static(s);
5052    return DISAS_NEXT;
5053}
5054
5055static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o)
5056{
5057    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
5058    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
5059
5060    gen_helper_pcistg(cpu_env, r1, r2);
5061    tcg_temp_free_i32(r1);
5062    tcg_temp_free_i32(r2);
5063    set_cc_static(s);
5064    return DISAS_NEXT;
5065}
5066
5067static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o)
5068{
5069    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
5070    TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));
5071
5072    gen_helper_stpcifc(cpu_env, r1, o->addr1, ar);
5073    tcg_temp_free_i32(ar);
5074    tcg_temp_free_i32(r1);
5075    set_cc_static(s);
5076    return DISAS_NEXT;
5077}
5078
5079static DisasJumpType op_sic(DisasContext *s, DisasOps *o)
5080{
5081    gen_helper_sic(cpu_env, o->in1, o->in2);
5082    return DISAS_NEXT;
5083}
5084
5085static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o)
5086{
5087    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
5088    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
5089
5090    gen_helper_rpcit(cpu_env, r1, r2);
5091    tcg_temp_free_i32(r1);
5092    tcg_temp_free_i32(r2);
5093    set_cc_static(s);
5094    return DISAS_NEXT;
5095}
5096
5097static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o)
5098{
5099    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
5100    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
5101    TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));
5102
5103    gen_helper_pcistb(cpu_env, r1, r3, o->addr1, ar);
5104    tcg_temp_free_i32(ar);
5105    tcg_temp_free_i32(r1);
5106    tcg_temp_free_i32(r3);
5107    set_cc_static(s);
5108    return DISAS_NEXT;
5109}
5110
5111static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o)
5112{
5113    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
5114    TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));
5115
5116    gen_helper_mpcifc(cpu_env, r1, o->addr1, ar);
5117    tcg_temp_free_i32(ar);
5118    tcg_temp_free_i32(r1);
5119    set_cc_static(s);
5120    return DISAS_NEXT;
5121}
5122#endif
5123
5124#include "translate_vx.inc.c"
5125
5126/* ====================================================================== */
5127/* The "Cc OUTput" generators.  Given the generated output (and in some cases
5128   the original inputs), update the various cc data structures in order to
5129   be able to compute the new condition code.  */
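
/*
 * Illustrative sketch (editor's addition, not part of QEMU): these
 * cout_* hooks do not compute the two-bit CC.  They park the operands
 * in cc_src/cc_dst/cc_vr together with a CC_OP_* tag, and the CC is
 * only materialized when something consumes it, along these lines
 * (stand-alone mini version with made-up tags):
 */
#if 0
#include <stdint.h>

enum sk_cc_op { SK_NZ, SK_LTGT0_32 };

static int sk_calc_cc(enum sk_cc_op op, uint64_t dst)
{
    switch (op) {
    case SK_NZ:                         /* cf. CC_OP_NZ */
        return dst != 0;
    case SK_LTGT0_32:                   /* cf. CC_OP_LTGT0_32 */
        return (int32_t)dst < 0 ? 1 : (int32_t)dst > 0 ? 2 : 0;
    }
    return 0;
}
#endif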
5130
5131static void cout_abs32(DisasContext *s, DisasOps *o)
5132{
5133    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
5134}
5135
5136static void cout_abs64(DisasContext *s, DisasOps *o)
5137{
5138    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
5139}
5140
5141static void cout_adds32(DisasContext *s, DisasOps *o)
5142{
5143    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
5144}
5145
5146static void cout_adds64(DisasContext *s, DisasOps *o)
5147{
5148    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
5149}
5150
5151static void cout_addu32(DisasContext *s, DisasOps *o)
5152{
5153    gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
5154}
5155
5156static void cout_addu64(DisasContext *s, DisasOps *o)
5157{
5158    gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
5159}
5160
5161static void cout_addc32(DisasContext *s, DisasOps *o)
5162{
5163    gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
5164}
5165
5166static void cout_addc64(DisasContext *s, DisasOps *o)
5167{
5168    gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
5169}
5170
5171static void cout_cmps32(DisasContext *s, DisasOps *o)
5172{
5173    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
5174}
5175
5176static void cout_cmps64(DisasContext *s, DisasOps *o)
5177{
5178    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
5179}
5180
5181static void cout_cmpu32(DisasContext *s, DisasOps *o)
5182{
5183    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
5184}
5185
5186static void cout_cmpu64(DisasContext *s, DisasOps *o)
5187{
5188    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
5189}
5190
5191static void cout_f32(DisasContext *s, DisasOps *o)
5192{
5193    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
5194}
5195
5196static void cout_f64(DisasContext *s, DisasOps *o)
5197{
5198    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
5199}
5200
5201static void cout_f128(DisasContext *s, DisasOps *o)
5202{
5203    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
5204}
5205
5206static void cout_nabs32(DisasContext *s, DisasOps *o)
5207{
5208    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
5209}
5210
5211static void cout_nabs64(DisasContext *s, DisasOps *o)
5212{
5213    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
5214}
5215
5216static void cout_neg32(DisasContext *s, DisasOps *o)
5217{
5218    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
5219}
5220
5221static void cout_neg64(DisasContext *s, DisasOps *o)
5222{
5223    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
5224}
5225
5226static void cout_nz32(DisasContext *s, DisasOps *o)
5227{
5228    tcg_gen_ext32u_i64(cc_dst, o->out);
5229    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
5230}
5231
5232static void cout_nz64(DisasContext *s, DisasOps *o)
5233{
5234    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
5235}
5236
5237static void cout_s32(DisasContext *s, DisasOps *o)
5238{
5239    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
5240}
5241
5242static void cout_s64(DisasContext *s, DisasOps *o)
5243{
5244    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
5245}
5246
5247static void cout_subs32(DisasContext *s, DisasOps *o)
5248{
5249    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
5250}
5251
5252static void cout_subs64(DisasContext *s, DisasOps *o)
5253{
5254    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
5255}
5256
5257static void cout_subu32(DisasContext *s, DisasOps *o)
5258{
5259    gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
5260}
5261
5262static void cout_subu64(DisasContext *s, DisasOps *o)
5263{
5264    gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
5265}
5266
5267static void cout_subb32(DisasContext *s, DisasOps *o)
5268{
5269    gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
5270}
5271
5272static void cout_subb64(DisasContext *s, DisasOps *o)
5273{
5274    gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
5275}
5276
5277static void cout_tm32(DisasContext *s, DisasOps *o)
5278{
5279    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
5280}
5281
5282static void cout_tm64(DisasContext *s, DisasOps *o)
5283{
5284    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
5285}
5286
5287/* ====================================================================== */
5288/* The "PREParation" generators.  These initialize the DisasOps.OUT fields
5289   with the TCG register to which we will write.  Used in combination with
5290   the "wout" generators, in some cases we need a new temporary, and in
5291   some cases we can write to a TCG global.  */
5292
5293static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
5294{
5295    o->out = tcg_temp_new_i64();
5296}
5297#define SPEC_prep_new 0
5298
5299static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
5300{
5301    o->out = tcg_temp_new_i64();
5302    o->out2 = tcg_temp_new_i64();
5303}
5304#define SPEC_prep_new_P 0
5305
5306static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
5307{
5308    o->out = regs[get_field(f, r1)];
5309    o->g_out = true;
5310}
5311#define SPEC_prep_r1 0
5312
5313static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
5314{
5315    int r1 = get_field(f, r1);
5316    o->out = regs[r1];
5317    o->out2 = regs[r1 + 1];
5318    o->g_out = o->g_out2 = true;
5319}
5320#define SPEC_prep_r1_P SPEC_r1_even
5321
5322/* Whenever we need x1 in addition to other inputs, we'll load it to out/out2 */
5323static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
5324{
5325    o->out = load_freg(get_field(f, r1));
5326    o->out2 = load_freg(get_field(f, r1) + 2);
5327}
5328#define SPEC_prep_x1 SPEC_r1_f128
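
/*
 * Editor's note (not part of QEMU): the prep generators above, the wout
 * generators below, and the in1/in2 loaders further down are stitched
 * together per opcode by the decode table.  A table entry names one
 * hook of each kind; e.g. the 32-bit ADD REGISTER entry in
 * insn-data.def reads roughly:
 *
 *     C(0x1a00, AR, RR_a, Z, r1, r2, new, r1_32, add, adds32)
 *
 * so translate_one() in effect runs in1_r1 and in2_r2 to load the
 * operands, prep_new to allocate the output temporary, op_add to emit
 * the operation, wout_r1_32 to write the result back, and cout_adds32
 * to record the CC operands.
 */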
5329
5330/* ====================================================================== */
5331/* The "Write OUTput" generators.  These generally perform some non-trivial
5332   copy of data to TCG globals, or to main memory.  The trivial cases are
5333   generally handled by having a "prep" generator install the TCG global
5334   as the destination of the operation.  */
5335
5336static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
5337{
5338    store_reg(get_field(f, r1), o->out);
5339}
5340#define SPEC_wout_r1 0
5341
5342static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
5343{
5344    int r1 = get_field(f, r1);
5345    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
5346}
5347#define SPEC_wout_r1_8 0
5348
5349static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
5350{
5351    int r1 = get_field(f, r1);
5352    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
5353}
5354#define SPEC_wout_r1_16 0
5355
5356static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
5357{
5358    store_reg32_i64(get_field(f, r1), o->out);
5359}
5360#define SPEC_wout_r1_32 0
5361
5362static void wout_r1_32h(DisasContext *s, DisasFields *f, DisasOps *o)
5363{
5364    store_reg32h_i64(get_field(f, r1), o->out);
5365}
5366#define SPEC_wout_r1_32h 0
5367
5368static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
5369{
5370    int r1 = get_field(f, r1);
5371    store_reg32_i64(r1, o->out);
5372    store_reg32_i64(r1 + 1, o->out2);
5373}
5374#define SPEC_wout_r1_P32 SPEC_r1_even
5375
5376static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
5377{
5378    int r1 = get_field(f, r1);
5379    store_reg32_i64(r1 + 1, o->out);
5380    tcg_gen_shri_i64(o->out, o->out, 32);
5381    store_reg32_i64(r1, o->out);
5382}
5383#define SPEC_wout_r1_D32 SPEC_r1_even
5384
5385static void wout_r3_P32(DisasContext *s, DisasFields *f, DisasOps *o)
5386{
5387    int r3 = get_field(f, r3);
5388    store_reg32_i64(r3, o->out);
5389    store_reg32_i64(r3 + 1, o->out2);
5390}
5391#define SPEC_wout_r3_P32 SPEC_r3_even
5392
5393static void wout_r3_P64(DisasContext *s, DisasFields *f, DisasOps *o)
5394{
5395    int r3 = get_field(f, r3);
5396    store_reg(r3, o->out);
5397    store_reg(r3 + 1, o->out2);
5398}
5399#define SPEC_wout_r3_P64 SPEC_r3_even
5400
5401static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
5402{
5403    store_freg32_i64(get_field(f, r1), o->out);
5404}
5405#define SPEC_wout_e1 0
5406
5407static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
5408{
5409    store_freg(get_field(f, r1), o->out);
5410}
5411#define SPEC_wout_f1 0
5412
5413static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
5414{
5415    int f1 = get_field(f, r1);
5416    store_freg(f1, o->out);
5417    store_freg(f1 + 2, o->out2);
5418}
5419#define SPEC_wout_x1 SPEC_r1_f128
5420
5421static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
5422{
5423    if (get_field(f, r1) != get_field(f, r2)) {
5424        store_reg32_i64(get_field(f, r1), o->out);
5425    }
5426}
5427#define SPEC_wout_cond_r1r2_32 0
5428
5429static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
5430{
5431    if (get_field(f, r1) != get_field(f, r2)) {
5432        store_freg32_i64(get_field(f, r1), o->out);
5433    }
5434}
5435#define SPEC_wout_cond_e1e2 0
5436
5437static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
5438{
5439    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
5440}
5441#define SPEC_wout_m1_8 0
5442
5443static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
5444{
5445    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
5446}
5447#define SPEC_wout_m1_16 0
5448
5449#ifndef CONFIG_USER_ONLY
5450static void wout_m1_16a(DisasContext *s, DisasFields *f, DisasOps *o)
5451{
5452    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN);
5453}
5454#define SPEC_wout_m1_16a 0
5455#endif
5456
5457static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
5458{
5459    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
5460}
5461#define SPEC_wout_m1_32 0
5462
5463#ifndef CONFIG_USER_ONLY
5464static void wout_m1_32a(DisasContext *s, DisasFields *f, DisasOps *o)
5465{
5466    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN);
5467}
5468#define SPEC_wout_m1_32a 0
5469#endif
5470
5471static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
5472{
5473    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
5474}
5475#define SPEC_wout_m1_64 0
5476
5477#ifndef CONFIG_USER_ONLY
5478static void wout_m1_64a(DisasContext *s, DisasFields *f, DisasOps *o)
5479{
5480    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
5481}
5482#define SPEC_wout_m1_64a 0
5483#endif
5484
5485static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
5486{
5487    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
5488}
5489#define SPEC_wout_m2_32 0
5490
5491static void wout_in2_r1(DisasContext *s, DisasFields *f, DisasOps *o)
5492{
5493    store_reg(get_field(f, r1), o->in2);
5494}
5495#define SPEC_wout_in2_r1 0
5496
5497static void wout_in2_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
5498{
5499    store_reg32_i64(get_field(f, r1), o->in2);
5500}
5501#define SPEC_wout_in2_r1_32 0
5502
5503/* ====================================================================== */
5504/* The "INput 1" generators.  These load the first operand to an insn.  */
5505
5506static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
5507{
5508    o->in1 = load_reg(get_field(f, r1));
5509}
5510#define SPEC_in1_r1 0
5511
5512static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
5513{
5514    o->in1 = regs[get_field(f, r1)];
5515    o->g_in1 = true;
5516}
5517#define SPEC_in1_r1_o 0
5518
5519static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5520{
5521    o->in1 = tcg_temp_new_i64();
5522    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
5523}
5524#define SPEC_in1_r1_32s 0
5525
5526static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5527{
5528    o->in1 = tcg_temp_new_i64();
5529    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
5530}
5531#define SPEC_in1_r1_32u 0
5532
5533static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
5534{
5535    o->in1 = tcg_temp_new_i64();
5536    tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
5537}
5538#define SPEC_in1_r1_sr32 0
5539
5540static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
5541{
5542    o->in1 = load_reg(get_field(f, r1) + 1);
5543}
5544#define SPEC_in1_r1p1 SPEC_r1_even
5545
5546static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5547{
5548    o->in1 = tcg_temp_new_i64();
5549    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
5550}
5551#define SPEC_in1_r1p1_32s SPEC_r1_even
5552
5553static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5554{
5555    o->in1 = tcg_temp_new_i64();
5556    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
5557}
5558#define SPEC_in1_r1p1_32u SPEC_r1_even
5559
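/* Load the 64-bit value held by the even/odd pair R1:R1+1;
   R1 supplies the high 32 bits.  */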
5560static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
5561{
5562    int r1 = get_field(f, r1);
5563    o->in1 = tcg_temp_new_i64();
5564    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
5565}
5566#define SPEC_in1_r1_D32 SPEC_r1_even
5567
5568static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
5569{
5570    o->in1 = load_reg(get_field(f, r2));
5571}
5572#define SPEC_in1_r2 0
5573
5574static void in1_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
5575{
5576    o->in1 = tcg_temp_new_i64();
5577    tcg_gen_shri_i64(o->in1, regs[get_field(f, r2)], 32);
5578}
5579#define SPEC_in1_r2_sr32 0
5580
5581static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
5582{
5583    o->in1 = load_reg(get_field(f, r3));
5584}
5585#define SPEC_in1_r3 0
5586
5587static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
5588{
5589    o->in1 = regs[get_field(f, r3)];
5590    o->g_in1 = true;
5591}
5592#define SPEC_in1_r3_o 0
5593
5594static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5595{
5596    o->in1 = tcg_temp_new_i64();
5597    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
5598}
5599#define SPEC_in1_r3_32s 0
5600
5601static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5602{
5603    o->in1 = tcg_temp_new_i64();
5604    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
5605}
5606#define SPEC_in1_r3_32u 0
5607
5608static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
5609{
5610    int r3 = get_field(f, r3);
5611    o->in1 = tcg_temp_new_i64();
5612    tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
5613}
5614#define SPEC_in1_r3_D32 SPEC_r3_even
5615
5616static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
5617{
5618    o->in1 = load_freg32_i64(get_field(f, r1));
5619}
5620#define SPEC_in1_e1 0
5621
5622static void in1_f1(DisasContext *s, DisasFields *f, DisasOps *o)
5623{
5624    o->in1 = load_freg(get_field(f, r1));
5625}
5626#define SPEC_in1_f1 0
5627
5628/* Load the high double word of an extended (128-bit) format FP number */
5629static void in1_x2h(DisasContext *s, DisasFields *f, DisasOps *o)
5630{
5631    o->in1 = load_freg(get_field(f, r2));
5632}
5633#define SPEC_in1_x2h SPEC_r2_f128
5634
5635static void in1_f3(DisasContext *s, DisasFields *f, DisasOps *o)
5636{
5637    o->in1 = load_freg(get_field(f, r3));
5638}
5639#define SPEC_in1_f3 0
5640
5641static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
5642{
5643    o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
5644}
5645#define SPEC_in1_la1 0
5646
5647static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
5648{
5649    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
5650    o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
5651}
5652#define SPEC_in1_la2 0
5653
5654static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
5655{
5656    in1_la1(s, f, o);
5657    o->in1 = tcg_temp_new_i64();
5658    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
5659}
5660#define SPEC_in1_m1_8u 0
5661
5662static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
5663{
5664    in1_la1(s, f, o);
5665    o->in1 = tcg_temp_new_i64();
5666    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
5667}
5668#define SPEC_in1_m1_16s 0
5669
5670static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5671{
5672    in1_la1(s, f, o);
5673    o->in1 = tcg_temp_new_i64();
5674    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
5675}
5676#define SPEC_in1_m1_16u 0
5677
5678static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5679{
5680    in1_la1(s, f, o);
5681    o->in1 = tcg_temp_new_i64();
5682    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
5683}
5684#define SPEC_in1_m1_32s 0
5685
5686static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5687{
5688    in1_la1(s, f, o);
5689    o->in1 = tcg_temp_new_i64();
5690    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
5691}
5692#define SPEC_in1_m1_32u 0
5693
5694static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
5695{
5696    in1_la1(s, f, o);
5697    o->in1 = tcg_temp_new_i64();
5698    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
5699}
5700#define SPEC_in1_m1_64 0
5701
5702/* ====================================================================== */
5703/* The "INput 2" generators.  These load the second operand to an insn.  */
5704
5705static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
5706{
5707    o->in2 = regs[get_field(f, r1)];
5708    o->g_in2 = true;
5709}
5710#define SPEC_in2_r1_o 0
5711
5712static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5713{
5714    o->in2 = tcg_temp_new_i64();
5715    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
5716}
5717#define SPEC_in2_r1_16u 0
5718
5719static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5720{
5721    o->in2 = tcg_temp_new_i64();
5722    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
5723}
5724#define SPEC_in2_r1_32u 0
5725
5726static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
5727{
5728    int r1 = get_field(f, r1);
5729    o->in2 = tcg_temp_new_i64();
5730    tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
5731}
5732#define SPEC_in2_r1_D32 SPEC_r1_even
5733
5734static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
5735{
5736    o->in2 = load_reg(get_field(f, r2));
5737}
5738#define SPEC_in2_r2 0
5739
5740static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
5741{
5742    o->in2 = regs[get_field(f, r2)];
5743    o->g_in2 = true;
5744}
5745#define SPEC_in2_r2_o 0
5746
5747static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
5748{
5749    int r2 = get_field(f, r2);
5750    if (r2 != 0) {
5751        o->in2 = load_reg(r2);
5752    }
5753}
5754#define SPEC_in2_r2_nz 0
5755
5756static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
5757{
5758    o->in2 = tcg_temp_new_i64();
5759    tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
5760}
5761#define SPEC_in2_r2_8s 0
5762
5763static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
5764{
5765    o->in2 = tcg_temp_new_i64();
5766    tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
5767}
5768#define SPEC_in2_r2_8u 0
5769
5770static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
5771{
5772    o->in2 = tcg_temp_new_i64();
5773    tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
5774}
5775#define SPEC_in2_r2_16s 0
5776
5777static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5778{
5779    o->in2 = tcg_temp_new_i64();
5780    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
5781}
5782#define SPEC_in2_r2_16u 0
5783
5784static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
5785{
5786    o->in2 = load_reg(get_field(f, r3));
5787}
5788#define SPEC_in2_r3 0
5789
5790static void in2_r3_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
5791{
5792    o->in2 = tcg_temp_new_i64();
5793    tcg_gen_shri_i64(o->in2, regs[get_field(f, r3)], 32);
5794}
5795#define SPEC_in2_r3_sr32 0
5796
5797static void in2_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5798{
5799    o->in2 = tcg_temp_new_i64();
5800    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r3)]);
5801}
5802#define SPEC_in2_r3_32u 0
5803
5804static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5805{
5806    o->in2 = tcg_temp_new_i64();
5807    tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
5808}
5809#define SPEC_in2_r2_32s 0
5810
5811static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5812{
5813    o->in2 = tcg_temp_new_i64();
5814    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
5815}
5816#define SPEC_in2_r2_32u 0
5817
5818static void in2_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
5819{
5820    o->in2 = tcg_temp_new_i64();
5821    tcg_gen_shri_i64(o->in2, regs[get_field(f, r2)], 32);
5822}
5823#define SPEC_in2_r2_sr32 0
5824
5825static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
5826{
5827    o->in2 = load_freg32_i64(get_field(f, r2));
5828}
5829#define SPEC_in2_e2 0
5830
5831static void in2_f2(DisasContext *s, DisasFields *f, DisasOps *o)
5832{
5833    o->in2 = load_freg(get_field(f, r2));
5834}
5835#define SPEC_in2_f2 0
5836
5837/* Load the low double word of an extended (128-bit) format FP number */
5838static void in2_x2l(DisasContext *s, DisasFields *f, DisasOps *o)
5839{
5840    o->in2 = load_freg(get_field(f, r2) + 2);
5841}
5842#define SPEC_in2_x2l SPEC_r2_f128
5843
5844static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
5845{
5846    o->in2 = get_address(s, 0, get_field(f, r2), 0);
5847}
5848#define SPEC_in2_ra2 0
5849
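/* Compute the B2/X2/D2 effective address; formats without an index
   field simply use X2 = 0.  */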
5850static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
5851{
5852    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
5853    o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
5854}
5855#define SPEC_in2_a2 0
5856
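/* PC-relative operand: I2 is a signed offset in halfwords from the
   current insn address.  */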
5857static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
5858{
5859    o->in2 = tcg_const_i64(s->base.pc_next + (int64_t)get_field(f, i2) * 2);
5860}
5861#define SPEC_in2_ri2 0
5862
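/* The final argument to help_l2_shift() bounds the shift count:
   31 for 32-bit shifts, 63 for 64-bit ones.  */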
5863static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
5864{
5865    help_l2_shift(s, f, o, 31);
5866}
5867#define SPEC_in2_sh32 0
5868
5869static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
5870{
5871    help_l2_shift(s, f, o, 63);
5872}
5873#define SPEC_in2_sh64 0
5874
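/* The m2 memory loaders below reuse the address temporary created by
   in2_a2() to hold the loaded value.  */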
5875static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
5876{
5877    in2_a2(s, f, o);
5878    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
5879}
5880#define SPEC_in2_m2_8u 0
5881
5882static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
5883{
5884    in2_a2(s, f, o);
5885    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
5886}
5887#define SPEC_in2_m2_16s 0
5888
5889static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5890{
5891    in2_a2(s, f, o);
5892    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
5893}
5894#define SPEC_in2_m2_16u 0
5895
5896static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5897{
5898    in2_a2(s, f, o);
5899    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
5900}
5901#define SPEC_in2_m2_32s 0
5902
5903static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5904{
5905    in2_a2(s, f, o);
5906    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
5907}
5908#define SPEC_in2_m2_32u 0
5909
5910#ifndef CONFIG_USER_ONLY
5911static void in2_m2_32ua(DisasContext *s, DisasFields *f, DisasOps *o)
5912{
5913    in2_a2(s, f, o);
5914    tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN);
5915}
5916#define SPEC_in2_m2_32ua 0
5917#endif
5918
5919static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
5920{
5921    in2_a2(s, f, o);
5922    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
5923}
5924#define SPEC_in2_m2_64 0
5925
5926#ifndef CONFIG_USER_ONLY
5927static void in2_m2_64a(DisasContext *s, DisasFields *f, DisasOps *o)
5928{
5929    in2_a2(s, f, o);
5930    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEQ | MO_ALIGN);
5931}
5932#define SPEC_in2_m2_64a 0
5933#endif
5934
5935static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5936{
5937    in2_ri2(s, f, o);
5938    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
5939}
5940#define SPEC_in2_mri2_16u 0
5941
5942static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5943{
5944    in2_ri2(s, f, o);
5945    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
5946}
5947#define SPEC_in2_mri2_32s 0
5948
5949static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5950{
5951    in2_ri2(s, f, o);
5952    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
5953}
5954#define SPEC_in2_mri2_32u 0
5955
5956static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
5957{
5958    in2_ri2(s, f, o);
5959    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
5960}
5961#define SPEC_in2_mri2_64 0
5962
5963static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
5964{
5965    o->in2 = tcg_const_i64(get_field(f, i2));
5966}
5967#define SPEC_in2_i2 0
5968
5969static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
5970{
5971    o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
5972}
5973#define SPEC_in2_i2_8u 0
5974
5975static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5976{
5977    o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
5978}
5979#define SPEC_in2_i2_16u 0
5980
5981static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5982{
5983    o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
5984}
5985#define SPEC_in2_i2_32u 0
5986
5987static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
5988{
5989    uint64_t i2 = (uint16_t)get_field(f, i2);
5990    o->in2 = tcg_const_i64(i2 << s->insn->data);
5991}
5992#define SPEC_in2_i2_16u_shl 0
5993
5994static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
5995{
5996    uint64_t i2 = (uint32_t)get_field(f, i2);
5997    o->in2 = tcg_const_i64(i2 << s->insn->data);
5998}
5999#define SPEC_in2_i2_32u_shl 0
6000
6001#ifndef CONFIG_USER_ONLY
6002static void in2_insn(DisasContext *s, DisasFields *f, DisasOps *o)
6003{
6004    o->in2 = tcg_const_i64(s->fields->raw_insn);
6005}
6006#define SPEC_in2_insn 0
6007#endif
6008
6009/* ====================================================================== */
6010
6011/* Find opc within the table of insns.  This is formulated as a switch
6012   statement so that (1) we get compile-time notice of cut-paste errors
6013   for duplicated opcodes, and (2) the compiler generates the binary
6014   search tree, rather than us having to post-process the table.  */
6015
6016#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
6017    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0)
6018
6019#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
6020    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0)
6021
6022#define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \
6023    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL)
6024
6025#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM,
6026
6027enum DisasInsnEnum {
6028#include "insn-data.def"
6029};
6030
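/* insn-data.def is expanded three times: above for the enum of insn
   names, below for the info table, and again in lookup_opc() for the
   switch cases.  */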
6031#undef E
6032#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) {                   \
6033    .opc = OPC,                                                             \
6034    .flags = FL,                                                            \
6035    .fmt = FMT_##FT,                                                        \
6036    .fac = FAC_##FC,                                                        \
6037    .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
6038    .name = #NM,                                                            \
6039    .help_in1 = in1_##I1,                                                   \
6040    .help_in2 = in2_##I2,                                                   \
6041    .help_prep = prep_##P,                                                  \
6042    .help_wout = wout_##W,                                                  \
6043    .help_cout = cout_##CC,                                                 \
6044    .help_op = op_##OP,                                                     \
6045    .data = D                                                               \
6046 },
6047
6048/* Allow 0 to be used for NULL in the table below.  */
6049#define in1_0  NULL
6050#define in2_0  NULL
6051#define prep_0  NULL
6052#define wout_0  NULL
6053#define cout_0  NULL
6054#define op_0  NULL
6055
6056#define SPEC_in1_0 0
6057#define SPEC_in2_0 0
6058#define SPEC_prep_0 0
6059#define SPEC_wout_0 0
6060
6061/* Give smaller names to the various facilities.  */
6062#define FAC_Z           S390_FEAT_ZARCH
6063#define FAC_CASS        S390_FEAT_COMPARE_AND_SWAP_AND_STORE
6064#define FAC_DFP         S390_FEAT_DFP
6065#define FAC_DFPR        S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* DFP-rounding */
6066#define FAC_DO          S390_FEAT_STFLE_45 /* distinct-operands */
6067#define FAC_EE          S390_FEAT_EXECUTE_EXT
6068#define FAC_EI          S390_FEAT_EXTENDED_IMMEDIATE
6069#define FAC_FPE         S390_FEAT_FLOATING_POINT_EXT
6070#define FAC_FPSSH       S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPS-sign-handling */
6071#define FAC_FPRGR       S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPR-GR-transfer */
6072#define FAC_GIE         S390_FEAT_GENERAL_INSTRUCTIONS_EXT
6073#define FAC_HFP_MA      S390_FEAT_HFP_MADDSUB
6074#define FAC_HW          S390_FEAT_STFLE_45 /* high-word */
6075#define FAC_IEEEE_SIM   S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* IEEE-exception-simulation */
6076#define FAC_MIE         S390_FEAT_STFLE_49 /* misc-instruction-extensions */
6077#define FAC_LAT         S390_FEAT_STFLE_49 /* load-and-trap */
6078#define FAC_LOC         S390_FEAT_STFLE_45 /* load/store on condition 1 */
6079#define FAC_LOC2        S390_FEAT_STFLE_53 /* load/store on condition 2 */
6080#define FAC_LD          S390_FEAT_LONG_DISPLACEMENT
6081#define FAC_PC          S390_FEAT_STFLE_45 /* population count */
6082#define FAC_SCF         S390_FEAT_STORE_CLOCK_FAST
6083#define FAC_SFLE        S390_FEAT_STFLE
6084#define FAC_ILA         S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
6085#define FAC_MVCOS       S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
6086#define FAC_LPP         S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
6087#define FAC_DAT_ENH     S390_FEAT_DAT_ENH
6088#define FAC_E2          S390_FEAT_EXTENDED_TRANSLATION_2
6089#define FAC_EH          S390_FEAT_STFLE_49 /* execution-hint */
6090#define FAC_PPA         S390_FEAT_STFLE_49 /* processor-assist */
6091#define FAC_LZRB        S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
6092#define FAC_ETF3        S390_FEAT_EXTENDED_TRANSLATION_3
6093#define FAC_MSA         S390_FEAT_MSA /* message-security-assist facility */
6094#define FAC_MSA3        S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
6095#define FAC_MSA4        S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
6096#define FAC_MSA5        S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
6097#define FAC_ECT         S390_FEAT_EXTRACT_CPU_TIME
6098#define FAC_PCI         S390_FEAT_ZPCI /* z/PCI facility */
6099#define FAC_AIS         S390_FEAT_ADAPTER_INT_SUPPRESSION
6100#define FAC_V           S390_FEAT_VECTOR /* vector facility */
6101
6102static const DisasInsn insn_info[] = {
6103#include "insn-data.def"
6104};
6105
6106#undef E
6107#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \
6108    case OPC: return &insn_info[insn_ ## NM];
6109
6110static const DisasInsn *lookup_opc(uint16_t opc)
6111{
6112    switch (opc) {
6113#include "insn-data.def"
6114    default:
6115        return NULL;
6116    }
6117}
6118
6119#undef F
6120#undef E
6121#undef D
6122#undef C
6123
6124/* Extract a field from the insn.  The INSN should be left-aligned in
6125   the uint64_t so that we can more easily utilize the big-bit-endian
6126   definitions we extract from the Principles of Operation.  */
6127
6128static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
6129{
6130    uint32_t r, m;
6131
6132    if (f->size == 0) {
6133        return;
6134    }
6135
6136    /* Zero extract the field from the insn.  */
6137    r = (insn << f->beg) >> (64 - f->size);
6138
6139    /* Sign-extend, or un-swap the field as necessary.  */
6140    switch (f->type) {
6141    case 0: /* unsigned */
6142        break;
6143    case 1: /* signed */
6144        assert(f->size <= 32);
6145        m = 1u << (f->size - 1);
6146        r = (r ^ m) - m;
6147        break;
6148    case 2: /* dl+dh split, signed 20 bit. */
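        /*
         * The insn encodes the displacement as DL (12 bits) followed by
         * DH (8 bits), so R currently holds DL in bits 8-19 and DH in
         * bits 0-7.  Reassemble it as the signed 20-bit value DH:DL.
         */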
6149        r = ((int8_t)r << 12) | (r >> 8);
6150        break;
6151    case 3: /* MSB stored in RXB */
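        /*
         * The most significant bit of each 5-bit vector register number
         * lives in the RXB field (instruction bits 36-39, one bit per
         * operand position); merge it back in here.
         */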
6152        g_assert(f->size == 4);
6153        switch (f->beg) {
6154        case 8:
6155            r |= extract64(insn, 63 - 36, 1) << 4;
6156            break;
6157        case 12:
6158            r |= extract64(insn, 63 - 37, 1) << 4;
6159            break;
6160        case 16:
6161            r |= extract64(insn, 63 - 38, 1) << 4;
6162            break;
6163        case 32:
6164            r |= extract64(insn, 63 - 39, 1) << 4;
6165            break;
6166        default:
6167            g_assert_not_reached();
6168        }
6169        break;
6170    default:
6171        abort();
6172    }
6173
6174    /* Validate that the "compressed" encoding we selected above is valid.
6175       I.e. we haven't made two different original fields overlap.  */
6176    assert(((o->presentC >> f->indexC) & 1) == 0);
6177    o->presentC |= 1 << f->indexC;
6178    o->presentO |= 1 << f->indexO;
6179
6180    o->c[f->indexC] = r;
6181}
6182
6183/* Lookup the insn at the current PC, extracting the operands into O and
6184   returning the info struct for the insn.  Returns NULL for invalid insn.  */
6185
6186static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
6187                                     DisasFields *f)
6188{
6189    uint64_t insn, pc = s->base.pc_next;
6190    int op, op2, ilen;
6191    const DisasInsn *info;
6192
6193    if (unlikely(s->ex_value)) {
6194        /* Drop the EX data now, so that it's clear on exception paths.  */
6195        TCGv_i64 zero = tcg_const_i64(0);
6196        tcg_gen_st_i64(zero, cpu_env, offsetof(CPUS390XState, ex_value));
6197        tcg_temp_free_i64(zero);
6198
6199        /* Extract the values saved by EXECUTE.  */
6200        insn = s->ex_value & 0xffffffffffff0000ull;
6201        ilen = s->ex_value & 0xf;
6202        op = insn >> 56;
6203    } else {
6204        insn = ld_code2(env, pc);
6205        op = (insn >> 8) & 0xff;
6206        ilen = get_ilen(op);
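        /* Load any remaining bytes and left-align the insn in the
           64-bit word, as extract_field expects.  */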
6207        switch (ilen) {
6208        case 2:
6209            insn = insn << 48;
6210            break;
6211        case 4:
6212            insn = ld_code4(env, pc) << 32;
6213            break;
6214        case 6:
6215            insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
6216            break;
6217        default:
6218            g_assert_not_reached();
6219        }
6220    }
6221    s->pc_tmp = s->base.pc_next + ilen;
6222    s->ilen = ilen;
6223
6224    /* We can't actually determine the insn format until we've looked up
6225       the full insn opcode, which we can't do without locating the
6226       secondary opcode.  Assume by default that OP2 is at bit 40; for
6227       those smaller insns that don't actually have a secondary opcode
6228       this will correctly result in OP2 = 0. */
6229    switch (op) {
6230    case 0x01: /* E */
6231    case 0x80: /* S */
6232    case 0x82: /* S */
6233    case 0x93: /* S */
6234    case 0xb2: /* S, RRF, RRE, IE */
6235    case 0xb3: /* RRE, RRD, RRF */
6236    case 0xb9: /* RRE, RRF */
6237    case 0xe5: /* SSE, SIL */
6238        op2 = (insn << 8) >> 56;
6239        break;
6240    case 0xa5: /* RI */
6241    case 0xa7: /* RI */
6242    case 0xc0: /* RIL */
6243    case 0xc2: /* RIL */
6244    case 0xc4: /* RIL */
6245    case 0xc6: /* RIL */
6246    case 0xc8: /* SSF */
6247    case 0xcc: /* RIL */
6248        op2 = (insn << 12) >> 60;
6249        break;
6250    case 0xc5: /* MII */
6251    case 0xc7: /* SMI */
6252    case 0xd0 ... 0xdf: /* SS */
6253    case 0xe1: /* SS */
6254    case 0xe2: /* SS */
6255    case 0xe8: /* SS */
6256    case 0xe9: /* SS */
6257    case 0xea: /* SS */
6258    case 0xee ... 0xf3: /* SS */
6259    case 0xf8 ... 0xfd: /* SS */
6260        op2 = 0;
6261        break;
6262    default:
6263        op2 = (insn << 40) >> 56;
6264        break;
6265    }
6266
6267    memset(f, 0, sizeof(*f));
6268    f->raw_insn = insn;
6269    f->op = op;
6270    f->op2 = op2;
6271
6272    /* Lookup the instruction.  */
6273    info = lookup_opc(op << 8 | op2);
6274
6275    /* If we found it, extract the operands.  */
6276    if (info != NULL) {
6277        DisasFormat fmt = info->fmt;
6278        int i;
6279
6280        for (i = 0; i < NUM_C_FIELD; ++i) {
6281            extract_field(f, &format_info[fmt].op[i], insn);
6282        }
6283    }
6284    return info;
6285}
6286
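/*
 * Without the additional-floating-point (AFP) facility only FPRs
 * 0, 2, 4 and 6 exist, so any odd register or one above 6 requires
 * AFP to be enabled.
 */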
6287static bool is_afp_reg(int reg)
6288{
6289    return reg % 2 || reg > 6;
6290}
6291
6292static bool is_fp_pair(int reg)
6293{
6294    /* Valid pairs are 0,1,4,5,8,9,12,13: to exclude the others, check that bit 1 is clear */
6295    return !(reg & 0x2);
6296}
6297
6298static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
6299{
6300    const DisasInsn *insn;
6301    DisasJumpType ret = DISAS_NEXT;
6302    DisasFields f;
6303    DisasOps o = {};
6304
6305    /* Search for the insn in the table.  */
6306    insn = extract_insn(env, s, &f);
6307
6308    /* Not found means unimplemented/illegal opcode.  */
6309    if (insn == NULL) {
6310        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
6311                      f.op, f.op2);
6312        gen_illegal_opcode(s);
6313        return DISAS_NORETURN;
6314    }
6315
6316#ifndef CONFIG_USER_ONLY
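    /* With PER (program-event recording) enabled, record this insn
       fetch so that the check helper run below can raise the event.  */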
6317    if (s->base.tb->flags & FLAG_MASK_PER) {
6318        TCGv_i64 addr = tcg_const_i64(s->base.pc_next);
6319        gen_helper_per_ifetch(cpu_env, addr);
6320        tcg_temp_free_i64(addr);
6321    }
6322#endif
6323
6324    /* process flags */
6325    if (insn->flags) {
6326        /* privileged instruction */
6327        if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) {
6328            gen_program_exception(s, PGM_PRIVILEGED);
6329            return DISAS_NORETURN;
6330        }
6331
6332        /* if AFP is not enabled, instructions and registers are forbidden */
6333        if (!(s->base.tb->flags & FLAG_MASK_AFP)) {
6334            uint8_t dxc = 0;
6335
6336            if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(&f, r1))) {
6337                dxc = 1;
6338            }
6339            if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(&f, r2))) {
6340                dxc = 1;
6341            }
6342            if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(&f, r3))) {
6343                dxc = 1;
6344            }
6345            if (insn->flags & IF_BFP) {
6346                dxc = 2;
6347            }
6348            if (insn->flags & IF_DFP) {
6349                dxc = 3;
6350            }
6351            if (insn->flags & IF_VEC) {
6352                dxc = 0xfe;
6353            }
6354            if (dxc) {
6355                gen_data_exception(dxc);
6356                return DISAS_NORETURN;
6357            }
6358        }
6359
6360        /* if vector instructions not enabled, executing them is forbidden */
6361        if (insn->flags & IF_VEC) {
6362            if (!(s->base.tb->flags & FLAG_MASK_VECTOR)) {
6363                gen_data_exception(0xfe);
6364                return DISAS_NORETURN;
6365            }
6366        }
6367    }
6368
6369    /* Check for insn specification exceptions.  */
6370    if (insn->spec) {
6371        if ((insn->spec & SPEC_r1_even && get_field(&f, r1) & 1) ||
6372            (insn->spec & SPEC_r2_even && get_field(&f, r2) & 1) ||
6373            (insn->spec & SPEC_r3_even && get_field(&f, r3) & 1) ||
6374            (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(&f, r1))) ||
6375            (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(&f, r2)))) {
6376            gen_program_exception(s, PGM_SPECIFICATION);
6377            return DISAS_NORETURN;
6378        }
6379    }
6380
6381    /* Set up the structures we use to communicate with the helpers. */
6382    s->insn = insn;
6383    s->fields = &f;
6384
6385    /* Implement the instruction.  */
6386    if (insn->help_in1) {
6387        insn->help_in1(s, &f, &o);
6388    }
6389    if (insn->help_in2) {
6390        insn->help_in2(s, &f, &o);
6391    }
6392    if (insn->help_prep) {
6393        insn->help_prep(s, &f, &o);
6394    }
6395    if (insn->help_op) {
6396        ret = insn->help_op(s, &o);
6397    }
6398    if (ret != DISAS_NORETURN) {
6399        if (insn->help_wout) {
6400            insn->help_wout(s, &f, &o);
6401        }
6402        if (insn->help_cout) {
6403            insn->help_cout(s, &o);
6404        }
6405    }
6406
6407    /* Free any temporaries created by the helpers.  */
6408    if (o.out && !o.g_out) {
6409        tcg_temp_free_i64(o.out);
6410    }
6411    if (o.out2 && !o.g_out2) {
6412        tcg_temp_free_i64(o.out2);
6413    }
6414    if (o.in1 && !o.g_in1) {
6415        tcg_temp_free_i64(o.in1);
6416    }
6417    if (o.in2 && !o.g_in2) {
6418        tcg_temp_free_i64(o.in2);
6419    }
6420    if (o.addr1) {
6421        tcg_temp_free_i64(o.addr1);
6422    }
6423
6424#ifndef CONFIG_USER_ONLY
6425    if (s->base.tb->flags & FLAG_MASK_PER) {
6426        /* An exception might be triggered, save PSW if not already done.  */
6427        if (ret == DISAS_NEXT || ret == DISAS_PC_STALE) {
6428            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
6429        }
6430
6431        /* Call the helper to check for a possible PER exception.  */
6432        gen_helper_per_check_exception(cpu_env);
6433    }
6434#endif
6435
6436    /* Advance to the next instruction.  */
6437    s->base.pc_next = s->pc_tmp;
6438    return ret;
6439}
6440
6441static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
6442{
6443    DisasContext *dc = container_of(dcbase, DisasContext, base);
6444
6445    /* 31-bit mode */
6446    if (!(dc->base.tb->flags & FLAG_MASK_64)) {
6447        dc->base.pc_first &= 0x7fffffff;
6448        dc->base.pc_next = dc->base.pc_first;
6449    }
6450
6451    dc->cc_op = CC_OP_DYNAMIC;
6452    dc->ex_value = dc->base.tb->cs_base;
6453    dc->do_debug = dc->base.singlestep_enabled;
6454}
6455
6456static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs)
6457{
6458}
6459
6460static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
6461{
6462    DisasContext *dc = container_of(dcbase, DisasContext, base);
6463
6464    tcg_gen_insn_start(dc->base.pc_next, dc->cc_op);
6465}
6466
6467static bool s390x_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
6468                                      const CPUBreakpoint *bp)
6469{
6470    DisasContext *dc = container_of(dcbase, DisasContext, base);
6471
6472    dc->base.is_jmp = DISAS_PC_STALE;
6473    dc->do_debug = true;
6474    /* The address covered by the breakpoint must be included in
6475       [tb->pc, tb->pc + tb->size) in order for it to be
6476       properly cleared -- thus we increment the PC here so that
6477       the logic setting tb->size does the right thing.  */
6478    dc->base.pc_next += 2;
6479    return true;
6480}
6481
6482static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
6483{
6484    CPUS390XState *env = cs->env_ptr;
6485    DisasContext *dc = container_of(dcbase, DisasContext, base);
6486
6487    dc->base.is_jmp = translate_one(env, dc);
6488    if (dc->base.is_jmp == DISAS_NEXT) {
6489        uint64_t page_start;
6490
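        /* End the TB when crossing a page boundary, or after the single
           insn supplied by EXECUTE.  */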
6491        page_start = dc->base.pc_first & TARGET_PAGE_MASK;
6492        if (dc->base.pc_next - page_start >= TARGET_PAGE_SIZE || dc->ex_value) {
6493            dc->base.is_jmp = DISAS_TOO_MANY;
6494        }
6495    }
6496}
6497
6498static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
6499{
6500    DisasContext *dc = container_of(dcbase, DisasContext, base);
6501
6502    switch (dc->base.is_jmp) {
6503    case DISAS_GOTO_TB:
6504    case DISAS_NORETURN:
6505        break;
6506    case DISAS_TOO_MANY:
6507    case DISAS_PC_STALE:
6508    case DISAS_PC_STALE_NOCHAIN:
6509        update_psw_addr(dc);
6510        /* FALLTHRU */
6511    case DISAS_PC_UPDATED:
6512        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
6513           cc op type is in env */
6514        update_cc_op(dc);
6515        /* FALLTHRU */
6516    case DISAS_PC_CC_UPDATED:
6517        /* Exit the TB, either by raising a debug exception or by return.  */
6518        if (dc->do_debug) {
6519            gen_exception(EXCP_DEBUG);
6520        } else if (use_exit_tb(dc) ||
6521                   dc->base.is_jmp == DISAS_PC_STALE_NOCHAIN) {
6522            tcg_gen_exit_tb(NULL, 0);
6523        } else {
6524            tcg_gen_lookup_and_goto_ptr();
6525        }
6526        break;
6527    default:
6528        g_assert_not_reached();
6529    }
6530}
6531
6532static void s390x_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
6533{
6534    DisasContext *dc = container_of(dcbase, DisasContext, base);
6535
6536    if (unlikely(dc->ex_value)) {
6537        /* ??? Unfortunately log_target_disas can't use host memory.  */
6538        qemu_log("IN: EXECUTE %016" PRIx64 "\n", dc->ex_value);
6539    } else {
6540        qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
6541        log_target_disas(cs, dc->base.pc_first, dc->base.tb->size);
6542    }
6543}
6544
6545static const TranslatorOps s390x_tr_ops = {
6546    .init_disas_context = s390x_tr_init_disas_context,
6547    .tb_start           = s390x_tr_tb_start,
6548    .insn_start         = s390x_tr_insn_start,
6549    .breakpoint_check   = s390x_tr_breakpoint_check,
6550    .translate_insn     = s390x_tr_translate_insn,
6551    .tb_stop            = s390x_tr_tb_stop,
6552    .disas_log          = s390x_tr_disas_log,
6553};
6554
6555void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
6556{
6557    DisasContext dc;
6558
6559    translator_loop(&s390x_tr_ops, &dc.base, cs, tb);
6560}
6561
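/*
 * DATA holds what s390x_tr_insn_start() recorded via tcg_gen_insn_start():
 * data[0] is the insn address, data[1] the cc_op in effect there.
 */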
6562void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
6563                          target_ulong *data)
6564{
6565    int cc_op = data[1];
6566    env->psw.addr = data[0];
6567    if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
6568        env->cc_op = cc_op;
6569    }
6570}
6571