qemu/target/s390x/translate.c
/*
 *  S/390 translation
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2010 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/* #define DEBUG_INLINE_BRANCHES */
#define S390X_DEBUG_DISAS
/* #define S390X_DEBUG_DISAS_VERBOSE */

#ifdef S390X_DEBUG_DISAS_VERBOSE
#  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif

#include "qemu/osdep.h"
#include "cpu.h"
#include "internal.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "tcg-op-gvec.h"
#include "qemu/log.h"
#include "qemu/host-utils.h"
#include "exec/cpu_ldst.h"
#include "exec/gen-icount.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/translator.h"
#include "exec/log.h"
#include "qemu/atomic128.h"


/* Information that (most) every instruction needs to manipulate.  */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;

struct DisasContext {
    DisasContextBase base;
    const DisasInsn *insn;
    DisasFields *fields;
    uint64_t ex_value;
    /*
     * During translate_one(), pc_tmp is used to determine the instruction
     * to be executed after base.pc_next - e.g. next sequential instruction
     * or a branch target.
     */
    uint64_t pc_tmp;
    uint32_t ilen;
    enum cc_op cc_op;
    bool do_debug;
};

/* Information carried about a condition to be evaluated.  */
typedef struct {
    TCGCond cond:8;
    bool is_64;
    /* g1/g2 mark a/b as global TCG values that free_compare() must not free. */
    bool g1;
    bool g2;
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;

#ifdef DEBUG_INLINE_BRANCHES
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif

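/*
 * Compute the branch link information.  As the code below implies: in
 * 64-bit mode the link is the full address; in 31-bit mode it is the
 * address with the high (addressing-mode) bit 0x80000000 set, deposited
 * into the low 32 bits of the register; in 24-bit mode it is the plain
 * address in the low 32 bits.
 */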
static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
{
    TCGv_i64 tmp;

    if (s->base.tb->flags & FLAG_MASK_32) {
        if (s->base.tb->flags & FLAG_MASK_64) {
            tcg_gen_movi_i64(out, pc);
            return;
        }
        pc |= 0x80000000;
    }
    assert(!(s->base.tb->flags & FLAG_MASK_64));
    tmp = tcg_const_i64(pc);
    tcg_gen_deposit_i64(out, out, tmp, 0, 32);
    tcg_temp_free_i64(tmp);
}

static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;
static TCGv_i64 gbea;

static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

static char cpu_reg_names[16][4];
static TCGv_i64 regs[16];

void s390x_translate_init(void)
{
    int i;

    psw_addr = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");
    gbea = tcg_global_mem_new_i64(cpu_env,
                                  offsetof(CPUS390XState, gbea),
                                  "gbea");

    cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }
}

static inline int vec_full_reg_offset(uint8_t reg)
{
    g_assert(reg < 32);
    return offsetof(CPUS390XState, vregs[reg][0]);
}

static inline int vec_reg_offset(uint8_t reg, uint8_t enr, MemOp es)
{
    /* Convert element size (es) - e.g. MO_8 - to bytes */
    const uint8_t bytes = 1 << es;
    int offs = enr * bytes;

    /*
     * vregs[n][0] is the low 8 bytes and vregs[n][1] the high 8 bytes
     * of the 16 byte vector, on both little- and big-endian systems.
     *
     * Big Endian (target/possible host)
     * B:  [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
     * HW: [     0][     1][     2][     3] - [     4][     5][     6][     7]
     * W:  [             0][             1] - [             2][             3]
     * DW: [                             0] - [                             1]
     *
     * Little Endian (possible host)
     * B:  [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
     * HW: [     3][     2][     1][     0] - [     7][     6][     5][     4]
     * W:  [             1][             0] - [             3][             2]
     * DW: [                             0] - [                             1]
     *
     * For 16 byte elements, the two 8 byte halves will not form a host
     * int128 if the host is little endian, since they're in the wrong order.
     * Some operations (e.g. xor) do not care. For operations like addition,
     * the two 8 byte elements have to be loaded separately. Let's force all
     * 16 byte operations to handle it in a special way.
     */
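    /*
     * Worked example: a MO_32 element with enr == 1 starts at offs == 4;
     * on a little-endian host the XOR below yields 4 ^ (8 - 4) == 0,
     * matching the "W: [1][0]" row of the table above.
     */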
    g_assert(es <= MO_64);
#ifndef HOST_WORDS_BIGENDIAN
    offs ^= (8 - bytes);
#endif
    return offs + vec_full_reg_offset(reg);
}

static inline int freg64_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_64);
}

static inline int freg32_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_32);
}

static TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}

static TCGv_i64 load_freg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld_i64(r, cpu_env, freg64_offset(reg));
    return r;
}

static TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld32u_i64(r, cpu_env, freg32_offset(reg));
    return r;
}

static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_st_i64(v, cpu_env, freg64_offset(reg));
}

static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_st32_i64(v, cpu_env, freg32_offset(reg));
}

static void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}

static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->base.pc_next);
}

static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i64(gbea, s->base.pc_next);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_const_i64(s->pc_tmp) : psw_addr;
        gen_helper_per_branch(cpu_env, gbea, next_pc);
        if (to_next) {
            tcg_temp_free_i64(next_pc);
        }
    }
#endif
}

static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->base.pc_next);
        gen_helper_per_branch(cpu_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        TCGv_i64 pc = tcg_const_i64(s->base.pc_next);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
        tcg_temp_free_i64(pc);
    }
#endif
}

static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->base.pc_next);
}

static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}

static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)cpu_lduw_code(env, pc);
}

static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
}

static int get_mem_index(DisasContext *s)
{
#ifdef CONFIG_USER_ONLY
    return MMU_USER_IDX;
#else
    if (!(s->base.tb->flags & FLAG_MASK_DAT)) {
        return MMU_REAL_IDX;
    }

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_PRIMARY_IDX;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_SECONDARY_IDX;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        return MMU_HOME_IDX;
    default:
        tcg_abort();
        break;
    }
#endif
}

static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was.  */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    tmp = tcg_const_i32(s->ilen);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* update the psw */
    update_psw_addr(s);

    /* Save off cc.  */
    update_cc_op(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);
}

static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}

static inline void gen_data_exception(uint8_t dxc)
{
    TCGv_i32 tmp = tcg_const_i32(dxc);
    gen_helper_data_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static inline void gen_trap(DisasContext *s)
{
    /* Set DXC to 0xff */
    gen_data_exception(0xff);
}

static void gen_addi_and_wrap_i64(DisasContext *s, TCGv_i64 dst, TCGv_i64 src,
                                  int64_t imm)
{
    tcg_gen_addi_i64(dst, src, imm);
    if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_andi_i64(dst, dst, 0x7fffffff);
        } else {
            tcg_gen_andi_i64(dst, dst, 0x00ffffff);
        }
    }
}

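/*
 * Form a base + index + displacement effective address, wrapped to the
 * current addressing mode (24/31/64 bit) just as gen_addi_and_wrap_i64
 * does above.
 */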
static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /*
     * Note that d2 is limited to 20 bits, signed.  If we crop negative
     * displacements early we create larger immediate addends.
     */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        gen_addi_and_wrap_i64(s, tmp, tmp, d2);
    } else if (b2) {
        gen_addi_and_wrap_i64(s, tmp, regs[b2], d2);
    } else if (x2) {
        gen_addi_and_wrap_i64(s, tmp, regs[x2], d2);
    } else if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_movi_i64(tmp, d2 & 0x7fffffff);
        } else {
            tcg_gen_movi_i64(tmp, d2 & 0x00ffffff);
        }
    } else {
        tcg_gen_movi_i64(tmp, d2);
    }

    return tmp;
}

static inline bool live_cc_data(DisasContext *s)
{
    return (s->cc_op != CC_OP_DYNAMIC
            && s->cc_op != CC_OP_STATIC
            && s->cc_op > 3);
}

static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}

static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}

static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
}

static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
}

static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
}

/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}

/* calculates cc into cc_op */
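/*
 * The helper interface used below is gen_helper_calc_cc(cc_op, env,
 * op, src, dst, vr); operations that need fewer arguments pass a dummy
 * zero in the unused slots, and CC_OP_DYNAMIC passes the current cc_op
 * variable itself instead of a constant op.
 */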
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = NULL;
    TCGv_i64 dummy = NULL;

    switch (s->cc_op) {
    default:
        dummy = tcg_const_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        local_cc_op = tcg_const_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        break;
    }

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
    case CC_OP_LCBB:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA_32:
    case CC_OP_SLA_64:
    case CC_OP_NZ_F128:
    case CC_OP_VC:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    if (local_cc_op) {
        tcg_temp_free_i32(local_cc_op);
    }
    if (dummy) {
        tcg_temp_free_i64(dummy);
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}

static bool use_exit_tb(DisasContext *s)
{
    return s->base.singlestep_enabled ||
            (tb_cflags(s->base.tb) & CF_LAST_IO) ||
            (s->base.tb->flags & FLAG_MASK_PER);
}

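/*
 * goto_tb chaining is only used when the destination stays within the
 * guest page(s) this TB was built from; the TB may cross a page
 * boundary, hence the check against both the TB start and the current
 * pc.  For user-only builds the restriction does not apply.
 */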
static bool use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (unlikely(use_exit_tb(s))) {
        return false;
    }
#ifndef CONFIG_USER_ONLY
    return (dest & TARGET_PAGE_MASK) == (s->base.tb->pc & TARGET_PAGE_MASK) ||
           (dest & TARGET_PAGE_MASK) == (s->base.pc_next & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}

/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};

/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible.  */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER,
    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE,
    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};

/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  */
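/*
 * Recall that the 4-bit mask selects CC values as (8 >> cc) & mask:
 * bit 8 tests CC 0, bit 4 CC 1, bit 2 CC 2 and bit 1 CC 3 -- see the
 * generic fallback at the end of the CC_OP_STATIC case below.
 */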
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op.  */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU_32:
    case CC_OP_ADDU_64:
        switch (mask) {
        case 8 | 2: /* vr == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* vr != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* no carry -> vr >= src */
            cond = TCG_COND_GEU;
            break;
        case 2 | 1: /* carry -> vr < src */
            cond = TCG_COND_LTU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_SUBU_32:
    case CC_OP_SUBU_64:
        /* Note that CC=0 is impossible; treat it as don't-care.  */
        switch (mask & 7) {
        case 2: /* zero -> op1 == op2 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* !zero -> op1 != op2 */
            cond = TCG_COND_NE;
            break;
        case 4: /* borrow (!carry) -> op1 < op2 */
            cond = TCG_COND_LTU;
            break;
        case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
            cond = TCG_COND_GEU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
    case CC_OP_SUBU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
    case CC_OP_SUBU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_vr);
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            tcg_gen_movi_i32(c->u.s32.b, 0);
        } else {
            tcg_gen_extrl_i64_i32(c->u.s32.b, cc_src);
        }
        break;

    case CC_OP_ADDU_64:
        c->u.s64.a = cc_vr;
        c->g1 = true;
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            c->u.s64.b = tcg_const_i64(0);
        } else {
            c->u.s64.b = cc_src;
            c->g2 = true;
        }
        break;

    case CC_OP_STATIC:
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask.  */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}

static void free_compare(DisasCompare *c)
{
    if (!c->g1) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.a);
        } else {
            tcg_temp_free_i32(c->u.s32.a);
        }
    }
    if (!c->g2) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.b);
        } else {
            tcg_temp_free_i32(c->u.s32.b);
        }
    }
}

/* ====================================================================== */
/* Define the insn format enumeration.  */
#define F0(N)                         FMT_##N,
#define F1(N, X1)                     F0(N)
#define F2(N, X1, X2)                 F0(N)
#define F3(N, X1, X2, X3)             F0(N)
#define F4(N, X1, X2, X3, X4)         F0(N)
#define F5(N, X1, X2, X3, X4, X5)     F0(N)
#define F6(N, X1, X2, X3, X4, X5, X6) F0(N)

typedef enum {
#include "insn-format.def"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6

/* Define a structure to hold the decoded fields.  We'll store each inside
   an array indexed by an enum.  In order to conserve memory, we'll arrange
   for fields that do not exist at the same time to overlap, thus the "C"
   for compact.  For checking purposes there is an "O" for original index
   as well that will be applied to availability bitmaps.  */

enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_m5,
    FLD_O_m6,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5,
    FLD_O_v1,
    FLD_O_v2,
    FLD_O_v3,
    FLD_O_v4,
};

enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,
    FLD_C_v1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,
    FLD_C_v3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,
    FLD_C_v4 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,
    FLD_C_m5 = 4,

    FLD_C_d2 = 5,
    FLD_C_m6 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,
    FLD_C_v2 = 6,

    NUM_C_FIELD = 7
};

struct DisasFields {
    uint64_t raw_insn;
    unsigned op:8;
    unsigned op2:8;
    unsigned presentC:16;
    unsigned int presentO;
    int c[NUM_C_FIELD];
};

/* This is the way fields are to be accessed out of DisasFields.  */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)

static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
{
    return (f->presentO >> c) & 1;
}

static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(f, o));
    return f->c[c];
}

/* Describe the layout of each field in each format.  */
typedef struct DisasField {
    unsigned int beg:8;
    unsigned int size:8;
    unsigned int type:2;
    unsigned int indexC:6;
    enum DisasFieldIndexO indexO:8;
} DisasField;

typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;

#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define V(N, B)       {  B,  4, 3, FLD_C_v##N, FLD_O_v##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }

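/*
 * Each initializer above is a DisasField { beg, size, type, indexC,
 * indexO }.  As best inferred from the field extraction code (outside
 * this excerpt): type 0 is an unsigned field, type 1 a signed
 * immediate, type 2 a 20-bit long displacement, and type 3 a vector
 * register whose high bit is supplied by RXB.
 */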
#define F0(N)                     { { } },
#define F1(N, X1)                 { { X1 } },
#define F2(N, X1, X2)             { { X1, X2 } },
#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
#define F6(N, X1, X2, X3, X4, X5, X6)       { { X1, X2, X3, X4, X5, X6 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.def"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6
#undef R
#undef M
#undef V
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L

/* Generally, we'll extract operands into these structures, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */
typedef struct {
    bool g_out, g_out2, g_in1, g_in2;
    TCGv_i64 out, out2, in1, in2;
    TCGv_i64 addr1;
} DisasOps;

/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */

#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16

/* Return values from translate_one, indicating the state of the TB.  */

/* We are not using a goto_tb (for whatever reason), but have updated
   the PC (for whatever reason), so there's no need to do it again on
   exiting the TB.  */
#define DISAS_PC_UPDATED        DISAS_TARGET_0

/* We have emitted one or more goto_tb.  No fixup required.  */
#define DISAS_GOTO_TB           DISAS_TARGET_1

/* We have updated the PC and CC values.  */
#define DISAS_PC_CC_UPDATED     DISAS_TARGET_2

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the PC for the next instruction to be executed.  */
#define DISAS_PC_STALE          DISAS_TARGET_3

/* We are exiting the TB to the main loop.  */
#define DISAS_PC_STALE_NOCHAIN  DISAS_TARGET_4


/* Instruction flags */
#define IF_AFP1     0x0001      /* r1 is a fp reg for HFP/FPS instructions */
#define IF_AFP2     0x0002      /* r2 is a fp reg for HFP/FPS instructions */
#define IF_AFP3     0x0004      /* r3 is a fp reg for HFP/FPS instructions */
#define IF_BFP      0x0008      /* binary floating point instruction */
#define IF_DFP      0x0010      /* decimal floating point instruction */
#define IF_PRIV     0x0020      /* privileged instruction */
#define IF_VEC      0x0040      /* vector instruction */

struct DisasInsn {
    unsigned opc:16;
    unsigned flags:16;
    DisasFormat fmt:8;
    unsigned fac:8;
    unsigned spec:8;

    const char *name;

    /* Pre-process arguments before HELP_OP.  */
    void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);

    /*
     * Post-process output after HELP_OP.
     * Note that these are not called if HELP_OP returns DISAS_NORETURN.
     */
    void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);

    /* Implement the operation itself.  */
    DisasJumpType (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;
};

/* ====================================================================== */
/* Miscellaneous helpers, used by several operations.  */

static void help_l2_shift(DisasContext *s, DisasFields *f,
                          DisasOps *o, int mask)
{
    int b2 = get_field(f, b2);
    int d2 = get_field(f, d2);

    if (b2 == 0) {
        o->in2 = tcg_const_i64(d2 & mask);
    } else {
        o->in2 = get_address(s, 0, b2, d2);
        tcg_gen_andi_i64(o->in2, o->in2, mask);
    }
}

static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->pc_tmp) {
        per_branch(s, true);
        return DISAS_NEXT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        per_breaking_event(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb(s->base.tb, 0);
        return DISAS_GOTO_TB;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    }
}

static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
                                 bool is_imm, int imm, TCGv_i64 cdest)
{
    DisasJumpType ret;
    uint64_t dest = s->base.pc_next + 2 * imm;
    TCGLabel *lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = DISAS_NEXT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->pc_tmp) {
            /* Branch to next.  */
            per_branch(s, true);
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (!cdest) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = DISAS_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->pc_tmp)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            /* Branch taken.  */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb(s->base.tb, 1);

            ret = DISAS_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = DISAS_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_const_i64(s->pc_tmp);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = DISAS_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}

/* ====================================================================== */
/* The operations.  These perform the bulk of the work for any insn,
   usually after the operands have been loaded and output initialized.  */

static DisasJumpType op_abs(DisasContext *s, DisasOps *o)
{
    tcg_gen_abs_i64(o->out, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_addc(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 carry;

    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* The carry flag is the msb of CC, therefore the branch mask that would
       create that comparison is 3.  Feeding the generated comparison to
       setcond produces the carry flag that we desire.  */
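    /* (Mask 3 selects CC 2 or 3; for the CC_OP_ADDU_* ops those are
       exactly the carry-out cases -- see disas_jcc above.) */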
    disas_jcc(s, &cmp, 3);
    carry = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(carry, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_add_i64(o->out, o->out, carry);
    tcg_temp_free_i64(carry);
    return DISAS_NEXT;
}

static DisasJumpType op_asi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_STFLE_45)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_STFLE_45)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}

static DisasJumpType op_ni(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_bas(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}

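/*
 * In 24-bit mode the link register carries more than the return
 * address: the bit fiddling below also packs in the ILC (s->ilen / 2,
 * bits 30-31), the CC (bits 28-29) and the program mask taken from the
 * PSW (bits 24-27).
 */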
1567static void save_link_info(DisasContext *s, DisasOps *o)
1568{
1569    TCGv_i64 t;
1570
1571    if (s->base.tb->flags & (FLAG_MASK_32 | FLAG_MASK_64)) {
1572        pc_to_link_info(o->out, s, s->pc_tmp);
1573        return;
1574    }
1575    gen_op_calc_cc(s);
1576    tcg_gen_andi_i64(o->out, o->out, 0xffffffff00000000ull);
1577    tcg_gen_ori_i64(o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp);
1578    t = tcg_temp_new_i64();
1579    tcg_gen_shri_i64(t, psw_mask, 16);
1580    tcg_gen_andi_i64(t, t, 0x0f000000);
1581    tcg_gen_or_i64(o->out, o->out, t);
1582    tcg_gen_extu_i32_i64(t, cc_op);
1583    tcg_gen_shli_i64(t, t, 28);
1584    tcg_gen_or_i64(o->out, o->out, t);
1585    tcg_temp_free_i64(t);
1586}
1587
1588static DisasJumpType op_bal(DisasContext *s, DisasOps *o)
1589{
1590    save_link_info(s, o);
1591    if (o->in2) {
1592        tcg_gen_mov_i64(psw_addr, o->in2);
1593        per_branch(s, false);
1594        return DISAS_PC_UPDATED;
1595    } else {
1596        return DISAS_NEXT;
1597    }
1598}
1599
1600static DisasJumpType op_basi(DisasContext *s, DisasOps *o)
1601{
1602    pc_to_link_info(o->out, s, s->pc_tmp);
1603    return help_goto_direct(s, s->base.pc_next + 2 * get_field(s->fields, i2));
1604}
1605
1606static DisasJumpType op_bc(DisasContext *s, DisasOps *o)
1607{
1608    int m1 = get_field(s->fields, m1);
1609    bool is_imm = have_field(s->fields, i2);
1610    int imm = is_imm ? get_field(s->fields, i2) : 0;
1611    DisasCompare c;
1612
1613    /* BCR with R2 = 0 causes no branching */
1614    if (have_field(s->fields, r2) && get_field(s->fields, r2) == 0) {
1615        if (m1 == 14) {
1616            /* Perform serialization */
1617            /* FIXME: check for fast-BCR-serialization facility */
1618            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1619        }
1620        if (m1 == 15) {
1621            /* Perform serialization */
1622            /* FIXME: perform checkpoint-synchronisation */
1623            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1624        }
1625        return DISAS_NEXT;
1626    }
1627
1628    disas_jcc(s, &c, m1);
1629    return help_branch(s, &c, is_imm, imm, o->in2);
1630}
1631
1632static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
1633{
1634    int r1 = get_field(s->fields, r1);
1635    bool is_imm = have_field(s->fields, i2);
1636    int imm = is_imm ? get_field(s->fields, i2) : 0;
1637    DisasCompare c;
1638    TCGv_i64 t;
1639
1640    c.cond = TCG_COND_NE;
1641    c.is_64 = false;
1642    c.g1 = false;
1643    c.g2 = false;
1644
1645    t = tcg_temp_new_i64();
1646    tcg_gen_subi_i64(t, regs[r1], 1);
1647    store_reg32_i64(r1, t);
1648    c.u.s32.a = tcg_temp_new_i32();
1649    c.u.s32.b = tcg_const_i32(0);
1650    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1651    tcg_temp_free_i64(t);
1652
1653    return help_branch(s, &c, is_imm, imm, o->in2);
1654}
1655
1656static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
1657{
1658    int r1 = get_field(s->fields, r1);
1659    int imm = get_field(s->fields, i2);
1660    DisasCompare c;
1661    TCGv_i64 t;
1662
1663    c.cond = TCG_COND_NE;
1664    c.is_64 = false;
1665    c.g1 = false;
1666    c.g2 = false;
1667
1668    t = tcg_temp_new_i64();
1669    tcg_gen_shri_i64(t, regs[r1], 32);
1670    tcg_gen_subi_i64(t, t, 1);
1671    store_reg32h_i64(r1, t);
1672    c.u.s32.a = tcg_temp_new_i32();
1673    c.u.s32.b = tcg_const_i32(0);
1674    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1675    tcg_temp_free_i64(t);
1676
1677    return help_branch(s, &c, 1, imm, o->in2);
1678}
1679
1680static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
1681{
1682    int r1 = get_field(s->fields, r1);
1683    bool is_imm = have_field(s->fields, i2);
1684    int imm = is_imm ? get_field(s->fields, i2) : 0;
1685    DisasCompare c;
1686
1687    c.cond = TCG_COND_NE;
1688    c.is_64 = true;
1689    c.g1 = true;
1690    c.g2 = false;
1691
1692    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
1693    c.u.s64.a = regs[r1];
1694    c.u.s64.b = tcg_const_i64(0);
1695
1696    return help_branch(s, &c, is_imm, imm, o->in2);
1697}
1698
1699static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
1700{
1701    int r1 = get_field(s->fields, r1);
1702    int r3 = get_field(s->fields, r3);
1703    bool is_imm = have_field(s->fields, i2);
1704    int imm = is_imm ? get_field(s->fields, i2) : 0;
1705    DisasCompare c;
1706    TCGv_i64 t;
1707
1708    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1709    c.is_64 = false;
1710    c.g1 = false;
1711    c.g2 = false;
1712
1713    t = tcg_temp_new_i64();
1714    tcg_gen_add_i64(t, regs[r1], regs[r3]);
1715    c.u.s32.a = tcg_temp_new_i32();
1716    c.u.s32.b = tcg_temp_new_i32();
1717    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1718    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
1719    store_reg32_i64(r1, t);
1720    tcg_temp_free_i64(t);
1721
1722    return help_branch(s, &c, is_imm, imm, o->in2);
1723}
1724
1725static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
1726{
1727    int r1 = get_field(s->fields, r1);
1728    int r3 = get_field(s->fields, r3);
1729    bool is_imm = have_field(s->fields, i2);
1730    int imm = is_imm ? get_field(s->fields, i2) : 0;
1731    DisasCompare c;
1732
1733    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1734    c.is_64 = true;
1735
1736    if (r1 == (r3 | 1)) {
1737        c.u.s64.b = load_reg(r3 | 1);
1738        c.g2 = false;
1739    } else {
1740        c.u.s64.b = regs[r3 | 1];
1741        c.g2 = true;
1742    }
1743
1744    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
1745    c.u.s64.a = regs[r1];
1746    c.g1 = true;
1747
1748    return help_branch(s, &c, is_imm, imm, o->in2);
1749}
1750
1751static DisasJumpType op_cj(DisasContext *s, DisasOps *o)
1752{
1753    int imm, m3 = get_field(s->fields, m3);
1754    bool is_imm;
1755    DisasCompare c;
1756
1757    c.cond = ltgt_cond[m3];
1758    if (s->insn->data) {
1759        c.cond = tcg_unsigned_cond(c.cond);
1760    }
1761    c.is_64 = c.g1 = c.g2 = true;
1762    c.u.s64.a = o->in1;
1763    c.u.s64.b = o->in2;
1764
1765    is_imm = have_field(s->fields, i4);
1766    if (is_imm) {
1767        imm = get_field(s->fields, i4);
1768    } else {
1769        imm = 0;
1770        o->out = get_address(s, 0, get_field(s->fields, b4),
1771                             get_field(s->fields, d4));
1772    }
1773
1774    return help_branch(s, &c, is_imm, imm, o->out);
1775}
1776
1777static DisasJumpType op_ceb(DisasContext *s, DisasOps *o)
1778{
1779    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
1780    set_cc_static(s);
1781    return DISAS_NEXT;
1782}
1783
1784static DisasJumpType op_cdb(DisasContext *s, DisasOps *o)
1785{
1786    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
1787    set_cc_static(s);
1788    return DISAS_NEXT;
1789}
1790
1791static DisasJumpType op_cxb(DisasContext *s, DisasOps *o)
1792{
1793    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
1794    set_cc_static(s);
1795    return DISAS_NEXT;
1796}
1797
1798static TCGv_i32 fpinst_extract_m34(DisasContext *s, bool m3_with_fpe,
1799                                   bool m4_with_fpe)
1800{
1801    const bool fpe = s390_has_feat(S390_FEAT_FLOATING_POINT_EXT);
1802    uint8_t m3 = get_field(s->fields, m3);
1803    uint8_t m4 = get_field(s->fields, m4);
1804
1805    /* m3 field was introduced with FPE */
1806    if (!fpe && m3_with_fpe) {
1807        m3 = 0;
1808    }
1809    /* m4 field was introduced with FPE */
1810    if (!fpe && m4_with_fpe) {
1811        m4 = 0;
1812    }
1813
1814    /* Check for valid rounding modes. Mode 3 was introduced later. */
1815    if (m3 == 2 || m3 > 7 || (!fpe && m3 == 3)) {
1816        gen_program_exception(s, PGM_SPECIFICATION);
1817        return NULL;
1818    }
1819
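        /* Pack both modifiers into one constant: m3 (the rounding mode)
           in bits 0-3, m4 in bits 4-7.  */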
1820    return tcg_const_i32(deposit32(m3, 4, 4, m4));
1821}
1822
1823static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
1824{
1825    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1826
1827    if (!m34) {
1828        return DISAS_NORETURN;
1829    }
1830    gen_helper_cfeb(o->out, cpu_env, o->in2, m34);
1831    tcg_temp_free_i32(m34);
1832    gen_set_cc_nz_f32(s, o->in2);
1833    return DISAS_NEXT;
1834}
1835
1836static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o)
1837{
1838    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1839
1840    if (!m34) {
1841        return DISAS_NORETURN;
1842    }
1843    gen_helper_cfdb(o->out, cpu_env, o->in2, m34);
1844    tcg_temp_free_i32(m34);
1845    gen_set_cc_nz_f64(s, o->in2);
1846    return DISAS_NEXT;
1847}
1848
1849static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o)
1850{
1851    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1852
1853    if (!m34) {
1854        return DISAS_NORETURN;
1855    }
1856    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m34);
1857    tcg_temp_free_i32(m34);
1858    gen_set_cc_nz_f128(s, o->in1, o->in2);
1859    return DISAS_NEXT;
1860}
1861
1862static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o)
1863{
1864    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1865
1866    if (!m34) {
1867        return DISAS_NORETURN;
1868    }
1869    gen_helper_cgeb(o->out, cpu_env, o->in2, m34);
1870    tcg_temp_free_i32(m34);
1871    gen_set_cc_nz_f32(s, o->in2);
1872    return DISAS_NEXT;
1873}
1874
1875static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o)
1876{
1877    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1878
1879    if (!m34) {
1880        return DISAS_NORETURN;
1881    }
1882    gen_helper_cgdb(o->out, cpu_env, o->in2, m34);
1883    tcg_temp_free_i32(m34);
1884    gen_set_cc_nz_f64(s, o->in2);
1885    return DISAS_NEXT;
1886}
1887
1888static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o)
1889{
1890    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1891
1892    if (!m34) {
1893        return DISAS_NORETURN;
1894    }
1895    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m34);
1896    tcg_temp_free_i32(m34);
1897    gen_set_cc_nz_f128(s, o->in1, o->in2);
1898    return DISAS_NEXT;
1899}
1900
1901static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o)
1902{
1903    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1904
1905    if (!m34) {
1906        return DISAS_NORETURN;
1907    }
1908    gen_helper_clfeb(o->out, cpu_env, o->in2, m34);
1909    tcg_temp_free_i32(m34);
1910    gen_set_cc_nz_f32(s, o->in2);
1911    return DISAS_NEXT;
1912}
1913
1914static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o)
1915{
1916    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1917
1918    if (!m34) {
1919        return DISAS_NORETURN;
1920    }
1921    gen_helper_clfdb(o->out, cpu_env, o->in2, m34);
1922    tcg_temp_free_i32(m34);
1923    gen_set_cc_nz_f64(s, o->in2);
1924    return DISAS_NEXT;
1925}
1926
1927static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o)
1928{
1929    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1930
1931    if (!m34) {
1932        return DISAS_NORETURN;
1933    }
1934    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m34);
1935    tcg_temp_free_i32(m34);
1936    gen_set_cc_nz_f128(s, o->in1, o->in2);
1937    return DISAS_NEXT;
1938}
1939
1940static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o)
1941{
1942    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1943
1944    if (!m34) {
1945        return DISAS_NORETURN;
1946    }
1947    gen_helper_clgeb(o->out, cpu_env, o->in2, m34);
1948    tcg_temp_free_i32(m34);
1949    gen_set_cc_nz_f32(s, o->in2);
1950    return DISAS_NEXT;
1951}
1952
1953static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o)
1954{
1955    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1956
1957    if (!m34) {
1958        return DISAS_NORETURN;
1959    }
1960    gen_helper_clgdb(o->out, cpu_env, o->in2, m34);
1961    tcg_temp_free_i32(m34);
1962    gen_set_cc_nz_f64(s, o->in2);
1963    return DISAS_NEXT;
1964}
1965
1966static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
1967{
1968    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1969
1970    if (!m34) {
1971        return DISAS_NORETURN;
1972    }
1973    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m34);
1974    tcg_temp_free_i32(m34);
1975    gen_set_cc_nz_f128(s, o->in1, o->in2);
1976    return DISAS_NEXT;
1977}
1978
1979static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
1980{
1981    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
1982
1983    if (!m34) {
1984        return DISAS_NORETURN;
1985    }
1986    gen_helper_cegb(o->out, cpu_env, o->in2, m34);
1987    tcg_temp_free_i32(m34);
1988    return DISAS_NEXT;
1989}
1990
1991static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
1992{
1993    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
1994
1995    if (!m34) {
1996        return DISAS_NORETURN;
1997    }
1998    gen_helper_cdgb(o->out, cpu_env, o->in2, m34);
1999    tcg_temp_free_i32(m34);
2000    return DISAS_NEXT;
2001}
2002
2003static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
2004{
2005    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2006
2007    if (!m34) {
2008        return DISAS_NORETURN;
2009    }
2010    gen_helper_cxgb(o->out, cpu_env, o->in2, m34);
2011    tcg_temp_free_i32(m34);
2012    return_low128(o->out2);
2013    return DISAS_NEXT;
2014}
2015
2016static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
2017{
2018    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
2019
2020    if (!m34) {
2021        return DISAS_NORETURN;
2022    }
2023    gen_helper_celgb(o->out, cpu_env, o->in2, m34);
2024    tcg_temp_free_i32(m34);
2025    return DISAS_NEXT;
2026}
2027
2028static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
2029{
2030    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
2031
2032    if (!m34) {
2033        return DISAS_NORETURN;
2034    }
2035    gen_helper_cdlgb(o->out, cpu_env, o->in2, m34);
2036    tcg_temp_free_i32(m34);
2037    return DISAS_NEXT;
2038}
2039
2040static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
2041{
2042    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
2043
2044    if (!m34) {
2045        return DISAS_NORETURN;
2046    }
2047    gen_helper_cxlgb(o->out, cpu_env, o->in2, m34);
2048    tcg_temp_free_i32(m34);
2049    return_low128(o->out2);
2050    return DISAS_NEXT;
2051}
2052
2053static DisasJumpType op_cksm(DisasContext *s, DisasOps *o)
2054{
2055    int r2 = get_field(s->fields, r2);
2056    TCGv_i64 len = tcg_temp_new_i64();
2057
2058    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
2059    set_cc_static(s);
2060    return_low128(o->out);
2061
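        /* The helper returns the number of bytes processed in len;
           advance the address in r2 and shrink the remaining length
           in r2 + 1 accordingly.  */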
2062    tcg_gen_add_i64(regs[r2], regs[r2], len);
2063    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
2064    tcg_temp_free_i64(len);
2065
2066    return DISAS_NEXT;
2067}
2068
2069static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
2070{
2071    int l = get_field(s->fields, l1);
2072    TCGv_i32 vl;
2073
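        /* For power-of-two operand lengths, inline the comparison as a
           pair of loads; anything else goes through the helper.  */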
2074    switch (l + 1) {
2075    case 1:
2076        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
2077        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
2078        break;
2079    case 2:
2080        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
2081        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
2082        break;
2083    case 4:
2084        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
2085        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
2086        break;
2087    case 8:
2088        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
2089        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
2090        break;
2091    default:
2092        vl = tcg_const_i32(l);
2093        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
2094        tcg_temp_free_i32(vl);
2095        set_cc_static(s);
2096        return DISAS_NEXT;
2097    }
2098    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
2099    return DISAS_NEXT;
2100}
2101
2102static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
2103{
2104    int r1 = get_field(s->fields, r1);
2105    int r2 = get_field(s->fields, r2);
2106    TCGv_i32 t1, t2;
2107
2108    /* r1 and r2 must be even.  */
2109    if (r1 & 1 || r2 & 1) {
2110        gen_program_exception(s, PGM_SPECIFICATION);
2111        return DISAS_NORETURN;
2112    }
2113
2114    t1 = tcg_const_i32(r1);
2115    t2 = tcg_const_i32(r2);
2116    gen_helper_clcl(cc_op, cpu_env, t1, t2);
2117    tcg_temp_free_i32(t1);
2118    tcg_temp_free_i32(t2);
2119    set_cc_static(s);
2120    return DISAS_NEXT;
2121}
2122
2123static DisasJumpType op_clcle(DisasContext *s, DisasOps *o)
2124{
2125    int r1 = get_field(s->fields, r1);
2126    int r3 = get_field(s->fields, r3);
2127    TCGv_i32 t1, t3;
2128
2129    /* r1 and r3 must be even.  */
2130    if (r1 & 1 || r3 & 1) {
2131        gen_program_exception(s, PGM_SPECIFICATION);
2132        return DISAS_NORETURN;
2133    }
2134
2135    t1 = tcg_const_i32(r1);
2136    t3 = tcg_const_i32(r3);
2137    gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
2138    tcg_temp_free_i32(t1);
2139    tcg_temp_free_i32(t3);
2140    set_cc_static(s);
2141    return DISAS_NEXT;
2142}
2143
2144static DisasJumpType op_clclu(DisasContext *s, DisasOps *o)
2145{
2146    int r1 = get_field(s->fields, r1);
2147    int r3 = get_field(s->fields, r3);
2148    TCGv_i32 t1, t3;
2149
2150    /* r1 and r3 must be even.  */
2151    if (r1 & 1 || r3 & 1) {
2152        gen_program_exception(s, PGM_SPECIFICATION);
2153        return DISAS_NORETURN;
2154    }
2155
2156    t1 = tcg_const_i32(r1);
2157    t3 = tcg_const_i32(r3);
2158    gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
2159    tcg_temp_free_i32(t1);
2160    tcg_temp_free_i32(t3);
2161    set_cc_static(s);
2162    return DISAS_NEXT;
2163}
2164
2165static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
2166{
2167    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2168    TCGv_i32 t1 = tcg_temp_new_i32();
2169    tcg_gen_extrl_i64_i32(t1, o->in1);
2170    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
2171    set_cc_static(s);
2172    tcg_temp_free_i32(t1);
2173    tcg_temp_free_i32(m3);
2174    return DISAS_NEXT;
2175}
2176
2177static DisasJumpType op_clst(DisasContext *s, DisasOps *o)
2178{
2179    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
2180    set_cc_static(s);
2181    return_low128(o->in2);
2182    return DISAS_NEXT;
2183}
2184
2185static DisasJumpType op_cps(DisasContext *s, DisasOps *o)
2186{
2187    TCGv_i64 t = tcg_temp_new_i64();
2188    tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
2189    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
2190    tcg_gen_or_i64(o->out, o->out, t);
2191    tcg_temp_free_i64(t);
2192    return DISAS_NEXT;
2193}
2194
2195static DisasJumpType op_cs(DisasContext *s, DisasOps *o)
2196{
2197    int d2 = get_field(s->fields, d2);
2198    int b2 = get_field(s->fields, b2);
2199    TCGv_i64 addr, cc;
2200
2201    /* Note that in1 = R3 (new value) and
2202       in2 = (zero-extended) R1 (expected value).  */
2203
2204    addr = get_address(s, 0, b2, d2);
2205    tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
2206                               get_mem_index(s), s->insn->data | MO_ALIGN);
2207    tcg_temp_free_i64(addr);
2208
2209    /* Are the memory and expected values (un)equal?  Note that this setcond
2210       produces the output CC value, thus the NE sense of the test.  */
2211    cc = tcg_temp_new_i64();
2212    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
2213    tcg_gen_extrl_i64_i32(cc_op, cc);
2214    tcg_temp_free_i64(cc);
2215    set_cc_static(s);
2216
2217    return DISAS_NEXT;
2218}
2219
2220static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
2221{
2222    int r1 = get_field(s->fields, r1);
2223    int r3 = get_field(s->fields, r3);
2224    int d2 = get_field(s->fields, d2);
2225    int b2 = get_field(s->fields, b2);
2226    DisasJumpType ret = DISAS_NEXT;
2227    TCGv_i64 addr;
2228    TCGv_i32 t_r1, t_r3;
2229
2230    /* Note that R1:R1+1 = expected value and R3:R3+1 = new value.  */
2231    addr = get_address(s, 0, b2, d2);
2232    t_r1 = tcg_const_i32(r1);
2233    t_r3 = tcg_const_i32(r3);
2234    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
2235        gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
2236    } else if (HAVE_CMPXCHG128) {
2237        gen_helper_cdsg_parallel(cpu_env, addr, t_r1, t_r3);
2238    } else {
2239        gen_helper_exit_atomic(cpu_env);
2240        ret = DISAS_NORETURN;
2241    }
2242    tcg_temp_free_i64(addr);
2243    tcg_temp_free_i32(t_r1);
2244    tcg_temp_free_i32(t_r3);
2245
2246    set_cc_static(s);
2247    return ret;
2248}
2249
2250static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
2251{
2252    int r3 = get_field(s->fields, r3);
2253    TCGv_i32 t_r3 = tcg_const_i32(r3);
2254
2255    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
2256        gen_helper_csst_parallel(cc_op, cpu_env, t_r3, o->addr1, o->in2);
2257    } else {
2258        gen_helper_csst(cc_op, cpu_env, t_r3, o->addr1, o->in2);
2259    }
2260    tcg_temp_free_i32(t_r3);
2261
2262    set_cc_static(s);
2263    return DISAS_NEXT;
2264}
2265
2266#ifndef CONFIG_USER_ONLY
2267static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
2268{
2269    MemOp mop = s->insn->data;
2270    TCGv_i64 addr, old, cc;
2271    TCGLabel *lab = gen_new_label();
2272
2273    /* Note that in1 = R1 (zero-extended expected value),
2274       out = R1 (original reg), out2 = R1+1 (new value).  */
2275
2276    addr = tcg_temp_new_i64();
2277    old = tcg_temp_new_i64();
2278    tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
2279    tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
2280                               get_mem_index(s), mop | MO_ALIGN);
2281    tcg_temp_free_i64(addr);
2282
2283    /* Are the memory and expected values (un)equal?  */
2284    cc = tcg_temp_new_i64();
2285    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
2286    tcg_gen_extrl_i64_i32(cc_op, cc);
2287
2288    /* Write back the output now, so that it happens before the
2289       following branch, so that we don't need local temps.  */
2290    if ((mop & MO_SIZE) == MO_32) {
2291        tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
2292    } else {
2293        tcg_gen_mov_i64(o->out, old);
2294    }
2295    tcg_temp_free_i64(old);
2296
2297    /* If the comparison was equal, and the LSB of R2 was set,
2298       then we need to flush the TLB (for all cpus).  */
2299    tcg_gen_xori_i64(cc, cc, 1);
2300    tcg_gen_and_i64(cc, cc, o->in2);
2301    tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
2302    tcg_temp_free_i64(cc);
2303
2304    gen_helper_purge(cpu_env);
2305    gen_set_label(lab);
2306
2307    return DISAS_NEXT;
2308}
2309#endif
2310
2311static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
2312{
2313    TCGv_i64 t1 = tcg_temp_new_i64();
2314    TCGv_i32 t2 = tcg_temp_new_i32();
2315    tcg_gen_extrl_i64_i32(t2, o->in1);
2316    gen_helper_cvd(t1, t2);
2317    tcg_temp_free_i32(t2);
2318    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
2319    tcg_temp_free_i64(t1);
2320    return DISAS_NEXT;
2321}
2322
2323static DisasJumpType op_ct(DisasContext *s, DisasOps *o)
2324{
2325    int m3 = get_field(s->fields, m3);
2326    TCGLabel *lab = gen_new_label();
2327    TCGCond c;
2328
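        /* Branch around the trap when the inverted condition holds,
           i.e. trap exactly when the m3 comparison succeeds.  */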
2329    c = tcg_invert_cond(ltgt_cond[m3]);
2330    if (s->insn->data) {
2331        c = tcg_unsigned_cond(c);
2332    }
2333    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);
2334
2335    /* Trap.  */
2336    gen_trap(s);
2337
2338    gen_set_label(lab);
2339    return DISAS_NEXT;
2340}
2341
2342static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
2343{
2344    int m3 = get_field(s->fields, m3);
2345    int r1 = get_field(s->fields, r1);
2346    int r2 = get_field(s->fields, r2);
2347    TCGv_i32 tr1, tr2, chk;
2348
2349    /* R1 and R2 must both be even.  */
2350    if ((r1 | r2) & 1) {
2351        gen_program_exception(s, PGM_SPECIFICATION);
2352        return DISAS_NORETURN;
2353    }
2354    if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
2355        m3 = 0;
2356    }
2357
2358    tr1 = tcg_const_i32(r1);
2359    tr2 = tcg_const_i32(r2);
2360    chk = tcg_const_i32(m3);
2361
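        /* insn->data names the conversion as two digits, source then
           destination, with 1 = UTF-8, 2 = UTF-16 and 4 = UTF-32;
           e.g. 12 converts UTF-8 to UTF-16.  */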
2362    switch (s->insn->data) {
2363    case 12:
2364        gen_helper_cu12(cc_op, cpu_env, tr1, tr2, chk);
2365        break;
2366    case 14:
2367        gen_helper_cu14(cc_op, cpu_env, tr1, tr2, chk);
2368        break;
2369    case 21:
2370        gen_helper_cu21(cc_op, cpu_env, tr1, tr2, chk);
2371        break;
2372    case 24:
2373        gen_helper_cu24(cc_op, cpu_env, tr1, tr2, chk);
2374        break;
2375    case 41:
2376        gen_helper_cu41(cc_op, cpu_env, tr1, tr2, chk);
2377        break;
2378    case 42:
2379        gen_helper_cu42(cc_op, cpu_env, tr1, tr2, chk);
2380        break;
2381    default:
2382        g_assert_not_reached();
2383    }
2384
2385    tcg_temp_free_i32(tr1);
2386    tcg_temp_free_i32(tr2);
2387    tcg_temp_free_i32(chk);
2388    set_cc_static(s);
2389    return DISAS_NEXT;
2390}
2391
2392#ifndef CONFIG_USER_ONLY
2393static DisasJumpType op_diag(DisasContext *s, DisasOps *o)
2394{
2395    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2396    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2397    TCGv_i32 func_code = tcg_const_i32(get_field(s->fields, i2));
2398
2399    gen_helper_diag(cpu_env, r1, r3, func_code);
2400
2401    tcg_temp_free_i32(func_code);
2402    tcg_temp_free_i32(r3);
2403    tcg_temp_free_i32(r1);
2404    return DISAS_NEXT;
2405}
2406#endif
2407
2408static DisasJumpType op_divs32(DisasContext *s, DisasOps *o)
2409{
2410    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
2411    return_low128(o->out);
2412    return DISAS_NEXT;
2413}
2414
2415static DisasJumpType op_divu32(DisasContext *s, DisasOps *o)
2416{
2417    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
2418    return_low128(o->out);
2419    return DISAS_NEXT;
2420}
2421
2422static DisasJumpType op_divs64(DisasContext *s, DisasOps *o)
2423{
2424    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
2425    return_low128(o->out);
2426    return DISAS_NEXT;
2427}
2428
2429static DisasJumpType op_divu64(DisasContext *s, DisasOps *o)
2430{
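        /* in1 is unused: the 128-bit dividend already sits in out (high
           half) and out2 (low half).  Per DLGR, the remainder lands in
           the even register and the quotient in the odd one.  */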
2431    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
2432    return_low128(o->out);
2433    return DISAS_NEXT;
2434}
2435
2436static DisasJumpType op_deb(DisasContext *s, DisasOps *o)
2437{
2438    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
2439    return DISAS_NEXT;
2440}
2441
2442static DisasJumpType op_ddb(DisasContext *s, DisasOps *o)
2443{
2444    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
2445    return DISAS_NEXT;
2446}
2447
2448static DisasJumpType op_dxb(DisasContext *s, DisasOps *o)
2449{
2450    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2451    return_low128(o->out2);
2452    return DISAS_NEXT;
2453}
2454
2455static DisasJumpType op_ear(DisasContext *s, DisasOps *o)
2456{
2457    int r2 = get_field(s->fields, r2);
2458    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
2459    return DISAS_NEXT;
2460}
2461
2462static DisasJumpType op_ecag(DisasContext *s, DisasOps *o)
2463{
2464    /* No cache information provided.  */
2465    tcg_gen_movi_i64(o->out, -1);
2466    return DISAS_NEXT;
2467}
2468
2469static DisasJumpType op_efpc(DisasContext *s, DisasOps *o)
2470{
2471    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
2472    return DISAS_NEXT;
2473}
2474
2475static DisasJumpType op_epsw(DisasContext *s, DisasOps *o)
2476{
2477    int r1 = get_field(s->fields, r1);
2478    int r2 = get_field(s->fields, r2);
2479    TCGv_i64 t = tcg_temp_new_i64();
2480
2481    /* Note the "subsequently" in the PoO, which implies a defined result
2482       if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
2483    tcg_gen_shri_i64(t, psw_mask, 32);
2484    store_reg32_i64(r1, t);
2485    if (r2 != 0) {
2486        store_reg32_i64(r2, psw_mask);
2487    }
2488
2489    tcg_temp_free_i64(t);
2490    return DISAS_NEXT;
2491}
2492
2493static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
2494{
2495    int r1 = get_field(s->fields, r1);
2496    TCGv_i32 ilen;
2497    TCGv_i64 v1;
2498
2499    /* Nested EXECUTE is not allowed.  */
2500    if (unlikely(s->ex_value)) {
2501        gen_program_exception(s, PGM_EXECUTE);
2502        return DISAS_NORETURN;
2503    }
2504
2505    update_psw_addr(s);
2506    update_cc_op(s);
2507
2508    if (r1 == 0) {
2509        v1 = tcg_const_i64(0);
2510    } else {
2511        v1 = regs[r1];
2512    }
2513
2514    ilen = tcg_const_i32(s->ilen);
2515    gen_helper_ex(cpu_env, ilen, v1, o->in2);
2516    tcg_temp_free_i32(ilen);
2517
2518    if (r1 == 0) {
2519        tcg_temp_free_i64(v1);
2520    }
2521
2522    return DISAS_PC_CC_UPDATED;
2523}
2524
2525static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
2526{
2527    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2528
2529    if (!m34) {
2530        return DISAS_NORETURN;
2531    }
2532    gen_helper_fieb(o->out, cpu_env, o->in2, m34);
2533    tcg_temp_free_i32(m34);
2534    return DISAS_NEXT;
2535}
2536
2537static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
2538{
2539    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2540
2541    if (!m34) {
2542        return DISAS_NORETURN;
2543    }
2544    gen_helper_fidb(o->out, cpu_env, o->in2, m34);
2545    tcg_temp_free_i32(m34);
2546    return DISAS_NEXT;
2547}
2548
2549static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
2550{
2551    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2552
2553    if (!m34) {
2554        return DISAS_NORETURN;
2555    }
2556    gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m34);
2557    return_low128(o->out2);
2558    tcg_temp_free_i32(m34);
2559    return DISAS_NEXT;
2560}
2561
2562static DisasJumpType op_flogr(DisasContext *s, DisasOps *o)
2563{
2564    /* We'll use the original input for cc computation, since we get to
2565       compare that against 0, which ought to be better than comparing
2566       the real output against 64.  It also lets cc_dst be a convenient
2567       temporary during our computation.  */
2568    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);
2569
2570    /* R1 = IN ? CLZ(IN) : 64.  */
2571    tcg_gen_clzi_i64(o->out, o->in2, 64);
2572
2573    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
2574       value by 64, which is undefined.  But since the shift is 64 iff the
2575       input is zero, we still get the correct result after and'ing.  */
2576    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
2577    tcg_gen_shr_i64(o->out2, o->out2, o->out);
2578    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
2579    return DISAS_NEXT;
2580}
2581
2582static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
2583{
2584    int m3 = get_field(s->fields, m3);
2585    int pos, len, base = s->insn->data;
2586    TCGv_i64 tmp = tcg_temp_new_i64();
2587    uint64_t ccm;
2588
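        /* The m3 mask selects which bytes of the register receive
           successive bytes from memory; ccm accumulates the mask of the
           inserted bits, from which CC_OP_ICM derives the condition
           code.  */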
2589    switch (m3) {
2590    case 0xf:
2591        /* Effectively a 32-bit load.  */
2592        tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2593        len = 32;
2594        goto one_insert;
2595
2596    case 0xc:
2597    case 0x6:
2598    case 0x3:
2599        /* Effectively a 16-bit load.  */
2600        tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2601        len = 16;
2602        goto one_insert;
2603
2604    case 0x8:
2605    case 0x4:
2606    case 0x2:
2607    case 0x1:
2608        /* Effectively an 8-bit load.  */
2609        tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2610        len = 8;
2611        goto one_insert;
2612
2613    one_insert:
2614        pos = base + ctz32(m3) * 8;
2615        tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2616        ccm = ((1ull << len) - 1) << pos;
2617        break;
2618
2619    default:
2620        /* This is going to be a sequence of loads and inserts.  */
2621        pos = base + 32 - 8;
2622        ccm = 0;
2623        while (m3) {
2624            if (m3 & 0x8) {
2625                tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2626                tcg_gen_addi_i64(o->in2, o->in2, 1);
2627                tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2628                ccm |= 0xffull << pos;
2629            }
2630            m3 = (m3 << 1) & 0xf;
2631            pos -= 8;
2632        }
2633        break;
2634    }
2635
2636    tcg_gen_movi_i64(tmp, ccm);
2637    gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2638    tcg_temp_free_i64(tmp);
2639    return DISAS_NEXT;
2640}
2641
2642static DisasJumpType op_insi(DisasContext *s, DisasOps *o)
2643{
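        /* insn->data packs the deposit parameters: field size in the
           high byte, bit offset in the low byte.  */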
2644    int shift = s->insn->data & 0xff;
2645    int size = s->insn->data >> 8;
2646    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
2647    return DISAS_NEXT;
2648}
2649
2650static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
2651{
2652    TCGv_i64 t1, t2;
2653
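        /* Build the byte to insert: program mask (PSW mask bits 40-43)
           in bits 0-3 and condition code in bits 4-5, then deposit it
           into bits 24-31 of r1.  The deposit width of 60 is safe, as
           cc fits in two bits and only the low byte of t1 is used.  */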
2654    gen_op_calc_cc(s);
2655    t1 = tcg_temp_new_i64();
2656    tcg_gen_extract_i64(t1, psw_mask, 40, 4);
2657    t2 = tcg_temp_new_i64();
2658    tcg_gen_extu_i32_i64(t2, cc_op);
2659    tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
2660    tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);
2661    tcg_temp_free_i64(t1);
2662    tcg_temp_free_i64(t2);
2663    return DISAS_NEXT;
2664}
2665
2666#ifndef CONFIG_USER_ONLY
2667static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
2668{
2669    TCGv_i32 m4;
2670
2671    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2672        m4 = tcg_const_i32(get_field(s->fields, m4));
2673    } else {
2674        m4 = tcg_const_i32(0);
2675    }
2676    gen_helper_idte(cpu_env, o->in1, o->in2, m4);
2677    tcg_temp_free_i32(m4);
2678    return DISAS_NEXT;
2679}
2680
2681static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
2682{
2683    TCGv_i32 m4;
2684
2685    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2686        m4 = tcg_const_i32(get_field(s->fields, m4));
2687    } else {
2688        m4 = tcg_const_i32(0);
2689    }
2690    gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
2691    tcg_temp_free_i32(m4);
2692    return DISAS_NEXT;
2693}
2694
2695static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
2696{
2697    gen_helper_iske(o->out, cpu_env, o->in2);
2698    return DISAS_NEXT;
2699}
2700#endif
2701
2702static DisasJumpType op_msa(DisasContext *s, DisasOps *o)
2703{
2704    int r1 = have_field(s->fields, r1) ? get_field(s->fields, r1) : 0;
2705    int r2 = have_field(s->fields, r2) ? get_field(s->fields, r2) : 0;
2706    int r3 = have_field(s->fields, r3) ? get_field(s->fields, r3) : 0;
2707    TCGv_i32 t_r1, t_r2, t_r3, type;
2708
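        /* The message-security-assist functions require some of their
           operand registers to be nonzero even-odd pairs; the checks
           cascade from the strictest case via fall-through.  */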
2709    switch (s->insn->data) {
2710    case S390_FEAT_TYPE_KMCTR:
2711        if (r3 & 1 || !r3) {
2712            gen_program_exception(s, PGM_SPECIFICATION);
2713            return DISAS_NORETURN;
2714        }
2715        /* FALL THROUGH */
2716    case S390_FEAT_TYPE_PPNO:
2717    case S390_FEAT_TYPE_KMF:
2718    case S390_FEAT_TYPE_KMC:
2719    case S390_FEAT_TYPE_KMO:
2720    case S390_FEAT_TYPE_KM:
2721        if (r1 & 1 || !r1) {
2722            gen_program_exception(s, PGM_SPECIFICATION);
2723            return DISAS_NORETURN;
2724        }
2725        /* FALL THROUGH */
2726    case S390_FEAT_TYPE_KMAC:
2727    case S390_FEAT_TYPE_KIMD:
2728    case S390_FEAT_TYPE_KLMD:
2729        if (r2 & 1 || !r2) {
2730            gen_program_exception(s, PGM_SPECIFICATION);
2731            return DISAS_NORETURN;
2732        }
2733        /* FALL THROUGH */
2734    case S390_FEAT_TYPE_PCKMO:
2735    case S390_FEAT_TYPE_PCC:
2736        break;
2737    default:
2738        g_assert_not_reached();
2739    }
2740
2741    t_r1 = tcg_const_i32(r1);
2742    t_r2 = tcg_const_i32(r2);
2743    t_r3 = tcg_const_i32(r3);
2744    type = tcg_const_i32(s->insn->data);
2745    gen_helper_msa(cc_op, cpu_env, t_r1, t_r2, t_r3, type);
2746    set_cc_static(s);
2747    tcg_temp_free_i32(t_r1);
2748    tcg_temp_free_i32(t_r2);
2749    tcg_temp_free_i32(t_r3);
2750    tcg_temp_free_i32(type);
2751    return DISAS_NEXT;
2752}
2753
2754static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
2755{
2756    gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
2757    set_cc_static(s);
2758    return DISAS_NEXT;
2759}
2760
2761static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
2762{
2763    gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
2764    set_cc_static(s);
2765    return DISAS_NEXT;
2766}
2767
2768static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
2769{
2770    gen_helper_kxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
2771    set_cc_static(s);
2772    return DISAS_NEXT;
2773}
2774
2775static DisasJumpType op_laa(DisasContext *s, DisasOps *o)
2776{
2777    /* The real output is indeed the original value in memory,
2778       as returned by the atomic fetch-and-add.  */
2779    tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2780                                 s->insn->data | MO_ALIGN);
2781    /* However, we need to recompute the addition for setting CC.  */
2782    tcg_gen_add_i64(o->out, o->in1, o->in2);
2783    return DISAS_NEXT;
2784}
2785
2786static DisasJumpType op_lan(DisasContext *s, DisasOps *o)
2787{
2788    /* The real output is indeed the original value in memory,
2789       as returned by the atomic fetch-and-and.  */
2790    tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2791                                 s->insn->data | MO_ALIGN);
2792    /* However, we need to recompute the operation for setting CC.  */
2793    tcg_gen_and_i64(o->out, o->in1, o->in2);
2794    return DISAS_NEXT;
2795}
2796
2797static DisasJumpType op_lao(DisasContext *s, DisasOps *o)
2798{
2799    /* The real output is indeed the original value in memory,
2800       as returned by the atomic fetch-and-or.  */
2801    tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2802                                s->insn->data | MO_ALIGN);
2803    /* However, we need to recompute the operation for setting CC.  */
2804    tcg_gen_or_i64(o->out, o->in1, o->in2);
2805    return DISAS_NEXT;
2806}
2807
2808static DisasJumpType op_lax(DisasContext *s, DisasOps *o)
2809{
2810    /* The real output is indeed the original value in memory,
2811       as returned by the atomic fetch-and-xor.  */
2812    tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2813                                 s->insn->data | MO_ALIGN);
2814    /* However, we need to recompute the operation for setting CC.  */
2815    tcg_gen_xor_i64(o->out, o->in1, o->in2);
2816    return DISAS_NEXT;
2817}
2818
2819static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
2820{
2821    gen_helper_ldeb(o->out, cpu_env, o->in2);
2822    return DISAS_NEXT;
2823}
2824
2825static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
2826{
2827    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2828
2829    if (!m34) {
2830        return DISAS_NORETURN;
2831    }
2832    gen_helper_ledb(o->out, cpu_env, o->in2, m34);
2833    tcg_temp_free_i32(m34);
2834    return DISAS_NEXT;
2835}
2836
2837static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
2838{
2839    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2840
2841    if (!m34) {
2842        return DISAS_NORETURN;
2843    }
2844    gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2, m34);
2845    tcg_temp_free_i32(m34);
2846    return DISAS_NEXT;
2847}
2848
2849static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
2850{
2851    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2852
2853    if (!m34) {
2854        return DISAS_NORETURN;
2855    }
2856    gen_helper_lexb(o->out, cpu_env, o->in1, o->in2, m34);
2857    tcg_temp_free_i32(m34);
2858    return DISAS_NEXT;
2859}
2860
2861static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
2862{
2863    gen_helper_lxdb(o->out, cpu_env, o->in2);
2864    return_low128(o->out2);
2865    return DISAS_NEXT;
2866}
2867
2868static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
2869{
2870    gen_helper_lxeb(o->out, cpu_env, o->in2);
2871    return_low128(o->out2);
2872    return DISAS_NEXT;
2873}
2874
2875static DisasJumpType op_lde(DisasContext *s, DisasOps *o)
2876{
2877    tcg_gen_shli_i64(o->out, o->in2, 32);
2878    return DISAS_NEXT;
2879}
2880
2881static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
2882{
2883    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2884    return DISAS_NEXT;
2885}
2886
2887static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
2888{
2889    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
2890    return DISAS_NEXT;
2891}
2892
2893static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
2894{
2895    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
2896    return DISAS_NEXT;
2897}
2898
2899static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
2900{
2901    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
2902    return DISAS_NEXT;
2903}
2904
2905static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
2906{
2907    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
2908    return DISAS_NEXT;
2909}
2910
2911static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
2912{
2913    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
2914    return DISAS_NEXT;
2915}
2916
2917static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
2918{
2919    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2920    return DISAS_NEXT;
2921}
2922
2923static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
2924{
2925    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2926    return DISAS_NEXT;
2927}
2928
2929static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
2930{
2931    TCGLabel *lab = gen_new_label();
2932    store_reg32_i64(get_field(s->fields, r1), o->in2);
2933    /* The value is stored even in case of trap. */
2934    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2935    gen_trap(s);
2936    gen_set_label(lab);
2937    return DISAS_NEXT;
2938}
2939
2940static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
2941{
2942    TCGLabel *lab = gen_new_label();
2943    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2944    /* The value is stored even in case of trap. */
2945    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2946    gen_trap(s);
2947    gen_set_label(lab);
2948    return DISAS_NEXT;
2949}
2950
2951static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
2952{
2953    TCGLabel *lab = gen_new_label();
2954    store_reg32h_i64(get_field(s->fields, r1), o->in2);
2955    /* The value is stored even in case of trap. */
2956    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2957    gen_trap(s);
2958    gen_set_label(lab);
2959    return DISAS_NEXT;
2960}
2961
2962static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
2963{
2964    TCGLabel *lab = gen_new_label();
2965    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2966    /* The value is stored even in case of trap. */
2967    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2968    gen_trap(s);
2969    gen_set_label(lab);
2970    return DISAS_NEXT;
2971}
2972
2973static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
2974{
2975    TCGLabel *lab = gen_new_label();
2976    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2977    /* The value is stored even in case of trap. */
2978    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2979    gen_trap(s);
2980    gen_set_label(lab);
2981    return DISAS_NEXT;
2982}
2983
2984static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
2985{
2986    DisasCompare c;
2987
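        /* Load on condition: take in2 when the m3 condition holds,
           otherwise keep the old value in1.  */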
2988    disas_jcc(s, &c, get_field(s->fields, m3));
2989
2990    if (c.is_64) {
2991        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
2992                            o->in2, o->in1);
2993        free_compare(&c);
2994    } else {
2995        TCGv_i32 t32 = tcg_temp_new_i32();
2996        TCGv_i64 t, z;
2997
2998        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
2999        free_compare(&c);
3000
3001        t = tcg_temp_new_i64();
3002        tcg_gen_extu_i32_i64(t, t32);
3003        tcg_temp_free_i32(t32);
3004
3005        z = tcg_const_i64(0);
3006        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
3007        tcg_temp_free_i64(t);
3008        tcg_temp_free_i64(z);
3009    }
3010
3011    return DISAS_NEXT;
3012}
3013
3014#ifndef CONFIG_USER_ONLY
3015static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
3016{
3017    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3018    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3019    gen_helper_lctl(cpu_env, r1, o->in2, r3);
3020    tcg_temp_free_i32(r1);
3021    tcg_temp_free_i32(r3);
3022    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
3023    return DISAS_PC_STALE_NOCHAIN;
3024}
3025
3026static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
3027{
3028    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3029    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3030    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
3031    tcg_temp_free_i32(r1);
3032    tcg_temp_free_i32(r3);
3033    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
3034    return DISAS_PC_STALE_NOCHAIN;
3035}
3036
3037static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
3038{
3039    gen_helper_lra(o->out, cpu_env, o->in2);
3040    set_cc_static(s);
3041    return DISAS_NEXT;
3042}
3043
3044static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
3045{
3046    tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
3047    return DISAS_NEXT;
3048}
3049
3050static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
3051{
3052    TCGv_i64 t1, t2;
3053
3054    per_breaking_event(s);
3055
3056    t1 = tcg_temp_new_i64();
3057    t2 = tcg_temp_new_i64();
3058    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
3059                        MO_TEUL | MO_ALIGN_8);
3060    tcg_gen_addi_i64(o->in2, o->in2, 4);
3061    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
3062    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK.  */
3063    tcg_gen_shli_i64(t1, t1, 32);
3064    gen_helper_load_psw(cpu_env, t1, t2);
3065    tcg_temp_free_i64(t1);
3066    tcg_temp_free_i64(t2);
3067    return DISAS_NORETURN;
3068}
3069
3070static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
3071{
3072    TCGv_i64 t1, t2;
3073
3074    per_breaking_event(s);
3075
3076    t1 = tcg_temp_new_i64();
3077    t2 = tcg_temp_new_i64();
3078    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
3079                        MO_TEQ | MO_ALIGN_8);
3080    tcg_gen_addi_i64(o->in2, o->in2, 8);
3081    tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
3082    gen_helper_load_psw(cpu_env, t1, t2);
3083    tcg_temp_free_i64(t1);
3084    tcg_temp_free_i64(t2);
3085    return DISAS_NORETURN;
3086}
3087#endif
3088
3089static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
3090{
3091    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3092    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3093    gen_helper_lam(cpu_env, r1, o->in2, r3);
3094    tcg_temp_free_i32(r1);
3095    tcg_temp_free_i32(r3);
3096    return DISAS_NEXT;
3097}
3098
3099static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
3100{
3101    int r1 = get_field(s->fields, r1);
3102    int r3 = get_field(s->fields, r3);
3103    TCGv_i64 t1, t2;
3104
3105    /* Only one register to read. */
3106    t1 = tcg_temp_new_i64();
3107    if (unlikely(r1 == r3)) {
3108        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3109        store_reg32_i64(r1, t1);
3110        tcg_temp_free(t1);
3111        return DISAS_NEXT;
3112    }
3113
3114    /* First load the values of the first and last registers to trigger
3115       possible page faults. */
3116    t2 = tcg_temp_new_i64();
3117    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3118    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
3119    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
3120    store_reg32_i64(r1, t1);
3121    store_reg32_i64(r3, t2);
3122
3123    /* Only two registers to read. */
3124    if (((r1 + 1) & 15) == r3) {
3125        tcg_temp_free(t2);
3126        tcg_temp_free(t1);
3127        return DISAS_NEXT;
3128    }
3129
3130    /* Then load the remaining registers. Page fault can't occur. */
3131    r3 = (r3 - 1) & 15;
3132    tcg_gen_movi_i64(t2, 4);
3133    while (r1 != r3) {
3134        r1 = (r1 + 1) & 15;
3135        tcg_gen_add_i64(o->in2, o->in2, t2);
3136        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3137        store_reg32_i64(r1, t1);
3138    }
3139    tcg_temp_free(t2);
3140    tcg_temp_free(t1);
3141
3142    return DISAS_NEXT;
3143}
3144
3145static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
3146{
3147    int r1 = get_field(s->fields, r1);
3148    int r3 = get_field(s->fields, r3);
3149    TCGv_i64 t1, t2;
3150
3151    /* Only one register to read. */
3152    t1 = tcg_temp_new_i64();
3153    if (unlikely(r1 == r3)) {
3154        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3155        store_reg32h_i64(r1, t1);
3156        tcg_temp_free(t1);
3157        return DISAS_NEXT;
3158    }
3159
3160    /* First load the values of the first and last registers to trigger
3161       possible page faults. */
3162    t2 = tcg_temp_new_i64();
3163    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3164    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
3165    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
3166    store_reg32h_i64(r1, t1);
3167    store_reg32h_i64(r3, t2);
3168
3169    /* Only two registers to read. */
3170    if (((r1 + 1) & 15) == r3) {
3171        tcg_temp_free(t2);
3172        tcg_temp_free(t1);
3173        return DISAS_NEXT;
3174    }
3175
3176    /* Then load the remaining registers. Page fault can't occur. */
3177    r3 = (r3 - 1) & 15;
3178    tcg_gen_movi_i64(t2, 4);
3179    while (r1 != r3) {
3180        r1 = (r1 + 1) & 15;
3181        tcg_gen_add_i64(o->in2, o->in2, t2);
3182        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3183        store_reg32h_i64(r1, t1);
3184    }
3185    tcg_temp_free(t2);
3186    tcg_temp_free(t1);
3187
3188    return DISAS_NEXT;
3189}
3190
3191static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
3192{
3193    int r1 = get_field(s->fields, r1);
3194    int r3 = get_field(s->fields, r3);
3195    TCGv_i64 t1, t2;
3196
3197    /* Only one register to read. */
3198    if (unlikely(r1 == r3)) {
3199        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
3200        return DISAS_NEXT;
3201    }
3202
3203    /* First load the values of the first and last registers to trigger
3204       possible page faults. */
3205    t1 = tcg_temp_new_i64();
3206    t2 = tcg_temp_new_i64();
3207    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
3208    tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
3209    tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
3210    tcg_gen_mov_i64(regs[r1], t1);
3211    tcg_temp_free(t2);
3212
3213    /* Only two registers to read. */
3214    if (((r1 + 1) & 15) == r3) {
3215        tcg_temp_free(t1);
3216        return DISAS_NEXT;
3217    }
3218
3219    /* Then load the remaining registers. Page fault can't occur. */
3220    r3 = (r3 - 1) & 15;
3221    tcg_gen_movi_i64(t1, 8);
3222    while (r1 != r3) {
3223        r1 = (r1 + 1) & 15;
3224        tcg_gen_add_i64(o->in2, o->in2, t1);
3225        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
3226    }
3227    tcg_temp_free(t1);
3228
3229    return DISAS_NEXT;
3230}
3231
3232static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
3233{
3234    TCGv_i64 a1, a2;
3235    MemOp mop = s->insn->data;
3236
3237    /* In a parallel context, stop the world and single step.  */
3238    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
3239        update_psw_addr(s);
3240        update_cc_op(s);
3241        gen_exception(EXCP_ATOMIC);
3242        return DISAS_NORETURN;
3243    }
3244
3245    /* In a serial context, perform the two loads ... */
3246    a1 = get_address(s, 0, get_field(s->fields, b1), get_field(s->fields, d1));
3247    a2 = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
3248    tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
3249    tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
3250    tcg_temp_free_i64(a1);
3251    tcg_temp_free_i64(a2);
3252
3253    /* ... and indicate that we performed them while interlocked.  */
3254    gen_op_movi_cc(s, 0);
3255    return DISAS_NEXT;
3256}
3257
3258static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
3259{
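        /* LOAD PAIR FROM QUADWORD needs a 16-byte atomic load: use the
           serial helper outside of a parallel context, the host-atomic
           helper when 128-bit atomics are available, and otherwise exit
           to the main loop to run the instruction exclusively.  */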
3260    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
3261        gen_helper_lpq(o->out, cpu_env, o->in2);
3262    } else if (HAVE_ATOMIC128) {
3263        gen_helper_lpq_parallel(o->out, cpu_env, o->in2);
3264    } else {
3265        gen_helper_exit_atomic(cpu_env);
3266        return DISAS_NORETURN;
3267    }
3268    return_low128(o->out2);
3269    return DISAS_NEXT;
3270}
3271
3272#ifndef CONFIG_USER_ONLY
3273static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
3274{
3275    gen_helper_lura(o->out, cpu_env, o->in2);
3276    return DISAS_NEXT;
3277}
3278
3279static DisasJumpType op_lurag(DisasContext *s, DisasOps *o)
3280{
3281    gen_helper_lurag(o->out, cpu_env, o->in2);
3282    return DISAS_NEXT;
3283}
3284#endif
3285
3286static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o)
3287{
3288    tcg_gen_andi_i64(o->out, o->in2, -256);
3289    return DISAS_NEXT;
3290}
3291
3292static DisasJumpType op_lcbb(DisasContext *s, DisasOps *o)
3293{
3294    const int64_t block_size = (1ull << (get_field(s->fields, m3) + 6));
3295
3296    if (get_field(s->fields, m3) > 6) {
3297        gen_program_exception(s, PGM_SPECIFICATION);
3298        return DISAS_NORETURN;
3299    }
3300
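        /* -(addr | -block_size) is block_size - (addr % block_size),
           i.e. the distance from addr to the next block boundary,
           which LCBB then caps at 16, the vector register size.  */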
3301    tcg_gen_ori_i64(o->addr1, o->addr1, -block_size);
3302    tcg_gen_neg_i64(o->addr1, o->addr1);
3303    tcg_gen_movi_i64(o->out, 16);
3304    tcg_gen_umin_i64(o->out, o->out, o->addr1);
3305    gen_op_update1_cc_i64(s, CC_OP_LCBB, o->out);
3306    return DISAS_NEXT;
3307}
3308
3309static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
3310{
3311    o->out = o->in2;
3312    o->g_out = o->g_in2;
3313    o->in2 = NULL;
3314    o->g_in2 = false;
3315    return DISAS_NEXT;
3316}
3317
3318static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
3319{
3320    int b2 = get_field(s->fields, b2);
3321    TCGv ar1 = tcg_temp_new_i64();
3322
3323    o->out = o->in2;
3324    o->g_out = o->g_in2;
3325    o->in2 = NULL;
3326    o->g_in2 = false;
3327
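        /* Initialize access register 1 according to the PSW
           address-space control currently in effect.  */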
3328    switch (s->base.tb->flags & FLAG_MASK_ASC) {
3329    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
3330        tcg_gen_movi_i64(ar1, 0);
3331        break;
3332    case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
3333        tcg_gen_movi_i64(ar1, 1);
3334        break;
3335    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
3336        if (b2) {
3337            tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
3338        } else {
3339            tcg_gen_movi_i64(ar1, 0);
3340        }
3341        break;
3342    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
3343        tcg_gen_movi_i64(ar1, 2);
3344        break;
3345    }
3346
3347    tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
3348    tcg_temp_free_i64(ar1);
3349
3350    return DISAS_NEXT;
3351}
3352
3353static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
3354{
3355    o->out = o->in1;
3356    o->out2 = o->in2;
3357    o->g_out = o->g_in1;
3358    o->g_out2 = o->g_in2;
3359    o->in1 = NULL;
3360    o->in2 = NULL;
3361    o->g_in1 = o->g_in2 = false;
3362    return DISAS_NEXT;
3363}
3364
3365static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
3366{
3367    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3368    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
3369    tcg_temp_free_i32(l);
3370    return DISAS_NEXT;
3371}
3372
3373static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
3374{
3375    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3376    gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
3377    tcg_temp_free_i32(l);
3378    return DISAS_NEXT;
3379}
3380
3381static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
3382{
3383    int r1 = get_field(s->fields, r1);
3384    int r2 = get_field(s->fields, r2);
3385    TCGv_i32 t1, t2;
3386
3387    /* r1 and r2 must be even.  */
3388    if (r1 & 1 || r2 & 1) {
3389        gen_program_exception(s, PGM_SPECIFICATION);
3390        return DISAS_NORETURN;
3391    }
3392
3393    t1 = tcg_const_i32(r1);
3394    t2 = tcg_const_i32(r2);
3395    gen_helper_mvcl(cc_op, cpu_env, t1, t2);
3396    tcg_temp_free_i32(t1);
3397    tcg_temp_free_i32(t2);
3398    set_cc_static(s);
3399    return DISAS_NEXT;
3400}
3401
3402static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
3403{
3404    int r1 = get_field(s->fields, r1);
3405    int r3 = get_field(s->fields, r3);
3406    TCGv_i32 t1, t3;
3407
3408    /* r1 and r3 must be even.  */
3409    if (r1 & 1 || r3 & 1) {
3410        gen_program_exception(s, PGM_SPECIFICATION);
3411        return DISAS_NORETURN;
3412    }
3413
3414    t1 = tcg_const_i32(r1);
3415    t3 = tcg_const_i32(r3);
3416    gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
3417    tcg_temp_free_i32(t1);
3418    tcg_temp_free_i32(t3);
3419    set_cc_static(s);
3420    return DISAS_NEXT;
3421}
3422
3423static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
3424{
3425    int r1 = get_field(s->fields, r1);
3426    int r3 = get_field(s->fields, r3);
3427    TCGv_i32 t1, t3;
3428
3429    /* r1 and r3 must be even.  */
3430    if (r1 & 1 || r3 & 1) {
3431        gen_program_exception(s, PGM_SPECIFICATION);
3432        return DISAS_NORETURN;
3433    }
3434
3435    t1 = tcg_const_i32(r1);
3436    t3 = tcg_const_i32(r3);
3437    gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
3438    tcg_temp_free_i32(t1);
3439    tcg_temp_free_i32(t3);
3440    set_cc_static(s);
3441    return DISAS_NEXT;
3442}
3443
3444static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
3445{
3446    int r3 = get_field(s->fields, r3);
3447    gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);
3448    set_cc_static(s);
3449    return DISAS_NEXT;
3450}
3451
3452#ifndef CONFIG_USER_ONLY
3453static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o)
3454{
3455    int r1 = get_field(s->fields, l1);
3456    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
3457    set_cc_static(s);
3458    return DISAS_NEXT;
3459}
3460
3461static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
3462{
3463    int r1 = get_field(s->fields, l1);
3464    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
3465    set_cc_static(s);
3466    return DISAS_NEXT;
3467}
3468#endif
3469
3470static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
3471{
3472    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3473    gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
3474    tcg_temp_free_i32(l);
3475    return DISAS_NEXT;
3476}
3477
3478static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
3479{
3480    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3481    gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
3482    tcg_temp_free_i32(l);
3483    return DISAS_NEXT;
3484}
3485
3486static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
3487{
3488    gen_helper_mvpg(cc_op, cpu_env, regs[0], o->in1, o->in2);
3489    set_cc_static(s);
3490    return DISAS_NEXT;
3491}
3492
3493static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
3494{
3495    TCGv_i32 t1 = tcg_const_i32(get_field(s->fields, r1));
3496    TCGv_i32 t2 = tcg_const_i32(get_field(s->fields, r2));
3497
3498    gen_helper_mvst(cc_op, cpu_env, t1, t2);
3499    tcg_temp_free_i32(t1);
3500    tcg_temp_free_i32(t2);
3501    set_cc_static(s);
3502    return DISAS_NEXT;
3503}
3504
3505static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
3506{
3507    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3508    gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
3509    tcg_temp_free_i32(l);
3510    return DISAS_NEXT;
3511}
3512
3513static DisasJumpType op_mul(DisasContext *s, DisasOps *o)
3514{
3515    tcg_gen_mul_i64(o->out, o->in1, o->in2);
3516    return DISAS_NEXT;
3517}
3518
3519static DisasJumpType op_mul128(DisasContext *s, DisasOps *o)
3520{
3521    tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
3522    return DISAS_NEXT;
3523}
3524
3525static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
3526{
3527    gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
3528    return DISAS_NEXT;
3529}
3530
3531static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
3532{
3533    gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
3534    return DISAS_NEXT;
3535}
3536
3537static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
3538{
3539    gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
3540    return DISAS_NEXT;
3541}
3542
3543static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
3544{
3545    gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
3546    return_low128(o->out2);
3547    return DISAS_NEXT;
3548}
3549
3550static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
3551{
3552    gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
3553    return_low128(o->out2);
3554    return DISAS_NEXT;
3555}
3556
3557static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
3558{
3559    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
3560    gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
3561    tcg_temp_free_i64(r3);
3562    return DISAS_NEXT;
3563}
3564
3565static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
3566{
3567    TCGv_i64 r3 = load_freg(get_field(s->fields, r3));
3568    gen_helper_madb(o->out, cpu_env, o->in1, o->in2, r3);
3569    tcg_temp_free_i64(r3);
3570    return DISAS_NEXT;
3571}
3572
3573static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
3574{
3575    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
3576    gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
3577    tcg_temp_free_i64(r3);
3578    return DISAS_NEXT;
3579}
3580
3581static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
3582{
3583    TCGv_i64 r3 = load_freg(get_field(s->fields, r3));
3584    gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, r3);
3585    tcg_temp_free_i64(r3);
3586    return DISAS_NEXT;
3587}
3588
3589static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
3590{
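        /* Negative absolute value: out = -|in2|.  */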
3591    TCGv_i64 z, n;
3592    z = tcg_const_i64(0);
3593    n = tcg_temp_new_i64();
3594    tcg_gen_neg_i64(n, o->in2);
3595    tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
3596    tcg_temp_free_i64(n);
3597    tcg_temp_free_i64(z);
3598    return DISAS_NEXT;
3599}
3600
3601static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o)
3602{
3603    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
3604    return DISAS_NEXT;
3605}
3606
3607static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o)
3608{
3609    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
3610    return DISAS_NEXT;
3611}
3612
3613static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
3614{
3615    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
3616    tcg_gen_mov_i64(o->out2, o->in2);
3617    return DISAS_NEXT;
3618}
3619
3620static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
3621{
3622    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3623    gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
3624    tcg_temp_free_i32(l);
3625    set_cc_static(s);
3626    return DISAS_NEXT;
3627}
3628
3629static DisasJumpType op_neg(DisasContext *s, DisasOps *o)
3630{
3631    tcg_gen_neg_i64(o->out, o->in2);
3632    return DISAS_NEXT;
3633}
3634
3635static DisasJumpType op_negf32(DisasContext *s, DisasOps *o)
3636{
3637    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
3638    return DISAS_NEXT;
3639}
3640
3641static DisasJumpType op_negf64(DisasContext *s, DisasOps *o)
3642{
3643    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
3644    return DISAS_NEXT;
3645}
3646
3647static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
3648{
3649    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
3650    tcg_gen_mov_i64(o->out2, o->in2);
3651    return DISAS_NEXT;
3652}
3653
3654static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
3655{
3656    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3657    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
3658    tcg_temp_free_i32(l);
3659    set_cc_static(s);
3660    return DISAS_NEXT;
3661}
3662
3663static DisasJumpType op_or(DisasContext *s, DisasOps *o)
3664{
3665    tcg_gen_or_i64(o->out, o->in1, o->in2);
3666    return DISAS_NEXT;
3667}
3668
3669static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
3670{
3671    int shift = s->insn->data & 0xff;
3672    int size = s->insn->data >> 8;
3673    uint64_t mask = ((1ull << size) - 1) << shift;
3674
3675    assert(!o->g_in2);
3676    tcg_gen_shli_i64(o->in2, o->in2, shift);
3677    tcg_gen_or_i64(o->out, o->in1, o->in2);
3678
3679    /* Produce the CC from only the bits manipulated.  */
3680    tcg_gen_andi_i64(cc_dst, o->out, mask);
3681    set_cc_nz_u64(s, cc_dst);
3682    return DISAS_NEXT;
3683}
3684
3685static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
3686{
3687    o->in1 = tcg_temp_new_i64();
3688
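    /* For this insn, s->insn->data holds the MemOp describing the access.  */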
3689    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3690        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
3691    } else {
3692        /* Perform the atomic operation in memory. */
3693        tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
3694                                    s->insn->data);
3695    }
3696
3697    /* Recompute also for atomic case: needed for setting CC. */
3698    tcg_gen_or_i64(o->out, o->in1, o->in2);
3699
3700    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3701        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
3702    }
3703    return DISAS_NEXT;
3704}
3705
3706static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
3707{
3708    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3709    gen_helper_pack(cpu_env, l, o->addr1, o->in2);
3710    tcg_temp_free_i32(l);
3711    return DISAS_NEXT;
3712}
3713
3714static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
3715{
3716    int l2 = get_field(s->fields, l2) + 1;
3717    TCGv_i32 l;
3718
3719    /* The length must not exceed 32 bytes.  */
3720    if (l2 > 32) {
3721        gen_program_exception(s, PGM_SPECIFICATION);
3722        return DISAS_NORETURN;
3723    }
3724    l = tcg_const_i32(l2);
3725    gen_helper_pka(cpu_env, o->addr1, o->in2, l);
3726    tcg_temp_free_i32(l);
3727    return DISAS_NEXT;
3728}
3729
3730static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
3731{
3732    int l2 = get_field(s->fields, l2) + 1;
3733    TCGv_i32 l;
3734
3735    /* The length must be even and must not exceed 64 bytes.  */
3736    if ((l2 & 1) || (l2 > 64)) {
3737        gen_program_exception(s, PGM_SPECIFICATION);
3738        return DISAS_NORETURN;
3739    }
3740    l = tcg_const_i32(l2);
3741    gen_helper_pku(cpu_env, o->addr1, o->in2, l);
3742    tcg_temp_free_i32(l);
3743    return DISAS_NEXT;
3744}
3745
3746static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o)
3747{
3748    gen_helper_popcnt(o->out, o->in2);
3749    return DISAS_NEXT;
3750}
3751
3752#ifndef CONFIG_USER_ONLY
3753static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
3754{
3755    gen_helper_ptlb(cpu_env);
3756    return DISAS_NEXT;
3757}
3758#endif
3759
3760static DisasJumpType op_risbg(DisasContext *s, DisasOps *o)
3761{
3762    int i3 = get_field(s->fields, i3);
3763    int i4 = get_field(s->fields, i4);
3764    int i5 = get_field(s->fields, i5);
3765    int do_zero = i4 & 0x80;
3766    uint64_t mask, imask, pmask;
3767    int pos, len, rot;
3768
3769    /* Adjust the arguments for the specific insn.  */
3770    switch (s->fields->op2) {
3771    case 0x55: /* risbg */
3772    case 0x59: /* risbgn */
3773        i3 &= 63;
3774        i4 &= 63;
3775        pmask = ~0;
3776        break;
3777    case 0x5d: /* risbhg */
3778        i3 &= 31;
3779        i4 &= 31;
3780        pmask = 0xffffffff00000000ull;
3781        break;
3782    case 0x51: /* risblg */
3783        i3 &= 31;
3784        i4 &= 31;
3785        pmask = 0x00000000ffffffffull;
3786        break;
3787    default:
3788        g_assert_not_reached();
3789    }
3790
3791    /* MASK is the set of bits to be inserted from R2.
3792       Take care for I3/I4 wraparound.  */
3793    mask = pmask >> i3;
3794    if (i3 <= i4) {
3795        mask ^= pmask >> i4 >> 1;
3796    } else {
3797        mask |= ~(pmask >> i4 >> 1);
3798    }
3799    mask &= pmask;
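    /* Illustration with made-up operands: risbg with I3=40, I4=43, I5=0
       yields mask = 0x0000000000f00000 (bits 40-43 in IBM bit numbering);
       with I3 > I4 the selected field instead wraps around bit 63.  */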
3800
3801    /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
3802       insns, we need to keep the other half of the register.  */
3803    imask = ~mask | ~pmask;
3804    if (do_zero) {
3805        imask = ~pmask;
3806    }
3807
3808    len = i4 - i3 + 1;
3809    pos = 63 - i4;
3810    rot = i5 & 63;
3811    if (s->fields->op2 == 0x5d) {
3812        pos += 32;
3813    }
3814
3815    /* In some cases we can implement this with extract.  */
3816    if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
3817        tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
3818        return DISAS_NEXT;
3819    }
3820
3821    /* In some cases we can implement this with deposit.  */
3822    if (len > 0 && (imask == 0 || ~mask == imask)) {
3823        /* Note that we rotate the bits to be inserted to the lsb, not to
3824           the position as described in the PoO.  */
3825        rot = (rot - pos) & 63;
3826    } else {
3827        pos = -1;
3828    }
3829
3830    /* Rotate the input as necessary.  */
3831    tcg_gen_rotli_i64(o->in2, o->in2, rot);
3832
3833    /* Insert the selected bits into the output.  */
3834    if (pos >= 0) {
3835        if (imask == 0) {
3836            tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
3837        } else {
3838            tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
3839        }
3840    } else if (imask == 0) {
3841        tcg_gen_andi_i64(o->out, o->in2, mask);
3842    } else {
3843        tcg_gen_andi_i64(o->in2, o->in2, mask);
3844        tcg_gen_andi_i64(o->out, o->out, imask);
3845        tcg_gen_or_i64(o->out, o->out, o->in2);
3846    }
3847    return DISAS_NEXT;
3848}
3849
3850static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
3851{
3852    int i3 = get_field(s->fields, i3);
3853    int i4 = get_field(s->fields, i4);
3854    int i5 = get_field(s->fields, i5);
3855    uint64_t mask;
3856
3857    /* If this is a test-only form, arrange to discard the result.  */
3858    if (i3 & 0x80) {
3859        o->out = tcg_temp_new_i64();
3860        o->g_out = false;
3861    }
3862
3863    i3 &= 63;
3864    i4 &= 63;
3865    i5 &= 63;
3866
3867    /* MASK is the set of bits to be operated on from R2.
3868       Take care for I3/I4 wraparound.  */
3869    mask = ~0ull >> i3;
3870    if (i3 <= i4) {
3871        mask ^= ~0ull >> i4 >> 1;
3872    } else {
3873        mask |= ~(~0ull >> i4 >> 1);
3874    }
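    /* As in op_risbg, I3 > I4 selects a bit range that wraps around bit 63. */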
3875
3876    /* Rotate the input as necessary.  */
3877    tcg_gen_rotli_i64(o->in2, o->in2, i5);
3878
3879    /* Operate.  */
3880    switch (s->fields->op2) {
3881    case 0x55: /* AND */
3882        tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3883        tcg_gen_and_i64(o->out, o->out, o->in2);
3884        break;
3885    case 0x56: /* OR */
3886        tcg_gen_andi_i64(o->in2, o->in2, mask);
3887        tcg_gen_or_i64(o->out, o->out, o->in2);
3888        break;
3889    case 0x57: /* XOR */
3890        tcg_gen_andi_i64(o->in2, o->in2, mask);
3891        tcg_gen_xor_i64(o->out, o->out, o->in2);
3892        break;
3893    default:
3894        abort();
3895    }
3896
3897    /* Set the CC.  */
3898    tcg_gen_andi_i64(cc_dst, o->out, mask);
3899    set_cc_nz_u64(s, cc_dst);
3900    return DISAS_NEXT;
3901}
3902
3903static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
3904{
3905    tcg_gen_bswap16_i64(o->out, o->in2);
3906    return DISAS_NEXT;
3907}
3908
3909static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
3910{
3911    tcg_gen_bswap32_i64(o->out, o->in2);
3912    return DISAS_NEXT;
3913}
3914
3915static DisasJumpType op_rev64(DisasContext *s, DisasOps *o)
3916{
3917    tcg_gen_bswap64_i64(o->out, o->in2);
3918    return DISAS_NEXT;
3919}
3920
3921static DisasJumpType op_rll32(DisasContext *s, DisasOps *o)
3922{
3923    TCGv_i32 t1 = tcg_temp_new_i32();
3924    TCGv_i32 t2 = tcg_temp_new_i32();
3925    TCGv_i32 to = tcg_temp_new_i32();
3926    tcg_gen_extrl_i64_i32(t1, o->in1);
3927    tcg_gen_extrl_i64_i32(t2, o->in2);
3928    tcg_gen_rotl_i32(to, t1, t2);
3929    tcg_gen_extu_i32_i64(o->out, to);
3930    tcg_temp_free_i32(t1);
3931    tcg_temp_free_i32(t2);
3932    tcg_temp_free_i32(to);
3933    return DISAS_NEXT;
3934}
3935
3936static DisasJumpType op_rll64(DisasContext *s, DisasOps *o)
3937{
3938    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
3939    return DISAS_NEXT;
3940}
3941
3942#ifndef CONFIG_USER_ONLY
3943static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
3944{
3945    gen_helper_rrbe(cc_op, cpu_env, o->in2);
3946    set_cc_static(s);
3947    return DISAS_NEXT;
3948}
3949
3950static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
3951{
3952    gen_helper_sacf(cpu_env, o->in2);
3953    /* Addressing mode has changed, so end the block.  */
3954    return DISAS_PC_STALE;
3955}
3956#endif
3957
3958static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
3959{
3960    int sam = s->insn->data;
3961    TCGv_i64 tsam;
3962    uint64_t mask;
3963
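    /* insn->data selects the new addressing mode: 0 for 24-bit, 1 for
       31-bit, 3 for 64-bit; the mask bounds the addresses valid in it.  */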
3964    switch (sam) {
3965    case 0:
3966        mask = 0xffffff;
3967        break;
3968    case 1:
3969        mask = 0x7fffffff;
3970        break;
3971    default:
3972        mask = -1;
3973        break;
3974    }
3975
3976    /* Bizarre but true, we check the address of the current insn for the
3977       specification exception, not the next to be executed.  Thus the PoO
3978       documents that Bad Things Happen two bytes before the end.  */
3979    if (s->base.pc_next & ~mask) {
3980        gen_program_exception(s, PGM_SPECIFICATION);
3981        return DISAS_NORETURN;
3982    }
3983    s->pc_tmp &= mask;
3984
3985    tsam = tcg_const_i64(sam);
3986    tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
3987    tcg_temp_free_i64(tsam);
3988
3989    /* Always exit the TB, since we (may have) changed execution mode.  */
3990    return DISAS_PC_STALE;
3991}
3992
3993static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
3994{
3995    int r1 = get_field(s->fields, r1);
3996    tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
3997    return DISAS_NEXT;
3998}
3999
4000static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
4001{
4002    gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
4003    return DISAS_NEXT;
4004}
4005
4006static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
4007{
4008    gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
4009    return DISAS_NEXT;
4010}
4011
4012static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
4013{
4014    gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
4015    return_low128(o->out2);
4016    return DISAS_NEXT;
4017}
4018
4019static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
4020{
4021    gen_helper_sqeb(o->out, cpu_env, o->in2);
4022    return DISAS_NEXT;
4023}
4024
4025static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
4026{
4027    gen_helper_sqdb(o->out, cpu_env, o->in2);
4028    return DISAS_NEXT;
4029}
4030
4031static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o)
4032{
4033    gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
4034    return_low128(o->out2);
4035    return DISAS_NEXT;
4036}
4037
4038#ifndef CONFIG_USER_ONLY
4039static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
4040{
4041    gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
4042    set_cc_static(s);
4043    return DISAS_NEXT;
4044}
4045
4046static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
4047{
4048    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4049    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
4050    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3);
4051    set_cc_static(s);
4052    tcg_temp_free_i32(r1);
4053    tcg_temp_free_i32(r3);
4054    return DISAS_NEXT;
4055}
4056#endif
4057
4058static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
4059{
4060    DisasCompare c;
4061    TCGv_i64 a, h;
4062    TCGLabel *lab;
4063    int r1;
4064
4065    disas_jcc(s, &c, get_field(s->fields, m3));
4066
4067    /* We want to store when the condition is fulfilled, so branch
4068       out when it's not.  */
4069    c.cond = tcg_invert_cond(c.cond);
4070
4071    lab = gen_new_label();
4072    if (c.is_64) {
4073        tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
4074    } else {
4075        tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
4076    }
4077    free_compare(&c);
4078
4079    r1 = get_field(s->fields, r1);
4080    a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
4081    switch (s->insn->data) {
4082    case 1: /* STOCG */
4083        tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
4084        break;
4085    case 0: /* STOC */
4086        tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
4087        break;
4088    case 2: /* STOCFH */
4089        h = tcg_temp_new_i64();
4090        tcg_gen_shri_i64(h, regs[r1], 32);
4091        tcg_gen_qemu_st32(h, a, get_mem_index(s));
4092        tcg_temp_free_i64(h);
4093        break;
4094    default:
4095        g_assert_not_reached();
4096    }
4097    tcg_temp_free_i64(a);
4098
4099    gen_set_label(lab);
4100    return DISAS_NEXT;
4101}
4102
4103static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
4104{
4105    uint64_t sign = 1ull << s->insn->data;
4106    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
4107    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
4108    tcg_gen_shl_i64(o->out, o->in1, o->in2);
4109    /* The arithmetic left shift is curious in that it does not affect
4110       the sign bit.  Copy that over from the source unchanged.  */
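    /* E.g. for the 64-bit form (insn->data == 63), sign == 1ull << 63 and
       the merge below is out = (shifted & ~sign) | (in1 & sign).  */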
4111    tcg_gen_andi_i64(o->out, o->out, ~sign);
4112    tcg_gen_andi_i64(o->in1, o->in1, sign);
4113    tcg_gen_or_i64(o->out, o->out, o->in1);
4114    return DISAS_NEXT;
4115}
4116
4117static DisasJumpType op_sll(DisasContext *s, DisasOps *o)
4118{
4119    tcg_gen_shl_i64(o->out, o->in1, o->in2);
4120    return DISAS_NEXT;
4121}
4122
4123static DisasJumpType op_sra(DisasContext *s, DisasOps *o)
4124{
4125    tcg_gen_sar_i64(o->out, o->in1, o->in2);
4126    return DISAS_NEXT;
4127}
4128
4129static DisasJumpType op_srl(DisasContext *s, DisasOps *o)
4130{
4131    tcg_gen_shr_i64(o->out, o->in1, o->in2);
4132    return DISAS_NEXT;
4133}
4134
4135static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
4136{
4137    gen_helper_sfpc(cpu_env, o->in2);
4138    return DISAS_NEXT;
4139}
4140
4141static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
4142{
4143    gen_helper_sfas(cpu_env, o->in2);
4144    return DISAS_NEXT;
4145}
4146
4147static DisasJumpType op_srnm(DisasContext *s, DisasOps *o)
4148{
4149    /* Bits other than 62 and 63 are ignored. Bit 29 is set to zero. */
4150    tcg_gen_andi_i64(o->addr1, o->addr1, 0x3ull);
4151    gen_helper_srnm(cpu_env, o->addr1);
4152    return DISAS_NEXT;
4153}
4154
4155static DisasJumpType op_srnmb(DisasContext *s, DisasOps *o)
4156{
4157    /* Bits 0-55 are ignored. */
4158    tcg_gen_andi_i64(o->addr1, o->addr1, 0xffull);
4159    gen_helper_srnm(cpu_env, o->addr1);
4160    return DISAS_NEXT;
4161}
4162
4163static DisasJumpType op_srnmt(DisasContext *s, DisasOps *o)
4164{
4165    TCGv_i64 tmp = tcg_temp_new_i64();
4166
4167    /* Bits other than 61-63 are ignored. */
4168    tcg_gen_andi_i64(o->addr1, o->addr1, 0x7ull);
4169
4170    /* No need to call a helper, we don't implement DFP.  */
4171    tcg_gen_ld32u_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
4172    tcg_gen_deposit_i64(tmp, tmp, o->addr1, 4, 3);
4173    tcg_gen_st32_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
4174
4175    tcg_temp_free_i64(tmp);
4176    return DISAS_NEXT;
4177}
4178
4179static DisasJumpType op_spm(DisasContext *s, DisasOps *o)
4180{
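    /* SET PROGRAM MASK: bits 2-3 of r1 (bits 29:28 counting from the lsb)
       become the CC; bits 4-7 (27:24) become the program mask.  */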
4181    tcg_gen_extrl_i64_i32(cc_op, o->in1);
4182    tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
4183    set_cc_static(s);
4184
4185    tcg_gen_shri_i64(o->in1, o->in1, 24);
4186    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
4187    return DISAS_NEXT;
4188}
4189
4190static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
4191{
4192    int b1 = get_field(s->fields, b1);
4193    int d1 = get_field(s->fields, d1);
4194    int b2 = get_field(s->fields, b2);
4195    int d2 = get_field(s->fields, d2);
4196    int r3 = get_field(s->fields, r3);
4197    TCGv_i64 tmp = tcg_temp_new_i64();
4198
4199    /* fetch all operands first */
4200    o->in1 = tcg_temp_new_i64();
4201    tcg_gen_addi_i64(o->in1, regs[b1], d1);
4202    o->in2 = tcg_temp_new_i64();
4203    tcg_gen_addi_i64(o->in2, regs[b2], d2);
4204    o->addr1 = get_address(s, 0, r3, 0);
4205
4206    /* load the third operand into r3 before modifying anything */
4207    tcg_gen_qemu_ld64(regs[r3], o->addr1, get_mem_index(s));
4208
4209    /* subtract CPU timer from first operand and store in GR0 */
4210    gen_helper_stpt(tmp, cpu_env);
4211    tcg_gen_sub_i64(regs[0], o->in1, tmp);
4212
4213    /* store second operand in GR1 */
4214    tcg_gen_mov_i64(regs[1], o->in2);
4215
4216    tcg_temp_free_i64(tmp);
4217    return DISAS_NEXT;
4218}
4219
4220#ifndef CONFIG_USER_ONLY
4221static DisasJumpType op_spka(DisasContext *s, DisasOps *o)
4222{
4223    tcg_gen_shri_i64(o->in2, o->in2, 4);
4224    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
4225    return DISAS_NEXT;
4226}
4227
4228static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
4229{
4230    gen_helper_sske(cpu_env, o->in1, o->in2);
4231    return DISAS_NEXT;
4232}
4233
4234static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
4235{
4236    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
4237    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
4238    return DISAS_PC_STALE_NOCHAIN;
4239}
4240
4241static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
4242{
4243    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, core_id));
4244    return DISAS_NEXT;
4245}
4246#endif
4247
4248static DisasJumpType op_stck(DisasContext *s, DisasOps *o)
4249{
4250    gen_helper_stck(o->out, cpu_env);
4251    /* ??? We don't implement clock states.  */
4252    gen_op_movi_cc(s, 0);
4253    return DISAS_NEXT;
4254}
4255
4256static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
4257{
4258    TCGv_i64 c1 = tcg_temp_new_i64();
4259    TCGv_i64 c2 = tcg_temp_new_i64();
4260    TCGv_i64 todpr = tcg_temp_new_i64();
4261    gen_helper_stck(c1, cpu_env);
4262    /* 16-bit value stored in a uint32_t (only valid bits set) */
4263    tcg_gen_ld32u_i64(todpr, cpu_env, offsetof(CPUS390XState, todpr));
4264    /* Shift the 64-bit value into its place as a zero-extended
4265       104-bit value.  Note that "bit positions 64-103 are always
4266       non-zero so that they compare differently to STCK"; we set
4267       the least significant bit to 1.  */
4268    tcg_gen_shli_i64(c2, c1, 56);
4269    tcg_gen_shri_i64(c1, c1, 8);
4270    tcg_gen_ori_i64(c2, c2, 0x10000);
4271    tcg_gen_or_i64(c2, c2, todpr);
4272    tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
4273    tcg_gen_addi_i64(o->in2, o->in2, 8);
4274    tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
4275    tcg_temp_free_i64(c1);
4276    tcg_temp_free_i64(c2);
4277    tcg_temp_free_i64(todpr);
4278    /* ??? We don't implement clock states.  */
4279    gen_op_movi_cc(s, 0);
4280    return DISAS_NEXT;
4281}
4282
4283#ifndef CONFIG_USER_ONLY
4284static DisasJumpType op_sck(DisasContext *s, DisasOps *o)
4285{
4286    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
4287    gen_helper_sck(cc_op, cpu_env, o->in1);
4288    set_cc_static(s);
4289    return DISAS_NEXT;
4290}
4291
4292static DisasJumpType op_sckc(DisasContext *s, DisasOps *o)
4293{
4294    gen_helper_sckc(cpu_env, o->in2);
4295    return DISAS_NEXT;
4296}
4297
4298static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o)
4299{
4300    gen_helper_sckpf(cpu_env, regs[0]);
4301    return DISAS_NEXT;
4302}
4303
4304static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
4305{
4306    gen_helper_stckc(o->out, cpu_env);
4307    return DISAS_NEXT;
4308}
4309
4310static DisasJumpType op_stctg(DisasContext *s, DisasOps *o)
4311{
4312    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4313    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
4314    gen_helper_stctg(cpu_env, r1, o->in2, r3);
4315    tcg_temp_free_i32(r1);
4316    tcg_temp_free_i32(r3);
4317    return DISAS_NEXT;
4318}
4319
4320static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
4321{
4322    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4323    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
4324    gen_helper_stctl(cpu_env, r1, o->in2, r3);
4325    tcg_temp_free_i32(r1);
4326    tcg_temp_free_i32(r3);
4327    return DISAS_NEXT;
4328}
4329
4330static DisasJumpType op_stidp(DisasContext *s, DisasOps *o)
4331{
4332    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));
4333    return DISAS_NEXT;
4334}
4335
4336static DisasJumpType op_spt(DisasContext *s, DisasOps *o)
4337{
4338    gen_helper_spt(cpu_env, o->in2);
4339    return DISAS_NEXT;
4340}
4341
4342static DisasJumpType op_stfl(DisasContext *s, DisasOps *o)
4343{
4344    gen_helper_stfl(cpu_env);
4345    return DISAS_NEXT;
4346}
4347
4348static DisasJumpType op_stpt(DisasContext *s, DisasOps *o)
4349{
4350    gen_helper_stpt(o->out, cpu_env);
4351    return DISAS_NEXT;
4352}
4353
4354static DisasJumpType op_stsi(DisasContext *s, DisasOps *o)
4355{
4356    gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
4357    set_cc_static(s);
4358    return DISAS_NEXT;
4359}
4360
4361static DisasJumpType op_spx(DisasContext *s, DisasOps *o)
4362{
4363    gen_helper_spx(cpu_env, o->in2);
4364    return DISAS_NEXT;
4365}
4366
4367static DisasJumpType op_xsch(DisasContext *s, DisasOps *o)
4368{
4369    gen_helper_xsch(cpu_env, regs[1]);
4370    set_cc_static(s);
4371    return DISAS_NEXT;
4372}
4373
4374static DisasJumpType op_csch(DisasContext *s, DisasOps *o)
4375{
4376    gen_helper_csch(cpu_env, regs[1]);
4377    set_cc_static(s);
4378    return DISAS_NEXT;
4379}
4380
4381static DisasJumpType op_hsch(DisasContext *s, DisasOps *o)
4382{
4383    gen_helper_hsch(cpu_env, regs[1]);
4384    set_cc_static(s);
4385    return DISAS_NEXT;
4386}
4387
4388static DisasJumpType op_msch(DisasContext *s, DisasOps *o)
4389{
4390    gen_helper_msch(cpu_env, regs[1], o->in2);
4391    set_cc_static(s);
4392    return DISAS_NEXT;
4393}
4394
4395static DisasJumpType op_rchp(DisasContext *s, DisasOps *o)
4396{
4397    gen_helper_rchp(cpu_env, regs[1]);
4398    set_cc_static(s);
4399    return DISAS_NEXT;
4400}
4401
4402static DisasJumpType op_rsch(DisasContext *s, DisasOps *o)
4403{
4404    gen_helper_rsch(cpu_env, regs[1]);
4405    set_cc_static(s);
4406    return DISAS_NEXT;
4407}
4408
4409static DisasJumpType op_sal(DisasContext *s, DisasOps *o)
4410{
4411    gen_helper_sal(cpu_env, regs[1]);
4412    return DISAS_NEXT;
4413}
4414
4415static DisasJumpType op_schm(DisasContext *s, DisasOps *o)
4416{
4417    gen_helper_schm(cpu_env, regs[1], regs[2], o->in2);
4418    return DISAS_NEXT;
4419}
4420
4421static DisasJumpType op_siga(DisasContext *s, DisasOps *o)
4422{
4423    /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
4424    gen_op_movi_cc(s, 3);
4425    return DISAS_NEXT;
4426}
4427
4428static DisasJumpType op_stcps(DisasContext *s, DisasOps *o)
4429{
4430    /* The instruction is suppressed if not provided. */
4431    return DISAS_NEXT;
4432}
4433
4434static DisasJumpType op_ssch(DisasContext *s, DisasOps *o)
4435{
4436    gen_helper_ssch(cpu_env, regs[1], o->in2);
4437    set_cc_static(s);
4438    return DISAS_NEXT;
4439}
4440
4441static DisasJumpType op_stsch(DisasContext *s, DisasOps *o)
4442{
4443    gen_helper_stsch(cpu_env, regs[1], o->in2);
4444    set_cc_static(s);
4445    return DISAS_NEXT;
4446}
4447
4448static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o)
4449{
4450    gen_helper_stcrw(cpu_env, o->in2);
4451    set_cc_static(s);
4452    return DISAS_NEXT;
4453}
4454
4455static DisasJumpType op_tpi(DisasContext *s, DisasOps *o)
4456{
4457    gen_helper_tpi(cc_op, cpu_env, o->addr1);
4458    set_cc_static(s);
4459    return DISAS_NEXT;
4460}
4461
4462static DisasJumpType op_tsch(DisasContext *s, DisasOps *o)
4463{
4464    gen_helper_tsch(cpu_env, regs[1], o->in2);
4465    set_cc_static(s);
4466    return DISAS_NEXT;
4467}
4468
4469static DisasJumpType op_chsc(DisasContext *s, DisasOps *o)
4470{
4471    gen_helper_chsc(cpu_env, o->in2);
4472    set_cc_static(s);
4473    return DISAS_NEXT;
4474}
4475
4476static DisasJumpType op_stpx(DisasContext *s, DisasOps *o)
4477{
4478    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
4479    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
4480    return DISAS_NEXT;
4481}
4482
4483static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
4484{
4485    uint64_t i2 = get_field(s->fields, i2);
4486    TCGv_i64 t;
4487
4488    /* It is important to do what the instruction name says: STORE THEN.
4489       If we let the output hook perform the store, then after a fault
4490       and restart we would have the wrong SYSTEM MASK in place.  */
4491    t = tcg_temp_new_i64();
4492    tcg_gen_shri_i64(t, psw_mask, 56);
4493    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
4494    tcg_temp_free_i64(t);
4495
4496    if (s->fields->op == 0xac) {
4497        tcg_gen_andi_i64(psw_mask, psw_mask,
4498                         (i2 << 56) | 0x00ffffffffffffffull);
4499    } else {
4500        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
4501    }
4502
4503    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
4504    return DISAS_PC_STALE_NOCHAIN;
4505}
4506
4507static DisasJumpType op_stura(DisasContext *s, DisasOps *o)
4508{
4509    gen_helper_stura(cpu_env, o->in2, o->in1);
4510    return DISAS_NEXT;
4511}
4512
4513static DisasJumpType op_sturg(DisasContext *s, DisasOps *o)
4514{
4515    gen_helper_sturg(cpu_env, o->in2, o->in1);
4516    return DISAS_NEXT;
4517}
4518#endif
4519
4520static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
4521{
4522    gen_helper_stfle(cc_op, cpu_env, o->in2);
4523    set_cc_static(s);
4524    return DISAS_NEXT;
4525}
4526
4527static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
4528{
4529    tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
4530    return DISAS_NEXT;
4531}
4532
4533static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
4534{
4535    tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
4536    return DISAS_NEXT;
4537}
4538
4539static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
4540{
4541    tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
4542    return DISAS_NEXT;
4543}
4544
4545static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
4546{
4547    tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
4548    return DISAS_NEXT;
4549}
4550
4551static DisasJumpType op_stam(DisasContext *s, DisasOps *o)
4552{
4553    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4554    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
4555    gen_helper_stam(cpu_env, r1, o->in2, r3);
4556    tcg_temp_free_i32(r1);
4557    tcg_temp_free_i32(r3);
4558    return DISAS_NEXT;
4559}
4560
4561static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
4562{
4563    int m3 = get_field(s->fields, m3);
4564    int pos, base = s->insn->data;
4565    TCGv_i64 tmp = tcg_temp_new_i64();
4566
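    /* A contiguous mask collapses into a single 4/2/1-byte store below;
       any other mask emits one byte store per set bit.  */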
4567    pos = base + ctz32(m3) * 8;
4568    switch (m3) {
4569    case 0xf:
4570        /* Effectively a 32-bit store.  */
4571        tcg_gen_shri_i64(tmp, o->in1, pos);
4572        tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
4573        break;
4574
4575    case 0xc:
4576    case 0x6:
4577    case 0x3:
4578        /* Effectively a 16-bit store.  */
4579        tcg_gen_shri_i64(tmp, o->in1, pos);
4580        tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
4581        break;
4582
4583    case 0x8:
4584    case 0x4:
4585    case 0x2:
4586    case 0x1:
4587        /* Effectively an 8-bit store.  */
4588        tcg_gen_shri_i64(tmp, o->in1, pos);
4589        tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
4590        break;
4591
4592    default:
4593        /* This is going to be a sequence of shifts and stores.  */
4594        pos = base + 32 - 8;
4595        while (m3) {
4596            if (m3 & 0x8) {
4597                tcg_gen_shri_i64(tmp, o->in1, pos);
4598                tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
4599                tcg_gen_addi_i64(o->in2, o->in2, 1);
4600            }
4601            m3 = (m3 << 1) & 0xf;
4602            pos -= 8;
4603        }
4604        break;
4605    }
4606    tcg_temp_free_i64(tmp);
4607    return DISAS_NEXT;
4608}
4609
4610static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
4611{
4612    int r1 = get_field(s->fields, r1);
4613    int r3 = get_field(s->fields, r3);
4614    int size = s->insn->data;
4615    TCGv_i64 tsize = tcg_const_i64(size);
4616
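    /* Store regs r1 through r3 (wrapping modulo 16) into consecutive
       slots of insn->data (4 or 8) bytes each.  */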
4617    while (1) {
4618        if (size == 8) {
4619            tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
4620        } else {
4621            tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
4622        }
4623        if (r1 == r3) {
4624            break;
4625        }
4626        tcg_gen_add_i64(o->in2, o->in2, tsize);
4627        r1 = (r1 + 1) & 15;
4628    }
4629
4630    tcg_temp_free_i64(tsize);
4631    return DISAS_NEXT;
4632}
4633
4634static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
4635{
4636    int r1 = get_field(s->fields, r1);
4637    int r3 = get_field(s->fields, r3);
4638    TCGv_i64 t = tcg_temp_new_i64();
4639    TCGv_i64 t4 = tcg_const_i64(4);
4640    TCGv_i64 t32 = tcg_const_i64(32);
4641
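    /* STMH stores the high (leftmost) word of each register, hence the
       right shift by 32 before each 32-bit store.  */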
4642    while (1) {
4643        tcg_gen_shr_i64(t, regs[r1], t32);
4644        tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
4645        if (r1 == r3) {
4646            break;
4647        }
4648        tcg_gen_add_i64(o->in2, o->in2, t4);
4649        r1 = (r1 + 1) & 15;
4650    }
4651
4652    tcg_temp_free_i64(t);
4653    tcg_temp_free_i64(t4);
4654    tcg_temp_free_i64(t32);
4655    return DISAS_NEXT;
4656}
4657
4658static DisasJumpType op_stpq(DisasContext *s, DisasOps *o)
4659{
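    /* The 16-byte store must appear atomic to other CPUs; lacking a
       128-bit atomic primitive in a parallel context, exit to the slow
       path so the operation can be redone in an exclusive section.  */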
4660    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
4661        gen_helper_stpq(cpu_env, o->in2, o->out2, o->out);
4662    } else if (HAVE_ATOMIC128) {
4663        gen_helper_stpq_parallel(cpu_env, o->in2, o->out2, o->out);
4664    } else {
4665        gen_helper_exit_atomic(cpu_env);
4666        return DISAS_NORETURN;
4667    }
4668    return DISAS_NEXT;
4669}
4670
4671static DisasJumpType op_srst(DisasContext *s, DisasOps *o)
4672{
4673    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4674    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4675
4676    gen_helper_srst(cpu_env, r1, r2);
4677
4678    tcg_temp_free_i32(r1);
4679    tcg_temp_free_i32(r2);
4680    set_cc_static(s);
4681    return DISAS_NEXT;
4682}
4683
4684static DisasJumpType op_srstu(DisasContext *s, DisasOps *o)
4685{
4686    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4687    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4688
4689    gen_helper_srstu(cpu_env, r1, r2);
4690
4691    tcg_temp_free_i32(r1);
4692    tcg_temp_free_i32(r2);
4693    set_cc_static(s);
4694    return DISAS_NEXT;
4695}
4696
4697static DisasJumpType op_sub(DisasContext *s, DisasOps *o)
4698{
4699    tcg_gen_sub_i64(o->out, o->in1, o->in2);
4700    return DISAS_NEXT;
4701}
4702
4703static DisasJumpType op_subb(DisasContext *s, DisasOps *o)
4704{
4705    DisasCompare cmp;
4706    TCGv_i64 borrow;
4707
4708    tcg_gen_sub_i64(o->out, o->in1, o->in2);
4709
4710    /* The !borrow flag is the msb of CC.  Since we want the inverse of
4711       that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4.  */
4712    disas_jcc(s, &cmp, 8 | 4);
4713    borrow = tcg_temp_new_i64();
4714    if (cmp.is_64) {
4715        tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
4716    } else {
4717        TCGv_i32 t = tcg_temp_new_i32();
4718        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
4719        tcg_gen_extu_i32_i64(borrow, t);
4720        tcg_temp_free_i32(t);
4721    }
4722    free_compare(&cmp);
4723
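    /* out = in1 - in2 - borrow, with borrow 1 iff the previous CC was 0 or 1. */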
4724    tcg_gen_sub_i64(o->out, o->out, borrow);
4725    tcg_temp_free_i64(borrow);
4726    return DISAS_NEXT;
4727}
4728
4729static DisasJumpType op_svc(DisasContext *s, DisasOps *o)
4730{
4731    TCGv_i32 t;
4732
4733    update_psw_addr(s);
4734    update_cc_op(s);
4735
4736    t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
4737    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
4738    tcg_temp_free_i32(t);
4739
4740    t = tcg_const_i32(s->ilen);
4741    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
4742    tcg_temp_free_i32(t);
4743
4744    gen_exception(EXCP_SVC);
4745    return DISAS_NORETURN;
4746}
4747
4748static DisasJumpType op_tam(DisasContext *s, DisasOps *o)
4749{
4750    int cc = 0;
4751
4752    cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0;
4753    cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0;
4754    gen_op_movi_cc(s, cc);
4755    return DISAS_NEXT;
4756}
4757
4758static DisasJumpType op_tceb(DisasContext *s, DisasOps *o)
4759{
4760    gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
4761    set_cc_static(s);
4762    return DISAS_NEXT;
4763}
4764
4765static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o)
4766{
4767    gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
4768    set_cc_static(s);
4769    return DISAS_NEXT;
4770}
4771
4772static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o)
4773{
4774    gen_helper_tcxb(cc_op, cpu_env, o->out, o->out2, o->in2);
4775    set_cc_static(s);
4776    return DISAS_NEXT;
4777}
4778
4779#ifndef CONFIG_USER_ONLY
4780
4781static DisasJumpType op_testblock(DisasContext *s, DisasOps *o)
4782{
4783    gen_helper_testblock(cc_op, cpu_env, o->in2);
4784    set_cc_static(s);
4785    return DISAS_NEXT;
4786}
4787
4788static DisasJumpType op_tprot(DisasContext *s, DisasOps *o)
4789{
4790    gen_helper_tprot(cc_op, cpu_env, o->addr1, o->in2);
4791    set_cc_static(s);
4792    return DISAS_NEXT;
4793}
4794
4795#endif
4796
4797static DisasJumpType op_tp(DisasContext *s, DisasOps *o)
4798{
4799    TCGv_i32 l1 = tcg_const_i32(get_field(s->fields, l1) + 1);
4800    gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
4801    tcg_temp_free_i32(l1);
4802    set_cc_static(s);
4803    return DISAS_NEXT;
4804}
4805
4806static DisasJumpType op_tr(DisasContext *s, DisasOps *o)
4807{
4808    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4809    gen_helper_tr(cpu_env, l, o->addr1, o->in2);
4810    tcg_temp_free_i32(l);
4811    set_cc_static(s);
4812    return DISAS_NEXT;
4813}
4814
4815static DisasJumpType op_tre(DisasContext *s, DisasOps *o)
4816{
4817    gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
4818    return_low128(o->out2);
4819    set_cc_static(s);
4820    return DISAS_NEXT;
4821}
4822
4823static DisasJumpType op_trt(DisasContext *s, DisasOps *o)
4824{
4825    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4826    gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
4827    tcg_temp_free_i32(l);
4828    set_cc_static(s);
4829    return DISAS_NEXT;
4830}
4831
4832static DisasJumpType op_trtr(DisasContext *s, DisasOps *o)
4833{
4834    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4835    gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2);
4836    tcg_temp_free_i32(l);
4837    set_cc_static(s);
4838    return DISAS_NEXT;
4839}
4840
4841static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
4842{
4843    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4844    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4845    TCGv_i32 sizes = tcg_const_i32(s->insn->opc & 3);
4846    TCGv_i32 tst = tcg_temp_new_i32();
4847    int m3 = get_field(s->fields, m3);
4848
4849    if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
4850        m3 = 0;
4851    }
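    /* When ETF2-ENH is installed and m3 bit 0 is set, no test character
       applies; tst = -1 can never match a zero-extended character.  */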
4852    if (m3 & 1) {
4853        tcg_gen_movi_i32(tst, -1);
4854    } else {
4855        tcg_gen_extrl_i64_i32(tst, regs[0]);
4856        if (s->insn->opc & 3) {
4857            tcg_gen_ext8u_i32(tst, tst);
4858        } else {
4859            tcg_gen_ext16u_i32(tst, tst);
4860        }
4861    }
4862    gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);
4863
4864    tcg_temp_free_i32(r1);
4865    tcg_temp_free_i32(r2);
4866    tcg_temp_free_i32(sizes);
4867    tcg_temp_free_i32(tst);
4868    set_cc_static(s);
4869    return DISAS_NEXT;
4870}
4871
4872static DisasJumpType op_ts(DisasContext *s, DisasOps *o)
4873{
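    /* TEST AND SET: atomically exchange the byte with 0xff and derive the
       CC from bit 7, the former leftmost bit of that byte.  */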
4874    TCGv_i32 t1 = tcg_const_i32(0xff);
4875    tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
4876    tcg_gen_extract_i32(cc_op, t1, 7, 1);
4877    tcg_temp_free_i32(t1);
4878    set_cc_static(s);
4879    return DISAS_NEXT;
4880}
4881
4882static DisasJumpType op_unpk(DisasContext *s, DisasOps *o)
4883{
4884    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4885    gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
4886    tcg_temp_free_i32(l);
4887    return DISAS_NEXT;
4888}
4889
4890static DisasJumpType op_unpka(DisasContext *s, DisasOps *o)
4891{
4892    int l1 = get_field(s->fields, l1) + 1;
4893    TCGv_i32 l;
4894
4895    /* The length must not exceed 32 bytes.  */
4896    if (l1 > 32) {
4897        gen_program_exception(s, PGM_SPECIFICATION);
4898        return DISAS_NORETURN;
4899    }
4900    l = tcg_const_i32(l1);
4901    gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
4902    tcg_temp_free_i32(l);
4903    set_cc_static(s);
4904    return DISAS_NEXT;
4905}
4906
4907static DisasJumpType op_unpku(DisasContext *s, DisasOps *o)
4908{
4909    int l1 = get_field(s->fields, l1) + 1;
4910    TCGv_i32 l;
4911
4912    /* The length must be even and must not exceed 64 bytes.  */
4913    if ((l1 & 1) || (l1 > 64)) {
4914        gen_program_exception(s, PGM_SPECIFICATION);
4915        return DISAS_NORETURN;
4916    }
4917    l = tcg_const_i32(l1);
4918    gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
4919    tcg_temp_free_i32(l);
4920    set_cc_static(s);
4921    return DISAS_NEXT;
4922}
4923
4924
4925static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
4926{
4927    int d1 = get_field(s->fields, d1);
4928    int d2 = get_field(s->fields, d2);
4929    int b1 = get_field(s->fields, b1);
4930    int b2 = get_field(s->fields, b2);
4931    int l = get_field(s->fields, l1);
4932    TCGv_i32 t32;
4933
4934    o->addr1 = get_address(s, 0, b1, d1);
4935
4936    /* If the addresses are identical, this is a store/memset of zero.  */
4937    if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
4938        o->in2 = tcg_const_i64(0);
4939
4940        l++;
4941        while (l >= 8) {
4942            tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
4943            l -= 8;
4944            if (l > 0) {
4945                tcg_gen_addi_i64(o->addr1, o->addr1, 8);
4946            }
4947        }
4948        if (l >= 4) {
4949            tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
4950            l -= 4;
4951            if (l > 0) {
4952                tcg_gen_addi_i64(o->addr1, o->addr1, 4);
4953            }
4954        }
4955        if (l >= 2) {
4956            tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
4957            l -= 2;
4958            if (l > 0) {
4959                tcg_gen_addi_i64(o->addr1, o->addr1, 2);
4960            }
4961        }
4962        if (l) {
4963            tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
4964        }
4965        gen_op_movi_cc(s, 0);
4966        return DISAS_NEXT;
4967    }
4968
4969    /* But in general we'll defer to a helper.  */
4970    o->in2 = get_address(s, 0, b2, d2);
4971    t32 = tcg_const_i32(l);
4972    gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
4973    tcg_temp_free_i32(t32);
4974    set_cc_static(s);
4975    return DISAS_NEXT;
4976}
4977
4978static DisasJumpType op_xor(DisasContext *s, DisasOps *o)
4979{
4980    tcg_gen_xor_i64(o->out, o->in1, o->in2);
4981    return DISAS_NEXT;
4982}
4983
4984static DisasJumpType op_xori(DisasContext *s, DisasOps *o)
4985{
4986    int shift = s->insn->data & 0xff;
4987    int size = s->insn->data >> 8;
4988    uint64_t mask = ((1ull << size) - 1) << shift;
4989
4990    assert(!o->g_in2);
4991    tcg_gen_shli_i64(o->in2, o->in2, shift);
4992    tcg_gen_xor_i64(o->out, o->in1, o->in2);
4993
4994    /* Produce the CC from only the bits manipulated.  */
4995    tcg_gen_andi_i64(cc_dst, o->out, mask);
4996    set_cc_nz_u64(s, cc_dst);
4997    return DISAS_NEXT;
4998}
4999
5000static DisasJumpType op_xi(DisasContext *s, DisasOps *o)
5001{
5002    o->in1 = tcg_temp_new_i64();
5003
5004    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
5005        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
5006    } else {
5007        /* Perform the atomic operation in memory. */
5008        tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
5009                                     s->insn->data);
5010    }
5011
5012    /* Recompute also for atomic case: needed for setting CC. */
5013    tcg_gen_xor_i64(o->out, o->in1, o->in2);
5014
5015    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
5016        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
5017    }
5018    return DISAS_NEXT;
5019}
5020
5021static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
5022{
5023    o->out = tcg_const_i64(0);
5024    return DISAS_NEXT;
5025}
5026
5027static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
5028{
5029    o->out = tcg_const_i64(0);
5030    o->out2 = o->out;
5031    o->g_out2 = true;
5032    return DISAS_NEXT;
5033}
5034
5035#ifndef CONFIG_USER_ONLY
5036static DisasJumpType op_clp(DisasContext *s, DisasOps *o)
5037{
5038    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
5039
5040    gen_helper_clp(cpu_env, r2);
5041    tcg_temp_free_i32(r2);
5042    set_cc_static(s);
5043    return DISAS_NEXT;
5044}
5045
5046static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o)
5047{
5048    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
5049    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
5050
5051    gen_helper_pcilg(cpu_env, r1, r2);
5052    tcg_temp_free_i32(r1);
5053    tcg_temp_free_i32(r2);
5054    set_cc_static(s);
5055    return DISAS_NEXT;
5056}
5057
5058static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o)
5059{
5060    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
5061    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
5062
5063    gen_helper_pcistg(cpu_env, r1, r2);
5064    tcg_temp_free_i32(r1);
5065    tcg_temp_free_i32(r2);
5066    set_cc_static(s);
5067    return DISAS_NEXT;
5068}
5069
5070static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o)
5071{
5072    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
5073    TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));
5074
5075    gen_helper_stpcifc(cpu_env, r1, o->addr1, ar);
5076    tcg_temp_free_i32(ar);
5077    tcg_temp_free_i32(r1);
5078    set_cc_static(s);
5079    return DISAS_NEXT;
5080}
5081
5082static DisasJumpType op_sic(DisasContext *s, DisasOps *o)
5083{
5084    gen_helper_sic(cpu_env, o->in1, o->in2);
5085    return DISAS_NEXT;
5086}
5087
5088static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o)
5089{
5090    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
5091    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
5092
5093    gen_helper_rpcit(cpu_env, r1, r2);
5094    tcg_temp_free_i32(r1);
5095    tcg_temp_free_i32(r2);
5096    set_cc_static(s);
5097    return DISAS_NEXT;
5098}
5099
5100static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o)
5101{
5102    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
5103    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
5104    TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));
5105
5106    gen_helper_pcistb(cpu_env, r1, r3, o->addr1, ar);
5107    tcg_temp_free_i32(ar);
5108    tcg_temp_free_i32(r1);
5109    tcg_temp_free_i32(r3);
5110    set_cc_static(s);
5111    return DISAS_NEXT;
5112}
5113
5114static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o)
5115{
5116    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
5117    TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));
5118
5119    gen_helper_mpcifc(cpu_env, r1, o->addr1, ar);
5120    tcg_temp_free_i32(ar);
5121    tcg_temp_free_i32(r1);
5122    set_cc_static(s);
5123    return DISAS_NEXT;
5124}
5125#endif
5126
5127#include "translate_vx.inc.c"
5128
5129/* ====================================================================== */
5130/* The "Cc OUTput" generators.  Given the generated output (and in some cases
5131   the original inputs), update the various cc data structures in order to
5132   be able to compute the new condition code.  */
5133
5134static void cout_abs32(DisasContext *s, DisasOps *o)
5135{
5136    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
5137}
5138
5139static void cout_abs64(DisasContext *s, DisasOps *o)
5140{
5141    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
5142}
5143
5144static void cout_adds32(DisasContext *s, DisasOps *o)
5145{
5146    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
5147}
5148
5149static void cout_adds64(DisasContext *s, DisasOps *o)
5150{
5151    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
5152}
5153
5154static void cout_addu32(DisasContext *s, DisasOps *o)
5155{
5156    gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
5157}
5158
5159static void cout_addu64(DisasContext *s, DisasOps *o)
5160{
5161    gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
5162}
5163
5164static void cout_addc32(DisasContext *s, DisasOps *o)
5165{
5166    gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
5167}
5168
5169static void cout_addc64(DisasContext *s, DisasOps *o)
5170{
5171    gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
5172}
5173
5174static void cout_cmps32(DisasContext *s, DisasOps *o)
5175{
5176    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
5177}
5178
5179static void cout_cmps64(DisasContext *s, DisasOps *o)
5180{
5181    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
5182}
5183
5184static void cout_cmpu32(DisasContext *s, DisasOps *o)